[release-17.0] [VTAdmin API] Fix schema cache flag, add documentation (#15704) (#15718)

Signed-off-by: notfelineit <notfelineit@gmail.com>
Co-authored-by: vitess-bot[bot] <108069721+vitess-bot[bot]@users.noreply.github.com>
Co-authored-by: notfelineit <notfelineit@gmail.com>
vitess-bot[bot] and notfelineit committed Apr 15, 2024
1 parent c7bc1ce commit 8aea612
Showing 4 changed files with 71 additions and 0 deletions.
21 changes: 21 additions & 0 deletions doc/vtadmin/clusters.yaml
@@ -54,3 +54,24 @@ defaults:
# - schema-read-pool => for GetSchema, GetSchemas, and FindSchema api methods
# - topo-read-pool => for generic topo methods (e.g. GetKeyspace, FindAllShardsInKeyspace)
# - workflow-read-pool => for GetWorkflow/GetWorkflows api methods.

# How long to keep values in the schema cache by default (a duration passed to Add takes precedence).
# A value of "0m" means values are never cached, a positive duration such as "1m" means items are cached
# for that duration, and omitting the flag defaults to "NoExpiration".
schema-cache-default-expiration: 1m
# How many outstanding backfill requests to permit in the schema cache.
# If the queue is full, calls to backfill schemas will return false, and those requests will be discarded.
# A value of "0" means that the underlying channel will have a size of 0,
# and every send to the backfill queue will block until the queue is "empty" again.
schema-cache-backfill-queue-size: 0
# How often expired values are removed from schema cache.
schema-cache-cleanup-interval: 5m
# How long a backfill request is considered valid.
# If the backfill goroutine encounters a request older than this, it is discarded.
schema-cache-backfill-request-ttl: 100ms
# How much time must pass before the backfill goroutine will re-backfill the same key.
# Used to prevent multiple callers from queueing up too many requests for the same key,
# when one backfill would satisfy all of them.
schema-cache-backfill-request-duplicate-interval: 1m
# How long to wait when attempting to enqueue a backfill request before giving up.
schema-cache-backfill-enqueue-wait-time: 50ms
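
For illustration only (not part of this commit), a rough Go sketch of the same defaults expressed as a cache.Config. DefaultExpiration, CleanupInterval, BackfillRequestTTL, and BackfillQueueSize appear in the cache.go diff below; BackfillRequestDuplicateInterval and BackfillEnqueueWaitTime are assumed to be the corresponding Config fields for the last two flags.

// Hypothetical sketch: the schema-cache-* flags above, written out as a cache.Config.
// Field names for the last two entries are assumptions, not confirmed by this diff.
package main

import (
	"fmt"
	"time"

	"vitess.io/vitess/go/vt/vtadmin/cache"
)

func main() {
	cfg := cache.Config{
		DefaultExpiration:                time.Minute,            // schema-cache-default-expiration: 1m
		BackfillQueueSize:                0,                      // schema-cache-backfill-queue-size: 0
		CleanupInterval:                  5 * time.Minute,        // schema-cache-cleanup-interval: 5m
		BackfillRequestTTL:               100 * time.Millisecond, // schema-cache-backfill-request-ttl: 100ms
		BackfillRequestDuplicateInterval: time.Minute,            // schema-cache-backfill-request-duplicate-interval: 1m
		BackfillEnqueueWaitTime:          50 * time.Millisecond,  // schema-cache-backfill-enqueue-wait-time: 50ms
	}
	fmt.Printf("%+v\n", cfg)
}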
8 changes: 8 additions & 0 deletions go/vt/vtadmin/cache/cache.go
@@ -54,6 +54,9 @@ const (
// backfill requests to still process, if a config is passed with a
// non-positive BackfillRequestTTL.
DefaultBackfillRequestTTL = time.Millisecond * 100
// DefaultBackfillQueueSize is the default value used for the size of the
// backfill queue, if a config is passed with a non-positive BackfillQueueSize.
DefaultBackfillQueueSize = 0
)

// Config is the configuration for a cache.
@@ -125,6 +128,11 @@ func New[Key Keyer, Value any](fillFunc func(ctx context.Context, req Key) (Valu
cfg.BackfillRequestTTL = DefaultBackfillRequestTTL
}

if cfg.BackfillQueueSize < 0 {
log.Warningf("BackfillQueueSize (%v) must be non-negative, defaulting to %v", cfg.BackfillQueueSize, DefaultBackfillQueueSize)
cfg.BackfillQueueSize = DefaultBackfillQueueSize
}

c := &Cache[Key, Value]{
cache: cache.New(cfg.DefaultExpiration, cfg.CleanupInterval),
lastFill: map[string]time.Time{},
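
For illustration only, a minimal standalone sketch (hypothetical names, not the actual vtadmin cache implementation) of the queue-size semantics described in the clusters.yaml comments above: with a buffered channel, an enqueue that cannot complete within the wait window reports false and the request is dropped, and a size of 0 means every send blocks until a receiver is ready.

// Hypothetical sketch of non-blocking enqueue semantics on a bounded queue.
package main

import (
	"fmt"
	"time"
)

type backfillRequest struct{ key string }

// tryEnqueue attempts to place req on the queue, giving up after wait.
func tryEnqueue(queue chan backfillRequest, req backfillRequest, wait time.Duration) bool {
	select {
	case queue <- req: // space available (or, for a size-0 channel, a receiver is waiting)
		return true
	case <-time.After(wait): // queue stayed full for the whole wait window: drop the request
		return false
	}
}

func main() {
	queue := make(chan backfillRequest, 0) // schema-cache-backfill-queue-size: 0
	ok := tryEnqueue(queue, backfillRequest{key: "commerce"}, 50*time.Millisecond)
	fmt.Println("enqueued:", ok) // prints "enqueued: false" because nothing is receiving
}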
39 changes: 39 additions & 0 deletions go/vt/vtadmin/cache/cache_test.go
@@ -93,6 +93,45 @@ func TestBackfillDuplicates(t *testing.T) {
}
}

func TestBackfillQueueSize(t *testing.T) {
t.Parallel()

tests := []struct {
name string
configuredBackfillQueueSize int
expectedBackfillQueueSize int
}{
{
name: "configured negative backfill queue size",
configuredBackfillQueueSize: -1,
expectedBackfillQueueSize: 0,
}, {
name: "configured 0 backfill queue size",
configuredBackfillQueueSize: 0,
expectedBackfillQueueSize: 0,
}, {
name: "configured positive backfill queue size",
configuredBackfillQueueSize: 1,
expectedBackfillQueueSize: 1,
},
}
for _, tt := range tests {
tt := tt

t.Run(tt.name, func(t *testing.T) {
t.Parallel()

c := cache.New(func(ctx context.Context, req intkey) (any, error) {
return nil, nil
}, cache.Config{
BackfillQueueSize: tt.configuredBackfillQueueSize,
})
var config cache.Config = c.Debug()["config"].(cache.Config)
assert.Equal(t, tt.expectedBackfillQueueSize, config.BackfillQueueSize)
})
}
}

func TestBackfillTTL(t *testing.T) {
t.Parallel()

3 changes: 3 additions & 0 deletions go/vt/vtadmin/cluster/cluster.go
@@ -1424,7 +1424,10 @@ func (c *Cluster) GetSchemas(ctx context.Context, opts GetSchemaOptions) ([]*vta

span.Annotate("cache_hit", ok)
if ok {
log.Infof("GetSchemas(cluster = %s) fetching schemas from schema cache", c.ID)
return schemas, err
} else {
log.Infof("GetSchemas(cluster = %s) bypassing schema cache", c.ID)
}
}

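
For illustration only, a generic read-through sketch (hypothetical names, not the actual Cluster.GetSchemas code) of the pattern the new log lines annotate: on a cache hit the cached schemas are returned and logged as such; otherwise the call logs that it is bypassing the cache and falls through to fetch from the cluster.

// Hypothetical sketch of a read-through schema lookup with hit/bypass logging.
package main

import (
	"fmt"
	"log"
)

type schema struct{ keyspace string }

type schemaCache struct{ entries map[string][]schema }

func (c *schemaCache) Get(key string) ([]schema, bool) {
	s, ok := c.entries[key]
	return s, ok
}

func getSchemas(clusterID string, cache *schemaCache, fetch func() []schema) []schema {
	if schemas, ok := cache.Get(clusterID); ok {
		log.Printf("GetSchemas(cluster = %s) fetching schemas from schema cache", clusterID)
		return schemas
	}
	log.Printf("GetSchemas(cluster = %s) bypassing schema cache", clusterID)
	return fetch()
}

func main() {
	c := &schemaCache{entries: map[string][]schema{}}
	schemas := getSchemas("local", c, func() []schema { return []schema{{keyspace: "commerce"}} })
	fmt.Println(len(schemas))
}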
