diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go index f01de43810..e849e16f72 100644 --- a/pkg/cache/cache.go +++ b/pkg/cache/cache.go @@ -47,10 +47,10 @@ var ( // to receive events for Kubernetes objects (at a low-level), // and add indices to fields on the objects stored in the cache. type Cache interface { - // Cache acts as a client to objects stored in the cache. + // Reader acts as a client to objects stored in the cache. client.Reader - // Cache loads informers and adds field indices. + // Informers loads informers and adds field indices. Informers } @@ -70,39 +70,43 @@ type Informers interface { // It blocks. Start(ctx context.Context) error - // WaitForCacheSync waits for all the caches to sync. Returns false if it could not sync a cache. + // WaitForCacheSync waits for all the caches to sync. Returns false if it could not sync a cache. WaitForCacheSync(ctx context.Context) bool - // Informers knows how to add indices to the caches (informers) that it manages. + // FieldIndexer adds indices to the managed informers. client.FieldIndexer } -// Informer - informer allows you interact with the underlying informer. +// Informer allows you to interact with the underlying informer. type Informer interface { // AddEventHandler adds an event handler to the shared informer using the shared informer's resync - // period. Events to a single handler are delivered sequentially, but there is no coordination + // period. Events to a single handler are delivered sequentially, but there is no coordination // between different handlers. // It returns a registration handle for the handler that can be used to remove - // the handler again. + // the handler again and an error if the handler cannot be added. AddEventHandler(handler toolscache.ResourceEventHandler) (toolscache.ResourceEventHandlerRegistration, error) + // AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the - // specified resync period. Events to a single handler are delivered sequentially, but there is + // specified resync period. Events to a single handler are delivered sequentially, but there is // no coordination between different handlers. // It returns a registration handle for the handler that can be used to remove // the handler again and an error if the handler cannot be added. AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) (toolscache.ResourceEventHandlerRegistration, error) - // RemoveEventHandler removes a formerly added event handler given by + + // RemoveEventHandler removes a previously added event handler given by // its registration handle. - // This function is guaranteed to be idempotent, and thread-safe. + // This function is guaranteed to be idempotent and thread-safe. RemoveEventHandler(handle toolscache.ResourceEventHandlerRegistration) error - // AddIndexers adds more indexers to this store. If you call this after you already have data + + // AddIndexers adds indexers to this store. If this is called after there is already data // in the store, the results are undefined. AddIndexers(indexers toolscache.Indexers) error + // HasSynced return true if the informers underlying store has synced. HasSynced() bool } -// Options are the optional arguments for creating a new InformersMap object. +// Options are the optional arguments for creating a new Cache object. 
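The registration-handle flow in the Informer interface above (AddEventHandler returning a ResourceEventHandlerRegistration that can later be handed back to RemoveEventHandler) is easiest to see from the caller's side. A minimal sketch, assuming a cache that was created and started elsewhere; the helper name, handler body, and object type are illustrative only:

```go
package cacheexample

import (
	"context"
	"log"

	corev1 "k8s.io/api/core/v1"
	toolscache "k8s.io/client-go/tools/cache"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

// registerTemporaryHandler is a hypothetical helper: it adds an event handler
// to the Pod informer and returns a cleanup func that removes it again.
func registerTemporaryHandler(ctx context.Context, c cache.Cache) (func() error, error) {
	informer, err := c.GetInformer(ctx, &corev1.Pod{})
	if err != nil {
		return nil, err
	}

	// AddEventHandler returns a registration handle and an error if the
	// handler cannot be added.
	reg, err := informer.AddEventHandler(toolscache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			log.Printf("pod added: %T", obj)
		},
	})
	if err != nil {
		return nil, err
	}

	// RemoveEventHandler is documented as idempotent and thread-safe, so the
	// cleanup func may safely be called more than once.
	return func() error { return informer.RemoveEventHandler(reg) }, nil
}
```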
type Options struct { // HTTPClient is the http client to use for the REST client HTTPClient *http.Client @@ -141,14 +145,14 @@ type Options struct { SyncPeriod *time.Duration // Namespaces restricts the cache's ListWatch to the desired namespaces - // Default watches all namespaces + // Per default ListWatch watches all namespaces Namespaces []string - // DefaultLabelSelector will be used as a label selectors for all object types + // DefaultLabelSelector will be used as a label selector for all object types // unless they have a more specific selector set in ByObject. DefaultLabelSelector labels.Selector - // DefaultFieldSelector will be used as a field selectors for all object types + // DefaultFieldSelector will be used as a field selector for all object types // unless they have a more specific selector set in ByObject. DefaultFieldSelector fields.Selector @@ -156,9 +160,6 @@ type Options struct { // unless they have a more specific transform set in ByObject. DefaultTransform toolscache.TransformFunc - // ByObject restricts the cache's ListWatch to the desired fields per GVK at the specified object. - ByObject map[client.Object]ByObject - // UnsafeDisableDeepCopy indicates not to deep copy objects during get or // list objects for EVERY object. // Be very careful with this, when enabled you must DeepCopy any object before mutating it, @@ -166,6 +167,9 @@ type Options struct { // // This is a global setting for all objects, and can be overridden by the ByObject setting. UnsafeDisableDeepCopy *bool + + // ByObject restricts the cache's ListWatch to the desired fields per GVK at the specified object. + ByObject map[client.Object]ByObject } // ByObject offers more fine-grained control over the cache's ListWatch by object. @@ -176,9 +180,8 @@ type ByObject struct { // Field represents a field selector for the object. Field fields.Selector - // Transform is a map from objects to transformer functions which - // get applied when objects of the transformation are about to be committed - // to cache. + // Transform is a transformer function for the object which gets applied + // when objects of the transformation are about to be committed to the cache. // // This function is called both for new objects to enter the cache, // and for updated objects. 
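Taken together, the Options and ByObject fields above let a caller narrow what the cache watches before using it. A hedged sketch of such a configuration; the namespace names, label set, and node name are illustrative and not part of this change:

```go
package cacheexample

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/rest"

	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// newScopedCache builds a cache that only ListWatches two namespaces and
// filters the cached objects with selectors.
func newScopedCache(cfg *rest.Config) (cache.Cache, error) {
	return cache.New(cfg, cache.Options{
		// Restrict the ListWatch to these namespaces (illustrative names).
		Namespaces: []string{"team-a", "team-b"},

		// Applied to every object type unless a more specific selector is
		// set in ByObject.
		DefaultLabelSelector: labels.SelectorFromSet(labels.Set{"managed-by": "example"}),

		// Per-object override: only cache Pods scheduled to a specific node.
		ByObject: map[client.Object]cache.ByObject{
			&corev1.Pod{}: {
				Field: fields.OneTermEqualSelector("spec.nodeName", "node-1"),
			},
		},
	})
}
```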
@@ -236,15 +239,12 @@ func New(config *rest.Config, opts Options) (Cache, error) { } func defaultOpts(config *rest.Config, opts Options) (Options, error) { - logger := log.WithName("setup") - // Use the rest HTTP client for the provided config if unset if opts.HTTPClient == nil { var err error opts.HTTPClient, err = rest.HTTPClientFor(config) if err != nil { - logger.Error(err, "Failed to get HTTP client") - return opts, fmt.Errorf("could not create HTTP client from config: %w", err) + return Options{}, fmt.Errorf("could not create HTTP client from config: %w", err) } } @@ -258,8 +258,7 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) { var err error opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config, opts.HTTPClient) if err != nil { - logger.Error(err, "Failed to get API Group-Resources") - return opts, fmt.Errorf("could not create RESTMapper from config: %w", err) + return Options{}, fmt.Errorf("could not create RESTMapper from config: %w", err) } } diff --git a/pkg/cache/informer_cache.go b/pkg/cache/informer_cache.go index 771244d52a..70c9e1eced 100644 --- a/pkg/cache/informer_cache.go +++ b/pkg/cache/informer_cache.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/cache/internal" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" @@ -67,7 +68,7 @@ func (ic *informerCache) Get(ctx context.Context, key client.ObjectKey, out clie if !started { return &ErrCacheNotStarted{} } - return cache.Reader.Get(ctx, key, out) + return cache.Reader.Get(ctx, key, out, opts...) } // List implements Reader. @@ -135,7 +136,7 @@ func (ic *informerCache) GetInformerForKind(ctx context.Context, gvk schema.Grou if err != nil { return nil, err } - return i.Informer, err + return i.Informer, nil } // GetInformer returns the informer for the obj. @@ -149,7 +150,7 @@ func (ic *informerCache) GetInformer(ctx context.Context, obj client.Object) (In if err != nil { return nil, err } - return i.Informer, err + return i.Informer, nil } // NeedLeaderElection implements the LeaderElectionRunnable interface @@ -158,11 +159,11 @@ func (ic *informerCache) NeedLeaderElection() bool { return false } -// IndexField adds an indexer to the underlying cache, using extraction function to get -// value(s) from the given field. This index can then be used by passing a field selector +// IndexField adds an indexer to the underlying informer, using extractValue function to get +// value(s) from the given field. This index can then be used by passing a field selector // to List. For one-to-one compatibility with "normal" field selectors, only return one value. -// The values may be anything. They will automatically be prefixed with the namespace of the -// given object, if present. The objects passed are guaranteed to be objects of the correct type. +// The values may be anything. They will automatically be prefixed with the namespace of the +// given object, if present. The objects passed are guaranteed to be objects of the correct type. 
func (ic *informerCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { informer, err := ic.GetInformer(ctx, obj) if err != nil { @@ -171,7 +172,7 @@ func (ic *informerCache) IndexField(ctx context.Context, obj client.Object, fiel return indexByField(informer, field, extractValue) } -func indexByField(indexer Informer, field string, extractor client.IndexerFunc) error { +func indexByField(informer Informer, field string, extractValue client.IndexerFunc) error { indexFunc := func(objRaw interface{}) ([]string, error) { // TODO(directxman12): check if this is the correct type? obj, isObj := objRaw.(client.Object) @@ -184,7 +185,7 @@ func indexByField(indexer Informer, field string, extractor client.IndexerFunc) } ns := meta.GetNamespace() - rawVals := extractor(obj) + rawVals := extractValue(obj) var vals []string if ns == "" { // if we're not doubling the keys for the namespaced case, just create a new slice with same length @@ -207,5 +208,5 @@ func indexByField(indexer Informer, field string, extractor client.IndexerFunc) return vals, nil } - return indexer.AddIndexers(cache.Indexers{internal.FieldIndexName(field): indexFunc}) + return informer.AddIndexers(cache.Indexers{internal.FieldIndexName(field): indexFunc}) } diff --git a/pkg/cache/informertest/fake_cache.go b/pkg/cache/informertest/fake_cache.go index da3bf8e0d4..bb1ccee34b 100644 --- a/pkg/cache/informertest/fake_cache.go +++ b/pkg/cache/informertest/fake_cache.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/kubernetes/scheme" toolscache "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" @@ -52,14 +53,7 @@ func (c *FakeInformers) GetInformerForKind(ctx context.Context, gvk schema.Group // FakeInformerForKind implements Informers. func (c *FakeInformers) FakeInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (*controllertest.FakeInformer, error) { - if c.Scheme == nil { - c.Scheme = scheme.Scheme - } - obj, err := c.Scheme.New(gvk) - if err != nil { - return nil, err - } - i, err := c.informerFor(gvk, obj) + i, err := c.GetInformerForKind(ctx, gvk) if err != nil { return nil, err } @@ -88,16 +82,8 @@ func (c *FakeInformers) WaitForCacheSync(ctx context.Context) bool { } // FakeInformerFor implements Informers. -func (c *FakeInformers) FakeInformerFor(obj runtime.Object) (*controllertest.FakeInformer, error) { - if c.Scheme == nil { - c.Scheme = scheme.Scheme - } - gvks, _, err := c.Scheme.ObjectKinds(obj) - if err != nil { - return nil, err - } - gvk := gvks[0] - i, err := c.informerFor(gvk, obj) +func (c *FakeInformers) FakeInformerFor(ctx context.Context, obj client.Object) (*controllertest.FakeInformer, error) { + i, err := c.GetInformer(ctx, obj) if err != nil { return nil, err } diff --git a/pkg/cache/internal/cache_reader.go b/pkg/cache/internal/cache_reader.go index 3c8355bbde..5d18bed08f 100644 --- a/pkg/cache/internal/cache_reader.go +++ b/pkg/cache/internal/cache_reader.go @@ -53,7 +53,7 @@ type CacheReader struct { } // Get checks the indexer for the object and writes a copy of it if found. 
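IndexField and the namespaced index keys it builds on are what make field-selector Lists against the cache work. A short sketch of the caller's side, assuming a hypothetical "spec.nodeName" index on Pods and a cache that is already running:

```go
package cacheexample

import (
	"context"

	corev1 "k8s.io/api/core/v1"

	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listPodsOnNode registers a field index for spec.nodeName and lists Pods
// through the cache using a matching field selector. For one-to-one
// compatibility with "normal" field selectors the extract function returns
// a single value.
func listPodsOnNode(ctx context.Context, c cache.Cache, node string) (*corev1.PodList, error) {
	if err := c.IndexField(ctx, &corev1.Pod{}, "spec.nodeName", func(o client.Object) []string {
		pod := o.(*corev1.Pod)
		return []string{pod.Spec.NodeName}
	}); err != nil {
		return nil, err
	}

	// The cache only supports exact matches on indexed fields, so this maps
	// directly onto the index added above.
	pods := &corev1.PodList{}
	if err := c.List(ctx, pods, client.MatchingFields{"spec.nodeName": node}); err != nil {
		return nil, err
	}
	return pods, nil
}
```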
-func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error { +func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Object, _ ...client.GetOption) error { if c.scopeName == apimeta.RESTScopeNameRoot { key.Namespace = "" } @@ -67,9 +67,9 @@ func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Ob // Not found, return an error if !exists { - // Resource gets transformed into Kind in the error anyway, so this is fine return apierrors.NewNotFound(schema.GroupResource{ - Group: c.groupVersionKind.Group, + Group: c.groupVersionKind.Group, + // Resource gets set as Kind in the error so this is fine Resource: c.groupVersionKind.Kind, }, key.Name) } @@ -119,8 +119,8 @@ func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...cli if !requiresExact { return fmt.Errorf("non-exact field matches are not supported by the cache") } - // list all objects by the field selector. If this is namespaced and we have one, ask for the - // namespaced index key. Otherwise, ask for the non-namespaced variant by using the fake "all namespaces" + // list all objects by the field selector. If this is namespaced and we have one, ask for the + // namespaced index key. Otherwise, ask for the non-namespaced variant by using the fake "all namespaces" // namespace. objs, err = c.indexer.ByIndex(FieldIndexName(field), KeyToNamespacedKey(listOpts.Namespace, val)) case listOpts.Namespace != "": @@ -175,7 +175,7 @@ func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...cli } // objectKeyToStorageKey converts an object key to store key. -// It's akin to MetaNamespaceKeyFunc. It's separate from +// It's akin to MetaNamespaceKeyFunc. It's separate from // String to allow keeping the key format easily in sync with // MetaNamespaceKeyFunc. func objectKeyToStoreKey(k client.ObjectKey) string { @@ -191,7 +191,7 @@ func FieldIndexName(field string) string { return "field:" + field } -// noNamespaceNamespace is used as the "namespace" when we want to list across all namespaces. +// allNamespacesNamespace is used as the "namespace" when we want to list across all namespaces. const allNamespacesNamespace = "__all_namespaces" // KeyToNamespacedKey prefixes the given index key with a namespace diff --git a/pkg/cache/internal/informers.go b/pkg/cache/internal/informers.go index 09e0111114..c01d70a940 100644 --- a/pkg/cache/internal/informers.go +++ b/pkg/cache/internal/informers.go @@ -35,6 +35,7 @@ import ( "k8s.io/client-go/metadata" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) @@ -48,7 +49,7 @@ type InformersOpts struct { ByGVK map[schema.GroupVersionKind]InformersOptsByGVK } -// InformersOptsByGVK configured additional by group version kind (or object) +// InformersOptsByGVK configures additionally by group version kind (or object) // in an InformerMap. type InformersOptsByGVK struct { Selector Selector @@ -186,7 +187,7 @@ func (ip *Informers) getDisableDeepCopy(gvk schema.GroupVersionKind) bool { return false } -// Start calls Run on each of the informers and sets started to true. Blocks on the context. +// Start calls Run on each of the informers and sets started to true. Blocks on the context. // It doesn't return start because it can't return an error, and it's not a runnable directly. 
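Because CacheReader.Get reports a missing object with apierrors.NewNotFound (with the Kind in the Resource position, as the comment above notes), callers can branch on it exactly as they would for a live API read. A small sketch; the ConfigMap type is chosen only for illustration:

```go
package cacheexample

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"

	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// getConfigMap reads a ConfigMap from the cache and distinguishes
// "not found in the cache" from real errors.
func getConfigMap(ctx context.Context, c cache.Cache, key client.ObjectKey) (*corev1.ConfigMap, error) {
	cm := &corev1.ConfigMap{}
	err := c.Get(ctx, key, cm)
	switch {
	case apierrors.IsNotFound(err):
		// The object is not in the cache; treat it as absent rather than failing.
		return nil, nil
	case err != nil:
		return nil, err
	default:
		return cm, nil
	}
}
```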
func (ip *Informers) Start(ctx context.Context) error { func() { @@ -278,7 +279,7 @@ func (ip *Informers) get(gvk schema.GroupVersionKind, obj runtime.Object) (res * return i, ip.started, ok } -// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns +// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns // the Informer from the map. func (ip *Informers) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *Cache, error) { // Return the informer if it is found @@ -311,11 +312,12 @@ func (ip *Informers) informersByType(obj runtime.Object) map[schema.GroupVersion } } +// addInformerToMap either returns an existing informer or creates a new informer, adds it to the map and returns it. func (ip *Informers) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*Cache, bool, error) { ip.mu.Lock() defer ip.mu.Unlock() - // Check the cache to see if we already have an Informer. If we do, return the Informer. + // Check the cache to see if we already have an Informer. If we do, return the Informer. // This is for the case where 2 routines tried to get the informer when it wasn't in the map // so neither returned early, but the first one created it. if i, ok := ip.informersByType(obj)[gvk]; ok { diff --git a/pkg/cache/internal/transformers.go b/pkg/cache/internal/transformers.go deleted file mode 100644 index 0725f550c5..0000000000 --- a/pkg/cache/internal/transformers.go +++ /dev/null @@ -1,55 +0,0 @@ -package internal - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/tools/cache" - - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -// TransformFuncByGVK provides access to the correct transform function for -// any given GVK. -type TransformFuncByGVK interface { - Set(runtime.Object, *runtime.Scheme, cache.TransformFunc) error - Get(schema.GroupVersionKind) cache.TransformFunc - SetDefault(transformer cache.TransformFunc) -} - -type transformFuncByGVK struct { - defaultTransform cache.TransformFunc - transformers map[schema.GroupVersionKind]cache.TransformFunc -} - -// TransformFuncByGVKFromMap creates a TransformFuncByGVK from a map that -// maps GVKs to TransformFuncs. 
-func TransformFuncByGVKFromMap(in map[schema.GroupVersionKind]cache.TransformFunc) TransformFuncByGVK { - byGVK := &transformFuncByGVK{} - if defaultFunc, hasDefault := in[schema.GroupVersionKind{}]; hasDefault { - byGVK.defaultTransform = defaultFunc - } - delete(in, schema.GroupVersionKind{}) - byGVK.transformers = in - return byGVK -} - -func (t *transformFuncByGVK) SetDefault(transformer cache.TransformFunc) { - t.defaultTransform = transformer -} - -func (t *transformFuncByGVK) Set(obj runtime.Object, scheme *runtime.Scheme, transformer cache.TransformFunc) error { - gvk, err := apiutil.GVKForObject(obj, scheme) - if err != nil { - return err - } - - t.transformers[gvk] = transformer - return nil -} - -func (t transformFuncByGVK) Get(gvk schema.GroupVersionKind) cache.TransformFunc { - if val, ok := t.transformers[gvk]; ok { - return val - } - return t.defaultTransform -} diff --git a/pkg/cache/multi_namespace_cache.go b/pkg/cache/multi_namespace_cache.go index ac97beae94..36dcf3d4e5 100644 --- a/pkg/cache/multi_namespace_cache.go +++ b/pkg/cache/multi_namespace_cache.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/rest" toolscache "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) @@ -36,7 +37,7 @@ const globalCache = "_cluster-scope" // MultiNamespacedCacheBuilder - Builder function to create a new multi-namespaced cache. // This will scope the cache to a list of namespaces. Listing for all namespaces -// will list for all the namespaces that this knows about. By default this will create +// will list for all the namespaces that this knows about. By default, this will create // a global cache for cluster scoped resource. Note that this is not intended // to be used for excluding namespaces, this is better done via a Predicate. Also note that // you may face performance issues when using this with a high number of namespaces. @@ -71,12 +72,12 @@ func newMultiNamespaceCache(config *rest.Config, opts Options) (Cache, error) { // Create a cache for cluster scoped resources. opts.Namespaces = []string{} - gCache, err := New(config, opts) + clusterCache, err := New(config, opts) if err != nil { return nil, fmt.Errorf("error creating global cache: %w", err) } - return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme, RESTMapper: opts.Mapper, clusterCache: gCache}, nil + return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme, RESTMapper: opts.Mapper, clusterCache: clusterCache}, nil } // multiNamespaceCache knows how to handle multiple namespaced caches @@ -84,90 +85,93 @@ func newMultiNamespaceCache(config *rest.Config, opts Options) (Cache, error) { // operator to a list of namespaces instead of watching every namespace // in the cluster. type multiNamespaceCache struct { - namespaceToCache map[string]Cache Scheme *runtime.Scheme RESTMapper apimeta.RESTMapper + namespaceToCache map[string]Cache clusterCache Cache } var _ Cache = &multiNamespaceCache{} // Methods for multiNamespaceCache to conform to the Informers interface. 
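With the TransformFuncByGVK plumbing deleted above, transform functions are expressed through the cache Options instead: DefaultTransform applies to every type, and a ByObject entry can override it for a specific object. A hedged sketch combining that with the namespace list the multi-namespace cache is built from; the function bodies and namespace names are illustrative:

```go
package cacheexample

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/rest"
	toolscache "k8s.io/client-go/tools/cache"

	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// stripManagedFields is an illustrative transform that drops managedFields
// before objects are committed to the cache.
func stripManagedFields(i interface{}) (interface{}, error) {
	if obj, ok := i.(client.Object); ok {
		obj.SetManagedFields(nil)
	}
	return i, nil
}

// Compile-time check that the helper matches the expected transform signature.
var _ toolscache.TransformFunc = stripManagedFields

func newTransformingCache(cfg *rest.Config) (cache.Cache, error) {
	return cache.New(cfg, cache.Options{
		// The multi-namespace cache is built from this list; listing across
		// all namespaces only covers the namespaces named here.
		Namespaces: []string{"team-a", "team-b"},

		// Applied to every object type unless a more specific transform is
		// set in ByObject.
		DefaultTransform: stripManagedFields,

		// Per-object override: additionally drop Pod annotations (illustrative).
		ByObject: map[client.Object]cache.ByObject{
			&corev1.Pod{}: {
				Transform: func(i interface{}) (interface{}, error) {
					if pod, ok := i.(*corev1.Pod); ok {
						pod.Annotations = nil
					}
					return i, nil
				},
			},
		},
	})
}
```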
-func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { - informers := map[string]Informer{} - // If the object is clusterscoped, get the informer from clusterCache, +func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { + // If the object is cluster scoped, get the informer from clusterCache, // if not use the namespaced caches. isNamespaced, err := apiutil.IsObjectNamespaced(obj, c.Scheme, c.RESTMapper) if err != nil { return nil, err } if !isNamespaced { - clusterCacheInf, err := c.clusterCache.GetInformer(ctx, obj) + clusterCacheInformer, err := c.clusterCache.GetInformer(ctx, obj) if err != nil { return nil, err } - informers[globalCache] = clusterCacheInf - return &multiNamespaceInformer{namespaceToInformer: informers}, nil + return &multiNamespaceInformer{ + namespaceToInformer: map[string]Informer{ + globalCache: clusterCacheInformer, + }, + }, nil } + namespaceToInformer := map[string]Informer{} for ns, cache := range c.namespaceToCache { informer, err := cache.GetInformer(ctx, obj) if err != nil { return nil, err } - informers[ns] = informer + namespaceToInformer[ns] = informer } - return &multiNamespaceInformer{namespaceToInformer: informers}, nil + return &multiNamespaceInformer{namespaceToInformer: namespaceToInformer}, nil } func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { - informers := map[string]Informer{} - - // If the object is clusterscoped, get the informer from clusterCache, + // If the object is cluster scoped, get the informer from clusterCache, // if not use the namespaced caches. isNamespaced, err := apiutil.IsGVKNamespaced(gvk, c.RESTMapper) if err != nil { return nil, err } if !isNamespaced { - clusterCacheInf, err := c.clusterCache.GetInformerForKind(ctx, gvk) + clusterCacheInformer, err := c.clusterCache.GetInformerForKind(ctx, gvk) if err != nil { return nil, err } - informers[globalCache] = clusterCacheInf - return &multiNamespaceInformer{namespaceToInformer: informers}, nil + return &multiNamespaceInformer{ + namespaceToInformer: map[string]Informer{ + globalCache: clusterCacheInformer, + }, + }, nil } + namespaceToInformer := map[string]Informer{} for ns, cache := range c.namespaceToCache { informer, err := cache.GetInformerForKind(ctx, gvk) if err != nil { return nil, err } - informers[ns] = informer + namespaceToInformer[ns] = informer } - return &multiNamespaceInformer{namespaceToInformer: informers}, nil + return &multiNamespaceInformer{namespaceToInformer: namespaceToInformer}, nil } func (c *multiNamespaceCache) Start(ctx context.Context) error { // start global cache go func() { - err := c.clusterCache.Start(ctx) - if err != nil { - log.Error(err, "cluster scoped cache failed to start") + if err := c.clusterCache.Start(ctx); err != nil { + log.Error(err, "multi-namespace cache failed to start cluster scoped cache") } }() // start namespaced caches for ns, cache := range c.namespaceToCache { go func(ns string, cache Cache) { - err := cache.Start(ctx) - if err != nil { - log.Error(err, "multinamespace cache failed to start namespaced informer", "namespace", ns) + if err := cache.Start(ctx); err != nil { + log.Error(err, "multi-namespace cache failed to start namespaced informer", "namespace", ns) } }(ns, cache) } @@ -179,8 +183,8 @@ func (c *multiNamespaceCache) Start(ctx context.Context) error { func (c *multiNamespaceCache) WaitForCacheSync(ctx context.Context) bool { synced := true for 
_, cache := range c.namespaceToCache { - if s := cache.WaitForCacheSync(ctx); !s { - synced = s + if !cache.WaitForCacheSync(ctx) { + synced = false } } @@ -224,7 +228,7 @@ func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj if !ok { return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", key) } - return cache.Get(ctx, key, obj) + return cache.Get(ctx, key, obj, opts...) } // List multi namespace cache will get all the objects in the namespaces that the cache is watching if asked for all namespaces. @@ -245,7 +249,7 @@ func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, if listOpts.Namespace != corev1.NamespaceAll { cache, ok := c.namespaceToCache[listOpts.Namespace] if !ok { - return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", listOpts.Namespace) + return fmt.Errorf("unable to list: %v because of unknown namespace for the cache", listOpts.Namespace) } return cache.List(ctx, list, opts...) } @@ -278,12 +282,14 @@ func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, return fmt.Errorf("object: %T must be a list type", list) } allItems = append(allItems, items...) + // The last list call should have the most correct resource version. resourceVersion = accessor.GetResourceVersion() if limitSet { // decrement Limit by the number of items // fetched from the current namespace. listOpts.Limit -= int64(len(items)) + // if a Limit was set and the number of // items read has reached this set limit, // then stop reading. @@ -325,9 +331,12 @@ func (h handlerRegistration) HasSynced() bool { var _ Informer = &multiNamespaceInformer{} -// AddEventHandler adds the handler to each namespaced informer. +// AddEventHandler adds the handler to each informer. func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) (toolscache.ResourceEventHandlerRegistration, error) { - handles := handlerRegistration{handles: make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer))} + handles := handlerRegistration{ + handles: make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer)), + } + for ns, informer := range i.namespaceToInformer { registration, err := informer.AddEventHandler(handler) if err != nil { @@ -335,12 +344,16 @@ func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEven } handles.handles[ns] = registration } + return handles, nil } // AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer. func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) (toolscache.ResourceEventHandlerRegistration, error) { - handles := handlerRegistration{handles: make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer))} + handles := handlerRegistration{ + handles: make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer)), + } + for ns, informer := range i.namespaceToInformer { registration, err := informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod) if err != nil { @@ -348,14 +361,15 @@ func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolsca } handles.handles[ns] = registration } + return handles, nil } -// RemoveEventHandler removes a formerly added event handler given by its registration handle. 
+// RemoveEventHandler removes a previously added event handler given by its registration handle. func (i *multiNamespaceInformer) RemoveEventHandler(h toolscache.ResourceEventHandlerRegistration) error { handles, ok := h.(handlerRegistration) if !ok { - return fmt.Errorf("it is not the registration returned by multiNamespaceInformer") + return fmt.Errorf("registration is not a registration returned by multiNamespaceInformer") } for ns, informer := range i.namespaceToInformer { registration, ok := handles.handles[ns] @@ -369,7 +383,7 @@ func (i *multiNamespaceInformer) RemoveEventHandler(h toolscache.ResourceEventHa return nil } -// AddIndexers adds the indexer for each namespaced informer. +// AddIndexers adds the indexers to each informer. func (i *multiNamespaceInformer) AddIndexers(indexers toolscache.Indexers) error { for _, informer := range i.namespaceToInformer { err := informer.AddIndexers(indexers) @@ -380,11 +394,11 @@ func (i *multiNamespaceInformer) AddIndexers(indexers toolscache.Indexers) error return nil } -// HasSynced checks if each namespaced informer has synced. +// HasSynced checks if each informer has synced. func (i *multiNamespaceInformer) HasSynced() bool { for _, informer := range i.namespaceToInformer { - if ok := informer.HasSynced(); !ok { - return ok + if !informer.HasSynced() { + return false } } return true diff --git a/pkg/source/source_test.go b/pkg/source/source_test.go index f5e231d540..16c365e8a2 100644 --- a/pkg/source/source_test.go +++ b/pkg/source/source_test.go @@ -23,6 +23,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "sigs.k8s.io/controller-runtime/pkg/cache/informertest" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -88,7 +89,7 @@ var _ = Describe("Source", func() { Expect(err).NotTo(HaveOccurred()) Expect(instance.WaitForSync(context.Background())).NotTo(HaveOccurred()) - i, err := ic.FakeInformerFor(&corev1.Pod{}) + i, err := ic.FakeInformerFor(ctx, &corev1.Pod{}) Expect(err).NotTo(HaveOccurred()) i.Add(p) @@ -128,7 +129,7 @@ var _ = Describe("Source", func() { Expect(err).NotTo(HaveOccurred()) Expect(instance.WaitForSync(context.Background())).NotTo(HaveOccurred()) - i, err := ic.FakeInformerFor(&corev1.Pod{}) + i, err := ic.FakeInformerFor(ctx, &corev1.Pod{}) Expect(err).NotTo(HaveOccurred()) i.Update(p, p2) @@ -170,7 +171,7 @@ var _ = Describe("Source", func() { Expect(err).NotTo(HaveOccurred()) Expect(instance.WaitForSync(context.Background())).NotTo(HaveOccurred()) - i, err := ic.FakeInformerFor(&corev1.Pod{}) + i, err := ic.FakeInformerFor(ctx, &corev1.Pod{}) Expect(err).NotTo(HaveOccurred()) i.Delete(p)
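The updated FakeInformerFor signature exercised in the source tests above now takes a context and a client.Object. A minimal test sketch of driving events through it, under the assumption that a zero-value informertest.FakeInformers is usable the same way those tests use it:

```go
package cacheexample

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"

	"sigs.k8s.io/controller-runtime/pkg/cache/informertest"
)

func TestFakeInformerFor(t *testing.T) {
	ctx := context.Background()
	ic := &informertest.FakeInformers{}

	// FakeInformerFor now takes a context and a client.Object.
	i, err := ic.FakeInformerFor(ctx, &corev1.Pod{})
	if err != nil {
		t.Fatalf("getting fake informer: %v", err)
	}

	// Events can be injected directly, as the source tests above do.
	pod := &corev1.Pod{}
	i.Add(pod)
	i.Update(pod, pod.DeepCopy())
	i.Delete(pod)
}
```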