From 81225c59cd5e564274951a93fa944e93e7c590df Mon Sep 17 00:00:00 2001
From: Andy Goldstein
Date: Thu, 5 Nov 2020 14:54:38 -0500
Subject: [PATCH 1/2] Backport release-0.5: Support metadata-only watches

Add support for metadata-only watches. This backports a series of
commits from the main branch:

Add low-level metadata-only informer support

This adds support for informers that communicate with the API server in
metadata-only form. They are *completely* separate from normal
informers -- that is: just like unstructured, if you ask for both a
"normal" informer & a metadata-only informer, you'll get two copies of
the cache.

Support metadata-only client operations

This adds support for a metadata-only client. It only implements the
operations supported by metadata (delete, deleteallof, patch, get,
list, status.patch). The higher-level client will now delegate to this
for when a PartialObjectMetadata object is passed in.

Support "projections" in the controller builder

This adds options to "project" watches as only metadata to the builder,
making it more convenient to use these forms. For instance:

```go
.Owns(&corev1.Pod{}, builder.OnlyMetadata)
```

is equivalent to

```go
.Owns(&metav1.PartialObjectMetadata{
    TypeMeta: metav1.TypeMeta{
        APIVersion: "v1",
        Kind:       "Pod",
    },
})
```

Co-authored-by: Solly Ross
Signed-off-by: Vince Prignano
---
 pkg/builder/controller.go           |  39 +-
 pkg/builder/controller_test.go      |  61 ++-
 pkg/builder/example_test.go         |  55 ++
 pkg/builder/options.go              |  36 ++
 pkg/cache/cache_test.go             | 311 +++++++++++-
 pkg/cache/internal/deleg_map.go     |  32 +-
 pkg/cache/internal/informers_map.go |  35 ++
 pkg/client/apiutil/apimachinery.go  |  22 +
 pkg/client/client.go                |  93 +++-
 pkg/client/client_test.go           | 762 +++++++++++++++++++++++++++-
 pkg/client/metadata_client.go       | 194 +++++++
 pkg/handler/enqueue_owner.go        |  13 +-
 12 files changed, 1581 insertions(+), 72 deletions(-)
 create mode 100644 pkg/builder/options.go
 create mode 100644 pkg/client/metadata_client.go

diff --git a/pkg/builder/controller.go b/pkg/builder/controller.go
index ffbdc07f46..01a0518940 100644
--- a/pkg/builder/controller.go
+++ b/pkg/builder/controller.go
@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"strings"

+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/rest"
 	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
@@ -155,20 +156,42 @@ func (blder *Builder) Build(r reconcile.Reconciler) (controller.Controller, erro
 	return blder.ctrl, nil
 }

+func (blder *Builder) project(obj runtime.Object) (runtime.Object, error) {
+	switch o := obj.(type) {
+	case *onlyMetadataWrapper:
+		metaObj := &metav1.PartialObjectMetadata{}
+		gvk, err := getGvk(o.Object, blder.mgr.GetScheme())
+		if err != nil {
+			return nil, fmt.Errorf("unable to determine GVK of %T for a metadata-only watch: %w", obj, err)
+		}
+		metaObj.SetGroupVersionKind(gvk)
+		return metaObj, nil
+	default:
+		return obj, nil
+	}
+}
+
 func (blder *Builder) doWatch() error {
 	// Reconcile type
-	src := &source.Kind{Type: blder.apiType}
-	hdler := &handler.EnqueueRequestForObject{}
-	err := blder.ctrl.Watch(src, hdler, blder.predicates...)
+ apiType, err := blder.project(blder.apiType) if err != nil { return err } + src := &source.Kind{Type: apiType} + hdler := &handler.EnqueueRequestForObject{} + if err := blder.ctrl.Watch(src, hdler, blder.predicates...); err != nil { + return err + } // Watches the managed types for _, obj := range blder.managedObjects { - src := &source.Kind{Type: obj} + typeForSrc, err := blder.project(obj) + if err != nil { + return err + } + src := &source.Kind{Type: typeForSrc} hdler := &handler.EnqueueRequestForOwner{ - OwnerType: blder.apiType, + OwnerType: apiType, IsController: true, } if err := blder.ctrl.Watch(src, hdler, blder.predicates...); err != nil { @@ -196,7 +219,11 @@ func (blder *Builder) getControllerName() (string, error) { if blder.name != "" { return blder.name, nil } - gvk, err := getGvk(blder.apiType, blder.mgr.GetScheme()) + obj, err := blder.project(blder.apiType) + if err != nil { + return "", err + } + gvk, err := getGvk(obj, blder.mgr.GetScheme()) if err != nil { return "", err } diff --git a/pkg/builder/controller_test.go b/pkg/builder/controller_test.go index 9f1d5062e5..1c609f7481 100644 --- a/pkg/builder/controller_test.go +++ b/pkg/builder/controller_test.go @@ -23,6 +23,8 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/cache" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -203,8 +205,59 @@ var _ = Describe("application", func() { close(done) }, 10) }) + + Describe("watching with projections", func() { + var mgr manager.Manager + BeforeEach(func() { + // use a cache that intercepts requests for fully typed objects to + // ensure we use the projected versions + var err error + mgr, err = manager.New(cfg, manager.Options{NewCache: newNonTypedOnlyCache}) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should support watching For & Owns as metadata", func() { + bldr := ControllerManagedBy(mgr). + For(OnlyMetadata(&appsv1.Deployment{})). + Owns(OnlyMetadata(&appsv1.ReplicaSet{})) + + doReconcileTest("8", stop, bldr, mgr, true) + }) + }) }) +// newNonTypedOnlyCache returns a new cache that wraps the normal cache, +// returning an error if normal, typed objects have informers requested. +func newNonTypedOnlyCache(config *rest.Config, opts cache.Options) (cache.Cache, error) { + normalCache, err := cache.New(config, opts) + if err != nil { + return nil, err + } + return &nonTypedOnlyCache{ + Cache: normalCache, + }, nil +} + +// nonTypedOnlyCache is a cache.Cache that only provides metadata & +// unstructured informers. 
+type nonTypedOnlyCache struct {
+	cache.Cache
+}
+
+func (c *nonTypedOnlyCache) GetInformer(obj runtime.Object) (cache.Informer, error) {
+	switch obj.(type) {
+	case (*metav1.PartialObjectMetadata):
+		return c.Cache.GetInformer(obj)
+	default:
+		return nil, fmt.Errorf("did not want to provide an informer for normal type %T", obj)
+	}
+}
+func (c *nonTypedOnlyCache) GetInformerForKind(gvk schema.GroupVersionKind) (cache.Informer, error) {
+	return nil, fmt.Errorf("don't try to sidestep the restriction on informer types by calling GetInformerForKind")
+}
+
+// TODO(directxman12): this function has too many arguments, and the whole
+// "nameSuffix" thing is a bit of a hack. It should be cleaned up significantly by someone with a bit of time
 func doReconcileTest(nameSuffix string, stop chan struct{}, blder *Builder, mgr manager.Manager, complete bool) {
 	deployName := "deploy-name-" + nameSuffix
 	rsName := "rs-name-" + nameSuffix
@@ -267,8 +320,8 @@ func doReconcileTest(nameSuffix string, stop chan struct{}, blder *Builder, mgr
 	Expect(err).NotTo(HaveOccurred())

 	By("Waiting for the Deployment Reconcile")
-	Expect(<-ch).To(Equal(reconcile.Request{
-		NamespacedName: types.NamespacedName{Namespace: "default", Name: deployName}}))
+	Eventually(ch).Should(Receive(Equal(reconcile.Request{
+		NamespacedName: types.NamespacedName{Namespace: "default", Name: deployName}})))

 	By("Creating a ReplicaSet")
 	// Expect a Reconcile when an Owned object is managedObjects.
@@ -297,8 +350,8 @@ func doReconcileTest(nameSuffix string, stop chan struct{}, blder *Builder, mgr
 	Expect(err).NotTo(HaveOccurred())

 	By("Waiting for the ReplicaSet Reconcile")
-	Expect(<-ch).To(Equal(reconcile.Request{
-		NamespacedName: types.NamespacedName{Namespace: "default", Name: deployName}}))
+	Eventually(ch).Should(Receive(Equal(reconcile.Request{
+		NamespacedName: types.NamespacedName{Namespace: "default", Name: deployName}})))
 }

diff --git a/pkg/builder/example_test.go b/pkg/builder/example_test.go
index 128f729d75..5d68114b5f 100644
--- a/pkg/builder/example_test.go
+++ b/pkg/builder/example_test.go
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"os"

+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"

 	appsv1 "k8s.io/api/apps/v1"
@@ -34,6 +35,60 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 )

+func ExampleBuilder_metadata_only() {
+	logf.SetLogger(zap.New())
+
+	var log = logf.Log.WithName("builder-examples")
+
+	mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{})
+	if err != nil {
+		log.Error(err, "could not create manager")
+		os.Exit(1)
+	}
+
+	cl := mgr.GetClient()
+	err = builder.
+		ControllerManagedBy(mgr).                  // Create the ControllerManagedBy
+		For(&appsv1.ReplicaSet{}).                 // ReplicaSet is the Application API
+		Owns(builder.OnlyMetadata(&corev1.Pod{})). // ReplicaSet owns Pods created by it, and caches them as metadata only
+		Complete(reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) {
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			// Read the ReplicaSet
+			rs := &appsv1.ReplicaSet{}
+			err := cl.Get(ctx, req.NamespacedName, rs)
+			if err != nil {
+				return reconcile.Result{}, client.IgnoreNotFound(err)
+			}
+
+			// List the Pods matching the PodTemplate Labels, but only their metadata
+			var podsMeta metav1.PartialObjectMetadataList
+			err = cl.List(ctx, &podsMeta, client.InNamespace(req.Namespace), client.MatchingLabels(rs.Spec.Template.Labels))
+			if err != nil {
+				return reconcile.Result{}, client.IgnoreNotFound(err)
+			}
+
+			// Update the ReplicaSet
+			rs.Labels["pod-count"] = fmt.Sprintf("%v", len(podsMeta.Items))
+			err = cl.Update(ctx, rs)
+			if err != nil {
+				return reconcile.Result{}, err
+			}
+
+			return reconcile.Result{}, nil
+		}))
+	if err != nil {
+		log.Error(err, "could not create controller")
+		os.Exit(1)
+	}
+
+	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
+		log.Error(err, "could not start manager")
+		os.Exit(1)
+	}
+}
+
 // This example creates a simple application ControllerManagedBy that is configured for ReplicaSets and Pods.
 //
 // * Create a new application for ReplicaSets that manages Pods owned by the ReplicaSet and calls into
diff --git a/pkg/builder/options.go b/pkg/builder/options.go
new file mode 100644
index 0000000000..c9012230cd
--- /dev/null
+++ b/pkg/builder/options.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package builder
+
+import "k8s.io/apimachinery/pkg/runtime"
+
+// OnlyMetadata tells the controller to *only* cache metadata, and to watch
+// the API server in metadata-only form. This is useful when watching
+// lots of objects, really big objects, or objects for which you only know
+// the GVK, but not the structure. You'll need to pass
+// metav1.PartialObjectMetadata to the client when fetching objects in your
+// reconciler, otherwise you'll end up with a duplicate structured or
+// unstructured cache.
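+//
+// As a short illustrative sketch only (these lines are not part of the
+// backported commits): register the watch with
+//
+//     .Owns(builder.OnlyMetadata(&corev1.Pod{}))
+//
+// and then fetch the same objects in the reconciler as metadata only:
+//
+//     podMeta := &metav1.PartialObjectMetadata{}
+//     podMeta.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Pod"))
+//     err := cl.Get(ctx, req.NamespacedName, podMeta)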
+func OnlyMetadata(obj runtime.Object) runtime.Object { + return &onlyMetadataWrapper{obj} +} + +type onlyMetadataWrapper struct { + runtime.Object +} + +// }}} diff --git a/pkg/cache/cache_test.go b/pkg/cache/cache_test.go index e6fb0afa98..ec7fff1705 100644 --- a/pkg/cache/cache_test.go +++ b/pkg/cache/cache_test.go @@ -153,7 +153,7 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca Expect(listObj.Items).NotTo(BeEmpty()) hasKubeService := false for _, svc := range listObj.Items { - if svc.Namespace == "default" && svc.Name == "kubernetes" { + if isKubeService(&svc) { hasKubeService = true break } @@ -300,7 +300,7 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca Expect(listObj.Items).NotTo(BeEmpty()) hasKubeService := false for _, svc := range listObj.Items { - if svc.GetNamespace() == "default" && svc.GetName() == "kubernetes" { + if isKubeService(&svc) { hasKubeService = true break } @@ -472,6 +472,204 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca svc := &kcorev1.Service{} svcKey := client.ObjectKey{Namespace: "unknown", Name: "unknown"} + By("verifying that an error is returned") + err := informerCache.Get(context.Background(), svcKey, svc) + Expect(err).To(HaveOccurred()) + }) + }) + Context("with metadata-only objects", func() { + It("should be able to list objects that haven't been watched previously", func() { + By("listing all services in the cluster") + listObj := &kmetav1.PartialObjectMetadataList{} + listObj.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "ServiceList", + }) + err := informerCache.List(context.Background(), listObj) + Expect(err).To(Succeed()) + + By("verifying that the returned list contains the Kubernetes service") + // NB: kubernetes default service is automatically created in testenv. 
+ Expect(listObj.Items).NotTo(BeEmpty()) + hasKubeService := false + for _, svc := range listObj.Items { + if isKubeService(&svc) { + hasKubeService = true + break + } + } + Expect(hasKubeService).To(BeTrue()) + }) + It("should be able to get objects that haven't been watched previously", func() { + By("getting the Kubernetes service") + svc := &kmetav1.PartialObjectMetadata{} + svc.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Service", + }) + svcKey := client.ObjectKey{Namespace: "default", Name: "kubernetes"} + Expect(informerCache.Get(context.Background(), svcKey, svc)).To(Succeed()) + + By("verifying that the returned service looks reasonable") + Expect(svc.GetName()).To(Equal("kubernetes")) + Expect(svc.GetNamespace()).To(Equal("default")) + }) + + It("should support filtering by labels in a single namespace", func() { + By("listing pods with a particular label") + // NB: each pod has a "test-label": + out := kmetav1.PartialObjectMetadataList{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + err := informerCache.List(context.Background(), &out, + client.InNamespace(testNamespaceTwo), + client.MatchingLabels(map[string]string{"test-label": "test-pod-2"})) + Expect(err).To(Succeed()) + + By("verifying the returned pods have the correct label") + Expect(out.Items).NotTo(BeEmpty()) + Expect(out.Items).Should(HaveLen(1)) + actual := out.Items[0] + Expect(actual.GetLabels()["test-label"]).To(Equal("test-pod-2")) + }) + + It("should support filtering by labels from multiple namespaces", func() { + By("creating another pod with the same label but different namespace") + anotherPod := createPod("test-pod-2", testNamespaceOne, kcorev1.RestartPolicyAlways) + defer deletePod(anotherPod) + + By("listing pods with a particular label") + // NB: each pod has a "test-label": + out := kmetav1.PartialObjectMetadataList{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + labels := map[string]string{"test-label": "test-pod-2"} + err := informerCache.List(context.Background(), &out, client.MatchingLabels(labels)) + Expect(err).To(Succeed()) + + By("verifying multiple pods with the same label in different namespaces are returned") + Expect(out.Items).NotTo(BeEmpty()) + Expect(out.Items).Should(HaveLen(2)) + for _, actual := range out.Items { + Expect(actual.GetLabels()["test-label"]).To(Equal("test-pod-2")) + } + + }) + + It("should be able to list objects by namespace", func() { + By("listing pods in test-namespace-1") + listObj := &kmetav1.PartialObjectMetadataList{} + listObj.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + err := informerCache.List(context.Background(), listObj, client.InNamespace(testNamespaceOne)) + Expect(err).To(Succeed()) + + By("verifying that the returned pods are in test-namespace-1") + Expect(listObj.Items).NotTo(BeEmpty()) + Expect(listObj.Items).Should(HaveLen(1)) + actual := listObj.Items[0] + Expect(actual.GetNamespace()).To(Equal(testNamespaceOne)) + }) + + It("should be able to restrict cache to a namespace", func() { + By("creating a namespaced cache") + namespacedCache, err := cache.New(cfg, cache.Options{Namespace: testNamespaceOne}) + Expect(err).NotTo(HaveOccurred()) + + By("running the cache and waiting for it to sync") + go func() { + defer GinkgoRecover() + Expect(namespacedCache.Start(stop)).To(Succeed()) + }() + 
+			Expect(namespacedCache.WaitForCacheSync(stop)).NotTo(BeFalse())
+
+			By("listing pods in all namespaces")
+			out := &kmetav1.PartialObjectMetadataList{}
+			out.SetGroupVersionKind(schema.GroupVersionKind{
+				Group:   "",
+				Version: "v1",
+				Kind:    "PodList",
+			})
+			Expect(namespacedCache.List(context.Background(), out)).To(Succeed())
+
+			By("verifying the returned pod is from the watched namespace")
+			Expect(out.Items).NotTo(BeEmpty())
+			Expect(out.Items).Should(HaveLen(1))
+			Expect(out.Items[0].GetNamespace()).To(Equal(testNamespaceOne))
+
+			By("listing all namespaces - should still be able to get a cluster-scoped resource")
+			namespaceList := &kmetav1.PartialObjectMetadataList{}
+			namespaceList.SetGroupVersionKind(schema.GroupVersionKind{
+				Group:   "",
+				Version: "v1",
+				Kind:    "NamespaceList",
+			})
+			Expect(namespacedCache.List(context.Background(), namespaceList)).To(Succeed())
+
+			By("verifying the namespace list is not empty")
+			Expect(namespaceList.Items).NotTo(BeEmpty())
+		})
+
+		It("should deep copy the object unless told otherwise", func() {
+			By("retrieving a specific pod from the cache")
+			out := &kmetav1.PartialObjectMetadata{}
+			out.SetGroupVersionKind(schema.GroupVersionKind{
+				Group:   "",
+				Version: "v1",
+				Kind:    "Pod",
+			})
+			uKnownPod2 := &kmetav1.PartialObjectMetadata{}
+			knownPod2.(*kcorev1.Pod).ObjectMeta.DeepCopyInto(&uKnownPod2.ObjectMeta)
+			uKnownPod2.SetGroupVersionKind(schema.GroupVersionKind{
+				Group:   "",
+				Version: "v1",
+				Kind:    "Pod",
+			})
+
+			podKey := client.ObjectKey{Name: "test-pod-2", Namespace: testNamespaceTwo}
+			Expect(informerCache.Get(context.Background(), podKey, out)).To(Succeed())
+
+			By("verifying the retrieved pod is equal to a known pod")
+			Expect(out).To(Equal(uKnownPod2))
+
+			By("altering a field in the retrieved pod")
+			out.Labels["foo"] = "bar"
+
+			By("verifying the pods are no longer equal")
+			Expect(out).NotTo(Equal(knownPod2))
+		})
+
+		It("should return an error if the object is not found", func() {
+			By("getting a service that does not exist")
+			svc := &kmetav1.PartialObjectMetadata{}
+			svc.SetGroupVersionKind(schema.GroupVersionKind{
+				Group:   "",
+				Version: "v1",
+				Kind:    "Service",
+			})
+			svcKey := client.ObjectKey{Namespace: testNamespaceOne, Name: "unknown"}
+
+			By("verifying that an error is returned")
+			err := informerCache.Get(context.Background(), svcKey, svc)
+			Expect(err).To(HaveOccurred())
+			Expect(errors.IsNotFound(err)).To(BeTrue())
+		})
+		It("should return an error if getting object in unwatched namespace", func() {
+			By("getting a service that does not exist")
+			svc := &kcorev1.Service{}
+			svcKey := client.ObjectKey{Namespace: "unknown", Name: "unknown"}
+
+			By("verifying that an error is returned")
 			err := informerCache.Get(context.Background(), svcKey, svc)
 			Expect(err).To(HaveOccurred())
@@ -518,7 +716,6 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca
 				Eventually(out).Should(Receive(Equal(pod)))
 				close(done)
 			})
-			// TODO: Add a test for when GVK is not in Scheme. Does code support informer for unstructured object?
It("should be able to get an informer by group/version/kind", func(done Done) { By("getting an shared index informer for gvk = core/v1/pod") gvk := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"} @@ -597,7 +794,7 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca Object: map[string]interface{}{ "spec": map[string]interface{}{ "containers": []map[string]interface{}{ - map[string]interface{}{ + { "name": "nginx", "image": "nginx", }, @@ -678,6 +875,106 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca client.MatchingFields{"spec.restartPolicy": "OnFailure"}) Expect(err).To(Succeed()) + By("verifying that the returned pods have correct restart policy") + Expect(listObj.Items).NotTo(BeEmpty()) + Expect(listObj.Items).Should(HaveLen(1)) + actual := listObj.Items[0] + Expect(actual.GetName()).To(Equal("test-pod-3")) + }, 3) + }) + Context("with metadata-only objects", func() { + It("should be able to get informer for the object", func(done Done) { + By("getting a shared index informer for a pod") + + pod := &kcorev1.Pod{ + ObjectMeta: kmetav1.ObjectMeta{ + Name: "informer-obj", + Namespace: "default", + }, + Spec: kcorev1.PodSpec{ + Containers: []kcorev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + } + + podMeta := &kmetav1.PartialObjectMetadata{} + pod.ObjectMeta.DeepCopyInto(&podMeta.ObjectMeta) + podMeta.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + + sii, err := informerCache.GetInformer(podMeta) + Expect(err).NotTo(HaveOccurred()) + Expect(sii).NotTo(BeNil()) + Expect(sii.HasSynced()).To(BeTrue()) + + By("adding an event handler listening for object creation which sends the object to a channel") + out := make(chan interface{}) + addFunc := func(obj interface{}) { + out <- obj + } + sii.AddEventHandler(kcache.ResourceEventHandlerFuncs{AddFunc: addFunc}) + + By("adding an object") + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl.Create(context.Background(), pod)).To(Succeed()) + defer deletePod(pod) + // re-copy the result in so that we can match on it properly + pod.ObjectMeta.DeepCopyInto(&podMeta.ObjectMeta) + // NB(directxman12): proto doesn't care typemeta, and + // partialobjectmetadata is proto, so no typemeta + // TODO(directxman12): we should paper over this in controller-runtime + podMeta.APIVersion = "" + podMeta.Kind = "" + + By("verifying the object's metadata is received on the channel") + Eventually(out).Should(Receive(Equal(podMeta))) + close(done) + }, 3) + + It("should be able to index an object field then retrieve objects by that field", func() { + By("creating the cache") + informer, err := cache.New(cfg, cache.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("indexing the restartPolicy field of the Pod object before starting") + pod := &kmetav1.PartialObjectMetadata{} + pod.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + indexFunc := func(obj runtime.Object) []string { + metadata := obj.(*kmetav1.PartialObjectMetadata) + return []string{metadata.Labels["test-label"]} + } + Expect(informer.IndexField(pod, "metadata.labels.test-label", indexFunc)).To(Succeed()) + + By("running the cache and waiting for it to sync") + go func() { + defer GinkgoRecover() + Expect(informer.Start(stop)).To(Succeed()) + }() + Expect(informer.WaitForCacheSync(stop)).NotTo(BeFalse()) + + By("listing Pods with restartPolicyOnFailure") + listObj := 
&kmetav1.PartialObjectMetadataList{} + listObj.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + err = informer.List(context.Background(), listObj, + client.MatchingFields{"metadata.labels.test-label": "test-pod-3"}) + Expect(err).To(Succeed()) + By("verifying that the returned pods have correct restart policy") Expect(listObj.Items).NotTo(BeEmpty()) Expect(listObj.Items).Should(HaveLen(1)) @@ -706,3 +1003,9 @@ func ensureNamespace(namespace string, client client.Client) error { } return err } + +//nolint:interfacer +func isKubeService(svc kmetav1.Object) bool { + // grumble grumble linters grumble grumble + return svc.GetNamespace() == "default" && svc.GetName() == "kubernetes" +} diff --git a/pkg/cache/internal/deleg_map.go b/pkg/cache/internal/deleg_map.go index cdaf1fc21c..10720115c4 100644 --- a/pkg/cache/internal/deleg_map.go +++ b/pkg/cache/internal/deleg_map.go @@ -21,6 +21,7 @@ import ( "time" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -31,10 +32,12 @@ import ( // InformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. // It uses a standard parameter codec constructed based on the given generated Scheme. type InformersMap struct { - // we abstract over the details of structured vs unstructured with the specificInformerMaps + // we abstract over the details of structured/unstructured/metadata with the specificInformerMaps + // TODO(directxman12): genericize this over different projections now that we have 3 different maps structured *specificInformersMap unstructured *specificInformersMap + metadata *specificInformersMap // Scheme maps runtime.Objects to GroupVersionKinds Scheme *runtime.Scheme @@ -51,6 +54,7 @@ func NewInformersMap(config *rest.Config, return &InformersMap{ structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace), unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace), + metadata: newMetadataInformersMap(config, scheme, mapper, resync, namespace), Scheme: scheme, } @@ -60,6 +64,7 @@ func NewInformersMap(config *rest.Config, func (m *InformersMap) Start(stop <-chan struct{}) error { go m.structured.Start(stop) go m.unstructured.Start(stop) + go m.metadata.Start(stop) <-stop return nil } @@ -75,21 +80,27 @@ func (m *InformersMap) WaitForCacheSync(stop <-chan struct{}) bool { if !m.unstructured.waitForStarted(stop) { return false } + if !m.metadata.waitForStarted(stop) { + return false + } return cache.WaitForCacheSync(stop, syncedFuncs...) } // Get will create a new Informer and add it to the map of InformersMap if none exists. Returns // the Informer from the map. 
 func (m *InformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) {
-	_, isUnstructured := obj.(*unstructured.Unstructured)
-	_, isUnstructuredList := obj.(*unstructured.UnstructuredList)
-	isUnstructured = isUnstructured || isUnstructuredList
-
-	if isUnstructured {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return m.unstructured.Get(ctx, gvk, obj)
+	case *unstructured.UnstructuredList:
 		return m.unstructured.Get(ctx, gvk, obj)
+	case *metav1.PartialObjectMetadata:
+		return m.metadata.Get(ctx, gvk, obj)
+	case *metav1.PartialObjectMetadataList:
+		return m.metadata.Get(ctx, gvk, obj)
+	default:
+		return m.structured.Get(ctx, gvk, obj)
 	}
-
-	return m.structured.Get(ctx, gvk, obj)
 }

 // newStructuredInformersMap creates a new InformersMap for structured objects.
@@ -101,3 +112,8 @@ func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapp
 func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, namespace string) *specificInformersMap {
 	return newSpecificInformersMap(config, scheme, mapper, resync, namespace, createUnstructuredListWatch)
 }
+
+// newMetadataInformersMap creates a new InformersMap for metadata-only objects.
+func newMetadataInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, namespace string) *specificInformersMap {
+	return newSpecificInformersMap(config, scheme, mapper, resync, namespace, createMetadataListWatch)
+}
diff --git a/pkg/cache/internal/informers_map.go b/pkg/cache/internal/informers_map.go
index 167a195e7f..ebdc83b1aa 100644
--- a/pkg/cache/internal/informers_map.go
+++ b/pkg/cache/internal/informers_map.go
@@ -31,6 +31,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/serializer"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/metadata"
 	"k8s.io/client-go/rest"

 	"k8s.io/client-go/tools/cache"
@@ -295,6 +296,40 @@ func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInform
 	}, nil
 }

+func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) {
+	// Kubernetes APIs work against Resources, not GroupVersionKinds. Map the
+	// groupVersionKind to the Resource API we will use.
+	mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+	if err != nil {
+		return nil, err
+	}
+
+	// grab the metadata client
+	client, err := metadata.NewForConfig(ip.config)
+	if err != nil {
+		return nil, err
+	}
+
+	// create the relevant listwatch
+	return &cache.ListWatch{
+		ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
+			if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
+				return client.Resource(mapping.Resource).Namespace(ip.namespace).List(opts)
+			}
+			return client.Resource(mapping.Resource).List(opts)
+		},
+		// Setup the watch function
+		WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
+			// Watch needs to be set to true separately
+			opts.Watch = true
+			if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
+				return client.Resource(mapping.Resource).Namespace(ip.namespace).Watch(opts)
+			}
+			return client.Resource(mapping.Resource).Watch(opts)
+		},
+	}, nil
+}
+
 // resyncPeriod returns a function which generates a duration each time it is
 // invoked; this is so that multiple controllers don't get into lock-step and all
 // hammer the apiserver with list requests simultaneously.
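For reference, the ListFunc and WatchFunc above are thin wrappers around the client-go metadata client. Used standalone, the same call shape looks roughly like this (a minimal sketch, not part of the patch; it assumes the context-free List signature of the client-go vintage this backport builds against, and the pods GVR plus the "default" namespace are just example inputs):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/metadata"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	// Build a metadata-only client from the usual rest.Config.
	mc, err := metadata.NewForConfig(config.GetConfigOrDie())
	if err != nil {
		panic(err)
	}

	// List pods in "default" as PartialObjectMetadata -- the same call
	// createMetadataListWatch issues in its ListFunc.
	gvr := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
	pods, err := mc.Resource(gvr).Namespace("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, p := range pods.Items {
		fmt.Println(p.Namespace, p.Name)
	}
}
```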
diff --git a/pkg/client/apiutil/apimachinery.go b/pkg/client/apiutil/apimachinery.go index 9fe32b21f3..2766d748e9 100644 --- a/pkg/client/apiutil/apimachinery.go +++ b/pkg/client/apiutil/apimachinery.go @@ -23,6 +23,7 @@ import ( "fmt" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -48,6 +49,27 @@ func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { // GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK. func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) { + // TODO(directxman12): do we want to generalize this to arbitrary container types? + // I think we'd need a generalized form of scheme or something. It's a + // shame there's not a reliable "GetGVK" interface that works by default + // for unpopulated static types and populated "dynamic" types + // (unstructured, partial, etc) + + // check for PartialObjectMetadata, which is analogous to unstructured, but isn't handled by ObjectKinds + _, isPartial := obj.(*metav1.PartialObjectMetadata) + _, isPartialList := obj.(*metav1.PartialObjectMetadataList) + if isPartial || isPartialList { + // we require that the GVK be populated in order to recognize the object + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return schema.GroupVersionKind{}, runtime.NewMissingKindErr("unstructured object has no kind") + } + if len(gvk.Version) == 0 { + return schema.GroupVersionKind{}, runtime.NewMissingVersionErr("unstructured object has no version") + } + return gvk, nil + } + gvks, isUnversioned, err := scheme.ObjectKinds(obj) if err != nil { return schema.GroupVersionKind{}, err diff --git a/pkg/client/client.go b/pkg/client/client.go index c1c4d5d691..9c08bf8fe2 100644 --- a/pkg/client/client.go +++ b/pkg/client/client.go @@ -21,11 +21,13 @@ import ( "fmt" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/metadata" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) @@ -76,6 +78,11 @@ func New(config *rest.Config, options Options) (Client, error) { resourceByType: make(map[schema.GroupVersionKind]*resourceMeta), } + rawMetaClient, err := metadata.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("unable to construct metadata-only client for use as part of client: %w", err) + } + c := &client{ typedClient: typedClient{ cache: clientcache, @@ -85,6 +92,10 @@ func New(config *rest.Config, options Options) (Client, error) { cache: clientcache, paramCodec: noConversionParamCodec{}, }, + metadataClient: metadataClient{ + client: rawMetaClient, + restMapper: options.Mapper, + }, } return c, nil @@ -97,6 +108,7 @@ var _ Client = &client{} type client struct { typedClient typedClient unstructuredClient unstructuredClient + metadataClient metadataClient } // resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object. 
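Putting the GVKForObject change and the new metadataClient together, client-side usage looks roughly like this (an illustrative sketch, not part of the patch; the helper name and the Deployment GVK are arbitrary, and the usual metav1/schema/client imports are assumed):

```go
// Illustrative only -- not part of this patch.
func deleteDeploymentMeta(ctx context.Context, cl client.Client, key client.ObjectKey) error {
	// The GVK must be set by hand: GVKForObject cannot infer it for
	// metadata-only objects and errors out if Kind or Version is empty.
	dep := &metav1.PartialObjectMetadata{}
	dep.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
	if err := cl.Get(ctx, key, dep); err != nil {
		return err
	}
	// Get/List/Delete/DeleteAllOf/Patch route to the metadata client;
	// Create and Update deliberately return errors (see the dispatch below).
	return cl.Delete(ctx, dep)
}
```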
@@ -111,67 +123,88 @@ func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersi // Create implements client.Client func (c *client) Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error { - _, ok := obj.(*unstructured.Unstructured) - if ok { + switch obj.(type) { + case *unstructured.Unstructured: return c.unstructuredClient.Create(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot create using only metadata") + default: + return c.typedClient.Create(ctx, obj, opts...) } - return c.typedClient.Create(ctx, obj, opts...) } // Update implements client.Client func (c *client) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) - _, ok := obj.(*unstructured.Unstructured) - if ok { + switch obj.(type) { + case *unstructured.Unstructured: return c.unstructuredClient.Update(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot update using only metadata -- did you mean to patch?") + default: + return c.typedClient.Update(ctx, obj, opts...) } - return c.typedClient.Update(ctx, obj, opts...) } // Delete implements client.Client func (c *client) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error { - _, ok := obj.(*unstructured.Unstructured) - if ok { + switch obj.(type) { + case *unstructured.Unstructured: return c.unstructuredClient.Delete(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return c.metadataClient.Delete(ctx, obj, opts...) + default: + return c.typedClient.Delete(ctx, obj, opts...) } - return c.typedClient.Delete(ctx, obj, opts...) } // DeleteAllOf implements client.Client func (c *client) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error { - _, ok := obj.(*unstructured.Unstructured) - if ok { + switch obj.(type) { + case *unstructured.Unstructured: return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return c.metadataClient.DeleteAllOf(ctx, obj, opts...) + default: + return c.typedClient.DeleteAllOf(ctx, obj, opts...) } - return c.typedClient.DeleteAllOf(ctx, obj, opts...) } // Patch implements client.Client func (c *client) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) - _, ok := obj.(*unstructured.Unstructured) - if ok { + switch obj.(type) { + case *unstructured.Unstructured: return c.unstructuredClient.Patch(ctx, obj, patch, opts...) + case *metav1.PartialObjectMetadata: + return c.metadataClient.Patch(ctx, obj, patch, opts...) + default: + return c.typedClient.Patch(ctx, obj, patch, opts...) } - return c.typedClient.Patch(ctx, obj, patch, opts...) 
 }

 // Get implements client.Client
 func (c *client) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error {
-	_, ok := obj.(*unstructured.Unstructured)
-	if ok {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
 		return c.unstructuredClient.Get(ctx, key, obj)
+	case *metav1.PartialObjectMetadata:
+		return c.metadataClient.Get(ctx, key, obj)
+	default:
+		return c.typedClient.Get(ctx, key, obj)
 	}
-	return c.typedClient.Get(ctx, key, obj)
 }

 // List implements client.Client
 func (c *client) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error {
-	_, ok := obj.(*unstructured.UnstructuredList)
-	if ok {
+	switch obj.(type) {
+	case *unstructured.UnstructuredList:
 		return c.unstructuredClient.List(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadataList:
+		return c.metadataClient.List(ctx, obj, opts...)
+	default:
+		return c.typedClient.List(ctx, obj, opts...)
 	}
-	return c.typedClient.List(ctx, obj, opts...)
 }

 // Status implements client.StatusClient
@@ -190,19 +223,25 @@ var _ StatusWriter = &statusWriter{}
 // Update implements client.StatusWriter
 func (sw *statusWriter) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error {
 	defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
-	_, ok := obj.(*unstructured.Unstructured)
-	if ok {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
 		return sw.client.unstructuredClient.UpdateStatus(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadata:
+		return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?")
+	default:
+		return sw.client.typedClient.UpdateStatus(ctx, obj, opts...)
 	}
-	return sw.client.typedClient.UpdateStatus(ctx, obj, opts...)
 }

 // Patch implements client.Client
 func (sw *statusWriter) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error {
 	defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
-	_, ok := obj.(*unstructured.Unstructured)
-	if ok {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
 		return sw.client.unstructuredClient.PatchStatus(ctx, obj, patch, opts...)
+	case *metav1.PartialObjectMetadata:
+		return sw.client.metadataClient.PatchStatus(ctx, obj, patch, opts...)
+	default:
+		return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...)
 	}
-	return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...)
} diff --git a/pkg/client/client_test.go b/pkg/client/client_test.go index 84ace62f1a..37d5b27373 100644 --- a/pkg/client/client_test.go +++ b/pkg/client/client_test.go @@ -21,6 +21,7 @@ import ( "encoding/json" "fmt" "sync/atomic" + "time" "k8s.io/apimachinery/pkg/types" @@ -48,12 +49,73 @@ func deleteDeployment(dep *appsv1.Deployment, ns string) { } } -func deleteNamespace(ns *corev1.Namespace) { - _, err := clientset.CoreV1().Namespaces().Get(ns.Name, metav1.GetOptions{}) - if err == nil { - err = clientset.CoreV1().Namespaces().Delete(ns.Name, &metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) +func deleteNamespace(ctx context.Context, ns *corev1.Namespace) { + ns, err := clientset.CoreV1().Namespaces().Get(ns.Name, metav1.GetOptions{}) + if err != nil { + return + } + + err = clientset.CoreV1().Namespaces().Delete(ns.Name, &metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + + // finalize if necessary + pos := -1 + finalizers := ns.Spec.Finalizers + for i, fin := range finalizers { + if fin == "kubernetes" { + pos = i + break + } + } + if pos == -1 { + // no need to finalize + return + } + + // re-get in order to finalize + ns, err = clientset.CoreV1().Namespaces().Get(ns.Name, metav1.GetOptions{}) + if err != nil { + return + } + + ns.Spec.Finalizers = append(finalizers[:pos], finalizers[pos+1:]...) + _, err = clientset.CoreV1().Namespaces().Finalize(ns) + Expect(err).NotTo(HaveOccurred()) + +WAIT_LOOP: + for i := 0; i < 10; i++ { + ns, err = clientset.CoreV1().Namespaces().Get(ns.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + // success! + return + } + select { + case <-ctx.Done(): + break WAIT_LOOP + // failed to delete in time, see failure below + case <-time.After(100 * time.Millisecond): + // do nothing, try again + } + } + Fail(fmt.Sprintf("timed out waiting for namespace %q to be deleted", ns.Name)) +} + +// metaOnlyFromObj returns PartialObjectMetadata from a concrete Go struct that +// returns a concrete *metav1.ObjectMeta from GetObjectMeta (yes, that plays a +// bit fast and loose, but the only other options are serializing and then +// deserializing, or manually calling all the accessor funcs, which are both a bit annoying). 
+func metaOnlyFromObj(obj interface { + runtime.Object + metav1.ObjectMetaAccessor +}, scheme *runtime.Scheme) *metav1.PartialObjectMetadata { + metaObj := metav1.PartialObjectMetadata{} + obj.GetObjectMeta().(*metav1.ObjectMeta).DeepCopyInto(&metaObj.ObjectMeta) + kinds, _, err := scheme.ObjectKinds(obj) + if err != nil { + panic(err) } + metaObj.SetGroupVersionKind(kinds[0]) + return &metaObj } var _ = Describe("Client", func() { @@ -67,6 +129,7 @@ var _ = Describe("Client", func() { var replicaCount int32 = 2 var ns = "default" var mergePatch []byte + ctx := context.TODO() BeforeEach(func(done Done) { atomic.AddUint64(&count, 1) @@ -275,6 +338,16 @@ var _ = Describe("Client", func() { // Example: ListOptions }) + Context("with metadata objects", func() { + It("should fail with an error", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + obj := metaOnlyFromObj(dep, scheme) + Expect(cl.Create(context.TODO(), obj)).NotTo(Succeed()) + }) + }) + Context("with the DryRun option", func() { It("should not create a new object", func(done Done) { cl, err := client.New(cfg, client.Options{}) @@ -642,6 +715,17 @@ var _ = Describe("Client", func() { close(done) }) }) + + Context("with metadata objects", func() { + It("should fail with an error", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + obj := metaOnlyFromObj(dep, scheme) + + Expect(cl.Update(context.TODO(), obj)).NotTo(Succeed()) + }) + }) }) Describe("StatusClient", func() { @@ -953,6 +1037,44 @@ var _ = Describe("Client", func() { }) }) + + Context("with metadata objects", func() { + It("should fail to update with an error", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + obj := metaOnlyFromObj(dep, scheme) + Expect(cl.Status().Update(context.TODO(), obj)).NotTo(Succeed()) + }) + + It("should patch status and preserve type information", func(done Done) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + Expect(err).NotTo(HaveOccurred()) + + By("patching the status of Deployment") + objPatch := client.MergeFrom(metaOnlyFromObj(dep, scheme)) + dep.Annotations = map[string]string{"some-new-annotation": "some-new-value"} + obj := metaOnlyFromObj(dep, scheme) + err = cl.Status().Patch(context.TODO(), obj, objPatch) + Expect(err).NotTo(HaveOccurred()) + + By("validating updated Deployment has type information") + Expect(obj.GroupVersionKind()).To(Equal(depGvk)) + + By("validating patched Deployment has new status") + actual, err := clientset.AppsV1().Deployments(ns).Get(dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.Annotations).To(HaveKeyWithValue("some-new-annotation", "some-new-value")) + + close(done) + }) + }) }) Describe("Delete", func() { @@ -1183,6 +1305,95 @@ var _ = Describe("Client", func() { close(done) }) }) + + Context("with metadata objects", func() { + It("should delete an existing object from a go struct", func(done Done) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + Expect(err).NotTo(HaveOccurred()) + + By("deleting the Deployment") + metaObj := metaOnlyFromObj(dep, scheme) + err = 
cl.Delete(context.TODO(), metaObj)
+				Expect(err).NotTo(HaveOccurred())
+
+				By("validating the Deployment no longer exists")
+				_, err = clientset.AppsV1().Deployments(ns).Get(dep.Name, metav1.GetOptions{})
+				Expect(err).To(HaveOccurred())
+
+				close(done)
+			})
+
+			It("should delete an existing non-namespaced object from a go struct", func(done Done) {
+				cl, err := client.New(cfg, client.Options{})
+				Expect(err).NotTo(HaveOccurred())
+				Expect(cl).NotTo(BeNil())
+
+				By("initially creating a Node")
+				node, err := clientset.CoreV1().Nodes().Create(node)
+				Expect(err).NotTo(HaveOccurred())
+
+				By("deleting the Node")
+				metaObj := metaOnlyFromObj(node, scheme)
+				err = cl.Delete(context.TODO(), metaObj)
+				Expect(err).NotTo(HaveOccurred())
+
+				By("validating the Node no longer exists")
+				_, err = clientset.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
+				Expect(err).To(HaveOccurred())
+
+				close(done)
+			})
+
+			It("should fail if the object does not exist", func(done Done) {
+				cl, err := client.New(cfg, client.Options{})
+				Expect(err).NotTo(HaveOccurred())
+				Expect(cl).NotTo(BeNil())
+
+				By("Deleting node before it is ever created")
+				metaObj := metaOnlyFromObj(node, scheme)
+				err = cl.Delete(context.TODO(), metaObj)
+				Expect(err).To(HaveOccurred())
+
+				close(done)
+			})
+
+			It("should delete a collection of objects", func(done Done) {
+				cl, err := client.New(cfg, client.Options{})
+				Expect(err).NotTo(HaveOccurred())
+				Expect(cl).NotTo(BeNil())
+
+				By("initially creating two Deployments")
+
+				dep2 := dep.DeepCopy()
+				dep2.Name = dep2.Name + "-2"
+
+				dep, err = clientset.AppsV1().Deployments(ns).Create(dep)
+				Expect(err).NotTo(HaveOccurred())
+				dep2, err = clientset.AppsV1().Deployments(ns).Create(dep2)
+				Expect(err).NotTo(HaveOccurred())
+
+				depName := dep.Name
+				dep2Name := dep2.Name
+
+				By("deleting Deployments")
+				metaObj := metaOnlyFromObj(dep, scheme)
+				err = cl.DeleteAllOf(context.TODO(), metaObj, client.InNamespace(ns), client.MatchingLabels(dep.ObjectMeta.Labels))
+				Expect(err).NotTo(HaveOccurred())
+
+				By("validating the Deployment no longer exists")
+				_, err = clientset.AppsV1().Deployments(ns).Get(depName, metav1.GetOptions{})
+				Expect(err).To(HaveOccurred())
+				_, err = clientset.AppsV1().Deployments(ns).Get(dep2Name, metav1.GetOptions{})
+				Expect(err).To(HaveOccurred())
+
+				close(done)
+			})
+
+		})
 	})

 	Describe("Patch", func() {
@@ -1645,6 +1856,77 @@ var _ = Describe("Client", func() {
 				close(done)
 			})
 		})
+
+		Context("with metadata objects", func() {
+			It("should fetch an existing object for a go struct", func(done Done) {
+				By("first creating the Deployment")
+				dep, err := clientset.AppsV1().Deployments(ns).Create(dep)
+				Expect(err).NotTo(HaveOccurred())
+
+				cl, err := client.New(cfg, client.Options{})
+				Expect(err).NotTo(HaveOccurred())
+				Expect(cl).NotTo(BeNil())
+
+				By("fetching the created Deployment")
+				var actual metav1.PartialObjectMetadata
+				actual.SetGroupVersionKind(schema.GroupVersionKind{
+					Group:   "apps",
+					Version: "v1",
+					Kind:    "Deployment",
+				})
+				key := client.ObjectKey{Namespace: ns, Name: dep.Name}
+				err = cl.Get(context.TODO(), key, &actual)
+				Expect(err).NotTo(HaveOccurred())
+				Expect(actual).NotTo(BeNil())
+
+				By("validating the fetched deployment equals the created one")
+				Expect(metaOnlyFromObj(dep, scheme)).To(Equal(&actual))
+
+				close(done)
+			})
+
+			It("should fetch an existing non-namespaced object for a go struct", func(done Done) {
+				By("first creating the object")
+				node, err := clientset.CoreV1().Nodes().Create(node)
+				Expect(err).NotTo(HaveOccurred())
+
cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("retrieving node through client") + var actual metav1.PartialObjectMetadata + actual.SetGroupVersionKind(schema.GroupVersionKind{ + Version: "v1", + Kind: "Node", + }) + key := client.ObjectKey{Namespace: ns, Name: node.Name} + err = cl.Get(context.TODO(), key, &actual) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + + Expect(metaOnlyFromObj(node, scheme)).To(Equal(&actual)) + + close(done) + }) + + It("should fail if the object does not exist", func(done Done) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("fetching object that has not been created yet") + key := client.ObjectKey{Namespace: ns, Name: dep.Name} + var actual metav1.PartialObjectMetadata + actual.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + }) + err = cl.Get(context.TODO(), key, &actual) + Expect(err).To(HaveOccurred()) + + close(done) + }) + }) }) Describe("List", func() { @@ -1836,8 +2118,8 @@ var _ = Describe("Client", func() { deleteDeployment(depFrontend, "test-namespace-1") deleteDeployment(depBackend, "test-namespace-2") - deleteNamespace(tns1) - deleteNamespace(tns2) + deleteNamespace(ctx, tns1) + deleteNamespace(ctx, tns2) close(done) }, serverSideTimeoutSeconds) @@ -1985,8 +2267,8 @@ var _ = Describe("Client", func() { deleteDeployment(depFrontend3, "test-namespace-3") deleteDeployment(depBackend3, "test-namespace-3") deleteDeployment(depFrontend4, "test-namespace-4") - deleteNamespace(tns3) - deleteNamespace(tns4) + deleteNamespace(ctx, tns3) + deleteNamespace(ctx, tns4) close(done) }, serverSideTimeoutSeconds) @@ -2197,8 +2479,8 @@ var _ = Describe("Client", func() { deleteDeployment(depFrontend, "test-namespace-5") deleteDeployment(depBackend, "test-namespace-6") - deleteNamespace(tns1) - deleteNamespace(tns2) + deleteNamespace(ctx, tns1) + deleteNamespace(ctx, tns2) close(done) }, serverSideTimeoutSeconds) @@ -2354,8 +2636,8 @@ var _ = Describe("Client", func() { deleteDeployment(depFrontend3, "test-namespace-7") deleteDeployment(depBackend3, "test-namespace-7") deleteDeployment(depFrontend4, "test-namespace-8") - deleteNamespace(tns3) - deleteNamespace(tns4) + deleteNamespace(ctx, tns3) + deleteNamespace(ctx, tns4) close(done) }, serverSideTimeoutSeconds) @@ -2368,6 +2650,460 @@ var _ = Describe("Client", func() { }) }) + + Context("with metadata objects", func() { + It("should fetch collection of objects", func(done Done) { + By("creating an initial object") + dep, err := clientset.AppsV1().Deployments(ns).Create(dep) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all objects of that type in the cluster") + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + Expect(cl.List(context.Background(), metaList)).NotTo(HaveOccurred()) + + Expect(metaList.Items).NotTo(BeEmpty()) + hasDep := false + for _, item := range metaList.Items { + if item.Name == dep.Name && item.Namespace == dep.Namespace { + hasDep = true + break + } + } + Expect(hasDep).To(BeTrue()) + + close(done) + }, serverSideTimeoutSeconds) + + It("should return an empty list if there are no matching objects", func(done Done) { + cl, err := client.New(cfg, client.Options{}) + 
Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments in the cluster") + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + Expect(cl.List(context.Background(), metaList)).NotTo(HaveOccurred()) + + By("validating no Deployments are returned") + Expect(metaList.Items).To(BeEmpty()) + + close(done) + }, serverSideTimeoutSeconds) + + // TODO(seans): get label selector test working + It("should filter results by label selector", func(done Done) { + By("creating a Deployment with the app=frontend label") + depFrontend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-frontend", + Namespace: ns, + Labels: map[string]string{"app": "frontend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend, err := clientset.AppsV1().Deployments(ns).Create(depFrontend) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment with the app=backend label") + depBackend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-backend", + Namespace: ns, + Labels: map[string]string{"app": "backend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "backend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depBackend, err = clientset.AppsV1().Deployments(ns).Create(depBackend) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments with label app=backend") + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + labels := map[string]string{"app": "backend"} + err = cl.List(context.Background(), metaList, client.MatchingLabels(labels)) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment with the backend label is returned") + Expect(metaList.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(metaList.Items))) + actual := metaList.Items[0] + Expect(actual.Name).To(Equal("deployment-backend")) + + deleteDeployment(depFrontend, ns) + deleteDeployment(depBackend, ns) + + close(done) + }, serverSideTimeoutSeconds) + + It("should filter results by namespace selector", func(done Done) { + By("creating a Deployment in test-namespace-1") + tns1 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-1"}} + _, err := clientset.CoreV1().Namespaces().Create(tns1) + Expect(err).NotTo(HaveOccurred()) + depFrontend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-frontend", Namespace: "test-namespace-1"}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + 
depFrontend, err = clientset.AppsV1().Deployments("test-namespace-1").Create(depFrontend) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment in test-namespace-2") + tns2 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-2"}} + _, err = clientset.CoreV1().Namespaces().Create(tns2) + Expect(err).NotTo(HaveOccurred()) + depBackend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-backend", Namespace: "test-namespace-2"}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "backend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depBackend, err = clientset.AppsV1().Deployments("test-namespace-2").Create(depBackend) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments in test-namespace-1") + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + err = cl.List(context.Background(), metaList, client.InNamespace("test-namespace-1")) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment in test-namespace-1 is returned") + Expect(metaList.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(metaList.Items))) + actual := metaList.Items[0] + Expect(actual.Name).To(Equal("deployment-frontend")) + + deleteDeployment(depFrontend, "test-namespace-1") + deleteDeployment(depBackend, "test-namespace-2") + deleteNamespace(ctx, tns1) + deleteNamespace(ctx, tns2) + + close(done) + }, serverSideTimeoutSeconds) + + It("should filter results by field selector", func(done Done) { + By("creating a Deployment with name deployment-frontend") + depFrontend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-frontend", Namespace: ns}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend, err := clientset.AppsV1().Deployments(ns).Create(depFrontend) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment with name deployment-backend") + depBackend := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{Name: "deployment-backend", Namespace: ns}, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "backend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depBackend, err = clientset.AppsV1().Deployments(ns).Create(depBackend) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments with field metadata.name=deployment-backend") + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + err = cl.List(context.Background(), metaList, + 
client.MatchingFields{"metadata.name": "deployment-backend"}) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment with the backend field is returned") + Expect(metaList.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(metaList.Items))) + actual := metaList.Items[0] + Expect(actual.Name).To(Equal("deployment-backend")) + + deleteDeployment(depFrontend, ns) + deleteDeployment(depBackend, ns) + + close(done) + }, serverSideTimeoutSeconds) + + It("should filter results by namespace selector and label selector", func(done Done) { + By("creating a Deployment in test-namespace-3 with the app=frontend label") + tns3 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-3"}} + _, err := clientset.CoreV1().Namespaces().Create(tns3) + Expect(err).NotTo(HaveOccurred()) + depFrontend3 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-frontend", + Namespace: "test-namespace-3", + Labels: map[string]string{"app": "frontend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend3, err = clientset.AppsV1().Deployments("test-namespace-3").Create(depFrontend3) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment in test-namespace-3 with the app=backend label") + depBackend3 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-backend", + Namespace: "test-namespace-3", + Labels: map[string]string{"app": "backend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "backend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depBackend3, err = clientset.AppsV1().Deployments("test-namespace-3").Create(depBackend3) + Expect(err).NotTo(HaveOccurred()) + + By("creating a Deployment in test-namespace-4 with the app=frontend label") + tns4 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-4"}} + _, err = clientset.CoreV1().Namespaces().Create(tns4) + Expect(err).NotTo(HaveOccurred()) + depFrontend4 := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment-frontend", + Namespace: "test-namespace-4", + Labels: map[string]string{"app": "frontend"}, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "frontend"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + depFrontend4, err = clientset.AppsV1().Deployments("test-namespace-4").Create(depFrontend4) + Expect(err).NotTo(HaveOccurred()) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing all Deployments in test-namespace-3 with label app=frontend") + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + labels := map[string]string{"app": "frontend"} + err = cl.List(context.Background(), metaList, + 
client.InNamespace("test-namespace-3"), + client.MatchingLabels(labels), + ) + Expect(err).NotTo(HaveOccurred()) + + By("only the Deployment in test-namespace-3 with label app=frontend is returned") + Expect(metaList.Items).NotTo(BeEmpty()) + Expect(1).To(Equal(len(metaList.Items))) + actual := metaList.Items[0] + Expect(actual.Name).To(Equal("deployment-frontend")) + Expect(actual.Namespace).To(Equal("test-namespace-3")) + + deleteDeployment(depFrontend3, "test-namespace-3") + deleteDeployment(depBackend3, "test-namespace-3") + deleteDeployment(depFrontend4, "test-namespace-4") + deleteNamespace(ctx, tns3) + deleteNamespace(ctx, tns4) + + close(done) + }, serverSideTimeoutSeconds) + + It("should filter results using limit and continue options", func() { + + makeDeployment := func(suffix string) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("deployment-%s", suffix), + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"foo": "bar"}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, + }, + } + } + + By("creating 4 deployments") + dep1 := makeDeployment("1") + dep1, err := clientset.AppsV1().Deployments(ns).Create(dep1) + Expect(err).NotTo(HaveOccurred()) + defer deleteDeployment(dep1, ns) + + dep2 := makeDeployment("2") + dep2, err = clientset.AppsV1().Deployments(ns).Create(dep2) + Expect(err).NotTo(HaveOccurred()) + defer deleteDeployment(dep2, ns) + + dep3 := makeDeployment("3") + dep3, err = clientset.AppsV1().Deployments(ns).Create(dep3) + Expect(err).NotTo(HaveOccurred()) + defer deleteDeployment(dep3, ns) + + dep4 := makeDeployment("4") + dep4, err = clientset.AppsV1().Deployments(ns).Create(dep4) + Expect(err).NotTo(HaveOccurred()) + defer deleteDeployment(dep4, ns) + + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("listing 1 deployment when limit=1 is used") + metaList := &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + err = cl.List(context.Background(), metaList, + client.Limit(1), + ) + Expect(err).NotTo(HaveOccurred()) + + Expect(metaList.Items).To(HaveLen(1)) + Expect(metaList.Continue).NotTo(BeEmpty()) + Expect(metaList.Items[0].Name).To(Equal(dep1.Name)) + + continueToken := metaList.Continue + + By("listing the next deployment when previous continuation token is used and limit=1") + metaList = &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + err = cl.List(context.Background(), metaList, + client.Limit(1), + client.Continue(continueToken), + ) + Expect(err).NotTo(HaveOccurred()) + + Expect(metaList.Items).To(HaveLen(1)) + Expect(metaList.Continue).NotTo(BeEmpty()) + Expect(metaList.Items[0].Name).To(Equal(dep2.Name)) + + continueToken = metaList.Continue + + By("listing the 2 remaining deployments when previous continuation token is used without a limit") + metaList = &metav1.PartialObjectMetadataList{} + metaList.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "DeploymentList", + }) + err = cl.List(context.Background(), metaList, + client.Continue(continueToken), + ) + Expect(err).NotTo(HaveOccurred()) + + 
+				Expect(metaList.Items).To(HaveLen(2))
+				Expect(metaList.Continue).To(BeEmpty())
+				Expect(metaList.Items[0].Name).To(Equal(dep3.Name))
+				Expect(metaList.Items[1].Name).To(Equal(dep4.Name))
+			}, serverSideTimeoutSeconds)
+
+			PIt("should fail if the object doesn't have meta", func() {
+
+			})
+
+			PIt("should fail if the object cannot be mapped to a GVK", func() {
+
+			})
+
+			PIt("should fail if the GVK cannot be mapped to a Resource", func() {
+
+			})
+		})
 	})
 
 	Describe("CreateOptions", func() {
diff --git a/pkg/client/metadata_client.go b/pkg/client/metadata_client.go
new file mode 100644
index 0000000000..8ba5421a29
--- /dev/null
+++ b/pkg/client/metadata_client.go
@@ -0,0 +1,194 @@
+/*
+ Copyright 2020 The Kubernetes Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package client
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/metadata"
+)
+
+// TODO(directxman12): we could rewrite this on top of the low-level REST
+// client to avoid the extra shallow copy at the end, but I'm not sure it's
+// worth it -- the metadata client deals with falling back to loading the whole
+// object on older API servers, etc, and we'd have to reproduce that.
+
+// metadataClient is a client that reads & writes metadata-only requests to/from the API server.
+type metadataClient struct {
+	client     metadata.Interface
+	restMapper meta.RESTMapper
+}
+
+func (mc *metadataClient) getResourceInterface(gvk schema.GroupVersionKind, ns string) (metadata.ResourceInterface, error) {
+	mapping, err := mc.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+	if err != nil {
+		return nil, err
+	}
+	if mapping.Scope.Name() == meta.RESTScopeNameRoot {
+		return mc.client.Resource(mapping.Resource), nil
+	}
+	return mc.client.Resource(mapping.Resource).Namespace(ns), nil
+}
+
+// Delete implements client.Client
+func (mc *metadataClient) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error {
+	metadata, ok := obj.(*metav1.PartialObjectMetadata)
+	if !ok {
+		return fmt.Errorf("metadata client did not understand object: %T", obj)
+	}
+
+	resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), metadata.Namespace)
+	if err != nil {
+		return err
+	}
+
+	deleteOpts := DeleteOptions{}
+	deleteOpts.ApplyOptions(opts)
+
+	return resInt.Delete(metadata.Name, deleteOpts.AsDeleteOptions())
+}
+
+// DeleteAllOf implements client.Client
+func (mc *metadataClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error {
+	metadata, ok := obj.(*metav1.PartialObjectMetadata)
+	if !ok {
+		return fmt.Errorf("metadata client did not understand object: %T", obj)
+	}
+
+	deleteAllOfOpts := DeleteAllOfOptions{}
+	deleteAllOfOpts.ApplyOptions(opts)
+
+	resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), deleteAllOfOpts.ListOptions.Namespace)
+	if err != nil {
+		return err
+	}
+
+	return resInt.DeleteCollection(deleteAllOfOpts.AsDeleteOptions(), *deleteAllOfOpts.AsListOptions())
+}
+
+// Patch implements client.Client
+func (mc *metadataClient) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error {
+	metadata, ok := obj.(*metav1.PartialObjectMetadata)
+	if !ok {
+		return fmt.Errorf("metadata client did not understand object: %T", obj)
+	}
+
+	gvk := metadata.GroupVersionKind()
+	resInt, err := mc.getResourceInterface(gvk, metadata.Namespace)
+	if err != nil {
+		return err
+	}
+
+	data, err := patch.Data(obj)
+	if err != nil {
+		return err
+	}
+
+	// apply the caller's patch options before issuing the request
+	patchOpts := &PatchOptions{}
+	res, err := resInt.Patch(metadata.Name, patch.Type(), data, *patchOpts.ApplyOptions(opts).AsPatchOptions())
+	if err != nil {
+		return err
+	}
+	*metadata = *res
+	metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
+	return nil
+}
+
+// Get implements client.Client
+func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error {
+	metadata, ok := obj.(*metav1.PartialObjectMetadata)
+	if !ok {
+		return fmt.Errorf("metadata client did not understand object: %T", obj)
+	}
+
+	gvk := metadata.GroupVersionKind()
+
+	resInt, err := mc.getResourceInterface(gvk, key.Namespace)
+	if err != nil {
+		return err
+	}
+
+	res, err := resInt.Get(key.Name, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+	*metadata = *res
+	metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
+	return nil
+}
+
+// List implements client.Client
+func (mc *metadataClient) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error {
+	metadata, ok := obj.(*metav1.PartialObjectMetadataList)
+	if !ok {
+		return fmt.Errorf("metadata client did not understand object: %T", obj)
+	}
+
+	gvk := metadata.GroupVersionKind()
+	if strings.HasSuffix(gvk.Kind, "List") {
+		gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
+	}
+
+	listOpts := ListOptions{}
+	listOpts.ApplyOptions(opts)
+
+	resInt, err := mc.getResourceInterface(gvk, listOpts.Namespace)
+	if err != nil {
+		return err
+	}
+
+	res, err := resInt.List(*listOpts.AsListOptions())
+	if err != nil {
+		return err
+	}
+	*metadata = *res
+	metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
+	return nil
+}
+
+// PatchStatus issues a metadata-only patch against the object's status subresource.
+func (mc *metadataClient) PatchStatus(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error {
+	metadata, ok := obj.(*metav1.PartialObjectMetadata)
+	if !ok {
+		return fmt.Errorf("metadata client did not understand object: %T", obj)
+	}
+
+	gvk := metadata.GroupVersionKind()
+	resInt, err := mc.getResourceInterface(gvk, metadata.Namespace)
+	if err != nil {
+		return err
+	}
+
+	data, err := patch.Data(obj)
+	if err != nil {
+		return err
+	}
+
+	// apply the caller's patch options before issuing the request
+	patchOpts := &PatchOptions{}
+	res, err := resInt.Patch(metadata.Name, patch.Type(), data, *patchOpts.ApplyOptions(opts).AsPatchOptions(), "status")
+	if err != nil {
+		return err
+	}
+	*metadata = *res
+	metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
+	return nil
+}
diff --git a/pkg/handler/enqueue_owner.go b/pkg/handler/enqueue_owner.go
index 17d512696c..0d96bca8b2 100644
--- a/pkg/handler/enqueue_owner.go
+++ b/pkg/handler/enqueue_owner.go
@@ -25,6 +25,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/util/workqueue"
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
 	"sigs.k8s.io/controller-runtime/pkg/event"
 	logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -91,21 +92,13 @@ func (e *EnqueueRequestForOwner) Generic(evt event.GenericEvent, q workqueue.Rat
 // parseOwnerTypeGroupKind parses the OwnerType into a Group and Kind and caches the result. Returns false
 // if the OwnerType could not be parsed using the scheme.
 func (e *EnqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) error {
-	// Get the kinds of the type
-	kinds, _, err := scheme.ObjectKinds(e.OwnerType)
+	gvk, err := apiutil.GVKForObject(e.OwnerType, scheme)
 	if err != nil {
 		log.Error(err, "Could not get ObjectKinds for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType))
 		return err
 	}
-	// Expect only 1 kind. If there is more than one kind this is probably an edge case such as ListOptions.
-	if len(kinds) != 1 {
-		err := fmt.Errorf("Expected exactly 1 kind for OwnerType %T, but found %s kinds", e.OwnerType, kinds)
-		log.Error(nil, "Expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType), "kinds", kinds)
-		return err
-
-	}
 	// Cache the Group and Kind for the OwnerType
-	e.groupKind = schema.GroupKind{Group: kinds[0].Group, Kind: kinds[0].Kind}
+	e.groupKind = gvk.GroupKind()
 	return nil
 }

From 8da4f3446ff245313bfcc3484ac0b742f3120e28 Mon Sep 17 00:00:00 2001
From: Vince Prignano
Date: Tue, 3 Nov 2020 07:43:26 -0800
Subject: [PATCH 2/2] :sparkles: Allow using the builder.OnlyMetadata option
 with Watches

This change lets callers pass builder.OnlyMetadata when creating extra
watches with .Watches(...). The code inspects the passed source.Source
and, if it is of type *source.Kind, calls the internal project method,
so that metav1.PartialObjectMetadata can be used in mapping functions.
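For instance, a caller outside the builder package could now wire a
metadata-only extra watch roughly as follows (an illustrative sketch,
not part of the diff below; the setupWatches helper and the reconciler
r are hypothetical):

```go
package example

import (
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// setupWatches wires a metadata-only extra watch; r is any reconcile.Reconciler.
func setupWatches(mgr manager.Manager, r reconcile.Reconciler) error {
	return builder.ControllerManagedBy(mgr).
		For(builder.OnlyMetadata(&appsv1.Deployment{})).
		// The projected source delivers *metav1.PartialObjectMetadata to the
		// mapping function instead of a fully typed *appsv1.StatefulSet.
		Watches(&source.Kind{Type: builder.OnlyMetadata(&appsv1.StatefulSet{})},
			&handler.EnqueueRequestsFromMapFunc{
				ToRequests: handler.ToRequestsFunc(func(o handler.MapObject) []reconcile.Request {
					meta := o.Object.(*metav1.PartialObjectMetadata)
					_ = meta // derive reconcile.Requests from the metadata here
					return nil
				}),
			}).
		Complete(r)
}
```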
Signed-off-by: Vince Prignano
(cherry picked from commit df54dc5be415695313c8c4727858d1af46ba3b4f)
---
 pkg/builder/controller.go      |  9 +++++-
 pkg/builder/controller_test.go | 52 ++++++++++++++++++++++++++++++++--
 2 files changed, 58 insertions(+), 3 deletions(-)

diff --git a/pkg/builder/controller.go b/pkg/builder/controller.go
index 01a0518940..231e4fdddb 100644
--- a/pkg/builder/controller.go
+++ b/pkg/builder/controller.go
@@ -201,10 +201,17 @@ func (blder *Builder) doWatch() error {
 
 	// Do the watch requests
 	for _, w := range blder.watchRequest {
+		// If the source of this watch is of type *source.Kind, project it.
+		if srckind, ok := w.src.(*source.Kind); ok {
+			typeForSrc, err := blder.project(srckind.Type)
+			if err != nil {
+				return err
+			}
+			srckind.Type = typeForSrc
+		}
 		if err := blder.ctrl.Watch(w.src, w.eventhandler, blder.predicates...); err != nil {
 			return err
 		}
-
 	}
 	return nil
 }
diff --git a/pkg/builder/controller_test.go b/pkg/builder/controller_test.go
index 1c609f7481..d713301ff0 100644
--- a/pkg/builder/controller_test.go
+++ b/pkg/builder/controller_test.go
@@ -216,12 +216,60 @@ var _ = Describe("application", func() {
 			Expect(err).NotTo(HaveOccurred())
 		})
 
-		It("should support watching For & Owns as metadata", func() {
+		It("should support watching For, Owns, and Watch as metadata", func() {
+			statefulSetMaps := make(chan *metav1.PartialObjectMetadata)
+
 			bldr := ControllerManagedBy(mgr).
 				For(OnlyMetadata(&appsv1.Deployment{})).
-				Owns(OnlyMetadata(&appsv1.ReplicaSet{}))
+				Owns(OnlyMetadata(&appsv1.ReplicaSet{})).
+				Watches(&source.Kind{Type: OnlyMetadata(&appsv1.StatefulSet{})},
+					&handler.EnqueueRequestsFromMapFunc{
+						ToRequests: handler.ToRequestsFunc(func(o handler.MapObject) []reconcile.Request {
+							ometa := o.Object.(*metav1.PartialObjectMetadata)
+							statefulSetMaps <- ometa
+							return nil
+						}),
+					})
 
 			doReconcileTest("8", stop, bldr, mgr, true)
+
+			By("Creating a new stateful set")
+			set := &appsv1.StatefulSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: "default",
+					Name:      "test1",
+					Labels: map[string]string{
+						"foo": "bar",
+					},
+				},
+				Spec: appsv1.StatefulSetSpec{
+					Selector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{"foo": "bar"},
+					},
+					Template: corev1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}},
+						Spec: corev1.PodSpec{
+							Containers: []corev1.Container{
+								{
+									Name:  "nginx",
+									Image: "nginx",
+								},
+							},
+						},
+					},
+				},
+			}
+			err := mgr.GetClient().Create(context.TODO(), set)
+			Expect(err).NotTo(HaveOccurred())
+
+			By("Checking that the mapping function has been called")
+			Eventually(func() bool {
+				metaSet := <-statefulSetMaps
+				Expect(metaSet.Name).To(Equal(set.Name))
+				Expect(metaSet.Namespace).To(Equal(set.Namespace))
+				Expect(metaSet.Labels).To(Equal(set.Labels))
+				return true
+			}).Should(BeTrue())
 		})
 	})
 })
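A usage note for reviewers (illustrative only, not part of either patch;
the helper name is hypothetical): since the first commit teaches the
general client to delegate PartialObjectMetadata objects to the new
metadata client, a metadata-only read can be issued through the
ordinary client API, provided the GVK is set up front:

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// getDeploymentMetadata fetches only the metadata of a Deployment.
func getDeploymentMetadata(ctx context.Context, cl client.Client, key client.ObjectKey) (*metav1.PartialObjectMetadata, error) {
	obj := &metav1.PartialObjectMetadata{}
	// The GVK must be set before the call so the metadata client can map it
	// to a resource; the client restores it on the returned object.
	obj.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
	if err := cl.Get(ctx, key, obj); err != nil {
		return nil, err
	}
	return obj, nil
}
```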