diff --git a/pkg/builder/controller.go b/pkg/builder/controller.go
index 0d91a43a48..064e6e105c 100644
--- a/pkg/builder/controller.go
+++ b/pkg/builder/controller.go
@@ -21,6 +21,7 @@ import (
 	"strings"
 
 	"github.com/go-logr/logr"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/rest"
@@ -37,6 +38,17 @@ import (
 var newController = controller.New
 var getGvk = apiutil.GVKForObject
 
+// objectProjection represents other forms that we can use to
+// send/receive a given resource (metadata-only, unstructured, etc).
+type objectProjection int
+
+const (
+	// projectAsNormal doesn't change the object from the form given
+	projectAsNormal objectProjection = iota
+	// projectAsMetadata turns this into a metadata-only watch
+	projectAsMetadata
+)
+
 // Builder builds a Controller.
 type Builder struct {
 	forInput ForInput
@@ -68,8 +80,9 @@ func (blder *Builder) ForType(apiType runtime.Object) *Builder {
 
 // ForInput represents the information set by For method.
 type ForInput struct {
-	object     runtime.Object
-	predicates []predicate.Predicate
+	object           runtime.Object
+	predicates       []predicate.Predicate
+	objectProjection objectProjection
 }
 
 // For defines the type of Object being *reconciled*, and configures the ControllerManagedBy to respond to create / delete /
@@ -88,8 +101,9 @@ func (blder *Builder) For(object runtime.Object, opts ...ForOption) *Builder {
 
 // OwnsInput represents the information set by Owns method.
 type OwnsInput struct {
-	object     runtime.Object
-	predicates []predicate.Predicate
+	object           runtime.Object
+	predicates       []predicate.Predicate
+	objectProjection objectProjection
 }
 
 // Owns defines types of Objects being *generated* by the ControllerManagedBy, and configures the ControllerManagedBy to respond to
@@ -107,9 +121,10 @@ func (blder *Builder) Owns(object runtime.Object, opts ...OwnsOption) *Builder {
 
 // WatchesInput represents the information set by Watches method.
 type WatchesInput struct {
-	src          source.Source
-	eventhandler handler.EventHandler
-	predicates   []predicate.Predicate
+	src              source.Source
+	eventhandler     handler.EventHandler
+	predicates       []predicate.Predicate
+	objectProjection objectProjection
 }
 
 // Watches exposes the lower-level ControllerManagedBy Watches functions through the builder. Consider using
@@ -195,19 +210,43 @@ func (blder *Builder) Build(r reconcile.Reconciler) (controller.Controller, erro
 	return blder.ctrl, nil
 }
 
+func (blder *Builder) project(obj runtime.Object, proj objectProjection) (runtime.Object, error) {
+	switch proj {
+	case projectAsNormal:
+		return obj, nil
+	case projectAsMetadata:
+		metaObj := &metav1.PartialObjectMetadata{}
+		gvk, err := getGvk(obj, blder.mgr.GetScheme())
+		if err != nil {
+			return nil, fmt.Errorf("unable to determine GVK of %T for a metadata-only watch: %w", obj, err)
+		}
+		metaObj.SetGroupVersionKind(gvk)
+		return metaObj, nil
+	default:
+		panic(fmt.Sprintf("unexpected projection type %v on type %T, should not be possible since this is an internal field", proj, obj))
+	}
+}
+
 func (blder *Builder) doWatch() error {
 	// Reconcile type
-	src := &source.Kind{Type: blder.forInput.object}
+	typeForSrc, err := blder.project(blder.forInput.object, blder.forInput.objectProjection)
+	if err != nil {
+		return err
+	}
+	src := &source.Kind{Type: typeForSrc}
 	hdler := &handler.EnqueueRequestForObject{}
 	allPredicates := append(blder.globalPredicates, blder.forInput.predicates...)
-	err := blder.ctrl.Watch(src, hdler, allPredicates...)
- if err != nil { + if err := blder.ctrl.Watch(src, hdler, allPredicates...); err != nil { return err } // Watches the managed types for _, own := range blder.ownsInput { - src := &source.Kind{Type: own.object} + typeForSrc, err := blder.project(own.object, own.objectProjection) + if err != nil { + return err + } + src := &source.Kind{Type: typeForSrc} hdler := &handler.EnqueueRequestForOwner{ OwnerType: blder.forInput.object, IsController: true, @@ -223,10 +262,19 @@ func (blder *Builder) doWatch() error { for _, w := range blder.watchesInput { allPredicates := append([]predicate.Predicate(nil), blder.globalPredicates...) allPredicates = append(allPredicates, w.predicates...) + + // If the source of this watch is of type *source.Kind, project it. + if srckind, ok := w.src.(*source.Kind); ok { + typeForSrc, err := blder.project(srckind.Type, w.objectProjection) + if err != nil { + return err + } + srckind.Type = typeForSrc + } + if err := blder.ctrl.Watch(w.src, w.eventhandler, allPredicates...); err != nil { return err } - } return nil } diff --git a/pkg/builder/controller_test.go b/pkg/builder/controller_test.go index b51dd2272e..646b62ad4d 100644 --- a/pkg/builder/controller_test.go +++ b/pkg/builder/controller_test.go @@ -24,6 +24,8 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/cache" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -294,8 +296,107 @@ var _ = Describe("application", func() { }) }) + Describe("watching with projections", func() { + var mgr manager.Manager + BeforeEach(func() { + // use a cache that intercepts requests for fully typed objects to + // ensure we use the projected versions + var err error + mgr, err = manager.New(cfg, manager.Options{NewCache: newNonTypedOnlyCache}) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should support watching For, Owns, and Watch as metadata", func() { + statefulSetMaps := make(chan *metav1.PartialObjectMetadata) + + bldr := ControllerManagedBy(mgr). + For(&appsv1.Deployment{}, OnlyMetadata). + Owns(&appsv1.ReplicaSet{}, OnlyMetadata). 
+			Watches(&source.Kind{Type: &appsv1.StatefulSet{}},
+				&handler.EnqueueRequestsFromMapFunc{
+					ToRequests: handler.ToRequestsFunc(func(o handler.MapObject) []reconcile.Request {
+						ometa := o.Object.(*metav1.PartialObjectMetadata)
+						statefulSetMaps <- ometa
+						return nil
+					}),
+				},
+				OnlyMetadata)
+
+		doReconcileTest("8", stop, bldr, mgr, true)
+
+		By("Creating a new stateful set")
+		set := &appsv1.StatefulSet{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: "default",
+				Name:      "test1",
+				Labels: map[string]string{
+					"foo": "bar",
+				},
+			},
+			Spec: appsv1.StatefulSetSpec{
+				Selector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{"foo": "bar"},
+				},
+				Template: corev1.PodTemplateSpec{
+					ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}},
+					Spec: corev1.PodSpec{
+						Containers: []corev1.Container{
+							{
+								Name:  "nginx",
+								Image: "nginx",
+							},
+						},
+					},
+				},
+			},
+		}
+		err := mgr.GetClient().Create(context.TODO(), set)
+		Expect(err).NotTo(HaveOccurred())
+
+		By("Checking that the mapping function has been called")
+		Eventually(func() bool {
+			metaSet := <-statefulSetMaps
+			Expect(metaSet.Name).To(Equal(set.Name))
+			Expect(metaSet.Namespace).To(Equal(set.Namespace))
+			Expect(metaSet.Labels).To(Equal(set.Labels))
+			return true
+		}).Should(BeTrue())
+	})
+})
 })
 
+// newNonTypedOnlyCache returns a new cache that wraps the normal cache,
+// returning an error if normal, typed objects have informers requested.
+func newNonTypedOnlyCache(config *rest.Config, opts cache.Options) (cache.Cache, error) {
+	normalCache, err := cache.New(config, opts)
+	if err != nil {
+		return nil, err
+	}
+	return &nonTypedOnlyCache{
+		Cache: normalCache,
+	}, nil
+}
+
+// nonTypedOnlyCache is a cache.Cache that only provides metadata &
+// unstructured informers.
+type nonTypedOnlyCache struct {
+	cache.Cache
+}
+
+func (c *nonTypedOnlyCache) GetInformer(ctx context.Context, obj runtime.Object) (cache.Informer, error) {
+	switch obj.(type) {
+	case (*metav1.PartialObjectMetadata):
+		return c.Cache.GetInformer(ctx, obj)
+	default:
+		return nil, fmt.Errorf("did not want to provide an informer for normal type %T", obj)
+	}
+}
+func (c *nonTypedOnlyCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (cache.Informer, error) {
+	return nil, fmt.Errorf("don't try to sidestep the restriction on informer types by calling GetInformerForKind")
+}
+
+// TODO(directxman12): this function has too many arguments, and the whole
+// "nameSuffix" thing is a bit of a hack.  It should be cleaned up significantly by someone with a bit of time.
 func doReconcileTest(nameSuffix string, stop chan struct{}, blder *Builder, mgr manager.Manager, complete bool) {
 	deployName := "deploy-name-" + nameSuffix
 	rsName := "rs-name-" + nameSuffix
@@ -358,8 +459,8 @@ func doReconcileTest(nameSuffix string, stop chan struct{}, blder *Builder, mgr
 	Expect(err).NotTo(HaveOccurred())
 
 	By("Waiting for the Deployment Reconcile")
-	Expect(<-ch).To(Equal(reconcile.Request{
-		NamespacedName: types.NamespacedName{Namespace: "default", Name: deployName}}))
+	Eventually(ch).Should(Receive(Equal(reconcile.Request{
+		NamespacedName: types.NamespacedName{Namespace: "default", Name: deployName}})))
 
 	By("Creating a ReplicaSet")
 	// Expect a Reconcile when an Owned object is managedObjects.
@@ -388,8 +489,8 @@ func doReconcileTest(nameSuffix string, stop chan struct{}, blder *Builder, mgr
 	Expect(err).NotTo(HaveOccurred())
 
 	By("Waiting for the ReplicaSet Reconcile")
-	Expect(<-ch).To(Equal(reconcile.Request{
-		NamespacedName: types.NamespacedName{Namespace: "default", Name: deployName}}))
+	Eventually(ch).Should(Receive(Equal(reconcile.Request{
+		NamespacedName: types.NamespacedName{Namespace: "default", Name: deployName}})))
 }
 
diff --git a/pkg/builder/example_test.go b/pkg/builder/example_test.go
index 128f729d75..4f421b5a31 100644
--- a/pkg/builder/example_test.go
+++ b/pkg/builder/example_test.go
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"os"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 
 	appsv1 "k8s.io/api/apps/v1"
@@ -34,6 +35,60 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 )
 
+func ExampleBuilder_metadata_only() {
+	logf.SetLogger(zap.New())
+
+	var log = logf.Log.WithName("builder-examples")
+
+	mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{})
+	if err != nil {
+		log.Error(err, "could not create manager")
+		os.Exit(1)
+	}
+
+	cl := mgr.GetClient()
+	err = builder.
+		ControllerManagedBy(mgr).                  // Create the ControllerManagedBy
+		For(&appsv1.ReplicaSet{}).                 // ReplicaSet is the Application API
+		Owns(&corev1.Pod{}, builder.OnlyMetadata). // ReplicaSet owns Pods created by it, and caches them as metadata only
+		Complete(reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) {
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			// Read the ReplicaSet
+			rs := &appsv1.ReplicaSet{}
+			err := cl.Get(ctx, req.NamespacedName, rs)
+			if err != nil {
+				return reconcile.Result{}, client.IgnoreNotFound(err)
+			}
+
+			// List the Pods matching the PodTemplate Labels, but only their metadata
+			var podsMeta metav1.PartialObjectMetadataList
+			err = cl.List(ctx, &podsMeta, client.InNamespace(req.Namespace), client.MatchingLabels(rs.Spec.Template.Labels))
+			if err != nil {
+				return reconcile.Result{}, client.IgnoreNotFound(err)
+			}
+
+			// Update the ReplicaSet
+			rs.Labels["pod-count"] = fmt.Sprintf("%v", len(podsMeta.Items))
+			err = cl.Update(ctx, rs)
+			if err != nil {
+				return reconcile.Result{}, err
+			}
+
+			return reconcile.Result{}, nil
+		}))
+	if err != nil {
+		log.Error(err, "could not create controller")
+		os.Exit(1)
+	}
+
+	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
+		log.Error(err, "could not start manager")
+		os.Exit(1)
+	}
+}
+
 // This example creates a simple application ControllerManagedBy that is configured for ReplicaSets and Pods.
 //
 // * Create a new application for ReplicaSets that manages Pods owned by the ReplicaSet and calls into
diff --git a/pkg/builder/options.go b/pkg/builder/options.go
index edd5d0156b..7bb4273094 100644
--- a/pkg/builder/options.go
+++ b/pkg/builder/options.go
@@ -76,3 +76,42 @@ var _ OwnsOption = &Predicates{}
 var _ WatchesOption = &Predicates{}
 
 // }}}
+
+// {{{ For & Owns Dual-Type options
+
+// projectAs configures the projection on the input.
+// Currently only metadata is supported.  We might want to expand
+// this to arbitrary non-special local projections in the future.
+type projectAs objectProjection
+
+// ApplyToFor applies this configuration to the given ForInput options.
+func (p projectAs) ApplyToFor(opts *ForInput) {
+	opts.objectProjection = objectProjection(p)
+}
+
+// ApplyToOwns applies this configuration to the given OwnsInput options.
+func (p projectAs) ApplyToOwns(opts *OwnsInput) {
+	opts.objectProjection = objectProjection(p)
+}
+
+// ApplyToWatches applies this configuration to the given WatchesInput options.
+func (p projectAs) ApplyToWatches(opts *WatchesInput) {
+	opts.objectProjection = objectProjection(p)
+}
+
+var (
+	// OnlyMetadata tells the controller to *only* cache metadata, and to watch
+	// the API server in metadata-only form.  This is useful when watching
+	// lots of objects, really big objects, or objects for which you only know
+	// the GVK, but not the structure.  You'll need to pass
+	// metav1.PartialObjectMetadata to the client when fetching objects in your
+	// reconciler, otherwise you'll end up with a duplicate structured or
+	// unstructured cache.
+	OnlyMetadata = projectAs(projectAsMetadata)
+
+	_ ForOption     = OnlyMetadata
+	_ OwnsOption    = OnlyMetadata
+	_ WatchesOption = OnlyMetadata
+)
+
+// }}}
diff --git a/pkg/cache/cache_test.go b/pkg/cache/cache_test.go
index 440b5d2fb0..1b95477bb5 100644
--- a/pkg/cache/cache_test.go
+++ b/pkg/cache/cache_test.go
@@ -153,7 +153,7 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca
 			Expect(listObj.Items).NotTo(BeEmpty())
 			hasKubeService := false
 			for _, svc := range listObj.Items {
-				if svc.Namespace == "default" && svc.Name == "kubernetes" {
+				if isKubeService(&svc) {
 					hasKubeService = true
 					break
 				}
@@ -300,7 +300,7 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca
 			Expect(listObj.Items).NotTo(BeEmpty())
 			hasKubeService := false
 			for _, svc := range listObj.Items {
-				if svc.GetNamespace() == "default" && svc.GetName() == "kubernetes" {
+				if isKubeService(&svc) {
 					hasKubeService = true
 					break
 				}
@@ -472,6 +472,204 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca
 			svc := &kcorev1.Service{}
 			svcKey := client.ObjectKey{Namespace: "unknown", Name: "unknown"}
 
+			By("verifying that an error is returned")
+			err := informerCache.Get(context.Background(), svcKey, svc)
+			Expect(err).To(HaveOccurred())
+		})
+	})
+	Context("with metadata-only objects", func() {
+		It("should be able to list objects that haven't been watched previously", func() {
+			By("listing all services in the cluster")
+			listObj := &kmetav1.PartialObjectMetadataList{}
+			listObj.SetGroupVersionKind(schema.GroupVersionKind{
+				Group:   "",
+				Version: "v1",
+				Kind:    "ServiceList",
+			})
+			err := informerCache.List(context.Background(), listObj)
+			Expect(err).To(Succeed())
+
+			By("verifying that the returned list contains the Kubernetes service")
+			// NB: kubernetes default service is automatically created in testenv.
+ Expect(listObj.Items).NotTo(BeEmpty()) + hasKubeService := false + for _, svc := range listObj.Items { + if isKubeService(&svc) { + hasKubeService = true + break + } + } + Expect(hasKubeService).To(BeTrue()) + }) + It("should be able to get objects that haven't been watched previously", func() { + By("getting the Kubernetes service") + svc := &kmetav1.PartialObjectMetadata{} + svc.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Service", + }) + svcKey := client.ObjectKey{Namespace: "default", Name: "kubernetes"} + Expect(informerCache.Get(context.Background(), svcKey, svc)).To(Succeed()) + + By("verifying that the returned service looks reasonable") + Expect(svc.GetName()).To(Equal("kubernetes")) + Expect(svc.GetNamespace()).To(Equal("default")) + }) + + It("should support filtering by labels in a single namespace", func() { + By("listing pods with a particular label") + // NB: each pod has a "test-label": + out := kmetav1.PartialObjectMetadataList{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + err := informerCache.List(context.Background(), &out, + client.InNamespace(testNamespaceTwo), + client.MatchingLabels(map[string]string{"test-label": "test-pod-2"})) + Expect(err).To(Succeed()) + + By("verifying the returned pods have the correct label") + Expect(out.Items).NotTo(BeEmpty()) + Expect(out.Items).Should(HaveLen(1)) + actual := out.Items[0] + Expect(actual.GetLabels()["test-label"]).To(Equal("test-pod-2")) + }) + + It("should support filtering by labels from multiple namespaces", func() { + By("creating another pod with the same label but different namespace") + anotherPod := createPod("test-pod-2", testNamespaceOne, kcorev1.RestartPolicyAlways) + defer deletePod(anotherPod) + + By("listing pods with a particular label") + // NB: each pod has a "test-label": + out := kmetav1.PartialObjectMetadataList{} + out.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + labels := map[string]string{"test-label": "test-pod-2"} + err := informerCache.List(context.Background(), &out, client.MatchingLabels(labels)) + Expect(err).To(Succeed()) + + By("verifying multiple pods with the same label in different namespaces are returned") + Expect(out.Items).NotTo(BeEmpty()) + Expect(out.Items).Should(HaveLen(2)) + for _, actual := range out.Items { + Expect(actual.GetLabels()["test-label"]).To(Equal("test-pod-2")) + } + + }) + + It("should be able to list objects by namespace", func() { + By("listing pods in test-namespace-1") + listObj := &kmetav1.PartialObjectMetadataList{} + listObj.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + err := informerCache.List(context.Background(), listObj, client.InNamespace(testNamespaceOne)) + Expect(err).To(Succeed()) + + By("verifying that the returned pods are in test-namespace-1") + Expect(listObj.Items).NotTo(BeEmpty()) + Expect(listObj.Items).Should(HaveLen(1)) + actual := listObj.Items[0] + Expect(actual.GetNamespace()).To(Equal(testNamespaceOne)) + }) + + It("should be able to restrict cache to a namespace", func() { + By("creating a namespaced cache") + namespacedCache, err := cache.New(cfg, cache.Options{Namespace: testNamespaceOne}) + Expect(err).NotTo(HaveOccurred()) + + By("running the cache and waiting for it to sync") + go func() { + defer GinkgoRecover() + Expect(namespacedCache.Start(stop)).To(Succeed()) + }() + 
Expect(namespacedCache.WaitForCacheSync(stop)).NotTo(BeFalse())
+
+			By("listing pods in all namespaces")
+			out := &kmetav1.PartialObjectMetadataList{}
+			out.SetGroupVersionKind(schema.GroupVersionKind{
+				Group:   "",
+				Version: "v1",
+				Kind:    "PodList",
+			})
+			Expect(namespacedCache.List(context.Background(), out)).To(Succeed())
+
+			By("verifying the returned pod is from the watched namespace")
+			Expect(out.Items).NotTo(BeEmpty())
+			Expect(out.Items).Should(HaveLen(1))
+			Expect(out.Items[0].GetNamespace()).To(Equal(testNamespaceOne))
+
+			By("listing all namespaces - should still be able to get a cluster-scoped resource")
+			namespaceList := &kmetav1.PartialObjectMetadataList{}
+			namespaceList.SetGroupVersionKind(schema.GroupVersionKind{
+				Group:   "",
+				Version: "v1",
+				Kind:    "NamespaceList",
+			})
+			Expect(namespacedCache.List(context.Background(), namespaceList)).To(Succeed())
+
+			By("verifying the namespace list is not empty")
+			Expect(namespaceList.Items).NotTo(BeEmpty())
+		})
+
+		It("should deep copy the object unless told otherwise", func() {
+			By("retrieving a specific pod from the cache")
+			out := &kmetav1.PartialObjectMetadata{}
+			out.SetGroupVersionKind(schema.GroupVersionKind{
+				Group:   "",
+				Version: "v1",
+				Kind:    "Pod",
+			})
+			uKnownPod2 := &kmetav1.PartialObjectMetadata{}
+			knownPod2.(*kcorev1.Pod).ObjectMeta.DeepCopyInto(&uKnownPod2.ObjectMeta)
+			uKnownPod2.SetGroupVersionKind(schema.GroupVersionKind{
+				Group:   "",
+				Version: "v1",
+				Kind:    "Pod",
+			})
+
+			podKey := client.ObjectKey{Name: "test-pod-2", Namespace: testNamespaceTwo}
+			Expect(informerCache.Get(context.Background(), podKey, out)).To(Succeed())
+
+			By("verifying the retrieved pod is equal to a known pod")
+			Expect(out).To(Equal(uKnownPod2))
+
+			By("altering a field in the retrieved pod")
+			out.Labels["foo"] = "bar"
+
+			By("verifying the pods are no longer equal")
+			Expect(out).NotTo(Equal(knownPod2))
+		})
+
+		It("should return an error if the object is not found", func() {
+			By("getting a service that does not exist")
+			svc := &kmetav1.PartialObjectMetadata{}
+			svc.SetGroupVersionKind(schema.GroupVersionKind{
+				Group:   "",
+				Version: "v1",
+				Kind:    "Service",
+			})
+			svcKey := client.ObjectKey{Namespace: testNamespaceOne, Name: "unknown"}
+
+			By("verifying that an error is returned")
+			err := informerCache.Get(context.Background(), svcKey, svc)
+			Expect(err).To(HaveOccurred())
+			Expect(errors.IsNotFound(err)).To(BeTrue())
+		})
+		It("should return an error if getting object in unwatched namespace", func() {
+			By("getting a service that does not exist")
+			svc := &kcorev1.Service{}
+			svcKey := client.ObjectKey{Namespace: "unknown", Name: "unknown"}
+
 			By("verifying that an error is returned")
 			err := informerCache.Get(context.Background(), svcKey, svc)
 			Expect(err).To(HaveOccurred())
@@ -518,7 +716,6 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca
 			Eventually(out).Should(Receive(Equal(pod)))
 			close(done)
 		})
-		// TODO: Add a test for when GVK is not in Scheme. Does code support informer for unstructured object?
It("should be able to get an informer by group/version/kind", func(done Done) { By("getting an shared index informer for gvk = core/v1/pod") gvk := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"} @@ -744,6 +941,126 @@ func CacheTest(createCacheFunc func(config *rest.Config, opts cache.Options) (ca Expect(errors.IsTimeout(err)).To(BeTrue()) }) }) + Context("with metadata-only objects", func() { + It("should be able to get informer for the object", func(done Done) { + By("getting a shared index informer for a pod") + + pod := &kcorev1.Pod{ + ObjectMeta: kmetav1.ObjectMeta{ + Name: "informer-obj", + Namespace: "default", + }, + Spec: kcorev1.PodSpec{ + Containers: []kcorev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, + } + + podMeta := &kmetav1.PartialObjectMetadata{} + pod.ObjectMeta.DeepCopyInto(&podMeta.ObjectMeta) + podMeta.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + + sii, err := informerCache.GetInformer(context.TODO(), podMeta) + Expect(err).NotTo(HaveOccurred()) + Expect(sii).NotTo(BeNil()) + Expect(sii.HasSynced()).To(BeTrue()) + + By("adding an event handler listening for object creation which sends the object to a channel") + out := make(chan interface{}) + addFunc := func(obj interface{}) { + out <- obj + } + sii.AddEventHandler(kcache.ResourceEventHandlerFuncs{AddFunc: addFunc}) + + By("adding an object") + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl.Create(context.Background(), pod)).To(Succeed()) + defer deletePod(pod) + // re-copy the result in so that we can match on it properly + pod.ObjectMeta.DeepCopyInto(&podMeta.ObjectMeta) + // NB(directxman12): proto doesn't care typemeta, and + // partialobjectmetadata is proto, so no typemeta + // TODO(directxman12): we should paper over this in controller-runtime + podMeta.APIVersion = "" + podMeta.Kind = "" + + By("verifying the object's metadata is received on the channel") + Eventually(out).Should(Receive(Equal(podMeta))) + close(done) + }, 3) + + It("should be able to index an object field then retrieve objects by that field", func() { + By("creating the cache") + informer, err := cache.New(cfg, cache.Options{}) + Expect(err).NotTo(HaveOccurred()) + + By("indexing the restartPolicy field of the Pod object before starting") + pod := &kmetav1.PartialObjectMetadata{} + pod.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + indexFunc := func(obj runtime.Object) []string { + metadata := obj.(*kmetav1.PartialObjectMetadata) + return []string{metadata.Labels["test-label"]} + } + Expect(informer.IndexField(context.TODO(), pod, "metadata.labels.test-label", indexFunc)).To(Succeed()) + + By("running the cache and waiting for it to sync") + go func() { + defer GinkgoRecover() + Expect(informer.Start(stop)).To(Succeed()) + }() + Expect(informer.WaitForCacheSync(stop)).NotTo(BeFalse()) + + By("listing Pods with restartPolicyOnFailure") + listObj := &kmetav1.PartialObjectMetadataList{} + listObj.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "PodList", + }) + err = informer.List(context.Background(), listObj, + client.MatchingFields{"metadata.labels.test-label": "test-pod-3"}) + Expect(err).To(Succeed()) + + By("verifying that the returned pods have correct restart policy") + Expect(listObj.Items).NotTo(BeEmpty()) + Expect(listObj.Items).Should(HaveLen(1)) + actual := listObj.Items[0] + 
Expect(actual.GetName()).To(Equal("test-pod-3")) + }, 3) + + It("should allow for get informer to be cancelled", func() { + By("creating a context and cancelling it") + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + By("getting a shared index informer for a pod with a cancelled context") + pod := &kmetav1.PartialObjectMetadata{} + pod.SetName("informer-obj2") + pod.SetNamespace("default") + pod.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "", + Version: "v1", + Kind: "Pod", + }) + sii, err := informerCache.GetInformer(ctx, pod) + Expect(err).To(HaveOccurred()) + Expect(sii).To(BeNil()) + Expect(errors.IsTimeout(err)).To(BeTrue()) + }) + }) }) }) } @@ -765,3 +1082,9 @@ func ensureNamespace(namespace string, client client.Client) error { } return err } + +//nolint:interfacer +func isKubeService(svc kmetav1.Object) bool { + // grumble grumble linters grumble grumble + return svc.GetNamespace() == "default" && svc.GetName() == "kubernetes" +} diff --git a/pkg/cache/internal/deleg_map.go b/pkg/cache/internal/deleg_map.go index cdaf1fc21c..10720115c4 100644 --- a/pkg/cache/internal/deleg_map.go +++ b/pkg/cache/internal/deleg_map.go @@ -21,6 +21,7 @@ import ( "time" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -31,10 +32,12 @@ import ( // InformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. // It uses a standard parameter codec constructed based on the given generated Scheme. type InformersMap struct { - // we abstract over the details of structured vs unstructured with the specificInformerMaps + // we abstract over the details of structured/unstructured/metadata with the specificInformerMaps + // TODO(directxman12): genericize this over different projections now that we have 3 different maps structured *specificInformersMap unstructured *specificInformersMap + metadata *specificInformersMap // Scheme maps runtime.Objects to GroupVersionKinds Scheme *runtime.Scheme @@ -51,6 +54,7 @@ func NewInformersMap(config *rest.Config, return &InformersMap{ structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace), unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace), + metadata: newMetadataInformersMap(config, scheme, mapper, resync, namespace), Scheme: scheme, } @@ -60,6 +64,7 @@ func NewInformersMap(config *rest.Config, func (m *InformersMap) Start(stop <-chan struct{}) error { go m.structured.Start(stop) go m.unstructured.Start(stop) + go m.metadata.Start(stop) <-stop return nil } @@ -75,21 +80,27 @@ func (m *InformersMap) WaitForCacheSync(stop <-chan struct{}) bool { if !m.unstructured.waitForStarted(stop) { return false } + if !m.metadata.waitForStarted(stop) { + return false + } return cache.WaitForCacheSync(stop, syncedFuncs...) } // Get will create a new Informer and add it to the map of InformersMap if none exists. Returns // the Informer from the map. 
func (m *InformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) { - _, isUnstructured := obj.(*unstructured.Unstructured) - _, isUnstructuredList := obj.(*unstructured.UnstructuredList) - isUnstructured = isUnstructured || isUnstructuredList - - if isUnstructured { + switch obj.(type) { + case *unstructured.Unstructured: + return m.unstructured.Get(ctx, gvk, obj) + case *unstructured.UnstructuredList: return m.unstructured.Get(ctx, gvk, obj) + case *metav1.PartialObjectMetadata: + return m.metadata.Get(ctx, gvk, obj) + case *metav1.PartialObjectMetadataList: + return m.metadata.Get(ctx, gvk, obj) + default: + return m.structured.Get(ctx, gvk, obj) } - - return m.structured.Get(ctx, gvk, obj) } // newStructuredInformersMap creates a new InformersMap for structured objects. @@ -101,3 +112,8 @@ func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapp func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, namespace string) *specificInformersMap { return newSpecificInformersMap(config, scheme, mapper, resync, namespace, createUnstructuredListWatch) } + +// newMetadataInformersMap creates a new InformersMap for metadata-only objects. +func newMetadataInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration, namespace string) *specificInformersMap { + return newSpecificInformersMap(config, scheme, mapper, resync, namespace, createMetadataListWatch) +} diff --git a/pkg/cache/internal/informers_map.go b/pkg/cache/internal/informers_map.go index 1068a17383..fd948a60ac 100644 --- a/pkg/cache/internal/informers_map.go +++ b/pkg/cache/internal/informers_map.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/dynamic" + "k8s.io/client-go/metadata" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" @@ -301,6 +302,44 @@ func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInform }, nil } +func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { + // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the + // groupVersionKind to the Resource API we will use. + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + + // grab the metadata client + client, err := metadata.NewForConfig(ip.config) + if err != nil { + return nil, err + } + + // TODO: the functions that make use of this ListWatch should be adapted to + // pass in their own contexts instead of relying on this fixed one here. 
+	ctx := context.TODO()
+
+	// create the relevant listwatch
+	return &cache.ListWatch{
+		ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
+			if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
+				return client.Resource(mapping.Resource).Namespace(ip.namespace).List(ctx, opts)
+			}
+			return client.Resource(mapping.Resource).List(ctx, opts)
+		},
+		// Setup the watch function
+		WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
+			// Watch needs to be set to true separately
+			opts.Watch = true
+			if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
+				return client.Resource(mapping.Resource).Namespace(ip.namespace).Watch(ctx, opts)
+			}
+			return client.Resource(mapping.Resource).Watch(ctx, opts)
+		},
+	}, nil
+}
+
 // resyncPeriod returns a function which generates a duration each time it is
 // invoked; this is so that multiple controllers don't get into lock-step and all
 // hammer the apiserver with list requests simultaneously.
diff --git a/pkg/client/apiutil/apimachinery.go b/pkg/client/apiutil/apimachinery.go
index 9fe32b21f3..2766d748e9 100644
--- a/pkg/client/apiutil/apimachinery.go
+++ b/pkg/client/apiutil/apimachinery.go
@@ -23,6 +23,7 @@ import (
 	"fmt"
 
 	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/runtime/serializer"
@@ -48,6 +49,27 @@ func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) {
 
 // GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK.
 func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) {
+	// TODO(directxman12): do we want to generalize this to arbitrary container types?
+	// I think we'd need a generalized form of scheme or something.
It's a + // shame there's not a reliable "GetGVK" interface that works by default + // for unpopulated static types and populated "dynamic" types + // (unstructured, partial, etc) + + // check for PartialObjectMetadata, which is analogous to unstructured, but isn't handled by ObjectKinds + _, isPartial := obj.(*metav1.PartialObjectMetadata) + _, isPartialList := obj.(*metav1.PartialObjectMetadataList) + if isPartial || isPartialList { + // we require that the GVK be populated in order to recognize the object + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return schema.GroupVersionKind{}, runtime.NewMissingKindErr("unstructured object has no kind") + } + if len(gvk.Version) == 0 { + return schema.GroupVersionKind{}, runtime.NewMissingVersionErr("unstructured object has no version") + } + return gvk, nil + } + gvks, isUnversioned, err := scheme.ObjectKinds(obj) if err != nil { return schema.GroupVersionKind{}, err diff --git a/pkg/client/client.go b/pkg/client/client.go index c1c4d5d691..9c08bf8fe2 100644 --- a/pkg/client/client.go +++ b/pkg/client/client.go @@ -21,11 +21,13 @@ import ( "fmt" "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/metadata" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) @@ -76,6 +78,11 @@ func New(config *rest.Config, options Options) (Client, error) { resourceByType: make(map[schema.GroupVersionKind]*resourceMeta), } + rawMetaClient, err := metadata.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("unable to construct metadata-only client for use as part of client: %w", err) + } + c := &client{ typedClient: typedClient{ cache: clientcache, @@ -85,6 +92,10 @@ func New(config *rest.Config, options Options) (Client, error) { cache: clientcache, paramCodec: noConversionParamCodec{}, }, + metadataClient: metadataClient{ + client: rawMetaClient, + restMapper: options.Mapper, + }, } return c, nil @@ -97,6 +108,7 @@ var _ Client = &client{} type client struct { typedClient typedClient unstructuredClient unstructuredClient + metadataClient metadataClient } // resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object. @@ -111,67 +123,88 @@ func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersi // Create implements client.Client func (c *client) Create(ctx context.Context, obj runtime.Object, opts ...CreateOption) error { - _, ok := obj.(*unstructured.Unstructured) - if ok { + switch obj.(type) { + case *unstructured.Unstructured: return c.unstructuredClient.Create(ctx, obj, opts...) + case *metav1.PartialObjectMetadata: + return fmt.Errorf("cannot create using only metadata") + default: + return c.typedClient.Create(ctx, obj, opts...) } - return c.typedClient.Create(ctx, obj, opts...) } // Update implements client.Client func (c *client) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) - _, ok := obj.(*unstructured.Unstructured) - if ok { + switch obj.(type) { + case *unstructured.Unstructured: return c.unstructuredClient.Update(ctx, obj, opts...) 
+	case *metav1.PartialObjectMetadata:
+		return fmt.Errorf("cannot update using only metadata -- did you mean to patch?")
+	default:
+		return c.typedClient.Update(ctx, obj, opts...)
 	}
-	return c.typedClient.Update(ctx, obj, opts...)
 }
 
 // Delete implements client.Client
 func (c *client) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error {
-	_, ok := obj.(*unstructured.Unstructured)
-	if ok {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
 		return c.unstructuredClient.Delete(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadata:
+		return c.metadataClient.Delete(ctx, obj, opts...)
+	default:
+		return c.typedClient.Delete(ctx, obj, opts...)
 	}
-	return c.typedClient.Delete(ctx, obj, opts...)
 }
 
 // DeleteAllOf implements client.Client
 func (c *client) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error {
-	_, ok := obj.(*unstructured.Unstructured)
-	if ok {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
 		return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadata:
+		return c.metadataClient.DeleteAllOf(ctx, obj, opts...)
+	default:
+		return c.typedClient.DeleteAllOf(ctx, obj, opts...)
 	}
-	return c.typedClient.DeleteAllOf(ctx, obj, opts...)
 }
 
 // Patch implements client.Client
 func (c *client) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error {
 	defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
-	_, ok := obj.(*unstructured.Unstructured)
-	if ok {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
 		return c.unstructuredClient.Patch(ctx, obj, patch, opts...)
+	case *metav1.PartialObjectMetadata:
+		return c.metadataClient.Patch(ctx, obj, patch, opts...)
+	default:
+		return c.typedClient.Patch(ctx, obj, patch, opts...)
 	}
-	return c.typedClient.Patch(ctx, obj, patch, opts...)
 }
 
 // Get implements client.Client
 func (c *client) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error {
-	_, ok := obj.(*unstructured.Unstructured)
-	if ok {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
 		return c.unstructuredClient.Get(ctx, key, obj)
+	case *metav1.PartialObjectMetadata:
+		return c.metadataClient.Get(ctx, key, obj)
+	default:
+		return c.typedClient.Get(ctx, key, obj)
 	}
-	return c.typedClient.Get(ctx, key, obj)
 }
 
 // List implements client.Client
 func (c *client) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error {
-	_, ok := obj.(*unstructured.UnstructuredList)
-	if ok {
+	switch obj.(type) {
+	case *unstructured.UnstructuredList:
 		return c.unstructuredClient.List(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadataList:
+		return c.metadataClient.List(ctx, obj, opts...)
+	default:
+		return c.typedClient.List(ctx, obj, opts...)
 	}
-	return c.typedClient.List(ctx, obj, opts...)
 }
 
 // Status implements client.StatusClient
@@ -190,19 +223,25 @@ var _ StatusWriter = &statusWriter{}
 // Update implements client.StatusWriter
 func (sw *statusWriter) Update(ctx context.Context, obj runtime.Object, opts ...UpdateOption) error {
 	defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
-	_, ok := obj.(*unstructured.Unstructured)
-	if ok {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
 		return sw.client.unstructuredClient.UpdateStatus(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadata:
+		return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?")
+	default:
+		return sw.client.typedClient.UpdateStatus(ctx, obj, opts...)
} - return sw.client.typedClient.UpdateStatus(ctx, obj, opts...) } // Patch implements client.Client func (sw *statusWriter) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error { defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) - _, ok := obj.(*unstructured.Unstructured) - if ok { + switch obj.(type) { + case *unstructured.Unstructured: return sw.client.unstructuredClient.PatchStatus(ctx, obj, patch, opts...) + case *metav1.PartialObjectMetadata: + return sw.client.metadataClient.PatchStatus(ctx, obj, patch, opts...) + default: + return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...) } - return sw.client.typedClient.PatchStatus(ctx, obj, patch, opts...) } diff --git a/pkg/client/client_test.go b/pkg/client/client_test.go index e8a6bca339..4ba2f6781b 100644 --- a/pkg/client/client_test.go +++ b/pkg/client/client_test.go @@ -18,9 +18,9 @@ package client_test import ( "context" - "encoding/json" "fmt" "sync/atomic" + "time" "k8s.io/apimachinery/pkg/types" @@ -49,11 +49,72 @@ func deleteDeployment(ctx context.Context, dep *appsv1.Deployment, ns string) { } func deleteNamespace(ctx context.Context, ns *corev1.Namespace) { - _, err := clientset.CoreV1().Namespaces().Get(ctx, ns.Name, metav1.GetOptions{}) - if err == nil { - err = clientset.CoreV1().Namespaces().Delete(ctx, ns.Name, metav1.DeleteOptions{}) - Expect(err).NotTo(HaveOccurred()) + ns, err := clientset.CoreV1().Namespaces().Get(ctx, ns.Name, metav1.GetOptions{}) + if err != nil { + return + } + + err = clientset.CoreV1().Namespaces().Delete(ctx, ns.Name, metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + + // finalize if necessary + pos := -1 + finalizers := ns.Spec.Finalizers + for i, fin := range finalizers { + if fin == "kubernetes" { + pos = i + break + } + } + if pos == -1 { + // no need to finalize + return + } + + // re-get in order to finalize + ns, err = clientset.CoreV1().Namespaces().Get(ctx, ns.Name, metav1.GetOptions{}) + if err != nil { + return + } + + ns.Spec.Finalizers = append(finalizers[:pos], finalizers[pos+1:]...) + _, err = clientset.CoreV1().Namespaces().Finalize(ctx, ns, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + +WAIT_LOOP: + for i := 0; i < 10; i++ { + ns, err = clientset.CoreV1().Namespaces().Get(ctx, ns.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + // success! + return + } + select { + case <-ctx.Done(): + break WAIT_LOOP + // failed to delete in time, see failure below + case <-time.After(100 * time.Millisecond): + // do nothing, try again + } + } + Fail(fmt.Sprintf("timed out waiting for namespace %q to be deleted", ns.Name)) +} + +// metaOnlyFromObj returns PartialObjectMetadata from a concrete Go struct that +// returns a concrete *metav1.ObjectMeta from GetObjectMeta (yes, that plays a +// bit fast and loose, but the only other options are serializing and then +// deserializing, or manually calling all the accessor funcs, which are both a bit annoying). 
+func metaOnlyFromObj(obj interface { + runtime.Object + metav1.ObjectMetaAccessor +}, scheme *runtime.Scheme) *metav1.PartialObjectMetadata { + metaObj := metav1.PartialObjectMetadata{} + obj.GetObjectMeta().(*metav1.ObjectMeta).DeepCopyInto(&metaObj.ObjectMeta) + kinds, _, err := scheme.ObjectKinds(obj) + if err != nil { + panic(err) } + metaObj.SetGroupVersionKind(kinds[0]) + return &metaObj } var _ = Describe("Client", func() { @@ -66,7 +127,6 @@ var _ = Describe("Client", func() { var count uint64 = 0 var replicaCount int32 = 2 var ns = "default" - var mergePatch []byte ctx := context.TODO() BeforeEach(func(done Done) { @@ -99,15 +159,6 @@ var _ = Describe("Client", func() { Spec: corev1.NodeSpec{}, } scheme = kscheme.Scheme - var err error - mergePatch, err = json.Marshal(map[string]interface{}{ - "metadata": map[string]interface{}{ - "annotations": map[string]interface{}{ - "foo": "bar", - }, - }, - }) - Expect(err).NotTo(HaveOccurred()) close(done) }, serverSideTimeoutSeconds) @@ -276,6 +327,16 @@ var _ = Describe("Client", func() { // Example: ListOptions }) + Context("with metadata objects", func() { + It("should fail with an error", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + obj := metaOnlyFromObj(dep, scheme) + Expect(cl.Create(context.TODO(), obj)).NotTo(Succeed()) + }) + }) + Context("with the DryRun option", func() { It("should not create a new object", func(done Done) { cl, err := client.New(cfg, client.Options{}) @@ -643,6 +704,16 @@ var _ = Describe("Client", func() { close(done) }) }) + Context("with metadata objects", func() { + It("should fail with an error", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + obj := metaOnlyFromObj(dep, scheme) + + Expect(cl.Update(context.TODO(), obj)).NotTo(Succeed()) + }) + }) }) Describe("StatusClient", func() { @@ -954,6 +1025,43 @@ var _ = Describe("Client", func() { }) }) + Context("with metadata objects", func() { + It("should fail to update with an error", func() { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + + obj := metaOnlyFromObj(dep, scheme) + Expect(cl.Status().Update(context.TODO(), obj)).NotTo(Succeed()) + }) + + It("should patch status and preserve type information", func(done Done) { + cl, err := client.New(cfg, client.Options{}) + Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) + + By("initially creating a Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("patching the status of Deployment") + objPatch := client.MergeFrom(metaOnlyFromObj(dep, scheme)) + dep.Annotations = map[string]string{"some-new-annotation": "some-new-value"} + obj := metaOnlyFromObj(dep, scheme) + err = cl.Status().Patch(context.TODO(), obj, objPatch) + Expect(err).NotTo(HaveOccurred()) + + By("validating updated Deployment has type information") + Expect(obj.GroupVersionKind()).To(Equal(depGvk)) + + By("validating patched Deployment has new status") + actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + Expect(actual.Annotations).To(HaveKeyWithValue("some-new-annotation", "some-new-value")) + + close(done) + }) + }) }) Describe("Delete", func() { @@ -1184,11 +1292,8 @@ var _ = Describe("Client", func() { close(done) }) }) - }) - - Describe("Patch", func() { - Context("with structured objects", 
func() {
-			It("should patch an existing object from a go struct", func(done Done) {
+		Context("with metadata objects", func() {
+			It("should delete an existing object from a go struct", func(done Done) {
 				cl, err := client.New(cfg, client.Options{})
 				Expect(err).NotTo(HaveOccurred())
 				Expect(cl).NotTo(BeNil())
@@ -1197,94 +1302,126 @@ var _ = Describe("Client", func() {
 				dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{})
 				Expect(err).NotTo(HaveOccurred())
 
-				By("patching the Deployment")
-				err = cl.Patch(context.TODO(), dep, client.RawPatch(types.MergePatchType, mergePatch))
+				By("deleting the Deployment")
+				metaObj := metaOnlyFromObj(dep, scheme)
+				err = cl.Delete(context.TODO(), metaObj)
 				Expect(err).NotTo(HaveOccurred())
 
-				By("validating patched Deployment has new annotation")
-				actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{})
-				Expect(err).NotTo(HaveOccurred())
-				Expect(actual).NotTo(BeNil())
-				Expect(actual.Annotations["foo"]).To(Equal("bar"))
+				By("validating the Deployment no longer exists")
+				_, err = clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{})
+				Expect(err).To(HaveOccurred())
 
 				close(done)
 			})
 
-			It("should patch an existing object from a go struct, using optimistic locking", func(done Done) {
+			It("should delete an existing non-namespace object from a go struct", func(done Done) {
 				cl, err := client.New(cfg, client.Options{})
 				Expect(err).NotTo(HaveOccurred())
 				Expect(cl).NotTo(BeNil())
 
-				By("initially creating a Deployment")
-				dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{})
+				By("initially creating a Node")
+				node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})
 				Expect(err).NotTo(HaveOccurred())
 
-				By("creating a patch from with optimistic lock")
-				patch := client.MergeFromWithOptions(dep.DeepCopy(), client.MergeFromWithOptimisticLock{})
-
-				By("adding a new annotation")
-				dep.Annotations = map[string]string{
-					"foo": "bar",
-				}
+				By("deleting the Node")
+				metaObj := metaOnlyFromObj(node, scheme)
 
-				By("patching the Deployment")
-				err = cl.Patch(context.TODO(), dep, patch)
+				err = cl.Delete(context.TODO(), metaObj)
 				Expect(err).NotTo(HaveOccurred())
 
-				By("validating patched Deployment has new annotation")
-				actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{})
+				By("validating the Node no longer exists")
+				_, err = clientset.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{})
+				Expect(err).To(HaveOccurred())
+
+				close(done)
+			})
+
+			It("should fail if the object does not exist", func(done Done) {
+				cl, err := client.New(cfg, client.Options{})
 				Expect(err).NotTo(HaveOccurred())
-				Expect(actual).NotTo(BeNil())
-				Expect(actual.Annotations["foo"]).To(Equal("bar"))
+				Expect(cl).NotTo(BeNil())
 
-				By("validating that a patch should fail with conflict, when it has an outdated resource version")
-				dep.Annotations["should"] = "conflict"
-				err = cl.Patch(context.TODO(), dep, patch)
+				By("Deleting node before it is ever created")
+				metaObj := metaOnlyFromObj(node, scheme)
+				err = cl.Delete(context.TODO(), metaObj)
 				Expect(err).To(HaveOccurred())
-				Expect(apierrors.IsConflict(err)).To(BeTrue())
 
 				close(done)
 			})
 
-			It("should patch and preserve type information", func(done Done) {
+			It("should delete a collection of objects", func(done Done) {
 				cl, err := client.New(cfg, client.Options{})
 				Expect(err).NotTo(HaveOccurred())
 				Expect(cl).NotTo(BeNil())
 
-				By("initially creating a Deployment")
+
By("initially creating two Deployments") + dep2 := dep.DeepCopy() + dep2.Name = dep2.Name + "-2" + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) + dep2, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep2, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) - By("patching the Deployment") - dep.SetGroupVersionKind(depGvk) - err = cl.Patch(context.TODO(), dep, client.RawPatch(types.MergePatchType, mergePatch)) + depName := dep.Name + dep2Name := dep2.Name + + By("deleting Deployments") + metaObj := metaOnlyFromObj(dep, scheme) + err = cl.DeleteAllOf(context.TODO(), metaObj, client.InNamespace(ns), client.MatchingLabels(dep.ObjectMeta.Labels)) Expect(err).NotTo(HaveOccurred()) - By("validating updated Deployment has type information") - Expect(dep.GroupVersionKind()).To(Equal(depGvk)) + By("validating the Deployment no longer exists") + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, depName, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) + _, err = clientset.AppsV1().Deployments(ns).Get(ctx, dep2Name, metav1.GetOptions{}) + Expect(err).To(HaveOccurred()) close(done) }) + }) + }) + + Describe("Get", func() { + Context("with structured objects", func() { + It("should fetch an existing object for a go struct", func(done Done) { + By("first creating the Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) - It("should patch an existing object non-namespace object from a go struct", func(done Done) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("initially creating a Node") + By("fetching the created Deployment") + var actual appsv1.Deployment + key := client.ObjectKey{Namespace: ns, Name: dep.Name} + err = cl.Get(context.TODO(), key, &actual) + Expect(err).NotTo(HaveOccurred()) + Expect(actual).NotTo(BeNil()) + + By("validating the fetched deployment equals the created one") + Expect(dep).To(Equal(&actual)) + + close(done) + }) + + It("should fetch an existing non-namespace object for a go struct", func(done Done) { + By("first creating the object") node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("patching the Node") - nodeName := node.Name - err = cl.Patch(context.TODO(), node, client.RawPatch(types.MergePatchType, mergePatch)) + cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) + Expect(cl).NotTo(BeNil()) - By("validating the Node no longer exists") - actual, err := clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) + By("retrieving node through client") + var actual corev1.Node + key := client.ObjectKey{Namespace: ns, Name: node.Name} + err = cl.Get(context.TODO(), key, &actual) Expect(err).NotTo(HaveOccurred()) Expect(actual).NotTo(BeNil()) - Expect(actual.Annotations["foo"]).To(Equal("bar")) + Expect(node).To(Equal(&actual)) close(done) }) @@ -1294,8 +1431,10 @@ var _ = Describe("Client", func() { Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("Patching node before it is ever created") - err = cl.Patch(context.TODO(), node, client.RawPatch(types.MergePatchType, mergePatch)) + By("fetching object that has not been created yet") + key := client.ObjectKey{Namespace: ns, Name: dep.Name} + var actual appsv1.Deployment + err = cl.Get(context.TODO(), key, &actual) Expect(err).To(HaveOccurred()) close(done) @@ 
-1305,129 +1444,88 @@ var _ = Describe("Client", func() { }) - It("should fail if the object cannot be mapped to a GVK", func(done Done) { - By("creating client with empty Scheme") + It("should fail if the object cannot be mapped to a GVK", func() { + By("first creating the Deployment") + dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a client with an empty Scheme") emptyScheme := runtime.NewScheme() cl, err := client.New(cfg, client.Options{Scheme: emptyScheme}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("initially creating a Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("patching the Deployment fails") - err = cl.Patch(context.TODO(), dep, client.RawPatch(types.MergePatchType, mergePatch)) + By("fetching the created Deployment fails") + var actual appsv1.Deployment + key := client.ObjectKey{Namespace: ns, Name: dep.Name} + err = cl.Get(context.TODO(), key, &actual) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("no kind is registered for the type")) - - close(done) }) PIt("should fail if the GVK cannot be mapped to a Resource", func() { }) - - It("should respect passed in update options", func() { - By("creating a new client") - cl, err := client.New(cfg, client.Options{}) - Expect(err).NotTo(HaveOccurred()) - Expect(cl).NotTo(BeNil()) - - By("initially creating a Deployment") + }) + Context("with unstructured objects", func() { + It("should fetch an existing object", func(done Done) { + By("first creating the Deployment") dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) - By("patching the Deployment with dry-run") - err = cl.Patch(context.TODO(), dep, client.RawPatch(types.MergePatchType, mergePatch), client.PatchDryRunAll) - Expect(err).NotTo(HaveOccurred()) - - By("validating patched Deployment doesn't have the new annotation") - actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, dep.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - Expect(actual).NotTo(BeNil()) - Expect(actual.Annotations).NotTo(HaveKey("foo")) - }) - }) - Context("with unstructured objects", func() { - It("should patch an existing object from a go struct", func(done Done) { cl, err := client.New(cfg, client.Options{}) Expect(err).NotTo(HaveOccurred()) Expect(cl).NotTo(BeNil()) - By("initially creating a Deployment") - dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) + By("encoding the Deployment as unstructured") + var u runtime.Unstructured = &unstructured.Unstructured{} - By("patching the Deployment") - depName := dep.Name - u := &unstructured.Unstructured{} Expect(scheme.Convert(dep, u, nil)).To(Succeed()) - u.SetGroupVersionKind(schema.GroupVersionKind{ + By("fetching the created Deployment") + var actual unstructured.Unstructured + actual.SetGroupVersionKind(schema.GroupVersionKind{ Group: "apps", Kind: "Deployment", Version: "v1", }) - err = cl.Patch(context.TODO(), u, client.RawPatch(types.MergePatchType, mergePatch)) - Expect(err).NotTo(HaveOccurred()) - - By("validating patched Deployment has new annotation") - actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, depName, metav1.GetOptions{}) + key := client.ObjectKey{Namespace: ns, Name: dep.Name} + err = cl.Get(context.TODO(), key, &actual) 
+            var actual unstructured.Unstructured
+            actual.SetGroupVersionKind(schema.GroupVersionKind{
                 Group:   "apps",
                 Kind:    "Deployment",
                 Version: "v1",
             })
-            err = cl.Patch(context.TODO(), u, client.RawPatch(types.MergePatchType, mergePatch))
-            Expect(err).NotTo(HaveOccurred())
-
-            By("validating patched Deployment has new annotation")
-            actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, depName, metav1.GetOptions{})
+            key := client.ObjectKey{Namespace: ns, Name: dep.Name}
+            err = cl.Get(context.TODO(), key, &actual)
             Expect(err).NotTo(HaveOccurred())
             Expect(actual).NotTo(BeNil())
-            Expect(actual.Annotations["foo"]).To(Equal("bar"))
+
+            By("validating the fetched Deployment equals the created one")
+            Expect(u).To(Equal(&actual))
             close(done)
         })

-        It("should patch and preserve type information", func(done Done) {
-            cl, err := client.New(cfg, client.Options{})
-            Expect(err).NotTo(HaveOccurred())
-            Expect(cl).NotTo(BeNil())
-
-            By("initially creating a Deployment")
-            dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{})
-            Expect(err).NotTo(HaveOccurred())
-
-            By("patching the Deployment")
-            u := &unstructured.Unstructured{}
-            Expect(scheme.Convert(dep, u, nil)).To(Succeed())
-            u.SetGroupVersionKind(depGvk)
-            err = cl.Patch(context.TODO(), u, client.RawPatch(types.MergePatchType, mergePatch))
+        It("should fetch an existing non-namespaced object", func(done Done) {
+            By("first creating the Node")
+            node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})
             Expect(err).NotTo(HaveOccurred())
-            By("validating updated Deployment has type information")
-            Expect(u.GroupVersionKind()).To(Equal(depGvk))
-
-            close(done)
-        })
+            By("encoding the Node as unstructured")
+            var u runtime.Unstructured = &unstructured.Unstructured{}
+            Expect(scheme.Convert(node, u, nil)).To(Succeed())
-        It("should patch an existing object non-namespace object from a go struct", func(done Done) {
             cl, err := client.New(cfg, client.Options{})
             Expect(err).NotTo(HaveOccurred())
             Expect(cl).NotTo(BeNil())
-            By("initially creating a Node")
-            node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})
-            Expect(err).NotTo(HaveOccurred())
-
-            By("patching the Node")
-            nodeName := node.Name
-            u := &unstructured.Unstructured{}
-            Expect(scheme.Convert(node, u, nil)).To(Succeed())
-            u.SetGroupVersionKind(schema.GroupVersionKind{
+            By("fetching the created Node")
+            var actual unstructured.Unstructured
+            actual.SetGroupVersionKind(schema.GroupVersionKind{
                 Group:   "",
                 Kind:    "Node",
                 Version: "v1",
             })
-            err = cl.Patch(context.TODO(), u, client.RawPatch(types.MergePatchType, mergePatch))
-            Expect(err).NotTo(HaveOccurred())
-
-            By("validating patched Node has new annotation")
-            actual, err := clientset.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
+            key := client.ObjectKey{Namespace: ns, Name: node.Name}
+            err = cl.Get(context.TODO(), key, &actual)
             Expect(err).NotTo(HaveOccurred())
             Expect(actual).NotTo(BeNil())
-            Expect(actual.Annotations["foo"]).To(Equal("bar"))
+
+            By("validating the fetched Node equals the created one")
+            Expect(u).To(Equal(&actual))
             close(done)
         })
@@ -1437,53 +1535,16 @@ var _ = Describe("Client", func() {
             Expect(err).NotTo(HaveOccurred())
             Expect(cl).NotTo(BeNil())
-            By("Patching node before it is ever created")
+            By("fetching object that has not been created yet")
+            key := client.ObjectKey{Namespace: ns, Name: dep.Name}
             u := &unstructured.Unstructured{}
-            Expect(scheme.Convert(node, u, nil)).To(Succeed())
-            u.SetGroupVersionKind(schema.GroupVersionKind{
-                Group:   "",
-                Kind:    "Node",
-                Version: "v1",
-            })
-            err = cl.Patch(context.TODO(), node, client.RawPatch(types.MergePatchType, mergePatch))
+            err = cl.Get(context.TODO(), key, u)
             Expect(err).To(HaveOccurred())
             close(done)
         })
-
-        It("should respect passed-in update options", func() {
-            By("creating a new client")
-            cl, err := client.New(cfg, client.Options{})
-            Expect(err).NotTo(HaveOccurred())
-            Expect(cl).NotTo(BeNil())
-
-            By("initially creating a Deployment")
-            dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{})
-            Expect(err).NotTo(HaveOccurred())
-
-            By("patching the Deployment")
-            depName := dep.Name
-            u := &unstructured.Unstructured{}
-            Expect(scheme.Convert(dep, u, nil)).To(Succeed())
-            u.SetGroupVersionKind(schema.GroupVersionKind{
-                Group:   "apps",
-                Kind:    "Deployment",
-                Version: "v1",
-            })
-            err = cl.Patch(context.TODO(), u, client.RawPatch(types.MergePatchType, mergePatch), client.PatchDryRunAll)
-            Expect(err).NotTo(HaveOccurred())
-
-            By("validating patched Deployment does not have the new annotation")
-            actual, err := clientset.AppsV1().Deployments(ns).Get(ctx, depName, metav1.GetOptions{})
-            Expect(err).NotTo(HaveOccurred())
-            Expect(actual).NotTo(BeNil())
-            Expect(actual.Annotations).NotTo(HaveKey("foo"))
-        })
     })
-    })
-
-    Describe("Get", func() {
-        Context("with structured objects", func() {
+    Context("with metadata objects", func() {
        It("should fetch an existing object for a go struct", func(done Done) {
            By("first creating the Deployment")
            dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{})
@@ -1494,14 +1555,19 @@ var _ = Describe("Client", func() {
            Expect(cl).NotTo(BeNil())

            By("fetching the created Deployment")
-            var actual appsv1.Deployment
+            var actual metav1.PartialObjectMetadata
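+            // the GVK has to be set by hand: PartialObjectMetadata carries
+            // no Go type information for the scheme to infer it from, and
+            // the client resolves the resource to fetch from this GVK.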
+            actual.SetGroupVersionKind(schema.GroupVersionKind{
+                Group:   "apps",
+                Version: "v1",
+                Kind:    "Deployment",
+            })
            key := client.ObjectKey{Namespace: ns, Name: dep.Name}
            err = cl.Get(context.TODO(), key, &actual)
            Expect(err).NotTo(HaveOccurred())
            Expect(actual).NotTo(BeNil())

            By("validating the fetched deployment equals the created one")
-            Expect(dep).To(Equal(&actual))
+            Expect(metaOnlyFromObj(dep, scheme)).To(Equal(&actual))
            close(done)
        })
@@ -1516,13 +1582,17 @@ var _ = Describe("Client", func() {
            Expect(cl).NotTo(BeNil())

            By("retrieving node through client")
-            var actual corev1.Node
+            var actual metav1.PartialObjectMetadata
+            actual.SetGroupVersionKind(schema.GroupVersionKind{
+                Version: "v1",
+                Kind:    "Node",
+            })
            key := client.ObjectKey{Namespace: ns, Name: node.Name}
            err = cl.Get(context.TODO(), key, &actual)
            Expect(err).NotTo(HaveOccurred())
            Expect(actual).NotTo(BeNil())
-            Expect(node).To(Equal(&actual))
+            Expect(metaOnlyFromObj(node, scheme)).To(Equal(&actual))
            close(done)
        })
@@ -1534,116 +1604,22 @@ var _ = Describe("Client", func() {
            By("fetching object that has not been created yet")
            key := client.ObjectKey{Namespace: ns, Name: dep.Name}
-            var actual appsv1.Deployment
-            err = cl.Get(context.TODO(), key, &actual)
-            Expect(err).To(HaveOccurred())
-
-            close(done)
-        })
-
-        PIt("should fail if the object doesn't have meta", func() {
-
-        })
-
-        It("should fail if the object cannot be mapped to a GVK", func() {
-            By("first creating the Deployment")
-            dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{})
-            Expect(err).NotTo(HaveOccurred())
-
-            By("creating a client with an empty Scheme")
-            emptyScheme := runtime.NewScheme()
-            cl, err := client.New(cfg, client.Options{Scheme: emptyScheme})
-            Expect(err).NotTo(HaveOccurred())
-            Expect(cl).NotTo(BeNil())
-
-            By("fetching the created Deployment fails")
-            var actual appsv1.Deployment
-            key := client.ObjectKey{Namespace: ns, Name: dep.Name}
-            err = cl.Get(context.TODO(), key, &actual)
-            Expect(err).To(HaveOccurred())
-            Expect(err.Error()).To(ContainSubstring("no kind is registered for the type"))
-        })
-
-        PIt("should fail if the GVK cannot be mapped to a Resource", func() {
-
-        })
-    })
-
-    Context("with unstructured objects", func() {
-        It("should fetch an existing object", func(done Done) {
-            By("first creating the Deployment")
-            dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{})
-            Expect(err).NotTo(HaveOccurred())
-
-            cl, err := client.New(cfg, client.Options{})
-            Expect(err).NotTo(HaveOccurred())
-            Expect(cl).NotTo(BeNil())
-
-            By("encoding the Deployment as unstructured")
-            var u runtime.Unstructured = &unstructured.Unstructured{}
-            Expect(scheme.Convert(dep, u, nil)).To(Succeed())
-
-            By("fetching the created Deployment")
-            var actual unstructured.Unstructured
+            var actual metav1.PartialObjectMetadata
            actual.SetGroupVersionKind(schema.GroupVersionKind{
                Group:   "apps",
-                Kind:    "Deployment",
                Version: "v1",
+                Kind:    "Deployment",
            })
-            key := client.ObjectKey{Namespace: ns, Name: dep.Name}
            err = cl.Get(context.TODO(), key, &actual)
-            Expect(err).NotTo(HaveOccurred())
-            Expect(actual).NotTo(BeNil())
-
-            By("validating the fetched Deployment equals the created one")
-            Expect(u).To(Equal(&actual))
+            Expect(err).To(HaveOccurred())
            close(done)
        })

-        It("should fetch an existing non-namespace object", func(done Done) {
-            By("first creating the Node")
-            node, err := clientset.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})
-            Expect(err).NotTo(HaveOccurred())
-
-            By("encoding the Node as unstructured")
-            var u runtime.Unstructured = &unstructured.Unstructured{}
-            Expect(scheme.Convert(node, u, nil)).To(Succeed())
-
-            cl, err := client.New(cfg, client.Options{})
-            Expect(err).NotTo(HaveOccurred())
-            Expect(cl).NotTo(BeNil())
-
-            By("fetching the created Node")
-            var actual unstructured.Unstructured
-            actual.SetGroupVersionKind(schema.GroupVersionKind{
-                Group:   "",
-                Kind:    "Node",
-                Version: "v1",
-            })
-            key := client.ObjectKey{Namespace: ns, Name: node.Name}
-            err = cl.Get(context.TODO(), key, &actual)
-            Expect(err).NotTo(HaveOccurred())
-            Expect(actual).NotTo(BeNil())
-
-            By("validating the fetched Node equals the created one")
-            Expect(u).To(Equal(&actual))
-
-            close(done)
+        PIt("should fail if the object doesn't have meta", func() {
        })

-        It("should fail if the object does not exist", func(done Done) {
-            cl, err := client.New(cfg, client.Options{})
-            Expect(err).NotTo(HaveOccurred())
-            Expect(cl).NotTo(BeNil())
-
-            By("fetching object that has not been created yet")
-            key := client.ObjectKey{Namespace: ns, Name: dep.Name}
-            u := &unstructured.Unstructured{}
-            err = cl.Get(context.TODO(), key, u)
-            Expect(err).To(HaveOccurred())
-
-            close(done)
+        PIt("should fail if the GVK cannot be mapped to a Resource", func() {
        })
    })
})
@@ -2367,6 +2343,459 @@ var _ = Describe("Client", func() {
        PIt("should filter results by namespace selector", func() {

+        })
+    })
+    Context("with metadata objects", func() {
+        It("should fetch collection of objects", func(done Done) {
+            By("creating an initial object")
+            dep, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep, metav1.CreateOptions{})
+            Expect(err).NotTo(HaveOccurred())
+
+            cl, err := client.New(cfg, client.Options{})
+            Expect(err).NotTo(HaveOccurred())
+
+            By("listing all objects of that type in the cluster")
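+            // the list GVK (here DeploymentList) has to be set explicitly;
+            // PartialObjectMetadataList has no Go type information of its
+            // own for the client to map to a resource.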
+            metaList := &metav1.PartialObjectMetadataList{}
+            metaList.SetGroupVersionKind(schema.GroupVersionKind{
+                Group:   "apps",
+                Version: "v1",
+                Kind:    "DeploymentList",
+            })
+            Expect(cl.List(context.Background(), metaList)).NotTo(HaveOccurred())
+
+            Expect(metaList.Items).NotTo(BeEmpty())
+            hasDep := false
+            for _, item := range metaList.Items {
+                if item.Name == dep.Name && item.Namespace == dep.Namespace {
+                    hasDep = true
+                    break
+                }
+            }
+            Expect(hasDep).To(BeTrue())
+
+            close(done)
+        }, serverSideTimeoutSeconds)
+
+        It("should return an empty list if there are no matching objects", func(done Done) {
+            cl, err := client.New(cfg, client.Options{})
+            Expect(err).NotTo(HaveOccurred())
+
+            By("listing all Deployments in the cluster")
+            metaList := &metav1.PartialObjectMetadataList{}
+            metaList.SetGroupVersionKind(schema.GroupVersionKind{
+                Group:   "apps",
+                Version: "v1",
+                Kind:    "DeploymentList",
+            })
+            Expect(cl.List(context.Background(), metaList)).NotTo(HaveOccurred())
+
+            By("validating no Deployments are returned")
+            Expect(metaList.Items).To(BeEmpty())
+
+            close(done)
+        }, serverSideTimeoutSeconds)
+
+        // TODO(seans): get label selector test working
+        It("should filter results by label selector", func(done Done) {
+            By("creating a Deployment with the app=frontend label")
+            depFrontend := &appsv1.Deployment{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "deployment-frontend",
+                    Namespace: ns,
+                    Labels:    map[string]string{"app": "frontend"},
+                },
+                Spec: appsv1.DeploymentSpec{
+                    Selector: &metav1.LabelSelector{
+                        MatchLabels: map[string]string{"app": "frontend"},
+                    },
+                    Template: corev1.PodTemplateSpec{
+                        ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}},
+                        Spec:       corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}},
+                    },
+                },
+            }
+            depFrontend, err := clientset.AppsV1().Deployments(ns).Create(ctx, depFrontend, metav1.CreateOptions{})
+            Expect(err).NotTo(HaveOccurred())
+
+            By("creating a Deployment with the app=backend label")
+            depBackend := &appsv1.Deployment{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "deployment-backend",
+                    Namespace: ns,
+                    Labels:    map[string]string{"app": "backend"},
+                },
+                Spec: appsv1.DeploymentSpec{
+                    Selector: &metav1.LabelSelector{
+                        MatchLabels: map[string]string{"app": "backend"},
+                    },
+                    Template: corev1.PodTemplateSpec{
+                        ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}},
+                        Spec:       corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}},
+                    },
+                },
+            }
+            depBackend, err = clientset.AppsV1().Deployments(ns).Create(ctx, depBackend, metav1.CreateOptions{})
+            Expect(err).NotTo(HaveOccurred())
+
+            cl, err := client.New(cfg, client.Options{})
+            Expect(err).NotTo(HaveOccurred())
+
+            By("listing all Deployments with label app=backend")
+            metaList := &metav1.PartialObjectMetadataList{}
+            metaList.SetGroupVersionKind(schema.GroupVersionKind{
+                Group:   "apps",
+                Version: "v1",
+                Kind:    "DeploymentList",
+            })
+            labels := map[string]string{"app": "backend"}
+            err = cl.List(context.Background(), metaList, client.MatchingLabels(labels))
+            Expect(err).NotTo(HaveOccurred())
+
+            By("only the Deployment with the backend label is returned")
+            Expect(metaList.Items).NotTo(BeEmpty())
+            Expect(metaList.Items).To(HaveLen(1))
+            actual := metaList.Items[0]
+            Expect(actual.Name).To(Equal("deployment-backend"))
+
+            deleteDeployment(ctx, depFrontend, ns)
+            deleteDeployment(ctx, depBackend, ns)
+
+            close(done)
+        }, serverSideTimeoutSeconds)
+
+        It("should filter results by namespace selector", func(done Done) {
+            By("creating a Deployment in test-namespace-1")
+            tns1 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-1"}}
+            _, err := clientset.CoreV1().Namespaces().Create(ctx, tns1, metav1.CreateOptions{})
+            Expect(err).NotTo(HaveOccurred())
+            depFrontend := &appsv1.Deployment{
+                ObjectMeta: metav1.ObjectMeta{Name: "deployment-frontend", Namespace: "test-namespace-1"},
+                Spec: appsv1.DeploymentSpec{
+                    Selector: &metav1.LabelSelector{
+                        MatchLabels: map[string]string{"app": "frontend"},
+                    },
+                    Template: corev1.PodTemplateSpec{
+                        ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}},
+                        Spec:       corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}},
+                    },
+                },
+            }
+            depFrontend, err = clientset.AppsV1().Deployments("test-namespace-1").Create(ctx, depFrontend, metav1.CreateOptions{})
+            Expect(err).NotTo(HaveOccurred())
+
+            By("creating a Deployment in test-namespace-2")
+            tns2 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-2"}}
+            _, err = clientset.CoreV1().Namespaces().Create(ctx, tns2, metav1.CreateOptions{})
+            Expect(err).NotTo(HaveOccurred())
+            depBackend := &appsv1.Deployment{
+                ObjectMeta: metav1.ObjectMeta{Name: "deployment-backend", Namespace: "test-namespace-2"},
+                Spec: appsv1.DeploymentSpec{
+                    Selector: &metav1.LabelSelector{
+                        MatchLabels: map[string]string{"app": "backend"},
+                    },
+                    Template: corev1.PodTemplateSpec{
+                        ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}},
+                        Spec:       corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}},
+                    },
+                },
+            }
+            depBackend, err = clientset.AppsV1().Deployments("test-namespace-2").Create(ctx, depBackend, metav1.CreateOptions{})
+            Expect(err).NotTo(HaveOccurred())
+
+            cl, err := client.New(cfg, client.Options{})
+            Expect(err).NotTo(HaveOccurred())
+
+            By("listing all Deployments in test-namespace-1")
+            metaList := &metav1.PartialObjectMetadataList{}
+            metaList.SetGroupVersionKind(schema.GroupVersionKind{
+                Group:   "apps",
+                Version: "v1",
+                Kind:    "DeploymentList",
+            })
+            err = cl.List(context.Background(), metaList, client.InNamespace("test-namespace-1"))
+            Expect(err).NotTo(HaveOccurred())
+
+            By("only the Deployment in test-namespace-1 is returned")
+            Expect(metaList.Items).NotTo(BeEmpty())
+            Expect(metaList.Items).To(HaveLen(1))
+            actual := metaList.Items[0]
+            Expect(actual.Name).To(Equal("deployment-frontend"))
+
+            deleteDeployment(ctx, depFrontend, "test-namespace-1")
+            deleteDeployment(ctx, depBackend, "test-namespace-2")
+            deleteNamespace(ctx, tns1)
+            deleteNamespace(ctx, tns2)
+
+            close(done)
+        }, serverSideTimeoutSeconds)
+
+        It("should filter results by field selector", func(done Done) {
+            By("creating a Deployment with name deployment-frontend")
+            depFrontend := &appsv1.Deployment{
+                ObjectMeta: metav1.ObjectMeta{Name: "deployment-frontend", Namespace: ns},
+                Spec: appsv1.DeploymentSpec{
+                    Selector: &metav1.LabelSelector{
+                        MatchLabels: map[string]string{"app": "frontend"},
+                    },
+                    Template: corev1.PodTemplateSpec{
+                        ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}},
+                        Spec:       corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}},
+                    },
+                },
+            }
+            depFrontend, err := clientset.AppsV1().Deployments(ns).Create(ctx, depFrontend, metav1.CreateOptions{})
+            Expect(err).NotTo(HaveOccurred())
+
+            By("creating a Deployment with name deployment-backend")
+            depBackend := &appsv1.Deployment{
+                ObjectMeta: metav1.ObjectMeta{Name: "deployment-backend", Namespace: ns},
+                Spec: appsv1.DeploymentSpec{
+                    Selector: &metav1.LabelSelector{
+                        MatchLabels: map[string]string{"app": "backend"},
+                    },
+                    Template: corev1.PodTemplateSpec{
+                        ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}},
+                        Spec:       corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}},
+                    },
+                },
+            }
+            depBackend, err = clientset.AppsV1().Deployments(ns).Create(ctx, depBackend, metav1.CreateOptions{})
+            Expect(err).NotTo(HaveOccurred())
+
+            cl, err := client.New(cfg, client.Options{})
+            Expect(err).NotTo(HaveOccurred())
+
+            By("listing all Deployments with field metadata.name=deployment-backend")
+            metaList := &metav1.PartialObjectMetadataList{}
+            metaList.SetGroupVersionKind(schema.GroupVersionKind{
+                Group:   "apps",
+                Version: "v1",
+                Kind:    "DeploymentList",
+            })
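+            // metadata.name is a field selector the API server supports for
+            // every resource, so this filtering happens server-side.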
+            err = cl.List(context.Background(), metaList,
+                client.MatchingFields{"metadata.name": "deployment-backend"})
+            Expect(err).NotTo(HaveOccurred())
+
+            By("only the Deployment with the matching name is returned")
+            Expect(metaList.Items).NotTo(BeEmpty())
+            Expect(metaList.Items).To(HaveLen(1))
+            actual := metaList.Items[0]
+            Expect(actual.Name).To(Equal("deployment-backend"))
+
+            deleteDeployment(ctx, depFrontend, ns)
+            deleteDeployment(ctx, depBackend, ns)
+
+            close(done)
+        }, serverSideTimeoutSeconds)
+
+        It("should filter results by namespace selector and label selector", func(done Done) {
+            By("creating a Deployment in test-namespace-3 with the app=frontend label")
+            tns3 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-3"}}
+            _, err := clientset.CoreV1().Namespaces().Create(ctx, tns3, metav1.CreateOptions{})
+            Expect(err).NotTo(HaveOccurred())
+            depFrontend3 := &appsv1.Deployment{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "deployment-frontend",
+                    Namespace: "test-namespace-3",
+                    Labels:    map[string]string{"app": "frontend"},
+                },
+                Spec: appsv1.DeploymentSpec{
+                    Selector: &metav1.LabelSelector{
+                        MatchLabels: map[string]string{"app": "frontend"},
+                    },
+                    Template: corev1.PodTemplateSpec{
+                        ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}},
+                        Spec:       corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}},
+                    },
+                },
+            }
+            depFrontend3, err = clientset.AppsV1().Deployments("test-namespace-3").Create(ctx, depFrontend3, metav1.CreateOptions{})
+            Expect(err).NotTo(HaveOccurred())
+
+            By("creating a Deployment in test-namespace-3 with the app=backend label")
+            depBackend3 := &appsv1.Deployment{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "deployment-backend",
+                    Namespace: "test-namespace-3",
+                    Labels:    map[string]string{"app": "backend"},
+                },
+                Spec: appsv1.DeploymentSpec{
+                    Selector: &metav1.LabelSelector{
+                        MatchLabels: map[string]string{"app": "backend"},
+                    },
+                    Template: corev1.PodTemplateSpec{
+                        ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "backend"}},
+                        Spec:       corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}},
+                    },
+                },
+            }
+            depBackend3, err = clientset.AppsV1().Deployments("test-namespace-3").Create(ctx, depBackend3, metav1.CreateOptions{})
+            Expect(err).NotTo(HaveOccurred())
+
+            By("creating a Deployment in test-namespace-4 with the app=frontend label")
+            tns4 := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-namespace-4"}}
+            _, err = clientset.CoreV1().Namespaces().Create(ctx, tns4, metav1.CreateOptions{})
+            Expect(err).NotTo(HaveOccurred())
+            depFrontend4 := &appsv1.Deployment{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "deployment-frontend",
+                    Namespace: "test-namespace-4",
+                    Labels:    map[string]string{"app": "frontend"},
+                },
+                Spec: appsv1.DeploymentSpec{
+                    Selector: &metav1.LabelSelector{
+                        MatchLabels: map[string]string{"app": "frontend"},
+                    },
+                    Template: corev1.PodTemplateSpec{
+                        ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "frontend"}},
+                        Spec:       corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}},
+                    },
+                },
+            }
+            depFrontend4, err = clientset.AppsV1().Deployments("test-namespace-4").Create(ctx, depFrontend4, metav1.CreateOptions{})
+            Expect(err).NotTo(HaveOccurred())
+
+            cl, err := client.New(cfg, client.Options{})
+            Expect(err).NotTo(HaveOccurred())
+
+            By("listing all Deployments in test-namespace-3 with label app=frontend")
+            metaList := &metav1.PartialObjectMetadataList{}
+            metaList.SetGroupVersionKind(schema.GroupVersionKind{
+                Group:   "apps",
+                Version: "v1",
+                Kind:    "DeploymentList",
+            })
+            labels := map[string]string{"app": "frontend"}
+            err = cl.List(context.Background(), metaList,
+                client.InNamespace("test-namespace-3"),
+                client.MatchingLabels(labels),
+            )
+            Expect(err).NotTo(HaveOccurred())
+
+            By("only the Deployment in test-namespace-3 with label app=frontend is returned")
+            Expect(metaList.Items).NotTo(BeEmpty())
+            Expect(metaList.Items).To(HaveLen(1))
+            actual := metaList.Items[0]
+            Expect(actual.Name).To(Equal("deployment-frontend"))
+            Expect(actual.Namespace).To(Equal("test-namespace-3"))
+
+            deleteDeployment(ctx, depFrontend3, "test-namespace-3")
+            deleteDeployment(ctx, depBackend3, "test-namespace-3")
+            deleteDeployment(ctx, depFrontend4, "test-namespace-4")
+            deleteNamespace(ctx, tns3)
+            deleteNamespace(ctx, tns4)
+
+            close(done)
+        }, serverSideTimeoutSeconds)
+
+        It("should filter results using limit and continue options", func() {
+
+            makeDeployment := func(suffix string) *appsv1.Deployment {
+                return &appsv1.Deployment{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name: fmt.Sprintf("deployment-%s", suffix),
+                    },
+                    Spec: appsv1.DeploymentSpec{
+                        Selector: &metav1.LabelSelector{
+                            MatchLabels: map[string]string{"foo": "bar"},
+                        },
+                        Template: corev1.PodTemplateSpec{
+                            ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}},
+                            Spec:       corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}},
+                        },
+                    },
+                }
+            }
+
+            By("creating 4 deployments")
+            dep1 := makeDeployment("1")
+            dep1, err := clientset.AppsV1().Deployments(ns).Create(ctx, dep1, metav1.CreateOptions{})
+            Expect(err).NotTo(HaveOccurred())
+            defer deleteDeployment(ctx, dep1, ns)
+
+            dep2 := makeDeployment("2")
+            dep2, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep2, metav1.CreateOptions{})
+            Expect(err).NotTo(HaveOccurred())
+            defer deleteDeployment(ctx, dep2, ns)
+
+            dep3 := makeDeployment("3")
+            dep3, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep3, metav1.CreateOptions{})
+            Expect(err).NotTo(HaveOccurred())
+            defer deleteDeployment(ctx, dep3, ns)
+
+            dep4 := makeDeployment("4")
+            dep4, err = clientset.AppsV1().Deployments(ns).Create(ctx, dep4, metav1.CreateOptions{})
+            Expect(err).NotTo(HaveOccurred())
+            defer deleteDeployment(ctx, dep4, ns)
+
+            cl, err := client.New(cfg, client.Options{})
+            Expect(err).NotTo(HaveOccurred())
+
+            By("listing 1 deployment when limit=1 is used")
+            metaList := &metav1.PartialObjectMetadataList{}
+            metaList.SetGroupVersionKind(schema.GroupVersionKind{
+                Group:   "apps",
+                Version: "v1",
+                Kind:    "DeploymentList",
+            })
+            err = cl.List(context.Background(), metaList,
+                client.Limit(1),
+            )
+            Expect(err).NotTo(HaveOccurred())
+
+            Expect(metaList.Items).To(HaveLen(1))
+            Expect(metaList.Continue).NotTo(BeEmpty())
+            Expect(metaList.Items[0].Name).To(Equal(dep1.Name))
+
+            continueToken := metaList.Continue
+
+            By("listing the next deployment when previous continuation token is used and limit=1")
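+            // the continue token from the previous response resumes the
+            // listing exactly where the last page left off.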
+            metaList = &metav1.PartialObjectMetadataList{}
+            metaList.SetGroupVersionKind(schema.GroupVersionKind{
+                Group:   "apps",
+                Version: "v1",
+                Kind:    "DeploymentList",
+            })
+            err = cl.List(context.Background(), metaList,
+                client.Limit(1),
+                client.Continue(continueToken),
+            )
+            Expect(err).NotTo(HaveOccurred())
+
+            Expect(metaList.Items).To(HaveLen(1))
+            Expect(metaList.Continue).NotTo(BeEmpty())
+            Expect(metaList.Items[0].Name).To(Equal(dep2.Name))
+
+            continueToken = metaList.Continue
+
+            By("listing the 2 remaining deployments when previous continuation token is used without a limit")
+            metaList = &metav1.PartialObjectMetadataList{}
+            metaList.SetGroupVersionKind(schema.GroupVersionKind{
+                Group:   "apps",
+                Version: "v1",
+                Kind:    "DeploymentList",
+            })
+            err = cl.List(context.Background(), metaList,
+                client.Continue(continueToken),
+            )
+            Expect(err).NotTo(HaveOccurred())
+
+            Expect(metaList.Items).To(HaveLen(2))
+            Expect(metaList.Continue).To(BeEmpty())
+            Expect(metaList.Items[0].Name).To(Equal(dep3.Name))
+            Expect(metaList.Items[1].Name).To(Equal(dep4.Name))
+        }, serverSideTimeoutSeconds)
+
+        PIt("should fail if the object doesn't have meta", func() {
+
+        })
+
+        PIt("should fail if the object cannot be mapped to a GVK", func() {
+
+        })
+
+        PIt("should fail if the GVK cannot be mapped to a Resource", func() {
+        })
    })
})
diff --git a/pkg/client/metadata_client.go b/pkg/client/metadata_client.go
new file mode 100644
index 0000000000..956fa0fb3e
--- /dev/null
+++ b/pkg/client/metadata_client.go
@@ -0,0 +1,194 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+    "context"
+    "fmt"
+    "strings"
+
+    "k8s.io/apimachinery/pkg/api/meta"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/runtime/schema"
+    "k8s.io/client-go/metadata"
+)
+
+// TODO(directxman12): we could rewrite this on top of the low-level REST
+// client to avoid the extra shallow copy at the end, but I'm not sure it's
+// worth it -- the metadata client deals with falling back to loading the whole
+// object on older API servers, etc, and we'd have to reproduce that.
+
+// metadataClient is a client that reads & writes metadata-only requests to/from the API server.
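+//
+// Every object passed in is expected to be a *metav1.PartialObjectMetadata
+// (or the List variant) with its GroupVersionKind set. A metadata-only fetch
+// through a client wired up this way might look roughly like:
+//
+//    obj := &metav1.PartialObjectMetadata{}
+//    obj.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
+//    err := c.Get(ctx, ObjectKey{Namespace: "default", Name: "foo"}, obj)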
+type metadataClient struct {
+    client     metadata.Interface
+    restMapper meta.RESTMapper
+}
+
+func (mc *metadataClient) getResourceInterface(gvk schema.GroupVersionKind, ns string) (metadata.ResourceInterface, error) {
+    mapping, err := mc.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+    if err != nil {
+        return nil, err
+    }
+    if mapping.Scope.Name() == meta.RESTScopeNameRoot {
+        return mc.client.Resource(mapping.Resource), nil
+    }
+    return mc.client.Resource(mapping.Resource).Namespace(ns), nil
+}
+
+// Delete implements client.Client
+func (mc *metadataClient) Delete(ctx context.Context, obj runtime.Object, opts ...DeleteOption) error {
+    metadata, ok := obj.(*metav1.PartialObjectMetadata)
+    if !ok {
+        return fmt.Errorf("metadata client did not understand object: %T", obj)
+    }
+
+    resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), metadata.Namespace)
+    if err != nil {
+        return err
+    }
+
+    deleteOpts := DeleteOptions{}
+    deleteOpts.ApplyOptions(opts)
+
+    return resInt.Delete(ctx, metadata.Name, *deleteOpts.AsDeleteOptions())
+}
+
+// DeleteAllOf implements client.Client
+func (mc *metadataClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...DeleteAllOfOption) error {
+    metadata, ok := obj.(*metav1.PartialObjectMetadata)
+    if !ok {
+        return fmt.Errorf("metadata client did not understand object: %T", obj)
+    }
+
+    deleteAllOfOpts := DeleteAllOfOptions{}
+    deleteAllOfOpts.ApplyOptions(opts)
+
+    resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), deleteAllOfOpts.ListOptions.Namespace)
+    if err != nil {
+        return err
+    }
+
+    return resInt.DeleteCollection(ctx, *deleteAllOfOpts.AsDeleteOptions(), *deleteAllOfOpts.AsListOptions())
+}
+
+// Patch implements client.Client
+func (mc *metadataClient) Patch(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error {
+    metadata, ok := obj.(*metav1.PartialObjectMetadata)
+    if !ok {
+        return fmt.Errorf("metadata client did not understand object: %T", obj)
+    }
+
+    gvk := metadata.GroupVersionKind()
+    resInt, err := mc.getResourceInterface(gvk, metadata.Namespace)
+    if err != nil {
+        return err
+    }
+
+    data, err := patch.Data(obj)
+    if err != nil {
+        return err
+    }
+
+    patchOpts := &PatchOptions{}
+    res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.ApplyOptions(opts).AsPatchOptions())
+    if err != nil {
+        return err
+    }
+    *metadata = *res
+    metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
+    return nil
+}
+
+// Get implements client.Client
+func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error {
+    metadata, ok := obj.(*metav1.PartialObjectMetadata)
+    if !ok {
+        return fmt.Errorf("metadata client did not understand object: %T", obj)
+    }
+
+    gvk := metadata.GroupVersionKind()
+
+    resInt, err := mc.getResourceInterface(gvk, key.Namespace)
+    if err != nil {
+        return err
+    }
+
+    res, err := resInt.Get(ctx, key.Name, metav1.GetOptions{})
+    if err != nil {
+        return err
+    }
+    *metadata = *res
+    metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
+    return nil
+}
+
+// List implements client.Client
+func (mc *metadataClient) List(ctx context.Context, obj runtime.Object, opts ...ListOption) error {
+    metadata, ok := obj.(*metav1.PartialObjectMetadataList)
+    if !ok {
+        return fmt.Errorf("metadata client did not understand object: %T", obj)
+    }
+
+    gvk := metadata.GroupVersionKind()
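+    // list kinds end in "List" by convention (DeploymentList, etc.); the
+    // RESTMapper wants the GVK of the individual items, so trim the suffix
+    // off a copy before resolving the resource.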
+    itemGVK := gvk
+    if strings.HasSuffix(itemGVK.Kind, "List") {
+        itemGVK.Kind = itemGVK.Kind[:len(itemGVK.Kind)-4]
+    }
+
+    listOpts := ListOptions{}
+    listOpts.ApplyOptions(opts)
+
+    resInt, err := mc.getResourceInterface(itemGVK, listOpts.Namespace)
+    if err != nil {
+        return err
+    }
+
+    res, err := resInt.List(ctx, *listOpts.AsListOptions())
+    if err != nil {
+        return err
+    }
+    *metadata = *res
+    metadata.SetGroupVersionKind(gvk) // restore the list GVK, which isn't set on metadata
+    return nil
+}
+
+// PatchStatus is like Patch, but scoped to the object's status subresource.
+func (mc *metadataClient) PatchStatus(ctx context.Context, obj runtime.Object, patch Patch, opts ...PatchOption) error {
+    metadata, ok := obj.(*metav1.PartialObjectMetadata)
+    if !ok {
+        return fmt.Errorf("metadata client did not understand object: %T", obj)
+    }
+
+    gvk := metadata.GroupVersionKind()
+    resInt, err := mc.getResourceInterface(gvk, metadata.Namespace)
+    if err != nil {
+        return err
+    }
+
+    data, err := patch.Data(obj)
+    if err != nil {
+        return err
+    }
+
+    patchOpts := &PatchOptions{}
+    res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.ApplyOptions(opts).AsPatchOptions(), "status")
+    if err != nil {
+        return err
+    }
+    *metadata = *res
+    metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
+    return nil
+}