From 9b648faab7733bfef83c4e71115c6c700664b48e Mon Sep 17 00:00:00 2001 From: Eron Wright Date: Tue, 3 Oct 2023 15:27:30 -0700 Subject: [PATCH 01/11] Use output properties for await --- provider/pkg/await/await.go | 57 +++++++++-------- provider/pkg/await/awaiters.go | 84 +++++++++++++------------- provider/pkg/await/deployment.go | 46 +++++++------- provider/pkg/await/deployment_test.go | 10 +-- provider/pkg/await/ingress.go | 20 +++--- provider/pkg/await/job.go | 14 ++--- provider/pkg/await/pod.go | 12 ++-- provider/pkg/await/service.go | 12 ++-- provider/pkg/await/statefulset.go | 16 ++--- provider/pkg/await/statefulset_test.go | 6 +- provider/pkg/await/util_test.go | 1 - provider/pkg/metadata/naming.go | 10 ++- provider/pkg/provider/provider.go | 27 +++++---- 13 files changed, 159 insertions(+), 156 deletions(-) diff --git a/provider/pkg/await/await.go b/provider/pkg/await/await.go index 6ae92aac87..2f632b1241 100644 --- a/provider/pkg/await/await.go +++ b/provider/pkg/await/await.go @@ -90,17 +90,18 @@ type ReadConfig struct { type UpdateConfig struct { ProviderConfig - Previous *unstructured.Unstructured - Inputs *unstructured.Unstructured - Timeout float64 - Preview bool + OldInputs *unstructured.Unstructured + OldOutputs *unstructured.Unstructured + Inputs *unstructured.Unstructured + Timeout float64 + Preview bool // IgnoreChanges is a list of fields to ignore when diffing the old and new objects. IgnoreChanges []string } type DeleteConfig struct { ProviderConfig - Inputs *unstructured.Unstructured + Outputs *unstructured.Unstructured Name string Timeout float64 } @@ -162,6 +163,8 @@ func Creation(c CreateConfig) (*unstructured.Unstructured, error) { // nolint // https://github.com/kubernetes/kubernetes/blob/54889d581a35acf940d52a8a384cccaa0b597ddc/pkg/kubectl/cmd/apply/apply.go#L94 + patchResource := kinds.PatchQualifiedTypes.Has(c.URN.QualifiedType().String()) + var outputs *unstructured.Unstructured var client dynamic.ResourceInterface err := retry.SleepingRetry( @@ -187,7 +190,7 @@ func Creation(c CreateConfig) (*unstructured.Unstructured, error) { } } - if c.ServerSideApply { + if c.ServerSideApply && patchResource { force := patchForce(c.Inputs, nil, c.Preview) options := metav1.PatchOptions{ FieldManager: c.FieldManager, @@ -251,7 +254,7 @@ func Creation(c CreateConfig) (*unstructured.Unstructured, error) { id := fmt.Sprintf("%s/%s", c.Inputs.GetAPIVersion(), c.Inputs.GetKind()) if awaiter, exists := awaiters[id]; exists { if metadata.SkipAwaitLogic(c.Inputs) { - logger.V(1).Infof("Skipping await logic for %v", c.Inputs.GetName()) + logger.V(1).Infof("Skipping await logic for %v", outputs.GetName()) } else { if awaiter.awaitCreation != nil { conf := createAwaitConfig{ @@ -260,7 +263,6 @@ func Creation(c CreateConfig) (*unstructured.Unstructured, error) { urn: c.URN, initialAPIVersion: c.InitialAPIVersion, clientSet: c.ClientSet, - currentInputs: c.Inputs, currentOutputs: outputs, logger: c.DedupLogger, timeout: c.Timeout, @@ -280,7 +282,7 @@ func Creation(c CreateConfig) (*unstructured.Unstructured, error) { // If the client fails to get the live object for some reason, DO NOT return the error. This // will leak the fact that the object was successfully created. Instead, fall back to the // last-seen live object. 
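The theme of this first patch is visible in the hunks above: every await and lookup path now keys off the returned `outputs` rather than the user-supplied inputs. The motivation is `metadata.generateName`, where the final name exists only on the object the API server returns. A minimal sketch of the distinction, assuming nothing beyond `k8s.io/apimachinery` (the generated suffix is a made-up value):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// Inputs as a user might write them: metadata.generateName set, no name.
	inputs := &unstructured.Unstructured{Object: map[string]any{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]any{"generateName": "app-config-"},
	}}

	// outputs stands in for the object the API server returns from create,
	// which has a server-generated metadata.name filled in.
	outputs := inputs.DeepCopy()
	outputs.SetName("app-config-x7k2q")

	fmt.Printf("inputs name: %q\n", inputs.GetName())   // "" — unusable for a Get or watch
	fmt.Printf("outputs name: %q\n", outputs.GetName()) // "app-config-x7k2q"
}
```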
- live, err := client.Get(c.Context, c.Inputs.GetName(), metav1.GetOptions{}) + live, err := client.Get(c.Context, outputs.GetName(), metav1.GetOptions{}) if err != nil { return outputs, nil } @@ -306,7 +308,7 @@ func Read(c ReadConfig) (*unstructured.Unstructured, error) { id := fmt.Sprintf("%s/%s", outputs.GetAPIVersion(), outputs.GetKind()) if awaiter, exists := awaiters[id]; exists { if metadata.SkipAwaitLogic(c.Inputs) { - logger.V(1).Infof("Skipping await logic for %v", c.Inputs.GetName()) + logger.V(1).Infof("Skipping await logic for %v", outputs.GetName()) } else { if awaiter.awaitRead != nil { conf := createAwaitConfig{ @@ -315,7 +317,6 @@ func Read(c ReadConfig) (*unstructured.Unstructured, error) { urn: c.URN, initialAPIVersion: c.InitialAPIVersion, clientSet: c.ClientSet, - currentInputs: c.Inputs, currentOutputs: outputs, logger: c.DedupLogger, clusterVersion: c.ClusterVersion, @@ -353,14 +354,14 @@ func Read(c ReadConfig) (*unstructured.Unstructured, error) { // [3]: // https://kubernetes.io/docs/reference/using-api/server-side-apply func Update(c UpdateConfig) (*unstructured.Unstructured, error) { - client, err := c.ClientSet.ResourceClientForObject(c.Inputs) + client, err := c.ClientSet.ResourceClientForObject(c.OldOutputs) if err != nil { return nil, err } // Get the "live" version of the last submitted object. This is necessary because the server may // have populated some fields automatically, updated status fields, and so on. - liveOldObj, err := client.Get(c.Context, c.Previous.GetName(), metav1.GetOptions{}) + liveOldObj, err := client.Get(c.Context, c.OldOutputs.GetName(), metav1.GetOptions{}) if err != nil { return nil, err } @@ -380,7 +381,7 @@ func Update(c UpdateConfig) (*unstructured.Unstructured, error) { id := fmt.Sprintf("%s/%s", c.Inputs.GetAPIVersion(), c.Inputs.GetKind()) if awaiter, exists := awaiters[id]; exists { if metadata.SkipAwaitLogic(c.Inputs) { - logger.V(1).Infof("Skipping await logic for %v", c.Inputs.GetName()) + logger.V(1).Infof("Skipping await logic for %v", currentOutputs.GetName()) } else { if awaiter.awaitUpdate != nil { conf := updateAwaitConfig{ @@ -390,13 +391,11 @@ func Update(c UpdateConfig) (*unstructured.Unstructured, error) { urn: c.URN, initialAPIVersion: c.InitialAPIVersion, clientSet: c.ClientSet, - currentInputs: c.Inputs, currentOutputs: currentOutputs, logger: c.DedupLogger, timeout: c.Timeout, clusterVersion: c.ClusterVersion, }, - lastInputs: c.Previous, lastOutputs: liveOldObj, } waitErr := awaiter.awaitUpdate(conf) @@ -411,12 +410,12 @@ func Update(c UpdateConfig) (*unstructured.Unstructured, error) { gvk := c.Inputs.GroupVersionKind() logger.V(3).Infof("Resource %s/%s/%s '%s.%s' patched and updated", gvk.Group, gvk.Version, - gvk.Kind, c.Inputs.GetNamespace(), c.Inputs.GetName()) + gvk.Kind, c.Inputs.GetNamespace(), currentOutputs.GetName()) // If the client fails to get the live object for some reason, DO NOT return the error. This // will leak the fact that the object was successfully created. Instead, fall back to the // last-seen live object. - live, err := client.Get(c.Context, c.Inputs.GetName(), metav1.GetOptions{}) + live, err := client.Get(c.Context, currentOutputs.GetName(), metav1.GetOptions{}) if err != nil { return currentOutputs, nil } @@ -450,7 +449,7 @@ func csaUpdate(c *UpdateConfig, liveOldObj *unstructured.Unstructured, client dy // optimistically rather than failing the update. 
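`csaUpdate` (continued below) hands `c.OldInputs`, `c.Inputs`, and the live object to `openapi.PatchForResourceUpdate` for a three-way merge. A sketch of the JSON-merge-patch fallback using the upstream `jsonmergepatch` helper — roughly what that function falls back to when no strategic-merge schema is available; the JSON literals stand in for real objects:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/jsonmergepatch"
)

func main() {
	// original: the previously recorded inputs (c.OldInputs).
	original := []byte(`{"spec":{"replicas":2,"paused":false}}`)
	// modified: the new inputs (c.Inputs).
	modified := []byte(`{"spec":{"replicas":3,"paused":false}}`)
	// current: the live object (liveOldObj), including server-set fields.
	current := []byte(`{"spec":{"replicas":2,"paused":false,"progressDeadlineSeconds":600}}`)

	patch, err := jsonmergepatch.CreateThreeWayJSONMergePatch(original, modified, current)
	if err != nil {
		panic(err)
	}
	// Only the user-intended change survives; server-set fields are untouched.
	fmt.Println(string(patch)) // expected: {"spec":{"replicas":3}}
}
```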
_ = handleCSAIgnoreFields(c, liveOldObj) // Create merge patch (prefer strategic merge patch, fall back to JSON merge patch). - patch, patchType, _, err := openapi.PatchForResourceUpdate(c.Resources, c.Previous, c.Inputs, liveOldObj) + patch, patchType, _, err := openapi.PatchForResourceUpdate(c.Resources, c.OldInputs, c.Inputs, liveOldObj) if err != nil { return nil, err } @@ -462,7 +461,7 @@ func csaUpdate(c *UpdateConfig, liveOldObj *unstructured.Unstructured, client dy options.DryRun = []string{metav1.DryRunAll} } - return client.Patch(c.Context, c.Inputs.GetName(), patchType, patch, options) + return client.Patch(c.Context, liveOldObj.GetName(), patchType, patch, options) } // ssaUpdate handles the logic for updating a resource using server-side apply. @@ -490,7 +489,7 @@ func ssaUpdate(c *UpdateConfig, liveOldObj *unstructured.Unstructured, client dy options.DryRun = []string{metav1.DryRunAll} } - currentOutputs, err := client.Patch(c.Context, c.Inputs.GetName(), types.ApplyPatchType, objYAML, options) + currentOutputs, err := client.Patch(c.Context, liveOldObj.GetName(), types.ApplyPatchType, objYAML, options) if err != nil { if errors.IsConflict(err) { err = fmt.Errorf("Server-Side Apply field conflict detected. See %s for troubleshooting help\n: %w", @@ -543,7 +542,7 @@ func handleSSAIgnoreFields(c *UpdateConfig, liveOldObj *unstructured.Unstructure for _, f := range managedFields { s, err := fluxssa.FieldsToSet(*f.FieldsV1) if err != nil { - return fmt.Errorf("unable to parse managed fields from resource %q into fieldpath.Set: %w", c.Inputs.GetName(), err) + return fmt.Errorf("unable to parse managed fields from resource %q into fieldpath.Set: %w", liveOldObj.GetName(), err) } switch f.Manager { @@ -706,18 +705,18 @@ func Deletion(c DeleteConfig) error { } // Obtain client for the resource being deleted. - client, err := c.ClientSet.ResourceClientForObject(c.Inputs) + client, err := c.ClientSet.ResourceClientForObject(c.Outputs) if err != nil { return nilIfGVKDeleted(err) } patchResource := kinds.IsPatchURN(c.URN) if c.ServerSideApply && patchResource { - err = ssa.Relinquish(c.Context, client, c.Inputs, c.FieldManager) + err = ssa.Relinquish(c.Context, client, c.Outputs, c.FieldManager) return err } - timeout := metadata.TimeoutDuration(c.Timeout, c.Inputs, 300) + timeout := metadata.TimeoutDuration(c.Timeout, c.Outputs, 300) timeoutSeconds := int64(timeout.Seconds()) listOpts := metav1.ListOptions{ FieldSelector: fields.OneTermEqualSelector("metadata.name", c.Name).String(), @@ -739,10 +738,10 @@ func Deletion(c DeleteConfig) error { // if we don't have an entry for the resource type; in the event that we do, but the await logic // is blank, simply do nothing instead of logging. 
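`Deletion` (in the hunks above) now derives the awaiter id, timeout, and skip-await check from `c.Outputs`, and pins the watch to the recorded output name with a field selector. That selector construction in isolation (the name value is illustrative):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	timeoutSeconds := int64(300)
	// Watch exactly one object by the name recorded in the outputs; for
	// generateName-based resources this is the server-assigned name.
	listOpts := metav1.ListOptions{
		FieldSelector:  fields.OneTermEqualSelector("metadata.name", "app-config-x7k2q").String(),
		TimeoutSeconds: &timeoutSeconds,
	}
	fmt.Println(listOpts.FieldSelector) // metadata.name=app-config-x7k2q
}
```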
var waitErr error - id := fmt.Sprintf("%s/%s", c.Inputs.GetAPIVersion(), c.Inputs.GetKind()) + id := fmt.Sprintf("%s/%s", c.Outputs.GetAPIVersion(), c.Outputs.GetKind()) if awaiter, exists := awaiters[id]; exists && awaiter.awaitDeletion != nil { - if metadata.SkipAwaitLogic(c.Inputs) { - logger.V(1).Infof("Skipping await logic for %v", c.Inputs.GetName()) + if metadata.SkipAwaitLogic(c.Outputs) { + logger.V(1).Infof("Skipping await logic for %v", c.Name) } else { waitErr = awaiter.awaitDeletion(deleteAwaitConfig{ createAwaitConfig: createAwaitConfig{ @@ -751,7 +750,7 @@ func Deletion(c DeleteConfig) error { urn: c.URN, initialAPIVersion: c.InitialAPIVersion, clientSet: c.ClientSet, - currentInputs: c.Inputs, + currentOutputs: c.Outputs, logger: c.DedupLogger, timeout: c.Timeout, clusterVersion: c.ClusterVersion, diff --git a/provider/pkg/await/awaiters.go b/provider/pkg/await/awaiters.go index 7dbde44d25..48ac9d5a89 100644 --- a/provider/pkg/await/awaiters.go +++ b/provider/pkg/await/awaiters.go @@ -51,7 +51,6 @@ type createAwaitConfig struct { initialAPIVersion string logger *logging.DedupLogger clientSet *clients.DynamicClientSet - currentInputs *unstructured.Unstructured currentOutputs *unstructured.Unstructured timeout float64 clusterVersion *cluster.ServerVersion @@ -73,7 +72,6 @@ func (cac *createAwaitConfig) logMessage(message checkerlog.Message) { // reasonably efficient. type updateAwaitConfig struct { createAwaitConfig - lastInputs *unstructured.Unstructured lastOutputs *unstructured.Unstructured } @@ -301,19 +299,19 @@ func untilAppsDeploymentDeleted(config deleteAwaitConfig) error { specReplicas, _ := deploymentSpecReplicas(d) return watcher.RetryableError( - fmt.Errorf("deployment %q still exists (%d / %d replicas exist)", config.currentInputs.GetName(), + fmt.Errorf("deployment %q still exists (%d / %d replicas exist)", d.GetName(), currReplicas, specReplicas)) } // Wait until all replicas are gone. 10 minutes should be enough for ~10k replicas. - timeout := metadata.TimeoutDuration(config.timeout, config.currentInputs, 600) - err := watcher.ForObject(config.ctx, config.clientForResource, config.currentInputs.GetName()). + timeout := metadata.TimeoutDuration(config.timeout, config.currentOutputs, 600) + err := watcher.ForObject(config.ctx, config.clientForResource, config.currentOutputs.GetName()). RetryUntil(deploymentMissing, timeout) if err != nil { return err } - logger.V(3).Infof("Deployment '%s' deleted", config.currentInputs.GetName()) + logger.V(3).Infof("Deployment '%s' deleted", config.currentOutputs.GetName()) return nil } @@ -344,19 +342,19 @@ func untilAppsStatefulSetDeleted(config deleteAwaitConfig) error { specReplicas, _ := specReplicas(d) return watcher.RetryableError( - fmt.Errorf("StatefulSet %q still exists (%d / %d replicas exist)", config.currentInputs.GetName(), + fmt.Errorf("StatefulSet %q still exists (%d / %d replicas exist)", d.GetName(), currReplicas, specReplicas)) } // Wait until all replicas are gone. 10 minutes should be enough for ~10k replicas. - timeout := metadata.TimeoutDuration(config.timeout, config.currentInputs, 600) - err := watcher.ForObject(config.ctx, config.clientForResource, config.currentInputs.GetName()). + timeout := metadata.TimeoutDuration(config.timeout, config.currentOutputs, 600) + err := watcher.ForObject(config.ctx, config.clientForResource, config.currentOutputs.GetName()). 
RetryUntil(statefulsetmissing, timeout) if err != nil { return err } - logger.V(3).Infof("StatefulSet %q deleted", config.currentInputs.GetName()) + logger.V(3).Infof("StatefulSet %q deleted", config.currentOutputs.GetName()) return nil } @@ -375,12 +373,12 @@ func untilBatchV1JobDeleted(config deleteAwaitConfig) error { return err } - e := fmt.Errorf("job %q still exists", config.currentInputs.GetName()) + e := fmt.Errorf("job %q still exists", pod.GetName()) return watcher.RetryableError(e) } - timeout := metadata.TimeoutDuration(config.timeout, config.currentInputs, 300) - return watcher.ForObject(config.ctx, config.clientForResource, config.currentInputs.GetName()). + timeout := metadata.TimeoutDuration(config.timeout, config.currentOutputs, 300) + return watcher.ForObject(config.ctx, config.clientForResource, config.currentOutputs.GetName()). RetryUntil(jobMissingOrKilled, timeout) } @@ -396,22 +394,22 @@ func untilCoreV1NamespaceDeleted(config deleteAwaitConfig) error { return nil } else if err != nil { logger.V(3).Infof("Received error deleting namespace %q: %#v", - config.currentInputs.GetName(), err) + ns.GetName(), err) return err } statusPhase, _ := openapi.Pluck(ns.Object, "status", "phase") - logger.V(3).Infof("Namespace %q status received: %#v", config.currentInputs.GetName(), statusPhase) + logger.V(3).Infof("Namespace %q status received: %#v", ns.GetName(), statusPhase) if statusPhase == "" { return nil } return watcher.RetryableError(fmt.Errorf("namespace %q still exists (%v)", - config.currentInputs.GetName(), statusPhase)) + ns.GetName(), statusPhase)) } - timeout := metadata.TimeoutDuration(config.timeout, config.currentInputs, 300) - return watcher.ForObject(config.ctx, config.clientForResource, config.currentInputs.GetName()). + timeout := metadata.TimeoutDuration(config.timeout, config.currentOutputs, 300) + return watcher.ForObject(config.ctx, config.clientForResource, config.currentOutputs.GetName()). RetryUntil(namespaceMissingOrKilled, timeout) } @@ -433,11 +431,11 @@ func untilCoreV1PersistentVolumeInitialized(c createAwaitConfig) error { return statusPhase == statusAvailable || statusPhase == statusBound } - client, err := c.clientSet.ResourceClient(c.currentInputs.GroupVersionKind(), c.currentInputs.GetNamespace()) + client, err := c.clientSet.ResourceClient(c.currentOutputs.GroupVersionKind(), c.currentOutputs.GetNamespace()) if err != nil { return err } - return watcher.ForObject(c.ctx, client, c.currentInputs.GetName()). + return watcher.ForObject(c.ctx, client, c.currentOutputs.GetName()). WatchUntil(pvAvailableOrBound, 5*time.Minute) } @@ -454,11 +452,11 @@ func untilCoreV1PersistentVolumeClaimBound(c createAwaitConfig) error { return statusPhase == statusBound } - client, err := c.clientSet.ResourceClient(c.currentInputs.GroupVersionKind(), c.currentInputs.GetNamespace()) + client, err := c.clientSet.ResourceClient(c.currentOutputs.GroupVersionKind(), c.currentOutputs.GetNamespace()) if err != nil { return err } - return watcher.ForObject(c.ctx, client, c.currentInputs.GetName()). + return watcher.ForObject(c.ctx, client, c.currentOutputs.GetName()). 
WatchUntil(pvcBound, 5*time.Minute) } @@ -478,13 +476,13 @@ func untilCoreV1PodDeleted(config deleteAwaitConfig) error { } statusPhase, _ := openapi.Pluck(pod.Object, "status", "phase") - logger.V(3).Infof("Current state of pod %q: %#v", config.currentInputs.GetName(), statusPhase) - e := fmt.Errorf("pod %q still exists (%v)", config.currentInputs.GetName(), statusPhase) + logger.V(3).Infof("Current state of pod %q: %#v", pod.GetName(), statusPhase) + e := fmt.Errorf("pod %q still exists (%v)", pod.GetName(), statusPhase) return watcher.RetryableError(e) } - timeout := metadata.TimeoutDuration(config.timeout, config.currentInputs, 300) - return watcher.ForObject(config.ctx, config.clientForResource, config.currentInputs.GetName()). + timeout := metadata.TimeoutDuration(config.timeout, config.currentOutputs, 300) + return watcher.ForObject(config.ctx, config.clientForResource, config.currentOutputs.GetName()). RetryUntil(podMissingOrKilled, timeout) } @@ -503,13 +501,13 @@ func untilCoreV1ReplicationControllerInitialized(c createAwaitConfig) error { return openapi.Pluck(rc.Object, "status", "availableReplicas") } - name := c.currentInputs.GetName() + name := c.currentOutputs.GetName() - replicas, _ := openapi.Pluck(c.currentInputs.Object, "spec", "replicas") + replicas, _ := openapi.Pluck(c.currentOutputs.Object, "spec", "replicas") logger.V(3).Infof("Waiting for replication controller %q to schedule '%v' replicas", name, replicas) - client, err := c.clientSet.ResourceClient(c.currentInputs.GroupVersionKind(), c.currentInputs.GetNamespace()) + client, err := c.clientSet.ResourceClient(c.currentOutputs.GroupVersionKind(), c.currentOutputs.GetNamespace()) if err != nil { return err } @@ -525,8 +523,8 @@ func untilCoreV1ReplicationControllerInitialized(c createAwaitConfig) error { // but that means checking each pod status separately (which can be expensive at scale) // as there's no aggregate data available from the API - logger.V(3).Infof("Replication controller %q initialized: %#v", c.currentInputs.GetName(), - c.currentInputs) + logger.V(3).Infof("Replication controller %q initialized: %#v", name, + c.currentOutputs) return nil } @@ -559,18 +557,18 @@ func untilCoreV1ReplicationControllerDeleted(config deleteAwaitConfig) error { return watcher.RetryableError( fmt.Errorf("ReplicationController %q still exists (%d / %d replicas exist)", - config.currentInputs.GetName(), currReplicas, specReplicas)) + rc.GetName(), currReplicas, specReplicas)) } // Wait until all replicas are gone. 10 minutes should be enough for ~10k replicas. - timeout := metadata.TimeoutDuration(config.timeout, config.currentInputs, 600) - err := watcher.ForObject(config.ctx, config.clientForResource, config.currentInputs.GetName()). + timeout := metadata.TimeoutDuration(config.timeout, config.currentOutputs, 600) + err := watcher.ForObject(config.ctx, config.clientForResource, config.currentOutputs.GetName()). 
RetryUntil(rcMissing, timeout) if err != nil { return err } - logger.V(3).Infof("ReplicationController %q deleted", config.currentInputs.GetName()) + logger.V(3).Infof("ReplicationController %q deleted", config.currentOutputs.GetName()) return nil } @@ -589,8 +587,8 @@ func untilCoreV1ResourceQuotaInitialized(c createAwaitConfig) error { hard, hardIsMap := hardRaw.(map[string]any) hardStatus, hardStatusIsMap := hardStatusRaw.(map[string]any) if hardIsMap && hardStatusIsMap && reflect.DeepEqual(hard, hardStatus) { - logger.V(3).Infof("ResourceQuota %q initialized: %#v", c.currentInputs.GetName(), - c.currentInputs) + logger.V(3).Infof("ResourceQuota %q initialized: %#v", quota.GetName(), + quota) return true } logger.V(3).Infof("Quotas don't match after creation.\nExpected: %#v\nGiven: %#v", @@ -598,17 +596,17 @@ func untilCoreV1ResourceQuotaInitialized(c createAwaitConfig) error { return false } - client, err := c.clientSet.ResourceClient(c.currentInputs.GroupVersionKind(), c.currentInputs.GetNamespace()) + client, err := c.clientSet.ResourceClient(c.currentOutputs.GroupVersionKind(), c.currentOutputs.GetNamespace()) if err != nil { return err } - return watcher.ForObject(c.ctx, client, c.currentInputs.GetName()). + return watcher.ForObject(c.ctx, client, c.currentOutputs.GetName()). WatchUntil(rqInitialized, 1*time.Minute) } func untilCoreV1ResourceQuotaUpdated(c updateAwaitConfig) error { - oldSpec, _ := openapi.Pluck(c.lastInputs.Object, "spec") - newSpec, _ := openapi.Pluck(c.currentInputs.Object, "spec") + oldSpec, _ := openapi.Pluck(c.lastOutputs.Object, "spec") + newSpec, _ := openapi.Pluck(c.currentOutputs.Object, "spec") if !reflect.DeepEqual(oldSpec, newSpec) { return untilCoreV1ResourceQuotaInitialized(c.createAwaitConfig) } @@ -626,7 +624,7 @@ func untilCoreV1SecretInitialized(c createAwaitConfig) error { // Some types secrets do not have data available immediately and therefore are not considered initialized where data map is empty. // For example service-account-token as described in the docs: https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/#to-create-additional-api-tokens // - secretType, _ := openapi.Pluck(c.currentInputs.Object, "type") + secretType, _ := openapi.Pluck(c.currentOutputs.Object, "type") // Other secret types are not generated by controller therefore we do not need to create a watcher for them. // nolint:gosec @@ -672,7 +670,7 @@ func untilCoreV1ServiceAccountInitialized(c createAwaitConfig) error { // secrets array (i.e., in addition to the secrets specified by the user). 
// - specSecrets, _ := openapi.Pluck(c.currentInputs.Object, "secrets") + specSecrets, _ := openapi.Pluck(c.currentOutputs.Object, "secrets") var numSpecSecrets int if specSecretsArr, isArr := specSecrets.([]any); isArr { numSpecSecrets = len(specSecretsArr) diff --git a/provider/pkg/await/deployment.go b/provider/pkg/await/deployment.go index ee55a7f7e0..88b828f2bf 100644 --- a/provider/pkg/await/deployment.go +++ b/provider/pkg/await/deployment.go @@ -228,7 +228,7 @@ func (dia *deploymentInitAwaiter) Await() error { aggregateErrorTicker := time.NewTicker(10 * time.Second) defer aggregateErrorTicker.Stop() - timeout := metadata.TimeoutDuration(dia.config.timeout, dia.config.currentInputs, DefaultDeploymentTimeoutMins*60) + timeout := metadata.TimeoutDuration(dia.config.timeout, dia.config.currentOutputs, DefaultDeploymentTimeoutMins*60) return dia.await( deploymentEvents, @@ -248,7 +248,7 @@ func (dia *deploymentInitAwaiter) Read() error { // Get live versions of Deployment, ReplicaSets, and Pods. deployment, err := deploymentClient.Get(dia.config.ctx, - dia.config.currentInputs.GetName(), + dia.config.currentOutputs.GetName(), metav1.GetOptions{}) if err != nil { // IMPORTANT: Do not wrap this error! If this is a 404, the provider need to know so that it @@ -416,7 +416,7 @@ func (dia *deploymentInitAwaiter) checkAndLogStatus() bool { } func (dia *deploymentInitAwaiter) processDeploymentEvent(event watch.Event) { - inputDeploymentName := dia.config.currentInputs.GetName() + currentDeploymentName := dia.config.currentOutputs.GetName() deployment, isUnstructured := event.Object.(*unstructured.Unstructured) if !isUnstructured { @@ -429,7 +429,7 @@ func (dia *deploymentInitAwaiter) processDeploymentEvent(event watch.Event) { dia.deploymentErrors = map[string]string{} // Do nothing if this is not the Deployment we're waiting for. - if deployment.GetName() != inputDeploymentName { + if deployment.GetName() != currentDeploymentName { return } @@ -543,11 +543,11 @@ func (dia *deploymentInitAwaiter) processReplicaSetEvent(event watch.Event) { logger.V(3).Infof("Received update for ReplicaSet %q", rs.GetName()) // Check whether this ReplicaSet was created by our Deployment. - if !isOwnedBy(rs, dia.config.currentInputs) { + if !isOwnedBy(rs, dia.config.currentOutputs) { return } - logger.V(3).Infof("ReplicaSet %q is owned by %q", rs.GetName(), dia.config.currentInputs.GetName()) + logger.V(3).Infof("ReplicaSet %q is owned by %q", rs.GetName(), dia.config.currentOutputs.GetName()) // If Pod was deleted, remove it from our aggregated checkers. 
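The ReplicaSet handling above turns on `isOwnedBy(rs, dia.config.currentOutputs)`: ownership must be established against the outputs, because an owner reference records the server-side name of the Deployment. A simplified sketch of such a check — the provider's actual helper may compare additional fields such as UID:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// ownedBy reports whether child lists owner among its metadata.ownerReferences,
// matching on kind and name.
func ownedBy(child, owner *unstructured.Unstructured) bool {
	for _, ref := range child.GetOwnerReferences() {
		if ref.Kind == owner.GetKind() && ref.Name == owner.GetName() {
			return true
		}
	}
	return false
}

func main() {
	deploy := &unstructured.Unstructured{Object: map[string]any{
		"apiVersion": "apps/v1", "kind": "Deployment",
		"metadata": map[string]any{"name": "nginx"},
	}}
	rs := &unstructured.Unstructured{Object: map[string]any{
		"apiVersion": "apps/v1", "kind": "ReplicaSet",
		"metadata": map[string]any{"name": "nginx-5fbdf85c67"},
	}}
	rs.SetOwnerReferences([]metav1.OwnerReference{{
		APIVersion: "apps/v1", Kind: "Deployment", Name: "nginx",
	}})
	fmt.Println(ownedBy(rs, deploy)) // true
}
```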
generation := rs.GetAnnotations()[revision] @@ -560,9 +560,9 @@ func (dia *deploymentInitAwaiter) processReplicaSetEvent(event watch.Event) { } func (dia *deploymentInitAwaiter) checkReplicaSetStatus() { - inputs := dia.config.currentInputs + outputs := dia.config.currentOutputs - logger.V(3).Infof("Checking ReplicaSet status for Deployment %q", inputs.GetName()) + logger.V(3).Infof("Checking ReplicaSet status for Deployment %q", outputs.GetName()) rs, updatedReplicaSetCreated := dia.replicaSets[dia.replicaSetGeneration] if dia.replicaSetGeneration == "0" || !updatedReplicaSetCreated { @@ -570,14 +570,14 @@ func (dia *deploymentInitAwaiter) checkReplicaSetStatus() { } logger.V(3).Infof("Deployment %q has generation %q, which corresponds to ReplicaSet %q", - inputs.GetName(), dia.replicaSetGeneration, rs.GetName()) + outputs.GetName(), dia.replicaSetGeneration, rs.GetName()) var lastRevision string if outputs := dia.config.lastOutputs; outputs != nil { lastRevision = outputs.GetAnnotations()[revision] } - logger.V(3).Infof("The last generation of Deployment %q was %q", inputs.GetName(), lastRevision) + logger.V(3).Infof("The last generation of Deployment %q was %q", outputs.GetName(), lastRevision) // NOTE: Check `.spec.replicas` in the live `ReplicaSet` instead of the last input `Deployment`, // since this is the plan of record. This protects against (e.g.) a user running `kubectl scale` @@ -686,18 +686,18 @@ func (dia *deploymentInitAwaiter) checkReplicaSetStatus() { } func (dia *deploymentInitAwaiter) changeTriggeredRollout() bool { - if dia.config.lastInputs == nil { + if dia.config.lastOutputs == nil { return true } fields, err := openapi.PropertiesChanged( - dia.config.lastInputs.Object, dia.config.currentInputs.Object, + dia.config.lastOutputs.Object, dia.config.currentOutputs.Object, []string{ ".spec.template.spec", }) if err != nil { logger.V(3).Infof("Failed to check whether Pod template for Deployment %q changed", - dia.config.currentInputs.GetName()) + dia.config.currentOutputs.GetName()) return false } @@ -705,9 +705,9 @@ func (dia *deploymentInitAwaiter) changeTriggeredRollout() bool { } func (dia *deploymentInitAwaiter) checkPersistentVolumeClaimStatus() { - inputs := dia.config.currentInputs + currentOutputs := dia.config.currentOutputs - logger.V(3).Infof("Checking PersistentVolumeClaims status for Deployment %q", inputs.GetName()) + logger.V(3).Infof("Checking PersistentVolumeClaims status for Deployment %q", currentOutputs.GetName()) allPVCsReady := true for _, pvc := range dia.pvcs { @@ -881,31 +881,31 @@ func (dia *deploymentInitAwaiter) makeClients() ( deploymentClient, replicaSetClient, podClient, pvcClient dynamic.ResourceInterface, err error, ) { deploymentClient, err = clients.ResourceClient( - kinds.Deployment, dia.config.currentInputs.GetNamespace(), dia.config.clientSet) + kinds.Deployment, dia.config.currentOutputs.GetNamespace(), dia.config.clientSet) if err != nil { err = errors.Wrapf(err, "Could not make client to watch Deployment %q", - dia.config.currentInputs.GetName()) + dia.config.currentOutputs.GetName()) return nil, nil, nil, nil, err } replicaSetClient, err = clients.ResourceClient( - kinds.ReplicaSet, dia.config.currentInputs.GetNamespace(), dia.config.clientSet) + kinds.ReplicaSet, dia.config.currentOutputs.GetNamespace(), dia.config.clientSet) if err != nil { err = errors.Wrapf(err, "Could not make client to watch ReplicaSets associated with Deployment %q", - dia.config.currentInputs.GetName()) + dia.config.currentOutputs.GetName()) return nil, 
nil, nil, nil, err } podClient, err = clients.ResourceClient( - kinds.Pod, dia.config.currentInputs.GetNamespace(), dia.config.clientSet) + kinds.Pod, dia.config.currentOutputs.GetNamespace(), dia.config.clientSet) if err != nil { err = errors.Wrapf(err, "Could not make client to watch Pods associated with Deployment %q", - dia.config.currentInputs.GetName()) + dia.config.currentOutputs.GetName()) return nil, nil, nil, nil, err } pvcClient, err = clients.ResourceClient( - kinds.PersistentVolumeClaim, dia.config.currentInputs.GetNamespace(), dia.config.clientSet) + kinds.PersistentVolumeClaim, dia.config.currentOutputs.GetNamespace(), dia.config.clientSet) if err != nil { err = errors.Wrapf(err, "Could not make client to watch PVCs associated with Deployment %q", - dia.config.currentInputs.GetName()) + dia.config.currentOutputs.GetName()) return nil, nil, nil, nil, err } diff --git a/provider/pkg/await/deployment_test.go b/provider/pkg/await/deployment_test.go index ee9c3c539c..3ed29850fc 100644 --- a/provider/pkg/await/deployment_test.go +++ b/provider/pkg/await/deployment_test.go @@ -702,14 +702,14 @@ func Test_Apps_Deployment_Without_PersistentVolumeClaims(t *testing.T) { } } -type setLastInputs func(obj *unstructured.Unstructured) +type setLastOutputs func(obj *unstructured.Unstructured) func Test_Apps_Deployment_MultipleUpdates(t *testing.T) { tests := []struct { description string inputs func() *unstructured.Unstructured firstUpdate func(deployments, replicaSets, pods chan watch.Event, timeout chan time.Time, - setLast setLastInputs) + setLast setLastOutputs) secondUpdate func(deployments, replicaSets, pods chan watch.Event, timeout chan time.Time) expectedError error }{ @@ -718,13 +718,13 @@ func Test_Apps_Deployment_MultipleUpdates(t *testing.T) { inputs: regressionDeploymentScaled3Input, firstUpdate: func( deployments, replicaSets, pods chan watch.Event, timeout chan time.Time, - setLast setLastInputs, + setLast setLastOutputs, ) { computed := regressionDeploymentScaled3() deployments <- watchAddedEvent(computed) replicaSets <- watchAddedEvent(regressionReplicaSetScaled3()) - setLast(regressionDeploymentScaled3Input()) + setLast(computed) // Timeout. Success. 
timeout <- time.Now() }, @@ -752,7 +752,7 @@ func Test_Apps_Deployment_MultipleUpdates(t *testing.T) { period := make(chan time.Time) go test.firstUpdate(deployments, replicaSets, pods, timeout, func(obj *unstructured.Unstructured) { - awaiter.config.lastInputs = obj + awaiter.config.lastOutputs = obj }) err := awaiter.await(deployments, replicaSets, pods, pvcs, timeout, period) diff --git a/provider/pkg/await/ingress.go b/provider/pkg/await/ingress.go index 40ce54bcd7..a97dc1e9e9 100644 --- a/provider/pkg/await/ingress.go +++ b/provider/pkg/await/ingress.go @@ -116,7 +116,7 @@ func (iia *ingressInitAwaiter) Await() error { defer close(stopper) informerFactory := informers.NewInformerFactory(iia.config.clientSet, - informers.WithNamespaceOrDefault(iia.config.currentInputs.GetNamespace())) + informers.WithNamespaceOrDefault(iia.config.currentOutputs.GetNamespace())) informerFactory.Start(stopper) ingressEvents := make(chan watch.Event) @@ -144,7 +144,7 @@ func (iia *ingressInitAwaiter) Await() error { } go serviceInformer.Informer().Run(stopper) - timeout := metadata.TimeoutDuration(iia.config.timeout, iia.config.currentInputs, DefaultIngressTimeoutMins*60) + timeout := metadata.TimeoutDuration(iia.config.timeout, iia.config.currentOutputs, DefaultIngressTimeoutMins*60) return iia.await(ingressEvents, serviceEvents, endpointsEvents, make(chan struct{}), time.After(60*time.Second), time.After(timeout)) } @@ -155,7 +155,7 @@ func (iia *ingressInitAwaiter) Read() error { } // Get live versions of Ingress. - ingress, err := ingressClient.Get(iia.config.ctx, iia.config.currentInputs.GetName(), metav1.GetOptions{}) + ingress, err := ingressClient.Get(iia.config.ctx, iia.config.currentOutputs.GetName(), metav1.GetOptions{}) if err != nil { // IMPORTANT: Do not wrap this error! If this is a 404, the provider need to know so that it // can mark the deployment as having been deleted. 
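The awaiters above build their watches through the provider's `informers.NewInformerFactory` wrapper; `job.go` below uses the underlying client-go `dynamicinformer` machinery directly. A standalone sketch of that pattern, assuming a kubeconfig at the default location and the `default` namespace:

```go
package main

import (
	"time"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/dynamic/dynamicinformer"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a kubeconfig at the default location.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := dynamic.NewForConfigOrDie(cfg)

	// Scope the factory to the namespace taken from the object's outputs,
	// mirroring informers.WithNamespaceOrDefault(currentOutputs.GetNamespace()).
	factory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(
		client, 60*time.Second, "default", nil)

	gvr := schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "ingresses"}
	informer := factory.ForResource(gvr).Informer()
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj any) { /* forward to the awaiter's event channel */ },
		UpdateFunc: func(oldObj, newObj any) { /* likewise */ },
	})

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)
}
```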
@@ -288,7 +288,7 @@ func (iia *ingressInitAwaiter) processServiceEvent(event watch.Event) { } func (iia *ingressInitAwaiter) processIngressEvent(event watch.Event) { - inputIngressName := iia.config.currentInputs.GetName() + inputIngressName := iia.config.currentOutputs.GetName() ingress, isUnstructured := event.Object.(*unstructured.Unstructured) if !isUnstructured { @@ -509,25 +509,25 @@ func (iia *ingressInitAwaiter) makeClients() ( ingressClient, endpointsClient, servicesClient dynamic.ResourceInterface, err error, ) { ingressClient, err = clients.ResourceClient( - kinds.Ingress, iia.config.currentInputs.GetNamespace(), iia.config.clientSet) + kinds.Ingress, iia.config.currentOutputs.GetNamespace(), iia.config.clientSet) if err != nil { return nil, nil, nil, errors.Wrapf(err, "Could not make client to watch Ingress %q", - iia.config.currentInputs.GetName()) + iia.config.currentOutputs.GetName()) } endpointsClient, err = clients.ResourceClient( - kinds.Endpoints, iia.config.currentInputs.GetNamespace(), iia.config.clientSet) + kinds.Endpoints, iia.config.currentOutputs.GetNamespace(), iia.config.clientSet) if err != nil { return nil, nil, nil, errors.Wrapf(err, "Could not make client to watch Endpoints associated with Ingress %q", - iia.config.currentInputs.GetName()) + iia.config.currentOutputs.GetName()) } servicesClient, err = clients.ResourceClient( - kinds.Service, iia.config.currentInputs.GetNamespace(), iia.config.clientSet) + kinds.Service, iia.config.currentOutputs.GetNamespace(), iia.config.clientSet) if err != nil { return nil, nil, nil, errors.Wrapf(err, "Could not make client to watch Services associated with Ingress %q", - iia.config.currentInputs.GetName()) + iia.config.currentOutputs.GetName()) } return diff --git a/provider/pkg/await/job.go b/provider/pkg/await/job.go index 76c42a1e6d..b6e8af09f3 100644 --- a/provider/pkg/await/job.go +++ b/provider/pkg/await/job.go @@ -102,7 +102,7 @@ func (jia *jobInitAwaiter) Await() error { defer close(stopper) informerFactory := informers.NewInformerFactory(jia.config.clientSet, - informers.WithNamespaceOrDefault(jia.config.currentInputs.GetNamespace())) + informers.WithNamespaceOrDefault(jia.config.currentOutputs.GetNamespace())) informerFactory.Start(stopper) jobEvents := make(chan watch.Event) @@ -123,7 +123,7 @@ func (jia *jobInitAwaiter) Await() error { podAggregator.Start(podEvents) defer podAggregator.Stop() - timeout := metadata.TimeoutDuration(jia.config.timeout, jia.config.currentInputs, DefaultJobTimeoutMins*60) + timeout := metadata.TimeoutDuration(jia.config.timeout, jia.config.currentOutputs, DefaultJobTimeoutMins*60) for { if jia.ready { return nil @@ -156,18 +156,18 @@ func (jia *jobInitAwaiter) Read() error { stopper := make(chan struct{}) defer close(stopper) - namespace := jia.config.currentInputs.GetNamespace() + namespace := jia.config.currentOutputs.GetNamespace() informerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(jia.config.clientSet.GenericClient, 60*time.Second, namespace, nil) informerFactory.Start(stopper) - jobClient, err := clients.ResourceClient(kinds.Job, jia.config.currentInputs.GetNamespace(), jia.config.clientSet) + jobClient, err := clients.ResourceClient(kinds.Job, jia.config.currentOutputs.GetNamespace(), jia.config.clientSet) if err != nil { return errors.Wrapf(err, "Could not make client to get Job %q", - jia.config.currentInputs.GetName()) + jia.config.currentOutputs.GetName()) } // Get live version of Job. 
- job, err := jobClient.Get(jia.config.ctx, jia.config.currentInputs.GetName(), metav1.GetOptions{}) + job, err := jobClient.Get(jia.config.ctx, jia.config.currentOutputs.GetName(), metav1.GetOptions{}) if err != nil { // IMPORTANT: Do not wrap this error! If this is a 404, the provider need to know so that it // can mark the Pod as having been deleted. @@ -212,7 +212,7 @@ func (jia *jobInitAwaiter) processJobEvent(event watch.Event) error { } // Do nothing if this is not the job we're waiting for. - if job.GetName() != jia.config.currentInputs.GetName() { + if job.GetName() != jia.config.currentOutputs.GetName() { return nil } diff --git a/provider/pkg/await/pod.go b/provider/pkg/await/pod.go index 07a23068e1..d9689a4cf2 100644 --- a/provider/pkg/await/pod.go +++ b/provider/pkg/await/pod.go @@ -150,7 +150,7 @@ func (pia *podInitAwaiter) Await() error { defer close(stopper) informerFactory := informers.NewInformerFactory(pia.config.clientSet, - informers.WithNamespaceOrDefault(pia.config.currentInputs.GetNamespace())) + informers.WithNamespaceOrDefault(pia.config.currentOutputs.GetNamespace())) informerFactory.Start(stopper) podEvents := make(chan watch.Event) @@ -160,7 +160,7 @@ func (pia *podInitAwaiter) Await() error { } go podInformer.Informer().Run(stopper) - timeout := metadata.TimeoutDuration(pia.config.timeout, pia.config.currentInputs, DefaultPodTimeoutMins*60) + timeout := metadata.TimeoutDuration(pia.config.timeout, pia.config.currentOutputs, DefaultPodTimeoutMins*60) for { if pia.ready { return nil @@ -187,14 +187,14 @@ func (pia *podInitAwaiter) Await() error { func (pia *podInitAwaiter) Read() error { podClient, err := clients.ResourceClient( - kinds.Pod, pia.config.currentInputs.GetNamespace(), pia.config.clientSet) + kinds.Pod, pia.config.currentOutputs.GetNamespace(), pia.config.clientSet) if err != nil { return errors.Wrapf(err, "Could not make client to get Pod %q", - pia.config.currentInputs.GetName()) + pia.config.currentOutputs.GetName()) } // Get live version of Pod. - pod, err := podClient.Get(pia.config.ctx, pia.config.currentInputs.GetName(), metav1.GetOptions{}) + pod, err := podClient.Get(pia.config.ctx, pia.config.currentOutputs.GetName(), metav1.GetOptions{}) if err != nil { // IMPORTANT: Do not wrap this error! If this is a 404, the provider need to know so that it // can mark the Pod as having been deleted. @@ -226,7 +226,7 @@ func (pia *podInitAwaiter) processPodEvent(event watch.Event) { } // Do nothing if this is not the pod we're waiting for. 
- if pod.GetName() != pia.config.currentInputs.GetName() { + if pod.GetName() != pia.config.currentOutputs.GetName() { return } diff --git a/provider/pkg/await/service.go b/provider/pkg/await/service.go index 653b7725ba..19ef59f26d 100644 --- a/provider/pkg/await/service.go +++ b/provider/pkg/await/service.go @@ -137,7 +137,7 @@ func (sia *serviceInitAwaiter) Await() error { defer close(stopper) informerFactory := informers.NewInformerFactory(sia.config.clientSet, - informers.WithNamespaceOrDefault(sia.config.currentInputs.GetNamespace())) + informers.WithNamespaceOrDefault(sia.config.currentOutputs.GetNamespace())) informerFactory.Start(stopper) serviceEvents := make(chan watch.Event) @@ -156,7 +156,7 @@ func (sia *serviceInitAwaiter) Await() error { version := cluster.TryGetServerVersion(sia.config.clientSet.DiscoveryClientCached) - timeout := metadata.TimeoutDuration(sia.config.timeout, sia.config.currentInputs, DefaultServiceTimeoutMins*60) + timeout := metadata.TimeoutDuration(sia.config.timeout, sia.config.currentOutputs, DefaultServiceTimeoutMins*60) return sia.await(serviceEvents, endpointsEvents, time.After(timeout), make(chan struct{}), version) } @@ -273,7 +273,7 @@ func (sia *serviceInitAwaiter) await( } func (sia *serviceInitAwaiter) processServiceEvent(event watch.Event) { - inputServiceName := sia.config.currentInputs.GetName() + serviceName := sia.config.currentOutputs.GetName() service, isUnstructured := event.Object.(*unstructured.Unstructured) if !isUnstructured { @@ -283,7 +283,7 @@ func (sia *serviceInitAwaiter) processServiceEvent(event watch.Event) { } // Do nothing if this is not the service we're waiting for. - if service.GetName() != inputServiceName { + if service.GetName() != serviceName { return } @@ -302,14 +302,14 @@ func (sia *serviceInitAwaiter) processServiceEvent(event watch.Event) { lbIngress, _ := openapi.Pluck(service.Object, "status", "loadBalancer", "ingress") status, _ := openapi.Pluck(service.Object, "status") - logger.V(3).Infof("Received status for service %q: %#v", inputServiceName, status) + logger.V(3).Infof("Received status for service %q: %#v", serviceName, status) ing, isSlice := lbIngress.([]any) // Update status of service object so that we can check success. sia.serviceReady = isSlice && len(ing) > 0 logger.V(3).Infof("Waiting for service %q to assign IP/hostname for a load balancer", - inputServiceName) + serviceName) } else { // If it's not type `LoadBalancer`, report success. 
sia.serviceReady = true diff --git a/provider/pkg/await/statefulset.go b/provider/pkg/await/statefulset.go index 40c6c5aaf4..804a42d6ef 100644 --- a/provider/pkg/await/statefulset.go +++ b/provider/pkg/await/statefulset.go @@ -192,7 +192,7 @@ func (sia *statefulsetInitAwaiter) Await() error { defer close(stopper) informerFactory := informers.NewInformerFactory(sia.config.clientSet, - informers.WithNamespaceOrDefault(sia.config.currentInputs.GetNamespace())) + informers.WithNamespaceOrDefault(sia.config.currentOutputs.GetNamespace())) informerFactory.Start(stopper) statefulSetEvents := make(chan watch.Event) @@ -216,7 +216,7 @@ func (sia *statefulsetInitAwaiter) Await() error { aggregateErrorTicker := time.NewTicker(10 * time.Second) defer aggregateErrorTicker.Stop() - timeout := metadata.TimeoutDuration(sia.config.timeout, sia.config.currentInputs, DefaultStatefulSetTimeoutMins*60) + timeout := metadata.TimeoutDuration(sia.config.timeout, sia.config.currentOutputs, DefaultStatefulSetTimeoutMins*60) return sia.await(statefulSetEvents, podEvents, time.After(timeout), aggregateErrorTicker.C) } @@ -229,7 +229,7 @@ func (sia *statefulsetInitAwaiter) Read() error { // Get live versions of StatefulSet and Pods. statefulset, err := statefulSetClient.Get(sia.config.ctx, - sia.config.currentInputs.GetName(), + sia.config.currentOutputs.GetName(), metav1.GetOptions{}) if err != nil { // IMPORTANT: Do not wrap this error! If this is a 404, the provider need to know so that it @@ -343,7 +343,7 @@ func (sia *statefulsetInitAwaiter) checkAndLogStatus() bool { } func (sia *statefulsetInitAwaiter) processStatefulSetEvent(event watch.Event) { - inputStatefulSetName := sia.config.currentInputs.GetName() + inputStatefulSetName := sia.config.currentOutputs.GetName() statefulset, isUnstructured := event.Object.(*unstructured.Unstructured) if !isUnstructured { @@ -509,18 +509,18 @@ func (sia *statefulsetInitAwaiter) makeClients() ( statefulSetClient, podClient dynamic.ResourceInterface, err error, ) { statefulSetClient, err = clients.ResourceClient( - kinds.StatefulSet, sia.config.currentInputs.GetNamespace(), sia.config.clientSet) + kinds.StatefulSet, sia.config.currentOutputs.GetNamespace(), sia.config.clientSet) if err != nil { return nil, nil, errors.Wrapf(err, "Could not make client to watch StatefulSet %q", - sia.config.currentInputs.GetName()) + sia.config.currentOutputs.GetName()) } podClient, err = clients.ResourceClient( - kinds.Pod, sia.config.currentInputs.GetNamespace(), sia.config.clientSet) + kinds.Pod, sia.config.currentOutputs.GetNamespace(), sia.config.clientSet) if err != nil { return nil, nil, errors.Wrapf(err, "Could not make client to watch Pods associated with StatefulSet %q", - sia.config.currentInputs.GetName()) + sia.config.currentOutputs.GetName()) } return statefulSetClient, podClient, nil diff --git a/provider/pkg/await/statefulset_test.go b/provider/pkg/await/statefulset_test.go index 677a988f76..8614c41287 100644 --- a/provider/pkg/await/statefulset_test.go +++ b/provider/pkg/await/statefulset_test.go @@ -266,7 +266,7 @@ func Test_Apps_StatefulSet_MultipleUpdates(t *testing.T) { description string inputs func() *unstructured.Unstructured firstUpdate func(statefulsets, pods chan watch.Event, timeout chan time.Time, - setLast setLastInputs) + setLast setLastOutputs) secondUpdate func(statefulsets, pods chan watch.Event, timeout chan time.Time) firstExpectedError error secondExpectedError error @@ -276,7 +276,7 @@ func Test_Apps_StatefulSet_MultipleUpdates(t *testing.T) { inputs: 
statefulsetFailed, firstUpdate: func( statefulsets, pods chan watch.Event, timeout chan time.Time, - setLast setLastInputs, + setLast setLastOutputs, ) { statefulsets <- watchAddedEvent(statefulsetFailed()) @@ -311,7 +311,7 @@ func Test_Apps_StatefulSet_MultipleUpdates(t *testing.T) { period := make(chan time.Time) go test.firstUpdate(statefulsets, pods, timeout, func(obj *unstructured.Unstructured) { - awaiter.config.lastInputs = obj + awaiter.config.lastOutputs = obj }) err := awaiter.await(statefulsets, pods, timeout, period) diff --git a/provider/pkg/await/util_test.go b/provider/pkg/await/util_test.go index b16cba4d2d..74c52cc86b 100644 --- a/provider/pkg/await/util_test.go +++ b/provider/pkg/await/util_test.go @@ -14,7 +14,6 @@ func mockAwaitConfig(inputs *unstructured.Unstructured) createAwaitConfig { return createAwaitConfig{ ctx: context.Background(), //TODO: complete this mock if needed - currentInputs: inputs, currentOutputs: inputs, logger: logging.NewLogger(context.Background(), nil, ""), } diff --git a/provider/pkg/metadata/naming.go b/provider/pkg/metadata/naming.go index 2cd866c631..e99f921dd8 100644 --- a/provider/pkg/metadata/naming.go +++ b/provider/pkg/metadata/naming.go @@ -31,6 +31,12 @@ func AssignNameIfAutonamable(randomSeed []byte, obj *unstructured.Unstructured, } } + if obj.GetGenerateName() != "" { + // let the Kubernetes API server produce a name. + // TODO assign a computed output? + return + } + if obj.GetName() == "" { prefix := urn.Name() + "-" autoname, err := resource.NewUniqueName(randomSeed, prefix, 0, 0, nil) @@ -43,8 +49,8 @@ func AssignNameIfAutonamable(randomSeed []byte, obj *unstructured.Unstructured, // AdoptOldAutonameIfUnnamed checks if `newObj` has a name, and if not, "adopts" the name of `oldObj` // instead. If `oldObj` was autonamed, then we mark `newObj` as autonamed, too. func AdoptOldAutonameIfUnnamed(newObj, oldObj *unstructured.Unstructured) { - contract.Assertf(oldObj.GetName() != "", "expected nonempty name for object: %s", oldObj) - if newObj.GetName() == "" && IsAutonamed(oldObj) { + if newObj.GetName() == "" && newObj.GetGenerateName() == "" && IsAutonamed(oldObj) { + contract.Assertf(oldObj.GetName() != "", "expected nonempty name for object: %s", oldObj) newObj.SetName(oldObj.GetName()) SetAnnotationTrue(newObj, AnnotationAutonamed) } diff --git a/provider/pkg/provider/provider.go b/provider/pkg/provider/provider.go index 0d40a00fc2..5a79758b77 100644 --- a/provider/pkg/provider/provider.go +++ b/provider/pkg/provider/provider.go @@ -1358,9 +1358,8 @@ func (k *kubeProvider) Check(ctx context.Context, req *pulumirpc.CheckRequest) ( // needs to be `DeleteBeforeReplace`'d. If the resource is marked `DeleteBeforeReplace`, then // `Create` will allocate it a new name later. if len(oldInputs.Object) > 0 { - // NOTE: If old inputs exist, they have a name, either provided by the user or filled in with a + // NOTE: If old inputs exist, they MAY have a name, either provided by the user or filled in with a // previous run of `Check`. - contract.Assertf(oldInputs.GetName() != "", "expected object name to be nonempty: %v", oldInputs) metadata.AdoptOldAutonameIfUnnamed(newInputs, oldInputs) // If the resource has existing state, we only set the "managed-by: pulumi" label if it is already present. This @@ -1692,7 +1691,7 @@ func (k *kubeProvider) Diff(ctx context.Context, req *pulumirpc.DiffRequest) (*p switch newInputs.GetKind() { case "Job": // Fetch current Job status and check point-in-time readiness. Errors are ignored. 
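The `naming.go` hunks above are the heart of `generateName` support: autonaming now stands down whenever the user supplies `.metadata.generateName`, leaving the final name to the API server. A condensed sketch of the resulting decision; the fixed suffix stands in for `resource.NewUniqueName`, and the computed-value checks are omitted:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// assignNameIfAutonamable is a simplified sketch of the logic above: only
// objects with neither .metadata.name nor .metadata.generateName receive a
// Pulumi-generated name; otherwise naming is left to the user or the server.
func assignNameIfAutonamable(obj *unstructured.Unstructured, urnName string) {
	if obj.GetName() != "" || obj.GetGenerateName() != "" {
		return
	}
	obj.SetName(urnName + "-ab12cd3e") // stand-in for resource.NewUniqueName
}

func main() {
	obj := &unstructured.Unstructured{Object: map[string]any{
		"apiVersion": "v1", "kind": "ConfigMap",
		"metadata": map[string]any{"generateName": "cfg-"},
	}}
	assignNameIfAutonamable(obj, "my-config")
	// Prints name="" generateName="cfg-": the API server will pick the name.
	fmt.Printf("name=%q generateName=%q\n", obj.GetName(), obj.GetGenerateName())
}
```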
- if live, err := k.readLiveObject(newInputs); err == nil { + if live, err := k.readLiveObject(oldLive); err == nil { jobChecker := checkjob.NewJobChecker() job, err := clients.FromUnstructured(live) if err == nil { @@ -1714,12 +1713,12 @@ func (k *kubeProvider) Diff(ctx context.Context, req *pulumirpc.DiffRequest) (*p len(replaces) > 0 && // 2. Object is NOT autonamed (i.e., user manually named it, and therefore we can't // auto-generate the name). - !metadata.IsAutonamed(newInputs) && + !(metadata.IsAutonamed(newInputs) || newInputs.GetGenerateName() != "") && // 3. The new, user-specified name is the same as the old name. - newInputs.GetName() == oldLivePruned.GetName() && + newInputs.GetName() == oldLive.GetName() && // 4. The resource is being deployed to the same namespace (i.e., we aren't creating the // object in a new namespace and then deleting the old one). - newInputs.GetNamespace() == oldLivePruned.GetNamespace() + newInputs.GetNamespace() == oldLive.GetNamespace() return &pulumirpc.DiffResponse{ Changes: hasChanges, @@ -1880,6 +1879,7 @@ func (k *kubeProvider) Create( partialErr, isPartialErr := awaitErr.(await.PartialError) if !isPartialErr { // Object creation failed. + return nil, pkgerrors.Wrapf( awaitErr, "resource %s was not successfully created by the Kubernetes API server ", fqObjName(newInputs)) @@ -2327,7 +2327,8 @@ func (k *kubeProvider) Update( Resources: resources, ServerSideApply: k.serverSideApplyMode, }, - Previous: oldLivePruned, + OldInputs: oldLivePruned, + OldOutputs: oldLive, Inputs: newInputs, Timeout: req.Timeout, Preview: req.GetPreview(), @@ -2352,12 +2353,12 @@ func (k *kubeProvider) Update( } var getErr error - initialized, getErr = k.readLiveObject(newInputs) + initialized, getErr = k.readLiveObject(oldLive) if getErr != nil { // Object update/creation failed. return nil, pkgerrors.Wrapf( awaitErr, "update of resource %s failed because the Kubernetes API server "+ - "reported that it failed to fully initialize or become live", fqObjName(newInputs)) + "reported that it failed to fully initialize or become live", fqObjName(oldLive)) } // If we get here, resource successfully registered with the API server, but failed to // initialize. @@ -2382,7 +2383,7 @@ func (k *kubeProvider) Update( fqObjName(initialized), pkgerrors.Wrapf( awaitErr, "the Kubernetes API server reported that %q failed to fully initialize "+ - "or become live", fqObjName(newInputs)), + "or become live", fqObjName(initialized)), inputsAndComputed, nil) } @@ -2390,12 +2391,12 @@ func (k *kubeProvider) Update( if k.serverSideApplyMode { // For non-preview updates, drop the old fieldManager if the value changes. if !req.GetPreview() && fieldManagerOld != fieldManager { - client, err := k.clientSet.ResourceClientForObject(newInputs) + client, err := k.clientSet.ResourceClientForObject(initialized) if err != nil { return nil, err } - err = ssa.Relinquish(k.canceler.context, client, newInputs, fieldManagerOld) + err = ssa.Relinquish(k.canceler.context, client, initialized, fieldManagerOld) if err != nil { return nil, err } @@ -2482,7 +2483,7 @@ func (k *kubeProvider) Delete(ctx context.Context, req *pulumirpc.DeleteRequest) Resources: resources, ServerSideApply: k.serverSideApplyMode, }, - Inputs: current, + Outputs: current, Name: name, Timeout: req.Timeout, } From 5b51506200084e00847bb0b138731dc307198419 Mon Sep 17 00:00:00 2001 From: Eron Wright Date: Thu, 18 Jan 2024 09:41:11 -0800 Subject: [PATCH 02/11] replace on change to generateName. 
disallow generateName for SSA and for yaml mode. use resource name rather than object name in certain error messages. --- provider/pkg/metadata/naming.go | 15 +++++------- provider/pkg/provider/diff.go | 1 + provider/pkg/provider/provider.go | 40 ++++++++++++++++++++----------- 3 files changed, 33 insertions(+), 23 deletions(-) diff --git a/provider/pkg/metadata/naming.go b/provider/pkg/metadata/naming.go index e99f921dd8..8b59ba4478 100644 --- a/provider/pkg/metadata/naming.go +++ b/provider/pkg/metadata/naming.go @@ -24,20 +24,17 @@ import ( // All auto-named resources get the annotation `pulumi.com/autonamed` for tooling purposes. func AssignNameIfAutonamable(randomSeed []byte, obj *unstructured.Unstructured, propMap resource.PropertyMap, urn resource.URN) { contract.Assertf(urn.Name() != "", "expected non-empty name in URN: %s", urn) - // Check if the .metadata.name is set and is a computed value. If so, do not auto-name. if md, ok := propMap["metadata"].V.(resource.PropertyMap); ok { + // Check if the .metadata.name is set and is a computed value. If so, do not auto-name. if name, ok := md["name"]; ok && name.IsComputed() { return } + // Check if the .metadata.generateName is set and is a computed value. If so, do not auto-name. + if name, ok := md["generateName"]; ok && name.IsComputed() { + return + } } - - if obj.GetGenerateName() != "" { - // let the Kubernetes API server produce a name. - // TODO assign a computed output? - return - } - - if obj.GetName() == "" { + if obj.GetGenerateName() == "" && obj.GetName() == "" { prefix := urn.Name() + "-" autoname, err := resource.NewUniqueName(randomSeed, prefix, 0, 0, nil) contract.AssertNoErrorf(err, "unexpected error while creating NewUniqueName") diff --git a/provider/pkg/provider/diff.go b/provider/pkg/provider/diff.go index 6bec440964..603ddf0144 100644 --- a/provider/pkg/provider/diff.go +++ b/provider/pkg/provider/diff.go @@ -206,6 +206,7 @@ var statefulSet = append( func metadataForceNewProperties(prefix string) properties { return properties{ prefix + ".name", + prefix + ".generateName", prefix + ".namespace", } } diff --git a/provider/pkg/provider/provider.go b/provider/pkg/provider/provider.go index 5a79758b77..2425ae8b41 100644 --- a/provider/pkg/provider/provider.go +++ b/provider/pkg/provider/provider.go @@ -1303,7 +1303,7 @@ func (k *kubeProvider) Check(ctx context.Context, req *pulumirpc.CheckRequest) ( } if !k.serverSideApplyMode && kinds.IsPatchURN(urn) { - return nil, fmt.Errorf("patch resources require Server-side Apply mode, which is enabled using the " + + return nil, fmt.Errorf("patch resources require Server-Side Apply mode, which is enabled using the " + "`enableServerSideApply` Provider config") } @@ -1343,9 +1343,15 @@ func (k *kubeProvider) Check(ctx context.Context, req *pulumirpc.CheckRequest) ( if k.serverSideApplyMode && kinds.IsPatchURN(urn) { if len(newInputs.GetName()) == 0 { - return nil, fmt.Errorf("patch resources require the resource `.metadata.name` to be set") + return nil, fmt.Errorf("patch resources require the `.metadata.name` field to be set") } } + if k.serverSideApplyMode && newInputs.GetGenerateName() != "" { + return nil, fmt.Errorf("the `.metadata.generateName` field is not supported in Server-Side Apply mode") + } + if k.yamlRenderMode && newInputs.GetGenerateName() != "" { + return nil, fmt.Errorf("the `.metadata.generateName` field is not supported in YAML rendering mode") + } var failures []*pulumirpc.CheckFailure @@ -1359,7 +1365,7 @@ func (k *kubeProvider) Check(ctx 
context.Context, req *pulumirpc.CheckRequest) ( // `Create` will allocate it a new name later. if len(oldInputs.Object) > 0 { // NOTE: If old inputs exist, they MAY have a name, either provided by the user or filled in with a - // previous run of `Check`. + // previous run of `Check`. They wouldn't have a name if `generateName` was used. metadata.AdoptOldAutonameIfUnnamed(newInputs, oldInputs) // If the resource has existing state, we only set the "managed-by: pulumi" label if it is already present. This @@ -1616,15 +1622,15 @@ func (k *kubeProvider) Diff(ctx context.Context, req *pulumirpc.DiffRequest) (*p patch, err = k.inputPatch(oldLivePruned, newInputs) if err != nil { return nil, pkgerrors.Wrapf( - err, "Failed to check for changes in resource %s/%s", newInputs.GetNamespace(), newInputs.GetName()) + err, "Failed to check for changes in resource %s", urn.Name()) } patchObj := map[string]any{} if err = json.Unmarshal(patch, &patchObj); err != nil { return nil, pkgerrors.Wrapf( - err, "Failed to check for changes in resource %s/%s because of an error serializing "+ + err, "Failed to check for changes in resource %s because of an error serializing "+ "the JSON patch describing resource changes", - newInputs.GetNamespace(), newInputs.GetName()) + urn.Name()) } hasChanges := pulumirpc.DiffResponse_DIFF_NONE @@ -1639,9 +1645,9 @@ func (k *kubeProvider) Diff(ctx context.Context, req *pulumirpc.DiffRequest) (*p } if detailedDiff, err = convertPatchToDiff(patchObj, patchBase, newInputs.Object, oldLivePruned.Object, forceNewFields...); err != nil { return nil, pkgerrors.Wrapf( - err, "Failed to check for changes in resource %s/%s because of an error "+ + err, "Failed to check for changes in resource %s because of an error "+ "converting JSON patch describing resource changes to a diff", - newInputs.GetNamespace(), newInputs.GetName()) + urn.Name()) } // Remove any ignored changes from the computed diff. @@ -1871,10 +1877,15 @@ func (k *kubeProvider) Create( // If it's a "no match" error, this is probably a CustomResource with no corresponding // CustomResourceDefinition. This usually happens if the CRD was not created, and we // print a more useful error message in this case. + gvk, err := k.gvkFromURN(urn) + if err != nil { + return nil, err + } + gvkStr := gvk.GroupVersion().String() + "/" + gvk.Kind return nil, pkgerrors.Wrapf( - awaitErr, "creation of resource %s failed because the Kubernetes API server "+ + awaitErr, "creation of resource %s with kind %s failed because the Kubernetes API server "+ "reported that the apiVersion for this resource does not exist. "+ - "Verify that any required CRDs have been created", fqObjName(newInputs)) + "Verify that any required CRDs have been created", urn.Name(), gvkStr) } partialErr, isPartialErr := awaitErr.(await.PartialError) if !isPartialErr { @@ -1882,7 +1893,7 @@ func (k *kubeProvider) Create( return nil, pkgerrors.Wrapf( awaitErr, - "resource %s was not successfully created by the Kubernetes API server ", fqObjName(newInputs)) + "resource %s was not successfully created by the Kubernetes API server ", urn.Name()) } // Resource was created, but failed to become fully initialized. 
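The Check-time guards added above restrict `generateName` to client-side apply mode: Server-Side Apply needs a concrete name to address the apply patch, and YAML rendering has no API server to generate one. The guard in isolation, mirroring the two error messages from the patch:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// checkGenerateName is a sketch of the validation added in Check.
func checkGenerateName(obj *unstructured.Unstructured, serverSideApply, yamlRender bool) error {
	if obj.GetGenerateName() == "" {
		return nil
	}
	if serverSideApply {
		return fmt.Errorf("the `.metadata.generateName` field is not supported in Server-Side Apply mode")
	}
	if yamlRender {
		return fmt.Errorf("the `.metadata.generateName` field is not supported in YAML rendering mode")
	}
	return nil
}

func main() {
	obj := &unstructured.Unstructured{Object: map[string]any{
		"apiVersion": "v1", "kind": "ConfigMap",
		"metadata": map[string]any{"generateName": "cfg-"},
	}}
	fmt.Println(checkGenerateName(obj, true, false))  // SSA: rejected
	fmt.Println(checkGenerateName(obj, false, false)) // CSA: ok (<nil>)
}
```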
@@ -1915,7 +1926,7 @@ func (k *kubeProvider) Create( fqObjName(initialized), pkgerrors.Wrapf( awaitErr, "resource %s was successfully created, but the Kubernetes API server "+ - "reported that it failed to fully initialize or become live", fqObjName(newInputs)), + "reported that it failed to fully initialize or become live", urn.Name()), inputsAndComputed, nil) } @@ -2349,7 +2360,7 @@ func (k *kubeProvider) Update( return nil, pkgerrors.Wrapf( awaitErr, "update of resource %s failed because the Kubernetes API server "+ "reported that the apiVersion for this resource does not exist. "+ - "Verify that any required CRDs have been created", fqObjName(newInputs)) + "Verify that any required CRDs have been created", urn.Name()) } var getErr error @@ -2358,7 +2369,7 @@ func (k *kubeProvider) Update( // Object update/creation failed. return nil, pkgerrors.Wrapf( awaitErr, "update of resource %s failed because the Kubernetes API server "+ - "reported that it failed to fully initialize or become live", fqObjName(oldLive)) + "reported that it failed to fully initialize or become live", urn.Name()) } // If we get here, resource successfully registered with the API server, but failed to // initialize. @@ -3236,6 +3247,7 @@ func renderYaml(resource *unstructured.Unstructured, yamlDirectory string) error // renderPathForResource determines the appropriate YAML render path depending on the resource kind. func renderPathForResource(resource *unstructured.Unstructured, yamlDirectory string) string { + contract.Assertf(resource.GetName() != "", "expected object name to be nonempty: %v", resource) crdDirectory := filepath.Join(yamlDirectory, "0-crd") manifestDirectory := filepath.Join(yamlDirectory, "1-manifest") From 497f0bba81ab381d284ce486e2e002511e8b96ea Mon Sep 17 00:00:00 2001 From: Eron Wright Date: Thu, 18 Jan 2024 10:07:09 -0800 Subject: [PATCH 03/11] assertions --- provider/pkg/provider/provider.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/provider/pkg/provider/provider.go b/provider/pkg/provider/provider.go index 2425ae8b41..3a4f17a643 100644 --- a/provider/pkg/provider/provider.go +++ b/provider/pkg/provider/provider.go @@ -1566,6 +1566,7 @@ func (k *kubeProvider) Diff(ctx context.Context, req *pulumirpc.DiffRequest) (*p newInputs := propMapToUnstructured(newResInputs) oldInputs, oldLive := parseCheckpointObject(oldState) + contract.Assertf(oldLive.GetName() != "", "expected live object name to be nonempty: %v", oldLive) oldInputs, err = normalizeInputs(oldInputs) if err != nil { @@ -1890,7 +1891,6 @@ func (k *kubeProvider) Create( partialErr, isPartialErr := awaitErr.(await.PartialError) if !isPartialErr { // Object creation failed. - return nil, pkgerrors.Wrapf( awaitErr, "resource %s was not successfully created by the Kubernetes API server ", urn.Name()) @@ -1899,6 +1899,7 @@ func (k *kubeProvider) Create( // Resource was created, but failed to become fully initialized. initialized = partialErr.Object() } + contract.Assertf(initialized.GetName() != "", "expected live object name to be nonempty: %v", initialized) // We need to delete the empty status field returned from the API server if we are in // preview mode. 
Having the status field set will cause a panic during preview if the Pulumi @@ -2616,6 +2617,7 @@ func (k *kubeProvider) gvkFromURN(urn resource.URN) (schema.GroupVersionKind, er } func (k *kubeProvider) readLiveObject(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { + contract.Assertf(obj.GetName() != "", "expected object name to be nonempty: %v", obj) rc, err := k.clientSet.ResourceClientForObject(obj) if err != nil { return nil, err From db4891f0c09d2dfe10f753e302def40c3fb08cda Mon Sep 17 00:00:00 2001 From: Eron Wright Date: Thu, 18 Jan 2024 10:54:44 -0800 Subject: [PATCH 04/11] use apply for SSA --- provider/pkg/await/await.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/provider/pkg/await/await.go b/provider/pkg/await/await.go index 2f632b1241..e3920d276e 100644 --- a/provider/pkg/await/await.go +++ b/provider/pkg/await/await.go @@ -163,8 +163,6 @@ func Creation(c CreateConfig) (*unstructured.Unstructured, error) { // nolint // https://github.com/kubernetes/kubernetes/blob/54889d581a35acf940d52a8a384cccaa0b597ddc/pkg/kubectl/cmd/apply/apply.go#L94 - patchResource := kinds.PatchQualifiedTypes.Has(c.URN.QualifiedType().String()) - var outputs *unstructured.Unstructured var client dynamic.ResourceInterface err := retry.SleepingRetry( @@ -190,7 +188,7 @@ func Creation(c CreateConfig) (*unstructured.Unstructured, error) { } } - if c.ServerSideApply && patchResource { + if c.ServerSideApply { force := patchForce(c.Inputs, nil, c.Preview) options := metav1.PatchOptions{ FieldManager: c.FieldManager, From e6bb63e380c062a759e4d770f61830f96781b06a Mon Sep 17 00:00:00 2001 From: Eron Wright Date: Thu, 18 Jan 2024 11:47:51 -0800 Subject: [PATCH 05/11] patch to use live object name for logging --- provider/pkg/openapi/openapi.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/provider/pkg/openapi/openapi.go b/provider/pkg/openapi/openapi.go index a0131d0ec6..ba70dbae30 100644 --- a/provider/pkg/openapi/openapi.go +++ b/provider/pkg/openapi/openapi.go @@ -105,19 +105,19 @@ func PatchForResourceUpdate( if knownGV := kinds.KnownGroupVersions.Has(lastSubmitted.GetAPIVersion()); !knownGV { // Use a JSON merge patch for CRD Kinds. patch, patchType, err = MergePatch( - lastSubmitted, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON, + liveOldObj, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON, ) return patch, patchType, lookupPatchMeta, err } // Attempt a three-way strategic merge. patch, patchType, lookupPatchMeta, err = StrategicMergePatch( - resources, lastSubmitted, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON, + resources, liveOldObj, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON, ) // Else, fall back to a three-way JSON merge patch. if err != nil { patch, patchType, err = MergePatch( - lastSubmitted, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON, + liveOldObj, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON, ) } return patch, patchType, lookupPatchMeta, err @@ -126,12 +126,12 @@ func PatchForResourceUpdate( // StrategicMergePatch is a helper to use a three-way strategic merge on a resource version. 
// See for more details: https://tools.ietf.org/html/rfc6902 func StrategicMergePatch( - resources openapi.Resources, lastSubmitted *unstructured.Unstructured, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON []byte, + resources openapi.Resources, liveOld *unstructured.Unstructured, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON []byte, ) (patch []byte, patchType types.PatchType, lookupPatchMeta strategicpatch.LookupPatchMeta, err error) { - gvk := lastSubmitted.GroupVersionKind() + gvk := liveOld.GroupVersionKind() if resSchema := resources.LookupResource(gvk); resSchema != nil { logger.V(1).Infof("Attempting to update '%s' '%s/%s' with strategic merge", - gvk.String(), lastSubmitted.GetNamespace(), lastSubmitted.GetName()) + gvk.String(), liveOld.GetNamespace(), liveOld.GetName()) patch, patchType, lookupPatchMeta, err = strategicMergePatch( gvk, resSchema, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON) } @@ -144,12 +144,12 @@ func StrategicMergePatch( // MergePatch is a helper to use a three-way JSON merge patch on a resource version. // See for more details: https://tools.ietf.org/html/rfc7386 func MergePatch( - lastSubmitted *unstructured.Unstructured, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON []byte, + liveOld *unstructured.Unstructured, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON []byte, ) (patch []byte, patchType types.PatchType, err error) { - gvk := lastSubmitted.GroupVersionKind() + gvk := liveOld.GroupVersionKind() // Fall back to three-way JSON merge patch. logger.V(1).Infof("Attempting to update '%s' '%s/%s' with JSON merge", - gvk.String(), lastSubmitted.GetNamespace(), lastSubmitted.GetName()) + gvk.String(), liveOld.GetNamespace(), liveOld.GetName()) patch, patchType, err = jsonMergePatch(lastSubmittedJSON, currentSubmittedJSON, liveOldJSON) return patch, patchType, err } From 39ea8697d676bf5320c40acbdb73a2dad015963d Mon Sep 17 00:00:00 2001 From: Eron Wright Date: Thu, 18 Jan 2024 13:29:37 -0800 Subject: [PATCH 06/11] do not replace on generateName change. allow implicit to explicit name specification. --- provider/pkg/provider/diff.go | 1 - provider/pkg/provider/provider.go | 10 ++++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/provider/pkg/provider/diff.go b/provider/pkg/provider/diff.go index 603ddf0144..6bec440964 100644 --- a/provider/pkg/provider/diff.go +++ b/provider/pkg/provider/diff.go @@ -206,7 +206,6 @@ var statefulSet = append( func metadataForceNewProperties(prefix string) properties { return properties{ prefix + ".name", - prefix + ".generateName", prefix + ".namespace", } } diff --git a/provider/pkg/provider/provider.go b/provider/pkg/provider/provider.go index 3a4f17a643..1627fde1f4 100644 --- a/provider/pkg/provider/provider.go +++ b/provider/pkg/provider/provider.go @@ -1615,6 +1615,12 @@ func (k *kubeProvider) Diff(ctx context.Context, req *pulumirpc.DiffRequest) (*p if k.serverSideApplyMode && len(oldLivePruned.GetResourceVersion()) > 0 { oldLivePruned.SetResourceVersion("") } + // If a name was specified in the new inputs, be sure that the old live object has the previous name. + // This makes it possible to update the program to set `.metadata.name` to the name that was + // made by `.metadata.generateName` without triggering replacement. + if newInputs.GetName() != "" { + oldLivePruned.SetName(oldLive.GetName()) + } var patch []byte patchBase := oldLivePruned.Object @@ -1719,8 +1725,8 @@ func (k *kubeProvider) Diff(ctx context.Context, req *pulumirpc.DiffRequest) (*p // 1. 
We know resource must be replaced. len(replaces) > 0 && // 2. Object is NOT autonamed (i.e., user manually named it, and therefore we can't - // auto-generate the name). - !(metadata.IsAutonamed(newInputs) || newInputs.GetGenerateName() != "") && + // auto-generate the name on client or server). + !(metadata.IsAutonamed(newInputs) || (newInputs.GetGenerateName() != "" && newInputs.GetName() == "")) && // 3. The new, user-specified name is the same as the old name. newInputs.GetName() == oldLive.GetName() && // 4. The resource is being deployed to the same namespace (i.e., we aren't creating the From c4fd19d601ad497c0ed1655896c939e3c54c3772 Mon Sep 17 00:00:00 2001 From: Eron Wright Date: Thu, 18 Jan 2024 13:56:55 -0800 Subject: [PATCH 07/11] special case for helm release --- provider/pkg/provider/provider.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/provider/pkg/provider/provider.go b/provider/pkg/provider/provider.go index 1627fde1f4..abcb3de198 100644 --- a/provider/pkg/provider/provider.go +++ b/provider/pkg/provider/provider.go @@ -1566,7 +1566,9 @@ func (k *kubeProvider) Diff(ctx context.Context, req *pulumirpc.DiffRequest) (*p newInputs := propMapToUnstructured(newResInputs) oldInputs, oldLive := parseCheckpointObject(oldState) - contract.Assertf(oldLive.GetName() != "", "expected live object name to be nonempty: %v", oldLive) + if !isHelmRelease(urn) { + contract.Assertf(oldLive.GetName() != "", "expected live object name to be nonempty: %v", oldLive) + } oldInputs, err = normalizeInputs(oldInputs) if err != nil { @@ -2134,6 +2136,7 @@ func (k *kubeProvider) Read(ctx context.Context, req *pulumirpc.ReadRequest) (*p // If we get here, resource successfully registered with the API server, but failed to // initialize. } + contract.Assertf(liveObj.GetName() != "", "expected live object name to be nonempty: %v", liveObj) // Prune the live inputs to remove properties that are not present in the program inputs. liveInputs := pruneLiveState(liveObj, oldInputs) @@ -2381,6 +2384,8 @@ func (k *kubeProvider) Update( // If we get here, resource successfully registered with the API server, but failed to // initialize. } + contract.Assertf(initialized.GetName() != "", "expected live object name to be nonempty: %v", initialized) + // Return a new "checkpoint object". 
obj := checkpointObject(newInputs, initialized, newResInputs, initialAPIVersion, fieldManager) inputsAndComputed, err := plugin.MarshalProperties( From a68ecebe355675de651eddc733206e60d36ef17b Mon Sep 17 00:00:00 2001 From: Eron Wright Date: Thu, 18 Jan 2024 18:03:59 -0800 Subject: [PATCH 08/11] bugfix for awaiters --- provider/pkg/await/await.go | 10 ++++++++-- provider/pkg/await/awaiters.go | 24 +++++++++++++----------- provider/pkg/await/deployment.go | 22 +++++++++------------- provider/pkg/await/deployment_test.go | 10 +++++----- provider/pkg/await/ingress.go | 2 +- provider/pkg/await/job.go | 2 +- provider/pkg/await/pod.go | 2 +- provider/pkg/await/service.go | 10 +++++----- provider/pkg/await/statefulset.go | 2 +- provider/pkg/await/statefulset_test.go | 6 +++--- provider/pkg/await/util_test.go | 1 + provider/pkg/provider/provider.go | 1 + 12 files changed, 49 insertions(+), 43 deletions(-) diff --git a/provider/pkg/await/await.go b/provider/pkg/await/await.go index e3920d276e..a447736763 100644 --- a/provider/pkg/await/await.go +++ b/provider/pkg/await/await.go @@ -101,6 +101,7 @@ type UpdateConfig struct { type DeleteConfig struct { ProviderConfig + Inputs *unstructured.Unstructured Outputs *unstructured.Unstructured Name string Timeout float64 @@ -261,6 +262,7 @@ func Creation(c CreateConfig) (*unstructured.Unstructured, error) { urn: c.URN, initialAPIVersion: c.InitialAPIVersion, clientSet: c.ClientSet, + currentInputs: c.Inputs, currentOutputs: outputs, logger: c.DedupLogger, timeout: c.Timeout, @@ -315,6 +317,7 @@ func Read(c ReadConfig) (*unstructured.Unstructured, error) { urn: c.URN, initialAPIVersion: c.InitialAPIVersion, clientSet: c.ClientSet, + currentInputs: c.Inputs, currentOutputs: outputs, logger: c.DedupLogger, clusterVersion: c.ClusterVersion, @@ -389,11 +392,13 @@ func Update(c UpdateConfig) (*unstructured.Unstructured, error) { urn: c.URN, initialAPIVersion: c.InitialAPIVersion, clientSet: c.ClientSet, + currentInputs: c.Inputs, currentOutputs: currentOutputs, logger: c.DedupLogger, timeout: c.Timeout, clusterVersion: c.ClusterVersion, }, + lastInputs: c.OldInputs, lastOutputs: liveOldObj, } waitErr := awaiter.awaitUpdate(conf) @@ -714,7 +719,7 @@ func Deletion(c DeleteConfig) error { return err } - timeout := metadata.TimeoutDuration(c.Timeout, c.Outputs, 300) + timeout := metadata.TimeoutDuration(c.Timeout, c.Inputs, 300) timeoutSeconds := int64(timeout.Seconds()) listOpts := metav1.ListOptions{ FieldSelector: fields.OneTermEqualSelector("metadata.name", c.Name).String(), @@ -738,7 +743,7 @@ func Deletion(c DeleteConfig) error { var waitErr error id := fmt.Sprintf("%s/%s", c.Outputs.GetAPIVersion(), c.Outputs.GetKind()) if awaiter, exists := awaiters[id]; exists && awaiter.awaitDeletion != nil { - if metadata.SkipAwaitLogic(c.Outputs) { + if metadata.SkipAwaitLogic(c.Inputs) { logger.V(1).Infof("Skipping await logic for %v", c.Name) } else { waitErr = awaiter.awaitDeletion(deleteAwaitConfig{ @@ -748,6 +753,7 @@ func Deletion(c DeleteConfig) error { urn: c.URN, initialAPIVersion: c.InitialAPIVersion, clientSet: c.ClientSet, + currentInputs: c.Inputs, currentOutputs: c.Outputs, logger: c.DedupLogger, timeout: c.Timeout, diff --git a/provider/pkg/await/awaiters.go b/provider/pkg/await/awaiters.go index 48ac9d5a89..ea3ff6d8fc 100644 --- a/provider/pkg/await/awaiters.go +++ b/provider/pkg/await/awaiters.go @@ -51,6 +51,7 @@ type createAwaitConfig struct { initialAPIVersion string logger *logging.DedupLogger clientSet *clients.DynamicClientSet + currentInputs 
*unstructured.Unstructured currentOutputs *unstructured.Unstructured timeout float64 clusterVersion *cluster.ServerVersion @@ -72,6 +73,7 @@ func (cac *createAwaitConfig) logMessage(message checkerlog.Message) { // reasonably efficient. type updateAwaitConfig struct { createAwaitConfig + lastInputs *unstructured.Unstructured lastOutputs *unstructured.Unstructured } @@ -304,7 +306,7 @@ func untilAppsDeploymentDeleted(config deleteAwaitConfig) error { } // Wait until all replicas are gone. 10 minutes should be enough for ~10k replicas. - timeout := metadata.TimeoutDuration(config.timeout, config.currentOutputs, 600) + timeout := metadata.TimeoutDuration(config.timeout, config.currentInputs, 600) err := watcher.ForObject(config.ctx, config.clientForResource, config.currentOutputs.GetName()). RetryUntil(deploymentMissing, timeout) if err != nil { @@ -347,7 +349,7 @@ func untilAppsStatefulSetDeleted(config deleteAwaitConfig) error { } // Wait until all replicas are gone. 10 minutes should be enough for ~10k replicas. - timeout := metadata.TimeoutDuration(config.timeout, config.currentOutputs, 600) + timeout := metadata.TimeoutDuration(config.timeout, config.currentInputs, 600) err := watcher.ForObject(config.ctx, config.clientForResource, config.currentOutputs.GetName()). RetryUntil(statefulsetmissing, timeout) if err != nil { @@ -377,7 +379,7 @@ func untilBatchV1JobDeleted(config deleteAwaitConfig) error { return watcher.RetryableError(e) } - timeout := metadata.TimeoutDuration(config.timeout, config.currentOutputs, 300) + timeout := metadata.TimeoutDuration(config.timeout, config.currentInputs, 300) return watcher.ForObject(config.ctx, config.clientForResource, config.currentOutputs.GetName()). RetryUntil(jobMissingOrKilled, timeout) } @@ -408,7 +410,7 @@ func untilCoreV1NamespaceDeleted(config deleteAwaitConfig) error { ns.GetName(), statusPhase)) } - timeout := metadata.TimeoutDuration(config.timeout, config.currentOutputs, 300) + timeout := metadata.TimeoutDuration(config.timeout, config.currentInputs, 300) return watcher.ForObject(config.ctx, config.clientForResource, config.currentOutputs.GetName()). RetryUntil(namespaceMissingOrKilled, timeout) } @@ -481,7 +483,7 @@ func untilCoreV1PodDeleted(config deleteAwaitConfig) error { return watcher.RetryableError(e) } - timeout := metadata.TimeoutDuration(config.timeout, config.currentOutputs, 300) + timeout := metadata.TimeoutDuration(config.timeout, config.currentInputs, 300) return watcher.ForObject(config.ctx, config.clientForResource, config.currentOutputs.GetName()). RetryUntil(podMissingOrKilled, timeout) } @@ -503,7 +505,7 @@ func untilCoreV1ReplicationControllerInitialized(c createAwaitConfig) error { name := c.currentOutputs.GetName() - replicas, _ := openapi.Pluck(c.currentOutputs.Object, "spec", "replicas") + replicas, _ := openapi.Pluck(c.currentInputs.Object, "spec", "replicas") logger.V(3).Infof("Waiting for replication controller %q to schedule '%v' replicas", name, replicas) @@ -561,7 +563,7 @@ func untilCoreV1ReplicationControllerDeleted(config deleteAwaitConfig) error { } // Wait until all replicas are gone. 10 minutes should be enough for ~10k replicas. - timeout := metadata.TimeoutDuration(config.timeout, config.currentOutputs, 600) + timeout := metadata.TimeoutDuration(config.timeout, config.currentInputs, 600) err := watcher.ForObject(config.ctx, config.clientForResource, config.currentOutputs.GetName()). 
RetryUntil(rcMissing, timeout) if err != nil { @@ -605,8 +607,8 @@ func untilCoreV1ResourceQuotaInitialized(c createAwaitConfig) error { } func untilCoreV1ResourceQuotaUpdated(c updateAwaitConfig) error { - oldSpec, _ := openapi.Pluck(c.lastOutputs.Object, "spec") - newSpec, _ := openapi.Pluck(c.currentOutputs.Object, "spec") + oldSpec, _ := openapi.Pluck(c.lastInputs.Object, "spec") + newSpec, _ := openapi.Pluck(c.currentInputs.Object, "spec") if !reflect.DeepEqual(oldSpec, newSpec) { return untilCoreV1ResourceQuotaInitialized(c.createAwaitConfig) } @@ -624,7 +626,7 @@ func untilCoreV1SecretInitialized(c createAwaitConfig) error { // Some types secrets do not have data available immediately and therefore are not considered initialized where data map is empty. // For example service-account-token as described in the docs: https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/#to-create-additional-api-tokens // - secretType, _ := openapi.Pluck(c.currentOutputs.Object, "type") + secretType, _ := openapi.Pluck(c.currentInputs.Object, "type") // Other secret types are not generated by controller therefore we do not need to create a watcher for them. // nolint:gosec @@ -670,7 +672,7 @@ func untilCoreV1ServiceAccountInitialized(c createAwaitConfig) error { // secrets array (i.e., in addition to the secrets specified by the user). // - specSecrets, _ := openapi.Pluck(c.currentOutputs.Object, "secrets") + specSecrets, _ := openapi.Pluck(c.currentInputs.Object, "secrets") var numSpecSecrets int if specSecretsArr, isArr := specSecrets.([]any); isArr { numSpecSecrets = len(specSecretsArr) diff --git a/provider/pkg/await/deployment.go b/provider/pkg/await/deployment.go index 88b828f2bf..bb0cbcd578 100644 --- a/provider/pkg/await/deployment.go +++ b/provider/pkg/await/deployment.go @@ -228,7 +228,7 @@ func (dia *deploymentInitAwaiter) Await() error { aggregateErrorTicker := time.NewTicker(10 * time.Second) defer aggregateErrorTicker.Stop() - timeout := metadata.TimeoutDuration(dia.config.timeout, dia.config.currentOutputs, DefaultDeploymentTimeoutMins*60) + timeout := metadata.TimeoutDuration(dia.config.timeout, dia.config.currentInputs, DefaultDeploymentTimeoutMins*60) return dia.await( deploymentEvents, @@ -416,7 +416,7 @@ func (dia *deploymentInitAwaiter) checkAndLogStatus() bool { } func (dia *deploymentInitAwaiter) processDeploymentEvent(event watch.Event) { - currentDeploymentName := dia.config.currentOutputs.GetName() + inputDeploymentName := dia.config.currentOutputs.GetName() deployment, isUnstructured := event.Object.(*unstructured.Unstructured) if !isUnstructured { @@ -429,7 +429,7 @@ func (dia *deploymentInitAwaiter) processDeploymentEvent(event watch.Event) { dia.deploymentErrors = map[string]string{} // Do nothing if this is not the Deployment we're waiting for. 
- if deployment.GetName() != currentDeploymentName { + if deployment.GetName() != inputDeploymentName { return } @@ -560,9 +560,7 @@ func (dia *deploymentInitAwaiter) processReplicaSetEvent(event watch.Event) { } func (dia *deploymentInitAwaiter) checkReplicaSetStatus() { - outputs := dia.config.currentOutputs - - logger.V(3).Infof("Checking ReplicaSet status for Deployment %q", outputs.GetName()) + logger.V(3).Infof("Checking ReplicaSet status for Deployment %q", dia.config.currentOutputs.GetName()) rs, updatedReplicaSetCreated := dia.replicaSets[dia.replicaSetGeneration] if dia.replicaSetGeneration == "0" || !updatedReplicaSetCreated { @@ -570,14 +568,14 @@ func (dia *deploymentInitAwaiter) checkReplicaSetStatus() { } logger.V(3).Infof("Deployment %q has generation %q, which corresponds to ReplicaSet %q", - outputs.GetName(), dia.replicaSetGeneration, rs.GetName()) + dia.config.currentOutputs.GetName(), dia.replicaSetGeneration, rs.GetName()) var lastRevision string if outputs := dia.config.lastOutputs; outputs != nil { lastRevision = outputs.GetAnnotations()[revision] } - logger.V(3).Infof("The last generation of Deployment %q was %q", outputs.GetName(), lastRevision) + logger.V(3).Infof("The last generation of Deployment %q was %q", dia.config.currentOutputs.GetName(), lastRevision) // NOTE: Check `.spec.replicas` in the live `ReplicaSet` instead of the last input `Deployment`, // since this is the plan of record. This protects against (e.g.) a user running `kubectl scale` @@ -686,12 +684,12 @@ func (dia *deploymentInitAwaiter) checkReplicaSetStatus() { } func (dia *deploymentInitAwaiter) changeTriggeredRollout() bool { - if dia.config.lastOutputs == nil { + if dia.config.lastInputs == nil { return true } fields, err := openapi.PropertiesChanged( - dia.config.lastOutputs.Object, dia.config.currentOutputs.Object, + dia.config.lastInputs.Object, dia.config.currentInputs.Object, []string{ ".spec.template.spec", }) @@ -705,9 +703,7 @@ func (dia *deploymentInitAwaiter) changeTriggeredRollout() bool { } func (dia *deploymentInitAwaiter) checkPersistentVolumeClaimStatus() { - currentOutputs := dia.config.currentOutputs - - logger.V(3).Infof("Checking PersistentVolumeClaims status for Deployment %q", currentOutputs.GetName()) + logger.V(3).Infof("Checking PersistentVolumeClaims status for Deployment %q", dia.config.currentOutputs.GetName()) allPVCsReady := true for _, pvc := range dia.pvcs { diff --git a/provider/pkg/await/deployment_test.go b/provider/pkg/await/deployment_test.go index 3ed29850fc..ee9c3c539c 100644 --- a/provider/pkg/await/deployment_test.go +++ b/provider/pkg/await/deployment_test.go @@ -702,14 +702,14 @@ func Test_Apps_Deployment_Without_PersistentVolumeClaims(t *testing.T) { } } -type setLastOutputs func(obj *unstructured.Unstructured) +type setLastInputs func(obj *unstructured.Unstructured) func Test_Apps_Deployment_MultipleUpdates(t *testing.T) { tests := []struct { description string inputs func() *unstructured.Unstructured firstUpdate func(deployments, replicaSets, pods chan watch.Event, timeout chan time.Time, - setLast setLastOutputs) + setLast setLastInputs) secondUpdate func(deployments, replicaSets, pods chan watch.Event, timeout chan time.Time) expectedError error }{ @@ -718,13 +718,13 @@ func Test_Apps_Deployment_MultipleUpdates(t *testing.T) { inputs: regressionDeploymentScaled3Input, firstUpdate: func( deployments, replicaSets, pods chan watch.Event, timeout chan time.Time, - setLast setLastOutputs, + setLast setLastInputs, ) { computed := 
regressionDeploymentScaled3() deployments <- watchAddedEvent(computed) replicaSets <- watchAddedEvent(regressionReplicaSetScaled3()) - setLast(computed) + setLast(regressionDeploymentScaled3Input()) // Timeout. Success. timeout <- time.Now() }, @@ -752,7 +752,7 @@ func Test_Apps_Deployment_MultipleUpdates(t *testing.T) { period := make(chan time.Time) go test.firstUpdate(deployments, replicaSets, pods, timeout, func(obj *unstructured.Unstructured) { - awaiter.config.lastOutputs = obj + awaiter.config.lastInputs = obj }) err := awaiter.await(deployments, replicaSets, pods, pvcs, timeout, period) diff --git a/provider/pkg/await/ingress.go b/provider/pkg/await/ingress.go index a97dc1e9e9..b071f71536 100644 --- a/provider/pkg/await/ingress.go +++ b/provider/pkg/await/ingress.go @@ -144,7 +144,7 @@ func (iia *ingressInitAwaiter) Await() error { } go serviceInformer.Informer().Run(stopper) - timeout := metadata.TimeoutDuration(iia.config.timeout, iia.config.currentOutputs, DefaultIngressTimeoutMins*60) + timeout := metadata.TimeoutDuration(iia.config.timeout, iia.config.currentInputs, DefaultIngressTimeoutMins*60) return iia.await(ingressEvents, serviceEvents, endpointsEvents, make(chan struct{}), time.After(60*time.Second), time.After(timeout)) } diff --git a/provider/pkg/await/job.go b/provider/pkg/await/job.go index b6e8af09f3..b323d1a6ec 100644 --- a/provider/pkg/await/job.go +++ b/provider/pkg/await/job.go @@ -123,7 +123,7 @@ func (jia *jobInitAwaiter) Await() error { podAggregator.Start(podEvents) defer podAggregator.Stop() - timeout := metadata.TimeoutDuration(jia.config.timeout, jia.config.currentOutputs, DefaultJobTimeoutMins*60) + timeout := metadata.TimeoutDuration(jia.config.timeout, jia.config.currentInputs, DefaultJobTimeoutMins*60) for { if jia.ready { return nil diff --git a/provider/pkg/await/pod.go b/provider/pkg/await/pod.go index d9689a4cf2..0e28d42b88 100644 --- a/provider/pkg/await/pod.go +++ b/provider/pkg/await/pod.go @@ -160,7 +160,7 @@ func (pia *podInitAwaiter) Await() error { } go podInformer.Informer().Run(stopper) - timeout := metadata.TimeoutDuration(pia.config.timeout, pia.config.currentOutputs, DefaultPodTimeoutMins*60) + timeout := metadata.TimeoutDuration(pia.config.timeout, pia.config.currentInputs, DefaultPodTimeoutMins*60) for { if pia.ready { return nil diff --git a/provider/pkg/await/service.go b/provider/pkg/await/service.go index 19ef59f26d..45ca6f873b 100644 --- a/provider/pkg/await/service.go +++ b/provider/pkg/await/service.go @@ -156,7 +156,7 @@ func (sia *serviceInitAwaiter) Await() error { version := cluster.TryGetServerVersion(sia.config.clientSet.DiscoveryClientCached) - timeout := metadata.TimeoutDuration(sia.config.timeout, sia.config.currentOutputs, DefaultServiceTimeoutMins*60) + timeout := metadata.TimeoutDuration(sia.config.timeout, sia.config.currentInputs, DefaultServiceTimeoutMins*60) return sia.await(serviceEvents, endpointsEvents, time.After(timeout), make(chan struct{}), version) } @@ -273,7 +273,7 @@ func (sia *serviceInitAwaiter) await( } func (sia *serviceInitAwaiter) processServiceEvent(event watch.Event) { - serviceName := sia.config.currentOutputs.GetName() + inputServiceName := sia.config.currentOutputs.GetName() service, isUnstructured := event.Object.(*unstructured.Unstructured) if !isUnstructured { @@ -283,7 +283,7 @@ func (sia *serviceInitAwaiter) processServiceEvent(event watch.Event) { } // Do nothing if this is not the service we're waiting for. 
- if service.GetName() != serviceName { + if service.GetName() != inputServiceName { return } @@ -302,14 +302,14 @@ func (sia *serviceInitAwaiter) processServiceEvent(event watch.Event) { lbIngress, _ := openapi.Pluck(service.Object, "status", "loadBalancer", "ingress") status, _ := openapi.Pluck(service.Object, "status") - logger.V(3).Infof("Received status for service %q: %#v", serviceName, status) + logger.V(3).Infof("Received status for service %q: %#v", inputServiceName, status) ing, isSlice := lbIngress.([]any) // Update status of service object so that we can check success. sia.serviceReady = isSlice && len(ing) > 0 logger.V(3).Infof("Waiting for service %q to assign IP/hostname for a load balancer", - serviceName) + inputServiceName) } else { // If it's not type `LoadBalancer`, report success. sia.serviceReady = true diff --git a/provider/pkg/await/statefulset.go b/provider/pkg/await/statefulset.go index 804a42d6ef..78af3c3c69 100644 --- a/provider/pkg/await/statefulset.go +++ b/provider/pkg/await/statefulset.go @@ -216,7 +216,7 @@ func (sia *statefulsetInitAwaiter) Await() error { aggregateErrorTicker := time.NewTicker(10 * time.Second) defer aggregateErrorTicker.Stop() - timeout := metadata.TimeoutDuration(sia.config.timeout, sia.config.currentOutputs, DefaultStatefulSetTimeoutMins*60) + timeout := metadata.TimeoutDuration(sia.config.timeout, sia.config.currentInputs, DefaultStatefulSetTimeoutMins*60) return sia.await(statefulSetEvents, podEvents, time.After(timeout), aggregateErrorTicker.C) } diff --git a/provider/pkg/await/statefulset_test.go b/provider/pkg/await/statefulset_test.go index 8614c41287..677a988f76 100644 --- a/provider/pkg/await/statefulset_test.go +++ b/provider/pkg/await/statefulset_test.go @@ -266,7 +266,7 @@ func Test_Apps_StatefulSet_MultipleUpdates(t *testing.T) { description string inputs func() *unstructured.Unstructured firstUpdate func(statefulsets, pods chan watch.Event, timeout chan time.Time, - setLast setLastOutputs) + setLast setLastInputs) secondUpdate func(statefulsets, pods chan watch.Event, timeout chan time.Time) firstExpectedError error secondExpectedError error @@ -276,7 +276,7 @@ func Test_Apps_StatefulSet_MultipleUpdates(t *testing.T) { inputs: statefulsetFailed, firstUpdate: func( statefulsets, pods chan watch.Event, timeout chan time.Time, - setLast setLastOutputs, + setLast setLastInputs, ) { statefulsets <- watchAddedEvent(statefulsetFailed()) @@ -311,7 +311,7 @@ func Test_Apps_StatefulSet_MultipleUpdates(t *testing.T) { period := make(chan time.Time) go test.firstUpdate(statefulsets, pods, timeout, func(obj *unstructured.Unstructured) { - awaiter.config.lastOutputs = obj + awaiter.config.lastInputs = obj }) err := awaiter.await(statefulsets, pods, timeout, period) diff --git a/provider/pkg/await/util_test.go b/provider/pkg/await/util_test.go index 74c52cc86b..b16cba4d2d 100644 --- a/provider/pkg/await/util_test.go +++ b/provider/pkg/await/util_test.go @@ -14,6 +14,7 @@ func mockAwaitConfig(inputs *unstructured.Unstructured) createAwaitConfig { return createAwaitConfig{ ctx: context.Background(), //TODO: complete this mock if needed + currentInputs: inputs, currentOutputs: inputs, logger: logging.NewLogger(context.Background(), nil, ""), } diff --git a/provider/pkg/provider/provider.go b/provider/pkg/provider/provider.go index abcb3de198..b85ea84fdc 100644 --- a/provider/pkg/provider/provider.go +++ b/provider/pkg/provider/provider.go @@ -2506,6 +2506,7 @@ func (k *kubeProvider) Delete(ctx context.Context, req *pulumirpc.DeleteRequest) 
Resources: resources, ServerSideApply: k.serverSideApplyMode, }, + Inputs: oldInputs, Outputs: current, Name: name, Timeout: req.Timeout, From 870717b0f3bfc6e6b238cfabb8852473fe751bf8 Mon Sep 17 00:00:00 2001 From: Eron Wright Date: Fri, 19 Jan 2024 03:02:55 -0800 Subject: [PATCH 09/11] Integration tests for generateName --- tests/sdk/nodejs/autonaming/step1/Pulumi.yaml | 2 +- .../sdk/nodejs/generatename/step1/Pulumi.yaml | 5 + tests/sdk/nodejs/generatename/step1/index.ts | 37 +++++ .../nodejs/generatename/step1/package.json | 12 ++ .../nodejs/generatename/step1/tsconfig.json | 22 +++ tests/sdk/nodejs/generatename/step2/index.ts | 36 +++++ tests/sdk/nodejs/generatename/step3/index.ts | 39 +++++ tests/sdk/nodejs/generatename/step4/index.ts | 39 +++++ tests/sdk/nodejs/generatename/step5/index.ts | 39 +++++ tests/sdk/nodejs/generatename/step6/index.ts | 40 ++++++ tests/sdk/nodejs/nodejs_test.go | 133 ++++++++++++++++++ 11 files changed, 403 insertions(+), 1 deletion(-) create mode 100644 tests/sdk/nodejs/generatename/step1/Pulumi.yaml create mode 100644 tests/sdk/nodejs/generatename/step1/index.ts create mode 100644 tests/sdk/nodejs/generatename/step1/package.json create mode 100644 tests/sdk/nodejs/generatename/step1/tsconfig.json create mode 100644 tests/sdk/nodejs/generatename/step2/index.ts create mode 100644 tests/sdk/nodejs/generatename/step3/index.ts create mode 100644 tests/sdk/nodejs/generatename/step4/index.ts create mode 100644 tests/sdk/nodejs/generatename/step5/index.ts create mode 100644 tests/sdk/nodejs/generatename/step6/index.ts diff --git a/tests/sdk/nodejs/autonaming/step1/Pulumi.yaml b/tests/sdk/nodejs/autonaming/step1/Pulumi.yaml index 9d6c8f4e8b..9387c9ab22 100644 --- a/tests/sdk/nodejs/autonaming/step1/Pulumi.yaml +++ b/tests/sdk/nodejs/autonaming/step1/Pulumi.yaml @@ -1,3 +1,3 @@ name: autonaming-test -description: A program that tests partial provider failure. +description: A program that tests auto-naming of Kubernetes objects. runtime: nodejs diff --git a/tests/sdk/nodejs/generatename/step1/Pulumi.yaml b/tests/sdk/nodejs/generatename/step1/Pulumi.yaml new file mode 100644 index 0000000000..bde6c38040 --- /dev/null +++ b/tests/sdk/nodejs/generatename/step1/Pulumi.yaml @@ -0,0 +1,5 @@ +name: generatename-test +description: A program that tests support for `.metadata.generateName`. +runtime: nodejs +config: + kubernetes:enableServerSideApply: false diff --git a/tests/sdk/nodejs/generatename/step1/index.ts b/tests/sdk/nodejs/generatename/step1/index.ts new file mode 100644 index 0000000000..7cca8bf554 --- /dev/null +++ b/tests/sdk/nodejs/generatename/step1/index.ts @@ -0,0 +1,37 @@ +// Copyright 2016-2019, Pulumi Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import * as pulumi from "@pulumi/pulumi"; +import * as k8s from "@pulumi/kubernetes"; + +const config = new pulumi.Config(); + +const namespace = new k8s.core.v1.Namespace("test-namespace"); + +// +// A simple Pod definition. 
`.metadata.name` is not provided, but `.metadata.generateName` is. +// Kubernetes will provide a unique name for the Pod using `.metadata.generateName` as a prefix. +// + +export const pod = new k8s.core.v1.Pod("generatename-test", { + metadata: { + namespace: namespace.metadata.name, + generateName: "generatename-test-", + }, + spec: { + containers: [ + {name: "nginx", image: "nginx"}, + ], + }, +}); diff --git a/tests/sdk/nodejs/generatename/step1/package.json b/tests/sdk/nodejs/generatename/step1/package.json new file mode 100644 index 0000000000..779b1bb5c3 --- /dev/null +++ b/tests/sdk/nodejs/generatename/step1/package.json @@ -0,0 +1,12 @@ +{ + "name": "steps", + "version": "0.1.0", + "dependencies": { + "@pulumi/pulumi": "latest" + }, + "devDependencies": { + }, + "peerDependencies": { + "@pulumi/kubernetes": "latest" + } +} diff --git a/tests/sdk/nodejs/generatename/step1/tsconfig.json b/tests/sdk/nodejs/generatename/step1/tsconfig.json new file mode 100644 index 0000000000..5dacccbd42 --- /dev/null +++ b/tests/sdk/nodejs/generatename/step1/tsconfig.json @@ -0,0 +1,22 @@ +{ + "compilerOptions": { + "outDir": "bin", + "target": "es6", + "module": "commonjs", + "moduleResolution": "node", + "declaration": true, + "sourceMap": true, + "stripInternal": true, + "experimentalDecorators": true, + "pretty": true, + "noFallthroughCasesInSwitch": true, + "noImplicitAny": true, + "noImplicitReturns": true, + "forceConsistentCasingInFileNames": true, + "strictNullChecks": true + }, + "files": [ + "index.ts" + ] +} + diff --git a/tests/sdk/nodejs/generatename/step2/index.ts b/tests/sdk/nodejs/generatename/step2/index.ts new file mode 100644 index 0000000000..f0591d9812 --- /dev/null +++ b/tests/sdk/nodejs/generatename/step2/index.ts @@ -0,0 +1,36 @@ +// Copyright 2016-2019, Pulumi Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import * as pulumi from "@pulumi/pulumi"; +import * as k8s from "@pulumi/kubernetes"; + +const config = new pulumi.Config(); + +const namespace = new k8s.core.v1.Namespace("test-namespace"); + +// +// The `.metadata.generateName` field has changed, but Pulumi does NOT automatically replace in that situation. +// + +export const pod = new k8s.core.v1.Pod("generatename-test", { + metadata: { + namespace: namespace.metadata.name, + generateName: "generatename-test-modified-", + }, + spec: { + containers: [ + {name: "nginx", image: "nginx"}, + ], + }, +}); diff --git a/tests/sdk/nodejs/generatename/step3/index.ts b/tests/sdk/nodejs/generatename/step3/index.ts new file mode 100644 index 0000000000..895899cc0c --- /dev/null +++ b/tests/sdk/nodejs/generatename/step3/index.ts @@ -0,0 +1,39 @@ +// Copyright 2016-2019, Pulumi Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import * as pulumi from "@pulumi/pulumi"; +import * as k8s from "@pulumi/kubernetes"; + +const config = new pulumi.Config(); + +const namespace = new k8s.core.v1.Namespace("test-namespace"); + +// +// The image in the Pod's container has changed, triggering a replace. Because `.metadata.name` is +// not specified, but `.metadata.generateName` is, Kubernetes again will provide a new name for the replacement. +// Pulumi will proceed with replace-before-delete. +// + +export const pod = new k8s.core.v1.Pod("generatename-test", { + metadata: { + namespace: namespace.metadata.name, + generateName: "generatename-test-modified-", + }, + spec: { + containers: [ + {name: "nginx", image: "nginx:1.15-alpine"}, + ], + }, +}); + diff --git a/tests/sdk/nodejs/generatename/step4/index.ts b/tests/sdk/nodejs/generatename/step4/index.ts new file mode 100644 index 0000000000..f276b6faf0 --- /dev/null +++ b/tests/sdk/nodejs/generatename/step4/index.ts @@ -0,0 +1,39 @@ +// Copyright 2016-2019, Pulumi Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import * as pulumi from "@pulumi/pulumi"; +import * as k8s from "@pulumi/kubernetes"; + +const config = new pulumi.Config(); + +const namespace = new k8s.core.v1.Namespace("test-namespace"); + +// +// Only the labels have changed, so no replace is triggered. Pulumi should update the object +// in-place, and the name should not be changed. +// + +export const pod = new k8s.core.v1.Pod("generatename-test", { + metadata: { + namespace: namespace.metadata.name, + generateName: "generatename-test-modified-", + labels: {app: "generatename-test"}, + }, + spec: { + containers: [ + {name: "nginx", image: "nginx:1.15-alpine"}, + ], + }, +}); + diff --git a/tests/sdk/nodejs/generatename/step5/index.ts b/tests/sdk/nodejs/generatename/step5/index.ts new file mode 100644 index 0000000000..0ef8f16519 --- /dev/null +++ b/tests/sdk/nodejs/generatename/step5/index.ts @@ -0,0 +1,39 @@ +// Copyright 2016-2019, Pulumi Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import * as pulumi from "@pulumi/pulumi"; +import * as k8s from "@pulumi/kubernetes"; + +const config = new pulumi.Config(); + +const namespace = new k8s.core.v1.Namespace("test-namespace"); + +// +// The name of the pod is now explicitly set to the previously-generated name, so no replace is triggered. +// + +export const pod = new k8s.core.v1.Pod("generatename-test", { + metadata: { + namespace: namespace.metadata.name, + generateName: "generatename-test-modified-", + labels: {app: "generatename-test"}, + name: config.require("podName"), + }, + spec: { + containers: [ + {name: "nginx", image: "nginx:1.15-alpine"}, + ], + }, +}); + diff --git a/tests/sdk/nodejs/generatename/step6/index.ts b/tests/sdk/nodejs/generatename/step6/index.ts new file mode 100644 index 0000000000..5bed3fff74 --- /dev/null +++ b/tests/sdk/nodejs/generatename/step6/index.ts @@ -0,0 +1,40 @@ +// Copyright 2016-2019, Pulumi Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import * as pulumi from "@pulumi/pulumi"; +import * as k8s from "@pulumi/kubernetes"; + +const config = new pulumi.Config(); + +const namespace = new k8s.core.v1.Namespace("test-namespace"); + +// +// User has now specified `.metadata.name`, so Pulumi should replace the resource, and NOT allocate +// a name to it. +// + +export const pod = new k8s.core.v1.Pod("generatename-test", { + metadata: { + namespace: namespace.metadata.name, + generateName: "generatename-test-modified-", + labels: {app: "generatename-test"}, + name: "generatename-test", + }, + spec: { + containers: [ + {name: "nginx", image: "nginx:1.15-alpine"}, + ], + }, +}); + diff --git a/tests/sdk/nodejs/nodejs_test.go b/tests/sdk/nodejs/nodejs_test.go index 088c632f92..2830375a01 100644 --- a/tests/sdk/nodejs/nodejs_test.go +++ b/tests/sdk/nodejs/nodejs_test.go @@ -18,6 +18,7 @@ package test import ( b64 "encoding/base64" "encoding/json" + "errors" "fmt" "io/ioutil" "log" @@ -234,6 +235,138 @@ func TestAutonaming(t *testing.T) { integration.ProgramTest(t, &test) } +func TestGenerateName(t *testing.T) { + var pt *integration.ProgramTester + var step1Name any + var step2Name any + var step3Name any + var step4Name any + var step5Name any + var step6Name any + + test := baseOptions.With(integration.ProgramTestOptions{ + Dir: filepath.Join("generatename", "step1"), + Quick: false, + SkipRefresh: false, + ExpectRefreshChanges: false, + ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { + assert.NotNil(t, stackInfo.Deployment) + + // + // Assert pod is successfully given a unique name by Kubernetes. 
+ // + pod := tests.SearchResourcesByName(stackInfo, "", "kubernetes:core/v1:Pod", "generatename-test") + step1Name, _ = openapi.Pluck(pod.Outputs, "metadata", "name") + assert.True(t, strings.HasPrefix(step1Name.(string), "generatename-test-")) + generateName, _ := openapi.Pluck(pod.Outputs, "metadata", "generateName") + assert.Equal(t, "generatename-test-", generateName.(string)) + _, autonamed := openapi.Pluck(pod.Outputs, "metadata", "annotations", "pulumi.com/autonamed") + assert.False(t, autonamed) + }, + Config: map[string]string{}, + + EditDirs: []integration.EditDir{ + { + Dir: filepath.Join("generatename", "step2"), + Additive: true, + ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { + assert.NotNil(t, stackInfo.Deployment) + + // + // Assert pod was NOT replaced, and has the same name, previously allocated by Kubernetes. + // + pod := tests.SearchResourcesByName(stackInfo, "", "kubernetes:core/v1:Pod", "generatename-test") + step2Name, _ = openapi.Pluck(pod.Outputs, "metadata", "name") + assert.Equal(t, step1Name, step2Name) + generateName, _ := openapi.Pluck(pod.Outputs, "metadata", "generateName") + assert.Equal(t, "generatename-test-modified-", generateName.(string)) + _, autonamed := openapi.Pluck(pod.Outputs, "metadata", "annotations", "pulumi.com/autonamed") + assert.False(t, autonamed) + }, + }, + { + Dir: filepath.Join("generatename", "step3"), + Additive: true, + ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { + assert.NotNil(t, stackInfo.Deployment) + + // + // Assert pod was replaced, i.e., destroyed and re-created, with allocating a new name. + // + pod := tests.SearchResourcesByName(stackInfo, "", "kubernetes:core/v1:Pod", "generatename-test") + step3Name, _ = openapi.Pluck(pod.Outputs, "metadata", "name") + assert.NotEqual(t, step2Name, step3Name) + assert.True(t, strings.HasPrefix(step3Name.(string), "generatename-test-modified-")) + _, autonamed := openapi.Pluck(pod.Outputs, "metadata", "annotations", "pulumi.com/autonamed") + assert.False(t, autonamed) + }, + }, + { + Dir: filepath.Join("generatename", "step4"), + Additive: true, + ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { + assert.NotNil(t, stackInfo.Deployment) + + // + // Assert pod was NOT replaced, and has the same name, previously allocated by Kubernetes. + // + pod := tests.SearchResourcesByName(stackInfo, "", "kubernetes:core/v1:Pod", "generatename-test") + step4Name, _ = openapi.Pluck(pod.Outputs, "metadata", "name") + assert.Equal(t, step3Name, step4Name) + _, autonamed := openapi.Pluck(pod.Outputs, "metadata", "annotations", "pulumi.com/autonamed") + assert.False(t, autonamed) + + // Update the configuration for subsequent steps. + require.NoError(t, + pt.RunPulumiCommand("config", "set", "podName", step4Name.(string)), + "failed to set podName config") + }, + }, + { + Dir: filepath.Join("generatename", "step5"), + Additive: true, + ExpectNoChanges: true, + ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { + assert.NotNil(t, stackInfo.Deployment) + + // + // User has explicitly set the name to the previously-generated name (maybe for clarity), + // and Pulumi does NOT replace the pod. 
+ // + pod := tests.SearchResourcesByName(stackInfo, "", "kubernetes:core/v1:Pod", "generatename-test") + step5Name, _ = openapi.Pluck(pod.Outputs, "metadata", "name") + assert.Equal(t, step4Name, step5Name) + _, autonamed := openapi.Pluck(pod.Outputs, "metadata", "annotations", "pulumi.com/autonamed") + assert.False(t, autonamed) + }, + }, + { + Dir: filepath.Join("generatename", "step6"), + Additive: true, + ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) { + assert.NotNil(t, stackInfo.Deployment) + + // + // User has specified their own name for the Pod, so we replace it, and Pulumi/Kubernetes does NOT + // allocate a name on its own. + // + pod := tests.SearchResourcesByName(stackInfo, "", "kubernetes:core/v1:Pod", "generatename-test") + step6Name, _ = openapi.Pluck(pod.Outputs, "metadata", "name") + assert.NotEqual(t, step5Name, step6Name) + assert.Equal(t, "generatename-test", step6Name.(string)) + _, autonamed := openapi.Pluck(pod.Outputs, "metadata", "annotations", "pulumi.com/autonamed") + assert.False(t, autonamed) + }, + }, + }, + }) + pt = integration.ProgramTestManualLifeCycle(t, &test) + err := pt.TestLifeCycleInitAndDestroy() + if !errors.Is(err, integration.ErrTestFailed) { + assert.NoError(t, err) + } +} + func TestCRDs(t *testing.T) { test := baseOptions.With(integration.ProgramTestOptions{ Dir: filepath.Join("crds", "step1"), From 72f74c1ee67d21b5ab2716bedee3c98c9a86d91e Mon Sep 17 00:00:00 2001 From: Eron Wright Date: Fri, 19 Jan 2024 12:47:27 -0800 Subject: [PATCH 10/11] improved back compat, support for computed inputs. --- provider/pkg/metadata/naming.go | 35 ++++++++- provider/pkg/metadata/naming_test.go | 81 ++++++++++++++++---- provider/pkg/provider/provider.go | 22 +++--- tests/sdk/nodejs/autonaming/step1/index.ts | 4 +- tests/sdk/nodejs/autonaming/step2/index.ts | 4 +- tests/sdk/nodejs/autonaming/step3/index.ts | 4 +- tests/sdk/nodejs/autonaming/step4/index.ts | 10 +-- tests/sdk/nodejs/autonaming/step5/index.ts | 35 +++++++++ tests/sdk/nodejs/generatename/step1/index.ts | 2 +- tests/sdk/nodejs/generatename/step2/index.ts | 2 +- tests/sdk/nodejs/generatename/step3/index.ts | 2 +- tests/sdk/nodejs/generatename/step4/index.ts | 2 +- tests/sdk/nodejs/generatename/step5/index.ts | 2 +- tests/sdk/nodejs/generatename/step6/index.ts | 4 +- tests/sdk/nodejs/nodejs_test.go | 31 ++++++++ 15 files changed, 197 insertions(+), 43 deletions(-) create mode 100644 tests/sdk/nodejs/autonaming/step5/index.ts diff --git a/provider/pkg/metadata/naming.go b/provider/pkg/metadata/naming.go index 8b59ba4478..03c32ef267 100644 --- a/provider/pkg/metadata/naming.go +++ b/provider/pkg/metadata/naming.go @@ -45,14 +45,45 @@ func AssignNameIfAutonamable(randomSeed []byte, obj *unstructured.Unstructured, // AdoptOldAutonameIfUnnamed checks if `newObj` has a name, and if not, "adopts" the name of `oldObj` // instead. If `oldObj` was autonamed, then we mark `newObj` as autonamed, too. -func AdoptOldAutonameIfUnnamed(newObj, oldObj *unstructured.Unstructured) { - if newObj.GetName() == "" && newObj.GetGenerateName() == "" && IsAutonamed(oldObj) { +// Note that autonaming is preferred over generateName for backwards compatibility. +func AdoptOldAutonameIfUnnamed(newObj, oldObj *unstructured.Unstructured, newObjMap resource.PropertyMap) { + if md, ok := newObjMap["metadata"].V.(resource.PropertyMap); ok { + // Check if the .metadata.name is set and is a computed value. If so, do not auto-name. 
+ if name, ok := md["name"]; ok && name.IsComputed() { + return + } + } + if newObj.GetName() == "" && IsAutonamed(oldObj) { contract.Assertf(oldObj.GetName() != "", "expected nonempty name for object: %s", oldObj) newObj.SetName(oldObj.GetName()) SetAnnotationTrue(newObj, AnnotationAutonamed) } } +// IsAutonamed checks if the object is auto-named by Pulumi. func IsAutonamed(obj *unstructured.Unstructured) bool { return IsAnnotationTrue(obj, AnnotationAutonamed) } + +// IsGenerateName checks if the object is auto-named by Kubernetes. +func IsGenerateName(obj *unstructured.Unstructured, propMap resource.PropertyMap) bool { + if IsNamed(obj, propMap) { + return false + } + if md, ok := propMap["metadata"].V.(resource.PropertyMap); ok { + if name, ok := md["generateName"]; ok && name.IsComputed() { + return true + } + } + return obj.GetGenerateName() != "" +} + +// IsNamed checks if the object has an assigned name (may be a known or computed value). +func IsNamed(obj *unstructured.Unstructured, propMap resource.PropertyMap) bool { + if md, ok := propMap["metadata"].V.(resource.PropertyMap); ok { + if name, ok := md["name"]; ok && name.IsComputed() { + return true + } + } + return obj.GetName() != "" +} diff --git a/provider/pkg/metadata/naming_test.go b/provider/pkg/metadata/naming_test.go index 5179e64c50..e6f41f7943 100644 --- a/provider/pkg/metadata/naming_test.go +++ b/provider/pkg/metadata/naming_test.go @@ -15,10 +15,11 @@ package metadata import ( - "github.com/pulumi/pulumi/sdk/v3/go/common/tokens" "strings" "testing" + "github.com/pulumi/pulumi/sdk/v3/go/common/tokens" + "github.com/pulumi/pulumi/sdk/v3/go/common/resource" "github.com/stretchr/testify/assert" @@ -37,32 +38,54 @@ func TestAssignNameIfAutonamable(t *testing.T) { assert.Len(t, o1.GetName(), 12) // o2 has a name, so autonaming fails. - o2 := &unstructured.Unstructured{ - Object: map[string]any{"metadata": map[string]any{"name": "bar"}}, - } pm2 := resource.PropertyMap{ "metadata": resource.NewObjectProperty(resource.PropertyMap{ "name": resource.NewStringProperty("bar"), }), } + o2 := propMapToUnstructured(pm2) AssignNameIfAutonamable(nil, o2, pm2, resource.NewURN(tokens.QName("teststack"), tokens.PackageName("testproj"), tokens.Type(""), tokens.Type("bang:boom/fizzle:AnotherResource"), "bar")) assert.False(t, IsAutonamed(o2)) assert.Equal(t, "bar", o2.GetName()) // o3 has a computed name, so autonaming fails. - o3 := &unstructured.Unstructured{ - Object: map[string]any{"metadata": map[string]any{"name": "[Computed]"}}, - } pm3 := resource.PropertyMap{ "metadata": resource.NewObjectProperty(resource.PropertyMap{ "name": resource.MakeComputed(resource.NewStringProperty("bar")), }), } + o3 := propMapToUnstructured(pm3) AssignNameIfAutonamable(nil, o3, pm3, resource.NewURN(tokens.QName("teststack"), tokens.PackageName("testproj"), tokens.Type(""), tokens.Type("bang:boom/fizzle:MajorResource"), "foo")) assert.False(t, IsAutonamed(o3)) - assert.Equal(t, "[Computed]", o3.GetName()) + assert.Equal(t, "", o3.GetName()) + + // o4 has a generateName, so autonaming fails. 
+	pm4 := resource.PropertyMap{
+		"metadata": resource.NewObjectProperty(resource.PropertyMap{
+			"generateName": resource.NewStringProperty("bar-"),
+		}),
+	}
+	o4 := propMapToUnstructured(pm4)
+	AssignNameIfAutonamable(nil, o4, pm4, resource.NewURN(tokens.QName("teststack"), tokens.PackageName("testproj"),
+		tokens.Type(""), tokens.Type("bang:boom/fizzle:AnotherResource"), "bar"))
+	assert.False(t, IsAutonamed(o4))
+	assert.Equal(t, "bar-", o4.GetGenerateName())
+	assert.Equal(t, "", o4.GetName())
+
+	// o5 has a computed generateName, so autonaming fails.
+	pm5 := resource.PropertyMap{
+		"metadata": resource.NewObjectProperty(resource.PropertyMap{
+			"generateName": resource.MakeComputed(resource.NewStringProperty("bar-")),
+		}),
+	}
+	o5 := propMapToUnstructured(pm5)
+	AssignNameIfAutonamable(nil, o5, pm5, resource.NewURN(tokens.QName("teststack"), tokens.PackageName("testproj"),
+		tokens.Type(""), tokens.Type("bang:boom/fizzle:MajorResource"), "foo"))
+	assert.False(t, IsAutonamed(o5))
+	assert.Equal(t, "", o5.GetGenerateName())
+	assert.Equal(t, "", o5.GetName())
 }
 
 func TestAdoptName(t *testing.T) {
@@ -77,10 +100,13 @@ func TestAdoptName(t *testing.T) {
 		},
 	}
-	new1 := &unstructured.Unstructured{
-		Object: map[string]any{"metadata": map[string]any{"name": "new1"}},
+	pm1 := resource.PropertyMap{
+		"metadata": resource.NewObjectProperty(resource.PropertyMap{
+			"name": resource.NewStringProperty("new1"),
+		}),
 	}
-	AdoptOldAutonameIfUnnamed(new1, old1)
+	new1 := propMapToUnstructured(pm1)
+	AdoptOldAutonameIfUnnamed(new1, old1, pm1)
 	assert.Equal(t, "old1", old1.GetName())
 	assert.True(t, IsAutonamed(old1))
 	assert.Equal(t, "new1", new1.GetName())
@@ -90,7 +116,8 @@ func TestAdoptName(t *testing.T) {
 	new2 := &unstructured.Unstructured{
 		Object: map[string]any{},
 	}
-	AdoptOldAutonameIfUnnamed(new2, old1)
+	pm2 := resource.NewPropertyMap(struct{}{})
+	AdoptOldAutonameIfUnnamed(new2, old1, pm2)
 	assert.Equal(t, "old1", new2.GetName())
 	assert.True(t, IsAutonamed(new2))
@@ -98,6 +125,7 @@ func TestAdoptName(t *testing.T) {
 	new3 := &unstructured.Unstructured{
 		Object: map[string]any{},
 	}
+	pm3 := resource.NewPropertyMap(struct{}{})
 	old2 := &unstructured.Unstructured{
 		Object: map[string]any{
 			"metadata": map[string]any{
 				"name": "old1",
 			},
 		},
 	}
-	AdoptOldAutonameIfUnnamed(new3, old2)
+	AdoptOldAutonameIfUnnamed(new3, old2, pm3)
 	assert.Equal(t, "", new3.GetName())
 	assert.False(t, IsAutonamed(new3))
+
+	// new4 has a computed name and therefore DOES NOT adopt old1's name.
+	pm4 := resource.PropertyMap{
+		"metadata": resource.NewObjectProperty(resource.PropertyMap{
+			"name": resource.MakeComputed(resource.NewStringProperty("new4")),
+		}),
+	}
+	new4 := propMapToUnstructured(pm4)
+	assert.Equal(t, "", new4.GetName())
+	AdoptOldAutonameIfUnnamed(new4, old1, pm4)
+	assert.Equal(t, "", new4.GetName())
+	assert.False(t, IsAutonamed(new4))
+
+	// new5 has a generateName and therefore DOES adopt old1's name.
+ pm5 := resource.PropertyMap{ + "metadata": resource.NewObjectProperty(resource.PropertyMap{ + "generateName": resource.NewStringProperty("new5-"), + }), + } + new5 := propMapToUnstructured(pm5) + AdoptOldAutonameIfUnnamed(new5, old1, pm5) + assert.Equal(t, "old1", new2.GetName()) + assert.True(t, IsAutonamed(new2)) +} + +func propMapToUnstructured(pm resource.PropertyMap) *unstructured.Unstructured { + return &unstructured.Unstructured{Object: pm.MapRepl(nil, nil)} } diff --git a/provider/pkg/provider/provider.go b/provider/pkg/provider/provider.go index b85ea84fdc..8f2d93dd4f 100644 --- a/provider/pkg/provider/provider.go +++ b/provider/pkg/provider/provider.go @@ -1346,12 +1346,6 @@ func (k *kubeProvider) Check(ctx context.Context, req *pulumirpc.CheckRequest) ( return nil, fmt.Errorf("patch resources require the `.metadata.name` field to be set") } } - if k.serverSideApplyMode && newInputs.GetGenerateName() != "" { - return nil, fmt.Errorf("the `.metadata.generateName` field is not supported in Server-Side Apply mode") - } - if k.yamlRenderMode && newInputs.GetGenerateName() != "" { - return nil, fmt.Errorf("the `.metadata.generateName` field is not supported in YAML rendering mode") - } var failures []*pulumirpc.CheckFailure @@ -1364,9 +1358,9 @@ func (k *kubeProvider) Check(ctx context.Context, req *pulumirpc.CheckRequest) ( // needs to be `DeleteBeforeReplace`'d. If the resource is marked `DeleteBeforeReplace`, then // `Create` will allocate it a new name later. if len(oldInputs.Object) > 0 { - // NOTE: If old inputs exist, they MAY have a name, either provided by the user or filled in with a - // previous run of `Check`. They wouldn't have a name if `generateName` was used. - metadata.AdoptOldAutonameIfUnnamed(newInputs, oldInputs) + // NOTE: If old inputs exist, they MAY have a name, either provided by the user, or based on generateName, + // or filled in with a previous run of `Check`. + metadata.AdoptOldAutonameIfUnnamed(newInputs, oldInputs, news) // If the resource has existing state, we only set the "managed-by: pulumi" label if it is already present. This // avoids causing diffs for cases where the resource is being imported, or was created using SSA. The goal in @@ -1391,6 +1385,14 @@ func (k *kubeProvider) Check(ctx context.Context, req *pulumirpc.CheckRequest) ( } } } + if metadata.IsGenerateName(newInputs, news) { + if k.serverSideApplyMode { + return nil, fmt.Errorf("the `.metadata.generateName` field is not supported in Server-Side Apply mode") + } + if k.yamlRenderMode { + return nil, fmt.Errorf("the `.metadata.generateName` field is not supported in YAML rendering mode") + } + } gvk, err := k.gvkFromURN(urn) if err != nil { @@ -1728,7 +1730,7 @@ func (k *kubeProvider) Diff(ctx context.Context, req *pulumirpc.DiffRequest) (*p len(replaces) > 0 && // 2. Object is NOT autonamed (i.e., user manually named it, and therefore we can't // auto-generate the name on client or server). - !(metadata.IsAutonamed(newInputs) || (newInputs.GetGenerateName() != "" && newInputs.GetName() == "")) && + !(metadata.IsAutonamed(newInputs) || metadata.IsGenerateName(newInputs, newResInputs)) && // 3. The new, user-specified name is the same as the old name. newInputs.GetName() == oldLive.GetName() && // 4. 
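As a quick illustration of how the two new predicates divide the naming space, here is a minimal sketch (not part of the patch; it assumes the provider's module path for the metadata import):

package main

import (
	"fmt"

	"github.com/pulumi/pulumi-kubernetes/provider/v4/pkg/metadata"
	"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// Inputs with only generateName set: not "named", but generateName-based.
	pm := resource.PropertyMap{
		"metadata": resource.NewObjectProperty(resource.PropertyMap{
			"generateName": resource.NewStringProperty("app-"),
		}),
	}
	obj := &unstructured.Unstructured{Object: pm.MapRepl(nil, nil)}

	fmt.Println(metadata.IsNamed(obj, pm))        // false: no .metadata.name (known or computed)
	fmt.Println(metadata.IsGenerateName(obj, pm)) // true: a generateName prefix is present
	// Anything for which both predicates report false is eligible for Pulumi autonaming.
}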
diff --git a/provider/pkg/provider/provider.go b/provider/pkg/provider/provider.go
index b85ea84fdc..8f2d93dd4f 100644
--- a/provider/pkg/provider/provider.go
+++ b/provider/pkg/provider/provider.go
@@ -1346,12 +1346,6 @@ func (k *kubeProvider) Check(ctx context.Context, req *pulumirpc.CheckRequest) (
 			return nil, fmt.Errorf("patch resources require the `.metadata.name` field to be set")
 		}
 	}
-	if k.serverSideApplyMode && newInputs.GetGenerateName() != "" {
-		return nil, fmt.Errorf("the `.metadata.generateName` field is not supported in Server-Side Apply mode")
-	}
-	if k.yamlRenderMode && newInputs.GetGenerateName() != "" {
-		return nil, fmt.Errorf("the `.metadata.generateName` field is not supported in YAML rendering mode")
-	}
 
 	var failures []*pulumirpc.CheckFailure
 
@@ -1364,9 +1358,9 @@ func (k *kubeProvider) Check(ctx context.Context, req *pulumirpc.CheckRequest) (
 	// needs to be `DeleteBeforeReplace`'d. If the resource is marked `DeleteBeforeReplace`, then
 	// `Create` will allocate it a new name later.
 	if len(oldInputs.Object) > 0 {
-		// NOTE: If old inputs exist, they MAY have a name, either provided by the user or filled in with a
-		// previous run of `Check`. They wouldn't have a name if `generateName` was used.
-		metadata.AdoptOldAutonameIfUnnamed(newInputs, oldInputs)
+		// NOTE: If old inputs exist, they MAY have a name, either provided by the user, or based on generateName,
+		// or filled in with a previous run of `Check`.
+		metadata.AdoptOldAutonameIfUnnamed(newInputs, oldInputs, news)
 
 		// If the resource has existing state, we only set the "managed-by: pulumi" label if it is already present. This
 		// avoids causing diffs for cases where the resource is being imported, or was created using SSA. The goal in
@@ -1391,6 +1385,14 @@ func (k *kubeProvider) Check(ctx context.Context, req *pulumirpc.CheckRequest) (
 			}
 		}
 	}
+	if metadata.IsGenerateName(newInputs, news) {
+		if k.serverSideApplyMode {
+			return nil, fmt.Errorf("the `.metadata.generateName` field is not supported in Server-Side Apply mode")
+		}
+		if k.yamlRenderMode {
+			return nil, fmt.Errorf("the `.metadata.generateName` field is not supported in YAML rendering mode")
+		}
+	}
 
 	gvk, err := k.gvkFromURN(urn)
 	if err != nil {
@@ -1728,7 +1730,7 @@ func (k *kubeProvider) Diff(ctx context.Context, req *pulumirpc.DiffRequest) (*p
 		len(replaces) > 0 &&
 		// 2. Object is NOT autonamed (i.e., user manually named it, and therefore we can't
 		// auto-generate the name on client or server).
-		!(metadata.IsAutonamed(newInputs) || (newInputs.GetGenerateName() != "" && newInputs.GetName() == "")) &&
+		!(metadata.IsAutonamed(newInputs) || metadata.IsGenerateName(newInputs, newResInputs)) &&
 		// 3. The new, user-specified name is the same as the old name.
 		newInputs.GetName() == oldLive.GetName() &&
 		// 4. The resource is being deployed to the same namespace (i.e., we aren't creating the
diff --git a/tests/sdk/nodejs/autonaming/step1/index.ts b/tests/sdk/nodejs/autonaming/step1/index.ts
index 004912cace..573334e85c 100644
--- a/tests/sdk/nodejs/autonaming/step1/index.ts
+++ b/tests/sdk/nodejs/autonaming/step1/index.ts
@@ -14,14 +14,14 @@
 
 import * as k8s from "@pulumi/kubernetes";
 
-export const namespace = new k8s.core.v1.Namespace("test-namespace");
+const namespace = new k8s.core.v1.Namespace("test-namespace");
 
 //
 // A simple Pod definition. `.metadata.name` is not provided, so Pulumi will allocate a unique name
 // to the resource upon creation.
 //
 
-const pod = new k8s.core.v1.Pod("autonaming-test", {
+export const pod = new k8s.core.v1.Pod("autonaming-test", {
     metadata: {
         namespace: namespace.metadata.name,
     },
diff --git a/tests/sdk/nodejs/autonaming/step2/index.ts b/tests/sdk/nodejs/autonaming/step2/index.ts
index d9e41747ab..227537c0c5 100644
--- a/tests/sdk/nodejs/autonaming/step2/index.ts
+++ b/tests/sdk/nodejs/autonaming/step2/index.ts
@@ -14,14 +14,14 @@
 
 import * as k8s from "@pulumi/kubernetes";
 
-export const namespace = new k8s.core.v1.Namespace("test-namespace");
+const namespace = new k8s.core.v1.Namespace("test-namespace");
 
 //
 // The image in the Pod's container has changed, triggering a replace. Because `.metadata.name` is
 // not specified, Pulumi again will provide a name upon creation of the new Pod resource.
 //
 
-const pod = new k8s.core.v1.Pod("autonaming-test", {
+export const pod = new k8s.core.v1.Pod("autonaming-test", {
     metadata: {
         namespace: namespace.metadata.name,
     },
diff --git a/tests/sdk/nodejs/autonaming/step3/index.ts b/tests/sdk/nodejs/autonaming/step3/index.ts
index 5bb94dec69..8d9070895c 100644
--- a/tests/sdk/nodejs/autonaming/step3/index.ts
+++ b/tests/sdk/nodejs/autonaming/step3/index.ts
@@ -14,14 +14,14 @@
 
 import * as k8s from "@pulumi/kubernetes";
 
-export const namespace = new k8s.core.v1.Namespace("test-namespace");
+const namespace = new k8s.core.v1.Namespace("test-namespace");
 
 //
 // Only the labels have changed, so no replace is triggered. Pulumi should update the object
 // in-place, and the name should not be changed.
 //
 
-const pod = new k8s.core.v1.Pod("autonaming-test", {
+export const pod = new k8s.core.v1.Pod("autonaming-test", {
     metadata: {
         namespace: namespace.metadata.name,
         labels: {app: "autonaming-test"},
diff --git a/tests/sdk/nodejs/autonaming/step4/index.ts b/tests/sdk/nodejs/autonaming/step4/index.ts
index e1e901df03..3da428f538 100644
--- a/tests/sdk/nodejs/autonaming/step4/index.ts
+++ b/tests/sdk/nodejs/autonaming/step4/index.ts
@@ -14,17 +14,17 @@
 
 import * as k8s from "@pulumi/kubernetes";
 
-export const namespace = new k8s.core.v1.Namespace("test-namespace");
+const namespace = new k8s.core.v1.Namespace("test-namespace");
 
 //
-// User has now specified `.metadata.name`, so Pulumi should replace the resource, and NOT allocate
-// a name to it.
+// User has now specified `.metadata.generateName`, which Pulumi ignores because autonaming has already occurred,
+// so no replace is triggered. Pulumi should update the object in-place, and the name should not be changed.
 //
 
-const pod = new k8s.core.v1.Pod("autonaming-test", {
+export const pod = new k8s.core.v1.Pod("autonaming-test", {
     metadata: {
         namespace: namespace.metadata.name,
-        name: "autonaming-test",
+        generateName: "autonaming-test-",
         labels: {app: "autonaming-test"},
     },
     spec: {
diff --git a/tests/sdk/nodejs/autonaming/step5/index.ts b/tests/sdk/nodejs/autonaming/step5/index.ts
new file mode 100644
index 0000000000..8fca7c00a2
--- /dev/null
+++ b/tests/sdk/nodejs/autonaming/step5/index.ts
@@ -0,0 +1,35 @@
+// Copyright 2016-2019, Pulumi Corporation.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import * as k8s from "@pulumi/kubernetes";
+
+const namespace = new k8s.core.v1.Namespace("test-namespace");
+
+//
+// User has now specified `.metadata.name`, so Pulumi should replace the resource, and NOT allocate
+// a name to it.
+//
+
+export const pod = new k8s.core.v1.Pod("autonaming-test", {
+    metadata: {
+        namespace: namespace.metadata.name,
+        name: "autonaming-test",
+        labels: {app: "autonaming-test"},
+    },
+    spec: {
+        containers: [
+            {name: "nginx", image: "nginx:1.15-alpine"},
+        ],
+    },
+});
diff --git a/tests/sdk/nodejs/generatename/step1/index.ts b/tests/sdk/nodejs/generatename/step1/index.ts
index 7cca8bf554..cb59767dd3 100644
--- a/tests/sdk/nodejs/generatename/step1/index.ts
+++ b/tests/sdk/nodejs/generatename/step1/index.ts
@@ -24,7 +24,7 @@ const namespace = new k8s.core.v1.Namespace("test-namespace");
 // Kubernetes will provide a unique name for the Pod using `.metadata.generateName` as a prefix.
 //
 
-export const pod = new k8s.core.v1.Pod("generatename-test", {
+const pod = new k8s.core.v1.Pod("generatename-test", {
     metadata: {
         namespace: namespace.metadata.name,
         generateName: "generatename-test-",
diff --git a/tests/sdk/nodejs/generatename/step2/index.ts b/tests/sdk/nodejs/generatename/step2/index.ts
index f0591d9812..c45e1ba6ec 100644
--- a/tests/sdk/nodejs/generatename/step2/index.ts
+++ b/tests/sdk/nodejs/generatename/step2/index.ts
@@ -23,7 +23,7 @@ const namespace = new k8s.core.v1.Namespace("test-namespace");
 // The `.metadata.generateName` field has changed, but Pulumi does NOT automatically replace in that situation.
 //
 
-export const pod = new k8s.core.v1.Pod("generatename-test", {
+const pod = new k8s.core.v1.Pod("generatename-test", {
     metadata: {
         namespace: namespace.metadata.name,
         generateName: "generatename-test-modified-",
diff --git a/tests/sdk/nodejs/generatename/step3/index.ts b/tests/sdk/nodejs/generatename/step3/index.ts
index 895899cc0c..2901b09bbd 100644
--- a/tests/sdk/nodejs/generatename/step3/index.ts
+++ b/tests/sdk/nodejs/generatename/step3/index.ts
@@ -25,7 +25,7 @@ const namespace = new k8s.core.v1.Namespace("test-namespace");
 // Pulumi will proceed with replace-before-delete.
 //
 
-export const pod = new k8s.core.v1.Pod("generatename-test", {
+const pod = new k8s.core.v1.Pod("generatename-test", {
     metadata: {
         namespace: namespace.metadata.name,
         generateName: "generatename-test-modified-",
diff --git a/tests/sdk/nodejs/generatename/step4/index.ts b/tests/sdk/nodejs/generatename/step4/index.ts
index f276b6faf0..b1a4c470f0 100644
--- a/tests/sdk/nodejs/generatename/step4/index.ts
+++ b/tests/sdk/nodejs/generatename/step4/index.ts
@@ -24,7 +24,7 @@ const namespace = new k8s.core.v1.Namespace("test-namespace");
 // in-place, and the name should not be changed.
 //
 
-export const pod = new k8s.core.v1.Pod("generatename-test", {
+const pod = new k8s.core.v1.Pod("generatename-test", {
     metadata: {
         namespace: namespace.metadata.name,
         generateName: "generatename-test-modified-",
diff --git a/tests/sdk/nodejs/generatename/step5/index.ts b/tests/sdk/nodejs/generatename/step5/index.ts
index 0ef8f16519..e2cbcb5f18 100644
--- a/tests/sdk/nodejs/generatename/step5/index.ts
+++ b/tests/sdk/nodejs/generatename/step5/index.ts
@@ -23,7 +23,7 @@ const namespace = new k8s.core.v1.Namespace("test-namespace");
 // The name of the pod is now explicitly set to the previously-generated name, so no replace is triggered.
 //
 
-export const pod = new k8s.core.v1.Pod("generatename-test", {
+const pod = new k8s.core.v1.Pod("generatename-test", {
     metadata: {
         namespace: namespace.metadata.name,
         generateName: "generatename-test-modified-",
diff --git a/tests/sdk/nodejs/generatename/step6/index.ts b/tests/sdk/nodejs/generatename/step6/index.ts
index 5bed3fff74..b9e422f324 100644
--- a/tests/sdk/nodejs/generatename/step6/index.ts
+++ b/tests/sdk/nodejs/generatename/step6/index.ts
@@ -21,10 +21,10 @@ const namespace = new k8s.core.v1.Namespace("test-namespace");
 
 //
 // User has now specified `.metadata.name`, so Pulumi should replace the resource, and NOT allocate
-// a name to it.
+// a name to it. Note that `.metadata.generateName` is ignored.
 //
 
-export const pod = new k8s.core.v1.Pod("generatename-test", {
+const pod = new k8s.core.v1.Pod("generatename-test", {
     metadata: {
         namespace: namespace.metadata.name,
         generateName: "generatename-test-modified-",
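Taken together, the step programs above pin down a precedence order among the three naming mechanisms. The following is a condensed, illustrative sketch of that order; the real checks live in Check/Diff and the metadata helpers, and describeNaming is a hypothetical function introduced only for this summary:

// Hypothetical helper summarizing the precedence exercised by the test steps above.
func describeNaming(obj *unstructured.Unstructured, pm resource.PropertyMap) string {
	switch {
	case metadata.IsNamed(obj, pm):
		// .metadata.name wins; changing it forces a replacement.
		return "user-named"
	case metadata.IsGenerateName(obj, pm):
		// Kubernetes appends a random suffix to the prefix on create;
		// rejected by Check under SSA and YAML-render modes.
		return "kubernetes-generated"
	default:
		// Pulumi autonames the resource and records the
		// pulumi.com/autonamed annotation.
		return "pulumi-autonamed"
	}
}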
diff --git a/tests/sdk/nodejs/nodejs_test.go b/tests/sdk/nodejs/nodejs_test.go
index 2830375a01..9341508f09 100644
--- a/tests/sdk/nodejs/nodejs_test.go
+++ b/tests/sdk/nodejs/nodejs_test.go
@@ -112,6 +112,7 @@ func TestAutonaming(t *testing.T) {
 	var step1Name any
 	var step2Name any
 	var step3Name any
+	var step4Name any
 
 	test := baseOptions.With(integration.ProgramTestOptions{
 		Dir: filepath.Join("autonaming", "step1"),
@@ -216,6 +217,36 @@ func TestAutonaming(t *testing.T) {
 				provRes := stackInfo.Deployment.Resources[2]
 				assert.True(t, providers.IsProviderType(provRes.URN.Type()))
 
+				//
+				// Assert Pod was NOT replaced, and has the same name, previously allocated by Pulumi.
+				//
+
+				pod := stackInfo.Deployment.Resources[1]
+				assert.Equal(t, "autonaming-test", string(pod.URN.Name()))
+				step4Name, _ = openapi.Pluck(pod.Outputs, "metadata", "name")
+				assert.True(t, strings.HasPrefix(step4Name.(string), "autonaming-test-"))
+
+				autonamed, _ := openapi.Pluck(pod.Outputs, "metadata", "annotations", "pulumi.com/autonamed")
+				assert.Equal(t, "true", autonamed)
+
+				assert.Equal(t, step3Name, step4Name)
+			},
+		},
+		{
+			Dir:      filepath.Join("autonaming", "step5"),
+			Additive: true,
+			ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
+				assert.NotNil(t, stackInfo.Deployment)
+				assert.Equal(t, 4, len(stackInfo.Deployment.Resources))
+
+				tests.SortResourcesByURN(stackInfo)
+
+				stackRes := stackInfo.Deployment.Resources[3]
+				assert.Equal(t, resource.RootStackType, stackRes.URN.Type())
+
+				provRes := stackInfo.Deployment.Resources[2]
+				assert.True(t, providers.IsProviderType(provRes.URN.Type()))
+
 				//
 				// User has specified their own name for the Pod, so we replace it, and Pulumi does NOT
 				// allocate a name on its own.

From ac08bdc7b51359b4f0855c490865495f8661b46b Mon Sep 17 00:00:00 2001
From: Eron Wright
Date: Fri, 19 Jan 2024 13:49:28 -0800
Subject: [PATCH 11/11] changelog

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 77a532ec08..aabd9fc6de 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,5 @@
 ## Unreleased
+- Support for metadata.generateName (https://github.com/pulumi/pulumi-kubernetes/pull/2594)
 
 ## 4.7.1 (January 17, 2024)
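For context, a minimal user-facing example of the feature the changelog entry describes. This is an illustrative sketch, not part of the patch: per the Check logic above, generateName is rejected under Server-Side Apply and YAML rendering modes in this change, so the sketch pins an explicit provider with SSA disabled; resource names and images are placeholders.

package main

import (
	"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes"
	corev1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/core/v1"
	metav1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/meta/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// generateName is not supported in SSA mode in this change,
		// so use a provider configured for client-side apply.
		provider, err := kubernetes.NewProvider(ctx, "csa", &kubernetes.ProviderArgs{
			EnableServerSideApply: pulumi.Bool(false),
		})
		if err != nil {
			return err
		}

		pod, err := corev1.NewPod(ctx, "example", &corev1.PodArgs{
			Metadata: &metav1.ObjectMetaArgs{
				// Kubernetes appends a random suffix to this prefix on create.
				GenerateName: pulumi.String("example-"),
			},
			Spec: &corev1.PodSpecArgs{
				Containers: corev1.ContainerArray{
					&corev1.ContainerArgs{
						Name:  pulumi.String("nginx"),
						Image: pulumi.String("nginx:1.15-alpine"),
					},
				},
			},
		}, pulumi.Provider(provider))
		if err != nil {
			return err
		}

		// Resolves to the server-assigned name, e.g. "example-x7k2p".
		ctx.Export("podName", pod.Metadata.Name())
		return nil
	})
}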