diff --git a/CHANGELOG.md b/CHANGELOG.md index ac5cb2847c..c10778ab5b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ ## Unreleased +- Support for metadata.generateName (https://github.com/pulumi/pulumi-kubernetes/pull/2594) - Fix DiffConfig issue when provider's kubeconfig is set to file path (https://github.com/pulumi/pulumi-kubernetes/pull/2771) diff --git a/provider/pkg/await/await.go b/provider/pkg/await/await.go index 6ae92aac87..a447736763 100644 --- a/provider/pkg/await/await.go +++ b/provider/pkg/await/await.go @@ -90,10 +90,11 @@ type ReadConfig struct { type UpdateConfig struct { ProviderConfig - Previous *unstructured.Unstructured - Inputs *unstructured.Unstructured - Timeout float64 - Preview bool + OldInputs *unstructured.Unstructured + OldOutputs *unstructured.Unstructured + Inputs *unstructured.Unstructured + Timeout float64 + Preview bool // IgnoreChanges is a list of fields to ignore when diffing the old and new objects. IgnoreChanges []string } @@ -101,6 +102,7 @@ type UpdateConfig struct { type DeleteConfig struct { ProviderConfig Inputs *unstructured.Unstructured + Outputs *unstructured.Unstructured Name string Timeout float64 } @@ -251,7 +253,7 @@ func Creation(c CreateConfig) (*unstructured.Unstructured, error) { id := fmt.Sprintf("%s/%s", c.Inputs.GetAPIVersion(), c.Inputs.GetKind()) if awaiter, exists := awaiters[id]; exists { if metadata.SkipAwaitLogic(c.Inputs) { - logger.V(1).Infof("Skipping await logic for %v", c.Inputs.GetName()) + logger.V(1).Infof("Skipping await logic for %v", outputs.GetName()) } else { if awaiter.awaitCreation != nil { conf := createAwaitConfig{ @@ -280,7 +282,7 @@ func Creation(c CreateConfig) (*unstructured.Unstructured, error) { // If the client fails to get the live object for some reason, DO NOT return the error. This // will leak the fact that the object was successfully created. Instead, fall back to the // last-seen live object. - live, err := client.Get(c.Context, c.Inputs.GetName(), metav1.GetOptions{}) + live, err := client.Get(c.Context, outputs.GetName(), metav1.GetOptions{}) if err != nil { return outputs, nil } @@ -306,7 +308,7 @@ func Read(c ReadConfig) (*unstructured.Unstructured, error) { id := fmt.Sprintf("%s/%s", outputs.GetAPIVersion(), outputs.GetKind()) if awaiter, exists := awaiters[id]; exists { if metadata.SkipAwaitLogic(c.Inputs) { - logger.V(1).Infof("Skipping await logic for %v", c.Inputs.GetName()) + logger.V(1).Infof("Skipping await logic for %v", outputs.GetName()) } else { if awaiter.awaitRead != nil { conf := createAwaitConfig{ @@ -353,14 +355,14 @@ func Read(c ReadConfig) (*unstructured.Unstructured, error) { // [3]: // https://kubernetes.io/docs/reference/using-api/server-side-apply func Update(c UpdateConfig) (*unstructured.Unstructured, error) { - client, err := c.ClientSet.ResourceClientForObject(c.Inputs) + client, err := c.ClientSet.ResourceClientForObject(c.OldOutputs) if err != nil { return nil, err } // Get the "live" version of the last submitted object. This is necessary because the server may // have populated some fields automatically, updated status fields, and so on.
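Review note on the `UpdateConfig` change above: `Previous` is split into `OldInputs` and `OldOutputs` because, once `metadata.generateName` is supported, the inputs may carry no concrete name; only the live outputs hold the server-assigned name, which is why client lookups throughout this file are re-keyed from inputs to outputs. A minimal sketch of the underlying behavior, separate from the patch and assuming only `k8s.io/apimachinery`:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// An input object that relies on metadata.generateName has no concrete
	// name until the API server assigns one at creation time.
	inputs := &unstructured.Unstructured{Object: map[string]any{
		"apiVersion": "v1",
		"kind":       "Pod",
		"metadata":   map[string]any{"generateName": "web-"},
	}}
	fmt.Printf("name=%q generateName=%q\n", inputs.GetName(), inputs.GetGenerateName())
	// Prints: name="" generateName="web-"
	// A Get/Patch/Delete keyed on inputs.GetName() would therefore fail;
	// only the recorded outputs know the name the server actually chose.
}
```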
- liveOldObj, err := client.Get(c.Context, c.Previous.GetName(), metav1.GetOptions{}) + liveOldObj, err := client.Get(c.Context, c.OldOutputs.GetName(), metav1.GetOptions{}) if err != nil { return nil, err } @@ -380,7 +382,7 @@ func Update(c UpdateConfig) (*unstructured.Unstructured, error) { id := fmt.Sprintf("%s/%s", c.Inputs.GetAPIVersion(), c.Inputs.GetKind()) if awaiter, exists := awaiters[id]; exists { if metadata.SkipAwaitLogic(c.Inputs) { - logger.V(1).Infof("Skipping await logic for %v", c.Inputs.GetName()) + logger.V(1).Infof("Skipping await logic for %v", currentOutputs.GetName()) } else { if awaiter.awaitUpdate != nil { conf := updateAwaitConfig{ @@ -396,7 +398,7 @@ func Update(c UpdateConfig) (*unstructured.Unstructured, error) { timeout: c.Timeout, clusterVersion: c.ClusterVersion, }, - lastInputs: c.Previous, + lastInputs: c.OldInputs, lastOutputs: liveOldObj, } waitErr := awaiter.awaitUpdate(conf) @@ -411,12 +413,12 @@ func Update(c UpdateConfig) (*unstructured.Unstructured, error) { gvk := c.Inputs.GroupVersionKind() logger.V(3).Infof("Resource %s/%s/%s '%s.%s' patched and updated", gvk.Group, gvk.Version, - gvk.Kind, c.Inputs.GetNamespace(), c.Inputs.GetName()) + gvk.Kind, c.Inputs.GetNamespace(), currentOutputs.GetName()) // If the client fails to get the live object for some reason, DO NOT return the error. This // will leak the fact that the object was successfully created. Instead, fall back to the // last-seen live object. - live, err := client.Get(c.Context, c.Inputs.GetName(), metav1.GetOptions{}) + live, err := client.Get(c.Context, currentOutputs.GetName(), metav1.GetOptions{}) if err != nil { return currentOutputs, nil } @@ -450,7 +452,7 @@ func csaUpdate(c *UpdateConfig, liveOldObj *unstructured.Unstructured, client dy // optimistically rather than failing the update. _ = handleCSAIgnoreFields(c, liveOldObj) // Create merge patch (prefer strategic merge patch, fall back to JSON merge patch). - patch, patchType, _, err := openapi.PatchForResourceUpdate(c.Resources, c.Previous, c.Inputs, liveOldObj) + patch, patchType, _, err := openapi.PatchForResourceUpdate(c.Resources, c.OldInputs, c.Inputs, liveOldObj) if err != nil { return nil, err } @@ -462,7 +464,7 @@ func csaUpdate(c *UpdateConfig, liveOldObj *unstructured.Unstructured, client dy options.DryRun = []string{metav1.DryRunAll} } - return client.Patch(c.Context, c.Inputs.GetName(), patchType, patch, options) + return client.Patch(c.Context, liveOldObj.GetName(), patchType, patch, options) } // ssaUpdate handles the logic for updating a resource using server-side apply. @@ -490,7 +492,7 @@ func ssaUpdate(c *UpdateConfig, liveOldObj *unstructured.Unstructured, client dy options.DryRun = []string{metav1.DryRunAll} } - currentOutputs, err := client.Patch(c.Context, c.Inputs.GetName(), types.ApplyPatchType, objYAML, options) + currentOutputs, err := client.Patch(c.Context, liveOldObj.GetName(), types.ApplyPatchType, objYAML, options) if err != nil { if errors.IsConflict(err) { err = fmt.Errorf("Server-Side Apply field conflict detected. 
See %s for troubleshooting help\n: %w", @@ -543,7 +545,7 @@ func handleSSAIgnoreFields(c *UpdateConfig, liveOldObj *unstructured.Unstructure for _, f := range managedFields { s, err := fluxssa.FieldsToSet(*f.FieldsV1) if err != nil { - return fmt.Errorf("unable to parse managed fields from resource %q into fieldpath.Set: %w", c.Inputs.GetName(), err) + return fmt.Errorf("unable to parse managed fields from resource %q into fieldpath.Set: %w", liveOldObj.GetName(), err) } switch f.Manager { @@ -706,14 +708,14 @@ func Deletion(c DeleteConfig) error { } // Obtain client for the resource being deleted. - client, err := c.ClientSet.ResourceClientForObject(c.Inputs) + client, err := c.ClientSet.ResourceClientForObject(c.Outputs) if err != nil { return nilIfGVKDeleted(err) } patchResource := kinds.IsPatchURN(c.URN) if c.ServerSideApply && patchResource { - err = ssa.Relinquish(c.Context, client, c.Inputs, c.FieldManager) + err = ssa.Relinquish(c.Context, client, c.Outputs, c.FieldManager) return err } @@ -739,10 +741,10 @@ func Deletion(c DeleteConfig) error { // if we don't have an entry for the resource type; in the event that we do, but the await logic // is blank, simply do nothing instead of logging. var waitErr error - id := fmt.Sprintf("%s/%s", c.Inputs.GetAPIVersion(), c.Inputs.GetKind()) + id := fmt.Sprintf("%s/%s", c.Outputs.GetAPIVersion(), c.Outputs.GetKind()) if awaiter, exists := awaiters[id]; exists && awaiter.awaitDeletion != nil { if metadata.SkipAwaitLogic(c.Inputs) { - logger.V(1).Infof("Skipping await logic for %v", c.Inputs.GetName()) + logger.V(1).Infof("Skipping await logic for %v", c.Name) } else { waitErr = awaiter.awaitDeletion(deleteAwaitConfig{ createAwaitConfig: createAwaitConfig{ @@ -752,6 +754,7 @@ func Deletion(c DeleteConfig) error { initialAPIVersion: c.InitialAPIVersion, clientSet: c.ClientSet, currentInputs: c.Inputs, + currentOutputs: c.Outputs, logger: c.DedupLogger, timeout: c.Timeout, clusterVersion: c.ClusterVersion, diff --git a/provider/pkg/await/awaiters.go b/provider/pkg/await/awaiters.go index 7dbde44d25..ea3ff6d8fc 100644 --- a/provider/pkg/await/awaiters.go +++ b/provider/pkg/await/awaiters.go @@ -301,19 +301,19 @@ func untilAppsDeploymentDeleted(config deleteAwaitConfig) error { specReplicas, _ := deploymentSpecReplicas(d) return watcher.RetryableError( - fmt.Errorf("deployment %q still exists (%d / %d replicas exist)", config.currentInputs.GetName(), + fmt.Errorf("deployment %q still exists (%d / %d replicas exist)", d.GetName(), currReplicas, specReplicas)) } // Wait until all replicas are gone. 10 minutes should be enough for ~10k replicas. timeout := metadata.TimeoutDuration(config.timeout, config.currentInputs, 600) - err := watcher.ForObject(config.ctx, config.clientForResource, config.currentInputs.GetName()). + err := watcher.ForObject(config.ctx, config.clientForResource, config.currentOutputs.GetName()). RetryUntil(deploymentMissing, timeout) if err != nil { return err } - logger.V(3).Infof("Deployment '%s' deleted", config.currentInputs.GetName()) + logger.V(3).Infof("Deployment '%s' deleted", config.currentOutputs.GetName()) return nil } @@ -344,19 +344,19 @@ func untilAppsStatefulSetDeleted(config deleteAwaitConfig) error { specReplicas, _ := specReplicas(d) return watcher.RetryableError( - fmt.Errorf("StatefulSet %q still exists (%d / %d replicas exist)", config.currentInputs.GetName(), + fmt.Errorf("StatefulSet %q still exists (%d / %d replicas exist)", d.GetName(), currReplicas, specReplicas)) } // Wait until all replicas are gone. 
10 minutes should be enough for ~10k replicas. timeout := metadata.TimeoutDuration(config.timeout, config.currentInputs, 600) - err := watcher.ForObject(config.ctx, config.clientForResource, config.currentInputs.GetName()). + err := watcher.ForObject(config.ctx, config.clientForResource, config.currentOutputs.GetName()). RetryUntil(statefulsetmissing, timeout) if err != nil { return err } - logger.V(3).Infof("StatefulSet %q deleted", config.currentInputs.GetName()) + logger.V(3).Infof("StatefulSet %q deleted", config.currentOutputs.GetName()) return nil } @@ -375,12 +375,12 @@ func untilBatchV1JobDeleted(config deleteAwaitConfig) error { return err } - e := fmt.Errorf("job %q still exists", config.currentInputs.GetName()) + e := fmt.Errorf("job %q still exists", pod.GetName()) return watcher.RetryableError(e) } timeout := metadata.TimeoutDuration(config.timeout, config.currentInputs, 300) - return watcher.ForObject(config.ctx, config.clientForResource, config.currentInputs.GetName()). + return watcher.ForObject(config.ctx, config.clientForResource, config.currentOutputs.GetName()). RetryUntil(jobMissingOrKilled, timeout) } @@ -396,22 +396,22 @@ func untilCoreV1NamespaceDeleted(config deleteAwaitConfig) error { return nil } else if err != nil { logger.V(3).Infof("Received error deleting namespace %q: %#v", - config.currentInputs.GetName(), err) + ns.GetName(), err) return err } statusPhase, _ := openapi.Pluck(ns.Object, "status", "phase") - logger.V(3).Infof("Namespace %q status received: %#v", config.currentInputs.GetName(), statusPhase) + logger.V(3).Infof("Namespace %q status received: %#v", ns.GetName(), statusPhase) if statusPhase == "" { return nil } return watcher.RetryableError(fmt.Errorf("namespace %q still exists (%v)", - config.currentInputs.GetName(), statusPhase)) + ns.GetName(), statusPhase)) } timeout := metadata.TimeoutDuration(config.timeout, config.currentInputs, 300) - return watcher.ForObject(config.ctx, config.clientForResource, config.currentInputs.GetName()). + return watcher.ForObject(config.ctx, config.clientForResource, config.currentOutputs.GetName()). RetryUntil(namespaceMissingOrKilled, timeout) } @@ -433,11 +433,11 @@ func untilCoreV1PersistentVolumeInitialized(c createAwaitConfig) error { return statusPhase == statusAvailable || statusPhase == statusBound } - client, err := c.clientSet.ResourceClient(c.currentInputs.GroupVersionKind(), c.currentInputs.GetNamespace()) + client, err := c.clientSet.ResourceClient(c.currentOutputs.GroupVersionKind(), c.currentOutputs.GetNamespace()) if err != nil { return err } - return watcher.ForObject(c.ctx, client, c.currentInputs.GetName()). + return watcher.ForObject(c.ctx, client, c.currentOutputs.GetName()). WatchUntil(pvAvailableOrBound, 5*time.Minute) } @@ -454,11 +454,11 @@ func untilCoreV1PersistentVolumeClaimBound(c createAwaitConfig) error { return statusPhase == statusBound } - client, err := c.clientSet.ResourceClient(c.currentInputs.GroupVersionKind(), c.currentInputs.GetNamespace()) + client, err := c.clientSet.ResourceClient(c.currentOutputs.GroupVersionKind(), c.currentOutputs.GetNamespace()) if err != nil { return err } - return watcher.ForObject(c.ctx, client, c.currentInputs.GetName()). + return watcher.ForObject(c.ctx, client, c.currentOutputs.GetName()). 
WatchUntil(pvcBound, 5*time.Minute) } @@ -478,13 +478,13 @@ func untilCoreV1PodDeleted(config deleteAwaitConfig) error { } statusPhase, _ := openapi.Pluck(pod.Object, "status", "phase") - logger.V(3).Infof("Current state of pod %q: %#v", config.currentInputs.GetName(), statusPhase) - e := fmt.Errorf("pod %q still exists (%v)", config.currentInputs.GetName(), statusPhase) + logger.V(3).Infof("Current state of pod %q: %#v", pod.GetName(), statusPhase) + e := fmt.Errorf("pod %q still exists (%v)", pod.GetName(), statusPhase) return watcher.RetryableError(e) } timeout := metadata.TimeoutDuration(config.timeout, config.currentInputs, 300) - return watcher.ForObject(config.ctx, config.clientForResource, config.currentInputs.GetName()). + return watcher.ForObject(config.ctx, config.clientForResource, config.currentOutputs.GetName()). RetryUntil(podMissingOrKilled, timeout) } @@ -503,13 +503,13 @@ func untilCoreV1ReplicationControllerInitialized(c createAwaitConfig) error { return openapi.Pluck(rc.Object, "status", "availableReplicas") } - name := c.currentInputs.GetName() + name := c.currentOutputs.GetName() replicas, _ := openapi.Pluck(c.currentInputs.Object, "spec", "replicas") logger.V(3).Infof("Waiting for replication controller %q to schedule '%v' replicas", name, replicas) - client, err := c.clientSet.ResourceClient(c.currentInputs.GroupVersionKind(), c.currentInputs.GetNamespace()) + client, err := c.clientSet.ResourceClient(c.currentOutputs.GroupVersionKind(), c.currentOutputs.GetNamespace()) if err != nil { return err } @@ -525,8 +525,8 @@ func untilCoreV1ReplicationControllerInitialized(c createAwaitConfig) error { // but that means checking each pod status separately (which can be expensive at scale) // as there's no aggregate data available from the API - logger.V(3).Infof("Replication controller %q initialized: %#v", c.currentInputs.GetName(), - c.currentInputs) + logger.V(3).Infof("Replication controller %q initialized: %#v", name, + c.currentOutputs) return nil } @@ -559,18 +559,18 @@ func untilCoreV1ReplicationControllerDeleted(config deleteAwaitConfig) error { return watcher.RetryableError( fmt.Errorf("ReplicationController %q still exists (%d / %d replicas exist)", - config.currentInputs.GetName(), currReplicas, specReplicas)) + rc.GetName(), currReplicas, specReplicas)) } // Wait until all replicas are gone. 10 minutes should be enough for ~10k replicas. timeout := metadata.TimeoutDuration(config.timeout, config.currentInputs, 600) - err := watcher.ForObject(config.ctx, config.clientForResource, config.currentInputs.GetName()). + err := watcher.ForObject(config.ctx, config.clientForResource, config.currentOutputs.GetName()). 
RetryUntil(rcMissing, timeout) if err != nil { return err } - logger.V(3).Infof("ReplicationController %q deleted", config.currentInputs.GetName()) + logger.V(3).Infof("ReplicationController %q deleted", config.currentOutputs.GetName()) return nil } @@ -589,8 +589,8 @@ func untilCoreV1ResourceQuotaInitialized(c createAwaitConfig) error { hard, hardIsMap := hardRaw.(map[string]any) hardStatus, hardStatusIsMap := hardStatusRaw.(map[string]any) if hardIsMap && hardStatusIsMap && reflect.DeepEqual(hard, hardStatus) { - logger.V(3).Infof("ResourceQuota %q initialized: %#v", c.currentInputs.GetName(), - c.currentInputs) + logger.V(3).Infof("ResourceQuota %q initialized: %#v", quota.GetName(), + quota) return true } logger.V(3).Infof("Quotas don't match after creation.\nExpected: %#v\nGiven: %#v", @@ -598,11 +598,11 @@ func untilCoreV1ResourceQuotaInitialized(c createAwaitConfig) error { return false } - client, err := c.clientSet.ResourceClient(c.currentInputs.GroupVersionKind(), c.currentInputs.GetNamespace()) + client, err := c.clientSet.ResourceClient(c.currentOutputs.GroupVersionKind(), c.currentOutputs.GetNamespace()) if err != nil { return err } - return watcher.ForObject(c.ctx, client, c.currentInputs.GetName()). + return watcher.ForObject(c.ctx, client, c.currentOutputs.GetName()). WatchUntil(rqInitialized, 1*time.Minute) } diff --git a/provider/pkg/await/deployment.go b/provider/pkg/await/deployment.go index 8ea3b10f5e..f41255b192 100644 --- a/provider/pkg/await/deployment.go +++ b/provider/pkg/await/deployment.go @@ -248,7 +248,7 @@ func (dia *deploymentInitAwaiter) Read() error { // Get live versions of Deployment, ReplicaSets, and Pods. deployment, err := deploymentClient.Get(dia.config.ctx, - dia.config.currentInputs.GetName(), + dia.config.currentOutputs.GetName(), metav1.GetOptions{}) if err != nil { // IMPORTANT: Do not wrap this error! If this is a 404, the provider need to know so that it @@ -416,7 +416,7 @@ func (dia *deploymentInitAwaiter) checkAndLogStatus() bool { } func (dia *deploymentInitAwaiter) processDeploymentEvent(event watch.Event) { - inputDeploymentName := dia.config.currentInputs.GetName() + inputDeploymentName := dia.config.currentOutputs.GetName() deployment, isUnstructured := event.Object.(*unstructured.Unstructured) if !isUnstructured { @@ -543,11 +543,11 @@ func (dia *deploymentInitAwaiter) processReplicaSetEvent(event watch.Event) { logger.V(3).Infof("Received update for ReplicaSet %q", rs.GetName()) // Check whether this ReplicaSet was created by our Deployment. - if !isOwnedBy(rs, dia.config.currentInputs) { + if !isOwnedBy(rs, dia.config.currentOutputs) { return } - logger.V(3).Infof("ReplicaSet %q is owned by %q", rs.GetName(), dia.config.currentInputs.GetName()) + logger.V(3).Infof("ReplicaSet %q is owned by %q", rs.GetName(), dia.config.currentOutputs.GetName()) // If Pod was deleted, remove it from our aggregated checkers. 
generation := rs.GetAnnotations()[revision] @@ -560,9 +560,7 @@ func (dia *deploymentInitAwaiter) processReplicaSetEvent(event watch.Event) { } func (dia *deploymentInitAwaiter) checkReplicaSetStatus() { - inputs := dia.config.currentInputs - - logger.V(3).Infof("Checking ReplicaSet status for Deployment %q", inputs.GetName()) + logger.V(3).Infof("Checking ReplicaSet status for Deployment %q", dia.config.currentOutputs.GetName()) rs, updatedReplicaSetCreated := dia.replicaSets[dia.replicaSetGeneration] if dia.replicaSetGeneration == "0" || !updatedReplicaSetCreated { @@ -570,14 +568,14 @@ func (dia *deploymentInitAwaiter) checkReplicaSetStatus() { } logger.V(3).Infof("Deployment %q has generation %q, which corresponds to ReplicaSet %q", - inputs.GetName(), dia.replicaSetGeneration, rs.GetName()) + dia.config.currentOutputs.GetName(), dia.replicaSetGeneration, rs.GetName()) var lastRevision string if outputs := dia.config.lastOutputs; outputs != nil { lastRevision = outputs.GetAnnotations()[revision] } - logger.V(3).Infof("The last generation of Deployment %q was %q", inputs.GetName(), lastRevision) + logger.V(3).Infof("The last generation of Deployment %q was %q", dia.config.currentOutputs.GetName(), lastRevision) // NOTE: Check `.spec.replicas` in the live `ReplicaSet` instead of the last input `Deployment`, // since this is the plan of record. This protects against (e.g.) a user running `kubectl scale` @@ -697,9 +695,7 @@ func (dia *deploymentInitAwaiter) changeTriggeredRollout() bool { } func (dia *deploymentInitAwaiter) checkPersistentVolumeClaimStatus() { - inputs := dia.config.currentInputs - - logger.V(3).Infof("Checking PersistentVolumeClaims status for Deployment %q", inputs.GetName()) + logger.V(3).Infof("Checking PersistentVolumeClaims status for Deployment %q", dia.config.currentOutputs.GetName()) allPVCsReady := true for _, pvc := range dia.pvcs { @@ -873,31 +869,31 @@ func (dia *deploymentInitAwaiter) makeClients() ( deploymentClient, replicaSetClient, podClient, pvcClient dynamic.ResourceInterface, err error, ) { deploymentClient, err = clients.ResourceClient( - kinds.Deployment, dia.config.currentInputs.GetNamespace(), dia.config.clientSet) + kinds.Deployment, dia.config.currentOutputs.GetNamespace(), dia.config.clientSet) if err != nil { err = errors.Wrapf(err, "Could not make client to watch Deployment %q", - dia.config.currentInputs.GetName()) + dia.config.currentOutputs.GetName()) return nil, nil, nil, nil, err } replicaSetClient, err = clients.ResourceClient( - kinds.ReplicaSet, dia.config.currentInputs.GetNamespace(), dia.config.clientSet) + kinds.ReplicaSet, dia.config.currentOutputs.GetNamespace(), dia.config.clientSet) if err != nil { err = errors.Wrapf(err, "Could not make client to watch ReplicaSets associated with Deployment %q", - dia.config.currentInputs.GetName()) + dia.config.currentOutputs.GetName()) return nil, nil, nil, nil, err } podClient, err = clients.ResourceClient( - kinds.Pod, dia.config.currentInputs.GetNamespace(), dia.config.clientSet) + kinds.Pod, dia.config.currentOutputs.GetNamespace(), dia.config.clientSet) if err != nil { err = errors.Wrapf(err, "Could not make client to watch Pods associated with Deployment %q", - dia.config.currentInputs.GetName()) + dia.config.currentOutputs.GetName()) return nil, nil, nil, nil, err } pvcClient, err = clients.ResourceClient( - kinds.PersistentVolumeClaim, dia.config.currentInputs.GetNamespace(), dia.config.clientSet) + kinds.PersistentVolumeClaim, dia.config.currentOutputs.GetNamespace(), 
dia.config.clientSet) if err != nil { err = errors.Wrapf(err, "Could not make client to watch PVCs associated with Deployment %q", - dia.config.currentInputs.GetName()) + dia.config.currentOutputs.GetName()) return nil, nil, nil, nil, err } diff --git a/provider/pkg/await/ingress.go b/provider/pkg/await/ingress.go index 40ce54bcd7..b071f71536 100644 --- a/provider/pkg/await/ingress.go +++ b/provider/pkg/await/ingress.go @@ -116,7 +116,7 @@ func (iia *ingressInitAwaiter) Await() error { defer close(stopper) informerFactory := informers.NewInformerFactory(iia.config.clientSet, - informers.WithNamespaceOrDefault(iia.config.currentInputs.GetNamespace())) + informers.WithNamespaceOrDefault(iia.config.currentOutputs.GetNamespace())) informerFactory.Start(stopper) ingressEvents := make(chan watch.Event) @@ -155,7 +155,7 @@ func (iia *ingressInitAwaiter) Read() error { } // Get live versions of Ingress. - ingress, err := ingressClient.Get(iia.config.ctx, iia.config.currentInputs.GetName(), metav1.GetOptions{}) + ingress, err := ingressClient.Get(iia.config.ctx, iia.config.currentOutputs.GetName(), metav1.GetOptions{}) if err != nil { // IMPORTANT: Do not wrap this error! If this is a 404, the provider need to know so that it // can mark the deployment as having been deleted. @@ -288,7 +288,7 @@ func (iia *ingressInitAwaiter) processServiceEvent(event watch.Event) { } func (iia *ingressInitAwaiter) processIngressEvent(event watch.Event) { - inputIngressName := iia.config.currentInputs.GetName() + inputIngressName := iia.config.currentOutputs.GetName() ingress, isUnstructured := event.Object.(*unstructured.Unstructured) if !isUnstructured { @@ -509,25 +509,25 @@ func (iia *ingressInitAwaiter) makeClients() ( ingressClient, endpointsClient, servicesClient dynamic.ResourceInterface, err error, ) { ingressClient, err = clients.ResourceClient( - kinds.Ingress, iia.config.currentInputs.GetNamespace(), iia.config.clientSet) + kinds.Ingress, iia.config.currentOutputs.GetNamespace(), iia.config.clientSet) if err != nil { return nil, nil, nil, errors.Wrapf(err, "Could not make client to watch Ingress %q", - iia.config.currentInputs.GetName()) + iia.config.currentOutputs.GetName()) } endpointsClient, err = clients.ResourceClient( - kinds.Endpoints, iia.config.currentInputs.GetNamespace(), iia.config.clientSet) + kinds.Endpoints, iia.config.currentOutputs.GetNamespace(), iia.config.clientSet) if err != nil { return nil, nil, nil, errors.Wrapf(err, "Could not make client to watch Endpoints associated with Ingress %q", - iia.config.currentInputs.GetName()) + iia.config.currentOutputs.GetName()) } servicesClient, err = clients.ResourceClient( - kinds.Service, iia.config.currentInputs.GetNamespace(), iia.config.clientSet) + kinds.Service, iia.config.currentOutputs.GetNamespace(), iia.config.clientSet) if err != nil { return nil, nil, nil, errors.Wrapf(err, "Could not make client to watch Services associated with Ingress %q", - iia.config.currentInputs.GetName()) + iia.config.currentOutputs.GetName()) } return diff --git a/provider/pkg/await/job.go b/provider/pkg/await/job.go index 76c42a1e6d..b323d1a6ec 100644 --- a/provider/pkg/await/job.go +++ b/provider/pkg/await/job.go @@ -102,7 +102,7 @@ func (jia *jobInitAwaiter) Await() error { defer close(stopper) informerFactory := informers.NewInformerFactory(jia.config.clientSet, - informers.WithNamespaceOrDefault(jia.config.currentInputs.GetNamespace())) + informers.WithNamespaceOrDefault(jia.config.currentOutputs.GetNamespace())) informerFactory.Start(stopper) 
jobEvents := make(chan watch.Event) @@ -156,18 +156,18 @@ func (jia *jobInitAwaiter) Read() error { stopper := make(chan struct{}) defer close(stopper) - namespace := jia.config.currentInputs.GetNamespace() + namespace := jia.config.currentOutputs.GetNamespace() informerFactory := dynamicinformer.NewFilteredDynamicSharedInformerFactory(jia.config.clientSet.GenericClient, 60*time.Second, namespace, nil) informerFactory.Start(stopper) - jobClient, err := clients.ResourceClient(kinds.Job, jia.config.currentInputs.GetNamespace(), jia.config.clientSet) + jobClient, err := clients.ResourceClient(kinds.Job, jia.config.currentOutputs.GetNamespace(), jia.config.clientSet) if err != nil { return errors.Wrapf(err, "Could not make client to get Job %q", - jia.config.currentInputs.GetName()) + jia.config.currentOutputs.GetName()) } // Get live version of Job. - job, err := jobClient.Get(jia.config.ctx, jia.config.currentInputs.GetName(), metav1.GetOptions{}) + job, err := jobClient.Get(jia.config.ctx, jia.config.currentOutputs.GetName(), metav1.GetOptions{}) if err != nil { // IMPORTANT: Do not wrap this error! If this is a 404, the provider need to know so that it // can mark the Pod as having been deleted. @@ -212,7 +212,7 @@ func (jia *jobInitAwaiter) processJobEvent(event watch.Event) error { } // Do nothing if this is not the job we're waiting for. - if job.GetName() != jia.config.currentInputs.GetName() { + if job.GetName() != jia.config.currentOutputs.GetName() { return nil } diff --git a/provider/pkg/await/pod.go b/provider/pkg/await/pod.go index 07a23068e1..0e28d42b88 100644 --- a/provider/pkg/await/pod.go +++ b/provider/pkg/await/pod.go @@ -150,7 +150,7 @@ func (pia *podInitAwaiter) Await() error { defer close(stopper) informerFactory := informers.NewInformerFactory(pia.config.clientSet, - informers.WithNamespaceOrDefault(pia.config.currentInputs.GetNamespace())) + informers.WithNamespaceOrDefault(pia.config.currentOutputs.GetNamespace())) informerFactory.Start(stopper) podEvents := make(chan watch.Event) @@ -187,14 +187,14 @@ func (pia *podInitAwaiter) Await() error { func (pia *podInitAwaiter) Read() error { podClient, err := clients.ResourceClient( - kinds.Pod, pia.config.currentInputs.GetNamespace(), pia.config.clientSet) + kinds.Pod, pia.config.currentOutputs.GetNamespace(), pia.config.clientSet) if err != nil { return errors.Wrapf(err, "Could not make client to get Pod %q", - pia.config.currentInputs.GetName()) + pia.config.currentOutputs.GetName()) } // Get live version of Pod. - pod, err := podClient.Get(pia.config.ctx, pia.config.currentInputs.GetName(), metav1.GetOptions{}) + pod, err := podClient.Get(pia.config.ctx, pia.config.currentOutputs.GetName(), metav1.GetOptions{}) if err != nil { // IMPORTANT: Do not wrap this error! If this is a 404, the provider need to know so that it // can mark the Pod as having been deleted. @@ -226,7 +226,7 @@ func (pia *podInitAwaiter) processPodEvent(event watch.Event) { } // Do nothing if this is not the pod we're waiting for. 
- if pod.GetName() != pia.config.currentInputs.GetName() { + if pod.GetName() != pia.config.currentOutputs.GetName() { return } diff --git a/provider/pkg/await/service.go b/provider/pkg/await/service.go index 653b7725ba..45ca6f873b 100644 --- a/provider/pkg/await/service.go +++ b/provider/pkg/await/service.go @@ -137,7 +137,7 @@ func (sia *serviceInitAwaiter) Await() error { defer close(stopper) informerFactory := informers.NewInformerFactory(sia.config.clientSet, - informers.WithNamespaceOrDefault(sia.config.currentInputs.GetNamespace())) + informers.WithNamespaceOrDefault(sia.config.currentOutputs.GetNamespace())) informerFactory.Start(stopper) serviceEvents := make(chan watch.Event) @@ -273,7 +273,7 @@ func (sia *serviceInitAwaiter) await( } func (sia *serviceInitAwaiter) processServiceEvent(event watch.Event) { - inputServiceName := sia.config.currentInputs.GetName() + inputServiceName := sia.config.currentOutputs.GetName() service, isUnstructured := event.Object.(*unstructured.Unstructured) if !isUnstructured { diff --git a/provider/pkg/await/statefulset.go b/provider/pkg/await/statefulset.go index 40c6c5aaf4..78af3c3c69 100644 --- a/provider/pkg/await/statefulset.go +++ b/provider/pkg/await/statefulset.go @@ -192,7 +192,7 @@ func (sia *statefulsetInitAwaiter) Await() error { defer close(stopper) informerFactory := informers.NewInformerFactory(sia.config.clientSet, - informers.WithNamespaceOrDefault(sia.config.currentInputs.GetNamespace())) + informers.WithNamespaceOrDefault(sia.config.currentOutputs.GetNamespace())) informerFactory.Start(stopper) statefulSetEvents := make(chan watch.Event) @@ -229,7 +229,7 @@ func (sia *statefulsetInitAwaiter) Read() error { // Get live versions of StatefulSet and Pods. statefulset, err := statefulSetClient.Get(sia.config.ctx, - sia.config.currentInputs.GetName(), + sia.config.currentOutputs.GetName(), metav1.GetOptions{}) if err != nil { // IMPORTANT: Do not wrap this error! 
If this is a 404, the provider need to know so that it @@ -343,7 +343,7 @@ func (sia *statefulsetInitAwaiter) checkAndLogStatus() bool { } func (sia *statefulsetInitAwaiter) processStatefulSetEvent(event watch.Event) { - inputStatefulSetName := sia.config.currentInputs.GetName() + inputStatefulSetName := sia.config.currentOutputs.GetName() statefulset, isUnstructured := event.Object.(*unstructured.Unstructured) if !isUnstructured { @@ -509,18 +509,18 @@ func (sia *statefulsetInitAwaiter) makeClients() ( statefulSetClient, podClient dynamic.ResourceInterface, err error, ) { statefulSetClient, err = clients.ResourceClient( - kinds.StatefulSet, sia.config.currentInputs.GetNamespace(), sia.config.clientSet) + kinds.StatefulSet, sia.config.currentOutputs.GetNamespace(), sia.config.clientSet) if err != nil { return nil, nil, errors.Wrapf(err, "Could not make client to watch StatefulSet %q", - sia.config.currentInputs.GetName()) + sia.config.currentOutputs.GetName()) } podClient, err = clients.ResourceClient( - kinds.Pod, sia.config.currentInputs.GetNamespace(), sia.config.clientSet) + kinds.Pod, sia.config.currentOutputs.GetNamespace(), sia.config.clientSet) if err != nil { return nil, nil, errors.Wrapf(err, "Could not make client to watch Pods associated with StatefulSet %q", - sia.config.currentInputs.GetName()) + sia.config.currentOutputs.GetName()) } return statefulSetClient, podClient, nil diff --git a/provider/pkg/metadata/naming.go b/provider/pkg/metadata/naming.go index 2cd866c631..03c32ef267 100644 --- a/provider/pkg/metadata/naming.go +++ b/provider/pkg/metadata/naming.go @@ -24,14 +24,17 @@ import ( // All auto-named resources get the annotation `pulumi.com/autonamed` for tooling purposes. func AssignNameIfAutonamable(randomSeed []byte, obj *unstructured.Unstructured, propMap resource.PropertyMap, urn resource.URN) { contract.Assertf(urn.Name() != "", "expected non-empty name in URN: %s", urn) - // Check if the .metadata.name is set and is a computed value. If so, do not auto-name. if md, ok := propMap["metadata"].V.(resource.PropertyMap); ok { + // Check if the .metadata.name is set and is a computed value. If so, do not auto-name. if name, ok := md["name"]; ok && name.IsComputed() { return } + // Check if the .metadata.generateName is set and is a computed value. If so, do not auto-name. + if name, ok := md["generateName"]; ok && name.IsComputed() { + return + } } - - if obj.GetName() == "" { + if obj.GetGenerateName() == "" && obj.GetName() == "" { prefix := urn.Name() + "-" autoname, err := resource.NewUniqueName(randomSeed, prefix, 0, 0, nil) contract.AssertNoErrorf(err, "unexpected error while creating NewUniqueName") @@ -42,14 +45,45 @@ func AssignNameIfAutonamable(randomSeed []byte, obj *unstructured.Unstructured, // AdoptOldAutonameIfUnnamed checks if `newObj` has a name, and if not, "adopts" the name of `oldObj` // instead. If `oldObj` was autonamed, then we mark `newObj` as autonamed, too. -func AdoptOldAutonameIfUnnamed(newObj, oldObj *unstructured.Unstructured) { - contract.Assertf(oldObj.GetName() != "", "expected nonempty name for object: %s", oldObj) +// Note that autonaming is preferred over generateName for backwards compatibility. +func AdoptOldAutonameIfUnnamed(newObj, oldObj *unstructured.Unstructured, newObjMap resource.PropertyMap) { + if md, ok := newObjMap["metadata"].V.(resource.PropertyMap); ok { + // Check if the .metadata.name is set and is a computed value. If so, do not auto-name. 
+ if name, ok := md["name"]; ok && name.IsComputed() { + return + } + } if newObj.GetName() == "" && IsAutonamed(oldObj) { + contract.Assertf(oldObj.GetName() != "", "expected nonempty name for object: %s", oldObj) newObj.SetName(oldObj.GetName()) SetAnnotationTrue(newObj, AnnotationAutonamed) } } +// IsAutonamed checks if the object is auto-named by Pulumi. func IsAutonamed(obj *unstructured.Unstructured) bool { return IsAnnotationTrue(obj, AnnotationAutonamed) } + +// IsGenerateName checks if the object is auto-named by Kubernetes. +func IsGenerateName(obj *unstructured.Unstructured, propMap resource.PropertyMap) bool { + if IsNamed(obj, propMap) { + return false + } + if md, ok := propMap["metadata"].V.(resource.PropertyMap); ok { + if name, ok := md["generateName"]; ok && name.IsComputed() { + return true + } + } + return obj.GetGenerateName() != "" +} + +// IsNamed checks if the object has an assigned name (may be a known or computed value). +func IsNamed(obj *unstructured.Unstructured, propMap resource.PropertyMap) bool { + if md, ok := propMap["metadata"].V.(resource.PropertyMap); ok { + if name, ok := md["name"]; ok && name.IsComputed() { + return true + } + } + return obj.GetName() != "" +} diff --git a/provider/pkg/metadata/naming_test.go b/provider/pkg/metadata/naming_test.go index 5179e64c50..e6f41f7943 100644 --- a/provider/pkg/metadata/naming_test.go +++ b/provider/pkg/metadata/naming_test.go @@ -15,10 +15,11 @@ package metadata import ( - "github.com/pulumi/pulumi/sdk/v3/go/common/tokens" "strings" "testing" + "github.com/pulumi/pulumi/sdk/v3/go/common/tokens" + "github.com/pulumi/pulumi/sdk/v3/go/common/resource" "github.com/stretchr/testify/assert" @@ -37,32 +38,54 @@ func TestAssignNameIfAutonamable(t *testing.T) { assert.Len(t, o1.GetName(), 12) // o2 has a name, so autonaming fails. - o2 := &unstructured.Unstructured{ - Object: map[string]any{"metadata": map[string]any{"name": "bar"}}, - } pm2 := resource.PropertyMap{ "metadata": resource.NewObjectProperty(resource.PropertyMap{ "name": resource.NewStringProperty("bar"), }), } + o2 := propMapToUnstructured(pm2) AssignNameIfAutonamable(nil, o2, pm2, resource.NewURN(tokens.QName("teststack"), tokens.PackageName("testproj"), tokens.Type(""), tokens.Type("bang:boom/fizzle:AnotherResource"), "bar")) assert.False(t, IsAutonamed(o2)) assert.Equal(t, "bar", o2.GetName()) // o3 has a computed name, so autonaming fails. - o3 := &unstructured.Unstructured{ - Object: map[string]any{"metadata": map[string]any{"name": "[Computed]"}}, - } pm3 := resource.PropertyMap{ "metadata": resource.NewObjectProperty(resource.PropertyMap{ "name": resource.MakeComputed(resource.NewStringProperty("bar")), }), } + o3 := propMapToUnstructured(pm3) AssignNameIfAutonamable(nil, o3, pm3, resource.NewURN(tokens.QName("teststack"), tokens.PackageName("testproj"), tokens.Type(""), tokens.Type("bang:boom/fizzle:MajorResource"), "foo")) assert.False(t, IsAutonamed(o3)) - assert.Equal(t, "[Computed]", o3.GetName()) + assert.Equal(t, "", o3.GetName()) + + // o4 has a generateName, so autonaming fails. 
+ pm4 := resource.PropertyMap{ + "metadata": resource.NewObjectProperty(resource.PropertyMap{ + "generateName": resource.NewStringProperty("bar-"), + }), + } + o4 := propMapToUnstructured(pm4) + AssignNameIfAutonamable(nil, o4, pm4, resource.NewURN(tokens.QName("teststack"), tokens.PackageName("testproj"), + tokens.Type(""), tokens.Type("bang:boom/fizzle:AnotherResource"), "bar")) + assert.False(t, IsAutonamed(o4)) + assert.Equal(t, "bar-", o4.GetGenerateName()) + assert.Equal(t, "", o4.GetName()) + + // o5 has a computed generateName, so autonaming fails. + pm5 := resource.PropertyMap{ + "metadata": resource.NewObjectProperty(resource.PropertyMap{ + "generateName": resource.MakeComputed(resource.NewStringProperty("bar-")), + }), + } + o5 := propMapToUnstructured(pm5) + AssignNameIfAutonamable(nil, o5, pm5, resource.NewURN(tokens.QName("teststack"), tokens.PackageName("testproj"), + tokens.Type(""), tokens.Type("bang:boom/fizzle:MajorResource"), "foo")) + assert.False(t, IsAutonamed(o5)) + assert.Equal(t, "", o5.GetGenerateName()) + assert.Equal(t, "", o5.GetName()) } func TestAdoptName(t *testing.T) { @@ -77,10 +100,13 @@ func TestAdoptName(t *testing.T) { }, }, } - new1 := &unstructured.Unstructured{ - Object: map[string]any{"metadata": map[string]any{"name": "new1"}}, + pm1 := resource.PropertyMap{ + "metadata": resource.NewObjectProperty(resource.PropertyMap{ + "name": resource.NewStringProperty("new1"), + }), } - AdoptOldAutonameIfUnnamed(new1, old1) + new1 := propMapToUnstructured(pm1) + AdoptOldAutonameIfUnnamed(new1, old1, pm1) assert.Equal(t, "old1", old1.GetName()) assert.True(t, IsAutonamed(old1)) assert.Equal(t, "new1", new1.GetName()) @@ -90,7 +116,8 @@ func TestAdoptName(t *testing.T) { new2 := &unstructured.Unstructured{ Object: map[string]any{}, } - AdoptOldAutonameIfUnnamed(new2, old1) + pm2 := resource.NewPropertyMap(struct{}{}) + AdoptOldAutonameIfUnnamed(new2, old1, pm2) assert.Equal(t, "old1", new2.GetName()) assert.True(t, IsAutonamed(new2)) @@ -98,6 +125,7 @@ func TestAdoptName(t *testing.T) { new3 := &unstructured.Unstructured{ Object: map[string]any{}, } + pm3 := resource.NewPropertyMap(struct{}{}) old2 := &unstructured.Unstructured{ Object: map[string]any{ "metadata": map[string]any{ @@ -105,7 +133,34 @@ func TestAdoptName(t *testing.T) { }, }, } - AdoptOldAutonameIfUnnamed(new3, old2) + AdoptOldAutonameIfUnnamed(new3, old2, pm3) assert.Equal(t, "", new3.GetName()) assert.False(t, IsAutonamed(new3)) + + // new4 has a computed name and therefore DOES NOT adopt old1's name. + pm4 := resource.PropertyMap{ + "metadata": resource.NewObjectProperty(resource.PropertyMap{ + "name": resource.MakeComputed(resource.NewStringProperty("new4")), + }), + } + new4 := propMapToUnstructured(pm4) + assert.Equal(t, "", new4.GetName()) + AdoptOldAutonameIfUnnamed(new4, old1, pm4) + assert.Equal(t, "", new4.GetName()) + assert.False(t, IsAutonamed(new4)) + + // new5 has a generateName and therefore DOES adopt old1's name.
+ pm5 := resource.PropertyMap{ + "metadata": resource.NewObjectProperty(resource.PropertyMap{ + "generateName": resource.NewStringProperty("new5-"), + }), + } + new5 := propMapToUnstructured(pm5) + AdoptOldAutonameIfUnnamed(new5, old1, pm5) + assert.Equal(t, "old1", new5.GetName()) + assert.True(t, IsAutonamed(new5)) +} + +func propMapToUnstructured(pm resource.PropertyMap) *unstructured.Unstructured { + return &unstructured.Unstructured{Object: pm.MapRepl(nil, nil)} } diff --git a/provider/pkg/openapi/openapi.go b/provider/pkg/openapi/openapi.go index a0131d0ec6..ba70dbae30 100644 --- a/provider/pkg/openapi/openapi.go +++ b/provider/pkg/openapi/openapi.go @@ -105,19 +105,19 @@ func PatchForResourceUpdate( if knownGV := kinds.KnownGroupVersions.Has(lastSubmitted.GetAPIVersion()); !knownGV { // Use a JSON merge patch for CRD Kinds. patch, patchType, err = MergePatch( - lastSubmitted, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON, + liveOldObj, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON, ) return patch, patchType, lookupPatchMeta, err } // Attempt a three-way strategic merge. patch, patchType, lookupPatchMeta, err = StrategicMergePatch( - resources, lastSubmitted, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON, + resources, liveOldObj, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON, ) // Else, fall back to a three-way JSON merge patch. if err != nil { patch, patchType, err = MergePatch( - lastSubmitted, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON, + liveOldObj, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON, ) } return patch, patchType, lookupPatchMeta, err @@ -126,12 +126,12 @@ // StrategicMergePatch is a helper to use a three-way strategic merge on a resource version. // See for more details: https://tools.ietf.org/html/rfc6902 func StrategicMergePatch( - resources openapi.Resources, lastSubmitted *unstructured.Unstructured, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON []byte, + resources openapi.Resources, liveOld *unstructured.Unstructured, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON []byte, ) (patch []byte, patchType types.PatchType, lookupPatchMeta strategicpatch.LookupPatchMeta, err error) { - gvk := lastSubmitted.GroupVersionKind() + gvk := liveOld.GroupVersionKind() if resSchema := resources.LookupResource(gvk); resSchema != nil { logger.V(1).Infof("Attempting to update '%s' '%s/%s' with strategic merge", - gvk.String(), lastSubmitted.GetNamespace(), lastSubmitted.GetName()) + gvk.String(), liveOld.GetNamespace(), liveOld.GetName()) patch, patchType, lookupPatchMeta, err = strategicMergePatch( gvk, resSchema, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON) } @@ -144,12 +144,12 @@ func StrategicMergePatch( // MergePatch is a helper to use a three-way JSON merge patch on a resource version. // See for more details: https://tools.ietf.org/html/rfc7386 func MergePatch( - lastSubmitted *unstructured.Unstructured, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON []byte, + liveOld *unstructured.Unstructured, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON []byte, ) (patch []byte, patchType types.PatchType, err error) { - gvk := lastSubmitted.GroupVersionKind() + gvk := liveOld.GroupVersionKind() // Fall back to three-way JSON merge patch.
logger.V(1).Infof("Attempting to update '%s' '%s/%s' with JSON merge", - gvk.String(), lastSubmitted.GetNamespace(), lastSubmitted.GetName()) + gvk.String(), liveOld.GetNamespace(), liveOld.GetName()) patch, patchType, err = jsonMergePatch(lastSubmittedJSON, currentSubmittedJSON, liveOldJSON) return patch, patchType, err } diff --git a/provider/pkg/provider/provider.go b/provider/pkg/provider/provider.go index 4bcaa397c5..8171ba0cd0 100644 --- a/provider/pkg/provider/provider.go +++ b/provider/pkg/provider/provider.go @@ -1293,7 +1293,7 @@ func (k *kubeProvider) Check(ctx context.Context, req *pulumirpc.CheckRequest) ( } if !k.serverSideApplyMode && kinds.IsPatchURN(urn) { - return nil, fmt.Errorf("patch resources require Server-side Apply mode, which is enabled using the " + + return nil, fmt.Errorf("patch resources require Server-Side Apply mode, which is enabled using the " + "`enableServerSideApply` Provider config") } @@ -1333,7 +1333,7 @@ func (k *kubeProvider) Check(ctx context.Context, req *pulumirpc.CheckRequest) ( if k.serverSideApplyMode && kinds.IsPatchURN(urn) { if len(newInputs.GetName()) == 0 { - return nil, fmt.Errorf("patch resources require the resource `.metadata.name` to be set") + return nil, fmt.Errorf("patch resources require the `.metadata.name` field to be set") } } @@ -1348,10 +1348,9 @@ func (k *kubeProvider) Check(ctx context.Context, req *pulumirpc.CheckRequest) ( // needs to be `DeleteBeforeReplace`'d. If the resource is marked `DeleteBeforeReplace`, then // `Create` will allocate it a new name later. if len(oldInputs.Object) > 0 { - // NOTE: If old inputs exist, they have a name, either provided by the user or filled in with a - // previous run of `Check`. - contract.Assertf(oldInputs.GetName() != "", "expected object name to be nonempty: %v", oldInputs) - metadata.AdoptOldAutonameIfUnnamed(newInputs, oldInputs) + // NOTE: If old inputs exist, they MAY have a name, either provided by the user, or based on generateName, + // or filled in with a previous run of `Check`. + metadata.AdoptOldAutonameIfUnnamed(newInputs, oldInputs, news) // If the resource has existing state, we only set the "managed-by: pulumi" label if it is already present. This // avoids causing diffs for cases where the resource is being imported, or was created using SSA. The goal in @@ -1376,6 +1375,14 @@ func (k *kubeProvider) Check(ctx context.Context, req *pulumirpc.CheckRequest) ( } } } + if metadata.IsGenerateName(newInputs, news) { + if k.serverSideApplyMode { + return nil, fmt.Errorf("the `.metadata.generateName` field is not supported in Server-Side Apply mode") + } + if k.yamlRenderMode { + return nil, fmt.Errorf("the `.metadata.generateName` field is not supported in YAML rendering mode") + } + } gvk, err := k.gvkFromURN(urn) if err != nil { @@ -1551,6 +1558,9 @@ func (k *kubeProvider) Diff(ctx context.Context, req *pulumirpc.DiffRequest) (*p newInputs := propMapToUnstructured(newResInputs) oldInputs, oldLive := parseCheckpointObject(oldState) + if !isHelmRelease(urn) { + contract.Assertf(oldLive.GetName() != "", "expected live object name to be nonempty: %v", oldLive) + } oldInputs, err = normalizeInputs(oldInputs) if err != nil { @@ -1599,6 +1609,12 @@ func (k *kubeProvider) Diff(ctx context.Context, req *pulumirpc.DiffRequest) (*p if k.serverSideApplyMode && len(oldLivePruned.GetResourceVersion()) > 0 { oldLivePruned.SetResourceVersion("") } + // If a name was specified in the new inputs, be sure that the old live object has the previous name. 
+ // This makes it possible to update the program to set `.metadata.name` to the name that was + // made by `.metadata.generateName` without triggering replacement. + if newInputs.GetName() != "" { + oldLivePruned.SetName(oldLive.GetName()) + } var patch []byte patchBase := oldLivePruned.Object @@ -1607,15 +1623,15 @@ func (k *kubeProvider) Diff(ctx context.Context, req *pulumirpc.DiffRequest) (*p patch, err = k.inputPatch(oldLivePruned, newInputs) if err != nil { return nil, pkgerrors.Wrapf( - err, "Failed to check for changes in resource %s/%s", newInputs.GetNamespace(), newInputs.GetName()) + err, "Failed to check for changes in resource %s", urn.Name()) } patchObj := map[string]any{} if err = json.Unmarshal(patch, &patchObj); err != nil { return nil, pkgerrors.Wrapf( - err, "Failed to check for changes in resource %s/%s because of an error serializing "+ + err, "Failed to check for changes in resource %s because of an error serializing "+ "the JSON patch describing resource changes", - newInputs.GetNamespace(), newInputs.GetName()) + urn.Name()) } hasChanges := pulumirpc.DiffResponse_DIFF_NONE @@ -1630,9 +1646,9 @@ func (k *kubeProvider) Diff(ctx context.Context, req *pulumirpc.DiffRequest) (*p } if detailedDiff, err = convertPatchToDiff(patchObj, patchBase, newInputs.Object, oldLivePruned.Object, forceNewFields...); err != nil { return nil, pkgerrors.Wrapf( - err, "Failed to check for changes in resource %s/%s because of an error "+ + err, "Failed to check for changes in resource %s because of an error "+ "converting JSON patch describing resource changes to a diff", - newInputs.GetNamespace(), newInputs.GetName()) + urn.Name()) } // Remove any ignored changes from the computed diff. @@ -1682,7 +1698,7 @@ func (k *kubeProvider) Diff(ctx context.Context, req *pulumirpc.DiffRequest) (*p switch newInputs.GetKind() { case "Job": // Fetch current Job status and check point-in-time readiness. Errors are ignored. - if live, err := k.readLiveObject(newInputs); err == nil { + if live, err := k.readLiveObject(oldLive); err == nil { jobChecker := checkjob.NewJobChecker() job, err := clients.FromUnstructured(live) if err == nil { @@ -1703,13 +1719,13 @@ func (k *kubeProvider) Diff(ctx context.Context, req *pulumirpc.DiffRequest) (*p // 1. We know resource must be replaced. len(replaces) > 0 && // 2. Object is NOT autonamed (i.e., user manually named it, and therefore we can't - // auto-generate the name). - !metadata.IsAutonamed(newInputs) && + // auto-generate the name on client or server). + !(metadata.IsAutonamed(newInputs) || metadata.IsGenerateName(newInputs, newResInputs)) && // 3. The new, user-specified name is the same as the old name. - newInputs.GetName() == oldLivePruned.GetName() && + newInputs.GetName() == oldLive.GetName() && // 4. The resource is being deployed to the same namespace (i.e., we aren't creating the // object in a new namespace and then deleting the old one). - newInputs.GetNamespace() == oldLivePruned.GetNamespace() + newInputs.GetNamespace() == oldLive.GetNamespace() return &pulumirpc.DiffResponse{ Changes: hasChanges, @@ -1862,22 +1878,28 @@ func (k *kubeProvider) Create( // If it's a "no match" error, this is probably a CustomResource with no corresponding // CustomResourceDefinition. This usually happens if the CRD was not created, and we // print a more useful error message in this case. 
+ gvk, err := k.gvkFromURN(urn) + if err != nil { + return nil, err + } + gvkStr := gvk.GroupVersion().String() + "/" + gvk.Kind return nil, pkgerrors.Wrapf( - awaitErr, "creation of resource %s failed because the Kubernetes API server "+ + awaitErr, "creation of resource %s with kind %s failed because the Kubernetes API server "+ "reported that the apiVersion for this resource does not exist. "+ - "Verify that any required CRDs have been created", fqObjName(newInputs)) + "Verify that any required CRDs have been created", urn.Name(), gvkStr) } partialErr, isPartialErr := awaitErr.(await.PartialError) if !isPartialErr { // Object creation failed. return nil, pkgerrors.Wrapf( awaitErr, - "resource %s was not successfully created by the Kubernetes API server ", fqObjName(newInputs)) + "resource %s was not successfully created by the Kubernetes API server ", urn.Name()) } // Resource was created, but failed to become fully initialized. initialized = partialErr.Object() } + contract.Assertf(initialized.GetName() != "", "expected live object name to be nonempty: %v", initialized) // We need to delete the empty status field returned from the API server if we are in // preview mode. Having the status field set will cause a panic during preview if the Pulumi @@ -1905,7 +1927,7 @@ func (k *kubeProvider) Create( fqObjName(initialized), pkgerrors.Wrapf( awaitErr, "resource %s was successfully created, but the Kubernetes API server "+ - "reported that it failed to fully initialize or become live", fqObjName(newInputs)), + "reported that it failed to fully initialize or become live", urn.Name()), inputsAndComputed, nil) } @@ -2106,6 +2128,7 @@ func (k *kubeProvider) Read(ctx context.Context, req *pulumirpc.ReadRequest) (*p // If we get here, resource successfully registered with the API server, but failed to // initialize. } + contract.Assertf(liveObj.GetName() != "", "expected live object name to be nonempty: %v", liveObj) // Prune the live inputs to remove properties that are not present in the program inputs. liveInputs := pruneLiveState(liveObj, oldInputs) @@ -2317,7 +2340,8 @@ func (k *kubeProvider) Update( Resources: resources, ServerSideApply: k.serverSideApplyMode, }, - Previous: oldLivePruned, + OldInputs: oldLivePruned, + OldOutputs: oldLive, Inputs: newInputs, Timeout: req.Timeout, Preview: req.GetPreview(), @@ -2338,20 +2362,22 @@ func (k *kubeProvider) Update( return nil, pkgerrors.Wrapf( awaitErr, "update of resource %s failed because the Kubernetes API server "+ "reported that the apiVersion for this resource does not exist. "+ - "Verify that any required CRDs have been created", fqObjName(newInputs)) + "Verify that any required CRDs have been created", urn.Name()) } var getErr error - initialized, getErr = k.readLiveObject(newInputs) + initialized, getErr = k.readLiveObject(oldLive) if getErr != nil { // Object update/creation failed. return nil, pkgerrors.Wrapf( awaitErr, "update of resource %s failed because the Kubernetes API server "+ - "reported that it failed to fully initialize or become live", fqObjName(newInputs)) + "reported that it failed to fully initialize or become live", urn.Name()) } // If we get here, resource successfully registered with the API server, but failed to // initialize. } + contract.Assertf(initialized.GetName() != "", "expected live object name to be nonempty: %v", initialized) + // Return a new "checkpoint object". 
obj := checkpointObject(newInputs, initialized, newResInputs, initialAPIVersion, fieldManager) inputsAndComputed, err := plugin.MarshalProperties( @@ -2372,7 +2398,7 @@ func (k *kubeProvider) Update( fqObjName(initialized), pkgerrors.Wrapf( awaitErr, "the Kubernetes API server reported that %q failed to fully initialize "+ - "or become live", fqObjName(newInputs)), + "or become live", fqObjName(initialized)), inputsAndComputed, nil) } @@ -2380,12 +2406,12 @@ func (k *kubeProvider) Update( if k.serverSideApplyMode { // For non-preview updates, drop the old fieldManager if the value changes. if !req.GetPreview() && fieldManagerOld != fieldManager { - client, err := k.clientSet.ResourceClientForObject(newInputs) + client, err := k.clientSet.ResourceClientForObject(initialized) if err != nil { return nil, err } - err = ssa.Relinquish(k.canceler.context, client, newInputs, fieldManagerOld) + err = ssa.Relinquish(k.canceler.context, client, initialized, fieldManagerOld) if err != nil { return nil, err } @@ -2472,7 +2498,8 @@ func (k *kubeProvider) Delete(ctx context.Context, req *pulumirpc.DeleteRequest) Resources: resources, ServerSideApply: k.serverSideApplyMode, }, - Inputs: current, + Inputs: oldInputs, + Outputs: current, Name: name, Timeout: req.Timeout, } @@ -2594,6 +2621,7 @@ func (k *kubeProvider) gvkFromURN(urn resource.URN) (schema.GroupVersionKind, er } func (k *kubeProvider) readLiveObject(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { + contract.Assertf(obj.GetName() != "", "expected object name to be nonempty: %v", obj) rc, err := k.clientSet.ResourceClientForObject(obj) if err != nil { return nil, err @@ -3225,6 +3253,7 @@ func renderYaml(resource *unstructured.Unstructured, yamlDirectory string) error // renderPathForResource determines the appropriate YAML render path depending on the resource kind. func renderPathForResource(resource *unstructured.Unstructured, yamlDirectory string) string { + contract.Assertf(resource.GetName() != "", "expected object name to be nonempty: %v", resource) crdDirectory := filepath.Join(yamlDirectory, "0-crd") manifestDirectory := filepath.Join(yamlDirectory, "1-manifest") diff --git a/tests/sdk/nodejs/autonaming/step1/Pulumi.yaml b/tests/sdk/nodejs/autonaming/step1/Pulumi.yaml index 9d6c8f4e8b..9387c9ab22 100644 --- a/tests/sdk/nodejs/autonaming/step1/Pulumi.yaml +++ b/tests/sdk/nodejs/autonaming/step1/Pulumi.yaml @@ -1,3 +1,3 @@ name: autonaming-test -description: A program that tests partial provider failure. +description: A program that tests auto-naming of Kubernetes objects. runtime: nodejs diff --git a/tests/sdk/nodejs/autonaming/step1/index.ts b/tests/sdk/nodejs/autonaming/step1/index.ts index 004912cace..573334e85c 100644 --- a/tests/sdk/nodejs/autonaming/step1/index.ts +++ b/tests/sdk/nodejs/autonaming/step1/index.ts @@ -14,14 +14,14 @@ import * as k8s from "@pulumi/kubernetes"; -export const namespace = new k8s.core.v1.Namespace("test-namespace"); +const namespace = new k8s.core.v1.Namespace("test-namespace"); // // A simple Pod definition. `.metadata.name` is not provided, so Pulumi will allocate a unique name // to the resource upon creation. 
 //
-const pod = new k8s.core.v1.Pod("autonaming-test", {
+export const pod = new k8s.core.v1.Pod("autonaming-test", {
     metadata: {
         namespace: namespace.metadata.name,
     },
diff --git a/tests/sdk/nodejs/autonaming/step2/index.ts b/tests/sdk/nodejs/autonaming/step2/index.ts
index d9e41747ab..227537c0c5 100644
--- a/tests/sdk/nodejs/autonaming/step2/index.ts
+++ b/tests/sdk/nodejs/autonaming/step2/index.ts
@@ -14,14 +14,14 @@

 import * as k8s from "@pulumi/kubernetes";

-export const namespace = new k8s.core.v1.Namespace("test-namespace");
+const namespace = new k8s.core.v1.Namespace("test-namespace");

 //
 // The image in the Pod's container has changed, triggering a replace. Because `.metadata.name` is
 // not specified, Pulumi again will provide a name upon creation of the new Pod resource.
 //
-const pod = new k8s.core.v1.Pod("autonaming-test", {
+export const pod = new k8s.core.v1.Pod("autonaming-test", {
     metadata: {
         namespace: namespace.metadata.name,
     },
diff --git a/tests/sdk/nodejs/autonaming/step3/index.ts b/tests/sdk/nodejs/autonaming/step3/index.ts
index 5bb94dec69..8d9070895c 100644
--- a/tests/sdk/nodejs/autonaming/step3/index.ts
+++ b/tests/sdk/nodejs/autonaming/step3/index.ts
@@ -14,14 +14,14 @@

 import * as k8s from "@pulumi/kubernetes";

-export const namespace = new k8s.core.v1.Namespace("test-namespace");
+const namespace = new k8s.core.v1.Namespace("test-namespace");

 //
 // Only the labels have changed, so no replace is triggered. Pulumi should update the object
 // in-place, and the name should not be changed.
 //
-const pod = new k8s.core.v1.Pod("autonaming-test", {
+export const pod = new k8s.core.v1.Pod("autonaming-test", {
     metadata: {
         namespace: namespace.metadata.name,
         labels: {app: "autonaming-test"},
diff --git a/tests/sdk/nodejs/autonaming/step4/index.ts b/tests/sdk/nodejs/autonaming/step4/index.ts
index e1e901df03..3da428f538 100644
--- a/tests/sdk/nodejs/autonaming/step4/index.ts
+++ b/tests/sdk/nodejs/autonaming/step4/index.ts
@@ -14,17 +14,17 @@

 import * as k8s from "@pulumi/kubernetes";

-export const namespace = new k8s.core.v1.Namespace("test-namespace");
+const namespace = new k8s.core.v1.Namespace("test-namespace");

 //
-// User has now specified `.metadata.name`, so Pulumi should replace the resource, and NOT allocate
-// a name to it.
+// User has now specified `.metadata.generateName`, which Pulumi ignores because autonaming has already occurred,
+// so no replace is triggered. Pulumi should update the object in-place, and the name should not be changed.
 //
-const pod = new k8s.core.v1.Pod("autonaming-test", {
+export const pod = new k8s.core.v1.Pod("autonaming-test", {
     metadata: {
         namespace: namespace.metadata.name,
-        name: "autonaming-test",
+        generateName: "autonaming-test-",
         labels: {app: "autonaming-test"},
     },
     spec: {
diff --git a/tests/sdk/nodejs/autonaming/step5/index.ts b/tests/sdk/nodejs/autonaming/step5/index.ts
new file mode 100644
index 0000000000..8fca7c00a2
--- /dev/null
+++ b/tests/sdk/nodejs/autonaming/step5/index.ts
@@ -0,0 +1,35 @@
+// Copyright 2016-2019, Pulumi Corporation.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import * as k8s from "@pulumi/kubernetes";
+
+const namespace = new k8s.core.v1.Namespace("test-namespace");
+
+//
+// User has now specified `.metadata.name`, so Pulumi should replace the resource, and NOT allocate
+// a name to it.
+//
+
+export const pod = new k8s.core.v1.Pod("autonaming-test", {
+    metadata: {
+        namespace: namespace.metadata.name,
+        name: "autonaming-test",
+        labels: {app: "autonaming-test"},
+    },
+    spec: {
+        containers: [
+            {name: "nginx", image: "nginx:1.15-alpine"},
+        ],
+    },
+});
diff --git a/tests/sdk/nodejs/generatename/step1/Pulumi.yaml b/tests/sdk/nodejs/generatename/step1/Pulumi.yaml
new file mode 100644
index 0000000000..bde6c38040
--- /dev/null
+++ b/tests/sdk/nodejs/generatename/step1/Pulumi.yaml
@@ -0,0 +1,5 @@
+name: generatename-test
+description: A program that tests support for `.metadata.generateName`.
+runtime: nodejs
+config:
+  kubernetes:enableServerSideApply: false
diff --git a/tests/sdk/nodejs/generatename/step1/index.ts b/tests/sdk/nodejs/generatename/step1/index.ts
new file mode 100644
index 0000000000..cb59767dd3
--- /dev/null
+++ b/tests/sdk/nodejs/generatename/step1/index.ts
@@ -0,0 +1,37 @@
+// Copyright 2016-2019, Pulumi Corporation.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import * as pulumi from "@pulumi/pulumi";
+import * as k8s from "@pulumi/kubernetes";
+
+const config = new pulumi.Config();
+
+const namespace = new k8s.core.v1.Namespace("test-namespace");
+
+//
+// A simple Pod definition. `.metadata.name` is not provided, but `.metadata.generateName` is.
+// Kubernetes will provide a unique name for the Pod using `.metadata.generateName` as a prefix.
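+// (The API server appends a short random suffix — five characters at the time of writing — to the
+// prefix and returns the final name in the create response, so the name is only known after
+// creation. A sketch of consuming the generated name downstream, for reference:
+//     export const podName = pod.metadata.name;
+// since the output resolves to the server-assigned value.)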
+//
+
+const pod = new k8s.core.v1.Pod("generatename-test", {
+    metadata: {
+        namespace: namespace.metadata.name,
+        generateName: "generatename-test-",
+    },
+    spec: {
+        containers: [
+            {name: "nginx", image: "nginx"},
+        ],
+    },
+});
diff --git a/tests/sdk/nodejs/generatename/step1/package.json b/tests/sdk/nodejs/generatename/step1/package.json
new file mode 100644
index 0000000000..779b1bb5c3
--- /dev/null
+++ b/tests/sdk/nodejs/generatename/step1/package.json
@@ -0,0 +1,12 @@
+{
+    "name": "steps",
+    "version": "0.1.0",
+    "dependencies": {
+        "@pulumi/pulumi": "latest"
+    },
+    "devDependencies": {
+    },
+    "peerDependencies": {
+        "@pulumi/kubernetes": "latest"
+    }
+}
diff --git a/tests/sdk/nodejs/generatename/step1/tsconfig.json b/tests/sdk/nodejs/generatename/step1/tsconfig.json
new file mode 100644
index 0000000000..5dacccbd42
--- /dev/null
+++ b/tests/sdk/nodejs/generatename/step1/tsconfig.json
@@ -0,0 +1,22 @@
+{
+    "compilerOptions": {
+        "outDir": "bin",
+        "target": "es6",
+        "module": "commonjs",
+        "moduleResolution": "node",
+        "declaration": true,
+        "sourceMap": true,
+        "stripInternal": true,
+        "experimentalDecorators": true,
+        "pretty": true,
+        "noFallthroughCasesInSwitch": true,
+        "noImplicitAny": true,
+        "noImplicitReturns": true,
+        "forceConsistentCasingInFileNames": true,
+        "strictNullChecks": true
+    },
+    "files": [
+        "index.ts"
+    ]
+}
+
diff --git a/tests/sdk/nodejs/generatename/step2/index.ts b/tests/sdk/nodejs/generatename/step2/index.ts
new file mode 100644
index 0000000000..c45e1ba6ec
--- /dev/null
+++ b/tests/sdk/nodejs/generatename/step2/index.ts
@@ -0,0 +1,36 @@
+// Copyright 2016-2019, Pulumi Corporation.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import * as pulumi from "@pulumi/pulumi";
+import * as k8s from "@pulumi/kubernetes";
+
+const config = new pulumi.Config();
+
+const namespace = new k8s.core.v1.Namespace("test-namespace");
+
+//
+// The `.metadata.generateName` field has changed, but that alone does NOT trigger a replace; the live object keeps its original name.
+//
+
+const pod = new k8s.core.v1.Pod("generatename-test", {
+    metadata: {
+        namespace: namespace.metadata.name,
+        generateName: "generatename-test-modified-",
+    },
+    spec: {
+        containers: [
+            {name: "nginx", image: "nginx"},
+        ],
+    },
+});
diff --git a/tests/sdk/nodejs/generatename/step3/index.ts b/tests/sdk/nodejs/generatename/step3/index.ts
new file mode 100644
index 0000000000..2901b09bbd
--- /dev/null
+++ b/tests/sdk/nodejs/generatename/step3/index.ts
@@ -0,0 +1,39 @@
+// Copyright 2016-2019, Pulumi Corporation.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import * as pulumi from "@pulumi/pulumi";
+import * as k8s from "@pulumi/kubernetes";
+
+const config = new pulumi.Config();
+
+const namespace = new k8s.core.v1.Namespace("test-namespace");
+
+//
+// The image in the Pod's container has changed, triggering a replace. Because `.metadata.name` is
+// not specified, but `.metadata.generateName` is, Kubernetes again will provide a new name for the replacement.
+// Pulumi will proceed with its default create-before-delete replacement.
+//
+
+const pod = new k8s.core.v1.Pod("generatename-test", {
+    metadata: {
+        namespace: namespace.metadata.name,
+        generateName: "generatename-test-modified-",
+    },
+    spec: {
+        containers: [
+            {name: "nginx", image: "nginx:1.15-alpine"},
+        ],
+    },
+});
+
diff --git a/tests/sdk/nodejs/generatename/step4/index.ts b/tests/sdk/nodejs/generatename/step4/index.ts
new file mode 100644
index 0000000000..b1a4c470f0
--- /dev/null
+++ b/tests/sdk/nodejs/generatename/step4/index.ts
@@ -0,0 +1,39 @@
+// Copyright 2016-2019, Pulumi Corporation.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import * as pulumi from "@pulumi/pulumi";
+import * as k8s from "@pulumi/kubernetes";
+
+const config = new pulumi.Config();
+
+const namespace = new k8s.core.v1.Namespace("test-namespace");
+
+//
+// Only the labels have changed, so no replace is triggered. Pulumi should update the object
+// in-place, and the name should not be changed.
+//
+
+const pod = new k8s.core.v1.Pod("generatename-test", {
+    metadata: {
+        namespace: namespace.metadata.name,
+        generateName: "generatename-test-modified-",
+        labels: {app: "generatename-test"},
+    },
+    spec: {
+        containers: [
+            {name: "nginx", image: "nginx:1.15-alpine"},
+        ],
+    },
+});
+
diff --git a/tests/sdk/nodejs/generatename/step5/index.ts b/tests/sdk/nodejs/generatename/step5/index.ts
new file mode 100644
index 0000000000..e2cbcb5f18
--- /dev/null
+++ b/tests/sdk/nodejs/generatename/step5/index.ts
@@ -0,0 +1,39 @@
+// Copyright 2016-2019, Pulumi Corporation.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import * as pulumi from "@pulumi/pulumi";
+import * as k8s from "@pulumi/kubernetes";
+
+const config = new pulumi.Config();
+
+const namespace = new k8s.core.v1.Namespace("test-namespace");
+
+//
+// The name of the pod is now explicitly set to the previously-generated name, so no replace is triggered.
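+// (Setting `.metadata.name` equal to the live name is a naming no-op: Kubernetes only consults
+// `.metadata.generateName` at creation time, when no name is present, so it is ignored here.)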
+//
+
+const pod = new k8s.core.v1.Pod("generatename-test", {
+    metadata: {
+        namespace: namespace.metadata.name,
+        generateName: "generatename-test-modified-",
+        labels: {app: "generatename-test"},
+        name: config.require("podName"),
+    },
+    spec: {
+        containers: [
+            {name: "nginx", image: "nginx:1.15-alpine"},
+        ],
+    },
+});
+
diff --git a/tests/sdk/nodejs/generatename/step6/index.ts b/tests/sdk/nodejs/generatename/step6/index.ts
new file mode 100644
index 0000000000..b9e422f324
--- /dev/null
+++ b/tests/sdk/nodejs/generatename/step6/index.ts
@@ -0,0 +1,40 @@
+// Copyright 2016-2019, Pulumi Corporation.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import * as pulumi from "@pulumi/pulumi";
+import * as k8s from "@pulumi/kubernetes";
+
+const config = new pulumi.Config();
+
+const namespace = new k8s.core.v1.Namespace("test-namespace");
+
+//
+// User has now specified `.metadata.name`, so Pulumi should replace the resource, and NOT allocate
+// a name to it. Note that `.metadata.generateName` is ignored.
+//
+
+const pod = new k8s.core.v1.Pod("generatename-test", {
+    metadata: {
+        namespace: namespace.metadata.name,
+        generateName: "generatename-test-modified-",
+        labels: {app: "generatename-test"},
+        name: "generatename-test",
+    },
+    spec: {
+        containers: [
+            {name: "nginx", image: "nginx:1.15-alpine"},
+        ],
+    },
+});
+
diff --git a/tests/sdk/nodejs/nodejs_test.go b/tests/sdk/nodejs/nodejs_test.go
index 1da97ca911..ea07920da0 100644
--- a/tests/sdk/nodejs/nodejs_test.go
+++ b/tests/sdk/nodejs/nodejs_test.go
@@ -18,6 +18,7 @@ package test
 import (
 	b64 "encoding/base64"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io/ioutil"
 	"log"
@@ -111,6 +112,7 @@ func TestAutonaming(t *testing.T) {
 	var step1Name any
 	var step2Name any
 	var step3Name any
+	var step4Name any

 	test := baseOptions.With(integration.ProgramTestOptions{
 		Dir: filepath.Join("autonaming", "step1"),
@@ -215,6 +217,36 @@
 					provRes := stackInfo.Deployment.Resources[2]
 					assert.True(t, providers.IsProviderType(provRes.URN.Type()))

+					//
+					// Assert Pod was NOT replaced, and has the same name, previously allocated by Pulumi.
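+					// (Step 4 adds `.metadata.generateName`, which the provider ignores once an
+					// auto-generated name exists, so the step 3 name must survive unchanged.)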
+					//
+
+					pod := stackInfo.Deployment.Resources[1]
+					assert.Equal(t, "autonaming-test", string(pod.URN.Name()))
+					step4Name, _ = openapi.Pluck(pod.Outputs, "metadata", "name")
+					assert.True(t, strings.HasPrefix(step4Name.(string), "autonaming-test-"))
+
+					autonamed, _ := openapi.Pluck(pod.Outputs, "metadata", "annotations", "pulumi.com/autonamed")
+					assert.Equal(t, "true", autonamed)
+
+					assert.Equal(t, step3Name, step4Name)
+				},
+			},
+			{
+				Dir:      filepath.Join("autonaming", "step5"),
+				Additive: true,
+				ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
+					assert.NotNil(t, stackInfo.Deployment)
+					assert.Equal(t, 4, len(stackInfo.Deployment.Resources))
+
+					tests.SortResourcesByURN(stackInfo)
+
+					stackRes := stackInfo.Deployment.Resources[3]
+					assert.Equal(t, resource.RootStackType, stackRes.URN.Type())
+
+					provRes := stackInfo.Deployment.Resources[2]
+					assert.True(t, providers.IsProviderType(provRes.URN.Type()))
+
 					//
 					// User has specified their own name for the Pod, so we replace it, and Pulumi does NOT
 					// allocate a name on its own.
@@ -234,6 +266,138 @@ func TestAutonaming(t *testing.T) {
 	integration.ProgramTest(t, &test)
 }

+func TestGenerateName(t *testing.T) {
+	var pt *integration.ProgramTester
+	var step1Name any
+	var step2Name any
+	var step3Name any
+	var step4Name any
+	var step5Name any
+	var step6Name any
+
+	test := baseOptions.With(integration.ProgramTestOptions{
+		Dir:                  filepath.Join("generatename", "step1"),
+		Quick:                false,
+		SkipRefresh:          false,
+		ExpectRefreshChanges: false,
+		ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
+			assert.NotNil(t, stackInfo.Deployment)
+
+			//
+			// Assert pod is successfully given a unique name by Kubernetes.
+			//
+			pod := tests.SearchResourcesByName(stackInfo, "", "kubernetes:core/v1:Pod", "generatename-test")
+			step1Name, _ = openapi.Pluck(pod.Outputs, "metadata", "name")
+			assert.True(t, strings.HasPrefix(step1Name.(string), "generatename-test-"))
+			generateName, _ := openapi.Pluck(pod.Outputs, "metadata", "generateName")
+			assert.Equal(t, "generatename-test-", generateName.(string))
+			_, autonamed := openapi.Pluck(pod.Outputs, "metadata", "annotations", "pulumi.com/autonamed")
+			assert.False(t, autonamed)
+		},
+		Config: map[string]string{},
+
+		EditDirs: []integration.EditDir{
+			{
+				Dir:      filepath.Join("generatename", "step2"),
+				Additive: true,
+				ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
+					assert.NotNil(t, stackInfo.Deployment)
+
+					//
+					// Assert pod was NOT replaced, and has the same name, previously allocated by Kubernetes.
+					//
+					pod := tests.SearchResourcesByName(stackInfo, "", "kubernetes:core/v1:Pod", "generatename-test")
+					step2Name, _ = openapi.Pluck(pod.Outputs, "metadata", "name")
+					assert.Equal(t, step1Name, step2Name)
+					generateName, _ := openapi.Pluck(pod.Outputs, "metadata", "generateName")
+					assert.Equal(t, "generatename-test-modified-", generateName.(string))
+					_, autonamed := openapi.Pluck(pod.Outputs, "metadata", "annotations", "pulumi.com/autonamed")
+					assert.False(t, autonamed)
+				},
+			},
+			{
+				Dir:      filepath.Join("generatename", "step3"),
+				Additive: true,
+				ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
+					assert.NotNil(t, stackInfo.Deployment)
+
+					//
+					// Assert pod was replaced, i.e., destroyed and re-created, and was allocated a new name.
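+					// (The image change triggers a replacement of the Pod; because only
+					// `generateName` is set, the API server mints a fresh name for the new Pod.)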
+					//
+					pod := tests.SearchResourcesByName(stackInfo, "", "kubernetes:core/v1:Pod", "generatename-test")
+					step3Name, _ = openapi.Pluck(pod.Outputs, "metadata", "name")
+					assert.NotEqual(t, step2Name, step3Name)
+					assert.True(t, strings.HasPrefix(step3Name.(string), "generatename-test-modified-"))
+					_, autonamed := openapi.Pluck(pod.Outputs, "metadata", "annotations", "pulumi.com/autonamed")
+					assert.False(t, autonamed)
+				},
+			},
+			{
+				Dir:      filepath.Join("generatename", "step4"),
+				Additive: true,
+				ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
+					assert.NotNil(t, stackInfo.Deployment)
+
+					//
+					// Assert pod was NOT replaced, and has the same name, previously allocated by Kubernetes.
+					//
+					pod := tests.SearchResourcesByName(stackInfo, "", "kubernetes:core/v1:Pod", "generatename-test")
+					step4Name, _ = openapi.Pluck(pod.Outputs, "metadata", "name")
+					assert.Equal(t, step3Name, step4Name)
+					_, autonamed := openapi.Pluck(pod.Outputs, "metadata", "annotations", "pulumi.com/autonamed")
+					assert.False(t, autonamed)
+
+					// Update the configuration for subsequent steps.
+					require.NoError(t,
+						pt.RunPulumiCommand("config", "set", "podName", step4Name.(string)),
+						"failed to set podName config")
+				},
+			},
+			{
+				Dir:             filepath.Join("generatename", "step5"),
+				Additive:        true,
+				ExpectNoChanges: true,
+				ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
+					assert.NotNil(t, stackInfo.Deployment)
+
+					//
+					// User has explicitly set the name to the previously-generated name (maybe for clarity),
+					// and Pulumi does NOT replace the pod.
+					//
+					pod := tests.SearchResourcesByName(stackInfo, "", "kubernetes:core/v1:Pod", "generatename-test")
+					step5Name, _ = openapi.Pluck(pod.Outputs, "metadata", "name")
+					assert.Equal(t, step4Name, step5Name)
+					_, autonamed := openapi.Pluck(pod.Outputs, "metadata", "annotations", "pulumi.com/autonamed")
+					assert.False(t, autonamed)
+				},
+			},
+			{
+				Dir:      filepath.Join("generatename", "step6"),
+				Additive: true,
+				ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
+					assert.NotNil(t, stackInfo.Deployment)
+
+					//
+					// User has specified their own name for the Pod, so we replace it, and Pulumi/Kubernetes does NOT
+					// allocate a name on its own.
+					//
+					pod := tests.SearchResourcesByName(stackInfo, "", "kubernetes:core/v1:Pod", "generatename-test")
+					step6Name, _ = openapi.Pluck(pod.Outputs, "metadata", "name")
+					assert.NotEqual(t, step5Name, step6Name)
+					assert.Equal(t, "generatename-test", step6Name.(string))
+					_, autonamed := openapi.Pluck(pod.Outputs, "metadata", "annotations", "pulumi.com/autonamed")
+					assert.False(t, autonamed)
+				},
+			},
+		},
+	})
+	pt = integration.ProgramTestManualLifeCycle(t, &test)
+	err := pt.TestLifeCycleInitAndDestroy()
+	if !errors.Is(err, integration.ErrTestFailed) {
+		assert.NoError(t, err)
+	}
+}
+
 func TestCRDs(t *testing.T) {
 	test := baseOptions.With(integration.ProgramTestOptions{
 		Dir: filepath.Join("crds", "step1"),