From 27bd34d6f88b60ed549409ec65011f6a68c2d5c8 Mon Sep 17 00:00:00 2001
From: Luke Kysow <1034429+lkysow@users.noreply.github.com>
Date: Mon, 25 Jan 2021 12:25:20 -0800
Subject: [PATCH] Migrating config entries into custom resources (#419)

Support the annotation consul.hashicorp.com/migrate-entry on custom
resources, which allows an existing config entry to be migrated onto a
Kubernetes custom resource. From then on, the config entry will be
managed by Kubernetes. This supports existing users of the Helm chart
who already have config entries in Consul and want to migrate them to
CRDs.
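For example, assuming an existing service-defaults entry in Consul for a
hypothetical service named `web` with protocol `http`, applying a custom
resource like the sketch below (whose spec matches the Consul entry
exactly) migrates that entry onto the custom resource:

```yaml
apiVersion: consul.hashicorp.com/v1alpha1
kind: ServiceDefaults
metadata:
  name: web  # hypothetical service name, for illustration only
  annotations:
    consul.hashicorp.com/migrate-entry: "true"
spec:
  protocol: "http"
```

If the spec does not match the entry that already exists in Consul, the
controller refuses to overwrite it and sets a MigrationFailedError
condition on the resource.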
---
 CHANGELOG.md                               |   4 +
 api/common/common.go                       |   8 +-
 controller/configentry_controller.go       |  54 ++++++++-
 controller/configentry_controller_test.go  | 136 ++++++++++++++++++++++
 4 files changed, 198 insertions(+), 4 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index dd45e52514..a3a328e004 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -28,6 +28,10 @@ BREAKING CHANGES
       protocol: "http"
   ```
 
+FEATURES:
+* CRDs: support annotation `consul.hashicorp.com/migrate-entry` on custom resources
+  that will allow an existing config entry to be migrated onto a Kubernetes custom resource. [[GH-419](https://github.com/hashicorp/consul-k8s/pull/419)]
+
 ## 0.23.0 (January 22, 2021)
 
 BUG FIXES:
diff --git a/api/common/common.go b/api/common/common.go
index 4e08bd015a..5c1e4a5379 100644
--- a/api/common/common.go
+++ b/api/common/common.go
@@ -15,7 +15,9 @@ const (
 	DefaultConsulNamespace string = "default"
 	WildcardNamespace      string = "*"
 
-	SourceKey     string = "external-source"
-	DatacenterKey string = "consul.hashicorp.com/source-datacenter"
-	SourceValue   string = "kubernetes"
+	SourceKey        string = "external-source"
+	DatacenterKey    string = "consul.hashicorp.com/source-datacenter"
+	MigrateEntryKey  string = "consul.hashicorp.com/migrate-entry"
+	MigrateEntryTrue string = "true"
+	SourceValue      string = "kubernetes"
 )
diff --git a/controller/configentry_controller.go b/controller/configentry_controller.go
index 6298099b5c..c88857f71f 100644
--- a/controller/configentry_controller.go
+++ b/controller/configentry_controller.go
@@ -2,6 +2,7 @@ package controller
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"strings"
 
@@ -21,6 +22,7 @@ const (
 	FinalizerName                = "finalizers.consul.hashicorp.com"
 	ConsulAgentError             = "ConsulAgentError"
 	ExternallyManagedConfigError = "ExternallyManagedConfigError"
+	MigrationFailedError         = "MigrationFailedError"
 )
 
 // Controller is implemented by CRD-specific controllers. It is used by
@@ -191,11 +193,30 @@ func (r *ConfigEntryController) ReconcileEntry(
 	// Check if the config entry is managed by our datacenter.
 	// Do not process resource if the entry was not created within our datacenter
 	// as it was created in a different cluster which will be managing that config entry.
+	requiresMigration := false
 	if entry.GetMeta()[common.DatacenterKey] != r.DatacenterName {
-		return r.syncFailed(ctx, logger, crdCtrl, configEntry, ExternallyManagedConfigError, fmt.Errorf("config entry managed in different datacenter: %q", entry.GetMeta()[common.DatacenterKey]))
+
+		// Note that there is a special case where we will migrate a config entry
+		// that wasn't created by the controller if it has the migrate-entry annotation set to true.
+		// This functionality exists to help folks who are upgrading from older helm
+		// chart versions where they had previously created config entries themselves but
+		// now want to manage them through custom resources.
+		if configEntry.GetObjectMeta().Annotations[common.MigrateEntryKey] != common.MigrateEntryTrue {
+			return r.syncFailed(ctx, logger, crdCtrl, configEntry, ExternallyManagedConfigError, fmt.Errorf("config entry managed in different datacenter: %q", entry.GetMeta()[common.DatacenterKey]))
+		}
+
+		requiresMigration = true
 	}
 
 	if !configEntry.MatchesConsul(entry) {
+		if requiresMigration {
+			// If we're migrating this config entry but the custom resource
+			// doesn't match what's in Consul currently we error out so that
+			// it doesn't overwrite something accidentally.
+			return r.syncFailed(ctx, logger, crdCtrl, configEntry, MigrationFailedError,
+				r.nonMatchingMigrationError(configEntry, entry))
+		}
+
 		logger.Info("config entry does not match consul", "modify-index", entry.GetModifyIndex())
 		_, writeMeta, err := r.ConsulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{
 			Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()),
@@ -206,6 +227,20 @@
 		}
 		logger.Info("config entry updated", "request-time", writeMeta.RequestTime)
 		return r.syncSuccessful(ctx, crdCtrl, configEntry)
+	} else if requiresMigration && entry.GetMeta()[common.DatacenterKey] != r.DatacenterName {
+		// If we get here then we're doing a migration and the entry in Consul
+		// matches the entry in Kubernetes. We just need to update the metadata
+		// of the entry in Consul to say that it's now managed by Kubernetes.
+		logger.Info("migrating config entry to be managed by Kubernetes")
+		_, writeMeta, err := r.ConsulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{
+			Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()),
+		})
+		if err != nil {
+			return r.syncUnknownWithError(ctx, logger, crdCtrl, configEntry, ConsulAgentError,
+				fmt.Errorf("updating config entry in consul: %w", err))
+		}
+		logger.Info("config entry migrated", "request-time", writeMeta.RequestTime)
+		return r.syncSuccessful(ctx, crdCtrl, configEntry)
 	} else if configEntry.SyncedConditionStatus() != corev1.ConditionTrue {
 		return r.syncSuccessful(ctx, crdCtrl, configEntry)
 	}
@@ -271,6 +306,23 @@ func (r *ConfigEntryController) syncUnknownWithError(ctx context.Context,
 	return ctrl.Result{}, err
 }
 
+// nonMatchingMigrationError returns an error that indicates the migration failed
+// because the config entries did not match.
+func (r *ConfigEntryController) nonMatchingMigrationError(kubeEntry common.ConfigEntryResource, consulEntry capi.ConfigEntry) error {
+	// We marshal into JSON to include in the error message so users will know
+	// which fields aren't matching.
+	kubeJSON, err := json.Marshal(kubeEntry.ToConsul(r.DatacenterName))
+	if err != nil {
+		return fmt.Errorf("migration failed: unable to marshal Kubernetes resource: %s", err)
+	}
+	consulJSON, err := json.Marshal(consulEntry)
+	if err != nil {
+		return fmt.Errorf("migration failed: unable to marshal Consul resource: %s", err)
+	}
+
+	return fmt.Errorf("migration failed: Kubernetes resource does not match existing Consul config entry: consul=%s, kube=%s", consulJSON, kubeJSON)
+}
+
 func isNotFoundErr(err error) bool {
 	return err != nil && strings.Contains(err.Error(), "404")
 }
diff --git a/controller/configentry_controller_test.go b/controller/configentry_controller_test.go
index 19c2775d61..e05bf5be0a 100644
--- a/controller/configentry_controller_test.go
+++ b/controller/configentry_controller_test.go
@@ -2701,3 +2701,139 @@ func TestConfigEntryControllers_updatesStatusWhenDeleteFails(t *testing.T) {
 	}
 	require.True(t, cmp.Equal(syncCondition, expectedCondition, cmpopts.IgnoreFields(v1alpha1.Condition{}, "LastTransitionTime")))
 }
+
+// Test that if the resource already exists in Consul but the Kube resource
+// has the "migrate-entry" annotation then we let the Kube resource sync to Consul.
+func TestConfigEntryController_Migration(t *testing.T) {
+	kubeNS := "default"
+	protocol := "http"
+	cfgEntryName := "service"
+
+	cases := map[string]struct {
+		KubeResource   v1alpha1.ServiceDefaults
+		ConsulResource capi.ServiceConfigEntry
+		ExpErr         string
+	}{
+		"identical resources should be migrated successfully": {
+			KubeResource: v1alpha1.ServiceDefaults{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      cfgEntryName,
+					Namespace: kubeNS,
+					Annotations: map[string]string{
+						common.MigrateEntryKey: "true",
+					},
+				},
+				Spec: v1alpha1.ServiceDefaultsSpec{
+					Protocol: protocol,
+				},
+			},
+			ConsulResource: capi.ServiceConfigEntry{
+				Kind:     capi.ServiceDefaults,
+				Name:     cfgEntryName,
+				Protocol: protocol,
+			},
+		},
+		"different resources (protocol) should not be migrated": {
+			KubeResource: v1alpha1.ServiceDefaults{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      cfgEntryName,
+					Namespace: kubeNS,
+					Annotations: map[string]string{
+						common.MigrateEntryKey: "true",
+					},
+				},
+				Spec: v1alpha1.ServiceDefaultsSpec{
+					Protocol: "tcp",
+				},
+			},
+			ConsulResource: capi.ServiceConfigEntry{
+				Kind:     capi.ServiceDefaults,
+				Name:     cfgEntryName,
+				Protocol: protocol,
+			},
+			ExpErr: "migration failed: Kubernetes resource does not match existing Consul config entry",
+		},
+	}
+
+	for name, c := range cases {
+		t.Run(name, func(t *testing.T) {
+			ctx := context.Background()
+
+			s := runtime.NewScheme()
+			s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.ServiceDefaults{})
+
+			client := fake.NewFakeClientWithScheme(s, &c.KubeResource)
+			consul, err := testutil.NewTestServerConfigT(t, nil)
+			require.NoError(t, err)
+			defer consul.Stop()
+
+			consul.WaitForServiceIntentions(t)
+			consulClient, err := capi.NewClient(&capi.Config{
+				Address: consul.HTTPAddr,
+			})
+			require.NoError(t, err)
+
+			// Create the service-defaults in Consul.
+			success, _, err := consulClient.ConfigEntries().Set(&c.ConsulResource, nil)
+			require.NoError(t, err)
+			require.True(t, success, "config entry was not created")
+
+			// Set up the reconciler.
+			logger := logrtest.TestLogger{T: t}
+			svcDefaultsReconciler := ServiceDefaultsController{
+				Client: client,
+				Log:    logger,
+				ConfigEntryController: &ConfigEntryController{
+					ConsulClient:   consulClient,
+					DatacenterName: datacenterName,
+				},
+			}
+
+			defaultsNamespacedName := types.NamespacedName{
+				Namespace: kubeNS,
+				Name:      cfgEntryName,
+			}
+
+			// Trigger the reconciler.
+			resp, err := svcDefaultsReconciler.Reconcile(ctrl.Request{NamespacedName: defaultsNamespacedName})
+			if c.ExpErr != "" {
+				require.Error(t, err)
+				require.Contains(t, err.Error(), c.ExpErr)
+			} else {
+				require.NoError(t, err)
+				require.False(t, resp.Requeue)
+			}
+
+			entryAfterReconcile := &v1alpha1.ServiceDefaults{}
+			err = client.Get(ctx, defaultsNamespacedName, entryAfterReconcile)
+			require.NoError(t, err)
+
+			syncCondition := entryAfterReconcile.GetCondition(v1alpha1.ConditionSynced)
+			if c.ExpErr != "" {
+				// Ensure the status of the resource is migration failed.
+				require.Equal(t, corev1.ConditionFalse, syncCondition.Status)
+				require.Equal(t, MigrationFailedError, syncCondition.Reason)
+				require.Contains(t, syncCondition.Message, c.ExpErr)
+
+				// Check that the Consul resource hasn't changed.
+				entry, _, err := consulClient.ConfigEntries().Get(capi.ServiceDefaults, cfgEntryName, nil)
+				require.NoError(t, err)
+				require.NotContains(t, entry.GetMeta(), common.DatacenterKey)
+				require.Equal(t, protocol, entry.(*capi.ServiceConfigEntry).Protocol)
+			} else {
+				// Ensure the status of the resource is synced.
+				expectedCondition := &v1alpha1.Condition{
+					Type:   v1alpha1.ConditionSynced,
+					Status: corev1.ConditionTrue,
+				}
+				require.True(t, cmp.Equal(syncCondition, expectedCondition, cmpopts.IgnoreFields(v1alpha1.Condition{}, "LastTransitionTime")))
+
+				// Ensure the Consul resource has the expected metadata.
+				entry, _, err := consulClient.ConfigEntries().Get(capi.ServiceDefaults, cfgEntryName, nil)
+				require.NoError(t, err)
+				require.Contains(t, entry.GetMeta(), common.DatacenterKey)
+				require.Equal(t, "datacenter", entry.GetMeta()[common.DatacenterKey])
+			}
+		})
+	}
+}