From 6338946f26d494a948b4c49f38ba4b105a40cf0a Mon Sep 17 00:00:00 2001
From: Luke Kysow <1034429+lkysow@users.noreply.github.com>
Date: Tue, 12 Jan 2021 11:56:28 -0800
Subject: [PATCH] Migrating config entries into custom resources

Support the annotation consul.hashicorp.com/migrate-entry on custom
resources so that an existing config entry can be migrated onto a
Kubernetes custom resource. From then on, the config entry will be
managed by Kubernetes. This supports existing users of the Helm chart
who already have config entries in Consul and want to migrate to CRDs.
---
 CHANGELOG.md                              |   4 +
 api/common/common.go                      |   8 +-
 controller/configentry_controller.go      |  38 +++++-
 controller/configentry_controller_test.go | 136 ++++++++++++++++++++++
 4 files changed, 182 insertions(+), 4 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9d93944286..9d849bfa80 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,9 @@
 ## UNRELEASED
 
+FEATURES:
+* CRDs: support the annotation `consul.hashicorp.com/migrate-entry` on custom resources,
+  allowing an existing config entry to be migrated onto a Kubernetes custom resource. [[GH-419](https://github.com/hashicorp/consul-k8s/pull/419)]
+
 BUG FIXES:
 * CRDs: Fix issue where a `ServiceIntentions` resource could be continually resynced with Consul
   because Consul's internal representation had a different order for an array than the Kubernetes resource. [[GH-416](https://github.com/hashicorp/consul-k8s/pull/416)]
diff --git a/api/common/common.go b/api/common/common.go
index 4e08bd015a..5c1e4a5379 100644
--- a/api/common/common.go
+++ b/api/common/common.go
@@ -15,7 +15,9 @@ const (
 	DefaultConsulNamespace string = "default"
 	WildcardNamespace      string = "*"
 
-	SourceKey      string = "external-source"
-	DatacenterKey  string = "consul.hashicorp.com/source-datacenter"
-	SourceValue    string = "kubernetes"
+	SourceKey        string = "external-source"
+	DatacenterKey    string = "consul.hashicorp.com/source-datacenter"
+	MigrateEntryKey  string = "consul.hashicorp.com/migrate-entry"
+	MigrateEntryTrue string = "true"
+	SourceValue      string = "kubernetes"
 )
diff --git a/controller/configentry_controller.go b/controller/configentry_controller.go
index 6298099b5c..b7548b8b8c 100644
--- a/controller/configentry_controller.go
+++ b/controller/configentry_controller.go
@@ -2,6 +2,7 @@ package controller
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"strings"
 
@@ -21,6 +22,7 @@ const (
 	FinalizerName                = "finalizers.consul.hashicorp.com"
 	ConsulAgentError             = "ConsulAgentError"
 	ExternallyManagedConfigError = "ExternallyManagedConfigError"
+	MigrationFailedError         = "MigrationFailedError"
 )
 
 // Controller is implemented by CRD-specific controllers. It is used by
@@ -191,11 +193,31 @@ func (r *ConfigEntryController) ReconcileEntry(
 		// Check if the config entry is managed by our datacenter.
 		// Do not process resource if the entry was not created within our datacenter
 		// as it was created in a different cluster which will be managing that config entry.
+		requiresMigration := false
 		if entry.GetMeta()[common.DatacenterKey] != r.DatacenterName {
-			return r.syncFailed(ctx, logger, crdCtrl, configEntry, ExternallyManagedConfigError, fmt.Errorf("config entry managed in different datacenter: %q", entry.GetMeta()[common.DatacenterKey]))
+
+			// Note that there is a special case where we will migrate a config entry
+			// that wasn't created by the controller if it has the migrate-entry annotation set to true.
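+			// (For example, an entry originally created with `consul config write`
+			// can be adopted by annotating the corresponding custom resource with
+			// `consul.hashicorp.com/migrate-entry: "true"`.)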
+			// This functionality exists to help folks who are upgrading from older Helm
+			// chart versions where they had previously created config entries themselves but
+			// now want to manage them through custom resources.
+			if configEntry.GetObjectMeta().Annotations[common.MigrateEntryKey] != common.MigrateEntryTrue {
+				return r.syncFailed(ctx, logger, crdCtrl, configEntry, ExternallyManagedConfigError, fmt.Errorf("config entry managed in different datacenter: %q", entry.GetMeta()[common.DatacenterKey]))
+			}
+
+			requiresMigration = true
 		}
 
 		if !configEntry.MatchesConsul(entry) {
+			if requiresMigration {
+				// If we're migrating this config entry, we want to ensure
+				// that the custom resource matches what's in Consul currently.
+				kubeJSON, _ := json.Marshal(configEntry.ToConsul(r.DatacenterName))
+				consulJSON, _ := json.Marshal(entry)
+				return r.syncFailed(ctx, logger, crdCtrl, configEntry, MigrationFailedError,
+					fmt.Errorf("migration failed because spec of this resource does not match existing Consul config entry: consul=%s, resource=%s", consulJSON, kubeJSON))
+			}
+
 			logger.Info("config entry does not match consul", "modify-index", entry.GetModifyIndex())
 			_, writeMeta, err := r.ConsulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{
 				Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()),
@@ -206,6 +228,20 @@ func (r *ConfigEntryController) ReconcileEntry(
 			}
 			logger.Info("config entry updated", "request-time", writeMeta.RequestTime)
 			return r.syncSuccessful(ctx, crdCtrl, configEntry)
+		} else if requiresMigration && entry.GetMeta()[common.DatacenterKey] != r.DatacenterName {
+			// If we get here then we're doing a migration and the entry in Consul
+			// matches the entry in Kubernetes. We just need to update the metadata
+			// of the entry in Consul to say that it's now managed by Kubernetes.
+			logger.Info("migrating config entry to be managed by Kubernetes")
+			_, writeMeta, err := r.ConsulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{
+				Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()),
+			})
+			if err != nil {
+				return r.syncUnknownWithError(ctx, logger, crdCtrl, configEntry, ConsulAgentError,
+					fmt.Errorf("updating config entry in consul: %w", err))
+			}
+			logger.Info("config entry migrated", "request-time", writeMeta.RequestTime)
+			return r.syncSuccessful(ctx, crdCtrl, configEntry)
 		} else if configEntry.SyncedConditionStatus() != corev1.ConditionTrue {
 			return r.syncSuccessful(ctx, crdCtrl, configEntry)
 		}
diff --git a/controller/configentry_controller_test.go b/controller/configentry_controller_test.go
index 19c2775d61..9b5d88c509 100644
--- a/controller/configentry_controller_test.go
+++ b/controller/configentry_controller_test.go
@@ -2701,3 +2701,139 @@ func TestConfigEntryControllers_updatesStatusWhenDeleteFails(t *testing.T) {
 	}
 	require.True(t, cmp.Equal(syncCondition, expectedCondition, cmpopts.IgnoreFields(v1alpha1.Condition{}, "LastTransitionTime")))
 }
+
+// Test that if the config entry already exists in Consul and the Kube resource
+// has the "migrate-entry" annotation, we let the Kube resource take over managing
+// that entry.
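+// Migration only succeeds when the resource's spec matches the existing Consul
+// entry exactly; otherwise the resource's status is set to MigrationFailedError
+// and the Consul entry is left unchanged.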
+func TestConfigEntryController_Migration(t *testing.T) {
+	kubeNS := "default"
+	protocol := "http"
+	cfgEntryName := "service"
+
+	cases := map[string]struct {
+		KubeResource   v1alpha1.ServiceDefaults
+		ConsulResource capi.ServiceConfigEntry
+		ExpErr         string
+	}{
+		"identical resources should be migrated successfully": {
+			KubeResource: v1alpha1.ServiceDefaults{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      cfgEntryName,
+					Namespace: kubeNS,
+					Annotations: map[string]string{
+						common.MigrateEntryKey: "true",
+					},
+				},
+				Spec: v1alpha1.ServiceDefaultsSpec{
+					Protocol: protocol,
+				},
+			},
+			ConsulResource: capi.ServiceConfigEntry{
+				Kind:     capi.ServiceDefaults,
+				Name:     cfgEntryName,
+				Protocol: protocol,
+			},
+		},
+		"different resources (protocol) should not be migrated": {
+			KubeResource: v1alpha1.ServiceDefaults{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      cfgEntryName,
+					Namespace: kubeNS,
+					Annotations: map[string]string{
+						common.MigrateEntryKey: "true",
+					},
+				},
+				Spec: v1alpha1.ServiceDefaultsSpec{
+					Protocol: "tcp",
+				},
+			},
+			ConsulResource: capi.ServiceConfigEntry{
+				Kind:     capi.ServiceDefaults,
+				Name:     cfgEntryName,
+				Protocol: protocol,
+			},
+			ExpErr: "migration failed because spec of this resource does not match existing Consul config entry",
+		},
+	}
+
+	for name, c := range cases {
+		t.Run(name, func(t *testing.T) {
+			ctx := context.Background()
+
+			s := runtime.NewScheme()
+			s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.ServiceDefaults{})
+
+			client := fake.NewFakeClientWithScheme(s, &c.KubeResource)
+			consul, err := testutil.NewTestServerConfigT(t, nil)
+			require.NoError(t, err)
+			defer consul.Stop()
+
+			consul.WaitForServiceIntentions(t)
+			consulClient, err := capi.NewClient(&capi.Config{
+				Address: consul.HTTPAddr,
+			})
+			require.NoError(t, err)
+
+			// Create the service-defaults in Consul.
+			success, _, err := consulClient.ConfigEntries().Set(&c.ConsulResource, nil)
+			require.NoError(t, err)
+			require.True(t, success, "config entry was not created")
+
+			// Set up the reconciler.
+			logger := logrtest.TestLogger{T: t}
+			svcDefaultsReconciler := ServiceDefaultsController{
+				Client: client,
+				Log:    logger,
+				ConfigEntryController: &ConfigEntryController{
+					ConsulClient:   consulClient,
+					DatacenterName: datacenterName,
+				},
+			}
+
+			defaultsNamespacedName := types.NamespacedName{
+				Namespace: kubeNS,
+				Name:      cfgEntryName,
+			}
+
+			// Trigger the reconciler.
+			resp, err := svcDefaultsReconciler.Reconcile(ctrl.Request{NamespacedName: defaultsNamespacedName})
+			if c.ExpErr != "" {
+				require.Error(t, err)
+				require.Contains(t, err.Error(), c.ExpErr)
+			} else {
+				require.NoError(t, err)
+				require.False(t, resp.Requeue)
+			}
+
+			entryAfterReconcile := &v1alpha1.ServiceDefaults{}
+			err = client.Get(ctx, defaultsNamespacedName, entryAfterReconcile)
+			require.NoError(t, err)
+
+			syncCondition := entryAfterReconcile.GetCondition(v1alpha1.ConditionSynced)
+			if c.ExpErr != "" {
+				// Ensure the status of the resource is MigrationFailedError.
+				require.Equal(t, corev1.ConditionFalse, syncCondition.Status)
+				require.Equal(t, MigrationFailedError, syncCondition.Reason)
+				require.Contains(t, syncCondition.Message, c.ExpErr)
+
+				// Check that the Consul resource hasn't changed.
+				entry, _, err := consulClient.ConfigEntries().Get(capi.ServiceDefaults, cfgEntryName, nil)
+				require.NoError(t, err)
+				require.NotContains(t, entry.GetMeta(), common.DatacenterKey)
+				require.Equal(t, protocol, entry.(*capi.ServiceConfigEntry).Protocol)
+			} else {
+				// Ensure the status of the resource is synced.
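+				// A successful migration reports the same Synced=True condition
+				// that a regular create or update would.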
+				expectedCondition := &v1alpha1.Condition{
+					Type:   v1alpha1.ConditionSynced,
+					Status: corev1.ConditionTrue,
+				}
+				require.True(t, cmp.Equal(syncCondition, expectedCondition, cmpopts.IgnoreFields(v1alpha1.Condition{}, "LastTransitionTime")))
+
+				// Ensure the Consul resource has the expected metadata.
+				entry, _, err := consulClient.ConfigEntries().Get(capi.ServiceDefaults, cfgEntryName, nil)
+				require.NoError(t, err)
+				require.Contains(t, entry.GetMeta(), common.DatacenterKey)
+				require.Equal(t, datacenterName, entry.GetMeta()[common.DatacenterKey])
+			}
+		})
+	}
+}
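
---

Usage note: a minimal standalone sketch (not part of this patch) of how one might
verify from outside the cluster that an entry was migrated. It assumes a reachable
Consul agent at the default address and the entry name "service" used in the test
above; the program itself is hypothetical and only uses the public Consul API client.

	package main

	import (
		"fmt"
		"log"

		capi "github.com/hashicorp/consul/api"
	)

	func main() {
		// Connect to the local Consul agent using default settings.
		client, err := capi.NewClient(capi.DefaultConfig())
		if err != nil {
			log.Fatal(err)
		}

		// Fetch the service-defaults entry that was migrated.
		entry, _, err := client.ConfigEntries().Get(capi.ServiceDefaults, "service", nil)
		if err != nil {
			log.Fatal(err)
		}

		// After a successful migration the controller stamps the entry with the
		// source-datacenter meta key, marking it as managed by Kubernetes.
		if dc, ok := entry.GetMeta()["consul.hashicorp.com/source-datacenter"]; ok {
			fmt.Printf("entry is managed by Kubernetes (datacenter %q)\n", dc)
		} else {
			fmt.Println("entry is not yet managed by Kubernetes")
		}
	}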