Migrating config entries into custom resources
Support the annotation consul.hashicorp.com/migrate-entry on custom
resources, which allows an existing config entry to be migrated onto a
Kubernetes custom resource. From then on, the config entry is managed by
Kubernetes.

This supports existing users of the Helm chart who already have config
entries in Consul and want to migrate them to CRDs.
lkysow committed Jan 12, 2021
1 parent 509717f commit 6338946
Showing 4 changed files with 182 additions and 4 deletions.
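To illustrate the annotation in use, here is a minimal sketch (not part of the commit) written against the repository's v1alpha1 types; the service name "web", the protocol, and the package name are illustrative assumptions:

package example

import (
    "github.com/hashicorp/consul-k8s/api/common"
    "github.com/hashicorp/consul-k8s/api/v1alpha1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// A ServiceDefaults custom resource annotated for migration. On reconcile,
// the controller adopts the matching Consul config entry instead of
// rejecting it as externally managed.
var webDefaults = v1alpha1.ServiceDefaults{
    ObjectMeta: metav1.ObjectMeta{
        Name:      "web", // hypothetical service name
        Namespace: "default",
        Annotations: map[string]string{
            // i.e. "consul.hashicorp.com/migrate-entry": "true"
            common.MigrateEntryKey: common.MigrateEntryTrue,
        },
    },
    Spec: v1alpha1.ServiceDefaultsSpec{
        // Must exactly match the entry already in Consul, otherwise the
        // controller reports MigrationFailedError (see the diff below).
        Protocol: "http",
    },
}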
CHANGELOG.md (4 additions, 0 deletions)
@@ -1,5 +1,9 @@
 ## UNRELEASED
 
+FEATURES:
+* CRDs: support annotation `consul.hashicorp.com/migrate-entry` on custom resources
+  that will allow an existing config entry to be migrated onto a Kubernetes custom resource. [[GH-419](https://github.com/hashicorp/consul-k8s/pull/419)]
+
 BUG FIXES:
 * CRDs: Fix issue where a `ServiceIntentions` resource could be continually resynced with Consul
   because Consul's internal representation had a different order for an array than the Kubernetes resource. [[GH-416](https://github.com/hashicorp/consul-k8s/pull/416)]
api/common/common.go (5 additions, 3 deletions)
@@ -15,7 +15,9 @@ const (
     DefaultConsulNamespace string = "default"
     WildcardNamespace      string = "*"
 
-    SourceKey     string = "external-source"
-    DatacenterKey string = "consul.hashicorp.com/source-datacenter"
-    SourceValue   string = "kubernetes"
+    SourceKey        string = "external-source"
+    DatacenterKey    string = "consul.hashicorp.com/source-datacenter"
+    MigrateEntryKey  string = "consul.hashicorp.com/migrate-entry"
+    MigrateEntryTrue string = "true"
+    SourceValue      string = "kubernetes"
 )
controller/configentry_controller.go (37 additions, 1 deletion)
@@ -2,6 +2,7 @@ package controller
 
 import (
     "context"
+    "encoding/json"
     "fmt"
     "strings"
 
@@ -21,6 +22,7 @@ const (
     FinalizerName                = "finalizers.consul.hashicorp.com"
     ConsulAgentError             = "ConsulAgentError"
     ExternallyManagedConfigError = "ExternallyManagedConfigError"
+    MigrationFailedError         = "MigrationFailedError"
 )
 
 // Controller is implemented by CRD-specific controllers. It is used by
@@ -191,11 +193,31 @@ func (r *ConfigEntryController) ReconcileEntry(
     // Check if the config entry is managed by our datacenter.
     // Do not process resource if the entry was not created within our datacenter
     // as it was created in a different cluster which will be managing that config entry.
+    requiresMigration := false
     if entry.GetMeta()[common.DatacenterKey] != r.DatacenterName {
-        return r.syncFailed(ctx, logger, crdCtrl, configEntry, ExternallyManagedConfigError, fmt.Errorf("config entry managed in different datacenter: %q", entry.GetMeta()[common.DatacenterKey]))
+
+        // Note that there is a special case where we will migrate a config entry
+        // that wasn't created by the controller if it has the migrate-entry annotation set to true.
+        // This functionality exists to help folks who are upgrading from older helm
+        // chart versions where they had previously created config entries themselves but
+        // now want to manage them through custom resources.
+        if configEntry.GetObjectMeta().Annotations[common.MigrateEntryKey] != common.MigrateEntryTrue {
+            return r.syncFailed(ctx, logger, crdCtrl, configEntry, ExternallyManagedConfigError, fmt.Errorf("config entry managed in different datacenter: %q", entry.GetMeta()[common.DatacenterKey]))
+        }
+
+        requiresMigration = true
     }
 
     if !configEntry.MatchesConsul(entry) {
+        if requiresMigration {
+            // If we're migrating this config entry, we want to ensure
+            // that the custom resource matches what's in Consul currently.
+            kubeJSON, _ := json.Marshal(configEntry.ToConsul(r.DatacenterName))
+            consulJSON, _ := json.Marshal(entry)
+            return r.syncFailed(ctx, logger, crdCtrl, configEntry, MigrationFailedError,
+                fmt.Errorf("migration failed because spec of this resource does not match existing Consul config entry: consul=%s, resource=%s", consulJSON, kubeJSON))
+        }
+
         logger.Info("config entry does not match consul", "modify-index", entry.GetModifyIndex())
         _, writeMeta, err := r.ConsulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{
             Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()),
@@ -206,6 +228,20 @@
         }
         logger.Info("config entry updated", "request-time", writeMeta.RequestTime)
         return r.syncSuccessful(ctx, crdCtrl, configEntry)
+    } else if requiresMigration && entry.GetMeta()[common.DatacenterKey] != r.DatacenterName {
+        // If we get here then we're doing a migration and the entry in Consul
+        // matches the entry in Kubernetes. We just need to update the metadata
+        // of the entry in Consul to say that it's now managed by Kubernetes.
+        logger.Info("migrating config entry to be managed by Kubernetes")
+        _, writeMeta, err := r.ConsulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{
+            Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()),
+        })
+        if err != nil {
+            return r.syncUnknownWithError(ctx, logger, crdCtrl, configEntry, ConsulAgentError,
+                fmt.Errorf("updating config entry in consul: %w", err))
+        }
+        logger.Info("config entry migrated", "request-time", writeMeta.RequestTime)
+        return r.syncSuccessful(ctx, crdCtrl, configEntry)
     } else if configEntry.SyncedConditionStatus() != corev1.ConditionTrue {
         return r.syncSuccessful(ctx, crdCtrl, configEntry)
     }
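In short, when the migrate-entry annotation is set, reconciliation has three outcomes: a spec mismatch fails with MigrationFailedError, a matching spec gets its Consul metadata rewritten so the entry is recorded as Kubernetes-managed, and an already-synced entry is left alone. Below is a minimal sketch (not part of the commit) of verifying the hand-off through the Consul API after a successful migration; the agent address and entry name are assumptions:

package main

import (
    "fmt"

    capi "github.com/hashicorp/consul/api"
)

func main() {
    // Hypothetical local agent address.
    client, err := capi.NewClient(&capi.Config{Address: "127.0.0.1:8500"})
    if err != nil {
        panic(err)
    }
    // "web" is a hypothetical service-defaults entry name.
    entry, _, err := client.ConfigEntries().Get(capi.ServiceDefaults, "web", nil)
    if err != nil {
        panic(err)
    }
    // After migration, the controller has stamped the entry with its
    // datacenter under the "consul.hashicorp.com/source-datacenter" key,
    // so future reconciles treat it as Kubernetes-managed.
    fmt.Println(entry.GetMeta()["consul.hashicorp.com/source-datacenter"])
}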
controller/configentry_controller_test.go (136 additions, 0 deletions)
@@ -2701,3 +2701,139 @@ func TestConfigEntryControllers_updatesStatusWhenDeleteFails(t *testing.T) {
     }
     require.True(t, cmp.Equal(syncCondition, expectedCondition, cmpopts.IgnoreFields(v1alpha1.Condition{}, "LastTransitionTime")))
 }
+
+// Test that if the resource already exists in Consul but the Kube resource
+// has the "migrate-entry" annotation then we let the Kube resource sync to Consul.
+func TestConfigEntryController_Migration(t *testing.T) {
+    kubeNS := "default"
+    protocol := "http"
+    cfgEntryName := "service"
+
+    cases := map[string]struct {
+        KubeResource   v1alpha1.ServiceDefaults
+        ConsulResource capi.ServiceConfigEntry
+        ExpErr         string
+    }{
+        "identical resources should be migrated successfully": {
+            KubeResource: v1alpha1.ServiceDefaults{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      cfgEntryName,
+                    Namespace: kubeNS,
+                    Annotations: map[string]string{
+                        common.MigrateEntryKey: "true",
+                    },
+                },
+                Spec: v1alpha1.ServiceDefaultsSpec{
+                    Protocol: protocol,
+                },
+            },
+            ConsulResource: capi.ServiceConfigEntry{
+                Kind:     capi.ServiceDefaults,
+                Name:     cfgEntryName,
+                Protocol: protocol,
+            },
+        },
+        "different resources (protocol) should not be migrated": {
+            KubeResource: v1alpha1.ServiceDefaults{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      cfgEntryName,
+                    Namespace: kubeNS,
+                    Annotations: map[string]string{
+                        common.MigrateEntryKey: "true",
+                    },
+                },
+                Spec: v1alpha1.ServiceDefaultsSpec{
+                    Protocol: "tcp",
+                },
+            },
+            ConsulResource: capi.ServiceConfigEntry{
+                Kind:     capi.ServiceDefaults,
+                Name:     cfgEntryName,
+                Protocol: protocol,
+            },
+            ExpErr: "migration failed because spec of this resource does not match existing Consul config entry",
+        },
+    }
+
+    for name, c := range cases {
+        t.Run(name, func(t *testing.T) {
+            ctx := context.Background()
+
+            s := runtime.NewScheme()
+            s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.ServiceDefaults{})
+
+            client := fake.NewFakeClientWithScheme(s, &c.KubeResource)
+            consul, err := testutil.NewTestServerConfigT(t, nil)
+            require.NoError(t, err)
+            defer consul.Stop()
+
+            consul.WaitForServiceIntentions(t)
+            consulClient, err := capi.NewClient(&capi.Config{
+                Address: consul.HTTPAddr,
+            })
+            require.NoError(t, err)
+
+            // Create the service-defaults in Consul.
+            success, _, err := consulClient.ConfigEntries().Set(&c.ConsulResource, nil)
+            require.NoError(t, err)
+            require.True(t, success, "config entry was not created")
+
+            // Set up the reconciler.
+            logger := logrtest.TestLogger{T: t}
+            svcDefaultsReconciler := ServiceDefaultsController{
+                Client: client,
+                Log:    logger,
+                ConfigEntryController: &ConfigEntryController{
+                    ConsulClient:   consulClient,
+                    DatacenterName: datacenterName,
+                },
+            }
+
+            defaultsNamespacedName := types.NamespacedName{
+                Namespace: kubeNS,
+                Name:      cfgEntryName,
+            }
+
+            // Trigger the reconciler.
+            resp, err := svcDefaultsReconciler.Reconcile(ctrl.Request{NamespacedName: defaultsNamespacedName})
+            if c.ExpErr != "" {
+                require.Error(t, err)
+                require.Contains(t, err.Error(), c.ExpErr)
+            } else {
+                require.NoError(t, err)
+                require.False(t, resp.Requeue)
+            }
+
+            entryAfterReconcile := &v1alpha1.ServiceDefaults{}
+            err = client.Get(ctx, defaultsNamespacedName, entryAfterReconcile)
+            require.NoError(t, err)
+
+            syncCondition := entryAfterReconcile.GetCondition(v1alpha1.ConditionSynced)
+            if c.ExpErr != "" {
+                // Ensure the status of the resource is migration failed.
+                require.Equal(t, corev1.ConditionFalse, syncCondition.Status)
+                require.Equal(t, MigrationFailedError, syncCondition.Reason)
+                require.Contains(t, syncCondition.Message, c.ExpErr)
+
+                // Check that the Consul resource hasn't changed.
+                entry, _, err := consulClient.ConfigEntries().Get(capi.ServiceDefaults, cfgEntryName, nil)
+                require.NoError(t, err)
+                require.NotContains(t, entry.GetMeta(), common.DatacenterKey)
+                require.Equal(t, protocol, entry.(*capi.ServiceConfigEntry).Protocol)
+            } else {
+                // Ensure the status of the resource is synced.
+                expectedCondition := &v1alpha1.Condition{
+                    Type:   v1alpha1.ConditionSynced,
+                    Status: corev1.ConditionTrue,
+                }
+                require.True(t, cmp.Equal(syncCondition, expectedCondition, cmpopts.IgnoreFields(v1alpha1.Condition{}, "LastTransitionTime")))
+
+                // Ensure the Consul resource has the expected metadata.
+                entry, _, err := consulClient.ConfigEntries().Get(capi.ServiceDefaults, cfgEntryName, nil)
+                require.NoError(t, err)
+                require.Contains(t, entry.GetMeta(), common.DatacenterKey)
+                require.Equal(t, "datacenter", entry.GetMeta()[common.DatacenterKey])
+            }
+        })
+    }
+}
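Note that the new test starts a real Consul test server via testutil.NewTestServerConfigT, so a consul binary must be available on your PATH. Assuming the standard repository layout, a command along these lines should run just this test:

    go test ./controller/ -run TestConfigEntryController_Migration -v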
