diff --git a/acceptance/framework/consul/cli_cluster.go b/acceptance/framework/consul/cli_cluster.go index bc5f7dde14..5493533909 100644 --- a/acceptance/framework/consul/cli_cluster.go +++ b/acceptance/framework/consul/cli_cluster.go @@ -74,8 +74,8 @@ func NewCLICluster( require.NoError(t, err) // Merge all helm values - MergeMaps(values, valuesFromConfig) - MergeMaps(values, helmValues) + helpers.MergeMaps(values, valuesFromConfig) + helpers.MergeMaps(values, helmValues) logger := terratestLogger.New(logger.TestLogger{}) @@ -144,7 +144,7 @@ func (h *CLICluster) Create(t *testing.T) { } require.NoError(t, err) - helpers.WaitForAllPodsToBeReady(t, h.kubernetesClient, consulNS, fmt.Sprintf("release=%s", h.releaseName)) + k8s.WaitForAllPodsToBeReady(t, h.kubernetesClient, consulNS, fmt.Sprintf("release=%s", h.releaseName)) } func (h *CLICluster) Destroy(t *testing.T) { @@ -179,9 +179,9 @@ func (h *CLICluster) Destroy(t *testing.T) { func (h *CLICluster) Upgrade(t *testing.T, helmValues map[string]string) { t.Helper() - MergeMaps(h.helmOptions.SetValues, helmValues) + helpers.MergeMaps(h.helmOptions.SetValues, helmValues) helm.Upgrade(t, h.helmOptions, config.HelmChartPath, h.releaseName) - helpers.WaitForAllPodsToBeReady(t, h.kubernetesClient, h.helmOptions.KubectlOptions.Namespace, fmt.Sprintf("release=%s", h.releaseName)) + k8s.WaitForAllPodsToBeReady(t, h.kubernetesClient, h.helmOptions.KubectlOptions.Namespace, fmt.Sprintf("release=%s", h.releaseName)) } func (h *CLICluster) SetupConsulClient(t *testing.T, secure bool) *api.Client { diff --git a/acceptance/framework/consul/consul_cluster.go b/acceptance/framework/consul/consul_cluster.go index 74c018093d..cda22d9322 100644 --- a/acceptance/framework/consul/consul_cluster.go +++ b/acceptance/framework/consul/consul_cluster.go @@ -75,8 +75,8 @@ func NewHelmCluster( require.NoError(t, err) // Merge all helm values - MergeMaps(values, valuesFromConfig) - MergeMaps(values, helmValues) + helpers.MergeMaps(values, valuesFromConfig) + helpers.MergeMaps(values, helmValues) logger := terratestLogger.New(logger.TestLogger{}) @@ -118,7 +118,7 @@ func (h *HelmCluster) Create(t *testing.T) { helm.Install(t, h.helmOptions, config.HelmChartPath, h.releaseName) - helpers.WaitForAllPodsToBeReady(t, h.kubernetesClient, h.helmOptions.KubectlOptions.Namespace, fmt.Sprintf("release=%s", h.releaseName)) + k8s.WaitForAllPodsToBeReady(t, h.kubernetesClient, h.helmOptions.KubectlOptions.Namespace, fmt.Sprintf("release=%s", h.releaseName)) } func (h *HelmCluster) Destroy(t *testing.T) { @@ -213,9 +213,9 @@ func (h *HelmCluster) Destroy(t *testing.T) { func (h *HelmCluster) Upgrade(t *testing.T, helmValues map[string]string) { t.Helper() - MergeMaps(h.helmOptions.SetValues, helmValues) + helpers.MergeMaps(h.helmOptions.SetValues, helmValues) helm.Upgrade(t, h.helmOptions, config.HelmChartPath, h.releaseName) - helpers.WaitForAllPodsToBeReady(t, h.kubernetesClient, h.helmOptions.KubectlOptions.Namespace, fmt.Sprintf("release=%s", h.releaseName)) + k8s.WaitForAllPodsToBeReady(t, h.kubernetesClient, h.helmOptions.KubectlOptions.Namespace, fmt.Sprintf("release=%s", h.releaseName)) } func (h *HelmCluster) SetupConsulClient(t *testing.T, secure bool) *api.Client { @@ -472,11 +472,3 @@ func defaultValues() map[string]string { } return values } - -// MergeMaps will merge the values in b with values in a and save in a. -// If there are conflicts, the values in b will overwrite the values in a. 
-func MergeMaps(a, b map[string]string) {
-	for k, v := range b {
-		a[k] = v
-	}
-}
diff --git a/acceptance/framework/environment/environment.go b/acceptance/framework/environment/environment.go
index ad39a09a04..7c8a89e15b 100644
--- a/acceptance/framework/environment/environment.go
+++ b/acceptance/framework/environment/environment.go
@@ -6,7 +6,6 @@ import (
 	"github.com/gruntwork-io/terratest/modules/k8s"
 	"github.com/hashicorp/consul-k8s/acceptance/framework/config"
-	"github.com/hashicorp/consul-k8s/acceptance/framework/helpers"
 	"github.com/stretchr/testify/require"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 )
@@ -87,6 +86,27 @@ type kubernetesContext struct {
 	options *k8s.KubectlOptions
 }
 
+// KubernetesContextFromOptions returns the Kubernetes context from options.
+// If context is explicitly set in options, it returns that context.
+// Otherwise, it returns the current context.
+func KubernetesContextFromOptions(t *testing.T, options *k8s.KubectlOptions) string {
+	t.Helper()
+
+	// First, check if the context is set in options and return that
+	if options.ContextName != "" {
+		return options.ContextName
+	}
+
+	// Otherwise, get current context from config
+	configPath, err := options.GetConfigPath(t)
+	require.NoError(t, err)
+
+	rawConfig, err := k8s.LoadConfigFromPath(configPath).RawConfig()
+	require.NoError(t, err)
+
+	return rawConfig.CurrentContext
+}
+
 func (k kubernetesContext) KubectlOptions(t *testing.T) *k8s.KubectlOptions {
 	if k.options != nil {
 		return k.options
@@ -107,7 +127,7 @@ func (k kubernetesContext) KubectlOptions(t *testing.T) *k8s.KubectlOptions {
 	rawConfig, err := k8s.LoadConfigFromPath(configPath).RawConfig()
 	require.NoError(t, err)
 
-	contextName := helpers.KubernetesContextFromOptions(t, k.options)
+	contextName := KubernetesContextFromOptions(t, k.options)
 	if rawConfig.Contexts[contextName].Namespace != "" {
 		k.options.Namespace = rawConfig.Contexts[contextName].Namespace
 	} else {
@@ -117,12 +137,26 @@ func (k kubernetesContext) KubectlOptions(t *testing.T) *k8s.KubectlOptions {
 	return k.options
 }
 
+// KubernetesClientFromOptions takes KubectlOptions and returns a Kubernetes API client.
+func KubernetesClientFromOptions(t *testing.T, options *k8s.KubectlOptions) kubernetes.Interface { + configPath, err := options.GetConfigPath(t) + require.NoError(t, err) + + config, err := k8s.LoadApiClientConfigE(configPath, options.ContextName) + require.NoError(t, err) + + client, err := kubernetes.NewForConfig(config) + require.NoError(t, err) + + return client +} + func (k kubernetesContext) KubernetesClient(t *testing.T) kubernetes.Interface { if k.client != nil { return k.client } - k.client = helpers.KubernetesClientFromOptions(t, k.KubectlOptions(t)) + k.client = KubernetesClientFromOptions(t, k.KubectlOptions(t)) return k.client } diff --git a/acceptance/framework/helpers/helpers.go b/acceptance/framework/helpers/helpers.go index b80e0ca383..17dd805123 100644 --- a/acceptance/framework/helpers/helpers.go +++ b/acceptance/framework/helpers/helpers.go @@ -12,13 +12,12 @@ import ( "time" "github.com/gruntwork-io/terratest/modules/helm" + "github.com/hashicorp/consul/api" - terratestk8s "github.com/gruntwork-io/terratest/modules/k8s" "github.com/gruntwork-io/terratest/modules/random" "github.com/hashicorp/consul-k8s/acceptance/framework/logger" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" ) @@ -70,37 +69,6 @@ func CheckForPriorInstallations(t *testing.T, client kubernetes.Interface, optio }) } -// WaitForAllPodsToBeReady waits until all pods with the provided podLabelSelector -// are in the ready status. It checks every 5 seconds for a total of 20 tries. -// If there is at least one container in a pod that isn't ready after that, -// it fails the test. -func WaitForAllPodsToBeReady(t *testing.T, client kubernetes.Interface, namespace, podLabelSelector string) { - t.Helper() - - logger.Logf(t, "Waiting for pods with label %q to be ready.", podLabelSelector) - - // Wait up to 10m. - // On Azure, volume provisioning can sometimes take close to 5 min, - // so we need to give a bit more time for pods to become healthy. - counter := &retry.Counter{Count: 600, Wait: 1 * time.Second} - retry.RunWith(counter, t, func(r *retry.R) { - pods, err := client.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{LabelSelector: podLabelSelector}) - require.NoError(r, err) - require.NotEmpty(r, pods.Items) - - var notReadyPods []string - for _, pod := range pods.Items { - if !IsReady(pod) { - notReadyPods = append(notReadyPods, pod.Name) - } - } - if len(notReadyPods) > 0 { - r.Errorf("%d pods are not ready: %s", len(notReadyPods), strings.Join(notReadyPods, ",")) - } - }) - logger.Log(t, "Finished waiting for pods to be ready.") -} - // SetupInterruptHandler sets up a goroutine that will wait for interrupt signals // and call cleanup function when it catches it. func SetupInterruptHandler(cleanup func()) { @@ -138,69 +106,45 @@ func Cleanup(t *testing.T, noCleanupOnFailure bool, cleanup func()) { t.Cleanup(wrappedCleanupFunc) } -// KubernetesClientFromOptions takes KubectlOptions and returns Kubernetes API client. 
-func KubernetesClientFromOptions(t *testing.T, options *terratestk8s.KubectlOptions) kubernetes.Interface {
-	configPath, err := options.GetConfigPath(t)
-	require.NoError(t, err)
-
-	config, err := terratestk8s.LoadApiClientConfigE(configPath, options.ContextName)
-	require.NoError(t, err)
-
-	client, err := kubernetes.NewForConfig(config)
-	require.NoError(t, err)
-
-	return client
-}
-
-// KubernetesContextFromOptions returns the Kubernetes context from options.
-// If context is explicitly set in options, it returns that context.
-// Otherwise, it returns the current context.
-func KubernetesContextFromOptions(t *testing.T, options *terratestk8s.KubectlOptions) string {
-	t.Helper()
-
-	// First, check if context set in options and return that
-	if options.ContextName != "" {
-		return options.ContextName
-	}
-
-	// Otherwise, get current context from config
-	configPath, err := options.GetConfigPath(t)
-	require.NoError(t, err)
-
-	rawConfig, err := terratestk8s.LoadConfigFromPath(configPath).RawConfig()
-	require.NoError(t, err)
-
-	return rawConfig.CurrentContext
-}
-
-// KubernetesAPIServerHostFromOptions returns the Kubernetes API server host from options.
-func KubernetesAPIServerHostFromOptions(t *testing.T, options *terratestk8s.KubectlOptions) string {
-	t.Helper()
+// VerifyFederation checks that the WAN federation between servers is successful
+// by first checking members are alive from the perspective of both servers.
+// If secure is true, it will also check that the ACL replication is running on the secondary server.
+func VerifyFederation(t *testing.T, primaryClient, secondaryClient *api.Client, releaseName string, secure bool) {
+	retrier := &retry.Timer{Timeout: 5 * time.Minute, Wait: 1 * time.Second}
+	start := time.Now()
+
+	// Check that server in dc1 is healthy from the perspective of the server in dc2, and vice versa.
+	// We're calling the Consul health API, as opposed to checking serf membership status,
+	// because we need to make sure that the federated servers can make API calls and forward requests
+	// from one server to another. From running tests in CI for a while and using serf membership status before,
+	// we've noticed that the status could be "alive" as soon as the server in the secondary cluster joins the primary
+	// and then switch to "failed". This would require us to check that the "alive" status is showing consistently for
+	// some amount of time, which could be quite flaky. Calling the API in another datacenter allows us to check that
+	// each server can forward calls to another, which is what we need for connect.
+ retry.RunWith(retrier, t, func(r *retry.R) { + secondaryServerHealth, _, err := primaryClient.Health().Node(fmt.Sprintf("%s-consul-server-0", releaseName), &api.QueryOptions{Datacenter: "dc2"}) + require.NoError(r, err) + require.Equal(r, secondaryServerHealth.AggregatedStatus(), api.HealthPassing) - configPath, err := options.GetConfigPath(t) - require.NoError(t, err) + primaryServerHealth, _, err := secondaryClient.Health().Node(fmt.Sprintf("%s-consul-server-0", releaseName), &api.QueryOptions{Datacenter: "dc1"}) + require.NoError(r, err) + require.Equal(r, primaryServerHealth.AggregatedStatus(), api.HealthPassing) - config, err := terratestk8s.LoadApiClientConfigE(configPath, options.ContextName) - require.NoError(t, err) + if secure { + replicationStatus, _, err := secondaryClient.ACL().Replication(nil) + require.NoError(r, err) + require.True(r, replicationStatus.Enabled) + require.True(r, replicationStatus.Running) + } + }) - return config.Host + logger.Logf(t, "Took %s to verify federation", time.Since(start)) } -// IsReady returns true if pod is ready. -func IsReady(pod corev1.Pod) bool { - if pod.Status.Phase == corev1.PodPending { - return false - } - - for _, cond := range pod.Status.Conditions { - if cond.Type == corev1.PodReady { - if cond.Status == corev1.ConditionTrue { - return true - } else { - return false - } - } +// MergeMaps will merge the values in b with values in a and save in a. +// If there are conflicts, the values in b will overwrite the values in a. +func MergeMaps(a, b map[string]string) { + for k, v := range b { + a[k] = v } - - return false } diff --git a/acceptance/framework/k8s/debug.go b/acceptance/framework/k8s/debug.go index c01101c6dc..0754f24131 100644 --- a/acceptance/framework/k8s/debug.go +++ b/acceptance/framework/k8s/debug.go @@ -10,7 +10,7 @@ import ( "github.com/gruntwork-io/terratest/modules/k8s" terratestLogger "github.com/gruntwork-io/terratest/modules/logger" - "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" + "github.com/hashicorp/consul-k8s/acceptance/framework/environment" "github.com/hashicorp/consul-k8s/acceptance/framework/logger" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -23,9 +23,9 @@ func WritePodsDebugInfoIfFailed(t *testing.T, kubectlOptions *k8s.KubectlOptions if t.Failed() { // Create k8s client from kubectl options. - client := helpers.KubernetesClientFromOptions(t, kubectlOptions) + client := environment.KubernetesClientFromOptions(t, kubectlOptions) - contextName := helpers.KubernetesContextFromOptions(t, kubectlOptions) + contextName := environment.KubernetesContextFromOptions(t, kubectlOptions) // Create a directory for the test. 
 	testDebugDirectory := filepath.Join(debugDirectory, t.Name(), contextName)
diff --git a/acceptance/framework/k8s/helpers.go b/acceptance/framework/k8s/helpers.go
new file mode 100644
index 0000000000..dabb617876
--- /dev/null
+++ b/acceptance/framework/k8s/helpers.go
@@ -0,0 +1,127 @@
+package k8s
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"testing"
+	"time"
+
+	terratestk8s "github.com/gruntwork-io/terratest/modules/k8s"
+	"github.com/hashicorp/consul-k8s/acceptance/framework/config"
+	"github.com/hashicorp/consul-k8s/acceptance/framework/environment"
+	"github.com/hashicorp/consul-k8s/acceptance/framework/logger"
+	"github.com/hashicorp/consul/sdk/testutil/retry"
+	"github.com/stretchr/testify/require"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+)
+
+// KubernetesAPIServerHostFromOptions returns the Kubernetes API server host from options.
+func KubernetesAPIServerHostFromOptions(t *testing.T, options *terratestk8s.KubectlOptions) string {
+	t.Helper()
+
+	configPath, err := options.GetConfigPath(t)
+	require.NoError(t, err)
+
+	config, err := terratestk8s.LoadApiClientConfigE(configPath, options.ContextName)
+	require.NoError(t, err)
+
+	return config.Host
+}
+
+// WaitForAllPodsToBeReady waits until all pods with the provided podLabelSelector
+// are in the ready status. It checks every second for up to 10 minutes.
+// If there is at least one container in a pod that isn't ready after that,
+// it fails the test.
+func WaitForAllPodsToBeReady(t *testing.T, client kubernetes.Interface, namespace, podLabelSelector string) {
+	t.Helper()
+
+	logger.Logf(t, "Waiting for pods with label %q to be ready.", podLabelSelector)
+
+	// Wait up to 10m.
+	// On Azure, volume provisioning can sometimes take close to 5 min,
+	// so we need to give a bit more time for pods to become healthy.
+	counter := &retry.Counter{Count: 600, Wait: 1 * time.Second}
+	retry.RunWith(counter, t, func(r *retry.R) {
+		pods, err := client.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{LabelSelector: podLabelSelector})
+		require.NoError(r, err)
+		require.NotEmpty(r, pods.Items)
+
+		var notReadyPods []string
+		for _, pod := range pods.Items {
+			if !IsReady(pod) {
+				notReadyPods = append(notReadyPods, pod.Name)
+			}
+		}
+		if len(notReadyPods) > 0 {
+			r.Errorf("%d pods are not ready: %s", len(notReadyPods), strings.Join(notReadyPods, ","))
+		}
+	})
+	logger.Log(t, "Finished waiting for pods to be ready.")
+}
+
+// IsReady returns true if pod is ready.
+func IsReady(pod corev1.Pod) bool {
+	if pod.Status.Phase == corev1.PodPending {
+		return false
+	}
+
+	for _, cond := range pod.Status.Conditions {
+		if cond.Type == corev1.PodReady {
+			if cond.Status == corev1.ConditionTrue {
+				return true
+			} else {
+				return false
+			}
+		}
+	}
+
+	return false
+}
+
+// KubernetesAPIServerHost returns the Kubernetes API server URL depending on test configuration.
+func KubernetesAPIServerHost(t *testing.T, cfg *config.TestConfig, ctx environment.TestContext) string {
+	var k8sAPIHost string
+	// When running on kind, the kube API address in kubeconfig will have a localhost address
+	// which will not work from inside the container. That's why we need to use the endpoints address instead,
+	// which will point to the node IP.
+	if cfg.UseKind {
+		// The Kubernetes AuthMethod host is read from the endpoints for the Kubernetes service.
+ kubernetesEndpoint, err := ctx.KubernetesClient(t).CoreV1().Endpoints("default").Get(context.Background(), "kubernetes", metav1.GetOptions{}) + require.NoError(t, err) + k8sAPIHost = fmt.Sprintf("https://%s:%d", kubernetesEndpoint.Subsets[0].Addresses[0].IP, kubernetesEndpoint.Subsets[0].Ports[0].Port) + } else { + k8sAPIHost = KubernetesAPIServerHostFromOptions(t, ctx.KubectlOptions(t)) + } + + return k8sAPIHost +} + +// ServiceHost returns a host for a Kubernetes service depending on test configuration. +func ServiceHost(t *testing.T, cfg *config.TestConfig, ctx environment.TestContext, serviceName string) string { + if cfg.UseKind { + nodeList, err := ctx.KubernetesClient(t).CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) + require.NoError(t, err) + // Get the address of the (only) node from the Kind cluster. + return nodeList.Items[0].Status.Addresses[0].Address + } else { + var host string + // It can take some time for the load balancers to be ready and have an IP/Hostname. + // Wait for 60 seconds before failing. + retry.RunWith(&retry.Counter{Wait: 1 * time.Second, Count: 60}, t, func(r *retry.R) { + svc, err := ctx.KubernetesClient(t).CoreV1().Services(ctx.KubectlOptions(t).Namespace).Get(context.Background(), serviceName, metav1.GetOptions{}) + require.NoError(t, err) + require.NotEmpty(r, svc.Status.LoadBalancer.Ingress) + // On AWS, load balancers have a hostname for ingress, while on Azure and GCP + // load balancers have IPs. + if svc.Status.LoadBalancer.Ingress[0].Hostname != "" { + host = svc.Status.LoadBalancer.Ingress[0].Hostname + } else { + host = svc.Status.LoadBalancer.Ingress[0].IP + } + }) + return host + } +} diff --git a/acceptance/framework/vault/vault_cluster.go b/acceptance/framework/vault/vault_cluster.go index bab5fbb753..d42a4fb896 100644 --- a/acceptance/framework/vault/vault_cluster.go +++ b/acceptance/framework/vault/vault_cluster.go @@ -45,20 +45,24 @@ type VaultCluster struct { } // NewVaultCluster creates a VaultCluster which will be used to install Vault using Helm. -func NewVaultCluster(t *testing.T, ctx environment.TestContext, cfg *config.TestConfig, releaseName string) *VaultCluster { +func NewVaultCluster(t *testing.T, ctx environment.TestContext, cfg *config.TestConfig, releaseName string, helmValues map[string]string) *VaultCluster { logger := terratestLogger.New(logger.TestLogger{}) kopts := ctx.KubectlOptions(t) + values := defaultHelmValues(releaseName) + if cfg.EnablePodSecurityPolicies { + values["global.psp.enable"] = "true" + } + + helpers.MergeMaps(values, helmValues) vaultHelmOpts := &helm.Options{ - SetValues: defaultHelmValues(releaseName), + SetValues: values, KubectlOptions: kopts, Logger: logger, } - if cfg.EnablePodSecurityPolicies { - vaultHelmOpts.SetValues["global.psp.enable"] = "true" - } + helm.AddRepo(t, vaultHelmOpts, "hashicorp", "https://helm.releases.hashicorp.com") // Ignoring the error from `helm repo update` as it could fail due to stale cache or unreachable servers and we're // asserting a chart version on Install which would fail in an obvious way should this not succeed. @@ -80,7 +84,7 @@ func NewVaultCluster(t *testing.T, ctx environment.TestContext, cfg *config.Test } // VaultClient returns the vault client. -func (v *VaultCluster) VaultClient(t *testing.T) *vapi.Client { return v.vaultClient } +func (v *VaultCluster) VaultClient(*testing.T) *vapi.Client { return v.vaultClient } // SetupVaultClient sets up and returns a Vault Client. 
func (v *VaultCluster) SetupVaultClient(t *testing.T) *vapi.Client { @@ -125,8 +129,11 @@ func (v *VaultCluster) SetupVaultClient(t *testing.T) *vapi.Client { } // bootstrap sets up Kubernetes auth method and enables secrets engines. -func (v *VaultCluster) bootstrap(t *testing.T, ctx environment.TestContext) { - +func (v *VaultCluster) bootstrap(t *testing.T) { + if !v.serverEnabled() { + v.logger.Logf(t, "skipping bootstrapping Vault because Vault server is not enabled") + return + } v.vaultClient = v.SetupVaultClient(t) // Enable the KV-V2 Secrets engine. @@ -143,26 +150,39 @@ func (v *VaultCluster) bootstrap(t *testing.T, ctx environment.TestContext) { }) require.NoError(t, err) - // Enable Kube Auth. - err = v.vaultClient.Sys().EnableAuthWithOptions("kubernetes", &vapi.EnableAuthOptions{ + namespace := v.helmOptions.KubectlOptions.Namespace + vaultServerServiceAccountName := fmt.Sprintf("%s-vault", v.releaseName) + v.ConfigureAuthMethod(t, v.vaultClient, "kubernetes", "https://kubernetes.default.svc", vaultServerServiceAccountName, namespace) +} + +// ConfigureAuthMethod configures the auth method in Vault from the provided service account name and namespace, +// kubernetes host and auth path. +// We need to take vaultClient here in case this Vault cluster does not have a server to run API commands against. +func (v *VaultCluster) ConfigureAuthMethod(t *testing.T, vaultClient *vapi.Client, authPath, k8sHost, saName, saNS string) { + v.logger.Logf(t, "enabling kubernetes auth method on %s path", authPath) + err := vaultClient.Sys().EnableAuthWithOptions(authPath, &vapi.EnableAuthOptions{ Type: "kubernetes", }) require.NoError(t, err) - v.logger.Logf(t, "updating vault kube auth config") - - // To configure the auth method, we need to read the token and the ca cert from the Vault's server + // To configure the auth method, we need to read the token and the CA cert from the auth method's // service account token. - namespace := v.helmOptions.KubectlOptions.Namespace - sa, err := v.kubernetesClient.CoreV1().ServiceAccounts(namespace).Get(context.Background(), fmt.Sprintf("%s-vault", v.releaseName), metav1.GetOptions{}) - require.NoError(t, err) - require.Len(t, sa.Secrets, 1) - tokenSecret, err := v.kubernetesClient.CoreV1().Secrets(namespace).Get(context.Background(), sa.Secrets[0].Name, metav1.GetOptions{}) + // The JWT token and CA cert is what Vault server will use to validate service account token + // with the Kubernetes API. 
+ var sa *corev1.ServiceAccount + retry.Run(t, func(r *retry.R) { + sa, err = v.kubernetesClient.CoreV1().ServiceAccounts(saNS).Get(context.Background(), saName, metav1.GetOptions{}) + require.NoError(t, err) + require.Len(t, sa.Secrets, 1) + }) + + v.logger.Logf(t, "updating vault kubernetes auth config for %s auth path", authPath) + tokenSecret, err := v.kubernetesClient.CoreV1().Secrets(saNS).Get(context.Background(), sa.Secrets[0].Name, metav1.GetOptions{}) require.NoError(t, err) - _, err = v.vaultClient.Logical().Write("auth/kubernetes/config", map[string]interface{}{ - "token_reviewer_jwt": tokenSecret.StringData["token"], - "kubernetes_ca_cert": tokenSecret.StringData["ca.crt"], - "kubernetes_host": "https://kubernetes.default.svc", + _, err = vaultClient.Logical().Write(fmt.Sprintf("auth/%s/config", authPath), map[string]interface{}{ + "token_reviewer_jwt": string(tokenSecret.Data["token"]), + "kubernetes_ca_cert": string(tokenSecret.Data["ca.crt"]), + "kubernetes_host": k8sHost, }) require.NoError(t, err) } @@ -188,10 +208,10 @@ func (v *VaultCluster) Create(t *testing.T, ctx environment.TestContext) { v.initAndUnseal(t) // Wait for the injector and vault server pods to become Ready. - helpers.WaitForAllPodsToBeReady(t, v.kubernetesClient, v.helmOptions.KubectlOptions.Namespace, v.releaseLabelSelector()) + k8s.WaitForAllPodsToBeReady(t, v.kubernetesClient, v.helmOptions.KubectlOptions.Namespace, v.releaseLabelSelector()) // Now call bootstrap(). - v.bootstrap(t, ctx) + v.bootstrap(t) } // Destroy issues a helm delete and deletes the PVC + any helm secrets related to the release that are leftover. @@ -261,10 +281,23 @@ func (v *VaultCluster) releaseLabelSelector() string { return fmt.Sprintf("%s=%s", releaseLabel, v.releaseName) } +// serverEnabled returns true if this Vault cluster has a server. +func (v *VaultCluster) serverEnabled() bool { + serverEnabled, ok := v.helmOptions.SetValues["server.enabled"] + // Server is enabled by default in the Vault Helm chart, so it's enabled either when that helm value is + // not provided or when it's not explicitly disabled. + return !ok || serverEnabled != "false" +} + // createTLSCerts generates a self-signed CA and uses it to generate -// certificate and key for the Vault server. It then saves those as +// certificate and key for the Vault server. It then saves those as // Kubernetes secrets. func (v *VaultCluster) createTLSCerts(t *testing.T) { + if !v.serverEnabled() { + v.logger.Logf(t, "skipping generating Vault TLS certificates because Vault server is not enabled") + return + } + v.logger.Logf(t, "generating Vault TLS certificates") namespace := v.helmOptions.KubectlOptions.Namespace @@ -318,8 +351,12 @@ func (v *VaultCluster) createTLSCerts(t *testing.T) { // initAndUnseal initializes and unseals Vault. // Once initialized, it saves the Vault root token into a Kubernetes secret. 
func (v *VaultCluster) initAndUnseal(t *testing.T) { - v.logger.Logf(t, "initializing and unsealing Vault") + if !v.serverEnabled() { + v.logger.Logf(t, "skipping initializing and unsealing Vault because Vault server is not enabled") + return + } + v.logger.Logf(t, "initializing and unsealing Vault") namespace := v.helmOptions.KubectlOptions.Namespace retrier := &retry.Timer{Timeout: 2 * time.Minute, Wait: 1 * time.Second} retry.RunWith(retrier, t, func(r *retry.R) { diff --git a/acceptance/tests/mesh-gateway/mesh_gateway_test.go b/acceptance/tests/mesh-gateway/mesh_gateway_test.go index c989535b90..ed67252670 100644 --- a/acceptance/tests/mesh-gateway/mesh_gateway_test.go +++ b/acceptance/tests/mesh-gateway/mesh_gateway_test.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "testing" - "time" "github.com/hashicorp/consul-k8s/acceptance/framework/consul" "github.com/hashicorp/consul-k8s/acceptance/framework/environment" @@ -12,7 +11,6 @@ import ( "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" "github.com/hashicorp/consul-k8s/acceptance/framework/logger" "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -108,7 +106,7 @@ func TestMeshGatewayDefault(t *testing.T) { // Verify federation between servers logger.Log(t, "verifying federation was successful") - verifyFederation(t, primaryClient, secondaryClient, releaseName, false) + helpers.VerifyFederation(t, primaryClient, secondaryClient, releaseName, false) // Create a ProxyDefaults resource to configure services to use the mesh // gateways. @@ -244,7 +242,7 @@ func TestMeshGatewaySecure(t *testing.T) { // Verify federation between servers logger.Log(t, "verifying federation was successful") - verifyFederation(t, primaryClient, secondaryClient, releaseName, true) + helpers.VerifyFederation(t, primaryClient, secondaryClient, releaseName, true) // Create a ProxyDefaults resource to configure services to use the mesh // gateways. @@ -280,38 +278,3 @@ func TestMeshGatewaySecure(t *testing.T) { }) } } - -// verifyFederation checks that the WAN federation between servers is successful -// by first checking members are alive from the perspective of both servers. -// If secure is true, it will also check that the ACL replication is running on the secondary server. -func verifyFederation(t *testing.T, primaryClient, secondaryClient *api.Client, releaseName string, secure bool) { - retrier := &retry.Timer{Timeout: 5 * time.Minute, Wait: 1 * time.Second} - start := time.Now() - - // Check that server in dc1 is healthy from the perspective of the server in dc2, and vice versa. - // We're calling the Consul health API, as opposed to checking serf membership status, - // because we need to make sure that the federated servers can make API calls and forward requests - // from one server to another. From running tests in CI for a while and using serf membership status before, - // we've noticed that the status could be "alive" as soon as the server in the secondary cluster joins the primary - // and then switch to "failed". This would require us to check that the status is "alive" is showing consistently for - // some amount of time, which could be quite flakey. Calling the API in another datacenter allows us to check that - // each server can forward calls to another, which is what we need for connect. 
- retry.RunWith(retrier, t, func(r *retry.R) { - secondaryServerHealth, _, err := primaryClient.Health().Node(fmt.Sprintf("%s-consul-server-0", releaseName), &api.QueryOptions{Datacenter: "dc2"}) - require.NoError(r, err) - require.Equal(r, secondaryServerHealth.AggregatedStatus(), api.HealthPassing) - - primaryServerHealth, _, err := secondaryClient.Health().Node(fmt.Sprintf("%s-consul-server-0", releaseName), &api.QueryOptions{Datacenter: "dc1"}) - require.NoError(r, err) - require.Equal(r, primaryServerHealth.AggregatedStatus(), api.HealthPassing) - - if secure { - replicationStatus, _, err := secondaryClient.ACL().Replication(nil) - require.NoError(r, err) - require.True(r, replicationStatus.Enabled) - require.True(r, replicationStatus.Running) - } - }) - - logger.Logf(t, "Took %s to verify federation", time.Since(start)) -} diff --git a/acceptance/tests/partitions/partitions_test.go b/acceptance/tests/partitions/partitions_test.go index 5fd19af3ce..a4a95e7078 100644 --- a/acceptance/tests/partitions/partitions_test.go +++ b/acceptance/tests/partitions/partitions_test.go @@ -5,7 +5,6 @@ import ( "fmt" "strconv" "testing" - "time" terratestk8s "github.com/gruntwork-io/terratest/modules/k8s" "github.com/hashicorp/consul-k8s/acceptance/framework/consul" @@ -128,7 +127,7 @@ func TestPartitions(t *testing.T) { releaseName := helpers.RandomName() - consul.MergeMaps(serverHelmValues, commonHelmValues) + helpers.MergeMaps(serverHelmValues, commonHelmValues) // Install the consul cluster with servers in the default kubernetes context. serverConsulCluster := consul.NewHelmCluster(t, serverHelmValues, serverClusterContext, cfg, releaseName) @@ -154,45 +153,10 @@ func TestPartitions(t *testing.T) { moveSecret(t, serverClusterContext, clientClusterContext, partitionToken) } - var partitionSvcAddress string - if cfg.UseKind { - nodeList, err := serverClusterContext.KubernetesClient(t).CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - require.NoError(t, err) - // Get the address of the (only) node from the Kind cluster. - partitionSvcAddress = nodeList.Items[0].Status.Addresses[0].Address - } else { - // Get the IP of the partition service to configure the external server address in the values file for the clients cluster. - partitionServiceName := fmt.Sprintf("%s-consul-partition", releaseName) - logger.Logf(t, "retrieving partition service to determine external address for servers") - - // It can take some time for the load balancers to be ready and have an IP/Hostname. - // Wait for 60 seconds before failing. - retry.RunWith(&retry.Counter{Wait: 1 * time.Second, Count: 60}, t, func(r *retry.R) { - partitionsSvc, err := serverClusterContext.KubernetesClient(t).CoreV1().Services(serverClusterContext.KubectlOptions(t).Namespace).Get(ctx, partitionServiceName, metav1.GetOptions{}) - require.NoError(t, err) - require.NotEmpty(r, partitionsSvc.Status.LoadBalancer.Ingress) - // On AWS, load balancers have a hostname for ingress, while on Azure and GCP - // load balancers have IPs. 
- if partitionsSvc.Status.LoadBalancer.Ingress[0].Hostname != "" { - partitionSvcAddress = partitionsSvc.Status.LoadBalancer.Ingress[0].Hostname - } else { - partitionSvcAddress = partitionsSvc.Status.LoadBalancer.Ingress[0].IP - } - }) - } + partitionServiceName := fmt.Sprintf("%s-consul-partition", releaseName) + partitionSvcAddress := k8s.ServiceHost(t, cfg, serverClusterContext, partitionServiceName) - var k8sAuthMethodHost string - // When running on kind, the kube API address in kubeconfig will have a localhost address - // which will not work from inside the container. That's why we need to use the endpoints address instead - // which will point the node IP. - if cfg.UseKind { - // The Kubernetes AuthMethod host is read from the endpoints for the Kubernetes service. - kubernetesEndpoint, err := clientClusterContext.KubernetesClient(t).CoreV1().Endpoints(defaultNamespace).Get(ctx, "kubernetes", metav1.GetOptions{}) - require.NoError(t, err) - k8sAuthMethodHost = fmt.Sprintf("%s:%d", kubernetesEndpoint.Subsets[0].Addresses[0].IP, kubernetesEndpoint.Subsets[0].Ports[0].Port) - } else { - k8sAuthMethodHost = helpers.KubernetesAPIServerHostFromOptions(t, clientClusterContext.KubectlOptions(t)) - } + k8sAuthMethodHost := k8s.KubernetesAPIServerHost(t, cfg, clientClusterContext) // Create client cluster. clientHelmValues := map[string]string{ @@ -229,7 +193,7 @@ func TestPartitions(t *testing.T) { clientHelmValues["meshGateway.service.nodePort"] = "30100" } - consul.MergeMaps(clientHelmValues, commonHelmValues) + helpers.MergeMaps(clientHelmValues, commonHelmValues) // Install the consul cluster without servers in the client cluster kubernetes context. clientConsulCluster := consul.NewHelmCluster(t, clientHelmValues, clientClusterContext, cfg, releaseName) diff --git a/acceptance/tests/vault/helpers.go b/acceptance/tests/vault/helpers.go new file mode 100644 index 0000000000..d1439ee32e --- /dev/null +++ b/acceptance/tests/vault/helpers.go @@ -0,0 +1,180 @@ +package vault + +import ( + "crypto/rand" + "encoding/base64" + "fmt" + "testing" + + "github.com/hashicorp/consul-k8s/acceptance/framework/logger" + vapi "github.com/hashicorp/vault/api" + "github.com/stretchr/testify/require" +) + +const ( + gossipPolicy = ` +path "consul/data/secret/gossip" { + capabilities = ["read"] +}` + + // connectCAPolicy allows Consul to bootstrap all certificates for the service mesh in Vault. + // Adapted from https://www.consul.io/docs/connect/ca/vault#consul-managed-pki-paths. + connectCAPolicy = ` +path "/sys/mounts" { + capabilities = [ "read" ] +} + +path "/sys/mounts/connect_root" { + capabilities = [ "create", "read", "update", "delete", "list" ] +} + +path "/sys/mounts/connect_inter" { + capabilities = [ "create", "read", "update", "delete", "list" ] +} + +path "/connect_root/*" { + capabilities = [ "create", "read", "update", "delete", "list" ] +} + +path "/connect_inter/*" { + capabilities = [ "create", "read", "update", "delete", "list" ] +} +` + caPolicy = ` +path "pki/cert/ca" { + capabilities = ["read"] +}` +) + +// generateGossipSecret generates a random 32 byte secret returned as a base64 encoded string. 
+func generateGossipSecret() (string, error) { + // This code was copied from Consul's Keygen command: + // https://github.com/hashicorp/consul/blob/d652cc86e3d0322102c2b5e9026c6a60f36c17a5/command/keygen/keygen.go + + key := make([]byte, 32) + n, err := rand.Reader.Read(key) + if err != nil { + return "", fmt.Errorf("error reading random data: %s", err) + } + if n != 32 { + return "", fmt.Errorf("couldn't read enough entropy") + } + + return base64.StdEncoding.EncodeToString(key), nil +} + +// configureGossipVaultSecret generates a gossip encryption key, +// stores it in vault as a secret and configures a policy to access it. +func configureGossipVaultSecret(t *testing.T, vaultClient *vapi.Client) string { + // Create the Vault Policy for the gossip key. + logger.Log(t, "Creating gossip policy") + err := vaultClient.Sys().PutPolicy("consul-gossip", gossipPolicy) + require.NoError(t, err) + + // Generate the gossip secret. + gossipKey, err := generateGossipSecret() + require.NoError(t, err) + + // Create the gossip secret. + logger.Log(t, "Creating the gossip secret") + params := map[string]interface{}{ + "data": map[string]interface{}{ + "gossip": gossipKey, + }, + } + _, err = vaultClient.Logical().Write("consul/data/secret/gossip", params) + require.NoError(t, err) + + return gossipKey +} + +// configureKubernetesAuthRoles configures roles for the Kubernetes auth method +// that will be used by the test Helm chart installation. +func configureKubernetesAuthRoles(t *testing.T, vaultClient *vapi.Client, consulReleaseName, ns, authPath, datacenter string) { + consulClientServiceAccountName := fmt.Sprintf("%s-consul-client", consulReleaseName) + consulServerServiceAccountName := fmt.Sprintf("%s-consul-server", consulReleaseName) + + // Create the Auth Roles for consul-server and consul-client. + // Auth roles bind policies to Kubernetes service accounts, which + // then enables the Vault agent init container to call 'vault login' + // with the Kubernetes auth method to obtain a Vault token. + // Please see https://www.vaultproject.io/docs/auth/kubernetes#configuration + // for more details. + logger.Log(t, "Creating the consul-server and consul-client roles") + params := map[string]interface{}{ + "bound_service_account_names": consulClientServiceAccountName, + "bound_service_account_namespaces": ns, + "policies": "consul-gossip", + "ttl": "24h", + } + _, err := vaultClient.Logical().Write(fmt.Sprintf("auth/%s/role/consul-client", authPath), params) + require.NoError(t, err) + + params = map[string]interface{}{ + "bound_service_account_names": consulServerServiceAccountName, + "bound_service_account_namespaces": ns, + "policies": fmt.Sprintf("consul-gossip,connect-ca,consul-server-%s", datacenter), + "ttl": "24h", + } + _, err = vaultClient.Logical().Write(fmt.Sprintf("auth/%s/role/consul-server", authPath), params) + require.NoError(t, err) + + // Create the CA role that all components will use to fetch the Server CA certs. + params = map[string]interface{}{ + "bound_service_account_names": "*", + "bound_service_account_namespaces": ns, + "policies": "consul-ca", + "ttl": "24h", + } + _, err = vaultClient.Logical().Write(fmt.Sprintf("auth/%s/role/consul-ca", authPath), params) + require.NoError(t, err) +} + +// configurePKICA generates a CA in Vault. +func configurePKICA(t *testing.T, vaultClient *vapi.Client) { + // Create root CA to issue Consul server certificates and the `consul-server` PKI role. + // See https://learn.hashicorp.com/tutorials/consul/vault-pki-consul-secure-tls. 
+ // Generate the root CA. + params := map[string]interface{}{ + "common_name": "Consul CA", + "ttl": "24h", + } + _, err := vaultClient.Logical().Write("pki/root/generate/internal", params) + require.NoError(t, err) + + err = vaultClient.Sys().PutPolicy("consul-ca", caPolicy) + require.NoError(t, err) +} + +// configurePKICertificates configures roles so that Consul server TLS certificates +// can be issued by Vault. +func configurePKICertificates(t *testing.T, vaultClient *vapi.Client, consulReleaseName, ns, datacenter string) string { + // Create the Vault PKI Role. + consulServerDNSName := consulReleaseName + "-consul-server" + allowedDomains := fmt.Sprintf("%s.consul,%s,%s.%s,%s.%s.svc", datacenter, consulServerDNSName, consulServerDNSName, ns, consulServerDNSName, ns) + params := map[string]interface{}{ + "allowed_domains": allowedDomains, + "allow_bare_domains": "true", + "allow_localhost": "true", + "allow_subdomains": "true", + "generate_lease": "true", + "max_ttl": "1h", + } + + pkiRoleName := fmt.Sprintf("consul-server-%s", datacenter) + + _, err := vaultClient.Logical().Write(fmt.Sprintf("pki/roles/%s", pkiRoleName), params) + require.NoError(t, err) + + certificateIssuePath := fmt.Sprintf("pki/issue/%s", pkiRoleName) + serverTLSPolicy := fmt.Sprintf(` +path %q { + capabilities = ["create", "update"] +}`, certificateIssuePath) + + // Create the server policy. + err = vaultClient.Sys().PutPolicy(pkiRoleName, serverTLSPolicy) + require.NoError(t, err) + + return certificateIssuePath +} diff --git a/acceptance/tests/vault/vault_test.go b/acceptance/tests/vault/vault_test.go index e19e196c84..0521ab0f23 100644 --- a/acceptance/tests/vault/vault_test.go +++ b/acceptance/tests/vault/vault_test.go @@ -1,9 +1,6 @@ package vault import ( - "crypto/rand" - "encoding/base64" - "fmt" "testing" "github.com/hashicorp/consul-k8s/acceptance/framework/consul" @@ -14,45 +11,6 @@ import ( "github.com/stretchr/testify/require" ) -const ( - gossipPolicy = ` -path "consul/data/secret/gossip" { - capabilities = ["read"] -}` - - // connectCAPolicy allows Consul to bootstrap all certificates for the service mesh in Vault. - // Adapted from https://www.consul.io/docs/connect/ca/vault#consul-managed-pki-paths. - connectCAPolicy = ` -path "/sys/mounts" { - capabilities = [ "read" ] -} - -path "/sys/mounts/connect_root" { - capabilities = [ "create", "read", "update", "delete", "list" ] -} - -path "/sys/mounts/connect_inter" { - capabilities = [ "create", "read", "update", "delete", "list" ] -} - -path "/connect_root/*" { - capabilities = [ "create", "read", "update", "delete", "list" ] -} - -path "/connect_inter/*" { - capabilities = [ "create", "read", "update", "delete", "list" ] -} -` - serverTLSPolicy = ` -path "pki/issue/consul-server" { - capabilities = ["create", "update"] -}` - caPolicy = ` -path "pki/cert/ca" { - capabilities = ["read"] -}` -) - // TestVault installs Vault, bootstraps it with secrets, policies, and Kube Auth Method. // It then configures Consul to use vault as the backend and checks that it works. 
func TestVault(t *testing.T) { @@ -62,116 +20,31 @@ func TestVault(t *testing.T) { consulReleaseName := helpers.RandomName() vaultReleaseName := helpers.RandomName() - consulClientServiceAccountName := fmt.Sprintf("%s-consul-client", consulReleaseName) - consulServerServiceAccountName := fmt.Sprintf("%s-consul-server", consulReleaseName) - vaultCluster := vault.NewVaultCluster(t, ctx, cfg, vaultReleaseName) + vaultCluster := vault.NewVaultCluster(t, ctx, cfg, vaultReleaseName, nil) vaultCluster.Create(t, ctx) // Vault is now installed in the cluster. // Now fetch the Vault client so we can create the policies and secrets. vaultClient := vaultCluster.VaultClient(t) - // Create the Vault Policy for the gossip key. - logger.Log(t, "Creating policies") - err := vaultClient.Sys().PutPolicy("consul-gossip", gossipPolicy) - require.NoError(t, err) + gossipKey := configureGossipVaultSecret(t, vaultClient) // Create the Vault Policy for the connect-ca. - err = vaultClient.Sys().PutPolicy("connect-ca", connectCAPolicy) - require.NoError(t, err) - - // Create the Auth Roles for consul-server and consul-client. - // Auth roles bind policies to Kubernetes service accounts, which - // then enables the Vault agent init container to call 'vault login' - // with the Kubernetes auth method to obtain a Vault token. - // Please see https://www.vaultproject.io/docs/auth/kubernetes#configuration - // for more details. - logger.Log(t, "Creating the consul-server and consul-client roles") - params := map[string]interface{}{ - "bound_service_account_names": consulClientServiceAccountName, - "bound_service_account_namespaces": ns, - "policies": "consul-gossip", - "ttl": "24h", - } - _, err = vaultClient.Logical().Write("auth/kubernetes/role/consul-client", params) + err := vaultClient.Sys().PutPolicy("connect-ca", connectCAPolicy) require.NoError(t, err) - params = map[string]interface{}{ - "bound_service_account_names": consulServerServiceAccountName, - "bound_service_account_namespaces": ns, - "policies": "consul-gossip,connect-ca,consul-server", - "ttl": "24h", - } - _, err = vaultClient.Logical().Write("auth/kubernetes/role/consul-server", params) - require.NoError(t, err) + configureKubernetesAuthRoles(t, vaultClient, consulReleaseName, ns, "kubernetes", "dc1") - // Create the CA role that all components will use to fetch the Server CA certs. - params = map[string]interface{}{ - "bound_service_account_names": "*", - "bound_service_account_namespaces": ns, - "policies": "consul-ca", - "ttl": "24h", - } - _, err = vaultClient.Logical().Write("auth/kubernetes/role/consul-ca", params) - require.NoError(t, err) - - // Generate the gossip secret. - gossipKey, err := generateGossipSecret() - require.NoError(t, err) - - // Create the gossip secret. - logger.Log(t, "Creating the gossip secret") - params = map[string]interface{}{ - "data": map[string]interface{}{ - "gossip": gossipKey, - }, - } - _, err = vaultClient.Logical().Write("consul/data/secret/gossip", params) - require.NoError(t, err) + configurePKICA(t, vaultClient) + certPath := configurePKICertificates(t, vaultClient, consulReleaseName, ns, "dc1") vaultCASecret := vault.CASecretName(vaultReleaseName) - // Bootstrap TLS by creating the CA infrastructure required for Consul server TLS and also create the `consul-server` PKI role. - // Using https://learn.hashicorp.com/tutorials/consul/vault-pki-consul-secure-tls. - // Generate the root CA. 
- params = map[string]interface{}{ - "common_name": "dc1.consul", - "ttl": "24h", - } - _, err = vaultClient.Logical().Write("pki/root/generate/internal", params) - require.NoError(t, err) - - // Create the Vault PKI Role. - name := consulReleaseName + "-consul" - allowedDomains := fmt.Sprintf("dc1.consul,%s-server,%s-server.%s,%s-server.%s.svc", name, name, ns, name, ns) - params = map[string]interface{}{ - "allowed_domains": allowedDomains, - "allow_bare_domains": "true", - "allow_localhost": "true", - "allow_subdomains": "true", - "generate_lease": "true", - "max_ttl": "1h", - } - _, err = vaultClient.Logical().Write("pki/roles/consul-server", params) - require.NoError(t, err) - - // Create the server and ca policies - err = vaultClient.Sys().PutPolicy("consul-server", serverTLSPolicy) - require.NoError(t, err) - err = vaultClient.Sys().PutPolicy("consul-ca", caPolicy) - require.NoError(t, err) - consulHelmValues := map[string]string{ - // TODO: Update the global image once 1.11 is GA. - "global.image": "docker.mirror.hashicorp.services/hashicorpdev/consul:latest", - - "server.enabled": "true", - "server.replicas": "1", "server.extraVolumes[0].type": "secret", "server.extraVolumes[0].name": vaultCASecret, "server.extraVolumes[0].load": "false", - "global.datacenter": "dc1", "connectInject.enabled": "true", "connectInject.replicas": "1", @@ -199,9 +72,8 @@ func TestVault(t *testing.T) { "terminatingGateways.enabled": "true", "terminatingGateways.defaults.replicas": "1", - "server.serverCert.secretName": "pki/issue/consul-server", + "server.serverCert.secretName": certPath, "global.tls.caCert.secretName": "pki/cert/ca", - "global.tls.httpsOnly": "false", "global.tls.enableAutoEncrypt": "true", // For sync catalog, it is sufficient to check that the deployment is running and ready @@ -250,20 +122,3 @@ func TestVault(t *testing.T) { k8s.CheckStaticServerConnectionSuccessful(t, ctx.KubectlOptions(t), "http://localhost:1234") } } - -// generateGossipSecret generates a random 32 byte secret returned as a base64 encoded string. 
-func generateGossipSecret() (string, error) { - // This code was copied from Consul's Keygen command: - // https://github.com/hashicorp/consul/blob/d652cc86e3d0322102c2b5e9026c6a60f36c17a5/command/keygen/keygen.go - - key := make([]byte, 32) - n, err := rand.Reader.Read(key) - if err != nil { - return "", fmt.Errorf("error reading random data: %s", err) - } - if n != 32 { - return "", fmt.Errorf("couldn't read enough entropy") - } - - return base64.StdEncoding.EncodeToString(key), nil -} diff --git a/acceptance/tests/vault/vault_wan_fed_test.go b/acceptance/tests/vault/vault_wan_fed_test.go new file mode 100644 index 0000000000..77689ef40a --- /dev/null +++ b/acceptance/tests/vault/vault_wan_fed_test.go @@ -0,0 +1,267 @@ +package vault + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/consul-k8s/acceptance/framework/config" + "github.com/hashicorp/consul-k8s/acceptance/framework/consul" + "github.com/hashicorp/consul-k8s/acceptance/framework/environment" + "github.com/hashicorp/consul-k8s/acceptance/framework/helpers" + "github.com/hashicorp/consul-k8s/acceptance/framework/k8s" + "github.com/hashicorp/consul-k8s/acceptance/framework/logger" + "github.com/hashicorp/consul-k8s/acceptance/framework/vault" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Test that WAN federation via Mesh gateways works with Vault +// as the secrets backend, testing all possible credentials that can be used for WAN federation. +// This test deploys a Vault cluster with a server in the primary k8s cluster and exposes it to the +// secondary cluster via a Kubernetes service. We then only need to deploy Vault agent injector +// in the secondary that will treat the Vault server in the primary as an external server. +func TestVault_WANFederationViaGateways(t *testing.T) { + cfg := suite.Config() + if !cfg.EnableMultiCluster { + t.Skipf("skipping this test because -enable-multi-cluster is not set") + } + primaryCtx := suite.Environment().DefaultContext(t) + secondaryCtx := suite.Environment().Context(t, environment.SecondaryContextName) + + ns := primaryCtx.KubectlOptions(t).Namespace + + vaultReleaseName := helpers.RandomName() + consulReleaseName := helpers.RandomName() + + // In the primary cluster, we will expose Vault server as a Load balancer + // or a NodePort service so that the secondary can connect to it. + primaryVaultHelmValues := map[string]string{ + "server.service.type": "LoadBalancer", + } + if cfg.UseKind { + primaryVaultHelmValues["server.service.type"] = "NodePort" + primaryVaultHelmValues["server.service.nodePort"] = "31000" + } + + primaryVaultCluster := vault.NewVaultCluster(t, primaryCtx, cfg, vaultReleaseName, primaryVaultHelmValues) + primaryVaultCluster.Create(t, primaryCtx) + + externalVaultAddress := vaultAddress(t, cfg, primaryCtx, vaultReleaseName) + + // In the secondary cluster, we will only deploy the agent injector and provide + // it with the primary's Vault address. We also want to configure the injector with + // a different k8s auth method path since the secondary cluster will need its own auth method. 
+ secondaryVaultHelmValues := map[string]string{ + "server.enabled": "false", + "injector.externalVaultAddr": externalVaultAddress, + "injector.authPath": "auth/kubernetes-dc2", + } + + secondaryVaultCluster := vault.NewVaultCluster(t, secondaryCtx, cfg, vaultReleaseName, secondaryVaultHelmValues) + secondaryVaultCluster.Create(t, secondaryCtx) + + vaultClient := primaryVaultCluster.VaultClient(t) + + configureGossipVaultSecret(t, vaultClient) + + configureKubernetesAuthRoles(t, vaultClient, consulReleaseName, ns, "kubernetes", "dc1") + + // Configure Vault Kubernetes auth method for the secondary datacenter. + { + // Create auth method service account and ClusterRoleBinding. The Vault server + // in the primary cluster will use this service account token to talk to the secondary + // Kubernetes cluster. + // This ClusterRoleBinding is adapted from the Vault server's role: + // https://github.com/hashicorp/vault-helm/blob/b0528fce49c529f2c37953ea3a14f30ed651e0d6/templates/server-clusterrolebinding.yaml + + // Use a single name for all RBAC objects. + authMethodRBACName := fmt.Sprintf("%s-vault-auth-method", vaultReleaseName) + _, err := secondaryCtx.KubernetesClient(t).RbacV1().ClusterRoleBindings().Create(context.Background(), &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: authMethodRBACName, + }, + Subjects: []rbacv1.Subject{{Kind: rbacv1.ServiceAccountKind, Name: authMethodRBACName, Namespace: ns}}, + RoleRef: rbacv1.RoleRef{APIGroup: "rbac.authorization.k8s.io", Name: "system:auth-delegator", Kind: "ClusterRole"}, + }, metav1.CreateOptions{}) + require.NoError(t, err) + + // Create service account for the auth method in the secondary cluster. + _, err = secondaryCtx.KubernetesClient(t).CoreV1().ServiceAccounts(ns).Create(context.Background(), &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: authMethodRBACName, + }, + }, metav1.CreateOptions{}) + require.NoError(t, err) + t.Cleanup(func() { + secondaryCtx.KubernetesClient(t).RbacV1().ClusterRoleBindings().Delete(context.Background(), authMethodRBACName, metav1.DeleteOptions{}) + secondaryCtx.KubernetesClient(t).CoreV1().ServiceAccounts(ns).Delete(context.Background(), authMethodRBACName, metav1.DeleteOptions{}) + }) + + // Figure out the host for the Kubernetes API. This needs to be reachable from the Vault server + // in the primary cluster. + k8sAuthMethodHost := k8s.KubernetesAPIServerHost(t, cfg, secondaryCtx) + + // Now, configure the auth method in Vault. + secondaryVaultCluster.ConfigureAuthMethod(t, vaultClient, "kubernetes-dc2", k8sAuthMethodHost, authMethodRBACName, ns) + } + + configureKubernetesAuthRoles(t, vaultClient, consulReleaseName, ns, "kubernetes-dc2", "dc2") + + // Generate a CA and create PKI roles for the primary and secondary Consul servers. + configurePKICA(t, vaultClient) + primaryCertPath := configurePKICertificates(t, vaultClient, consulReleaseName, ns, "dc1") + secondaryCertPath := configurePKICertificates(t, vaultClient, consulReleaseName, ns, "dc2") + + // Move Vault CA secret from primary to secondary so that we can mount it to pods in the + // secondary cluster. 
+ vaultCASecretName := vault.CASecretName(vaultReleaseName) + logger.Logf(t, "retrieving Vault CA secret %s from the primary cluster and applying to the secondary", vaultCASecretName) + vaultCASecret, err := primaryCtx.KubernetesClient(t).CoreV1().Secrets(primaryCtx.KubectlOptions(t).Namespace).Get(context.Background(), vaultCASecretName, metav1.GetOptions{}) + vaultCASecret.ResourceVersion = "" + require.NoError(t, err) + _, err = secondaryCtx.KubernetesClient(t).CoreV1().Secrets(secondaryCtx.KubectlOptions(t).Namespace).Create(context.Background(), vaultCASecret, metav1.CreateOptions{}) + require.NoError(t, err) + t.Cleanup(func() { + secondaryCtx.KubernetesClient(t).CoreV1().Secrets(ns).Delete(context.Background(), vaultCASecretName, metav1.DeleteOptions{}) + }) + + primaryConsulHelmValues := map[string]string{ + "global.datacenter": "dc1", + + "global.federation.enabled": "true", + + // TLS config. + "global.tls.enabled": "true", + "global.tls.httpsOnly": "false", + "global.tls.enableAutoEncrypt": "true", + "global.tls.caCert.secretName": "pki/cert/ca", + "server.serverCert.secretName": primaryCertPath, + + // Gossip config. + "global.gossipEncryption.secretName": "consul/data/secret/gossip", + "global.gossipEncryption.secretKey": "gossip", + + // Mesh config. + "connectInject.enabled": "true", + "controller.enabled": "true", + "meshGateway.enabled": "true", + "meshGateway.replicas": "1", + + // Server config. + "server.extraVolumes[0].type": "secret", + "server.extraVolumes[0].name": vaultCASecretName, + "server.extraVolumes[0].load": "false", + + // Vault config. + "global.secretsBackend.vault.enabled": "true", + "global.secretsBackend.vault.consulServerRole": "consul-server", + "global.secretsBackend.vault.consulClientRole": "consul-client", + "global.secretsBackend.vault.consulCARole": "consul-ca", + "global.secretsBackend.vault.ca.secretName": vaultCASecretName, + "global.secretsBackend.vault.ca.secretKey": "tls.crt", + } + + if cfg.UseKind { + primaryConsulHelmValues["meshGateway.service.type"] = "NodePort" + primaryConsulHelmValues["meshGateway.service.nodePort"] = "30000" + } + + primaryConsulCluster := consul.NewHelmCluster(t, primaryConsulHelmValues, primaryCtx, cfg, consulReleaseName) + primaryConsulCluster.Create(t) + + // Get the address of the mesh gateway. + primaryMeshGWAddress := meshGatewayAddress(t, cfg, primaryCtx, consulReleaseName) + serverExtraConfig := fmt.Sprintf(`"{\"primary_gateways\":[\"%s\"]\,\"primary_datacenter\":\"dc1\"}"`, primaryMeshGWAddress) + secondaryConsulHelmValues := map[string]string{ + "global.datacenter": "dc2", + + "global.federation.enabled": "true", + + // TLS config. + "global.tls.enabled": "true", + "global.tls.httpsOnly": "false", + "global.tls.enableAutoEncrypt": "true", + "global.tls.caCert.secretName": "pki/cert/ca", + "server.serverCert.secretName": secondaryCertPath, + + // Gossip config. + "global.gossipEncryption.secretName": "consul/data/secret/gossip", + "global.gossipEncryption.secretKey": "gossip", + + // Mesh config. + "connectInject.enabled": "true", + "meshGateway.enabled": "true", + "meshGateway.replicas": "1", + + // Server config. + "server.extraVolumes[0].type": "secret", + "server.extraVolumes[0].name": vaultCASecretName, + "server.extraVolumes[0].load": "false", + "server.extraConfig": serverExtraConfig, + + // Vault config. 
+ "global.secretsBackend.vault.enabled": "true", + "global.secretsBackend.vault.consulServerRole": "consul-server", + "global.secretsBackend.vault.consulClientRole": "consul-client", + "global.secretsBackend.vault.consulCARole": "consul-ca", + "global.secretsBackend.vault.ca.secretName": vaultCASecretName, + "global.secretsBackend.vault.ca.secretKey": "tls.crt", + "global.secretsBackend.vault.agentAnnotations": fmt.Sprintf("vault.hashicorp.com/tls-server-name: %s-vault", vaultReleaseName), + } + + if cfg.UseKind { + secondaryConsulHelmValues["meshGateway.service.type"] = "NodePort" + secondaryConsulHelmValues["meshGateway.service.nodePort"] = "30000" + } + + // Install the secondary consul cluster in the secondary kubernetes context. + secondaryConsulCluster := consul.NewHelmCluster(t, secondaryConsulHelmValues, secondaryCtx, cfg, consulReleaseName) + secondaryConsulCluster.Create(t) + + // Verify federation between servers. + logger.Log(t, "verifying federation was successful") + primaryClient := primaryConsulCluster.SetupConsulClient(t, false) + secondaryClient := secondaryConsulCluster.SetupConsulClient(t, false) + helpers.VerifyFederation(t, primaryClient, secondaryClient, consulReleaseName, false) + + // Create a ProxyDefaults resource to configure services to use the mesh + // gateways. + logger.Log(t, "creating proxy-defaults config") + kustomizeDir := "../fixtures/bases/mesh-gateway" + k8s.KubectlApplyK(t, primaryCtx.KubectlOptions(t), kustomizeDir) + helpers.Cleanup(t, cfg.NoCleanupOnFailure, func() { + k8s.KubectlDeleteK(t, primaryCtx.KubectlOptions(t), kustomizeDir) + }) + + // Check that we can connect services over the mesh gateways. + logger.Log(t, "creating static-server in dc2") + k8s.DeployKustomize(t, secondaryCtx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-server-inject") + + logger.Log(t, "creating static-client in dc1") + k8s.DeployKustomize(t, primaryCtx.KubectlOptions(t), cfg.NoCleanupOnFailure, cfg.DebugDirectory, "../fixtures/cases/static-client-multi-dc") + + logger.Log(t, "checking that connection is successful") + k8s.CheckStaticServerConnectionSuccessful(t, primaryCtx.KubectlOptions(t), "http://localhost:1234") +} + +// vaultAddress returns Vault's server URL depending on test configuration. +func vaultAddress(t *testing.T, cfg *config.TestConfig, ctx environment.TestContext, vaultReleaseName string) string { + vaultHost := k8s.ServiceHost(t, cfg, ctx, fmt.Sprintf("%s-vault", vaultReleaseName)) + if cfg.UseKind { + return fmt.Sprintf("https://%s:31000", vaultHost) + } + return fmt.Sprintf("https://%s:8200", vaultHost) +} + +// meshGatewayAddress returns a full address of the mesh gateway depending on configuration. 
+func meshGatewayAddress(t *testing.T, cfg *config.TestConfig, ctx environment.TestContext, consulReleaseName string) string { + primaryMeshGWHost := k8s.ServiceHost(t, cfg, ctx, fmt.Sprintf("%s-consul-mesh-gateway", consulReleaseName)) + if cfg.UseKind { + return fmt.Sprintf("%s:%d", primaryMeshGWHost, 30000) + } else { + return fmt.Sprintf("%s:%d", primaryMeshGWHost, 443) + } +} diff --git a/charts/consul/templates/mesh-gateway-deployment.yaml b/charts/consul/templates/mesh-gateway-deployment.yaml index a66242e2c8..8a0a05caba 100644 --- a/charts/consul/templates/mesh-gateway-deployment.yaml +++ b/charts/consul/templates/mesh-gateway-deployment.yaml @@ -36,6 +36,20 @@ spec: component: mesh-gateway annotations: "consul.hashicorp.com/connect-inject": "false" + {{- if (and .Values.global.secretsBackend.vault.enabled .Values.global.tls.enabled) }} + "vault.hashicorp.com/agent-init-first": "true" + "vault.hashicorp.com/agent-inject": "true" + "vault.hashicorp.com/role": {{ .Values.global.secretsBackend.vault.consulCARole }} + "vault.hashicorp.com/agent-inject-secret-serverca.crt": {{ .Values.global.tls.caCert.secretName }} + "vault.hashicorp.com/agent-inject-template-serverca.crt": {{ template "consul.serverTLSCATemplate" . }} + {{- if and .Values.global.secretsBackend.vault.ca.secretName .Values.global.secretsBackend.vault.ca.secretKey }} + "vault.hashicorp.com/agent-extra-secret": "{{ .Values.global.secretsBackend.vault.ca.secretName }}" + "vault.hashicorp.com/ca-cert": "/vault/custom/{{ .Values.global.secretsBackend.vault.ca.secretKey }}" + {{- end }} + {{- if .Values.global.secretsBackend.vault.agentAnnotations }} + {{ tpl .Values.global.secretsBackend.vault.agentAnnotations . | nindent 8 | trim }} + {{- end }} + {{- end }} {{- if (and .Values.global.metrics.enabled .Values.global.metrics.enableGatewayMetrics) }} "prometheus.io/scrape": "true" "prometheus.io/path": "/metrics" diff --git a/charts/consul/test/unit/mesh-gateway-deployment.bats b/charts/consul/test/unit/mesh-gateway-deployment.bats index f796f4bb46..c434fce1c5 100755 --- a/charts/consul/test/unit/mesh-gateway-deployment.bats +++ b/charts/consul/test/unit/mesh-gateway-deployment.bats @@ -1531,3 +1531,175 @@ EOF [ "$status" -eq 1 ] [[ "$output" =~ "global.enableConsulNamespaces must be true if global.adminPartitions.enabled=true" ]] } + +#-------------------------------------------------------------------- +# Vault + +@test "meshGateway/Deployment: vault CA is not configured by default" { + cd `chart_dir` + local object=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "meshGateway/Deployment: vault CA is not configured when secretName is set but secretKey is not" { + cd `chart_dir` + local object=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.ca.secretName=ca' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "meshGateway/Deployment: vault CA is not configured when secretKey is set but secretName is not" { + cd `chart_dir` + local object=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . | tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/agent-extra-secret")') + [ "${actual}" = "false" ] + local actual=$(echo $object | yq -r '.metadata.annotations | has("vault.hashicorp.com/ca-cert")') + [ "${actual}" = "false" ] +} + +@test "meshGateway/Deployment: vault CA is configured when both secretName and secretKey are set" { + cd `chart_dir` + local object=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=test' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.secretsBackend.vault.ca.secretName=ca' \ + --set 'global.secretsBackend.vault.ca.secretKey=tls.crt' \ + . 
| tee /dev/stderr | + yq -r '.spec.template' | tee /dev/stderr) + + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/agent-extra-secret"') + [ "${actual}" = "ca" ] + local actual=$(echo $object | yq -r '.metadata.annotations."vault.hashicorp.com/ca-cert"') + [ "${actual}" = "/vault/custom/tls.crt" ] +} + +@test "meshGateway/Deployment: vault tls annotations are set when tls is enabled" { + cd `chart_dir` + local cmd=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=foo' \ + --set 'global.secretsBackend.vault.consulServerRole=bar' \ + --set 'global.secretsBackend.vault.consulCARole=test' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'server.serverCert.secretName=pki_int/issue/test' \ + --set 'global.tls.caCert.secretName=pki_int/cert/ca' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata' | tee /dev/stderr) + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject-template-serverca.crt"]' | tee /dev/stderr)" + local expected=$'{{- with secret \"pki_int/cert/ca\" -}}\n{{- .Data.certificate -}}\n{{- end -}}' + [ "${actual}" = "${expected}" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject-secret-serverca.crt"]' | tee /dev/stderr)" + [ "${actual}" = "pki_int/cert/ca" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-init-first"]' | tee /dev/stderr)" + [ "${actual}" = "true" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/agent-inject"]' | tee /dev/stderr)" + [ "${actual}" = "true" ] + + local actual="$(echo $cmd | + yq -r '.annotations["vault.hashicorp.com/role"]' | tee /dev/stderr)" + [ "${actual}" = "test" ] +} + +#-------------------------------------------------------------------- +# Vault agent annotations + +@test "meshGateway/Deployment: no vault agent annotations defined by default" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.enabled=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + . | tee /dev/stderr | + yq -r '.spec.template.metadata.annotations | del(."consul.hashicorp.com/connect-inject") | del(."vault.hashicorp.com/agent-inject") | del(."vault.hashicorp.com/role")' | tee /dev/stderr) + [ "${actual}" = "{}" ] +} + +@test "meshGateway/Deployment: vault agent annotations can be set" { + cd `chart_dir` + local actual=$(helm template \ + -s templates/mesh-gateway-deployment.yaml \ + --set 'connectInject.enabled=true' \ + --set 'meshGateway.enabled=true' \ + --set 'global.tls.enabled=true' \ + --set 'global.tls.enableAutoEncrypt=true' \ + --set 'global.secretsBackend.vault.enabled=true' \ + --set 'global.secretsBackend.vault.consulClientRole=test' \ + --set 'global.secretsBackend.vault.consulServerRole=foo' \ + --set 'global.tls.caCert.secretName=foo' \ + --set 'global.secretsBackend.vault.consulCARole=carole' \ + --set 'global.secretsBackend.vault.agentAnnotations=foo: bar' \ + . 
| tee /dev/stderr | + yq -r '.spec.template.metadata.annotations.foo' | tee /dev/stderr) + [ "${actual}" = "bar" ] +}