Skip to content

Commit

Permalink
WIP: Test BYO certificates
Browse files Browse the repository at this point in the history
This adds an integration test for a BYO (bring-your-own) CA certificate, i.e. a cluster CA that is supplied by the user instead of being generated by the controller.
  • Loading branch information
lentzi90 committed Oct 27, 2023
1 parent f5b7287 commit 2609cd2
Show file tree
Hide file tree
Showing 5 changed files with 267 additions and 0 deletions.
235 changes: 235 additions & 0 deletions controlplane/kubeadm/internal/controllers/controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ import (
"crypto/x509/pkix"
"fmt"
"math/big"
"path"
"sync"
"testing"
"time"
Expand Down Expand Up @@ -1392,6 +1393,240 @@ kubernetesVersion: metav1.16.1`,
}, 30*time.Second).Should(Succeed())
}

// TestReconcileInitializeControlPlane_withUserCA is an integration test that
// exercises KubeadmControlPlane initialization when the cluster CA certificate
// is provided by the user ("BYO CA") rather than generated by the controller.
// It creates a Cluster, a pre-generated CA secret, an infrastructure machine
// template and a KubeadmControlPlane, then reconciles until a control plane
// Machine has been created and a kubeconfig secret exists.
func TestReconcileInitializeControlPlane_withUserCA(t *testing.T) {
	// setup creates an isolated namespace holding all test objects.
	setup := func(t *testing.T, g *WithT) *corev1.Namespace {
		t.Helper()

		t.Log("Creating the namespace")
		ns, err := env.CreateNamespace(ctx, "test-kcp-reconcile-initializecontrolplane")
		g.Expect(err).ToNot(HaveOccurred())

		return ns
	}

	// teardown deletes the namespace and, transitively, the namespaced
	// objects created during the test.
	teardown := func(t *testing.T, g *WithT, ns *corev1.Namespace) {
		t.Helper()

		t.Log("Deleting the namespace")
		g.Expect(env.Delete(ctx, ns)).To(Succeed())
	}

	g := NewWithT(t)
	namespace := setup(t, g)
	defer teardown(t, g, namespace)

	// Cluster with a control plane endpoint set, so that kubeconfig
	// generation can proceed once the CA secret exists.
	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: namespace.Name})
	cluster.Spec = clusterv1.ClusterSpec{
		ControlPlaneEndpoint: clusterv1.APIEndpoint{
			Host: "test.local",
			Port: 9999,
		},
	}

	// Pre-generate the cluster CA to simulate a user bringing their own CA.
	// Note: "path" (not "path/filepath") is used deliberately — these are
	// slash-separated in-cluster paths, not host OS paths.
	caCertificate := &secret.Certificate{
		Purpose:  secret.ClusterCA,
		CertFile: path.Join(secret.DefaultCertificatesDir, "ca.crt"),
		KeyFile:  path.Join(secret.DefaultCertificatesDir, "ca.key"),
	}
	// The certificate is user provided so no owner references should be added.
	g.Expect(caCertificate.Generate()).To(Succeed())
	// Store the CA as a secret following the Cluster API naming convention
	// ("<cluster-name>-ca"), so the controller picks it up instead of
	// generating a new CA.
	certSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace.Name,
			Name:      cluster.Name + "-ca",
			Labels: map[string]string{
				// The cluster name label is required for the secret to be
				// visible to the label-filtered secret cache — see the
				// envtest manager cache options.
				clusterv1.ClusterNameLabel: cluster.Name,
			},
		},
		Data: map[string][]byte{
			secret.TLSKeyDataName: caCertificate.KeyPair.Key,
			secret.TLSCrtDataName: caCertificate.KeyPair.Cert,
		},
		Type: clusterv1.ClusterSecretType,
	}

	g.Expect(env.Create(ctx, cluster)).To(Succeed())
	// Mark the infrastructure as ready so the KCP reconciler can proceed
	// past the infrastructure-readiness gate.
	patchHelper, err := patch.NewHelper(cluster, env)
	g.Expect(err).ToNot(HaveOccurred())
	cluster.Status = clusterv1.ClusterStatus{InfrastructureReady: true}
	g.Expect(patchHelper.Patch(ctx, cluster)).To(Succeed())

	g.Expect(env.Create(ctx, certSecret)).To(Succeed())

	// Minimal generic infrastructure machine template for KCP to clone
	// Machines from.
	genericInfrastructureMachineTemplate := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       "GenericInfrastructureMachineTemplate",
			"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
			"metadata": map[string]interface{}{
				"name":      "infra-foo",
				"namespace": cluster.Namespace,
			},
			"spec": map[string]interface{}{
				"template": map[string]interface{}{
					"spec": map[string]interface{}{
						"hello": "world",
					},
				},
			},
		},
	}
	g.Expect(env.Create(ctx, genericInfrastructureMachineTemplate)).To(Succeed())

	// KubeadmControlPlane owned by the Cluster, referencing the template
	// above. Replicas is nil, which the controller defaults.
	kcp := &controlplanev1.KubeadmControlPlane{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: cluster.Namespace,
			Name:      "foo",
			OwnerReferences: []metav1.OwnerReference{
				{
					Kind:       "Cluster",
					APIVersion: clusterv1.GroupVersion.String(),
					Name:       cluster.Name,
					UID:        cluster.UID,
				},
			},
		},
		Spec: controlplanev1.KubeadmControlPlaneSpec{
			Replicas: nil,
			Version:  "v1.16.6",
			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
				InfrastructureRef: corev1.ObjectReference{
					Kind:       genericInfrastructureMachineTemplate.GetKind(),
					APIVersion: genericInfrastructureMachineTemplate.GetAPIVersion(),
					Name:       genericInfrastructureMachineTemplate.GetName(),
					Namespace:  cluster.Namespace,
				},
			},
			KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{},
		},
	}
	g.Expect(env.Create(ctx, kcp)).To(Succeed())

	// Seed the workload-cluster ConfigMaps the reconciler reads/updates
	// (CoreDNS Corefile and kubeadm ClusterConfiguration).
	corednsCM := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "coredns",
			Namespace: namespace.Name,
		},
		Data: map[string]string{
			"Corefile": "original-core-file",
		},
	}
	g.Expect(env.Create(ctx, corednsCM)).To(Succeed())

	kubeadmCM := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "kubeadm-config",
			Namespace: namespace.Name,
		},
		Data: map[string]string{
			"ClusterConfiguration": `apiServer:
dns:
  type: CoreDNS
imageRepository: registry.k8s.io
kind: ClusterConfiguration
kubernetesVersion: metav1.16.1`,
		},
	}
	g.Expect(env.Create(ctx, kubeadmCM)).To(Succeed())

	// Stand-in CoreDNS Deployment so CoreDNS-related reconciliation has an
	// object to operate on.
	corednsDepl := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "coredns",
			Namespace: namespace.Name,
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"coredns": "",
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Name: "coredns",
					Labels: map[string]string{
						"coredns": "",
					},
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{
						Name:  "coredns",
						Image: "registry.k8s.io/coredns:1.6.2",
					}},
				},
			},
		},
	}
	g.Expect(env.Create(ctx, corednsDepl)).To(Succeed())

	// Reconciler wired against the test env; the workload cluster is faked
	// so no real API server is contacted for the "workload" side.
	r := &KubeadmControlPlaneReconciler{
		Client:              env,
		SecretCachingClient: secretCachingClient,
		recorder:            record.NewFakeRecorder(32),
		managementCluster: &fakeManagementCluster{
			Management: &internal.Management{Client: env},
			Workload: fakeWorkloadCluster{
				Workload: &internal.Workload{
					Client: env,
				},
				Status: internal.ClusterStatus{},
			},
		},
		managementClusterUncached: &fakeManagementCluster{
			Management: &internal.Management{Client: env},
			Workload: fakeWorkloadCluster{
				Workload: &internal.Workload{
					Client: env,
				},
				Status: internal.ClusterStatus{},
			},
		},
		ssaCache: ssa.NewCache(),
	}

	result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
	g.Expect(err).ToNot(HaveOccurred())
	// this first requeue is to add finalizer
	g.Expect(result).To(BeComparableTo(ctrl.Result{}))
	g.Expect(env.GetAPIReader().Get(ctx, util.ObjectKey(kcp), kcp)).To(Succeed())
	g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))

	// Keep reconciling until the control plane has been initialized: one
	// Machine created, status populated, and a kubeconfig secret present.
	g.Eventually(func(g Gomega) {
		_, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
		g.Expect(err).ToNot(HaveOccurred())
		g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKey{Name: kcp.Name, Namespace: kcp.Namespace}, kcp)).To(Succeed())
		// Expect the referenced infrastructure template to have a Cluster Owner Reference.
		g.Expect(env.GetAPIReader().Get(ctx, util.ObjectKey(genericInfrastructureMachineTemplate), genericInfrastructureMachineTemplate)).To(Succeed())
		g.Expect(genericInfrastructureMachineTemplate.GetOwnerReferences()).To(ContainElement(metav1.OwnerReference{
			APIVersion: clusterv1.GroupVersion.String(),
			Kind:       "Cluster",
			Name:       cluster.Name,
			UID:        cluster.UID,
		}))

		// Always expect that the Finalizer is set on the passed in resource
		g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))

		g.Expect(kcp.Status.Selector).NotTo(BeEmpty())
		g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(1))
		// The control plane is not Available yet: the Machine exists but no
		// node has joined.
		g.Expect(conditions.IsFalse(kcp, controlplanev1.AvailableCondition)).To(BeTrue())

		// A kubeconfig secret must have been generated from the user CA.
		k, err := kubeconfig.FromSecret(ctx, env, util.ObjectKey(cluster))
		g.Expect(err).ToNot(HaveOccurred())
		g.Expect(k).NotTo(BeEmpty())

		machineList := &clusterv1.MachineList{}
		g.Expect(env.GetAPIReader().List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed())
		g.Expect(machineList.Items).To(HaveLen(1))

		machine := machineList.Items[0]
		g.Expect(machine.Name).To(HavePrefix(kcp.Name))
		// Newly cloned infra objects should have the infraref annotation.
		infraObj, err := external.Get(ctx, r.Client, &machine.Spec.InfrastructureRef, machine.Spec.InfrastructureRef.Namespace)
		g.Expect(err).ToNot(HaveOccurred())
		g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromNameAnnotation, genericInfrastructureMachineTemplate.GetName()))
		g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromGroupKindAnnotation, genericInfrastructureMachineTemplate.GroupVersionKind().GroupKind().String()))
	}, 30*time.Second).Should(Succeed())
}

func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) {
setup := func(t *testing.T, g *WithT) (*corev1.Namespace, *clusterv1.Cluster) {
t.Helper()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,9 @@ metadata:
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: ns.Name,
Labels: map[string]string{
clusterv1.ClusterNameLabel: clusterName,
},
},
Type: "addons.cluster.x-k8s.io/resource-set",
StringData: map[string]string{
Expand Down Expand Up @@ -430,6 +433,9 @@ metadata:
ObjectMeta: metav1.ObjectMeta{
Name: newSecretName,
Namespace: ns.Name,
Labels: map[string]string{
clusterv1.ClusterNameLabel: testCluster.Name,
},
},
Type: addonsv1.ClusterResourceSetSecretType,
Data: map[string][]byte{},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
Expand Down Expand Up @@ -408,6 +409,9 @@ func fakeCASecret(namespace, name string, caData []byte) *corev1.Secret {
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Labels: map[string]string{
clusterv1.ClusterNameLabel: "",
},
},
Data: map[string][]byte{},
}
Expand Down
3 changes: 3 additions & 0 deletions internal/controllers/topology/cluster/reconcile_state.go
Original file line number Diff line number Diff line change
Expand Up @@ -170,6 +170,9 @@ func clusterShim(c *clusterv1.Cluster) *corev1.Secret {
OwnerReferences: []metav1.OwnerReference{
*ownerReferenceTo(c),
},
Labels: map[string]string{
clusterv1.ClusterNameLabel: c.Name,
},
},
Type: clusterv1.ClusterSecretType,
}
Expand Down
19 changes: 19 additions & 0 deletions internal/test/envtest/environment.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,8 @@ import (
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
kerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
Expand All @@ -44,6 +46,7 @@ import (
"k8s.io/klog/v2"
"k8s.io/klog/v2/klogr"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/manager"
Expand Down Expand Up @@ -254,11 +257,27 @@ func newEnvironment(uncachedObjs ...client.Object) *Environment {
}
}

req, _ := labels.NewRequirement(clusterv1.ClusterNameLabel, selection.Exists, nil)
clusterSecretCacheSelector := labels.NewSelector().Add(*req)
syncPeriod := 10 * time.Minute

options := manager.Options{
Scheme: scheme.Scheme,
Metrics: metricsserver.Options{
BindAddress: "0",
},
Cache: cache.Options{
// Namespaces: watchNamespaces,
SyncPeriod: &syncPeriod,
ByObject: map[client.Object]cache.ByObject{
// Note: Only Secrets with the cluster name label are cached.
// The default client of the manager won't use the cache for secrets at all (see Client.Cache.DisableFor).
// The cached secrets will only be used by the secretCachingClient we create below.
&corev1.Secret{}: {
Label: clusterSecretCacheSelector,
},
},
},
Client: client.Options{
Cache: &client.CacheOptions{
DisableFor: uncachedObjs,
Expand Down

0 comments on commit 2609cd2

Please sign in to comment.