From 5cccebeca604868177cdedc4daf8f609776f0a8a Mon Sep 17 00:00:00 2001 From: Yang Le Date: Thu, 31 Aug 2023 10:56:55 +0800 Subject: [PATCH] =?UTF-8?q?=E2=9C=A8=20support=20proxy=20between=20hub=20c?= =?UTF-8?q?luster=20and=20managed=20cluster?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Yang Le --- .../bootstrapcontroller.go | 1 + .../bootstrapcontroller_test.go | 33 +- pkg/registration/clientcert/certificate.go | 12 +- .../clientcert/certificate_test.go | 67 ++++ pkg/registration/spoke/spokeagent.go | 33 +- pkg/registration/spoke/spokeagent_test.go | 48 +++ .../registration/addon_registration_test.go | 323 ++++++++++-------- .../registration/integration_suite_test.go | 23 ++ .../registration/spokecluster_joining_test.go | 234 ++++++++----- test/integration/util/authentication.go | 146 +++++++- test/integration/util/proxy.go | 213 ++++++++++++ 11 files changed, 888 insertions(+), 245 deletions(-) create mode 100644 test/integration/util/proxy.go diff --git a/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller.go b/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller.go index d387b5385..15072de6b 100644 --- a/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller.go +++ b/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller.go @@ -154,6 +154,7 @@ func (k *bootstrapController) sync(ctx context.Context, controllerContext factor } if bootstrapKubeconfig.Server != hubKubeconfig.Server || + bootstrapKubeconfig.ProxyURL != hubKubeconfig.ProxyURL || !bytes.Equal(bootstrapKubeconfig.CertificateAuthorityData, hubKubeconfig.CertificateAuthorityData) { // the bootstrap kubeconfig secret is changed, reload the klusterlet agents reloadReason := fmt.Sprintf("the bootstrap secret %s/%s is changed", agentNamespace, helpers.BootstrapHubKubeConfig) diff --git a/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller_test.go b/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller_test.go index 2e60341be..841988505 100644 --- a/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller_test.go +++ b/pkg/operator/operators/klusterlet/controllers/bootstrapcontroller/bootstrapcontroller_test.go @@ -57,7 +57,7 @@ func TestSync(t *testing.T) { name: "client certificate expired", queueKey: "test/test", objects: []runtime.Object{ - newSecret("bootstrap-hub-kubeconfig", "test", newKubeConfig("https://10.0.118.47:6443")), + newSecret("bootstrap-hub-kubeconfig", "test", newKubeConfig("https://10.0.118.47:6443", "")), newHubKubeConfigSecret("test", time.Now().Add(-60*time.Second).UTC()), }, expectedRebootstrapping: true, @@ -70,7 +70,7 @@ func TestSync(t *testing.T) { { name: "the bootstrap is not started", queueKey: "test/test", - objects: []runtime.Object{newSecret("bootstrap-hub-kubeconfig", "test", newKubeConfig("https://10.0.118.47:6443"))}, + objects: []runtime.Object{newSecret("bootstrap-hub-kubeconfig", "test", newKubeConfig("https://10.0.118.47:6443", ""))}, validateActions: func(t *testing.T, actions []clienttesting.Action) { if len(actions) != 0 { t.Errorf("expected no actions happens, but got %#v", actions) @@ -81,7 +81,7 @@ func TestSync(t *testing.T) { name: "the bootstrap secret is not changed", queueKey: "test/test", objects: []runtime.Object{ - newSecret("bootstrap-hub-kubeconfig", "test", 
newKubeConfig("https://10.0.118.47:6443")), + newSecret("bootstrap-hub-kubeconfig", "test", newKubeConfig("https://10.0.118.47:6443", "")), newHubKubeConfigSecret("test", time.Now().Add(60*time.Second).UTC()), }, validateActions: func(t *testing.T, actions []clienttesting.Action) { @@ -91,10 +91,24 @@ func TestSync(t *testing.T) { }, }, { - name: "the bootstrap secret is changed", + name: "hub server url is changed", queueKey: "test/test", objects: []runtime.Object{ - newSecret("bootstrap-hub-kubeconfig", "test", newKubeConfig("https://10.0.118.48:6443")), + newSecret("bootstrap-hub-kubeconfig", "test", newKubeConfig("https://10.0.118.48:6443", "")), + newHubKubeConfigSecret("test", time.Now().Add(60*time.Second).UTC()), + }, + expectedRebootstrapping: true, + validateActions: func(t *testing.T, actions []clienttesting.Action) { + if len(actions) != 0 { + t.Errorf("expected no actions happens, but got %#v", actions) + } + }, + }, + { + name: "proxy url is changed", + queueKey: "test/test", + objects: []runtime.Object{ + newSecret("bootstrap-hub-kubeconfig", "test", newKubeConfig("https://10.0.118.48:6443", "https://10.0.118.10:3129")), newHubKubeConfigSecret("test", time.Now().Add(60*time.Second).UTC()), }, expectedRebootstrapping: true, @@ -109,7 +123,7 @@ func TestSync(t *testing.T) { queueKey: "test/test", initRebootstrapping: true, objects: []runtime.Object{ - newSecret("bootstrap-hub-kubeconfig", "test", newKubeConfig("https://10.0.118.48:6443")), + newSecret("bootstrap-hub-kubeconfig", "test", newKubeConfig("https://10.0.118.48:6443", "")), newHubKubeConfigSecret("test", time.Now().Add(60*time.Second).UTC()), newDeploymentWithAvailableReplicas("test-registration-agent", "test", 1), }, @@ -123,7 +137,7 @@ func TestSync(t *testing.T) { queueKey: "test/test", initRebootstrapping: true, objects: []runtime.Object{ - newSecret("bootstrap-hub-kubeconfig", "test", newKubeConfig("https://10.0.118.48:6443")), + newSecret("bootstrap-hub-kubeconfig", "test", newKubeConfig("https://10.0.118.48:6443", "")), newHubKubeConfigSecret("test", time.Now().Add(60*time.Second).UTC()), newDeployment("test-registration-agent", "test"), }, @@ -284,11 +298,12 @@ func newSecret(name, namespace string, kubeConfig []byte) *corev1.Secret { return secret } -func newKubeConfig(host string) []byte { +func newKubeConfig(host, proxyURL string) []byte { configData, _ := runtime.Encode(clientcmdlatest.Codec, &clientcmdapi.Config{ Clusters: map[string]*clientcmdapi.Cluster{"default-cluster": { Server: host, InsecureSkipTLSVerify: true, + ProxyURL: proxyURL, }}, Contexts: map[string]*clientcmdapi.Context{"default-context": { Cluster: "default-cluster", @@ -345,7 +360,7 @@ func newHubKubeConfigSecret(namespace string, notAfter time.Time) *corev1.Secret Namespace: namespace, }, Data: map[string][]byte{ - "kubeconfig": newKubeConfig("https://10.0.118.47:6443"), + "kubeconfig": newKubeConfig("https://10.0.118.47:6443", ""), "tls.crt": pem.EncodeToMemory(&pem.Block{ Type: certutil.CertificateBlockType, Bytes: cert.Raw, diff --git a/pkg/registration/clientcert/certificate.go b/pkg/registration/clientcert/certificate.go index aaac444b6..48110ad1d 100644 --- a/pkg/registration/clientcert/certificate.go +++ b/pkg/registration/clientcert/certificate.go @@ -17,7 +17,6 @@ import ( "k8s.io/client-go/kubernetes" csrclient "k8s.io/client-go/kubernetes/typed/certificates/v1" certificatesv1listers "k8s.io/client-go/listers/certificates/v1" - restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" clientcmdapi 
"k8s.io/client-go/tools/clientcmd/api" certutil "k8s.io/client-go/util/cert" @@ -146,19 +145,20 @@ func getCertValidityPeriod(secret *corev1.Secret) (*time.Time, *time.Time, error } // BuildKubeconfig builds a kubeconfig based on a rest config template with a cert/key pair -func BuildKubeconfig(clientConfig *restclient.Config, certPath, keyPath string) clientcmdapi.Config { +func BuildKubeconfig(server string, caData []byte, proxyURL, clientCertPath, clientKeyPath string) clientcmdapi.Config { // Build kubeconfig. kubeconfig := clientcmdapi.Config{ // Define a cluster stanza based on the bootstrap kubeconfig. Clusters: map[string]*clientcmdapi.Cluster{"default-cluster": { - Server: clientConfig.Host, + Server: server, InsecureSkipTLSVerify: false, - CertificateAuthorityData: clientConfig.CAData, + CertificateAuthorityData: caData, + ProxyURL: proxyURL, }}, // Define auth based on the obtained client cert. AuthInfos: map[string]*clientcmdapi.AuthInfo{"default-auth": { - ClientCertificate: certPath, - ClientKey: keyPath, + ClientCertificate: clientCertPath, + ClientKey: clientKeyPath, }}, // Define a context that connects the auth info and cluster, and set it as the default Contexts: map[string]*clientcmdapi.Context{"default-context": { diff --git a/pkg/registration/clientcert/certificate_test.go b/pkg/registration/clientcert/certificate_test.go index f2c802161..46d7e6891 100644 --- a/pkg/registration/clientcert/certificate_test.go +++ b/pkg/registration/clientcert/certificate_test.go @@ -2,6 +2,7 @@ package clientcert import ( "crypto/x509/pkix" + "reflect" "testing" "time" @@ -240,3 +241,69 @@ func TestGetCertValidityPeriod(t *testing.T) { }) } } + +func TestBuildKubeconfig(t *testing.T) { + cases := []struct { + name string + server string + proxyURL string + caData []byte + clientCertFile string + clientKeyFile string + }{ + { + name: "without proxy", + server: "https://127.0.0.1:6443", + caData: []byte("fake-ca-bundle"), + clientCertFile: "tls.crt", + clientKeyFile: "tls.key", + }, + { + name: "with proxy", + server: "https://127.0.0.1:6443", + caData: []byte("fake-ca-bundle-with-proxy-ca"), + proxyURL: "https://127.0.0.1:3129", + clientCertFile: "tls.crt", + clientKeyFile: "tls.key", + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + kubeconfig := BuildKubeconfig(c.server, c.caData, c.proxyURL, c.clientCertFile, c.clientKeyFile) + currentContext, ok := kubeconfig.Contexts[kubeconfig.CurrentContext] + if !ok { + t.Errorf("current context %q not found: %v", kubeconfig.CurrentContext, kubeconfig) + } + + cluster, ok := kubeconfig.Clusters[currentContext.Cluster] + if !ok { + t.Errorf("cluster %q not found: %v", currentContext.Cluster, kubeconfig) + } + + if cluster.Server != c.server { + t.Errorf("expected server %q, but got %q", c.server, cluster.Server) + } + + if cluster.ProxyURL != c.proxyURL { + t.Errorf("expected proxy URL %q, but got %q", c.proxyURL, cluster.ProxyURL) + } + + if !reflect.DeepEqual(cluster.CertificateAuthorityData, c.caData) { + t.Errorf("expected ca data %v, but got %v", c.caData, cluster.CertificateAuthorityData) + } + + authInfo, ok := kubeconfig.AuthInfos[currentContext.AuthInfo] + if !ok { + t.Errorf("auth info %q not found: %v", currentContext.AuthInfo, kubeconfig) + } + + if authInfo.ClientCertificate != c.clientCertFile { + t.Errorf("expected client certificate %q, but got %q", c.clientCertFile, authInfo.ClientCertificate) + } + + if authInfo.ClientKey != c.clientKeyFile { + t.Errorf("expected client key %q, but got %q", 
c.clientKeyFile, authInfo.ClientKey) + } + }) + } +} diff --git a/pkg/registration/spoke/spokeagent.go b/pkg/registration/spoke/spokeagent.go index 47cb4603b..5aa3c5020 100644 --- a/pkg/registration/spoke/spokeagent.go +++ b/pkg/registration/spoke/spokeagent.go @@ -213,7 +213,12 @@ func (o *SpokeAgentConfig) RunSpokeAgentWithSpokeInformers(ctx context.Context, managementKubeClient, 10*time.Minute, informers.WithNamespace(o.agentOptions.ComponentNamespace)) // create a kubeconfig with references to the key/cert files in the same secret - kubeconfig := clientcert.BuildKubeconfig(bootstrapClientConfig, clientcert.TLSCertFile, clientcert.TLSKeyFile) + proxyURL, err := getProxyURLFromKubeconfig(o.registrationOption.BootstrapKubeconfig) + if err != nil { + return err + } + kubeconfig := clientcert.BuildKubeconfig(bootstrapClientConfig.Host, bootstrapClientConfig.CAData, proxyURL, + clientcert.TLSCertFile, clientcert.TLSKeyFile) kubeconfigData, err := clientcmd.Write(kubeconfig) if err != nil { return err @@ -303,7 +308,12 @@ func (o *SpokeAgentConfig) RunSpokeAgentWithSpokeInformers(ctx context.Context, recorder.Event("HubClientConfigReady", "Client config for hub is ready.") // create a kubeconfig with references to the key/cert files in the same secret - kubeconfig := clientcert.BuildKubeconfig(hubClientConfig, clientcert.TLSCertFile, clientcert.TLSKeyFile) + proxyURL, err := getProxyURLFromKubeconfig(o.agentOptions.HubKubeconfigFile) + if err != nil { + return err + } + kubeconfig := clientcert.BuildKubeconfig(hubClientConfig.Host, hubClientConfig.CAData, proxyURL, + clientcert.TLSCertFile, clientcert.TLSKeyFile) kubeconfigData, err := clientcmd.Write(kubeconfig) if err != nil { return err @@ -465,3 +475,22 @@ func (o *SpokeAgentConfig) getSpokeClusterCABundle(kubeConfig *rest.Config) ([]b } return data, nil } + +func getProxyURLFromKubeconfig(filename string) (string, error) { + config, err := clientcmd.LoadFromFile(filename) + if err != nil { + return "", err + } + + currentContext, ok := config.Contexts[config.CurrentContext] + if !ok { + return "", nil + } + + cluster, ok := config.Clusters[currentContext.Cluster] + if !ok { + return "", nil + } + + return cluster.ProxyURL, nil +} diff --git a/pkg/registration/spoke/spokeagent_test.go b/pkg/registration/spoke/spokeagent_test.go index 6d0e4d908..3d8c5523e 100644 --- a/pkg/registration/spoke/spokeagent_test.go +++ b/pkg/registration/spoke/spokeagent_test.go @@ -9,9 +9,12 @@ import ( "time" "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" commonoptions "open-cluster-management.io/ocm/pkg/common/options" testingcommon "open-cluster-management.io/ocm/pkg/common/testing" + "open-cluster-management.io/ocm/pkg/registration/clientcert" testinghelpers "open-cluster-management.io/ocm/pkg/registration/helpers/testing" ) @@ -231,3 +234,48 @@ func TestGetSpokeClusterCABundle(t *testing.T) { }) } } + +func TestGetProxyURLFromKubeconfig(t *testing.T) { + tempDir, err := os.MkdirTemp("", "testgetproxyurl") + if err != nil { + t.Errorf("unexpected error: %v", err) + } + defer os.RemoveAll(tempDir) + + kubeconfigWithoutProxy := clientcert.BuildKubeconfig("https://127.0.0.1:6443", nil, "", "tls.crt", "tls.key") + kubeconfigWithProxy := clientcert.BuildKubeconfig("https://127.0.0.1:6443", nil, "https://127.0.0.1:3129", "tls.crt", "tls.key") + + cases := []struct { + name string + kubeconfig clientcmdapi.Config + expectedProxyURL string + }{ + { + name: "without proxy url", + kubeconfig: 
kubeconfigWithoutProxy, + expectedProxyURL: "", + }, + { + name: "with proxy url", + kubeconfig: kubeconfigWithProxy, + expectedProxyURL: "https://127.0.0.1:3129", + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + filename := path.Join(tempDir, "kubeconfig") + if err := clientcmd.WriteToFile(c.kubeconfig, filename); err != nil { + t.Errorf("unexpected error: %v", err) + } + + proxyURL, err := getProxyURLFromKubeconfig(filename) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if c.expectedProxyURL != proxyURL { + t.Errorf("expect %s, but %s", c.expectedProxyURL, proxyURL) + } + }) + } +} diff --git a/test/integration/registration/addon_registration_test.go b/test/integration/registration/addon_registration_test.go index 3b0ebae1e..51c61dced 100644 --- a/test/integration/registration/addon_registration_test.go +++ b/test/integration/registration/addon_registration_test.go @@ -30,8 +30,10 @@ var _ = ginkgo.Describe("Addon Registration", func() { var managedClusterName, hubKubeconfigSecret, hubKubeconfigDir, addOnName string var err error var cancel context.CancelFunc + var bootstrapKubeconfig string + var expectedProxyURL string - ginkgo.BeforeEach(func() { + ginkgo.JustBeforeEach(func() { suffix := rand.String(5) managedClusterName = fmt.Sprintf("managedcluster-%s", suffix) hubKubeconfigSecret = fmt.Sprintf("hub-kubeconfig-secret-%s", suffix) @@ -39,7 +41,7 @@ var _ = ginkgo.Describe("Addon Registration", func() { addOnName = fmt.Sprintf("addon-%s", suffix) agentOptions := &spoke.SpokeAgentOptions{ - BootstrapKubeconfig: bootstrapKubeConfigFile, + BootstrapKubeconfig: bootstrapKubeconfig, HubKubeconfigSecret: hubKubeconfigSecret, ClusterHealthCheckPeriod: 1 * time.Minute, } @@ -148,7 +150,7 @@ var _ = ginkgo.Describe("Addon Registration", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - assertValidClientCertificate := func(secretNamespace, secretName, signerName string) { + assertValidClientCertificate := func(secretNamespace, secretName, signerName, expectedProxyURL string) { ginkgo.By("Check client certificate in secret") gomega.Eventually(func() bool { secret, err := kubeClient.CoreV1().Secrets(secretNamespace).Get(context.TODO(), secretName, metav1.GetOptions{}) @@ -161,10 +163,21 @@ var _ = ginkgo.Describe("Addon Registration", func() { if _, ok := secret.Data[clientcert.TLSCertFile]; !ok { return false } - _, ok := secret.Data[clientcert.KubeconfigFile] - if !ok && signerName == certificates.KubeAPIServerClientSignerName { - return false + kubeconfigData, ok := secret.Data[clientcert.KubeconfigFile] + + if signerName == certificates.KubeAPIServerClientSignerName { + if !ok { + return false + } + + proxyURL, err := getProxyURLFromKubeconfigData(kubeconfigData) + if err != nil { + return false + } + + return proxyURL == expectedProxyURL } + if ok && signerName != certificates.KubeAPIServerClientSignerName { return false } @@ -251,7 +264,7 @@ var _ = ginkgo.Describe("Addon Registration", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) assertSuccessCSRApproval() - assertValidClientCertificate(addOnName, getSecretName(addOnName, signerName), signerName) + assertValidClientCertificate(addOnName, getSecretName(addOnName, signerName), signerName, expectedProxyURL) assertAddonLabel(managedClusterName, addOnName, "unreachable") assertClientCertCondition(managedClusterName, addOnName) } @@ -263,174 +276,200 @@ var _ = ginkgo.Describe("Addon Registration", func() { }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) } - 
ginkgo.It("should register addon successfully", func() { - assertSuccessClusterBootstrap() - signerName := certificates.KubeAPIServerClientSignerName - assertSuccessAddOnBootstrap(signerName) + assertRegistrationSucceed := func() { + ginkgo.It("should register addon successfully", func() { + assertSuccessClusterBootstrap() + signerName := certificates.KubeAPIServerClientSignerName + assertSuccessAddOnBootstrap(signerName) - ginkgo.By("Delete the addon and check if secret is gone") - err = addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Delete(context.TODO(), addOnName, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - assertSecretGone(addOnName, getSecretName(addOnName, signerName)) + ginkgo.By("Delete the addon and check if secret is gone") + err = addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Delete(context.TODO(), addOnName, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + assertSecretGone(addOnName, getSecretName(addOnName, signerName)) - assertHasNoAddonLabel(managedClusterName, addOnName) - }) + assertHasNoAddonLabel(managedClusterName, addOnName) + }) + } - ginkgo.It("should register addon successfully even when the install namespace is not available at the beginning", func() { - assertSuccessClusterBootstrap() + ginkgo.Context("without proxy", func() { + ginkgo.BeforeEach(func() { + bootstrapKubeconfig = bootstrapKubeConfigFile + expectedProxyURL = "" + }) + assertRegistrationSucceed() - signerName := certificates.KubeAPIServerClientSignerName - ginkgo.By("Create ManagedClusterAddOn cr with required annotations") + ginkgo.It("should register addon successfully even when the install namespace is not available at the beginning", func() { + assertSuccessClusterBootstrap() - // create addon - addOn := &addonv1alpha1.ManagedClusterAddOn{ - ObjectMeta: metav1.ObjectMeta{ - Name: addOnName, - Namespace: managedClusterName, - }, - Spec: addonv1alpha1.ManagedClusterAddOnSpec{ - InstallNamespace: addOnName, - }, - } - _, err = addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Create(context.TODO(), addOn, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + signerName := certificates.KubeAPIServerClientSignerName + ginkgo.By("Create ManagedClusterAddOn cr with required annotations") - created, err := addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - created.Status = addonv1alpha1.ManagedClusterAddOnStatus{ - Registrations: []addonv1alpha1.RegistrationConfig{ - { - SignerName: signerName, + // create addon + addOn := &addonv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Name: addOnName, + Namespace: managedClusterName, + }, + Spec: addonv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: addOnName, }, - }, - } - _, err = addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).UpdateStatus(context.TODO(), created, metav1.UpdateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - assertSuccessCSRApproval() - - ginkgo.By("Wait for addon namespace") - gomega.Consistently(func() bool { - csrs, err := util.FindAddOnCSRs(kubeClient, managedClusterName, addOnName) - if err != nil { - return false } - return len(csrs) == 1 - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) - - ginkgo.By("Create addon namespace") - // create addon namespace - ns := &corev1.Namespace{ - ObjectMeta: 
metav1.ObjectMeta{ - Name: addOnName, - }, - } - _, err = kubeClient.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - assertValidClientCertificate(addOnName, getSecretName(addOnName, signerName), signerName) - - ginkgo.By("Delete the addon and check if secret is gone") - err = addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Delete(context.TODO(), addOnName, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - assertSecretGone(addOnName, getSecretName(addOnName, signerName)) - }) - - ginkgo.It("should register addon with custom signer successfully", func() { - assertSuccessClusterBootstrap() - signerName := "example.com/signer1" - assertSuccessAddOnBootstrap(signerName) - }) - - ginkgo.It("should addon registraton config updated successfully", func() { - assertSuccessClusterBootstrap() - signerName := certificates.KubeAPIServerClientSignerName - assertSuccessAddOnBootstrap(signerName) + _, err = addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Create(context.TODO(), addOn, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // update registration config and change the signer - addOn, err := addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - newSignerName := "example.com/signer1" - addOn.Status = addonv1alpha1.ManagedClusterAddOnStatus{ - Registrations: []addonv1alpha1.RegistrationConfig{ - { - SignerName: newSignerName, + created, err := addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + created.Status = addonv1alpha1.ManagedClusterAddOnStatus{ + Registrations: []addonv1alpha1.RegistrationConfig{ + { + SignerName: signerName, + }, }, - }, - } - _, err = addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).UpdateStatus(context.TODO(), addOn, metav1.UpdateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - assertSecretGone(addOnName, getSecretName(addOnName, signerName)) + } + _, err = addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).UpdateStatus(context.TODO(), created, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - assertSuccessCSRApproval() - assertValidClientCertificate(addOnName, getSecretName(addOnName, newSignerName), newSignerName) - }) + assertSuccessCSRApproval() + + ginkgo.By("Wait for addon namespace") + gomega.Consistently(func() bool { + csrs, err := util.FindAddOnCSRs(kubeClient, managedClusterName, addOnName) + if err != nil { + return false + } + return len(csrs) == 1 + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + ginkgo.By("Create addon namespace") + // create addon namespace + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: addOnName, + }, + } + _, err = kubeClient.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.It("should rotate addon client cert successfully", func() { - assertSuccessClusterBootstrap() - signerName := certificates.KubeAPIServerClientSignerName - assertSuccessAddOnBootstrap(signerName) + assertValidClientCertificate(addOnName, getSecretName(addOnName, signerName), signerName, expectedProxyURL) - secretName := getSecretName(addOnName, signerName) - secret, 
err := kubeClient.CoreV1().Secrets(addOnName).Get(context.TODO(), secretName, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Wait for cert rotation") - assertSuccessCSRApproval() - gomega.Eventually(func() bool { - newSecret, err := kubeClient.CoreV1().Secrets(addOnName).Get(context.TODO(), secretName, metav1.GetOptions{}) - if err != nil { - return false - } + ginkgo.By("Delete the addon and check if secret is gone") + err = addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Delete(context.TODO(), addOnName, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + assertSecretGone(addOnName, getSecretName(addOnName, signerName)) + }) - return !reflect.DeepEqual(secret.Data, newSecret.Data) - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) - }) + ginkgo.It("should register addon with custom signer successfully", func() { + assertSuccessClusterBootstrap() + signerName := "example.com/signer1" + assertSuccessAddOnBootstrap(signerName) + }) - ginkgo.It("should stop addon client cert update if too frequent", func() { - assertSuccessClusterBootstrap() - signerName := certificates.KubeAPIServerClientSignerName - assertSuccessAddOnBootstrap(signerName) + ginkgo.It("should addon registraton config updated successfully", func() { + assertSuccessClusterBootstrap() + signerName := certificates.KubeAPIServerClientSignerName + assertSuccessAddOnBootstrap(signerName) - // update subject for 15 times - for i := 1; i <= 15; i++ { + // update registration config and change the signer addOn, err := addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + newSignerName := "example.com/signer1" addOn.Status = addonv1alpha1.ManagedClusterAddOnStatus{ Registrations: []addonv1alpha1.RegistrationConfig{ { - SignerName: addOn.Status.Registrations[0].SignerName, - Subject: addonv1alpha1.Subject{ - User: fmt.Sprintf("test-%d", i), - }, + SignerName: newSignerName, }, }, } _, err = addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).UpdateStatus(context.TODO(), addOn, metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - // sleep 1 second to ensure controller issue a new csr. 
- time.Sleep(1 * time.Second) - } + assertSecretGone(addOnName, getSecretName(addOnName, signerName)) - ginkgo.By("CSR should not exceed 10") - csrs, err := kubeClient.CertificatesV1().CertificateSigningRequests().List(context.TODO(), metav1.ListOptions{ - LabelSelector: fmt.Sprintf("%s=%s,%s=%s", clusterv1.ClusterNameLabelKey, managedClusterName, addonv1alpha1.AddonLabelKey, addOnName), + assertSuccessCSRApproval() + assertValidClientCertificate(addOnName, getSecretName(addOnName, newSignerName), newSignerName, expectedProxyURL) }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(len(csrs.Items) >= 10).ShouldNot(gomega.BeFalse()) - gomega.Eventually(func() error { - addOn, err := addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) - if err != nil { - return err - } + ginkgo.It("should rotate addon client cert successfully", func() { + assertSuccessClusterBootstrap() + signerName := certificates.KubeAPIServerClientSignerName + assertSuccessAddOnBootstrap(signerName) + + secretName := getSecretName(addOnName, signerName) + secret, err := kubeClient.CoreV1().Secrets(addOnName).Get(context.TODO(), secretName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.By("Wait for cert rotation") + assertSuccessCSRApproval() + gomega.Eventually(func() bool { + newSecret, err := kubeClient.CoreV1().Secrets(addOnName).Get(context.TODO(), secretName, metav1.GetOptions{}) + if err != nil { + return false + } + + return !reflect.DeepEqual(secret.Data, newSecret.Data) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + }) - if meta.IsStatusConditionFalse(addOn.Status.Conditions, "ClusterCertificateRotated") { - return nil + ginkgo.It("should stop addon client cert update if too frequent", func() { + assertSuccessClusterBootstrap() + signerName := certificates.KubeAPIServerClientSignerName + assertSuccessAddOnBootstrap(signerName) + + // update subject for 15 times + for i := 1; i <= 15; i++ { + addOn, err := addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + addOn.Status = addonv1alpha1.ManagedClusterAddOnStatus{ + Registrations: []addonv1alpha1.RegistrationConfig{ + { + SignerName: addOn.Status.Registrations[0].SignerName, + Subject: addonv1alpha1.Subject{ + User: fmt.Sprintf("test-%d", i), + }, + }, + }, + } + _, err = addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).UpdateStatus(context.TODO(), addOn, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // sleep 1 second to ensure controller issue a new csr. 
+ time.Sleep(1 * time.Second) } - return fmt.Errorf("addon status is not correct, got %v", addOn.Status.Conditions) - }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) + ginkgo.By("CSR should not exceed 10") + csrs, err := kubeClient.CertificatesV1().CertificateSigningRequests().List(context.TODO(), metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s,%s=%s", clusterv1.ClusterNameLabelKey, managedClusterName, addonv1alpha1.AddonLabelKey, addOnName), + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(len(csrs.Items) >= 10).ShouldNot(gomega.BeFalse()) + + gomega.Eventually(func() error { + addOn, err := addOnClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) + if err != nil { + return err + } + if meta.IsStatusConditionFalse(addOn.Status.Conditions, "ClusterCertificateRotated") { + return nil + } + + return fmt.Errorf("addon status is not correct, got %v", addOn.Status.Conditions) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) + + }) + }) + + ginkgo.Context("with http proxy", func() { + ginkgo.BeforeEach(func() { + bootstrapKubeconfig = bootstrapKubeConfigHTTPProxyFile + expectedProxyURL = httpProxyURL + }) + assertRegistrationSucceed() + }) + + ginkgo.Context("with https proxy", func() { + ginkgo.BeforeEach(func() { + bootstrapKubeconfig = bootstrapKubeConfigHTTPSProxyFile + expectedProxyURL = httpsProxyURL + }) + assertRegistrationSucceed() }) }) diff --git a/test/integration/registration/integration_suite_test.go b/test/integration/registration/integration_suite_test.go index c99e6bed0..22b099aa6 100644 --- a/test/integration/registration/integration_suite_test.go +++ b/test/integration/registration/integration_suite_test.go @@ -42,6 +42,11 @@ const ( var spokeCfg *rest.Config var bootstrapKubeConfigFile string +var bootstrapKubeConfigHTTPProxyFile string +var bootstrapKubeConfigHTTPSProxyFile string + +var httpProxyURL string +var httpsProxyURL string var testEnv *envtest.Environment var securePort string @@ -192,6 +197,24 @@ var _ = ginkgo.BeforeSuite(func() { }) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() + + // start a proxy server + proxyCertData, proxyKeyData, err := authn.SignServerCert("proxyserver", 24*time.Hour) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + proxyServer := util.NewProxyServer(proxyCertData, proxyKeyData) + err = proxyServer.Start(ctx, 5*time.Second) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + httpProxyURL = proxyServer.HTTPProxyURL + httpsProxyURL = proxyServer.HTTPSProxyURL + + // create bootstrap hub kubeconfig with http/https proxy settings + bootstrapKubeConfigHTTPProxyFile = path.Join(util.TestDir, "bootstrap-http-proxy", "kubeconfig") + err = authn.CreateBootstrapKubeConfigWithProxy(bootstrapKubeConfigHTTPProxyFile, serverCertFile, securePort, httpProxyURL, nil) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + bootstrapKubeConfigHTTPSProxyFile = path.Join(util.TestDir, "bootstrap-https-proxy", "kubeconfig") + err = authn.CreateBootstrapKubeConfigWithProxy(bootstrapKubeConfigHTTPSProxyFile, serverCertFile, securePort, httpsProxyURL, proxyCertData) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) var _ = ginkgo.AfterSuite(func() { diff --git a/test/integration/registration/spokecluster_joining_test.go b/test/integration/registration/spokecluster_joining_test.go index eeb891b82..1758d3724 100644 --- a/test/integration/registration/spokecluster_joining_test.go +++ 
b/test/integration/registration/spokecluster_joining_test.go @@ -8,6 +8,8 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/client-go/tools/clientcmd" clusterv1 "open-cluster-management.io/api/cluster/v1" @@ -17,95 +19,159 @@ import ( ) var _ = ginkgo.Describe("Joining Process", func() { - ginkgo.It("managedcluster should join successfully", func() { - var err error - - managedClusterName := "joiningtest-managedcluster" - hubKubeconfigSecret := "joiningtest-hub-kubeconfig-secret" - hubKubeconfigDir := path.Join(util.TestDir, "joiningtest", "hub-kubeconfig") - - // run registration agent - agentOptions := &spoke.SpokeAgentOptions{ - BootstrapKubeconfig: bootstrapKubeConfigFile, - HubKubeconfigSecret: hubKubeconfigSecret, - ClusterHealthCheckPeriod: 1 * time.Minute, - } - commOptions := commonoptions.NewAgentOptions() - commOptions.HubKubeconfigDir = hubKubeconfigDir - commOptions.SpokeClusterName = managedClusterName - - cancel := runAgent("joiningtest", agentOptions, commOptions, spokeCfg) - defer cancel() - - // the spoke cluster and csr should be created after bootstrap - gomega.Eventually(func() error { - if _, err := util.GetManagedCluster(clusterClient, managedClusterName); err != nil { - return err - } - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + var bootstrapKubeconfig string + var managedClusterName string + var hubKubeconfigSecret string + var hubKubeconfigDir string + var expectedProxyURL string + + ginkgo.BeforeEach(func() { + postfix := rand.String(5) + managedClusterName = fmt.Sprintf("joiningtest-managedcluster-%s", postfix) + hubKubeconfigSecret = fmt.Sprintf("joiningtest-hub-kubeconfig-secret-%s", postfix) + hubKubeconfigDir = path.Join(util.TestDir, fmt.Sprintf("joiningtest-%s", postfix), "hub-kubeconfig") + }) - gomega.Eventually(func() error { - if _, err := util.FindUnapprovedSpokeCSR(kubeClient, managedClusterName); err != nil { - return err - } - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // the spoke cluster should has finalizer that is added by hub controller - gomega.Eventually(func() error { - spokeCluster, err := util.GetManagedCluster(clusterClient, managedClusterName) - if err != nil { - return err - } - if len(spokeCluster.Finalizers) != 1 { - return fmt.Errorf("cluster should have finalizer") - } + assertJoiningSucceed := func() { + ginkgo.It("managedcluster should join successfully", func() { + var err error - if spokeCluster.Finalizers[0] != clusterv1.ManagedClusterFinalizer { - return fmt.Errorf("finalizer is not correct") + // run registration agent + agentOptions := &spoke.SpokeAgentOptions{ + BootstrapKubeconfig: bootstrapKubeconfig, + HubKubeconfigSecret: hubKubeconfigSecret, + ClusterHealthCheckPeriod: 1 * time.Minute, } + commOptions := commonoptions.NewAgentOptions() + commOptions.HubKubeconfigDir = hubKubeconfigDir + commOptions.SpokeClusterName = managedClusterName + + cancel := runAgent("joiningtest", agentOptions, commOptions, spokeCfg) + defer cancel() + + // the spoke cluster and csr should be created after bootstrap + gomega.Eventually(func() error { + if _, err := util.GetManagedCluster(clusterClient, managedClusterName); err != nil { + return err + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + if _, err := util.FindUnapprovedSpokeCSR(kubeClient, managedClusterName); 
err != nil { + return err + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // the spoke cluster should has finalizer that is added by hub controller + gomega.Eventually(func() error { + spokeCluster, err := util.GetManagedCluster(clusterClient, managedClusterName) + if err != nil { + return err + } + if len(spokeCluster.Finalizers) != 1 { + return fmt.Errorf("cluster should have finalizer") + } + + if spokeCluster.Finalizers[0] != clusterv1.ManagedClusterFinalizer { + return fmt.Errorf("finalizer is not correct") + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // simulate hub cluster admin to accept the managedcluster and approve the csr + err = util.AcceptManagedCluster(clusterClient, managedClusterName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = authn.ApproveSpokeClusterCSR(kubeClient, managedClusterName, time.Hour*24) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // the managed cluster should have accepted condition after it is accepted + gomega.Eventually(func() error { + spokeCluster, err := util.GetManagedCluster(clusterClient, managedClusterName) + if err != nil { + return err + } + if !meta.IsStatusConditionTrue(spokeCluster.Status.Conditions, clusterv1.ManagedClusterConditionHubAccepted) { + return fmt.Errorf("cluster should be accepted") + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // the hub kubeconfig secret should be filled after the csr is approved + gomega.Eventually(func() error { + secret, err := util.GetFilledHubKubeConfigSecret(kubeClient, testNamespace, hubKubeconfigSecret) + if err != nil { + return err + } + + // check if the proxyURL is set correctly + proxyURL, err := getProxyURLFromKubeconfigData(secret.Data["kubeconfig"]) + if err != nil { + return err + } + if proxyURL != expectedProxyURL { + return fmt.Errorf("expected proxy url %q, but got %q", expectedProxyURL, proxyURL) + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // the spoke cluster should have joined condition finally + gomega.Eventually(func() error { + spokeCluster, err := util.GetManagedCluster(clusterClient, managedClusterName) + if err != nil { + return err + } + if !meta.IsStatusConditionTrue(spokeCluster.Status.Conditions, clusterv1.ManagedClusterConditionJoined) { + return fmt.Errorf("cluster should be joined") + } + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) + } + + ginkgo.Context("without proxy", func() { + ginkgo.BeforeEach(func() { + bootstrapKubeconfig = bootstrapKubeConfigFile + expectedProxyURL = "" + }) + assertJoiningSucceed() + }) - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + ginkgo.Context("with http proxy", func() { + ginkgo.BeforeEach(func() { + bootstrapKubeconfig = bootstrapKubeConfigHTTPProxyFile + expectedProxyURL = httpProxyURL + }) + assertJoiningSucceed() + }) - // simulate hub cluster admin to accept the managedcluster and approve the csr - err = util.AcceptManagedCluster(clusterClient, managedClusterName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ginkgo.Context("with https proxy", func() { + ginkgo.BeforeEach(func() { + bootstrapKubeconfig = bootstrapKubeConfigHTTPSProxyFile + expectedProxyURL = httpsProxyURL + }) + assertJoiningSucceed() + }) +}) - err = authn.ApproveSpokeClusterCSR(kubeClient, managedClusterName, time.Hour*24) - 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) +func getProxyURLFromKubeconfigData(kubeconfigData []byte) (string, error) { + config, err := clientcmd.Load(kubeconfigData) + if err != nil { + return "", err + } - // the managed cluster should have accepted condition after it is accepted - gomega.Eventually(func() error { - spokeCluster, err := util.GetManagedCluster(clusterClient, managedClusterName) - if err != nil { - return err - } - if !meta.IsStatusConditionTrue(spokeCluster.Status.Conditions, clusterv1.ManagedClusterConditionHubAccepted) { - return fmt.Errorf("cluster should be accepted") - } - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + currentContext, ok := config.Contexts[config.CurrentContext] + if !ok { + return "", nil + } - // the hub kubeconfig secret should be filled after the csr is approved - gomega.Eventually(func() error { - if _, err := util.GetFilledHubKubeConfigSecret(kubeClient, testNamespace, hubKubeconfigSecret); err != nil { - return err - } - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // the spoke cluster should have joined condition finally - gomega.Eventually(func() error { - spokeCluster, err := util.GetManagedCluster(clusterClient, managedClusterName) - if err != nil { - return err - } - if !meta.IsStatusConditionTrue(spokeCluster.Status.Conditions, clusterv1.ManagedClusterConditionJoined) { - return fmt.Errorf("cluster should be joined") - } - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - }) -}) + cluster, ok := config.Clusters[currentContext.Cluster] + if !ok { + return "", nil + } + + return cluster.ProxyURL, nil +} diff --git a/test/integration/util/authentication.go b/test/integration/util/authentication.go index 959288d32..6b5fd2768 100644 --- a/test/integration/util/authentication.go +++ b/test/integration/util/authentication.go @@ -13,6 +13,7 @@ import ( "net" "os" "path" + "reflect" "time" certificates "k8s.io/api/certificates/v1" @@ -49,17 +50,27 @@ type TestAuthn struct { caKeyFile string } -func createKubeConfigByClientCert(context string, securePort string, serverCertFile, certFile, keyFile string) (*clientcmdapi.Config, error) { +func createKubeConfigByClientCertWithProxy(context string, securePort string, serverCertFile, certFile, keyFile, + proxyURL string, proxyServerCertData []byte) (*clientcmdapi.Config, error) { caData, err := os.ReadFile(serverCertFile) if err != nil { return nil, err } + caData, err = mergeCertificateData(caData, proxyServerCertData) + if err != nil { + return nil, err + } + config := clientcmdapi.NewConfig() - config.Clusters["hub"] = &clientcmdapi.Cluster{ + hubCluster := &clientcmdapi.Cluster{ Server: fmt.Sprintf("https://127.0.0.1:%s", securePort), CertificateAuthorityData: caData, } + if len(proxyURL) > 0 { + hubCluster.ProxyURL = proxyURL + } + config.Clusters["hub"] = hubCluster config.AuthInfos["user"] = &clientcmdapi.AuthInfo{ ClientCertificate: certFile, ClientKey: keyFile, @@ -73,6 +84,48 @@ func createKubeConfigByClientCert(context string, securePort string, serverCertF return config, nil } +func mergeCertificateData(caBundles ...[]byte) ([]byte, error) { + var all []*x509.Certificate + for _, caBundle := range caBundles { + if len(caBundle) == 0 { + continue + } + certs, err := certutil.ParseCertsPEM(caBundle) + if err != nil { + return []byte{}, err + } + all = append(all, certs...) 
+ } + + // remove duplicated cert + var merged []*x509.Certificate + for i := range all { + found := false + for j := range merged { + if reflect.DeepEqual(all[i].Raw, merged[j].Raw) { + found = true + break + } + } + if !found { + merged = append(merged, all[i]) + } + } + + // encode the merged certificates + b := bytes.Buffer{} + for _, cert := range merged { + if err := pem.Encode(&b, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil { + return []byte{}, err + } + } + return b.Bytes(), nil +} + +func createKubeConfigByClientCert(context string, securePort string, serverCertFile, certFile, keyFile string) (*clientcmdapi.Config, error) { + return createKubeConfigByClientCertWithProxy(context, securePort, serverCertFile, certFile, keyFile, "", nil) +} + func NewTestAuthn(caFile, caKeyFile string) *TestAuthn { return &TestAuthn{ caFile: caFile, @@ -168,6 +221,35 @@ func (t *TestAuthn) CreateBootstrapKubeConfigWithUser(configFileName, serverCert return t.CreateBootstrapKubeConfig(configFileName, serverCertFile, securePort, bootstrapUser, 24*time.Hour) } +func (t *TestAuthn) CreateBootstrapKubeConfigWithProxy(configFileName, serverCertFile, securePort, proxyURL string, proxyServerCertData []byte) error { + certData, keyData, err := t.signClientCertKeyWithCA(bootstrapUser, bootstrapGroups, 24*time.Hour) + if err != nil { + return err + } + + configDir := path.Dir(configFileName) + if _, err := os.Stat(configDir); os.IsNotExist(err) { + if err = os.MkdirAll(configDir, 0755); err != nil { + return err + } + } + + if err := os.WriteFile(path.Join(configDir, "bootstrap.crt"), certData, 0600); err != nil { + return err + } + if err := os.WriteFile(path.Join(configDir, "bootstrap.key"), keyData, 0600); err != nil { + return err + } + + config, err := createKubeConfigByClientCertWithProxy(configFileName, securePort, serverCertFile, + path.Join(configDir, "bootstrap.crt"), path.Join(configDir, "bootstrap.key"), proxyURL, proxyServerCertData) + if err != nil { + return err + } + + return clientcmd.WriteToFile(*config, configFileName) +} + func (t *TestAuthn) CreateBootstrapKubeConfig(configFileName, serverCertFile, securePort, bootstrapUser string, certAge time.Duration) error { certData, keyData, err := t.signClientCertKeyWithCA(bootstrapUser, bootstrapGroups, certAge) if err != nil { @@ -264,6 +346,66 @@ func (t *TestAuthn) signClientCertKeyWithCA(user string, groups []string, maxAge return certBuffer.Bytes(), keyBuffer.Bytes(), nil } +func (t *TestAuthn) SignServerCert(commonName string, maxAge time.Duration) ([]byte, []byte, error) { + now := time.Now() + caData, err := os.ReadFile(t.caFile) + if err != nil { + return nil, nil, err + } + caBlock, _ := pem.Decode(caData) + caCert, err := x509.ParseCertificate(caBlock.Bytes) + if err != nil { + return nil, nil, err + } + + caKeyData, err := os.ReadFile(t.caKeyFile) + if err != nil { + return nil, nil, err + } + keyBlock, _ := pem.Decode(caKeyData) + caKey, err := x509.ParsePKCS1PrivateKey(keyBlock.Bytes) + if err != nil { + return nil, nil, err + } + + serverKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, nil, err + } + + serverDERBytes, err := x509.CreateCertificate( + rand.Reader, + &x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{CommonName: commonName}, + NotBefore: now.UTC(), + NotAfter: now.Add(maxAge).UTC(), + BasicConstraintsValid: false, + IsCA: false, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDataEncipherment, + ExtKeyUsage: 
[]x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + IPAddresses: []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("10.0.0.0")}, + }, + caCert, + &serverKey.PublicKey, + caKey, + ) + if err != nil { + return nil, nil, err + } + certBuffer := bytes.Buffer{} + if err := pem.Encode(&certBuffer, &pem.Block{Type: certutil.CertificateBlockType, Bytes: serverDERBytes}); err != nil { + return nil, nil, err + } + + keyBuffer := bytes.Buffer{} + if err := pem.Encode( + &keyBuffer, &pem.Block{Type: keyutil.RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(serverKey)}); err != nil { + return nil, nil, err + } + return certBuffer.Bytes(), keyBuffer.Bytes(), nil +} + func PrepareSpokeAgentNamespace(kubeClient kubernetes.Interface, namespace string) error { _, err := kubeClient.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}) if err == nil { diff --git a/test/integration/util/proxy.go b/test/integration/util/proxy.go new file mode 100644 index 000000000..ad01e8b66 --- /dev/null +++ b/test/integration/util/proxy.go @@ -0,0 +1,213 @@ +package util + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "net" + "net/http" + "strings" + "time" +) + +// ProxyServer is a simple proxy server that supports http tunnel +type ProxyServer struct { + httpServer *http.Server + HTTPProxyURL string + httpServerErrCh chan error + + httpsServer *http.Server + HTTPSProxyURL string + httpsServerErrCh chan error + certData []byte + keyData []byte +} + +func NewProxyServer(certData, keyData []byte) *ProxyServer { + return &ProxyServer{ + httpServerErrCh: make(chan error, 1), + certData: certData, + keyData: keyData, + httpsServerErrCh: make(chan error, 1), + } +} + +func (ps *ProxyServer) Start(ctx context.Context, timeout time.Duration) error { + var failures []string + go func() { + select { + case <-ctx.Done(): + fmt.Printf("[proxy-server] - shutting down...\n") + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + stopServer(ctx, "http", ps.httpServer) + stopServer(ctx, "https", ps.httpsServer) + case err := <-ps.httpServerErrCh: + msg := fmt.Sprintf("failed to start http server: %v", err) + failures = append(failures, msg) + fmt.Printf("[proxy-server] - %s\n", msg) + case err := <-ps.httpsServerErrCh: + msg := fmt.Sprintf("failed to start https server: %v", err) + failures = append(failures, msg) + fmt.Printf("[proxy-server] - %s\n", msg) + } + }() + + go func() { + ps.httpServerErrCh <- ps.startHTTP() + }() + + go func() { + ps.httpsServerErrCh <- ps.startHTTPS() + }() + + // wait for the proxy server started + timeoutCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + timer := time.NewTimer(1 * time.Second) + for { + timer.Reset(1 * time.Second) + select { + case <-timeoutCtx.Done(): + if ps.ready() { + return nil + } + return fmt.Errorf("failed to start proxy server in %v", timeout) + case <-timer.C: + if len(failures) > 0 { + return fmt.Errorf("failed to start proxy server: %v", strings.Join(failures, ", ")) + } + + if ps.ready() { + return nil + } + } + } +} + +func (ps *ProxyServer) ready() bool { + if len(ps.HTTPProxyURL) == 0 { + return false + } + + if len(ps.HTTPSProxyURL) == 0 { + return false + } + + return true +} + +func stopServer(ctx context.Context, name string, server *http.Server) { + if server == nil { + return + } + if err := server.Shutdown(ctx); err != nil { + fmt.Printf("[proxy-server] - failed to stop %s server: %v\n", name, err) + } +} + +func (ps *ProxyServer) startHTTP() error { + 
ps.httpServer = newHTTPServer(nil) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return err + } + + ps.HTTPProxyURL = fmt.Sprintf("http://%s", ln.Addr().String()) + fmt.Printf("[proxy-server] - starting http proxy server on %s...\n", ps.HTTPProxyURL) + return ps.httpServer.Serve(ln) +} + +func (ps *ProxyServer) startHTTPS() error { + serverCert, err := tls.X509KeyPair(ps.certData, ps.keyData) + if err != nil { + return err + } + + ps.httpsServer = newHTTPServer(&tls.Config{ + Certificates: []tls.Certificate{serverCert}, + MinVersion: tls.VersionTLS12, + }) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return err + } + + ps.HTTPSProxyURL = fmt.Sprintf("https://%s", ln.Addr().String()) + fmt.Printf("[proxy-server] - starting https proxy server on %s...\n", ps.HTTPSProxyURL) + return ps.httpsServer.ServeTLS(ln, "", "") +} + +func newHTTPServer(tlsConfig *tls.Config) *http.Server { + return &http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == http.MethodConnect { + handleTunneling(w, r) + } else { + handleHTTP(w, r) + } + }), + // Disable HTTP/2. + TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), + TLSConfig: tlsConfig, + ReadHeaderTimeout: 30 * time.Second, + } +} + +func handleTunneling(w http.ResponseWriter, r *http.Request) { + fmt.Printf("[proxy-server] - handleTunneling: %s\n", r.RequestURI) + + dest_conn, err := net.DialTimeout("tcp", r.Host, 10*time.Second) + if err != nil { + http.Error(w, err.Error(), http.StatusServiceUnavailable) + return + } + w.WriteHeader(http.StatusOK) + hijacker, ok := w.(http.Hijacker) + if !ok { + http.Error(w, "Hijacking not supported", http.StatusInternalServerError) + return + } + client_conn, _, err := hijacker.Hijack() + if err != nil { + http.Error(w, err.Error(), http.StatusServiceUnavailable) + } + go transfer(dest_conn, client_conn) + go transfer(client_conn, dest_conn) +} + +func transfer(destination io.WriteCloser, source io.ReadCloser) { + defer destination.Close() + defer source.Close() + _, err := io.Copy(destination, source) + if err != nil { + fmt.Printf("[proxy-server] - failed to transfer traffic: %v\n", err) + } +} + +func handleHTTP(w http.ResponseWriter, req *http.Request) { + fmt.Printf("[proxy-server] - handleHTTP: %s\n", req.RequestURI) + resp, err := http.DefaultTransport.RoundTrip(req) + if err != nil { + http.Error(w, err.Error(), http.StatusServiceUnavailable) + return + } + defer resp.Body.Close() + copyHeader(w.Header(), resp.Header) + w.WriteHeader(resp.StatusCode) + _, err = io.Copy(w, resp.Body) + if err != nil { + fmt.Printf("[proxy-server] - failed to handleHTTP %q: %v\n", req.RequestURI, err) + } +} + +func copyHeader(dst, src http.Header) { + for k, vv := range src { + for _, v := range vv { + dst.Add(k, v) + } + } +}
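
Note (editor's addition, not part of the patch): the change above threads one new piece of state, the ProxyURL recorded on the bootstrap kubeconfig's cluster entry, through clientcert.BuildKubeconfig so that the hub kubeconfig generated after bootstrap keeps tunneling through the same forward proxy. The standalone sketch below shows that flow in isolation using only client-go; the helper names proxyURLFrom and buildHubKubeconfig, the example server URL, and the BOOTSTRAP_KUBECONFIG environment variable are illustrative assumptions and do not appear in the patch.

package main

import (
	"fmt"
	"os"

	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// proxyURLFrom returns the proxy URL set on the current cluster entry of a
// kubeconfig file, or "" when none is configured. It mirrors the lookup the
// patch adds to the spoke agent, but is an illustrative helper only.
func proxyURLFrom(kubeconfigPath string) (string, error) {
	cfg, err := clientcmd.LoadFromFile(kubeconfigPath)
	if err != nil {
		return "", err
	}
	ctx, ok := cfg.Contexts[cfg.CurrentContext]
	if !ok {
		return "", nil
	}
	cluster, ok := cfg.Clusters[ctx.Cluster]
	if !ok {
		return "", nil
	}
	return cluster.ProxyURL, nil
}

// buildHubKubeconfig assembles a hub kubeconfig that reuses the bootstrap
// proxy URL and CA bundle and references on-disk client cert/key files,
// analogous in shape to clientcert.BuildKubeconfig in the patch.
func buildHubKubeconfig(server string, caData []byte, proxyURL, certPath, keyPath string) clientcmdapi.Config {
	return clientcmdapi.Config{
		Clusters: map[string]*clientcmdapi.Cluster{"default-cluster": {
			Server:                   server,
			CertificateAuthorityData: caData, // real code passes the hub CA (plus proxy CA when tunneling over HTTPS)
			ProxyURL:                 proxyURL, // empty string means "no proxy"
		}},
		AuthInfos: map[string]*clientcmdapi.AuthInfo{"default-auth": {
			ClientCertificate: certPath,
			ClientKey:         keyPath,
		}},
		Contexts: map[string]*clientcmdapi.Context{"default-context": {
			Cluster:  "default-cluster",
			AuthInfo: "default-auth",
		}},
		CurrentContext: "default-context",
	}
}

func main() {
	// Read the proxy setting from the bootstrap kubeconfig and carry it into
	// the generated hub kubeconfig, as the spoke agent does after this patch.
	proxyURL, err := proxyURLFrom(os.Getenv("BOOTSTRAP_KUBECONFIG"))
	if err != nil {
		panic(err)
	}
	kubeconfig := buildHubKubeconfig("https://hub.example.com:6443", nil, proxyURL, "tls.crt", "tls.key")
	out, err := clientcmd.Write(kubeconfig)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}

Recording the proxy on the kubeconfig's cluster entry, rather than relying on HTTPS_PROXY/HTTP_PROXY environment variables, keeps the setting scoped to the hub connection and lets the bootstrap controller detect a proxy change and trigger re-bootstrap — which is what the added ProxyURL comparison in bootstrapcontroller.go checks.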