Commit 4645a2a (1 parent: bf12b9a)
Signed-off-by: xuezhaojun <zxue@redhat.com>
Showing 1 changed file with 344 additions and 0 deletions.
test/integration/registration/spokeagent_switch_hub_test.go
@@ -0,0 +1,344 @@
package registration_test

import (
    "context"
    "fmt"
    "path"
    "time"

    "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/ginkgo/v2"
    "github.com/onsi/gomega"
    . "github.com/onsi/gomega"
    "github.com/openshift/library-go/pkg/controller/controllercmd"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/rand"
    "k8s.io/apiserver/pkg/server/healthz"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    addonclientset "open-cluster-management.io/api/client/addon/clientset/versioned"
    clusterclientset "open-cluster-management.io/api/client/cluster/clientset/versioned"
    clusterv1 "open-cluster-management.io/api/cluster/v1"
    commonoptions "open-cluster-management.io/ocm/pkg/common/options"
    "open-cluster-management.io/ocm/pkg/registration/hub"
    "open-cluster-management.io/ocm/pkg/registration/spoke"
    "open-cluster-management.io/ocm/test/integration/util"
    "sigs.k8s.io/controller-runtime/pkg/envtest"
)

// In the switch-hub test, we start two hubs and one spoke agent, and cover two cases:
// 1. The spoke first connects to hub1; we set hub1 to not accept the spoke, and the spoke should switch to hub2.
// 2. The spoke first connects to hub1; we stop hub1, and the spoke should switch to hub2.

var _ = Describe("Switch Hub", func() {
    var managedClusterName, bootstrapFile, hubKubeconfigSecret, suffix string

    BeforeEach(func() {
        var err error

        // start 2 hubs
        hub1bootstrapFilePath, hub1kubeClient, hub1ClusterClient, _, hub1Env, hub1NewAuthn := startNewHub(context.Background(), "switch-hub-hub1")
        hub2bootstrapFilePath, hub2kubeClient, hub2ClusterClient, _, hub2Env, hub2NewAuthn := startNewHub(context.Background(), "switch-hub-hub2")

        // create:
        // 1. hub1 bootstrapkubeconfig file
        // 2. hub2 bootstrapkubeconfig file
        // 3. agent bootstrapkubeconfig secret (based on hub1)
        // 4. hub1 bootstrapkubeconfig secret
        // 5. hub2 bootstrapkubeconfig secret

        err = hub1NewAuthn.CreateBootstrapKubeConfigWithCertAge(hub1bootstrapFilePath, serverCertFile, securePort, 10*time.Minute)
        Expect(err).NotTo(HaveOccurred())

        err = hub2NewAuthn.CreateBootstrapKubeConfigWithCertAge(hub2bootstrapFilePath, serverCertFile, securePort, 10*time.Minute)
        Expect(err).NotTo(HaveOccurred())

        err = util.SyncBootstrapKubeConfigDataToSecret(
            hub1bootstrapFilePath,
            testNamespace,
            "bootstrap-hub-kubeconfig",
            kubeClient,
        )
        Expect(err).NotTo(HaveOccurred())

        err = util.SyncBootstrapKubeConfigDataToSecret(
            hub1bootstrapFilePath,
            testNamespace,
            "bootstrap-hub-kubeconfig-hub1",
            kubeClient,
        )
        Expect(err).NotTo(HaveOccurred())

        err = util.SyncBootstrapKubeConfigDataToSecret(
            hub2bootstrapFilePath,
            testNamespace,
            "bootstrap-hub-kubeconfig-hub2",
            kubeClient,
        )
        Expect(err).NotTo(HaveOccurred())

        // start syncing secret changes to the bootstrap-hub-kubeconfig file;
        // this simulates the agent syncing the bootstrap kubeconfig secret to a file
        go startAgentBootstrapKubeConfigSecretToFileSyncer(context.Background(), bootstrapFile, kubeClient)

        // start an auto-restart agent
        By("Starting an auto-restart spoke agent")
        suffix = rand.String(5)
        managedClusterName = fmt.Sprintf("cluster-%s", suffix)
        hubKubeconfigSecret = fmt.Sprintf("hub-kubeconfig-secret-%s", suffix)
        hubKubeconfigDir := path.Join(util.TestDir, fmt.Sprintf("rebootstrap-%s", suffix), "hub-kubeconfig")

        go startAutoRestartAgent(context.Background(),
            managedClusterName, hubKubeconfigDir,
            func() *spoke.SpokeAgentOptions {
                agentOptions := spoke.NewSpokeAgentOptions()
                agentOptions.BootstrapKubeconfig = bootstrapFile
                agentOptions.HubKubeconfigSecret = hubKubeconfigSecret
                agentOptions.BootstrapKubeconfigSecret = "bootstrap-hub-kubeconfig"
                agentOptions.BootstrapKubeconfigSecrets = []string{"bootstrap-hub-kubeconfig-hub1", "bootstrap-hub-kubeconfig-hub2"}
                return agentOptions
            },
            func(ctx context.Context, stopAgent context.CancelFunc, agentOptions *spoke.SpokeAgentOptions) {
                startAgentHealthChecker(ctx, stopAgent, agentOptions.GetHealthCheckers())
            })
    })

    AfterEach(func() {})
Context("Switch hub because hub doesn't accept client", func() { | ||
It("Should switch hub", func() { | ||
|
||
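            // A hedged sketch of the intended flow, assuming the hub1/hub2 clients, authn
            // helpers and envs created in BeforeEach are promoted to Describe-scoped
            // variables so they are visible here:
            //
            //   // bootstrap against hub1 first
            //   assertSuccessClusterBootstrap(testNamespace, managedClusterName, hubKubeconfigSecret,
            //       hub1kubeClient, kubeClient, hub1ClusterClient, hub1NewAuthn, 10*time.Minute)
            //
            //   // set spec.hubAcceptsClient to false on hub1 so it rejects the spoke, then
            //   // expect the agent to switch and bootstrap against hub2
            //   assertSuccessClusterBootstrap(testNamespace, managedClusterName, hubKubeconfigSecret,
            //       hub2kubeClient, kubeClient, hub2ClusterClient, hub2NewAuthn, 10*time.Minute)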
        })
    })

    Context("Switch hub because hub is down", func() {
        It("Should switch hub", func() {
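            // A hedged sketch of the intended flow, under the same scoping assumption as above:
            //
            //   // bootstrap against hub1 first
            //   assertSuccessClusterBootstrap(testNamespace, managedClusterName, hubKubeconfigSecret,
            //       hub1kubeClient, kubeClient, hub1ClusterClient, hub1NewAuthn, 10*time.Minute)
            //
            //   // stop hub1's test environment so the hub becomes unreachable; the health
            //   // checker should stop the agent, and the auto-restart loop should bring it
            //   // back up to bootstrap against hub2
            //   Expect(hub1Env.Stop()).NotTo(HaveOccurred())
            //   assertSuccessClusterBootstrap(testNamespace, managedClusterName, hubKubeconfigSecret,
            //       hub2kubeClient, kubeClient, hub2ClusterClient, hub2NewAuthn, 10*time.Minute)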
        })
    })
})

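// startNewHub starts a standalone hub (an envtest API server plus the registration hub
// controller) and returns its bootstrap kubeconfig path, clients, test environment and authn helper.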
func startNewHub(ctx context.Context, hubName string) (
    string,
    kubernetes.Interface,
    clusterclientset.Interface,
    addonclientset.Interface,
    *envtest.Environment, *util.TestAuthn) {
    apiserver := &envtest.APIServer{}
    newAuthn := util.NewTestAuthn(path.Join(util.CertDir, "another-ca.crt"), path.Join(util.CertDir, "another-ca.key"))
    apiserver.SecureServing.Authn = newAuthn

    env := &envtest.Environment{
        ControlPlane: envtest.ControlPlane{
            APIServer: apiserver,
        },
        ErrorIfCRDPathMissing: true,
        CRDDirectoryPaths:     CRDPaths,
    }

    cfg, err := env.Start()
    gomega.Expect(err).ToNot(gomega.HaveOccurred())
    gomega.Expect(cfg).ToNot(gomega.BeNil())

    err = clusterv1.Install(scheme.Scheme)
    gomega.Expect(err).NotTo(gomega.HaveOccurred())

    // prepare configs
    newSecurePort := env.ControlPlane.APIServer.SecureServing.Port
    gomega.Expect(len(newSecurePort)).ToNot(gomega.BeZero())

    anotherServerCertFile := fmt.Sprintf("%s/apiserver.crt", env.ControlPlane.APIServer.CertDir)

    bootstrapKubeConfigFile := path.Join(util.TestDir, fmt.Sprintf("bootstrapkubeconfig-%s", hubName))
    err = newAuthn.CreateBootstrapKubeConfigWithCertAge(bootstrapKubeConfigFile, anotherServerCertFile, newSecurePort, 24*time.Hour)
    gomega.Expect(err).NotTo(gomega.HaveOccurred())

    // prepare clients
    kubeClient, err := kubernetes.NewForConfig(cfg)
    gomega.Expect(err).ToNot(gomega.HaveOccurred())
    gomega.Expect(kubeClient).ToNot(gomega.BeNil())

    clusterClient, err := clusterclientset.NewForConfig(cfg)
    gomega.Expect(err).ToNot(gomega.HaveOccurred())
    gomega.Expect(clusterClient).ToNot(gomega.BeNil())

    addOnClient, err := addonclientset.NewForConfig(cfg)
    gomega.Expect(err).ToNot(gomega.HaveOccurred())
    gomega.Expect(addOnClient).ToNot(gomega.BeNil())

    // start hub controller
    go func() {
        err := hub.NewHubManagerOptions().RunControllerManager(ctx, &controllercmd.ControllerContext{
            KubeConfig:    cfg,
            EventRecorder: util.NewIntegrationTestEventRecorder(hubName),
        })
        gomega.Expect(err).NotTo(gomega.HaveOccurred())
    }()

    return bootstrapKubeConfigFile, kubeClient, clusterClient, addOnClient, env, newAuthn
}

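// startAgent runs the registration agent with the given options and returns the agent's
// context together with the cancel function that stops it.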
func startAgent(ctx context.Context, managedClusterName, hubKubeconfigDir string,
    agentOptions *spoke.SpokeAgentOptions) (context.Context, context.CancelFunc) {
    ginkgo.By("run registration agent")
    commOptions := commonoptions.NewAgentOptions()
    commOptions.HubKubeconfigDir = hubKubeconfigDir
    commOptions.SpokeClusterName = managedClusterName

    agentCtx, stopAgent := context.WithCancel(ctx)
    runAgentWithContext(agentCtx, "rebootstrap-test", agentOptions, commOptions, spokeCfg)

    return agentCtx, stopAgent
}

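// assertSuccessClusterBootstrap waits for the ManagedCluster and CSR created during bootstrap,
// accepts the cluster and approves the CSR on the hub, and then verifies the spoke joins.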
func assertSuccessClusterBootstrap(testNamespace, managedClusterName, hubKubeconfigSecret string,
    hubKubeClient, spokeKubeClient kubernetes.Interface, hubClusterClient clusterclientset.Interface,
    auth *util.TestAuthn, certAge time.Duration) {
    // the spoke cluster and csr should be created after bootstrap
    ginkgo.By("Check existence of ManagedCluster & CSR")
    gomega.Eventually(func() bool {
        if _, err := util.GetManagedCluster(hubClusterClient, managedClusterName); err != nil {
            return false
        }
        return true
    }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())

    gomega.Eventually(func() error {
        if _, err := util.FindUnapprovedSpokeCSR(hubKubeClient, managedClusterName); err != nil {
            return err
        }
        return nil
    }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

    // the spoke cluster should have the finalizer added by the hub controller
    gomega.Eventually(func() bool {
        spokeCluster, err := util.GetManagedCluster(hubClusterClient, managedClusterName)
        if err != nil {
            return false
        }
        if len(spokeCluster.Finalizers) != 1 {
            return false
        }

        if spokeCluster.Finalizers[0] != clusterv1.ManagedClusterFinalizer {
            return false
        }

        return true
    }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())

ginkgo.By("Accept and approve the ManagedCluster") | ||
// simulate hub cluster admin to accept the managedcluster and approve the csr | ||
gomega.Eventually(func() error { | ||
return util.AcceptManagedCluster(hubClusterClient, managedClusterName) | ||
}, eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) | ||
err := auth.ApproveSpokeClusterCSR(hubKubeClient, managedClusterName, certAget) | ||
gomega.Expect(err).NotTo(gomega.HaveOccurred()) | ||
|
||
    // the managed cluster should have accepted condition after it is accepted
    gomega.Eventually(func() error {
        spokeCluster, err := util.GetManagedCluster(hubClusterClient, managedClusterName)
        if err != nil {
            return err
        }
        if meta.IsStatusConditionFalse(spokeCluster.Status.Conditions, clusterv1.ManagedClusterConditionHubAccepted) {
            return fmt.Errorf("cluster should be accepted")
        }
        return nil
    }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

    // the hub kubeconfig secret should be filled after the csr is approved
    gomega.Eventually(func() error {
        if _, err := util.GetFilledHubKubeConfigSecret(spokeKubeClient, testNamespace, hubKubeconfigSecret); err != nil {
            return err
        }
        return nil
    }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

    ginkgo.By("ManagedCluster joins the hub")
    // the spoke cluster should have joined condition finally
    gomega.Eventually(func() error {
        spokeCluster, err := util.GetManagedCluster(hubClusterClient, managedClusterName)
        if err != nil {
            return err
        }
        joined := meta.FindStatusCondition(spokeCluster.Status.Conditions, clusterv1.ManagedClusterConditionJoined)
        if joined == nil {
            return fmt.Errorf("cluster should be joined")
        }
        return nil
    }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

    // ensure cluster namespace is in place
    gomega.Eventually(func() error {
        _, err := hubKubeClient.CoreV1().Namespaces().Get(context.TODO(), managedClusterName, metav1.GetOptions{})
        if err != nil {
            return err
        }
        return nil
    }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}

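// startAutoRestartAgent runs the spoke agent and, whenever its context is cancelled
// (for example by the health checker), rebuilds the options and restarts it.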
func startAutoRestartAgent(ctx context.Context,
    managedClusterName, hubKubeconfigDir string,
    getNewAgentOptions func() *spoke.SpokeAgentOptions,
    watchStop func(ctx context.Context, stopAgent context.CancelFunc, agentOptions *spoke.SpokeAgentOptions),
) {
    fmt.Println("[auto-restart-agent] - starting agent...")
    newAgentOptions := getNewAgentOptions()
    agentCtx, stopAgent := startAgent(ctx, managedClusterName, hubKubeconfigDir, newAgentOptions)
    go watchStop(ctx, stopAgent, newAgentOptions)
    for {
        select {
        case <-agentCtx.Done():
            // restart agent
            newAgentOptions := getNewAgentOptions()
            agentCtx, stopAgent = startAgent(ctx, managedClusterName, hubKubeconfigDir, newAgentOptions)
            go watchStop(ctx, stopAgent, newAgentOptions)
        case <-ctx.Done():
            // exit
            fmt.Println("[auto-restart-agent] - shutting down...")
            return
        }
    }
}

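// startAgentHealthChecker periodically runs the agent's health checkers and cancels the
// agent context as soon as any of them reports an error, triggering a restart.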
func startAgentHealthChecker(ctx context.Context, stopAgent context.CancelFunc, healthCheckers []healthz.HealthChecker) {
    ticker := time.NewTicker(3 * time.Second)
    defer ticker.Stop()
    for {
        select {
        case <-ticker.C:
            fmt.Println("[agent-health-checker] - start health checking...")
            for _, healthchecker := range healthCheckers {
                if err := healthchecker.Check(nil); err != nil {
                    fmt.Printf("[agent-health-checker] - agent is not healthy: %v\n", err)
                    stopAgent()
                    return
                }
            }
        case <-ctx.Done():
            // exit
            fmt.Println("[agent-health-checker] - shutting down...")
            return
        }
    }
}

func startAgentBootstrapKubeConfigSecretToFileSyncer(ctx context.Context, bootstrapFile string, kubeClient kubernetes.Interface) {
    // simulate the agent syncing the bootstrap kubeconfig secret to a file
    ticker := time.NewTicker(30 * time.Second)
    defer ticker.Stop()
    for {
        select {
        case <-ticker.C:
            err := util.SyncSecretToBootstrapKubeConfigFiles(bootstrapFile, testNamespace, "bootstrap-hub-kubeconfig", kubeClient)
            if err != nil {
                fmt.Printf("[agent-bootstrap-kubeconfig-secret-syncer] - failed to sync bootstrap kubeconfig secret to file: %v\n", err)
            }
        case <-ctx.Done():
            // exit
            fmt.Println("[agent-bootstrap-kubeconfig-secret-syncer] - shutting down...")
            return
        }
    }
}