diff --git a/.github/workflows/e2e-fixture-test.yml b/.github/workflows/e2e-fixture-test.yml
index d4f5604b87..f0895ae593 100644
--- a/.github/workflows/e2e-fixture-test.yml
+++ b/.github/workflows/e2e-fixture-test.yml
@@ -30,6 +30,7 @@ jobs:
       - name: Set Up Environment and Run BMO e2e Tests
         env:
-          E2E_CONF_FILE: ${GITHUB_WORKSPACE}/test/e2e/config/fixture.yaml
+          E2E_CONF_FILE: ${{ github.workspace }}/test/e2e/config/fixture.yaml
           USE_EXISTING_CLUSTER: "false"
+          BMO_RELEASE_BRANCH: "release-0.4"
         run: make test-e2e
diff --git a/Makefile b/Makefile
index 0ab887685d..009e7dff94 100644
--- a/Makefile
+++ b/Makefile
@@ -104,6 +104,9 @@ unit-verbose: ## Run unit tests with verbose output
 
 ARTIFACTS ?= ${ROOT_DIR}/test/e2e/_artifacts
 
+e2e-ensure-upgrade-kustomization:
+	./hack/e2e/ensure_kustomization_paths.sh
+
 .PHONY: test-e2e
 test-e2e: $(GINKGO) ## Run the end-to-end tests
 	$(GINKGO) -v --trace -poll-progress-after=$(GINKGO_POLL_PROGRESS_AFTER) \
@@ -112,7 +115,7 @@ test-e2e: $(GINKGO) ## Run the end-to-end tests
 		--output-dir="$(ARTIFACTS)" --junit-report="junit.e2e_suite.1.xml" $(GINKGO_ARGS) test/e2e -- \
 		-e2e.config="$(E2E_CONF_FILE)" -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER) \
 		-e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) -e2e.artifacts-folder="$(ARTIFACTS)"
 
 ## --------------------------------------
 ## Linter Targets
 ## --------------------------------------
diff --git a/config/overlays/e2e-release-0.4/ironic.env b/config/overlays/e2e-release-0.4/ironic.env
new file mode 100644
index 0000000000..2332d5778c
--- /dev/null
+++ b/config/overlays/e2e-release-0.4/ironic.env
@@ -0,0 +1,9 @@
+HTTP_PORT=6180
+PROVISIONING_IP=192.168.222.199
+DEPLOY_KERNEL_URL=http://192.168.222.199:6180/images/ironic-python-agent.kernel
+DEPLOY_RAMDISK_URL=http://192.168.222.199:6180/images/ironic-python-agent.initramfs
+IRONIC_ENDPOINT=http://192.168.222.199:6385/v1/
+CACHEURL=http://192.168.222.199/images
+IRONIC_FAST_TRACK=true
+IRONIC_KERNEL_PARAMS=console=ttyS0
+IRONIC_INSPECTOR_VLAN_INTERFACES=all
diff --git a/config/overlays/e2e-release-0.4/kustomization.yaml b/config/overlays/e2e-release-0.4/kustomization.yaml
new file mode 100644
index 0000000000..07fecf4929
--- /dev/null
+++ b/config/overlays/e2e-release-0.4/kustomization.yaml
@@ -0,0 +1,26 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- https://github.com/metal3-io/baremetal-operator/config/namespace?ref=release-0.4
+- https://github.com/metal3-io/baremetal-operator/config/default?ref=release-0.4
+
+configMapGenerator:
+- behavior: replace
+  envs:
+  - ironic.env
+  name: ironic
+
+patches:
+- patch: |
+    # Don't try to pull the pre-loaded image again
+    - op: replace
+      path: /spec/template/spec/containers/0/imagePullPolicy
+      value: IfNotPresent
+  target:
+    kind: Deployment
+    name: controller-manager
+
+images:
+- name: quay.io/metal3-io/baremetal-operator
+  newTag: release-0.4
diff --git a/config/overlays/fixture-release-0.4/kustomization.yaml b/config/overlays/fixture-release-0.4/kustomization.yaml
new file mode 100644
index 0000000000..559c0097e7
--- /dev/null
+++ b/config/overlays/fixture-release-0.4/kustomization.yaml
@@ -0,0 +1,24 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- https://github.com/metal3-io/baremetal-operator/config/namespace?ref=release-0.4
+- https://github.com/metal3-io/baremetal-operator/config/default?ref=release-0.4
+
+patches:
+- patch: |
+    # Enable test mode (fixture provider instead of ironic)
+    - op: add
+      path: /spec/template/spec/containers/0/args/-
+      value: --test-mode
+    # Don't try to pull the pre-loaded image again
+    - op: replace
+      path: /spec/template/spec/containers/0/imagePullPolicy
+      value: IfNotPresent
+  target:
+    kind: Deployment
+    name: controller-manager
+
+images:
+- name: quay.io/metal3-io/baremetal-operator
+  newTag: release-0.4
diff --git a/hack/ci-e2e.sh b/hack/ci-e2e.sh
index 9bd9ac2789..57254b8422 100755
--- a/hack/ci-e2e.sh
+++ b/hack/ci-e2e.sh
@@ -11,6 +11,7 @@ set -eux
 
 REPO_ROOT=$(realpath "$(dirname "${BASH_SOURCE[0]}")/..")
+BMO_IMAGE="quay.io/metal3-io/baremetal-operator:e2e"
 
 cd "${REPO_ROOT}" || exit 1
 
@@ -25,24 +26,31 @@ export PATH="${PATH}:/usr/local/go/bin"
 "${REPO_ROOT}/hack/e2e/ensure_kubectl.sh"
 
 # Build the container image with e2e tag (used in tests)
-IMG=quay.io/metal3-io/baremetal-operator:e2e make docker
-
-# Set up minikube
-minikube start --driver=kvm2
+IMG="${BMO_IMAGE}" make docker
 
 virsh -c qemu:///system net-define "${REPO_ROOT}/hack/e2e/net.xml"
 virsh -c qemu:///system net-start baremetal-e2e
 
-# Attach baremetal-e2e interface to minikube with specific mac.
-# This will give minikube a known reserved IP address that we can use for Ironic
-virsh -c qemu:///system attach-interface --domain minikube --mac="52:54:00:6c:3c:01" \
-  --model virtio --source baremetal-e2e --type network --config
-# Restart minikube to apply the changes
-minikube stop
-minikube start
+profiles=("bmo-e2e" "bmo-e2e-upgrade")
+
+# Set up one minikube cluster per profile
+for profile in "${profiles[@]}"; do
+    minikube start --profile "${profile}" --driver=kvm2
+
+    # Attach the baremetal-e2e interface to the minikube VM.
+    # A random, locally administered MAC avoids collisions between the profiles.
+    random_mac=$(hexdump -n 6 -ve '1/1 "%.2x "' /dev/random | awk -v a="2,6,a,e" -v r="$RANDOM" 'BEGIN{srand(r);}NR==1{split(a,b,",");r=int(rand()*4+1);printf "%s%s:%s:%s:%s:%s:%s\n",substr($1,0,1),b[r],$2,$3,$4,$5,$6}')
+    virsh -c qemu:///system attach-interface --domain "${profile}" --mac="${random_mac}" \
+        --model virtio --source baremetal-e2e --type network --config
+
+    # Restart minikube to apply the changes
+    minikube stop --profile "${profile}"
+    minikube start --profile "${profile}"
+
+    # Load the locally built BMO image into the cluster
+    minikube --profile "${profile}" image load "${BMO_IMAGE}"
+done
-# Load the BMO e2e image into it
-minikube image load quay.io/metal3-io/baremetal-operator:e2e
 
 # Create libvirt domain
 VM_NAME="bmo-e2e-0"
@@ -126,6 +134,8 @@ docker run --name image-server-e2e -d \
 set +e
 
 # Run the e2e tests
+export E2E_CONF_FILE="${REPO_ROOT}/test/e2e/config/ironic.yaml"
+export USE_EXISTING_CLUSTER="true"
 make test-e2e
 
 test_status="$?"
diff --git a/hack/clean-e2e.sh b/hack/clean-e2e.sh
index 75c4a597e4..ce74d16d63 100755
--- a/hack/clean-e2e.sh
+++ b/hack/clean-e2e.sh
@@ -3,7 +3,8 @@
 REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
cd "${REPO_ROOT}" || exit 1 -minikube delete +minikube delete --profile bmo-e2e +minikube delete --profile bmo-e2e-upgrade docker rm -f vbmc docker rm -f image-server-e2e docker rm -f sushy-tools diff --git a/test/e2e/common.go b/test/e2e/common.go index 3913d57629..acaade8652 100644 --- a/test/e2e/common.go +++ b/test/e2e/common.go @@ -16,6 +16,7 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/clientcmd" metal3api "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1" @@ -296,3 +297,41 @@ func CreateBMHCredentialsSecret(ctx context.Context, client client.Client, secre return nil } + +func DeploymentRolledOut(ctx context.Context, clusterProxy framework.ClusterProxy, name string, namespace string, desiredGeneration int64) bool { + clientSet := clusterProxy.GetClientSet() + deploy, err := clientSet.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + Expect(err).To(BeNil()) + if deploy != nil { + // When the number of replicas is equal to the number of available and updated + // replicas, we know that only "new" pods are running. When we also + // have the desired number of replicas and a new enough generation, we + // know that the rollout is complete. + return (deploy.Status.UpdatedReplicas == *deploy.Spec.Replicas) && + (deploy.Status.AvailableReplicas == *deploy.Spec.Replicas) && + (deploy.Status.Replicas == *deploy.Spec.Replicas) && + (deploy.Status.ObservedGeneration >= desiredGeneration) + } + return false +} + +func SwitchKubeconfigContext(kubeConfigFile, contextName string) error { + config, err := clientcmd.LoadFromFile(kubeConfigFile) + if err != nil { + return err + } + + _, exists := config.Contexts[contextName] + if !exists { + return errors.New(fmt.Sprintf("Context %s not existed in kubeconfig file %s", contextName, kubeConfigFile)) + } + + config.CurrentContext = contextName + + err = clientcmd.WriteToFile(*config, kubeConfigFile) + if err != nil { + return err + } + + return nil +} diff --git a/test/e2e/config/fixture.yaml b/test/e2e/config/fixture.yaml index ac5507c672..c42d735536 100644 --- a/test/e2e/config/fixture.yaml +++ b/test/e2e/config/fixture.yaml @@ -15,6 +15,8 @@ variables: DEPLOY_BMO: "true" DEPLOY_CERT_MANAGER: "true" BMO_KUSTOMIZATION: "../../config/overlays/fixture" + BMO_RELEASE_BRANCH: "release-0.4" + BMO_UPGRADE_FROM_KUSTOMIZATION: "../../config/overlays/fixture-release-0.4" EXPECTED_HOST_NAME: "" # Test credentials. The tests will create a BMH with these. # For the fixture provider it does not matter much what we put here. diff --git a/test/e2e/config/ironic.yaml b/test/e2e/config/ironic.yaml index 7c930b76f0..84cda4c1d5 100644 --- a/test/e2e/config/ironic.yaml +++ b/test/e2e/config/ironic.yaml @@ -18,6 +18,8 @@ variables: DEPLOY_BMO: "true" DEPLOY_CERT_MANAGER: "true" BMO_KUSTOMIZATION: "../../config/overlays/e2e" + BMO_UPGRADE_FROM_KUSTOMIZATION: "../../config/overlays/e2e-release-0.4" + BMO_RELEASE_BRANCH: "release-0.4" IRONIC_KUSTOMIZATION: "../../ironic-deployment/overlays/e2e" EXPECTED_HOST_NAME: "localhost.localdomain" # Test credentials. The tests will create a BMH with these. 
@@ -49,3 +51,5 @@ intervals:
   default/wait-deprovisioning: ["1m", "10ms"]
   default/wait-deleted: ["20s", "10ms"]
   default/wait-secret-deletion: ["1m", "1s"]
+  upgrade/wait-provisioning: ["1m", "1s"]
+  upgrade/wait-provisioned: ["10m", "1s"]
diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
index d054d8c8e3..56fc7d816a 100644
--- a/test/e2e/e2e_suite_test.go
+++ b/test/e2e/e2e_suite_test.go
@@ -77,6 +77,8 @@ var _ = SynchronizedBeforeSuite(func() []byte {
 		if kubeconfigPath == "" {
 			kubeconfigPath = os.Getenv("HOME") + "/.kube/config"
 		}
+		err := SwitchKubeconfigContext(kubeconfigPath, "bmo-e2e")
+		Expect(err).NotTo(HaveOccurred())
 	} else {
 		clusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(ctx, bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{
 			Name:   "bmo-e2e",
@@ -89,6 +91,7 @@ var _ = SynchronizedBeforeSuite(func() []byte {
 
 	scheme := runtime.NewScheme()
 	framework.TryAddDefaultSchemes(scheme)
+
 	clusterProxy := framework.NewClusterProxy("bmo-e2e", kubeconfigPath, scheme)
 	Expect(clusterProxy).ToNot(BeNil(), "Failed to get a cluster proxy")
 
diff --git a/test/e2e/upgrade_test.go b/test/e2e/upgrade_test.go
new file mode 100644
index 0000000000..5a9fe3f42d
--- /dev/null
+++ b/test/e2e/upgrade_test.go
@@ -0,0 +1,406 @@
+package e2e
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	v1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/cluster-api/test/framework"
+	"sigs.k8s.io/cluster-api/test/framework/bootstrap"
+	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
+	"sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/patch"
+
+	metal3api "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
+)
+
+const hardwareDetailsRelease04 = `
+{
+  "cpu": {
+    "arch": "x86_64",
+    "count": 2,
+    "flags": [
+      "3dnowprefetch",
+      "abm",
+      "adx",
+      "aes",
+      "apic",
+      "arat",
+      "arch_capabilities",
+      "avx",
+      "avx2",
+      "avx_vnni",
+      "bmi1",
+      "bmi2",
+      "clflush",
+      "clflushopt",
+      "clwb",
+      "cmov",
+      "constant_tsc",
+      "cpuid",
+      "cpuid_fault",
+      "cx16",
+      "cx8",
+      "de",
+      "ept",
+      "ept_ad",
+      "erms",
+      "f16c",
+      "flexpriority",
+      "fma",
+      "fpu",
+      "fsgsbase",
+      "fsrm",
+      "fxsr",
+      "gfni",
+      "hypervisor",
+      "ibpb",
+      "ibrs",
+      "ibrs_enhanced",
+      "invpcid",
+      "lahf_lm",
+      "lm",
+      "mca",
+      "mce",
+      "md_clear",
+      "mmx",
+      "movbe",
+      "movdir64b",
+      "movdiri",
+      "msr",
+      "mtrr",
+      "nopl",
+      "nx",
+      "ospke",
+      "pae",
+      "pat",
+      "pclmulqdq",
+      "pdpe1gb",
+      "pge",
+      "pku",
+      "pni",
+      "popcnt",
+      "pse",
+      "pse36",
+      "rdpid",
+      "rdrand",
+      "rdseed",
+      "rdtscp",
+      "rep_good",
+      "sep",
+      "serialize",
+      "sha_ni",
+      "smap",
+      "smep",
+      "ss",
+      "ssbd",
+      "sse",
+      "sse2",
+      "sse4_1",
+      "sse4_2",
+      "ssse3",
+      "stibp",
+      "syscall",
+      "tpr_shadow",
+      "tsc",
+      "tsc_adjust",
+      "tsc_deadline_timer",
+      "tsc_known_freq",
+      "umip",
+      "vaes",
+      "vme",
+      "vmx",
+      "vnmi",
+      "vpclmulqdq",
+      "vpid",
+      "waitpkg",
+      "x2apic",
+      "xgetbv1",
+      "xsave",
+      "xsavec",
+      "xsaveopt",
+      "xsaves",
+      "xtopology"
+    ],
+    "model": "12th Gen Intel(R) Core(TM) i9-12900H"
+  },
+  "firmware": {
+    "bios": {
+      "date": "04/01/2014",
+      "vendor": "SeaBIOS",
+      "version": "1.15.0-1"
+    }
+  },
+  "hostname": "localhost.localdomain",
+  "nics": [
+    {
+      "ip": "192.168.222.122",
+      "mac": "00:60:2f:31:81:01",
+      "model": "0x1af4 0x0001",
+      "name": "enp1s0",
+      "pxe": true
+    },
+    {
+      "ip": "fe80::570a:edf2:a3a7:4eb8%enp1s0",
+      "mac": "00:60:2f:31:81:01",
+      "model": "0x1af4 0x0001",
"0x1af4 0x0001", + "name": "enp1s0", + "pxe": true + } + ], + "ramMebibytes": 4096, + "storage": [ + { + "name": "/dev/disk/by-path/pci-0000:04:00.0", + "rotational": true, + "sizeBytes": 21474836480, + "type": "HDD", + "vendor": "0x1af4" + } + ], + "systemVendor": { + "manufacturer": "QEMU", + "productName": "Standard PC (Q35 + ICH9, 2009)" + } +} +` + +var _ = Describe("BMO Upgrade", func() { + var ( + specName = "upgrade" + namespace *corev1.Namespace + bmoNamespace string + bmoDeployName string + bmoReleaseBranch string + bmcUser string + bmcPassword string + bmcAddress string + bootMacAddress string + bmoKustomization string + upgradeClusterProvider bootstrap.ClusterProvider + upgradeClusterProxy framework.ClusterProxy + bmh metal3api.BareMetalHost + ) + BeforeEach(func() { + bmcUser = e2eConfig.GetVariable("BMC_USER") + bmcPassword = e2eConfig.GetVariable("BMC_PASSWORD") + bmcAddress = e2eConfig.GetVariable("BMC_ADDRESS") + bootMacAddress = e2eConfig.GetVariable("BOOT_MAC_ADDRESS") + bmoNamespace = "baremetal-operator-system" + bmoDeployName = "baremetal-operator-controller-manager" + bmoReleaseBranch = e2eConfig.GetVariable("BMO_RELEASE_BRANCH") + bmoUpgradeFromImageName := fmt.Sprintf("quay.io/metal3-io/baremetal-operator:%s", bmoReleaseBranch) + bmoKustomization = e2eConfig.GetVariable("BMO_KUSTOMIZATION") + bmoUpgradeFromKustomization := e2eConfig.GetVariable("BMO_UPGRADE_FROM_KUSTOMIZATION") + + bmoUpgradeFromImage := clusterctl.ContainerImage{ + Name: bmoUpgradeFromImageName, + LoadBehavior: clusterctl.MustLoadImage, + } + + By("Creating a separate cluster for upgrade tests") + var kubeconfigPath string + if useExistingCluster { + kubeconfigPath = os.Getenv("KUBECONFIG") + if kubeconfigPath == "" { + kubeconfigPath = os.Getenv("HOME") + "/.kube/config" + } + err := SwitchKubeconfigContext(kubeconfigPath, "bmo-e2e-upgrade") + Expect(err).NotTo(HaveOccurred()) + } else { + upgradeClusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(ctx, bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{ + Name: "bmo-e2e-upgrade", + Images: append(e2eConfig.Images, bmoUpgradeFromImage), + }) + Expect(upgradeClusterProvider).ToNot(BeNil(), "Failed to create a cluster") + kubeconfigPath = clusterProvider.GetKubeconfigPath() + } + Expect(kubeconfigPath).To(BeAnExistingFile(), "Failed to get the kubeconfig file for the cluster") + scheme := runtime.NewScheme() + framework.TryAddDefaultSchemes(scheme) + metal3api.AddToScheme(scheme) + upgradeClusterProxy = framework.NewClusterProxy("bmo-e2e-upgrade", kubeconfigPath, scheme) + By("Installing cert-manager on the upgrade cluster") + cmVersion := e2eConfig.GetVariable("CERT_MANAGER_VERSION") + err := installCertManager(ctx, upgradeClusterProxy, cmVersion) + Expect(err).NotTo(HaveOccurred()) + By("Waiting for cert-manager webhook") + Eventually(func() error { + return checkCertManagerWebhook(ctx, upgradeClusterProxy) + }, e2eConfig.GetIntervals("default", "wait-available")...).Should(Succeed()) + err = checkCertManagerAPI(upgradeClusterProxy) + Expect(err).NotTo(HaveOccurred()) + + By("Installing BMO on the upgrade cluster") + manifest, err := buildKustomizeManifest(bmoUpgradeFromKustomization) + Expect(err).NotTo(HaveOccurred()) + err = upgradeClusterProxy.Apply(ctx, manifest) + Expect(err).NotTo(HaveOccurred()) + + bmoDeployment := &v1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baremetal-operator-controller-manager", + Namespace: "baremetal-operator-system", + }, + } + // Wait for it to become available + 
+		framework.WaitForDeploymentsAvailable(ctx, framework.WaitForDeploymentsAvailableInput{
+			Getter:     upgradeClusterProxy.GetClient(),
+			Deployment: bmoDeployment,
+		}, e2eConfig.GetIntervals("default", "wait-deployment")...)
+		// Set up log watcher
+		framework.WatchDeploymentLogsByName(ctx, framework.WatchDeploymentLogsByNameInput{
+			GetLister:  upgradeClusterProxy.GetClient(),
+			Cache:      upgradeClusterProxy.GetCache(ctx),
+			ClientSet:  upgradeClusterProxy.GetClientSet(),
+			Deployment: bmoDeployment,
+			LogPath:    filepath.Join(artifactFolder, "logs", bmoDeployment.GetNamespace()),
+		})
+
+		if e2eConfig.GetVariable("DEPLOY_IRONIC") != "false" {
+			// Install Ironic
+			By("Installing Ironic on the upgrade cluster")
+			ironicKustomization := e2eConfig.GetVariable("IRONIC_KUSTOMIZATION")
+			ironicManifest, err := buildKustomizeManifest(ironicKustomization)
+			Expect(err).NotTo(HaveOccurred())
+			err = upgradeClusterProxy.Apply(ctx, ironicManifest)
+			Expect(err).NotTo(HaveOccurred())
+
+			ironicDeployment := &v1.Deployment{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "baremetal-operator-ironic",
+					Namespace: bmoNamespace,
+				},
+			}
+			// Wait for it to become available
+			framework.WaitForDeploymentsAvailable(ctx, framework.WaitForDeploymentsAvailableInput{
+				Getter:     upgradeClusterProxy.GetClient(),
+				Deployment: ironicDeployment,
+			}, e2eConfig.GetIntervals("ironic", "wait-deployment")...)
+			// Set up log watcher
+			framework.WatchDeploymentLogsByName(ctx, framework.WatchDeploymentLogsByNameInput{
+				GetLister:  upgradeClusterProxy.GetClient(),
+				Cache:      upgradeClusterProxy.GetCache(ctx),
+				ClientSet:  upgradeClusterProxy.GetClientSet(),
+				Deployment: ironicDeployment,
+				LogPath:    filepath.Join(artifactFolder, "logs", ironicDeployment.GetNamespace()),
+			})
+		}
+
+		// Create the test namespace regardless of whether Ironic is deployed
+		namespace, cancelWatches = framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{
+			Creator:   upgradeClusterProxy.GetClient(),
+			ClientSet: upgradeClusterProxy.GetClientSet(),
+			Name:      fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
+			LogFolder: artifactFolder,
+		})
+	})
+
+	It("Should upgrade BMO to latest version", func() {
+		By("Creating a secret with BMH credentials")
+		bmcCredentials := corev1.Secret{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "bmc-credentials",
+				Namespace: namespace.Name,
+			},
+			StringData: map[string]string{
+				"username": bmcUser,
+				"password": bmcPassword,
+			},
+		}
+		err := upgradeClusterProxy.GetClient().Create(ctx, &bmcCredentials)
+		Expect(err).NotTo(HaveOccurred())
+
+		By("Creating a BMH with inspection disabled and hardware details added")
+		bmh = metal3api.BareMetalHost{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      specName,
+				Namespace: namespace.Name,
+				Annotations: map[string]string{
+					metal3api.InspectAnnotationPrefix:   "disabled",
+					metal3api.HardwareDetailsAnnotation: hardwareDetailsRelease04,
+				},
+			},
+			Spec: metal3api.BareMetalHostSpec{
+				Online: true,
+				BMC: metal3api.BMCDetails{
+					Address:         bmcAddress,
+					CredentialsName: "bmc-credentials",
+				},
+				BootMode:       metal3api.Legacy,
+				BootMACAddress: bootMacAddress,
+			},
+		}
+		err = upgradeClusterProxy.GetClient().Create(ctx, &bmh)
+		Expect(err).NotTo(HaveOccurred())
+
+		By("Waiting for the BMH to become available")
+		WaitForBmhInProvisioningState(ctx, WaitForBmhInProvisioningStateInput{
+			Client: upgradeClusterProxy.GetClient(),
+			Bmh:    bmh,
+			State:  metal3api.StateAvailable,
+		}, e2eConfig.GetIntervals(specName, "wait-available")...)
+ + By("Upgrading BMO deployment") + clientSet := upgradeClusterProxy.GetClientSet() + deploy, err := clientSet.AppsV1().Deployments(bmoNamespace).Get(ctx, bmoDeployName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + manifest, err := buildKustomizeManifest(bmoKustomization) + Expect(err).NotTo(HaveOccurred()) + err = upgradeClusterProxy.Apply(ctx, manifest) + Expect(err).NotTo(HaveOccurred()) + By("Waiting for BMO update to rollout") + Eventually(func() bool { + return DeploymentRolledOut(ctx, upgradeClusterProxy, bmoDeployName, bmoNamespace, deploy.Status.ObservedGeneration+1) + }, + e2eConfig.GetIntervals("default", "wait-deployment")..., + ).Should(BeTrue()) + By("Patching the BMH to test provisioning") + helper, err := patch.NewHelper(&bmh, upgradeClusterProxy.GetClient()) + Expect(err).NotTo(HaveOccurred()) + bmh.ObjectMeta.Annotations[metal3api.HardwareDetailsAnnotation] = hardwareDetails + bmh.Spec.Image = &metal3api.Image{ + URL: e2eConfig.GetVariable("IMAGE_URL"), + Checksum: e2eConfig.GetVariable("IMAGE_CHECKSUM"), + } + bmh.Spec.RootDeviceHints = &metal3api.RootDeviceHints{ + DeviceName: "/dev/vda", + } + Expect(helper.Patch(ctx, &bmh)).To(Succeed()) + + By("Waiting for the BMH to be in provisioning state") + WaitForBmhInProvisioningState(ctx, WaitForBmhInProvisioningStateInput{ + Client: upgradeClusterProxy.GetClient(), + Bmh: bmh, + State: metal3api.StateProvisioning, + }, e2eConfig.GetIntervals(specName, "wait-provisioning")...) + + By("Waiting for the BMH to become provisioned") + WaitForBmhInProvisioningState(ctx, WaitForBmhInProvisioningStateInput{ + Client: upgradeClusterProxy.GetClient(), + Bmh: bmh, + State: metal3api.StateProvisioned, + }, e2eConfig.GetIntervals(specName, "wait-provisioned")...) + }) + + AfterEach(func() { + cancelWatches() + if !skipCleanup { + if upgradeClusterProxy != nil { + upgradeClusterProxy.Dispose(ctx) + } + if upgradeClusterProvider != nil { + upgradeClusterProvider.Dispose(ctx) + } + } + }) + +})