Commit 4cfcd3f

Use Logf and AnnotateBmh functions from BMO E2E

Signed-off-by: Huy Mai <huy.mai@est.tech>

mquhuy committed May 20, 2024
1 parent 96c0814 commit 4cfcd3f
Showing 14 changed files with 184 additions and 96 deletions.
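
The change is mechanical: the suite's local Logf and AnnotateBmh helpers are removed and every caller switches to the identically named functions exported by the BMO E2E package, imported as bmo_e2e. For reference, a sketch of those two helpers follows; the bodies are the local versions deleted from test/e2e/common.go in this diff, on the assumption (implied by the commit) that the BMO E2E versions behave the same:

package e2e

import (
    "context"
    "fmt"

    bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    "sigs.k8s.io/cluster-api/util/patch"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// Logf writes a printf-style INFO line to the ginkgo writer.
func Logf(format string, a ...interface{}) {
    fmt.Fprintf(GinkgoWriter, "INFO: "+format+"\n", a...)
}

// AnnotateBmh sets an annotation on a BareMetalHost and patches it back to
// the cluster; a nil value deletes the annotation instead.
func AnnotateBmh(ctx context.Context, client client.Client, host bmov1alpha1.BareMetalHost, key string, value *string) {
    helper, err := patch.NewHelper(&host, client)
    Expect(err).NotTo(HaveOccurred())
    annotations := host.GetAnnotations()
    if annotations == nil {
        annotations = make(map[string]string)
    }
    if value == nil {
        delete(annotations, key)
    } else {
        annotations[key] = *value
    }
    host.SetAnnotations(annotations)
    Expect(helper.Patch(ctx, &host)).To(Succeed())
}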
3 changes: 2 additions & 1 deletion test/e2e/cert_rotation.go

@@ -8,6 +8,7 @@ import (
 
     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
+    bmo_e2e "github.com/metal3-io/baremetal-operator/test/e2e"
     appv1 "k8s.io/api/apps/v1"
     corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -25,7 +26,7 @@ type CertRotationInput struct {
 }
 
 func certRotation(ctx context.Context, inputGetter func() CertRotationInput) {
-    Logf("Start the certificate rotation test")
+    bmo_e2e.Logf("Start the certificate rotation test")
     input := inputGetter()
     clientSet := input.ManagementCluster.GetClientSet()
     clusterClient := input.ManagementCluster.GetClient()
52 changes: 16 additions & 36 deletions test/e2e/common.go

@@ -18,6 +18,7 @@ import (
     "time"
 
     bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
+    bmo_e2e "github.com/metal3-io/baremetal-operator/test/e2e"
     infrav1 "github.com/metal3-io/cluster-api-provider-metal3/api/v1beta1"
     ipamv1 "github.com/metal3-io/ip-address-manager/api/v1alpha1"
     . "github.com/onsi/ginkgo/v2"
@@ -60,14 +61,10 @@ func Byf(format string, a ...interface{}) {
     By(fmt.Sprintf(format, a...))
 }
 
-func Logf(format string, a ...interface{}) {
-    fmt.Fprintf(GinkgoWriter, "INFO: "+format+"\n", a...)
-}
-
 func LogFromFile(logFile string) {
     data, err := os.ReadFile(filepath.Clean(logFile))
     Expect(err).ToNot(HaveOccurred(), "No log file found")
-    Logf(string(data))
+    bmo_e2e.Logf(string(data))
 }
 
 // return only the boolean value from ParseBool.
@@ -157,9 +154,9 @@ func EnsureImage(k8sVersion string) (imageURL string, imageChecksum string) {
     imagePath := filepath.Join(ironicImageDir, imageName)
     rawImagePath := filepath.Join(ironicImageDir, rawImageName)
     if _, err := os.Stat(rawImagePath); err == nil {
-        Logf("Local image %v already exists", rawImagePath)
+        bmo_e2e.Logf("Local image %v already exists", rawImagePath)
     } else if os.IsNotExist(err) {
-        Logf("Local image %v is not found \nDownloading..", rawImagePath)
+        bmo_e2e.Logf("Local image %v is not found \nDownloading..", rawImagePath)
         err = DownloadFile(imagePath, fmt.Sprintf("%s/%s", imageLocation, imageName))
         Expect(err).ToNot(HaveOccurred())
         cmd := exec.Command("qemu-img", "convert", "-O", "raw", imagePath, rawImagePath) // #nosec G204:gosec
@@ -170,7 +167,7 @@ func EnsureImage(k8sVersion string) (imageURL string, imageChecksum string) {
         formattedSha256sum := fmt.Sprintf("%x", sha256sum)
         err = os.WriteFile(fmt.Sprintf("%s/%s.sha256sum", ironicImageDir, rawImageName), []byte(formattedSha256sum), 0544) //#nosec G306:gosec
         Expect(err).ToNot(HaveOccurred())
-        Logf("Image: %v downloaded", rawImagePath)
+        bmo_e2e.Logf("Image: %v downloaded", rawImagePath)
     } else {
         fmt.Fprintf(GinkgoWriter, "ERROR: %v\n", err)
         os.Exit(1)
@@ -234,23 +231,6 @@ func FilterMachines(machines []clusterv1.Machine, accept func(clusterv1.Machine)
     return
 }
 
-// AnnotateBmh annotates BaremetalHost with a given key and value.
-func AnnotateBmh(ctx context.Context, client client.Client, host bmov1alpha1.BareMetalHost, key string, value *string) {
-    helper, err := patch.NewHelper(&host, client)
-    Expect(err).NotTo(HaveOccurred())
-    annotations := host.GetAnnotations()
-    if annotations == nil {
-        annotations = make(map[string]string)
-    }
-    if value == nil {
-        delete(annotations, key)
-    } else {
-        annotations[key] = *value
-    }
-    host.SetAnnotations(annotations)
-    Expect(helper.Patch(ctx, &host)).To(Succeed())
-}
-
 // DeleteNodeReuseLabelFromHost deletes nodeReuseLabelName from the host if it exists.
 func DeleteNodeReuseLabelFromHost(ctx context.Context, client client.Client, host bmov1alpha1.BareMetalHost, nodeReuseLabelName string) {
     helper, err := patch.NewHelper(&host, client)
@@ -413,7 +393,7 @@ type WaitForNumInput struct {
 
 // WaitForNumBmhInState will wait for the given number of BMHs to be in the given state.
 func WaitForNumBmhInState(ctx context.Context, state bmov1alpha1.ProvisioningState, input WaitForNumInput) {
-    Logf("Waiting for %d BMHs to be in %s state", input.Replicas, state)
+    bmo_e2e.Logf("Waiting for %d BMHs to be in %s state", input.Replicas, state)
     Eventually(func(g Gomega) {
         bmhList := bmov1alpha1.BareMetalHostList{}
         g.Expect(input.Client.List(ctx, &bmhList, input.Options...)).To(Succeed())
@@ -424,7 +404,7 @@
 
 // WaitForNumMetal3MachinesReady will wait for the given number of M3Ms to be ready.
 func WaitForNumMetal3MachinesReady(ctx context.Context, input WaitForNumInput) {
-    Logf("Waiting for %d Metal3Machines to be ready", input.Replicas)
+    bmo_e2e.Logf("Waiting for %d Metal3Machines to be ready", input.Replicas)
     Eventually(func(g Gomega) {
         m3mList := infrav1.Metal3MachineList{}
         g.Expect(input.Client.List(ctx, &m3mList, input.Options...)).To(Succeed())
@@ -441,7 +421,7 @@ func WaitForNumMetal3MachinesReady(ctx context.Context, input WaitForNumInput) {
 
 // WaitForNumMachinesInState will wait for the given number of Machines to be in the given state.
 func WaitForNumMachinesInState(ctx context.Context, phase clusterv1.MachinePhase, input WaitForNumInput) {
-    Logf("Waiting for %d Machines to be in %s phase", input.Replicas, phase)
+    bmo_e2e.Logf("Waiting for %d Machines to be in %s phase", input.Replicas, phase)
     inPhase := func(machine clusterv1.Machine) bool {
         return machine.Status.GetTypedPhase() == phase
     }
@@ -510,8 +490,8 @@ func GenerateIPPoolPreallocations(ctx context.Context, ippool ipamv1.IPPool, poo
     for m3dataPoolName, ipaddress := range allocations {
         fmt.Println("datapoolName:", m3dataPoolName, "=>", "ipaddress:", ipaddress)
         BMHName := strings.Split(m3dataPoolName, "-"+poolName)[0]
-        Logf("poolName: %s", poolName)
-        Logf("BMHName: %s", BMHName)
+        bmo_e2e.Logf("poolName: %s", poolName)
+        bmo_e2e.Logf("BMHName: %s", BMHName)
         newAllocations[BMHName+"-"+ippool.Name] = ipaddress
     }
     return newAllocations, nil
@@ -531,10 +511,10 @@ func Metal3DataToMachineName(m3data infrav1.Metal3Data) (string, error) {
 
 // FilterMetal3DatasByName returns a filtered list of m3data objects with specific name.
 func FilterMetal3DatasByName(m3datas []infrav1.Metal3Data, name string) (result []infrav1.Metal3Data) {
-    Logf("m3datas: %v", m3datas)
-    Logf("looking for name: %s", name)
+    bmo_e2e.Logf("m3datas: %v", m3datas)
+    bmo_e2e.Logf("looking for name: %s", name)
     for _, m3data := range m3datas {
-        Logf("m3data: %v", m3data)
+        bmo_e2e.Logf("m3data: %v", m3data)
         if m3data.ObjectMeta.Name == name {
             result = append(result, m3data)
         }
@@ -583,7 +563,7 @@ func MachineToVMName(ctx context.Context, cli client.Client, m *clusterv1.Machin
     for _, machine := range allMetal3Machines.Items {
         name, err := Metal3MachineToMachineName(machine)
         if err != nil {
-            Logf("error getting Machine name from Metal3machine: %w", err)
+            bmo_e2e.Logf("error getting Machine name from Metal3machine: %w", err)
         } else if name == m.Name {
             return BmhNameToVMName(Metal3MachineToBmhName(machine)), nil
         }
@@ -753,7 +733,7 @@ func (Metal3LogCollector) CollectMachineLog(ctx context.Context, cli client.Clie
         }
     }
 
-    Logf("Successfully collected logs for machine %s", m.Name)
+    bmo_e2e.Logf("Successfully collected logs for machine %s", m.Name)
     return nil
 }
 
@@ -784,7 +764,7 @@ func LabelCRD(ctx context.Context, c client.Client, crdName string, labels map[s
     if err != nil {
         return err
     }
-    Logf("CRD '%s' labeled successfully\n", crdName)
+    bmo_e2e.Logf("CRD '%s' labeled successfully\n", crdName)
     return nil
 }
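
Note the contract of the removed AnnotateBmh, which callers in this diff depend on: the value parameter is a *string, so a non-nil pointer (even to an empty string) sets the annotation, while nil deletes it. A hypothetical caller showing both paths — the annotation key here is an illustrative assumption, not taken from this commit:

package e2e

import (
    "context"

    bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
    bmo_e2e "github.com/metal3-io/baremetal-operator/test/e2e"
    "k8s.io/utils/ptr"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// toggleExampleAnnotation is a hypothetical helper: it sets the annotation
// when on is true (with an empty value) and removes it when on is false.
func toggleExampleAnnotation(ctx context.Context, cli client.Client, host bmov1alpha1.BareMetalHost, on bool) {
    const key = "example.metal3.io/paused" // assumed key, for illustration only
    if on {
        bmo_e2e.AnnotateBmh(ctx, cli, host, key, ptr.To("")) // set, empty value
    } else {
        bmo_e2e.AnnotateBmh(ctx, cli, host, key, nil) // delete
    }
}

This is the same pattern inspection.go below uses when it sets the inspect annotation with ptr.To("").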
3 changes: 2 additions & 1 deletion test/e2e/e2e_suite_test.go

@@ -12,6 +12,7 @@ import (
 
     "github.com/jinzhu/copier"
     bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
+    bmo_e2e "github.com/metal3-io/baremetal-operator/test/e2e"
     infrav1 "github.com/metal3-io/cluster-api-provider-metal3/api/v1beta1"
     ipamv1 "github.com/metal3-io/ip-address-manager/api/v1alpha1"
     . "github.com/onsi/ginkgo/v2"
@@ -262,7 +263,7 @@ func updateCalico(config *clusterctl.E2EConfig, calicoYaml, calicoInterface stri
     cniYaml, err := os.ReadFile(calicoYaml)
     Expect(err).ToNot(HaveOccurred(), "Unable to read Calico manifest")
 
-    Logf("Replace the default CIDR with the one set in $POD_CIDR")
+    bmo_e2e.Logf("Replace the default CIDR with the one set in $POD_CIDR")
     podCIDR := config.GetVariable("POD_CIDR")
     calicoContainerRegistry := config.GetVariable("DOCKER_HUB_PROXY")
     cniYaml = []byte(strings.Replace(string(cniYaml), "192.168.0.0/16", podCIDR, -1))
9 changes: 5 additions & 4 deletions test/e2e/inspection.go

@@ -4,6 +4,7 @@ import (
     "context"
 
     bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
+    bmo_e2e "github.com/metal3-io/baremetal-operator/test/e2e"
     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
     "k8s.io/utils/ptr"
@@ -26,7 +27,7 @@ type InspectionInput struct {
 
 // Inspection test request inspection on all the available BMH using annotation.
 func inspection(ctx context.Context, inputGetter func() InspectionInput) {
-    Logf("Starting inspection tests")
+    bmo_e2e.Logf("Starting inspection tests")
     input := inputGetter()
     var (
         numberOfWorkers = int(*input.E2EConfig.GetInt32PtrVariable("WORKER_MACHINE_COUNT"))
@@ -35,13 +36,13 @@ func inspection(ctx context.Context, inputGetter func() InspectionInput) {
 
     bootstrapClient := input.BootstrapClusterProxy.GetClient()
 
-    Logf("Request inspection for all Available BMHs via API")
+    bmo_e2e.Logf("Request inspection for all Available BMHs via API")
     availableBMHList := bmov1alpha1.BareMetalHostList{}
     Expect(bootstrapClient.List(ctx, &availableBMHList, client.InNamespace(input.Namespace))).To(Succeed())
-    Logf("Request inspection for all Available BMHs via API")
+    bmo_e2e.Logf("Request inspection for all Available BMHs via API")
     for _, bmh := range availableBMHList.Items {
         if bmh.Status.Provisioning.State == bmov1alpha1.StateAvailable {
-            AnnotateBmh(ctx, bootstrapClient, bmh, inspectAnnotation, ptr.To(""))
+            bmo_e2e.AnnotateBmh(ctx, bootstrapClient, bmh, inspectAnnotation, ptr.To(""))
         }
     }
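
The inspection flow above is annotation-driven: setting the inspect annotation to an empty value on an available host asks the baremetal-operator to re-inspect it, after which the host moves through the inspecting state. A minimal sketch of the same pattern, written as if it lived alongside the suite's helpers in test/e2e; the annotation literal, the Replicas field type, and the expected count are assumptions for illustration (the test itself uses the inspectAnnotation constant, whose value this diff does not show):

package e2e

import (
    "context"

    bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
    bmo_e2e "github.com/metal3-io/baremetal-operator/test/e2e"
    . "github.com/onsi/gomega"
    "k8s.io/utils/ptr"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// requestReinspection asks BMO to re-inspect every available BMH in the
// namespace, then waits for the expected number to reach the inspecting state.
func requestReinspection(ctx context.Context, cli client.Client, namespace string, expected int) {
    bmhList := bmov1alpha1.BareMetalHostList{}
    Expect(cli.List(ctx, &bmhList, client.InNamespace(namespace))).To(Succeed())
    for _, bmh := range bmhList.Items {
        if bmh.Status.Provisioning.State == bmov1alpha1.StateAvailable {
            // "inspect.metal3.io" is assumed to match the suite's inspectAnnotation.
            bmo_e2e.AnnotateBmh(ctx, cli, bmh, "inspect.metal3.io", ptr.To(""))
        }
    }
    // Uses WaitForNumBmhInState from common.go above; any polling-interval
    // fields WaitForNumInput may carry beyond those shown in this diff are omitted.
    WaitForNumBmhInState(ctx, bmov1alpha1.StateInspecting, WaitForNumInput{
        Client:   cli,
        Options:  []client.ListOption{client.InNamespace(namespace)},
        Replicas: expected,
    })
}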
15 changes: 8 additions & 7 deletions test/e2e/ip_reuse.go

@@ -6,6 +6,7 @@ import (
     "reflect"
 
     bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
+    bmo_e2e "github.com/metal3-io/baremetal-operator/test/e2e"
     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
     "k8s.io/apimachinery/pkg/types"
@@ -26,7 +27,7 @@ type IPReuseInput struct {
 }
 
 func IPReuse(ctx context.Context, inputGetter func() IPReuseInput) {
-    Logf("Starting IP reuse tests")
+    bmo_e2e.Logf("Starting IP reuse tests")
     input := inputGetter()
     targetClusterClient := input.TargetCluster.GetClient()
     managementClusterClient := input.BootstrapClusterProxy.GetClient()
@@ -86,11 +87,11 @@ func IPReuse(ctx context.Context, inputGetter func() IPReuseInput) {
     By("Get new values of spec.Preallocations field for baremetal IPPool")
     bmv4PoolPreallocations, err := GenerateIPPoolPreallocations(ctx, baremetalv4Pool[0], baremetalv4PoolName, managementClusterClient)
     Expect(err).NotTo(HaveOccurred())
-    Logf("new values of spec.Preallocations field for baremetal IPPool is: %s", bmv4PoolPreallocations)
+    bmo_e2e.Logf("new values of spec.Preallocations field for baremetal IPPool is: %s", bmv4PoolPreallocations)
     By("Get new values of spec.Preallocations field for provisioning IPPool")
     provPoolPreallocations, err := GenerateIPPoolPreallocations(ctx, provisioningPool[0], provisioningPoolName, managementClusterClient)
     Expect(err).NotTo(HaveOccurred())
-    Logf("new values of spec.Preallocations field for provisioning IPPool is: %s", provPoolPreallocations)
+    bmo_e2e.Logf("new values of spec.Preallocations field for provisioning IPPool is: %s", provPoolPreallocations)
 
     By("Patch baremetal IPPool with new Preallocations field and values")
     Expect(managementClusterClient.Get(ctx, client.ObjectKey{Namespace: input.Namespace, Name: baremetalv4PoolName}, &baremetalv4Pool[0])).To(Succeed())
@@ -157,12 +158,12 @@ func IPReuse(ctx context.Context, inputGetter func() IPReuseInput) {
     Expect(provisioningPool).To(HaveLen(1))
 
     By("Check if same IP addresses are reused for nodes")
-    Logf("baremetalv4Pool[0].Spec.PreAllocations: %v", baremetalv4Pool[0].Spec.PreAllocations)
-    Logf("baremetalv4Pool[0].Status.Allocations: %v", baremetalv4Pool[0].Status.Allocations)
+    bmo_e2e.Logf("baremetalv4Pool[0].Spec.PreAllocations: %v", baremetalv4Pool[0].Spec.PreAllocations)
+    bmo_e2e.Logf("baremetalv4Pool[0].Status.Allocations: %v", baremetalv4Pool[0].Status.Allocations)
     bmv4equal := reflect.DeepEqual(baremetalv4Pool[0].Spec.PreAllocations, baremetalv4Pool[0].Status.Allocations)
     Expect(bmv4equal).To(BeTrue(), "The same IP addreesses from baremetal IPPool were not reused for nodes")
-    Logf("provisioningPool[0].Spec.PreAllocations: %v", provisioningPool[0].Spec.PreAllocations)
-    Logf("provisioningPool[0].Status.Allocations: %v", provisioningPool[0].Status.Allocations)
+    bmo_e2e.Logf("provisioningPool[0].Spec.PreAllocations: %v", provisioningPool[0].Spec.PreAllocations)
+    bmo_e2e.Logf("provisioningPool[0].Status.Allocations: %v", provisioningPool[0].Status.Allocations)
     provequal := reflect.DeepEqual(provisioningPool[0].Spec.PreAllocations, provisioningPool[0].Status.Allocations)
     Expect(provequal).To(BeTrue(), "The same IP addreesses from provisioning IPPool were not reused for nodes")
13 changes: 7 additions & 6 deletions test/e2e/live_iso_test.go

@@ -8,6 +8,7 @@ import (
     "strings"
 
     bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
+    bmo_e2e "github.com/metal3-io/baremetal-operator/test/e2e"
     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
     "k8s.io/utils/ptr"
@@ -44,7 +45,7 @@ func liveIsoTest() {
     })
     It("Should update the BMH with live ISO", func() {
         liveISOImageURL := e2eConfig.GetVariable("LIVE_ISO_IMAGE")
-        Logf("Starting live ISO test")
+        bmo_e2e.Logf("Starting live ISO test")
         bootstrapClient := bootstrapClusterProxy.GetClient()
         ListBareMetalHosts(ctx, bootstrapClient, client.InNamespace(namespace))
 
@@ -59,12 +60,12 @@ func liveIsoTest() {
         Expect(err).NotTo(HaveOccurred(), "Error getting BMHs")
         var isoBmh bmov1alpha1.BareMetalHost
         for _, bmh := range bmhs {
-            Logf("Checking BMH %s", bmh.Name)
+            bmo_e2e.Logf("Checking BMH %s", bmh.Name)
             // Pick the first BMH that is available and uses redfish-virtualmedia (ipmi and redfish does not support live-iso)
             if bmh.Status.Provisioning.State == bmov1alpha1.StateAvailable &&
                 strings.HasPrefix(bmh.Spec.BMC.Address, "redfish-virtualmedia") {
                 isoBmh = bmh
-                Logf("BMH %s is in %s state", bmh.Name, bmh.Status.Provisioning.State)
+                bmo_e2e.Logf("BMH %s is in %s state", bmh.Name, bmh.Status.Provisioning.State)
                 break
             }
         }
@@ -83,7 +84,7 @@ func liveIsoTest() {
         Eventually(func(g Gomega) {
             g.Expect(bootstrapClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: isoBmhName}, &isoBmh)).To(Succeed())
             g.Expect(isoBmh.Status.Provisioning.State).To(Equal(bmov1alpha1.StateProvisioned), fmt.Sprintf("BMH %s is not in provisioned state", isoBmh.Name))
-            Logf("BMH %s is in %s state", isoBmh.Name, isoBmh.Status.Provisioning.State)
+            bmo_e2e.Logf("BMH %s is in %s state", isoBmh.Name, isoBmh.Status.Provisioning.State)
         }, e2eConfig.GetIntervals(specName, "wait-bmh-provisioned")...).Should(Succeed())
         ListBareMetalHosts(ctx, bootstrapClient, client.InNamespace(namespace))
 
@@ -110,9 +111,9 @@ func liveIsoTest() {
 
         for _, bmh := range bmhs {
             bmh := bmh // for gosec G601
-            Logf("Checking BMH %s", bmh.Name)
+            bmo_e2e.Logf("Checking BMH %s", bmh.Name)
             if bmh.Status.Provisioning.State == bmov1alpha1.StateProvisioned {
-                Logf("live ISO image booted BMH found %s", bmh.Name)
+                bmo_e2e.Logf("live ISO image booted BMH found %s", bmh.Name)
                 bmh.Spec.Online = false
                 bmh.Spec.Image = nil
                 Expect(bootstrapClient.Update(ctx, &bmh)).NotTo(HaveOccurred())
3 changes: 2 additions & 1 deletion test/e2e/node_deletion_remediation.go

@@ -6,6 +6,7 @@ import (
 
     bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
     infrav1 "github.com/metal3-io/cluster-api-provider-metal3/api/v1beta1"
+    bmo_e2e "github.com/metal3-io/baremetal-operator/test/e2e"
     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
     corev1 "k8s.io/api/core/v1"
@@ -53,7 +54,7 @@ type NodeDeletionRemediation struct {
 */
 
 func nodeDeletionRemediation(ctx context.Context, inputGetter func() NodeDeletionRemediation) {
-    Logf("Starting node deletion remediation tests")
+    bmo_e2e.Logf("Starting node deletion remediation tests")
     input := inputGetter()
     bootstrapClient := input.BootstrapClusterProxy.GetClient()
     targetClient := input.TargetCluster.GetClient()
13 changes: 7 additions & 6 deletions test/e2e/pivoting.go

@@ -13,6 +13,7 @@ import (
     containerTypes "github.com/docker/docker/api/types/container"
     docker "github.com/docker/docker/client"
     bmov1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
+    bmo_e2e "github.com/metal3-io/baremetal-operator/test/e2e"
     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
     corev1 "k8s.io/api/core/v1"
@@ -50,7 +51,7 @@ type PivotingInput struct {
 
 // Pivoting implements a test that verifies successful moving of management resources (CRs, BMO, Ironic) to a target cluster after initializing it with Provider components.
 func pivoting(ctx context.Context, inputGetter func() PivotingInput) {
-    Logf("Starting pivoting tests")
+    bmo_e2e.Logf("Starting pivoting tests")
     input := inputGetter()
     numberOfWorkers := int(*input.E2EConfig.GetInt32PtrVariable("WORKER_MACHINE_COUNT"))
     numberOfControlplane := int(*input.E2EConfig.GetInt32PtrVariable("CONTROL_PLANE_MACHINE_COUNT"))
@@ -92,14 +93,14 @@ func pivoting(ctx context.Context, inputGetter func() PivotingInput) {
     By("Fetch target cluster kubeconfig for target cluster log collection")
     kconfigPathWorkload := input.TargetCluster.GetKubeconfigPath()
     os.Setenv("KUBECONFIG_WORKLOAD", kconfigPathWorkload)
-    Logf("Save kubeconfig in temp folder for project-infra target log collection")
+    bmo_e2e.Logf("Save kubeconfig in temp folder for project-infra target log collection")
     // TODO(smoshiur1237): This is a workaround to copy the target kubeconfig and enable project-infra
     // target log collection. There is possibility to handle the kubeconfig in better way.
     // KubeconfigPathTemp will be used by project-infra target log collection only incase of failed e2e test
     kubeconfigPathTemp := "/tmp/kubeconfig-test1.yaml"
     cmd = exec.Command("cp", kconfigPathWorkload, kubeconfigPathTemp) // #nosec G204:gosec
     stdoutStderr, er := cmd.CombinedOutput()
-    Logf("%s\n", stdoutStderr)
+    bmo_e2e.Logf("%s\n", stdoutStderr)
     Expect(er).ToNot(HaveOccurred(), "Cannot fetch target cluster kubeconfig")
 
     By("Remove Ironic containers from the source cluster")
@@ -313,7 +314,7 @@ func installIronicBMO(ctx context.Context, inputGetter func() installIronicBMOIn
     cmd.Env = append(env, os.Environ()...)
 
     stdoutStderr, er := cmd.CombinedOutput()
-    Logf("%s\n", stdoutStderr)
+    bmo_e2e.Logf("%s\n", stdoutStderr)
     Expect(er).ToNot(HaveOccurred(), "Failed to deploy Ironic")
     deploymentNameList := []string{}
     if input.deployIronic {
@@ -417,7 +418,7 @@ type RePivotingInput struct {
 }
 
 func rePivoting(ctx context.Context, inputGetter func() RePivotingInput) {
-    Logf("Start the re-pivoting test")
+    bmo_e2e.Logf("Start the re-pivoting test")
     input := inputGetter()
     numberOfWorkers := int(*input.E2EConfig.GetInt32PtrVariable("WORKER_MACHINE_COUNT"))
     numberOfControlplane := int(*input.E2EConfig.GetInt32PtrVariable("CONTROL_PLANE_MACHINE_COUNT"))
@@ -431,7 +432,7 @@ func rePivoting(ctx context.Context, inputGetter func() RePivotingInput) {
     _ = cmd.Start()
     errorData, _ := io.ReadAll(errorPipe)
     if len(errorData) > 0 {
-        Logf("Error of the shell: %v\n", string(errorData))
+        bmo_e2e.Logf("Error of the shell: %v\n", string(errorData))
     }
 
     By("Fetch manifest for workload cluster after pivot")