Changes to make use of ginkgo v2 (#105)
* [WIP] Changes to make use of ginkgo v2

Ref: kubernetes-sigs/cluster-api#6906

* KRBN-5429 added validation for machine template parameters

* fixes for supporting ginkgo v2

* fixed panic by calling SetupSignalHandler only once

* updated package with CVE fixes
deepakm-ntnx committed Aug 29, 2022
1 parent 9db4848 commit 4176c1e
Showing 26 changed files with 377 additions and 255 deletions.
11 changes: 6 additions & 5 deletions Makefile
@@ -62,10 +62,10 @@ KUSTOMIZE_VER := v4.5.4
KUSTOMIZE := $(abspath $(TOOLS_BIN_DIR)/$(KUSTOMIZE_BIN)-$(KUSTOMIZE_VER))
KUSTOMIZE_PKG := sigs.k8s.io/kustomize/kustomize/v4

GINGKO_VER := v1.16.5
GINGKO_VER := v2.1.4
GINKGO_BIN := ginkgo
GINKGO := $(abspath $(TOOLS_BIN_DIR)/$(GINKGO_BIN)-$(GINGKO_VER))
GINKGO_PKG := github.com/onsi/ginkgo/ginkgo
GINKGO_PKG := github.com/onsi/ginkgo/v2/ginkgo

SETUP_ENVTEST_VER := latest
SETUP_ENVTEST_BIN := setup-envtest
@@ -131,8 +131,7 @@ endif
SHELL = /usr/bin/env bash -o pipefail
.SHELLFLAGS = -ec

GINKGO_FOCUS ?= "\\[PR-Blocking\\]"
# GINKGO_FOCUS ?= "\\[health-remediation\\]"
LABEL_FILTERS ?= prblocker
GINKGO_SKIP ?=
GINKGO_NODES ?= 1
E2E_CONF_FILE ?= ${E2E_DIR}/config/nutanix.yaml
@@ -323,7 +322,9 @@ test-kubectl-workload: ## Run kubectl queries to get all capx workload related o
.PHONY: test-e2e
test-e2e: docker-build-e2e $(GINKGO_BIN) cluster-e2e-templates cluster-templates ## Run the end-to-end tests
mkdir -p $(ARTIFACTS)
$(GINKGO) -v -trace -tags=e2e -focus="$(GINKGO_FOCUS)" $(_SKIP_ARGS) -nodes=$(GINKGO_NODES) --noColor=$(GINKGO_NOCOLOR) $(GINKGO_ARGS) ./test/e2e -- \
$(GINKGO) -v --trace --tags=e2e --label-filter="$(LABEL_FILTERS)" --fail-fast $(_SKIP_ARGS) --nodes=$(GINKGO_NODES) \
--no-color=$(GINKGO_NOCOLOR) --output-dir="$(ARTIFACTS)" --junit-report="junit.e2e_suite.1.xml" \
$(GINKGO_ARGS) ./test/e2e -- \
-e2e.artifacts-folder="$(ARTIFACTS)" \
-e2e.config="$(E2E_CONF_FILE)" \
-e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) -e2e.use-existing-cluster=$(USE_EXISTING_CLUSTER)
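
The Makefile hunk above replaces Ginkgo v1's regex-based focus selection (GINKGO_FOCUS) with Ginkgo v2 label filtering (LABEL_FILTERS) and the long-form v2 flags (--trace, --label-filter, --no-color, --output-dir, --junit-report). For --label-filter="prblocker" to select anything, the e2e specs themselves must carry a matching Label decorator. Below is a minimal sketch of such a spec; the package name, spec text, and test body are assumptions for illustration and are not part of this commit.

package e2e

import (
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// Hypothetical spec: the Label("prblocker") decorator is what the Makefile's
// --label-filter="$(LABEL_FILTERS)" selects on, replacing v1 regex focus
// strings such as "\\[PR-Blocking\\]".
var _ = Describe("Workload cluster creation", Label("prblocker"), func() {
	It("creates a cluster with the default flavor", func() {
		// Test body elided; shown only to illustrate label-based selection.
		Expect(true).To(BeTrue())
	})
})
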
8 changes: 4 additions & 4 deletions api/v1alpha4/doc.go
@@ -14,9 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

//NOTE: https://pkg.go.dev/k8s.io/code-generator/cmd/conversion-gen
// NOTE: https://pkg.go.dev/k8s.io/code-generator/cmd/conversion-gen
// Package v1alpha4 contains API Schema definitions for the infrastructure v1alpha4 API group
//+kubebuilder:object:generate=true
//+groupName=infrastructure.cluster.x-k8s.io
//+k8s:conversion-gen=github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1beta1
// +kubebuilder:object:generate=true
// +groupName=infrastructure.cluster.x-k8s.io
// +k8s:conversion-gen=github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1beta1
package v1alpha4
4 changes: 2 additions & 2 deletions api/v1beta1/groupversion_info.go
@@ -15,8 +15,8 @@ limitations under the License.
*/

// Package v1beta1 contains API Schema definitions for the infrastructure v1beta1 API group
//+kubebuilder:object:generate=true
//+groupName=infrastructure.cluster.x-k8s.io
// +kubebuilder:object:generate=true
// +groupName=infrastructure.cluster.x-k8s.io
package v1beta1

import (
22 changes: 13 additions & 9 deletions controllers/nutanixcluster_controller.go
@@ -54,7 +54,7 @@ type NutanixClusterReconciler struct {
}

// SetupWithManager sets up the NutanixCluster controller with the Manager.
func (r *NutanixClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
func (r *NutanixClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error {

return ctrl.NewControllerManagedBy(mgr).
// Watch the controlled, infrastructure resource.
@@ -64,7 +64,11 @@ func (r *NutanixClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
&source.Kind{Type: &capiv1.Cluster{}},
handler.EnqueueRequestsFromMapFunc(
capiutil.ClusterToInfrastructureMapFunc(
infrav1.GroupVersion.WithKind("NutanixCluster"))),
ctx,
infrav1.GroupVersion.WithKind("NutanixCluster"),
mgr.GetClient(),
&infrav1.NutanixCluster{},
)),
).
Complete(r)
}
@@ -183,7 +187,7 @@ func (r *NutanixClusterReconciler) reconcileDelete(rctx *nctx.ClusterContext) (r

err = r.reconcileCredentialRefDelete(rctx.Context, rctx.NutanixCluster)
if err != nil {
klog.Errorf("%s error occurred while reconciling credential ref deletion for cluster %s: %v", rctx.LogPrefix, rctx.Cluster.ClusterName, err)
klog.Errorf("%s error occurred while reconciling credential ref deletion for cluster %s: %v", rctx.LogPrefix, rctx.Cluster.Name, err)
return reconcile.Result{}, err
}

@@ -267,7 +271,7 @@ func (r *NutanixClusterReconciler) reconcileCredentialRefDelete(ctx context.Cont
if credentialRef == nil {
return nil
}
klog.Infof("Credential ref is kind Secret for cluster %s. Continue with deletion of secret", nutanixCluster.ClusterName)
klog.Infof("Credential ref is kind Secret for cluster %s. Continue with deletion of secret", nutanixCluster.Name)
secret := &corev1.Secret{}
secretKey := client.ObjectKey{
Namespace: nutanixCluster.Namespace,
@@ -278,11 +282,11 @@ func (r *NutanixClusterReconciler) reconcileCredentialRefDelete(ctx context.Cont
return err
}
ctrlutil.RemoveFinalizer(secret, infrav1.NutanixClusterCredentialFinalizer)
klog.Infof("removing finalizers from secret %s in namespace %s for cluster %s", secret.Name, secret.Namespace, nutanixCluster.ClusterName)
klog.Infof("removing finalizers from secret %s in namespace %s for cluster %s", secret.Name, secret.Namespace, nutanixCluster.Name)
if err := r.Client.Update(ctx, secret); err != nil {
return err
}
klog.Infof("removing secret %s in namespace %s for cluster %s", secret.Name, secret.Namespace, nutanixCluster.ClusterName)
klog.Infof("removing secret %s in namespace %s for cluster %s", secret.Name, secret.Namespace, nutanixCluster.Name)
if err := r.Client.Delete(ctx, secret); err != nil {
return err
}
@@ -297,7 +301,7 @@ func (r *NutanixClusterReconciler) reconcileCredentialRef(ctx context.Context, n
if credentialRef == nil {
return nil
}
klog.Infof("Credential ref is kind Secret for cluster %s", nutanixCluster.ClusterName)
klog.Infof("Credential ref is kind Secret for cluster %s", nutanixCluster.Name)
secret := &corev1.Secret{}
secretKey := client.ObjectKey{
Namespace: nutanixCluster.Namespace,
@@ -311,7 +315,7 @@ func (r *NutanixClusterReconciler) reconcileCredentialRef(ctx context.Context, n
}
if !capiutil.IsOwnedByObject(secret, nutanixCluster) {
if len(secret.GetOwnerReferences()) > 0 {
return fmt.Errorf("secret for cluster %s already has other owners set", nutanixCluster.ClusterName)
return fmt.Errorf("secret for cluster %s already has other owners set", nutanixCluster.Name)
}
secret.SetOwnerReferences([]metav1.OwnerReference{{
APIVersion: infrav1.GroupVersion.String(),
@@ -325,7 +329,7 @@ func (r *NutanixClusterReconciler) reconcileCredentialRef(ctx context.Context, n
}
err = r.Client.Update(ctx, secret)
if err != nil {
errorMsg := fmt.Errorf("Failed to update secret for cluster %s: %v", nutanixCluster.ClusterName, err)
errorMsg := fmt.Errorf("Failed to update secret for cluster %s: %v", nutanixCluster.Name, err)
klog.Error(errorMsg)
return errorMsg
}
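
SetupWithManager now receives a context because the newer cluster-api ClusterToInfrastructureMapFunc takes a context, the target GroupVersionKind, a client, and the provider cluster object. Callers therefore need to thread a single context through controller setup and manager start. The sketch below shows how a main.go could wire this up; the Client/Scheme reconciler fields, manager options, and error handling are assumptions not shown in this diff, and ctrl.SetupSignalHandler() is called exactly once because a second call panics, which matches the fix noted in the commit message.

package main

import (
	"os"

	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/nutanix-cloud-native/cluster-api-provider-nutanix/controllers"
)

func main() {
	// Manager options elided for brevity.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		os.Exit(1)
	}

	// Call SetupSignalHandler once and share the resulting context between
	// controller setup and mgr.Start; calling it a second time panics.
	ctx := ctrl.SetupSignalHandler()

	clusterReconciler := &controllers.NutanixClusterReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}
	if err := clusterReconciler.SetupWithManager(ctx, mgr); err != nil {
		os.Exit(1)
	}

	if err := mgr.Start(ctx); err != nil {
		os.Exit(1)
	}
}
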
2 changes: 1 addition & 1 deletion controllers/nutanixcluster_controller_test.go
@@ -27,7 +27,7 @@ import (

infrav1 "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1beta1"

. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

30 changes: 24 additions & 6 deletions controllers/nutanixmachine_controller.go
@@ -63,14 +63,15 @@ type NutanixMachineReconciler struct {
}

// SetupWithManager sets up the controller with the Manager.
func (r *NutanixMachineReconciler) SetupWithManager(mgr ctrl.Manager) error {
func (r *NutanixMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&infrav1.NutanixMachine{}).
// Watch the CAPI resource that owns this infrastructure resource.
Watches(
&source.Kind{Type: &capiv1.Machine{}},
handler.EnqueueRequestsFromMapFunc(
capiutil.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("NutanixMachine"))),
capiutil.MachineToInfrastructureMapFunc(
infrav1.GroupVersion.WithKind("NutanixMachine"))),
).
Complete(r)
}
@@ -316,7 +317,7 @@ func (r *NutanixMachineReconciler) reconcileNormal(rctx *nctx.MachineContext) (r
klog.Infof("%s Added the spec.bootstrapRef to NutanixMachine object: %v", rctx.LogPrefix, rctx.NutanixMachine.Spec.BootstrapRef)
}

// Create the or get existing VM
// Create or get existing VM
vm, err := r.getOrCreateVM(rctx)
if err != nil {
klog.Errorf("%s Failed to create VM %s.", rctx.LogPrefix, rctx.NutanixMachine.Name)
@@ -432,10 +433,14 @@ func (r *NutanixMachineReconciler) getOrCreateVM(rctx *nctx.MachineContext) (*nu
klog.Errorf("%s error occurred finding VM %s by name or uuid %s: %v", rctx.LogPrefix, vmName, err)
return nil, err
}

if vm != nil {
// VM exists case
klog.Infof("%s vm %s found with UUID %s", rctx.LogPrefix, *vm.Spec.Name, rctx.NutanixMachine.Status.VmUUID)
} else {
// VM Does not exist case
klog.Infof("%s No existing VM found. Starting creation process of VM %s.", rctx.LogPrefix, vmName)

// Get PE UUID
peUUID, err := getPEUUID(client, rctx.NutanixMachine.Spec.Cluster.Name, rctx.NutanixMachine.Spec.Cluster.UUID)
if err != nil {
@@ -444,6 +449,7 @@ func (r *NutanixMachineReconciler) getOrCreateVM(rctx *nctx.MachineContext) (*nu
klog.Errorf("%s %v", rctx.LogPrefix, errorMsg)
return nil, err
}

// Get Subnet UUIDs
subnetUUIDs, err := getSubnetUUIDList(client, rctx.NutanixMachine.Spec.Subnets, peUUID)
if err != nil {
@@ -452,6 +458,7 @@ func (r *NutanixMachineReconciler) getOrCreateVM(rctx *nctx.MachineContext) (*nu
rctx.SetFailureStatus(capierrors.CreateMachineError, errorMsg)
return nil, err
}

// Get Image UUID
imageUUID, err := getImageUUID(
client,
@@ -464,22 +471,24 @@ func (r *NutanixMachineReconciler) getOrCreateVM(rctx *nctx.MachineContext) (*nu
rctx.SetFailureStatus(capierrors.CreateMachineError, errorMsg)
return nil, err
}

// Get the bootstrapData from the referenced secret
bootstrapData, err := r.getBootstrapData(rctx)
if err != nil {
klog.Errorf("%s Failed to get the bootstrap data for create the VM %s. %v", rctx.LogPrefix, vmName, err)
klog.Errorf("%s Failed to get the bootstrap data to create the VM %s. %v", rctx.LogPrefix, vmName, err)
return nil, err
}
// Encode the bootstrapData by base64
bsdataEncoded := base64.StdEncoding.EncodeToString(bootstrapData)
klog.Infof("%s Retrieved the bootstrap data from secret %s (before encoding size: %d, encoded string size:%d)",
rctx.LogPrefix, rctx.NutanixMachine.Spec.BootstrapRef.Name, len(bootstrapData), len(bsdataEncoded))

// Create the VM
klog.Infof("%s To create VM with name %s for cluster %s.", rctx.LogPrefix,
klog.Infof("%s Creating VM with name %s for cluster %s.", rctx.LogPrefix,
rctx.NutanixMachine.Name, rctx.NutanixCluster.Name)

vmInput := nutanixClientV3.VMIntentInput{}
vmSpec := nutanixClientV3.VM{Name: utils.StringPtr(vmName)}

nicList := []*nutanixClientV3.VMNic{}
for _, subnetUUID := range subnetUUIDs {
nicList = append(nicList, &nutanixClientV3.VMNic{
@@ -488,6 +497,8 @@ func (r *NutanixMachineReconciler) getOrCreateVM(rctx *nctx.MachineContext) (*nu
Kind: utils.StringPtr("subnet"),
}})
}

// Create Disk Spec for the system disk, to be set later in the VM Spec
diskSize := rctx.NutanixMachine.Spec.SystemDiskSize
diskSizeMib := getMibValueOfQuantity(diskSize)
systemDisk, err := createSystemDiskSpec(imageUUID, diskSizeMib)
@@ -499,6 +510,8 @@ func (r *NutanixMachineReconciler) getOrCreateVM(rctx *nctx.MachineContext) (*nu
diskList := []*nutanixClientV3.VMDisk{
systemDisk,
}

// Set Categories in VM Spec before creating VM
categories, err := getCategoryVMSpec(client, r.getMachineCategoryIdentifiers(rctx))
if err != nil {
errorMsg := fmt.Errorf("error occurred while creating category spec for vm %s: %v", vmName, err)
@@ -512,6 +525,8 @@ func (r *NutanixMachineReconciler) getOrCreateVM(rctx *nctx.MachineContext) (*nu
}

vmMetadataPtr := &vmMetadata

// Set Project in VM Spec before creating VM
err = r.addVMToProject(rctx, vmMetadataPtr)
if err != nil {
errorMsg := fmt.Errorf("error occurred while trying to add VM %s to project: %v", vmName, err)
@@ -537,6 +552,8 @@ func (r *NutanixMachineReconciler) getOrCreateVM(rctx *nctx.MachineContext) (*nu
UUID: utils.StringPtr(peUUID),
}
vmSpecPtr := &vmSpec

// Set BootType in VM Spec before creating VM
err = r.addBootTypeToVM(rctx, vmSpecPtr)
if err != nil {
errorMsg := fmt.Errorf("error occurred while adding boot type to vm spec: %v", err)
@@ -547,6 +564,7 @@ func (r *NutanixMachineReconciler) getOrCreateVM(rctx *nctx.MachineContext) (*nu
vmInput.Spec = vmSpecPtr
vmInput.Metadata = vmMetadataPtr

// Create the actual VM/Machine
vmResponse, err := client.V3.CreateVM(&vmInput)
if err != nil {
errorMsg := fmt.Errorf("Failed to create VM %s. error: %v", vmName, err)
2 changes: 1 addition & 1 deletion controllers/nutanixmachine_controller_test.go
@@ -26,7 +26,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"

infrav1 "github.com/nutanix-cloud-native/cluster-api-provider-nutanix/api/v1beta1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

9 changes: 3 additions & 6 deletions controllers/suite_test.go
@@ -20,12 +20,11 @@ import (
"path/filepath"
"testing"

. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"

@@ -42,9 +41,7 @@ var testEnv *envtest.Environment
func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)

RunSpecsWithDefaultAndCustomReporters(t,
"Controller Suite",
[]Reporter{printer.NewlineReporter{}})
RunSpecs(t, "capx-e2e")
}

var _ = BeforeSuite(func() {
@@ -72,7 +69,7 @@ var _ = BeforeSuite(func() {
Expect(err).NotTo(HaveOccurred())
Expect(k8sClient).NotTo(BeNil())

}, 60)
})

var _ = AfterSuite(func() {
By("tearing down the test environment")
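
Ginkgo v2 removes the v1 custom-reporter hooks, so RunSpecsWithDefaultAndCustomReporters and the printer.NewlineReporter import go away, the suite boots with plain RunSpecs, and BeforeSuite no longer accepts a timeout argument. JUnit output now comes from the --output-dir and --junit-report flags added to the Makefile. If a suite preferred to emit the report programmatically instead, Ginkgo v2 offers ReportAfterSuite; the sketch below (package name, suite name, and output path included) is an assumed illustration, not part of this commit.

package e2e

import (
	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/reporters"
)

// Hypothetical alternative to the --junit-report CLI flag: write the JUnit
// file from inside the suite once all specs have finished.
var _ = ReportAfterSuite("capx e2e junit report", func(report Report) {
	// The output path is illustrative; the Makefile in this commit writes
	// junit.e2e_suite.1.xml into $(ARTIFACTS) via CLI flags instead.
	_ = reporters.GenerateJUnitReport(report, "junit.e2e_suite.1.xml")
})
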