diff --git a/PROJECT b/PROJECT index a3ee540b0..5bd38683a 100644 --- a/PROJECT +++ b/PROJECT @@ -35,4 +35,11 @@ resources: kind: NSXServiceAccount path: github.com/vmware-tanzu/nsx-operator/pkg/api/v1alpha1 version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + domain: nsx.vmware.com + kind: IPPool + path: github.com/vmware-tanzu/nsx-operator/pkg/api/v1alpha2 + version: v1alpha2 version: "3" diff --git a/build/yaml/crd/nsx.vmware.com_ippools.yaml b/build/yaml/crd/nsx.vmware.com_ippools.yaml index 36f4542e1..c6c5c6cf5 100644 --- a/build/yaml/crd/nsx.vmware.com_ippools.yaml +++ b/build/yaml/crd/nsx.vmware.com_ippools.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.7.0 + controller-gen.kubebuilder.io/version: v0.11.0 creationTimestamp: null name: ippools.nsx.vmware.com spec: @@ -15,34 +15,10 @@ spec: singular: ippool scope: Namespaced versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - spec: - properties: - subnets: - items: - properties: - ipFamily: - pattern: ^ipv(4|6)$ - type: string - name: - type: string - prefixLength: - minimum: 1 - type: integer - type: object - type: array - type: object - type: object - x-kubernetes-preserve-unknown-fields: true - served: true - storage: false - name: v1alpha2 schema: openAPIV3Schema: - description: IPPool is the Schema for the ippools API + description: IPPool is the Schema for the ippools API. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -152,9 +128,3 @@ spec: storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: ["v1alpha2"] \ No newline at end of file diff --git a/build/yaml/crd/nsx.vmware.com_subnets.yaml b/build/yaml/crd/nsx.vmware.com_subnets.yaml index 6c0d0878f..72fbfa366 100644 --- a/build/yaml/crd/nsx.vmware.com_subnets.yaml +++ b/build/yaml/crd/nsx.vmware.com_subnets.yaml @@ -33,8 +33,7 @@ spec: metadata: type: object spec: - description: SubnetSpec defines the desired state of Subnet. TODO Add - validate webhook or CEL(k8s 1.25+) for immutable fields(IPv4SubnetSize/AccessMode/IPAddresses/DHCPConfig). + description: SubnetSpec defines the desired state of Subnet. properties: DHCPConfig: description: DHCPConfig DHCP configuration. diff --git a/build/yaml/crd/nsx.vmware.com_subnetsets.yaml b/build/yaml/crd/nsx.vmware.com_subnetsets.yaml index bd958ff3d..9ec510180 100644 --- a/build/yaml/crd/nsx.vmware.com_subnetsets.yaml +++ b/build/yaml/crd/nsx.vmware.com_subnetsets.yaml @@ -33,8 +33,7 @@ spec: metadata: type: object spec: - description: SubnetSetSpec defines the desired state of SubnetSet. TODO - Add validate webhook or CEL(k8s 1.25+) for immutable fields(IPv4SubnetSize/AccessMode/DHCPConfig). + description: SubnetSetSpec defines the desired state of SubnetSet. properties: DHCPConfig: description: DHCPConfig DHCP configuration. 
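With these manifest changes, v1alpha1 is no longer served and v1alpha2 becomes the only served and storage version of the IPPool CRD. For orientation, a minimal v1alpha2 IPPool object matching the new schema could be constructed as below; the field names come from the API types exercised later in this patch, while the object and subnet names are placeholders:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha2"
)

func main() {
	// A hypothetical IPPool CR requesting one /24 subnet.
	pool := &v1alpha2.IPPool{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "example-pool", // placeholder name
			Namespace: "default",
		},
		Spec: v1alpha2.IPPoolSpec{
			// "private" and "public" are the pool types recognized by the
			// operator (common.IPPoolTypePrivate / common.IPPoolTypePublic).
			Type: "private",
			Subnets: []v1alpha2.SubnetRequest{
				{Name: "subnet1", PrefixLength: 24},
			},
		},
	}
	fmt.Printf("%+v\n", pool.Spec)
}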
diff --git a/cmd/main.go b/cmd/main.go index 8e028250c..fe7112bcc 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -15,8 +15,10 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha1" + "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha2" "github.com/vmware-tanzu/nsx-operator/pkg/config" commonctl "github.com/vmware-tanzu/nsx-operator/pkg/controllers/common" + ippool2 "github.com/vmware-tanzu/nsx-operator/pkg/controllers/ippool" namespacecontroller "github.com/vmware-tanzu/nsx-operator/pkg/controllers/namespace" nsxserviceaccountcontroller "github.com/vmware-tanzu/nsx-operator/pkg/controllers/nsxserviceaccount" securitypolicycontroller "github.com/vmware-tanzu/nsx-operator/pkg/controllers/securitypolicy" @@ -29,6 +31,7 @@ import ( "github.com/vmware-tanzu/nsx-operator/pkg/metrics" "github.com/vmware-tanzu/nsx-operator/pkg/nsx" "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/common" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/ippool" "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/nsxserviceaccount" "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/securitypolicy" "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/vpc" @@ -45,6 +48,7 @@ func init() { var err error utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(v1alpha1.AddToScheme(scheme)) + utilruntime.Must(v1alpha2.AddToScheme(scheme)) config.AddFlags() logf.SetLogger(logger.ZapLogger()) @@ -105,6 +109,31 @@ func StartNSXServiceAccountController(mgr ctrl.Manager, commonService common.Ser } } +func StartIPPoolController(mgr ctrl.Manager, commonService common.Service) { + ippoolReconcile := &ippool2.IPPoolReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + } + if ipPoolService, err := ippool.InitializeIPPool(commonService); err != nil { + log.Error(err, "failed to initialize ippool commonService", "controller", "IPPool") + os.Exit(1) + } else { + ippoolReconcile.Service = ipPoolService + } + + // TODO: remove this after vpc is ready + if vpcService, err := vpc.InitializeVPC(commonService); err != nil { + log.Error(err, "failed to initialize vpc commonService", "controller", "vpc") + os.Exit(1) + } else { + commonctl.ServiceMediator.VPCService = vpcService + } + if err := ippoolReconcile.Start(mgr); err != nil { + log.Error(err, "failed to create controller", "controller", "IPPool") + os.Exit(1) + } +} + func StartVPCController(mgr ctrl.Manager, commonService common.Service) { vpcReconciler := &vpccontroller.VPCReconciler{ Client: mgr.GetClient(), @@ -182,8 +211,10 @@ func main() { StartNamespaceController(mgr, commonService) StartVPCController(mgr, commonService) + StartIPPoolController(mgr, commonService) } - // Start the security policy controller, it supports VPC and non VPC mode + + // Start the security policy controller. StartSecurityPolicyController(mgr, commonService) // Start the NSXServiceAccount controller. 
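Because main registers both v1alpha1 and v1alpha2 on the manager's scheme, any client built from that scheme can work with the new IPPool API alongside the existing resources. A minimal sketch of reading an IPPool's realized subnets from outside the operator, assuming a reachable cluster and placeholder object names:

package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha2"
)

func main() {
	scheme := runtime.NewScheme()
	// Same registration step that cmd/main.go performs in init().
	if err := v1alpha2.AddToScheme(scheme); err != nil {
		panic(err)
	}
	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}
	pool := &v1alpha2.IPPool{}
	// "ns-1"/"example-pool" are placeholders for illustration.
	key := types.NamespacedName{Namespace: "ns-1", Name: "example-pool"}
	if err := c.Get(context.TODO(), key, pool); err != nil {
		panic(err)
	}
	// Status.Subnets carries the realized CIDR for each subnet request.
	fmt.Println(pool.Status.Subnets)
}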
diff --git a/go.mod b/go.mod index 5843c7b34..6905c7bcf 100644 --- a/go.mod +++ b/go.mod @@ -79,7 +79,7 @@ require ( github.com/stretchr/objx v0.4.0 // indirect go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect - golang.org/x/mod v0.11.0 // indirect + golang.org/x/mod v0.6.0 // indirect golang.org/x/net v0.5.0 // indirect golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect golang.org/x/sys v0.4.0 // indirect diff --git a/go.sum b/go.sum index 3ea6bbf5e..71f88f646 100644 --- a/go.sum +++ b/go.sum @@ -373,8 +373,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= -golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index 583b8380c..a39156ae3 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,2 +1,2 @@ -/* Copyright © 2022 VMware, Inc. All Rights Reserved. +/* Copyright © 2023 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ diff --git a/pkg/apis/nsx.vmware.com/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/nsx.vmware.com/v1alpha1/zz_generated.deepcopy.go index 395a95186..2fa1d9ec7 100644 --- a/pkg/apis/nsx.vmware.com/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/nsx.vmware.com/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -/* Copyright © 2022 VMware, Inc. All Rights Reserved. +/* Copyright © 2023 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ // Code generated by controller-gen. DO NOT EDIT. diff --git a/pkg/apis/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/v1alpha1/zz_generated.deepcopy.go index 395a95186..2fa1d9ec7 100644 --- a/pkg/apis/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -/* Copyright © 2022 VMware, Inc. All Rights Reserved. +/* Copyright © 2023 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ // Code generated by controller-gen. DO NOT EDIT. diff --git a/pkg/apis/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/v1alpha2/zz_generated.deepcopy.go index 7b99fda69..7dadeb757 100644 --- a/pkg/apis/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/v1alpha2/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -/* Copyright © 2022 VMware, Inc. All Rights Reserved. +/* Copyright © 2023 VMware, Inc. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ // Code generated by controller-gen. DO NOT EDIT. 
diff --git a/pkg/controllers/common/types.go b/pkg/controllers/common/types.go index d7c9b7f2f..71e7311eb 100644 --- a/pkg/controllers/common/types.go +++ b/pkg/controllers/common/types.go @@ -10,6 +10,7 @@ import ( const ( MetricResTypeSecurityPolicy = "securitypolicy" + MetricResTypeIPPool = "ippool" MetricResTypeNSXServiceAccount = "nsxserviceaccount" MetricResTypeSubnetPort = "subnetport" MetricResTypeStaticRoute = "staticroute" diff --git a/pkg/controllers/ippool/ippool_controller.go b/pkg/controllers/ippool/ippool_controller.go new file mode 100644 index 000000000..4bc213872 --- /dev/null +++ b/pkg/controllers/ippool/ippool_controller.go @@ -0,0 +1,258 @@ +/* Copyright © 2023 VMware, Inc. All Rights Reserved. + SPDX-License-Identifier: Apache-2.0 */ + +package ippool + +import ( + "context" + "fmt" + "runtime" + "time" + + v1 "k8s.io/api/core/v1" + apimachineryruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha1" + "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha2" + "github.com/vmware-tanzu/nsx-operator/pkg/controllers/common" + "github.com/vmware-tanzu/nsx-operator/pkg/logger" + "github.com/vmware-tanzu/nsx-operator/pkg/metrics" + servicecommon "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/common" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/ippool" +) + +var ( + log = logger.Log + resultNormal = common.ResultNormal + resultRequeue = common.ResultRequeue + MetricResType = common.MetricResTypeIPPool +) + +// IPPoolReconciler reconciles an IPPool object +type IPPoolReconciler struct { + client.Client + Scheme *apimachineryruntime.Scheme + Service *ippool.IPPoolService +} + +func deleteSuccess(r *IPPoolReconciler, _ *context.Context, _ *v1alpha2.IPPool) { + metrics.CounterInc(r.Service.NSXConfig, metrics.ControllerDeleteSuccessTotal, MetricResType) +} + +func deleteFail(r *IPPoolReconciler, c *context.Context, o *v1alpha2.IPPool, e *error) { + r.setReadyStatusFalse(c, o, e) + metrics.CounterInc(r.Service.NSXConfig, metrics.ControllerDeleteFailTotal, MetricResType) +} + +func updateSuccess(r *IPPoolReconciler, c *context.Context, o *v1alpha2.IPPool) { + r.setReadyStatusTrue(c, o) + metrics.CounterInc(r.Service.NSXConfig, metrics.ControllerUpdateSuccessTotal, MetricResType) +} + +func updateFail(r *IPPoolReconciler, c *context.Context, o *v1alpha2.IPPool, e *error) { + r.setReadyStatusFalse(c, o, e) + metrics.CounterInc(r.Service.NSXConfig, metrics.ControllerUpdateFailTotal, MetricResType) +} + +func (r *IPPoolReconciler) setReadyStatusFalse(ctx *context.Context, ippool *v1alpha2.IPPool, err *error) { + conditions := []v1alpha1.Condition{ + { + Type: v1alpha1.Ready, + Status: v1.ConditionFalse, + Message: "NSX IPPool could not be created/updated/deleted", + Reason: fmt.Sprintf( + "error occurred while processing the IPPool CR. 
Error: %v", + *err, + ), + }, + } + ippool.Status.Conditions = conditions + if ippool.Status.Subnets == nil { + ippool.Status.Subnets = make([]v1alpha2.SubnetResult, 0) + } + e := r.Client.Status().Update(*ctx, ippool) + if e != nil { + log.Error(e, "unable to update IPPool status", "ippool", ippool) + } +} + +func (r *IPPoolReconciler) setReadyStatusTrue(ctx *context.Context, ippool *v1alpha2.IPPool) { + conditions := []v1alpha1.Condition{ + { + Type: v1alpha1.Ready, + Status: v1.ConditionTrue, + Message: "NSX IPPool has been successfully created/updated", + Reason: "", + }, + } + ippool.Status.Conditions = conditions + e := r.Client.Status().Update(*ctx, ippool) + if e != nil { + log.Error(e, "unable to update IPPool status", "ippool", ippool) + } +} + +func (r *IPPoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + obj := &v1alpha2.IPPool{} + log.Info("reconciling ippool CR", "ippool", req.NamespacedName) + metrics.CounterInc(r.Service.NSXConfig, metrics.ControllerSyncTotal, MetricResType) + if err := r.Client.Get(ctx, req.NamespacedName, obj); err != nil { + log.Error(err, "unable to fetch ippool CR", "req", req.NamespacedName) + return resultNormal, client.IgnoreNotFound(err) + } + + // TODO: As we do not have base controller in Go, we need to take care of NSX exceptions in each controller separately. + //I agree we should not do infinite retry for all errors, but it's ok to add error handling in a following patch + + // TODO: Since only the cloud provider creates it, we can take all the validation logic into consideration later. + + // TODO: add webhook to disallow user update prefixLength + + // TODO: Tao's suggestions: Should we consider some Orphan subnets may exist? + + // TODO: Xiaopei's suggestions: is there possibility that IPPool was deleted from nsx store but NSX block subnet was not deleted? 
+ + // TODO: get default mode from NS NetworkConfig CR + + if obj.ObjectMeta.DeletionTimestamp.IsZero() { + metrics.CounterInc(r.Service.NSXConfig, metrics.ControllerUpdateTotal, MetricResType) + if !controllerutil.ContainsFinalizer(obj, servicecommon.IPPoolFinalizerName) { + controllerutil.AddFinalizer(obj, servicecommon.IPPoolFinalizerName) + if err := r.Client.Update(ctx, obj); err != nil { + log.Error(err, "failed to add finalizer on ippool CR", "ippool", req.NamespacedName) + updateFail(r, &ctx, obj, &err) + return resultRequeue, err + } + log.V(1).Info("added finalizer on ippool CR", "ippool", req.NamespacedName) + } + + subnetCidrUpdated, ipPoolSubnetsUpdated, err := r.Service.CreateOrUpdateIPPool(obj) + if err != nil { + log.Error(err, "create or update failed, will retry exponentially", "ippool", req.NamespacedName) + updateFail(r, &ctx, obj, &err) + return resultRequeue, err + } + if !r.Service.FullyRealized(obj) { + if len(obj.Spec.Subnets) == 0 { + updateSuccess(r, &ctx, obj) + return resultNormal, nil + } + if subnetCidrUpdated || ipPoolSubnetsUpdated { + err := fmt.Errorf("some subnets are unrealized, will retry exponentially") + updateFail(r, &ctx, obj, &err) + log.Info("successfully reconciled ippool CR, but requeued it since some subnets are unrealized", "subnets", + r.Service.GetUnrealizedSubnetNames(obj)) + return resultRequeue, nil + } + } else { + if subnetCidrUpdated || ipPoolSubnetsUpdated || len(obj.Spec.Subnets) == 0 { + updateSuccess(r, &ctx, obj) + log.Info("successfully reconciled ippool CR; all subnets are fully realized", "ippool", obj) + } else { + log.Info("already fully realized and resources unchanged, skipping update", "obj", obj) + } + } + } else { + if controllerutil.ContainsFinalizer(obj, servicecommon.IPPoolFinalizerName) { + metrics.CounterInc(r.Service.NSXConfig, metrics.ControllerDeleteTotal, MetricResType) + if err := r.Service.DeleteIPPool(obj); err != nil { + log.Error(err, "deletion failed, will retry exponentially", "ippool", req.NamespacedName) + deleteFail(r, &ctx, obj, &err) + return resultRequeue, err + } + controllerutil.RemoveFinalizer(obj, servicecommon.IPPoolFinalizerName) + if err := r.Client.Update(ctx, obj); err != nil { + log.Error(err, "failed to remove finalizer, will retry exponentially", "ippool", req.NamespacedName) + deleteFail(r, &ctx, obj, &err) + return resultRequeue, err + } + log.V(1).Info("removed finalizer on ippool CR", "ippool", req.NamespacedName) + deleteSuccess(r, &ctx, obj) + log.Info("successfully deleted ippool CR and all subnets", "ippool", obj) + } else { + // only log a message, since this is not a normal case + log.Info("ippool CR is being deleted but its finalizers cannot be recognized", "ippool", req.NamespacedName) + } + } + return resultNormal, nil +} + +func (r *IPPoolReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha2.IPPool{}). + WithEventFilter(predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + // Ignore status-only updates, where metadata.Generation does not change + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Suppress Delete events; deletion is handled in Reconcile via the finalizer + return false + }, + }). + WithOptions( + controller.Options{ + MaxConcurrentReconciles: runtime.NumCPU(), + }). 
+ Complete(r) +} + +// Start sets up the manager and launches the garbage collector. +func (r *IPPoolReconciler) Start(mgr ctrl.Manager) error { + err := r.SetupWithManager(mgr) + if err != nil { + return err + } + go r.IPPoolGarbageCollector(make(chan bool), servicecommon.GCInterval) + return nil +} + +// IPPoolGarbageCollector cleans up NSX IP pools whose IPPool CR has been removed. +// cancel is used to break the loop during unit tests. +func (r *IPPoolReconciler) IPPoolGarbageCollector(cancel chan bool, timeout time.Duration) { + ctx := context.Background() + log.Info("ippool garbage collector started") + for { + select { + case <-cancel: + return + case <-time.After(timeout): + } + nsxIPPoolSet := r.Service.ListIPPoolID() + if len(nsxIPPoolSet) == 0 { + continue + } + ipPoolList := &v1alpha2.IPPoolList{} + err := r.Client.List(ctx, ipPoolList) + if err != nil { + log.Error(err, "failed to list ip pool CR") + continue + } + + CRIPPoolSet := sets.NewString() + for _, ipp := range ipPoolList.Items { + CRIPPoolSet.Insert(string(ipp.UID)) + } + + log.V(2).Info("ippool garbage collector", "nsxIPPoolSet", nsxIPPoolSet, "CRIPPoolSet", CRIPPoolSet) + + for elem := range nsxIPPoolSet { + if CRIPPoolSet.Has(elem) { + continue + } + log.Info("GC collected stale ip pool", "UID", elem) + err = r.Service.DeleteIPPool(types.UID(elem)) + if err != nil { + log.Error(err, "failed to delete ip pool", "UID", elem) + } + } + } +} diff --git a/pkg/controllers/ippool/ippool_controller_test.go b/pkg/controllers/ippool/ippool_controller_test.go new file mode 100644 index 000000000..3cec9bd88 --- /dev/null +++ b/pkg/controllers/ippool/ippool_controller_test.go @@ -0,0 +1,302 @@ +/* Copyright © 2023 VMware, Inc. All Rights Reserved. + SPDX-License-Identifier: Apache-2.0 */ + +package ippool + +import ( + "context" + "errors" + "reflect" + "testing" + "time" + + "github.com/agiledragon/gomonkey/v2" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha1" + "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha2" + "github.com/vmware-tanzu/nsx-operator/pkg/config" + mock_client "github.com/vmware-tanzu/nsx-operator/pkg/mock/controller-runtime/client" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx" + _ "github.com/vmware-tanzu/nsx-operator/pkg/nsx/ratelimiter" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/common" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/ippool" +) + +func NewFakeIPPoolReconciler() *IPPoolReconciler { + return &IPPoolReconciler{ + Client: fake.NewClientBuilder().Build(), + Scheme: fake.NewClientBuilder().Build().Scheme(), + Service: nil, + } +} + +func TestIPPoolController_setReadyStatusTrue(t *testing.T) { + r := NewFakeIPPoolReconciler() + ctx := context.TODO() + dummyIPPool := &v1alpha2.IPPool{} + + // Case: the Ready condition is set on the IPPool CR + newConditions := []v1alpha1.Condition{ + { + Type: v1alpha1.Ready, + Status: v1.ConditionTrue, + Message: "NSX IPPool has been successfully created/updated", + Reason: "", + }, + } + r.setReadyStatusTrue(&ctx, dummyIPPool) + + if !reflect.DeepEqual(dummyIPPool.Status.Conditions, newConditions) { + t.Fatalf("setReadyStatusTrue did not set the expected Ready condition") + } +} + +type 
fakeStatusWriter struct { +} + +func (writer fakeStatusWriter) Create(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error { + return nil +} +func (writer fakeStatusWriter) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + return nil +} +func (writer fakeStatusWriter) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error { + return nil +} +func TestIPPoolReconciler_Reconcile(t *testing.T) { + + mockCtl := gomock.NewController(t) + k8sClient := mock_client.NewMockClient(mockCtl) + + service := &ippool.IPPoolService{ + Service: common.Service{ + NSXClient: &nsx.Client{}, + + NSXConfig: &config.NSXOperatorConfig{ + NsxConfig: &config.NsxConfig{ + EnforcementPoint: "vmc-enforcementpoint", + }, + }, + }, + } + service.NSXConfig.CoeConfig = &config.CoeConfig{} + service.NSXConfig.Cluster = "k8s_cluster" + r := &IPPoolReconciler{ + Client: k8sClient, + Scheme: nil, + Service: service, + } + ctx := context.Background() + req := controllerruntime.Request{NamespacedName: types.NamespacedName{Namespace: "dummy", Name: "dummy"}} + + // not found + errNotFound := errors.New("not found") + k8sClient.EXPECT().Get(ctx, gomock.Any(), gomock.Any()).Return(errNotFound) + _, err := r.Reconcile(ctx, req) + assert.Equal(t, err, errNotFound) + + // DeletionTimestamp.IsZero = true, client update failed + sp := &v1alpha2.IPPool{} + k8sClient.EXPECT().Get(ctx, gomock.Any(), sp).Return(nil) + err = errors.New("Update failed") + k8sClient.EXPECT().Update(ctx, gomock.Any(), gomock.Any()).Return(err) + fakewriter := fakeStatusWriter{} + k8sClient.EXPECT().Status().Return(fakewriter) + _, ret := r.Reconcile(ctx, req) + assert.Equal(t, err, ret) + + // DeletionTimestamp.IsZero = false, Finalizers do not include the IPPool finalizer + k8sClient.EXPECT().Get(ctx, gomock.Any(), sp).Return(nil).Do(func(_ context.Context, _ client.ObjectKey, obj client.Object, option ...client.GetOption) error { + v1sp := obj.(*v1alpha2.IPPool) + time := metav1.Now() + v1sp.ObjectMeta.DeletionTimestamp = &time + return nil + }) + + patch := gomonkey.ApplyMethod(reflect.TypeOf(service), "DeleteIPPool", func(_ *ippool.IPPoolService, uid interface{}) error { + assert.FailNow(t, "should not be called") + return nil + }) + + k8sClient.EXPECT().Update(ctx, gomock.Any(), gomock.Any()).Return(nil) + _, ret = r.Reconcile(ctx, req) + assert.Equal(t, ret, nil) + patch.Reset() + + // DeletionTimestamp.IsZero = false, Finalizers include the IPPool finalizer + k8sClient.EXPECT().Get(ctx, gomock.Any(), sp).Return(nil).Do(func(_ context.Context, _ client.ObjectKey, obj client.Object, option ...client.GetOption) error { + v1sp := obj.(*v1alpha2.IPPool) + time := metav1.Now() + v1sp.ObjectMeta.DeletionTimestamp = &time + v1sp.Finalizers = []string{common.IPPoolFinalizerName} + return nil + }) + patch = gomonkey.ApplyMethod(reflect.TypeOf(service), "DeleteIPPool", func(_ *ippool.IPPoolService, uid interface{}) error { + return nil + }) + _, ret = r.Reconcile(ctx, req) + assert.Equal(t, ret, nil) + patch.Reset() + + // DeletionTimestamp.IsZero = false, Finalizers include the IPPool finalizer, DeleteIPPool fails + k8sClient.EXPECT().Get(ctx, gomock.Any(), sp).Return(nil).Do(func(_ context.Context, _ client.ObjectKey, obj client.Object, option ...client.GetOption) error { + v1sp := obj.(*v1alpha2.IPPool) + time := metav1.Now() + v1sp.ObjectMeta.DeletionTimestamp = &time + v1sp.Finalizers = 
[]string{common.IPPoolFinalizerName} + return nil + }) + patch = gomonkey.ApplyMethod(reflect.TypeOf(service), "DeleteIPPool", func(_ *ippool.IPPoolService, + uid interface{}) error { + return errors.New("delete failed") + }) + + k8sClient.EXPECT().Status().Times(2).Return(fakewriter) + _, ret = r.Reconcile(ctx, req) + assert.NotEqual(t, ret, nil) + patch.Reset() + + // DeletionTimestamp.IsZero = true, Finalizers include the IPPool finalizer, CreateOrUpdateIPPool fails + k8sClient.EXPECT().Get(ctx, gomock.Any(), sp).Return(nil).Do(func(_ context.Context, _ client.ObjectKey, obj client.Object, option ...client.GetOption) error { + v1sp := obj.(*v1alpha2.IPPool) + v1sp.ObjectMeta.DeletionTimestamp = nil + v1sp.Finalizers = []string{common.IPPoolFinalizerName} + return nil + }) + + patch = gomonkey.ApplyMethod(reflect.TypeOf(service), "CreateOrUpdateIPPool", func(_ *ippool.IPPoolService, + obj *v1alpha2.IPPool) (bool, bool, error) { + return false, false, errors.New("create failed") + }) + _, ret = r.Reconcile(ctx, req) + assert.NotEqual(t, ret, nil) + patch.Reset() + + // DeletionTimestamp.IsZero = true, Finalizers include the IPPool finalizer, CreateOrUpdateIPPool succeeds + k8sClient.EXPECT().Get(ctx, gomock.Any(), sp).Return(nil).Do(func(_ context.Context, _ client.ObjectKey, obj client.Object, option ...client.GetOption) error { + v1sp := obj.(*v1alpha2.IPPool) + v1sp.ObjectMeta.DeletionTimestamp = nil + v1sp.Finalizers = []string{common.IPPoolFinalizerName} + return nil + }) + + patch = gomonkey.ApplyMethod(reflect.TypeOf(service), "CreateOrUpdateIPPool", func(_ *ippool.IPPoolService, + obj *v1alpha2.IPPool) (bool, bool, error) { + return false, false, nil + }) + k8sClient.EXPECT().Status().Times(1).Return(fakewriter) + _, ret = r.Reconcile(ctx, req) + assert.Equal(t, ret, nil) + patch.Reset() +} + +func TestReconciler_GarbageCollector(t *testing.T) { + // GC collects item "2345"; the local store has more items than the k8s cache + service := &ippool.IPPoolService{ + Service: common.Service{ + NSXConfig: &config.NSXOperatorConfig{ + NsxConfig: &config.NsxConfig{ + EnforcementPoint: "vmc-enforcementpoint", + }, + }, + }, + } + patch := gomonkey.ApplyMethod(reflect.TypeOf(service), "ListIPPoolID", func(_ *ippool.IPPoolService) sets.String { + a := sets.NewString() + a.Insert("1234") + a.Insert("2345") + return a + }) + patch.ApplyMethod(reflect.TypeOf(service), "DeleteIPPool", func(_ *ippool.IPPoolService, UID interface{}) error { + return nil + }) + cancel := make(chan bool) + defer patch.Reset() + mockCtl := gomock.NewController(t) + k8sClient := mock_client.NewMockClient(mockCtl) + + r := &IPPoolReconciler{ + Client: k8sClient, + Scheme: nil, + Service: service, + } + ctx := context.Background() + policyList := &v1alpha2.IPPoolList{} + k8sClient.EXPECT().List(gomock.Any(), policyList).Return(nil).Do(func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + a := list.(*v1alpha2.IPPoolList) + a.Items = append(a.Items, v1alpha2.IPPool{}) + a.Items[0].ObjectMeta = metav1.ObjectMeta{} + a.Items[0].UID = "1234" + return nil + }) + go func() { + time.Sleep(1 * time.Second) + cancel <- true + }() + r.IPPoolGarbageCollector(cancel, time.Second) + + // the local store has the same items as the k8s cache + patch.Reset() + patch.ApplyMethod(reflect.TypeOf(service), "ListIPPoolID", func(_ *ippool.IPPoolService) sets.String { + a := sets.NewString() + a.Insert("1234") + return a + }) + patch.ApplyMethod(reflect.TypeOf(service), "DeleteIPPool", func(_ *ippool.IPPoolService, UID interface{}) error { + 
assert.FailNow(t, "should not be called") + return nil + }) + k8sClient.EXPECT().List(ctx, policyList).Return(nil).Do(func(_ context.Context, list client.ObjectList, _ ...client.ListOption) error { + a := list.(*v1alpha2.IPPoolList) + a.Items = append(a.Items, v1alpha2.IPPool{}) + a.Items[0].ObjectMeta = metav1.ObjectMeta{} + a.Items[0].UID = "1234" + return nil + }) + go func() { + time.Sleep(1 * time.Second) + cancel <- true + }() + r.IPPoolGarbageCollector(cancel, time.Second) + + // local store has no item + patch.Reset() + patch.ApplyMethod(reflect.TypeOf(service), "ListIPPoolID", func(_ *ippool.IPPoolService) sets.String { + a := sets.NewString() + return a + }) + patch.ApplyMethod(reflect.TypeOf(service), "DeleteIPPool", func(_ *ippool.IPPoolService, UID interface{}) error { + assert.FailNow(t, "should not be called") + return nil + }) + k8sClient.EXPECT().List(ctx, policyList).Return(nil).Times(0) + go func() { + time.Sleep(1 * time.Second) + cancel <- true + }() + r.IPPoolGarbageCollector(cancel, time.Second) +} + +func TestReconciler_Start(t *testing.T) { + mockCtl := gomock.NewController(t) + k8sClient := mock_client.NewMockClient(mockCtl) + service := &ippool.IPPoolService{} + var mgr controllerruntime.Manager + r := &IPPoolReconciler{ + Client: k8sClient, + Scheme: nil, + Service: service, + } + err := r.Start(mgr) + assert.NotEqual(t, err, nil) +} diff --git a/pkg/nsx/client.go b/pkg/nsx/client.go index bf026af9c..e6f1f4e2a 100644 --- a/pkg/nsx/client.go +++ b/pkg/nsx/client.go @@ -9,8 +9,6 @@ import ( "strings" "github.com/sirupsen/logrus" - "github.com/vmware-tanzu/nsx-operator/pkg/config" - "github.com/vmware-tanzu/nsx-operator/pkg/nsx/ratelimiter" vspherelog "github.com/vmware/vsphere-automation-sdk-go/runtime/log" "github.com/vmware/vsphere-automation-sdk-go/runtime/protocol/client" nsx_policy "github.com/vmware/vsphere-automation-sdk-go/services/nsxt" @@ -28,6 +26,9 @@ import ( "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/orgs/projects/vpcs/subnets/ip_pools" "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/orgs/projects/vpcs/subnets/ports" "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/search" + + "github.com/vmware-tanzu/nsx-operator/pkg/config" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/ratelimiter" ) const ( @@ -57,6 +58,7 @@ type Client struct { ClusterControlPlanesClient enforcement_points.ClusterControlPlanesClient SubnetStatusClient subnets.StatusClient RealizedEntitiesClient realized_state.RealizedEntitiesClient + ProjectInfraClient projects.InfraClient MPQueryClient mpsearch.QueryClient CertificatesClient trust_management.CertificatesClient @@ -131,6 +133,7 @@ func GetClient(cf *config.NSXOperatorConfig) *Client { principalIdentitiesClient := trust_management.NewPrincipalIdentitiesClient(restConnector(cluster)) withCertificateClient := principal_identities.NewWithCertificateClient(restConnector(cluster)) + projectInfraClient := projects.NewInfraClient(restConnector(cluster)) portClient := subnets.NewPortsClient(restConnector(cluster)) portStateClient := ports.NewStateClient(restConnector(cluster)) @@ -167,6 +170,7 @@ func GetClient(cf *config.NSXOperatorConfig) *Client { PrincipalIdentitiesClient: principalIdentitiesClient, WithCertificateClient: withCertificateClient, + ProjectInfraClient: projectInfraClient, PortClient: portClient, PortStateClient: portStateClient, SubnetStatusClient: subnetStatusClient, diff --git a/pkg/nsx/services/common/store.go b/pkg/nsx/services/common/store.go index 890def73b..d7ac90efd 100644 
--- a/pkg/nsx/services/common/store.go +++ b/pkg/nsx/services/common/store.go @@ -143,8 +143,11 @@ func (service *Service) InitializeCommonStore(wg *sync.WaitGroup, fatalErrors ch for _, tag := range tags { tagKey := strings.Replace(*tag.Scope, "/", "\\/", -1) - tagValue := strings.Replace(*tag.Tag, ":", "\\:", -1) - tagParam += fmt.Sprintf(" AND tags.scope:%s AND tags.tag:%s", tagKey, tagValue) + tagParam += fmt.Sprintf(" AND tags.scope:%s ", tagKey) + if tag.Tag != nil { + tagValue := strings.Replace(*tag.Tag, ":", "\\:", -1) + tagParam += fmt.Sprintf(" AND tags.tag:%s ", tagValue) + } } resourceParam := fmt.Sprintf("%s:%s", ResourceType, resourceTypeValue) diff --git a/pkg/nsx/services/common/types.go b/pkg/nsx/services/common/types.go index 320d0aaca..030679827 100644 --- a/pkg/nsx/services/common/types.go +++ b/pkg/nsx/services/common/types.go @@ -39,6 +39,10 @@ const ( TagScopeVPCCRUID string = "nsx-op/vpc_cr_uid" TagScopeSubnetPortCRName string = "nsx-op/subnetport_cr_name" TagScopeSubnetPortCRUID string = "nsx-op/subnetport_cr_uid" + TagScopeIPPoolCRName string = "nsx-op/ippool_cr_name" + TagScopeIPPoolCRUID string = "nsx-op/ippool_cr_uid" + TagScopeIPPoolCRType string = "nsx-op/ippool_cr_type" + TagScopeIPSubnetName string = "nsx-op/ipsubnet_cr_name" LabelDefaultSubnetSet string = "nsxoperator.vmware.com/default-subnetset-for" LabelDefaultVMSubnet string = "VirtualMachine" LabelDefaultPodSubnetSet string = "Pod" @@ -50,8 +54,12 @@ const ( AnnotationVPCName string = "nsx.vmware.com/vpc_name" DefaultNetworkConfigName string = "default" - GCInterval = 60 * time.Second - RealizeTimeout = 2 * time.Minute + GCInterval = 60 * time.Second + RealizeTimeout = 2 * time.Minute + RealizeMaxRetries = 3 + IPPoolFinalizerName = "ippool.nsx.vmware.com/finalizer" + IPPoolTypePublic = "public" + IPPoolTypePrivate = "private" FinalizerName = "securitypolicy.nsx.vmware.com/finalizer" StaticRouteFinalizerName = "staticroute.nsx.vmware.com/finalizer" @@ -76,6 +84,8 @@ var ( // ResourceTypePrincipalIdentity is used by NSXServiceAccountController, and it is MP resource type. 
ResourceTypePrincipalIdentity = "principalidentity" ResourceTypeSubnet = "VpcSubnet" + ResourceTypeIPPool = "IpAddressPool" + ResourceTypeIPPoolBlockSubnet = "IpAddressPoolBlockSubnet" ) type Service struct { diff --git a/pkg/nsx/services/ippool/builder.go b/pkg/nsx/services/ippool/builder.go new file mode 100644 index 000000000..d22017ce7 --- /dev/null +++ b/pkg/nsx/services/ippool/builder.go @@ -0,0 +1,112 @@ +package ippool + +import ( + "fmt" + "strings" + + "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model" + + "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha2" + commonctl "github.com/vmware-tanzu/nsx-operator/pkg/controllers/common" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/common" + "github.com/vmware-tanzu/nsx-operator/pkg/util" +) + +var ( + Int64 = common.Int64 + String = common.String +) + +const ( + IPPOOLPREFIX = "ipc" +) + +func (service *IPPoolService) BuildIPPool(IPPool *v1alpha2.IPPool) (*model.IpAddressPool, []*model.IpAddressPoolBlockSubnet) { + return &model.IpAddressPool{ + Id: String(service.buildIPPoolID(IPPool)), + DisplayName: String(service.buildIPPoolName(IPPool)), + Tags: service.buildIPPoolTags(IPPool), + }, service.buildIPSubnets(IPPool) +} + +func (service *IPPoolService) buildIPPoolID(IPPool *v1alpha2.IPPool) string { + return strings.Join([]string{IPPOOLPREFIX, string(IPPool.UID)}, "_") +} + +func (service *IPPoolService) buildIPPoolName(IPPool *v1alpha2.IPPool) string { + return strings.Join([]string{IPPOOLPREFIX, getCluster(service), string(IPPool.UID), IPPool.ObjectMeta.Name}, "-") +} + +func (service *IPPoolService) buildIPPoolTags(IPPool *v1alpha2.IPPool) []model.Tag { + return []model.Tag{ + {Scope: String(common.TagScopeCluster), Tag: String(getCluster(service))}, + {Scope: String(common.TagScopeNamespace), Tag: String(IPPool.ObjectMeta.Namespace)}, + {Scope: String(common.TagScopeIPPoolCRName), Tag: String(IPPool.ObjectMeta.Name)}, + {Scope: String(common.TagScopeIPPoolCRUID), Tag: String(string(IPPool.UID))}, + {Scope: String(common.TagScopeIPPoolCRType), Tag: String(IPPool.Spec.Type)}, + } +} + +func (service *IPPoolService) buildIPSubnets(IPPool *v1alpha2.IPPool) []*model.IpAddressPoolBlockSubnet { + var IPSubnets []*model.IpAddressPoolBlockSubnet + for _, subnetRequest := range IPPool.Spec.Subnets { + IPSubnet := service.buildIPSubnet(IPPool, subnetRequest) + if IPSubnet != nil { + IPSubnets = append(IPSubnets, IPSubnet) + } + } + return IPSubnets +} + +func (service *IPPoolService) buildIPSubnetID(IPPool *v1alpha2.IPPool, subnetRequest *v1alpha2.SubnetRequest) string { + return strings.Join([]string{"ibs", string(IPPool.UID), subnetRequest.Name}, "_") +} + +func (service *IPPoolService) buildIPSubnetName(IPPool *v1alpha2.IPPool, subnetRequest *v1alpha2.SubnetRequest) string { + return strings.Join([]string{"ibs", IPPool.Name, subnetRequest.Name}, "-") +} + +func (service *IPPoolService) buildIPSubnetTags(IPPool *v1alpha2.IPPool, subnetRequest *v1alpha2.SubnetRequest) []model.Tag { + return []model.Tag{ + {Scope: String(common.TagScopeCluster), Tag: String(getCluster(service))}, + {Scope: String(common.TagScopeNamespace), Tag: String(IPPool.ObjectMeta.Namespace)}, + {Scope: String(common.TagScopeIPPoolCRName), Tag: String(IPPool.ObjectMeta.Name)}, + {Scope: String(common.TagScopeIPPoolCRUID), Tag: String(string(IPPool.UID))}, + {Scope: String(common.TagScopeIPSubnetName), Tag: String(subnetRequest.Name)}, + } +} + +func (service *IPPoolService) buildIPSubnetIntentPath(IPPool *v1alpha2.IPPool, subnetRequest 
*v1alpha2.SubnetRequest) string { + if IPPool.Spec.Type == common.IPPoolTypePrivate { + VPCInfo := commonctl.ServiceMediator.ListVPCInfo(IPPool.Namespace) + if len(VPCInfo) == 0 { + return "" + } + return strings.Join([]string{fmt.Sprintf("/orgs/%s/projects/%s/infra/ip-pools", VPCInfo[0].OrgID, VPCInfo[0].ProjectID), + service.buildIPPoolID(IPPool), + "ip-subnets", service.buildIPSubnetID(IPPool, subnetRequest)}, "/") + } else { + return strings.Join([]string{"/infra/ip-pools", service.buildIPPoolID(IPPool), + "ip-subnets", service.buildIPSubnetID(IPPool, subnetRequest)}, "/") + } +} + +func (service *IPPoolService) buildIPSubnet(IPPool *v1alpha2.IPPool, subnetRequest v1alpha2.SubnetRequest) *model.IpAddressPoolBlockSubnet { + // TODO: Get the IPBlockPath by IPPool's namespace, external and private + IpBlockPath := String("/infra/ip-blocks/block-test") + if IPPool.Spec.Type == common.IPPoolTypePrivate { + VPCInfo := commonctl.ServiceMediator.ListVPCInfo(IPPool.Namespace) + if len(VPCInfo) == 0 { + return nil + } + // TODO: Get the IPBlockPath by IPPool's namespace, external and private + IpBlockPath = String(fmt.Sprintf("/orgs/%s/projects/%s/infra/ip-blocks/block-test", VPCInfo[0].OrgID, VPCInfo[0].ProjectID)) + } + return &model.IpAddressPoolBlockSubnet{ + Id: String(service.buildIPSubnetID(IPPool, &subnetRequest)), + DisplayName: String(service.buildIPSubnetName(IPPool, &subnetRequest)), + Tags: service.buildIPSubnetTags(IPPool, &subnetRequest), + Size: Int64(util.CalculateSubnetSize(subnetRequest.PrefixLength)), + IpBlockPath: IpBlockPath, + } +} diff --git a/pkg/nsx/services/ippool/builder_test.go b/pkg/nsx/services/ippool/builder_test.go new file mode 100644 index 000000000..3267d5bdd --- /dev/null +++ b/pkg/nsx/services/ippool/builder_test.go @@ -0,0 +1,71 @@ +package ippool + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model" + "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha2" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/common" +) + +func TestIPPoolService_BuildIPPool(t *testing.T) { + ipPoolService := fakeService() + + ipPool := &v1alpha2.IPPool{ + ObjectMeta: v1.ObjectMeta{ + Name: "ippool1", + UID: "uuid1", + }, + Spec: v1alpha2.IPPoolSpec{ + Subnets: []v1alpha2.SubnetRequest{ + { + Name: "subnet1", + PrefixLength: 24, + }, + }, + }, + } + + want := &model.IpAddressPool{ + DisplayName: String("ipc-k8scl-one:test-uuid1-ippool1"), + Id: String("ipc_uuid1"), + Tags: []model.Tag{{Scope: String("nsx-op/cluster"), Tag: String("k8scl-one:test")}, {Scope: String("nsx-op/namespace"), + Tag: String("")}, {Scope: String("nsx-op/ippool_cr_name"), Tag: String("ippool1")}, {Scope: String("nsx-op/ippool_cr_uid"), + Tag: String("uuid1")}, {Scope: String("nsx-op/ippool_cr_type"), Tag: String("")}}, + } + + want2 := model.IpAddressPoolBlockSubnet{ + DisplayName: String("ibs-ippool1-subnet1"), + Id: String("ibs_uuid1_subnet1"), + IpBlockPath: String("/infra/ip-blocks/block-test"), + Tags: []model.Tag{{Scope: String("nsx-op/cluster"), Tag: String("k8scl-one:test")}, {Scope: String("nsx-op/namespace"), + Tag: String("")}, {Scope: String("nsx-op/ippool_cr_name"), Tag: String("ippool1")}, {Scope: String("nsx-op/ippool_cr_uid"), + Tag: String("uuid1")}, {Scope: String("nsx-op/ipsubnet_cr_name"), Tag: String("subnet1")}}, + Size: Int64(256), + } + + type fields struct { + Service common.Service + } + type args struct { + IPPool *v1alpha2.IPPool + } + tests := 
[]struct { + name string + args args + want *model.IpAddressPool + want1 []*model.IpAddressPoolBlockSubnet + }{ + {"test1", args{ipPool}, want, []*model.IpAddressPoolBlockSubnet{&want2}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, got1 := ipPoolService.BuildIPPool(tt.args.IPPool) + assert.Equalf(t, tt.want, got, "BuildIPPool(%v)", tt.args.IPPool) + assert.Equalf(t, tt.want1, got1, "BuildIPPool(%v)", tt.args.IPPool) + }) + } +} diff --git a/pkg/nsx/services/ippool/compare.go b/pkg/nsx/services/ippool/compare.go new file mode 100644 index 000000000..2b9a65bf2 --- /dev/null +++ b/pkg/nsx/services/ippool/compare.go @@ -0,0 +1,63 @@ +package ippool + +import ( + "github.com/vmware/vsphere-automation-sdk-go/runtime/data" + "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model" + + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/common" +) + +type ( + IpAddressPool model.IpAddressPool + IpAddressPoolBlockSubnet model.IpAddressPoolBlockSubnet +) + +type Comparable = common.Comparable + +func (iap *IpAddressPool) Key() string { + return *iap.Id +} + +func (iapbs *IpAddressPoolBlockSubnet) Key() string { + return *iapbs.Id +} + +func (iap *IpAddressPool) Value() data.DataValue { + s := &IpAddressPool{Id: iap.Id, DisplayName: iap.DisplayName, Tags: iap.Tags} + dataValue, _ := ComparableToIpAddressPool(s).GetDataValue__() + return dataValue +} + +func (iapbs *IpAddressPoolBlockSubnet) Value() data.DataValue { + r := &IpAddressPoolBlockSubnet{Id: iapbs.Id, DisplayName: iapbs.DisplayName, Tags: iapbs.Tags} + dataValue, _ := ComparableToIpAddressPoolBlockSubnet(r).GetDataValue__() + return dataValue +} + +func IpAddressPoolToComparable(iap *model.IpAddressPool) Comparable { + return (*IpAddressPool)(iap) +} + +func IpAddressPoolBlockSubnetsToComparable(iapbs []*model.IpAddressPoolBlockSubnet) []Comparable { + res := make([]Comparable, 0, len(iapbs)) + for i := range iapbs { + res = append(res, (*IpAddressPoolBlockSubnet)(iapbs[i])) + } + return res +} + +func ComparableToIpAddressPool(iap Comparable) *model.IpAddressPool { + return (*model.IpAddressPool)(iap.(*IpAddressPool)) +} + +func ComparableToIpAddressPoolBlockSubnets(iapbs []Comparable) []*model.IpAddressPoolBlockSubnet { + res := make([]*model.IpAddressPoolBlockSubnet, 0, len(iapbs)) + for _, iapb := range iapbs { + res = append(res, (*model.IpAddressPoolBlockSubnet)(iapb.(*IpAddressPoolBlockSubnet))) + } + return res +} + +func ComparableToIpAddressPoolBlockSubnet(iapbs Comparable) *model.IpAddressPoolBlockSubnet { + return (*model.IpAddressPoolBlockSubnet)(iapbs.(*IpAddressPoolBlockSubnet)) +} diff --git a/pkg/nsx/services/ippool/compare_test.go b/pkg/nsx/services/ippool/compare_test.go new file mode 100644 index 000000000..022ea115c --- /dev/null +++ b/pkg/nsx/services/ippool/compare_test.go @@ -0,0 +1,183 @@ +package ippool + +import ( + "reflect" + "testing" + + "github.com/vmware/vsphere-automation-sdk-go/runtime/data" + "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model" +) + +func TestComparableToIpAddressPool(t *testing.T) { + type args struct { + iap Comparable + } + tests := []struct { + name string + args args + want *model.IpAddressPool + }{ + {"1", args{&IpAddressPool{Id: String("1")}}, &model.IpAddressPool{Id: String("1")}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ComparableToIpAddressPool(tt.args.iap); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ComparableToIpAddressPool() = %v, want %v", got, tt.want) + } + }) + } +} + +func 
TestComparableToIpAddressPoolBlockSubnet(t *testing.T) { + type args struct { + iapbs Comparable + } + tests := []struct { + name string + args args + want *model.IpAddressPoolBlockSubnet + }{ + {"1", args{&IpAddressPoolBlockSubnet{Id: String("1")}}, &model.IpAddressPoolBlockSubnet{Id: String("1")}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ComparableToIpAddressPoolBlockSubnet(tt.args.iapbs); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ComparableToIpAddressPoolBlockSubnet() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestComparableToIpAddressPoolBlockSubnets(t *testing.T) { + type args struct { + iapbs []Comparable + } + tests := []struct { + name string + args args + want []*model.IpAddressPoolBlockSubnet + }{ + {"1", args{[]Comparable{&IpAddressPoolBlockSubnet{Id: String("1")}}}, []*model.IpAddressPoolBlockSubnet{{Id: String("1")}}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ComparableToIpAddressPoolBlockSubnets(tt.args.iapbs); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ComparableToIpAddressPoolBlockSubnets() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestIpAddressPoolBlockSubnet_Key(t *testing.T) { + tests := []struct { + name string + iapbs IpAddressPoolBlockSubnet + want string + }{ + {"1", IpAddressPoolBlockSubnet{Id: String("1")}, "1"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.iapbs.Key(); got != tt.want { + t.Errorf("Key() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestIpAddressPoolBlockSubnet_Value(t *testing.T) { + m := model.IpAddressPoolBlockSubnet{Id: String("1"), DisplayName: String("1"), Tags: []model.Tag{{Scope: String("1"), Tag: String("1")}}} + v, _ := m.GetDataValue__() + p := IpAddressPoolBlockSubnet{Id: String("1"), DisplayName: String("1"), Tags: []model.Tag{{Scope: String("1"), Tag: String("1")}}} + tests := []struct { + name string + iapbs IpAddressPoolBlockSubnet + want data.DataValue + }{ + {"1", p, v}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.iapbs.Value(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("Value() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestIpAddressPoolBlockSubnetsToComparable(t *testing.T) { + type args struct { + iapbs []*model.IpAddressPoolBlockSubnet + } + tests := []struct { + name string + args args + want []Comparable + }{ + {"1", args{[]*model.IpAddressPoolBlockSubnet{{Id: String("1")}}}, []Comparable{&IpAddressPoolBlockSubnet{Id: String("1")}}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := IpAddressPoolBlockSubnetsToComparable(tt.args.iapbs); !reflect.DeepEqual(got, tt.want) { + t.Errorf("IpAddressPoolBlockSubnetsToComparable() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestIpAddressPoolToComparable(t *testing.T) { + type args struct { + iap *model.IpAddressPool + } + tests := []struct { + name string + args args + want Comparable + }{ + {"1", args{&model.IpAddressPool{Id: String("1")}}, &IpAddressPool{Id: String("1")}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := IpAddressPoolToComparable(tt.args.iap); !reflect.DeepEqual(got, tt.want) { + t.Errorf("IpAddressPoolToComparable() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestIpAddressPool_Key(t *testing.T) { + tests := []struct { + name string + iap IpAddressPool + want string + }{ + {"1", IpAddressPool{Id: String("1")}, "1"}, + } + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + if got := tt.iap.Key(); got != tt.want { + t.Errorf("Key() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestIpAddressPool_Value(t *testing.T) { + m := model.IpAddressPool{Id: String("1"), DisplayName: String("1"), Tags: []model.Tag{{Scope: String("1"), Tag: String("1")}}} + v, _ := m.GetDataValue__() + p := IpAddressPool{Id: String("1"), DisplayName: String("1"), Tags: []model.Tag{{Scope: String("1"), Tag: String("1")}}} + tests := []struct { + name string + iap IpAddressPool + want data.DataValue + }{ + {"1", p, v}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.iap.Value(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("Value() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/nsx/services/ippool/fake_test.go b/pkg/nsx/services/ippool/fake_test.go new file mode 100644 index 000000000..fa55d3c9c --- /dev/null +++ b/pkg/nsx/services/ippool/fake_test.go @@ -0,0 +1,97 @@ +package ippool + +import ( + "github.com/vmware/vsphere-automation-sdk-go/runtime/data" + "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model" + "k8s.io/client-go/tools/cache" + + "github.com/vmware-tanzu/nsx-operator/pkg/config" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/ratelimiter" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/common" +) + +type fakeQueryClient struct { +} + +type fakeProjectQueryClient struct { +} + +func (f fakeProjectQueryClient) List(orgIdParam string, projectIdParam string, queryParam string, cursorParam *string, includedFieldsParam *string, pageSizeParam *int64, sortAscendingParam *bool, sortByParam *string) (model.SearchResponse, error) { + return model.SearchResponse{}, nil +} + +func (qIface *fakeQueryClient) List(queryParam string, cursorParam *string, includedFieldsParam *string, pageSizeParam *int64, sortAscendingParam *bool, sortByParam *string) (model.SearchResponse, error) { + cursor := "2" + resultCount := int64(2) + return model.SearchResponse{ + Results: []*data.StructValue{&data.StructValue{}}, + Cursor: &cursor, ResultCount: &resultCount, + }, nil +} + +type fakeProjectInfraClient struct { +} + +func (f fakeProjectInfraClient) Get(orgIdParam string, projectIdParam string, basePathParam *string, filterParam *string, typeFilterParam *string) (model.Infra, error) { + return model.Infra{}, nil +} + +func (f fakeProjectInfraClient) Patch(orgIdParam string, projectIdParam string, infraParam model.Infra, enforceRevisionCheckParam *bool) error { + return nil +} + +type fakeRealizedEntitiesClient struct { +} + +func (f fakeRealizedEntitiesClient) List(_ string, _ string, _ string, _ *string) (model.GenericPolicyRealizedResourceListResult, error) { + a := model.GenericPolicyRealizedResourceListResult{ + Results: []model.GenericPolicyRealizedResource{ + { + EntityType: String("IpBlockSubnet"), + ExtendedAttributes: []model.AttributeVal{ + {Key: String("cidr"), Values: []string{"1.1.1.1/24"}}, + }, + }, + }, + } + return a, nil +} + +func fakeService() *IPPoolService { + c := nsx.NewConfig("localhost", "1", "1", []string{}, 10, 3, 20, 20, true, true, true, ratelimiter.AIMD, nil, nil, []string{}) + cluster, _ := nsx.NewCluster(c) + rc, _ := cluster.NewRestConnector() + ipPoolStore := &IPPoolStore{ResourceStore: common.ResourceStore{ + Indexer: cache.NewIndexer(keyFunc, cache.Indexers{common.TagScopeIPPoolCRUID: indexFunc}), + BindingType: model.IpAddressPoolBindingType(), + }} + ipPoolBlockSubnetStore := 
&IPPoolBlockSubnetStore{ResourceStore: common.ResourceStore{ + Indexer: cache.NewIndexer(keyFunc, cache.Indexers{common.TagScopeIPPoolCRUID: indexFunc}), + BindingType: model.IpAddressPoolBlockSubnetBindingType(), + }} + + service := &IPPoolService{ + Service: common.Service{ + NSXClient: &nsx.Client{ + QueryClient: &fakeQueryClient{}, + RestConnector: rc, + RealizedEntitiesClient: &fakeRealizedEntitiesClient{}, + ProjectInfraClient: &fakeProjectInfraClient{}, + NsxConfig: &config.NSXOperatorConfig{ + CoeConfig: &config.CoeConfig{ + Cluster: "k8scl-one:test", + }, + }, + }, + NSXConfig: &config.NSXOperatorConfig{ + CoeConfig: &config.CoeConfig{ + Cluster: "k8scl-one:test", + }, + }, + }, + ipPoolStore: ipPoolStore, + ipPoolBlockSubnetStore: ipPoolBlockSubnetStore, + } + return service +} diff --git a/pkg/nsx/services/ippool/ippool.go b/pkg/nsx/services/ippool/ippool.go new file mode 100644 index 000000000..5e03b2bc1 --- /dev/null +++ b/pkg/nsx/services/ippool/ippool.go @@ -0,0 +1,263 @@ +package ippool + +import ( + "fmt" + "sync" + "time" + + "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/tools/cache" + + "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha2" + commonctl "github.com/vmware-tanzu/nsx-operator/pkg/controllers/common" + "github.com/vmware-tanzu/nsx-operator/pkg/logger" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/common" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/util" +) + +var ( + log = logger.Log + MarkedForDelete = true + EnforceRevisionCheckParam = false + ResourceTypeIPPool = common.ResourceTypeIPPool + ResourceTypeIPPoolBlockSubnet = common.ResourceTypeIPPoolBlockSubnet + NewConverter = common.NewConverter +) + +type IPPoolService struct { + common.Service + ipPoolStore *IPPoolStore + ipPoolBlockSubnetStore *IPPoolBlockSubnetStore +} + +func InitializeIPPool(service common.Service) (*IPPoolService, error) { + wg := sync.WaitGroup{} + wgDone := make(chan bool) + fatalErrors := make(chan error) + + wg.Add(2) + + ipPoolService := &IPPoolService{Service: service} + ipPoolService.ipPoolStore = &IPPoolStore{ResourceStore: common.ResourceStore{ + Indexer: cache.NewIndexer(keyFunc, cache.Indexers{common.TagScopeIPPoolCRUID: indexFunc}), + BindingType: model.IpAddressPoolBindingType(), + }} + ipPoolService.ipPoolBlockSubnetStore = &IPPoolBlockSubnetStore{ResourceStore: common.ResourceStore{ + Indexer: cache.NewIndexer(keyFunc, cache.Indexers{common.TagScopeIPPoolCRUID: indexFunc}), + BindingType: model.IpAddressPoolBlockSubnetBindingType(), + }} + + tags := []model.Tag{ + {Scope: String(common.TagScopeIPPoolCRUID)}, + } + go ipPoolService.InitializeResourceStore(&wg, fatalErrors, ResourceTypeIPPool, tags, ipPoolService.ipPoolStore) + go ipPoolService.InitializeResourceStore(&wg, fatalErrors, ResourceTypeIPPoolBlockSubnet, tags, ipPoolService.ipPoolBlockSubnetStore) + + go func() { + wg.Wait() + close(wgDone) + }() + select { + case <-wgDone: + break + case err := <-fatalErrors: + close(fatalErrors) + return ipPoolService, err + } + return ipPoolService, nil +} + +func (service *IPPoolService) CreateOrUpdateIPPool(obj *v1alpha2.IPPool) (bool, bool, error) { + nsxIPPool, nsxIPSubnets := service.BuildIPPool(obj) + existingIPPool, existingIPSubnets, err := service.indexedIPPoolAndIPPoolSubnets(obj.UID) + if err != nil { + log.Error(err, "failed to get ip pool and ip pool subnets by UID", "UID", obj.UID) + return false, false, err + } + 
log.V(1).Info("existing ippool and ip subnets", "existingIPPool", existingIPPool, "existingIPSubnets", existingIPSubnets) + ipPoolSubnetsUpdated := false + ipPoolUpdated := common.CompareResource(IpAddressPoolToComparable(existingIPPool), IpAddressPoolToComparable(nsxIPPool)) + changed, stale := common.CompareResources(IpAddressPoolBlockSubnetsToComparable(existingIPSubnets), IpAddressPoolBlockSubnetsToComparable(nsxIPSubnets)) + changedIPSubnets, staleIPSubnets := ComparableToIpAddressPoolBlockSubnets(changed), ComparableToIpAddressPoolBlockSubnets(stale) + for i := len(staleIPSubnets) - 1; i >= 0; i-- { + staleIPSubnets[i].MarkedForDelete = &MarkedForDelete + } + finalIPSubnets := append(changedIPSubnets, staleIPSubnets...) + if len(finalIPSubnets) > 0 { + ipPoolSubnetsUpdated = true + } + + if err := service.Operate(nsxIPPool, finalIPSubnets, ipPoolUpdated, ipPoolSubnetsUpdated); err != nil { + return false, false, err + } + + realizedSubnets, subnetCidrUpdated, e := service.AcquireRealizedSubnetIP(obj) + if e != nil { + return false, false, e + } + obj.Status.Subnets = realizedSubnets + return subnetCidrUpdated, ipPoolSubnetsUpdated, nil +} + +func (service *IPPoolService) Operate(nsxIPPool *model.IpAddressPool, nsxIPSubnets []*model.IpAddressPoolBlockSubnet, IPPoolUpdated bool, IPPoolSubnetsUpdated bool) error { + if !(IPPoolUpdated || IPPoolSubnetsUpdated) { + return nil + } + infraIPPool, err := service.WrapHierarchyIPPool(nsxIPPool, nsxIPSubnets) + if err != nil { + return err + } + // Get IPPool Type from nsxIPPool + IPPoolType := common.IPPoolTypePrivate + for _, tag := range nsxIPPool.Tags { + if *tag.Scope == common.TagScopeIPPoolCRType { + IPPoolType = *tag.Tag + break + } + } + + if IPPoolType == common.IPPoolTypePrivate { + ns := service.GetIPPoolNamespace(nsxIPPool) + VPCInfo := commonctl.ServiceMediator.ListVPCInfo(ns) + if len(VPCInfo) == 0 { + err = util.NoEffectiveOption{Desc: "no valid org and project for ippool"} + } else { + err = service.NSXClient.ProjectInfraClient.Patch(VPCInfo[0].OrgID, VPCInfo[0].ProjectID, *infraIPPool, + &EnforceRevisionCheckParam) + } + } else if IPPoolType == common.IPPoolTypePublic { + err = service.NSXClient.InfraClient.Patch(*infraIPPool, &EnforceRevisionCheckParam) + } else { + err = util.NoEffectiveOption{Desc: "not valid IPPool type"} + } + if err != nil { + return err + } + if IPPoolUpdated { + err = service.ipPoolStore.Operate(nsxIPPool) + if err != nil { + return err + } + } + if IPPoolSubnetsUpdated { + err = service.ipPoolBlockSubnetStore.Operate(nsxIPSubnets) + if err != nil { + return err + } + } + log.V(1).Info("successfully created or updated ippool and ip subnets", "nsxIPPool", nsxIPPool) + return nil +} + +func (service *IPPoolService) AcquireRealizedSubnetIP(obj *v1alpha2.IPPool) ([]v1alpha2.SubnetResult, bool, error) { + realizedSubnets := []v1alpha2.SubnetResult{} + subnetCidrUpdated := false + for _, subnetRequest := range obj.Spec.Subnets { + // check if the subnet is already realized + realized := false + realizedSubnet := v1alpha2.SubnetResult{Name: subnetRequest.Name} + for _, statusSubnet := range obj.Status.Subnets { + if statusSubnet.Name == subnetRequest.Name && statusSubnet.CIDR != "" { + realizedSubnet.CIDR = statusSubnet.CIDR + realized = true + break + } + } + if !realized { + cidr, err := service.acquireCidr(obj, &subnetRequest, common.RealizeMaxRetries) + if err != nil { + return nil, subnetCidrUpdated, err + } + if cidr != "" { + subnetCidrUpdated = true + } + realizedSubnet.CIDR = cidr + } + 
realizedSubnets = append(realizedSubnets, realizedSubnet) + } + return realizedSubnets, subnetCidrUpdated, nil +} + +func (service *IPPoolService) DeleteIPPool(obj interface{}) error { + var err error + var nsxIPPool *model.IpAddressPool + nsxIPSubnets := make([]*model.IpAddressPoolBlockSubnet, 0) + switch o := obj.(type) { + case *v1alpha2.IPPool: + // BuildIPPool does not return an error, so no error handling is needed here. + nsxIPPool, nsxIPSubnets = service.BuildIPPool(o) + case types.UID: + nsxIPPool, nsxIPSubnets, err = service.indexedIPPoolAndIPPoolSubnets(o) + if err != nil { + log.Error(err, "failed to get ip pool and ip pool subnets by UID", "UID", o) + return err + } + } + nsxIPPool.MarkedForDelete = &MarkedForDelete + for i := len(nsxIPSubnets) - 1; i >= 0; i-- { + nsxIPSubnets[i].MarkedForDelete = &MarkedForDelete + } + if err := service.Operate(nsxIPPool, nsxIPSubnets, true, true); err != nil { + return err + } + log.V(1).Info("successfully deleted nsxIPPool", "nsxIPPool", nsxIPPool) + return nil +} + +func (service *IPPoolService) acquireCidr(obj *v1alpha2.IPPool, subnetRequest *v1alpha2.SubnetRequest, retry int) (string, error) { + intentPath := service.buildIPSubnetIntentPath(obj, subnetRequest) + if intentPath == "" { + return "", fmt.Errorf("failed to build intent path for ip pool %s, subnetRequest %s", obj.Name, subnetRequest.Name) + } + VPCInfo := commonctl.ServiceMediator.ListVPCInfo(obj.Namespace) + var err error + if len(VPCInfo) == 0 { + err = util.NoEffectiveOption{Desc: "no effective org and project for ippool"} + return "", err + } + m, err := service.NSXClient.RealizedEntitiesClient.List(VPCInfo[0].OrgID, VPCInfo[0].ProjectID, intentPath, nil) + if err != nil { + return "", err + } + for _, realizedEntity := range m.Results { + if *realizedEntity.EntityType == "IpBlockSubnet" { + for _, attr := range realizedEntity.ExtendedAttributes { + if *attr.Key == "cidr" { + cidr := attr.Values[0] + log.V(1).Info("successfully realized ippool subnet from ipblock", "subnetRequest.Name", subnetRequest.Name, "cidr", cidr) + return cidr, nil + } + } + } + } + if retry > 0 { + log.V(1).Info("failed to acquire subnet cidr, retrying...", "subnet request", subnetRequest, "retry", retry) + time.Sleep(30 * time.Second) + cidr, e := service.acquireCidr(obj, subnetRequest, retry-1) + return cidr, e + } else { + log.V(1).Info("failed to acquire subnet cidr after multiple retries", "subnet request", subnetRequest) + return "", nil + } +} + +func (service *IPPoolService) ListIPPoolID() sets.String { + ipPoolSet := service.ipPoolStore.ListIndexFuncValues(common.TagScopeIPPoolCRUID) + ipPoolSubnetSet := service.ipPoolBlockSubnetStore.ListIndexFuncValues(common.TagScopeIPPoolCRUID) + return ipPoolSet.Union(ipPoolSubnetSet) +} + +// GetIPPoolNamespace returns the IPPool's namespace, read from its NSX tags. +func (service *IPPoolService) GetIPPoolNamespace(nsxIPPool *model.IpAddressPool) string { + for _, tag := range nsxIPPool.Tags { + if *tag.Scope == common.TagScopeNamespace { + return *tag.Tag + } + } + return "" +} diff --git a/pkg/nsx/services/ippool/ippool_test.go b/pkg/nsx/services/ippool/ippool_test.go new file mode 100644 index 000000000..68b8bf396 --- /dev/null +++ b/pkg/nsx/services/ippool/ippool_test.go @@ -0,0 +1,278 @@ +package ippool + +import ( + "fmt" + "reflect" + "sync" + "testing" + + "github.com/agiledragon/gomonkey/v2" + "github.com/stretchr/testify/assert" + "github.com/vmware/vsphere-automation-sdk-go/runtime/bindings" + "github.com/vmware/vsphere-automation-sdk-go/runtime/data"
+ "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/tools/cache" + + "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha2" + commonctl "github.com/vmware-tanzu/nsx-operator/pkg/controllers/common" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/common" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/mediator" + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/vpc" +) + +func TestIPPoolService_ListIPPoolID(t *testing.T) { + ipPoolService := fakeService() + p := &model.IpAddressPool{Id: String("1"), DisplayName: String("1"), + Tags: []model.Tag{{Scope: String(common.TagScopeIPPoolCRUID), + Tag: String("1")}}} + _ = ipPoolService.ipPoolStore.Operate(p) + + tests := []struct { + name string + want sets.String + }{ + {"test", sets.NewString("1")}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, ipPoolService.ListIPPoolID(), "ListIPPoolID()") + }) + } +} + +func TestIPPoolService_acquireCidr(t *testing.T) { + vpcCacheIndexer := cache.NewIndexer(keyFunc, cache.Indexers{common.TagScopeVPCCRUID: indexFunc}) + resourceStore := common.ResourceStore{ + Indexer: vpcCacheIndexer, + BindingType: model.VpcBindingType(), + } + vpcStore := &vpc.VPCStore{ResourceStore: resourceStore} + commonctl.ServiceMediator.VPCService = &vpc.VPCService{VpcStore: vpcStore} + patches := gomonkey.ApplyMethod(reflect.TypeOf(vpcStore), "GetVPCsByNamespace", func(_ *vpc.VPCStore, ns string) []model.Vpc { + id := "vpc-1" + return []model.Vpc{{Path: common.String("/orgs/default/projects/project-1/vpcs/vpc-1"), Id: &id}} + }) + defer patches.Reset() + + ipPoolService := fakeService() + + type args struct { + obj *v1alpha2.IPPool + subnetRequest *v1alpha2.SubnetRequest + } + tests := []struct { + name string + args args + want string + wantErr assert.ErrorAssertionFunc + }{ + {"1", args{obj: &v1alpha2.IPPool{}, subnetRequest: &v1alpha2.SubnetRequest{}}, "1.1.1.1/24", assert.NoError}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ipPoolService.acquireCidr(tt.args.obj, tt.args.subnetRequest, 3) + if !tt.wantErr(t, err, fmt.Sprintf("acquireCidr(%v, %v)", tt.args.obj, tt.args.subnetRequest)) { + return + } + assert.Equalf(t, tt.want, got, "acquireCidr(%v, %v)", tt.args.obj, tt.args.subnetRequest) + }) + } +} + +func TestIPPoolService_DeleteIPPool(t *testing.T) { + service := fakeService() + iap := &model.IpAddressPool{Id: String("1"), DisplayName: String("1"), + Tags: []model.Tag{{Scope: String(common.TagScopeIPPoolCRUID), + Tag: String("1")}}} + iapbs := []*model.IpAddressPoolBlockSubnet{ + {Id: String("1"), DisplayName: String("1"), + Tags: []model.Tag{{Scope: String(common.TagScopeIPPoolCRUID), + Tag: String("1")}}}} + + patch := gomonkey.ApplyMethod(reflect.TypeOf(service), "BuildIPPool", func(service *IPPoolService, IPPool *v1alpha2.IPPool) (*model. 
+ IpAddressPool, + []*model.IpAddressPoolBlockSubnet) { + return iap, iapbs + }) + patch.ApplyMethod(reflect.TypeOf(service), "Operate", func(service *IPPoolService, nsxIPPool *model.IpAddressPool, + nsxIPSubnets []*model.IpAddressPoolBlockSubnet, IPPoolUpdated bool, IPPoolSubnetsUpdated bool) error { + return nil + }) + defer patch.Reset() + + ipPool := &v1alpha2.IPPool{} + + t.Run("1", func(t *testing.T) { + err := service.DeleteIPPool(ipPool) + assert.NoError(t, err, "DeleteIPPool(%v)", ipPool) + }) +} + +func TestIPPoolService_AcquireRealizedSubnetIP(t *testing.T) { + vpcCacheIndexer := cache.NewIndexer(keyFunc, cache.Indexers{common.TagScopeVPCCRUID: indexFunc}) + resourceStore := common.ResourceStore{ + Indexer: vpcCacheIndexer, + BindingType: model.VpcBindingType(), + } + vpcStore := &vpc.VPCStore{ResourceStore: resourceStore} + commonctl.ServiceMediator.VPCService = &vpc.VPCService{VpcStore: vpcStore} + patches := gomonkey.ApplyMethod(reflect.TypeOf(vpcStore), "GetVPCsByNamespace", func(_ *vpc.VPCStore, ns string) []model.Vpc { + id := "vpc-1" + return []model.Vpc{{Path: common.String("/orgs/default/projects/project-1/vpcs/vpc-1"), Id: &id}} + }) + defer patches.Reset() + ipPoolService := fakeService() + + ipPool2 := &v1alpha2.IPPool{ + Spec: v1alpha2.IPPoolSpec{ + Subnets: []v1alpha2.SubnetRequest{ + { + Name: "subnet1", + }, + }, + }, + Status: v1alpha2.IPPoolStatus{ + Subnets: []v1alpha2.SubnetResult{ + { + Name: "subnet1", + }, + }, + }, + } + + result := []v1alpha2.SubnetResult{ + { + Name: "subnet1", + CIDR: "1.1.1.1/24", + }, + } + + t.Run("1", func(t *testing.T) { + got, got1, err := ipPoolService.AcquireRealizedSubnetIP(ipPool2) + assert.NoError(t, err, "AcquireRealizedSubnetIP(%v)", ipPool2) + assert.Equalf(t, result, got, "AcquireRealizedSubnetIP(%v)", ipPool2) + assert.Equalf(t, true, got1, "AcquireRealizedSubnetIP(%v)", ipPool2) + }) +} + +func TestIPPoolService_CRUDResource(t *testing.T) { + vpcCacheIndexer := cache.NewIndexer(keyFunc, cache.Indexers{common.TagScopeVPCCRUID: indexFunc}) + resourceStore := common.ResourceStore{ + Indexer: vpcCacheIndexer, + BindingType: model.VpcBindingType(), + } + vpcStore := &vpc.VPCStore{ResourceStore: resourceStore} + commonctl.ServiceMediator.VPCService = &vpc.VPCService{VpcStore: vpcStore} + patches := gomonkey.ApplyMethod(reflect.TypeOf(vpcStore), "GetVPCsByNamespace", func(_ *vpc.VPCStore, ns string) []model.Vpc { + id := "vpc-1" + return []model.Vpc{{Path: common.String("/orgs/default/projects/project-1/vpcs/vpc-1"), Id: &id}} + }) + defer patches.Reset() + service := fakeService() + iap := &model.IpAddressPool{Id: String("1"), DisplayName: String("1"), + Tags: []model.Tag{{Scope: String(common.TagScopeIPPoolCRUID), + Tag: String("1")}}} + iapbs := []*model.IpAddressPoolBlockSubnet{ + {Id: String("1"), DisplayName: String("1"), Tags: []model.Tag{{Scope: String(common.TagScopeIPPoolCRUID), Tag: String("1")}}}} + + ipPoolStore := &IPPoolStore{ResourceStore: common.ResourceStore{ + Indexer: cache.NewIndexer(keyFunc, cache.Indexers{common.TagScopeIPPoolCRUID: indexFunc}), + BindingType: model.IpAddressPoolBindingType(), + }} + ipPoolBlockSubnetStore := &IPPoolBlockSubnetStore{ResourceStore: common.ResourceStore{ + Indexer: cache.NewIndexer(keyFunc, cache.Indexers{common.TagScopeIPPoolCRUID: indexFunc}), + BindingType: model.IpAddressPoolBlockSubnetBindingType(), + }} + patch := gomonkey.ApplyMethod(reflect.TypeOf(ipPoolStore), "Operate", func(_ *IPPoolStore, nsxIPPool interface{}) error { + return nil + }) + 
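+ // Note: chaining ApplyMethod onto the same gomonkey patch set stubs IPPoolBlockSubnetStore.Operate as well, so both store writes become no-ops for this test and are restored together by the single deferred Reset.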
patch.ApplyMethod(reflect.TypeOf(ipPoolBlockSubnetStore), "Operate", func(_ *IPPoolBlockSubnetStore, _ interface{}) error { + return nil + }) + defer patch.Reset() + + t.Run("1", func(t *testing.T) { + err := service.Operate(iap, iapbs, true, true) + assert.NoError(t, err, "Operate(%v)(%v)", iap, iapbs) + }) +} + +func TestIPPoolService_CreateOrUpdateIPPool(t *testing.T) { + service := fakeService() + ipPool2 := &v1alpha2.IPPool{ + Spec: v1alpha2.IPPoolSpec{ + Subnets: []v1alpha2.SubnetRequest{ + { + Name: "subnet1", + }, + }, + Type: common.IPPoolTypePrivate, + }, + Status: v1alpha2.IPPoolStatus{ + Subnets: []v1alpha2.SubnetResult{ + { + Name: "subnet1", + }, + }, + }, + } + + var vpcinfolist = []common.VPCResourceInfo{ + {OrgID: "1", VPCID: "1", ProjectID: "1", ID: "1", ParentID: "1"}, + } + md := mediator.ServiceMediator{} + patch := gomonkey.ApplyMethod(reflect.TypeOf(&md), "ListVPCInfo", func(serviceMediator *mediator.ServiceMediator, + ns string) []common.VPCResourceInfo { + return vpcinfolist + }) + defer patch.Reset() + + p := &model.IpAddressPool{Id: String("1"), DisplayName: String("1"), + Tags: []model.Tag{{Scope: String(common.TagScopeIPPoolCRUID), + Tag: String("1")}}} + ipPoolStore := &IPPoolStore{ResourceStore: common.ResourceStore{ + Indexer: cache.NewIndexer(keyFunc, cache.Indexers{common.TagScopeIPPoolCRUID: indexFunc}), + BindingType: model.IpAddressPoolBindingType(), + }} + ipPoolStore.Operate(p) + + iapbs := []*model.IpAddressPoolBlockSubnet{ + {Id: String("1"), DisplayName: String("1"), + Tags: []model.Tag{{Scope: String(common.TagScopeIPPoolCRUID), + Tag: String("1")}}}} + ipPoolBlockSubnetStore := &IPPoolBlockSubnetStore{ResourceStore: common.ResourceStore{ + Indexer: cache.NewIndexer(keyFunc, cache.Indexers{common.TagScopeIPPoolCRUID: indexFunc}), + BindingType: model.IpAddressPoolBlockSubnetBindingType(), + }} + ipPoolBlockSubnetStore.Operate(iapbs) + t.Run("1", func(t *testing.T) { + commonctl.ServiceMediator = md + got, got1, err := service.CreateOrUpdateIPPool(ipPool2) + assert.NoError(t, err, "CreateOrUpdateIPPool(%v)(%v)", got, got1) + }) +} + +func TestInitializeIPPool(t *testing.T) { + service := fakeService() + wg := sync.WaitGroup{} + fatalErrors := make(chan error) + wg.Add(3) + + var tc *bindings.TypeConverter + patches2 := gomonkey.ApplyMethod(reflect.TypeOf(tc), "ConvertToGolang", + func(_ *bindings.TypeConverter, d data.DataValue, b bindings.BindingType) (interface{}, []error) { + mId, mTag, mScope := "11111", "11111", "11111" + m := model.IpAddressPool{ + Id: &mId, + Tags: []model.Tag{{Tag: &mTag, Scope: &mScope}}, + } + var j interface{} = m + return j, nil + }) + defer patches2.Reset() + + service.InitializeResourceStore(&wg, fatalErrors, ResourceTypeIPPool, nil, service.ipPoolStore) + assert.Empty(t, fatalErrors) + assert.Equal(t, []string{"11111"}, service.ipPoolStore.ListKeys()) +} diff --git a/pkg/nsx/services/ippool/parse.go b/pkg/nsx/services/ippool/parse.go new file mode 100644 index 000000000..8dde41d89 --- /dev/null +++ b/pkg/nsx/services/ippool/parse.go @@ -0,0 +1,30 @@ +package ippool + +import ( + "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha2" +) + +func (service *IPPoolService) GetUnrealizedSubnetNames(obj *v1alpha2.IPPool) []string { + var unrealizedSubnets []string + for _, subnetRequest := range obj.Spec.Subnets { + realized := false + for _, statusSubnet := range obj.Status.Subnets { + if statusSubnet.Name == subnetRequest.Name && statusSubnet.CIDR != "" { + realized = true + break + } + } + if !realized { + 
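+ // Note: only subnets whose status entry lacks a CIDR are reported back, so the controller retries realization just for those.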
unrealizedSubnets = append(unrealizedSubnets, subnetRequest.Name) + } + } + return unrealizedSubnets +} + +func (service *IPPoolService) FullyRealized(obj *v1alpha2.IPPool) bool { + return len(service.GetUnrealizedSubnetNames(obj)) == 0 +} + +func getCluster(service *IPPoolService) string { + return service.NSXConfig.Cluster +} diff --git a/pkg/nsx/services/ippool/parse_test.go b/pkg/nsx/services/ippool/parse_test.go new file mode 100644 index 000000000..0e04e140a --- /dev/null +++ b/pkg/nsx/services/ippool/parse_test.go @@ -0,0 +1,99 @@ +package ippool + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/vmware-tanzu/nsx-operator/pkg/apis/v1alpha2" +) + +func TestIPPoolService_FullyRealized(t *testing.T) { + ipPoolService := fakeService() + ipPool := &v1alpha2.IPPool{ + Spec: v1alpha2.IPPoolSpec{ + Subnets: []v1alpha2.SubnetRequest{ + { + Name: "subnet1", + }, + }, + }, + Status: v1alpha2.IPPoolStatus{ + Subnets: []v1alpha2.SubnetResult{ + { + Name: "subnet1", + CIDR: "1.1.1/24", + }, + }, + }, + } + + ipPool2 := &v1alpha2.IPPool{ + Spec: v1alpha2.IPPoolSpec{ + Subnets: []v1alpha2.SubnetRequest{ + { + Name: "subnet1", + }, + }, + }, + Status: v1alpha2.IPPoolStatus{ + Subnets: []v1alpha2.SubnetResult{ + { + Name: "subnet1", + }, + }, + }, + } + type args struct { + obj *v1alpha2.IPPool + } + tests := []struct { + name string + args args + want bool + }{ + {"fully realized", args{ipPool}, true}, + {"not fully realized", args{ipPool2}, false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, ipPoolService.FullyRealized(tt.args.obj), "FullyRealized(%v)", tt.args.obj) + }) + } +} + +func TestIPPoolService_GetUnrealizedSubnetNames(t *testing.T) { + ipPoolService := fakeService() + + ipPool2 := &v1alpha2.IPPool{ + Spec: v1alpha2.IPPoolSpec{ + Subnets: []v1alpha2.SubnetRequest{ + { + Name: "subnet1", + }, + }, + }, + Status: v1alpha2.IPPoolStatus{ + Subnets: []v1alpha2.SubnetResult{ + { + Name: "subnet1", + }, + }, + }, + } + type args struct { + obj *v1alpha2.IPPool + } + tests := []struct { + name string + args args + want []string + }{ + {"1", args{ipPool2}, []string{"subnet1"}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, ipPoolService.GetUnrealizedSubnetNames(tt.args.obj), "GetUnrealizedSubnetNames(%v)", tt.args.obj) + }) + } +} diff --git a/pkg/nsx/services/ippool/store.go b/pkg/nsx/services/ippool/store.go new file mode 100644 index 000000000..1e1076e3e --- /dev/null +++ b/pkg/nsx/services/ippool/store.go @@ -0,0 +1,144 @@ +package ippool + +import ( + "errors" + + "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model" + "k8s.io/apimachinery/pkg/types" + + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/common" +) + +func keyFunc(obj interface{}) (string, error) { + switch v := obj.(type) { + case model.IpAddressPool: + return *v.Id, nil + case model.IpAddressPoolBlockSubnet: + return *v.Id, nil + case model.GenericPolicyRealizedResource: + return *v.Id, nil + default: + return "", errors.New("keyFunc doesn't support unknown type") + } +} + +func indexFunc(obj interface{}) ([]string, error) { + res := make([]string, 0, 5) + switch v := obj.(type) { + case model.IpAddressPoolBlockSubnet: + return filterTag(v.Tags), nil + case model.IpAddressPool: + return filterTag(v.Tags), nil + default: + return res, errors.New("indexFunc doesn't support unknown type") + } +} + +var filterTag = func(v []model.Tag) []string { + res := make([]string, 0, 5) + 
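+ // Collect every CR UID recorded under the IPPool tag scope; the capacity of 5 is only a preallocation hint.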
for _, tag := range v { + if *tag.Scope == common.TagScopeIPPoolCRUID { + res = append(res, *tag.Tag) + } + } + return res +} + +type IPPoolStore struct { + common.ResourceStore +} + +type IPPoolBlockSubnetStore struct { + common.ResourceStore +} + +func ipPoolAssertion(i interface{}) interface{} { + return i.(model.IpAddressPool) +} + +func ipPoolBlockSubnetAssertion(i interface{}) interface{} { + return i.(model.IpAddressPoolBlockSubnet) +} + +func (ipPoolStore *IPPoolStore) Operate(i interface{}) error { + ipPool := i.(*model.IpAddressPool) + if ipPool.MarkedForDelete != nil && *ipPool.MarkedForDelete { + err := ipPoolStore.Delete(*ipPool) + log.V(1).Info("delete ipPool from store", "ipPool", ipPool) + if err != nil { + return err + } + } else { + err := ipPoolStore.Add(*ipPool) + log.V(1).Info("add ipPool to store", "ipPool", ipPool) + if err != nil { + return err + } + } + return nil +} + +func (ipPoolBlockSubnetStore *IPPoolBlockSubnetStore) Operate(i interface{}) error { + ipPoolBlockSubnets := i.([]*model.IpAddressPoolBlockSubnet) + for _, ipPoolBlockSubnet := range ipPoolBlockSubnets { + if ipPoolBlockSubnet.MarkedForDelete != nil && *ipPoolBlockSubnet.MarkedForDelete { + err := ipPoolBlockSubnetStore.Delete(*ipPoolBlockSubnet) + log.V(1).Info("delete ipPoolBlockSubnet from store", "ipPoolBlockSubnet", ipPoolBlockSubnet) + if err != nil { + return err + } + } else { + err := ipPoolBlockSubnetStore.Add(*ipPoolBlockSubnet) + log.V(1).Info("add ipPoolBlockSubnet to store", "ipPoolBlockSubnet", ipPoolBlockSubnet) + if err != nil { + return err + } + } + } + return nil +} + +func (service *IPPoolService) indexedIPPoolAndIPPoolSubnets(uid types.UID) (*model.IpAddressPool, []*model.IpAddressPoolBlockSubnet, error) { + nsxIPPool, err := service.ipPoolStore.GetByIndex(uid) + if err != nil { + return nil, nil, err + } + nsxIPPoolSubnets, err := service.ipPoolBlockSubnetStore.GetByIndex(uid) + if err != nil { + return nil, nil, err + } + return nsxIPPool, nsxIPPoolSubnets, nil +} + +func (ipPoolBlockSubnetStore *IPPoolBlockSubnetStore) GetByIndex(uid types.UID) ([]*model.IpAddressPoolBlockSubnet, error) { + nsxIPSubnets := make([]*model.IpAddressPoolBlockSubnet, 0) + indexResults, err := ipPoolBlockSubnetStore.ResourceStore.ByIndex(common.TagScopeIPPoolCRUID, string(uid)) + if err != nil { + log.Error(err, "failed to get ip subnets", "UID", string(uid)) + return nil, err + } + if len(indexResults) == 0 { + log.Info("did not get ip subnets with index", "UID", string(uid)) + } + for _, ipSubnet := range indexResults { + t := ipSubnet.(model.IpAddressPoolBlockSubnet) + nsxIPSubnets = append(nsxIPSubnets, &t) + } + return nsxIPSubnets, nil +} + +func (ipPoolStore *IPPoolStore) GetByIndex(uid types.UID) (*model.IpAddressPool, error) { + nsxIPPool := &model.IpAddressPool{} + indexResults, err := ipPoolStore.ResourceStore.ByIndex(common.TagScopeIPPoolCRUID, string(uid)) + if err != nil { + log.Error(err, "failed to get ip pool", "UID", string(uid)) + return nil, err + } + if len(indexResults) > 0 { + t := indexResults[0].(model.IpAddressPool) + nsxIPPool = &t + } else { + log.Info("did not get ip pool with index", "UID", string(uid)) + } + return nsxIPPool, nil +} diff --git a/pkg/nsx/services/ippool/store_test.go b/pkg/nsx/services/ippool/store_test.go new file mode 100644 index 000000000..7492011dc --- /dev/null +++ b/pkg/nsx/services/ippool/store_test.go @@ -0,0 +1,270 @@ +package ippool + +import ( + "fmt" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + 
"github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" + + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/common" +) + +func TestIPPoolBlockSubnetStore_CRUDResource(t *testing.T) { + ipPoolBlockSubnetCacheIndexer := cache.NewIndexer(keyFunc, cache.Indexers{common.TagScopeIPPoolCRUID: indexFunc}) + resourceStore := common.ResourceStore{ + Indexer: ipPoolBlockSubnetCacheIndexer, + BindingType: model.IpAddressPoolBlockSubnetBindingType(), + } + ipPoolBlockSubnetStore := &IPPoolBlockSubnetStore{ResourceStore: resourceStore} + type args struct { + i interface{} + } + tests := []struct { + name string + args args + wantErr assert.ErrorAssertionFunc + }{ + {"1", args{i: []*model.IpAddressPoolBlockSubnet{{Id: String("1")}}}, assert.NoError}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.wantErr(t, ipPoolBlockSubnetStore.Operate(tt.args.i), fmt.Sprintf("Operate(%v)", tt.args.i)) + }) + } +} + +func TestIPPoolStore_GetByIndex(t *testing.T) { + p := &model.IpAddressPool{Id: String("1"), DisplayName: String("1"), + Tags: []model.Tag{{Scope: String(common.TagScopeIPPoolCRUID), + Tag: String("1")}}} + ipPoolStore := &IPPoolStore{ResourceStore: common.ResourceStore{ + Indexer: cache.NewIndexer(keyFunc, cache.Indexers{common.TagScopeIPPoolCRUID: indexFunc}), + BindingType: model.IpAddressPoolBindingType(), + }} + ipPoolStore.Operate(p) + type args struct { + uid types.UID + } + tests := []struct { + name string + args args + want *model.IpAddressPool + wantErr bool + }{ + {"1", args{uid: "1"}, &model.IpAddressPool{Id: String("1"), DisplayName: String("1"), + Tags: []model.Tag{{Scope: String(common.TagScopeIPPoolCRUID), Tag: String("1")}}}, false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ipPoolStore.GetByIndex(tt.args.uid) + if (err != nil) != tt.wantErr { + t.Errorf("indexedIPPool() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("indexedIPPool() got = %v, want %v", got, tt.want) + } + }) + } +} + +func TestIPPoolBlockSubnetStore_GetByIndex(t *testing.T) { + p := []*model.IpAddressPoolBlockSubnet{ + {Id: String("1"), DisplayName: String("1"), + Tags: []model.Tag{{Scope: String(common.TagScopeIPPoolCRUID), + Tag: String("1")}}}} + ipPoolBlockSubnetStore := &IPPoolBlockSubnetStore{ResourceStore: common.ResourceStore{ + Indexer: cache.NewIndexer(keyFunc, cache.Indexers{common.TagScopeIPPoolCRUID: indexFunc}), + BindingType: model.IpAddressPoolBlockSubnetBindingType(), + }} + ipPoolBlockSubnetStore.Operate(p) + type args struct { + uid types.UID + } + tests := []struct { + name string + args args + want []*model.IpAddressPoolBlockSubnet + wantErr bool + }{ + {"1", args{uid: "1"}, []*model.IpAddressPoolBlockSubnet{{Id: String("1"), DisplayName: String("1"), Tags: []model.Tag{{Scope: String(common.TagScopeIPPoolCRUID), Tag: String("1")}}}}, false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ipPoolBlockSubnetStore.GetByIndex(tt.args.uid) + if (err != nil) != tt.wantErr { + t.Errorf("indexedIPPoolSubnets() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("indexedIPPoolSubnets() got = %v, want %v", got, tt.want) + } + }) + } +} + +func TestIPPoolStore_CRUDResource(t *testing.T) { + ipPoolCacheIndexer := cache.NewIndexer(keyFunc, cache.Indexers{common.TagScopeIPPoolCRUID: indexFunc}) + resourceStore := 
common.ResourceStore{ + Indexer: ipPoolCacheIndexer, + BindingType: model.IpAddressPoolBindingType(), + } + ipPoolStore := &IPPoolStore{ResourceStore: resourceStore} + type args struct { + i interface{} + } + tests := []struct { + name string + args args + wantErr assert.ErrorAssertionFunc + }{ + {"1", args{i: &model.IpAddressPool{Id: String("1")}}, assert.NoError}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.wantErr(t, ipPoolStore.Operate(tt.args.i), fmt.Sprintf("Operate(%v)", tt.args.i)) + }) + } +} + +func Test_indexFunc(t *testing.T) { + mId, mTag, mScope := "11111", "11111", common.TagScopeIPPoolCRUID + m := model.IpAddressPool{ + Id: &mId, + Tags: []model.Tag{{Tag: &mTag, Scope: &mScope}}, + } + r := model.IpAddressPoolBlockSubnet{ + Id: &mId, + Tags: []model.Tag{{Tag: &mTag, Scope: &mScope}}, + } + t.Run("1", func(t *testing.T) { + got, _ := indexFunc(m) + if !reflect.DeepEqual(got, []string{"11111"}) { + t.Errorf("indexFunc() = %v, want %v", got, model.Tag{Tag: &mTag, Scope: &mScope}) + } + }) + t.Run("2", func(t *testing.T) { + got, _ := indexFunc(r) + if !reflect.DeepEqual(got, []string{"11111"}) { + t.Errorf("indexFunc() = %v, want %v", got, model.Tag{Tag: &mTag, Scope: &mScope}) + } + }) +} + +func Test_ipPoolAssertion(t *testing.T) { + mId, mTag, mScope := "11111", "11111", common.TagScopeIPPoolCRUID + m := model.IpAddressPool{ + Id: &mId, + Tags: []model.Tag{{Tag: &mTag, Scope: &mScope}}, + } + type args struct { + i interface{} + } + tests := []struct { + name string + args args + want interface{} + }{ + {"1", args{i: m}, m}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ipPoolAssertion(tt.args.i); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ipPoolAssertion() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_ipPoolBlockSubnetAssertion(t *testing.T) { + mId, mTag, mScope := "11111", "11111", common.TagScopeIPPoolCRUID + r := model.IpAddressPoolBlockSubnet{ + Id: &mId, + Tags: []model.Tag{{Tag: &mTag, Scope: &mScope}}, + } + type args struct { + i interface{} + } + tests := []struct { + name string + args args + want interface{} + }{ + {"1", args{i: r}, r}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ipPoolBlockSubnetAssertion(tt.args.i); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ipPoolBlockSubnetAssertion() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_keyFunc(t *testing.T) { + Id := "11111" + g := model.IpAddressPool{Id: &Id} + s := model.IpAddressPoolBlockSubnet{Id: &Id} + t.Run("1", func(t *testing.T) { + got, _ := keyFunc(s) + if got != "11111" { + t.Errorf("keyFunc() = %v, want %v", got, "11111") + } + }) + t.Run("2", func(t *testing.T) { + got, _ := keyFunc(g) + if got != "11111" { + t.Errorf("keyFunc() = %v, want %v", got, "11111") + } + }) +} + +func TestIPPoolService_indexedIPPoolAndIPPoolSubnets1(t *testing.T) { + ipPoolService := fakeService() + p := &model.IpAddressPool{Id: String("1"), DisplayName: String("1"), + Tags: []model.Tag{{Scope: String(common.TagScopeIPPoolCRUID), + Tag: String("1")}}} + ipPoolService.ipPoolStore.Operate(p) + + iapbs := []*model.IpAddressPoolBlockSubnet{ + {Id: String("1"), DisplayName: String("1"), + Tags: []model.Tag{{Scope: String(common.TagScopeIPPoolCRUID), + Tag: String("1")}}}} + ipPoolService.ipPoolBlockSubnetStore.Operate(iapbs) + + type args struct { + uid types.UID + } + tests := []struct { + name string + args args + want []*model.IpAddressPoolBlockSubnet + want2 *model.IpAddressPool + 
wantErr bool + }{ + { + "1", + args{uid: "1"}, + []*model.IpAddressPoolBlockSubnet{{Id: String("1"), DisplayName: String("1"), + Tags: []model.Tag{{Scope: String(common.TagScopeIPPoolCRUID), Tag: String("1")}}}}, + &model.IpAddressPool{Id: String("1"), DisplayName: String("1"), + Tags: []model.Tag{{Scope: String(common.TagScopeIPPoolCRUID), Tag: String("1")}}}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, got2, _ := ipPoolService.indexedIPPoolAndIPPoolSubnets(tt.args.uid) + if !reflect.DeepEqual(got, tt.want2) { + t.Errorf("indexedIPPool() got = %v, want %v", got, tt.want2) + } + if !reflect.DeepEqual(got2, tt.want) { + t.Errorf("indexedIPPoolSubnets() got = %v, want %v", got2, tt.want) + } + }) + } +} diff --git a/pkg/nsx/services/ippool/wrap.go b/pkg/nsx/services/ippool/wrap.go new file mode 100644 index 000000000..62cc2d62e --- /dev/null +++ b/pkg/nsx/services/ippool/wrap.go @@ -0,0 +1,80 @@ +package ippool + +import ( + "github.com/openlyinc/pointy" + "github.com/vmware/vsphere-automation-sdk-go/runtime/data" + "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model" + + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/common" +) + +func (service *IPPoolService) WrapHierarchyIPPool(iap *model.IpAddressPool, iapbs []*model.IpAddressPoolBlockSubnet) (*model.Infra, error) { + IPSubnetsChildren, err := service.wrapIPSubnets(iapbs) + if err != nil { + return nil, err + } + iap.Children = IPSubnetsChildren + IPPoolChildren, err := service.wrapIPPool(iap) + if err != nil { + return nil, err + } + var infraChildren []*data.StructValue + infraChildren = append(infraChildren, IPPoolChildren...) + + infra, err := service.wrapInfra(infraChildren) + if err != nil { + return nil, err + } + return infra, nil +} + +func (service *IPPoolService) wrapInfra(children []*data.StructValue) (*model.Infra, error) { + // Infra is the outermost layer of the hierarchical IPPool payload. + // It doesn't need an ID field.
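+ // The payload assembled by WrapHierarchyIPPool nests as Infra -> ChildIpAddressPool (wrapping the IpAddressPool) -> ChildIpAddressPoolSubnet (wrapping each IpAddressPoolBlockSubnet), with each level converted to a data.StructValue by ConvertToVapi before it is attached to its parent's Children.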
+ infraType := "Infra" + infraObj := model.Infra{ + Children: children, + ResourceType: &infraType, + } + return &infraObj, nil +} + +func (service *IPPoolService) wrapIPSubnets(IPSubnets []*model.IpAddressPoolBlockSubnet) ([]*data.StructValue, error) { + var IPSubnetsChildren []*data.StructValue + for _, IPSubnet := range IPSubnets { + IPSubnet.ResourceType = common.ResourceTypeIPPoolBlockSubnet + dataValue, errs := NewConverter().ConvertToVapi(IPSubnet, model.IpAddressPoolBlockSubnetBindingType()) + if len(errs) > 0 { + return nil, errs[0] + } + childIPSubnet := model.ChildIpAddressPoolSubnet{ + ResourceType: "ChildIpAddressPoolSubnet", + Id: IPSubnet.Id, + MarkedForDelete: IPSubnet.MarkedForDelete, + IpAddressPoolSubnet: dataValue.(*data.StructValue), + } + dataValue, errs = NewConverter().ConvertToVapi(childIPSubnet, model.ChildIpAddressPoolSubnetBindingType()) + if len(errs) > 0 { + return nil, errs[0] + } + IPSubnetsChildren = append(IPSubnetsChildren, dataValue.(*data.StructValue)) + } + return IPSubnetsChildren, nil +} + +func (service *IPPoolService) wrapIPPool(iap *model.IpAddressPool) ([]*data.StructValue, error) { + var IPPoolChildren []*data.StructValue + iap.ResourceType = pointy.String(common.ResourceTypeIPPool) + childIPool := model.ChildIpAddressPool{ + Id: iap.Id, + MarkedForDelete: iap.MarkedForDelete, + ResourceType: "ChildIpAddressPool", + IpAddressPool: iap, + } + dataValue, errs := NewConverter().ConvertToVapi(childIPool, model.ChildIpAddressPoolBindingType()) + if len(errs) > 0 { + return nil, errs[0] + } + IPPoolChildren = append(IPPoolChildren, dataValue.(*data.StructValue)) + return IPPoolChildren, nil +} diff --git a/pkg/nsx/services/ippool/wrap_test.go b/pkg/nsx/services/ippool/wrap_test.go new file mode 100644 index 000000000..1e38bfed6 --- /dev/null +++ b/pkg/nsx/services/ippool/wrap_test.go @@ -0,0 +1,41 @@ +package ippool + +import ( + "testing" + + "github.com/vmware/vsphere-automation-sdk-go/runtime/bindings" + "github.com/vmware/vsphere-automation-sdk-go/runtime/data" + "github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model" + + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/common" +) + +func TestIPPoolService_WrapHierarchyIPPool(t *testing.T) { + Converter := bindings.NewTypeConverter() + Converter.SetMode(bindings.REST) + service := fakeService() + iapbs := []*model.IpAddressPoolBlockSubnet{ + &model.IpAddressPoolBlockSubnet{ + Id: String("1"), DisplayName: String("1"), + Tags: []model.Tag{{Scope: String(common.TagScopeIPPoolCRUID), Tag: String("1")}}}} + iap := &model.IpAddressPool{Id: String("1"), DisplayName: String("1"), + Tags: []model.Tag{{Scope: String(common.TagScopeIPPoolCRUID), + Tag: String("1")}}} + + tests := []struct { + name string + want []*data.StructValue + wantErr bool + }{ + {"1", nil, false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := service.WrapHierarchyIPPool(iap, iapbs) + if (err != nil) != tt.wantErr { + t.Errorf("WrapHierarchyIPPool() error = %v, wantErr %v", err, tt.wantErr) + return + } + }) + } +} diff --git a/pkg/util/utils.go b/pkg/util/utils.go index 888c9452a..3068063be 100644 --- a/pkg/util/utils.go +++ b/pkg/util/utils.go @@ -94,6 +94,11 @@ func ToUpper(obj interface{}) string { return strings.ToUpper(str) } +func CalculateSubnetSize(mask int) int64 { + size := 1 << uint(32-mask) + return int64(size) +} + func IsSystemNamespace(c client.Client, ns string, obj *v1.Namespace) (bool, error) { nsObj := &v1.Namespace{} if obj != nil { diff --git 
a/pkg/util/utils_test.go b/pkg/util/utils_test.go index 5c7099cf0..2148446c3 100644 --- a/pkg/util/utils_test.go +++ b/pkg/util/utils_test.go @@ -210,6 +210,24 @@ func TestRemoveDuplicateStr(t *testing.T) { } } +func TestCalculateSubnetSize(t *testing.T) { + type args struct { + mask int + } + tests := []struct { + name string + args args + want int64 + }{ + {"1", args{24}, 256}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, CalculateSubnetSize(tt.args.mask), "CalculateSubnetSize(%v)", tt.args.mask) + }) + } +} + func TestNormalizeId(t *testing.T) { type args struct { name string diff --git a/test/e2e/framework.go b/test/e2e/framework.go index c76599d35..9d9a5a5b1 100644 --- a/test/e2e/framework.go +++ b/test/e2e/framework.go @@ -355,11 +355,11 @@ func (data *TestData) deletePodAndWait(timeout time.Duration, name string, ns st type PodCondition func(*corev1.Pod) (bool, error) -// waitForSecurityPolicyReady polls the K8s apiServer until the specified SecurityPolicy is in the "True" state (or until +// waitForCRReadyOrDeleted polls the K8s apiServer until the specified CR is in the "True" state (or until // the provided timeout expires). -func (data *TestData) waitForSecurityPolicyReadyOrDeleted(timeout time.Duration, namespace string, name string, status Status) error { +func (data *TestData) waitForCRReadyOrDeleted(timeout time.Duration, cr string, namespace string, name string, status Status) error { err := wait.Poll(1*time.Second, timeout, func() (bool, error) { - cmd := fmt.Sprintf("kubectl get securitypolicy %s -n %s -o jsonpath='{.status.conditions[?(@.type==\"Ready\")].status}'", name, namespace) + cmd := fmt.Sprintf("kubectl get %s %s -n %s -o jsonpath='{.status.conditions[?(@.type==\"Ready\")].status}'", cr, name, namespace) log.Printf("%s", cmd) rc, stdout, _, err := RunCommandOnNode(clusterInfo.masterNodeName, cmd) if err != nil || rc != 0 { @@ -654,7 +654,7 @@ if len(response.Results) == 0 { exist = false } - //log.Printf("QueryParam: %s Result: %t", queryParam, exist) + log.Printf("QueryParam: %s exist: %t", queryParam, exist) if exist != shouldExist { return false, nil } diff --git a/test/e2e/manifest/testIPPool/ippool.yaml b/test/e2e/manifest/testIPPool/ippool.yaml new file mode 100644 index 000000000..806f0766c --- /dev/null +++ b/test/e2e/manifest/testIPPool/ippool.yaml @@ -0,0 +1,16 @@ +apiVersion: nsx.vmware.com/v1alpha2 +kind: IPPool +metadata: + name: guestcluster-ippool-2 +spec: + type: public + subnets: + - ipFamily: IPv4 + name: guestcluster1-workers-a + prefixLength: 26 + - ipFamily: IPv4 + name: guestcluster1-workers-b + prefixLength: 26 + - ipFamily: IPv4 + name: guestcluster1-workers-c + prefixLength: 26 \ No newline at end of file diff --git a/test/e2e/manifest/testIPPool/ippool_delete.yaml b/test/e2e/manifest/testIPPool/ippool_delete.yaml new file mode 100644 index 000000000..242c778db --- /dev/null +++ b/test/e2e/manifest/testIPPool/ippool_delete.yaml @@ -0,0 +1,10 @@ +apiVersion: nsx.vmware.com/v1alpha2 +kind: IPPool +metadata: + name: guestcluster-ippool-2 +spec: + type: public + subnets: + - ipFamily: IPv4 + name: guestcluster1-workers-a + prefixLength: 26 \ No newline at end of file diff --git a/test/e2e/manifest/testIPPool/ippool_subnet_nil.yaml b/test/e2e/manifest/testIPPool/ippool_subnet_nil.yaml new file mode 100644 index 000000000..f2836ca4d --- /dev/null +++ b/test/e2e/manifest/testIPPool/ippool_subnet_nil.yaml @@
-0,0 +1,6 @@ +apiVersion: nsx.vmware.com/v1alpha2 +kind: IPPool +metadata: + name: guestcluster-ippool-2 +spec: + type: public \ No newline at end of file diff --git a/test/e2e/nsx_ippool_test.go b/test/e2e/nsx_ippool_test.go new file mode 100644 index 000000000..f6f08c19c --- /dev/null +++ b/test/e2e/nsx_ippool_test.go @@ -0,0 +1,152 @@ +// This file is for e2e ippool tests. + +package e2e + +import ( + "path/filepath" + "testing" + + "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/common" +) + +const ( + IPPool = "ippool" +) + +// TestIPPoolBasic verifies that IPPool subnets are successfully realized from the backing IP block. +func TestIPPoolBasic(t *testing.T) { + ns := "sc-a" + name := "guestcluster-ippool-2" + subnet_name_1 := "guestcluster1-workers-a" + subnet_name_2 := "guestcluster1-workers-b" + subnet_name_3 := "guestcluster1-workers-c" + setupTest(t, ns) + defer teardownTest(t, ns) + + // Create ippool + ippoolPath, _ := filepath.Abs("./manifest/testIPPool/ippool.yaml") + _ = applyYAML(ippoolPath, ns) + defer deleteYAML(ippoolPath, ns) + + // Check ippool status + err := testData.waitForCRReadyOrDeleted(defaultTimeout, IPPool, ns, name, Ready) + assert_nil(t, err, "Error when waiting for IPPool %s", name) + + // Check nsx-t resource existing + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPool, name, true) + assert_nil(t, err) + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPoolBlockSubnet, subnet_name_1, true) + assert_nil(t, err) + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPoolBlockSubnet, subnet_name_2, true) + assert_nil(t, err) + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPoolBlockSubnet, subnet_name_3, true) + assert_nil(t, err) + + // Delete ippool + _ = deleteYAML(ippoolPath, ns) + + // Check nsx-t resource not existing + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPool, name, false) + assert_nil(t, err) + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPoolBlockSubnet, subnet_name_1, false) + assert_nil(t, err) + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPoolBlockSubnet, subnet_name_2, false) + assert_nil(t, err) + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPoolBlockSubnet, subnet_name_3, false) + assert_nil(t, err) +} + +// TestIPPoolAddDeleteSubnet verifies that adding and deleting subnets in an existing IPPool behaves as expected.
+func TestIPPoolAddDeleteSubnet(t *testing.T) { + ns := "sc-a" + name := "guestcluster-ippool-2" + subnet_name_1 := "guestcluster1-workers-a" + subnet_name_2 := "guestcluster1-workers-b" + subnet_name_3 := "guestcluster1-workers-c" + setupTest(t, ns) + defer teardownTest(t, ns) + + // Create ippool + ippoolPath, _ := filepath.Abs("./manifest/testIPPool/ippool.yaml") + _ = applyYAML(ippoolPath, ns) + defer deleteYAML(ippoolPath, ns) + + // Check ippool status + err := testData.waitForCRReadyOrDeleted(defaultTimeout, IPPool, ns, name, Ready) + assert_nil(t, err, "Error when waiting for IPPool %s", name) + + // Check nsx-t resource existing + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPool, name, true) + assert_nil(t, err) + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPoolBlockSubnet, subnet_name_1, true) + assert_nil(t, err) + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPoolBlockSubnet, subnet_name_2, true) + assert_nil(t, err) + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPoolBlockSubnet, subnet_name_3, true) + assert_nil(t, err) + + // Delete subnet_name_2 and subnet_name_3 + ippoolDeletePath, _ := filepath.Abs("./manifest/testIPPool/ippool_delete.yaml") + _ = applyYAML(ippoolDeletePath, ns) + + // Check ippool status + err = testData.waitForCRReadyOrDeleted(defaultTimeout, IPPool, ns, name, Ready) + assert_nil(t, err, "Error when waiting for IPPool %s", name) + + // Check nsx-t resource existing and not existing + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPool, name, true) + assert_nil(t, err) + // Still existing + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPoolBlockSubnet, subnet_name_1, true) + assert_nil(t, err) + // Deleted + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPoolBlockSubnet, subnet_name_2, false) + assert_nil(t, err) + // Deleted + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPoolBlockSubnet, subnet_name_3, false) + assert_nil(t, err) + + // Add back subnet_name_2 and subnet_name_3 + _ = applyYAML(ippoolPath, ns) + // Check ippool status + err = testData.waitForCRReadyOrDeleted(defaultTimeout, IPPool, ns, name, Ready) + assert_nil(t, err, "Error when waiting for IPPool %s", name) + + // Check nsx-t resource existing + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPool, name, true) + assert_nil(t, err) + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPoolBlockSubnet, subnet_name_1, true) + assert_nil(t, err) + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPoolBlockSubnet, subnet_name_2, true) + assert_nil(t, err) + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPoolBlockSubnet, subnet_name_3, true) + assert_nil(t, err) +} + +// TestIPPoolSubnetsNil verifies that an IPPool whose subnets field is nil is supported. +func TestIPPoolSubnetsNil(t *testing.T) { + ns := "sc-a" + name := "guestcluster-ippool-2" + setupTest(t, ns) + defer teardownTest(t, ns) + + // Create ippool + ippoolPath, _ := filepath.Abs("./manifest/testIPPool/ippool_subnet_nil.yaml") + _ = applyYAML(ippoolPath, ns) + defer deleteYAML(ippoolPath, ns) + + // Check ippool status + err := testData.waitForCRReadyOrDeleted(defaultTimeout, IPPool, ns, name, Ready) + assert_nil(t, err, "Error when waiting for IPPool %s", name) + + // Check nsx-t resource existing + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPool, name, true) +
assert_nil(t, err) + + // Delete ippool + _ = deleteYAML(ippoolPath, ns) + + // Check nsx-t resource not existing + err = testData.waitForResourceExistOrNot(ns, common.ResourceTypeIPPool, name, false) + assert_nil(t, err) +} diff --git a/test/e2e/nsx_security_policy_test.go b/test/e2e/nsx_security_policy_test.go index f1a85951e..446b7588a 100644 --- a/test/e2e/nsx_security_policy_test.go +++ b/test/e2e/nsx_security_policy_test.go @@ -24,6 +24,10 @@ import ( "github.com/vmware-tanzu/nsx-operator/pkg/nsx/services/common" ) +const ( + SP = "securitypolicy" +) + // TestSecurityPolicyBasicTraffic verifies that the basic traffic of security policy. // This is the very basic, blocking all in and out traffic between pods should take effect. func TestSecurityPolicyBasicTraffic(t *testing.T) { @@ -56,7 +60,7 @@ func TestSecurityPolicyBasicTraffic(t *testing.T) { nsIsolationPath, _ := filepath.Abs("./manifest/testSecurityPolicy/ns-isolation-policy.yaml") _ = applyYAML(nsIsolationPath, ns) defer deleteYAML(nsIsolationPath, ns) - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, ns, securityPolicyName, Ready) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, ns, securityPolicyName, Ready) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource existing @@ -71,7 +75,7 @@ func TestSecurityPolicyBasicTraffic(t *testing.T) { // Delete security policy _ = deleteYAML(nsIsolationPath, ns) - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, ns, securityPolicyName, Deleted) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, ns, securityPolicyName, Deleted) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource not existing @@ -100,7 +104,7 @@ func TestSecurityPolicyAddDeleteRule(t *testing.T) { nsIsolationPath, _ := filepath.Abs("./manifest/testSecurityPolicy/ns-isolation-policy.yaml") _ = applyYAML(nsIsolationPath, ns) defer deleteYAML(nsIsolationPath, ns) - err := testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, ns, securityPolicyName, Ready) + err := testData.waitForCRReadyOrDeleted(defaultTimeout, SP, ns, securityPolicyName, Ready) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource existing @@ -115,7 +119,7 @@ func TestSecurityPolicyAddDeleteRule(t *testing.T) { nsIsolationPath, _ = filepath.Abs("./manifest/testSecurityPolicy/ns-isolation-policy-1.yaml") _ = applyYAML(nsIsolationPath, ns) defer deleteYAML(nsIsolationPath, ns) - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, ns, securityPolicyName, Ready) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, ns, securityPolicyName, Ready) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource existing @@ -126,7 +130,7 @@ func TestSecurityPolicyAddDeleteRule(t *testing.T) { // Delete security policy _ = deleteYAML(nsIsolationPath, ns) - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, ns, securityPolicyName, Deleted) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, ns, securityPolicyName, Deleted) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource not existing @@ -172,7 +176,7 @@ func TestSecurityPolicyMatchExpression(t *testing.T) { nsIsolationPath, _ := filepath.Abs("./manifest/testSecurityPolicy/match-expression.yaml") _ = applyYAML(nsIsolationPath, ns) defer 
deleteYAML(nsIsolationPath, ns) - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, ns, securityPolicyName, Ready) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, ns, securityPolicyName, Ready) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource existing @@ -189,7 +193,7 @@ func TestSecurityPolicyMatchExpression(t *testing.T) { // Delete security policy _ = deleteYAML(nsIsolationPath, ns) - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, ns, securityPolicyName, Deleted) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, ns, securityPolicyName, Deleted) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource not existing @@ -235,7 +239,7 @@ func TestSecurityPolicyNamedPort0(t *testing.T) { psb, _, err := testData.deploymentWaitForIPsOrNames(defaultTimeout, nsWeb, labelWeb) t.Logf("Pods are %v", psb) assert_nil(t, err, "Error when waiting for IP for Pod %s", webA) - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, nsWeb, securityPolicyName, Ready) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, nsWeb, securityPolicyName, Ready) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource existing @@ -252,7 +256,7 @@ func TestSecurityPolicyNamedPort0(t *testing.T) { // Delete all _ = deleteYAML(podPath, "") - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, nsWeb, securityPolicyName, Deleted) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, nsWeb, securityPolicyName, Deleted) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource not existing @@ -292,7 +296,7 @@ func TestSecurityPolicyNamedPort1(t *testing.T) { psb, _, err := testData.deploymentWaitForIPsOrNames(defaultTimeout, nsWeb, labelWeb) t.Logf("Pods are %v", psb) assert_nil(t, err, "Error when waiting for IP for Pod %s", webA) - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, nsWeb, securityPolicyName, Ready) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, nsWeb, securityPolicyName, Ready) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource existing @@ -309,7 +313,7 @@ func TestSecurityPolicyNamedPort1(t *testing.T) { // Delete all _ = deleteYAML(podPath, "") - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, nsWeb, securityPolicyName, Deleted) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, nsWeb, securityPolicyName, Deleted) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource not existing @@ -349,7 +353,7 @@ func TestSecurityPolicyNamedPort2(t *testing.T) { psb, _, err := testData.deploymentWaitForIPsOrNames(defaultTimeout, nsWeb, labelWeb) t.Logf("Pods are %v", psb) assert_nil(t, err, "Error when waiting for IP for Pod %s", webA) - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, nsWeb, securityPolicyName, Ready) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, nsWeb, securityPolicyName, Ready) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource existing @@ -371,7 +375,7 @@ func TestSecurityPolicyNamedPort2(t *testing.T) { // Delete all _ = deleteYAML(podPath, "") - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, nsWeb, 
securityPolicyName, Deleted) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, nsWeb, securityPolicyName, Deleted) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource not existing @@ -417,7 +421,7 @@ func TestSecurityPolicyNamedPort3(t *testing.T) { _, psb, err := testData.deploymentWaitForIPsOrNames(defaultTimeout, nsWeb, labelWeb) t.Logf("Pods are %v", psb) assert_nil(t, err, "Error when waiting for IP for Pod ns %s", nsWeb) - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, nsWeb, securityPolicyName, Ready) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, nsWeb, securityPolicyName, Ready) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource existing @@ -432,7 +436,7 @@ func TestSecurityPolicyNamedPort3(t *testing.T) { // Delete all _ = deleteYAML(podPath, "") - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, nsWeb, securityPolicyName, Deleted) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, nsWeb, securityPolicyName, Deleted) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource not existing @@ -478,7 +482,7 @@ func TestSecurityPolicyNamedPort4(t *testing.T) { _, psb, err := testData.deploymentWaitForIPsOrNames(defaultTimeout, nsWeb, labelWeb) t.Logf("Pods are %v", psb) assert_nil(t, err, "Error when waiting for IP for Pod ns %s", nsWeb) - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, nsWeb, securityPolicyName, Ready) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, nsWeb, securityPolicyName, Ready) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource existing @@ -493,7 +497,7 @@ func TestSecurityPolicyNamedPort4(t *testing.T) { // Delete all _ = deleteYAML(podPath, "") - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, nsWeb, securityPolicyName, Deleted) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, nsWeb, securityPolicyName, Deleted) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource not existing @@ -550,7 +554,7 @@ func TestSecurityPolicyNamedPort5(t *testing.T) { _, psb, err := testData.deploymentWaitForIPsOrNames(defaultTimeout, nsWeb, labelWeb) t.Logf("Pods are %v", psb) assert_nil(t, err, "Error when waiting for IP for Pod ns %s", nsWeb) - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, nsWeb, securityPolicyName, Ready) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, nsWeb, securityPolicyName, Ready) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource existing @@ -569,7 +573,7 @@ func TestSecurityPolicyNamedPort5(t *testing.T) { cmd = fmt.Sprintf("kubectl label ns %s %s=%s --overwrite", nsDB2, "role", "db") _, err = runCommand(cmd) assert_nil(t, err, "Error when running command %s", cmd) - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, nsWeb, securityPolicyName, Ready) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, nsWeb, securityPolicyName, Ready) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) err = testData.waitForResourceExistOrNot(nsWeb, common.ResourceTypeRule, ruleName1, true) assert_nil(t, err) @@ -582,7 +586,7 @@ func TestSecurityPolicyNamedPort5(t *testing.T) { // Delete all _ = deleteYAML(podPath, "") - 
err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, nsWeb, securityPolicyName, Deleted) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, nsWeb, securityPolicyName, Deleted) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource not existing @@ -646,7 +650,7 @@ func TestSecurityPolicyNamedPort6(t *testing.T) { _, psb, err := testData.deploymentWaitForIPsOrNames(defaultTimeout, nsWeb, labelWeb) t.Logf("Pods are %v", psb) assert_nil(t, err, "Error when waiting for IP for Pod ns %s", nsWeb) - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, nsWeb, securityPolicyName, Ready) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, nsWeb, securityPolicyName, Ready) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource existing @@ -665,7 +669,7 @@ func TestSecurityPolicyNamedPort6(t *testing.T) { // Delete all _ = deleteYAML(podPath, "") - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, nsWeb, securityPolicyName, Deleted) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, nsWeb, securityPolicyName, Deleted) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource not existing @@ -729,7 +733,7 @@ func TestSecurityPolicyNamedPort7(t *testing.T) { _, psb, err := testData.deploymentWaitForIPsOrNames(defaultTimeout, nsWeb, labelWeb) t.Logf("Pods are %v", psb) assert_nil(t, err, "Error when waiting for IP for Pod ns %s", nsWeb) - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, nsWeb, securityPolicyName, Ready) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, nsWeb, securityPolicyName, Ready) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource existing @@ -748,7 +752,7 @@ func TestSecurityPolicyNamedPort7(t *testing.T) { // Delete all _ = deleteYAML(podPath, "") - err = testData.waitForSecurityPolicyReadyOrDeleted(defaultTimeout, nsWeb, securityPolicyName, Deleted) + err = testData.waitForCRReadyOrDeleted(defaultTimeout, SP, nsWeb, securityPolicyName, Deleted) assert_nil(t, err, "Error when waiting for Security Policy %s", securityPolicyName) // Check nsx-t resource not existing