diff --git a/.golangci.yaml b/.golangci.yaml index e5ef0b5316..05f96b68dd 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -57,3 +57,4 @@ linters: - goerr113 # TODO: Need to introduce error definition and bring this back - goheader # TODO: Introduce back post fixing linter errors - gci + - interfacer # interfacer linter is archived and deprecated (https://github.com/mvdan/interfacer) diff --git a/api/v1alpha1/drpolicy_types.go b/api/v1alpha1/drpolicy_types.go index f727de213c..67cb51927d 100644 --- a/api/v1alpha1/drpolicy_types.go +++ b/api/v1alpha1/drpolicy_types.go @@ -25,8 +25,17 @@ type DRPolicySpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "make" to regenerate code after modifying this file + // schedule is an interval + SchedulingInterval string `json:"schedulingInterval"` + // Members of the DRPolicy set ClusterNames []string `json:"clusterNames"` + + // Label selector to identify all the VolumeReplicationClasses. + // This selector is assumed to be the same for all subscriptions that + // need DR protection. It will be passed in to the VRG when it is created + //+optional + ReplicationClassSelector metav1.LabelSelector `json:"replicationClassSelector,omitempty"` } // DRPolicyStatus defines the observed state of DRPolicy diff --git a/api/v1alpha1/volumereplicationgroup_types.go b/api/v1alpha1/volumereplicationgroup_types.go index fdf78636db..c684c0402a 100644 --- a/api/v1alpha1/volumereplicationgroup_types.go +++ b/api/v1alpha1/volumereplicationgroup_types.go @@ -20,6 +20,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// Important: Run "make" to regenerate code after modifying this file + // ReplicationState represents the replication operations to be performed on the volume type ReplicationState string @@ -47,9 +49,9 @@ const ( // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. -// VolumeReplicationGroup (VRG) spec declares the desired replication class -// and replication state of all the PVCs identified via the given PVC label -// selector. For each such PVC, the VRG will do the following: +// VolumeReplicationGroup (VRG) spec declares the desired schedule for data +// replication and replication state of all PVCs identified via the given +// PVC label selector. For each such PVC, the VRG will do the following: // - Create a VolumeReplication (VR) CR to enable storage level replication // of volume data and set the desired replication state (primary, secondary, // etc). @@ -60,15 +62,18 @@ const ( // - Manage the lifecycle of VR CR and S3 data according to CUD operations on // the PVC and the VRG CR. type VolumeReplicationGroupSpec struct { - // Important: Run "make" to regenerate code after modifying this file - // Label selector to identify all the PVCs that are in this group // that needs to be replicated to the peer cluster. PVCSelector metav1.LabelSelector `json:"pvcSelector"` - // ReplicationClass of all volumes in this replication group; - // this value is propagated to children VolumeReplication CRs - VolumeReplicationClass string `json:"volumeReplicationClass"` + // Label selector to identify the VolumeReplicationClass resources + // that are scanned to select an appropriate VolumeReplicationClass + // for the VolumeReplication resource. 
+ //+optional + ReplicationClassSelector metav1.LabelSelector `json:"replicationClassSelector,omitempty"` + + // schedule is an interval + SchedulingInterval string `json:"schedulingInterval"` // Desired state of all volumes [primary or secondary] in this replication group; // this value is propagated to children VolumeReplication CRs @@ -85,7 +90,6 @@ type VolumeReplicationGroupSpec struct { // is replicated by a different mechanism; this mode of operation may be // referred to as backup-less mode. S3Endpoint string `json:"s3Endpoint,omitempty"` - // S3 Region: https://docs.aws.amazon.com/general/latest/gr/rande.html S3Region string `json:"s3Region,omitempty"` @@ -109,7 +113,6 @@ type ProtectedPVCMap map[string]*ProtectedPVC // VolumeReplicationGroupStatus defines the observed state of VolumeReplicationGroup // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster -// Important: Run "make" to regenerate code after modifying this file type VolumeReplicationGroupStatus struct { State State `json:"state,omitempty"` diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index cb026ebc38..9a737f5281 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -190,6 +190,7 @@ func (in *DRPolicySpec) DeepCopyInto(out *DRPolicySpec) { *out = make([]string, len(*in)) copy(*out, *in) } + in.ReplicationClassSelector.DeepCopyInto(&out.ReplicationClassSelector) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRPolicySpec. @@ -377,6 +378,7 @@ func (in *VolumeReplicationGroupList) DeepCopyObject() runtime.Object { func (in *VolumeReplicationGroupSpec) DeepCopyInto(out *VolumeReplicationGroupSpec) { *out = *in in.PVCSelector.DeepCopyInto(&out.PVCSelector) + in.ReplicationClassSelector.DeepCopyInto(&out.ReplicationClassSelector) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeReplicationGroupSpec. diff --git a/config/crd/bases/ramendr.openshift.io_drpolicies.yaml b/config/crd/bases/ramendr.openshift.io_drpolicies.yaml index 8b404e02af..db12ffcab0 100644 --- a/config/crd/bases/ramendr.openshift.io_drpolicies.yaml +++ b/config/crd/bases/ramendr.openshift.io_drpolicies.yaml @@ -41,8 +41,58 @@ spec: items: type: string type: array + replicationClassSelector: + description: Label selector to identify all the VolumeReplicationClasses. + This selector is assumed to be the same for all subscriptions that + need DR protection. It will be passed in to the VRG when it is created + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + schedulingInterval: + description: schedule is an interval + type: string required: - clusterNames + - schedulingInterval type: object status: description: 'DRPolicyStatus defines the observed state of DRPolicy INSERT diff --git a/config/crd/bases/ramendr.openshift.io_volumereplicationgroups.yaml b/config/crd/bases/ramendr.openshift.io_volumereplicationgroups.yaml index 1c7ab3f4d3..39ff62af87 100644 --- a/config/crd/bases/ramendr.openshift.io_volumereplicationgroups.yaml +++ b/config/crd/bases/ramendr.openshift.io_volumereplicationgroups.yaml @@ -37,17 +37,17 @@ spec: metadata: type: object spec: - description: "VolumeReplicationGroup (VRG) spec declares the desired replication - class and replication state of all the PVCs identified via the given - PVC label selector. For each such PVC, the VRG will do the following: - \t- Create a VolumeReplication (VR) CR to enable storage level replication - \t of volume data and set the desired replication state (primary, secondary, - \ etc). - Take the corresponding PV metadata in Kubernetes etcd and - deposit it in the S3 store. The url, access key and access id required - to access the S3 store is specified via environment variables of - the VRG operator POD, which is obtained from a secret resource. - - Manage the lifecycle of VR CR and S3 data according to CUD operations - on the PVC and the VRG CR." + description: "VolumeReplicationGroup (VRG) spec declares the desired schedule + for data replication and replication state of all PVCs identified via + the given PVC label selector. For each such PVC, the VRG will do the + following: \t- Create a VolumeReplication (VR) CR to enable storage + level replication \t of volume data and set the desired replication + state (primary, secondary, etc). - Take the corresponding PV metadata + in Kubernetes etcd and deposit it in the S3 store. The url, access + key and access id required to access the S3 store is specified via + environment variables of the VRG operator POD, which is obtained + from a secret resource. - Manage the lifecycle of VR CR and S3 data + according to CUD operations on the PVC and the VRG CR." properties: pvcSelector: description: Label selector to identify all the PVCs that are in this @@ -94,6 +94,52 @@ spec: are ANDed. type: object type: object + replicationClassSelector: + description: Label selector to identify the VolumeReplicationClass + resources that are scanned to select an appropriate VolumeReplicationClass + for the VolumeReplication resource. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object replicationState: description: Desired state of all volumes [primary or secondary] in this replication group; this value is propagated to children VolumeReplication @@ -121,20 +167,18 @@ spec: key set using the keys: AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY. The secret should be present in the same namespace as the VRG' type: string - volumeReplicationClass: - description: ReplicationClass of all volumes in this replication group; - this value is propagated to children VolumeReplication CRs + schedulingInterval: + description: schedule is an interval type: string required: - pvcSelector - replicationState - - volumeReplicationClass + - schedulingInterval type: object status: - description: 'VolumeReplicationGroupStatus defines the observed state - of VolumeReplicationGroup INSERT ADDITIONAL STATUS FIELD - define observed - state of cluster Important: Run "make" to regenerate code after modifying - this file' + description: VolumeReplicationGroupStatus defines the observed state of + VolumeReplicationGroup INSERT ADDITIONAL STATUS FIELD - define observed + state of cluster properties: conditions: description: Conditions are the list of conditions and their status. 
diff --git a/config/dr_cluster/rbac/role.yaml b/config/dr_cluster/rbac/role.yaml
index 5588cdcf42..c0c48a0896 100644
--- a/config/dr_cluster/rbac/role.yaml
+++ b/config/dr_cluster/rbac/role.yaml
@@ -76,3 +76,20 @@ rules:
   - patch
   - update
   - watch
+- apiGroups:
+  - replication.storage.openshift.io
+  resources:
+  - volumereplicationclasses
+  verbs:
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - get
+  - list
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index c9978e1be3..7b881390c4 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -134,6 +134,16 @@ rules:
  - get
  - patch
  - update
+- apiGroups:
+  - replication.storage.openshift.io
+  resources:
+  - volumereplicationclasses
+  verbs:
+  - get
+  - list
+  - patch
+  - update
+  - watch
 - apiGroups:
   - replication.storage.openshift.io
   resources:
@@ -146,6 +156,13 @@ rules:
   - patch
   - update
   - watch
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - get
+  - list
 - apiGroups:
   - view.open-cluster-management.io
   resources:
diff --git a/config/samples/ramendr_v1alpha1_drpolicy.yaml b/config/samples/ramendr_v1alpha1_drpolicy.yaml
index 55672380d3..2e64678c6c 100644
--- a/config/samples/ramendr_v1alpha1_drpolicy.yaml
+++ b/config/samples/ramendr_v1alpha1_drpolicy.yaml
@@ -3,6 +3,10 @@ kind: DRPolicy
 metadata:
   name: drpolicy-sample
 spec:
+  schedulingInterval: "1h" # hourly
+  replicationClassSelector:
+    matchLabels:
+      class: ramen
   clusterNames:
   - east
   - west
diff --git a/config/samples/ramendr_v1alpha1_volumereplicationgroup.yaml b/config/samples/ramendr_v1alpha1_volumereplicationgroup.yaml
index 79e34470c4..432bad5250 100644
--- a/config/samples/ramendr_v1alpha1_volumereplicationgroup.yaml
+++ b/config/samples/ramendr_v1alpha1_volumereplicationgroup.yaml
@@ -6,8 +6,11 @@ spec:
   pvcSelector:
     matchLabels:
       any-pvc-label: value
-  volumeReplicationClass: "storage-replication-class"
+  schedulingInterval: "1h"
   replicationState: "Primary"
+  replicationClassSelector:
+    matchLabels:
+      class: ramen
   s3Endpoint: "path/to/s3Endpoint"
   s3Region: "unknown"
   s3SecretName: secret-name-with-s3-credentials
diff --git a/controllers/drplacementcontrol_controller.go b/controllers/drplacementcontrol_controller.go
index d40e1372f0..ef9ee9b2bc 100644
--- a/controllers/drplacementcontrol_controller.go
+++ b/controllers/drplacementcontrol_controller.go
@@ -20,6 +20,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"reflect"
+	"regexp"
 	"time"
 
 	"github.com/ghodss/yaml"
@@ -243,6 +244,18 @@ func (r *DRPlacementControlReconciler) reconcileDRPCInstance(ctx context.Context
 		return ctrl.Result{}, err
 	}
 
+	// Currently, validation of the schedule in the DRPolicy is done here. Once
+	// there is a reconciler for DRPolicy, this validation should probably move
+	// there and be removed from here.
+	err = r.validateSchedule(drPolicy)
+	if err != nil {
+		r.Log.Error(err, "failed to validate schedule")
+
+		// Should this skip the requeue? There is no reconcile until the user
+		// changes the desired spec to a valid value
+		return ctrl.Result{}, err
+	}
+
 	// Check if the drpc instance is marked for deletion, which is indicated by the
 	// deletion timestamp being set.
if drpc.GetDeletionTimestamp() != nil { @@ -274,6 +287,10 @@ func (r *DRPlacementControlReconciler) reconcileDRPCInstance(ctx context.Context mwu: rmnutil.MWUtil{Client: r.Client, Ctx: ctx, Log: r.Log, InstName: drpc.Name, InstNamespace: drpc.Namespace}, } + return r.processAndHandleResponse(&d) +} + +func (r *DRPlacementControlReconciler) processAndHandleResponse(d *DRPCInstance) (ctrl.Result, error) { requeue := d.startProcessing() r.Log.Info("Finished processing", "Requeue?", requeue) @@ -540,6 +557,21 @@ func (r *DRPlacementControlReconciler) clonePlacementRule(ctx context.Context, return clonedPlRule, nil } +func (r *DRPlacementControlReconciler) validateSchedule(drPolicy *rmn.DRPolicy) error { + r.Log.Info("Validating schedule from DRPolicy") + + if drPolicy.Spec.SchedulingInterval == "" { + return fmt.Errorf("scheduling interval empty for the DRPolicy (%s)", drPolicy.Name) + } + + re := regexp.MustCompile(`^\d+[mhd]$`) + if !re.MatchString(drPolicy.Spec.SchedulingInterval) { + return fmt.Errorf("failed to match the scheduling interval string %s", drPolicy.Spec.SchedulingInterval) + } + + return nil +} + func (r *DRPlacementControlReconciler) deleteClonedPlacementRule(ctx context.Context, name, namespace string) error { plRule, err := r.getClonedPlacementRule(ctx, name, namespace) @@ -1374,7 +1406,8 @@ func (d *DRPCInstance) processVRGManifestWork(homeCluster string) error { d.instance.Name, d.instance.Namespace, homeCluster, d.instance.Spec.S3Endpoint, d.instance.Spec.S3Region, - d.instance.Spec.S3SecretName, d.instance.Spec.PVCSelector); err != nil { + d.instance.Spec.S3SecretName, d.instance.Spec.PVCSelector, + d.drPolicy.Spec.SchedulingInterval, d.drPolicy.Spec.ReplicationClassSelector); err != nil { d.log.Error(err, "failed to create or update VolumeReplicationGroup manifest") return fmt.Errorf("failed to create or update VolumeReplicationGroup manifest in namespace %s (%w)", homeCluster, err) diff --git a/controllers/drplacementcontrol_controller_test.go b/controllers/drplacementcontrol_controller_test.go index e4759843b4..b006212b35 100644 --- a/controllers/drplacementcontrol_controller_test.go +++ b/controllers/drplacementcontrol_controller_test.go @@ -3,7 +3,7 @@ Copyright 2021 The RamenDR authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -84,6 +84,8 @@ var ( appNamespace = &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{Name: DRPCNamespaceName}, } + + schedulingInterval = "1h" ) var safeToProceed bool @@ -185,8 +187,8 @@ func updateManagedClusterViewWithVRG(mcv *fndv2.ManagedClusterView, replicationS TypeMeta: metav1.TypeMeta{Kind: "VolumeReplicationGroup", APIVersion: "ramendr.openshift.io/v1alpha1"}, ObjectMeta: metav1.ObjectMeta{Name: DRPCName, Namespace: DRPCNamespaceName}, Spec: rmn.VolumeReplicationGroupSpec{ - VolumeReplicationClass: "volume-rep-class", - ReplicationState: replicationState, + SchedulingInterval: schedulingInterval, + ReplicationState: replicationState, PVCSelector: metav1.LabelSelector{ MatchLabels: map[string]string{ "appclass": "gold", @@ -462,7 +464,8 @@ func createDRPolicy(name, namespace string, clusters []string) { Namespace: namespace, }, Spec: rmn.DRPolicySpec{ - ClusterNames: clusters, + ClusterNames: clusters, + SchedulingInterval: schedulingInterval, }, } diff --git a/controllers/util/mw_util.go b/controllers/util/mw_util.go index 5700de4691..f9def2138d 100644 --- a/controllers/util/mw_util.go +++ b/controllers/util/mw_util.go @@ -146,12 +146,13 @@ func IsManifestInAppliedState(mw *ocmworkv1.ManifestWork) bool { } func (mwu *MWUtil) CreateOrUpdateVRGManifestWork( - name, namespace, homeCluster, s3Endpoint, s3Region, s3SecretName string, pvcSelector metav1.LabelSelector) error { + name, namespace, homeCluster, s3Endpoint, s3Region, s3SecretName string, pvcSelector metav1.LabelSelector, + schedulingInterval string, replClassSelector metav1.LabelSelector) error { mwu.Log.Info(fmt.Sprintf("Create or Update manifestwork %s:%s:%s:%s:%s", name, namespace, homeCluster, s3Endpoint, s3SecretName)) manifestWork, err := mwu.generateVRGManifestWork(name, namespace, homeCluster, - s3Endpoint, s3Region, s3SecretName, pvcSelector) + s3Endpoint, s3Region, s3SecretName, pvcSelector, schedulingInterval, replClassSelector) if err != nil { return err } @@ -161,9 +162,10 @@ func (mwu *MWUtil) CreateOrUpdateVRGManifestWork( func (mwu *MWUtil) generateVRGManifestWork( name, namespace, homeCluster, s3Endpoint, s3Region, s3SecretName string, - pvcSelector metav1.LabelSelector) (*ocmworkv1.ManifestWork, error) { + pvcSelector metav1.LabelSelector, schedulingInterval string, + replClassSelector metav1.LabelSelector) (*ocmworkv1.ManifestWork, error) { vrgClientManifest, err := mwu.generateVRGManifest(name, namespace, s3Endpoint, - s3Region, s3SecretName, pvcSelector) + s3Region, s3SecretName, pvcSelector, schedulingInterval, replClassSelector) if err != nil { mwu.Log.Error(err, "failed to generate VolumeReplicationGroup manifest") @@ -181,17 +183,19 @@ func (mwu *MWUtil) generateVRGManifestWork( func (mwu *MWUtil) generateVRGManifest( name, namespace, s3Endpoint, s3Region, s3SecretName string, - pvcSelector metav1.LabelSelector) (*ocmworkv1.Manifest, error) { + pvcSelector metav1.LabelSelector, schedulingInterval string, + replClassSelector metav1.LabelSelector) (*ocmworkv1.Manifest, error) { return mwu.GenerateManifest(&rmn.VolumeReplicationGroup{ TypeMeta: metav1.TypeMeta{Kind: "VolumeReplicationGroup", APIVersion: "ramendr.openshift.io/v1alpha1"}, ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, Spec: rmn.VolumeReplicationGroupSpec{ - PVCSelector: pvcSelector, - VolumeReplicationClass: "volume-rep-class", - ReplicationState: rmn.Primary, - S3Endpoint: s3Endpoint, - S3Region: s3Region, - S3SecretName: s3SecretName, + PVCSelector: pvcSelector, + SchedulingInterval: schedulingInterval, + 
ReplicationState: rmn.Primary, + S3Endpoint: s3Endpoint, + S3Region: s3Region, + S3SecretName: s3SecretName, + ReplicationClassSelector: replClassSelector, }, }) } @@ -425,8 +429,8 @@ func (mwu *MWUtil) createOrUpdateManifestWork( // Let DRPC receive notification for any changes to ManifestWork CR created by it. // if err := ctrl.SetControllerReference(d.instance, mw, d.reconciler.Scheme); err != nil { - // return fmt.Errorf("failed to set owner reference to ManifestWork resource (%s/%s) (%v)", - // mw.Name, mw.Namespace, err) + // return fmt.Errorf("failed to set owner reference to ManifestWork resource (%s/%s) (%v)", + // mw.Name, mw.Namespace, err) // } mwu.Log.Info("Creating ManifestWork for", "cluster", managedClusternamespace, "MW", mw) diff --git a/controllers/volumereplicationgroup_controller.go b/controllers/volumereplicationgroup_controller.go index 3767d1a47d..2694a10d29 100644 --- a/controllers/volumereplicationgroup_controller.go +++ b/controllers/volumereplicationgroup_controller.go @@ -21,12 +21,14 @@ import ( "fmt" "net/url" "reflect" + "regexp" "github.com/go-logr/logr" volrep "github.com/csi-addons/volume-replication-operator/api/v1alpha1" volrepController "github.com/csi-addons/volume-replication-operator/controllers" corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -228,6 +230,8 @@ func filterPVC(mgr manager.Manager, pvc *corev1.PersistentVolumeClaim, log logr. // +kubebuilder:rbac:groups=ramendr.openshift.io,resources=volumereplicationgroups/status,verbs=get;update;patch // +kubebuilder:rbac:groups=ramendr.openshift.io,resources=volumereplicationgroups/finalizers,verbs=update // +kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumereplications,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumereplicationclasses,verbs=get;list;watch;update;patch +// +kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=get;list // +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch;update;patch // +kubebuilder:rbac:groups=core,resources=persistentvolumes,verbs=get;list;watch;update;patch @@ -248,11 +252,12 @@ func (r *VolumeReplicationGroupReconciler) Reconcile(ctx context.Context, req ct defer log.Info("Exiting reconcile loop") v := VRGInstance{ - reconciler: r, - ctx: ctx, - log: log, - instance: &ramendrv1alpha1.VolumeReplicationGroup{}, - pvcList: &corev1.PersistentVolumeClaimList{}, + reconciler: r, + ctx: ctx, + log: log, + instance: &ramendrv1alpha1.VolumeReplicationGroup{}, + pvcList: &corev1.PersistentVolumeClaimList{}, + replClassList: &volrep.VolumeReplicationClassList{}, } // Fetch the VolumeReplicationGroup instance @@ -273,11 +278,13 @@ func (r *VolumeReplicationGroupReconciler) Reconcile(ctx context.Context, req ct } type VRGInstance struct { - reconciler *VolumeReplicationGroupReconciler - ctx context.Context - log logr.Logger - instance *ramendrv1alpha1.VolumeReplicationGroup - pvcList *corev1.PersistentVolumeClaimList + reconciler *VolumeReplicationGroupReconciler + ctx context.Context + log logr.Logger + instance *ramendrv1alpha1.VolumeReplicationGroup + pvcList *corev1.PersistentVolumeClaimList + replClassList *volrep.VolumeReplicationClassList + vrcUpdated bool } const ( @@ -322,6 +329,22 @@ func (v *VRGInstance) processVRG() (ctrl.Result, error) { return ctrl.Result{Requeue: true}, nil } + 
if err := v.validateSchedule(); err != nil { + v.log.Error(err, "Failed to validate the scheduling interval") + + msg := "Failed to validate scheduling interval" + setVRGErrorCondition(&v.instance.Status.Conditions, v.instance.Generation, msg) + + if err = v.updateVRGStatus(false); err != nil { + v.log.Error(err, "VRG Status update failed") + + return ctrl.Result{Requeue: true}, nil + } + + // No requeue, as there is no reconcile till user changes desired spec to a valid value + return ctrl.Result{}, nil + } + v.log = v.log.WithName("vrginstance").WithValues("State", v.instance.Spec.ReplicationState) switch { @@ -336,6 +359,27 @@ func (v *VRGInstance) processVRG() (ctrl.Result, error) { } } +// TODO: Currently DRPC and VRG both validate the schedule. However, +// there is a difference. While DRPC validates the scheduling +// interval for DRPolicy resource, VRG validates for itself. +// Once DRPolicy reconciler is implemented, perhaps validating +// schedule can be moved to "utils" package and both VRG and +// DRPolicy can consume validateSchedule from utils package. +func (v *VRGInstance) validateSchedule() error { + v.log.Info("Validating schedule") + + if v.instance.Spec.SchedulingInterval == "" { + return fmt.Errorf("scheduling interval empty (%s)", v.instance.Name) + } + + re := regexp.MustCompile(`^\d+[mhd]$`) + if !re.MatchString(v.instance.Spec.SchedulingInterval) { + return fmt.Errorf("failed to match the scheduling interval string %s", v.instance.Spec.SchedulingInterval) + } + + return nil +} + func (v *VRGInstance) validateVRGState() error { if v.instance.Spec.ReplicationState != ramendrv1alpha1.Primary && v.instance.Spec.ReplicationState != ramendrv1alpha1.Secondary { @@ -385,6 +429,26 @@ func (v *VRGInstance) updatePVCList() error { return nil } +func (v *VRGInstance) updateReplicationClassList() error { + labelSelector := v.instance.Spec.ReplicationClassSelector + + v.log.Info("Fetching VolumeReplicationClass", "labeled", labels.Set(labelSelector.MatchLabels)) + listOptions := []client.ListOption{ + client.MatchingLabels(labelSelector.MatchLabels), + } + + if err := v.reconciler.List(v.ctx, v.replClassList, listOptions...); err != nil { + v.log.Error(err, "Failed to list Replication Classes", + "labeled", labels.Set(labelSelector.MatchLabels)) + + return fmt.Errorf("failed to list Replication Classes, %w", err) + } + + v.log.Info("Found Replication Classes", "count", len(v.replClassList.Items)) + + return nil +} + // finalizeVRG cleans up managed resources and removes the VRG finalizer for resource deletion func (v *VRGInstance) processForDeletion() (ctrl.Result, error) { v.log.Info("Entering processing VolumeReplicationGroup") @@ -1109,6 +1173,12 @@ func (v *VRGInstance) updateVR(volRep *volrep.VolumeReplication, // createVR creates a VolumeReplication CR with a PVC as its data source. func (v *VRGInstance) createVR(vrNamespacedName types.NamespacedName, state volrep.ReplicationState) error { + volumeReplicationClass, err := v.selectVolumeReplicationClass(vrNamespacedName) + if err != nil { + return fmt.Errorf("failed to find the appropriate VolumeReplicationClass (%s) %w", + v.instance.Name, err) + } + volRep := &volrep.VolumeReplication{ ObjectMeta: metav1.ObjectMeta{ Name: vrNamespacedName.Name, @@ -1120,12 +1190,8 @@ func (v *VRGInstance) createVR(vrNamespacedName types.NamespacedName, state volr Name: vrNamespacedName.Name, APIGroup: new(string), }, - - // Convert to volrep.ReplicationState type - // explicitly. Otherwise compilation fails. 
-			ReplicationState: state,
-
-			VolumeReplicationClass: v.instance.Spec.VolumeReplicationClass,
+			ReplicationState:       state,
+			VolumeReplicationClass: volumeReplicationClass,
 		},
 	}
 
@@ -1145,6 +1211,105 @@ func (v *VRGInstance) createVR(vrNamespacedName types.NamespacedName, state volr
 	return nil
 }
 
+// namespacedName applies to both the VolumeReplication resource and the PVC,
+// because the VolumeReplication resource that the VolumeReplicationGroup creates
+// for a PVC currently has the same name as that PVC. If that changes in the
+// future, processVRAsPrimary() and processVRAsSecondary() would need to receive
+// either the PVC NamespacedName or the PVC itself as an additional argument.
+func (v *VRGInstance) selectVolumeReplicationClass(namespacedName types.NamespacedName) (string, error) {
+	className := ""
+
+	if !v.vrcUpdated {
+		if err := v.updateReplicationClassList(); err != nil {
+			v.log.Error(err, "Failed to get VolumeReplicationClass list")
+
+			return className, fmt.Errorf("failed to get VolumeReplicationClass list")
+		}
+
+		v.vrcUpdated = true
+	}
+
+	if len(v.replClassList.Items) == 0 {
+		v.log.Info("No VolumeReplicationClass available")
+
+		return className, fmt.Errorf("no VolumeReplicationClass available")
+	}
+
+	storageClass, err := v.getStorageClass(namespacedName)
+	if err != nil {
+		v.log.Info(fmt.Sprintf("Failed to get the storageclass of pvc %s",
+			namespacedName))
+
+		return className, fmt.Errorf("failed to get the storageclass of pvc %s (%w)",
+			namespacedName, err)
+	}
+
+	for index := range v.replClassList.Items {
+		replicationClass := &v.replClassList.Items[index]
+		if storageClass.Provisioner != replicationClass.Spec.Provisioner {
+			continue
+		}
+
+		schedulingInterval, found := replicationClass.Spec.Parameters["schedulingInterval"]
+		if !found {
+			// schedulingInterval is not present in the parameters of this replicationClass.
+			continue
+		}
+
+		// ReplicationClass that matches both the VRG schedule and the pvc provisioner
+		if schedulingInterval == v.instance.Spec.SchedulingInterval {
+			className = replicationClass.Name
+
+			break
+		}
+	}
+
+	if className == "" {
+		v.log.Info(fmt.Sprintf("No VolumeReplicationClass found to match provisioner and schedule %s/%s",
+			storageClass.Provisioner, v.instance.Spec.SchedulingInterval))
+
+		return className, fmt.Errorf("no VolumeReplicationClass found to match provisioner and schedule")
+	}
+
+	return className, nil
+}
+
+// If the fetched StorageClasses were cached, refetching them for the next PVC
+// could be avoided, saving a call to the API server.
+func (v *VRGInstance) getStorageClass(namespacedName types.NamespacedName) (*storagev1.StorageClass, error) {
+	var pvc *corev1.PersistentVolumeClaim
+
+	for index := range v.pvcList.Items {
+		pvcItem := &v.pvcList.Items[index]
+
+		pvcNamespacedName := types.NamespacedName{Name: pvcItem.Name, Namespace: pvcItem.Namespace}
+		if pvcNamespacedName == namespacedName {
+			pvc = pvcItem
+
+			break
+		}
+	}
+
+	if pvc == nil {
+		v.log.Info("failed to get the pvc with namespaced name", "pvc", namespacedName)
+
+		// Need the storage driver of the pvc. If the pvc is not found, return an error.
+ return nil, fmt.Errorf("failed to get the pvc with namespaced name %s", namespacedName) + } + + scName := pvc.Spec.StorageClassName + + storageClass := &storagev1.StorageClass{} + if err := v.reconciler.Get(v.ctx, types.NamespacedName{Name: *scName}, storageClass); err != nil { + v.log.Info(fmt.Sprintf("Failed to get the storageclass %s", *scName)) + + return nil, fmt.Errorf("failed to get the storageclass with name %s (%w)", + *scName, err) + } + + return storageClass, nil +} + func (v *VRGInstance) updateVRGStatus(updateConditions bool) error { v.log.Info("Updating VRG status") diff --git a/controllers/volumereplicationgroup_controller_test.go b/controllers/volumereplicationgroup_controller_test.go index da11696718..0a3fcf4c5e 100644 --- a/controllers/volumereplicationgroup_controller_test.go +++ b/controllers/volumereplicationgroup_controller_test.go @@ -12,6 +12,7 @@ import ( ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" vrgController "github.com/ramendr/ramen/controllers" corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -58,12 +59,23 @@ var _ = Describe("Test VolumeReplicationGroup", func() { // and then checks whether appropriate number of VolRep // resources have been created or not. var vrgTests []vrgTest + vrgTestTemplate := &template{ + ClaimBindInfo: corev1.ClaimPending, + VolumeBindInfo: corev1.VolumePending, + schedulingInterval: "1h", + storageClassName: "manual", + replicationClassName: "test-replicationclass", + vrcProvisioner: "manual.storage.com", + scProvisioner: "manual.storage.com", + replicationClassLabels: map[string]string{"protection": "ramen"}, + } + Context("in primary state", func() { It("sets up non-bound PVCs, PVs and then bind them", func() { for c := 0; c < 5; c++ { // Test the scenario where the pvc is not bound yet // and expect no VRs to be created. - v := newVRGTestCaseBindInfo(c, corev1.ClaimPending, corev1.VolumePending, false, false) + v := newVRGTestCaseBindInfo(c, vrgTestTemplate, false, false) vrgTests = append(vrgTests, v) } }) @@ -96,9 +108,9 @@ var _ = Describe("Test VolumeReplicationGroup", func() { }) var vrgStatusTests []vrgTest - Context("in primary state status check", func() { + Context("in primary state status check pending to bound", func() { It("sets up non-bound PVCs, PVs and then bind them", func() { - v := newVRGTestCaseBindInfo(4, corev1.ClaimPending, corev1.VolumePending, false, false) + v := newVRGTestCaseBindInfo(4, vrgTestTemplate, false, false) vrgStatusTests = append(vrgStatusTests, v) }) It("expect no VR to be created as PVC not bound and check status", func() { @@ -127,10 +139,21 @@ var _ = Describe("Test VolumeReplicationGroup", func() { }) // Changes the order in which VRG and PVC/PV are created. 
+
+	vrgTest2Template := &template{
+		ClaimBindInfo:          corev1.ClaimBound,
+		VolumeBindInfo:         corev1.VolumeBound,
+		schedulingInterval:     "1h",
+		storageClassName:       "manual",
+		replicationClassName:   "test-replicationclass",
+		vrcProvisioner:         "manual.storage.com",
+		scProvisioner:          "manual.storage.com",
+		replicationClassLabels: map[string]string{"protection": "ramen"},
+	}
 	var vrgStatus2Tests []vrgTest
-	Context("in primary state status check", func() {
+	Context("in primary state status check bound", func() {
 		It("sets up PVCs, PVs", func() {
-			v := newVRGTestCaseBindInfo(4, corev1.ClaimBound, corev1.VolumeBound, true, true)
+			v := newVRGTestCaseBindInfo(4, vrgTest2Template, true, true)
 			vrgStatus2Tests = append(vrgStatus2Tests, v)
 		})
 		It("waits for VRG to create a VR for each PVC bind and checks status", func() {
@@ -152,10 +175,20 @@ var _ = Describe("Test VolumeReplicationGroup", func() {
 	// Changes the order in which VRG and PVC/PV are created. VRG is created first and then
 	// PVC/PV are created (with ClaimPending and VolumePending status respectively). Then
 	// each of them is bound and the result should be same (i.e. VRG being available).
+	vrgTest3Template := &template{
+		ClaimBindInfo:          corev1.ClaimPending,
+		VolumeBindInfo:         corev1.VolumePending,
+		schedulingInterval:     "1h",
+		storageClassName:       "manual",
+		replicationClassName:   "test-replicationclass",
+		vrcProvisioner:         "manual.storage.com",
+		scProvisioner:          "manual.storage.com",
+		replicationClassLabels: map[string]string{"protection": "ramen"},
+	}
 	var vrgStatus3Tests []vrgTest
-	Context("in primary state status check", func() {
+	Context("in primary state status check create VRG first", func() {
 		It("sets up non-bound PVCs, PVs and then bind them", func() {
-			v := newVRGTestCaseBindInfo(4, corev1.ClaimPending, corev1.VolumePending, false, true)
+			v := newVRGTestCaseBindInfo(4, vrgTest3Template, false, true)
 			vrgStatus3Tests = append(vrgStatus3Tests, v)
 		})
 		It("expect no VR to be created as PVC not bound and check status", func() {
@@ -183,13 +216,114 @@ var _ = Describe("Test VolumeReplicationGroup", func() {
 			v.cleanup()
 		})
 	})
+
+	// The VolumeReplicationClass provisioner and the StorageClass provisioner
+	// do not match. VolumeReplication resources should not be created.
+	var vrgScheduleTests []vrgTest
+	vrgScheduleTestTemplate := &template{
+		ClaimBindInfo:          corev1.ClaimBound,
+		VolumeBindInfo:         corev1.VolumeBound,
+		schedulingInterval:     "1h",
+		storageClassName:       "manual",
+		replicationClassName:   "test-replicationclass",
+		vrcProvisioner:         "manual.storage.com",
+		scProvisioner:          "new.storage.com",
+		replicationClassLabels: map[string]string{"protection": "ramen"},
+	}
+	Context("schedule test, provisioner does not match", func() {
+		It("sets up the VRG and bound PVCs, PVs", func() {
+			v := newVRGTestCaseBindInfo(4, vrgScheduleTestTemplate, true, true)
+			vrgScheduleTests = append(vrgScheduleTests, v)
+		})
+		It("expects no VR to be created as the provisioners do not match", func() {
+			v := vrgScheduleTests[0]
+			v.waitForVRCountToMatch(0)
+			// v.verifyVRGStatusExpectation(false)
+		})
+		It("waits for the VRG status to match", func() {
+			v := vrgScheduleTests[0]
+			v.verifyVRGStatusExpectation(false)
+		})
+		It("cleans up after testing", func() {
+			v := vrgScheduleTests[0]
+			v.cleanup()
+		})
+	})
+
+	// The provisioners match, but the schedule does not. Again,
+	// VolumeReplication resources should not be created.
+	var vrgSchedule2Tests []vrgTest
+	vrgScheduleTest2Template := &template{
+		ClaimBindInfo:          corev1.ClaimBound,
+		VolumeBindInfo:         corev1.VolumeBound,
+		schedulingInterval:     "22h",
+		storageClassName:       "manual",
+		replicationClassName:   "test-replicationclass",
+		vrcProvisioner:         "manual.storage.com",
+		scProvisioner:          "manual.storage.com",
+		replicationClassLabels: map[string]string{"protection": "ramen"},
+	}
+	Context("schedule test, schedule does not match", func() {
+		It("sets up the VRG and bound PVCs, PVs", func() {
+			v := newVRGTestCaseBindInfo(4, vrgScheduleTest2Template, true, true)
+			vrgSchedule2Tests = append(vrgSchedule2Tests, v)
+		})
+		It("expects no VR to be created as the schedule does not match", func() {
+			v := vrgSchedule2Tests[0]
+			v.waitForVRCountToMatch(0)
+			// v.verifyVRGStatusExpectation(false)
+		})
+		It("waits for the VRG status to match", func() {
+			v := vrgSchedule2Tests[0]
+			v.verifyVRGStatusExpectation(false)
+		})
+		It("cleans up after testing", func() {
+			v := vrgSchedule2Tests[0]
+			v.cleanup()
+		})
+	})
+
+	// The provisioner and schedule match, but the replicationClass
+	// does not have the labels that the VRG expects to find.
+	var vrgSchedule3Tests []vrgTest
+	vrgScheduleTest3Template := &template{
+		ClaimBindInfo:          corev1.ClaimBound,
+		VolumeBindInfo:         corev1.VolumeBound,
+		schedulingInterval:     "1h",
+		storageClassName:       "manual",
+		replicationClassName:   "test-replicationclass",
+		vrcProvisioner:         "manual.storage.com",
+		scProvisioner:          "manual.storage.com",
+		replicationClassLabels: map[string]string{},
+	}
+	Context("schedule test, replicationclass does not have labels", func() {
+		It("sets up the VRG and bound PVCs, PVs", func() {
+			v := newVRGTestCaseBindInfo(4, vrgScheduleTest3Template, true, true)
+			vrgSchedule3Tests = append(vrgSchedule3Tests, v)
+		})
+		It("expects no VR to be created as the replicationClass labels do not match", func() {
+			v := vrgSchedule3Tests[0]
+			v.waitForVRCountToMatch(0)
+			// v.verifyVRGStatusExpectation(false)
+		})
+		It("waits for the VRG status to match", func() {
+			v := vrgSchedule3Tests[0]
+			v.verifyVRGStatusExpectation(false)
+		})
+		It("cleans up after testing", func() {
+			v := vrgSchedule3Tests[0]
+			v.cleanup()
+		})
+	})
 })
 
 type vrgTest struct {
-	namespace string
-	pvNames   []string
-	pvcNames  []string
-	vrgName   string
+	namespace        string
+	pvNames          []string
+	pvcNames         []string
+	vrgName          string
+	storageClass     string
+	replicationClass string
 }
 
 // Use to generate unique object names across multiple VRG test cases
@@ -199,8 +333,29 @@ var testCaseNumber = 0
 // input pvcCount), a PV for each PVC, and a VRG in primary state, with
 // label selector that points to the PVCs created.
func newVRGTestCase(pvcCount int) vrgTest { - return newVRGTestCaseBindInfo(pvcCount, corev1.ClaimBound, corev1.VolumeBound, - true, false) + testTemplate := &template{ + ClaimBindInfo: corev1.ClaimBound, + VolumeBindInfo: corev1.VolumeBound, + schedulingInterval: "1h", + storageClassName: "manual", + replicationClassName: "test-replicationclass", + vrcProvisioner: "manual.storage.com", + scProvisioner: "manual.storage.com", + replicationClassLabels: map[string]string{"protection": "ramen"}, + } + + return newVRGTestCaseBindInfo(pvcCount, testTemplate, true, false) +} + +type template struct { + ClaimBindInfo corev1.PersistentVolumeClaimPhase + VolumeBindInfo corev1.PersistentVolumePhase + schedulingInterval string + vrcProvisioner string + scProvisioner string + storageClassName string + replicationClassName string + replicationClassLabels map[string]string } // newVRGTestCaseBindInfo creates a new namespace, zero or more PVCs (equal @@ -209,19 +364,22 @@ func newVRGTestCase(pvcCount int) vrgTest { // with Status.Phase set to ClaimPending instead of ClaimBound. Expectation // is that, until pvc is not bound, VolRep resources should not be created // by VRG. -func newVRGTestCaseBindInfo(pvcCount int, claimBindInfo corev1.PersistentVolumeClaimPhase, - volumeBindInfo corev1.PersistentVolumePhase, checkBind, vrgFirst bool) vrgTest { +func newVRGTestCaseBindInfo(pvcCount int, testTemplate *template, checkBind, vrgFirst bool) vrgTest { pvcLabels := map[string]string{} objectNameSuffix := 'a' + testCaseNumber testCaseNumber++ // each invocation of this function is a new test case v := vrgTest{ - namespace: fmt.Sprintf("envtest-ns-%c", objectNameSuffix), - vrgName: fmt.Sprintf("vrg-%c", objectNameSuffix), + namespace: fmt.Sprintf("envtest-ns-%c", objectNameSuffix), + vrgName: fmt.Sprintf("vrg-%c", objectNameSuffix), + storageClass: testTemplate.storageClassName, + replicationClass: testTemplate.replicationClassName, } By("Creating namespace " + v.namespace) v.createNamespace() + v.createSC(testTemplate) + v.createVRC(testTemplate) // Setup PVC labels if pvcCount > 0 { @@ -231,9 +389,11 @@ func newVRGTestCaseBindInfo(pvcCount int, claimBindInfo corev1.PersistentVolumeC if vrgFirst { v.createVRG(pvcLabels) - v.createPVCandPV(pvcCount, claimBindInfo, volumeBindInfo, objectNameSuffix, pvcLabels) + v.createPVCandPV(pvcCount, testTemplate.ClaimBindInfo, testTemplate.VolumeBindInfo, + objectNameSuffix, pvcLabels) } else { - v.createPVCandPV(pvcCount, claimBindInfo, volumeBindInfo, objectNameSuffix, pvcLabels) + v.createPVCandPV(pvcCount, testTemplate.ClaimBindInfo, testTemplate.VolumeBindInfo, + objectNameSuffix, pvcLabels) v.createVRG(pvcLabels) } @@ -305,7 +465,7 @@ func (v *vrgTest) createPV(pvName, claimName string, bindInfo corev1.PersistentV // UID: types.UID(claimName), }, PersistentVolumeReclaimPolicy: "Delete", - StorageClassName: "manual", + StorageClassName: v.storageClass, MountOptions: []string{}, NodeAffinity: &corev1.VolumeNodeAffinity{ Required: &corev1.NodeSelector{ @@ -346,7 +506,9 @@ func (v *vrgTest) createPVC(pvcName, namespace, volumeName string, labels map[st capacity := corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse("1Gi"), } - storageclass := "manual" + + storageclass := v.storageClass + accessModes := []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce} pvc := &corev1.PersistentVolumeClaim{ TypeMeta: metav1.TypeMeta{}, @@ -405,14 +567,19 @@ func (v *vrgTest) bindPVAndPVC() { func (v *vrgTest) createVRG(pvcLabels map[string]string) { By("creating VRG " + 
v.vrgName) + schedulingInterval := "1h" + replicationClassLabels := map[string]string{"protection": "ramen"} + vrg := &ramendrv1alpha1.VolumeReplicationGroup{ ObjectMeta: metav1.ObjectMeta{ Name: v.vrgName, Namespace: v.namespace, }, Spec: ramendrv1alpha1.VolumeReplicationGroupSpec{ - PVCSelector: metav1.LabelSelector{MatchLabels: pvcLabels}, - ReplicationState: "primary", + PVCSelector: metav1.LabelSelector{MatchLabels: pvcLabels}, + ReplicationState: "primary", + SchedulingInterval: schedulingInterval, + ReplicationClassSelector: metav1.LabelSelector{MatchLabels: replicationClassLabels}, }, } err := k8sClient.Create(context.TODO(), vrg) @@ -426,6 +593,61 @@ func (v *vrgTest) createVRG(pvcLabels map[string]string) { "failed to create VRG %s in %s", v.vrgName, v.namespace) } +func (v *vrgTest) createVRC(testTemplate *template) { + By("creating VRC " + v.replicationClass) + + parameters := make(map[string]string) + + if testTemplate.schedulingInterval != "" { + parameters["schedulingInterval"] = testTemplate.schedulingInterval + } + + vrc := &volrep.VolumeReplicationClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: v.replicationClass, + }, + Spec: volrep.VolumeReplicationClassSpec{ + Provisioner: testTemplate.vrcProvisioner, + Parameters: parameters, + }, + } + + if len(testTemplate.replicationClassLabels) > 0 { + vrc.ObjectMeta.Labels = testTemplate.replicationClassLabels + } + + err := k8sClient.Create(context.TODO(), vrc) + if err != nil { + if errors.IsAlreadyExists(err) { + err = k8sClient.Get(context.TODO(), types.NamespacedName{Name: v.replicationClass}, vrc) + } + } + + Expect(err).NotTo(HaveOccurred(), + "failed to create/get VolumeReplicationClass %s/%s", v.replicationClass, v.vrgName) +} + +func (v *vrgTest) createSC(testTemplate *template) { + By("creating StorageClass " + v.storageClass) + + sc := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: v.storageClass, + }, + Provisioner: testTemplate.scProvisioner, + } + + err := k8sClient.Create(context.TODO(), sc) + if err != nil { + if errors.IsAlreadyExists(err) { + err = k8sClient.Get(context.TODO(), types.NamespacedName{Name: v.storageClass}, sc) + } + } + + Expect(err).NotTo(HaveOccurred(), + "failed to create/get StorageClass %s/%s", v.storageClass, v.vrgName) +} + func (v *vrgTest) verifyPVCBindingToPV(checkBind bool) { By("Waiting for PVC to get bound to PVs for " + v.vrgName) @@ -516,6 +738,8 @@ func (v *vrgTest) cleanup() { v.cleanupPVCs() v.cleanupVRG() v.cleanupNamespace() + v.cleanupSC() + v.cleanupVRC() } func (v *vrgTest) cleanupPVCs() { @@ -536,6 +760,44 @@ func (v *vrgTest) cleanupVRG() { v.waitForVRCountToMatch(0) } +func (v *vrgTest) cleanupSC() { + key := types.NamespacedName{ + Name: v.storageClass, + } + + sc := &storagev1.StorageClass{} + + err := k8sClient.Get(context.TODO(), key, sc) + if err != nil { + if errors.IsNotFound(err) { + return + } + } + + err = k8sClient.Delete(context.TODO(), sc) + Expect(err).To(BeNil(), + "failed to delete StorageClass %s", v.storageClass) +} + +func (v *vrgTest) cleanupVRC() { + key := types.NamespacedName{ + Name: v.replicationClass, + } + + vrc := &volrep.VolumeReplicationClass{} + + err := k8sClient.Get(context.TODO(), key, vrc) + if err != nil { + if errors.IsNotFound(err) { + return + } + } + + err = k8sClient.Delete(context.TODO(), vrc) + Expect(err).To(BeNil(), + "failed to delete replicationClass %s", v.replicationClass) +} + func (v *vrgTest) cleanupNamespace() { By("deleting namespace " + v.namespace) diff --git a/go.mod b/go.mod index 
5c389117c4..d6c7224371 100644 --- a/go.mod +++ b/go.mod @@ -22,8 +22,11 @@ require ( replace k8s.io/client-go => k8s.io/client-go v0.20.5 -// below: temporary measure to include multicloud-operators-foundation project with ManagedClusterView through forked repo -require github.com/tjanssen3/multicloud-operators-foundation/v2 v2.0.0-20210512222428-660109db7f1d +require ( + github.com/robfig/cron/v3 v3.0.1 + // below: temporary measure to include multicloud-operators-foundation project with ManagedClusterView through forked repo + github.com/tjanssen3/multicloud-operators-foundation/v2 v2.0.0-20210512222428-660109db7f1d +) replace ( github.com/kubevirt/terraform-provider-kubevirt => github.com/nirarg/terraform-provider-kubevirt v0.0.0-20201222125919-101cee051ed3 diff --git a/go.sum b/go.sum index f9a0ee48af..8439392490 100644 --- a/go.sum +++ b/go.sum @@ -1692,7 +1692,10 @@ github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uY github.com/rickb777/date v1.12.5-0.20200422084442-6300e543c4d9/go.mod h1:L8WrssTzvgYw34/Ppa0JpJfI7KKXZ2cVGI6Djt0brUU= github.com/rickb777/plural v1.2.0/go.mod h1:UdpyWFCGbo3mvK3f/PfZOAOrkjzJlYN/sD46XNWJ+Es= github.com/robfig/cron v0.0.0-20170526150127-736158dc09e1/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= +github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= diff --git a/hack/ocm-minikube-ramen.sh b/hack/ocm-minikube-ramen.sh index 54e60ec627..f1a1295418 100755 --- a/hack/ocm-minikube-ramen.sh +++ b/hack/ocm-minikube-ramen.sh @@ -80,6 +80,7 @@ ramen_deploy() spec: provisioner: rook-ceph.rbd.csi.ceph.com parameters: + schedulingInterval: "1h" replication.storage.openshift.io/replication-secret-name: rook-csi-rbd-provisioner replication.storage.openshift.io/replication-secret-namespace: rook-ceph a diff --git a/hack/test/replication.storage.openshift.io_volumereplicationclasses.yaml b/hack/test/replication.storage.openshift.io_volumereplicationclasses.yaml new file mode 100644 index 0000000000..5057182960 --- /dev/null +++ b/hack/test/replication.storage.openshift.io_volumereplicationclasses.yaml @@ -0,0 +1,74 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + creationTimestamp: null + name: volumereplicationclasses.replication.storage.openshift.io +spec: + group: replication.storage.openshift.io + names: + kind: VolumeReplicationClass + listKind: VolumeReplicationClassList + plural: volumereplicationclasses + shortNames: + - vrc + singular: volumereplicationclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.provisioner + name: provisioner + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: VolumeReplicationClass is the Schema for the volumereplicationclasses + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: VolumeReplicationClassSpec specifies parameters that an underlying + storage system uses when creating a volume replica. A specific VolumeReplicationClass + is used by specifying its name in a VolumeReplication object. + properties: + parameters: + additionalProperties: + type: string + description: Parameters is a key-value map with storage provisioner + specific configurations for creating volume replicas + type: object + provisioner: + description: Provisioner is the name of storage provisioner + type: string + required: + - provisioner + type: object + status: + description: VolumeReplicationClassStatus defines the observed state of + VolumeReplicationClass + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: []
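
For illustration only (not part of the patch): the VolumeReplicationClass selection added in selectVolumeReplicationClass() succeeds when a cluster-scoped VolumeReplicationClass (a) carries labels matching the VRG's replicationClassSelector, (b) names the same provisioner as the StorageClass of the protected PVC, and (c) has a schedulingInterval parameter equal to the DRPolicy/VRG schedulingInterval. Below is a minimal sketch of such a pairing, assuming the rook-ceph.rbd.csi.ceph.com provisioner used in hack/ocm-minikube-ramen.sh; the resource names are hypothetical.

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-ceph-block                      # hypothetical name; referenced by the protected PVCs
provisioner: rook-ceph.rbd.csi.ceph.com
---
apiVersion: replication.storage.openshift.io/v1alpha1
kind: VolumeReplicationClass
metadata:
  name: rbd-volumereplicationclass           # hypothetical name
  labels:
    class: ramen                             # matches the sample replicationClassSelector
spec:
  provisioner: rook-ceph.rbd.csi.ceph.com    # must equal the StorageClass provisioner
  parameters:
    schedulingInterval: "1h"                 # must equal DRPolicy/VRG spec.schedulingInterval
    replication.storage.openshift.io/replication-secret-name: rook-csi-rbd-provisioner
    replication.storage.openshift.io/replication-secret-namespace: rook-ceph

With these in place, createVR() sets volumeReplicationClass: rbd-volumereplicationclass on the VolumeReplication resources it generates; if the labels, provisioner, or interval differ, no class is selected and no VR is created, which is what the new schedule tests in volumereplicationgroup_controller_test.go assert.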