From 56b5f4a6aaac11c4bc64f1c1f00b3f1244e8c901 Mon Sep 17 00:00:00 2001 From: erdrix Date: Tue, 7 Sep 2021 11:25:41 +0200 Subject: [PATCH 01/18] add support for new kind of client config & managed cluster ref changed in resources --- api/v1alpha1/common_types.go | 89 ++++++- api/v1alpha1/nificluster_types.go | 31 +++ api/v1alpha1/nifidataflow_types.go | 6 +- api/v1alpha1/nifiparametercontext_types.go | 4 +- api/v1alpha1/nifiregistryclient_types.go | 6 +- .../bases/nifi.orange.com_nificlusters.yaml | 5 + .../bases/nifi.orange.com_nifidataflows.yaml | 17 +- ...nifi.orange.com_nifiparametercontexts.yaml | 20 +- .../nifi.orange.com_nifiregistryclients.yaml | 22 +- .../bases/nifi.orange.com_nifiusergroups.yaml | 17 +- .../crd/bases/nifi.orange.com_nifiusers.yaml | 17 +- controllers/controller_common.go | 16 ++ controllers/nifidataflow_controller.go | 184 ++++++++++---- .../nifiparametercontext_controller.go | 133 ++++++++-- controllers/nifiregistryclient_controller.go | 138 ++++++++-- controllers/nifiuser_controller.go | 240 ++++++++++++------ controllers/nifiusergroup_controller.go | 144 ++++++++--- nifikop.iml | 1 + pkg/clientwrappers/accesspolicies/policies.go | 37 ++- .../controllersettings/controllersettings.go | 6 +- pkg/clientwrappers/dataflow/dataflow.go | 100 ++++---- .../parametercontext/parametercontext.go | 33 +-- .../registryclient/registryclient.go | 24 +- .../reportingtask/reportingtask.go | 17 +- pkg/clientwrappers/user/user.go | 37 ++- pkg/clientwrappers/usergroup/usergroup.go | 35 ++- pkg/common/common.go | 22 +- pkg/k8sutil/status.go | 13 +- pkg/nificlient/client.go | 36 ++- pkg/nificlient/client_test.go | 2 +- pkg/nificlient/config.go | 23 +- pkg/nificlient/config_test.go | 16 +- pkg/nificlient/system.go | 13 +- pkg/resources/nifi/nifi.go | 95 +++++-- 34 files changed, 1118 insertions(+), 481 deletions(-) diff --git a/api/v1alpha1/common_types.go b/api/v1alpha1/common_types.go index 219c75ff2..a4c5026ee 100644 --- a/api/v1alpha1/common_types.go +++ 
b/api/v1alpha1/common_types.go @@ -14,7 +14,9 @@ package v1alpha1 -import "fmt" +import ( + "fmt" +) // DataflowState defines the state of a NifiDataflow type DataflowState string @@ -82,6 +84,10 @@ func (r State) Complete() State { } } +func (r ClusterState) IsReady() bool { + return r == NifiClusterRunning || r == NifiClusterReconciling +} + // NifiAccessType hold info about Nifi ACL type NifiAccessType string @@ -108,11 +114,39 @@ type SecretConfigReference struct { Data string `json:"data"` } +const( + EXTERNAL_REFERENCE string = "external" + INTERNAL_REFERENCE string = "internal" +) + +type ClusterConnect interface { + //NodeConnection(log logr.Logger, client client.Client) (node nificlient.NifiClient, err error) + IsInternal() bool + IsExternal() bool + ClusterLabelString() string + IsReady() bool + Id() string +} + // ClusterReference states a reference to a cluster for dataflow/registryclient/user // provisioning type ClusterReference struct { - Name string `json:"name"` - Namespace string `json:"namespace,omitempty"` + Name string `json:"name,omitempty"` + Namespace string `json:"namespace,omitempty"` + Type string `json:"type,omitempty"` + Hostname string `json:"hostname,omitempty"` + SecretRef SecretReference `json:"secretRef,omitempty"` +} + +func (c *ClusterReference) GetType() string { + if c.Type == "" || c.Type != EXTERNAL_REFERENCE { + return INTERNAL_REFERENCE + } + return EXTERNAL_REFERENCE +} + +func (c *ClusterReference) IsSet() bool{ + return (c.Name != "" && c.GetType() == INTERNAL_REFERENCE) || (c.Hostname != "" && c.GetType() == EXTERNAL_REFERENCE) } // RegistryClientReference states a reference to a registry client for dataflow @@ -162,13 +196,13 @@ type AccessPolicy struct { ComponentId string `json:"componentId,omitempty"` } -func (a *AccessPolicy) GetResource(cluster *NifiCluster) string { +func (a *AccessPolicy) GetResource(rootProcessGroupId string) string { if a.Type == GlobalAccessPolicyType { return string(a.Resource) } componentId 
:= a.ComponentId if a.ComponentType == "process-groups" && componentId == "" { - componentId = cluster.Status.RootProcessGroupId + componentId = rootProcessGroupId } resource := a.Resource if a.Resource == ComponentsAccessPolicyResource { @@ -315,6 +349,8 @@ type NodeState struct { ConfigurationState ConfigurationState `json:"configurationState"` // InitClusterNode contains if this nodes was part of the initial cluster InitClusterNode InitClusterNode `json:"initClusterNode"` + // PodIsReady whether or not the associated pod is ready + PodIsReady bool `json:"podIsReady"` } // RackAwarenessState holds info about rack awareness status @@ -381,3 +417,46 @@ const ( // NotInitClusterNode states the node is not part of initial cluster setup NotInitClusterNode InitClusterNode = false ) + +func ClusterRefsEquals(clusterRefs []ClusterReference) bool { + c1 := clusterRefs[0] + refType := c1.Type + hostname := c1.Hostname + name := c1.Name + ns := c1.Namespace + + var secretRefs []SecretReference + for _, cluster := range clusterRefs { + if refType != cluster.Type { + return false + } + if c1.IsExternal() { + if hostname != cluster.Hostname { + return false + } + secretRefs = append(secretRefs, SecretReference{Name: cluster.SecretRef.Name, Namespace: cluster.Namespace}) + } else if name != cluster.Name || ns != cluster.Namespace { + return false + } + } + + if c1.IsExternal() { + return SecretRefsEquals(secretRefs) + } + return true +} + +func (c ClusterReference) IsExternal() bool{ + return c.Type == EXTERNAL_REFERENCE +} + +func SecretRefsEquals(secretRefs []SecretReference) bool { + name := secretRefs[0].Name + ns := secretRefs[0].Namespace + for _, secretRef := range secretRefs { + if name != secretRef.Name || ns != secretRef.Namespace { + return false + } + } + return true +} \ No newline at end of file diff --git a/api/v1alpha1/nificluster_types.go b/api/v1alpha1/nificluster_types.go index 06ed93fdd..9eae86c53 100644 --- a/api/v1alpha1/nificluster_types.go +++ 
b/api/v1alpha1/nificluster_types.go @@ -17,6 +17,7 @@ limitations under the License. package v1alpha1 import ( + "fmt" cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -654,3 +655,33 @@ func (nSpec *NifiClusterSpec) GetMetricPort() *int { return nil } + +func (cluster *NifiCluster) IsExternal() bool{ + return false +} + +func (cluster *NifiCluster) IsInternal() bool{ + return true +} + +func (cluster *NifiCluster) ClusterLabelString() string { + return fmt.Sprintf("%s.%s", cluster.Name, cluster.Namespace) +} + +func (cluster NifiCluster) IsReady() bool { + for _,nodeState := range cluster.Status.NodesState { + if nodeState.ConfigurationState != ConfigInSync || nodeState.GracefulActionState.State != GracefulUpscaleSucceeded || + !nodeState.PodIsReady { + return false + } + } + return cluster.Status.State.IsReady() +} + +func (cluster *NifiCluster) Id() string { + return cluster.Name +} + +func (cluster *NifiCluster) RootProcessGroupId() string{ + return cluster.Status.RootProcessGroupId +} \ No newline at end of file diff --git a/api/v1alpha1/nifidataflow_types.go b/api/v1alpha1/nifidataflow_types.go index 3a253eb3b..f13102a94 100644 --- a/api/v1alpha1/nifidataflow_types.go +++ b/api/v1alpha1/nifidataflow_types.go @@ -150,9 +150,9 @@ func (d *NifiDataflowSpec) GetRunOnce() bool { return true } -func (d *NifiDataflowSpec) GetParentProcessGroupID(cluster *NifiCluster) string { +func (d *NifiDataflowSpec) GetParentProcessGroupID(rootProcessGroupId string) string { if d.ParentProcessGroupID == "" { - return cluster.Status.RootProcessGroupId + return rootProcessGroupId } return d.ParentProcessGroupID -} +} \ No newline at end of file diff --git a/api/v1alpha1/nifiparametercontext_types.go b/api/v1alpha1/nifiparametercontext_types.go index bac1c86d2..1e2babe20 100644 --- a/api/v1alpha1/nifiparametercontext_types.go +++ b/api/v1alpha1/nifiparametercontext_types.go @@ -29,8 +29,8 @@ type 
NifiParameterContextSpec struct { Description string `json:"description,omitempty"` // a list of non-sensitive Parameters. Parameters []Parameter `json:"parameters"` - // contains the reference to the NifiCluster with the one the user is linked. - ClusterRef ClusterReference `json:"clusterRef"` + // contains the reference to the NifiCluster with the one the dataflow is linked. + ClusterRef ClusterReference `json:"clusterRef,omitempty"` // a list of secret containing sensitive parameters (the key will name of the parameter). SecretRefs []SecretReference `json:"secretRefs,omitempty"` } diff --git a/api/v1alpha1/nifiregistryclient_types.go b/api/v1alpha1/nifiregistryclient_types.go index 0659547e6..42d3aa004 100644 --- a/api/v1alpha1/nifiregistryclient_types.go +++ b/api/v1alpha1/nifiregistryclient_types.go @@ -29,8 +29,8 @@ type NifiRegistryClientSpec struct { Uri string `json:"uri"` // The Description of the Registry client. Description string `json:"description,omitempty"` - // Contains the reference to the NifiCluster with the one the registry client is linked. - ClusterRef ClusterReference `json:"clusterRef"` + // contains the reference to the NifiCluster with the one the dataflow is linked. 
+ ClusterRef ClusterReference `json:"clusterRef,omitempty"` } // NifiRegistryClientStatus defines the observed state of NifiRegistryClient @@ -64,4 +64,4 @@ type NifiRegistryClientList struct { func init() { SchemeBuilder.Register(&NifiRegistryClient{}, &NifiRegistryClientList{}) -} +} \ No newline at end of file diff --git a/config/crd/bases/nifi.orange.com_nificlusters.yaml b/config/crd/bases/nifi.orange.com_nificlusters.yaml index 57d59f174..640b09ebb 100644 --- a/config/crd/bases/nifi.orange.com_nificlusters.yaml +++ b/config/crd/bases/nifi.orange.com_nificlusters.yaml @@ -4383,10 +4383,15 @@ spec: description: InitClusterNode contains if this nodes was part of the initial cluster type: boolean + podIsReady: + description: PodIsReady whether or not the associated pod is + ready + type: boolean required: - configurationState - gracefulActionState - initClusterNode + - podIsReady type: object description: Store the state of each nifi node type: object diff --git a/config/crd/bases/nifi.orange.com_nifidataflows.yaml b/config/crd/bases/nifi.orange.com_nifidataflows.yaml index 265322bfd..bc1e7653b 100644 --- a/config/crd/bases/nifi.orange.com_nifidataflows.yaml +++ b/config/crd/bases/nifi.orange.com_nifidataflows.yaml @@ -43,12 +43,25 @@ spec: description: contains the reference to the NifiCluster with the one the dataflow is linked. properties: + hostname: + type: string name: type: string namespace: type: string - required: - - name + secretRef: + description: SecretReference states a reference to a secret for + parameter context provisioning + properties: + name: + type: string + namespace: + type: string + required: + - name + type: object + type: + type: string type: object flowId: description: the UUID of the flow to run. 
diff --git a/config/crd/bases/nifi.orange.com_nifiparametercontexts.yaml b/config/crd/bases/nifi.orange.com_nifiparametercontexts.yaml index a70c23709..aebddc739 100644 --- a/config/crd/bases/nifi.orange.com_nifiparametercontexts.yaml +++ b/config/crd/bases/nifi.orange.com_nifiparametercontexts.yaml @@ -39,14 +39,27 @@ spec: properties: clusterRef: description: contains the reference to the NifiCluster with the one - the user is linked. + the dataflow is linked. properties: + hostname: + type: string name: type: string namespace: type: string - required: - - name + secretRef: + description: SecretReference states a reference to a secret for + parameter context provisioning + properties: + name: + type: string + namespace: + type: string + required: + - name + type: object + type: + type: string type: object description: description: the Description of the Parameter Context. @@ -84,7 +97,6 @@ spec: type: object type: array required: - - clusterRef - parameters type: object status: diff --git a/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml b/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml index e604f891f..60c4c294f 100644 --- a/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml +++ b/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml @@ -38,15 +38,28 @@ spec: description: NifiRegistryClientSpec defines the desired state of NifiRegistryClient properties: clusterRef: - description: Contains the reference to the NifiCluster with the one - the registry client is linked. + description: contains the reference to the NifiCluster with the one + the dataflow is linked. 
properties: + hostname: + type: string name: type: string namespace: type: string - required: - - name + secretRef: + description: SecretReference states a reference to a secret for + parameter context provisioning + properties: + name: + type: string + namespace: + type: string + required: + - name + type: object + type: + type: string type: object description: description: The Description of the Registry client. @@ -56,7 +69,6 @@ spec: pulling the flow. type: string required: - - clusterRef - uri type: object status: diff --git a/config/crd/bases/nifi.orange.com_nifiusergroups.yaml b/config/crd/bases/nifi.orange.com_nifiusergroups.yaml index b3777280c..a7e33eac8 100644 --- a/config/crd/bases/nifi.orange.com_nifiusergroups.yaml +++ b/config/crd/bases/nifi.orange.com_nifiusergroups.yaml @@ -98,12 +98,25 @@ spec: description: clusterRef contains the reference to the NifiCluster with the one the registry client is linked. properties: + hostname: + type: string name: type: string namespace: type: string - required: - - name + secretRef: + description: SecretReference states a reference to a secret for + parameter context provisioning + properties: + name: + type: string + namespace: + type: string + required: + - name + type: object + type: + type: string type: object usersRef: description: userRef contains the list of reference to NifiUsers that diff --git a/config/crd/bases/nifi.orange.com_nifiusers.yaml b/config/crd/bases/nifi.orange.com_nifiusers.yaml index f3b6ab4d4..59addd791 100644 --- a/config/crd/bases/nifi.orange.com_nifiusers.yaml +++ b/config/crd/bases/nifi.orange.com_nifiusers.yaml @@ -98,12 +98,25 @@ spec: description: contains the reference to the NifiCluster with the one the user is linked properties: + hostname: + type: string name: type: string namespace: type: string - required: - - name + secretRef: + description: SecretReference states a reference to a secret for + parameter context provisioning + properties: + name: + type: string + namespace: + 
type: string + required: + - name + type: object + type: + type: string type: object createCert: description: Whether or not a certificate will be created for this diff --git a/controllers/controller_common.go b/controllers/controller_common.go index 3649441a5..320d3fc5f 100644 --- a/controllers/controller_common.go +++ b/controllers/controller_common.go @@ -100,6 +100,22 @@ func ApplyClusterRefLabel(cluster *v1alpha1.NifiCluster, labels map[string]strin return labels } +// applyClusterRefLabel ensures a map of labels contains a reference to a parent nifi cluster +func ApplyClusterReferenceLabel(cluster v1alpha1.ClusterConnect, labels map[string]string) map[string]string { + labelValue := cluster.ClusterLabelString() + if labels == nil { + labels = make(map[string]string, 0) + } + if label, ok := labels[ClusterRefLabel]; ok { + if label != labelValue { + labels[ClusterRefLabel] = labelValue + } + } else { + labels[ClusterRefLabel] = labelValue + } + return labels +} + // getClusterRefNamespace returns the expected namespace for a Nifi cluster // referenced by a user/dataflow CR. It takes the namespace of the CR as the first // argument and the reference itself as the second. 
diff --git a/controllers/nifidataflow_controller.go b/controllers/nifidataflow_controller.go index 09c5e03b2..d105ea76f 100644 --- a/controllers/nifidataflow_controller.go +++ b/controllers/nifidataflow_controller.go @@ -19,11 +19,14 @@ package controllers import ( "context" "emperror.dev/errors" + "encoding/json" "fmt" "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers/dataflow" "github.com/Orange-OpenSource/nifikop/pkg/errorfactory" "github.com/Orange-OpenSource/nifikop/pkg/k8sutil" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient" "github.com/Orange-OpenSource/nifikop/pkg/util" + "github.com/banzaicloud/k8s-objectmatcher/patch" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/record" @@ -79,15 +82,26 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request return RequeueWithError(r.Log, err.Error(), err) } - // Ensure finalizer for cleanup on deletion - if !util.StringSliceContains(instance.GetFinalizers(), dataflowFinalizer) { - r.Log.Info("Adding Finalizer for NifiDataflow") - instance.SetFinalizers(append(instance.GetFinalizers(), dataflowFinalizer)) + // Get the last configuration viewed by the operator. + o, err :=patch.DefaultAnnotator.GetOriginalConfiguration(instance) + // Create it if not exist. + if o == nil { + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(instance); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) + } + if err := r.Client.Update(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to update NifiRegistryClient", err) + } + o, err =patch.DefaultAnnotator.GetOriginalConfiguration(instance) } - // Push any changes - if instance, err = r.updateAndFetchLatest(ctx, instance); err != nil { - return RequeueWithError(r.Log, "failed to update NifiDataflow", err) + // Check if the cluster reference changed. 
+ original := &v1alpha1.NifiDataflow{} + current := instance.DeepCopy() + json.Unmarshal(o, original) + if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{original.Spec.ClusterRef, instance.Spec.ClusterRef}) && + original.Spec.ClusterRef.IsSet() { + instance.Spec.ClusterRef = original.Spec.ClusterRef } // Get the referenced NifiRegistryClient @@ -95,14 +109,14 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request var registryClientNamespace string if instance.Spec.RegistryClientRef != nil { registryClientNamespace = - GetRegistryClientRefNamespace(instance.Namespace, *instance.Spec.RegistryClientRef) + GetRegistryClientRefNamespace(current.Namespace, *current.Spec.RegistryClientRef) if registryClient, err = k8sutil.LookupNifiRegistryClient(r.Client, - instance.Spec.RegistryClientRef.Name, registryClientNamespace); err != nil { + current.Spec.RegistryClientRef.Name, registryClientNamespace); err != nil { // This shouldn't trigger anymore, but leaving it here as a safetybelt if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - r.Log.Info("Registry client is already gone, there is nothing we can do") + r.Log.Info("Dataflow is already gone, there is nothing we can do") if err = r.removeFinalizer(ctx, instance); err != nil { return RequeueWithError(r.Log, "failed to remove finalizer", err) } @@ -116,16 +130,16 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request var parameterContext *v1alpha1.NifiParameterContext var parameterContextNamespace string - if instance.Spec.ParameterContextRef != nil { + if current.Spec.ParameterContextRef != nil { parameterContextNamespace = - GetParameterContextRefNamespace(instance.Namespace, *instance.Spec.ParameterContextRef) + GetParameterContextRefNamespace(current.Namespace, *current.Spec.ParameterContextRef) if parameterContext, err = k8sutil.LookupNifiParameterContext(r.Client, - instance.Spec.ParameterContextRef.Name, parameterContextNamespace); err != nil { + 
current.Spec.ParameterContextRef.Name, parameterContextNamespace); err != nil { // This shouldn't trigger anymore, but leaving it here as a safetybelt if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - r.Log.Info("Parameter context is already gone, there is nothing we can do") + r.Log.Info("Dataflow context is already gone, there is nothing we can do") if err = r.removeFinalizer(ctx, instance); err != nil { return RequeueWithError(r.Log, "failed to remove finalizer", err) } @@ -142,17 +156,21 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request } // Check if cluster references are the same - clusterNamespace := GetClusterRefNamespace(instance.Namespace, instance.Spec.ClusterRef) - if registryClient != nil && - (registryClientNamespace != clusterNamespace || - registryClient.Spec.ClusterRef.Name != instance.Spec.ClusterRef.Name || - (parameterContext != nil && - (parameterContextNamespace != clusterNamespace || - parameterContext.Spec.ClusterRef.Name != instance.Spec.ClusterRef.Name))) { + registryClusterRef := registryClient.Spec.ClusterRef + registryClusterRef.Namespace = registryClientNamespace + + parameterContextClusterRef := parameterContext.Spec.ClusterRef + parameterContextClusterRef.Namespace = parameterContextNamespace + + currentClusterRef := current.Spec.ClusterRef + currentClusterRef.Namespace = GetClusterRefNamespace(current.Namespace, current.Spec.ClusterRef) + + if !v1alpha1.ClusterRefsEquals( + []v1alpha1.ClusterReference{registryClusterRef, parameterContextClusterRef, currentClusterRef}) { r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", fmt.Sprintf("Failed to lookup reference cluster : %s in %s", - instance.Spec.ClusterRef.Name, clusterNamespace)) + instance.Spec.ClusterRef.Name, currentClusterRef.Namespace)) return RequeueWithError( r.Log, @@ -160,24 +178,85 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request errors.New("inconsistent cluster 
references")) } - var cluster *v1alpha1.NifiCluster - if cluster, err = k8sutil.LookupNifiCluster(r.Client, instance.Spec.ClusterRef.Name, clusterNamespace); err != nil { - // This shouldn't trigger anymore, but leaving it here as a safetybelt - if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - r.Log.Info("Cluster is already gone, there is nothing we can do") - if err = r.removeFinalizer(ctx, instance); err != nil { - return RequeueWithError(r.Log, "failed to remove finalizer", err) + var clientConfig *nificlient.NifiConfig + var clusterConnect v1alpha1.ClusterConnect + + // Get the referenced NifiCluster + if !instance.Spec.ClusterRef.IsExternal() { + var cluster *v1alpha1.NifiCluster + if cluster, err = k8sutil.LookupNifiCluster(r.Client, instance.Spec.ClusterRef.Name, currentClusterRef.Namespace); err != nil { + // This shouldn't trigger anymore, but leaving it here as a safetybelt + if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { + r.Log.Info("Cluster is already gone, there is nothing we can do") + if err = r.removeFinalizer(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to remove finalizer", err) + } + return Reconciled() } - return Reconciled() - } - // the cluster does not exist - should have been caught pre-flight - return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) + // If the referenced cluster no more exist, just skip the deletion requirement in cluster ref change case. 
+ if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) + } + if err := r.Client.Update(ctx, current); err != nil { + return RequeueWithError(r.Log, "failed to update NifiDataflow", err) + } + return RequeueAfter(time.Duration(15) * time.Second) + } + + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to lookup reference cluster : %s in %s", + instance.Spec.ClusterRef.Name, currentClusterRef.Namespace)) + + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) + } + clusterConnect = cluster + clientConfig, err = nificlient.ClusterConfig(r.Client, cluster) + if err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to create HTTP client for the referenced cluster : %s in %s", + instance.Spec.ClusterRef.Name, currentClusterRef.Namespace)) + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to create HTTP client the for referenced cluster", err) + } + } else { } // Check if marked for deletion and if so run finalizers if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - return r.checkFinalizers(ctx, instance, cluster) + return r.checkFinalizers(ctx, instance, clientConfig) + } + + // Ensure the cluster is ready to receive actions + if !clusterConnect.IsReady() { + r.Log.Info("Cluster is not ready yet, will wait until it is.") + r.Recorder.Event(instance, corev1.EventTypeNormal, "ReferenceClusterNotReady", + fmt.Sprintf("The referenced cluster is not ready yet : %s in %s", + instance.Spec.ClusterRef.Name, clusterConnect.Id())) + + // the cluster does not exist - should have been caught 
pre-flight + return RequeueAfter(time.Duration(15) * time.Second) + } + + // Ìn case of the cluster reference changed. + if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { + // Delete the resource on the previous cluster. + if _,err := dataflow.RemoveDataflow(instance, clientConfig); err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "RemoveError", + fmt.Sprintf("Failed to delete NifiDataflow %s from cluster %s before moving in %s", + instance.Name, original.Spec.ClusterRef.Name, original.Spec.ClusterRef.Name)) + return RequeueWithError(r.Log, "Failed to delete NifiDataflow before moving", err) + } + // Update the last view configuration to the current one. + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) + } + if err := r.Client.Update(ctx, current); err != nil { + return RequeueWithError(r.Log, "failed to update NifiDatafllow", err) + } + return RequeueAfter(time.Duration(15) * time.Second) } if instance.Spec.GetRunOnce() && instance.Status.State == v1alpha1.DataflowStateRan { @@ -190,7 +269,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request instance.Spec.FlowId, strconv.FormatInt(int64(*instance.Spec.FlowVersion), 10))) // Check if the dataflow already exist - existing, err := dataflow.DataflowExist(r.Client, instance, cluster) + existing, err := dataflow.DataflowExist(instance, clientConfig) if err != nil { return RequeueWithError(r.Log, "failure checking for existing dataflow", err) } @@ -202,7 +281,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request instance.Name, instance.Spec.BucketId, instance.Spec.FlowId, strconv.FormatInt(int64(*instance.Spec.FlowVersion), 10))) - processGroupStatus, err := dataflow.CreateDataflow(r.Client, instance, cluster, registryClient) + processGroupStatus, err := 
dataflow.CreateDataflow(instance, clientConfig, registryClient) if err != nil { r.Recorder.Event(instance, corev1.EventTypeWarning, "CreationFailed", fmt.Sprintf("Creation failed dataflow %s based on flow {bucketId : %s, flowId: %s, version: %s}", @@ -227,6 +306,17 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request existing = true } + // Ensure finalizer for cleanup on deletion + if !util.StringSliceContains(instance.GetFinalizers(), dataflowFinalizer) { + r.Log.Info("Adding Finalizer for NifiDataflow") + instance.SetFinalizers(append(instance.GetFinalizers(), dataflowFinalizer)) + } + + // Push any changes + if instance, err = r.updateAndFetchLatest(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to update NifiDataflow", err) + } + // In case where the flow is not sync if instance.Status.State == v1alpha1.DataflowStateOutOfSync { r.Recorder.Event(instance, corev1.EventTypeNormal, "Synchronizing", @@ -234,7 +324,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request instance.Name, instance.Spec.BucketId, instance.Spec.FlowId, strconv.FormatInt(int64(*instance.Spec.FlowVersion), 10))) - status, err := dataflow.SyncDataflow(r.Client, instance, cluster, registryClient, parameterContext) + status, err := dataflow.SyncDataflow(instance, clientConfig, registryClient, parameterContext) if status != nil { instance.Status = *status if err := r.Client.Status().Update(ctx, instance); err != nil { @@ -272,7 +362,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request } // Check if the flow is out of sync - isOutOfSink, err := dataflow.IsOutOfSyncDataflow(r.Client, instance, cluster, registryClient, parameterContext) + isOutOfSink, err := dataflow.IsOutOfSyncDataflow(instance, clientConfig, registryClient, parameterContext) if err != nil { return RequeueWithError(r.Log, "failed to check NifiDataflow sync", err) } @@ -301,7 +391,7 @@ func (r 
*NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request instance.Name, instance.Spec.BucketId, instance.Spec.FlowId, strconv.FormatInt(int64(*instance.Spec.FlowVersion), 10))) - if err := dataflow.ScheduleDataflow(r.Client, instance, cluster); err != nil { + if err := dataflow.ScheduleDataflow(instance, clientConfig); err != nil { switch errors.Cause(err).(type) { case errorfactory.NifiFlowControllerServiceScheduling, errorfactory.NifiFlowScheduling: return RequeueAfter(time.Duration(5) * time.Second) @@ -326,7 +416,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request } // Ensure NifiCluster label - if instance, err = r.ensureClusterLabel(ctx, cluster, instance); err != nil { + if instance, err = r.ensureClusterLabel(ctx, clusterConnect, instance); err != nil { return RequeueWithError(r.Log, "failed to ensure NifiCluster label on dataflow", err) } @@ -356,10 +446,10 @@ func (r *NifiDataflowReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func (r *NifiDataflowReconciler) ensureClusterLabel(ctx context.Context, cluster *v1alpha1.NifiCluster, +func (r *NifiDataflowReconciler) ensureClusterLabel(ctx context.Context, cluster v1alpha1.ClusterConnect, flow *v1alpha1.NifiDataflow) (*v1alpha1.NifiDataflow, error) { - labels := ApplyClusterRefLabel(cluster, flow.GetLabels()) + labels := ApplyClusterReferenceLabel(cluster, flow.GetLabels()) if !reflect.DeepEqual(labels, flow.GetLabels()) { flow.SetLabels(labels) return r.updateAndFetchLatest(ctx, flow) @@ -380,12 +470,12 @@ func (r *NifiDataflowReconciler) updateAndFetchLatest(ctx context.Context, } func (r *NifiDataflowReconciler) checkFinalizers(ctx context.Context, flow *v1alpha1.NifiDataflow, - cluster *v1alpha1.NifiCluster) (reconcile.Result, error) { + config *nificlient.NifiConfig) (reconcile.Result, error) { r.Log.Info("NiFi dataflow is marked for deletion") var err error if util.StringSliceContains(flow.GetFinalizers(), dataflowFinalizer) { - if 
err = r.finalizeNifiDataflow(flow, cluster); err != nil { + if err = r.finalizeNifiDataflow(flow, config); err != nil { switch errors.Cause(err).(type) { case errorfactory.NifiConnectionDropping, errorfactory.NifiFlowDraining: return RequeueAfter(time.Duration(5) * time.Second) @@ -407,9 +497,9 @@ func (r *NifiDataflowReconciler) removeFinalizer(ctx context.Context, flow *v1al return err } -func (r *NifiDataflowReconciler) finalizeNifiDataflow(flow *v1alpha1.NifiDataflow, cluster *v1alpha1.NifiCluster) error { +func (r *NifiDataflowReconciler) finalizeNifiDataflow(flow *v1alpha1.NifiDataflow, config *nificlient.NifiConfig) error { - exists, err := dataflow.DataflowExist(r.Client, flow, cluster) + exists, err := dataflow.DataflowExist(flow, config) if err != nil { return err } @@ -420,7 +510,7 @@ func (r *NifiDataflowReconciler) finalizeNifiDataflow(flow *v1alpha1.NifiDataflo flow.Name, flow.Spec.BucketId, flow.Spec.FlowId, strconv.FormatInt(int64(*flow.Spec.FlowVersion), 10))) - if _, err = dataflow.RemoveDataflow(r.Client, flow, cluster); err != nil { + if _, err = dataflow.RemoveDataflow(flow, config); err != nil { return err } r.Recorder.Event(flow, corev1.EventTypeNormal, "Removed", diff --git a/controllers/nifiparametercontext_controller.go b/controllers/nifiparametercontext_controller.go index f139c5ec7..cefa4aea9 100644 --- a/controllers/nifiparametercontext_controller.go +++ b/controllers/nifiparametercontext_controller.go @@ -19,11 +19,14 @@ package controllers import ( "context" "emperror.dev/errors" + "encoding/json" "fmt" "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers/parametercontext" errorfactory "github.com/Orange-OpenSource/nifikop/pkg/errorfactory" "github.com/Orange-OpenSource/nifikop/pkg/k8sutil" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient" "github.com/Orange-OpenSource/nifikop/pkg/util" + "github.com/banzaicloud/k8s-objectmatcher/patch" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" 
"k8s.io/client-go/tools/record" @@ -78,6 +81,28 @@ func (r *NifiParameterContextReconciler) Reconcile(ctx context.Context, req ctrl return RequeueWithError(r.Log, err.Error(), err) } + // Get the last configuration viewed by the operator. + o, err :=patch.DefaultAnnotator.GetOriginalConfiguration(instance) + // Create it if not exist. + if o == nil { + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(instance); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) + } + if err := r.Client.Update(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to update NifiParameterContext", err) + } + o, err =patch.DefaultAnnotator.GetOriginalConfiguration(instance) + } + + // Check if the cluster reference changed. + original := &v1alpha1.NifiParameterContext{} + current := instance.DeepCopy() + json.Unmarshal(o, original) + if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{original.Spec.ClusterRef, instance.Spec.ClusterRef}) && + original.Spec.ClusterRef.IsSet() { + instance.Spec.ClusterRef = original.Spec.ClusterRef + } + // Get the referenced secrets var parameterSecrets []*corev1.Secret for _, parameterSecret := range instance.Spec.SecretRefs { @@ -99,37 +124,93 @@ func (r *NifiParameterContextReconciler) Reconcile(ctx context.Context, req ctrl parameterSecrets = append(parameterSecrets, secret) } + var clientConfig *nificlient.NifiConfig + var clusterConnect v1alpha1.ClusterConnect + // Get the referenced NifiCluster - clusterNamespace := GetClusterRefNamespace(instance.Namespace, instance.Spec.ClusterRef) - var cluster *v1alpha1.NifiCluster - if cluster, err = k8sutil.LookupNifiCluster(r.Client, instance.Spec.ClusterRef.Name, clusterNamespace); err != nil { - // This shouldn't trigger anymore, but leaving it here as a safetybelt - if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - r.Log.Info("Cluster is already gone, there is nothing we can do") - if err = r.removeFinalizer(ctx, instance); 
err != nil { - return RequeueWithError(r.Log, "failed to remove finalizer", err) + if !instance.Spec.ClusterRef.IsExternal() { + clusterNamespace := GetClusterRefNamespace(instance.Namespace, instance.Spec.ClusterRef) + var cluster *v1alpha1.NifiCluster + if cluster, err = k8sutil.LookupNifiCluster(r.Client, instance.Spec.ClusterRef.Name, clusterNamespace); err != nil { + // This shouldn't trigger anymore, but leaving it here as a safetybelt + if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { + r.Log.Info("Cluster is already gone, there is nothing we can do") + if err = r.removeFinalizer(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to remove finalizer", err) + } + return Reconciled() + } + // If the referenced cluster no more exist, just skip the deletion requirement in cluster ref change case. + if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) + } + if err := r.Client.Update(ctx, current); err != nil { + return RequeueWithError(r.Log, "failed to update NifiRegistryClient", err) + } + return RequeueAfter(time.Duration(15) * time.Second) } - return Reconciled() - } - r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", - fmt.Sprintf("Failed to lookup reference cluster : %s in %s", - instance.Spec.ClusterRef.Name, clusterNamespace)) + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to lookup reference cluster : %s in %s", + instance.Spec.ClusterRef.Name, clusterNamespace)) - // the cluster does not exist - should have been caught pre-flight - return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to lookup 
referenced cluster", err) + } + // Set cluster connection configuration. + clusterConnect = cluster + clientConfig, err = nificlient.ClusterConfig(r.Client, cluster) + if err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to create HTTP client for the referenced cluster : %s in %s", + instance.Spec.ClusterRef.Name, clusterNamespace)) + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to create HTTP client the for referenced cluster", err) + } + } else { } // Check if marked for deletion and if so run finalizers if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - return r.checkFinalizers(ctx, instance, parameterSecrets, cluster) + return r.checkFinalizers(ctx, instance, parameterSecrets, clientConfig) + } + + // Ensure the cluster is ready to receive actions + if !clusterConnect.IsReady() { + r.Log.Info("Cluster is not ready yet, will wait until it is.") + r.Recorder.Event(instance, corev1.EventTypeNormal, "ReferenceClusterNotReady", + fmt.Sprintf("The referenced cluster is not ready yet : %s in %s", + instance.Spec.ClusterRef.Name, clusterConnect.Id())) + + // the cluster does not exist - should have been caught pre-flight + return RequeueAfter(time.Duration(15) * time.Second) + } + + // Ìn case of the cluster reference changed. + if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { + // Delete the resource on the previous cluster. 
+ if err := parametercontext.RemoveParameterContext(instance, parameterSecrets, clientConfig); err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "RemoveError", + fmt.Sprintf("Failed to delete NifiParameterContext %s from cluster %s before moving in %s", + instance.Name, original.Spec.ClusterRef.Name, original.Spec.ClusterRef.Name)) + return RequeueWithError(r.Log, "Failed to delete NifiParameterContext before moving", err) + } + // Update the last view configuration to the current one. + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) + } + if err := r.Client.Update(ctx, current); err != nil { + return RequeueWithError(r.Log, "failed to update NifiRegistryClient", err) + } + return RequeueAfter(time.Duration(15) * time.Second) } r.Recorder.Event(instance, corev1.EventTypeNormal, "Reconciling", fmt.Sprintf("Reconciling parameter context %s", instance.Name)) // Check if the NiFi registry client already exist - exist, err := parametercontext.ExistParameterContext(r.Client, instance, cluster) + exist, err := parametercontext.ExistParameterContext(instance, clientConfig) if err != nil { return RequeueWithError(r.Log, "failure checking for existing parameter context", err) } @@ -139,7 +220,7 @@ func (r *NifiParameterContextReconciler) Reconcile(ctx context.Context, req ctrl r.Recorder.Event(instance, corev1.EventTypeNormal, "Creating", fmt.Sprintf("Creating parameter context %s", instance.Name)) - status, err := parametercontext.CreateParameterContext(r.Client, instance, parameterSecrets, cluster) + status, err := parametercontext.CreateParameterContext(instance, parameterSecrets, clientConfig) if err != nil { return RequeueWithError(r.Log, "failure creating parameter context", err) } @@ -156,7 +237,7 @@ func (r *NifiParameterContextReconciler) Reconcile(ctx context.Context, req ctrl // Sync ParameterContext resource with NiFi side component 
r.Recorder.Event(instance, corev1.EventTypeNormal, "Synchronizing", fmt.Sprintf("Synchronizing parameter context %s", instance.Name)) - status, err := parametercontext.SyncParameterContext(r.Client, instance, parameterSecrets, cluster) + status, err := parametercontext.SyncParameterContext(instance, parameterSecrets, clientConfig) if status != nil { instance.Status = *status if err := r.Client.Status().Update(ctx, instance); err != nil { @@ -178,7 +259,7 @@ func (r *NifiParameterContextReconciler) Reconcile(ctx context.Context, req ctrl fmt.Sprintf("Synchronized parameter context %s", instance.Name)) // Ensure NifiCluster label - if instance, err = r.ensureClusterLabel(ctx, cluster, instance); err != nil { + if instance, err = r.ensureClusterLabel(ctx, clusterConnect, instance); err != nil { return RequeueWithError(r.Log, "failed to ensure NifiCluster label on parameter context", err) } @@ -208,10 +289,10 @@ func (r *NifiParameterContextReconciler) SetupWithManager(mgr ctrl.Manager) erro Complete(r) } -func (r *NifiParameterContextReconciler) ensureClusterLabel(ctx context.Context, cluster *v1alpha1.NifiCluster, +func (r *NifiParameterContextReconciler) ensureClusterLabel(ctx context.Context, cluster v1alpha1.ClusterConnect, parameterContext *v1alpha1.NifiParameterContext) (*v1alpha1.NifiParameterContext, error) { - labels := ApplyClusterRefLabel(cluster, parameterContext.GetLabels()) + labels := ApplyClusterReferenceLabel(cluster, parameterContext.GetLabels()) if !reflect.DeepEqual(labels, parameterContext.GetLabels()) { parameterContext.SetLabels(labels) return r.updateAndFetchLatest(ctx, parameterContext) @@ -235,12 +316,12 @@ func (r *NifiParameterContextReconciler) checkFinalizers( ctx context.Context, parameterContext *v1alpha1.NifiParameterContext, parameterSecrets []*corev1.Secret, - cluster *v1alpha1.NifiCluster) (reconcile.Result, error) { + config *nificlient.NifiConfig) (reconcile.Result, error) { r.Log.Info("NiFi parameter context is marked for 
deletion") var err error if util.StringSliceContains(parameterContext.GetFinalizers(), parameterContextFinalizer) { - if err = r.finalizeNifiParameterContext(parameterContext, parameterSecrets, cluster); err != nil { + if err = r.finalizeNifiParameterContext(parameterContext, parameterSecrets, config); err != nil { return RequeueWithError(r.Log, "failed to finalize parameter context", err) } if err = r.removeFinalizer(ctx, parameterContext); err != nil { @@ -259,9 +340,9 @@ func (r *NifiParameterContextReconciler) removeFinalizer(ctx context.Context, fl func (r *NifiParameterContextReconciler) finalizeNifiParameterContext( parameterContext *v1alpha1.NifiParameterContext, parameterSecrets []*corev1.Secret, - cluster *v1alpha1.NifiCluster) error { + config *nificlient.NifiConfig) error { - if err := parametercontext.RemoveParameterContext(r.Client, parameterContext, parameterSecrets, cluster); err != nil { + if err := parametercontext.RemoveParameterContext(parameterContext, parameterSecrets, config); err != nil { return err } r.Log.Info("Delete Registry client") diff --git a/controllers/nifiregistryclient_controller.go b/controllers/nifiregistryclient_controller.go index 0b759477b..924dac5db 100644 --- a/controllers/nifiregistryclient_controller.go +++ b/controllers/nifiregistryclient_controller.go @@ -18,10 +18,13 @@ package controllers import ( "context" + "encoding/json" "fmt" "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers/registryclient" "github.com/Orange-OpenSource/nifikop/pkg/k8sutil" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient" "github.com/Orange-OpenSource/nifikop/pkg/util" + "github.com/banzaicloud/k8s-objectmatcher/patch" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/record" @@ -76,36 +79,112 @@ func (r *NifiRegistryClientReconciler) Reconcile(ctx context.Context, req ctrl.R return RequeueWithError(r.Log, err.Error(), err) } + // Get the last configuration viewed by the operator. 
+ o, err :=patch.DefaultAnnotator.GetOriginalConfiguration(instance) + // Create it if not exist. + if o == nil { + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(instance); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) + } + if err := r.Client.Update(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to update NifiRegistryClient", err) + } + o, err =patch.DefaultAnnotator.GetOriginalConfiguration(instance) + } + + // Check if the cluster reference changed. + original := &v1alpha1.NifiRegistryClient{} + current := instance.DeepCopy() + json.Unmarshal(o, original) + if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{original.Spec.ClusterRef, instance.Spec.ClusterRef}) && + original.Spec.ClusterRef.IsSet() { + instance.Spec.ClusterRef = original.Spec.ClusterRef + } + + var clientConfig *nificlient.NifiConfig + var clusterConnect v1alpha1.ClusterConnect // Get the referenced NifiCluster - clusterNamespace := GetClusterRefNamespace(instance.Namespace, instance.Spec.ClusterRef) - var cluster *v1alpha1.NifiCluster - if cluster, err = k8sutil.LookupNifiCluster(r.Client, instance.Spec.ClusterRef.Name, clusterNamespace); err != nil { - // This shouldn't trigger anymore, but leaving it here as a safetybelt - if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - r.Log.Info("Cluster is already gone, there is nothing we can do") - if err = r.removeFinalizer(ctx, instance); err != nil { - return RequeueWithError(r.Log, "failed to remove finalizer", err) + if !instance.Spec.ClusterRef.IsExternal(){ + var cluster *v1alpha1.NifiCluster + clusterNamespace := GetClusterRefNamespace(instance.Namespace, instance.Spec.ClusterRef) + if cluster, err = k8sutil.LookupNifiCluster(r.Client, instance.Spec.ClusterRef.Name, clusterNamespace); err != nil { + // This shouldn't trigger anymore, but leaving it here as a safetybelt + if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { + r.Log.Info("Cluster is 
already gone, there is nothing we can do") + if err = r.removeFinalizer(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to remove finalizer", err) + } + return Reconciled() + } + // If the referenced cluster no more exist, just skip the deletion requirement in cluster ref change case. + if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) + } + if err := r.Client.Update(ctx, current); err != nil { + return RequeueWithError(r.Log, "failed to update NifiRegistryClient", err) + } + return RequeueAfter(time.Duration(15) * time.Second) } - return Reconciled() - } - r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", - fmt.Sprintf("Failed to lookup reference cluster : %s in %s", - instance.Spec.ClusterRef.Name, clusterNamespace)) - // the cluster does not exist - should have been caught pre-flight - return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to lookup reference cluster : %s in %s", + instance.Spec.ClusterRef.Name, clusterNamespace)) + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) + } + // Set cluster connection configuration. 
+ clusterConnect = cluster + clientConfig, err = nificlient.ClusterConfig(r.Client, cluster) + if err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to create HTTP client for the referenced cluster : %s in %s", + instance.Spec.ClusterRef.Name, clusterNamespace)) + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to create HTTP client the for referenced cluster", err) + } + } else { } // Check if marked for deletion and if so run finalizers if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - return r.checkFinalizers(ctx, r.Log, instance, cluster) + return r.checkFinalizers(ctx, r.Log, instance, clientConfig) + } + + // Ensure the cluster is ready to receive actions + if !clusterConnect.IsReady() { + r.Log.Info("Cluster is not ready yet, will wait until it is.") + r.Recorder.Event(instance, corev1.EventTypeNormal, "ReferenceClusterNotReady", + fmt.Sprintf("The referenced cluster is not ready yet : %s in %s", + instance.Spec.ClusterRef.Name, clusterConnect.Id())) + // the cluster does not exist - should have been caught pre-flight + return RequeueAfter(time.Duration(15) * time.Second) + } + + // Ìn case of the cluster reference changed. + if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { + // Delete the resource on the previous cluster. + if err := registryclient.RemoveRegistryClient(instance, clientConfig); err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "RemoveError", + fmt.Sprintf("Failed to delete NifiRegistryClient %s from cluster %s before moving in %s", + instance.Name, original.Spec.ClusterRef.Name, original.Spec.ClusterRef.Name)) + return RequeueWithError(r.Log, "Failed to delete NifiRegistryClient before moving", err) + } + // Update the last view configuration to the current one. 
+ if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) + } + if err := r.Client.Update(ctx, current); err != nil { + return RequeueWithError(r.Log, "failed to update NifiRegistryClient", err) + } + return RequeueAfter(time.Duration(15) * time.Second) } r.Recorder.Event(instance, corev1.EventTypeNormal, "Reconciling", fmt.Sprintf("Reconciling registry client %s", instance.Name)) // Check if the NiFi registry client already exist - exist, err := registryclient.ExistRegistryClient(r.Client, instance, cluster) + exist, err := registryclient.ExistRegistryClient(instance, clientConfig) if err != nil { return RequeueWithError(r.Log, "failure checking for existing registry client", err) } @@ -114,7 +193,7 @@ func (r *NifiRegistryClientReconciler) Reconcile(ctx context.Context, req ctrl.R // Create NiFi registry client r.Recorder.Event(instance, corev1.EventTypeNormal, "Creating", fmt.Sprintf("Creating registry client %s", instance.Name)) - status, err := registryclient.CreateRegistryClient(r.Client, instance, cluster) + status, err := registryclient.CreateRegistryClient(instance, clientConfig) if err != nil { return RequeueWithError(r.Log, "failure creating registry client", err) } @@ -126,12 +205,19 @@ func (r *NifiRegistryClientReconciler) Reconcile(ctx context.Context, req ctrl.R r.Recorder.Event(instance, corev1.EventTypeNormal, "Created", fmt.Sprintf("Created registry client %s", instance.Name)) + + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(instance); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) + } + if err := r.Client.Update(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to update NifiRegistryClient", err) + } } // Sync RegistryClient resource with NiFi side component r.Recorder.Event(instance, corev1.EventTypeNormal, "Synchronizing", fmt.Sprintf("Synchronizing registry 
client %s", instance.Name)) - status, err := registryclient.SyncRegistryClient(r.Client, instance, cluster) + status, err := registryclient.SyncRegistryClient(instance, clientConfig) if err != nil { r.Recorder.Event(instance, corev1.EventTypeNormal, "SynchronizingFailed", fmt.Sprintf("Synchronizing registry client %s failed", instance.Name)) @@ -146,7 +232,7 @@ func (r *NifiRegistryClientReconciler) Reconcile(ctx context.Context, req ctrl.R r.Recorder.Event(instance, corev1.EventTypeNormal, "Synchronized", fmt.Sprintf("Synchronized registry client %s", instance.Name)) // Ensure NifiCluster label - if instance, err = r.ensureClusterLabel(ctx, cluster, instance); err != nil { + if instance, err = r.ensureClusterLabel(ctx, clusterConnect, instance); err != nil { return RequeueWithError(r.Log, "failed to ensure NifiCluster label on registry client", err) } @@ -176,10 +262,10 @@ func (r *NifiRegistryClientReconciler) SetupWithManager(mgr ctrl.Manager) error Complete(r) } -func (r *NifiRegistryClientReconciler) ensureClusterLabel(ctx context.Context, cluster *v1alpha1.NifiCluster, +func (r *NifiRegistryClientReconciler) ensureClusterLabel(ctx context.Context, cluster v1alpha1.ClusterConnect, registryClient *v1alpha1.NifiRegistryClient) (*v1alpha1.NifiRegistryClient, error) { - labels := ApplyClusterRefLabel(cluster, registryClient.GetLabels()) + labels := ApplyClusterReferenceLabel(cluster, registryClient.GetLabels()) if !reflect.DeepEqual(labels, registryClient.GetLabels()) { registryClient.SetLabels(labels) return r.updateAndFetchLatest(ctx, registryClient) @@ -200,12 +286,12 @@ func (r *NifiRegistryClientReconciler) updateAndFetchLatest(ctx context.Context, } func (r *NifiRegistryClientReconciler) checkFinalizers(ctx context.Context, reqLogger logr.Logger, - registryClient *v1alpha1.NifiRegistryClient, cluster *v1alpha1.NifiCluster) (reconcile.Result, error) { + registryClient *v1alpha1.NifiRegistryClient, config *nificlient.NifiConfig) (reconcile.Result, error) { 
reqLogger.Info("NiFi registry client is marked for deletion") var err error if util.StringSliceContains(registryClient.GetFinalizers(), registryClientFinalizer) { - if err = r.finalizeNifiRegistryClient(reqLogger, registryClient, cluster); err != nil { + if err = r.finalizeNifiRegistryClient(reqLogger, registryClient, config); err != nil { return RequeueWithError(reqLogger, "failed to finalize nifiregistryclient", err) } if err = r.removeFinalizer(ctx, registryClient); err != nil { @@ -222,9 +308,9 @@ func (r *NifiRegistryClientReconciler) removeFinalizer(ctx context.Context, regi } func (r *NifiRegistryClientReconciler) finalizeNifiRegistryClient(reqLogger logr.Logger, registryClient *v1alpha1.NifiRegistryClient, - cluster *v1alpha1.NifiCluster) error { + config *nificlient.NifiConfig) error { - if err := registryclient.RemoveRegistryClient(r.Client, registryClient, cluster); err != nil { + if err := registryclient.RemoveRegistryClient(registryClient, config); err != nil { return err } reqLogger.Info("Delete Registry client") diff --git a/controllers/nifiuser_controller.go b/controllers/nifiuser_controller.go index 2b9f4468e..1d5269f4c 100644 --- a/controllers/nifiuser_controller.go +++ b/controllers/nifiuser_controller.go @@ -19,12 +19,15 @@ package controllers import ( "context" "emperror.dev/errors" + "encoding/json" "fmt" usercli "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers/user" "github.com/Orange-OpenSource/nifikop/pkg/errorfactory" "github.com/Orange-OpenSource/nifikop/pkg/k8sutil" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient" "github.com/Orange-OpenSource/nifikop/pkg/pki" "github.com/Orange-OpenSource/nifikop/pkg/util" + "github.com/banzaicloud/k8s-objectmatcher/patch" "github.com/go-logr/logr" certv1 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" corev1 "k8s.io/api/core/v1" @@ -82,96 +85,174 @@ func (r *NifiUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c return RequeueWithError(r.Log, 
err.Error(), err) } - // Get the referenced NifiCluster - clusterNamespace := GetClusterRefNamespace(instance.Namespace, instance.Spec.ClusterRef) - var cluster *v1alpha1.NifiCluster - if cluster, err = k8sutil.LookupNifiCluster(r.Client, instance.Spec.ClusterRef.Name, clusterNamespace); err != nil { - // This shouldn't trigger anymore, but leaving it here as a safetybelt - if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - r.Log.Info("Cluster is gone already, there is nothing we can do") - if err = r.removeFinalizer(ctx, instance); err != nil { - return RequeueWithError(r.Log, "failed to remove finalizer from NifiUser", err) - } - return Reconciled() + // Get the last configuration viewed by the operator. + o, err :=patch.DefaultAnnotator.GetOriginalConfiguration(instance) + // Create it if not exist. + if o == nil { + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(instance); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) + } + if err := r.Client.Update(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to update NifiUser", err) } + o, err =patch.DefaultAnnotator.GetOriginalConfiguration(instance) + } - r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", - fmt.Sprintf("Failed to lookup reference cluster : %s in %s", - instance.Spec.ClusterRef.Name, clusterNamespace)) - return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) + // Check if the cluster reference changed. 
+ original := &v1alpha1.NifiUser{} + current := instance.DeepCopy() + json.Unmarshal(o, original) + if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{original.Spec.ClusterRef, instance.Spec.ClusterRef}) && + original.Spec.ClusterRef.IsSet() { + instance.Spec.ClusterRef = original.Spec.ClusterRef } - if instance.Spec.GetCreateCert() { + var clientConfig *nificlient.NifiConfig + var clusterConnect v1alpha1.ClusterConnect + // Get the referenced NifiCluster + if !instance.Spec.ClusterRef.IsExternal() { + clusterNamespace := GetClusterRefNamespace(instance.Namespace, instance.Spec.ClusterRef) + var cluster *v1alpha1.NifiCluster + if cluster, err = k8sutil.LookupNifiCluster(r.Client, instance.Spec.ClusterRef.Name, clusterNamespace); err != nil { + // This shouldn't trigger anymore, but leaving it here as a safetybelt + if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { + r.Log.Info("Cluster is gone already, there is nothing we can do") + if err = r.removeFinalizer(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to remove finalizer from NifiUser", err) + } + return Reconciled() + } - // Avoid panic if the user wants to create a nifi user but the cluster is in plaintext mode - // TODO: refactor this and use webhook to validate if the cluster is eligible to create a nifi user - if cluster.Spec.ListenersConfig.SSLSecrets == nil { - return RequeueWithError(r.Log, "could not create Nifi user since cluster does not use ssl", errors.New("failed to create Nifi user")) + // If the referenced cluster no more exist, just skip the deletion requirement in cluster ref change case. 
+ if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) + } + if err := r.Client.Update(ctx, current); err != nil { + return RequeueWithError(r.Log, "failed to update NifiUser", err) + } + return RequeueAfter(time.Duration(15) * time.Second) + } + + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to lookup reference cluster : %s in %s", + instance.Spec.ClusterRef.Name, clusterNamespace)) + return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) } - pkiManager := pki.GetPKIManager(r.Client, cluster) + if v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) && + instance.Spec.GetCreateCert() { - r.Recorder.Event(instance, corev1.EventTypeNormal, "ReconcilingCertificate", - fmt.Sprintf("Reconciling certificate for nifi user %s", instance.Name)) - // Reconcile no matter what to get a user certificate instance for ACL management - // TODO (tinyzimmer): This can go wrong if the user made a mistake in their secret path - // using the vault backend, then tried to delete and fix it. Should probably - // have the PKIManager export a GetUserCertificate specifically for deletions - // that will allow the error to fall through if the certificate doesn't exist. 
- _, err := pkiManager.ReconcileUserCertificate(ctx, instance, r.Scheme) - if err != nil { - switch errors.Cause(err).(type) { - case errorfactory.ResourceNotReady: - r.Log.Info("generated secret not found, may not be ready") - return ctrl.Result{ - Requeue: true, - RequeueAfter: time.Duration(5) * time.Second, - }, nil - case errorfactory.FatalReconcileError: - // TODO: (tinyzimmer) - Sleep for longer for now to give user time to see the error - // But really we should catch these kinds of issues in a pre-admission hook in a future PR - // The user can fix while this is looping and it will pick it up next reconcile attempt - r.Log.Error(err, "Fatal error attempting to reconcile the user certificate. If using vault perhaps a permissions issue or improperly configured PKI?") - return ctrl.Result{ - Requeue: true, - RequeueAfter: time.Duration(15) * time.Second, - }, nil - case errorfactory.VaultAPIFailure: - // Same as above in terms of things that could be checked pre-flight on the cluster - r.Log.Error(err, "Vault API error attempting to reconcile the user certificate. 
If using vault perhaps a permissions issue or improperly configured PKI?") - return ctrl.Result{ - Requeue: true, - RequeueAfter: time.Duration(15) * time.Second, - }, nil - default: - return RequeueWithError(r.Log, "failed to reconcile user secret", err) + // Avoid panic if the user wants to create a nifi user but the cluster is in plaintext mode + // TODO: refactor this and use webhook to validate if the cluster is eligible to create a nifi user + if cluster.Spec.ListenersConfig.SSLSecrets == nil { + return RequeueWithError(r.Log, "could not create Nifi user since cluster does not use ssl", errors.New("failed to create Nifi user")) + } + + pkiManager := pki.GetPKIManager(r.Client, cluster) + + r.Recorder.Event(instance, corev1.EventTypeNormal, "ReconcilingCertificate", + fmt.Sprintf("Reconciling certificate for nifi user %s", instance.Name)) + // Reconcile no matter what to get a user certificate instance for ACL management + // TODO (tinyzimmer): This can go wrong if the user made a mistake in their secret path + // using the vault backend, then tried to delete and fix it. Should probably + // have the PKIManager export a GetUserCertificate specifically for deletions + // that will allow the error to fall through if the certificate doesn't exist. + _, err := pkiManager.ReconcileUserCertificate(ctx, instance, r.Scheme) + if err != nil { + switch errors.Cause(err).(type) { + case errorfactory.ResourceNotReady: + r.Log.Info("generated secret not found, may not be ready") + return ctrl.Result{ + Requeue: true, + RequeueAfter: time.Duration(5) * time.Second, + }, nil + case errorfactory.FatalReconcileError: + // TODO: (tinyzimmer) - Sleep for longer for now to give user time to see the error + // But really we should catch these kinds of issues in a pre-admission hook in a future PR + // The user can fix while this is looping and it will pick it up next reconcile attempt + r.Log.Error(err, "Fatal error attempting to reconcile the user certificate. 
If using vault perhaps a permissions issue or improperly configured PKI?") + return ctrl.Result{ + Requeue: true, + RequeueAfter: time.Duration(15) * time.Second, + }, nil + case errorfactory.VaultAPIFailure: + // Same as above in terms of things that could be checked pre-flight on the cluster + r.Log.Error(err, "Vault API error attempting to reconcile the user certificate. If using vault perhaps a permissions issue or improperly configured PKI?") + return ctrl.Result{ + Requeue: true, + RequeueAfter: time.Duration(15) * time.Second, + }, nil + default: + return RequeueWithError(r.Log, "failed to reconcile user secret", err) + } } - } - r.Recorder.Event(instance, corev1.EventTypeNormal, "ReconciledCertificate", - fmt.Sprintf("Reconciled certificate for nifi user %s", instance.Name)) + r.Recorder.Event(instance, corev1.EventTypeNormal, "ReconciledCertificate", + fmt.Sprintf("Reconciled certificate for nifi user %s", instance.Name)) - // check if marked for deletion - if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - r.Log.Info("Nifi user is marked for deletion, revoking certificates") - if err = pkiManager.FinalizeUserCertificate(ctx, instance); err != nil { - return RequeueWithError(r.Log, "failed to finalize user certificate", err) + // check if marked for deletion + if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { + r.Log.Info("Nifi user is marked for deletion, revoking certificates") + if err = pkiManager.FinalizeUserCertificate(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to finalize user certificate", err) + } + return r.checkFinalizers(ctx, instance, clientConfig) } - return r.checkFinalizers(ctx, instance, cluster) } + // Set cluster connection configuration. 
+ clusterConnect = cluster + clientConfig, err = nificlient.ClusterConfig(r.Client, cluster) + if err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to create HTTP client for the referenced cluster : %s in %s", + instance.Spec.ClusterRef.Name, clusterNamespace)) + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to create HTTP client the for referenced cluster", err) + } + } else { } // check if marked for deletion if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - return r.checkFinalizers(ctx, instance, cluster) + return r.checkFinalizers(ctx, instance, clientConfig) + } + + // Ensure the cluster is ready to receive actions + if !clusterConnect.IsReady() { + r.Log.Info("Cluster is not ready yet, will wait until it is.") + r.Recorder.Event(instance, corev1.EventTypeNormal, "ReferenceClusterNotReady", + fmt.Sprintf("The referenced cluster is not ready yet : %s in %s", + instance.Spec.ClusterRef.Name, clusterConnect.Id())) + // the cluster does not exist - should have been caught pre-flight + return RequeueAfter(time.Duration(15) * time.Second) + } + + // Ìn case of the cluster reference changed. + if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { + // Delete the resource on the previous cluster. + if err := usercli.RemoveUser(instance, clientConfig); err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "RemoveError", + fmt.Sprintf("Failed to delete NifiUser %s from cluster %s before moving in %s", + instance.Name, original.Spec.ClusterRef.Name, original.Spec.ClusterRef.Name)) + return RequeueWithError(r.Log, "Failed to delete NifiUser before moving", err) + } + // Update the last view configuration to the current one. 
+ if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) + } + if err := r.Client.Update(ctx, current); err != nil { + return RequeueWithError(r.Log, "failed to update NifiUser", err) + } + return RequeueAfter(time.Duration(15) * time.Second) } r.Recorder.Event(instance, corev1.EventTypeNormal, "Reconciling", fmt.Sprintf("Reconciling user %s", instance.Name)) // Check if the NiFi user already exist - exist, err := usercli.ExistUser(r.Client, instance, cluster) + exist, err := usercli.ExistUser(instance, clientConfig) if err != nil { return RequeueWithError(r.Log, "failure checking for existing registry client", err) } @@ -182,14 +263,14 @@ func (r *NifiUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c var status *v1alpha1.NifiUserStatus - status, err = usercli.FindUserByIdentity(r.Client, instance, cluster) + status, err = usercli.FindUserByIdentity(instance, clientConfig) if err != nil { return RequeueWithError(r.Log, "failure finding user", err) } if status == nil { // Create NiFi registry client - status, err = usercli.CreateUser(r.Client, instance, cluster) + status, err = usercli.CreateUser(instance, clientConfig) if err != nil { return RequeueWithError(r.Log, "failure creating user", err) } @@ -206,21 +287,21 @@ func (r *NifiUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c // Sync user resource with NiFi side component r.Recorder.Event(instance, corev1.EventTypeNormal, "Synchronizing", fmt.Sprintf("Synchronizing user %s", instance.Name)) - status, err := usercli.SyncUser(r.Client, instance, cluster) + status, err := usercli.SyncUser(instance, clientConfig) if err != nil { return RequeueWithError(r.Log, "failed to sync NifiUser", err) } instance.Status = *status if err := r.Client.Status().Update(ctx, instance); err != nil { - return RequeueWithError(r.Log, "failed to update NifiRegistryClient status", err) + return 
RequeueWithError(r.Log, "failed to update NifiUser status", err) } r.Recorder.Event(instance, corev1.EventTypeNormal, "Synchronized", fmt.Sprintf("Synchronized user %s", instance.Name)) // ensure a NifiCluster label - if instance, err = r.ensureClusterLabel(ctx, cluster, instance); err != nil { + if instance, err = r.ensureClusterLabel(ctx, clusterConnect, instance); err != nil { return RequeueWithError(r.Log, "failed to ensure NifiCluster label on user", err) } @@ -268,8 +349,8 @@ func (r *NifiUserReconciler) SetupWithManager(mgr ctrl.Manager, certManagerEnabl return builder.Complete(r) } -func (r *NifiUserReconciler) ensureClusterLabel(ctx context.Context, cluster *v1alpha1.NifiCluster, user *v1alpha1.NifiUser) (*v1alpha1.NifiUser, error) { - labels := ApplyClusterRefLabel(cluster, user.GetLabels()) +func (r *NifiUserReconciler) ensureClusterLabel(ctx context.Context, cluster v1alpha1.ClusterConnect, user *v1alpha1.NifiUser) (*v1alpha1.NifiUser, error) { + labels := ApplyClusterReferenceLabel(cluster, user.GetLabels()) if !reflect.DeepEqual(labels, user.GetLabels()) { user.SetLabels(labels) return r.updateAndFetchLatest(ctx, user) @@ -287,11 +368,11 @@ func (r *NifiUserReconciler) updateAndFetchLatest(ctx context.Context, user *v1a return user, nil } -func (r *NifiUserReconciler) checkFinalizers(ctx context.Context, user *v1alpha1.NifiUser, cluster *v1alpha1.NifiCluster) (reconcile.Result, error) { +func (r *NifiUserReconciler) checkFinalizers(ctx context.Context, user *v1alpha1.NifiUser, config *nificlient.NifiConfig) (reconcile.Result, error) { r.Log.Info("NiFi user is marked for deletion") var err error if util.StringSliceContains(user.GetFinalizers(), userFinalizer) { - if err = r.finalizeNifiUser(user, cluster); err != nil { + if err = r.finalizeNifiUser(user, config); err != nil { return RequeueWithError(r.Log, "failed to finalize nifiuser", err) } // remove finalizer @@ -308,13 +389,8 @@ func (r *NifiUserReconciler) removeFinalizer(ctx context.Context, 
user *v1alpha1 return err } -func (r *NifiUserReconciler) finalizeNifiUser(user *v1alpha1.NifiUser, cluster *v1alpha1.NifiCluster) error { - if k8sutil.IsMarkedForDeletion(cluster.ObjectMeta) { - r.Log.Info("Cluster is being deleted, skipping deletion") - return nil - } - - if err := usercli.RemoveUser(r.Client, user, cluster); err != nil { +func (r *NifiUserReconciler) finalizeNifiUser(user *v1alpha1.NifiUser, config *nificlient.NifiConfig) error { + if err := usercli.RemoveUser(user, config); err != nil { return err } r.Log.Info("Delete user") diff --git a/controllers/nifiusergroup_controller.go b/controllers/nifiusergroup_controller.go index 9eb052ba4..92fedda73 100644 --- a/controllers/nifiusergroup_controller.go +++ b/controllers/nifiusergroup_controller.go @@ -19,10 +19,13 @@ package controllers import ( "context" "emperror.dev/errors" + "encoding/json" "fmt" "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers/usergroup" "github.com/Orange-OpenSource/nifikop/pkg/k8sutil" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient" "github.com/Orange-OpenSource/nifikop/pkg/util" + "github.com/banzaicloud/k8s-objectmatcher/patch" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/record" @@ -77,18 +80,40 @@ func (r *NifiUserGroupReconciler) Reconcile(ctx context.Context, req ctrl.Reques return RequeueWithError(r.Log, err.Error(), err) } + // Get the last configuration viewed by the operator. + o, err :=patch.DefaultAnnotator.GetOriginalConfiguration(instance) + // Create it if not exist. 
+ if o == nil { + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(instance); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) + } + if err := r.Client.Update(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to update NifiRegistryClient", err) + } + o, err =patch.DefaultAnnotator.GetOriginalConfiguration(instance) + } + + // Check if the cluster reference changed. + original := &v1alpha1.NifiUserGroup{} + current := instance.DeepCopy() + json.Unmarshal(o, original) + if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{original.Spec.ClusterRef, instance.Spec.ClusterRef}) && + original.Spec.ClusterRef.IsSet() { + instance.Spec.ClusterRef = original.Spec.ClusterRef + } + var users []*v1alpha1.NifiUser for _, userRef := range instance.Spec.UsersRef { var user *v1alpha1.NifiUser - userNamespace := GetUserRefNamespace(instance.Namespace, userRef) + userNamespace := GetUserRefNamespace(current.Namespace, userRef) if user, err = k8sutil.LookupNifiUser(r.Client, userRef.Name, userNamespace); err != nil { // This shouldn't trigger anymore, but leaving it here as a safetybelt - if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { + if k8sutil.IsMarkedForDeletion(current.ObjectMeta) { r.Log.Info("User is already gone, there is nothing we can do") - if err = r.removeFinalizer(ctx, instance); err != nil { + if err = r.removeFinalizer(ctx, current); err != nil { return RequeueWithError(r.Log, "failed to remove finalizer", err) } return Reconciled() @@ -103,8 +128,8 @@ func (r *NifiUserGroupReconciler) Reconcile(ctx context.Context, req ctrl.Reques } // Check if cluster references are the same - clusterNamespace := GetClusterRefNamespace(instance.Namespace, instance.Spec.ClusterRef) - if user != nil && (userNamespace != clusterNamespace || user.Spec.ClusterRef.Name != instance.Spec.ClusterRef.Name) { + clusterNamespace := GetClusterRefNamespace(current.Namespace, current.Spec.ClusterRef) + if user 
!= nil && (userNamespace != clusterNamespace || user.Spec.ClusterRef.Name != current.Spec.ClusterRef.Name) { r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", fmt.Sprintf("Failed to ensure consistency in cluster referece : %s in %s, with user : %s in %s", instance.Spec.ClusterRef.Name, clusterNamespace, userRef.Name, userRef.Namespace)) @@ -117,37 +142,92 @@ func (r *NifiUserGroupReconciler) Reconcile(ctx context.Context, req ctrl.Reques users = append(users, user) } + var clientConfig *nificlient.NifiConfig + var clusterConnect v1alpha1.ClusterConnect // Get the referenced NifiCluster - clusterNamespace := GetClusterRefNamespace(instance.Namespace, instance.Spec.ClusterRef) - var cluster *v1alpha1.NifiCluster - if cluster, err = k8sutil.LookupNifiCluster(r.Client, instance.Spec.ClusterRef.Name, clusterNamespace); err != nil { - // This shouldn't trigger anymore, but leaving it here as a safetybelt - if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - r.Log.Info("Cluster is already gone, there is nothing we can do") - if err = r.removeFinalizer(ctx, instance); err != nil { - return RequeueWithError(r.Log, "failed to remove finalizer", err) + if !instance.Spec.ClusterRef.IsExternal() { + clusterNamespace := GetClusterRefNamespace(instance.Namespace, instance.Spec.ClusterRef) + var cluster *v1alpha1.NifiCluster + if cluster, err = k8sutil.LookupNifiCluster(r.Client, instance.Spec.ClusterRef.Name, clusterNamespace); err != nil { + // This shouldn't trigger anymore, but leaving it here as a safetybelt + if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { + r.Log.Info("Cluster is already gone, there is nothing we can do") + if err = r.removeFinalizer(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to remove finalizer", err) + } + return Reconciled() + } + + // If the referenced cluster no more exist, just skip the deletion requirement in cluster ref change case. 
+ if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) + } + if err := r.Client.Update(ctx, current); err != nil { + return RequeueWithError(r.Log, "failed to update NifiDataflow", err) + } + return RequeueAfter(time.Duration(15) * time.Second) } - return Reconciled() - } - r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", - fmt.Sprintf("Failed to lookup reference cluster : %s in %s", - instance.Spec.ClusterRef.Name, clusterNamespace)) + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to lookup reference cluster : %s in %s", + instance.Spec.ClusterRef.Name, clusterNamespace)) - // the cluster does not exist - should have been caught pre-flight - return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) + } + // Set cluster connection configuration. 
+ clusterConnect = cluster + clientConfig, err = nificlient.ClusterConfig(r.Client, cluster) + if err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to create HTTP client for the referenced cluster : %s in %s", + instance.Spec.ClusterRef.Name, clusterNamespace)) + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to create HTTP client the for referenced cluster", err) + } + } else { } // Check if marked for deletion and if so run finalizers if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - return r.checkFinalizers(ctx, instance, users, cluster) + return r.checkFinalizers(ctx, instance, users, clientConfig) + } + + // Ensure the cluster is ready to receive actions + if !clusterConnect.IsReady() { + r.Log.Info("Cluster is not ready yet, will wait until it is.") + r.Recorder.Event(instance, corev1.EventTypeNormal, "ReferenceClusterNotReady", + fmt.Sprintf("The referenced cluster is not ready yet : %s in %s", + instance.Spec.ClusterRef.Name, clusterConnect.Id())) + // the cluster does not exist - should have been caught pre-flight + return RequeueAfter(time.Duration(15) * time.Second) + } + + // Ìn case of the cluster reference changed. + if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { + // Delete the resource on the previous cluster. + if err := usergroup.RemoveUserGroup(instance, users, clientConfig); err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "RemoveError", + fmt.Sprintf("Failed to delete NifiRegistryClient %s from cluster %s before moving in %s", + instance.Name, original.Spec.ClusterRef.Name, original.Spec.ClusterRef.Name)) + return RequeueWithError(r.Log, "Failed to delete NifiRegistryClient before moving", err) + } + // Update the last view configuration to the current one. 
+ if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) + } + if err := r.Client.Update(ctx, current); err != nil { + return RequeueWithError(r.Log, "failed to update NifiRegistryClient", err) + } + return RequeueAfter(time.Duration(15) * time.Second) } r.Recorder.Event(instance, corev1.EventTypeNormal, "Reconciling", fmt.Sprintf("Reconciling user group %s", instance.Name)) // Check if the NiFi user group already exist - exist, err := usergroup.ExistUserGroup(r.Client, instance, cluster) + exist, err := usergroup.ExistUserGroup(instance, clientConfig) if err != nil { return RequeueWithError(r.Log, "failure checking for existing user group", err) } @@ -157,7 +237,7 @@ func (r *NifiUserGroupReconciler) Reconcile(ctx context.Context, req ctrl.Reques fmt.Sprintf("Creating registry client %s", instance.Name)) // Create NiFi user group - status, err := usergroup.CreateUserGroup(r.Client, instance, users, cluster) + status, err := usergroup.CreateUserGroup(instance, users, clientConfig) if err != nil { return RequeueWithError(r.Log, "failure creating user group", err) } @@ -174,7 +254,7 @@ func (r *NifiUserGroupReconciler) Reconcile(ctx context.Context, req ctrl.Reques // Sync UserGroup resource with NiFi side component r.Recorder.Event(instance, corev1.EventTypeNormal, "Synchronizing", fmt.Sprintf("Synchronizing user group %s", instance.Name)) - status, err := usergroup.SyncUserGroup(r.Client, instance, users, cluster) + status, err := usergroup.SyncUserGroup(instance, users, clientConfig) if err != nil { r.Recorder.Event(instance, corev1.EventTypeNormal, "SynchronizingFailed", fmt.Sprintf("Synchronizing user group %s failed", instance.Name)) @@ -190,7 +270,7 @@ func (r *NifiUserGroupReconciler) Reconcile(ctx context.Context, req ctrl.Reques fmt.Sprintf("Synchronized user group %s", instance.Name)) // Ensure NifiCluster label - if instance, err = 
r.ensureClusterLabel(ctx, cluster, instance); err != nil { + if instance, err = r.ensureClusterLabel(ctx, clusterConnect, instance); err != nil { return RequeueWithError(r.Log, "failed to ensure NifiCluster label on user group", err) } @@ -220,10 +300,10 @@ func (r *NifiUserGroupReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func (r *NifiUserGroupReconciler) ensureClusterLabel(ctx context.Context, cluster *v1alpha1.NifiCluster, +func (r *NifiUserGroupReconciler) ensureClusterLabel(ctx context.Context, cluster v1alpha1.ClusterConnect, userGroup *v1alpha1.NifiUserGroup) (*v1alpha1.NifiUserGroup, error) { - labels := ApplyClusterRefLabel(cluster, userGroup.GetLabels()) + labels := ApplyClusterReferenceLabel(cluster, userGroup.GetLabels()) if !reflect.DeepEqual(labels, userGroup.GetLabels()) { userGroup.SetLabels(labels) return r.updateAndFetchLatest(ctx, userGroup) @@ -244,12 +324,12 @@ func (r *NifiUserGroupReconciler) updateAndFetchLatest(ctx context.Context, } func (r *NifiUserGroupReconciler) checkFinalizers(ctx context.Context, userGroup *v1alpha1.NifiUserGroup, - users []*v1alpha1.NifiUser, cluster *v1alpha1.NifiCluster) (reconcile.Result, error) { + users []*v1alpha1.NifiUser, config *nificlient.NifiConfig) (reconcile.Result, error) { r.Log.Info("NiFi user group is marked for deletion") var err error if util.StringSliceContains(userGroup.GetFinalizers(), userGroupFinalizer) { - if err = r.finalizeNifiNifiUserGroup(userGroup, users, cluster); err != nil { + if err = r.finalizeNifiNifiUserGroup(userGroup, users, config); err != nil { return RequeueWithError(r.Log, "failed to finalize nifiusergroup", err) } if err = r.removeFinalizer(ctx, userGroup); err != nil { @@ -259,7 +339,7 @@ func (r *NifiUserGroupReconciler) checkFinalizers(ctx context.Context, userGroup return Reconciled() } -func (r *NifiUserGroupReconciler) removeFinalizer(ctx context.Context, userGroup *v1alpha1.NifiUserGroup) error { +func (r *NifiUserGroupReconciler) 
removeFinalizer(ctx context.Context, userGroup *v1alpha1.NifiUserGroup) error { userGroup.SetFinalizers(util.StringSliceRemove(userGroup.GetFinalizers(), userGroupFinalizer)) _, err := r.updateAndFetchLatest(ctx, userGroup) return err @@ -268,9 +348,9 @@ func (r *NifiUserGroupReconciler) removeFinalizer(ctx context.Context, userGroup func (r *NifiUserGroupReconciler) finalizeNifiNifiUserGroup( userGroup *v1alpha1.NifiUserGroup, users []*v1alpha1.NifiUser, - cluster *v1alpha1.NifiCluster) error { + config *nificlient.NifiConfig) error { - if err := usergroup.RemoveUserGroup(r.Client, userGroup, users, cluster); err != nil { + if err := usergroup.RemoveUserGroup(userGroup, users, config); err != nil { return err } diff --git a/nifikop.iml b/nifikop.iml index 8021953ed..49df094a9 100644 --- a/nifikop.iml +++ b/nifikop.iml @@ -1,5 +1,6 @@ + diff --git a/pkg/clientwrappers/accesspolicies/policies.go b/pkg/clientwrappers/accesspolicies/policies.go index a5fa5b144..7bcfc88d8 100644 --- a/pkg/clientwrappers/accesspolicies/policies.go +++ b/pkg/clientwrappers/accesspolicies/policies.go @@ -7,20 +7,18 @@ import ( "github.com/Orange-OpenSource/nifikop/pkg/nificlient" nigoapi "github.com/erdrix/nigoapi/pkg/nifi" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" ) var log = ctrl.Log.WithName("accesspolicies-method") -func ExistAccessPolicies(client client.Client, accessPolicy *v1alpha1.AccessPolicy, - cluster *v1alpha1.NifiCluster) (bool, error) { +func ExistAccessPolicies(accessPolicy *v1alpha1.AccessPolicy, config *nificlient.NifiConfig) (bool, error) { - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return false, err } - entity, err := nClient.GetAccessPolicy(string(accessPolicy.Action), accessPolicy.GetResource(cluster)) + entity, err := nClient.GetAccessPolicy(string(accessPolicy.Action), accessPolicy.GetResource(config.RootProcessGroupId)) if err 
:= clientwrappers.ErrorGetOperation(log, err, "Get access policy"); err != nil { if err == nificlient.ErrNifiClusterReturned404 { return false, nil @@ -31,10 +29,9 @@ func ExistAccessPolicies(client client.Client, accessPolicy *v1alpha1.AccessPoli return entity != nil, nil } -func CreateAccessPolicy(client client.Client, accessPolicy *v1alpha1.AccessPolicy, - cluster *v1alpha1.NifiCluster) (string, error) { +func CreateAccessPolicy(accessPolicy *v1alpha1.AccessPolicy, config *nificlient.NifiConfig) (string, error) { - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return "", err } @@ -44,7 +41,7 @@ func CreateAccessPolicy(client client.Client, accessPolicy *v1alpha1.AccessPolic accessPolicy, []*v1alpha1.NifiUser{}, []*v1alpha1.NifiUser{}, []*v1alpha1.NifiUserGroup{}, []*v1alpha1.NifiUserGroup{}, - cluster, + config, &scratchEntity) entity, err := nClient.CreateAccessPolicy(scratchEntity) @@ -56,52 +53,50 @@ func CreateAccessPolicy(client client.Client, accessPolicy *v1alpha1.AccessPolic } func UpdateAccessPolicy( - client client.Client, accessPolicy *v1alpha1.AccessPolicy, addUsers []*v1alpha1.NifiUser, removeUsers []*v1alpha1.NifiUser, addUserGroups []*v1alpha1.NifiUserGroup, removeUserGroups []*v1alpha1.NifiUserGroup, - cluster *v1alpha1.NifiCluster) error { + config *nificlient.NifiConfig) error { - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return err } // Check if the access policy exist - exist, err := ExistAccessPolicies(client, accessPolicy, cluster) + exist, err := ExistAccessPolicies(accessPolicy, config) if err != nil { return err } if !exist { - _, err := CreateAccessPolicy(client, accessPolicy, cluster) + _, err := CreateAccessPolicy(accessPolicy, config) if err != nil { return err } } - entity, err := nClient.GetAccessPolicy(string(accessPolicy.Action), 
accessPolicy.GetResource(cluster)) + entity, err := nClient.GetAccessPolicy(string(accessPolicy.Action), accessPolicy.GetResource(config.RootProcessGroupId)) if err := clientwrappers.ErrorGetOperation(log, err, "Get access policy"); err != nil { return err } - updateAccessPolicyEntity(accessPolicy, addUsers, removeUsers, addUserGroups, removeUserGroups, cluster, entity) + updateAccessPolicyEntity(accessPolicy, addUsers, removeUsers, addUserGroups, removeUserGroups, config, entity) entity, err = nClient.UpdateAccessPolicy(*entity) return clientwrappers.ErrorUpdateOperation(log, err, "Update user") } func UpdateAccessPolicyEntity( - client client.Client, entity *nigoapi.AccessPolicyEntity, addUsers []*v1alpha1.NifiUser, removeUsers []*v1alpha1.NifiUser, addUserGroups []*v1alpha1.NifiUserGroup, removeUserGroups []*v1alpha1.NifiUserGroup, - cluster *v1alpha1.NifiCluster) error { + config *nificlient.NifiConfig) error { - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return err } @@ -124,7 +119,7 @@ func updateAccessPolicyEntity( removeUsers []*v1alpha1.NifiUser, addUserGroups []*v1alpha1.NifiUserGroup, removeUserGroups []*v1alpha1.NifiUserGroup, - cluster *v1alpha1.NifiCluster, + config *nificlient.NifiConfig, entity *nigoapi.AccessPolicyEntity) { var defaultVersion int64 = 0 @@ -144,7 +139,7 @@ func updateAccessPolicyEntity( } entity.Component.Action = string(accessPolicy.Action) - entity.Component.Resource = accessPolicy.GetResource(cluster) + entity.Component.Resource = accessPolicy.GetResource(config.RootProcessGroupId) addRemoveUsersFromAccessPolicyEntity(addUsers, removeUsers, entity) addRemoveUserGroupsFromAccessPolicyEntity(addUserGroups, removeUserGroups, entity) diff --git a/pkg/clientwrappers/controllersettings/controllersettings.go b/pkg/clientwrappers/controllersettings/controllersettings.go index f4a85c692..bae0c3290 100644 --- 
a/pkg/clientwrappers/controllersettings/controllersettings.go +++ b/pkg/clientwrappers/controllersettings/controllersettings.go @@ -4,9 +4,9 @@ import ( "github.com/Orange-OpenSource/nifikop/api/v1alpha1" "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers" "github.com/Orange-OpenSource/nifikop/pkg/common" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient" nigoapi "github.com/erdrix/nigoapi/pkg/nifi" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" ) var log = ctrl.Log.WithName("controllersettings-method") @@ -15,9 +15,9 @@ func controllerConfigIsSync(cluster *v1alpha1.NifiCluster, entity *nigoapi.Contr return cluster.Spec.ReadOnlyConfig.GetMaximumTimerDrivenThreadCount() == entity.Component.MaxTimerDrivenThreadCount } -func SyncConfiguration(client client.Client, cluster *v1alpha1.NifiCluster) error { +func SyncConfiguration(config *nificlient.NifiConfig, cluster *v1alpha1.NifiCluster) error { - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return err } diff --git a/pkg/clientwrappers/dataflow/dataflow.go b/pkg/clientwrappers/dataflow/dataflow.go index 9e22dec22..b2bd6f822 100644 --- a/pkg/clientwrappers/dataflow/dataflow.go +++ b/pkg/clientwrappers/dataflow/dataflow.go @@ -10,19 +10,18 @@ import ( "github.com/Orange-OpenSource/nifikop/pkg/nificlient" nigoapi "github.com/erdrix/nigoapi/pkg/nifi" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" ) var log = ctrl.Log.WithName("dataflow-method") // DataflowExist check if the NifiDataflow exist on NiFi Cluster -func DataflowExist(client client.Client, flow *v1alpha1.NifiDataflow, cluster *v1alpha1.NifiCluster) (bool, error) { +func DataflowExist(flow *v1alpha1.NifiDataflow, config *nificlient.NifiConfig) (bool, error) { if flow.Status.ProcessGroupID == "" { return false, nil } - nClient, err := common.NewNodeConnection(log, client, cluster) + 
nClient, err := common.NewClusterConnection(log, config) if err != nil { return false, err } @@ -38,8 +37,8 @@ func DataflowExist(client client.Client, flow *v1alpha1.NifiDataflow, cluster *v return flowEntity != nil, nil } -func RootProcessGroup(client client.Client, cluster *v1alpha1.NifiCluster) (string, error) { - nClient, err := common.NewNodeConnection(log, client, cluster) +func RootProcessGroup(config *nificlient.NifiConfig) (string, error) { + nClient, err := common.NewClusterConnection(log, config) if err != nil { return "", err } @@ -56,21 +55,18 @@ func RootProcessGroup(client client.Client, cluster *v1alpha1.NifiCluster) (stri } // CreateDataflow will deploy the NifiDataflow on NiFi Cluster -func CreateDataflow( - client client.Client, - flow *v1alpha1.NifiDataflow, - cluster *v1alpha1.NifiCluster, +func CreateDataflow(flow *v1alpha1.NifiDataflow, config *nificlient.NifiConfig, registry *v1alpha1.NifiRegistryClient) (*v1alpha1.NifiDataflowStatus, error) { - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return nil, err } scratchEntity := nigoapi.ProcessGroupEntity{} - updateProcessGroupEntity(flow, registry, cluster, &scratchEntity) + updateProcessGroupEntity(flow, registry, config, &scratchEntity) - entity, err := nClient.CreateProcessGroup(scratchEntity, flow.Spec.GetParentProcessGroupID(cluster)) + entity, err := nClient.CreateProcessGroup(scratchEntity, flow.Spec.GetParentProcessGroupID(config.RootProcessGroupId)) if err := clientwrappers.ErrorCreateOperation(log, err, "Create process-group"); err != nil { return nil, err @@ -81,8 +77,8 @@ func CreateDataflow( } // ScheduleDataflow will schedule the controller services and components of the NifiDataflow. 
-func ScheduleDataflow(client client.Client, flow *v1alpha1.NifiDataflow, cluster *v1alpha1.NifiCluster) error { - nClient, err := common.NewNodeConnection(log, client, cluster) +func ScheduleDataflow(flow *v1alpha1.NifiDataflow, config *nificlient.NifiConfig) error { + nClient, err := common.NewClusterConnection(log, config) if err != nil { return err } @@ -118,7 +114,7 @@ func ScheduleDataflow(client client.Client, flow *v1alpha1.NifiDataflow, cluster } // Check all components are ok - processGroups, _, _, _, err := listComponents(client, cluster, flow.Status.ProcessGroupID) + processGroups, _, _, _, err := listComponents(config, flow.Status.ProcessGroupID) pGEntity, err := nClient.GetProcessGroup(flow.Status.ProcessGroupID) if err := clientwrappers.ErrorGetOperation(log, err, "Get process group"); err != nil { return err @@ -136,13 +132,12 @@ func ScheduleDataflow(client client.Client, flow *v1alpha1.NifiDataflow, cluster // IsOutOfSyncDataflow control if the deployed dataflow is out of sync with the NifiDataflow resource func IsOutOfSyncDataflow( - client client.Client, flow *v1alpha1.NifiDataflow, - cluster *v1alpha1.NifiCluster, + config *nificlient.NifiConfig, registry *v1alpha1.NifiRegistryClient, parameterContext *v1alpha1.NifiParameterContext) (bool, error) { - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return false, err } @@ -152,7 +147,7 @@ func IsOutOfSyncDataflow( return false, err } - processGroups, _, _, _, err := listComponents(client, cluster, flow.Status.ProcessGroupID) + processGroups, _, _, _, err := listComponents(config, flow.Status.ProcessGroupID) if err != nil { return false, err } @@ -160,7 +155,7 @@ func IsOutOfSyncDataflow( return isParameterContextChanged(parameterContext, processGroups) || isVersioningChanged(flow, registry, pGEntity) || !isVersionSync(flow, pGEntity) || localChanged(pGEntity) || - isParentProcessGroupChanged(flow, cluster, 
pGEntity) || isNameChanged(flow, pGEntity), nil + isParentProcessGroupChanged(flow, config, pGEntity) || isNameChanged(flow, pGEntity), nil } func isParameterContextChanged( @@ -185,9 +180,9 @@ func isParameterContextChanged( func isParentProcessGroupChanged( flow *v1alpha1.NifiDataflow, - cluster *v1alpha1.NifiCluster, + config *nificlient.NifiConfig, pgFlowEntity *nigoapi.ProcessGroupEntity) bool { - return flow.Spec.GetParentProcessGroupID(cluster) != pgFlowEntity.Component.ParentGroupId + return flow.Spec.GetParentProcessGroupID(config.RootProcessGroupId) != pgFlowEntity.Component.ParentGroupId } func isNameChanged(flow *v1alpha1.NifiDataflow, pgFlowEntity *nigoapi.ProcessGroupEntity) bool { @@ -217,13 +212,12 @@ func isVersioningChanged( // SyncDataflow implements the logic to sync a NifiDataflow with the deployed flow. func SyncDataflow( - client client.Client, flow *v1alpha1.NifiDataflow, - cluster *v1alpha1.NifiCluster, + config *nificlient.NifiConfig, registry *v1alpha1.NifiRegistryClient, parameterContext *v1alpha1.NifiParameterContext) (*v1alpha1.NifiDataflowStatus, error) { - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return nil, err } @@ -233,7 +227,7 @@ func SyncDataflow( return nil, err } - processGroups, _, _, _, err := listComponents(client, cluster, flow.Status.ProcessGroupID) + processGroups, _, _, _, err := listComponents(config, flow.Status.ProcessGroupID) if err != nil { return nil, err } @@ -266,11 +260,11 @@ func SyncDataflow( } if isVersioningChanged(flow, registry, pGEntity) { - return RemoveDataflow(client, flow, cluster) + return RemoveDataflow(flow, config) } if isNameChanged(flow, pGEntity) { - pGEntity.Component.ParentGroupId = flow.Spec.GetParentProcessGroupID(cluster) + pGEntity.Component.ParentGroupId = flow.Spec.GetParentProcessGroupID(config.RootProcessGroupId) pGEntity.Component.Name = flow.Name _, err := 
nClient.UpdateProcessGroup(*pGEntity) if err := clientwrappers.ErrorUpdateOperation(log, err, "Stop flow"); err != nil { @@ -279,7 +273,7 @@ func SyncDataflow( return &flow.Status, errorfactory.NifiFlowSyncing{} } - if isParentProcessGroupChanged(flow, cluster, pGEntity) { + if isParentProcessGroupChanged(flow, config, pGEntity) { snippet, err := nClient.CreateSnippet(nigoapi.SnippetEntity{ Snippet: &nigoapi.SnippetDto{ @@ -294,7 +288,7 @@ func SyncDataflow( _, err = nClient.UpdateSnippet(nigoapi.SnippetEntity{ Snippet: &nigoapi.SnippetDto{ Id: snippet.Snippet.Id, - ParentGroupId: flow.Spec.GetParentProcessGroupID(cluster), + ParentGroupId: flow.Spec.GetParentProcessGroupID(config.RootProcessGroupId), }, }) if err := clientwrappers.ErrorUpdateOperation(log, err, "Update snippet"); err != nil { @@ -328,12 +322,12 @@ func SyncDataflow( } } - isOutOfSink, err := IsOutOfSyncDataflow(client, flow, cluster, registry, parameterContext) + isOutOfSink, err := IsOutOfSyncDataflow(flow, config, registry, parameterContext) if err != nil { return &flow.Status, err } if isOutOfSink { - status, err := prepareUpdatePG(client, flow, cluster) + status, err := prepareUpdatePG(flow, config) if err != nil { return status, err } @@ -396,12 +390,9 @@ func SyncDataflow( } // prepareUpdatePG ensure drain or drop logic -func prepareUpdatePG( - client client.Client, - flow *v1alpha1.NifiDataflow, - cluster *v1alpha1.NifiCluster) (*v1alpha1.NifiDataflowStatus, error) { +func prepareUpdatePG(flow *v1alpha1.NifiDataflow, config *nificlient.NifiConfig) (*v1alpha1.NifiDataflowStatus, error) { - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return nil, err } @@ -435,7 +426,7 @@ func prepareUpdatePG( } // Drop all events in connections - _, _, connections, _, err := listComponents(client, cluster, flow.Status.ProcessGroupID) + _, _, connections, _, err := listComponents(config, flow.Status.ProcessGroupID) if 
err := clientwrappers.ErrorGetOperation(log, err, "Get recursively flow components"); err != nil { return nil, err } @@ -455,7 +446,7 @@ func prepareUpdatePG( } else { // Check all components are ok - flowEntity, err := nClient.GetFlow(flow.Spec.GetParentProcessGroupID(cluster)) + flowEntity, err := nClient.GetFlow(flow.Spec.GetParentProcessGroupID(config.RootProcessGroupId)) if err := clientwrappers.ErrorGetOperation(log, err, "Get flow"); err != nil { return nil, err } @@ -467,7 +458,7 @@ func prepareUpdatePG( // If flow is not fully drained if pgEntity.Status.AggregateSnapshot.FlowFilesQueued != 0 { - _, processors, connections, inputPorts, err := listComponents(client, cluster, flow.Status.ProcessGroupID) + _, processors, connections, inputPorts, err := listComponents(config, flow.Status.ProcessGroupID) if err := clientwrappers.ErrorGetOperation(log, err, "Get recursively flow components"); err != nil { return nil, err } @@ -509,24 +500,21 @@ func prepareUpdatePG( return &flow.Status, nil } -func RemoveDataflow( - client client.Client, - flow *v1alpha1.NifiDataflow, - cluster *v1alpha1.NifiCluster) (*v1alpha1.NifiDataflowStatus, error) { +func RemoveDataflow(flow *v1alpha1.NifiDataflow,config *nificlient.NifiConfig) (*v1alpha1.NifiDataflowStatus, error) { // Prepare Dataflow - status, err := prepareUpdatePG(client, flow, cluster) + status, err := prepareUpdatePG(flow, config) if err != nil { return status, err } flow.Status = *status - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return nil, err } - if err := UnscheduleDataflow(client, flow, cluster); err != nil { + if err := UnscheduleDataflow(flow, config); err != nil { return &flow.Status, err } @@ -546,8 +534,8 @@ func RemoveDataflow( return nil, nil } -func UnscheduleDataflow(client client.Client, flow *v1alpha1.NifiDataflow, cluster *v1alpha1.NifiCluster) error { - nClient, err := common.NewNodeConnection(log, 
client, cluster) +func UnscheduleDataflow(flow *v1alpha1.NifiDataflow,config *nificlient.NifiConfig) error { + nClient, err := common.NewClusterConnection(log, config) if err != nil { return err } @@ -583,7 +571,7 @@ func UnscheduleDataflow(client client.Client, flow *v1alpha1.NifiDataflow, clust } // Check all components are ok - flowEntity, err := nClient.GetFlow(flow.Spec.GetParentProcessGroupID(cluster)) + flowEntity, err := nClient.GetFlow(flow.Spec.GetParentProcessGroupID(config.RootProcessGroupId)) if err := clientwrappers.ErrorGetOperation(log, err, "Get flow"); err != nil { return err } @@ -615,9 +603,7 @@ func processGroupFromFlow( } // listComponents will get all ProcessGroups, Processors, Connections and Ports recursively -func listComponents( - client client.Client, - cluster *v1alpha1.NifiCluster, +func listComponents(config *nificlient.NifiConfig, processGroupID string) ([]nigoapi.ProcessGroupEntity, []nigoapi.ProcessorEntity, []nigoapi.ConnectionEntity, []nigoapi.PortEntity, error) { var processGroups []nigoapi.ProcessGroupEntity @@ -625,7 +611,7 @@ func listComponents( var connections []nigoapi.ConnectionEntity var inputPorts []nigoapi.PortEntity - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return processGroups, processors, connections, inputPorts, err } @@ -639,7 +625,7 @@ func listComponents( inputPorts = flow.InputPorts for _, pg := range flow.ProcessGroups { - childPG, childP, childC, childI, err := listComponents(client, cluster, pg.Id) + childPG, childP, childC, childI, err := listComponents(config, pg.Id) if err != nil { return processGroups, processors, connections, inputPorts, err } @@ -693,7 +679,7 @@ func updateRequest2Status(updateRequest *nigoapi.VersionedFlowUpdateRequestEntit func updateProcessGroupEntity( flow *v1alpha1.NifiDataflow, registry *v1alpha1.NifiRegistryClient, - cluster *v1alpha1.NifiCluster, + config *nificlient.NifiConfig, 
entity *nigoapi.ProcessGroupEntity) { stringFactory := func() string { return "" } @@ -719,7 +705,7 @@ func updateProcessGroupEntity( } entity.Component.Name = flow.Name - entity.Component.ParentGroupId = flow.Spec.GetParentProcessGroupID(cluster) + entity.Component.ParentGroupId = flow.Spec.GetParentProcessGroupID(config.RootProcessGroupId) entity.Component.VersionControlInformation = &nigoapi.VersionControlInformationDto{ GroupId: stringFactory(), RegistryName: stringFactory(), diff --git a/pkg/clientwrappers/parametercontext/parametercontext.go b/pkg/clientwrappers/parametercontext/parametercontext.go index 98ae31280..605ba165f 100644 --- a/pkg/clientwrappers/parametercontext/parametercontext.go +++ b/pkg/clientwrappers/parametercontext/parametercontext.go @@ -9,19 +9,17 @@ import ( nigoapi "github.com/erdrix/nigoapi/pkg/nifi" corev1 "k8s.io/api/core/v1" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" ) var log = ctrl.Log.WithName("parametercontext-method") -func ExistParameterContext(client client.Client, parameterContext *v1alpha1.NifiParameterContext, - cluster *v1alpha1.NifiCluster) (bool, error) { +func ExistParameterContext( parameterContext *v1alpha1.NifiParameterContext, config *nificlient.NifiConfig) (bool, error) { if parameterContext.Status.Id == "" { return false, nil } - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return false, err } @@ -37,12 +35,10 @@ func ExistParameterContext(client client.Client, parameterContext *v1alpha1.Nifi return entity != nil, nil } -func CreateParameterContext( - client client.Client, - parameterContext *v1alpha1.NifiParameterContext, - parameterSecrets []*corev1.Secret, - cluster *v1alpha1.NifiCluster) (*v1alpha1.NifiParameterContextStatus, error) { - nClient, err := common.NewNodeConnection(log, client, cluster) +func CreateParameterContext(parameterContext *v1alpha1.NifiParameterContext, 
parameterSecrets []*corev1.Secret, + config *nificlient.NifiConfig) (*v1alpha1.NifiParameterContextStatus, error) { + + nClient, err := common.NewClusterConnection(log, config) if err != nil { return nil, err } @@ -61,13 +57,10 @@ func CreateParameterContext( return ¶meterContext.Status, nil } -func SyncParameterContext( - client client.Client, - parameterContext *v1alpha1.NifiParameterContext, - parameterSecrets []*corev1.Secret, - cluster *v1alpha1.NifiCluster) (*v1alpha1.NifiParameterContextStatus, error) { +func SyncParameterContext(parameterContext *v1alpha1.NifiParameterContext, parameterSecrets []*corev1.Secret, + config *nificlient.NifiConfig) (*v1alpha1.NifiParameterContextStatus, error) { - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return nil, err } @@ -113,12 +106,10 @@ func SyncParameterContext( return &status, nil } -func RemoveParameterContext(client client.Client, - parameterContext *v1alpha1.NifiParameterContext, - parameterSecrets []*corev1.Secret, - cluster *v1alpha1.NifiCluster) error { +func RemoveParameterContext(parameterContext *v1alpha1.NifiParameterContext, parameterSecrets []*corev1.Secret, + config *nificlient.NifiConfig) error { - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return err } diff --git a/pkg/clientwrappers/registryclient/registryclient.go b/pkg/clientwrappers/registryclient/registryclient.go index f5c615094..071c14095 100644 --- a/pkg/clientwrappers/registryclient/registryclient.go +++ b/pkg/clientwrappers/registryclient/registryclient.go @@ -7,19 +7,17 @@ import ( "github.com/Orange-OpenSource/nifikop/pkg/nificlient" nigoapi "github.com/erdrix/nigoapi/pkg/nifi" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" ) var log = ctrl.Log.WithName("registryclient-method") -func ExistRegistryClient(client 
client.Client, registryClient *v1alpha1.NifiRegistryClient, - cluster *v1alpha1.NifiCluster) (bool, error) { +func ExistRegistryClient(registryClient *v1alpha1.NifiRegistryClient, config *nificlient.NifiConfig) (bool, error) { if registryClient.Status.Id == "" { return false, nil } - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return false, err } @@ -35,9 +33,9 @@ func ExistRegistryClient(client client.Client, registryClient *v1alpha1.NifiRegi return entity != nil, nil } -func CreateRegistryClient(client client.Client, registryClient *v1alpha1.NifiRegistryClient, - cluster *v1alpha1.NifiCluster) (*v1alpha1.NifiRegistryClientStatus, error) { - nClient, err := common.NewNodeConnection(log, client, cluster) +func CreateRegistryClient(registryClient *v1alpha1.NifiRegistryClient, + config *nificlient.NifiConfig) (*v1alpha1.NifiRegistryClientStatus, error) { + nClient, err := common.NewClusterConnection(log, config) if err != nil { return nil, err } @@ -56,10 +54,10 @@ func CreateRegistryClient(client client.Client, registryClient *v1alpha1.NifiReg }, nil } -func SyncRegistryClient(client client.Client, registryClient *v1alpha1.NifiRegistryClient, - cluster *v1alpha1.NifiCluster) (*v1alpha1.NifiRegistryClientStatus, error) { +func SyncRegistryClient(registryClient *v1alpha1.NifiRegistryClient, + config *nificlient.NifiConfig) (*v1alpha1.NifiRegistryClientStatus, error) { - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return nil, err } @@ -84,9 +82,9 @@ func SyncRegistryClient(client client.Client, registryClient *v1alpha1.NifiRegis return &status, nil } -func RemoveRegistryClient(client client.Client, registryClient *v1alpha1.NifiRegistryClient, - cluster *v1alpha1.NifiCluster) error { - nClient, err := common.NewNodeConnection(log, client, cluster) +func 
RemoveRegistryClient(registryClient *v1alpha1.NifiRegistryClient, + config *nificlient.NifiConfig) error { + nClient, err := common.NewClusterConnection(log, config) if err != nil { return err } diff --git a/pkg/clientwrappers/reportingtask/reportingtask.go b/pkg/clientwrappers/reportingtask/reportingtask.go index 73a01f5d9..4038f6851 100644 --- a/pkg/clientwrappers/reportingtask/reportingtask.go +++ b/pkg/clientwrappers/reportingtask/reportingtask.go @@ -8,7 +8,6 @@ import ( "github.com/Orange-OpenSource/nifikop/pkg/nificlient" nigoapi "github.com/erdrix/nigoapi/pkg/nifi" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" "strconv" ) @@ -24,13 +23,13 @@ const ( reportingTaskSendJVM = "true" ) -func ExistReportingTaks(client client.Client, cluster *v1alpha1.NifiCluster) (bool, error) { +func ExistReportingTaks(config *nificlient.NifiConfig, cluster *v1alpha1.NifiCluster) (bool, error) { if cluster.Status.PrometheusReportingTask.Id == "" { return false, nil } - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return false, err } @@ -46,8 +45,8 @@ func ExistReportingTaks(client client.Client, cluster *v1alpha1.NifiCluster) (bo return entity != nil, nil } -func CreateReportingTask(client client.Client, cluster *v1alpha1.NifiCluster) (*v1alpha1.PrometheusReportingTaskStatus, error) { - nClient, err := common.NewNodeConnection(log, client, cluster) +func CreateReportingTask(config *nificlient.NifiConfig, cluster *v1alpha1.NifiCluster) (*v1alpha1.PrometheusReportingTaskStatus, error) { + nClient, err := common.NewClusterConnection(log, config) if err != nil { return nil, err } @@ -66,9 +65,9 @@ func CreateReportingTask(client client.Client, cluster *v1alpha1.NifiCluster) (* }, nil } -func SyncReportingTask(client client.Client, cluster *v1alpha1.NifiCluster) (*v1alpha1.PrometheusReportingTaskStatus, error) { +func SyncReportingTask(config 
*nificlient.NifiConfig, cluster *v1alpha1.NifiCluster) (*v1alpha1.PrometheusReportingTaskStatus, error) { - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return nil, err } @@ -124,8 +123,8 @@ func SyncReportingTask(client client.Client, cluster *v1alpha1.NifiCluster) (*v1 return &status, nil } -func RemoveReportingTaks(client client.Client, cluster *v1alpha1.NifiCluster) error { - nClient, err := common.NewNodeConnection(log, client, cluster) +func RemoveReportingTaks(config *nificlient.NifiConfig, cluster *v1alpha1.NifiCluster) error { + nClient, err := common.NewClusterConnection(log, config) if err != nil { return err } diff --git a/pkg/clientwrappers/user/user.go b/pkg/clientwrappers/user/user.go index ec2f1ed4d..bab0b6c57 100644 --- a/pkg/clientwrappers/user/user.go +++ b/pkg/clientwrappers/user/user.go @@ -8,19 +8,17 @@ import ( "github.com/Orange-OpenSource/nifikop/pkg/nificlient" nigoapi "github.com/erdrix/nigoapi/pkg/nifi" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" ) var log = ctrl.Log.WithName("user-method") -func ExistUser(client client.Client, user *v1alpha1.NifiUser, - cluster *v1alpha1.NifiCluster) (bool, error) { +func ExistUser(user *v1alpha1.NifiUser, config *nificlient.NifiConfig) (bool, error) { if user.Status.Id == "" { return false, nil } - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return false, err } @@ -36,10 +34,9 @@ func ExistUser(client client.Client, user *v1alpha1.NifiUser, return entity != nil, nil } -func FindUserByIdentity(client client.Client, user *v1alpha1.NifiUser, - cluster *v1alpha1.NifiCluster) (*v1alpha1.NifiUserStatus, error) { +func FindUserByIdentity(user *v1alpha1.NifiUser, config *nificlient.NifiConfig) (*v1alpha1.NifiUserStatus, error) { - nClient, err := common.NewNodeConnection(log, client, 
cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return nil, err } @@ -64,10 +61,9 @@ func FindUserByIdentity(client client.Client, user *v1alpha1.NifiUser, return nil, nil } -func CreateUser(client client.Client, user *v1alpha1.NifiUser, - cluster *v1alpha1.NifiCluster) (*v1alpha1.NifiUserStatus, error) { +func CreateUser(user *v1alpha1.NifiUser, config *nificlient.NifiConfig) (*v1alpha1.NifiUserStatus, error) { - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return nil, err } @@ -86,10 +82,9 @@ func CreateUser(client client.Client, user *v1alpha1.NifiUser, }, nil } -func SyncUser(client client.Client, user *v1alpha1.NifiUser, - cluster *v1alpha1.NifiCluster) (*v1alpha1.NifiUserStatus, error) { +func SyncUser(user *v1alpha1.NifiUser, config *nificlient.NifiConfig) (*v1alpha1.NifiUserStatus, error) { - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return nil, err } @@ -116,13 +111,13 @@ func SyncUser(client client.Client, user *v1alpha1.NifiUser, contains := false for _, accessPolicy := range user.Spec.AccessPolicies { if entity.Component.Action == string(accessPolicy.Action) && - entity.Component.Resource == accessPolicy.GetResource(cluster) { + entity.Component.Resource == accessPolicy.GetResource(config.RootProcessGroupId) { contains = true break } } if !contains { - if err := accesspolicies.UpdateAccessPolicyEntity(client, + if err := accesspolicies.UpdateAccessPolicyEntity( &nigoapi.AccessPolicyEntity{ Component: &nigoapi.AccessPolicyDto{ Id: entity.Component.Id, @@ -131,7 +126,7 @@ func SyncUser(client client.Client, user *v1alpha1.NifiUser, }, }, []*v1alpha1.NifiUser{}, []*v1alpha1.NifiUser{user}, - []*v1alpha1.NifiUserGroup{}, []*v1alpha1.NifiUserGroup{}, cluster); err != nil { + []*v1alpha1.NifiUserGroup{}, []*v1alpha1.NifiUserGroup{}, 
config); err != nil { return &status, err } } @@ -142,15 +137,15 @@ func SyncUser(client client.Client, user *v1alpha1.NifiUser, contains := false for _, entity := range entity.Component.AccessPolicies { if entity.Component.Action == string(accessPolicy.Action) && - entity.Component.Resource == accessPolicy.GetResource(cluster) { + entity.Component.Resource == accessPolicy.GetResource(config.RootProcessGroupId) { contains = true break } } if !contains { - if err := accesspolicies.UpdateAccessPolicy(client, &accessPolicy, + if err := accesspolicies.UpdateAccessPolicy(&accessPolicy, []*v1alpha1.NifiUser{user}, []*v1alpha1.NifiUser{}, - []*v1alpha1.NifiUserGroup{}, []*v1alpha1.NifiUserGroup{}, cluster); err != nil { + []*v1alpha1.NifiUserGroup{}, []*v1alpha1.NifiUserGroup{}, config); err != nil { return &status, err } } @@ -159,8 +154,8 @@ func SyncUser(client client.Client, user *v1alpha1.NifiUser, return &status, nil } -func RemoveUser(client client.Client, user *v1alpha1.NifiUser, cluster *v1alpha1.NifiCluster) error { - nClient, err := common.NewNodeConnection(log, client, cluster) +func RemoveUser(user *v1alpha1.NifiUser, config *nificlient.NifiConfig) error { + nClient, err := common.NewClusterConnection(log, config) if err != nil { return err } diff --git a/pkg/clientwrappers/usergroup/usergroup.go b/pkg/clientwrappers/usergroup/usergroup.go index fd02e429c..c8c1e5d93 100644 --- a/pkg/clientwrappers/usergroup/usergroup.go +++ b/pkg/clientwrappers/usergroup/usergroup.go @@ -8,15 +8,13 @@ import ( "github.com/Orange-OpenSource/nifikop/pkg/nificlient" nigoapi "github.com/erdrix/nigoapi/pkg/nifi" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" ) var log = ctrl.Log.WithName("usergroup-method") -func ExistUserGroup(client client.Client, userGroup *v1alpha1.NifiUserGroup, - cluster *v1alpha1.NifiCluster) (bool, error) { +func ExistUserGroup(userGroup *v1alpha1.NifiUserGroup, config *nificlient.NifiConfig) (bool, error) { - nClient, 
err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return false, err } @@ -38,9 +36,9 @@ func ExistUserGroup(client client.Client, userGroup *v1alpha1.NifiUserGroup, return false, nil } -func CreateUserGroup(client client.Client, userGroup *v1alpha1.NifiUserGroup, users []*v1alpha1.NifiUser, - cluster *v1alpha1.NifiCluster) (*v1alpha1.NifiUserGroupStatus, error) { - nClient, err := common.NewNodeConnection(log, client, cluster) +func CreateUserGroup(userGroup *v1alpha1.NifiUserGroup, + users []*v1alpha1.NifiUser, config *nificlient.NifiConfig) (*v1alpha1.NifiUserGroupStatus, error) { + nClient, err := common.NewClusterConnection(log, config) if err != nil { return nil, err } @@ -59,10 +57,10 @@ func CreateUserGroup(client client.Client, userGroup *v1alpha1.NifiUserGroup, us }, nil } -func SyncUserGroup(client client.Client, userGroup *v1alpha1.NifiUserGroup, users []*v1alpha1.NifiUser, - cluster *v1alpha1.NifiCluster) (*v1alpha1.NifiUserGroupStatus, error) { +func SyncUserGroup(userGroup *v1alpha1.NifiUserGroup, users []*v1alpha1.NifiUser, + config *nificlient.NifiConfig) (*v1alpha1.NifiUserGroupStatus, error) { - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return nil, err } @@ -108,15 +106,15 @@ func SyncUserGroup(client client.Client, userGroup *v1alpha1.NifiUserGroup, user contains := false for _, accessPolicy := range userGroup.Spec.AccessPolicies { if entity.Component.Action == string(accessPolicy.Action) && - entity.Component.Resource == accessPolicy.GetResource(cluster) { + entity.Component.Resource == accessPolicy.GetResource(config.RootProcessGroupId) { contains = true break } } if !contains { - if err := accesspolicies.UpdateAccessPolicyEntity(client, &entity, + if err := accesspolicies.UpdateAccessPolicyEntity(&entity, []*v1alpha1.NifiUser{}, []*v1alpha1.NifiUser{}, - 
[]*v1alpha1.NifiUserGroup{}, []*v1alpha1.NifiUserGroup{userGroup}, cluster); err != nil { + []*v1alpha1.NifiUserGroup{}, []*v1alpha1.NifiUserGroup{userGroup}, config); err != nil { return &status, err } } @@ -127,15 +125,15 @@ func SyncUserGroup(client client.Client, userGroup *v1alpha1.NifiUserGroup, user contains := false for _, entity := range entity.Component.AccessPolicies { if entity.Component.Action == string(accessPolicy.Action) && - entity.Component.Resource == accessPolicy.GetResource(cluster) { + entity.Component.Resource == accessPolicy.GetResource(config.RootProcessGroupId) { contains = true break } } if !contains { - if err := accesspolicies.UpdateAccessPolicy(client, &accessPolicy, + if err := accesspolicies.UpdateAccessPolicy(&accessPolicy, []*v1alpha1.NifiUser{}, []*v1alpha1.NifiUser{}, - []*v1alpha1.NifiUserGroup{userGroup}, []*v1alpha1.NifiUserGroup{}, cluster); err != nil { + []*v1alpha1.NifiUserGroup{userGroup}, []*v1alpha1.NifiUserGroup{}, config); err != nil { return &status, err } } @@ -144,9 +142,8 @@ func SyncUserGroup(client client.Client, userGroup *v1alpha1.NifiUserGroup, user return &status, nil } -func RemoveUserGroup(client client.Client, userGroup *v1alpha1.NifiUserGroup, users []*v1alpha1.NifiUser, - cluster *v1alpha1.NifiCluster) error { - nClient, err := common.NewNodeConnection(log, client, cluster) +func RemoveUserGroup(userGroup *v1alpha1.NifiUserGroup, users []*v1alpha1.NifiUser,config *nificlient.NifiConfig) error { + nClient, err := common.NewClusterConnection(log, config) if err != nil { return err } diff --git a/pkg/common/common.go b/pkg/common/common.go index 03d230766..2958ef890 100644 --- a/pkg/common/common.go +++ b/pkg/common/common.go @@ -8,9 +8,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -// newNifiFromCluster points to the function for retrieving nifi clients, +// NewNifiFromCluster points to the function for retrieving nifi clients, // use as var so it can be overwritten from unit tests -var 
newNifiFromCluster = nificlient.NewFromCluster +var NewNifiFromCluster = nificlient.NewFromCluster // newNodeConnection is a convenience wrapper for creating a node connection // and creating a safer close function @@ -18,7 +18,23 @@ func NewNodeConnection(log logr.Logger, client client.Client, cluster *v1alpha1. // Get a nifi connection log.Info(fmt.Sprintf("Retrieving Nifi client for %s/%s", cluster.Namespace, cluster.Name)) - node, err = newNifiFromCluster(client, cluster) + node, err = NewNifiFromCluster(client, cluster) + if err != nil { + return + } + return +} + +// NewNifiFromCluster points to the function for retrieving nifi clients, +// use as var so it can be overwritten from unit tests +var NewNifiFromConfig = nificlient.NewFromConfig + +// newNodeConnection is a convenience wrapper for creating a node connection +// and creating a safer close function +func NewClusterConnection(log logr.Logger, config *nificlient.NifiConfig) (node nificlient.NifiClient, err error) { + + // Get a nifi connection + node, err = NewNifiFromConfig(config) if err != nil { return } diff --git a/pkg/k8sutil/status.go b/pkg/k8sutil/status.go index eda1a19da..5cc5c5a76 100644 --- a/pkg/k8sutil/status.go +++ b/pkg/k8sutil/status.go @@ -54,7 +54,8 @@ func UpdateNodeStatus(c client.Client, nodeIds []string, cluster *v1alpha1.NifiC cluster.Status.NodesState = map[string]v1alpha1.NodeState{nodeId: {ConfigurationState: s}} case v1alpha1.InitClusterNode: cluster.Status.NodesState = map[string]v1alpha1.NodeState{nodeId: {InitClusterNode: s}} - + case bool: + cluster.Status.NodesState = map[string]v1alpha1.NodeState{nodeId: {PodIsReady: s}} } } else if val, ok := cluster.Status.NodesState[nodeId]; ok { switch s := state.(type) { @@ -64,6 +65,8 @@ func UpdateNodeStatus(c client.Client, nodeIds []string, cluster *v1alpha1.NifiC val.ConfigurationState = s case v1alpha1.InitClusterNode: val.InitClusterNode = s + case bool: + val.PodIsReady = s } cluster.Status.NodesState[nodeId] = val } else 
{ @@ -74,6 +77,8 @@ func UpdateNodeStatus(c client.Client, nodeIds []string, cluster *v1alpha1.NifiC cluster.Status.NodesState[nodeId] = v1alpha1.NodeState{ConfigurationState: s} case v1alpha1.InitClusterNode: cluster.Status.NodesState[nodeId] = v1alpha1.NodeState{InitClusterNode: s} + case bool: + cluster.Status.NodesState[nodeId] = v1alpha1.NodeState{PodIsReady: s} } } } @@ -104,6 +109,8 @@ func UpdateNodeStatus(c client.Client, nodeIds []string, cluster *v1alpha1.NifiC cluster.Status.NodesState = map[string]v1alpha1.NodeState{nodeId: {ConfigurationState: s}} case v1alpha1.InitClusterNode: cluster.Status.NodesState = map[string]v1alpha1.NodeState{nodeId: {InitClusterNode: s}} + case bool: + cluster.Status.NodesState = map[string]v1alpha1.NodeState{nodeId: {PodIsReady: s}} } } else if val, ok := cluster.Status.NodesState[nodeId]; ok { switch s := state.(type) { @@ -113,6 +120,8 @@ func UpdateNodeStatus(c client.Client, nodeIds []string, cluster *v1alpha1.NifiC val.ConfigurationState = s case v1alpha1.InitClusterNode: val.InitClusterNode = s + case bool: + val.PodIsReady = s } cluster.Status.NodesState[nodeId] = val } else { @@ -123,6 +132,8 @@ func UpdateNodeStatus(c client.Client, nodeIds []string, cluster *v1alpha1.NifiC cluster.Status.NodesState[nodeId] = v1alpha1.NodeState{ConfigurationState: s} case v1alpha1.InitClusterNode: cluster.Status.NodesState[nodeId] = v1alpha1.NodeState{InitClusterNode: s} + case bool: + cluster.Status.NodesState[nodeId] = v1alpha1.NodeState{PodIsReady: s} } } } diff --git a/pkg/nificlient/client.go b/pkg/nificlient/client.go index 53f88df0f..e9500d929 100644 --- a/pkg/nificlient/client.go +++ b/pkg/nificlient/client.go @@ -15,14 +15,14 @@ package nificlient import ( "fmt" + "github.com/Orange-OpenSource/nifikop/api/v1alpha1" "net/http" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "time" - "github.com/Orange-OpenSource/nifikop/api/v1alpha1" "github.com/Orange-OpenSource/nifikop/pkg/errorfactory" 
nigoapi "github.com/erdrix/nigoapi/pkg/nifi" - "sigs.k8s.io/controller-runtime/pkg/client" ) var log = ctrl.Log.WithName("nifi_client") @@ -30,6 +30,22 @@ var log = ctrl.Log.WithName("nifi_client") const ( PRIMARY_NODE = "Primary Node" CLUSTER_COORDINATOR = "Cluster Coordinator" + // ConnectNodeAction states that the NiFi node is connecting to the NiFi Cluster + CONNECTING_STATUS = "CONNECTING" + // ConnectStatus states that the NiFi node is connected to the NiFi Cluster + CONNECTED_STATUS = "CONNECTED" + // DisconnectNodeAction states that the NiFi node is disconnecting from NiFi Cluster + DISCONNECTING_STATUS = "DISCONNECTING" + // DisconnectStatus states that the NiFi node is disconnected from NiFi Cluster + DISCONNECTED_STATUS = "DISCONNECTED" + // OffloadNodeAction states that the NiFi node is offloading data to NiFi Cluster + OFFLOADING_STATUS = "OFFLOADING" + // OffloadStatus states that the NiFi node offloaded data to NiFi Cluster + OFFLOADED_STATUS = "OFFLOADED" + // RemoveNodeAction states that the NiFi node is removing from NiFi Cluster + REMOVING_STATUS = "REMOVING" + // RemoveStatus states that the NiFi node is removed from NiFi Cluster + REMOVED_STATUS = "REMOVED" ) // NiFiClient is the exported interface for NiFi operations @@ -166,6 +182,20 @@ func (n *nifiClient) Build() error { return nil } +// NewFromConfig is a convenient wrapper around New() and ClusterConfig() +func NewFromConfig(opts *NifiConfig) (NifiClient, error) { + var client NifiClient + var err error + + client = New(opts) + err = client.Build() + if err != nil { + return nil, err + } + + return client, nil +} + // NewFromCluster is a convenient wrapper around New() and ClusterConfig() func NewFromCluster(k8sclient client.Client, cluster *v1alpha1.NifiCluster) (NifiClient, error) { var client NifiClient @@ -290,7 +320,7 @@ func isCoordinator(node *nigoapi.NodeDto) bool { } func isConnected(node *nigoapi.NodeDto) bool { - return node.Status == string(v1alpha1.ConnectStatus) + return 
node.Status == CONNECTED_STATUS } func (n *nifiClient) nodeDtoByNodeId(nId int32) *nigoapi.NodeDto { diff --git a/pkg/nificlient/client_test.go b/pkg/nificlient/client_test.go index e3a504c74..e3311c32e 100644 --- a/pkg/nificlient/client_test.go +++ b/pkg/nificlient/client_test.go @@ -104,4 +104,4 @@ func TestNewFromCluster(t *testing.T) { _, err = NewFromCluster(mockClient{}, cluster) assert.IsType(errorfactory.NodesUnreachable{}, err) -} +} \ No newline at end of file diff --git a/pkg/nificlient/config.go b/pkg/nificlient/config.go index b3c172f31..1436c32ab 100644 --- a/pkg/nificlient/config.go +++ b/pkg/nificlient/config.go @@ -1,17 +1,3 @@ -// Copyright 2020 Orange SA -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License.package apis - package nificlient import ( @@ -33,11 +19,12 @@ const ( type NifiConfig struct { nodeURITemplate string NodesURI map[int32]nodeUri - NifiURI string + NifiURI string UseSSL bool - TLSConfig *tls.Config + TLSConfig *tls.Config OperationTimeout int64 + RootProcessGroupId string } type nodeUri struct { @@ -49,7 +36,7 @@ type nodeUri struct { func ClusterConfig(client client.Client, cluster *v1alpha1.NifiCluster) (*NifiConfig, error) { conf := &NifiConfig{} - + conf.RootProcessGroupId = cluster.Status.RootProcessGroupId conf.nodeURITemplate = generateNodesURITemplate(cluster) conf.NodesURI = generateNodesAddress(cluster) conf.NifiURI = nifi.GenerateRequestNiFiAllNodeAddressFromCluster(cluster) @@ -93,4 +80,4 @@ func generateNodesURITemplate(cluster *v1alpha1.NifiCluster) string { return nodeNameTemplate + fmt.Sprintf(".%s", strings.SplitAfterN(nifi.GenerateRequestNiFiNodeAddressFromCluster(0, cluster), ".", 2)[1], ) -} +} \ No newline at end of file diff --git a/pkg/nificlient/config_test.go b/pkg/nificlient/config_test.go index beba28c7c..26ce6c7bb 100644 --- a/pkg/nificlient/config_test.go +++ b/pkg/nificlient/config_test.go @@ -1,17 +1,3 @@ -// Copyright 2020 Orange SA -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License.package apis - package nificlient import ( @@ -156,4 +142,4 @@ func TestGenerateNodesURITemplate(t *testing.T) { fmt.Sprintf("%s-%s-node.%s.svc.cluster.local:%d", clusterName, "%d", clusterNamespace, httpContainerPort), generateNodesURITemplate(cluster)) -} +} \ No newline at end of file diff --git a/pkg/nificlient/system.go b/pkg/nificlient/system.go index 2f6d341b4..da87033a0 100644 --- a/pkg/nificlient/system.go +++ b/pkg/nificlient/system.go @@ -15,7 +15,6 @@ package nificlient import ( - "github.com/Orange-OpenSource/nifikop/api/v1alpha1" nigoapi "github.com/erdrix/nigoapi/pkg/nifi" ) @@ -62,21 +61,21 @@ func (n *nifiClient) GetClusterNode(nId int32) (*nigoapi.NodeEntity, error) { func (n *nifiClient) DisconnectClusterNode(nId int32) (*nigoapi.NodeEntity, error) { // Request to update the node status to DISCONNECTING - nodeEntity, err := n.setClusterNodeStatus(nId, v1alpha1.DisconnectNodeAction, v1alpha1.DisconnectStatus) + nodeEntity, err := n.setClusterNodeStatus(nId, DISCONNECTING_STATUS, DISCONNECTED_STATUS) return setClusterNodeStatusReturn(nodeEntity, err, "Disconnect cluster gracefully failed since Nifi node returned non 200") } func (n *nifiClient) ConnectClusterNode(nId int32) (*nigoapi.NodeEntity, error) { // Request to update the node status to CONNECTING - nodeEntity, err := n.setClusterNodeStatus(nId, v1alpha1.ConnectNodeAction, v1alpha1.ConnectStatus) + nodeEntity, err := n.setClusterNodeStatus(nId, CONNECTING_STATUS, CONNECTED_STATUS) return setClusterNodeStatusReturn(nodeEntity, err, "Connect node gracefully failed since Nifi node returned non 200") } func (n *nifiClient) OffloadClusterNode(nId int32) (*nigoapi.NodeEntity, error) { // Request to update the node status to OFFLOADING - nodeEntity, err := n.setClusterNodeStatus(nId, v1alpha1.OffloadNodeAction, v1alpha1.OffloadStatus) + nodeEntity, err := n.setClusterNodeStatus(nId, 
OFFLOADING_STATUS, OFFLOADED_STATUS) return setClusterNodeStatusReturn(nodeEntity, err, "Offload node gracefully failed since Nifi node returned non 200") } @@ -116,7 +115,7 @@ func (n *nifiClient) RemoveClusterNodeFromClusterNodeId(nId string) error { return errorDeleteOperation(rsp, body, err) } -func (n *nifiClient) setClusterNodeStatus(nId int32, status, expectedActionStatus v1alpha1.ActionStep) (*nigoapi.NodeEntity, error) { +func (n *nifiClient) setClusterNodeStatus(nId int32, status, expectedActionStatus string) (*nigoapi.NodeEntity, error) { // Find the Cluster node associated to the NifiCluster nodeId targetedNode := n.nodeDtoByNodeId(nId) if targetedNode == nil { @@ -126,8 +125,8 @@ func (n *nifiClient) setClusterNodeStatus(nId int32, status, expectedActionStatu // Check if the targeted node is still in expected status // TODO : ensure it may not leads to inconsistent situations - if targetedNode.Status == string(expectedActionStatus) || - targetedNode.Status == string(status) { + if targetedNode.Status == expectedActionStatus || + targetedNode.Status == status { node := nigoapi.NodeEntity{Node: targetedNode} return &node, nil diff --git a/pkg/resources/nifi/nifi.go b/pkg/resources/nifi/nifi.go index f73345173..0c440fc5e 100644 --- a/pkg/resources/nifi/nifi.go +++ b/pkg/resources/nifi/nifi.go @@ -17,7 +17,8 @@ package nifi import ( "context" "fmt" - + "github.com/Orange-OpenSource/nifikop/pkg/nificlient" + "github.com/Orange-OpenSource/nifikop/pkg/pki" "reflect" "strings" @@ -30,8 +31,6 @@ import ( "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers/scale" "github.com/Orange-OpenSource/nifikop/pkg/errorfactory" "github.com/Orange-OpenSource/nifikop/pkg/k8sutil" - "github.com/Orange-OpenSource/nifikop/pkg/nificlient" - "github.com/Orange-OpenSource/nifikop/pkg/pki" "github.com/Orange-OpenSource/nifikop/pkg/resources" "github.com/Orange-OpenSource/nifikop/pkg/resources/templates" "github.com/Orange-OpenSource/nifikop/pkg/util" @@ -184,10 +183,17 @@ 
func (r *Reconciler) Reconcile(log logr.Logger) error { } } o = r.pod(node.Id, nodeConfig, pvcs, log) - err = r.reconcileNifiPod(log, o.(*corev1.Pod)) + err, isReady := r.reconcileNifiPod(log, o.(*corev1.Pod)) if err != nil { return err } + if nodeState, ok := r.NifiCluster.Status.NodesState[o.(*corev1.Pod).Labels["nodeId"]]; ok && + nodeState.PodIsReady != isReady { + if err = k8sutil.UpdateNodeStatus(r.Client, []string{o.(*corev1.Pod).Labels["nodeId"]}, r.NifiCluster, isReady, log); err != nil { + return errors.WrapIfWithDetails(err, "could not update status for node(s)", + "id(s)", o.(*corev1.Pod).Labels["nodeId"]) + } + } } var err error @@ -225,7 +231,19 @@ func (r *Reconciler) Reconcile(log logr.Logger) error { } // Reconcile cluster communications - pgRootId, err := dataflow.RootProcessGroup(r.Client, r.NifiCluster) + // Ensure the cluster is ready to receive actions + if !r.NifiCluster.IsReady() { + log.Info("Cluster is not ready yet, will wait until it is.") + // the cluster does not exist - should have been caught pre-flight + return errors.WrapIf(err, "Cluster is not ready yet, will wait until it is.") + } + + clientConfig, err := nificlient.ClusterConfig(r.Client, r.NifiCluster) + if err != nil { + // the cluster does not exist - should have been caught pre-flight + return errors.WrapIf(err, "Failed to create HTTP client the for referenced cluster") + } + pgRootId, err := dataflow.RootProcessGroup(clientConfig) if err != nil { return err } @@ -519,7 +537,7 @@ func isDesiredStorageValueInvalid(desired, current *corev1.PersistentVolumeClaim return desired.Spec.Resources.Requests.Storage().Value() < current.Spec.Resources.Requests.Storage().Value() } -func (r *Reconciler) reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) error { +func (r *Reconciler) reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) (error, bool) { currentPod := desiredPod.DeepCopy() desiredType := reflect.TypeOf(desiredPod) @@ -533,22 +551,25 @@ func (r *Reconciler) 
reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) e } err := r.Client.List(context.TODO(), podList, client.InNamespace(currentPod.Namespace), matchingLabels) if err != nil && len(podList.Items) == 0 { - return errorfactory.New(errorfactory.APIFailure{}, err, "getting resource failed", "kind", desiredType) + return errorfactory.New(errorfactory.APIFailure{}, + err, "getting resource failed", "kind", desiredType), false } if len(podList.Items) == 0 { if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(desiredPod); err != nil { - return errors.WrapIf(err, "could not apply last state to annotation") + return errors.WrapIf(err, "could not apply last state to annotation"), false } if err := r.Client.Create(context.TODO(), desiredPod); err != nil { - return errorfactory.New(errorfactory.APIFailure{}, err, "creating resource failed", "kind", desiredType) + return errorfactory.New(errorfactory.APIFailure{}, + err, "creating resource failed", "kind", desiredType), false } // Update status to Config InSync because node is configured to go statusErr := k8sutil.UpdateNodeStatus(r.Client, []string{desiredPod.Labels["nodeId"]}, r.NifiCluster, v1alpha1.ConfigInSync, log) if statusErr != nil { - return errorfactory.New(errorfactory.StatusUpdateError{}, statusErr, "updating status for resource failed", "kind", desiredType) + return errorfactory.New(errorfactory.StatusUpdateError{}, + statusErr, "updating status for resource failed", "kind", desiredType), false } if val, ok := r.NifiCluster.Status.NodesState[desiredPod.Labels["nodeId"]]; ok && @@ -561,11 +582,12 @@ func (r *Reconciler) reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) e statusErr = k8sutil.UpdateNodeStatus(r.Client, []string{desiredPod.Labels["nodeId"]}, r.NifiCluster, gracefulActionState, log) if statusErr != nil { - return errorfactory.New(errorfactory.StatusUpdateError{}, statusErr, "could not update node graceful action state") + return errorfactory.New(errorfactory.StatusUpdateError{}, + 
statusErr, "could not update node graceful action state"), false } } log.Info("resource created") - return nil + return nil, false } else if len(podList.Items) == 1 { currentPod = podList.Items[0].DeepCopy() nodeId := currentPod.Labels["nodeId"] @@ -574,10 +596,12 @@ func (r *Reconciler) reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) e log.Info(fmt.Sprintf("pod for NodeId %s does not scheduled to node yet", nodeId)) } } else { - return errorfactory.New(errorfactory.InternalError{}, errors.New("reconcile failed"), fmt.Sprintf("could not find status for the given node id, %s", nodeId)) + return errorfactory.New(errorfactory.InternalError{}, errors.New("reconcile failed"), + fmt.Sprintf("could not find status for the given node id, %s", nodeId)), false } } else { - return errorfactory.New(errorfactory.TooManyResources{}, errors.New("reconcile failed"), "more then one matching pod found", "labels", matchingLabels) + return errorfactory.New(errorfactory.TooManyResources{}, errors.New("reconcile failed"), + "more then one matching pod found", "labels", matchingLabels), false } // TODO check if this err == nil check necessary (baluchicken) @@ -609,11 +633,12 @@ func (r *Reconciler) reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) e if err := k8sutil.UpdateNodeStatus(r.Client, []string{desiredPod.Labels["nodeId"]}, r.NifiCluster, v1alpha1.GracefulActionState{ErrorMessage: "", State: v1alpha1.GracefulUpscaleSucceeded}, log); err != nil { - return errorfactory.New(errorfactory.StatusUpdateError{}, err, "could not update node graceful action state") + return errorfactory.New(errorfactory.StatusUpdateError{}, + err, "could not update node graceful action state"), false } } log.V(1).Info("resource is in sync") - return nil + return nil, k8sutil.PodReady(currentPod) } } else { log.Info("resource diffs", @@ -624,14 +649,15 @@ func (r *Reconciler) reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) e } if err := 
patch.DefaultAnnotator.SetLastAppliedAnnotation(desiredPod); err != nil { - return errors.WrapIf(err, "could not apply last state to annotation") + return errors.WrapIf(err, "could not apply last state to annotation"), false } if !k8sutil.IsPodContainsTerminatedContainer(currentPod) { if r.NifiCluster.Status.State != v1alpha1.NifiClusterRollingUpgrading { if err := k8sutil.UpdateCRStatus(r.Client, r.NifiCluster, v1alpha1.NifiClusterRollingUpgrading, log); err != nil { - return errorfactory.New(errorfactory.StatusUpdateError{}, err, "setting state to rolling upgrade failed") + return errorfactory.New(errorfactory.StatusUpdateError{}, + err, "setting state to rolling upgrade failed"), false } } @@ -641,18 +667,21 @@ func (r *Reconciler) reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) e matchingLabels := client.MatchingLabels(LabelsForNifi(r.NifiCluster.Name)) err := r.Client.List(context.TODO(), podList, client.ListOption(client.InNamespace(r.NifiCluster.Namespace)), client.ListOption(matchingLabels)) if err != nil { - return errors.WrapIf(err, "failed to reconcile resource") + return errors.WrapIf(err, "failed to reconcile resource"), false } for _, pod := range podList.Items { if k8sutil.IsMarkedForDeletion(pod.ObjectMeta) { - return errorfactory.New(errorfactory.ReconcileRollingUpgrade{}, errors.New("pod is still terminating"), "rolling upgrade in progress") + return errorfactory.New(errorfactory.ReconcileRollingUpgrade{}, + errors.New("pod is still terminating"), "rolling upgrade in progress"), false } if k8sutil.IsPodContainsPendingContainer(&pod) { - return errorfactory.New(errorfactory.ReconcileRollingUpgrade{}, errors.New("pod is still creating"), "rolling upgrade in progress") + return errorfactory.New(errorfactory.ReconcileRollingUpgrade{}, + errors.New("pod is still creating"), "rolling upgrade in progress"), false } if !k8sutil.PodReady(&pod) { - return errorfactory.New(errorfactory.ReconcileRollingUpgrade{}, errors.New("pod is still not 
ready"), "rolling upgrade in progress") + return errorfactory.New(errorfactory.ReconcileRollingUpgrade{}, + errors.New("pod is still not ready"), "rolling upgrade in progress"), false } } } @@ -660,11 +689,12 @@ func (r *Reconciler) reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) e err = r.Client.Delete(context.TODO(), currentPod) if err != nil { - return errorfactory.New(errorfactory.APIFailure{}, err, "deleting resource failed", "kind", desiredType) + return errorfactory.New(errorfactory.APIFailure{}, + err, "deleting resource failed", "kind", desiredType), false } } - return nil + return nil, k8sutil.PodReady(currentPod) } func (r *Reconciler) reconcileNifiUsersAndGroups(log logr.Logger) error { @@ -839,15 +869,20 @@ func (r *Reconciler) reconcilePrometheusReportingTask(log logr.Logger) error { var err error + clientConfig, err := nificlient.ClusterConfig(r.Client, r.NifiCluster) + if err != nil { + return err + } + // Check if the NiFi reporting task already exist - exist, err := reportingtask.ExistReportingTaks(r.Client, r.NifiCluster) + exist, err := reportingtask.ExistReportingTaks(clientConfig, r.NifiCluster) if err != nil { return errors.WrapIfWithDetails(err, "failure checking for existing prometheus reporting task") } if !exist { // Create reporting task - status, err := reportingtask.CreateReportingTask(r.Client, r.NifiCluster) + status, err := reportingtask.CreateReportingTask(clientConfig, r.NifiCluster) if err != nil { return errors.WrapIfWithDetails(err, "failure creating prometheus reporting task") } @@ -859,7 +894,7 @@ func (r *Reconciler) reconcilePrometheusReportingTask(log logr.Logger) error { } // Sync prometheus reporting task resource with NiFi side component - status, err := reportingtask.SyncReportingTask(r.Client, r.NifiCluster) + status, err := reportingtask.SyncReportingTask(clientConfig, r.NifiCluster) if err != nil { return errors.WrapIfWithDetails(err, "failed to sync PrometheusReportingTask") } @@ -872,9 +907,13 @@ func (r 
*Reconciler) reconcilePrometheusReportingTask(log logr.Logger) error { } func (r *Reconciler) reconcileMaximumTimerDrivenThreadCount(log logr.Logger) error { + clientConfig, err := nificlient.ClusterConfig(r.Client, r.NifiCluster) + if err != nil { + return err + } // Sync Maximum Timer Driven Thread Count with NiFi side component - err := controllersettings.SyncConfiguration(r.Client, r.NifiCluster) + err = controllersettings.SyncConfiguration(clientConfig, r.NifiCluster) if err != nil { return errors.WrapIfWithDetails(err, "failed to sync MaximumTimerDrivenThreadCount") } From c22e89c978bb7bfe0159f215af5705a46c3109cc Mon Sep 17 00:00:00 2001 From: erdrix Date: Tue, 7 Sep 2021 23:28:31 +0200 Subject: [PATCH 02/18] create client config manager & reorganise code --- api/v1alpha1/common_types.go | 71 ++++++++-------- api/v1alpha1/nificluster_types.go | 10 +-- api/v1alpha1/nifidataflow_types.go | 2 +- api/v1alpha1/nifiregistryclient_types.go | 2 +- api/v1alpha1/zz_generated.deepcopy.go | 18 +++-- .../bases/nifi.orange.com_nifidataflows.yaml | 19 ++++- ...nifi.orange.com_nifiparametercontexts.yaml | 19 ++++- .../nifi.orange.com_nifiregistryclients.yaml | 19 ++++- .../bases/nifi.orange.com_nifiusergroups.yaml | 19 ++++- .../crd/bases/nifi.orange.com_nifiusers.yaml | 19 ++++- controllers/controller_common.go | 3 +- controllers/nifidataflow_controller.go | 52 ++++++------ .../nifiparametercontext_controller.go | 54 +++++++------ controllers/nifiregistryclient_controller.go | 55 +++++++------ controllers/nifiuser_controller.go | 69 ++++++++++------ controllers/nifiusergroup_controller.go | 60 +++++++------- pkg/clientwrappers/accesspolicies/policies.go | 11 +-- .../controllersettings/controllersettings.go | 4 +- pkg/clientwrappers/dataflow/dataflow.go | 25 +++--- .../parametercontext/parametercontext.go | 9 ++- .../registryclient/registryclient.go | 9 ++- .../reportingtask/reportingtask.go | 9 ++- pkg/clientwrappers/user/user.go | 11 +-- 
pkg/clientwrappers/usergroup/usergroup.go | 9 ++- pkg/common/common.go | 3 +- pkg/nificlient/client.go | 16 ++-- pkg/nificlient/client_test.go | 22 ++++- pkg/nificlient/config/config_manager.go | 81 +++++++++++++++++++ .../config/nificluster/nificluster.go | 20 +++++ .../nificluster/nificluster_config.go} | 50 ++++++------ .../nificluster/nificluster_config_test.go} | 8 +- pkg/nificlient/mock_client_test.go | 7 +- .../certmanagerpki/certmanager_tls_config.go | 15 +++- pkg/resources/nifi/configmap.go | 6 +- pkg/resources/nifi/nifi.go | 46 +++++++---- pkg/resources/nifi/pod.go | 4 +- pkg/util/clientconfig/common.go | 41 ++++++++++ 37 files changed, 604 insertions(+), 293 deletions(-) create mode 100644 pkg/nificlient/config/config_manager.go create mode 100644 pkg/nificlient/config/nificluster/nificluster.go rename pkg/nificlient/{config.go => config/nificluster/nificluster_config.go} (62%) rename pkg/nificlient/{config_test.go => config/nificluster/nificluster_config_test.go} (97%) create mode 100644 pkg/util/clientconfig/common.go diff --git a/api/v1alpha1/common_types.go b/api/v1alpha1/common_types.go index a4c5026ee..a6c0708f5 100644 --- a/api/v1alpha1/common_types.go +++ b/api/v1alpha1/common_types.go @@ -48,6 +48,9 @@ type InitClusterNode bool // PKIBackend represents an interface implementing the PKIManager type PKIBackend string +// ClientConfigType represents an interface implementing the ClientConfigManager +type ClientConfigType string + // AccessPolicyType represents the type of access policy type AccessPolicyType string @@ -114,39 +117,35 @@ type SecretConfigReference struct { Data string `json:"data"` } -const( - EXTERNAL_REFERENCE string = "external" - INTERNAL_REFERENCE string = "internal" +const ( + EXTERNAL_REFERENCE string = "external" + INTERNAL_REFERENCE string = "internal" ) -type ClusterConnect interface { - //NodeConnection(log logr.Logger, client client.Client) (node nificlient.NifiClient, err error) - IsInternal() bool - IsExternal() bool - 
ClusterLabelString() string - IsReady() bool - Id() string -} - // ClusterReference states a reference to a cluster for dataflow/registryclient/user // provisioning type ClusterReference struct { - Name string `json:"name,omitempty"` - Namespace string `json:"namespace,omitempty"` - Type string `json:"type,omitempty"` - Hostname string `json:"hostname,omitempty"` - SecretRef SecretReference `json:"secretRef,omitempty"` + Name string `json:"name,omitempty"` + Namespace string `json:"namespace,omitempty"` + // +kubebuilder:validation:Enum={"external-tls","nificluster","external-basic"} + Type ClientConfigType `json:"type,omitempty"` + NodeURITemplate string `json:"nodeURITemplate,omitempty"` + NodeIds []int32 `json:"nodeIds,omitempty"` + NifiURI string `json:"nifiURI,omitempty"` + RootProcessGroupId string `json:"rootProcessGroupId,omitempty"` + SecretRef SecretReference `json:"secretRef,omitempty"` } -func (c *ClusterReference) GetType() string { - if c.Type == "" || c.Type != EXTERNAL_REFERENCE { - return INTERNAL_REFERENCE +func (c *ClusterReference) GetType() ClientConfigType { + if c.Type == "" { + return ClientConfigNiFiCluster } - return EXTERNAL_REFERENCE + return c.Type } -func (c *ClusterReference) IsSet() bool{ - return (c.Name != "" && c.GetType() == INTERNAL_REFERENCE) || (c.Hostname != "" && c.GetType() == EXTERNAL_REFERENCE) +// @TODO +func (c *ClusterReference) IsSet() bool { + return (c.Name != "" && c.GetType() == ClientConfigNiFiCluster) || (c.NodeURITemplate != "" && c.GetType() != "" && c.GetType() != ClientConfigNiFiCluster) } // RegistryClientReference states a reference to a registry client for dataflow @@ -278,6 +277,12 @@ const ( //PKIBackendVault PKIBackend = "vault" ) +const ( + ClientConfigNiFiCluster ClientConfigType = "nificluster" + ClientConfigExternalTLS ClientConfigType = "external-tls" + ClientConfigExternalBasic ClientConfigType = "external-basic" +) + const ( // DataflowStateCreated describes the status of a NifiDataflow as 
created DataflowStateCreated DataflowState = "Created" @@ -419,11 +424,11 @@ const ( ) func ClusterRefsEquals(clusterRefs []ClusterReference) bool { - c1 := clusterRefs[0] - refType := c1.Type - hostname := c1.Hostname - name := c1.Name - ns := c1.Namespace + c1 := clusterRefs[0] + refType := c1.Type + hostname := c1.NodeURITemplate + name := c1.Name + ns := c1.Namespace var secretRefs []SecretReference for _, cluster := range clusterRefs { @@ -431,7 +436,7 @@ func ClusterRefsEquals(clusterRefs []ClusterReference) bool { return false } if c1.IsExternal() { - if hostname != cluster.Hostname { + if hostname != cluster.NodeURITemplate { return false } secretRefs = append(secretRefs, SecretReference{Name: cluster.SecretRef.Name, Namespace: cluster.Namespace}) @@ -446,17 +451,17 @@ func ClusterRefsEquals(clusterRefs []ClusterReference) bool { return true } -func (c ClusterReference) IsExternal() bool{ - return c.Type == EXTERNAL_REFERENCE +func (c ClusterReference) IsExternal() bool { + return c.Type != ClientConfigNiFiCluster } func SecretRefsEquals(secretRefs []SecretReference) bool { name := secretRefs[0].Name - ns := secretRefs[0].Namespace + ns := secretRefs[0].Namespace for _, secretRef := range secretRefs { if name != secretRef.Name || ns != secretRef.Namespace { return false } } return true -} \ No newline at end of file +} diff --git a/api/v1alpha1/nificluster_types.go b/api/v1alpha1/nificluster_types.go index 9eae86c53..9978572bc 100644 --- a/api/v1alpha1/nificluster_types.go +++ b/api/v1alpha1/nificluster_types.go @@ -656,11 +656,11 @@ func (nSpec *NifiClusterSpec) GetMetricPort() *int { return nil } -func (cluster *NifiCluster) IsExternal() bool{ +func (cluster *NifiCluster) IsExternal() bool { return false } -func (cluster *NifiCluster) IsInternal() bool{ +func (cluster *NifiCluster) IsInternal() bool { return true } @@ -669,7 +669,7 @@ func (cluster *NifiCluster) ClusterLabelString() string { } func (cluster NifiCluster) IsReady() bool { - for _,nodeState 
:= range cluster.Status.NodesState { + for _, nodeState := range cluster.Status.NodesState { if nodeState.ConfigurationState != ConfigInSync || nodeState.GracefulActionState.State != GracefulUpscaleSucceeded || !nodeState.PodIsReady { return false @@ -682,6 +682,6 @@ func (cluster *NifiCluster) Id() string { return cluster.Name } -func (cluster *NifiCluster) RootProcessGroupId() string{ +func (cluster *NifiCluster) RootProcessGroupId() string { return cluster.Status.RootProcessGroupId -} \ No newline at end of file +} diff --git a/api/v1alpha1/nifidataflow_types.go b/api/v1alpha1/nifidataflow_types.go index f13102a94..247476a3b 100644 --- a/api/v1alpha1/nifidataflow_types.go +++ b/api/v1alpha1/nifidataflow_types.go @@ -155,4 +155,4 @@ func (d *NifiDataflowSpec) GetParentProcessGroupID(rootProcessGroupId string) st return rootProcessGroupId } return d.ParentProcessGroupID -} \ No newline at end of file +} diff --git a/api/v1alpha1/nifiregistryclient_types.go b/api/v1alpha1/nifiregistryclient_types.go index 42d3aa004..badbfd10a 100644 --- a/api/v1alpha1/nifiregistryclient_types.go +++ b/api/v1alpha1/nifiregistryclient_types.go @@ -64,4 +64,4 @@ type NifiRegistryClientList struct { func init() { SchemeBuilder.Register(&NifiRegistryClient{}, &NifiRegistryClientList{}) -} \ No newline at end of file +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 4e5e47c8e..9c6ebb32a 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -94,6 +94,12 @@ func (in *BootstrapProperties) DeepCopy() *BootstrapProperties { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterReference) DeepCopyInto(out *ClusterReference) { *out = *in + if in.NodeIds != nil { + in, out := &in.NodeIds, &out.NodeIds + *out = make([]int32, len(*in)) + copy(*out, *in) + } + out.SecretRef = in.SecretRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterReference. @@ -556,7 +562,7 @@ func (in *NifiDataflowSpec) DeepCopyInto(out *NifiDataflowSpec) { *out = new(bool) **out = **in } - out.ClusterRef = in.ClusterRef + in.ClusterRef.DeepCopyInto(&out.ClusterRef) if in.RegistryClientRef != nil { in, out := &in.RegistryClientRef, &out.RegistryClientRef *out = new(RegistryClientReference) @@ -666,7 +672,7 @@ func (in *NifiParameterContextSpec) DeepCopyInto(out *NifiParameterContextSpec) *out = make([]Parameter, len(*in)) copy(*out, *in) } - out.ClusterRef = in.ClusterRef + in.ClusterRef.DeepCopyInto(&out.ClusterRef) if in.SecretRefs != nil { in, out := &in.SecretRefs, &out.SecretRefs *out = make([]SecretReference, len(*in)) @@ -739,7 +745,7 @@ func (in *NifiRegistryClient) DeepCopyInto(out *NifiRegistryClient) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) out.Status = in.Status } @@ -796,7 +802,7 @@ func (in *NifiRegistryClientList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NifiRegistryClientSpec) DeepCopyInto(out *NifiRegistryClientSpec) { *out = *in - out.ClusterRef = in.ClusterRef + in.ClusterRef.DeepCopyInto(&out.ClusterRef) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NifiRegistryClientSpec. @@ -913,7 +919,7 @@ func (in *NifiUserGroupList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NifiUserGroupSpec) DeepCopyInto(out *NifiUserGroupSpec) { *out = *in - out.ClusterRef = in.ClusterRef + in.ClusterRef.DeepCopyInto(&out.ClusterRef) if in.UsersRef != nil { in, out := &in.UsersRef, &out.UsersRef *out = make([]UserReference, len(*in)) @@ -986,7 +992,7 @@ func (in *NifiUserList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NifiUserSpec) DeepCopyInto(out *NifiUserSpec) { *out = *in - out.ClusterRef = in.ClusterRef + in.ClusterRef.DeepCopyInto(&out.ClusterRef) if in.DNSNames != nil { in, out := &in.DNSNames, &out.DNSNames *out = make([]string, len(*in)) diff --git a/config/crd/bases/nifi.orange.com_nifidataflows.yaml b/config/crd/bases/nifi.orange.com_nifidataflows.yaml index bc1e7653b..6ac935d8f 100644 --- a/config/crd/bases/nifi.orange.com_nifidataflows.yaml +++ b/config/crd/bases/nifi.orange.com_nifidataflows.yaml @@ -43,12 +43,21 @@ spec: description: contains the reference to the NifiCluster with the one the dataflow is linked. 
properties: - hostname: - type: string name: type: string namespace: type: string + nifiURI: + type: string + nodeIds: + items: + format: int32 + type: integer + type: array + nodeURITemplate: + type: string + rootProcessGroupId: + type: string secretRef: description: SecretReference states a reference to a secret for parameter context provisioning @@ -61,6 +70,12 @@ spec: - name type: object type: + description: ClientConfigType represents an interface implementing + the ClientConfigManager + enum: + - external-tls + - nificluster + - external-basic type: string type: object flowId: diff --git a/config/crd/bases/nifi.orange.com_nifiparametercontexts.yaml b/config/crd/bases/nifi.orange.com_nifiparametercontexts.yaml index aebddc739..4a5792944 100644 --- a/config/crd/bases/nifi.orange.com_nifiparametercontexts.yaml +++ b/config/crd/bases/nifi.orange.com_nifiparametercontexts.yaml @@ -41,12 +41,21 @@ spec: description: contains the reference to the NifiCluster with the one the dataflow is linked. 
properties: - hostname: - type: string name: type: string namespace: type: string + nifiURI: + type: string + nodeIds: + items: + format: int32 + type: integer + type: array + nodeURITemplate: + type: string + rootProcessGroupId: + type: string secretRef: description: SecretReference states a reference to a secret for parameter context provisioning @@ -59,6 +68,12 @@ spec: - name type: object type: + description: ClientConfigType represents an interface implementing + the ClientConfigManager + enum: + - external-tls + - nificluster + - external-basic type: string type: object description: diff --git a/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml b/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml index 60c4c294f..72615e07c 100644 --- a/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml +++ b/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml @@ -41,12 +41,21 @@ spec: description: contains the reference to the NifiCluster with the one the dataflow is linked. 
properties: - hostname: - type: string name: type: string namespace: type: string + nifiURI: + type: string + nodeIds: + items: + format: int32 + type: integer + type: array + nodeURITemplate: + type: string + rootProcessGroupId: + type: string secretRef: description: SecretReference states a reference to a secret for parameter context provisioning @@ -59,6 +68,12 @@ spec: - name type: object type: + description: ClientConfigType represents an interface implementing + the ClientConfigManager + enum: + - external-tls + - nificluster + - external-basic type: string type: object description: diff --git a/config/crd/bases/nifi.orange.com_nifiusergroups.yaml b/config/crd/bases/nifi.orange.com_nifiusergroups.yaml index a7e33eac8..79276b178 100644 --- a/config/crd/bases/nifi.orange.com_nifiusergroups.yaml +++ b/config/crd/bases/nifi.orange.com_nifiusergroups.yaml @@ -98,12 +98,21 @@ spec: description: clusterRef contains the reference to the NifiCluster with the one the registry client is linked. 
properties: - hostname: - type: string name: type: string namespace: type: string + nifiURI: + type: string + nodeIds: + items: + format: int32 + type: integer + type: array + nodeURITemplate: + type: string + rootProcessGroupId: + type: string secretRef: description: SecretReference states a reference to a secret for parameter context provisioning @@ -116,6 +125,12 @@ spec: - name type: object type: + description: ClientConfigType represents an interface implementing + the ClientConfigManager + enum: + - external-tls + - nificluster + - external-basic type: string type: object usersRef: diff --git a/config/crd/bases/nifi.orange.com_nifiusers.yaml b/config/crd/bases/nifi.orange.com_nifiusers.yaml index 59addd791..e0c76213c 100644 --- a/config/crd/bases/nifi.orange.com_nifiusers.yaml +++ b/config/crd/bases/nifi.orange.com_nifiusers.yaml @@ -98,12 +98,21 @@ spec: description: contains the reference to the NifiCluster with the one the user is linked properties: - hostname: - type: string name: type: string namespace: type: string + nifiURI: + type: string + nodeIds: + items: + format: int32 + type: integer + type: array + nodeURITemplate: + type: string + rootProcessGroupId: + type: string secretRef: description: SecretReference states a reference to a secret for parameter context provisioning @@ -116,6 +125,12 @@ spec: - name type: object type: + description: ClientConfigType represents an interface implementing + the ClientConfigManager + enum: + - external-tls + - nificluster + - external-basic type: string type: object createCert: diff --git a/controllers/controller_common.go b/controllers/controller_common.go index 320d3fc5f..e827ff6dd 100644 --- a/controllers/controller_common.go +++ b/controllers/controller_common.go @@ -16,6 +16,7 @@ package controllers import ( "fmt" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" "time" "emperror.dev/errors" @@ -101,7 +102,7 @@ func ApplyClusterRefLabel(cluster *v1alpha1.NifiCluster, labels map[string]strin } 
// applyClusterRefLabel ensures a map of labels contains a reference to a parent nifi cluster -func ApplyClusterReferenceLabel(cluster v1alpha1.ClusterConnect, labels map[string]string) map[string]string { +func ApplyClusterReferenceLabel(cluster clientconfig.ClusterConnect, labels map[string]string) map[string]string { labelValue := cluster.ClusterLabelString() if labels == nil { labels = make(map[string]string, 0) diff --git a/controllers/nifidataflow_controller.go b/controllers/nifidataflow_controller.go index d105ea76f..520235e33 100644 --- a/controllers/nifidataflow_controller.go +++ b/controllers/nifidataflow_controller.go @@ -24,8 +24,9 @@ import ( "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers/dataflow" "github.com/Orange-OpenSource/nifikop/pkg/errorfactory" "github.com/Orange-OpenSource/nifikop/pkg/k8sutil" - "github.com/Orange-OpenSource/nifikop/pkg/nificlient" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config" "github.com/Orange-OpenSource/nifikop/pkg/util" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" "github.com/banzaicloud/k8s-objectmatcher/patch" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -83,7 +84,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request } // Get the last configuration viewed by the operator. - o, err :=patch.DefaultAnnotator.GetOriginalConfiguration(instance) + o, err := patch.DefaultAnnotator.GetOriginalConfiguration(instance) // Create it if not exist. 
if o == nil { if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(instance); err != nil { @@ -92,7 +93,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request if err := r.Client.Update(ctx, instance); err != nil { return RequeueWithError(r.Log, "failed to update NifiRegistryClient", err) } - o, err =patch.DefaultAnnotator.GetOriginalConfiguration(instance) + o, err = patch.DefaultAnnotator.GetOriginalConfiguration(instance) } // Check if the cluster reference changed. @@ -178,13 +179,18 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request errors.New("inconsistent cluster references")) } - var clientConfig *nificlient.NifiConfig - var clusterConnect v1alpha1.ClusterConnect + // Prepare cluster connection configurations + var clientConfig *clientconfig.NifiConfig + var clusterConnect clientconfig.ClusterConnect - // Get the referenced NifiCluster - if !instance.Spec.ClusterRef.IsExternal() { - var cluster *v1alpha1.NifiCluster - if cluster, err = k8sutil.LookupNifiCluster(r.Client, instance.Spec.ClusterRef.Name, currentClusterRef.Namespace); err != nil { + // Get the client config manager associated to the cluster ref. 
+ clusterRef := instance.Spec.ClusterRef + clusterRef.Namespace = currentClusterRef.Namespace + configManager := config.GetClientConfigManager(r.Client, clusterRef) + + // Generate the connect object + if clusterConnect, err = configManager.BuildConnect(); err != nil { + if !configManager.IsExternal() { // This shouldn't trigger anymore, but leaving it here as a safetybelt if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { r.Log.Info("Cluster is already gone, there is nothing we can do") @@ -212,16 +218,16 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request // the cluster does not exist - should have been caught pre-flight return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) } - clusterConnect = cluster - clientConfig, err = nificlient.ClusterConfig(r.Client, cluster) - if err != nil { - r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", - fmt.Sprintf("Failed to create HTTP client for the referenced cluster : %s in %s", - instance.Spec.ClusterRef.Name, currentClusterRef.Namespace)) - // the cluster does not exist - should have been caught pre-flight - return RequeueWithError(r.Log, "failed to create HTTP client the for referenced cluster", err) - } - } else { + } + + // Generate the client configuration. + clientConfig, err = configManager.BuildConfig() + if err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to create HTTP client for the referenced cluster : %s in %s", + instance.Spec.ClusterRef.Name, currentClusterRef.Namespace)) + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to create HTTP client the for referenced cluster", err) } // Check if marked for deletion and if so run finalizers @@ -243,7 +249,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request // Ìn case of the cluster reference changed. 
if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { // Delete the resource on the previous cluster. - if _,err := dataflow.RemoveDataflow(instance, clientConfig); err != nil { + if _, err := dataflow.RemoveDataflow(instance, clientConfig); err != nil { r.Recorder.Event(instance, corev1.EventTypeWarning, "RemoveError", fmt.Sprintf("Failed to delete NifiDataflow %s from cluster %s before moving in %s", instance.Name, original.Spec.ClusterRef.Name, original.Spec.ClusterRef.Name)) @@ -446,7 +452,7 @@ func (r *NifiDataflowReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func (r *NifiDataflowReconciler) ensureClusterLabel(ctx context.Context, cluster v1alpha1.ClusterConnect, +func (r *NifiDataflowReconciler) ensureClusterLabel(ctx context.Context, cluster clientconfig.ClusterConnect, flow *v1alpha1.NifiDataflow) (*v1alpha1.NifiDataflow, error) { labels := ApplyClusterReferenceLabel(cluster, flow.GetLabels()) @@ -470,7 +476,7 @@ func (r *NifiDataflowReconciler) updateAndFetchLatest(ctx context.Context, } func (r *NifiDataflowReconciler) checkFinalizers(ctx context.Context, flow *v1alpha1.NifiDataflow, - config *nificlient.NifiConfig) (reconcile.Result, error) { + config *clientconfig.NifiConfig) (reconcile.Result, error) { r.Log.Info("NiFi dataflow is marked for deletion") var err error @@ -497,7 +503,7 @@ func (r *NifiDataflowReconciler) removeFinalizer(ctx context.Context, flow *v1al return err } -func (r *NifiDataflowReconciler) finalizeNifiDataflow(flow *v1alpha1.NifiDataflow, config *nificlient.NifiConfig) error { +func (r *NifiDataflowReconciler) finalizeNifiDataflow(flow *v1alpha1.NifiDataflow, config *clientconfig.NifiConfig) error { exists, err := dataflow.DataflowExist(flow, config) if err != nil { diff --git a/controllers/nifiparametercontext_controller.go b/controllers/nifiparametercontext_controller.go index cefa4aea9..5973d457d 100644 --- 
a/controllers/nifiparametercontext_controller.go +++ b/controllers/nifiparametercontext_controller.go @@ -24,8 +24,9 @@ import ( "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers/parametercontext" errorfactory "github.com/Orange-OpenSource/nifikop/pkg/errorfactory" "github.com/Orange-OpenSource/nifikop/pkg/k8sutil" - "github.com/Orange-OpenSource/nifikop/pkg/nificlient" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config" "github.com/Orange-OpenSource/nifikop/pkg/util" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" "github.com/banzaicloud/k8s-objectmatcher/patch" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -82,7 +83,7 @@ func (r *NifiParameterContextReconciler) Reconcile(ctx context.Context, req ctrl } // Get the last configuration viewed by the operator. - o, err :=patch.DefaultAnnotator.GetOriginalConfiguration(instance) + o, err := patch.DefaultAnnotator.GetOriginalConfiguration(instance) // Create it if not exist. if o == nil { if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(instance); err != nil { @@ -91,7 +92,7 @@ func (r *NifiParameterContextReconciler) Reconcile(ctx context.Context, req ctrl if err := r.Client.Update(ctx, instance); err != nil { return RequeueWithError(r.Log, "failed to update NifiParameterContext", err) } - o, err =patch.DefaultAnnotator.GetOriginalConfiguration(instance) + o, err = patch.DefaultAnnotator.GetOriginalConfiguration(instance) } // Check if the cluster reference changed. 
@@ -124,14 +125,18 @@ func (r *NifiParameterContextReconciler) Reconcile(ctx context.Context, req ctrl parameterSecrets = append(parameterSecrets, secret) } - var clientConfig *nificlient.NifiConfig - var clusterConnect v1alpha1.ClusterConnect + // Prepare cluster connection configurations + var clientConfig *clientconfig.NifiConfig + var clusterConnect clientconfig.ClusterConnect - // Get the referenced NifiCluster - if !instance.Spec.ClusterRef.IsExternal() { - clusterNamespace := GetClusterRefNamespace(instance.Namespace, instance.Spec.ClusterRef) - var cluster *v1alpha1.NifiCluster - if cluster, err = k8sutil.LookupNifiCluster(r.Client, instance.Spec.ClusterRef.Name, clusterNamespace); err != nil { + // Get the client config manager associated to the cluster ref. + clusterRef := instance.Spec.ClusterRef + clusterRef.Namespace = GetClusterRefNamespace(instance.Namespace, instance.Spec.ClusterRef) + configManager := config.GetClientConfigManager(r.Client, clusterRef) + + // Generate the connect object + if clusterConnect, err = configManager.BuildConnect(); err != nil { + if !configManager.IsExternal() { // This shouldn't trigger anymore, but leaving it here as a safetybelt if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { r.Log.Info("Cluster is already gone, there is nothing we can do") @@ -153,22 +158,21 @@ func (r *NifiParameterContextReconciler) Reconcile(ctx context.Context, req ctrl r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", fmt.Sprintf("Failed to lookup reference cluster : %s in %s", - instance.Spec.ClusterRef.Name, clusterNamespace)) + instance.Spec.ClusterRef.Name, clusterRef.Namespace)) // the cluster does not exist - should have been caught pre-flight return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) } - // Set cluster connection configuration. 
- clusterConnect = cluster - clientConfig, err = nificlient.ClusterConfig(r.Client, cluster) - if err != nil { - r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", - fmt.Sprintf("Failed to create HTTP client for the referenced cluster : %s in %s", - instance.Spec.ClusterRef.Name, clusterNamespace)) - // the cluster does not exist - should have been caught pre-flight - return RequeueWithError(r.Log, "failed to create HTTP client the for referenced cluster", err) - } - } else { + } + + // Generate the client configuration. + clientConfig, err = configManager.BuildConfig() + if err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to create HTTP client for the referenced cluster : %s in %s", + instance.Spec.ClusterRef.Name, clusterRef.Namespace)) + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to create HTTP client the for referenced cluster", err) } // Check if marked for deletion and if so run finalizers @@ -289,7 +293,7 @@ func (r *NifiParameterContextReconciler) SetupWithManager(mgr ctrl.Manager) erro Complete(r) } -func (r *NifiParameterContextReconciler) ensureClusterLabel(ctx context.Context, cluster v1alpha1.ClusterConnect, +func (r *NifiParameterContextReconciler) ensureClusterLabel(ctx context.Context, cluster clientconfig.ClusterConnect, parameterContext *v1alpha1.NifiParameterContext) (*v1alpha1.NifiParameterContext, error) { labels := ApplyClusterReferenceLabel(cluster, parameterContext.GetLabels()) @@ -316,7 +320,7 @@ func (r *NifiParameterContextReconciler) checkFinalizers( ctx context.Context, parameterContext *v1alpha1.NifiParameterContext, parameterSecrets []*corev1.Secret, - config *nificlient.NifiConfig) (reconcile.Result, error) { + config *clientconfig.NifiConfig) (reconcile.Result, error) { r.Log.Info("NiFi parameter context is marked for deletion") var err error @@ -340,7 +344,7 @@ func (r 
*NifiParameterContextReconciler) removeFinalizer(ctx context.Context, fl func (r *NifiParameterContextReconciler) finalizeNifiParameterContext( parameterContext *v1alpha1.NifiParameterContext, parameterSecrets []*corev1.Secret, - config *nificlient.NifiConfig) error { + config *clientconfig.NifiConfig) error { if err := parametercontext.RemoveParameterContext(parameterContext, parameterSecrets, config); err != nil { return err diff --git a/controllers/nifiregistryclient_controller.go b/controllers/nifiregistryclient_controller.go index 924dac5db..33ca0abf0 100644 --- a/controllers/nifiregistryclient_controller.go +++ b/controllers/nifiregistryclient_controller.go @@ -22,8 +22,9 @@ import ( "fmt" "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers/registryclient" "github.com/Orange-OpenSource/nifikop/pkg/k8sutil" - "github.com/Orange-OpenSource/nifikop/pkg/nificlient" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config" "github.com/Orange-OpenSource/nifikop/pkg/util" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" "github.com/banzaicloud/k8s-objectmatcher/patch" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -80,7 +81,7 @@ func (r *NifiRegistryClientReconciler) Reconcile(ctx context.Context, req ctrl.R } // Get the last configuration viewed by the operator. - o, err :=patch.DefaultAnnotator.GetOriginalConfiguration(instance) + o, err := patch.DefaultAnnotator.GetOriginalConfiguration(instance) // Create it if not exist. 
if o == nil { if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(instance); err != nil { @@ -89,7 +90,7 @@ func (r *NifiRegistryClientReconciler) Reconcile(ctx context.Context, req ctrl.R if err := r.Client.Update(ctx, instance); err != nil { return RequeueWithError(r.Log, "failed to update NifiRegistryClient", err) } - o, err =patch.DefaultAnnotator.GetOriginalConfiguration(instance) + o, err = patch.DefaultAnnotator.GetOriginalConfiguration(instance) } // Check if the cluster reference changed. @@ -101,13 +102,18 @@ func (r *NifiRegistryClientReconciler) Reconcile(ctx context.Context, req ctrl.R instance.Spec.ClusterRef = original.Spec.ClusterRef } - var clientConfig *nificlient.NifiConfig - var clusterConnect v1alpha1.ClusterConnect - // Get the referenced NifiCluster - if !instance.Spec.ClusterRef.IsExternal(){ - var cluster *v1alpha1.NifiCluster - clusterNamespace := GetClusterRefNamespace(instance.Namespace, instance.Spec.ClusterRef) - if cluster, err = k8sutil.LookupNifiCluster(r.Client, instance.Spec.ClusterRef.Name, clusterNamespace); err != nil { + // Prepare cluster connection configurations + var clientConfig *clientconfig.NifiConfig + var clusterConnect clientconfig.ClusterConnect + + // Get the client config manager associated to the cluster ref. 
+ clusterRef := instance.Spec.ClusterRef + clusterRef.Namespace = GetClusterRefNamespace(instance.Namespace, instance.Spec.ClusterRef) + configManager := config.GetClientConfigManager(r.Client, clusterRef) + + // Generate the connect object + if clusterConnect, err = configManager.BuildConnect(); err != nil { + if !configManager.IsExternal() { // This shouldn't trigger anymore, but leaving it here as a safetybelt if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { r.Log.Info("Cluster is already gone, there is nothing we can do") @@ -129,21 +135,20 @@ func (r *NifiRegistryClientReconciler) Reconcile(ctx context.Context, req ctrl.R r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", fmt.Sprintf("Failed to lookup reference cluster : %s in %s", - instance.Spec.ClusterRef.Name, clusterNamespace)) + instance.Spec.ClusterRef.Name, clusterRef.Namespace)) // the cluster does not exist - should have been caught pre-flight return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) } - // Set cluster connection configuration. - clusterConnect = cluster - clientConfig, err = nificlient.ClusterConfig(r.Client, cluster) - if err != nil { - r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", - fmt.Sprintf("Failed to create HTTP client for the referenced cluster : %s in %s", - instance.Spec.ClusterRef.Name, clusterNamespace)) - // the cluster does not exist - should have been caught pre-flight - return RequeueWithError(r.Log, "failed to create HTTP client the for referenced cluster", err) - } - } else { + } + + // Generate the client configuration. 
+ clientConfig, err = configManager.BuildConfig() + if err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to create HTTP client for the referenced cluster : %s in %s", + instance.Spec.ClusterRef.Name, clusterRef.Namespace)) + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to create HTTP client for the referenced cluster", err) } // Check if marked for deletion and if so run finalizers @@ -262,7 +267,7 @@ func (r *NifiRegistryClientReconciler) SetupWithManager(mgr ctrl.Manager) error Complete(r) } -func (r *NifiRegistryClientReconciler) ensureClusterLabel(ctx context.Context, cluster v1alpha1.ClusterConnect, +func (r *NifiRegistryClientReconciler) ensureClusterLabel(ctx context.Context, cluster clientconfig.ClusterConnect, registryClient *v1alpha1.NifiRegistryClient) (*v1alpha1.NifiRegistryClient, error) { labels := ApplyClusterReferenceLabel(cluster, registryClient.GetLabels()) @@ -286,7 +291,7 @@ func (r *NifiRegistryClientReconciler) updateAndFetchLatest(ctx context.Context, } func (r *NifiRegistryClientReconciler) checkFinalizers(ctx context.Context, reqLogger logr.Logger, - registryClient *v1alpha1.NifiRegistryClient, config *nificlient.NifiConfig) (reconcile.Result, error) { + registryClient *v1alpha1.NifiRegistryClient, config *clientconfig.NifiConfig) (reconcile.Result, error) { reqLogger.Info("NiFi registry client is marked for deletion") var err error @@ -308,7 +313,7 @@ func (r *NifiRegistryClientReconciler) removeFinalizer(ctx context.Context, regi } func (r *NifiRegistryClientReconciler) finalizeNifiRegistryClient(reqLogger logr.Logger, registryClient *v1alpha1.NifiRegistryClient, - config *nificlient.NifiConfig) error { + config *clientconfig.NifiConfig) error { if err := registryclient.RemoveRegistryClient(registryClient, config); err != nil { return err diff --git a/controllers/nifiuser_controller.go
b/controllers/nifiuser_controller.go index 1d5269f4c..f05b7742a 100644 --- a/controllers/nifiuser_controller.go +++ b/controllers/nifiuser_controller.go @@ -24,9 +24,10 @@ import ( usercli "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers/user" "github.com/Orange-OpenSource/nifikop/pkg/errorfactory" "github.com/Orange-OpenSource/nifikop/pkg/k8sutil" - "github.com/Orange-OpenSource/nifikop/pkg/nificlient" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config" "github.com/Orange-OpenSource/nifikop/pkg/pki" "github.com/Orange-OpenSource/nifikop/pkg/util" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" "github.com/banzaicloud/k8s-objectmatcher/patch" "github.com/go-logr/logr" certv1 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2" @@ -86,7 +87,7 @@ func (r *NifiUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c } // Get the last configuration viewed by the operator. - o, err :=patch.DefaultAnnotator.GetOriginalConfiguration(instance) + o, err := patch.DefaultAnnotator.GetOriginalConfiguration(instance) // Create it if not exist. if o == nil { if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(instance); err != nil { @@ -95,7 +96,7 @@ func (r *NifiUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c if err := r.Client.Update(ctx, instance); err != nil { return RequeueWithError(r.Log, "failed to update NifiUser", err) } - o, err =patch.DefaultAnnotator.GetOriginalConfiguration(instance) + o, err = patch.DefaultAnnotator.GetOriginalConfiguration(instance) } // Check if the cluster reference changed. 
@@ -107,13 +108,18 @@ func (r *NifiUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c instance.Spec.ClusterRef = original.Spec.ClusterRef } - var clientConfig *nificlient.NifiConfig - var clusterConnect v1alpha1.ClusterConnect - // Get the referenced NifiCluster - if !instance.Spec.ClusterRef.IsExternal() { - clusterNamespace := GetClusterRefNamespace(instance.Namespace, instance.Spec.ClusterRef) - var cluster *v1alpha1.NifiCluster - if cluster, err = k8sutil.LookupNifiCluster(r.Client, instance.Spec.ClusterRef.Name, clusterNamespace); err != nil { + // Prepare cluster connection configurations + var clientConfig *clientconfig.NifiConfig + var clusterConnect clientconfig.ClusterConnect + + // Get the client config manager associated to the cluster ref. + clusterRef := instance.Spec.ClusterRef + clusterRef.Namespace = GetClusterRefNamespace(instance.Namespace, instance.Spec.ClusterRef) + configManager := config.GetClientConfigManager(r.Client, clusterRef) + + // Generate the connect object + if clusterConnect, err = configManager.BuildConnect(); err != nil { + if !configManager.IsExternal() { // This shouldn't trigger anymore, but leaving it here as a safetybelt if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { r.Log.Info("Cluster is gone already, there is nothing we can do") @@ -136,9 +142,24 @@ func (r *NifiUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", fmt.Sprintf("Failed to lookup reference cluster : %s in %s", - instance.Spec.ClusterRef.Name, clusterNamespace)) + instance.Spec.ClusterRef.Name, clusterRef.Namespace)) return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) } + } + + // Get the referenced NifiCluster + if !configManager.IsExternal() { + var cluster *v1alpha1.NifiCluster + if cluster, err = k8sutil.LookupNifiCluster(r.Client, instance.Spec.ClusterRef.Name, clusterRef.Namespace); err != nil { + // This shouldn't 
trigger anymore, but leaving it here as a safetybelt + if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { + r.Log.Info("Cluster is gone already, there is nothing we can do") + if err = r.removeFinalizer(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to remove finalizer from NifiUser", err) + } + return Reconciled() + } + } if v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) && instance.Spec.GetCreateCert() { @@ -200,18 +221,16 @@ func (r *NifiUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c return r.checkFinalizers(ctx, instance, clientConfig) } } + } - // Set cluster connection configuration. - clusterConnect = cluster - clientConfig, err = nificlient.ClusterConfig(r.Client, cluster) - if err != nil { - r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", - fmt.Sprintf("Failed to create HTTP client for the referenced cluster : %s in %s", - instance.Spec.ClusterRef.Name, clusterNamespace)) - // the cluster does not exist - should have been caught pre-flight - return RequeueWithError(r.Log, "failed to create HTTP client the for referenced cluster", err) - } - } else { + // Generate the client configuration. 
+ clientConfig, err = configManager.BuildConfig() + if err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to create HTTP client for the referenced cluster : %s in %s", + instance.Spec.ClusterRef.Name, clusterRef.Namespace)) + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to create HTTP client for the referenced cluster", err) } // check if marked for deletion @@ -349,7 +368,7 @@ func (r *NifiUserReconciler) SetupWithManager(mgr ctrl.Manager, certManagerEnabl return builder.Complete(r) } -func (r *NifiUserReconciler) ensureClusterLabel(ctx context.Context, cluster v1alpha1.ClusterConnect, user *v1alpha1.NifiUser) (*v1alpha1.NifiUser, error) { +func (r *NifiUserReconciler) ensureClusterLabel(ctx context.Context, cluster clientconfig.ClusterConnect, user *v1alpha1.NifiUser) (*v1alpha1.NifiUser, error) { labels := ApplyClusterReferenceLabel(cluster, user.GetLabels()) if !reflect.DeepEqual(labels, user.GetLabels()) { user.SetLabels(labels) @@ -368,7 +387,7 @@ func (r *NifiUserReconciler) updateAndFetchLatest(ctx context.Context, user *v1a return user, nil } -func (r *NifiUserReconciler) checkFinalizers(ctx context.Context, user *v1alpha1.NifiUser, config *nificlient.NifiConfig) (reconcile.Result, error) { +func (r *NifiUserReconciler) checkFinalizers(ctx context.Context, user *v1alpha1.NifiUser, config *clientconfig.NifiConfig) (reconcile.Result, error) { r.Log.Info("NiFi user is marked for deletion") var err error if util.StringSliceContains(user.GetFinalizers(), userFinalizer) { @@ -389,7 +408,7 @@ func (r *NifiUserReconciler) removeFinalizer(ctx context.Context, user *v1alpha1 return err } -func (r *NifiUserReconciler) finalizeNifiUser(user *v1alpha1.NifiUser, config *clientconfig.NifiConfig) error { if err := usercli.RemoveUser(user,
config); err != nil { return err } diff --git a/controllers/nifiusergroup_controller.go b/controllers/nifiusergroup_controller.go index 92fedda73..5b9962a7c 100644 --- a/controllers/nifiusergroup_controller.go +++ b/controllers/nifiusergroup_controller.go @@ -23,8 +23,9 @@ import ( "fmt" "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers/usergroup" "github.com/Orange-OpenSource/nifikop/pkg/k8sutil" - "github.com/Orange-OpenSource/nifikop/pkg/nificlient" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config" "github.com/Orange-OpenSource/nifikop/pkg/util" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" "github.com/banzaicloud/k8s-objectmatcher/patch" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -81,7 +82,7 @@ func (r *NifiUserGroupReconciler) Reconcile(ctx context.Context, req ctrl.Reques } // Get the last configuration viewed by the operator. - o, err :=patch.DefaultAnnotator.GetOriginalConfiguration(instance) + o, err := patch.DefaultAnnotator.GetOriginalConfiguration(instance) // Create it if not exist. if o == nil { if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(instance); err != nil { @@ -90,7 +91,7 @@ func (r *NifiUserGroupReconciler) Reconcile(ctx context.Context, req ctrl.Reques if err := r.Client.Update(ctx, instance); err != nil { return RequeueWithError(r.Log, "failed to update NifiRegistryClient", err) } - o, err =patch.DefaultAnnotator.GetOriginalConfiguration(instance) + o, err = patch.DefaultAnnotator.GetOriginalConfiguration(instance) } // Check if the cluster reference changed. 
@@ -102,12 +103,11 @@ func (r *NifiUserGroupReconciler) Reconcile(ctx context.Context, req ctrl.Reques instance.Spec.ClusterRef = original.Spec.ClusterRef } + // Ensure the cluster ref consistency with all users var users []*v1alpha1.NifiUser - for _, userRef := range instance.Spec.UsersRef { var user *v1alpha1.NifiUser userNamespace := GetUserRefNamespace(current.Namespace, userRef) - if user, err = k8sutil.LookupNifiUser(r.Client, userRef.Name, userNamespace); err != nil { // This shouldn't trigger anymore, but leaving it here as a safetybelt @@ -142,13 +142,18 @@ func (r *NifiUserGroupReconciler) Reconcile(ctx context.Context, req ctrl.Reques users = append(users, user) } - var clientConfig *nificlient.NifiConfig - var clusterConnect v1alpha1.ClusterConnect - // Get the referenced NifiCluster - if !instance.Spec.ClusterRef.IsExternal() { - clusterNamespace := GetClusterRefNamespace(instance.Namespace, instance.Spec.ClusterRef) - var cluster *v1alpha1.NifiCluster - if cluster, err = k8sutil.LookupNifiCluster(r.Client, instance.Spec.ClusterRef.Name, clusterNamespace); err != nil { + // Prepare cluster connection configurations + var clientConfig *clientconfig.NifiConfig + var clusterConnect clientconfig.ClusterConnect + + // Get the client config manager associated to the cluster ref. 
+ clusterRef := instance.Spec.ClusterRef + clusterRef.Namespace = GetClusterRefNamespace(instance.Namespace, instance.Spec.ClusterRef) + configManager := config.GetClientConfigManager(r.Client, clusterRef) + + // Generate the connect object + if clusterConnect, err = configManager.BuildConnect(); err != nil { + if !configManager.IsExternal() { // This shouldn't trigger anymore, but leaving it here as a safetybelt if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { r.Log.Info("Cluster is already gone, there is nothing we can do") @@ -171,22 +176,21 @@ func (r *NifiUserGroupReconciler) Reconcile(ctx context.Context, req ctrl.Reques r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", fmt.Sprintf("Failed to lookup reference cluster : %s in %s", - instance.Spec.ClusterRef.Name, clusterNamespace)) + instance.Spec.ClusterRef.Name, clusterRef.Namespace)) // the cluster does not exist - should have been caught pre-flight return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) } - // Set cluster connection configuration. - clusterConnect = cluster - clientConfig, err = nificlient.ClusterConfig(r.Client, cluster) - if err != nil { - r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", - fmt.Sprintf("Failed to create HTTP client for the referenced cluster : %s in %s", - instance.Spec.ClusterRef.Name, clusterNamespace)) - // the cluster does not exist - should have been caught pre-flight - return RequeueWithError(r.Log, "failed to create HTTP client the for referenced cluster", err) - } - } else { + } + + // Generate the client configuration. 
+ clientConfig, err = configManager.BuildConfig() + if err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to create HTTP client for the referenced cluster : %s in %s", + instance.Spec.ClusterRef.Name, clusterRef.Namespace)) + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to create HTTP client for the referenced cluster", err) } // Check if marked for deletion and if so run finalizers @@ -300,7 +304,7 @@ func (r *NifiUserGroupReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func (r *NifiUserGroupReconciler) ensureClusterLabel(ctx context.Context, cluster v1alpha1.ClusterConnect, +func (r *NifiUserGroupReconciler) ensureClusterLabel(ctx context.Context, cluster clientconfig.ClusterConnect, userGroup *v1alpha1.NifiUserGroup) (*v1alpha1.NifiUserGroup, error) { labels := ApplyClusterReferenceLabel(cluster, userGroup.GetLabels()) @@ -324,7 +328,7 @@ func (r *NifiUserGroupReconciler) updateAndFetchLatest(ctx context.Context, } func (r *NifiUserGroupReconciler) checkFinalizers(ctx context.Context, userGroup *v1alpha1.NifiUserGroup, - users []*v1alpha1.NifiUser, config *nificlient.NifiConfig) (reconcile.Result, error) { + users []*v1alpha1.NifiUser, config *clientconfig.NifiConfig) (reconcile.Result, error) { r.Log.Info("NiFi user group is marked for deletion") var err error @@ -339,7 +343,7 @@ func (r *NifiUserGroupReconciler) checkFinalizers(ctx context.Context, userGroup return Reconciled() } -func (r *NifiUserGroupReconciler) removeFinalizer(ctx context.Context, userGroup *v1alpha1.NifiUserGroup) error { +func (r *NifiUserGroupReconciler) removeFinalizer(ctx context.Context, userGroup *v1alpha1.NifiUserGroup) error { userGroup.SetFinalizers(util.StringSliceRemove(userGroup.GetFinalizers(), userGroupFinalizer)) _, err := r.updateAndFetchLatest(ctx, userGroup) return err @@ -348,7 +352,7 @@ func (r *NifiUserGroupReconciler)
removeFinalizer(ctx context.Context, userGrou func (r *NifiUserGroupReconciler) finalizeNifiNifiUserGroup( userGroup *v1alpha1.NifiUserGroup, users []*v1alpha1.NifiUser, - config *nificlient.NifiConfig) error { + config *clientconfig.NifiConfig) error { if err := usergroup.RemoveUserGroup(userGroup, users, config); err != nil { return err diff --git a/pkg/clientwrappers/accesspolicies/policies.go b/pkg/clientwrappers/accesspolicies/policies.go index 7bcfc88d8..c59c60aa2 100644 --- a/pkg/clientwrappers/accesspolicies/policies.go +++ b/pkg/clientwrappers/accesspolicies/policies.go @@ -5,13 +5,14 @@ import ( "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers" "github.com/Orange-OpenSource/nifikop/pkg/common" "github.com/Orange-OpenSource/nifikop/pkg/nificlient" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" nigoapi "github.com/erdrix/nigoapi/pkg/nifi" ctrl "sigs.k8s.io/controller-runtime" ) var log = ctrl.Log.WithName("accesspolicies-method") -func ExistAccessPolicies(accessPolicy *v1alpha1.AccessPolicy, config *nificlient.NifiConfig) (bool, error) { +func ExistAccessPolicies(accessPolicy *v1alpha1.AccessPolicy, config *clientconfig.NifiConfig) (bool, error) { nClient, err := common.NewClusterConnection(log, config) if err != nil { @@ -29,7 +30,7 @@ func ExistAccessPolicies(accessPolicy *v1alpha1.AccessPolicy, config *nificlient return entity != nil, nil } -func CreateAccessPolicy(accessPolicy *v1alpha1.AccessPolicy, config *nificlient.NifiConfig) (string, error) { +func CreateAccessPolicy(accessPolicy *v1alpha1.AccessPolicy, config *clientconfig.NifiConfig) (string, error) { nClient, err := common.NewClusterConnection(log, config) if err != nil { @@ -58,7 +59,7 @@ func UpdateAccessPolicy( removeUsers []*v1alpha1.NifiUser, addUserGroups []*v1alpha1.NifiUserGroup, removeUserGroups []*v1alpha1.NifiUserGroup, - config *nificlient.NifiConfig) error { + config *clientconfig.NifiConfig) error { nClient, err := common.NewClusterConnection(log, config) 
if err != nil { @@ -94,7 +95,7 @@ func UpdateAccessPolicyEntity( removeUsers []*v1alpha1.NifiUser, addUserGroups []*v1alpha1.NifiUserGroup, removeUserGroups []*v1alpha1.NifiUserGroup, - config *nificlient.NifiConfig) error { + config *clientconfig.NifiConfig) error { nClient, err := common.NewClusterConnection(log, config) if err != nil { @@ -119,7 +120,7 @@ func updateAccessPolicyEntity( removeUsers []*v1alpha1.NifiUser, addUserGroups []*v1alpha1.NifiUserGroup, removeUserGroups []*v1alpha1.NifiUserGroup, - config *nificlient.NifiConfig, + config *clientconfig.NifiConfig, entity *nigoapi.AccessPolicyEntity) { var defaultVersion int64 = 0 diff --git a/pkg/clientwrappers/controllersettings/controllersettings.go b/pkg/clientwrappers/controllersettings/controllersettings.go index bae0c3290..fd863fb44 100644 --- a/pkg/clientwrappers/controllersettings/controllersettings.go +++ b/pkg/clientwrappers/controllersettings/controllersettings.go @@ -4,7 +4,7 @@ import ( "github.com/Orange-OpenSource/nifikop/api/v1alpha1" "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers" "github.com/Orange-OpenSource/nifikop/pkg/common" - "github.com/Orange-OpenSource/nifikop/pkg/nificlient" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" nigoapi "github.com/erdrix/nigoapi/pkg/nifi" ctrl "sigs.k8s.io/controller-runtime" ) @@ -15,7 +15,7 @@ func controllerConfigIsSync(cluster *v1alpha1.NifiCluster, entity *nigoapi.Contr return cluster.Spec.ReadOnlyConfig.GetMaximumTimerDrivenThreadCount() == entity.Component.MaxTimerDrivenThreadCount } -func SyncConfiguration(config *nificlient.NifiConfig, cluster *v1alpha1.NifiCluster) error { +func SyncConfiguration(config *clientconfig.NifiConfig, cluster *v1alpha1.NifiCluster) error { nClient, err := common.NewClusterConnection(log, config) if err != nil { diff --git a/pkg/clientwrappers/dataflow/dataflow.go b/pkg/clientwrappers/dataflow/dataflow.go index b2bd6f822..c838f1cb0 100644 --- a/pkg/clientwrappers/dataflow/dataflow.go +++ 
b/pkg/clientwrappers/dataflow/dataflow.go @@ -1,6 +1,7 @@ package dataflow import ( + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" "strings" "github.com/Orange-OpenSource/nifikop/api/v1alpha1" @@ -15,7 +16,7 @@ import ( var log = ctrl.Log.WithName("dataflow-method") // DataflowExist check if the NifiDataflow exist on NiFi Cluster -func DataflowExist(flow *v1alpha1.NifiDataflow, config *nificlient.NifiConfig) (bool, error) { +func DataflowExist(flow *v1alpha1.NifiDataflow, config *clientconfig.NifiConfig) (bool, error) { if flow.Status.ProcessGroupID == "" { return false, nil @@ -37,7 +38,7 @@ func DataflowExist(flow *v1alpha1.NifiDataflow, config *nificlient.NifiConfig) ( return flowEntity != nil, nil } -func RootProcessGroup(config *nificlient.NifiConfig) (string, error) { +func RootProcessGroup(config *clientconfig.NifiConfig) (string, error) { nClient, err := common.NewClusterConnection(log, config) if err != nil { return "", err @@ -55,7 +56,7 @@ func RootProcessGroup(config *nificlient.NifiConfig) (string, error) { } // CreateDataflow will deploy the NifiDataflow on NiFi Cluster -func CreateDataflow(flow *v1alpha1.NifiDataflow, config *nificlient.NifiConfig, +func CreateDataflow(flow *v1alpha1.NifiDataflow, config *clientconfig.NifiConfig, registry *v1alpha1.NifiRegistryClient) (*v1alpha1.NifiDataflowStatus, error) { nClient, err := common.NewClusterConnection(log, config) @@ -77,7 +78,7 @@ func CreateDataflow(flow *v1alpha1.NifiDataflow, config *nificlient.NifiConfig, } // ScheduleDataflow will schedule the controller services and components of the NifiDataflow. 
-func ScheduleDataflow(flow *v1alpha1.NifiDataflow, config *nificlient.NifiConfig) error { +func ScheduleDataflow(flow *v1alpha1.NifiDataflow, config *clientconfig.NifiConfig) error { nClient, err := common.NewClusterConnection(log, config) if err != nil { return err @@ -133,7 +134,7 @@ func ScheduleDataflow(flow *v1alpha1.NifiDataflow, config *nificlient.NifiConfig // IsOutOfSyncDataflow control if the deployed dataflow is out of sync with the NifiDataflow resource func IsOutOfSyncDataflow( flow *v1alpha1.NifiDataflow, - config *nificlient.NifiConfig, + config *clientconfig.NifiConfig, registry *v1alpha1.NifiRegistryClient, parameterContext *v1alpha1.NifiParameterContext) (bool, error) { @@ -180,7 +181,7 @@ func isParameterContextChanged( func isParentProcessGroupChanged( flow *v1alpha1.NifiDataflow, - config *nificlient.NifiConfig, + config *clientconfig.NifiConfig, pgFlowEntity *nigoapi.ProcessGroupEntity) bool { return flow.Spec.GetParentProcessGroupID(config.RootProcessGroupId) != pgFlowEntity.Component.ParentGroupId } @@ -213,7 +214,7 @@ func isVersioningChanged( // SyncDataflow implements the logic to sync a NifiDataflow with the deployed flow. 
func SyncDataflow( flow *v1alpha1.NifiDataflow, - config *nificlient.NifiConfig, + config *clientconfig.NifiConfig, registry *v1alpha1.NifiRegistryClient, parameterContext *v1alpha1.NifiParameterContext) (*v1alpha1.NifiDataflowStatus, error) { @@ -390,7 +391,7 @@ func SyncDataflow( } // prepareUpdatePG ensure drain or drop logic -func prepareUpdatePG(flow *v1alpha1.NifiDataflow, config *nificlient.NifiConfig) (*v1alpha1.NifiDataflowStatus, error) { +func prepareUpdatePG(flow *v1alpha1.NifiDataflow, config *clientconfig.NifiConfig) (*v1alpha1.NifiDataflowStatus, error) { nClient, err := common.NewClusterConnection(log, config) if err != nil { @@ -500,7 +501,7 @@ func prepareUpdatePG(flow *v1alpha1.NifiDataflow, config *nificlient.NifiConfig) return &flow.Status, nil } -func RemoveDataflow(flow *v1alpha1.NifiDataflow,config *nificlient.NifiConfig) (*v1alpha1.NifiDataflowStatus, error) { +func RemoveDataflow(flow *v1alpha1.NifiDataflow, config *clientconfig.NifiConfig) (*v1alpha1.NifiDataflowStatus, error) { // Prepare Dataflow status, err := prepareUpdatePG(flow, config) @@ -534,7 +535,7 @@ func RemoveDataflow(flow *v1alpha1.NifiDataflow,config *nificlient.NifiConfig) ( return nil, nil } -func UnscheduleDataflow(flow *v1alpha1.NifiDataflow,config *nificlient.NifiConfig) error { +func UnscheduleDataflow(flow *v1alpha1.NifiDataflow, config *clientconfig.NifiConfig) error { nClient, err := common.NewClusterConnection(log, config) if err != nil { return err @@ -603,7 +604,7 @@ func processGroupFromFlow( } // listComponents will get all ProcessGroups, Processors, Connections and Ports recursively -func listComponents(config *nificlient.NifiConfig, +func listComponents(config *clientconfig.NifiConfig, processGroupID string) ([]nigoapi.ProcessGroupEntity, []nigoapi.ProcessorEntity, []nigoapi.ConnectionEntity, []nigoapi.PortEntity, error) { var processGroups []nigoapi.ProcessGroupEntity @@ -679,7 +680,7 @@ func updateRequest2Status(updateRequest 
*nigoapi.VersionedFlowUpdateRequestEntit func updateProcessGroupEntity( flow *v1alpha1.NifiDataflow, registry *v1alpha1.NifiRegistryClient, - config *nificlient.NifiConfig, + config *clientconfig.NifiConfig, entity *nigoapi.ProcessGroupEntity) { stringFactory := func() string { return "" } diff --git a/pkg/clientwrappers/parametercontext/parametercontext.go b/pkg/clientwrappers/parametercontext/parametercontext.go index 605ba165f..222921215 100644 --- a/pkg/clientwrappers/parametercontext/parametercontext.go +++ b/pkg/clientwrappers/parametercontext/parametercontext.go @@ -6,6 +6,7 @@ import ( "github.com/Orange-OpenSource/nifikop/pkg/common" "github.com/Orange-OpenSource/nifikop/pkg/errorfactory" "github.com/Orange-OpenSource/nifikop/pkg/nificlient" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" nigoapi "github.com/erdrix/nigoapi/pkg/nifi" corev1 "k8s.io/api/core/v1" ctrl "sigs.k8s.io/controller-runtime" @@ -13,7 +14,7 @@ import ( var log = ctrl.Log.WithName("parametercontext-method") -func ExistParameterContext( parameterContext *v1alpha1.NifiParameterContext, config *nificlient.NifiConfig) (bool, error) { +func ExistParameterContext(parameterContext *v1alpha1.NifiParameterContext, config *clientconfig.NifiConfig) (bool, error) { if parameterContext.Status.Id == "" { return false, nil @@ -36,7 +37,7 @@ func ExistParameterContext( parameterContext *v1alpha1.NifiParameterContext, con } func CreateParameterContext(parameterContext *v1alpha1.NifiParameterContext, parameterSecrets []*corev1.Secret, - config *nificlient.NifiConfig) (*v1alpha1.NifiParameterContextStatus, error) { + config *clientconfig.NifiConfig) (*v1alpha1.NifiParameterContextStatus, error) { nClient, err := common.NewClusterConnection(log, config) if err != nil { @@ -58,7 +59,7 @@ func CreateParameterContext(parameterContext *v1alpha1.NifiParameterContext, par } func SyncParameterContext(parameterContext *v1alpha1.NifiParameterContext, parameterSecrets []*corev1.Secret, - config 
*nificlient.NifiConfig) (*v1alpha1.NifiParameterContextStatus, error) { + config *clientconfig.NifiConfig) (*v1alpha1.NifiParameterContextStatus, error) { nClient, err := common.NewClusterConnection(log, config) if err != nil { @@ -107,7 +108,7 @@ func SyncParameterContext(parameterContext *v1alpha1.NifiParameterContext, param } func RemoveParameterContext(parameterContext *v1alpha1.NifiParameterContext, parameterSecrets []*corev1.Secret, - config *nificlient.NifiConfig) error { + config *clientconfig.NifiConfig) error { nClient, err := common.NewClusterConnection(log, config) if err != nil { diff --git a/pkg/clientwrappers/registryclient/registryclient.go b/pkg/clientwrappers/registryclient/registryclient.go index 071c14095..73b115152 100644 --- a/pkg/clientwrappers/registryclient/registryclient.go +++ b/pkg/clientwrappers/registryclient/registryclient.go @@ -5,13 +5,14 @@ import ( "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers" "github.com/Orange-OpenSource/nifikop/pkg/common" "github.com/Orange-OpenSource/nifikop/pkg/nificlient" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" nigoapi "github.com/erdrix/nigoapi/pkg/nifi" ctrl "sigs.k8s.io/controller-runtime" ) var log = ctrl.Log.WithName("registryclient-method") -func ExistRegistryClient(registryClient *v1alpha1.NifiRegistryClient, config *nificlient.NifiConfig) (bool, error) { +func ExistRegistryClient(registryClient *v1alpha1.NifiRegistryClient, config *clientconfig.NifiConfig) (bool, error) { if registryClient.Status.Id == "" { return false, nil @@ -34,7 +35,7 @@ func ExistRegistryClient(registryClient *v1alpha1.NifiRegistryClient, config *ni } func CreateRegistryClient(registryClient *v1alpha1.NifiRegistryClient, - config *nificlient.NifiConfig) (*v1alpha1.NifiRegistryClientStatus, error) { + config *clientconfig.NifiConfig) (*v1alpha1.NifiRegistryClientStatus, error) { nClient, err := common.NewClusterConnection(log, config) if err != nil { return nil, err @@ -55,7 +56,7 @@ func 
CreateRegistryClient(registryClient *v1alpha1.NifiRegistryClient, } func SyncRegistryClient(registryClient *v1alpha1.NifiRegistryClient, - config *nificlient.NifiConfig) (*v1alpha1.NifiRegistryClientStatus, error) { + config *clientconfig.NifiConfig) (*v1alpha1.NifiRegistryClientStatus, error) { nClient, err := common.NewClusterConnection(log, config) if err != nil { @@ -83,7 +84,7 @@ func SyncRegistryClient(registryClient *v1alpha1.NifiRegistryClient, } func RemoveRegistryClient(registryClient *v1alpha1.NifiRegistryClient, - config *nificlient.NifiConfig) error { + config *clientconfig.NifiConfig) error { nClient, err := common.NewClusterConnection(log, config) if err != nil { return err diff --git a/pkg/clientwrappers/reportingtask/reportingtask.go b/pkg/clientwrappers/reportingtask/reportingtask.go index 4038f6851..43504146a 100644 --- a/pkg/clientwrappers/reportingtask/reportingtask.go +++ b/pkg/clientwrappers/reportingtask/reportingtask.go @@ -6,6 +6,7 @@ import ( "github.com/Orange-OpenSource/nifikop/pkg/common" "github.com/Orange-OpenSource/nifikop/pkg/errorfactory" "github.com/Orange-OpenSource/nifikop/pkg/nificlient" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" nigoapi "github.com/erdrix/nigoapi/pkg/nifi" ctrl "sigs.k8s.io/controller-runtime" "strconv" @@ -23,7 +24,7 @@ const ( reportingTaskSendJVM = "true" ) -func ExistReportingTaks(config *nificlient.NifiConfig, cluster *v1alpha1.NifiCluster) (bool, error) { +func ExistReportingTaks(config *clientconfig.NifiConfig, cluster *v1alpha1.NifiCluster) (bool, error) { if cluster.Status.PrometheusReportingTask.Id == "" { return false, nil @@ -45,7 +46,7 @@ func ExistReportingTaks(config *nificlient.NifiConfig, cluster *v1alpha1.NifiClu return entity != nil, nil } -func CreateReportingTask(config *nificlient.NifiConfig, cluster *v1alpha1.NifiCluster) (*v1alpha1.PrometheusReportingTaskStatus, error) { +func CreateReportingTask(config *clientconfig.NifiConfig, cluster *v1alpha1.NifiCluster) 
(*v1alpha1.PrometheusReportingTaskStatus, error) { nClient, err := common.NewClusterConnection(log, config) if err != nil { return nil, err @@ -65,7 +66,7 @@ func CreateReportingTask(config *nificlient.NifiConfig, cluster *v1alpha1.NifiCl }, nil } -func SyncReportingTask(config *nificlient.NifiConfig, cluster *v1alpha1.NifiCluster) (*v1alpha1.PrometheusReportingTaskStatus, error) { +func SyncReportingTask(config *clientconfig.NifiConfig, cluster *v1alpha1.NifiCluster) (*v1alpha1.PrometheusReportingTaskStatus, error) { nClient, err := common.NewClusterConnection(log, config) if err != nil { @@ -123,7 +124,7 @@ func SyncReportingTask(config *nificlient.NifiConfig, cluster *v1alpha1.NifiClus return &status, nil } -func RemoveReportingTaks(config *nificlient.NifiConfig, cluster *v1alpha1.NifiCluster) error { +func RemoveReportingTaks(config *clientconfig.NifiConfig, cluster *v1alpha1.NifiCluster) error { nClient, err := common.NewClusterConnection(log, config) if err != nil { return err diff --git a/pkg/clientwrappers/user/user.go b/pkg/clientwrappers/user/user.go index bab0b6c57..1bbca316d 100644 --- a/pkg/clientwrappers/user/user.go +++ b/pkg/clientwrappers/user/user.go @@ -6,13 +6,14 @@ import ( "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers/accesspolicies" "github.com/Orange-OpenSource/nifikop/pkg/common" "github.com/Orange-OpenSource/nifikop/pkg/nificlient" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" nigoapi "github.com/erdrix/nigoapi/pkg/nifi" ctrl "sigs.k8s.io/controller-runtime" ) var log = ctrl.Log.WithName("user-method") -func ExistUser(user *v1alpha1.NifiUser, config *nificlient.NifiConfig) (bool, error) { +func ExistUser(user *v1alpha1.NifiUser, config *clientconfig.NifiConfig) (bool, error) { if user.Status.Id == "" { return false, nil @@ -34,7 +35,7 @@ func ExistUser(user *v1alpha1.NifiUser, config *nificlient.NifiConfig) (bool, er return entity != nil, nil } -func FindUserByIdentity(user *v1alpha1.NifiUser, config 
*nificlient.NifiConfig) (*v1alpha1.NifiUserStatus, error) { +func FindUserByIdentity(user *v1alpha1.NifiUser, config *clientconfig.NifiConfig) (*v1alpha1.NifiUserStatus, error) { nClient, err := common.NewClusterConnection(log, config) if err != nil { @@ -61,7 +62,7 @@ func FindUserByIdentity(user *v1alpha1.NifiUser, config *nificlient.NifiConfig) return nil, nil } -func CreateUser(user *v1alpha1.NifiUser, config *nificlient.NifiConfig) (*v1alpha1.NifiUserStatus, error) { +func CreateUser(user *v1alpha1.NifiUser, config *clientconfig.NifiConfig) (*v1alpha1.NifiUserStatus, error) { nClient, err := common.NewClusterConnection(log, config) if err != nil { @@ -82,7 +83,7 @@ func CreateUser(user *v1alpha1.NifiUser, config *nificlient.NifiConfig) (*v1alph }, nil } -func SyncUser(user *v1alpha1.NifiUser, config *nificlient.NifiConfig) (*v1alpha1.NifiUserStatus, error) { +func SyncUser(user *v1alpha1.NifiUser, config *clientconfig.NifiConfig) (*v1alpha1.NifiUserStatus, error) { nClient, err := common.NewClusterConnection(log, config) if err != nil { @@ -154,7 +155,7 @@ func SyncUser(user *v1alpha1.NifiUser, config *nificlient.NifiConfig) (*v1alpha1 return &status, nil } -func RemoveUser(user *v1alpha1.NifiUser, config *nificlient.NifiConfig) error { +func RemoveUser(user *v1alpha1.NifiUser, config *clientconfig.NifiConfig) error { nClient, err := common.NewClusterConnection(log, config) if err != nil { return err diff --git a/pkg/clientwrappers/usergroup/usergroup.go b/pkg/clientwrappers/usergroup/usergroup.go index c8c1e5d93..bfdde37de 100644 --- a/pkg/clientwrappers/usergroup/usergroup.go +++ b/pkg/clientwrappers/usergroup/usergroup.go @@ -6,13 +6,14 @@ import ( "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers/accesspolicies" "github.com/Orange-OpenSource/nifikop/pkg/common" "github.com/Orange-OpenSource/nifikop/pkg/nificlient" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" nigoapi "github.com/erdrix/nigoapi/pkg/nifi" ctrl 
"sigs.k8s.io/controller-runtime" ) var log = ctrl.Log.WithName("usergroup-method") -func ExistUserGroup(userGroup *v1alpha1.NifiUserGroup, config *nificlient.NifiConfig) (bool, error) { +func ExistUserGroup(userGroup *v1alpha1.NifiUserGroup, config *clientconfig.NifiConfig) (bool, error) { nClient, err := common.NewClusterConnection(log, config) if err != nil { @@ -37,7 +38,7 @@ func ExistUserGroup(userGroup *v1alpha1.NifiUserGroup, config *nificlient.NifiCo } func CreateUserGroup(userGroup *v1alpha1.NifiUserGroup, - users []*v1alpha1.NifiUser, config *nificlient.NifiConfig) (*v1alpha1.NifiUserGroupStatus, error) { + users []*v1alpha1.NifiUser, config *clientconfig.NifiConfig) (*v1alpha1.NifiUserGroupStatus, error) { nClient, err := common.NewClusterConnection(log, config) if err != nil { return nil, err @@ -58,7 +59,7 @@ func CreateUserGroup(userGroup *v1alpha1.NifiUserGroup, } func SyncUserGroup(userGroup *v1alpha1.NifiUserGroup, users []*v1alpha1.NifiUser, - config *nificlient.NifiConfig) (*v1alpha1.NifiUserGroupStatus, error) { + config *clientconfig.NifiConfig) (*v1alpha1.NifiUserGroupStatus, error) { nClient, err := common.NewClusterConnection(log, config) if err != nil { @@ -142,7 +143,7 @@ func SyncUserGroup(userGroup *v1alpha1.NifiUserGroup, users []*v1alpha1.NifiUser return &status, nil } -func RemoveUserGroup(userGroup *v1alpha1.NifiUserGroup, users []*v1alpha1.NifiUser,config *nificlient.NifiConfig) error { +func RemoveUserGroup(userGroup *v1alpha1.NifiUserGroup, users []*v1alpha1.NifiUser, config *clientconfig.NifiConfig) error { nClient, err := common.NewClusterConnection(log, config) if err != nil { return err diff --git a/pkg/common/common.go b/pkg/common/common.go index 2958ef890..54791524c 100644 --- a/pkg/common/common.go +++ b/pkg/common/common.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/Orange-OpenSource/nifikop/api/v1alpha1" "github.com/Orange-OpenSource/nifikop/pkg/nificlient" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" 
"github.com/go-logr/logr" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -31,7 +32,7 @@ var NewNifiFromConfig = nificlient.NewFromConfig // newNodeConnection is a convenience wrapper for creating a node connection // and creating a safer close function -func NewClusterConnection(log logr.Logger, config *nificlient.NifiConfig) (node nificlient.NifiClient, err error) { +func NewClusterConnection(log logr.Logger, config *clientconfig.NifiConfig) (node nificlient.NifiClient, err error) { // Get a nifi connection node, err = NewNifiFromConfig(config) diff --git a/pkg/nificlient/client.go b/pkg/nificlient/client.go index e9500d929..f07a7bac1 100644 --- a/pkg/nificlient/client.go +++ b/pkg/nificlient/client.go @@ -16,6 +16,8 @@ package nificlient import ( "fmt" "github.com/Orange-OpenSource/nifikop/api/v1alpha1" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config/nificluster" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" "net/http" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -141,7 +143,7 @@ type NifiClient interface { type nifiClient struct { NifiClient - opts *NifiConfig + opts *clientconfig.NifiConfig client *nigoapi.APIClient nodeClient map[int32]*nigoapi.APIClient timeout time.Duration @@ -151,7 +153,7 @@ type nifiClient struct { newClient func(*nigoapi.Configuration) *nigoapi.APIClient } -func New(opts *NifiConfig) NifiClient { +func New(opts *clientconfig.NifiConfig) NifiClient { nClient := &nifiClient{ opts: opts, timeout: time.Duration(opts.OperationTimeout) * time.Second, @@ -183,7 +185,7 @@ func (n *nifiClient) Build() error { } // NewFromConfig is a convenient wrapper around New() and ClusterConfig() -func NewFromConfig(opts *NifiConfig) (NifiClient, error) { +func NewFromConfig(opts *clientconfig.NifiConfig) (NifiClient, error) { var client NifiClient var err error @@ -200,12 +202,12 @@ func NewFromConfig(opts *NifiConfig) (NifiClient, error) { func NewFromCluster(k8sclient client.Client, 
cluster *v1alpha1.NifiCluster) (NifiClient, error) { var client NifiClient var err error + var opts *clientconfig.NifiConfig - opts, err := ClusterConfig(k8sclient, cluster) - if err != nil { + if opts, err = nificluster.New(k8sclient, + v1alpha1.ClusterReference{Name: cluster.Name, Namespace: cluster.Namespace}).BuildConfig(); err != nil { return nil, err } - client = New(opts) err = client.Build() if err != nil { @@ -327,7 +329,7 @@ func (n *nifiClient) nodeDtoByNodeId(nId int32) *nigoapi.NodeDto { for id := range n.nodes { nodeDto := n.nodes[id] // Check if the Cluster Node uri match with the one associated to the NifiCluster nodeId searched - if fmt.Sprintf("%s:%d", nodeDto.Address, nodeDto.ApiPort) == fmt.Sprintf(n.opts.nodeURITemplate, nId) { + if fmt.Sprintf("%s:%d", nodeDto.Address, nodeDto.ApiPort) == fmt.Sprintf(n.opts.NodeURITemplate, nId) { return &nodeDto } } diff --git a/pkg/nificlient/client_test.go b/pkg/nificlient/client_test.go index e3311c32e..4e6b14504 100644 --- a/pkg/nificlient/client_test.go +++ b/pkg/nificlient/client_test.go @@ -16,7 +16,9 @@ package nificlient import ( "fmt" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" "net/http" + "sigs.k8s.io/controller-runtime/pkg/client" "testing" "github.com/Orange-OpenSource/nifikop/api/v1alpha1" @@ -27,6 +29,18 @@ import ( "github.com/stretchr/testify/assert" ) +const ( + httpContainerPort int32 = 443 + succeededNodeId int32 = 4 + + clusterName = "test-cluster" + clusterNamespace = "test-namespace" +) + +type mockClient struct { + client.Client +} + var ( nodeURITemplate = fmt.Sprintf("%s-%s-node.%s.svc.cluster.local:%s", clusterName, "%d", clusterNamespace, "%d") @@ -44,12 +58,12 @@ func TestBuild(t *testing.T) { assert := assert.New(t) client := newMockClient() - client.opts.NodesURI = make(map[int32]nodeUri) - client.opts.NodesURI[1] = nodeUri{ + client.opts.NodesURI = make(map[int32]clientconfig.NodeUri) + client.opts.NodesURI[1] = clientconfig.NodeUri{ HostListener: 
fmt.Sprintf(nodeURITemplate, 1, httpContainerPort), RequestHost: fmt.Sprintf(nodeURITemplate, 1, httpContainerPort), } - client.opts.nodeURITemplate = nodeURITemplate + client.opts.NodeURITemplate = nodeURITemplate client.opts.NifiURI = fmt.Sprintf(nifiURITemplate, httpContainerPort) url := "http://" + fmt.Sprintf(nodeURITemplate, 1, httpContainerPort) + "/nifi-api/controller/cluster" @@ -104,4 +118,4 @@ func TestNewFromCluster(t *testing.T) { _, err = NewFromCluster(mockClient{}, cluster) assert.IsType(errorfactory.NodesUnreachable{}, err) -} \ No newline at end of file +} diff --git a/pkg/nificlient/config/config_manager.go b/pkg/nificlient/config/config_manager.go new file mode 100644 index 000000000..dacfbee34 --- /dev/null +++ b/pkg/nificlient/config/config_manager.go @@ -0,0 +1,81 @@ +package config + +import ( + "github.com/Orange-OpenSource/nifikop/api/v1alpha1" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config/nificluster" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var MockClientConfig = v1alpha1.ClientConfigType("mock") + +func GetClientConfigManager(client client.Client, clusterRef v1alpha1.ClusterReference) clientconfig.Manager { + switch clusterRef.Type { + case v1alpha1.ClientConfigNiFiCluster: + return nificluster.New(client, clusterRef) + //case v1alpha1.ClientConfigExternalTLS: + // return + //case v1alpha1.ClientConfigExternalBasic: + // return + case MockClientConfig: + return newMockClientConfig(client, clusterRef) + default: + return nificluster.New(client, clusterRef) + } +} + +// Mock types and functions +type mockClientConfig struct { + clientconfig.Manager + client client.Client + clusterRef v1alpha1.ClusterReference +} + +func newMockClientConfig(client client.Client, clusterRef v1alpha1.ClusterReference) clientconfig.Manager { + return &mockClientConfig{client: client, clusterRef: clusterRef} +} + +func (n *mockClientConfig) BuildConfig() 
(*clientconfig.NifiConfig, error) { + return nil, nil +} + +func (n *mockClientConfig) BuildConnect() (cluster clientconfig.ClusterConnect, err error) { + return +} + +func (n *mockClientConfig) IsExternal() bool { + return true +} + +//// external +//func ExternalTLSConfig(ref v1alpha1.ClusterReference) (*NifiConfig, error) { +// nodesURI := generateNodesAddressFromTemplate(ref.NodeIds, ref.NodeURITemplate) +// +// conf := &NifiConfig{} +// conf.RootProcessGroupId = ref.RootProcessGroupId +// conf.NodeURITemplate = ref.NodeURITemplate +// conf.NodesURI = nodesURI +// conf.NifiURI = ref.NifiURI +// conf.OperationTimeout = nifiDefaultTimeout +// +// tlsConfig, err := certmanagerpki.GetControllerTLSConfigFromSecret() +// if err != nil { +// return conf, err +// } +// conf.UseSSL = true +// conf.TLSConfig = tlsConfig +// return conf, nil +//} +// +// +//func generateNodesAddressFromTemplate(ids []int32, template string) map[int32]nodeUri { +// addresses := make(map[int32]nodeUri) +// +// for _,nId := range ids { +// addresses[nId] = nodeUri{ +// HostListener: fmt.Sprintf(template ,nId), +// RequestHost: fmt.Sprintf(template ,nId), +// } +// } +// return addresses +//} diff --git a/pkg/nificlient/config/nificluster/nificluster.go b/pkg/nificlient/config/nificluster/nificluster.go new file mode 100644 index 000000000..d38d7ef40 --- /dev/null +++ b/pkg/nificlient/config/nificluster/nificluster.go @@ -0,0 +1,20 @@ +package nificluster + +import ( + "github.com/Orange-OpenSource/nifikop/api/v1alpha1" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type NifiCluster interface { + clientconfig.Manager +} + +type nifiCluster struct { + client client.Client + clusterRef v1alpha1.ClusterReference +} + +func New(client client.Client, clusterRef v1alpha1.ClusterReference) NifiCluster { + return &nifiCluster{clusterRef: clusterRef, client: client} +} diff --git a/pkg/nificlient/config.go 
b/pkg/nificlient/config/nificluster/nificluster_config.go similarity index 62% rename from pkg/nificlient/config.go rename to pkg/nificlient/config/nificluster/nificluster_config.go index 1436c32ab..38aff5e11 100644 --- a/pkg/nificlient/config.go +++ b/pkg/nificlient/config/nificluster/nificluster_config.go @@ -1,46 +1,44 @@ -package nificlient +package nificluster import ( - "crypto/tls" "fmt" "github.com/Orange-OpenSource/nifikop/api/v1alpha1" + "github.com/Orange-OpenSource/nifikop/pkg/k8sutil" "github.com/Orange-OpenSource/nifikop/pkg/pki" "github.com/Orange-OpenSource/nifikop/pkg/util" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" "github.com/Orange-OpenSource/nifikop/pkg/util/nifi" "sigs.k8s.io/controller-runtime/pkg/client" "strings" ) -const ( - nifiDefaultTimeout = int64(5) -) +func (n *nifiCluster) BuildConfig() (*clientconfig.NifiConfig, error) { + var cluster *v1alpha1.NifiCluster + var err error + if cluster, err = k8sutil.LookupNifiCluster(n.client, n.clusterRef.Name, n.clusterRef.Namespace); err != nil { + return nil, err + } -// NifiConfig are the options to creating a new ClusterAdmin client -type NifiConfig struct { - nodeURITemplate string - NodesURI map[int32]nodeUri - NifiURI string - UseSSL bool - TLSConfig *tls.Config + return clusterConfig(n.client, cluster) +} - OperationTimeout int64 - RootProcessGroupId string +func (n *nifiCluster) BuildConnect() (cluster clientconfig.ClusterConnect, err error) { + cluster, err = k8sutil.LookupNifiCluster(n.client, n.clusterRef.Name, n.clusterRef.Namespace) + return } -type nodeUri struct { - HostListener string - RequestHost string +func (n *nifiCluster) IsExternal() bool { + return false } // ClusterConfig creates connection options from a NifiCluster CR -func ClusterConfig(client client.Client, cluster *v1alpha1.NifiCluster) (*NifiConfig, error) { - - conf := &NifiConfig{} +func clusterConfig(client client.Client, cluster *v1alpha1.NifiCluster) (*clientconfig.NifiConfig,
error) { + conf := &clientconfig.NifiConfig{} conf.RootProcessGroupId = cluster.Status.RootProcessGroupId - conf.nodeURITemplate = generateNodesURITemplate(cluster) + conf.NodeURITemplate = generateNodesURITemplate(cluster) conf.NodesURI = generateNodesAddress(cluster) conf.NifiURI = nifi.GenerateRequestNiFiAllNodeAddressFromCluster(cluster) - conf.OperationTimeout = nifiDefaultTimeout + conf.OperationTimeout = clientconfig.NifiDefaultTimeout if cluster.Spec.ListenersConfig.SSLSecrets != nil && UseSSL(cluster) { tlsConfig, err := pki.GetPKIManager(client, cluster).GetControllerTLSConfig() @@ -57,12 +55,12 @@ func UseSSL(cluster *v1alpha1.NifiCluster) bool { return cluster.Spec.ListenersConfig.SSLSecrets != nil } -func generateNodesAddress(cluster *v1alpha1.NifiCluster) map[int32]nodeUri { - addresses := make(map[int32]nodeUri) +func generateNodesAddress(cluster *v1alpha1.NifiCluster) map[int32]clientconfig.NodeUri { + addresses := make(map[int32]clientconfig.NodeUri) for nId, state := range cluster.Status.NodesState { if !(state.GracefulActionState.State.IsRunningState() || state.GracefulActionState.State.IsRequiredState()) && state.GracefulActionState.ActionStep != v1alpha1.RemoveStatus { - addresses[util.ConvertStringToInt32(nId)] = nodeUri{ + addresses[util.ConvertStringToInt32(nId)] = clientconfig.NodeUri{ HostListener: nifi.GenerateHostListenerNodeAddressFromCluster(util.ConvertStringToInt32(nId), cluster), RequestHost: nifi.GenerateRequestNiFiNodeAddressFromCluster(util.ConvertStringToInt32(nId), cluster), } @@ -80,4 +78,4 @@ func generateNodesURITemplate(cluster *v1alpha1.NifiCluster) string { return nodeNameTemplate + fmt.Sprintf(".%s", strings.SplitAfterN(nifi.GenerateRequestNiFiNodeAddressFromCluster(0, cluster), ".", 2)[1], ) -} \ No newline at end of file +} diff --git a/pkg/nificlient/config_test.go b/pkg/nificlient/config/nificluster/nificluster_config_test.go similarity index 97% rename from pkg/nificlient/config_test.go rename to 
pkg/nificlient/config/nificluster/nificluster_config_test.go index 26ce6c7bb..179333585 100644 --- a/pkg/nificlient/config_test.go +++ b/pkg/nificlient/config/nificluster/nificluster_config_test.go @@ -1,4 +1,4 @@ -package nificlient +package nificluster import ( "fmt" @@ -82,7 +82,7 @@ func TestClusterConfig(t *testing.T) { func testClusterConfig(t *testing.T, cluster *v1alpha1.NifiCluster, expectedUseSSL bool) { assert := assert.New(t) - conf, err := ClusterConfig(mockClient{}, cluster) + conf, err := clusterConfig(mockClient{}, cluster) assert.Nil(err) assert.Equal(expectedUseSSL, conf.UseSSL) @@ -95,7 +95,7 @@ func testClusterConfig(t *testing.T, cluster *v1alpha1.NifiCluster, expectedUseS assert.Equal( fmt.Sprintf("%s-%s-node.%s.svc.cluster.local:%d", clusterName, "%d", clusterNamespace, httpContainerPort), - conf.nodeURITemplate) + conf.NodeURITemplate) assert.Equal(1, len(conf.NodesURI)) assert.NotNil(conf.NodesURI[succeededNodeId]) @@ -142,4 +142,4 @@ func TestGenerateNodesURITemplate(t *testing.T) { fmt.Sprintf("%s-%s-node.%s.svc.cluster.local:%d", clusterName, "%d", clusterNamespace, httpContainerPort), generateNodesURITemplate(cluster)) -} \ No newline at end of file +} diff --git a/pkg/nificlient/mock_client_test.go b/pkg/nificlient/mock_client_test.go index 9db6f119b..85dc624dd 100644 --- a/pkg/nificlient/mock_client_test.go +++ b/pkg/nificlient/mock_client_test.go @@ -15,6 +15,7 @@ package nificlient import ( + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" "testing" "github.com/Orange-OpenSource/nifikop/api/v1alpha1" @@ -29,7 +30,7 @@ var ( type mockNiFiClient struct { NifiClient - opts *NifiConfig + opts *clientconfig.NifiConfig client *nigoapi.APIClient nodeClient map[int32]*nigoapi.APIClient nodes []nigoapi.NodeDto @@ -38,8 +39,8 @@ type mockNiFiClient struct { failOpts bool } -func newMockOpts() *NifiConfig { - return &NifiConfig{} +func newMockOpts() *clientconfig.NifiConfig { + return &clientconfig.NifiConfig{} } func 
newMockHttpClient(c *nigoapi.Configuration) *nigoapi.APIClient { diff --git a/pkg/pki/certmanagerpki/certmanager_tls_config.go b/pkg/pki/certmanagerpki/certmanager_tls_config.go index bb7c54121..854ab45ed 100644 --- a/pkg/pki/certmanagerpki/certmanager_tls_config.go +++ b/pkg/pki/certmanagerpki/certmanager_tls_config.go @@ -19,6 +19,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" + "sigs.k8s.io/controller-runtime/pkg/client" "strings" "github.com/Orange-OpenSource/nifikop/api/v1alpha1" @@ -32,12 +33,20 @@ import ( // GetControllerTLSConfig creates a TLS config from the user secret created for // cruise control and manager operations func (c *certManager) GetControllerTLSConfig() (config *tls.Config, err error) { + config, err = GetControllerTLSConfigFromSecret(c.client, v1alpha1.SecretReference{ + Namespace: c.cluster.Namespace, + Name: fmt.Sprintf(pkicommon.NodeControllerTemplate, c.cluster.Name), + }) + return +} + +func GetControllerTLSConfigFromSecret(client client.Client, ref v1alpha1.SecretReference) (config *tls.Config, err error) { config = &tls.Config{} tlsKeys := &corev1.Secret{} - err = c.client.Get(context.TODO(), + err = client.Get(context.TODO(), types.NamespacedName{ - Namespace: c.cluster.Namespace, - Name: fmt.Sprintf(pkicommon.NodeControllerTemplate, c.cluster.Name), + Namespace: ref.Namespace, + Name: ref.Name, }, tlsKeys, ) diff --git a/pkg/resources/nifi/configmap.go b/pkg/resources/nifi/configmap.go index 1356a0af9..98f66f6fe 100644 --- a/pkg/resources/nifi/configmap.go +++ b/pkg/resources/nifi/configmap.go @@ -19,6 +19,7 @@ import ( "context" "fmt" "github.com/Orange-OpenSource/nifikop/pkg/errorfactory" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config/nificluster" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" runtimeClient "sigs.k8s.io/controller-runtime/pkg/client" @@ -27,7 +28,6 @@ import ( "text/template" "github.com/Orange-OpenSource/nifikop/api/v1alpha1" - 
"github.com/Orange-OpenSource/nifikop/pkg/nificlient" "github.com/Orange-OpenSource/nifikop/pkg/resources/templates" "github.com/Orange-OpenSource/nifikop/pkg/resources/templates/config" "github.com/Orange-OpenSource/nifikop/pkg/util" @@ -62,7 +62,7 @@ func (r *Reconciler) configMap(id int32, nodeConfig *v1alpha1.NodeConfig, server }, } - if nificlient.UseSSL(r.NifiCluster) { + if nificluster.UseSSL(r.NifiCluster) { configMap.Data["authorizers.xml"] = []byte(r.getAuthorizersConfigString(nodeConfig, id, log)) } return configMap @@ -137,7 +137,7 @@ func (r *Reconciler) getNifiPropertiesConfigString(nConfig *v1alpha1.NodeConfig, webProxyHosts = strings.Join(append(dnsNames, base.WebProxyHosts...), ",") } - useSSL := nificlient.UseSSL(r.NifiCluster) + useSSL := nificluster.UseSSL(r.NifiCluster) var out bytes.Buffer t := template.Must(template.New("nConfig-config").Parse(config.NifiPropertiesTemplate)) if err := t.Execute(&out, map[string]interface{}{ diff --git a/pkg/resources/nifi/nifi.go b/pkg/resources/nifi/nifi.go index 0c440fc5e..05cdf3ef1 100644 --- a/pkg/resources/nifi/nifi.go +++ b/pkg/resources/nifi/nifi.go @@ -17,7 +17,7 @@ package nifi import ( "context" "fmt" - "github.com/Orange-OpenSource/nifikop/pkg/nificlient" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config" "github.com/Orange-OpenSource/nifikop/pkg/pki" "reflect" "strings" @@ -238,7 +238,11 @@ func (r *Reconciler) Reconcile(log logr.Logger) error { return errors.WrapIf(err, "Cluster is not ready yet, will wait until it is.") } - clientConfig, err := nificlient.ClusterConfig(r.Client, r.NifiCluster) + configManager := config.GetClientConfigManager(r.Client, v1alpha1.ClusterReference{ + Namespace: r.NifiCluster.Namespace, + Name: r.NifiCluster.Name, + }) + clientConfig, err := configManager.BuildConfig() if err != nil { // the cluster does not exist - should have been caught pre-flight return errors.WrapIf(err, "Failed to create HTTP client the for referenced cluster") @@ -252,7 +256,7 @@ 
func (r *Reconciler) Reconcile(log logr.Logger) error { return err } - if nificlient.UseSSL(r.NifiCluster) { + if clientConfig.UseSSL { if err := r.reconcileNifiUsersAndGroups(log); err != nil { return errors.WrapIf(err, "failed to reconcile resource") } @@ -552,7 +556,7 @@ func (r *Reconciler) reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) ( err := r.Client.List(context.TODO(), podList, client.InNamespace(currentPod.Namespace), matchingLabels) if err != nil && len(podList.Items) == 0 { return errorfactory.New(errorfactory.APIFailure{}, - err, "getting resource failed", "kind", desiredType), false + err, "getting resource failed", "kind", desiredType), false } if len(podList.Items) == 0 { @@ -562,14 +566,14 @@ func (r *Reconciler) reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) ( if err := r.Client.Create(context.TODO(), desiredPod); err != nil { return errorfactory.New(errorfactory.APIFailure{}, - err, "creating resource failed", "kind", desiredType), false + err, "creating resource failed", "kind", desiredType), false } // Update status to Config InSync because node is configured to go statusErr := k8sutil.UpdateNodeStatus(r.Client, []string{desiredPod.Labels["nodeId"]}, r.NifiCluster, v1alpha1.ConfigInSync, log) if statusErr != nil { return errorfactory.New(errorfactory.StatusUpdateError{}, - statusErr, "updating status for resource failed", "kind", desiredType), false + statusErr, "updating status for resource failed", "kind", desiredType), false } if val, ok := r.NifiCluster.Status.NodesState[desiredPod.Labels["nodeId"]]; ok && @@ -583,7 +587,7 @@ func (r *Reconciler) reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) ( statusErr = k8sutil.UpdateNodeStatus(r.Client, []string{desiredPod.Labels["nodeId"]}, r.NifiCluster, gracefulActionState, log) if statusErr != nil { return errorfactory.New(errorfactory.StatusUpdateError{}, - statusErr, "could not update node graceful action state"), false + statusErr, "could not update node graceful 
action state"), false } } log.Info("resource created") @@ -597,11 +601,11 @@ func (r *Reconciler) reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) ( } } else { return errorfactory.New(errorfactory.InternalError{}, errors.New("reconcile failed"), - fmt.Sprintf("could not find status for the given node id, %s", nodeId)), false + fmt.Sprintf("could not find status for the given node id, %s", nodeId)), false } } else { return errorfactory.New(errorfactory.TooManyResources{}, errors.New("reconcile failed"), - "more then one matching pod found", "labels", matchingLabels), false + "more then one matching pod found", "labels", matchingLabels), false } // TODO check if this err == nil check necessary (baluchicken) @@ -634,7 +638,7 @@ func (r *Reconciler) reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) ( if err := k8sutil.UpdateNodeStatus(r.Client, []string{desiredPod.Labels["nodeId"]}, r.NifiCluster, v1alpha1.GracefulActionState{ErrorMessage: "", State: v1alpha1.GracefulUpscaleSucceeded}, log); err != nil { return errorfactory.New(errorfactory.StatusUpdateError{}, - err, "could not update node graceful action state"), false + err, "could not update node graceful action state"), false } } log.V(1).Info("resource is in sync") @@ -657,7 +661,7 @@ func (r *Reconciler) reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) ( if r.NifiCluster.Status.State != v1alpha1.NifiClusterRollingUpgrading { if err := k8sutil.UpdateCRStatus(r.Client, r.NifiCluster, v1alpha1.NifiClusterRollingUpgrading, log); err != nil { return errorfactory.New(errorfactory.StatusUpdateError{}, - err, "setting state to rolling upgrade failed"), false + err, "setting state to rolling upgrade failed"), false } } @@ -672,16 +676,16 @@ func (r *Reconciler) reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) ( for _, pod := range podList.Items { if k8sutil.IsMarkedForDeletion(pod.ObjectMeta) { return errorfactory.New(errorfactory.ReconcileRollingUpgrade{}, - errors.New("pod is still 
terminating"), "rolling upgrade in progress"), false + errors.New("pod is still terminating"), "rolling upgrade in progress"), false } if k8sutil.IsPodContainsPendingContainer(&pod) { return errorfactory.New(errorfactory.ReconcileRollingUpgrade{}, - errors.New("pod is still creating"), "rolling upgrade in progress"), false + errors.New("pod is still creating"), "rolling upgrade in progress"), false } if !k8sutil.PodReady(&pod) { return errorfactory.New(errorfactory.ReconcileRollingUpgrade{}, - errors.New("pod is still not ready"), "rolling upgrade in progress"), false + errors.New("pod is still not ready"), "rolling upgrade in progress"), false } } } @@ -690,7 +694,7 @@ func (r *Reconciler) reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) ( err = r.Client.Delete(context.TODO(), currentPod) if err != nil { return errorfactory.New(errorfactory.APIFailure{}, - err, "deleting resource failed", "kind", desiredType), false + err, "deleting resource failed", "kind", desiredType), false } } @@ -869,7 +873,11 @@ func (r *Reconciler) reconcilePrometheusReportingTask(log logr.Logger) error { var err error - clientConfig, err := nificlient.ClusterConfig(r.Client, r.NifiCluster) + configManager := config.GetClientConfigManager(r.Client, v1alpha1.ClusterReference{ + Namespace: r.NifiCluster.Namespace, + Name: r.NifiCluster.Name, + }) + clientConfig, err := configManager.BuildConfig() if err != nil { return err } @@ -907,7 +915,11 @@ func (r *Reconciler) reconcilePrometheusReportingTask(log logr.Logger) error { } func (r *Reconciler) reconcileMaximumTimerDrivenThreadCount(log logr.Logger) error { - clientConfig, err := nificlient.ClusterConfig(r.Client, r.NifiCluster) + configManager := config.GetClientConfigManager(r.Client, v1alpha1.ClusterReference{ + Namespace: r.NifiCluster.Namespace, + Name: r.NifiCluster.Name, + }) + clientConfig, err := configManager.BuildConfig() if err != nil { return err } diff --git a/pkg/resources/nifi/pod.go b/pkg/resources/nifi/pod.go 
index f61dbbc6e..b39e34d97 100644 --- a/pkg/resources/nifi/pod.go +++ b/pkg/resources/nifi/pod.go @@ -16,12 +16,12 @@ package nifi import ( "fmt" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config/nificluster" runtimeClient "sigs.k8s.io/controller-runtime/pkg/client" "sort" "strings" "github.com/Orange-OpenSource/nifikop/api/v1alpha1" - "github.com/Orange-OpenSource/nifikop/pkg/nificlient" "github.com/Orange-OpenSource/nifikop/pkg/resources/templates" "github.com/Orange-OpenSource/nifikop/pkg/util" nifiutil "github.com/Orange-OpenSource/nifikop/pkg/util/nifi" @@ -401,7 +401,7 @@ func (r *Reconciler) createNifiNodeContainer(nodeConfig *v1alpha1.NodeConfig, id requestClusterStatus := fmt.Sprintf("curl --fail -v http://%s/nifi-api/controller/cluster > $NIFI_BASE_DIR/cluster.state", nifiutil.GenerateRequestNiFiAllNodeAddressFromCluster(r.NifiCluster)) - if nificlient.UseSSL(r.NifiCluster) { + if nificluster.UseSSL(r.NifiCluster) { requestClusterStatus = fmt.Sprintf( "curl --fail -kv --cert /var/run/secrets/java.io/keystores/client/tls.crt --key /var/run/secrets/java.io/keystores/client/tls.key https://%s/nifi-api/controller/cluster > $NIFI_BASE_DIR/cluster.state", nifiutil.GenerateRequestNiFiAllNodeAddressFromCluster(r.NifiCluster)) diff --git a/pkg/util/clientconfig/common.go b/pkg/util/clientconfig/common.go new file mode 100644 index 000000000..b3b93ee3e --- /dev/null +++ b/pkg/util/clientconfig/common.go @@ -0,0 +1,41 @@ +package clientconfig + +import ( + "crypto/tls" +) + +const ( + NifiDefaultTimeout = int64(5) +) + +type Manager interface { + BuildConfig() (*NifiConfig, error) + BuildConnect() (ClusterConnect, error) + IsExternal() bool +} + +type ClusterConnect interface { + //NodeConnection(log logr.Logger, client client.Client) (node nificlient.NifiClient, err error) + IsInternal() bool + IsExternal() bool + ClusterLabelString() string + IsReady() bool + Id() string +} + +// NifiConfig are the options to creating a new ClusterAdmin client +type 
NifiConfig struct { + NodeURITemplate string + NodesURI map[int32]NodeUri + NifiURI string + UseSSL bool + TLSConfig *tls.Config + + OperationTimeout int64 + RootProcessGroupId string +} + +type NodeUri struct { + HostListener string + RequestHost string +} From 93b7dcc2e10de4480693f3e0a4418be6c721bb90 Mon Sep 17 00:00:00 2001 From: erdrix Date: Tue, 7 Sep 2021 23:51:17 +0200 Subject: [PATCH 03/18] fix declaration --- pkg/nificlient/config/nificluster/nificluster_config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/nificlient/config/nificluster/nificluster_config.go b/pkg/nificlient/config/nificluster/nificluster_config.go index 38aff5e11..6f2c53380 100644 --- a/pkg/nificlient/config/nificluster/nificluster_config.go +++ b/pkg/nificlient/config/nificluster/nificluster_config.go @@ -28,7 +28,7 @@ func (n *nifiCluster) BuildConnect() (cluster clientconfig.ClusterConnect, err e } func (n *nifiCluster) IsExternal() bool { - return n.IsExternal() + return false } // ClusterConfig creates connection options from a NifiCluster CR From 9a4aeac0295da54f3425b8ac0b049970c04cd698 Mon Sep 17 00:00:00 2001 From: erdrix Date: Tue, 14 Sep 2021 11:19:57 +0200 Subject: [PATCH 04/18] add support for Shutdown pods --- pkg/resources/nifi/nifi.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/resources/nifi/nifi.go b/pkg/resources/nifi/nifi.go index 05cdf3ef1..cc128c243 100644 --- a/pkg/resources/nifi/nifi.go +++ b/pkg/resources/nifi/nifi.go @@ -629,7 +629,7 @@ func (r *Reconciler) reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) ( if err != nil { log.Error(err, "could not match objects", "kind", desiredType) } else if patchResult.IsEmpty() { - if !k8sutil.IsPodContainsTerminatedContainer(currentPod) && r.NifiCluster.Status.NodesState[currentPod.Labels["nodeId"]].ConfigurationState == v1alpha1.ConfigInSync { + if !k8sutil.IsPodTerminatedOrShutdown(currentPod) && 
r.NifiCluster.Status.NodesState[currentPod.Labels["nodeId"]].ConfigurationState == v1alpha1.ConfigInSync { if val, found := r.NifiCluster.Status.NodesState[desiredPod.Labels["nodeId"]]; found && val.GracefulActionState.State == v1alpha1.GracefulUpscaleRunning && val.GracefulActionState.ActionStep == v1alpha1.ConnectStatus && @@ -656,7 +656,7 @@ func (r *Reconciler) reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) ( return errors.WrapIf(err, "could not apply last state to annotation"), false } - if !k8sutil.IsPodContainsTerminatedContainer(currentPod) { + if !k8sutil.IsPodTerminatedOrShutdown(currentPod) { if r.NifiCluster.Status.State != v1alpha1.NifiClusterRollingUpgrading { if err := k8sutil.UpdateCRStatus(r.Client, r.NifiCluster, v1alpha1.NifiClusterRollingUpgrading, log); err != nil { From 1bf8999584a3510f9e38fb578a886783f08bbba4 Mon Sep 17 00:00:00 2001 From: erdrix Date: Tue, 14 Sep 2021 14:50:50 +0200 Subject: [PATCH 05/18] support tls configuration --- api/v1alpha1/common_types.go | 17 ++-- pkg/k8sutil/resource.go | 5 ++ pkg/nificlient/config/config_manager.go | 38 +------- pkg/nificlient/config/tls/tls.go | 20 +++++ pkg/nificlient/config/tls/tls_config.go | 110 ++++++++++++++++++++++++ 5 files changed, 144 insertions(+), 46 deletions(-) create mode 100644 pkg/nificlient/config/tls/tls.go create mode 100644 pkg/nificlient/config/tls/tls_config.go diff --git a/api/v1alpha1/common_types.go b/api/v1alpha1/common_types.go index a6c0708f5..385c93ff9 100644 --- a/api/v1alpha1/common_types.go +++ b/api/v1alpha1/common_types.go @@ -117,11 +117,6 @@ type SecretConfigReference struct { Data string `json:"data"` } -const ( - EXTERNAL_REFERENCE string = "external" - INTERNAL_REFERENCE string = "internal" -) - // ClusterReference states a reference to a cluster for dataflow/registryclient/user // provisioning type ClusterReference struct { @@ -131,7 +126,7 @@ type ClusterReference struct { Type ClientConfigType `json:"type,omitempty"` NodeURITemplate string 
`json:"nodeURITemplate,omitempty"` NodeIds []int32 `json:"nodeIds,omitempty"` - NifiURI string `json:"nifiURI,omitempty"` + NifiURI string `json:"nifiURI,omitempty"` RootProcessGroupId string `json:"rootProcessGroupId,omitempty"` SecretRef SecretReference `json:"secretRef,omitempty"` } @@ -143,9 +138,9 @@ func (c *ClusterReference) GetType() ClientConfigType { return c.Type } -// @TODO func (c *ClusterReference) IsSet() bool { - return (c.Name != "" && c.GetType() == ClientConfigNiFiCluster) || (c.NodeURITemplate != "" && c.GetType() != "" && c.GetType() != ClientConfigNiFiCluster) + return (c.GetType() == ClientConfigNiFiCluster && c.Name != "") || + (c.GetType() != ClientConfigNiFiCluster && c.NodeURITemplate != "" && c.NifiURI != "" && c.RootProcessGroupId != "") } // RegistryClientReference states a reference to a registry client for dataflow @@ -425,14 +420,14 @@ const ( func ClusterRefsEquals(clusterRefs []ClusterReference) bool { c1 := clusterRefs[0] - refType := c1.Type + refType := c1.GetType() hostname := c1.NodeURITemplate name := c1.Name ns := c1.Namespace var secretRefs []SecretReference for _, cluster := range clusterRefs { - if refType != cluster.Type { + if refType != cluster.GetType() { return false } if c1.IsExternal() { @@ -452,7 +447,7 @@ func ClusterRefsEquals(clusterRefs []ClusterReference) bool { } func (c ClusterReference) IsExternal() bool { - return c.Type != ClientConfigNiFiCluster + return c.GetType() != ClientConfigNiFiCluster } func SecretRefsEquals(secretRefs []SecretReference) bool { diff --git a/pkg/k8sutil/resource.go b/pkg/k8sutil/resource.go index 9baa5a8fe..70edd759b 100644 --- a/pkg/k8sutil/resource.go +++ b/pkg/k8sutil/resource.go @@ -134,6 +134,11 @@ func CheckIfObjectUpdated(log logr.Logger, desiredType reflect.Type, current, de } } +func IsPodTerminatedOrShutdown(pod *corev1.Pod) bool { + return (pod.Status.Phase == corev1.PodFailed && pod.Status.Reason == "Shutdown") || + IsPodContainsTerminatedContainer(pod) +} + func 
IsPodContainsTerminatedContainer(pod *corev1.Pod) bool { for _, containerState := range pod.Status.ContainerStatuses { if containerState.State.Terminated != nil { diff --git a/pkg/nificlient/config/config_manager.go b/pkg/nificlient/config/config_manager.go index dacfbee34..e7c1e20a4 100644 --- a/pkg/nificlient/config/config_manager.go +++ b/pkg/nificlient/config/config_manager.go @@ -3,6 +3,7 @@ package config import ( "github.com/Orange-OpenSource/nifikop/api/v1alpha1" "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config/nificluster" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config/tls" "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -13,8 +14,8 @@ func GetClientConfigManager(client client.Client, clusterRef v1alpha1.ClusterRef switch clusterRef.Type { case v1alpha1.ClientConfigNiFiCluster: return nificluster.New(client, clusterRef) - //case v1alpha1.ClientConfigExternalTLS: - // return + case v1alpha1.ClientConfigExternalTLS: + return tls.New(client, clusterRef) //case v1alpha1.ClientConfigExternalBasic: // return case MockClientConfig: @@ -46,36 +47,3 @@ func (n *mockClientConfig) BuildConnect() (cluster clientconfig.ClusterConnect, func (n *mockClientConfig) IsExternal() bool { return true } - -//// external -//func ExternalTLSConfig(ref v1alpha1.ClusterReference) (*NifiConfig, error) { -// nodesURI := generateNodesAddressFromTemplate(ref.NodeIds, ref.NodeURITemplate) -// -// conf := &NifiConfig{} -// conf.RootProcessGroupId = ref.RootProcessGroupId -// conf.NodeURITemplate = ref.NodeURITemplate -// conf.NodesURI = nodesURI -// conf.NifiURI = ref.NifiURI -// conf.OperationTimeout = nifiDefaultTimeout -// -// tlsConfig, err := certmanagerpki.GetControllerTLSConfigFromSecret() -// if err != nil { -// return conf, err -// } -// conf.UseSSL = true -// conf.TLSConfig = tlsConfig -// return conf, nil -//} -// -// -//func generateNodesAddressFromTemplate(ids []int32, template string) 
map[int32]nodeUri { -// addresses := make(map[int32]nodeUri) -// -// for _,nId := range ids { -// addresses[nId] = nodeUri{ -// HostListener: fmt.Sprintf(template ,nId), -// RequestHost: fmt.Sprintf(template ,nId), -// } -// } -// return addresses -//} diff --git a/pkg/nificlient/config/tls/tls.go b/pkg/nificlient/config/tls/tls.go new file mode 100644 index 000000000..6c94806f5 --- /dev/null +++ b/pkg/nificlient/config/tls/tls.go @@ -0,0 +1,20 @@ +package tls + +import ( + "github.com/Orange-OpenSource/nifikop/api/v1alpha1" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Tls interface { + clientconfig.Manager +} + +type tls struct { + client client.Client + clusterRef v1alpha1.ClusterReference +} + +func New(client client.Client, clusterRef v1alpha1.ClusterReference) Tls { + return &tls{clusterRef: clusterRef, client: client} +} diff --git a/pkg/nificlient/config/tls/tls_config.go b/pkg/nificlient/config/tls/tls_config.go new file mode 100644 index 000000000..eadafaa1c --- /dev/null +++ b/pkg/nificlient/config/tls/tls_config.go @@ -0,0 +1,110 @@ +package tls + +import ( + "fmt" + "github.com/Orange-OpenSource/nifikop/api/v1alpha1" + "github.com/Orange-OpenSource/nifikop/pkg/common" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient" + "github.com/Orange-OpenSource/nifikop/pkg/pki/certmanagerpki" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var log = ctrl.Log.WithName("tls_config") + +func (n *tls) BuildConfig() (*clientconfig.NifiConfig, error) { + return clusterConfig(n.client, n.clusterRef) +} + +func (n *tls) BuildConnect() (cluster clientconfig.ClusterConnect, err error) { + config, err := n.BuildConfig() + cluster = &ExternalTLSCluster{ + NodeURITemplate: n.clusterRef.NodeURITemplate, + NodeIds: n.clusterRef.NodeIds, + NifiURI: n.clusterRef.NifiURI, + RootProcessGroupId: 
n.clusterRef.RootProcessGroupId, + + nifiConfig: config, + } + return +} + +func (n *tls) IsExternal() bool { + return false +} + +func clusterConfig(client client.Client, ref v1alpha1.ClusterReference) (*clientconfig.NifiConfig, error) { + nodesURI := generateNodesAddressFromTemplate(ref.NodeIds, ref.NodeURITemplate) + + conf := &clientconfig.NifiConfig{} + conf.RootProcessGroupId = ref.RootProcessGroupId + conf.NodeURITemplate = ref.NodeURITemplate + conf.NodesURI = nodesURI + conf.NifiURI = ref.NifiURI + conf.OperationTimeout = clientconfig.NifiDefaultTimeout + + tlsConfig, err := certmanagerpki.GetControllerTLSConfigFromSecret(client, ref.SecretRef) + if err != nil { + return conf, err + } + conf.UseSSL = true + conf.TLSConfig = tlsConfig + return conf, nil +} + +func generateNodesAddressFromTemplate(ids []int32, template string) map[int32]clientconfig.NodeUri { + addresses := make(map[int32]clientconfig.NodeUri) + + for _,nId := range ids { + addresses[nId] = clientconfig.NodeUri{ + HostListener: fmt.Sprintf(template ,nId), + RequestHost: fmt.Sprintf(template ,nId), + } + } + return addresses +} + +type ExternalTLSCluster struct { + NodeURITemplate string + NodeIds []int32 + NifiURI string + RootProcessGroupId string + + nifiConfig *clientconfig.NifiConfig +} + +func (cluster *ExternalTLSCluster) IsExternal() bool { + return true +} + +func (cluster *ExternalTLSCluster) IsInternal() bool { + return false +} + +func (cluster *ExternalTLSCluster) ClusterLabelString() string { + return fmt.Sprintf("%s", cluster.NifiURI) +} + +func (cluster ExternalTLSCluster) IsReady() bool { + nClient, err := common.NewClusterConnection(log, cluster.nifiConfig) + if err != nil { + return false + } + + clusterEntity, err := nClient.DescribeCluster() + if err != nil { + return false + } + + for _, node := range clusterEntity.Cluster.Nodes{ + if node.Status != nificlient.CONNECTED_STATUS { + return false + } + } + return true +} + +func (cluster *ExternalTLSCluster) Id() string { + 
return cluster.NifiURI +} \ No newline at end of file From 6ee53f791aafdb78a4d1d019ae2e09697dc590bc Mon Sep 17 00:00:00 2001 From: erdrix Date: Fri, 17 Sep 2021 09:07:03 +0200 Subject: [PATCH 06/18] fix deletion --- pkg/resources/nifi/nifi.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/resources/nifi/nifi.go b/pkg/resources/nifi/nifi.go index cc128c243..b76e01b7d 100644 --- a/pkg/resources/nifi/nifi.go +++ b/pkg/resources/nifi/nifi.go @@ -359,7 +359,7 @@ OUTERLOOP: return errors.WrapIfWithDetails(err, "could not delete node", "id", node.Labels["nodeId"]) } - err = r.Client.Delete(context.TODO(), &corev1.ConfigMap{ObjectMeta: templates.ObjectMeta(fmt.Sprintf(templates.NodeConfigTemplate+"-%s", r.NifiCluster.Name, node.Labels["nodeId"]), LabelsForNifi(r.NifiCluster.Name), r.NifiCluster)}) + err = r.Client.Delete(context.TODO(), &corev1.Secret{ObjectMeta: templates.ObjectMeta(fmt.Sprintf(templates.NodeConfigTemplate+"-%s", r.NifiCluster.Name, node.Labels["nodeId"]), LabelsForNifi(r.NifiCluster.Name), r.NifiCluster)}) if err != nil { return errors.WrapIfWithDetails(err, "could not delete configmap for node", "id", node.Labels["nodeId"]) } From 386f65bbd3396c3349cf84d8a237265dfe0c6264 Mon Sep 17 00:00:00 2001 From: erdrix Date: Mon, 27 Sep 2021 08:41:44 +0200 Subject: [PATCH 07/18] clean --- api/v1alpha1/common_types.go | 56 +--- api/v1alpha1/nificluster_types.go | 56 +++- api/v1alpha1/zz_generated.deepcopy.go | 25 +- .../bases/nifi.orange.com_nificlusters.yaml | 43 ++- .../bases/nifi.orange.com_nifidataflows.yaml | 32 +-- ...nifi.orange.com_nifiparametercontexts.yaml | 32 +-- .../nifi.orange.com_nifiregistryclients.yaml | 32 +-- .../bases/nifi.orange.com_nifiusergroups.yaml | 32 +-- .../crd/bases/nifi.orange.com_nifiusers.yaml | 32 +-- controllers/nificluster_controller.go | 9 +- controllers/nificlustertask_controller.go | 53 +++- controllers/nifidataflow_controller.go | 7 +- .../nifiparametercontext_controller.go | 15 +- 
controllers/nifiregistryclient_controller.go | 7 +- controllers/nifiuser_controller.go | 9 +- controllers/nifiusergroup_controller.go | 7 +- go.mod | 1 + pkg/clientwrappers/scale/scale.go | 26 +- pkg/common/common.go | 54 ++-- pkg/k8sutil/resource.go | 30 ++- pkg/nificlient/access.go | 42 +++ pkg/nificlient/client.go | 107 +++++--- pkg/nificlient/client_test.go | 26 +- .../nificluster.go => basic/basic.go} | 10 +- pkg/nificlient/config/basic/basic_config.go | 245 ++++++++++++++++++ .../common.go} | 78 +++--- .../nificluster_config_test.go | 16 +- pkg/nificlient/config/config_manager.go | 20 +- .../config/nificluster/externalcluster.go | 54 ++++ .../config/nificluster/internalcluster.go | 39 +++ pkg/nificlient/config/tls/tls_config.go | 122 +++------ pkg/nificlient/config/tls/tls_config_test.go | 112 ++++++++ pkg/nificlient/controllerconfig.go | 8 +- pkg/nificlient/flow.go | 20 +- pkg/nificlient/flowfiles.go | 12 +- pkg/nificlient/inputport.go | 4 +- pkg/nificlient/mock_client_test.go | 7 + pkg/nificlient/parametercontext.go | 20 +- pkg/nificlient/policies.go | 16 +- pkg/nificlient/processgroup.go | 16 +- pkg/nificlient/processor.go | 4 +- pkg/nificlient/registryclient.go | 16 +- pkg/nificlient/reportingtask.go | 20 +- pkg/nificlient/snippet.go | 8 +- pkg/nificlient/system.go | 37 ++- pkg/nificlient/system_test.go | 6 +- pkg/nificlient/user.go | 20 +- pkg/nificlient/usergroup.go | 20 +- pkg/nificlient/version.go | 16 +- pkg/pki/certmanagerpki/certmanager_test.go | 2 +- pkg/pki/pki_manager_test.go | 2 +- pkg/resources/nifi/allNodeService.go | 4 +- pkg/resources/nifi/headlessService.go | 4 +- pkg/resources/nifi/nifi.go | 50 ++-- pkg/resources/nifi/pod.go | 20 +- pkg/resources/nifi/poddisruptionbudget.go | 5 +- pkg/resources/nifi/pvc.go | 3 +- .../nifi/{configmap.go => secretconfig.go} | 21 +- pkg/resources/nifi/service.go | 8 +- pkg/resources/templates/variables.go | 5 +- pkg/util/clientconfig/common.go | 9 +- pkg/util/nifi/common.go | 6 + 
pkg/util/pki/pki_common_test.go | 1 + pkg/util/util.go | 7 + 64 files changed, 1147 insertions(+), 679 deletions(-) create mode 100644 pkg/nificlient/access.go rename pkg/nificlient/config/{nificluster/nificluster.go => basic/basic.go} (68%) create mode 100644 pkg/nificlient/config/basic/basic_config.go rename pkg/nificlient/config/{nificluster/nificluster_config.go => common/common.go} (52%) rename pkg/nificlient/config/{nificluster => common}/nificluster_config_test.go (94%) create mode 100644 pkg/nificlient/config/nificluster/externalcluster.go create mode 100644 pkg/nificlient/config/nificluster/internalcluster.go create mode 100644 pkg/nificlient/config/tls/tls_config_test.go rename pkg/resources/nifi/{configmap.go => secretconfig.go} (97%) diff --git a/api/v1alpha1/common_types.go b/api/v1alpha1/common_types.go index 385c93ff9..4bbccdf56 100644 --- a/api/v1alpha1/common_types.go +++ b/api/v1alpha1/common_types.go @@ -48,9 +48,12 @@ type InitClusterNode bool // PKIBackend represents an interface implementing the PKIManager type PKIBackend string -// ClientConfigType represents an interface implementing the ClientConfigManager +// ClientConfigType represents an interface implementing the ClientConfigManager type ClientConfigType string +// ClusterType represents an interface implementing the ClientConfigManager +type ClusterType string + // AccessPolicyType represents the type of access policy type AccessPolicyType string @@ -120,27 +123,8 @@ type SecretConfigReference struct { // ClusterReference states a reference to a cluster for dataflow/registryclient/user // provisioning type ClusterReference struct { - Name string `json:"name,omitempty"` + Name string `json:"name"` Namespace string `json:"namespace,omitempty"` - // +kubebuilder:validation:Enum={"external-tls","nificluster","external-basic"} - Type ClientConfigType `json:"type,omitempty"` - NodeURITemplate string `json:"nodeURITemplate,omitempty"` - NodeIds []int32 `json:"nodeIds,omitempty"` - NifiURI 
string `json:"nifiURI,omitempty"` - RootProcessGroupId string `json:"rootProcessGroupId,omitempty"` - SecretRef SecretReference `json:"secretRef,omitempty"` -} - -func (c *ClusterReference) GetType() ClientConfigType { - if c.Type == "" { - return ClientConfigNiFiCluster - } - return c.Type -} - -func (c *ClusterReference) IsSet() bool { - return (c.GetType() == ClientConfigNiFiCluster && c.Name != "") || - (c.GetType() != ClientConfigNiFiCluster && c.NodeURITemplate != "" && c.NifiURI != "" && c.RootProcessGroupId != "") } // RegistryClientReference states a reference to a registry client for dataflow @@ -273,9 +257,13 @@ const ( ) const ( - ClientConfigNiFiCluster ClientConfigType = "nificluster" - ClientConfigExternalTLS ClientConfigType = "external-tls" - ClientConfigExternalBasic ClientConfigType = "external-basic" + ClientConfigTLS ClientConfigType = "tls" + ClientConfigBasic ClientConfigType = "basic" +) + +const ( + ExternalCluster ClusterType = "external" + InternalCluster ClusterType = "internal" ) const ( @@ -420,36 +408,18 @@ const ( func ClusterRefsEquals(clusterRefs []ClusterReference) bool { c1 := clusterRefs[0] - refType := c1.GetType() - hostname := c1.NodeURITemplate name := c1.Name ns := c1.Namespace - var secretRefs []SecretReference for _, cluster := range clusterRefs { - if refType != cluster.GetType() { - return false - } - if c1.IsExternal() { - if hostname != cluster.NodeURITemplate { - return false - } - secretRefs = append(secretRefs, SecretReference{Name: cluster.SecretRef.Name, Namespace: cluster.Namespace}) - } else if name != cluster.Name || ns != cluster.Namespace { + if name != cluster.Name || ns != cluster.Namespace { return false } } - if c1.IsExternal() { - return SecretRefsEquals(secretRefs) - } return true } -func (c ClusterReference) IsExternal() bool { - return c.GetType() != ClientConfigNiFiCluster -} - func SecretRefsEquals(secretRefs []SecretReference) bool { name := secretRefs[0].Name ns := secretRefs[0].Namespace diff 
--git a/api/v1alpha1/nificluster_types.go b/api/v1alpha1/nificluster_types.go index 9978572bc..62252402b 100644 --- a/api/v1alpha1/nificluster_types.go +++ b/api/v1alpha1/nificluster_types.go @@ -17,7 +17,6 @@ limitations under the License. package v1alpha1 import ( - "fmt" cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -38,6 +37,20 @@ const ( // NifiClusterSpec defines the desired state of NifiCluster type NifiClusterSpec struct { + // +kubebuilder:validation:Enum={"tls","basic"} + ClientType ClientConfigType `json:"clientType,omitempty"` + // +kubebuilder:validation:Enum={"external","internal"} + Type ClusterType `json:"type,omitempty"` + // nodeURITemplate used to dynamically compute node uri (used if external-* type) + NodeURITemplate string `json:"nodeURITemplate,omitempty"` + // nifiURI used access through a LB uri (used if external-* type) + NifiURI string `json:"nifiURI,omitempty"` + // rootProcessGroupId contains the uuid of the root process group for this cluster (used if external-* type) + RootProcessGroupId string `json:"rootProcessGroupId,omitempty"` + // secretRef reference the secret containing the informations required to authentiticate to the cluster (used if external-* type) + SecretRef SecretReference `json:"secretRef,omitempty"` + // proxyUrl define the proxy required to query the NiFi cluster (used if external-* type) + ProxyUrl string `json:"proxyUrl,omitempty"` // Service defines the policy for services owned by NiFiKop operator. Service ServicePolicy `json:"service,omitempty"` // Pod defines the policy for pods owned by NiFiKop operator. @@ -45,7 +58,7 @@ type NifiClusterSpec struct { // zKAddress specifies the ZooKeeper connection string // in the form hostname:port where host and port are those of a Zookeeper server. 
// TODO: rework for nice zookeeper connect string = - ZKAddress string `json:"zkAddress"` + ZKAddress string `json:"zkAddress,omitempty"` // zKPath specifies the Zookeeper chroot path as part // of its Zookeeper connection string which puts its data under same path in the global ZooKeeper namespace. ZKPath string `json:"zkPath,omitempty"` @@ -59,7 +72,7 @@ type NifiClusterSpec struct { // oneNifiNodePerNode if set to true every nifi node is started on a new node, if there is not enough node to do that // it will stay in pending state. If set to false the operator also tries to schedule the nifi node to a unique node // but if the node number is insufficient the nifi node will be scheduled to a node where a nifi node is already running. - OneNifiNodePerNode bool `json:"oneNifiNodePerNode"` + OneNifiNodePerNode bool `json:"oneNifiNodePerNode,omitempty"` // propage PropagateLabels bool `json:"propagateLabels,omitempty"` // managedAdminUsers contains the list of users that will be added to the managed admin group (with all rights) @@ -82,7 +95,7 @@ type NifiClusterSpec struct { // TODO : add vault //VaultConfig VaultConfig `json:"vaultConfig,omitempty"` // listenerConfig specifies nifi's listener specifig configs - ListenersConfig ListenersConfig `json:"listenersConfig"` + ListenersConfig *ListenersConfig `json:"listenersConfig,omitempty"` // SidecarsConfig defines additional sidecar configurations SidecarConfigs []corev1.Container `json:"sidecarConfigs,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` // ExternalService specifies settings required to access nifi externally @@ -656,16 +669,35 @@ func (nSpec *NifiClusterSpec) GetMetricPort() *int { return nil } -func (cluster *NifiCluster) IsExternal() bool { - return false +func (cluster *NifiCluster) RootProcessGroupId() string { + return cluster.Status.RootProcessGroupId } -func (cluster *NifiCluster) IsInternal() bool { - return true +func (c *NifiCluster) GetClientType() 
ClientConfigType { + if c.Spec.ClientType == "" { + return ClientConfigTLS + } + return c.Spec.ClientType +} + +func (c *NifiCluster) GetType() ClusterType { + if c.Spec.Type == "" { + return InternalCluster + } + return ExternalCluster } -func (cluster *NifiCluster) ClusterLabelString() string { - return fmt.Sprintf("%s.%s", cluster.Name, cluster.Namespace) +func (c *NifiCluster) IsSet() bool { + return (c.GetType() == InternalCluster && len(c.Name) != 0) || + (c.GetType() != ExternalCluster && len(c.Spec.NodeURITemplate) != 0 && len(c.Spec.RootProcessGroupId) != 0) +} + +func (c *NifiCluster) IsInternal() bool { + return c.GetType() == InternalCluster +} + +func (c NifiCluster) IsExternal() bool { + return c.GetType() != InternalCluster } func (cluster NifiCluster) IsReady() bool { @@ -681,7 +713,3 @@ func (cluster NifiCluster) IsReady() bool { func (cluster *NifiCluster) Id() string { return cluster.Name } - -func (cluster *NifiCluster) RootProcessGroupId() string { - return cluster.Status.RootProcessGroupId -} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 9c6ebb32a..de8cae631 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -94,12 +94,6 @@ func (in *BootstrapProperties) DeepCopy() *BootstrapProperties { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterReference) DeepCopyInto(out *ClusterReference) { *out = *in - if in.NodeIds != nil { - in, out := &in.NodeIds, &out.NodeIds - *out = make([]int32, len(*in)) - copy(*out, *in) - } - out.SecretRef = in.SecretRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterReference. @@ -382,6 +376,7 @@ func (in *NifiClusterList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NifiClusterSpec) DeepCopyInto(out *NifiClusterSpec) { *out = *in + out.SecretRef = in.SecretRef in.Service.DeepCopyInto(&out.Service) in.Pod.DeepCopyInto(&out.Pod) if in.InitContainers != nil { @@ -419,7 +414,11 @@ func (in *NifiClusterSpec) DeepCopyInto(out *NifiClusterSpec) { out.DisruptionBudget = in.DisruptionBudget out.LdapConfiguration = in.LdapConfiguration out.NifiClusterTaskSpec = in.NifiClusterTaskSpec - in.ListenersConfig.DeepCopyInto(&out.ListenersConfig) + if in.ListenersConfig != nil { + in, out := &in.ListenersConfig, &out.ListenersConfig + *out = new(ListenersConfig) + (*in).DeepCopyInto(*out) + } if in.SidecarConfigs != nil { in, out := &in.SidecarConfigs, &out.SidecarConfigs *out = make([]v1.Container, len(*in)) @@ -562,7 +561,7 @@ func (in *NifiDataflowSpec) DeepCopyInto(out *NifiDataflowSpec) { *out = new(bool) **out = **in } - in.ClusterRef.DeepCopyInto(&out.ClusterRef) + out.ClusterRef = in.ClusterRef if in.RegistryClientRef != nil { in, out := &in.RegistryClientRef, &out.RegistryClientRef *out = new(RegistryClientReference) @@ -672,7 +671,7 @@ func (in *NifiParameterContextSpec) DeepCopyInto(out *NifiParameterContextSpec) *out = make([]Parameter, len(*in)) copy(*out, *in) } - in.ClusterRef.DeepCopyInto(&out.ClusterRef) + out.ClusterRef = in.ClusterRef if in.SecretRefs != nil { in, out := &in.SecretRefs, &out.SecretRefs *out = make([]SecretReference, len(*in)) @@ -745,7 +744,7 @@ func (in *NifiRegistryClient) DeepCopyInto(out *NifiRegistryClient) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) + out.Spec = in.Spec out.Status = in.Status } @@ -802,7 +801,7 @@ func (in *NifiRegistryClientList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NifiRegistryClientSpec) DeepCopyInto(out *NifiRegistryClientSpec) { *out = *in - in.ClusterRef.DeepCopyInto(&out.ClusterRef) + out.ClusterRef = in.ClusterRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NifiRegistryClientSpec. @@ -919,7 +918,7 @@ func (in *NifiUserGroupList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NifiUserGroupSpec) DeepCopyInto(out *NifiUserGroupSpec) { *out = *in - in.ClusterRef.DeepCopyInto(&out.ClusterRef) + out.ClusterRef = in.ClusterRef if in.UsersRef != nil { in, out := &in.UsersRef, &out.UsersRef *out = make([]UserReference, len(*in)) @@ -992,7 +991,7 @@ func (in *NifiUserList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NifiUserSpec) DeepCopyInto(out *NifiUserSpec) { *out = *in - in.ClusterRef.DeepCopyInto(&out.ClusterRef) + out.ClusterRef = in.ClusterRef if in.DNSNames != nil { in, out := &in.DNSNames, &out.DNSNames *out = make([]string, len(*in)) diff --git a/config/crd/bases/nifi.orange.com_nificlusters.yaml b/config/crd/bases/nifi.orange.com_nificlusters.yaml index 640b09ebb..74b53d424 100644 --- a/config/crd/bases/nifi.orange.com_nificlusters.yaml +++ b/config/crd/bases/nifi.orange.com_nificlusters.yaml @@ -36,6 +36,13 @@ spec: spec: description: NifiClusterSpec defines the desired state of NifiCluster properties: + clientType: + description: ClientConfigType represents an interface implementing + the ClientConfigManager + enum: + - tls + - basic + type: string clusterImage: description: clusterImage can specify the whole NiFi cluster image in one place @@ -1414,6 +1421,10 @@ spec: required: - retryDurationMinutes type: object + nifiURI: + description: nifiURI used access through a LB uri (used if external-* + type) + type: string nodeConfigGroups: 
additionalProperties: description: NodeConfig defines the node configuration @@ -1908,6 +1919,10 @@ spec: description: nodeConfigGroups specifies multiple node configs with unique name type: object + nodeURITemplate: + description: nodeURITemplate used to dynamically compute node uri + (used if external-* type) + type: string nodes: description: all node requires an image, unique id, and storageConfigs settings @@ -2849,6 +2864,10 @@ spec: propagateLabels: description: propage type: boolean + proxyUrl: + description: proxyUrl define the proxy required to query the NiFi + cluster (used if external-* type) + type: string readOnlyConfig: description: readOnlyConfig specifies the read-only type Nifi config cluster wide, all theses will be merged with node specified readOnly @@ -3226,6 +3245,21 @@ spec: type: object type: object type: object + rootProcessGroupId: + description: rootProcessGroupId contains the uuid of the root process + group for this cluster (used if external-* type) + type: string + secretRef: + description: secretRef reference the secret containing the informations + required to authentiticate to the cluster (used if external-* type) + properties: + name: + type: string + namespace: + type: string + required: + - name + type: object service: description: Service defines the policy for services owned by NiFiKop operator. @@ -4329,6 +4363,12 @@ spec: - name type: object type: array + type: + description: ClusterType represents an interface implementing the ClientConfigManager + enum: + - external + - internal + type: string zkAddress: description: 'zKAddress specifies the ZooKeeper connection string in the form hostname:port where host and port are those of a Zookeeper @@ -4340,10 +4380,7 @@ spec: in the global ZooKeeper namespace. 
type: string required: - - listenersConfig - nodes - - oneNifiNodePerNode - - zkAddress type: object status: description: NifiClusterStatus defines the observed state of NifiCluster diff --git a/config/crd/bases/nifi.orange.com_nifidataflows.yaml b/config/crd/bases/nifi.orange.com_nifidataflows.yaml index 6ac935d8f..265322bfd 100644 --- a/config/crd/bases/nifi.orange.com_nifidataflows.yaml +++ b/config/crd/bases/nifi.orange.com_nifidataflows.yaml @@ -47,36 +47,8 @@ spec: type: string namespace: type: string - nifiURI: - type: string - nodeIds: - items: - format: int32 - type: integer - type: array - nodeURITemplate: - type: string - rootProcessGroupId: - type: string - secretRef: - description: SecretReference states a reference to a secret for - parameter context provisioning - properties: - name: - type: string - namespace: - type: string - required: - - name - type: object - type: - description: ClientConfigType represents an interface implementing - the ClientConfigManager - enum: - - external-tls - - nificluster - - external-basic - type: string + required: + - name type: object flowId: description: the UUID of the flow to run. 
diff --git a/config/crd/bases/nifi.orange.com_nifiparametercontexts.yaml b/config/crd/bases/nifi.orange.com_nifiparametercontexts.yaml index 4a5792944..9fb905338 100644 --- a/config/crd/bases/nifi.orange.com_nifiparametercontexts.yaml +++ b/config/crd/bases/nifi.orange.com_nifiparametercontexts.yaml @@ -45,36 +45,8 @@ spec: type: string namespace: type: string - nifiURI: - type: string - nodeIds: - items: - format: int32 - type: integer - type: array - nodeURITemplate: - type: string - rootProcessGroupId: - type: string - secretRef: - description: SecretReference states a reference to a secret for - parameter context provisioning - properties: - name: - type: string - namespace: - type: string - required: - - name - type: object - type: - description: ClientConfigType represents an interface implementing - the ClientConfigManager - enum: - - external-tls - - nificluster - - external-basic - type: string + required: + - name type: object description: description: the Description of the Parameter Context. 
diff --git a/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml b/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml index 72615e07c..bf9c077e7 100644 --- a/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml +++ b/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml @@ -45,36 +45,8 @@ spec: type: string namespace: type: string - nifiURI: - type: string - nodeIds: - items: - format: int32 - type: integer - type: array - nodeURITemplate: - type: string - rootProcessGroupId: - type: string - secretRef: - description: SecretReference states a reference to a secret for - parameter context provisioning - properties: - name: - type: string - namespace: - type: string - required: - - name - type: object - type: - description: ClientConfigType represents an interface implementing - the ClientConfigManager - enum: - - external-tls - - nificluster - - external-basic - type: string + required: + - name type: object description: description: The Description of the Registry client. 
diff --git a/config/crd/bases/nifi.orange.com_nifiusergroups.yaml b/config/crd/bases/nifi.orange.com_nifiusergroups.yaml index 79276b178..b3777280c 100644 --- a/config/crd/bases/nifi.orange.com_nifiusergroups.yaml +++ b/config/crd/bases/nifi.orange.com_nifiusergroups.yaml @@ -102,36 +102,8 @@ spec: type: string namespace: type: string - nifiURI: - type: string - nodeIds: - items: - format: int32 - type: integer - type: array - nodeURITemplate: - type: string - rootProcessGroupId: - type: string - secretRef: - description: SecretReference states a reference to a secret for - parameter context provisioning - properties: - name: - type: string - namespace: - type: string - required: - - name - type: object - type: - description: ClientConfigType represents an interface implementing - the ClientConfigManager - enum: - - external-tls - - nificluster - - external-basic - type: string + required: + - name type: object usersRef: description: userRef contains the list of reference to NifiUsers that diff --git a/config/crd/bases/nifi.orange.com_nifiusers.yaml b/config/crd/bases/nifi.orange.com_nifiusers.yaml index e0c76213c..f3b6ab4d4 100644 --- a/config/crd/bases/nifi.orange.com_nifiusers.yaml +++ b/config/crd/bases/nifi.orange.com_nifiusers.yaml @@ -102,36 +102,8 @@ spec: type: string namespace: type: string - nifiURI: - type: string - nodeIds: - items: - format: int32 - type: integer - type: array - nodeURITemplate: - type: string - rootProcessGroupId: - type: string - secretRef: - description: SecretReference states a reference to a secret for - parameter context provisioning - properties: - name: - type: string - namespace: - type: string - required: - - name - type: object - type: - description: ClientConfigType represents an interface implementing - the ClientConfigManager - enum: - - external-tls - - nificluster - - external-basic - type: string + required: + - name type: object createCert: description: Whether or not a certificate will be created for this diff --git 
a/controllers/nificluster_controller.go b/controllers/nificluster_controller.go index 746db9a4a..2536623c5 100644 --- a/controllers/nificluster_controller.go +++ b/controllers/nificluster_controller.go @@ -97,6 +97,11 @@ func (r *NifiClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) return r.checkFinalizers(ctx, instance) } + if instance.IsExternal() { + return reconcile.Result{ + RequeueAfter: time.Duration(15) * time.Second, + }, nil + } // if len(instance.Status.State) == 0 || instance.Status.State == v1alpha1.NifiClusterInitializing { if err := k8sutil.UpdateCRStatus(r.Client, instance, v1alpha1.NifiClusterInitializing, r.Log); err != nil { @@ -220,7 +225,7 @@ func (r *NifiClusterReconciler) checkFinalizers(ctx context.Context, namespaces = r.Namespaces } - if cluster.Spec.ListenersConfig.SSLSecrets != nil { + if cluster.IsInternal() && cluster.Spec.ListenersConfig.SSLSecrets != nil { // If we haven't deleted all nifiusers yet, iterate namespaces and delete all nifiusers // with the matching label. 
if util.StringSliceContains(cluster.GetFinalizers(), clusterUsersFinalizer) { @@ -297,7 +302,7 @@ func (r *NifiClusterReconciler) ensureFinalizers(ctx context.Context, cluster *v1alpha1.NifiCluster) (updated *v1alpha1.NifiCluster, err error) { finalizers := []string{clusterFinalizer} - if cluster.Spec.ListenersConfig.SSLSecrets != nil { + if cluster.IsInternal() && cluster.Spec.ListenersConfig.SSLSecrets != nil { finalizers = append(finalizers, clusterUsersFinalizer) } for _, finalizer := range finalizers { diff --git a/controllers/nificlustertask_controller.go b/controllers/nificlustertask_controller.go index 91508fd7b..0cbdabde7 100644 --- a/controllers/nificlustertask_controller.go +++ b/controllers/nificlustertask_controller.go @@ -23,6 +23,8 @@ import ( "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers/scale" "github.com/Orange-OpenSource/nifikop/pkg/errorfactory" "github.com/Orange-OpenSource/nifikop/pkg/k8sutil" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" nifiutil "github.com/Orange-OpenSource/nifikop/pkg/util/nifi" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -218,6 +220,20 @@ func (r *NifiClusterTaskReconciler) handlePodAddCCTask(nifiCluster *v1alpha1.Nif } func (r *NifiClusterTaskReconciler) handlePodDeleteNCTask(nifiCluster *v1alpha1.NifiCluster, nodeIds []string) error { + // Prepare cluster connection configurations + var clientConfig *clientconfig.NifiConfig + var err error + + // Get the client config manager associated to the cluster ref. 
+ clusterRef := v1alpha1.ClusterReference{ + Name: nifiCluster.Name, + Namespace: nifiCluster.Namespace, + } + configManager := config.GetClientConfigManager(r.Client, clusterRef) + if clientConfig, err = configManager.BuildConfig(); err != nil { + return err + } + for _, nodeId := range nodeIds { if nifiCluster.Status.NodesState[nodeId].GracefulActionState.ActionStep == v1alpha1.ConnectNodeAction { err := r.checkNCActionStep(nodeId, nifiCluster, v1alpha1.ConnectStatus, nil) @@ -226,7 +242,7 @@ func (r *NifiClusterTaskReconciler) handlePodDeleteNCTask(nifiCluster *v1alpha1. } } - actionStep, taskStartTime, err := scale.DisconnectClusterNode(r.Client, nifiCluster, nodeId) + actionStep, taskStartTime, err := scale.DisconnectClusterNode(clientConfig, nodeId) if err != nil { r.Log.Info(fmt.Sprintf("nifi cluster communication error during downscaling node(s) id(s): %s", nodeId)) return errorfactory.New(errorfactory.NifiClusterNotReady{}, err, fmt.Sprintf("node(s) id(s): %s", nodeId)) @@ -244,6 +260,19 @@ func (r *NifiClusterTaskReconciler) handlePodDeleteNCTask(nifiCluster *v1alpha1. // TODO: Review logic to simplify it through generic method func (r *NifiClusterTaskReconciler) handlePodRunningTask(nifiCluster *v1alpha1.NifiCluster, nodeIds []string, log logr.Logger) error { + // Prepare cluster connection configurations + var clientConfig *clientconfig.NifiConfig + var err error + + // Get the client config manager associated to the cluster ref. 
+ clusterRef := v1alpha1.ClusterReference{ + Name: nifiCluster.Name, + Namespace: nifiCluster.Namespace, + } + configManager := config.GetClientConfigManager(r.Client, clusterRef) + if clientConfig, err = configManager.BuildConfig(); err != nil { + return err + } for _, nodeId := range nodeIds { // Check if node finished to connect @@ -264,7 +293,7 @@ func (r *NifiClusterTaskReconciler) handlePodRunningTask(nifiCluster *v1alpha1.N // If node is disconnected, performing offload if nifiCluster.Status.NodesState[nodeId].GracefulActionState.ActionStep == v1alpha1.DisconnectStatus { - actionStep, taskStartTime, err := scale.OffloadClusterNode(r.Client, nifiCluster, nodeId) + actionStep, taskStartTime, err := scale.OffloadClusterNode(clientConfig, nodeId) if err != nil { r.Log.Info(fmt.Sprintf("nifi cluster communication error during removing node id: %s", nodeId)) return errorfactory.New(errorfactory.NifiClusterNotReady{}, err, fmt.Sprintf("node id: %s", nodeId)) @@ -292,7 +321,7 @@ func (r *NifiClusterTaskReconciler) handlePodRunningTask(nifiCluster *v1alpha1.N // If pod finished deletion // TODO : work here to manage node Status and state (If disconnected && Removing) if nifiCluster.Status.NodesState[nodeId].GracefulActionState.ActionStep == v1alpha1.RemovePodStatus { - actionStep, taskStartTime, err := scale.RemoveClusterNode(r.Client, nifiCluster, nodeId) + actionStep, taskStartTime, err := scale.RemoveClusterNode(clientConfig, nodeId) if err != nil { r.Log.Info(fmt.Sprintf("nifi cluster communication error during removing node id: %s", nodeId)) return errorfactory.New(errorfactory.NifiClusterNotReady{}, err, fmt.Sprintf("node id: %s", nodeId)) @@ -318,10 +347,24 @@ func (r *NifiClusterTaskReconciler) handlePodRunningTask(nifiCluster *v1alpha1.N } func (r *NifiClusterTaskReconciler) checkNCActionStep(nodeId string, nifiCluster *v1alpha1.NifiCluster, actionStep v1alpha1.ActionStep, state *v1alpha1.State) error { + // Prepare cluster connection configurations + var 
clientConfig *clientconfig.NifiConfig + var err error + + // Get the client config manager associated to the cluster ref. + clusterRef := v1alpha1.ClusterReference{ + Name: nifiCluster.Name, + Namespace: nifiCluster.Namespace, + } + configManager := config.GetClientConfigManager(r.Client, clusterRef) + if clientConfig, err = configManager.BuildConfig(); err != nil { + return err + } + nodeState := nifiCluster.Status.NodesState[nodeId] // Check Nifi cluster action status - finished, err := scale.CheckIfNCActionStepFinished(nodeState.GracefulActionState.ActionStep, r.Client, nifiCluster, nodeId) + finished, err := scale.CheckIfNCActionStepFinished(nodeState.GracefulActionState.ActionStep, clientConfig, nodeId) if err != nil { r.Log.Info(fmt.Sprintf("Nifi cluster communication error checking running task: %s", nodeState.GracefulActionState.ActionStep)) return errorfactory.New(errorfactory.NifiClusterNotReady{}, err, "nifi cluster communication error") @@ -361,7 +404,7 @@ func (r *NifiClusterTaskReconciler) checkNCActionStep(nodeId string, nifiCluster r.Log.Info(fmt.Sprintf("Rollback nifi cluster task: %s", nodeState.GracefulActionState.ActionStep)) - actionStep, taskStartTime, err := scale.ConnectClusterNode(r.Client, nifiCluster, nodeId) + actionStep, taskStartTime, err := scale.ConnectClusterNode(clientConfig, nodeId) timedOutNodeNCState := v1alpha1.GracefulActionState{ State: requiredNCState, diff --git a/controllers/nifidataflow_controller.go b/controllers/nifidataflow_controller.go index 520235e33..a95eea434 100644 --- a/controllers/nifidataflow_controller.go +++ b/controllers/nifidataflow_controller.go @@ -100,8 +100,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request original := &v1alpha1.NifiDataflow{} current := instance.DeepCopy() json.Unmarshal(o, original) - if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{original.Spec.ClusterRef, instance.Spec.ClusterRef}) && - original.Spec.ClusterRef.IsSet() { + if 
!v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{original.Spec.ClusterRef, instance.Spec.ClusterRef}) { instance.Spec.ClusterRef = original.Spec.ClusterRef } @@ -190,7 +189,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request // Generate the connect object if clusterConnect, err = configManager.BuildConnect(); err != nil { - if !configManager.IsExternal() { + if !clusterConnect.IsExternal() { // This shouldn't trigger anymore, but leaving it here as a safetybelt if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { r.Log.Info("Cluster is already gone, there is nothing we can do") @@ -236,7 +235,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request } // Ensure the cluster is ready to receive actions - if !clusterConnect.IsReady() { + if !clusterConnect.IsReady(r.Log) { r.Log.Info("Cluster is not ready yet, will wait until it is.") r.Recorder.Event(instance, corev1.EventTypeNormal, "ReferenceClusterNotReady", fmt.Sprintf("The referenced cluster is not ready yet : %s in %s", diff --git a/controllers/nifiparametercontext_controller.go b/controllers/nifiparametercontext_controller.go index 5973d457d..d35523b05 100644 --- a/controllers/nifiparametercontext_controller.go +++ b/controllers/nifiparametercontext_controller.go @@ -99,8 +99,7 @@ func (r *NifiParameterContextReconciler) Reconcile(ctx context.Context, req ctrl original := &v1alpha1.NifiParameterContext{} current := instance.DeepCopy() json.Unmarshal(o, original) - if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{original.Spec.ClusterRef, instance.Spec.ClusterRef}) && - original.Spec.ClusterRef.IsSet() { + if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{original.Spec.ClusterRef, instance.Spec.ClusterRef}) { instance.Spec.ClusterRef = original.Spec.ClusterRef } @@ -136,7 +135,7 @@ func (r *NifiParameterContextReconciler) Reconcile(ctx context.Context, req ctrl // Generate the connect object if clusterConnect, err = 
configManager.BuildConnect(); err != nil { - if !configManager.IsExternal() { + if !clusterConnect.IsExternal() { // This shouldn't trigger anymore, but leaving it here as a safetybelt if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { r.Log.Info("Cluster is already gone, there is nothing we can do") @@ -151,7 +150,7 @@ func (r *NifiParameterContextReconciler) Reconcile(ctx context.Context, req ctrl return RequeueWithError(r.Log, "could not apply last state to annotation", err) } if err := r.Client.Update(ctx, current); err != nil { - return RequeueWithError(r.Log, "failed to update NifiRegistryClient", err) + return RequeueWithError(r.Log, "failed to update NifiParameterContext", err) } return RequeueAfter(time.Duration(15) * time.Second) } @@ -181,7 +180,7 @@ func (r *NifiParameterContextReconciler) Reconcile(ctx context.Context, req ctrl } // Ensure the cluster is ready to receive actions - if !clusterConnect.IsReady() { + if !clusterConnect.IsReady(r.Log) { r.Log.Info("Cluster is not ready yet, will wait until it is.") r.Recorder.Event(instance, corev1.EventTypeNormal, "ReferenceClusterNotReady", fmt.Sprintf("The referenced cluster is not ready yet : %s in %s", @@ -205,7 +204,7 @@ func (r *NifiParameterContextReconciler) Reconcile(ctx context.Context, req ctrl return RequeueWithError(r.Log, "could not apply last state to annotation", err) } if err := r.Client.Update(ctx, current); err != nil { - return RequeueWithError(r.Log, "failed to update NifiRegistryClient", err) + return RequeueWithError(r.Log, "failed to update NifiParameterContext", err) } return RequeueAfter(time.Duration(15) * time.Second) } @@ -213,7 +212,7 @@ func (r *NifiParameterContextReconciler) Reconcile(ctx context.Context, req ctrl r.Recorder.Event(instance, corev1.EventTypeNormal, "Reconciling", fmt.Sprintf("Reconciling parameter context %s", instance.Name)) - // Check if the NiFi registry client already exist + // Check if the NiFi parameter context already exist exist, err := 
parametercontext.ExistParameterContext(instance, clientConfig) if err != nil { return RequeueWithError(r.Log, "failure checking for existing parameter context", err) @@ -349,7 +348,7 @@ func (r *NifiParameterContextReconciler) finalizeNifiParameterContext( if err := parametercontext.RemoveParameterContext(parameterContext, parameterSecrets, config); err != nil { return err } - r.Log.Info("Delete Registry client") + r.Log.Info("Delete NifiParameter Context") return nil } diff --git a/controllers/nifiregistryclient_controller.go b/controllers/nifiregistryclient_controller.go index 33ca0abf0..188e08bbf 100644 --- a/controllers/nifiregistryclient_controller.go +++ b/controllers/nifiregistryclient_controller.go @@ -97,8 +97,7 @@ func (r *NifiRegistryClientReconciler) Reconcile(ctx context.Context, req ctrl.R original := &v1alpha1.NifiRegistryClient{} current := instance.DeepCopy() json.Unmarshal(o, original) - if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{original.Spec.ClusterRef, instance.Spec.ClusterRef}) && - original.Spec.ClusterRef.IsSet() { + if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{original.Spec.ClusterRef, instance.Spec.ClusterRef}) { instance.Spec.ClusterRef = original.Spec.ClusterRef } @@ -113,7 +112,7 @@ func (r *NifiRegistryClientReconciler) Reconcile(ctx context.Context, req ctrl.R // Generate the connect object if clusterConnect, err = configManager.BuildConnect(); err != nil { - if !configManager.IsExternal() { + if !clusterConnect.IsExternal() { // This shouldn't trigger anymore, but leaving it here as a safetybelt if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { r.Log.Info("Cluster is already gone, there is nothing we can do") @@ -157,7 +156,7 @@ func (r *NifiRegistryClientReconciler) Reconcile(ctx context.Context, req ctrl.R } // Ensure the cluster is ready to receive actions - if !clusterConnect.IsReady() { + if !clusterConnect.IsReady(r.Log) { r.Log.Info("Cluster is not ready yet, will wait until it is.") 
r.Recorder.Event(instance, corev1.EventTypeNormal, "ReferenceClusterNotReady", fmt.Sprintf("The referenced cluster is not ready yet : %s in %s", diff --git a/controllers/nifiuser_controller.go b/controllers/nifiuser_controller.go index f05b7742a..bd170092b 100644 --- a/controllers/nifiuser_controller.go +++ b/controllers/nifiuser_controller.go @@ -103,8 +103,7 @@ func (r *NifiUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c original := &v1alpha1.NifiUser{} current := instance.DeepCopy() json.Unmarshal(o, original) - if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{original.Spec.ClusterRef, instance.Spec.ClusterRef}) && - original.Spec.ClusterRef.IsSet() { + if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{original.Spec.ClusterRef, instance.Spec.ClusterRef}) { instance.Spec.ClusterRef = original.Spec.ClusterRef } @@ -119,7 +118,7 @@ func (r *NifiUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c // Generate the connect object if clusterConnect, err = configManager.BuildConnect(); err != nil { - if !configManager.IsExternal() { + if !clusterConnect.IsExternal() { // This shouldn't trigger anymore, but leaving it here as a safetybelt if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { r.Log.Info("Cluster is gone already, there is nothing we can do") @@ -148,7 +147,7 @@ func (r *NifiUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c } // Get the referenced NifiCluster - if !configManager.IsExternal() { + if !clusterConnect.IsExternal() { var cluster *v1alpha1.NifiCluster if cluster, err = k8sutil.LookupNifiCluster(r.Client, instance.Spec.ClusterRef.Name, clusterRef.Namespace); err != nil { // This shouldn't trigger anymore, but leaving it here as a safetybelt @@ -239,7 +238,7 @@ func (r *NifiUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c } // Ensure the cluster is ready to receive actions - if !clusterConnect.IsReady() { + if !clusterConnect.IsReady(r.Log) { 
r.Log.Info("Cluster is not ready yet, will wait until it is.") r.Recorder.Event(instance, corev1.EventTypeNormal, "ReferenceClusterNotReady", fmt.Sprintf("The referenced cluster is not ready yet : %s in %s", diff --git a/controllers/nifiusergroup_controller.go b/controllers/nifiusergroup_controller.go index 5b9962a7c..837435ec1 100644 --- a/controllers/nifiusergroup_controller.go +++ b/controllers/nifiusergroup_controller.go @@ -98,8 +98,7 @@ func (r *NifiUserGroupReconciler) Reconcile(ctx context.Context, req ctrl.Reques original := &v1alpha1.NifiUserGroup{} current := instance.DeepCopy() json.Unmarshal(o, original) - if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{original.Spec.ClusterRef, instance.Spec.ClusterRef}) && - original.Spec.ClusterRef.IsSet() { + if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{original.Spec.ClusterRef, instance.Spec.ClusterRef}) { instance.Spec.ClusterRef = original.Spec.ClusterRef } @@ -153,7 +152,7 @@ func (r *NifiUserGroupReconciler) Reconcile(ctx context.Context, req ctrl.Reques // Generate the connect object if clusterConnect, err = configManager.BuildConnect(); err != nil { - if !configManager.IsExternal() { + if !clusterConnect.IsExternal() { // This shouldn't trigger anymore, but leaving it here as a safetybelt if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { r.Log.Info("Cluster is already gone, there is nothing we can do") @@ -199,7 +198,7 @@ func (r *NifiUserGroupReconciler) Reconcile(ctx context.Context, req ctrl.Reques } // Ensure the cluster is ready to receive actions - if !clusterConnect.IsReady() { + if !clusterConnect.IsReady(r.Log) { r.Log.Info("Cluster is not ready yet, will wait until it is.") r.Recorder.Event(instance, corev1.EventTypeNormal, "ReferenceClusterNotReady", fmt.Sprintf("The referenced cluster is not ready yet : %s in %s", diff --git a/go.mod b/go.mod index 37b16d3cd..6453b8bed 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,7 @@ require ( emperror.dev/errors v0.4.2 
github.com/antihax/optional v1.0.0 github.com/banzaicloud/k8s-objectmatcher v1.4.1 + github.com/dgrijalva/jwt-go v3.2.0+incompatible github.com/erdrix/nigoapi v0.0.0-20210322100900-9bf87aec43d9 // github.com/erdrix/nigoapi v0.0.0-20210301104455-ab202e217b78 github.com/go-logr/logr v0.3.0 diff --git a/pkg/clientwrappers/scale/scale.go b/pkg/clientwrappers/scale/scale.go index 19e26bd9a..4be587c15 100644 --- a/pkg/clientwrappers/scale/scale.go +++ b/pkg/clientwrappers/scale/scale.go @@ -16,6 +16,7 @@ package scale import ( "fmt" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" ctrl "sigs.k8s.io/controller-runtime" "time" @@ -24,7 +25,6 @@ import ( "github.com/Orange-OpenSource/nifikop/pkg/common" "github.com/Orange-OpenSource/nifikop/pkg/nificlient" nifiutil "github.com/Orange-OpenSource/nifikop/pkg/util/nifi" - "sigs.k8s.io/controller-runtime/pkg/client" ) var log = ctrl.Log.WithName("scale-methods") @@ -39,7 +39,7 @@ func UpScaleCluster(nodeId, namespace, clusterName string) (v1alpha1.ActionStep, } // DisconnectClusterNode, perform a node disconnection -func DisconnectClusterNode(client client.Client, cluster *v1alpha1.NifiCluster, nodeId string) (v1alpha1.ActionStep, string, error) { +func DisconnectClusterNode(config *clientconfig.NifiConfig, nodeId string) (v1alpha1.ActionStep, string, error) { var err error // Extract nifi node Id, from nifi node address. @@ -48,7 +48,7 @@ func DisconnectClusterNode(client client.Client, cluster *v1alpha1.NifiCluster, return "", "", err } - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return "", "", err } @@ -65,7 +65,7 @@ func DisconnectClusterNode(client client.Client, cluster *v1alpha1.NifiCluster, } // OffloadCluster, perform offload data from a node. 
-func OffloadClusterNode(client client.Client, cluster *v1alpha1.NifiCluster, nodeId string) (v1alpha1.ActionStep, string, error) { +func OffloadClusterNode(config *clientconfig.NifiConfig, nodeId string) (v1alpha1.ActionStep, string, error) { var err error // Extract nifi node Id, from nifi node address. @@ -74,7 +74,7 @@ func OffloadClusterNode(client client.Client, cluster *v1alpha1.NifiCluster, nod return "", "", err } - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return "", "", err } @@ -91,7 +91,7 @@ func OffloadClusterNode(client client.Client, cluster *v1alpha1.NifiCluster, nod } // ConnectClusterNode, perform node connection. -func ConnectClusterNode(client client.Client, cluster *v1alpha1.NifiCluster, nodeId string) (v1alpha1.ActionStep, string, error) { +func ConnectClusterNode(config *clientconfig.NifiConfig, nodeId string) (v1alpha1.ActionStep, string, error) { var err error // Extract nifi node Id, from nifi node address. @@ -100,7 +100,7 @@ func ConnectClusterNode(client client.Client, cluster *v1alpha1.NifiCluster, nod return "", "", err } - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return "", "", err } @@ -117,7 +117,7 @@ func ConnectClusterNode(client client.Client, cluster *v1alpha1.NifiCluster, nod } // RemoveClusterNode, perform node connection. -func RemoveClusterNode(client client.Client, cluster *v1alpha1.NifiCluster, nodeId string) (v1alpha1.ActionStep, string, error) { +func RemoveClusterNode(config *clientconfig.NifiConfig, nodeId string) (v1alpha1.ActionStep, string, error) { var err error // Extract NiFi node Id, from NiFi node address. 
@@ -126,7 +126,7 @@ func RemoveClusterNode(client client.Client, cluster *v1alpha1.NifiCluster, node return "", "", err } - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return "", "", err } @@ -149,7 +149,7 @@ func RemoveClusterNode(client client.Client, cluster *v1alpha1.NifiCluster, node // TODO : rework to check if state is consistent (If waiting removing but disconnected ... // CheckIfCCTaskFinished checks whether the given CC Task ID finished or not // headlessServiceEnabled bool, availableNodes []v1alpha1.Node, serverPort int32, nodeId, namespace, clusterName string -func CheckIfNCActionStepFinished(actionStep v1alpha1.ActionStep, client client.Client, cluster *v1alpha1.NifiCluster, nodeId string) (bool, error) { +func CheckIfNCActionStepFinished(actionStep v1alpha1.ActionStep, config *clientconfig.NifiConfig, nodeId string) (bool, error) { var err error // Extract nifi node Id, from nifi node address. 
@@ -158,7 +158,7 @@ func CheckIfNCActionStepFinished(actionStep v1alpha1.ActionStep, client client.C return false, err } - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return false, err } @@ -195,10 +195,10 @@ func CheckIfNCActionStepFinished(actionStep v1alpha1.ActionStep, client client.C return false, nil } -func EnsureRemovedNodes(client client.Client, cluster *v1alpha1.NifiCluster) error { +func EnsureRemovedNodes(config *clientconfig.NifiConfig, cluster *v1alpha1.NifiCluster) error { var err error - nClient, err := common.NewNodeConnection(log, client, cluster) + nClient, err := common.NewClusterConnection(log, config) if err != nil { return err } diff --git a/pkg/common/common.go b/pkg/common/common.go index 54791524c..491488af2 100644 --- a/pkg/common/common.go +++ b/pkg/common/common.go @@ -1,30 +1,46 @@ package common import ( - "fmt" - "github.com/Orange-OpenSource/nifikop/api/v1alpha1" "github.com/Orange-OpenSource/nifikop/pkg/nificlient" "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" "github.com/go-logr/logr" - "sigs.k8s.io/controller-runtime/pkg/client" ) -// NewNifiFromCluster points to the function for retrieving nifi clients, -// use as var so it can be overwritten from unit tests -var NewNifiFromCluster = nificlient.NewFromCluster - -// newNodeConnection is a convenience wrapper for creating a node connection -// and creating a safer close function -func NewNodeConnection(log logr.Logger, client client.Client, cluster *v1alpha1.NifiCluster) (node nificlient.NifiClient, err error) { - - // Get a nifi connection - log.Info(fmt.Sprintf("Retrieving Nifi client for %s/%s", cluster.Namespace, cluster.Name)) - node, err = NewNifiFromCluster(client, cluster) - if err != nil { - return - } - return -} +//// NewFromCluster is a convenient wrapper around New() and ClusterConfig() +//func NewFromCluster(k8sclient client.Client, cluster 
*v1alpha1.NifiCluster) (nificlient.NifiClient, error) { +// var client nificlient.NifiClient +// var err error +// var opts *clientconfig.NifiConfig +// +// if opts, err = tls.New(k8sclient, +// v1alpha1.ClusterReference{Name: cluster.Name, Namespace: cluster.Namespace}).BuildConfig(); err != nil { +// return nil, err +// } +// client = nificlient.New(opts) +// err = client.Build() +// if err != nil { +// return nil, err +// } +// +// return client, nil +//} +// +//// NewNifiFromCluster points to the function for retrieving nifi clients, +//// use as var so it can be overwritten from unit tests +//var NewNifiFromCluster = NewFromCluster +// +//// newNodeConnection is a convenience wrapper for creating a node connection +//// and creating a safer close function +//func NewNodeConnection(log logr.Logger, client client.Client, cluster *v1alpha1.NifiCluster) (node nificlient.NifiClient, err error) { +// +// // Get a nifi connection +// log.Info(fmt.Sprintf("Retrieving Nifi client for %s/%s", cluster.Namespace, cluster.Name)) +// node, err = NewNifiFromCluster(client, cluster) +// if err != nil { +// return +// } +// return +//} // NewNifiFromCluster points to the function for retrieving nifi clients, // use as var so it can be overwritten from unit tests diff --git a/pkg/k8sutil/resource.go b/pkg/k8sutil/resource.go index 70edd759b..0fe9eb91f 100644 --- a/pkg/k8sutil/resource.go +++ b/pkg/k8sutil/resource.go @@ -90,21 +90,23 @@ func Reconcile(log logr.Logger, client runtimeClient.Client, desired runtimeClie if err := client.Update(context.TODO(), desired); err != nil { return errorfactory.New(errorfactory.APIFailure{}, err, "updating resource failed", "kind", desiredType) } - switch desired.(type) { - case *corev1.ConfigMap: - // Only update status when configmap belongs to node - if id, ok := desired.(*corev1.ConfigMap).Labels["nodeId"]; ok { - statusErr := UpdateNodeStatus(client, []string{id}, cr, v1alpha1.ConfigOutOfSync, log) - if statusErr != nil { - return 
errors.WrapIfWithDetails(err, "updating status for resource failed", "kind", desiredType) + if cr != nil { + switch desired.(type) { + case *corev1.ConfigMap: + // Only update status when configmap belongs to node + if id, ok := desired.(*corev1.ConfigMap).Labels["nodeId"]; ok { + statusErr := UpdateNodeStatus(client, []string{id}, cr, v1alpha1.ConfigOutOfSync, log) + if statusErr != nil { + return errors.WrapIfWithDetails(err, "updating status for resource failed", "kind", desiredType) + } } - } - case *corev1.Secret: - // Only update status when secret belongs to node - if id, ok := desired.(*corev1.Secret).Labels["nodeId"]; ok { - statusErr := UpdateNodeStatus(client, []string{id}, cr, v1alpha1.ConfigOutOfSync, log) - if statusErr != nil { - return errors.WrapIfWithDetails(err, "updating status for resource failed", "kind", desiredType) + case *corev1.Secret: + // Only update status when secret belongs to node + if id, ok := desired.(*corev1.Secret).Labels["nodeId"]; ok { + statusErr := UpdateNodeStatus(client, []string{id}, cr, v1alpha1.ConfigOutOfSync, log) + if statusErr != nil { + return errors.WrapIfWithDetails(err, "updating status for resource failed", "kind", desiredType) + } } } } diff --git a/pkg/nificlient/access.go b/pkg/nificlient/access.go new file mode 100644 index 000000000..a715a2dba --- /dev/null +++ b/pkg/nificlient/access.go @@ -0,0 +1,42 @@ +// Copyright 2020 Orange SA +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License.package apis + +package nificlient + +import ( + "github.com/antihax/optional" + nigoapi "github.com/erdrix/nigoapi/pkg/nifi" +) + +func (n *nifiClient) CreateAccessTokenUsingBasicAuth(username, password string, nodeId int32) (*string, error) { + // Get nigoapi client, favoring the one associated to the coordinator node. + // @TODO : force the targeted host, or recreate token for all nodes + client := n.nodeClient[nodeId] + context := n.opts.NodesContext[nodeId] + if client == nil { + log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") + return nil, ErrNoNodeClientsAvailable + } + + // Request on Nifi Rest API to get the reporting task informations + _, rsp, body, err := client.AccessApi.CreateAccessToken(context, &nigoapi.AccessApiCreateAccessTokenOpts{ + Username: optional.NewString(username), + Password: optional.NewString(password), + }) + + if err := errorCreateOperation(rsp, body, err); err != nil { + return nil, err + } + return body, nil +} diff --git a/pkg/nificlient/client.go b/pkg/nificlient/client.go index f07a7bac1..61cb60423 100644 --- a/pkg/nificlient/client.go +++ b/pkg/nificlient/client.go @@ -14,13 +14,12 @@ package nificlient import ( + "context" "fmt" - "github.com/Orange-OpenSource/nifikop/api/v1alpha1" - "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config/nificluster" "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" "net/http" + "net/url" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" "time" "github.com/Orange-OpenSource/nifikop/pkg/errorfactory" @@ -52,8 +51,12 @@ const ( // NiFiClient is the exported interface for NiFi operations type NifiClient interface { + // Access func + CreateAccessTokenUsingBasicAuth(username, password string, nodeId int32) (*string, error) + // System func DescribeCluster() (*nigoapi.ClusterEntity, error) + 
DescribeClusterFromNodeId(nodeId int32) (*nigoapi.ClusterEntity, error) DisconnectClusterNode(nId int32) (*nigoapi.NodeEntity, error) ConnectClusterNode(nId int32) (*nigoapi.NodeEntity, error) OffloadClusterNode(nId int32) (*nigoapi.NodeEntity, error) @@ -173,13 +176,15 @@ func (n *nifiClient) Build() error { n.nodeClient[nodeId] = n.newClient(nodeConfig) } - clusterEntity, err := n.DescribeCluster() - if err != nil || clusterEntity == nil || clusterEntity.Cluster == nil { - err = errorfactory.New(errorfactory.NodesUnreachable{}, err, fmt.Sprintf("could not connect to nifi nodes: %s", n.opts.NifiURI)) - return err - } + if !n.opts.SkipDescribeCluster { + clusterEntity, err := n.DescribeCluster() + if err != nil || clusterEntity == nil || clusterEntity.Cluster == nil { + err = errorfactory.New(errorfactory.NodesUnreachable{}, err, fmt.Sprintf("could not connect to nifi nodes: %s", n.opts.NifiURI)) + return err + } - n.nodes = clusterEntity.Cluster.Nodes + n.nodes = clusterEntity.Cluster.Nodes + } return nil } @@ -198,39 +203,42 @@ func NewFromConfig(opts *clientconfig.NifiConfig) (NifiClient, error) { return client, nil } -// NewFromCluster is a convenient wrapper around New() and ClusterConfig() -func NewFromCluster(k8sclient client.Client, cluster *v1alpha1.NifiCluster) (NifiClient, error) { - var client NifiClient - var err error - var opts *clientconfig.NifiConfig - - if opts, err = nificluster.New(k8sclient, - v1alpha1.ClusterReference{Name: cluster.Name, Namespace: cluster.Namespace}).BuildConfig(); err != nil { - return nil, err - } - client = New(opts) - err = client.Build() - if err != nil { - return nil, err - } - - return client, nil -} - func (n *nifiClient) getNifiGoApiConfig() (config *nigoapi.Configuration) { config = nigoapi.NewConfiguration() protocol := "http" + var transport *http.Transport = nil if n.opts.UseSSL { + transport = &http.Transport{} config.Scheme = "HTTPS" n.opts.TLSConfig.BuildNameToCertificate() - transport := 
&http.Transport{TLSClientConfig: n.opts.TLSConfig} - config.HTTPClient = &http.Client{Transport: transport} + transport.TLSClientConfig = n.opts.TLSConfig protocol = "https" } + + if len(n.opts.ProxyUrl) > 0 { + proxyUrl, err := url.Parse(n.opts.ProxyUrl) + if err == nil { + if transport == nil { + transport = &http.Transport{} + } + transport.Proxy = http.ProxyURL(proxyUrl) + } + } + + config.HTTPClient = &http.Client{} + if transport != nil { + config.HTTPClient = &http.Client{Transport: transport} + } + config.BasePath = fmt.Sprintf("%s://%s/nifi-api", protocol, n.opts.NifiURI) config.Host = n.opts.NifiURI - + if len(n.opts.NifiURI) == 0 { + for nodeId, _ := range n.opts.NodesURI { + config.BasePath = fmt.Sprintf("%s://%s/nifi-api", protocol, n.opts.NodesURI[nodeId].RequestHost) + config.Host = n.opts.NodesURI[nodeId].RequestHost + } + } return } @@ -239,36 +247,55 @@ func (n *nifiClient) getNiNodeGoApiConfig(nodeId int32) (config *nigoapi.Configu config.HTTPClient = &http.Client{} protocol := "http" + var transport *http.Transport = nil if n.opts.UseSSL { + transport = &http.Transport{} config.Scheme = "HTTPS" n.opts.TLSConfig.BuildNameToCertificate() - transport := &http.Transport{TLSClientConfig: n.opts.TLSConfig} - config.HTTPClient = &http.Client{Transport: transport} + transport.TLSClientConfig = n.opts.TLSConfig protocol = "https" } + + if n.opts.ProxyUrl != "" { + proxyUrl, err := url.Parse(n.opts.ProxyUrl) + if err == nil { + if transport == nil { + transport = &http.Transport{} + } + transport.Proxy = http.ProxyURL(proxyUrl) + } + } + config.HTTPClient = &http.Client{} + if transport != nil { + config.HTTPClient = &http.Client{Transport: transport} + } + config.BasePath = fmt.Sprintf("%s://%s/nifi-api", protocol, n.opts.NodesURI[nodeId].RequestHost) - config.Host = n.opts.NifiURI + config.Host = n.opts.NodesURI[nodeId].RequestHost + if len(n.opts.NifiURI) != 0 { + config.Host = n.opts.NifiURI + } return } -func (n *nifiClient) 
privilegeCoordinatorClient() *nigoapi.APIClient { +func (n *nifiClient) privilegeCoordinatorClient() (*nigoapi.APIClient, context.Context) { if clientId := n.coordinatorNodeId(); clientId != nil { - return n.nodeClient[*clientId] + return n.nodeClient[*clientId], n.opts.NodesContext[*clientId] } if clientId := n.privilegeNodeClient(); clientId != nil { - return n.nodeClient[*clientId] + return n.nodeClient[*clientId], n.opts.NodesContext[*clientId] } - return n.client + return n.client, nil } -func (n *nifiClient) privilegeCoordinatorExceptNodeIdClient(nId int32) *nigoapi.APIClient { +func (n *nifiClient) privilegeCoordinatorExceptNodeIdClient(nId int32) (*nigoapi.APIClient, context.Context) { nodeDto := n.nodeDtoByNodeId(nId) if nodeDto == nil || isCoordinator(nodeDto) { if clientId := n.firstConnectedNodeId(nId); clientId != nil { - return n.nodeClient[*clientId] + return n.nodeClient[*clientId], n.opts.NodesContext[*clientId] } } diff --git a/pkg/nificlient/client_test.go b/pkg/nificlient/client_test.go index 4e6b14504..f04170d75 100644 --- a/pkg/nificlient/client_test.go +++ b/pkg/nificlient/client_test.go @@ -23,14 +23,13 @@ import ( "github.com/Orange-OpenSource/nifikop/api/v1alpha1" "github.com/Orange-OpenSource/nifikop/pkg/errorfactory" - nifiutil "github.com/Orange-OpenSource/nifikop/pkg/util/nifi" nigoapi "github.com/erdrix/nigoapi/pkg/nifi" "github.com/jarcoal/httpmock" "github.com/stretchr/testify/assert" ) const ( - httpContainerPort int32 = 443 + httpContainerPort int32 = 80 succeededNodeId int32 = 4 clusterName = "test-cluster" @@ -96,26 +95,3 @@ func TestBuild(t *testing.T) { err = client.Build() assert.IsType(errorfactory.NodesUnreachable{}, err) } - -func TestNewFromCluster(t *testing.T) { - httpmock.Activate() - assert := assert.New(t) - - cluster := testClusterMock(t) - - url := fmt.Sprintf("http://%s/nifi-api/controller/cluster", nifiutil.GenerateRequestNiFiAllNodeAddressFromCluster(cluster)) - httpmock.RegisterResponder(http.MethodGet, url, - 
func(req *http.Request) (*http.Response, error) { - return httpmock.NewJsonResponse( - 200, - MockGetClusterResponse(cluster, false)) - }) - - _, err := NewFromCluster(mockClient{}, cluster) - assert.Nil(err) - - httpmock.DeactivateAndReset() - _, err = NewFromCluster(mockClient{}, cluster) - assert.IsType(errorfactory.NodesUnreachable{}, err) - -} diff --git a/pkg/nificlient/config/nificluster/nificluster.go b/pkg/nificlient/config/basic/basic.go similarity index 68% rename from pkg/nificlient/config/nificluster/nificluster.go rename to pkg/nificlient/config/basic/basic.go index d38d7ef40..7ad7caa63 100644 --- a/pkg/nificlient/config/nificluster/nificluster.go +++ b/pkg/nificlient/config/basic/basic.go @@ -1,4 +1,4 @@ -package nificluster +package basic import ( "github.com/Orange-OpenSource/nifikop/api/v1alpha1" @@ -6,15 +6,15 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -type NifiCluster interface { +type Basic interface { clientconfig.Manager } -type nifiCluster struct { +type basic struct { client client.Client clusterRef v1alpha1.ClusterReference } -func New(client client.Client, clusterRef v1alpha1.ClusterReference) NifiCluster { - return &nifiCluster{clusterRef: clusterRef, client: client} +func New(client client.Client, clusterRef v1alpha1.ClusterReference) Basic { + return &basic{clusterRef: clusterRef, client: client} } diff --git a/pkg/nificlient/config/basic/basic_config.go b/pkg/nificlient/config/basic/basic_config.go new file mode 100644 index 000000000..3df1aaa67 --- /dev/null +++ b/pkg/nificlient/config/basic/basic_config.go @@ -0,0 +1,245 @@ +package basic + +import ( + "context" + "crypto/tls" + "crypto/x509" + "emperror.dev/errors" + "fmt" + "github.com/Orange-OpenSource/nifikop/api/v1alpha1" + "github.com/Orange-OpenSource/nifikop/pkg/common" + "github.com/Orange-OpenSource/nifikop/pkg/errorfactory" + "github.com/Orange-OpenSource/nifikop/pkg/k8sutil" + configcommon 
"github.com/Orange-OpenSource/nifikop/pkg/nificlient/config/common" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config/nificluster" + "github.com/Orange-OpenSource/nifikop/pkg/resources/templates" + "github.com/Orange-OpenSource/nifikop/pkg/util" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" + nifiutil "github.com/Orange-OpenSource/nifikop/pkg/util/nifi" + "github.com/dgrijalva/jwt-go" + nigoapi "github.com/erdrix/nigoapi/pkg/nifi" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "strconv" + "strings" + "time" +) + +var log = ctrl.Log.WithName("basic_config") + +func (n *basic) BuildConfig() (*clientconfig.NifiConfig, error) { + var cluster *v1alpha1.NifiCluster + var err error + if cluster, err = k8sutil.LookupNifiCluster(n.client, n.clusterRef.Name, n.clusterRef.Namespace); err != nil { + return nil, err + } + return clusterConfig(n.client, cluster) +} + +func (n *basic) BuildConnect() (cluster clientconfig.ClusterConnect, err error) { + var c *v1alpha1.NifiCluster + if c, err = k8sutil.LookupNifiCluster(n.client, n.clusterRef.Name, n.clusterRef.Namespace); err != nil { + return nil, err + } + + if !c.IsExternal() { + cluster = &nificluster.InternalCluster{ + Name: c.Name, + Namespace: c.Namespace, + Status: c.Status, + } + return + } + + config, err := n.BuildConfig() + cluster = &nificluster.ExternalCluster{ + NodeURITemplate: c.Spec.NodeURITemplate, + NodeIds: util.NodesToIdList(c.Spec.Nodes), + NifiURI: c.Spec.NifiURI, + RootProcessGroupId: c.Spec.RootProcessGroupId, + Name: c.Name, + + NifiConfig: config, + } + + return +} + +type Claims struct { + Username string `json:"username"` + jwt.StandardClaims +} + +func clusterConfig(client client.Client, cluster *v1alpha1.NifiCluster) (*clientconfig.NifiConfig, error) { + conf := configcommon.ClusterConfig(cluster) + + username, 
password, rootCAs, err := GetControllerBasicConfigFromSecret(client, cluster.Spec.SecretRef) + if err != nil { + return conf, err + } + conf.UseSSL = true + conf.TLSConfig = &tls.Config{RootCAs: rootCAs} + conf.SkipDescribeCluster = true + + secretName := fmt.Sprintf(templates.ExternalClusterSecretTemplate, cluster.Name) + basicSecret, err := GetAccessTokenSecret(client, v1alpha1.SecretReference{Name: secretName, Namespace: cluster.Namespace}) + + if basicSecret != nil && err == nil { + invalid := false + for id := range conf.NodesURI { + tokenByte, ok := basicSecret.Data[strconv.FormatInt(int64(id), 10)] + if !ok { + invalid = true + break + } + + var expirationTime float64 + tokenString := string(tokenByte) + if len(tokenString) == 0 { + invalid = true + break + } + + token, _, err := new(jwt.Parser).ParseUnverified(tokenString, jwt.MapClaims{}) + if err != nil { + invalid = true + break + } + + claims, ok := token.Claims.(jwt.MapClaims) + if !ok { + invalid = true + break + } + + expirationTime = claims["exp"].(float64) + if expirationTime < float64(time.Now().Unix()) { + invalid = true + break + } + + conf.SkipDescribeCluster = true + ctx := context.WithValue(context.TODO(), nigoapi.ContextAccessToken, tokenString) + conf.NodesContext[id] = ctx + nClient, err := common.NewClusterConnection(log, conf) + if err != nil { + invalid = true + break + } + _, err = nClient.DescribeClusterFromNodeId(id) + if err != nil { + invalid = true + break + } + } + if !invalid { + conf.SkipDescribeCluster = false + return conf, nil + } + } + + // Create a new access token + err = nil + data := make(map[string][]byte) + for id := range conf.NodesURI { + // Create an unauthenticated client + conf.SkipDescribeCluster = true + conf.NodesContext = make(map[int32]context.Context) + + retry := 0 + for retry < 5 { + nClient, err := common.NewClusterConnection(log, conf) + if err != nil { + return nil, err + } + token, err := nClient.CreateAccessTokenUsingBasicAuth(username, password, id) 
+ if err != nil { + return nil, err + } + ctx := context.WithValue(context.TODO(), nigoapi.ContextAccessToken, *token) + conf.NodesContext[id] = ctx + nClient, err = common.NewClusterConnection(log, conf) + if err != nil { + retry++ + continue + } + _, err = nClient.DescribeClusterFromNodeId(id) + if err != nil { + retry++ + continue + } + data[strconv.FormatInt(int64(id), 10)] = []byte(*token) + retry = 6 + } + if err != nil { + return nil, err + } + } + conf.SkipDescribeCluster = false + // Create a secret containing the created access token + secret := &corev1.Secret{ + ObjectMeta: templates.ObjectMeta( + secretName, + util.MergeLabels( + nifiutil.LabelsForNifi(cluster.Name), + ), + cluster, + ), + Data: data, + } + err = k8sutil.Reconcile(log, client, secret, nil) + if err != nil { + return nil, errors.WrapIfWithDetails(err, "failed to reconcile resource", "resource", secret.GetObjectKind().GroupVersionKind()) + } + + return conf, nil +} + +func GetControllerBasicConfigFromSecret(cli client.Client, ref v1alpha1.SecretReference) (clientUsername, clientPassword string, rootCAs *x509.CertPool, err error) { + basicKeys := &corev1.Secret{} + err = cli.Get(context.TODO(), + types.NamespacedName{ + Namespace: ref.Namespace, + Name: ref.Name, + }, + basicKeys, + ) + if err != nil { + if apierrors.IsNotFound(err) { + err = errorfactory.New(errorfactory.ResourceNotReady{}, err, "controller secret not found") + } + return + } + clientPassword = strings.TrimSuffix(string(basicKeys.Data["password"]), "\n") + clientUsername = strings.TrimSuffix(string(basicKeys.Data["username"]), "\n") + + caCert := basicKeys.Data[v1alpha1.CoreCACertKey] + if len(caCert) != 0 { + rootCAs = x509.NewCertPool() + rootCAs.AppendCertsFromPEM(caCert) + } + + return +} + +func GetAccessTokenSecret(cli client.Client, ref v1alpha1.SecretReference) (*corev1.Secret, error) { + accessToken := &corev1.Secret{} + err := cli.Get(context.TODO(), + types.NamespacedName{ + Namespace: ref.Namespace, + Name: 
ref.Name, + }, + accessToken, + ) + if err != nil { + if apierrors.IsNotFound(err) { + err = errorfactory.New(errorfactory.ResourceNotReady{}, err, "controller secret not found") + } + return nil, err + } + + return accessToken, nil +} diff --git a/pkg/nificlient/config/nificluster/nificluster_config.go b/pkg/nificlient/config/common/common.go similarity index 52% rename from pkg/nificlient/config/nificluster/nificluster_config.go rename to pkg/nificlient/config/common/common.go index 6f2c53380..1c36740c7 100644 --- a/pkg/nificlient/config/nificluster/nificluster_config.go +++ b/pkg/nificlient/config/common/common.go @@ -1,10 +1,12 @@ -package nificluster +package common import ( + "context" + "crypto/tls" "fmt" "github.com/Orange-OpenSource/nifikop/api/v1alpha1" - "github.com/Orange-OpenSource/nifikop/pkg/k8sutil" "github.com/Orange-OpenSource/nifikop/pkg/pki" + "github.com/Orange-OpenSource/nifikop/pkg/pki/certmanagerpki" "github.com/Orange-OpenSource/nifikop/pkg/util" "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" "github.com/Orange-OpenSource/nifikop/pkg/util/nifi" @@ -12,47 +14,49 @@ import ( "strings" ) -func (n *nifiCluster) BuildConfig() (*clientconfig.NifiConfig, error) { - var cluster *v1alpha1.NifiCluster - var err error - if cluster, err = k8sutil.LookupNifiCluster(n.client, n.clusterRef.Name, n.clusterRef.Namespace); err != nil { - return nil, err +func TlsConfig(client client.Client, cluster *v1alpha1.NifiCluster) (config *tls.Config, err error) { + if cluster.IsExternal() { + return certmanagerpki.GetControllerTLSConfigFromSecret(client, cluster.Spec.SecretRef) } - return clusterConfig(n.client, cluster) + return pki.GetPKIManager(client, cluster).GetControllerTLSConfig() } -func (n *nifiCluster) BuildConnect() (cluster clientconfig.ClusterConnect, err error) { - cluster, err = k8sutil.LookupNifiCluster(n.client, n.clusterRef.Name, n.clusterRef.Namespace) - return +func ClusterConfig(cluster *v1alpha1.NifiCluster) 
*clientconfig.NifiConfig { + if cluster.IsExternal() { + return externalClusterConfig(cluster) + } + + return internalClusterConfig(cluster) } -func (n *nifiCluster) IsExternal() bool { - return false +func externalClusterConfig(cluster *v1alpha1.NifiCluster) *clientconfig.NifiConfig { + conf := &clientconfig.NifiConfig{} + ref := cluster.Spec + nodesURI := generateNodesAddressFromTemplate(ref.Nodes, ref.NodeURITemplate) + + conf.RootProcessGroupId = ref.RootProcessGroupId + conf.NodeURITemplate = ref.NodeURITemplate + conf.NodesURI = nodesURI + conf.NifiURI = ref.NifiURI + conf.OperationTimeout = clientconfig.NifiDefaultTimeout + conf.NodesContext = make(map[int32]context.Context) + conf.ProxyUrl = ref.ProxyUrl + conf.UseSSL = true + + return conf } -// ClusterConfig creates connection options from a NifiCluster CR -func clusterConfig(client client.Client, cluster *v1alpha1.NifiCluster) (*clientconfig.NifiConfig, error) { +func internalClusterConfig(cluster *v1alpha1.NifiCluster) *clientconfig.NifiConfig { conf := &clientconfig.NifiConfig{} conf.RootProcessGroupId = cluster.Status.RootProcessGroupId conf.NodeURITemplate = generateNodesURITemplate(cluster) conf.NodesURI = generateNodesAddress(cluster) conf.NifiURI = nifi.GenerateRequestNiFiAllNodeAddressFromCluster(cluster) conf.OperationTimeout = clientconfig.NifiDefaultTimeout - - if cluster.Spec.ListenersConfig.SSLSecrets != nil && UseSSL(cluster) { - tlsConfig, err := pki.GetPKIManager(client, cluster).GetControllerTLSConfig() - if err != nil { - return conf, err - } - conf.UseSSL = true - conf.TLSConfig = tlsConfig - } - return conf, nil -} - -func UseSSL(cluster *v1alpha1.NifiCluster) bool { - return cluster.Spec.ListenersConfig.SSLSecrets != nil + conf.NodesContext = make(map[int32]context.Context) + conf.UseSSL = cluster.Spec.ListenersConfig.SSLSecrets != nil && UseSSL(cluster) + return conf } func generateNodesAddress(cluster *v1alpha1.NifiCluster) map[int32]clientconfig.NodeUri { @@ -79,3 +83,19 @@ func 
generateNodesURITemplate(cluster *v1alpha1.NifiCluster) string { strings.SplitAfterN(nifi.GenerateRequestNiFiNodeAddressFromCluster(0, cluster), ".", 2)[1], ) } + +func generateNodesAddressFromTemplate(nodes []v1alpha1.Node, template string) map[int32]clientconfig.NodeUri { + addresses := make(map[int32]clientconfig.NodeUri) + + for _, node := range nodes { + addresses[node.Id] = clientconfig.NodeUri{ + HostListener: fmt.Sprintf(template, node.Id), + RequestHost: fmt.Sprintf(template, node.Id), + } + } + return addresses +} + +func UseSSL(cluster *v1alpha1.NifiCluster) bool { + return cluster.Spec.ListenersConfig.SSLSecrets != nil +} diff --git a/pkg/nificlient/config/nificluster/nificluster_config_test.go b/pkg/nificlient/config/common/nificluster_config_test.go similarity index 94% rename from pkg/nificlient/config/nificluster/nificluster_config_test.go rename to pkg/nificlient/config/common/nificluster_config_test.go index 179333585..6f8ca6859 100644 --- a/pkg/nificlient/config/nificluster/nificluster_config_test.go +++ b/pkg/nificlient/config/common/nificluster_config_test.go @@ -1,4 +1,4 @@ -package nificluster +package common import ( "fmt" @@ -29,6 +29,7 @@ func testCluster(t *testing.T) *v1alpha1.NifiCluster { cluster.Name = clusterName cluster.Namespace = clusterNamespace cluster.Spec = v1alpha1.NifiClusterSpec{} + cluster.Spec.ListenersConfig = &v1alpha1.ListenersConfig{} cluster.Status.NodesState = make(map[string]v1alpha1.NodeState) cluster.Status.NodesState["1"] = v1alpha1.NodeState{ @@ -82,15 +83,14 @@ func TestClusterConfig(t *testing.T) { func testClusterConfig(t *testing.T, cluster *v1alpha1.NifiCluster, expectedUseSSL bool) { assert := assert.New(t) - conf, err := clusterConfig(mockClient{}, cluster) - assert.Nil(err) + conf := ClusterConfig(cluster) assert.Equal(expectedUseSSL, conf.UseSSL) - if expectedUseSSL { - assert.NotNil(conf.TLSConfig) - } else { - assert.Nil(conf.TLSConfig) - } + //if expectedUseSSL { + // assert.NotNil(conf.TLSConfig) + 
//} else { + // assert.Nil(conf.TLSConfig) + //} assert.Equal( fmt.Sprintf("%s-%s-node.%s.svc.cluster.local:%d", diff --git a/pkg/nificlient/config/config_manager.go b/pkg/nificlient/config/config_manager.go index e7c1e20a4..be748873b 100644 --- a/pkg/nificlient/config/config_manager.go +++ b/pkg/nificlient/config/config_manager.go @@ -2,7 +2,8 @@ package config import ( "github.com/Orange-OpenSource/nifikop/api/v1alpha1" - "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config/nificluster" + "github.com/Orange-OpenSource/nifikop/pkg/k8sutil" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config/basic" "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config/tls" "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" "sigs.k8s.io/controller-runtime/pkg/client" @@ -11,17 +12,16 @@ import ( var MockClientConfig = v1alpha1.ClientConfigType("mock") func GetClientConfigManager(client client.Client, clusterRef v1alpha1.ClusterReference) clientconfig.Manager { - switch clusterRef.Type { - case v1alpha1.ClientConfigNiFiCluster: - return nificluster.New(client, clusterRef) - case v1alpha1.ClientConfigExternalTLS: + cluster, _ := k8sutil.LookupNifiCluster(client, clusterRef.Name, clusterRef.Namespace) + switch cluster.GetClientType() { + case v1alpha1.ClientConfigTLS: return tls.New(client, clusterRef) - //case v1alpha1.ClientConfigExternalBasic: - // return + case v1alpha1.ClientConfigBasic: + return basic.New(client, clusterRef) case MockClientConfig: - return newMockClientConfig(client, clusterRef) + return NewMockClientConfig(client, clusterRef) default: - return nificluster.New(client, clusterRef) + return tls.New(client, clusterRef) } } @@ -32,7 +32,7 @@ type mockClientConfig struct { clusterRef v1alpha1.ClusterReference } -func newMockClientConfig(client client.Client, clusterRef v1alpha1.ClusterReference) clientconfig.Manager { +func NewMockClientConfig(client client.Client, clusterRef v1alpha1.ClusterReference) clientconfig.Manager { return 
&mockClientConfig{client: client, clusterRef: clusterRef} } diff --git a/pkg/nificlient/config/nificluster/externalcluster.go b/pkg/nificlient/config/nificluster/externalcluster.go new file mode 100644 index 000000000..7392d98d4 --- /dev/null +++ b/pkg/nificlient/config/nificluster/externalcluster.go @@ -0,0 +1,54 @@ +package nificluster + +import ( + "fmt" + "github.com/Orange-OpenSource/nifikop/pkg/common" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient" + "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" + "github.com/go-logr/logr" +) + +type ExternalCluster struct { + NodeURITemplate string + NodeIds []int32 + NifiURI string + Name string + RootProcessGroupId string + + NifiConfig *clientconfig.NifiConfig +} + +func (cluster *ExternalCluster) IsExternal() bool { + return true +} + +func (cluster *ExternalCluster) IsInternal() bool { + return false +} + +func (cluster *ExternalCluster) ClusterLabelString() string { + return fmt.Sprintf("%s", cluster.Name) +} + +func (cluster ExternalCluster) IsReady(log logr.Logger) bool { + nClient, err := common.NewClusterConnection(log, cluster.NifiConfig) + if err != nil { + return false + } + + clusterEntity, err := nClient.DescribeCluster() + if err != nil { + return false + } + + for _, node := range clusterEntity.Cluster.Nodes { + if node.Status != nificlient.CONNECTED_STATUS { + return false + } + } + return true +} + +func (cluster *ExternalCluster) Id() string { + return cluster.Name +} diff --git a/pkg/nificlient/config/nificluster/internalcluster.go b/pkg/nificlient/config/nificluster/internalcluster.go new file mode 100644 index 000000000..56c0696d0 --- /dev/null +++ b/pkg/nificlient/config/nificluster/internalcluster.go @@ -0,0 +1,39 @@ +package nificluster + +import ( + "fmt" + "github.com/Orange-OpenSource/nifikop/api/v1alpha1" + "github.com/go-logr/logr" +) + +type InternalCluster struct { + Status v1alpha1.NifiClusterStatus + Name string + Namespace string +} + +func (cluster 
*InternalCluster) ClusterLabelString() string { + return fmt.Sprintf("%s.%s", cluster.Name, cluster.Namespace) +} + +func (c *InternalCluster) IsInternal() bool { + return true +} + +func (c InternalCluster) IsExternal() bool { + return false +} + +func (c InternalCluster) IsReady(log logr.Logger) bool { + for _, nodeState := range c.Status.NodesState { + if nodeState.ConfigurationState != v1alpha1.ConfigInSync || nodeState.GracefulActionState.State != v1alpha1.GracefulUpscaleSucceeded || + !nodeState.PodIsReady { + return false + } + } + return c.Status.State.IsReady() +} + +func (c *InternalCluster) Id() string { + return c.Name +} diff --git a/pkg/nificlient/config/tls/tls_config.go b/pkg/nificlient/config/tls/tls_config.go index eadafaa1c..2365659db 100644 --- a/pkg/nificlient/config/tls/tls_config.go +++ b/pkg/nificlient/config/tls/tls_config.go @@ -1,11 +1,11 @@ package tls import ( - "fmt" "github.com/Orange-OpenSource/nifikop/api/v1alpha1" - "github.com/Orange-OpenSource/nifikop/pkg/common" - "github.com/Orange-OpenSource/nifikop/pkg/nificlient" - "github.com/Orange-OpenSource/nifikop/pkg/pki/certmanagerpki" + "github.com/Orange-OpenSource/nifikop/pkg/k8sutil" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config/common" + "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config/nificluster" + "github.com/Orange-OpenSource/nifikop/pkg/util" "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -14,97 +14,53 @@ import ( var log = ctrl.Log.WithName("tls_config") func (n *tls) BuildConfig() (*clientconfig.NifiConfig, error) { - return clusterConfig(n.client, n.clusterRef) -} - -func (n *tls) BuildConnect() (cluster clientconfig.ClusterConnect, err error) { - config, err := n.BuildConfig() - cluster = &ExternalTLSCluster{ - NodeURITemplate: n.clusterRef.NodeURITemplate, - NodeIds: n.clusterRef.NodeIds, - NifiURI: n.clusterRef.NifiURI, - RootProcessGroupId: 
n.clusterRef.RootProcessGroupId, - - nifiConfig: config, + var cluster *v1alpha1.NifiCluster + var err error + if cluster, err = k8sutil.LookupNifiCluster(n.client, n.clusterRef.Name, n.clusterRef.Namespace); err != nil { + return nil, err } - return -} - -func (n *tls) IsExternal() bool { - return false + return clusterConfig(n.client, cluster) } -func clusterConfig(client client.Client, ref v1alpha1.ClusterReference) (*clientconfig.NifiConfig, error) { - nodesURI := generateNodesAddressFromTemplate(ref.NodeIds, ref.NodeURITemplate) - - conf := &clientconfig.NifiConfig{} - conf.RootProcessGroupId = ref.RootProcessGroupId - conf.NodeURITemplate = ref.NodeURITemplate - conf.NodesURI = nodesURI - conf.NifiURI = ref.NifiURI - conf.OperationTimeout = clientconfig.NifiDefaultTimeout - - tlsConfig, err := certmanagerpki.GetControllerTLSConfigFromSecret(client, ref.SecretRef) - if err != nil { - return conf, err +func (n *tls) BuildConnect() (cluster clientconfig.ClusterConnect, err error) { + var c *v1alpha1.NifiCluster + if c, err = k8sutil.LookupNifiCluster(n.client, n.clusterRef.Name, n.clusterRef.Namespace); err != nil { + return nil, err } - conf.UseSSL = true - conf.TLSConfig = tlsConfig - return conf, nil -} - -func generateNodesAddressFromTemplate(ids []int32, template string) map[int32]clientconfig.NodeUri { - addresses := make(map[int32]clientconfig.NodeUri) - for _,nId := range ids { - addresses[nId] = clientconfig.NodeUri{ - HostListener: fmt.Sprintf(template ,nId), - RequestHost: fmt.Sprintf(template ,nId), + if !c.IsExternal() { + cluster = &nificluster.InternalCluster{ + Name: c.Name, + Namespace: c.Namespace, + Status: c.Status, } + return } - return addresses -} -type ExternalTLSCluster struct { - NodeURITemplate string - NodeIds []int32 - NifiURI string - RootProcessGroupId string - - nifiConfig *clientconfig.NifiConfig -} - -func (cluster *ExternalTLSCluster) IsExternal() bool { - return true -} - -func (cluster *ExternalTLSCluster) IsInternal() bool { 
- return false -} + config, err := n.BuildConfig() + cluster = &nificluster.ExternalCluster{ + NodeURITemplate: c.Spec.NodeURITemplate, + NodeIds: util.NodesToIdList(c.Spec.Nodes), + NifiURI: c.Spec.NifiURI, + RootProcessGroupId: c.Spec.RootProcessGroupId, + Name: c.Name, + + NifiConfig: config, + } -func (cluster *ExternalTLSCluster) ClusterLabelString() string { - return fmt.Sprintf("%s", cluster.NifiURI) + return } -func (cluster ExternalTLSCluster) IsReady() bool { - nClient, err := common.NewClusterConnection(log, cluster.nifiConfig) - if err != nil { - return false - } - - clusterEntity, err := nClient.DescribeCluster() - if err != nil { - return false - } +func clusterConfig(client client.Client, cluster *v1alpha1.NifiCluster) (*clientconfig.NifiConfig, error) { + conf := common.ClusterConfig(cluster) - for _, node := range clusterEntity.Cluster.Nodes{ - if node.Status != nificlient.CONNECTED_STATUS { - return false + if conf.UseSSL { + tlsConfig, err := common.TlsConfig(client, cluster) + if err != nil { + return conf, err } + conf.TLSConfig = tlsConfig } - return true -} -func (cluster *ExternalTLSCluster) Id() string { - return cluster.NifiURI -} \ No newline at end of file + return conf, nil +} diff --git a/pkg/nificlient/config/tls/tls_config_test.go b/pkg/nificlient/config/tls/tls_config_test.go new file mode 100644 index 000000000..0d768e9ac --- /dev/null +++ b/pkg/nificlient/config/tls/tls_config_test.go @@ -0,0 +1,112 @@ +package tls + +import ( + "fmt" + "testing" + + "github.com/Orange-OpenSource/nifikop/api/v1alpha1" + "github.com/Orange-OpenSource/nifikop/pkg/pki" + "github.com/stretchr/testify/assert" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + httpContainerPort int32 = 443 + succeededNodeId int32 = 4 + + clusterName = "test-cluster" + clusterNamespace = "test-namespace" +) + +type mockClient struct { + client.Client +} + +func testCluster(t *testing.T) *v1alpha1.NifiCluster { + t.Helper() + cluster := &v1alpha1.NifiCluster{} 
+ + cluster.Name = clusterName + cluster.Namespace = clusterNamespace + cluster.Spec = v1alpha1.NifiClusterSpec{} + cluster.Spec.ListenersConfig = &v1alpha1.ListenersConfig{} + + cluster.Status.NodesState = make(map[string]v1alpha1.NodeState) + cluster.Status.NodesState["1"] = v1alpha1.NodeState{ + GracefulActionState: v1alpha1.GracefulActionState{ + State: v1alpha1.GracefulDownscaleRunning, + }, + } + + cluster.Status.NodesState["2"] = v1alpha1.NodeState{ + GracefulActionState: v1alpha1.GracefulActionState{ + State: v1alpha1.GracefulUpscaleRequired, + }, + } + + cluster.Status.NodesState["3"] = v1alpha1.NodeState{ + GracefulActionState: v1alpha1.GracefulActionState{ + ActionStep: v1alpha1.RemoveStatus, + }, + } + + cluster.Status.NodesState[fmt.Sprint(succeededNodeId)] = v1alpha1.NodeState{ + GracefulActionState: v1alpha1.GracefulActionState{ + State: v1alpha1.GracefulDownscaleSucceeded, + }, + } + + cluster.Spec.ListenersConfig.InternalListeners = []v1alpha1.InternalListenerConfig{ + {Type: "https", ContainerPort: httpContainerPort}, + {Type: "http", ContainerPort: 8080}, + {Type: "cluster", ContainerPort: 8083}, + {Type: "s2s", ContainerPort: 8085}, + } + return cluster +} + +func testSecuredCluster(t *testing.T) *v1alpha1.NifiCluster { + cluster := testCluster(t) + cluster.Spec.ListenersConfig.SSLSecrets = &v1alpha1.SSLSecrets{ + PKIBackend: pki.MockBackend, + } + + return cluster +} + +func TestClusterConfig(t *testing.T) { + cluster := testCluster(t) + testClusterConfig(t, cluster, false) + cluster = testSecuredCluster(t) + testClusterConfig(t, cluster, true) +} + +func testClusterConfig(t *testing.T, cluster *v1alpha1.NifiCluster, expectedUseSSL bool) { + assert := assert.New(t) + conf, err := clusterConfig(mockClient{}, cluster) + assert.Nil(err) + assert.Equal(expectedUseSSL, conf.UseSSL) + + if expectedUseSSL { + assert.NotNil(conf.TLSConfig) + } else { + assert.Nil(conf.TLSConfig) + } + + assert.Equal( + fmt.Sprintf("%s-%s-node.%s.svc.cluster.local:%d", 
+ clusterName, "%d", clusterNamespace, httpContainerPort), + conf.NodeURITemplate) + + assert.Equal(1, len(conf.NodesURI)) + assert.NotNil(conf.NodesURI[succeededNodeId]) + assert.Equal( + fmt.Sprintf("%s-%d-node.%s.svc.cluster.local:%d", + clusterName, succeededNodeId, clusterNamespace, httpContainerPort), + conf.NodesURI[succeededNodeId].RequestHost) + + assert.Equal( + fmt.Sprintf("%s-all-node.%s.svc.cluster.local:%d", + clusterName, clusterNamespace, httpContainerPort), + conf.NifiURI) +} diff --git a/pkg/nificlient/controllerconfig.go b/pkg/nificlient/controllerconfig.go index 8e7f82bc1..1bbfa9409 100644 --- a/pkg/nificlient/controllerconfig.go +++ b/pkg/nificlient/controllerconfig.go @@ -20,7 +20,7 @@ import ( func (n *nifiClient) GetControllerConfig() (*nigoapi.ControllerConfigurationEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable @@ -28,7 +28,7 @@ func (n *nifiClient) GetControllerConfig() (*nigoapi.ControllerConfigurationEnti // Request on Nifi Rest API to get the reporting task informations - out, rsp, body, err := client.ControllerApi.GetControllerConfig(nil) + out, rsp, body, err := client.ControllerApi.GetControllerConfig(context) if err := errorGetOperation(rsp, body, err); err != nil { return nil, err @@ -39,14 +39,14 @@ func (n *nifiClient) GetControllerConfig() (*nigoapi.ControllerConfigurationEnti func (n *nifiClient) UpdateControllerConfig(entity nigoapi.ControllerConfigurationEntity) (*nigoapi.ControllerConfigurationEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to update the reporting task - out, rsp, body, err := client.ControllerApi.UpdateControllerConfig(nil, entity) + out, rsp, body, err := client.ControllerApi.UpdateControllerConfig(context, entity) if err := errorUpdateOperation(rsp, body, err); err != nil { return nil, err } diff --git a/pkg/nificlient/flow.go b/pkg/nificlient/flow.go index 1802da3c4..6b6616417 100644 --- a/pkg/nificlient/flow.go +++ b/pkg/nificlient/flow.go @@ -7,14 +7,14 @@ import ( func (n *nifiClient) GetFlow(id string) (*nigoapi.ProcessGroupFlowEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to get the process group flow informations - flowPGEntity, rsp, body, err := client.FlowApi.GetFlow(nil, id) + flowPGEntity, rsp, body, err := client.FlowApi.GetFlow(context, id) if err := errorGetOperation(rsp, body, err); err != nil { return nil, err } @@ -24,14 +24,14 @@ func (n *nifiClient) GetFlow(id string) (*nigoapi.ProcessGroupFlowEntity, error) func (n *nifiClient) GetFlowControllerServices(id string) (*nigoapi.ControllerServicesEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to get the process group flow's controller services informations - csEntity, rsp, body, err := client.FlowApi.GetControllerServicesFromGroup(nil, id, + csEntity, rsp, body, err := client.FlowApi.GetControllerServicesFromGroup(context, id, &nigoapi.FlowApiGetControllerServicesFromGroupOpts{ IncludeAncestorGroups: optional.NewBool(false), IncludeDescendantGroups: optional.NewBool(true), @@ -47,14 +47,14 @@ func (n *nifiClient) GetFlowControllerServices(id string) (*nigoapi.ControllerSe func (n *nifiClient) UpdateFlowControllerServices(entity nigoapi.ActivateControllerServicesEntity) (*nigoapi.ActivateControllerServicesEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to enable or disable the controller services - csEntity, rsp, body, err := client.FlowApi.ActivateControllerServices(nil, entity.Id, entity) + csEntity, rsp, body, err := client.FlowApi.ActivateControllerServices(context, entity.Id, entity) if err := errorUpdateOperation(rsp, body, err); err != nil { return nil, err } @@ -65,14 +65,14 @@ func (n *nifiClient) UpdateFlowControllerServices(entity nigoapi.ActivateControl func (n *nifiClient) UpdateFlowProcessGroup(entity nigoapi.ScheduleComponentsEntity) (*nigoapi.ScheduleComponentsEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to enable or disable the controller services - csEntity, rsp, body, err := client.FlowApi.ScheduleComponents(nil, entity.Id, entity) + csEntity, rsp, body, err := client.FlowApi.ScheduleComponents(context, entity.Id, entity) if err := errorUpdateOperation(rsp, body, err); err != nil { return nil, err } @@ -83,14 +83,14 @@ func (n *nifiClient) UpdateFlowProcessGroup(entity nigoapi.ScheduleComponentsEnt // TODO : when last supported will be NiFi 1.12.X //func (n *nifiClient) FlowDropRequest(connectionId, id string) (*nigoapi.DropRequestEntity, error) { // // Get nigoapi client, favoring the one associated to the coordinator node. -// client := n.privilegeCoordinatorClient() +// client, context := n.privilegeCoordinatorClient() // if client == nil { // log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") // return nil, ErrNoNodeClientsAvailable // } // // // Request on Nifi Rest API to get the drop request information -// dropRequest, rsp, err := client.FlowfileQueuesApi.GetDropRequest(nil, connectionId, id) +// dropRequest, rsp, err := client.FlowfileQueuesApi.GetDropRequest(context, connectionId, id) // if err := errorGetOperation(rsp, err); err != nil { // return nil, err // } diff --git a/pkg/nificlient/flowfiles.go b/pkg/nificlient/flowfiles.go index fee2d4a11..cc4e9b22d 100644 --- a/pkg/nificlient/flowfiles.go +++ b/pkg/nificlient/flowfiles.go @@ -6,14 +6,14 @@ import ( func (n *nifiClient) GetDropRequest(connectionId, id string) (*nigoapi.DropRequestEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to get the drop request information - dropRequest, rsp, body, err := client.FlowfileQueuesApi.GetDropRequest(nil, connectionId, id) + dropRequest, rsp, body, err := client.FlowfileQueuesApi.GetDropRequest(context, connectionId, id) if err := errorGetOperation(rsp, body, err); err != nil { return nil, err } @@ -23,14 +23,14 @@ func (n *nifiClient) GetDropRequest(connectionId, id string) (*nigoapi.DropReque func (n *nifiClient) CreateDropRequest(connectionId string) (*nigoapi.DropRequestEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to create the drop Request - entity, rsp, body, err := client.FlowfileQueuesApi.CreateDropRequest(nil, connectionId) + entity, rsp, body, err := client.FlowfileQueuesApi.CreateDropRequest(context, connectionId) if err := errorCreateOperation(rsp, body, err); err != nil { return nil, err } @@ -41,14 +41,14 @@ func (n *nifiClient) CreateDropRequest(connectionId string) (*nigoapi.DropReques // TODO : when last supported will be NiFi 1.12.X //func (n *nifiClient) CreateDropRequest(pgId string)(*nigoapi.ProcessGroupEntity, error) { // // Get nigoapi client, favoring the one associated to the coordinator node. 
-// client := n.privilegeCoordinatorClient() +// client, context := n.privilegeCoordinatorClient() // if client == nil { // log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") // return nil, ErrNoNodeClientsAvailable // } // // // Request on Nifi Rest API to create the registry client -// entity, rsp, err := client.ProcessGroupsApi.CreateEmptyAllConnectionsRequest(nil, pgId) +// entity, rsp, err := client.ProcessGroupsApi.CreateEmptyAllConnectionsRequest(context, pgId) // if err := errorCreateOperation(rsp, err); err != nil { // return nil, err // } diff --git a/pkg/nificlient/inputport.go b/pkg/nificlient/inputport.go index 299489f3d..68a198033 100644 --- a/pkg/nificlient/inputport.go +++ b/pkg/nificlient/inputport.go @@ -4,14 +4,14 @@ import nigoapi "github.com/erdrix/nigoapi/pkg/nifi" func (n *nifiClient) UpdateInputPortRunStatus(id string, entity nigoapi.PortRunStatusEntity) (*nigoapi.ProcessorEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to update the input port run status - processor, rsp, body, err := client.InputPortsApi.UpdateRunStatus(nil, id, entity) + processor, rsp, body, err := client.InputPortsApi.UpdateRunStatus(context, id, entity) if err := errorUpdateOperation(rsp, body, err); err != nil { return nil, err } diff --git a/pkg/nificlient/mock_client_test.go b/pkg/nificlient/mock_client_test.go index 85dc624dd..fb2f8ce20 100644 --- a/pkg/nificlient/mock_client_test.go +++ b/pkg/nificlient/mock_client_test.go @@ -15,6 +15,7 @@ package nificlient import ( + "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config/common" "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" "testing" @@ -145,6 +146,7 @@ func testClusterMock(t *testing.T) *v1alpha1.NifiCluster { cluster.Name = clusterName cluster.Namespace = clusterNamespace cluster.Spec = v1alpha1.NifiClusterSpec{} + cluster.Spec.ListenersConfig = &v1alpha1.ListenersConfig{} cluster.Spec.Nodes = []v1alpha1.Node{ {Id: 0}, @@ -159,3 +161,8 @@ func testClusterMock(t *testing.T) *v1alpha1.NifiCluster { } return cluster } + +func configFromCluster(cluster *v1alpha1.NifiCluster) (*clientconfig.NifiConfig, error) { + conf := common.ClusterConfig(cluster) + return conf, nil +} diff --git a/pkg/nificlient/parametercontext.go b/pkg/nificlient/parametercontext.go index 87c59b896..a538aeef3 100644 --- a/pkg/nificlient/parametercontext.go +++ b/pkg/nificlient/parametercontext.go @@ -23,14 +23,14 @@ import ( func (n *nifiClient) GetParameterContext(id string) (*nigoapi.ParameterContextEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to get the parameter context informations - pcEntity, rsp, body, err := client.ParameterContextsApi.GetParameterContext(nil, id) + pcEntity, rsp, body, err := client.ParameterContextsApi.GetParameterContext(context, id) if err := errorGetOperation(rsp, body, err); err != nil { return nil, err } @@ -40,14 +40,14 @@ func (n *nifiClient) GetParameterContext(id string) (*nigoapi.ParameterContextEn func (n *nifiClient) CreateParameterContext(entity nigoapi.ParameterContextEntity) (*nigoapi.ParameterContextEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to create the parameter context - pcEntity, rsp, body, err := client.ParameterContextsApi.CreateParameterContext(nil, entity) + pcEntity, rsp, body, err := client.ParameterContextsApi.CreateParameterContext(context, entity) if err := errorCreateOperation(rsp, body, err); err != nil { return nil, err } @@ -57,14 +57,14 @@ func (n *nifiClient) CreateParameterContext(entity nigoapi.ParameterContextEntit func (n *nifiClient) RemoveParameterContext(entity nigoapi.ParameterContextEntity) error { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return ErrNoNodeClientsAvailable } // Request on Nifi Rest API to remove the parameter context - _, rsp, body, err := client.ParameterContextsApi.DeleteParameterContext(nil, entity.Id, + _, rsp, body, err := client.ParameterContextsApi.DeleteParameterContext(context, entity.Id, &nigoapi.ParameterContextsApiDeleteParameterContextOpts{ Version: optional.NewString(strconv.FormatInt(*entity.Revision.Version, 10)), }) @@ -74,14 +74,14 @@ func (n *nifiClient) RemoveParameterContext(entity nigoapi.ParameterContextEntit func (n *nifiClient) CreateParameterContextUpdateRequest(contextId string, entity nigoapi.ParameterContextEntity) (*nigoapi.ParameterContextUpdateRequestEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to create the parameter context update request - request, rsp, body, err := client.ParameterContextsApi.SubmitParameterContextUpdate(nil, contextId, entity) + request, rsp, body, err := client.ParameterContextsApi.SubmitParameterContextUpdate(context, contextId, entity) if err := errorUpdateOperation(rsp, body, err); err != nil { return nil, err } @@ -91,14 +91,14 @@ func (n *nifiClient) CreateParameterContextUpdateRequest(contextId string, entit func (n *nifiClient) GetParameterContextUpdateRequest(contextId, id string) (*nigoapi.ParameterContextUpdateRequestEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to get the parameter context update request information - request, rsp, body, err := client.ParameterContextsApi.GetParameterContextUpdate(nil, contextId, id) + request, rsp, body, err := client.ParameterContextsApi.GetParameterContextUpdate(context, contextId, id) if err := errorGetOperation(rsp, body, err); err != nil { return nil, err } diff --git a/pkg/nificlient/policies.go b/pkg/nificlient/policies.go index e07055ce0..ec66d236f 100644 --- a/pkg/nificlient/policies.go +++ b/pkg/nificlient/policies.go @@ -23,7 +23,7 @@ import ( func (n *nifiClient) GetAccessPolicy(action, resource string) (*nigoapi.AccessPolicyEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable @@ -39,7 +39,7 @@ func (n *nifiClient) GetAccessPolicy(action, resource string) (*nigoapi.AccessPo break } - accessPolicyEntity, rsp, body, err := client.PoliciesApi.GetAccessPolicyForResource(nil, action, resource) + accessPolicyEntity, rsp, body, err := client.PoliciesApi.GetAccessPolicyForResource(context, action, resource) if err := errorGetOperation(rsp, body, err); err != nil { return nil, err @@ -50,14 +50,14 @@ func (n *nifiClient) GetAccessPolicy(action, resource string) (*nigoapi.AccessPo func (n *nifiClient) CreateAccessPolicy(entity nigoapi.AccessPolicyEntity) (*nigoapi.AccessPolicyEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to create the access policy - accessPolicyEntity, rsp, body, err := client.PoliciesApi.CreateAccessPolicy(nil, entity) + accessPolicyEntity, rsp, body, err := client.PoliciesApi.CreateAccessPolicy(context, entity) if err := errorCreateOperation(rsp, body, err); err != nil { return nil, err } @@ -67,14 +67,14 @@ func (n *nifiClient) CreateAccessPolicy(entity nigoapi.AccessPolicyEntity) (*nig func (n *nifiClient) UpdateAccessPolicy(entity nigoapi.AccessPolicyEntity) (*nigoapi.AccessPolicyEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to update the access policy - accessPolicyEntity, rsp, body, err := client.PoliciesApi.UpdateAccessPolicy(nil, entity.Id, entity) + accessPolicyEntity, rsp, body, err := client.PoliciesApi.UpdateAccessPolicy(context, entity.Id, entity) if err := errorUpdateOperation(rsp, body, err); err != nil { return nil, err } @@ -84,14 +84,14 @@ func (n *nifiClient) UpdateAccessPolicy(entity nigoapi.AccessPolicyEntity) (*nig func (n *nifiClient) RemoveAccessPolicy(entity nigoapi.AccessPolicyEntity) error { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return ErrNoNodeClientsAvailable } // Request on Nifi Rest API to remove the registry client - _, rsp, body, err := client.PoliciesApi.RemoveAccessPolicy(nil, entity.Id, + _, rsp, body, err := client.PoliciesApi.RemoveAccessPolicy(context, entity.Id, &nigoapi.PoliciesApiRemoveAccessPolicyOpts{ Version: optional.NewString(strconv.FormatInt(*entity.Revision.Version, 10)), }) diff --git a/pkg/nificlient/processgroup.go b/pkg/nificlient/processgroup.go index a65a70918..9e827f006 100644 --- a/pkg/nificlient/processgroup.go +++ b/pkg/nificlient/processgroup.go @@ -9,14 +9,14 @@ import ( func (n *nifiClient) GetProcessGroup(id string) (*nigoapi.ProcessGroupEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to get the process group informations - pGEntity, rsp, body, err := client.ProcessGroupsApi.GetProcessGroup(nil, id) + pGEntity, rsp, body, err := client.ProcessGroupsApi.GetProcessGroup(context, id) if err := errorGetOperation(rsp, body, err); err != nil { return nil, err } @@ -28,14 +28,14 @@ func (n *nifiClient) CreateProcessGroup( entity nigoapi.ProcessGroupEntity, pgParentId string) (*nigoapi.ProcessGroupEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to create the versioned process group - pgEntity, rsp, body, err := client.ProcessGroupsApi.CreateProcessGroup(nil, pgParentId, entity) + pgEntity, rsp, body, err := client.ProcessGroupsApi.CreateProcessGroup(context, pgParentId, entity) if err := errorCreateOperation(rsp, body, err); err != nil { return nil, err } @@ -45,14 +45,14 @@ func (n *nifiClient) CreateProcessGroup( func (n *nifiClient) UpdateProcessGroup(entity nigoapi.ProcessGroupEntity) (*nigoapi.ProcessGroupEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to update the versioned process group - pgEntity, rsp, body, err := client.ProcessGroupsApi.UpdateProcessGroup(nil, entity.Id, entity) + pgEntity, rsp, body, err := client.ProcessGroupsApi.UpdateProcessGroup(context, entity.Id, entity) if err := errorUpdateOperation(rsp, body, err); err != nil { return nil, err } @@ -62,7 +62,7 @@ func (n *nifiClient) UpdateProcessGroup(entity nigoapi.ProcessGroupEntity) (*nig func (n *nifiClient) RemoveProcessGroup(entity nigoapi.ProcessGroupEntity) error { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return ErrNoNodeClientsAvailable @@ -70,7 +70,7 @@ func (n *nifiClient) RemoveProcessGroup(entity nigoapi.ProcessGroupEntity) error // Request on Nifi Rest API to remove the versioned process group _, rsp, body, err := client.ProcessGroupsApi.RemoveProcessGroup( - nil, + context, entity.Id, &nigoapi.ProcessGroupsApiRemoveProcessGroupOpts{ Version: optional.NewString(strconv.FormatInt(*entity.Revision.Version, 10)), diff --git a/pkg/nificlient/processor.go b/pkg/nificlient/processor.go index cd4ca741b..d3acc2900 100644 --- a/pkg/nificlient/processor.go +++ b/pkg/nificlient/processor.go @@ -7,14 +7,14 @@ func (n *nifiClient) UpdateProcessorRunStatus( entity nigoapi.ProcessorRunStatusEntity) (*nigoapi.ProcessorEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to update the processor run status - processor, rsp, body, err := client.ProcessorsApi.UpdateRunStatus(nil, id, entity) + processor, rsp, body, err := client.ProcessorsApi.UpdateRunStatus(context, id, entity) if err := errorUpdateOperation(rsp, body, err); err != nil { return nil, err } diff --git a/pkg/nificlient/registryclient.go b/pkg/nificlient/registryclient.go index 5ab8008fa..47e607c31 100644 --- a/pkg/nificlient/registryclient.go +++ b/pkg/nificlient/registryclient.go @@ -23,14 +23,14 @@ import ( func (n *nifiClient) GetRegistryClient(id string) (*nigoapi.RegistryClientEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to get the registy client informations - regCliEntity, rsp, body, err := client.ControllerApi.GetRegistryClient(nil, id) + regCliEntity, rsp, body, err := client.ControllerApi.GetRegistryClient(context, id) if err := errorGetOperation(rsp, body, err); err != nil { return nil, err @@ -41,14 +41,14 @@ func (n *nifiClient) GetRegistryClient(id string) (*nigoapi.RegistryClientEntity func (n *nifiClient) CreateRegistryClient(entity nigoapi.RegistryClientEntity) (*nigoapi.RegistryClientEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to create the registry client - regCliEntity, rsp, body, err := client.ControllerApi.CreateRegistryClient(nil, entity) + regCliEntity, rsp, body, err := client.ControllerApi.CreateRegistryClient(context, entity) if err := errorCreateOperation(rsp, body, err); err != nil { return nil, err } @@ -58,14 +58,14 @@ func (n *nifiClient) CreateRegistryClient(entity nigoapi.RegistryClientEntity) ( func (n *nifiClient) UpdateRegistryClient(entity nigoapi.RegistryClientEntity) (*nigoapi.RegistryClientEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to update the registry client - regCliEntity, rsp, body, err := client.ControllerApi.UpdateRegistryClient(nil, entity.Id, entity) + regCliEntity, rsp, body, err := client.ControllerApi.UpdateRegistryClient(context, entity.Id, entity) if err := errorUpdateOperation(rsp, body, err); err != nil { return nil, err } @@ -75,14 +75,14 @@ func (n *nifiClient) UpdateRegistryClient(entity nigoapi.RegistryClientEntity) ( func (n *nifiClient) RemoveRegistryClient(entity nigoapi.RegistryClientEntity) error { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return ErrNoNodeClientsAvailable } // Request on Nifi Rest API to remove the registry client - _, rsp, body, err := client.ControllerApi.DeleteRegistryClient(nil, entity.Id, + _, rsp, body, err := client.ControllerApi.DeleteRegistryClient(context, entity.Id, &nigoapi.ControllerApiDeleteRegistryClientOpts{ Version: optional.NewString(strconv.FormatInt(*entity.Revision.Version, 10)), }) diff --git a/pkg/nificlient/reportingtask.go b/pkg/nificlient/reportingtask.go index 058fd3580..25f4f9046 100644 --- a/pkg/nificlient/reportingtask.go +++ b/pkg/nificlient/reportingtask.go @@ -23,14 +23,14 @@ import ( func (n *nifiClient) GetReportingTask(id string) (*nigoapi.ReportingTaskEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to get the reporting task informations - out, rsp, body, err := client.ReportingTasksApi.GetReportingTask(nil, id) + out, rsp, body, err := client.ReportingTasksApi.GetReportingTask(context, id) if err := errorGetOperation(rsp, body, err); err != nil { return nil, err @@ -41,14 +41,14 @@ func (n *nifiClient) GetReportingTask(id string) (*nigoapi.ReportingTaskEntity, func (n *nifiClient) CreateReportingTask(entity nigoapi.ReportingTaskEntity) (*nigoapi.ReportingTaskEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to create the reporting task - out, rsp, body, err := client.ControllerApi.CreateReportingTask(nil, entity) + out, rsp, body, err := client.ControllerApi.CreateReportingTask(context, entity) if err := errorCreateOperation(rsp, body, err); err != nil { return nil, err } @@ -58,14 +58,14 @@ func (n *nifiClient) CreateReportingTask(entity nigoapi.ReportingTaskEntity) (*n func (n *nifiClient) UpdateReportingTask(entity nigoapi.ReportingTaskEntity) (*nigoapi.ReportingTaskEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to update the reporting task - out, rsp, body, err := client.ReportingTasksApi.UpdateReportingTask(nil, entity.Id, entity) + out, rsp, body, err := client.ReportingTasksApi.UpdateReportingTask(context, entity.Id, entity) if err := errorUpdateOperation(rsp, body, err); err != nil { return nil, err } @@ -75,14 +75,14 @@ func (n *nifiClient) UpdateReportingTask(entity nigoapi.ReportingTaskEntity) (*n func (n *nifiClient) UpdateRunStatusReportingTask(id string, entity nigoapi.ReportingTaskRunStatusEntity) (*nigoapi.ReportingTaskEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to update the reporting task - out, rsp, body, err := client.ReportingTasksApi.UpdateRunStatus(nil, id, entity) + out, rsp, body, err := client.ReportingTasksApi.UpdateRunStatus(context, id, entity) if err := errorUpdateOperation(rsp, body, err); err != nil { return nil, err } @@ -92,14 +92,14 @@ func (n *nifiClient) UpdateRunStatusReportingTask(id string, entity nigoapi.Repo func (n *nifiClient) RemoveReportingTask(entity nigoapi.ReportingTaskEntity) error { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return ErrNoNodeClientsAvailable } // Request on Nifi Rest API to remove the reporting task - _, rsp, body, err := client.ReportingTasksApi.RemoveReportingTask(nil, entity.Id, + _, rsp, body, err := client.ReportingTasksApi.RemoveReportingTask(context, entity.Id, &nigoapi.ReportingTasksApiRemoveReportingTaskOpts{ Version: optional.NewString(strconv.FormatInt(*entity.Revision.Version, 10)), }) diff --git a/pkg/nificlient/snippet.go b/pkg/nificlient/snippet.go index 8cb95b974..a09873c0e 100644 --- a/pkg/nificlient/snippet.go +++ b/pkg/nificlient/snippet.go @@ -4,14 +4,14 @@ import nigoapi "github.com/erdrix/nigoapi/pkg/nifi" func (n *nifiClient) CreateSnippet(entity nigoapi.SnippetEntity) (*nigoapi.SnippetEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to create the snippet - snippetEntity, rsp, body, err := client.SnippetsApi.CreateSnippet(nil, entity) + snippetEntity, rsp, body, err := client.SnippetsApi.CreateSnippet(context, entity) if err := errorCreateOperation(rsp, body, err); err != nil { return nil, err } @@ -21,14 +21,14 @@ func (n *nifiClient) CreateSnippet(entity nigoapi.SnippetEntity) (*nigoapi.Snipp func (n *nifiClient) UpdateSnippet(entity nigoapi.SnippetEntity) (*nigoapi.SnippetEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to update the snippet - snippetEntity, rsp, body, err := client.SnippetsApi.UpdateSnippet(nil, entity.Snippet.Id, entity) + snippetEntity, rsp, body, err := client.SnippetsApi.UpdateSnippet(context, entity.Snippet.Id, entity) if err := errorUpdateOperation(rsp, body, err); err != nil { return nil, err } diff --git a/pkg/nificlient/system.go b/pkg/nificlient/system.go index da87033a0..10d4091df 100644 --- a/pkg/nificlient/system.go +++ b/pkg/nificlient/system.go @@ -20,13 +20,30 @@ import ( func (n *nifiClient) DescribeCluster() (*nigoapi.ClusterEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } - clusterEntry, rsp, body, err := client.ControllerApi.GetCluster(nil) + clusterEntry, rsp, body, err := client.ControllerApi.GetCluster(context) + if err := errorGetOperation(rsp, body, err); err != nil { + return nil, err + } + + return &clusterEntry, nil +} + +func (n *nifiClient) DescribeClusterFromNodeId(nodeId int32) (*nigoapi.ClusterEntity, error) { + // Get nigoapi client, favoring the one associated to the coordinator node. 
+ client := n.nodeClient[nodeId] + context := n.opts.NodesContext[nodeId] + if client == nil { + log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") + return nil, ErrNoNodeClientsAvailable + } + + clusterEntry, rsp, body, err := client.ControllerApi.GetCluster(context) if err := errorGetOperation(rsp, body, err); err != nil { return nil, err } @@ -36,7 +53,7 @@ func (n *nifiClient) DescribeCluster() (*nigoapi.ClusterEntity, error) { func (n *nifiClient) GetClusterNode(nId int32) (*nigoapi.NodeEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorExceptNodeIdClient(nId) + client, context := n.privilegeCoordinatorExceptNodeIdClient(nId) if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable @@ -50,7 +67,7 @@ func (n *nifiClient) GetClusterNode(nId int32) (*nigoapi.NodeEntity, error) { } // Request on Nifi Rest API to get the node information - nodeEntity, rsp, body, err := client.ControllerApi.GetNode(nil, targetedNode.NodeId) + nodeEntity, rsp, body, err := client.ControllerApi.GetNode(context, targetedNode.NodeId) if err := errorGetOperation(rsp, body, err); err != nil { return nil, err @@ -89,28 +106,28 @@ func (n *nifiClient) RemoveClusterNode(nId int32) error { } // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorExceptNodeIdClient(nId) + client, context := n.privilegeCoordinatorExceptNodeIdClient(nId) if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return ErrNoNodeClientsAvailable } // Request on Nifi Rest API to remove the node - _, rsp, body, err := client.ControllerApi.DeleteNode(nil, targetedNode.NodeId) + _, rsp, body, err := client.ControllerApi.DeleteNode(context, targetedNode.NodeId) return errorDeleteOperation(rsp, body, err) } func (n *nifiClient) RemoveClusterNodeFromClusterNodeId(nId string) error { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return ErrNoNodeClientsAvailable } // Request on Nifi Rest API to remove the node - _, rsp, body, err := client.ControllerApi.DeleteNode(nil, nId) + _, rsp, body, err := client.ControllerApi.DeleteNode(context, nId) return errorDeleteOperation(rsp, body, err) } @@ -133,7 +150,7 @@ func (n *nifiClient) setClusterNodeStatus(nId int32, status, expectedActionStatu } // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorExceptNodeIdClient(nId) + client, context := n.privilegeCoordinatorExceptNodeIdClient(nId) if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable @@ -143,7 +160,7 @@ func (n *nifiClient) setClusterNodeStatus(nId int32, status, expectedActionStatu targetedNode.Status = string(status) // Request on Nifi Rest API to update the node status - nodeEntity, rsp, body, err := client.ControllerApi.UpdateNode(nil, targetedNode.NodeId, nigoapi.NodeEntity{Node: targetedNode}) + nodeEntity, rsp, body, err := client.ControllerApi.UpdateNode(context, targetedNode.NodeId, nigoapi.NodeEntity{Node: targetedNode}) if err := errorUpdateOperation(rsp, body, err); err != nil { return nil, err } diff --git a/pkg/nificlient/system_test.go b/pkg/nificlient/system_test.go index 18330c37c..157c36da7 100644 --- a/pkg/nificlient/system_test.go +++ b/pkg/nificlient/system_test.go @@ -327,8 +327,9 @@ func testRemoveClusterNodeFromClusterNodeId(t *testing.T, cluster *v1alpha1.Nifi func testClientFromCluster(cluster *v1alpha1.NifiCluster, empty bool) (NifiClient, error) { httpmock.Activate() defer httpmock.DeactivateAndReset() - url := nifiAddress(cluster, "/controller/cluster") + + cfg, _ := configFromCluster(cluster) httpmock.RegisterResponder(http.MethodGet, url, func(req *http.Request) (*http.Response, error) { return httpmock.NewJsonResponse( @@ -336,7 +337,8 @@ func testClientFromCluster(cluster *v1alpha1.NifiCluster, empty bool) (NifiClien MockGetClusterResponse(cluster, empty)) }) - return NewFromCluster(mockClient{}, cluster) + cli, err := NewFromConfig(cfg) + return cli, err } func nifiAddress(cluster *v1alpha1.NifiCluster, path string) string { diff --git a/pkg/nificlient/user.go b/pkg/nificlient/user.go index d0cae3385..cdc9ea4a8 100644 --- a/pkg/nificlient/user.go +++ b/pkg/nificlient/user.go @@ -23,14 +23,14 @@ import ( func (n *nifiClient) GetUsers() ([]nigoapi.UserEntity, 
error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to get the users informations - usersEntity, rsp, body, err := client.TenantsApi.GetUsers(nil) + usersEntity, rsp, body, err := client.TenantsApi.GetUsers(context) if err := errorGetOperation(rsp, body, err); err != nil { return nil, err @@ -41,14 +41,14 @@ func (n *nifiClient) GetUsers() ([]nigoapi.UserEntity, error) { func (n *nifiClient) GetUser(id string) (*nigoapi.UserEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to get the user informations - userEntity, rsp, body, err := client.TenantsApi.GetUser(nil, id) + userEntity, rsp, body, err := client.TenantsApi.GetUser(context, id) if err := errorGetOperation(rsp, body, err); err != nil { return nil, err @@ -59,14 +59,14 @@ func (n *nifiClient) GetUser(id string) (*nigoapi.UserEntity, error) { func (n *nifiClient) CreateUser(entity nigoapi.UserEntity) (*nigoapi.UserEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to create the user - userEntity, rsp, body, err := client.TenantsApi.CreateUser(nil, entity) + userEntity, rsp, body, err := client.TenantsApi.CreateUser(context, entity) if err := errorCreateOperation(rsp, body, err); err != nil { return nil, err } @@ -76,14 +76,14 @@ func (n *nifiClient) CreateUser(entity nigoapi.UserEntity) (*nigoapi.UserEntity, func (n *nifiClient) UpdateUser(entity nigoapi.UserEntity) (*nigoapi.UserEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to update the user - userEntity, rsp, body, err := client.TenantsApi.UpdateUser(nil, entity.Id, entity) + userEntity, rsp, body, err := client.TenantsApi.UpdateUser(context, entity.Id, entity) if err := errorUpdateOperation(rsp, body, err); err != nil { return nil, err } @@ -93,14 +93,14 @@ func (n *nifiClient) UpdateUser(entity nigoapi.UserEntity) (*nigoapi.UserEntity, func (n *nifiClient) RemoveUser(entity nigoapi.UserEntity) error { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return ErrNoNodeClientsAvailable } // Request on Nifi Rest API to remove the user - _, rsp, body, err := client.TenantsApi.RemoveUser(nil, entity.Id, + _, rsp, body, err := client.TenantsApi.RemoveUser(context, entity.Id, &nigoapi.TenantsApiRemoveUserOpts{ Version: optional.NewString(strconv.FormatInt(*entity.Revision.Version, 10)), }) diff --git a/pkg/nificlient/usergroup.go b/pkg/nificlient/usergroup.go index 6909aa290..d5bd0e885 100644 --- a/pkg/nificlient/usergroup.go +++ b/pkg/nificlient/usergroup.go @@ -23,14 +23,14 @@ import ( func (n *nifiClient) GetUserGroups() ([]nigoapi.UserGroupEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to get the user groups informations - userGroupsEntity, rsp, body, err := client.TenantsApi.GetUserGroups(nil) + userGroupsEntity, rsp, body, err := client.TenantsApi.GetUserGroups(context) if err := errorGetOperation(rsp, body, err); err != nil { return nil, err @@ -41,14 +41,14 @@ func (n *nifiClient) GetUserGroups() ([]nigoapi.UserGroupEntity, error) { func (n *nifiClient) GetUserGroup(id string) (*nigoapi.UserGroupEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to get the user groups informations - userGroupEntity, rsp, body, err := client.TenantsApi.GetUserGroup(nil, id) + userGroupEntity, rsp, body, err := client.TenantsApi.GetUserGroup(context, id) if err := errorGetOperation(rsp, body, err); err != nil { return nil, err @@ -59,14 +59,14 @@ func (n *nifiClient) GetUserGroup(id string) (*nigoapi.UserGroupEntity, error) { func (n *nifiClient) CreateUserGroup(entity nigoapi.UserGroupEntity) (*nigoapi.UserGroupEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to create the user group - userGroupEntity, rsp, body, err := client.TenantsApi.CreateUserGroup(nil, entity) + userGroupEntity, rsp, body, err := client.TenantsApi.CreateUserGroup(context, entity) if err := errorCreateOperation(rsp, body, err); err != nil { return nil, err } @@ -75,14 +75,14 @@ func (n *nifiClient) CreateUserGroup(entity nigoapi.UserGroupEntity) (*nigoapi.U func (n *nifiClient) UpdateUserGroup(entity nigoapi.UserGroupEntity) (*nigoapi.UserGroupEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to update the user group - userGroupEntity, rsp, body, err := client.TenantsApi.UpdateUserGroup(nil, entity.Id, entity) + userGroupEntity, rsp, body, err := client.TenantsApi.UpdateUserGroup(context, entity.Id, entity) if err := errorUpdateOperation(rsp, body, err); err != nil { return nil, err } @@ -92,14 +92,14 @@ func (n *nifiClient) UpdateUserGroup(entity nigoapi.UserGroupEntity) (*nigoapi.U func (n *nifiClient) RemoveUserGroup(entity nigoapi.UserGroupEntity) error { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return ErrNoNodeClientsAvailable } // Request on Nifi Rest API to remove the user group - _, rsp, body, err := client.TenantsApi.RemoveUserGroup(nil, entity.Id, + _, rsp, body, err := client.TenantsApi.RemoveUserGroup(context, entity.Id, &nigoapi.TenantsApiRemoveUserGroupOpts{ Version: optional.NewString(strconv.FormatInt(*entity.Revision.Version, 10)), }) diff --git a/pkg/nificlient/version.go b/pkg/nificlient/version.go index ae14b41f5..1c489b5d1 100644 --- a/pkg/nificlient/version.go +++ b/pkg/nificlient/version.go @@ -4,14 +4,14 @@ import nigoapi "github.com/erdrix/nigoapi/pkg/nifi" func (n *nifiClient) CreateVersionUpdateRequest(pgId string, entity nigoapi.VersionControlInformationEntity) (*nigoapi.VersionedFlowUpdateRequestEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to create the version update request - request, rsp, body, err := client.VersionsApi.InitiateVersionControlUpdate(nil, pgId, entity) + request, rsp, body, err := client.VersionsApi.InitiateVersionControlUpdate(context, pgId, entity) if err := errorUpdateOperation(rsp, body, err); err != nil { return nil, err } @@ -21,14 +21,14 @@ func (n *nifiClient) CreateVersionUpdateRequest(pgId string, entity nigoapi.Vers func (n *nifiClient) GetVersionUpdateRequest(id string) (*nigoapi.VersionedFlowUpdateRequestEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to get the update request information - request, rsp, body, err := client.VersionsApi.GetUpdateRequest(nil, id) + request, rsp, body, err := client.VersionsApi.GetUpdateRequest(context, id) if err := errorGetOperation(rsp, body, err); err != nil { return nil, err } @@ -38,14 +38,14 @@ func (n *nifiClient) GetVersionUpdateRequest(id string) (*nigoapi.VersionedFlowU func (n *nifiClient) CreateVersionRevertRequest(pgId string, entity nigoapi.VersionControlInformationEntity) (*nigoapi.VersionedFlowUpdateRequestEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. 
- client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to create the version revert request - request, rsp, body, err := client.VersionsApi.InitiateRevertFlowVersion(nil, pgId, entity) + request, rsp, body, err := client.VersionsApi.InitiateRevertFlowVersion(context, pgId, entity) if err := errorUpdateOperation(rsp, body, err); err != nil { return nil, err } @@ -55,14 +55,14 @@ func (n *nifiClient) CreateVersionRevertRequest(pgId string, entity nigoapi.Vers func (n *nifiClient) GetVersionRevertRequest(id string) (*nigoapi.VersionedFlowUpdateRequestEntity, error) { // Get nigoapi client, favoring the one associated to the coordinator node. - client := n.privilegeCoordinatorClient() + client, context := n.privilegeCoordinatorClient() if client == nil { log.Error(ErrNoNodeClientsAvailable, "Error during creating node client") return nil, ErrNoNodeClientsAvailable } // Request on Nifi Rest API to get the revert request information - request, rsp, body, err := client.VersionsApi.GetRevertRequest(nil, id) + request, rsp, body, err := client.VersionsApi.GetRevertRequest(context, id) if err := errorGetOperation(rsp, body, err); err != nil { return nil, err } diff --git a/pkg/pki/certmanagerpki/certmanager_test.go b/pkg/pki/certmanagerpki/certmanager_test.go index eaae9a086..747f8a9aa 100644 --- a/pkg/pki/certmanagerpki/certmanager_test.go +++ b/pkg/pki/certmanagerpki/certmanager_test.go @@ -34,7 +34,7 @@ func newMockCluster() *v1alpha1.NifiCluster { cluster.Name = "test" cluster.Namespace = "test-namespace" cluster.Spec = v1alpha1.NifiClusterSpec{} - cluster.Spec.ListenersConfig = v1alpha1.ListenersConfig{} + cluster.Spec.ListenersConfig = &v1alpha1.ListenersConfig{} cluster.Spec.ListenersConfig.InternalListeners = []v1alpha1.InternalListenerConfig{ {ContainerPort: 9092}, } 
diff --git a/pkg/pki/pki_manager_test.go b/pkg/pki/pki_manager_test.go index 2bc364fb9..67d2206b8 100644 --- a/pkg/pki/pki_manager_test.go +++ b/pkg/pki/pki_manager_test.go @@ -36,7 +36,7 @@ func newMockCluster() *v1alpha1.NifiCluster { cluster.Name = "test" cluster.Namespace = "test" cluster.Spec = v1alpha1.NifiClusterSpec{} - cluster.Spec.ListenersConfig = v1alpha1.ListenersConfig{} + cluster.Spec.ListenersConfig = &v1alpha1.ListenersConfig{} cluster.Spec.ListenersConfig.InternalListeners = []v1alpha1.InternalListenerConfig{ {ContainerPort: 80}, } diff --git a/pkg/resources/nifi/allNodeService.go b/pkg/resources/nifi/allNodeService.go index b24d0b3e2..e7ca1f716 100644 --- a/pkg/resources/nifi/allNodeService.go +++ b/pkg/resources/nifi/allNodeService.go @@ -27,13 +27,13 @@ func (r *Reconciler) allNodeService() runtimeClient.Object { return &corev1.Service{ ObjectMeta: templates.ObjectMetaWithAnnotations(nifiutils.ComputeRequestNiFiAllNodeService(r.NifiCluster.Name, false), - LabelsForNifi(r.NifiCluster.Name), + nifiutils.LabelsForNifi(r.NifiCluster.Name), r.NifiCluster.Spec.Service.Annotations, r.NifiCluster), Spec: corev1.ServiceSpec{ Type: corev1.ServiceTypeClusterIP, SessionAffinity: corev1.ServiceAffinityNone, - Selector: LabelsForNifi(r.NifiCluster.Name), + Selector: nifiutils.LabelsForNifi(r.NifiCluster.Name), Ports: usedPorts, }, } diff --git a/pkg/resources/nifi/headlessService.go b/pkg/resources/nifi/headlessService.go index 471c23c8c..ed2fa840e 100644 --- a/pkg/resources/nifi/headlessService.go +++ b/pkg/resources/nifi/headlessService.go @@ -31,14 +31,14 @@ func (r *Reconciler) headlessService() runtimeClient.Object { return &corev1.Service{ ObjectMeta: templates.ObjectMetaWithAnnotations( fmt.Sprintf(nifiutils.HeadlessServiceTemplate, r.NifiCluster.Name), - util.MergeLabels(LabelsForNifi(r.NifiCluster.Name), r.NifiCluster.Labels), + util.MergeLabels(nifiutils.LabelsForNifi(r.NifiCluster.Name), r.NifiCluster.Labels), 
r.NifiCluster.Spec.Service.Annotations, r.NifiCluster, ), Spec: corev1.ServiceSpec{ Type: corev1.ServiceTypeClusterIP, SessionAffinity: corev1.ServiceAffinityNone, - Selector: LabelsForNifi(r.NifiCluster.Name), + Selector: nifiutils.LabelsForNifi(r.NifiCluster.Name), Ports: usedPorts, ClusterIP: corev1.ClusterIPNone, }, diff --git a/pkg/resources/nifi/nifi.go b/pkg/resources/nifi/nifi.go index b76e01b7d..5e2bb2640 100644 --- a/pkg/resources/nifi/nifi.go +++ b/pkg/resources/nifi/nifi.go @@ -19,6 +19,7 @@ import ( "fmt" "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config" "github.com/Orange-OpenSource/nifikop/pkg/pki" + nifiutil "github.com/Orange-OpenSource/nifikop/pkg/util/nifi" "reflect" "strings" @@ -49,9 +50,9 @@ import ( const ( componentName = "nifi" - nodeConfigMapVolumeMount = "node-config" - nodeTmp = "node-tmp" - nifiDataVolumeMount = "nifi-data" + nodeSecretVolumeMount = "node-config" + nodeTmp = "node-tmp" + nifiDataVolumeMount = "nifi-data" serverKeystoreVolume = "server-ks-files" serverKeystorePath = "/var/run/secrets/java.io/keystores/server" @@ -65,12 +66,6 @@ type Reconciler struct { Scheme *runtime.Scheme } -// LabelsForNifi returns the labels for selecting the resources -// belonging to the given Nifi CR name. 
-func LabelsForNifi(name string) map[string]string { - return map[string]string{"app": "nifi", "nifi_cr": name} -} - // New creates a new reconciler for Nifi func New(client client.Client, directClient client.Reader, scheme *runtime.Scheme, cluster *v1alpha1.NifiCluster) *Reconciler { return &Reconciler{ @@ -106,6 +101,10 @@ func (r *Reconciler) Reconcile(log logr.Logger) error { log.V(1).Info("Reconciling") + if r.NifiCluster.IsExternal() { + log.V(1).Info("Reconciled") + return nil + } // TODO : manage external LB uniqueHostnamesMap := make(map[string]struct{}) @@ -164,7 +163,7 @@ func (r *Reconciler) Reconcile(log logr.Logger) error { } - o := r.configMap(node.Id, nodeConfig, serverPass, clientPass, superUsers, log) + o := r.secretConfig(node.Id, nodeConfig, serverPass, clientPass, superUsers, log) err = k8sutil.Reconcile(log, r.Client, o, r.NifiCluster) if err != nil { return errors.WrapIfWithDetails(err, "failed to reconcile resource", "resource", o.GetObjectKind().GroupVersionKind()) @@ -224,20 +223,6 @@ func (r *Reconciler) Reconcile(log logr.Logger) error { return errors.WrapIf(err, "failed to reconcile resource") } - // TODO: Ensure usage and needing - err = scale.EnsureRemovedNodes(r.Client, r.NifiCluster) - if err != nil && len(r.NifiCluster.Status.NodesState) > 0 { - return err - } - - // Reconcile cluster communications - // Ensure the cluster is ready to receive actions - if !r.NifiCluster.IsReady() { - log.Info("Cluster is not ready yet, will wait until it is.") - // the cluster does not exist - should have been caught pre-flight - return errors.WrapIf(err, "Cluster is not ready yet, will wait until it is.") - } - configManager := config.GetClientConfigManager(r.Client, v1alpha1.ClusterReference{ Namespace: r.NifiCluster.Namespace, Name: r.NifiCluster.Name, @@ -247,6 +232,13 @@ func (r *Reconciler) Reconcile(log logr.Logger) error { // the cluster does not exist - should have been caught pre-flight return errors.WrapIf(err, "Failed to create HTTP 
client the for referenced cluster") } + + // TODO: Ensure usage and needing + err = scale.EnsureRemovedNodes(clientConfig, r.NifiCluster) + if err != nil && len(r.NifiCluster.Status.NodesState) > 0 { + return err + } + pgRootId, err := dataflow.RootProcessGroup(clientConfig) if err != nil { return err @@ -282,7 +274,7 @@ func (r *Reconciler) Reconcile(log logr.Logger) error { func (r *Reconciler) reconcileNifiPodDelete(log logr.Logger) error { podList := &corev1.PodList{} - matchingLabels := client.MatchingLabels(LabelsForNifi(r.NifiCluster.Name)) + matchingLabels := client.MatchingLabels(nifiutil.LabelsForNifi(r.NifiCluster.Name)) err := r.Client.List(context.TODO(), podList, client.ListOption(client.InNamespace(r.NifiCluster.Namespace)), client.ListOption(matchingLabels)) @@ -359,13 +351,13 @@ OUTERLOOP: return errors.WrapIfWithDetails(err, "could not delete node", "id", node.Labels["nodeId"]) } - err = r.Client.Delete(context.TODO(), &corev1.Secret{ObjectMeta: templates.ObjectMeta(fmt.Sprintf(templates.NodeConfigTemplate+"-%s", r.NifiCluster.Name, node.Labels["nodeId"]), LabelsForNifi(r.NifiCluster.Name), r.NifiCluster)}) + err = r.Client.Delete(context.TODO(), &corev1.Secret{ObjectMeta: templates.ObjectMeta(fmt.Sprintf(templates.NodeConfigTemplate+"-%s", r.NifiCluster.Name, node.Labels["nodeId"]), nifiutil.LabelsForNifi(r.NifiCluster.Name), r.NifiCluster)}) if err != nil { - return errors.WrapIfWithDetails(err, "could not delete configmap for node", "id", node.Labels["nodeId"]) + return errors.WrapIfWithDetails(err, "could not delete secret config for node", "id", node.Labels["nodeId"]) } if !r.NifiCluster.Spec.Service.HeadlessEnabled { - err = r.Client.Delete(context.TODO(), &corev1.Service{ObjectMeta: templates.ObjectMeta(fmt.Sprintf("%s-%s", r.NifiCluster.Name, node.Labels["nodeId"]), LabelsForNifi(r.NifiCluster.Name), r.NifiCluster)}) + err = r.Client.Delete(context.TODO(), &corev1.Service{ObjectMeta: templates.ObjectMeta(fmt.Sprintf("%s-%s", 
r.NifiCluster.Name, node.Labels["nodeId"]), nifiutil.LabelsForNifi(r.NifiCluster.Name), r.NifiCluster)}) if err != nil { if apierrors.IsNotFound(err) { // can happen when node was not fully initialized and now is deleted @@ -668,7 +660,7 @@ func (r *Reconciler) reconcileNifiPod(log logr.Logger, desiredPod *corev1.Pod) ( if r.NifiCluster.Status.State == v1alpha1.NifiClusterRollingUpgrading { // Check if any nifi pod is in terminating, pending or not ready state podList := &corev1.PodList{} - matchingLabels := client.MatchingLabels(LabelsForNifi(r.NifiCluster.Name)) + matchingLabels := client.MatchingLabels(nifiutil.LabelsForNifi(r.NifiCluster.Name)) err := r.Client.List(context.TODO(), podList, client.ListOption(client.InNamespace(r.NifiCluster.Namespace)), client.ListOption(matchingLabels)) if err != nil { return errors.WrapIf(err, "failed to reconcile resource"), false diff --git a/pkg/resources/nifi/pod.go b/pkg/resources/nifi/pod.go index b39e34d97..0a9bb1db0 100644 --- a/pkg/resources/nifi/pod.go +++ b/pkg/resources/nifi/pod.go @@ -16,7 +16,7 @@ package nifi import ( "fmt" - "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config/nificluster" + configcommon "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config/common" runtimeClient "sigs.k8s.io/controller-runtime/pkg/client" "sort" "strings" @@ -75,7 +75,7 @@ func (r *Reconciler) pod(id int32, nodeConfig *v1alpha1.NodeConfig, pvcs []corev podVolumes := append(volume, []corev1.Volume{ { - Name: nodeConfigMapVolumeMount, + Name: nodeSecretVolumeMount, VolumeSource: corev1.VolumeSource{ //ConfigMap: &corev1.ConfigMapVolumeSource{ Secret: &corev1.SecretVolumeSource{ @@ -95,7 +95,7 @@ func (r *Reconciler) pod(id int32, nodeConfig *v1alpha1.NodeConfig, pvcs []corev podVolumeMounts := append(volumeMount, []corev1.VolumeMount{ { - Name: nodeConfigMapVolumeMount, + Name: nodeSecretVolumeMount, MountPath: "/opt/nifi/nifi-current/tmp", }, { @@ -133,7 +133,7 @@ func (r *Reconciler) pod(id int32, nodeConfig 
*v1alpha1.NodeConfig, pvcs []corev ObjectMeta: templates.ObjectMetaWithGeneratedNameAndAnnotations( nifiutil.ComputeNodeName(id, r.NifiCluster.Name), util.MergeLabels( - LabelsForNifi(r.NifiCluster.Name), + nifiutil.LabelsForNifi(r.NifiCluster.Name), map[string]string{"nodeId": fmt.Sprintf("%d", id)}, ), util.MergeAnnotations(anntotationsToMerge...), r.NifiCluster, @@ -219,7 +219,7 @@ func generatePodAntiAffinity(clusterName string, hardRuleEnabled bool) *corev1.P RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ { LabelSelector: &metav1.LabelSelector{ - MatchLabels: LabelsForNifi(clusterName), + MatchLabels: nifiutil.LabelsForNifi(clusterName), }, TopologyKey: "kubernetes.io/hostname", }, @@ -232,7 +232,7 @@ func generatePodAntiAffinity(clusterName string, hardRuleEnabled bool) *corev1.P Weight: int32(100), PodAffinityTerm: corev1.PodAffinityTerm{ LabelSelector: &metav1.LabelSelector{ - MatchLabels: LabelsForNifi(clusterName), + MatchLabels: nifiutil.LabelsForNifi(clusterName), }, TopologyKey: "kubernetes.io/hostname", }, @@ -376,7 +376,7 @@ func (r *Reconciler) createNifiNodeContainer(nodeConfig *v1alpha1.NodeConfig, id nifiNodeContainersPorts = append(nifiNodeContainersPorts, r.generateDefaultContainerPort()...) 
readinessCommand := fmt.Sprintf(`curl -kv http://$(hostname -f):%d/nifi-api`, - GetServerPort(&r.NifiCluster.Spec.ListenersConfig)) + GetServerPort(r.NifiCluster.Spec.ListenersConfig)) if r.NifiCluster.Spec.ListenersConfig.SSLSecrets != nil { readinessCommand = fmt.Sprintf(`curl -kv --cert %s/%s --key %s/%s https://$(hostname -f):%d/nifi`, @@ -384,7 +384,7 @@ func (r *Reconciler) createNifiNodeContainer(nodeConfig *v1alpha1.NodeConfig, id v1alpha1.TLSCert, serverKeystorePath, v1alpha1.TLSKey, - GetServerPort(&r.NifiCluster.Spec.ListenersConfig)) + GetServerPort(r.NifiCluster.Spec.ListenersConfig)) } failCondition := "" @@ -401,7 +401,7 @@ func (r *Reconciler) createNifiNodeContainer(nodeConfig *v1alpha1.NodeConfig, id requestClusterStatus := fmt.Sprintf("curl --fail -v http://%s/nifi-api/controller/cluster > $NIFI_BASE_DIR/cluster.state", nifiutil.GenerateRequestNiFiAllNodeAddressFromCluster(r.NifiCluster)) - if nificluster.UseSSL(r.NifiCluster) { + if configcommon.UseSSL(r.NifiCluster) { requestClusterStatus = fmt.Sprintf( "curl --fail -kv --cert /var/run/secrets/java.io/keystores/client/tls.crt --key /var/run/secrets/java.io/keystores/client/tls.key https://%s/nifi-api/controller/cluster > $NIFI_BASE_DIR/cluster.state", nifiutil.GenerateRequestNiFiAllNodeAddressFromCluster(r.NifiCluster)) @@ -494,7 +494,7 @@ exec bin/nifi.sh run`, resolveIp, removesFileAction)} FailureThreshold: livenessHealthCheckThreshold, Handler: corev1.Handler{ TCPSocket: &corev1.TCPSocketAction{ - Port: *util.IntstrPointer(int(GetServerPort(&r.NifiCluster.Spec.ListenersConfig))), + Port: *util.IntstrPointer(int(GetServerPort(r.NifiCluster.Spec.ListenersConfig))), }, }, }, diff --git a/pkg/resources/nifi/poddisruptionbudget.go b/pkg/resources/nifi/poddisruptionbudget.go index d64c51292..f04ba6ab6 100644 --- a/pkg/resources/nifi/poddisruptionbudget.go +++ b/pkg/resources/nifi/poddisruptionbudget.go @@ -2,6 +2,7 @@ package nifi import ( "fmt" + nifiutil 
"github.com/Orange-OpenSource/nifikop/pkg/util/nifi" "math" runtimeClient "sigs.k8s.io/controller-runtime/pkg/client" "strconv" @@ -30,14 +31,14 @@ func (r *Reconciler) podDisruptionBudget(log logr.Logger) (runtimeClient.Object, }, ObjectMeta: templates.ObjectMetaWithAnnotations( fmt.Sprintf("%s-pdb", r.NifiCluster.Name), - util.MergeLabels(LabelsForNifi(r.NifiCluster.Name), r.NifiCluster.Labels), + util.MergeLabels(nifiutil.LabelsForNifi(r.NifiCluster.Name), r.NifiCluster.Labels), r.NifiCluster.Spec.Service.Annotations, r.NifiCluster, ), Spec: policyv1beta1.PodDisruptionBudgetSpec{ MinAvailable: &minAvailable, Selector: &metav1.LabelSelector{ - MatchLabels: LabelsForNifi(r.NifiCluster.Name), + MatchLabels: nifiutil.LabelsForNifi(r.NifiCluster.Name), }, }, }, nil diff --git a/pkg/resources/nifi/pvc.go b/pkg/resources/nifi/pvc.go index 1aa6b905b..6c964caf7 100644 --- a/pkg/resources/nifi/pvc.go +++ b/pkg/resources/nifi/pvc.go @@ -19,6 +19,7 @@ import ( "github.com/Orange-OpenSource/nifikop/api/v1alpha1" "github.com/Orange-OpenSource/nifikop/pkg/resources/templates" "github.com/Orange-OpenSource/nifikop/pkg/util" + nifiutil "github.com/Orange-OpenSource/nifikop/pkg/util/nifi" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -29,7 +30,7 @@ func (r *Reconciler) pvc(id int32, storage v1alpha1.StorageConfig, log logr.Logg ObjectMeta: templates.ObjectMetaWithGeneratedNameAndAnnotations( fmt.Sprintf(templates.NodeStorageTemplate, r.NifiCluster.Name, id), util.MergeLabels( - LabelsForNifi(r.NifiCluster.Name), + nifiutil.LabelsForNifi(r.NifiCluster.Name), map[string]string{"nodeId": fmt.Sprintf("%d", id)}, ), map[string]string{"mountPath": storage.MountPath, "storageName": storage.Name}, r.NifiCluster), diff --git a/pkg/resources/nifi/configmap.go b/pkg/resources/nifi/secretconfig.go similarity index 97% rename from pkg/resources/nifi/configmap.go rename to pkg/resources/nifi/secretconfig.go index 98f66f6fe..8c636db25 100644 --- 
a/pkg/resources/nifi/configmap.go +++ b/pkg/resources/nifi/secretconfig.go @@ -19,7 +19,8 @@ import ( "context" "fmt" "github.com/Orange-OpenSource/nifikop/pkg/errorfactory" - "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config/nificluster" + configcommon "github.com/Orange-OpenSource/nifikop/pkg/nificlient/config/common" + nifiutil "github.com/Orange-OpenSource/nifikop/pkg/util/nifi" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" runtimeClient "sigs.k8s.io/controller-runtime/pkg/client" @@ -41,12 +42,12 @@ import ( //func encodeBase64(toEncode string) []byte { // return []byte(base64.StdEncoding.EncodeToString([]byte(toEncode))) //} -func (r *Reconciler) configMap(id int32, nodeConfig *v1alpha1.NodeConfig, serverPass, clientPass string, superUsers []string, log logr.Logger) runtimeClient.Object { - configMap := &corev1.Secret{ +func (r *Reconciler) secretConfig(id int32, nodeConfig *v1alpha1.NodeConfig, serverPass, clientPass string, superUsers []string, log logr.Logger) runtimeClient.Object { + secret := &corev1.Secret{ ObjectMeta: templates.ObjectMeta( fmt.Sprintf(templates.NodeConfigTemplate+"-%d", r.NifiCluster.Name, id), util.MergeLabels( - LabelsForNifi(r.NifiCluster.Name), + nifiutil.LabelsForNifi(r.NifiCluster.Name), map[string]string{"nodeId": fmt.Sprintf("%d", id)}, ), r.NifiCluster, @@ -62,10 +63,10 @@ func (r *Reconciler) configMap(id int32, nodeConfig *v1alpha1.NodeConfig, server }, } - if nificluster.UseSSL(r.NifiCluster) { - configMap.Data["authorizers.xml"] = []byte(r.getAuthorizersConfigString(nodeConfig, id, log)) + if configcommon.UseSSL(r.NifiCluster) { + secret.Data["authorizers.xml"] = []byte(r.getAuthorizersConfigString(nodeConfig, id, log)) } - return configMap + return secret } //////////////////////////////////// @@ -129,7 +130,7 @@ func (r *Reconciler) getNifiPropertiesConfigString(nConfig *v1alpha1.NodeConfig, base := r.GetNifiPropertiesBase(id) var dnsNames []string for _, dnsName := range 
utilpki.ClusterDNSNames(r.NifiCluster, id) { - dnsNames = append(dnsNames, fmt.Sprintf("%s:%d", dnsName, GetServerPort(&r.NifiCluster.Spec.ListenersConfig))) + dnsNames = append(dnsNames, fmt.Sprintf("%s:%d", dnsName, GetServerPort(r.NifiCluster.Spec.ListenersConfig))) } webProxyHosts := strings.Join(dnsNames, ",") @@ -137,14 +138,14 @@ func (r *Reconciler) getNifiPropertiesConfigString(nConfig *v1alpha1.NodeConfig, webProxyHosts = strings.Join(append(dnsNames, base.WebProxyHosts...), ",") } - useSSL := nificluster.UseSSL(r.NifiCluster) + useSSL := configcommon.UseSSL(r.NifiCluster) var out bytes.Buffer t := template.Must(template.New("nConfig-config").Parse(config.NifiPropertiesTemplate)) if err := t.Execute(&out, map[string]interface{}{ "NifiCluster": r.NifiCluster, "Id": id, "ListenerConfig": config.GenerateListenerSpecificConfig( - &r.NifiCluster.Spec.ListenersConfig, + r.NifiCluster.Spec.ListenersConfig, id, r.NifiCluster.Namespace, r.NifiCluster.Name, diff --git a/pkg/resources/nifi/service.go b/pkg/resources/nifi/service.go index 8bf22202c..80ed8cd50 100644 --- a/pkg/resources/nifi/service.go +++ b/pkg/resources/nifi/service.go @@ -36,14 +36,14 @@ func (r *Reconciler) service(id int32, log logr.Logger) runtimeClient.Object { ObjectMeta: templates.ObjectMeta(nifiutil.ComputeNodeName(id, r.NifiCluster.Name), //fmt.Sprintf("%s-%d", r.NifiCluster.Name, id), util.MergeLabels( - LabelsForNifi(r.NifiCluster.Name), + nifiutil.LabelsForNifi(r.NifiCluster.Name), map[string]string{"nodeId": fmt.Sprintf("%d", id)}, ), r.NifiCluster), Spec: corev1.ServiceSpec{ Type: corev1.ServiceTypeClusterIP, SessionAffinity: corev1.ServiceAffinityNone, - Selector: util.MergeLabels(LabelsForNifi(r.NifiCluster.Name), map[string]string{"nodeId": fmt.Sprintf("%d", id)}), + Selector: util.MergeLabels(nifiutil.LabelsForNifi(r.NifiCluster.Name), map[string]string{"nodeId": fmt.Sprintf("%d", id)}), Ports: usedPorts, }, } @@ -70,12 +70,12 @@ func (r *Reconciler) externalServices(log 
logr.Logger) []runtimeClient.Object { usedPorts := generateServicePortForInternalListeners(listeners) services = append(services, &corev1.Service{ - ObjectMeta: templates.ObjectMetaWithAnnotations(eService.Name, LabelsForNifi(r.NifiCluster.Name), + ObjectMeta: templates.ObjectMetaWithAnnotations(eService.Name, nifiutil.LabelsForNifi(r.NifiCluster.Name), *annotations, r.NifiCluster), Spec: corev1.ServiceSpec{ Type: eService.Spec.Type, SessionAffinity: corev1.ServiceAffinityClientIP, - Selector: LabelsForNifi(r.NifiCluster.Name), + Selector: nifiutil.LabelsForNifi(r.NifiCluster.Name), Ports: usedPorts, ClusterIP: eService.Spec.ClusterIP, ExternalIPs: eService.Spec.ExternalIPs, diff --git a/pkg/resources/templates/variables.go b/pkg/resources/templates/variables.go index a69842bd2..2eada9da9 100644 --- a/pkg/resources/templates/variables.go +++ b/pkg/resources/templates/variables.go @@ -15,6 +15,7 @@ package templates const ( - NodeConfigTemplate = "%s-config" - NodeStorageTemplate = "%s-%d-storage" + NodeConfigTemplate = "%s-config" + NodeStorageTemplate = "%s-%d-storage" + ExternalClusterSecretTemplate = "%s-basic-secret" ) diff --git a/pkg/util/clientconfig/common.go b/pkg/util/clientconfig/common.go index b3b93ee3e..a4657c4de 100644 --- a/pkg/util/clientconfig/common.go +++ b/pkg/util/clientconfig/common.go @@ -1,7 +1,9 @@ package clientconfig import ( + "context" "crypto/tls" + "github.com/go-logr/logr" ) const ( @@ -11,7 +13,6 @@ const ( type Manager interface { BuildConfig() (*NifiConfig, error) BuildConnect() (ClusterConnect, error) - IsExternal() bool } type ClusterConnect interface { @@ -19,7 +20,7 @@ type ClusterConnect interface { IsInternal() bool IsExternal() bool ClusterLabelString() string - IsReady() bool + IsReady(log logr.Logger) bool Id() string } @@ -30,9 +31,13 @@ type NifiConfig struct { NifiURI string UseSSL bool TLSConfig *tls.Config + ProxyUrl string OperationTimeout int64 RootProcessGroupId string + NodesContext map[int32]context.Context + + 
SkipDescribeCluster bool } type NodeUri struct { diff --git a/pkg/util/nifi/common.go b/pkg/util/nifi/common.go index fe8534faf..9d9f3592e 100644 --- a/pkg/util/nifi/common.go +++ b/pkg/util/nifi/common.go @@ -277,3 +277,9 @@ func determineInternalListenerForComm(internalListeners []v1alpha1.InternalListe } return httpServerPortId } + +// LabelsForNifi returns the labels for selecting the resources +// belonging to the given Nifi CR name. +func LabelsForNifi(name string) map[string]string { + return map[string]string{"app": "nifi", "nifi_cr": name} +} diff --git a/pkg/util/pki/pki_common_test.go b/pkg/util/pki/pki_common_test.go index 499bc1e5d..d3b8a48f7 100644 --- a/pkg/util/pki/pki_common_test.go +++ b/pkg/util/pki/pki_common_test.go @@ -31,6 +31,7 @@ func testCluster(t *testing.T) *v1alpha1.NifiCluster { cluster.Name = "test-cluster" cluster.Namespace = "test-namespace" cluster.Spec = v1alpha1.NifiClusterSpec{} + cluster.Spec.ListenersConfig = &v1alpha1.ListenersConfig{} cluster.Spec.Nodes = []v1alpha1.Node{ {Id: 0}, diff --git a/pkg/util/util.go b/pkg/util/util.go index e40681523..c89b7e525 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -205,6 +205,13 @@ func NifiUserSliceContains(list []*v1alpha1.NifiUser, u *v1alpha1.NifiUser) bool return false } +func NodesToIdList(nodes []v1alpha1.Node) (ids []int32) { + for _, node := range nodes { + ids = append(ids, node.Id) + } + return +} + // computes the max between 2 ints func Max(x, y int) int { if x < y { From 9c12525d608aec459917efbd9ae6ba43b3669ff8 Mon Sep 17 00:00:00 2001 From: erdrix Date: Mon, 27 Sep 2021 10:10:20 +0200 Subject: [PATCH 08/18] fix cluster removing --- controllers/nifidataflow_controller.go | 37 ++++++++---------- .../nifiparametercontext_controller.go | 37 +++++++++--------- controllers/nifiregistryclient_controller.go | 39 +++++++++---------- controllers/nifiuser_controller.go | 38 +++++++++--------- controllers/nifiusergroup_controller.go | 36 ++++++++--------- 
pkg/nificlient/config/tls/tls_config.go | 2 +- 6 files changed, 90 insertions(+), 99 deletions(-) diff --git a/controllers/nifidataflow_controller.go b/controllers/nifidataflow_controller.go index a95eea434..fced8ba0a 100644 --- a/controllers/nifidataflow_controller.go +++ b/controllers/nifidataflow_controller.go @@ -189,34 +189,31 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request // Generate the connect object if clusterConnect, err = configManager.BuildConnect(); err != nil { - if !clusterConnect.IsExternal() { - // This shouldn't trigger anymore, but leaving it here as a safetybelt - if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - r.Log.Info("Cluster is already gone, there is nothing we can do") - if err = r.removeFinalizer(ctx, instance); err != nil { + // This shouldn't trigger anymore, but leaving it here as a safetybelt + if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { + r.Log.Info("Cluster is already gone, there is nothing we can do") + if err = r.removeFinalizer(ctx, instance); err != nil { return RequeueWithError(r.Log, "failed to remove finalizer", err) } - return Reconciled() - } + return Reconciled() + } - // If the referenced cluster no more exist, just skip the deletion requirement in cluster ref change case. - if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { - if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { - return RequeueWithError(r.Log, "could not apply last state to annotation", err) - } - if err := r.Client.Update(ctx, current); err != nil { + // If the referenced cluster no more exist, just skip the deletion requirement in cluster ref change case. 
+ if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) + } + if err := r.Client.Update(ctx, current); err != nil { return RequeueWithError(r.Log, "failed to update NifiDataflow", err) } return RequeueAfter(time.Duration(15) * time.Second) - } - - r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", - fmt.Sprintf("Failed to lookup reference cluster : %s in %s", + } + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to lookup reference cluster : %s in %s", instance.Spec.ClusterRef.Name, currentClusterRef.Namespace)) - // the cluster does not exist - should have been caught pre-flight - return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) - } + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) } // Generate the client configuration. 
diff --git a/controllers/nifiparametercontext_controller.go b/controllers/nifiparametercontext_controller.go index d35523b05..880028fdc 100644 --- a/controllers/nifiparametercontext_controller.go +++ b/controllers/nifiparametercontext_controller.go @@ -135,35 +135,34 @@ func (r *NifiParameterContextReconciler) Reconcile(ctx context.Context, req ctrl // Generate the connect object if clusterConnect, err = configManager.BuildConnect(); err != nil { - if !clusterConnect.IsExternal() { // This shouldn't trigger anymore, but leaving it here as a safetybelt - if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - r.Log.Info("Cluster is already gone, there is nothing we can do") - if err = r.removeFinalizer(ctx, instance); err != nil { + if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { + r.Log.Info("Cluster is already gone, there is nothing we can do") + if err = r.removeFinalizer(ctx, instance); err != nil { return RequeueWithError(r.Log, "failed to remove finalizer", err) } - return Reconciled() + return Reconciled() + } + // If the referenced cluster no more exist, just skip the deletion requirement in cluster ref change case. + if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) } - // If the referenced cluster no more exist, just skip the deletion requirement in cluster ref change case. 
- if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { - if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { - return RequeueWithError(r.Log, "could not apply last state to annotation", err) - } - if err := r.Client.Update(ctx, current); err != nil { - return RequeueWithError(r.Log, "failed to update NifiParameterContext", err) - } - return RequeueAfter(time.Duration(15) * time.Second) + if err := r.Client.Update(ctx, current); err != nil { + return RequeueWithError(r.Log, "failed to update NifiParameterContext", err) } + return RequeueAfter(time.Duration(15) * time.Second) + } - r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", - fmt.Sprintf("Failed to lookup reference cluster : %s in %s", + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to lookup reference cluster : %s in %s", instance.Spec.ClusterRef.Name, clusterRef.Namespace)) - // the cluster does not exist - should have been caught pre-flight - return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) - } + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) } + // Generate the client configuration. 
clientConfig, err = configManager.BuildConfig() if err != nil { diff --git a/controllers/nifiregistryclient_controller.go b/controllers/nifiregistryclient_controller.go index 188e08bbf..566bfb402 100644 --- a/controllers/nifiregistryclient_controller.go +++ b/controllers/nifiregistryclient_controller.go @@ -112,34 +112,33 @@ func (r *NifiRegistryClientReconciler) Reconcile(ctx context.Context, req ctrl.R // Generate the connect object if clusterConnect, err = configManager.BuildConnect(); err != nil { - if !clusterConnect.IsExternal() { - // This shouldn't trigger anymore, but leaving it here as a safetybelt - if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - r.Log.Info("Cluster is already gone, there is nothing we can do") - if err = r.removeFinalizer(ctx, instance); err != nil { + // This shouldn't trigger anymore, but leaving it here as a safetybelt + if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { + r.Log.Info("Cluster is already gone, there is nothing we can do") + if err = r.removeFinalizer(ctx, instance); err != nil { return RequeueWithError(r.Log, "failed to remove finalizer", err) } - return Reconciled() + return Reconciled() + } + // If the referenced cluster no more exist, just skip the deletion requirement in cluster ref change case. + if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) } - // If the referenced cluster no more exist, just skip the deletion requirement in cluster ref change case. 
- if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { - if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { - return RequeueWithError(r.Log, "could not apply last state to annotation", err) - } - if err := r.Client.Update(ctx, current); err != nil { - return RequeueWithError(r.Log, "failed to update NifiRegistryClient", err) - } - return RequeueAfter(time.Duration(15) * time.Second) + if err := r.Client.Update(ctx, current); err != nil { + return RequeueWithError(r.Log, "failed to update NifiRegistryClient", err) } + return RequeueAfter(time.Duration(15) * time.Second) + } - r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", - fmt.Sprintf("Failed to lookup reference cluster : %s in %s", + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to lookup reference cluster : %s in %s", instance.Spec.ClusterRef.Name, clusterRef.Namespace)) - // the cluster does not exist - should have been caught pre-flight - return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) - } + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) } + // Generate the client configuration. 
clientConfig, err = configManager.BuildConfig() if err != nil { diff --git a/controllers/nifiuser_controller.go b/controllers/nifiuser_controller.go index bd170092b..0a6330f85 100644 --- a/controllers/nifiuser_controller.go +++ b/controllers/nifiuser_controller.go @@ -118,32 +118,30 @@ func (r *NifiUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c // Generate the connect object if clusterConnect, err = configManager.BuildConnect(); err != nil { - if !clusterConnect.IsExternal() { - // This shouldn't trigger anymore, but leaving it here as a safetybelt - if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - r.Log.Info("Cluster is gone already, there is nothing we can do") - if err = r.removeFinalizer(ctx, instance); err != nil { - return RequeueWithError(r.Log, "failed to remove finalizer from NifiUser", err) - } - return Reconciled() + // This shouldn't trigger anymore, but leaving it here as a safetybelt + if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { + r.Log.Info("Cluster is gone already, there is nothing we can do") + if err = r.removeFinalizer(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to remove finalizer from NifiUser", err) } + return Reconciled() + } - // If the referenced cluster no more exist, just skip the deletion requirement in cluster ref change case. - if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { - if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { - return RequeueWithError(r.Log, "could not apply last state to annotation", err) - } - if err := r.Client.Update(ctx, current); err != nil { + // If the referenced cluster no more exist, just skip the deletion requirement in cluster ref change case. 
+ if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) + } + if err := r.Client.Update(ctx, current); err != nil { return RequeueWithError(r.Log, "failed to update NifiUser", err) } - return RequeueAfter(time.Duration(15) * time.Second) - } + return RequeueAfter(time.Duration(15) * time.Second) + } - r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", - fmt.Sprintf("Failed to lookup reference cluster : %s in %s", + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to lookup reference cluster : %s in %s", instance.Spec.ClusterRef.Name, clusterRef.Namespace)) - return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) - } + return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) } // Get the referenced NifiCluster diff --git a/controllers/nifiusergroup_controller.go b/controllers/nifiusergroup_controller.go index 837435ec1..37ec1abbf 100644 --- a/controllers/nifiusergroup_controller.go +++ b/controllers/nifiusergroup_controller.go @@ -152,34 +152,32 @@ func (r *NifiUserGroupReconciler) Reconcile(ctx context.Context, req ctrl.Reques // Generate the connect object if clusterConnect, err = configManager.BuildConnect(); err != nil { - if !clusterConnect.IsExternal() { - // This shouldn't trigger anymore, but leaving it here as a safetybelt - if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - r.Log.Info("Cluster is already gone, there is nothing we can do") - if err = r.removeFinalizer(ctx, instance); err != nil { + // This shouldn't trigger anymore, but leaving it here as a safetybelt + if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { + r.Log.Info("Cluster is already gone, there is nothing we can do") + if err = 
r.removeFinalizer(ctx, instance); err != nil { return RequeueWithError(r.Log, "failed to remove finalizer", err) } - return Reconciled() - } + return Reconciled() + } - // If the referenced cluster no more exist, just skip the deletion requirement in cluster ref change case. - if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { - if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { - return RequeueWithError(r.Log, "could not apply last state to annotation", err) - } - if err := r.Client.Update(ctx, current); err != nil { + // If the referenced cluster no more exist, just skip the deletion requirement in cluster ref change case. + if !v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) { + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation", err) + } + if err := r.Client.Update(ctx, current); err != nil { return RequeueWithError(r.Log, "failed to update NifiDataflow", err) } return RequeueAfter(time.Duration(15) * time.Second) - } + } - r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", - fmt.Sprintf("Failed to lookup reference cluster : %s in %s", + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to lookup reference cluster : %s in %s", instance.Spec.ClusterRef.Name, clusterRef.Namespace)) - // the cluster does not exist - should have been caught pre-flight - return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) - } + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) } // Generate the client configuration. 
diff --git a/pkg/nificlient/config/tls/tls_config.go b/pkg/nificlient/config/tls/tls_config.go index 2365659db..99ee70b6c 100644 --- a/pkg/nificlient/config/tls/tls_config.go +++ b/pkg/nificlient/config/tls/tls_config.go @@ -25,7 +25,7 @@ func (n *tls) BuildConfig() (*clientconfig.NifiConfig, error) { func (n *tls) BuildConnect() (cluster clientconfig.ClusterConnect, err error) { var c *v1alpha1.NifiCluster if c, err = k8sutil.LookupNifiCluster(n.client, n.clusterRef.Name, n.clusterRef.Namespace); err != nil { - return nil, err + return } if !c.IsExternal() { From 4f083a345f54939cee20c41ca9ed40aa83202a68 Mon Sep 17 00:00:00 2001 From: erdrix Date: Mon, 27 Sep 2021 21:45:17 +0200 Subject: [PATCH 09/18] fix rbac --- config/rbac/leader_election_role.yaml | 2 -- helm/nifikop/templates/role.yaml | 2 -- 2 files changed, 4 deletions(-) diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml index 6334cc51c..eae283aa3 100644 --- a/config/rbac/leader_election_role.yaml +++ b/config/rbac/leader_election_role.yaml @@ -5,10 +5,8 @@ metadata: name: leader-election-role rules: - apiGroups: - - "" - coordination.k8s.io resources: - - configmaps - leases verbs: - get diff --git a/helm/nifikop/templates/role.yaml b/helm/nifikop/templates/role.yaml index 007789115..814d217ce 100644 --- a/helm/nifikop/templates/role.yaml +++ b/helm/nifikop/templates/role.yaml @@ -134,10 +134,8 @@ rules: - update - watch - apiGroups: - - "" - coordination.k8s.io resources: - - configmaps - leases verbs: - get From 8503a0580dde55ef145b9fe82bd2c45796ffa8d6 Mon Sep 17 00:00:00 2001 From: erdrix Date: Mon, 27 Sep 2021 21:50:00 +0200 Subject: [PATCH 10/18] ignore vendor --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ddac32571..3f1496eb8 100644 --- a/Makefile +++ b/Makefile @@ -86,7 +86,7 @@ manager: generate fmt vet # Generate code generate: controller-gen @echo "Generate zzz-deepcopy objects" - $(CONTROLLER_GEN) 
object:headerFile="hack/boilerplate.go.txt" paths="./..." + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." -mod=mod # Generate manifests e.g. CRD, RBAC etc. manifests: controller-gen From e365bdcb126ceabbfbbc3aa5d706d2c4ade81666 Mon Sep 17 00:00:00 2001 From: erdrix Date: Tue, 28 Sep 2021 11:01:36 +0200 Subject: [PATCH 11/18] fix user & usergroup reconciliation --- Makefile | 2 +- api/v1alpha1/common_types.go | 8 +++ api/v1alpha1/nifidataflow_types.go | 27 ++++++-- api/v1alpha1/zz_generated.deepcopy.go | 5 -- .../bases/nifi.orange.com_nifidataflows.yaml | 11 +++- controllers/nifidataflow_controller.go | 11 +++- pkg/clientwrappers/user/user.go | 62 ++++++++++++++++--- pkg/clientwrappers/usergroup/usergroup.go | 39 +++++++----- pkg/k8sutil/resource.go | 12 +++- 9 files changed, 132 insertions(+), 45 deletions(-) diff --git a/Makefile b/Makefile index 3f1496eb8..ddac32571 100644 --- a/Makefile +++ b/Makefile @@ -86,7 +86,7 @@ manager: generate fmt vet # Generate code generate: controller-gen @echo "Generate zzz-deepcopy objects" - $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." -mod=mod + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." # Generate manifests e.g. CRD, RBAC etc. 
manifests: controller-gen diff --git a/api/v1alpha1/common_types.go b/api/v1alpha1/common_types.go index 4bbccdf56..d0be4bfb5 100644 --- a/api/v1alpha1/common_types.go +++ b/api/v1alpha1/common_types.go @@ -430,3 +430,11 @@ func SecretRefsEquals(secretRefs []SecretReference) bool { } return true } + +type DataflowSyncMode string + +const ( + SyncNever DataflowSyncMode = "never" + SyncOnce DataflowSyncMode = "once" + SyncAlways DataflowSyncMode = "always" +) diff --git a/api/v1alpha1/nifidataflow_types.go b/api/v1alpha1/nifidataflow_types.go index 247476a3b..9bfa90884 100644 --- a/api/v1alpha1/nifidataflow_types.go +++ b/api/v1alpha1/nifidataflow_types.go @@ -35,8 +35,9 @@ type NifiDataflowSpec struct { FlowVersion *int32 `json:"flowVersion,omitempty"` // contains the reference to the ParameterContext with the one the dataflow is linked. ParameterContextRef *ParameterContextReference `json:"parameterContextRef,omitempty"` - // if the flow will be ran once or continuously checked - RunOnce *bool `json:"runOnce,omitempty"` + // if the flow will be synchronized once, continuously or never + // +kubebuilder:validation:Enum={"never","always","once"} + SyncMode DataflowSyncMode `json:"syncMode,omitempty"` // whether the flow is considered as ran if some controller services are still invalid or not. SkipInvalidControllerService bool `json:"skipInvalidControllerService,omitempty"` // whether the flow is considered as ran if some components are still invalid or not. 
@@ -143,11 +144,25 @@ func init() { SchemeBuilder.Register(&NifiDataflow{}, &NifiDataflowList{}) } -func (d *NifiDataflowSpec) GetRunOnce() bool { - if d.RunOnce != nil { - return *d.RunOnce +func (d *NifiDataflowSpec) SyncOnce() bool { + if d.SyncMode == SyncOnce { + return true } - return true + return false +} + +func (d *NifiDataflowSpec) SyncAlways() bool { + if d.SyncMode == SyncAlways { + return true + } + return false +} + +func (d *NifiDataflowSpec) SyncNever() bool { + if d.SyncMode == SyncNever { + return true + } + return false } func (d *NifiDataflowSpec) GetParentProcessGroupID(rootProcessGroupId string) string { diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index de8cae631..ca0bb2cb8 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -556,11 +556,6 @@ func (in *NifiDataflowSpec) DeepCopyInto(out *NifiDataflowSpec) { *out = new(ParameterContextReference) **out = **in } - if in.RunOnce != nil { - in, out := &in.RunOnce, &out.RunOnce - *out = new(bool) - **out = **in - } out.ClusterRef = in.ClusterRef if in.RegistryClientRef != nil { in, out := &in.RegistryClientRef, &out.RegistryClientRef diff --git a/config/crd/bases/nifi.orange.com_nifidataflows.yaml b/config/crd/bases/nifi.orange.com_nifidataflows.yaml index 265322bfd..5d7518c7b 100644 --- a/config/crd/bases/nifi.orange.com_nifidataflows.yaml +++ b/config/crd/bases/nifi.orange.com_nifidataflows.yaml @@ -84,9 +84,6 @@ spec: required: - name type: object - runOnce: - description: if the flow will be ran once or continuously checked - type: boolean skipInvalidComponent: description: whether the flow is considered as ran if some components are still invalid or not. @@ -95,6 +92,14 @@ spec: description: whether the flow is considered as ran if some controller services are still invalid or not. 
type: boolean + syncMode: + description: if the flow will be synchronized once, continuously or + never + enum: + - never + - always + - once + type: string updateStrategy: description: 'describes the way the operator will deal with data when a dataflow will be updated : drop or drain' diff --git a/controllers/nifidataflow_controller.go b/controllers/nifidataflow_controller.go index fced8ba0a..e4aafce66 100644 --- a/controllers/nifidataflow_controller.go +++ b/controllers/nifidataflow_controller.go @@ -261,7 +261,8 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request return RequeueAfter(time.Duration(15) * time.Second) } - if instance.Spec.GetRunOnce() && instance.Status.State == v1alpha1.DataflowStateRan { + if (instance.Spec.SyncNever() && len(instance.Status.State) > 0) || + (instance.Spec.SyncOnce() && instance.Status.State == v1alpha1.DataflowStateRan) { return Reconciled() } @@ -319,6 +320,10 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request return RequeueWithError(r.Log, "failed to update NifiDataflow", err) } + if instance.Spec.SyncNever(){ + return Reconciled() + } + // In case where the flow is not sync if instance.Status.State == v1alpha1.DataflowStateOutOfSync { r.Recorder.Event(instance, corev1.EventTypeNormal, "Synchronizing", @@ -381,7 +386,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request if instance.Status.State == v1alpha1.DataflowStateCreated || instance.Status.State == v1alpha1.DataflowStateStarting || instance.Status.State == v1alpha1.DataflowStateInSync || - (!instance.Spec.GetRunOnce() && instance.Status.State == v1alpha1.DataflowStateRan) { + (!instance.Spec.SyncOnce() && instance.Status.State == v1alpha1.DataflowStateRan) { instance.Status.State = v1alpha1.DataflowStateStarting if err := r.Client.Status().Update(ctx, instance); err != nil { @@ -434,7 +439,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request 
instance.Name, instance.Spec.BucketId, instance.Spec.FlowId, strconv.FormatInt(int64(*instance.Spec.FlowVersion), 10))) - if instance.Spec.GetRunOnce() { + if instance.Spec.SyncOnce() { return Reconciled() } diff --git a/pkg/clientwrappers/user/user.go b/pkg/clientwrappers/user/user.go index 1bbca316d..29da07ffc 100644 --- a/pkg/clientwrappers/user/user.go +++ b/pkg/clientwrappers/user/user.go @@ -4,6 +4,7 @@ import ( "github.com/Orange-OpenSource/nifikop/api/v1alpha1" "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers" "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers/accesspolicies" + "github.com/Orange-OpenSource/nifikop/pkg/clientwrappers/usergroup" "github.com/Orange-OpenSource/nifikop/pkg/common" "github.com/Orange-OpenSource/nifikop/pkg/nificlient" "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" @@ -108,11 +109,16 @@ func SyncUser(user *v1alpha1.NifiUser, config *clientconfig.NifiConfig) (*v1alph status.Id = entity.Id // Remove from access policy - for _, entity := range entity.Component.AccessPolicies { + for _, ent := range entity.Component.AccessPolicies { contains := false - for _, accessPolicy := range user.Spec.AccessPolicies { - if entity.Component.Action == string(accessPolicy.Action) && - entity.Component.Resource == accessPolicy.GetResource(config.RootProcessGroupId) { + for _, group := range entity.Component.UserGroups { + userGroupEntity, err := nClient.GetUserGroup(group.Id) + if err := clientwrappers.ErrorGetOperation(log, err, "Get user-group"); err != nil { + return nil, err + } + + if userContainsAccessPolicy(user, ent, config.RootProcessGroupId) || + userGroupEntityContainsAccessPolicyEntity(userGroupEntity, ent) { contains = true break } @@ -121,9 +127,9 @@ func SyncUser(user *v1alpha1.NifiUser, config *clientconfig.NifiConfig) (*v1alph if err := accesspolicies.UpdateAccessPolicyEntity( &nigoapi.AccessPolicyEntity{ Component: &nigoapi.AccessPolicyDto{ - Id: entity.Component.Id, - Resource: 
entity.Component.Resource, - Action: entity.Component.Action, + Id: ent.Component.Id, + Resource: ent.Component.Resource, + Action: ent.Component.Action, }, }, []*v1alpha1.NifiUser{}, []*v1alpha1.NifiUser{user}, @@ -136,9 +142,14 @@ func SyncUser(user *v1alpha1.NifiUser, config *clientconfig.NifiConfig) (*v1alph // add for _, accessPolicy := range user.Spec.AccessPolicies { contains := false - for _, entity := range entity.Component.AccessPolicies { - if entity.Component.Action == string(accessPolicy.Action) && - entity.Component.Resource == accessPolicy.GetResource(config.RootProcessGroupId) { + for _, group := range entity.Component.UserGroups { + userGroupEntity, err := nClient.GetUserGroup(group.Id) + if err := clientwrappers.ErrorGetOperation(log, err, "Get user-group"); err != nil { + return nil, err + } + + if userEntityContainsAccessPolicy(entity, accessPolicy, config.RootProcessGroupId) || + usergroup.UserGroupEntityContainsAccessPolicy(userGroupEntity, accessPolicy, config.RootProcessGroupId) { contains = true break } @@ -199,3 +210,34 @@ func updateUserEntity(user *v1alpha1.NifiUser, entity *nigoapi.UserEntity) { entity.Component.Identity = user.GetIdentity() } + +func userContainsAccessPolicy(user *v1alpha1.NifiUser, entity nigoapi.AccessPolicySummaryEntity, rootPGId string) bool { + for _, accessPolicy := range user.Spec.AccessPolicies { + if entity.Component.Action == string(accessPolicy.Action) && + entity.Component.Resource == accessPolicy.GetResource(rootPGId) { + return true + } + } + return false +} + +func userEntityContainsAccessPolicy(entity *nigoapi.UserEntity, accessPolicy v1alpha1.AccessPolicy, rootPGId string) bool { + for _, entity := range entity.Component.AccessPolicies { + if entity.Component.Action == string(accessPolicy.Action) && + entity.Component.Resource == accessPolicy.GetResource(rootPGId) { + return true + } + } + return false +} + +func userGroupEntityContainsAccessPolicyEntity(entity *nigoapi.UserGroupEntity, accessPolicy 
nigoapi.AccessPolicySummaryEntity) bool { + for _, entity := range entity.Component.AccessPolicies { + if entity.Component.Action == accessPolicy.Component.Action && + entity.Component.Resource == accessPolicy.Component.Resource { + return true + } + } + return false +} + diff --git a/pkg/clientwrappers/usergroup/usergroup.go b/pkg/clientwrappers/usergroup/usergroup.go index bfdde37de..2c0f6a681 100644 --- a/pkg/clientwrappers/usergroup/usergroup.go +++ b/pkg/clientwrappers/usergroup/usergroup.go @@ -104,14 +104,7 @@ func SyncUserGroup(userGroup *v1alpha1.NifiUserGroup, users []*v1alpha1.NifiUser // Remove from access policy for _, entity := range entity.Component.AccessPolicies { - contains := false - for _, accessPolicy := range userGroup.Spec.AccessPolicies { - if entity.Component.Action == string(accessPolicy.Action) && - entity.Component.Resource == accessPolicy.GetResource(config.RootProcessGroupId) { - contains = true - break - } - } + contains := userGroupContainsAccessPolicy(userGroup, entity, config.RootProcessGroupId) if !contains { if err := accesspolicies.UpdateAccessPolicyEntity(&entity, []*v1alpha1.NifiUser{}, []*v1alpha1.NifiUser{}, @@ -123,14 +116,7 @@ func SyncUserGroup(userGroup *v1alpha1.NifiUserGroup, users []*v1alpha1.NifiUser // add for _, accessPolicy := range userGroup.Spec.AccessPolicies { - contains := false - for _, entity := range entity.Component.AccessPolicies { - if entity.Component.Action == string(accessPolicy.Action) && - entity.Component.Resource == accessPolicy.GetResource(config.RootProcessGroupId) { - contains = true - break - } - } + contains := UserGroupEntityContainsAccessPolicy(entity, accessPolicy, config.RootProcessGroupId) if !contains { if err := accesspolicies.UpdateAccessPolicy(&accessPolicy, []*v1alpha1.NifiUser{}, []*v1alpha1.NifiUser{}, @@ -211,3 +197,24 @@ func updateUserGroupEntity(userGroup *v1alpha1.NifiUserGroup, users []*v1alpha1. 
entity.Component.Users = append(entity.Component.Users, nigoapi.TenantEntity{Id: user.Status.Id}) } } + + +func userGroupContainsAccessPolicy(userGroup *v1alpha1.NifiUserGroup, entity nigoapi.AccessPolicyEntity, rootPGId string) bool { + for _, accessPolicy := range userGroup.Spec.AccessPolicies { + if entity.Component.Action == string(accessPolicy.Action) && + entity.Component.Resource == accessPolicy.GetResource(rootPGId) { + return true + } + } + return false +} + +func UserGroupEntityContainsAccessPolicy(entity *nigoapi.UserGroupEntity, accessPolicy v1alpha1.AccessPolicy, rootPGId string) bool { + for _, entity := range entity.Component.AccessPolicies { + if entity.Component.Action == string(accessPolicy.Action) && + entity.Component.Resource == accessPolicy.GetResource(rootPGId) { + return true + } + } + return false +} \ No newline at end of file diff --git a/pkg/k8sutil/resource.go b/pkg/k8sutil/resource.go index 0fe9eb91f..3c4e38bb4 100644 --- a/pkg/k8sutil/resource.go +++ b/pkg/k8sutil/resource.go @@ -38,7 +38,6 @@ func Reconcile(log logr.Logger, client runtimeClient.Client, desired runtimeClie current := desired.DeepCopyObject().(runtimeClient.Object) var err error - switch desired.(type) { default: var key runtimeClient.ObjectKey @@ -71,6 +70,17 @@ func Reconcile(log logr.Logger, client runtimeClient.Client, desired runtimeClie } } if err == nil { + switch desired.(type) { + case *v1alpha1.NifiUser: + user := desired.(*v1alpha1.NifiUser) + user.Status = current.(*v1alpha1.NifiUser).Status + desired = user + case *v1alpha1.NifiUserGroup: + group := desired.(*v1alpha1.NifiUserGroup) + group.Status = current.(*v1alpha1.NifiUserGroup).Status + desired = group + } + if CheckIfObjectUpdated(log, desiredType, current, desired) { if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(desired); err != nil { From 2cb0f9abf941c8b5880e8fb866062abf31edeea8 Mon Sep 17 00:00:00 2001 From: erdrix Date: Tue, 28 Sep 2021 22:30:37 +0200 Subject: [PATCH 12/18] fix access 
policy check --- controllers/nifiuser_controller.go | 122 ++++++++++++++--------------- pkg/clientwrappers/user/user.go | 10 +-- 2 files changed, 64 insertions(+), 68 deletions(-) diff --git a/controllers/nifiuser_controller.go b/controllers/nifiuser_controller.go index 0a6330f85..395159a20 100644 --- a/controllers/nifiuser_controller.go +++ b/controllers/nifiuser_controller.go @@ -145,78 +145,76 @@ func (r *NifiUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c } // Get the referenced NifiCluster - if !clusterConnect.IsExternal() { - var cluster *v1alpha1.NifiCluster - if cluster, err = k8sutil.LookupNifiCluster(r.Client, instance.Spec.ClusterRef.Name, clusterRef.Namespace); err != nil { - // This shouldn't trigger anymore, but leaving it here as a safetybelt - if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - r.Log.Info("Cluster is gone already, there is nothing we can do") - if err = r.removeFinalizer(ctx, instance); err != nil { - return RequeueWithError(r.Log, "failed to remove finalizer from NifiUser", err) - } - return Reconciled() + var cluster *v1alpha1.NifiCluster + if cluster, err = k8sutil.LookupNifiCluster(r.Client, instance.Spec.ClusterRef.Name, clusterRef.Namespace); err != nil { + // This shouldn't trigger anymore, but leaving it here as a safetybelt + if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { + r.Log.Info("Cluster is gone already, there is nothing we can do") + if err = r.removeFinalizer(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to remove finalizer from NifiUser", err) } + return Reconciled() } + } - if v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) && - instance.Spec.GetCreateCert() { + if v1alpha1.ClusterRefsEquals([]v1alpha1.ClusterReference{instance.Spec.ClusterRef, current.Spec.ClusterRef}) && + instance.Spec.GetCreateCert() && !clusterConnect.IsExternal(){ - // Avoid panic if the user wants to create a nifi user but the 
cluster is in plaintext mode - // TODO: refactor this and use webhook to validate if the cluster is eligible to create a nifi user - if cluster.Spec.ListenersConfig.SSLSecrets == nil { - return RequeueWithError(r.Log, "could not create Nifi user since cluster does not use ssl", errors.New("failed to create Nifi user")) - } + // Avoid panic if the user wants to create a nifi user but the cluster is in plaintext mode + // TODO: refactor this and use webhook to validate if the cluster is eligible to create a nifi user + if cluster.Spec.ListenersConfig.SSLSecrets == nil { + return RequeueWithError(r.Log, "could not create Nifi user since cluster does not use ssl", errors.New("failed to create Nifi user")) + } - pkiManager := pki.GetPKIManager(r.Client, cluster) + pkiManager := pki.GetPKIManager(r.Client, cluster) - r.Recorder.Event(instance, corev1.EventTypeNormal, "ReconcilingCertificate", - fmt.Sprintf("Reconciling certificate for nifi user %s", instance.Name)) - // Reconcile no matter what to get a user certificate instance for ACL management - // TODO (tinyzimmer): This can go wrong if the user made a mistake in their secret path - // using the vault backend, then tried to delete and fix it. Should probably - // have the PKIManager export a GetUserCertificate specifically for deletions - // that will allow the error to fall through if the certificate doesn't exist. 
- _, err := pkiManager.ReconcileUserCertificate(ctx, instance, r.Scheme) - if err != nil { - switch errors.Cause(err).(type) { - case errorfactory.ResourceNotReady: - r.Log.Info("generated secret not found, may not be ready") - return ctrl.Result{ - Requeue: true, - RequeueAfter: time.Duration(5) * time.Second, - }, nil - case errorfactory.FatalReconcileError: - // TODO: (tinyzimmer) - Sleep for longer for now to give user time to see the error - // But really we should catch these kinds of issues in a pre-admission hook in a future PR - // The user can fix while this is looping and it will pick it up next reconcile attempt - r.Log.Error(err, "Fatal error attempting to reconcile the user certificate. If using vault perhaps a permissions issue or improperly configured PKI?") - return ctrl.Result{ - Requeue: true, - RequeueAfter: time.Duration(15) * time.Second, - }, nil - case errorfactory.VaultAPIFailure: - // Same as above in terms of things that could be checked pre-flight on the cluster - r.Log.Error(err, "Vault API error attempting to reconcile the user certificate. If using vault perhaps a permissions issue or improperly configured PKI?") - return ctrl.Result{ - Requeue: true, - RequeueAfter: time.Duration(15) * time.Second, - }, nil - default: - return RequeueWithError(r.Log, "failed to reconcile user secret", err) - } + r.Recorder.Event(instance, corev1.EventTypeNormal, "ReconcilingCertificate", + fmt.Sprintf("Reconciling certificate for nifi user %s", instance.Name)) + // Reconcile no matter what to get a user certificate instance for ACL management + // TODO (tinyzimmer): This can go wrong if the user made a mistake in their secret path + // using the vault backend, then tried to delete and fix it. Should probably + // have the PKIManager export a GetUserCertificate specifically for deletions + // that will allow the error to fall through if the certificate doesn't exist. 
+ _, err := pkiManager.ReconcileUserCertificate(ctx, instance, r.Scheme) + if err != nil { + switch errors.Cause(err).(type) { + case errorfactory.ResourceNotReady: + r.Log.Info("generated secret not found, may not be ready") + return ctrl.Result{ + Requeue: true, + RequeueAfter: time.Duration(5) * time.Second, + }, nil + case errorfactory.FatalReconcileError: + // TODO: (tinyzimmer) - Sleep for longer for now to give user time to see the error + // But really we should catch these kinds of issues in a pre-admission hook in a future PR + // The user can fix while this is looping and it will pick it up next reconcile attempt + r.Log.Error(err, "Fatal error attempting to reconcile the user certificate. If using vault perhaps a permissions issue or improperly configured PKI?") + return ctrl.Result{ + Requeue: true, + RequeueAfter: time.Duration(15) * time.Second, + }, nil + case errorfactory.VaultAPIFailure: + // Same as above in terms of things that could be checked pre-flight on the cluster + r.Log.Error(err, "Vault API error attempting to reconcile the user certificate. 
If using vault perhaps a permissions issue or improperly configured PKI?") + return ctrl.Result{ + Requeue: true, + RequeueAfter: time.Duration(15) * time.Second, + }, nil + default: + return RequeueWithError(r.Log, "failed to reconcile user secret", err) } + } - r.Recorder.Event(instance, corev1.EventTypeNormal, "ReconciledCertificate", - fmt.Sprintf("Reconciled certificate for nifi user %s", instance.Name)) + r.Recorder.Event(instance, corev1.EventTypeNormal, "ReconciledCertificate", + fmt.Sprintf("Reconciled certificate for nifi user %s", instance.Name)) - // check if marked for deletion - if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - r.Log.Info("Nifi user is marked for deletion, revoking certificates") - if err = pkiManager.FinalizeUserCertificate(ctx, instance); err != nil { - return RequeueWithError(r.Log, "failed to finalize user certificate", err) - } - return r.checkFinalizers(ctx, instance, clientConfig) + // check if marked for deletion + if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { + r.Log.Info("Nifi user is marked for deletion, revoking certificates") + if err = pkiManager.FinalizeUserCertificate(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to finalize user certificate", err) } + return r.checkFinalizers(ctx, instance, clientConfig) } } diff --git a/pkg/clientwrappers/user/user.go b/pkg/clientwrappers/user/user.go index 29da07ffc..37065bafa 100644 --- a/pkg/clientwrappers/user/user.go +++ b/pkg/clientwrappers/user/user.go @@ -117,13 +117,12 @@ func SyncUser(user *v1alpha1.NifiUser, config *clientconfig.NifiConfig) (*v1alph return nil, err } - if userContainsAccessPolicy(user, ent, config.RootProcessGroupId) || - userGroupEntityContainsAccessPolicyEntity(userGroupEntity, ent) { + if userGroupEntityContainsAccessPolicyEntity(userGroupEntity, ent) { contains = true break } } - if !contains { + if !contains && !userContainsAccessPolicy(user, ent, config.RootProcessGroupId) { if err := 
accesspolicies.UpdateAccessPolicyEntity( &nigoapi.AccessPolicyEntity{ Component: &nigoapi.AccessPolicyDto{ @@ -148,13 +147,12 @@ func SyncUser(user *v1alpha1.NifiUser, config *clientconfig.NifiConfig) (*v1alph return nil, err } - if userEntityContainsAccessPolicy(entity, accessPolicy, config.RootProcessGroupId) || - usergroup.UserGroupEntityContainsAccessPolicy(userGroupEntity, accessPolicy, config.RootProcessGroupId) { + if usergroup.UserGroupEntityContainsAccessPolicy(userGroupEntity, accessPolicy, config.RootProcessGroupId) { contains = true break } } - if !contains { + if !contains && !userEntityContainsAccessPolicy(entity, accessPolicy, config.RootProcessGroupId) { if err := accesspolicies.UpdateAccessPolicy(&accessPolicy, []*v1alpha1.NifiUser{user}, []*v1alpha1.NifiUser{}, []*v1alpha1.NifiUserGroup{}, []*v1alpha1.NifiUserGroup{}, config); err != nil { From f12d71070bb49c24b2f94894649b79ff1a3c4184 Mon Sep 17 00:00:00 2001 From: erdrix Date: Thu, 30 Sep 2021 15:04:30 +0200 Subject: [PATCH 13/18] fix nifiuser marked for deletion management --- controllers/nifiuser_controller.go | 1 - pkg/errorfactory/errorfactory.go | 3 +++ pkg/k8sutil/resource.go | 4 ++-- pkg/nificlient/client.go | 4 ++++ 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/controllers/nifiuser_controller.go b/controllers/nifiuser_controller.go index 395159a20..66ce4ea7b 100644 --- a/controllers/nifiuser_controller.go +++ b/controllers/nifiuser_controller.go @@ -214,7 +214,6 @@ func (r *NifiUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c if err = pkiManager.FinalizeUserCertificate(ctx, instance); err != nil { return RequeueWithError(r.Log, "failed to finalize user certificate", err) } - return r.checkFinalizers(ctx, instance, clientConfig) } } diff --git a/pkg/errorfactory/errorfactory.go b/pkg/errorfactory/errorfactory.go index ccb47c036..bc2f15883 100644 --- a/pkg/errorfactory/errorfactory.go +++ b/pkg/errorfactory/errorfactory.go @@ -52,6 +52,9 @@ type 
FatalReconcileError struct{ error } // ReconcileRollingUpgrade states that rolling upgrade is reconciling type ReconcileRollingUpgrade struct{ error } +// NilClientConfig states that the client config is nil +type NilClientConfig struct{ error } + // NifiClusterNotReady states that NC is not ready to receive actions type NifiClusterNotReady struct{ error } diff --git a/pkg/k8sutil/resource.go b/pkg/k8sutil/resource.go index 3c4e38bb4..67e7f4831 100644 --- a/pkg/k8sutil/resource.go +++ b/pkg/k8sutil/resource.go @@ -147,8 +147,8 @@ func CheckIfObjectUpdated(log logr.Logger, desiredType reflect.Type, current, de } func IsPodTerminatedOrShutdown(pod *corev1.Pod) bool { - return (pod.Status.Phase == corev1.PodFailed && pod.Status.Reason == "Shutdown") || - IsPodContainsTerminatedContainer(pod) + return (pod.Status.Phase == corev1.PodFailed && + (pod.Status.Reason == "Shutdown" || pod.Status.Reason == "Evicted")) || IsPodContainsTerminatedContainer(pod) } func IsPodContainsTerminatedContainer(pod *corev1.Pod) bool { diff --git a/pkg/nificlient/client.go b/pkg/nificlient/client.go index 61cb60423..50ea57bce 100644 --- a/pkg/nificlient/client.go +++ b/pkg/nificlient/client.go @@ -15,6 +15,7 @@ package nificlient import ( "context" + "emperror.dev/errors" "fmt" "github.com/Orange-OpenSource/nifikop/pkg/util/clientconfig" "net/http" @@ -194,6 +195,9 @@ func NewFromConfig(opts *clientconfig.NifiConfig) (NifiClient, error) { var client NifiClient var err error + if opts == nil { + return nil, errorfactory.New(errorfactory.NilClientConfig{}, errors.New("The NiFi client config is nil"), "The NiFi client config is nil") + } client = New(opts) err = client.Build() if err != nil { From 99db414c2fb9e665e98e5b92329d7f6dffc216d2 Mon Sep 17 00:00:00 2001 From: erdrix Date: Fri, 1 Oct 2021 08:52:09 +0200 Subject: [PATCH 14/18] avoid issue for old nifidataflow crd version --- api/v1alpha1/nifidataflow_types.go | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff 
--git a/api/v1alpha1/nifidataflow_types.go b/api/v1alpha1/nifidataflow_types.go index 9bfa90884..b19724706 100644 --- a/api/v1alpha1/nifidataflow_types.go +++ b/api/v1alpha1/nifidataflow_types.go @@ -37,7 +37,7 @@ type NifiDataflowSpec struct { ParameterContextRef *ParameterContextReference `json:"parameterContextRef,omitempty"` // if the flow will be synchronized once, continuously or never // +kubebuilder:validation:Enum={"never","always","once"} - SyncMode DataflowSyncMode `json:"syncMode,omitempty"` + SyncMode *DataflowSyncMode `json:"syncMode,omitempty"` // whether the flow is considered as ran if some controller services are still invalid or not. SkipInvalidControllerService bool `json:"skipInvalidControllerService,omitempty"` // whether the flow is considered as ran if some components are still invalid or not. @@ -144,22 +144,29 @@ func init() { SchemeBuilder.Register(&NifiDataflow{}, &NifiDataflowList{}) } +func (d *NifiDataflowSpec) GetSyncMode() DataflowSyncMode { + if d.SyncMode == nil { + return SyncAlways + } + return *d.SyncMode +} + func (d *NifiDataflowSpec) SyncOnce() bool { - if d.SyncMode == SyncOnce { + if d.GetSyncMode() == SyncOnce { return true } return false } func (d *NifiDataflowSpec) SyncAlways() bool { - if d.SyncMode == SyncAlways { + if d.GetSyncMode() == SyncAlways { return true } return false } func (d *NifiDataflowSpec) SyncNever() bool { - if d.SyncMode == SyncNever { + if d.GetSyncMode() == SyncNever { return true } return false From 7f12324abd450af395f9f1e18dcdb544d1c6aad6 Mon Sep 17 00:00:00 2001 From: erdrix Date: Tue, 12 Oct 2021 16:49:59 +0200 Subject: [PATCH 15/18] generate code --- api/v1alpha1/zz_generated.deepcopy.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index ca0bb2cb8..f2ad10820 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -556,6 +556,11 @@ func (in *NifiDataflowSpec) 
DeepCopyInto(out *NifiDataflowSpec) { *out = new(ParameterContextReference) **out = **in } + if in.SyncMode != nil { + in, out := &in.SyncMode, &out.SyncMode + *out = new(DataflowSyncMode) + **out = **in + } out.ClusterRef = in.ClusterRef if in.RegistryClientRef != nil { in, out := &in.RegistryClientRef, &out.RegistryClientRef From 4293915b3497cca45b86663b726ab1c501a4eea7 Mon Sep 17 00:00:00 2001 From: erdrix Date: Tue, 12 Oct 2021 17:59:59 +0200 Subject: [PATCH 16/18] clean comment, append changelog & start documetnation --- api/v1alpha1/nificluster_types.go | 15 +- api/v1alpha1/nifiparametercontext_types.go | 2 +- api/v1alpha1/nifiregistryclient_types.go | 2 +- .../bases/nifi.orange.com_nificlusters.yaml | 19 +- ...nifi.orange.com_nifiparametercontexts.yaml | 2 +- .../nifi.orange.com_nifiregistryclients.yaml | 2 +- .../crds/nifi.orange.com_nificlusters.yaml | 2027 ++++++++++++++++- .../crds/nifi.orange.com_nifidataflows.yaml | 11 +- ...nifi.orange.com_nifiparametercontexts.yaml | 3 +- .../nifi.orange.com_nifiregistryclients.yaml | 5 +- site/docs/3_tasks/3_nifi_dataflow.md | 8 +- .../1_nifi_cluster/1_nifi_cluster.md | 15 +- site/docs/5_references/5_nifi_dataflow.md | 4 +- 13 files changed, 2025 insertions(+), 90 deletions(-) diff --git a/api/v1alpha1/nificluster_types.go b/api/v1alpha1/nificluster_types.go index 62252402b..afb711dd8 100644 --- a/api/v1alpha1/nificluster_types.go +++ b/api/v1alpha1/nificluster_types.go @@ -17,11 +17,12 @@ limitations under the License. package v1alpha1 import ( + "strings" + cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "strings" ) const ( @@ -37,19 +38,21 @@ const ( // NifiClusterSpec defines the desired state of NifiCluster type NifiClusterSpec struct { + // clientType defines if the operator will use basic or tls authentication to query the NiFi cluster. 
// +kubebuilder:validation:Enum={"tls","basic"} ClientType ClientConfigType `json:"clientType,omitempty"` + // type defines if the cluster is internal (i.e manager by the operator) or external. // +kubebuilder:validation:Enum={"external","internal"} Type ClusterType `json:"type,omitempty"` - // nodeURITemplate used to dynamically compute node uri (used if external-* type) + // nodeURITemplate used to dynamically compute node uri (used if external type) NodeURITemplate string `json:"nodeURITemplate,omitempty"` - // nifiURI used access through a LB uri (used if external-* type) + // nifiURI used access through a LB uri (used if external type) NifiURI string `json:"nifiURI,omitempty"` - // rootProcessGroupId contains the uuid of the root process group for this cluster (used if external-* type) + // rootProcessGroupId contains the uuid of the root process group for this cluster (used if external type) RootProcessGroupId string `json:"rootProcessGroupId,omitempty"` - // secretRef reference the secret containing the informations required to authentiticate to the cluster (used if external-* type) + // secretRef reference the secret containing the informations required to authentiticate to the cluster (used if external type) SecretRef SecretReference `json:"secretRef,omitempty"` - // proxyUrl define the proxy required to query the NiFi cluster (used if external-* type) + // proxyUrl defines the proxy required to query the NiFi cluster (used if external type) ProxyUrl string `json:"proxyUrl,omitempty"` // Service defines the policy for services owned by NiFiKop operator. 
Service ServicePolicy `json:"service,omitempty"` diff --git a/api/v1alpha1/nifiparametercontext_types.go b/api/v1alpha1/nifiparametercontext_types.go index 1e2babe20..93a0142d9 100644 --- a/api/v1alpha1/nifiparametercontext_types.go +++ b/api/v1alpha1/nifiparametercontext_types.go @@ -29,7 +29,7 @@ type NifiParameterContextSpec struct { Description string `json:"description,omitempty"` // a list of non-sensitive Parameters. Parameters []Parameter `json:"parameters"` - // contains the reference to the NifiCluster with the one the dataflow is linked. + // contains the reference to the NifiCluster with the one the parameter context is linked. ClusterRef ClusterReference `json:"clusterRef,omitempty"` // a list of secret containing sensitive parameters (the key will name of the parameter). SecretRefs []SecretReference `json:"secretRefs,omitempty"` diff --git a/api/v1alpha1/nifiregistryclient_types.go b/api/v1alpha1/nifiregistryclient_types.go index badbfd10a..13621e401 100644 --- a/api/v1alpha1/nifiregistryclient_types.go +++ b/api/v1alpha1/nifiregistryclient_types.go @@ -29,7 +29,7 @@ type NifiRegistryClientSpec struct { Uri string `json:"uri"` // The Description of the Registry client. Description string `json:"description,omitempty"` - // contains the reference to the NifiCluster with the one the dataflow is linked. + // contains the reference to the NifiCluster with the one the registry client is linked. 
ClusterRef ClusterReference `json:"clusterRef,omitempty"` } diff --git a/config/crd/bases/nifi.orange.com_nificlusters.yaml b/config/crd/bases/nifi.orange.com_nificlusters.yaml index 74b53d424..b46ff8a28 100644 --- a/config/crd/bases/nifi.orange.com_nificlusters.yaml +++ b/config/crd/bases/nifi.orange.com_nificlusters.yaml @@ -37,8 +37,8 @@ spec: description: NifiClusterSpec defines the desired state of NifiCluster properties: clientType: - description: ClientConfigType represents an interface implementing - the ClientConfigManager + description: clientType defines if the operator will use basic or + tls authentication to query the NiFi cluster. enum: - tls - basic @@ -1422,7 +1422,7 @@ spec: - retryDurationMinutes type: object nifiURI: - description: nifiURI used access through a LB uri (used if external-* + description: nifiURI used access through a LB uri (used if external type) type: string nodeConfigGroups: @@ -1921,7 +1921,7 @@ spec: type: object nodeURITemplate: description: nodeURITemplate used to dynamically compute node uri - (used if external-* type) + (used if external type) type: string nodes: description: all node requires an image, unique id, and storageConfigs @@ -2865,8 +2865,8 @@ spec: description: propage type: boolean proxyUrl: - description: proxyUrl define the proxy required to query the NiFi - cluster (used if external-* type) + description: proxyUrl defines the proxy required to query the NiFi + cluster (used if external type) type: string readOnlyConfig: description: readOnlyConfig specifies the read-only type Nifi config @@ -3247,11 +3247,11 @@ spec: type: object rootProcessGroupId: description: rootProcessGroupId contains the uuid of the root process - group for this cluster (used if external-* type) + group for this cluster (used if external type) type: string secretRef: description: secretRef reference the secret containing the informations - required to authentiticate to the cluster (used if external-* type) + required to 
authentiticate to the cluster (used if external type) properties: name: type: string @@ -4364,7 +4364,8 @@ spec: type: object type: array type: - description: ClusterType represents an interface implementing the ClientConfigManager + description: type defines if the cluster is internal (i.e manager + by the operator) or external. enum: - external - internal diff --git a/config/crd/bases/nifi.orange.com_nifiparametercontexts.yaml b/config/crd/bases/nifi.orange.com_nifiparametercontexts.yaml index 9fb905338..92cbafff1 100644 --- a/config/crd/bases/nifi.orange.com_nifiparametercontexts.yaml +++ b/config/crd/bases/nifi.orange.com_nifiparametercontexts.yaml @@ -39,7 +39,7 @@ spec: properties: clusterRef: description: contains the reference to the NifiCluster with the one - the dataflow is linked. + the parameter context is linked. properties: name: type: string diff --git a/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml b/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml index bf9c077e7..7bf531d14 100644 --- a/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml +++ b/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml @@ -39,7 +39,7 @@ spec: properties: clusterRef: description: contains the reference to the NifiCluster with the one - the dataflow is linked. + the registry client is linked. 
properties: name: type: string diff --git a/helm/nifikop/crds/nifi.orange.com_nificlusters.yaml b/helm/nifikop/crds/nifi.orange.com_nificlusters.yaml index 698650d2c..74b53d424 100644 --- a/helm/nifikop/crds/nifi.orange.com_nificlusters.yaml +++ b/helm/nifikop/crds/nifi.orange.com_nificlusters.yaml @@ -36,6 +36,13 @@ spec: spec: description: NifiClusterSpec defines the desired state of NifiCluster properties: + clientType: + description: ClientConfigType represents an interface implementing + the ClientConfigManager + enum: + - tls + - basic + type: string clusterImage: description: clusterImage can specify the whole NiFi cluster image in one place @@ -52,6 +59,118 @@ spec: description: If set to true, will create a podDisruptionBudget type: boolean type: object + externalServices: + description: ExternalService specifies settings required to access + nifi externally + items: + properties: + name: + description: 'Name must be unique within a namespace. Is required + when creating resources, although some resources may allow + a client to request the generation of an appropriate name + automatically. Name is primarily intended for creation idempotence + and configuration definition. Cannot be updated. More info: + http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + serviceAnnotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored + with a resource that may be set by external tools to store + and retrieve arbitrary metadata. They are not queryable and + should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + spec: + description: Spec defines the behavior of a service. + properties: + clusterIP: + description: 'clusterIP is the IP address of the service + and is usually assigned randomly by the master. 
If an + address is specified manually and is not in use by others, + it will be allocated to the service; otherwise, creation + of the service will fail. This field can not be changed + through updates. Valid values are "None", empty string + (""), or a valid IP address. "None" can be specified for + headless services when proxying is not required. Only + applies to types ClusterIP, NodePort, and LoadBalancer. + Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + externalIPs: + description: externalIPs is a list of IP addresses for which + nodes in the cluster will also accept traffic for this + service. These IPs are not managed by Kubernetes. The + user is responsible for ensuring that traffic arrives + at a node with this IP. A common example is external + load-balancers that are not part of the Kubernetes system. + items: + type: string + type: array + externalName: + description: externalName is the external reference that + kubedns or equivalent will return as a CNAME record for + this service. No proxying will be involved. Must be a + valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + and requires Type to be ExternalName. + type: string + loadBalancerIP: + description: 'Only applies to Service Type: LoadBalancer + LoadBalancer will get created with the IP specified in + this field. This feature depends on whether the underlying + cloud-provider supports specifying the loadBalancerIP + when a load balancer is created. This field will be ignored + if the cloud-provider does not support the feature.' + type: string + loadBalancerSourceRanges: + description: 'If specified and supported by the platform, + this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client + IPs. This field will be ignored if the cloud-provider + does not support the feature." 
More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/' + items: + type: string + type: array + portConfigs: + description: Contains the list port for the service and + the associated listener + items: + properties: + internalListenerName: + description: The name of the listener which will be + used as target container. + type: string + port: + description: The port that will be exposed by this + service. + format: int32 + type: integer + required: + - internalListenerName + - port + type: object + type: array + type: + description: 'type determines how the Service is exposed. + Defaults to ClusterIP. Valid options are ExternalName, + ClusterIP, NodePort, and LoadBalancer. "ExternalName" + maps to the specified externalName. "ClusterIP" allocates + a cluster-internal IP address for load-balancing to endpoints. + Endpoints are determined by the selector or if that is + not specified, by manual construction of an Endpoints + object. If clusterIP is "None", no virtual IP is allocated + and the endpoints are published as a set of endpoints + rather than a stable IP. "NodePort" builds on ClusterIP + and allocates a port on every node which routes to the + clusterIP. "LoadBalancer" builds on NodePort and creates + an external load-balancer (if supported in the current + cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types' + type: string + required: + - portConfigs + type: object + required: + - spec + type: object + type: array initContainerImage: description: initContainerImage can override the default image used into the init container to check if ZoooKeeper server is reachable. @@ -916,8 +1035,7 @@ spec: can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. - This cannot be updated. 
This is a beta feature enabled by - the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' properties: exec: description: One and only one of the following should be @@ -1172,11 +1290,8 @@ spec: domain which is "cluster.local" type: string internalListeners: - description: 'externalListeners specifies settings required to - access nifi externally TODO: enable externalListener configuration - ExternalListeners []ExternalListenerConfig `json:"externalListeners,omitempty"` - internalListeners specifies settings required to access nifi - internally' + description: internalListeners specifies settings required to + access nifi internally items: description: InternalListenerConfig defines the internal listener config for Nifi @@ -1198,6 +1313,7 @@ spec: - http - https - s2s + - prometheus type: string required: - containerPort @@ -1222,10 +1338,13 @@ spec: act as CA : https://cert-manager.io/docs/concepts/issuer/' properties: group: + description: Group of the resource being referred to. type: string kind: + description: Kind of the resource being referred to. type: string name: + description: Name of the resource being referred to. 
type: string required: - name @@ -1302,10 +1421,20 @@ spec: required: - retryDurationMinutes type: object + nifiURI: + description: nifiURI used access through a LB uri (used if external-* + type) + type: string nodeConfigGroups: additionalProperties: description: NodeConfig defines the node configuration properties: + fsGroup: + description: FSGroup define the id of the group for each volumes + in Nifi image + format: int64 + minimum: 1 + type: integer image: description: ' Docker image used by the operator to create the node associated https://hub.docker.com/r/apache/nifi/' @@ -1617,20 +1746,15 @@ spec: type: array dataSource: description: 'This field can be used to specify either: - * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - - Beta) * An existing PVC (PersistentVolumeClaim) - * An existing custom resource/object that implements - data population (Alpha) In order to use VolumeSnapshot - object types, the appropriate feature gate must - be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) - If the provisioner or an external controller can - support the specified data source, it will create - a new volume based on the contents of the specified - data source. If the specified data source is not - supported, the volume will not be created and the - failure will be reported as an event. In the future, - we plan to support more data source types and the - behavior of the provisioner may change.' + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) * An existing + custom resource that implements data population + (Alpha) In order to use custom resource types that + implement data population, the AnyVolumeDataSource + feature gate must be enabled. If the provisioner + or an external controller can support the specified + data source, it will create a new volume based on + the contents of the specified data source.' 
properties: apiGroup: description: APIGroup is the group for the resource @@ -1795,6 +1919,10 @@ spec: description: nodeConfigGroups specifies multiple node configs with unique name type: object + nodeURITemplate: + description: nodeURITemplate used to dynamically compute node uri + (used if external-* type) + type: string nodes: description: all node requires an image, unique id, and storageConfigs settings @@ -1808,6 +1936,12 @@ spec: nodeConfig: description: node configuration properties: + fsGroup: + description: FSGroup define the id of the group for each + volumes in Nifi image + format: int64 + minimum: 1 + type: integer image: description: ' Docker image used by the operator to create the node associated https://hub.docker.com/r/apache/nifi/' @@ -2132,21 +2266,15 @@ spec: dataSource: description: 'This field can be used to specify either: * An existing VolumeSnapshot object - (snapshot.storage.k8s.io/VolumeSnapshot - Beta) - * An existing PVC (PersistentVolumeClaim) * - An existing custom resource/object that implements - data population (Alpha) In order to use VolumeSnapshot - object types, the appropriate feature gate must - be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) - If the provisioner or an external controller - can support the specified data source, it will - create a new volume based on the contents of - the specified data source. If the specified - data source is not supported, the volume will - not be created and the failure will be reported - as an event. In the future, we plan to support - more data source types and the behavior of the - provisioner may change.' + (snapshot.storage.k8s.io/VolumeSnapshot) * An + existing PVC (PersistentVolumeClaim) * An existing + custom resource that implements data population + (Alpha) In order to use custom resource types + that implement data population, the AnyVolumeDataSource + feature gate must be enabled. 
If the provisioner + or an external controller can support the specified + data source, it will create a new volume based + on the contents of the specified data source.' properties: apiGroup: description: APIGroup is the group for the @@ -2324,6 +2452,167 @@ spec: which has type read-only these config changes will trigger rolling upgrade properties: + additionalSharedEnvs: + description: AdditionalSharedEnvs define a set of additional + env variables that will shared between all init containers + and containers in the pod. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are + expanded using the previous defined environment + variables in the container and any service environment + variables. If a variable cannot be resolved, the + reference in the input string will be unchanged. + The $(VAR_NAME) syntax can be escaped with a double + $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable + exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + bootstrapNotificationServicesConfig: + description: BootstrapNotificationServices configuration + that will be applied to the node. + properties: + replaceConfigMap: + description: bootstrap_notifications_services.xml configuration + that will replace the one produced based on template + properties: + data: + description: The key of the value,in data content, + that we want use. + type: string + name: + description: Name of the configmap that we want + to refer. + type: string + namespace: + description: Namespace where is located the secret + that we want to refer. + type: string + required: + - data + - name + type: object + replaceSecretConfig: + description: bootstrap_notifications_services.xml configuration + that will replace the one produced based on template + and overrideConfigMap + properties: + data: + description: The key of the value,in data content, + that we want use. + type: string + name: + description: Name of the configmap that we want + to refer. + type: string + namespace: + description: Namespace where is located the secret + that we want to refer. + type: string + required: + - data + - name + type: object + type: object bootstrapProperties: description: BootstrapProperties configuration that will be applied to the node. @@ -2331,12 +2620,105 @@ spec: nifiJvmMemory: description: JVM memory settings type: string + overrideConfigMap: + description: Additionnals bootstrap.properties configuration + that will override the one produced based on template + and configuration + properties: + data: + description: The key of the value,in data content, + that we want use. + type: string + name: + description: Name of the configmap that we want + to refer. 
+ type: string + namespace: + description: Namespace where is located the secret + that we want to refer. + type: string + required: + - data + - name + type: object overrideConfigs: description: Additionnals bootstrap.properties configuration that will override the one produced based on template and configurations. type: string + overrideSecretConfig: + description: Additionnals bootstrap.properties configuration + that will override the one produced based on template, + configurations, overrideConfigMap and overrideConfigs. + properties: + data: + description: The key of the value,in data content, + that we want use. + type: string + name: + description: Name of the configmap that we want + to refer. + type: string + namespace: + description: Namespace where is located the secret + that we want to refer. + type: string + required: + - data + - name + type: object + type: object + logbackConfig: + description: Logback configuration that will be applied + to the node. + properties: + replaceConfigMap: + description: logback.xml configuration that will replace + the one produced based on template + properties: + data: + description: The key of the value,in data content, + that we want use. + type: string + name: + description: Name of the configmap that we want + to refer. + type: string + namespace: + description: Namespace where is located the secret + that we want to refer. + type: string + required: + - data + - name + type: object + replaceSecretConfig: + description: logback.xml configuration that will replace + the one produced based on template and overrideConfigMap + properties: + data: + description: The key of the value,in data content, + that we want use. + type: string + name: + description: Name of the configmap that we want + to refer. + type: string + namespace: + description: Namespace where is located the secret + that we want to refer. 
+ type: string + required: + - data + - name + type: object type: object + maximumTimerDrivenThreadCount: + description: MaximumTimerDrivenThreadCount define the maximum + number of threads for timer driven processors available + to the system. + format: int32 + type: integer nifiProperties: description: NifiProperties configuration that will be applied to the node. @@ -2348,11 +2730,53 @@ spec: needClientAuth: description: Nifi security client auth type: boolean - overrideConfigs: + overrideConfigMap: description: Additionnals nifi.properties configuration that will override the one produced based on template - and configurations. + and configuration + properties: + data: + description: The key of the value,in data content, + that we want use. + type: string + name: + description: Name of the configmap that we want + to refer. + type: string + namespace: + description: Namespace where is located the secret + that we want to refer. + type: string + required: + - data + - name + type: object + overrideConfigs: + description: Additionnals nifi.properties configuration + that will override the one produced based on template, + configurations and overrideConfigMap. type: string + overrideSecretConfig: + description: Additionnals nifi.properties configuration + that will override the one produced based on template, + configurations, overrideConfigMap and overrideConfigs. + properties: + data: + description: The key of the value,in data content, + that we want use. + type: string + name: + description: Name of the configmap that we want + to refer. + type: string + namespace: + description: Namespace where is located the secret + that we want to refer. + type: string + required: + - data + - name + type: object webProxyHosts: description: A comma separated list of allowed HTTP Host header values to consider when NiFi is running @@ -2366,11 +2790,53 @@ spec: description: ZookeeperProperties configuration that will be applied to the node. 
properties: + overrideConfigMap: + description: Additionnals zookeeper.properties configuration + that will override the one produced based on template + and configuration + properties: + data: + description: The key of the value,in data content, + that we want use. + type: string + name: + description: Name of the configmap that we want + to refer. + type: string + namespace: + description: Namespace where is located the secret + that we want to refer. + type: string + required: + - data + - name + type: object overrideConfigs: description: Additionnals zookeeper.properties configuration that will override the one produced based on template and configurations. type: string + overrideSecretConfig: + description: Additionnals zookeeper.properties configuration + that will override the one produced based on template, + configurations, overrideConfigMap and overrideConfigs. + properties: + data: + description: The key of the value,in data content, + that we want use. + type: string + name: + description: Name of the configmap that we want + to refer. + type: string + namespace: + description: Namespace where is located the secret + that we want to refer. + type: string + required: + - data + - name + type: object type: object type: object required: @@ -2398,11 +2864,170 @@ spec: propagateLabels: description: propage type: boolean + proxyUrl: + description: proxyUrl define the proxy required to query the NiFi + cluster (used if external-* type) + type: string readOnlyConfig: description: readOnlyConfig specifies the read-only type Nifi config cluster wide, all theses will be merged with node specified readOnly configurations, so it can be overwritten per node. properties: + additionalSharedEnvs: + description: AdditionalSharedEnvs define a set of additional env + variables that will shared between all init containers and containers + in the pod. + items: + description: EnvVar represents an environment variable present + in a Container. 
+ properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a + variable cannot be resolved, the reference in the input + string will be unchanged. The $(VAR_NAME) syntax can be + escaped with a double $$, ie: $$(VAR_NAME). Escaped references + will never be expanded, regardless of whether the variable + exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + bootstrapNotificationServicesConfig: + description: BootstrapNotificationServices configuration that + will be applied to the node. + properties: + replaceConfigMap: + description: bootstrap_notifications_services.xml configuration + that will replace the one produced based on template + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. 
+ type: string + required: + - data + - name + type: object + replaceSecretConfig: + description: bootstrap_notifications_services.xml configuration + that will replace the one produced based on template and + overrideConfigMap + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + type: object bootstrapProperties: description: BootstrapProperties configuration that will be applied to the node. @@ -2410,12 +3035,101 @@ spec: nifiJvmMemory: description: JVM memory settings type: string + overrideConfigMap: + description: Additionnals bootstrap.properties configuration + that will override the one produced based on template and + configuration + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object overrideConfigs: description: Additionnals bootstrap.properties configuration that will override the one produced based on template and configurations. type: string + overrideSecretConfig: + description: Additionnals bootstrap.properties configuration + that will override the one produced based on template, configurations, + overrideConfigMap and overrideConfigs. + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. 
+ type: string + required: + - data + - name + type: object + type: object + logbackConfig: + description: Logback configuration that will be applied to the + node. + properties: + replaceConfigMap: + description: logback.xml configuration that will replace the + one produced based on template + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + replaceSecretConfig: + description: logback.xml configuration that will replace the + one produced based on template and overrideConfigMap + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object type: object + maximumTimerDrivenThreadCount: + description: MaximumTimerDrivenThreadCount define the maximum + number of threads for timer driven processors available to the + system. + format: int32 + type: integer nifiProperties: description: NifiProperties configuration that will be applied to the node. @@ -2427,12 +3141,52 @@ spec: needClientAuth: description: Nifi security client auth type: boolean + overrideConfigMap: + description: Additionnals nifi.properties configuration that + will override the one produced based on template and configuration + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. 
+ type: string + required: + - data + - name + type: object overrideConfigs: description: Additionnals nifi.properties configuration that - will override the one produced based on template and configurations. + will override the one produced based on template, configurations + and overrideConfigMap. type: string - webProxyHosts: - description: A comma separated list of allowed HTTP Host header + overrideSecretConfig: + description: Additionnals nifi.properties configuration that + will override the one produced based on template, configurations, + overrideConfigMap and overrideConfigs. + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + webProxyHosts: + description: A comma separated list of allowed HTTP Host header values to consider when NiFi is running securely and will be receiving requests to a different host[:port] than it is bound to. https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#web-properties @@ -2444,13 +3198,68 @@ spec: description: ZookeeperProperties configuration that will be applied to the node. properties: + overrideConfigMap: + description: Additionnals zookeeper.properties configuration + that will override the one produced based on template and + configuration + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. 
+ type: string + required: + - data + - name + type: object overrideConfigs: description: Additionnals zookeeper.properties configuration that will override the one produced based on template and configurations. type: string + overrideSecretConfig: + description: Additionnals zookeeper.properties configuration + that will override the one produced based on template, configurations, + overrideConfigMap and overrideConfigs. + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object type: object type: object + rootProcessGroupId: + description: rootProcessGroupId contains the uuid of the root process + group for this cluster (used if external-* type) + type: string + secretRef: + description: secretRef reference the secret containing the informations + required to authentiticate to the cluster (used if external-* type) + properties: + name: + type: string + namespace: + type: string + required: + - name + type: object service: description: Service defines the policy for services owned by NiFiKop operator. @@ -2458,17 +3267,1108 @@ spec: annotations: additionalProperties: type: string - description: Annotations specifies the annotations to attach to - services the operator creates - type: object - headlessEnabled: - description: HeadlessEnabled specifies if the cluster should use - headlessService for Nifi or individual services using service - per nodes may come an handy case of service mesh. 
- type: boolean - required: - - headlessEnabled - type: object + description: Annotations specifies the annotations to attach to + services the operator creates + type: object + headlessEnabled: + description: HeadlessEnabled specifies if the cluster should use + headlessService for Nifi or individual services using service + per nodes may come an handy case of service mesh. + type: boolean + required: + - headlessEnabled + type: object + sidecarConfigs: + description: SidecarsConfig defines additional sidecar configurations + items: + description: A single application container that you want to run + within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with a + double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. + The docker image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. The $(VAR_NAME) syntax + can be escaped with a double $$, ie: $$(VAR_NAME). Escaped + references will never be expanded, regardless of whether the + variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be + a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in + the container and any service environment variables. + If a variable cannot be resolved, the reference in the + input string will be unchanged. The $(VAR_NAME) syntax + can be escaped with a double $$, ie: $$(VAR_NAME). Escaped + references will never be expanded, regardless of whether + the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". 
+ type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, requests.cpu, + requests.memory and requests.ephemeral-storage) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be + a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key + will take precedence. Cannot be updated. 
+ items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap must be + defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take + in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. 
Other management + of the container blocks until the hook completes. More + info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. 
TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The reason for termination is passed + to the handler. The Pod''s termination grace period countdown + begins before the PreStop hooked is executed. Regardless + of the outcome of the handler, the container will eventually + terminate within the Pod''s termination grace period. + Other management of the container blocks until the hook + completes or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should + be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's + filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions + ('|', etc) won't work. To use a shell, you need + to explicitly call out to that shell. Exit status + of 0 is treated as live/healthy and non-zero is + unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. 
+ properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the + host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving + a TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range + 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be + specified. Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. 
More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a + TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing + a port here gives the system additional information about + the network connections a container uses, but is primarily + informational. Not specifying a port here DOES NOT prevent + that port from being exposed. Any port which is listening + on the default "0.0.0.0" address inside a container will be + accessible from the network. Cannot be updated. 
+ items: + description: ContainerPort represents a network port in a + single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP + address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If + specified, this must be a valid port number, 0 < x < + 65536. If HostNetwork is specified, this must match + ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod + must have a unique name. Name for the port that can + be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe + fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be + specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. 
+ format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a + TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. + Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. 
More info: + https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. More + info: https://kubernetes.io/docs/concepts/policy/security-context/ + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether + a process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by + the container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes + in privileged containers are essentially equivalent to + root on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to + use for the containers. The default is DefaultProcMount + which uses the container runtime defaults for readonly + paths and masked paths. This requires the ProcMountType + feature flag to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root + filesystem. Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. 
May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a + non-root user. If true, the Kubelet will validate the + image at runtime to ensure that it does not run as UID + 0 (root) and fail to start the container if it does. If + unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both + SecurityContext and PodSecurityContext, the value specified + in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata + if unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a + random SELinux context for each container. May also be + set in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. 
+ properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile + must be preconfigured on the node to work. Must be + a descending path, relative to the kubelet's configured + seccomp profile location. Must only be set if type + is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - + a profile defined in a file on the node should be + used. RuntimeDefault - the container runtime default + profile should be used. Unconfined - no profile should + be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set + in PodSecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. 
If this probe fails, the Pod + will be restarted, just as if the livenessProbe failed. This + can be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. + This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be + specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for the + command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', etc) + won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe + to be considered failed after having succeeded. Defaults + to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has + started before liveness probes are initiated. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe + to be considered successful after having failed. Defaults + to 1. Must be 1 for liveness and startup. Minimum value + is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a + TCP port. TCP hooks not yet supported TODO: implement + a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on + the container. Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer + for stdin in the container runtime. 
If this is not set, reads + from stdin in the container will always result in EOF. Default + is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the + stdin channel after it has been opened by a single attach. + When stdin is true the stdin stream will remain open across + multiple attach sessions. If stdinOnce is set to true, stdin + is opened on container start, is empty until the first client + attaches to stdin, and then remains open and accepts data + until the client disconnects, at which time stdin is closed + and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin + will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the + container''s termination message will be written is mounted + into the container''s filesystem. Message written is intended + to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. + The total message length across all containers will be limited + to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be + populated. File will use the contents of terminationMessagePath + to populate the container status message on both success and + failure. FallbackToLogsOnError will use the last chunk of + container log output if the termination message file is empty + and the container exited with an error. The log output is + limited to 2048 bytes or 80 lines, whichever is smaller. Defaults + to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. 
+ type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other + way around. When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. 
If not specified, + the container runtime's default will be used, which might + be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + type: + description: ClusterType represents an interface implementing the ClientConfigManager + enum: + - external + - internal + type: string zkAddress: description: 'zKAddress specifies the ZooKeeper connection string in the form hostname:port where host and port are those of a Zookeeper @@ -2480,10 +4380,7 @@ spec: in the global ZooKeeper namespace. type: string required: - - listenersConfig - nodes - - oneNifiNodePerNode - - zkAddress type: object status: description: NifiClusterStatus defines the observed state of NifiCluster @@ -2523,13 +4420,33 @@ spec: description: InitClusterNode contains if this nodes was part of the initial cluster type: boolean + podIsReady: + description: PodIsReady whether or not the associated pod is + ready + type: boolean required: - configurationState - gracefulActionState - initClusterNode + - podIsReady type: object description: Store the state of each nifi node type: object + prometheusReportingTask: + description: PrometheusReportingTask contains the status of the prometheus + reporting task managed by the operator + properties: + id: + description: The nifi reporting task's id + type: string + version: + description: The last nifi reporting task revision version catched + format: int64 + type: integer + required: + - id + - version + type: object rollingUpgradeStatus: description: RollingUpgradeStatus defines status of rolling upgrade properties: diff --git a/helm/nifikop/crds/nifi.orange.com_nifidataflows.yaml b/helm/nifikop/crds/nifi.orange.com_nifidataflows.yaml index 265322bfd..5d7518c7b 100644 --- a/helm/nifikop/crds/nifi.orange.com_nifidataflows.yaml +++ b/helm/nifikop/crds/nifi.orange.com_nifidataflows.yaml @@ -84,9 +84,6 @@ spec: required: - name type: object - runOnce: - description: if the flow will be ran once or 
continuously checked - type: boolean skipInvalidComponent: description: whether the flow is considered as ran if some components are still invalid or not. @@ -95,6 +92,14 @@ spec: description: whether the flow is considered as ran if some controller services are still invalid or not. type: boolean + syncMode: + description: if the flow will be synchronized once, continuously or + never + enum: + - never + - always + - once + type: string updateStrategy: description: 'describes the way the operator will deal with data when a dataflow will be updated : drop or drain' diff --git a/helm/nifikop/crds/nifi.orange.com_nifiparametercontexts.yaml b/helm/nifikop/crds/nifi.orange.com_nifiparametercontexts.yaml index a70c23709..9fb905338 100644 --- a/helm/nifikop/crds/nifi.orange.com_nifiparametercontexts.yaml +++ b/helm/nifikop/crds/nifi.orange.com_nifiparametercontexts.yaml @@ -39,7 +39,7 @@ spec: properties: clusterRef: description: contains the reference to the NifiCluster with the one - the user is linked. + the dataflow is linked. properties: name: type: string @@ -84,7 +84,6 @@ spec: type: object type: array required: - - clusterRef - parameters type: object status: diff --git a/helm/nifikop/crds/nifi.orange.com_nifiregistryclients.yaml b/helm/nifikop/crds/nifi.orange.com_nifiregistryclients.yaml index e604f891f..bf9c077e7 100644 --- a/helm/nifikop/crds/nifi.orange.com_nifiregistryclients.yaml +++ b/helm/nifikop/crds/nifi.orange.com_nifiregistryclients.yaml @@ -38,8 +38,8 @@ spec: description: NifiRegistryClientSpec defines the desired state of NifiRegistryClient properties: clusterRef: - description: Contains the reference to the NifiCluster with the one - the registry client is linked. + description: contains the reference to the NifiCluster with the one + the dataflow is linked. properties: name: type: string @@ -56,7 +56,6 @@ spec: pulling the flow. 
type: string required: - - clusterRef - uri type: object status: diff --git a/site/docs/3_tasks/3_nifi_dataflow.md b/site/docs/3_tasks/3_nifi_dataflow.md index 0cba5e550..d2ecf35f1 100644 --- a/site/docs/3_tasks/3_nifi_dataflow.md +++ b/site/docs/3_tasks/3_nifi_dataflow.md @@ -92,7 +92,7 @@ spec: bucketId: "01ced6cc-0378-4893-9403-f6c70d080d4f" flowId: "9b2fb465-fb45-49e7-94fe-45b16b642ac9" flowVersion: 2 - runOnce: false + syncMode: always skipInvalidControllerService: true skipInvalidComponent: true clusterRef: @@ -111,9 +111,11 @@ To find details about the versioned flow information required check the [officia You have two modes of control from your dataflow by the operator : -1 - `Spec.RunOnce == true` : The operator will deploy the dataflow as described in the resource, run it once, and never control it again (unless you change the field to `false`). It is useful when you want to deploy your dataflow in a dev environment, and you want to update the dataflow. +1 - `Spec.SyncMode == never` : The operator will deploy the dataflow as described in the resource, and never control it (unless you change the field to `always`). It is useful when you want to deploy your dataflow without starting it. -2 - `Spec.RunOnce == false` : The operator will deploy and ensure the dataflow lifecycle, it will avoid all manual modification directly from the Cluster (e.g remove the process group, remove the versioning, update the parent process group, make some local changes ...). If you want to perform update, rollback or stuff like this, you have to simply update the [NifiDataflow] resource. +2 - `Spec.SyncMode == once` : The operator will deploy the dataflow as described in the resource, run it once, and never control it again (unless you change the field to `always`). It is useful when you want to deploy your dataflow in a dev environment, and you want to update the dataflow. 
+ +3 - `Spec.SyncMode == always` : The operator will deploy and ensure the dataflow lifecycle, it will avoid all manual modification directly from the Cluster (e.g remove the process group, remove the versioning, update the parent process group, make some local changes ...). If you want to perform update, rollback or stuff like this, you have to simply update the [NifiDataflow] resource. :::important More information about `Spec.UpdateStrategy` [here](../5_references/5_nifi_dataflow.md#dataflowupdatestrategy) diff --git a/site/docs/5_references/1_nifi_cluster/1_nifi_cluster.md b/site/docs/5_references/1_nifi_cluster/1_nifi_cluster.md index b632e8eb9..d3b7b0e7a 100644 --- a/site/docs/5_references/1_nifi_cluster/1_nifi_cluster.md +++ b/site/docs/5_references/1_nifi_cluster/1_nifi_cluster.md @@ -81,14 +81,23 @@ spec: |Field|Type|Description|Required|Default| |-----|----|-----------|--------|--------| +|clientType|Enum={"tls","basic"}| defines if the operator will use basic or tls authentication to query the NiFi cluster. |No| `tls` | +|type|Enum={"external","internal"}| defines if the cluster is internal (i.e. managed by the operator) or external. |No| `internal` | +|nodeURITemplate|string| used to dynamically compute node uri. |if external type| - | +|nifiURI|string| used to access through a LB uri. |if external type| - | +|rootProcessGroupId|string|contains the uuid of the root process group for this cluster. |if external type| - | +|secretRef|\[ \][SecretReference](./4_nifi_parameter_context#secretreference)|reference the secret containing the information required to authenticate to the cluster. |if external type| - | +|proxyUrl|string|defines the proxy required to query the NiFi cluster. |if external type| - | + + |service|[ServicePolicy](#servicepolicy)| defines the policy for services owned by NiFiKop operator. |No| - | |pod|[PodPolicy](#podpolicy)| defines the policy for pod owned by NiFiKop operator.
|No| - | -|zkAddress|string| specifies the ZooKeeper connection string in the form hostname:port where host and port are those of a Zookeeper server.|Yes|""| +|zkAddress|string| specifies the ZooKeeper connection string in the form hostname:port where host and port are those of a Zookeeper server.|No|""| |zkPath|string| specifies the Zookeeper chroot path as part of its Zookeeper connection string which puts its data under same path in the global ZooKeeper namespace.|Yes|"/"| |initContainerImage|string| can override the default image used into the init container to check if ZoooKeeper server is reachable.. |Yes|"busybox"| |initContainers|\[ \]string| defines additional initContainers configurations. |No|\[ \]| |clusterImage|string| can specify the whole nificluster image in one place. |No|""| -|oneNifiNodePerNode|boolean|if set to true every nifi node is started on a new node, if there is not enough node to do that it will stay in pending state. If set to false the operator also tries to schedule the nifi node to a unique node but if the node number is insufficient the nifi node will be scheduled to a node where a nifi node is already running.|Yes| nil | +|oneNifiNodePerNode|boolean|if set to true every nifi node is started on a new node, if there is not enough node to do that it will stay in pending state. If set to false the operator also tries to schedule the nifi node to a unique node but if the node number is insufficient the nifi node will be scheduled to a node where a nifi node is already running.|No| nil | |propagateLabels|boolean| - |Yes|false| |managedAdminUsers|\[ \][ManagedUser](#managedusers)| contains the list of users that will be added to the managed admin group (with all rights). |No|[]| |managedReaderUsers|\[ \][ManagedUser](#managedusers)| contains the list of users that will be added to the managed admin group (with all rights). 
|No|[]| @@ -98,7 +107,7 @@ spec: |disruptionBudget|[DisruptionBudget](#disruptionbudget)| defines the configuration for PodDisruptionBudget.|No| nil | |ldapConfiguration|[LdapConfiguration](#ldapconfiguration)| specifies the configuration if you want to use LDAP.|No| nil | |nifiClusterTaskSpec|[NifiClusterTaskSpec](#nificlustertaskspec)| specifies the configuration of the nifi cluster Tasks.|No| nil | -|listenersConfig|[ListenersConfig](./6_listeners_config.md)| specifies nifi's listener specifig configs.|Yes| - | +|listenersConfig|[ListenersConfig](./6_listeners_config.md)| specifies nifi's listener specifig configs.|No| - | |sidecarConfigs|\[ \][Container](https://godoc.org/k8s.io/api/core/v1#Container)|Defines additional sidecar configurations. [Check documentation for more informations]| |externalServices|\[ \][ExternalServiceConfigs](./7_external_service_config.md)| specifies settings required to access nifi externally.|No| - | diff --git a/site/docs/5_references/5_nifi_dataflow.md b/site/docs/5_references/5_nifi_dataflow.md index 0a5261a50..dbc561e6f 100644 --- a/site/docs/5_references/5_nifi_dataflow.md +++ b/site/docs/5_references/5_nifi_dataflow.md @@ -16,7 +16,7 @@ spec: bucketId: "01ced6cc-0378-4893-9403-f6c70d080d4f" flowId: "9b2fb465-fb45-49e7-94fe-45b16b642ac9" flowVersion: 2 - runOnce: false + syncMode: always skipInvalidControllerService: true skipInvalidComponent: true clusterRef: @@ -48,7 +48,7 @@ spec: |bucketId|string|the UUID of the Bucket containing the flow. |Yes| - | |flowId|string|the UUID of the flow to run. |Yes| - | |flowVersion|*int32|the version of the flow to run, if not present or equals to -1, then the latest version of flow will be used. |Yes| - | -|runOnce|bool|if the flow will be ran once or continuously checked. |Yes| true | +|syncMode|Enum={"never","always","once"}|if the flow will be synchronized once, continuously or never. 
|No| always | |skipInvalidControllerService|bool|whether the flow is considered as ran if some controller services are still invalid or not. |Yes| false | |skipInvalidComponent|bool|whether the flow is considered as ran if some components are still invalid or not. |Yes| false | |updateStrategy|[DataflowUpdateStrategy](#dataflowupdatestrategy)|describes the way the operator will deal with data when a dataflow will be updated : Drop or Drain |Yes| drain | From bcc375571ebc9ab91839bf61e5181d65ed1f8b4d Mon Sep 17 00:00:00 2001 From: erdrix Date: Tue, 12 Oct 2021 21:49:01 +0200 Subject: [PATCH 17/18] append changelog & documentation --- CHANGELOG.md | 47 +++- .../nifi_v1alpha1_nificluster_external.yaml | 24 ++ .../samples/nifi_v1alpha1_nifidataflow.yaml | 2 +- .../1_nifi_cluster/4_external_cluster.md | 93 +++++++ site/website/sidebars.json | 13 +- .../1_concepts/1_introduction.md | 36 +++ .../1_concepts/2_design_principes.md | 62 +++++ .../version-0.7.0/1_concepts/3_features.md | 57 +++++ .../version-0.7.0/1_concepts/4_roadmap.md | 95 +++++++ .../2_setup/1_getting_started.md | 159 ++++++++++++ .../2_setup/2_platform_setup/1_gke.md | 42 ++++ .../2_setup/2_platform_setup/2_minikube.md | 47 ++++ .../1_customizable_install_with_helm.md | 197 +++++++++++++++ .../1_nifi_cluster/1_nodes_configuration.md | 9 + .../1_nifi_cluster/2_cluster_scaling.md | 236 ++++++++++++++++++ .../3_tasks/1_nifi_cluster/3_external_dns.md | 9 + .../1_nifi_cluster/4_external_cluster.md | 93 +++++++ .../version-0.7.0/3_tasks/2_security/1_ssl.md | 159 ++++++++++++ .../2_security/2_authentication/1_oidc.md | 42 ++++ .../version-0.7.0/3_tasks/3_nifi_dataflow.md | 125 ++++++++++ .../3_tasks/4_nifi_user_group.md | 168 +++++++++++++ .../4_examples/1_simple_nifi_cluster.md | 5 + .../1_nifi_cluster/1_nifi_cluster.md | 171 +++++++++++++ .../1_nifi_cluster/2_read_only_config.md | 190 ++++++++++++++ .../1_nifi_cluster/3_node_config.md | 79 ++++++ .../5_references/1_nifi_cluster/4_node.md | 59 +++++ 
.../1_nifi_cluster/5_node_state.md | 71 ++++++ .../1_nifi_cluster/6_listeners_config.md | 54 ++++ .../7_external_service_config.md | 46 ++++ .../version-0.7.0/5_references/2_nifi_user.md | 100 ++++++++ .../5_references/3_nifi_registry_client.md | 43 ++++ .../5_references/4_nifi_parameter_context.md | 83 ++++++ .../5_references/5_nifi_dataflow.md | 124 +++++++++ .../5_references/6_nifi_usergroup.md | 55 ++++ .../6_contributing/1_developer_guide.md | 143 +++++++++++ .../6_contributing/2_reporting_bugs.md | 25 ++ .../version-0.7.0/6_contributing/3_credits.md | 11 + .../version-0.7.0-sidebars.json | 197 +++++++++++++++ site/website/versions.json | 9 +- 39 files changed, 3153 insertions(+), 27 deletions(-) create mode 100644 config/samples/nifi_v1alpha1_nificluster_external.yaml create mode 100644 site/docs/3_tasks/1_nifi_cluster/4_external_cluster.md create mode 100644 site/website/versioned_docs/version-0.7.0/1_concepts/1_introduction.md create mode 100644 site/website/versioned_docs/version-0.7.0/1_concepts/2_design_principes.md create mode 100644 site/website/versioned_docs/version-0.7.0/1_concepts/3_features.md create mode 100644 site/website/versioned_docs/version-0.7.0/1_concepts/4_roadmap.md create mode 100644 site/website/versioned_docs/version-0.7.0/2_setup/1_getting_started.md create mode 100644 site/website/versioned_docs/version-0.7.0/2_setup/2_platform_setup/1_gke.md create mode 100644 site/website/versioned_docs/version-0.7.0/2_setup/2_platform_setup/2_minikube.md create mode 100644 site/website/versioned_docs/version-0.7.0/2_setup/3_install/1_customizable_install_with_helm.md create mode 100644 site/website/versioned_docs/version-0.7.0/3_tasks/1_nifi_cluster/1_nodes_configuration.md create mode 100644 site/website/versioned_docs/version-0.7.0/3_tasks/1_nifi_cluster/2_cluster_scaling.md create mode 100644 site/website/versioned_docs/version-0.7.0/3_tasks/1_nifi_cluster/3_external_dns.md create mode 100644 
site/website/versioned_docs/version-0.7.0/3_tasks/1_nifi_cluster/4_external_cluster.md create mode 100644 site/website/versioned_docs/version-0.7.0/3_tasks/2_security/1_ssl.md create mode 100644 site/website/versioned_docs/version-0.7.0/3_tasks/2_security/2_authentication/1_oidc.md create mode 100644 site/website/versioned_docs/version-0.7.0/3_tasks/3_nifi_dataflow.md create mode 100644 site/website/versioned_docs/version-0.7.0/3_tasks/4_nifi_user_group.md create mode 100644 site/website/versioned_docs/version-0.7.0/4_examples/1_simple_nifi_cluster.md create mode 100644 site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/1_nifi_cluster.md create mode 100644 site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/2_read_only_config.md create mode 100644 site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/3_node_config.md create mode 100644 site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/4_node.md create mode 100644 site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/5_node_state.md create mode 100644 site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/6_listeners_config.md create mode 100644 site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/7_external_service_config.md create mode 100644 site/website/versioned_docs/version-0.7.0/5_references/2_nifi_user.md create mode 100644 site/website/versioned_docs/version-0.7.0/5_references/3_nifi_registry_client.md create mode 100644 site/website/versioned_docs/version-0.7.0/5_references/4_nifi_parameter_context.md create mode 100644 site/website/versioned_docs/version-0.7.0/5_references/5_nifi_dataflow.md create mode 100644 site/website/versioned_docs/version-0.7.0/5_references/6_nifi_usergroup.md create mode 100644 site/website/versioned_docs/version-0.7.0/6_contributing/1_developer_guide.md create mode 100644 site/website/versioned_docs/version-0.7.0/6_contributing/2_reporting_bugs.md create mode 
100644 site/website/versioned_docs/version-0.7.0/6_contributing/3_credits.md create mode 100644 site/website/versioned_sidebars/version-0.7.0-sidebars.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 2c7e468f6..1edbf7b2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,15 +10,45 @@ ### Fixed Bugs +## v0.7.0 + +### Added + +- [PR #132](https://github.com/Orange-OpenSource/nifikop/pull/132) - **[Operator]** Add the ability to manage dataflow lifecycle on non managed NiFi Cluster. +- [PR #132](https://github.com/Orange-OpenSource/nifikop/pull/132) - **[Operator]** Operator can interact with the NiFi cluster using basic authentication in addition to tls. + +### Changed + +- [PR #132](https://github.com/Orange-OpenSource/nifikop/pull/132) - **[Operator]** Enabling the ability to move a resource from one cluster to another by just changing the clusterReference. +- [PR #132](https://github.com/Orange-OpenSource/nifikop/pull/132) - **[Operator]** Improves the performance by reducing the amount of errors when interacting with the NiFi cluster API, checking cluster readiness before applying actions. +- [PR #132](https://github.com/Orange-OpenSource/nifikop/pull/132) - **[Operator/NiFiCluster]** Support `evicted` and `shutdown` pod status as terminating. + +### Deprecated + +### Removed + +### Fixed Bugs + +- [PR #132](https://github.com/Orange-OpenSource/nifikop/pull/132) - **[Operator/NiFiCluster]** Fix the downscale issue ([PR #131](https://github.com/Orange-OpenSource/nifikop/issues/131)) by removing references to configmap +- [PR #132](https://github.com/Orange-OpenSource/nifikop/pull/132) - **[Helm Chart]** Fix the RBAC definition for configmap and lease generated by operator-sdk with some mistakes. +- [PR #132](https://github.com/Orange-OpenSource/nifikop/pull/132) - **[Helm Chart]** Add correct CRDs in the chart helm.
+- [PR #132](https://github.com/Orange-OpenSource/nifikop/pull/132) - **[Operator/NiFiUser]** Fix policy check conflict between user and group scope policy. + +## v0.6.4 + +### Fixed Bugs + +- [COMMIT #d98eb15fb3a74a1be17be5d456b02bd6a2d333cd](https://github.com/Orange-OpenSource/nifikop/tree/d98eb15fb3a74a1be17be5d456b02bd6a2d333cd) - **[Fix/NiFiCluster]** Fix external service port configuration being ignore [#133](https://github.com/Orange-OpenSource/nifikop/issues/133) + ## v0.6.3 ### Added -- [PR #114](https://github.com/Orange-OpenSource/nifikop/pull/114) - **[Fix/NiFiCluster]** Additionals environment variables. +- [PR #114](https://github.com/Orange-OpenSource/nifikop/pull/114) - **[Operator/NiFiCluster]** Additionals environment variables. ### Fixed Bugs -- [PR #113](https://github.com/Orange-OpenSource/nifikop/pull/113) - **[Fix/NiFiDataflow]** Simple work around to avoid null pointer dereferencing on nifi side. +- [PR #113](https://github.com/Orange-OpenSource/nifikop/pull/113) - **[Operator/NiFiDataflow]** Simple work around to avoid null pointer dereferencing on nifi side. ## v0.6.2 @@ -36,7 +66,6 @@ - [PR #93](https://github.com/Orange-OpenSource/nifikop/pull/93) - **[Helm Chart]** Included securityContext and custom service account in helm chart for NiFiKop deployment. - [PR #100](https://github.com/Orange-OpenSource/nifikop/pull/100) - **[Helm Chart]** Add nodeSelector, affinty and toleration in helm chart for NiFiKop deployment. 
- ## v0.6.0 ### Added @@ -116,7 +145,7 @@ ### Fixed Bugs -- [PR #53](https://github.com/Orange-OpenSource/nifikop/pull/53) - **[Operator]** Upgrade k8s dependencies to match with new version requirement : [#52](https://github.com/Orange-OpenSource/nifikop/issues/52) [#51](https://github.com/Orange-OpenSource/nifikop/issues/51) [#33](https://github.com/Orange-OpenSource/nifikop/issues/33) +- [PR #53](https://github.com/Orange-OpenSource/nifikop/pull/53) - **[Operator]** Upgrade k8s dependencies to match with new version requirement : [#52](https://github.com/Orange-OpenSource/nifikop/issues/52) [#51](https://github.com/Orange-OpenSource/nifikop/issues/51) [#33](https://github.com/Orange-OpenSource/nifikop/issues/33) - [PR #53](https://github.com/Orange-OpenSource/nifikop/pull/53) - **[Operator]** Fix the users used into Reader user group - [PR #53](https://github.com/Orange-OpenSource/nifikop/pull/53) - **[Documentation]** Fix the chart version informations : [#51](https://github.com/Orange-OpenSource/nifikop/issues/51) @@ -132,7 +161,6 @@ - [PR #41](https://github.com/Orange-OpenSource/nifikop/pull/41) - **[Operator/NifiCluster]** Create three defaults groups : admins, readers, nodes - [PR #41](https://github.com/Orange-OpenSource/nifikop/pull/41) - **[Operator/NifiCluster]** Add pod disruption budget support - ### Changed - [PR #41](https://github.com/Orange-OpenSource/nifikop/pull/41) - **[Helm Chart]** Add CRDs @@ -143,7 +171,6 @@ - [PR #41](https://github.com/Orange-OpenSource/nifikop/pull/41) - **[Operator/NifiCluster]** Remove `ClusterSecure` and `SiteToSiteSecure` by only checking if `SSLSecret` is set. 
- ### Fixed Bugs - [PR #30](https://github.com/Orange-OpenSource/nifikop/pull/40) - **[Documentation]** Fix getting started @@ -168,7 +195,7 @@ ### Added -- [PR #25](https://github.com/Orange-OpenSource/nifikop/pull/25) - [Helm Chart] Add support for iterating over namespaces +- [PR #25](https://github.com/Orange-OpenSource/nifikop/pull/25) - [Helm Chart] Add support for iterating over namespaces - [PR #18](https://github.com/Orange-OpenSource/nifikop/pull/18) - [Operator] NiFiKop CRDs in version `v1beta1` of CustomResourceDefinition object. ### Changed @@ -200,8 +227,8 @@ - [MR #17](https://github.com/Orange-OpenSource/nifikop/-/merge_requests/17) - Upgrade dependencies - [MR #17](https://github.com/Orange-OpenSource/nifikop/-/merge_requests/17) - CRD generated under `apiextensions.k8s.io/v1` -- [MR #16](https://github.com/Orange-OpenSource/nifikop/-/merge_requests/16) - Set binami zookeeper helm chart as recommended solution for -ZooKeeper. +- [MR #16](https://github.com/Orange-OpenSource/nifikop/-/merge_requests/16) - Set binami zookeeper helm chart as recommended solution for + ZooKeeper. - [MR #16](https://github.com/Orange-OpenSource/nifikop/-/merge_requests/16) - Improve terraform setup for articles. - [MR #18](https://gitlab.si.francetelecom.fr/kubernetes/nifikop/-/merge_requests/18) - Add ability to define if cert-manager is cluster scoped or not. - [MR #18](https://gitlab.si.francetelecom.fr/kubernetes/nifikop/-/merge_requests/18) - Open source changes @@ -266,4 +293,4 @@ ZooKeeper. 
### Removed -### Fixed Bugs \ No newline at end of file +### Fixed Bugs diff --git a/config/samples/nifi_v1alpha1_nificluster_external.yaml b/config/samples/nifi_v1alpha1_nificluster_external.yaml new file mode 100644 index 000000000..2fccc89d9 --- /dev/null +++ b/config/samples/nifi_v1alpha1_nificluster_external.yaml @@ -0,0 +1,24 @@ +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiCluster +metadata: + name: externalcluster +spec: + # rootProcessGroupId contains the uuid of the root process group for this cluster. + rootProcessGroupId: "d37bee03-017a-1000-cff7-4eaaa82266b7" + # nodeURITemplate used to dynamically compute node uri. + nodeURITemplate: "nifi0%d.integ.mapreduce.m0.p.fti.net:9090" + # each node requires a unique id + nodes: + - id: 1 + - id: 2 + - id: 3 + # type defines if the cluster is internal (i.e. managed by the operator) or external. + # :Enum={"external","internal"} + type: "external" + # clientType defines if the operator will use basic or tls authentication to query the NiFi cluster. + # Enum={"tls","basic"} + clientType: "basic" + # secretRef references the secret containing the information required to authenticate to the cluster. + secretRef: + name: nifikop-credentials + namespace: nifikop-nifi diff --git a/config/samples/nifi_v1alpha1_nifidataflow.yaml b/config/samples/nifi_v1alpha1_nifidataflow.yaml index aeb7efeec..ec788b62c 100644 --- a/config/samples/nifi_v1alpha1_nifidataflow.yaml +++ b/config/samples/nifi_v1alpha1_nifidataflow.yaml @@ -12,7 +12,7 @@ spec: # the version of the flow to run, if not present or equals to -1, then the latest version of flow will be used. flowVersion: 2 # if the flow will be run once or continuously checked - runOnce: false + syncMode: always # whether the flow is considered as run if some controller services are still invalid or not. skipInvalidControllerService: true # whether the flow is considered as run if some components are still invalid or not.
diff --git a/site/docs/3_tasks/1_nifi_cluster/4_external_cluster.md b/site/docs/3_tasks/1_nifi_cluster/4_external_cluster.md new file mode 100644 index 000000000..446d2efbc --- /dev/null +++ b/site/docs/3_tasks/1_nifi_cluster/4_external_cluster.md @@ -0,0 +1,93 @@ +--- +id: 4_external_cluster +title: External cluster +sidebar_label: External cluster +--- + +This task shows you how to configure an external cluster. + +## Common configuration + +The operator allows you to manage the Dataflow lifecycle for internal (i.e cluster managed by the operator) and external NiFi cluster. +A NiFi cluster is considered as external as soon as the `NifiCluster` resource used as reference in other NiFi resource explicitly detailed the way to comunicate with the cluster. + +This feature allows you : + +- To automate your Dataflow CI/CD using yaml +- To manage the same way your Dataflow management wherever your cluster is, on bare metal, VMs, k8s, on-premise or on cloud. + +To deploy different resources (`NifiRegistryClient`, `NifiUser`, `NifiUserGroup`, `NifiParameterContext`, `NifiDataflow`) you simply have to declare a `NifiCluster` resource explaining how to discuss with the external cluster, and refer to this resource as usual using the `Spec.ClusterRef` field. + +To declare an external cluster you have to follow this kind of configuration : + +```yaml +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiCluster +metadata: + name: externalcluster +spec: + # rootProcessGroupId contains the uuid of the root process group for this cluster. + rootProcessGroupId: 'd37bee03-017a-1000-cff7-4eaaa82266b7' + # nodeURITemplate used to dynamically compute node uri. + nodeURITemplate: 'nifi0%d.integ.mapreduce.m0.p.fti.net:9090' + # all node requiresunique id + nodes: + - id: 1 + - id: 2 + - id: 3 + # type defines if the cluster is internal (i.e manager by the operator) or external. 
+ # :Enum={"external","internal"} + type: 'external' + # clientType defines if the operator will use basic or tls authentication to query the NiFi cluster. + # Enum={"tls","basic"} + clientType: 'basic' + # secretRef reference the secret containing the informations required to authenticate to the cluster. + secretRef: + name: nifikop-credentials + namespace: nifikop-nifi +``` + +- The `Spec.RootProcessGroupId` field is required to give the ability to the operator of managing root level policy and default deployment and policy. +- The `Spec.NodeURITemplate` field, defines the hostname template of your NiFi cluster nodes, the operator will use this information and the list of id specified in `Spec.Nodes` field to generate the hostname of the nodes (in the configuration above you will have : `nifi01.integ.mapreduce.m0.p.fti.net:9090`, `nifi02.integ.mapreduce.m0.p.fti.net:9090`, `nifi03.integ.mapreduce.m0.p.fti.net:9090`). +- The `Spec.Type` field defines the type of cluster that this resource is refering to, by default it is `internal`, in our case here we just want to use this resource to reference an existing NiFi cluster, so we set this field to `external`. +- The `Spec.ClientType` field defines how we want to authenticate to the NiFi cluster API, for now we are supporting two modes : + - `tls` : using client TLS certificate. + - `basic` : using a username and a password to get an access token. +- The `Spec.SecretRef` defines a reference to a secret which contains the sensitive values that will be used by the operator to authenticate to the NiFi cluster API (ie in basic mode it will contain the password and username). + +:::warning +The id of node only support `int32` as type, so if the hostname of your nodes doesn't match with this, you can't use this feature. 
+::: + +## Secret configuration for Basic authentication + +When you are using the basic authentication, you have to pass some information in the secret that is referenced in the `NifiCluster` resource: + +- `username` : the username associated with the user that will be used by the operator to request the REST API. +- `password` : the password associated with the user that will be used by the operator to request the REST API. +- `ca.crt (optional)`: the certificate authority to trust the server certificate if needed + +The following command shows how you can create this secret : + +```console +kubectl create secret generic nifikop-credentials \ + --from-file=username=./secrets/username\ + --from-file=password=./secrets/password\ + --from-file=ca.crt=./secrets/ca.crt\ + -n nifikop-nifi +``` + +:::info +When you use the basic authentication, the operator will create a secret `-basic-secret` containing for each node an access token that will be maintained by the operator. +::: + +## Secret configuration for TLS authentication + +When you are using the tls authentication, you have to pass some information in the secret that is referenced in the `NifiCluster` resource: + +- `tls.key` : The user private key. +- `tls.crt` : The user certificate. +- `password` : the password associated with the user that will be used by the operator to request the REST API.
+- `ca.crt`: The CA certificate +- `truststore.jks`: +- `keystore.jks`: diff --git a/site/website/sidebars.json b/site/website/sidebars.json index d130b1066..8f93a42bc 100644 --- a/site/website/sidebars.json +++ b/site/website/sidebars.json @@ -19,9 +19,7 @@ { "type": "category", "label": "Install", - "items": [ - "2_setup/3_install/1_customizable_install_with_helm" - ] + "items": ["2_setup/3_install/1_customizable_install_with_helm"] } ], "Tasks": [ @@ -29,7 +27,8 @@ "type": "category", "label": "NiFi Cluster", "items": [ - "3_tasks/1_nifi_cluster/2_cluster_scaling" + "3_tasks/1_nifi_cluster/2_cluster_scaling", + "3_tasks/1_nifi_cluster/4_external_cluster" ] }, { @@ -40,9 +39,7 @@ { "type": "category", "label": "Authentication", - "items": [ - "3_tasks/2_security/2_authentication/1_oidc" - ] + "items": ["3_tasks/2_security/2_authentication/1_oidc"] } ] }, @@ -75,4 +72,4 @@ "6_contributing/3_credits" ] } -} \ No newline at end of file +} diff --git a/site/website/versioned_docs/version-0.7.0/1_concepts/1_introduction.md b/site/website/versioned_docs/version-0.7.0/1_concepts/1_introduction.md new file mode 100644 index 000000000..e2b7d739f --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/1_concepts/1_introduction.md @@ -0,0 +1,36 @@ +--- +id: 1_introduction +title: Introduction +sidebar_label: Introduction +--- + +The Orange NiFi operator is a Kubernetes operator to automate provisioning, management, autoscaling and operations of [Apache NiFi](https://nifi.apache.org/) clusters deployed to K8s. + +## Overview + +Apache NiFi is an open-source solution that support powerful and scalable directed graphs of data routing, transformation, and system mediation logic. 
+Some of the high-level capabilities and objectives of Apache NiFi include, and some of the main features of the **NiFiKop** are: + +- **Fine grained** node configuration support +- Graceful rolling upgrade +- graceful NiFi cluster **scaling** +- Encrypted communication using SSL +- the provisioning of secure NiFi clusters +- Advanced Dataflow and user management via CRD + +Some of the roadmap features : + +- Monitoring via **Prometheus** +- Automatic reaction and self healing based on alerts (plugin system, with meaningful default alert plugins) +- graceful NiFi cluster **scaling and rebalancing** + +## Motivation + +At [Orange](https://opensource.orange.com/fr/accueil/) we are building some [Kubernetes operator](https://github.com/Orange-OpenSource?utf8=%E2%9C%93&q=operator&type=&language=), that operate NiFi and Cassandra clusters (among other types) for our business cases. + +There are already some approaches to operating NiFi on Kubernetes, however, we did not find them appropriate for use in a highly dynamic environment, nor capable of meeting our needs. + +- [Helm chart](https://github.com/cetic/helm-nifi) +- [Cloudera Nifi Operator](https://blog.cloudera.com/cloudera-flow-management-goes-cloud-native-with-apache-nifi-on-red-hat-openshift-kubernetes-platform/) + +Finally, our motivation is to build an open source solution and a community which drives the innovation and features of this operator. diff --git a/site/website/versioned_docs/version-0.7.0/1_concepts/2_design_principes.md b/site/website/versioned_docs/version-0.7.0/1_concepts/2_design_principes.md new file mode 100644 index 000000000..3048d88a6 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/1_concepts/2_design_principes.md @@ -0,0 +1,62 @@ +--- +id: 2_design_principes +title: Design Principes +sidebar_label: Design Principes +--- + +## Pod level management + +NiFi is a stateful application. 
The first piece of the puzzle is the Node, which is a simple server capable of creating/forming a cluster with other Nodes. Every Node has its own **unique** configuration which differs slightly from all others. + +All NiFi on Kubernetes setups use [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) to create a NiFi Cluster. Just to quickly recap from the K8s docs: + +> StatefulSet manages the deployment and scaling of a set of Pods, and provide guarantees about their ordering and uniqueness. Like a Deployment, a StatefulSet manages Pods that are based on an identical container spec. Unlike a Deployment, a StatefulSet maintains sticky identities for each of its Pods. These pods are created from the same spec, but are not interchangeable: each has a persistent identifier that is maintained across any rescheduling. + +How does this look from the perspective of Apache NiFi ? + +With StatefulSet we get : + +- unique Node IDs generated during Pod startup +- networking between Nodes with headless services +- unique Persistent Volumes for Nodes + +Using StatefulSet we **lose** the ability to : + +- modify the configuration of unique Nodes +- remove a specific Node from a cluster (StatefulSet always removes the most recently created Node) +- use multiple, different Persistent Volumes for each Node + +The Orange NiFi Operator uses `simple` Pods, ConfigMaps, and PersistentVolumeClaims, instead of StatefulSet (based on the design used by [Banzai Cloud Kafka Operator](https://github.com/banzaicloud/kafka-operator)). +Using these resources allows us to build an Operator which is better suited to NiFi.
+ +With the Orange NiFi operator we can: + +- modify the configuration of unique Nodes +- remove specific Nodes from clusters +- use multiple Persistent Volumes for each Node + +## Dataflow Lifecycle management + +The [Dataflow Lifecycle management feature](./3_features.md#dataflow-lifecycle-management-via-crd) introduces 3 new CRDs : + +- **NiFiRegistryClient :** Allowing you to declare a [NiFi registry client](https://nifi.apache.org/docs/nifi-registry-docs/html/getting-started.html#connect-nifi-to-the-registry). +- **NiFiParameterContext :** Allowing you to create parameter context, with two kinds of parameters, a simple `map[string]string` for non-sensitive parameters and a `list of secrets` which contains sensitive parameters. +- **NiFiDataflow :** Allowing you to declare a Dataflow based on a `NiFiRegistryClient` and optionally a `ParameterContext`, which will be deployed and managed by the operator on the `targeted NiFi cluster`. + +The following diagram shows the interactions between all the components : + +![dataflow lifecycle management schema](/img/1_concepts/2_design_principes/dataflow_lifecycle_management_schema.jpg) + +With each CRD comes a new controller, with a reconcile loop : + +- **NiFiRegistryClient's controller :** + +![NiFi registry client's reconcile loop](/img/1_concepts/2_design_principes/registry_client_reconcile_loop.jpeg) + +- **NiFiParameterContext's controller :** + +![NiFi parameter context's reconcile loop](/img/1_concepts/2_design_principes/parameter_context_reconcile_loop.jpeg) + +- **NiFiDataflow's controller :** + +![NiFi dataflow's reconcile loop](/img/1_concepts/2_design_principes/dataflow_reconcile_loop.jpeg) diff --git a/site/website/versioned_docs/version-0.7.0/1_concepts/3_features.md b/site/website/versioned_docs/version-0.7.0/1_concepts/3_features.md new file mode 100644 index 000000000..4a1455c18 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/1_concepts/3_features.md @@ -0,0 +1,57 @@ +--- +id: 3_features 
+title: Features +sidebar_label: Features +--- + +To highlight some of the features we needed and that were not possible with the operators available, please keep reading + +## Fine Grained Node Config Support + +We needed to be able to react to events in a fine-grained way for each Node - and not in the limited way StatefulSet does (which, for example, removes the most recently created Nodes). Some of the available solutions try to overcome these deficits by placing scripts inside the container to generate configs at runtime (a good example is our [Cassandra Operator](https://github.com/Orange-OpenSource/casskop)), whereas the Orange NiFi operator's configurations are deterministically placed in specific Configmaps. + +## Graceful NiFi Cluster Scaling + +Apache NiFi is a good candidate to create an operator, because everything is made to orchestrate it through REST Api calls. With this comes automation of actions such as scaling, following all required steps : https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#decommission-nodes. + +## Graceful Rolling Upgrade + +Operator supports graceful rolling upgrade. It means the operator will check if the cluster is healthy. + +## Dynamic Configuration Support + +NiFi operates with two types of configs: + +- Read-only +- PerNode + +Read-only configs require a node restart to update; all the others may be updated dynamically. +Operator CRD distinguishes these fields, and proceeds with the right action. It can be a rolling upgrade, or +a dynamic reconfiguration. + +## Dataflow lifecycle management via CRD + +In a cloud native approach, we are looking for important management features, which we have applied to NiFi Dataflow : + +- **Automated deployment :** Based on the NiFi registry, you can describe your `NiFiDataflow` resource that will be deployed and run on the targeted NiFi cluster.
+- **Portability :** On kubernetes everything is a yaml file, so with NiFiKop we give you the ability to describe your clusters but also the `registry clients`, `parameter contexts` and `dataflows` of your NiFi application, so that you can redeploy the same thing in a different namespace or cluster. +- **State management :** With NiFiKop resources, you can describe what you want, and the operator deals with the NiFi Rest API to make sure the resource stays in sync (even if someone manually makes changes directly on NiFi cluster). +- **Configurations :** Based on the `Parameter Contexts`, NiFiKop allows you to associate to your `Dataflow` (= your applications) with a different configuration depending on the environment ! + +## Users and access policies management + +Without the management of users and access policies associated, it was not possible to have a fully automated NiFi cluster setup due to : + +- **Node scaling :** when a new node joins the cluster it needs to have some roles like `proxy user request`, `view data` etc., by managing users and access policies we can easily create a user for this node with the right accesses. +- **Operator admin rigth :** For the operator to manage efficiently the cluster it needs a lot of rights as `deploying process groups`, `empty the queues` etc., these rights are not available by default when you set a user as [InitialAdmin](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#initial-admin-identity). Once again by giving the ability to define users and access policies we go through this. +- **User's access :** as seen just below we need to define the operator as `InitialAdmin`, in this situation there is no more users that can access to the web UI to manually give access to other users. That's why we extend the `InitialAdmin` concept into the operator, giving the ability to define a list of users as admins. 
+ +In addition to these requirements to have a fully automated and managed cluster, we introduced some useful features : + +- **User management :** using `NifiUser` resource, you are able to create (or bind an existing) user in NiFi cluster and apply some access policies that will be managed and continuously synced by the operator. +- **Group management :** using `NifiUserGroup` resource, you can create groups in NiFi cluster and apply access policies and a list of `NifiUser` that will be managed and continuously synced by the operator. +- **Default group :** As the definition of `NifiUser` and `NifiUserGroup` resources could be heavy for some simple use cases, we also decided to define two default groups that you can feed with a list of users that will be created and managed by the operator (no kubernetes resources to create) : + - **Admins :** a group giving access to everything on the NiFi Cluster, + - **Readers :** a group giving access as viewer on the NiFi Cluster. + +By introducing this feature we are giving you the ability to fully automate your deployment, from the NiFi Cluster to your managed NiFi Dataflow. diff --git a/site/website/versioned_docs/version-0.7.0/1_concepts/4_roadmap.md b/site/website/versioned_docs/version-0.7.0/1_concepts/4_roadmap.md new file mode 100644 index 000000000..7206685af --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/1_concepts/4_roadmap.md @@ -0,0 +1,95 @@ +--- +id: 4_roadmap +title: Roadmap +sidebar_label: Roadmap +--- + +## Available + +### NiFi cluster installation + +| | | +| ------------------- | -------- | +| Status | Done | +| Priority | High | +| Targeted Start date | Jan 2020 | + +### Graceful NiFi Cluster Scaling + +| | | +| ------------------- | -------- | +| Status | Done | +| Priority | High | +| Targeted Start date | Jan 2020 | + +Apache NiFi is a good candidate to create an operator, because everything is made to orchestrate it through REST Api calls. 
With this comes automation of actions such as scaling, following all required steps : https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#decommission-nodes. + +### Communication via SSL + +| | | +| ------------------- | -------- | +| Status | Done | +| Priority | High | +| Targeted Start date | May 2020 | + +The operator fully automates NiFi's SSL support. +The operator can provision the required secrets and certificates for you, or you can provide your own. + +### Dataflow lifecycle management via CRD + +| | | +| ------------------- | -------- | +| Status | Done | +| Priority | High | +| Targeted Start date | Aug 2020 | + +### Users & access policies management + +| | | +| ------------------- | ------------- | +| Status | Done | +| Priority | High | +| Targeted Start date | November 2020 | + +The operator fully automates NiFi's user and access policies management. + +## Backlog + +### Monitoring via Prometheus + +| | | +| ------------------- | -------- | +| Status | To Do | +| Priority | High | +| Targeted Start date | Oct 2020 | + +The NiFi operator exposes NiFi JMX metrics to Prometheus. + +### Reacting on Alerts + +| | | +| ------------------- | ----- | +| Status | To Do | +| Priority | Low | +| Targeted Start date | - | + +The NiFi Operator acts as a **Prometheus Alert Manager**. It receives alerts defined in Prometheus, and creates actions based on Prometheus alert annotations. + +Currently, there are three actions expected : + +- upscale cluster (add a new Node) +- downscale cluster (remove a Node) +- add additional disk to a Node + +### Seamless Istio mesh support + +| | | +| ------------------- | ----- | +| Status | To Do | +| Priority | Low | +| Targeted Start date | - | + +- Operator allows to use ClusterIP services instead of Headless, which still works better in case of Service meshes. +- To avoid too early nifi initialization, which might lead to unready sidecar container. 
The operator will use a small script to + mitigate this behaviour. Any NiFi image can be used; the only requirement is an available **wget** command. +- To access a NiFi cluster which runs inside the mesh. The operator will support creating Istio ingress gateways. diff --git a/site/website/versioned_docs/version-0.7.0/2_setup/1_getting_started.md b/site/website/versioned_docs/version-0.7.0/2_setup/1_getting_started.md new file mode 100644 index 000000000..81d2923e2 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/2_setup/1_getting_started.md @@ -0,0 +1,159 @@ +--- +id: 1_getting_started +title: Getting Started +sidebar_label: Getting Started +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +The operator installs the 1.12.1 version of Apache NiFi, can run on Minikube v0.33.1+ and **Kubernetes 1.16.0+**, and requires **Helm 3**. + +:::info +The operator supports NiFi 1.11.0+ +::: + +As a pre-requisite it needs a Kubernetes cluster. Also, NiFi requires Zookeeper so you need to first have a Zookeeper cluster if you don't already have one. + +> We believe in the `separation of concerns` principle, thus the NiFi operator does not install nor manage Zookeeper. + +## Prerequisites + +### Install Zookeeper + +To install Zookeeper we recommend using the [Bitnami Zookeeper chart](https://github.com/bitnami/charts/tree/master/bitnami/zookeeper). + +```bash +helm repo add bitnami https://charts.bitnami.com/bitnami +``` + +```bash +# You have to create the namespace before executing following command +helm install zookeeper bitnami/zookeeper \ + --set resources.requests.memory=256Mi \ + --set resources.requests.cpu=250m \ + --set resources.limits.memory=256Mi \ + --set resources.limits.cpu=250m \ + --set global.storageClass=standard \ + --set networkPolicy.enabled=true \ + --set replicaCount=3 +``` + +:::warning +Replace the `storageClass` parameter value with your own.
+::: + +### Install cert-manager + +The NiFiKop operator uses `cert-manager` for issuing certificates to users and and nodes, so you'll need to have it setup in case you want to deploy a secured cluster with authentication enabled. + + + + +```bash +# Install the CustomResourceDefinitions and cert-manager itself +kubectl apply -f \ + https://github.com/jetstack/cert-manager/releases/download/v1.2.0/cert-manager.yaml +``` + + + + +```bash +# Install CustomResourceDefinitions first +kubectl apply --validate=false -f \ + https://github.com/jetstack/cert-manager/releases/download/v1.2.0/cert-manager.crds.yaml + +# Add the jetstack helm repo +helm repo add jetstack https://charts.jetstack.io +helm repo update + +# You have to create the namespace before executing following command +helm install cert-manager \ + --namespace cert-manager \ + --version v1.2.0 jetstack/cert-manager +``` + + + + +## Installation + +## Installing with Helm + +You can deploy the operator using a Helm chart [Helm chart](https://github.com/Orange-OpenSource/nifikop/tree/master/helm): + +> To install an other version of the operator use `helm install --name=nifikop --namespace=nifi --set operator.image.tag=x.y.z orange-incubator/nifikop` + +In the case where you don't want to deploy the crds using helm (`--skip-crds`), you have to deploy manually the crds : + +```bash +kubectl apply -f https://raw.githubusercontent.com/Orange-OpenSource/nifikop/master/config/crd/bases/nifi.orange.com_nificlusters.yaml +kubectl apply -f https://raw.githubusercontent.com/Orange-OpenSource/nifikop/master/config/crd/bases/nifi.orange.com_nifiusers.yaml +kubectl apply -f https://raw.githubusercontent.com/Orange-OpenSource/nifikop/master/config/crd/bases/nifi.orange.com_nifiusergroups.yaml +kubectl apply -f https://raw.githubusercontent.com/Orange-OpenSource/nifikop/master/config/crd/bases/nifi.orange.com_nifidataflows.yaml +kubectl apply -f 
https://raw.githubusercontent.com/Orange-OpenSource/nifikop/master/config/crd/bases/nifi.orange.com_nifiparametercontexts.yaml +kubectl apply -f https://raw.githubusercontent.com/Orange-OpenSource/nifikop/master/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml +``` + +Add the orange incubator repository : + +```bash + +helm repo add orange-incubator https://orange-kubernetes-charts-incubator.storage.googleapis.com/ +``` + +Now deploy the helm chart : + +```bash +# You have to create the namespace before executing following command +helm install nifikop \ + orange-incubator/nifikop \ + --namespace=nifi \ + --version 0.6.3 \ + --set image.tag=v0.6.3-release \ + --set resources.requests.memory=256Mi \ + --set resources.requests.cpu=250m \ + --set resources.limits.memory=256Mi \ + --set resources.limits.cpu=250m \ + --set namespaces={"nifi"} +``` + +:::note +Add the following parameter if you are using this instance to only deploy unsecured clusters : `--set certManager.enabled=false` +::: + +## Create custom storage class + +We recommend to use a **custom StorageClass** to leverage the volume binding mode `WaitForFirstConsumer` + +```bash +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: exampleStorageclass +parameters: + type: pd-standard +provisioner: kubernetes.io/gce-pd +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer +``` + +:::tip +Remember to set your NiFiCluster CR properly to use the newly created StorageClass. +::: + +## Deploy NiFi cluster + +And after you can deploy a simple NiFi cluster. 
+ +```bash +# Add your zookeeper svc name to the configuration +kubectl create -n nifi -f config/samples/simplenificluster.yaml +``` diff --git a/site/website/versioned_docs/version-0.7.0/2_setup/2_platform_setup/1_gke.md b/site/website/versioned_docs/version-0.7.0/2_setup/2_platform_setup/1_gke.md new file mode 100644 index 000000000..6e3849b82 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/2_setup/2_platform_setup/1_gke.md @@ -0,0 +1,42 @@ +--- +id: 1_gke +title: Google Kubernetes Engine +sidebar_label: Google Kubernetes Engine +--- + +Follow these instructions to prepare a GKE cluster for NiFiKop + +1. Setup environment variables. + +```sh +export GCP_PROJECT= +export GCP_ZONE= +export CLUSTER_NAME= +``` + +2. Create a new cluster. + +```sh +gcloud container clusters create $CLUSTER_NAME \ + --cluster-version latest \ + --machine-type=n1-standard-1 \ + --num-nodes 4 \ + --zone $GCP_ZONE \ + --project $GCP_PROJECT +``` + +3. Retrieve your credentials for `kubectl`. + +```sh +cloud container clusters get-credentials $CLUSTER_NAME \ + --zone $GCP_ZONE \ + --project $GCP_PROJECT +``` + +4. Grant cluster administrator (admin) permissions to the current user. To create the necessary RBAC rules for NiFiKop, the current user requires admin permissions. + +```sh +kubectl create clusterrolebinding cluster-admin-binding \ + --clusterrole=cluster-admin \ + --user=$(gcloud config get-value core/account) +``` diff --git a/site/website/versioned_docs/version-0.7.0/2_setup/2_platform_setup/2_minikube.md b/site/website/versioned_docs/version-0.7.0/2_setup/2_platform_setup/2_minikube.md new file mode 100644 index 000000000..61acdb54c --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/2_setup/2_platform_setup/2_minikube.md @@ -0,0 +1,47 @@ +--- +id: 2_minikube +title: MiniKube +sidebar_label: MiniKube +--- + +Follow these instructions to prepare minikube for NiFiKop installation with sufficient resources to run NiFiKop and some basic applications. 
+ +## Prerequisites + +- Administrative privileges are required to run minikube. + +## Installation steps + +1. Install the latest version of [minikube](https://kubernetes.io/docs/setup/learning-environment/minikube/), version 1.1.1 or later, and a [minikube hypervisor driver](https://kubernetes.io/docs/tasks/tools/install-minikube/#install-a-hypervisor). +2. If you’re not using the default driver, set your minikube hypervisor driver. +   For example, if you installed the KVM hypervisor, set the vm-driver within the minikube configuration using the following command: + +   ```sh +   minikube config set vm-driver kvm2 +   ``` + +3. Start minikube with 16384 MB of memory and 4 CPUs. This example uses Kubernetes version 1.14.2. You can change the version to any Kubernetes version supported by NiFiKop by altering the --kubernetes-version value: + +   ```sh +   $ minikube start --memory=16384 --cpus=4 --kubernetes-version=v1.14.2 +   ``` + +Depending on the hypervisor you use and the platform on which the hypervisor is run, minimum memory requirements vary. 16384 MB is sufficient to run NiFiKop.
+ +:::tip +If you don’t have enough RAM allocated to the minikube virtual machine, the following errors could occur: + +- Image pull failures +- Healthcheck timeout failures +- Kubectl failures on the host +- General network instability of the virtual machine and the host +- Complete lock-up of the virtual machine +- Host NMI watchdog reboots +- One effective way to monitor memory usage in minikube: + +```sh +minikube ssh +top +``` + +::: diff --git a/site/website/versioned_docs/version-0.7.0/2_setup/3_install/1_customizable_install_with_helm.md b/site/website/versioned_docs/version-0.7.0/2_setup/3_install/1_customizable_install_with_helm.md new file mode 100644 index 000000000..bde2d30e9 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/2_setup/3_install/1_customizable_install_with_helm.md @@ -0,0 +1,197 @@ +--- +id: 1_customizable_install_with_helm +title: Customizable install with Helm +sidebar_label: Customizable install with Helm +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## Prerequisites + +- Perform any necessary [platform-specific setup](../2_platform_setup/1_gke.md) +- [Install a Helm client](https://github.com/helm/helm#install) with a version higher than 3 + +## Introduction + +This Helm chart installs NiFiKop, the Orange NiFi Kubernetes operator, to create/configure/manage NiFi +clusters in a Kubernetes Namespace. + +It will use the following Custom Resource Definitions (CRDs): + +- `nificlusters.nifi.orange.com`, +- `nifiusers.nifi.orange.com`, +- `nifiusergroups.nifi.orange.com`, +- `nifiregistryclients.nifi.orange.com`, +- `nifiparametercontexts.nifi.orange.com`, +- `nifidataflows.nifi.orange.com`, + +### Configuration + +The following table lists the configurable parameters of the NiFi Operator Helm chart and their default values.
+ +| Parameter | Description | Default | +| -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------- | +| `image.repository` | Image | `orangeopensource/nifikop` | +| `image.tag` | Image tag | `v0.6.3-release` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.imagePullSecrets.enabled` | Enable tue use of secret for docker image | `false` | +| `image.imagePullSecrets.name` | Name of the secret to connect to docker registry | - | +| `certManager.enabled` | Enable cert-manager integration | `true` | +| `rbacEnable` | If true, create & use RBAC resources | `true` | +| `resources` | Pod resource requests & limits | `{}` | +| `metricService` | deploy service for metrics | `false` | +| `debug.enabled` | activate DEBUG log level | `false` | +| `certManager.clusterScoped` | If true setup cluster scoped resources | `false` | +| `namespaces` | List of namespaces where Operator watches for custom resources. Make sure the operator ServiceAccount is granted `get` permissions on this `Node` resource when using limited RBACs. | `""` i.e. all namespaces | +| `nodeSelector` | Node selector configuration for operator pod | `{}` | +| `affinity` | Node affinity configuration for operator pod | `{}` | +| `tolerations` | Toleration configuration for operator pod | `{}` | +| `serviceAccount.create` | Whether the SA creation is delegated to the chart or not | `true` | +| `serviceAccount.name` | Name of the SA used for NiFiKop deployment | release name | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. 
For example, + +```console +$ helm install nifikop \ + orange-incubator/nifikop \ + -f values.yaml +``` + +### Installing the Chart + +:::important Skip CRDs +In the case where you don't want to deploy the crds using helm (`--skip-crds`) you need to deploy manually the crds beforehand: + +```bash +kubectl apply -f https://raw.githubusercontent.com/Orange-OpenSource/nifikop/master/config/crd/bases/nifi.orange.com_nificlusters.yaml +kubectl apply -f https://raw.githubusercontent.com/Orange-OpenSource/nifikop/master/config/crd/bases/nifi.orange.com_nifiusers.yaml +kubectl apply -f https://raw.githubusercontent.com/Orange-OpenSource/nifikop/master/config/crd/bases/nifi.orange.com_nifiusergroups.yaml +kubectl apply -f https://raw.githubusercontent.com/Orange-OpenSource/nifikop/master/config/crd/bases/nifi.orange.com_nifidataflows.yaml +kubectl apply -f https://raw.githubusercontent.com/Orange-OpenSource/nifikop/master/config/crd/bases/nifi.orange.com_nifiparametercontexts.yaml +kubectl apply -f https://raw.githubusercontent.com/Orange-OpenSource/nifikop/master/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml +``` + +::: + + + + +```bash +helm install nifikop orange-incubator/nifikop \ + --dry-run \ + --debug.enabled \ + --set debug.enabled=true \ + --set namespaces={"nifikop"} +``` + + + + +```bash +helm install orange-incubator/nifikop +``` + + + + + +```bash +helm install nifikop orange-incubator/nifikop --set namespaces={"nifikop"} +``` + + + + +> the `--replace` flag allow you to reuses a charts release name + +### Listing deployed charts + +``` +helm list +``` + +### Get Status for the helm deployment + +``` +helm status nifikop +``` + +## Uninstaling the Charts + +If you want to delete the operator from your Kubernetes cluster, the operator deployment +should be deleted. + +``` +helm del nifikop +``` + +The command removes all the Kubernetes components associated with the chart and deletes the helm release. 
+ +:::tip +The CRD created by the chart are not removed by default and should be manually cleaned up (if required) +::: + +Manually delete the CRD: + +``` +kubectl delete crd nificlusters.nifi.orange.com +kubectl delete crd nifiusers.nifi.orange.com +kubectl delete crd nifiusergroups.nifi.orange.com +kubectl delete crd nifiregistryclients.nifi.orange.com +kubectl delete crd nifiparametercontexts.nifi.orange.com +kubectl delete crd nifidataflows.nifi.orange.com +``` + +:::warning +If you delete the CRD then +It will delete **ALL** Clusters that has been created using this CRD!!! +Please never delete a CRD without very good care +::: + +Helm always keeps records of what releases happened. Need to see the deleted releases ? + +```bash +helm list --deleted +``` + +Need to see all of the releases (deleted and currently deployed, as well as releases that +failed) ? + +```bash +helm list --all +``` + +Because Helm keeps records of deleted releases, a release name cannot be re-used. (If you really need to re-use a +release name, you can use the `--replace` flag, but it will simply re-use the existing release and replace its +resources.) + +Note that because releases are preserved in this way, you can rollback a deleted resource, and have it re-activate. + +To purge a release + +```bash +helm delete --purge nifikop +``` + +## Troubleshooting + +### Install of the CRD + +By default, the chart will install the CRDs, but this installation is global for the whole +cluster, and you may want to not modify the already deployed CRDs. 
+ +In this case there is a parameter to say to not install the CRDs : + +``` +$ helm install --name nifikop ./helm/nifikop --set namespaces={"nifikop"} --skip-crds +``` diff --git a/site/website/versioned_docs/version-0.7.0/3_tasks/1_nifi_cluster/1_nodes_configuration.md b/site/website/versioned_docs/version-0.7.0/3_tasks/1_nifi_cluster/1_nodes_configuration.md new file mode 100644 index 000000000..89b206a24 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/3_tasks/1_nifi_cluster/1_nodes_configuration.md @@ -0,0 +1,9 @@ +--- +id: 1_nodes_configuration +title: Nodes configuration +sidebar_label: Nodes configuration +--- + +:::warning +WIP +::: diff --git a/site/website/versioned_docs/version-0.7.0/3_tasks/1_nifi_cluster/2_cluster_scaling.md b/site/website/versioned_docs/version-0.7.0/3_tasks/1_nifi_cluster/2_cluster_scaling.md new file mode 100644 index 000000000..57d122922 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/3_tasks/1_nifi_cluster/2_cluster_scaling.md @@ -0,0 +1,236 @@ +--- +id: 2_cluster_scaling +title: Cluster Scaling +sidebar_label: Cluster Scaling +--- + +This tasks shows you how to perform a gracefull cluster scale up and scale down. + +## Before you begin + +- Setup NiFiKop by following the instructions in the [Installation guide](../../2_setup/1_getting_started.md). +- Deploy the [Simple NiFi](../../2_setup/1_getting_started.md#easy-way-installing-with-helm) sample cluster. +- Review the [Node](../../5_references/1_nifi_cluster/4_node.md) references doc. + +## About this task + +The [Simple NiFi](../../2_setup/1_getting_started.md#easy-way-installing-with-helm) example consists of a three nodes NiFi cluster. +A node decommission must follow a strict procedure, described in the [NiFi documentation](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#decommission-nodes) : + +1. Disconnect the node +2. Once disconnect completes, offload the node. +3. Once offload completes, delete the node. +4. 
Once the delete request has finished, stop/remove the NiFi service on the host. + +For the moment, we have implemented it as follows in the operator : + +1. Disconnect the node +2. Once disconnect completes, offload the node. +3. Once offload completes, delete the pod. +4. Once the pod deletion completes, delete the node. +5. Once the delete request has finished, remove the node from the NifiCluster status. + +In addition, we have a regular check that ensures that all nodes have been removed. + +In this task, you will first perform a scale up by adding a new node. Then, you will remove a node other than the one just created, and observe the decommission's steps. + +## Scale up : Add a new node + +For this task, we will simply add a node with the same configuration as the other ones; if you want to know more about how to add a node with another configuration, have a look at the [Node configuration](./1_nodes_configuration.md) documentation page. + +1. Add and run a dataflow as the example : + +![Scaling dataflow](/img/3_tasks/1_nifi_cluster/2_cluster_scaling/scaling_dataflow.png) + +2.
Add a new node to the list of `NifiCluster.Spec.Nodes` field, by following the [Node object definition](../../5_references/1_nifi_cluster/4_node.md) documentation: + +```yaml +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiCluster +metadata: + name: simplenifi +spec: + service: + headlessEnabled: true + zkAddress: 'zookeepercluster-client.zookeeper:2181' + zkPath: '/simplenifi' + clusterImage: 'apache/nifi:1.12.1' + oneNifiNodePerNode: false + nodeConfigGroups: + default_group: + isNode: true + storageConfigs: + - mountPath: '/opt/nifi/nifi-current/logs' + name: logs + pvcSpec: + accessModes: + - ReadWriteOnce + storageClassName: 'standard' + resources: + requests: + storage: 10Gi + serviceAccountName: 'default' + resourcesRequirements: + limits: + cpu: '2' + memory: 3Gi + requests: + cpu: '1' + memory: 1Gi + nodes: + - id: 0 + nodeConfigGroup: 'default_group' + - id: 1 + nodeConfigGroup: 'default_group' + - id: 2 + nodeConfigGroup: 'default_group' + # >>>> START: The new node + - id: 25 + nodeConfigGroup: 'default_group' + # <<<< END + propagateLabels: true + nifiClusterTaskSpec: + retryDurationMinutes: 10 + listenersConfig: + internalListeners: + - type: 'http' + name: 'http' + containerPort: 8080 + - type: 'cluster' + name: 'cluster' + containerPort: 6007 + - type: 's2s' + name: 's2s' + containerPort: 10000 +``` + +:::important +**Note :** The `Node.Id` field must be unique in the `NifiCluster.Spec.Nodes` list. +::: + +3. Apply the new `NifiCluster` configuration : + +```sh +kubectl -n nifi apply -f config/samples/simplenificluster.yaml +``` + +4. 
You should now have the following resources into kubernetes : + +```console +kubectl get pods,configmap,pvc -l nodeId=25 +NAME READY STATUS RESTARTS AGE +pod/simplenifi-25-nodem5jh4 1/1 Running 0 11m + +NAME DATA AGE +configmap/simplenifi-config-25 7 11m + +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +persistentvolumeclaim/simplenifi-25-storagehwn24 Bound pvc-7da86076-728e-11ea-846d-42010a8400f2 10Gi RWO standard 11m +``` + +And if you go on the NiFi UI, in the cluster administration page : + +![Scale up, cluster list](/img/3_tasks/1_nifi_cluster/2_cluster_scaling/scaleup_cluster_list.png) + +5. You now have data on the new node : + +![Scale up, cluster distribution](/img/3_tasks/1_nifi_cluster/2_cluster_scaling/scaleup_distribution.png) + +## Scaledown : Gracefully remove node + +For this task, we will simply remove a node and look at that the decommission's steps. + +1. Remove the node from the list of `NifiCluster.Spec.Nodes` field : + +```yaml +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiCluster +metadata: + name: simplenifi +spec: + headlessServiceEnabled: true + zkAddresse: 'zookeepercluster-client.zookeeper:2181' + zkPath: '/simplenifi' + clusterImage: 'apache/nifi:1.11.3' + oneNifiNodePerNode: false + nodeConfigGroups: + default_group: + isNode: true + storageConfigs: + - mountPath: '/opt/nifi/nifi-current/logs' + name: logs + pvcSpec: + accessModes: + - ReadWriteOnce + storageClassName: 'standard' + resources: + requests: + storage: 10Gi + serviceAccountName: 'default' + resourcesRequirements: + limits: + cpu: '2' + memory: 3Gi + requests: + cpu: '1' + memory: 1Gi + nodes: + - id: 0 + nodeConfigGroup: 'default_group' + - id: 1 + nodeConfigGroup: 'default_group' + # >>>> START: node removed + # - id: 2 + # nodeConfigGroup: "default_group" + # <<<< END + - id: 25 + nodeConfigGroup: 'default_group' + propagateLabels: true + nifiClusterTaskSpec: + retryDurationMinutes: 10 + listenersConfig: + internalListeners: + - type: 'http' + name: 'http' 
+ containerPort: 8080 + - type: 'cluster' + name: 'cluster' + containerPort: 6007 + - type: 's2s' + name: 's2s' + containerPort: 10000 +``` + +2. Apply the new `NifiCluster` configuration : + +```sh +kubectl -n nifi apply -f config/samples/simplenificluster.yaml +``` + +3. You can follow the node's action step status in the `NifiCluster.Status` description : + +```console +kubectl describe nificluster simplenifi + +... +Status: + Nodes State: + ... + 2: + Configuration State: ConfigInSync + Graceful Action State: + Action State: GracefulDownscaleRequired + Error Message: + ... +... +``` + +:::tip +The list of decommision's step and their corresponding value for the `Nifi Cluster.Status.Node State.Graceful ActionState.ActionStep` field is described into the [Node State page](../../5_references/1_nifi_cluster/5_node_state.md#actionstep) +::: + +4. Once the scaledown successfully performed, you should have the data offloaded on the other nodes, and the node state removed from the `NifiCluster.Status.NodesState` list : + +:::warning +Keep in mind that the [`NifiCluster.Spec.nifiClusterTaskSpec.retryDurationMinutes`](../../5_references/1_nifi_cluster/1_nifi_cluster.md#nificlustertaskspec) should be long enough to perform the whole procedure, or you will have some rollback and retry loop. 
+::: diff --git a/site/website/versioned_docs/version-0.7.0/3_tasks/1_nifi_cluster/3_external_dns.md b/site/website/versioned_docs/version-0.7.0/3_tasks/1_nifi_cluster/3_external_dns.md new file mode 100644 index 000000000..37b350d0e --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/3_tasks/1_nifi_cluster/3_external_dns.md @@ -0,0 +1,9 @@ +--- +id: 3_external_dns +title: External DNS +sidebar_label: External DNS +--- + +:::warning +WIP +::: diff --git a/site/website/versioned_docs/version-0.7.0/3_tasks/1_nifi_cluster/4_external_cluster.md b/site/website/versioned_docs/version-0.7.0/3_tasks/1_nifi_cluster/4_external_cluster.md new file mode 100644 index 000000000..446d2efbc --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/3_tasks/1_nifi_cluster/4_external_cluster.md @@ -0,0 +1,93 @@ +--- +id: 4_external_cluster +title: External cluster +sidebar_label: External cluster +--- + +This task shows you how to configure an external cluster. + +## Common configuration + +The operator allows you to manage the Dataflow lifecycle for internal (i.e. clusters managed by the operator) and external NiFi clusters. +A NiFi cluster is considered as external as soon as the `NifiCluster` resource used as a reference in other NiFi resources explicitly details the way to communicate with the cluster. + +This feature allows you : + +- To automate your Dataflow CI/CD using yaml +- To manage your Dataflows the same way wherever your cluster is, on bare metal, VMs, k8s, on-premise or on cloud. + +To deploy different resources (`NifiRegistryClient`, `NifiUser`, `NifiUserGroup`, `NifiParameterContext`, `NifiDataflow`) you simply have to declare a `NifiCluster` resource explaining how to communicate with the external cluster, and refer to this resource as usual using the `Spec.ClusterRef` field.
+ +To declare an external cluster you have to follow this kind of configuration : + +```yaml +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiCluster +metadata: +  name: externalcluster +spec: +  # rootProcessGroupId contains the uuid of the root process group for this cluster. +  rootProcessGroupId: 'd37bee03-017a-1000-cff7-4eaaa82266b7' +  # nodeURITemplate used to dynamically compute node uri. +  nodeURITemplate: 'nifi0%d.integ.mapreduce.m0.p.fti.net:9090' +  # all nodes require a unique id +  nodes: +    - id: 1 +    - id: 2 +    - id: 3 +  # type defines if the cluster is internal (i.e. managed by the operator) or external. +  # :Enum={"external","internal"} +  type: 'external' +  # clientType defines if the operator will use basic or tls authentication to query the NiFi cluster. +  # Enum={"tls","basic"} +  clientType: 'basic' +  # secretRef references the secret containing the information required to authenticate to the cluster. +  secretRef: +    name: nifikop-credentials +    namespace: nifikop-nifi +``` + +- The `Spec.RootProcessGroupId` field is required to give the operator the ability to manage root level policy and default deployment and policy. +- The `Spec.NodeURITemplate` field, defines the hostname template of your NiFi cluster nodes, the operator will use this information and the list of ids specified in `Spec.Nodes` field to generate the hostname of the nodes (in the configuration above you will have : `nifi01.integ.mapreduce.m0.p.fti.net:9090`, `nifi02.integ.mapreduce.m0.p.fti.net:9090`, `nifi03.integ.mapreduce.m0.p.fti.net:9090`). +- The `Spec.Type` field defines the type of cluster that this resource is referring to, by default it is `internal`, in our case here we just want to use this resource to reference an existing NiFi cluster, so we set this field to `external`. +- The `Spec.ClientType` field defines how we want to authenticate to the NiFi cluster API, for now we are supporting two modes : +  - `tls` : using client TLS certificate.
+  - `basic` : using a username and a password to get an access token. +- The `Spec.SecretRef` defines a reference to a secret which contains the sensitive values that will be used by the operator to authenticate to the NiFi cluster API (i.e. in basic mode it will contain the password and username). + +:::warning +The node id only supports the `int32` type, so if the hostnames of your nodes don't match this, you can't use this feature. +::: + +## Secret configuration for Basic authentication + +When you are using the basic authentication, you have to pass some information into the secret that is referenced in the `NifiCluster` resource: + +- `username` : the username associated with the user that will be used by the operator to request the REST API. +- `password` : the password associated with the user that will be used by the operator to request the REST API. +- `ca.crt (optional)`: the certificate authority to trust the server certificate if needed. + +The following command shows how you can create this secret : + +```console +kubectl create secret generic nifikop-credentials \ +  --from-file=username=./secrets/username\ +  --from-file=password=./secrets/password\ +  --from-file=ca.crt=./secrets/ca.crt\ +  -n nifikop-nifi +``` + +:::info +When you use the basic authentication, the operator will create a secret `-basic-secret` containing for each node an access token that will be maintained by the operator. +::: + +## Secret configuration for TLS authentication + +When you are using the tls authentication, you have to pass some information into the secret that is referenced in the `NifiCluster` resource: + +- `tls.key` : The user private key. +- `tls.crt` : The user certificate. +- `password` : the password associated with the user that will be used by the operator to request the REST API.
+- `ca.crt`: The CA certificate +- `truststore.jks`: +- `keystore.jks`: diff --git a/site/website/versioned_docs/version-0.7.0/3_tasks/2_security/1_ssl.md b/site/website/versioned_docs/version-0.7.0/3_tasks/2_security/1_ssl.md new file mode 100644 index 000000000..4fa9f9320 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/3_tasks/2_security/1_ssl.md @@ -0,0 +1,159 @@ +--- +id: 1_ssl +title: Securing NiFi with SSL +sidebar_label: SSL +--- + +The `NiFi operator` makes securing your NiFi cluster with SSL. You may provide your own certificates, or instruct the operator to create them for from your cluster configuration. + +Below this is an example configuration required to secure your cluster with SSL : + +```yaml +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiCluster +... +spec: + ... + managedAdminUsers: + - identity : "alexandre.guitton@orange.com" + name: "aguitton" + ... + readOnlyConfig: + # NifiProperties configuration that will be applied to the node. + nifiProperties: + webProxyHosts: + - nifistandard2.trycatchlearn.fr:8443 + ... + ... + listenersConfig: + internalListeners: + - type: "https" + name: "https" + containerPort: 8443 + - type: "cluster" + name: "cluster" + containerPort: 6007 + - type: "s2s" + name: "s2s" + containerPort: 10000 + sslSecrets: + tlsSecretName: "test-nifikop" + create: true +``` + +- `managedAdminUsers` : list of users account which will be configured as admin into NiFi cluster, please check [](../4_nifi_user_group#managed-groups-for-simple-setup) for more information. +- `readOnlyConfig.nifiProperties.webProxyHosts` : A list of allowed HTTP Host header values to consider when NiFi is running securely and will be receiving requests to a different host[:port] than it is bound to. 
[web-properties](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#web-properties) + +If `listenersConfig.sslSecrets.create` is set to `false`, the operator will look for the secret at `listenersConfig.sslSecrets.tlsSecretName` and expect these values : + +| key | value | +| ---------- | ------------------------------------------------------------------------ | +| caCert | The CA certificate | +| caKey | The CA private key | +| clientCert | A client certificate (this will be used by operator for NiFI operations) | +| clientKey | The private key for clientCert | + +## Using an existing Issuer + +As described in the [Reference section](../../5_references/1_nifi_cluster/6_listeners_config.md#sslsecrets), instead of using a self-signed certificate as CA, you can use an existing one. +In order to do so, you only have to refer it into your `Spec.ListenerConfig.SslSecrets.IssuerRef` field. + +### Example : Let's encrypt + +Let's say you have an existing DNS server, with [external dns](https://github.com/kubernetes-sigs/external-dns) deployed into your cluster's namespace. +You can easily use Let's encrypt as authority for your certificate. + +To do this, you have to : + +1. Create an issuer : + +```yaml +apiVersion: cert-manager.io/v1alpha2 +kind: Issuer +metadata: + name: letsencrypt-staging +spec: + acme: + # You must replace this email address with your own. + # Let's Encrypt will use this to contact you about expiring + # certificates, and issues related to your account. + email: + server: https://acme-staging-v02.api.letsencrypt.org/directory + privateKeySecretRef: + # Secret resource used to store the account's private key. + name: example-issuer-account-key + # Add a single challenge solver, HTTP01 using nginx + solvers: + - http01: + ingress: + ingressTemplate: + metadata: + annotations: + 'external-dns.alpha.kubernetes.io/ttl': '5' +``` + +2. 
Setup External dns and correctly create your issuer into your cluster configuration : + +```yaml +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiCluster +... +spec: + ... + clusterSecure: true + siteToSiteSecure: true + ... + listenersConfig: + clusterDomain: + useExternalDNS: true + ... + sslSecrets: + tlsSecretName: "test-nifikop" + create: true + issuerRef: + name: letsencrypt-staging + kind: Issuer +``` + +## Create SSL credentials + +You may use `NifiUser` resource to create new certificates for your applications, allowing them to query your Nifi cluster. + +To create a new client you will need to generate new certificates sign by the CA. The operator can automate this for you using the `NifiUser` CRD : + +```console +cat << EOF | kubectl apply -n nifi -f - +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiUser +metadata: + name: example-client + namespace: nifi +spec: + clusterRef: + name: nifi + secretName: example-client-secret +EOF +``` + +This will create a user and store its credentials in the secret `example-client-secret`. The secret contains these fields : + +| key | value | +| ------- | -------------------- | +| ca.crt | The CA certificate | +| tls.crt | The user certificate | +| tls.key | The user private key | + +You can then mount these secret to your pod. Alternatively, you can write them to your local machine by running: + +```console +kubectl get secret example-client-secret -o jsonpath="{['data']['ca\.crt']}" | base64 -d > ca.crt +kubectl get secret example-client-secret -o jsonpath="{['data']['tls\.crt']}" | base64 -d > tls.crt +kubectl get secret example-client-secret -o jsonpath="{['data']['tls\.key']}" | base64 -d > tls.key +``` + +The operator can also include a Java keystore format (JKS) with your user secret if you'd like. 
Add `includeJKS`: `true` to the `spec` as shown above, and then the user-secret will gain these additional fields : + +| key      | value                                                                                                  | +| -------- | ------------------------------------------------------------------------------------------------------ | +| tls.jks  | The java keystore containing both the user keys and the CA (use this for your keystore AND truststore) | +| pass.txt | The password to decrypt the JKS (this will be randomly generated)                                      | diff --git a/site/website/versioned_docs/version-0.7.0/3_tasks/2_security/2_authentication/1_oidc.md b/site/website/versioned_docs/version-0.7.0/3_tasks/2_security/2_authentication/1_oidc.md new file mode 100644 index 000000000..42bd55dc4 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/3_tasks/2_security/2_authentication/1_oidc.md @@ -0,0 +1,42 @@ +--- +id: 1_oidc +title: OpenId Connect +sidebar_label: OpenId Connect +--- + +To enable authentication via OpenId Connect, as described in the [NiFi Administration guide](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html), some configuration is required in `nifi.properties`. + +In addition, to ensure multiple identity provider support, we recommend adding the following configuration to your `nifi.properties` : + +```sh +nifi.security.identity.mapping.pattern.dn=CN=([^,]*)(?:, (?:O|OU)=.*)? +nifi.security.identity.mapping.value.dn=$1 +nifi.security.identity.mapping.transform.dn=NONE +``` + +To perform this with `NiFiKop` you just have to configure the `Spec.NifiProperties.OverrideConfigs` field with your OIDC configuration, for example : + +```yaml +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiCluster +... +spec: +  ... +  readOnlyConfig: +    # NifiProperties configuration that will be applied to the node. +    nifiProperties: +      webProxyHosts: +        - nifistandard2.trycatchlearn.fr:8443 +      # Additional nifi.properties configuration that will override the one produced based +      # on template and configurations.
+ overrideConfigs: | + nifi.security.user.oidc.discovery.url= + nifi.security.user.oidc.client.id= + nifi.security.user.oidc.client.secret= + nifi.security.identity.mapping.pattern.dn=CN=([^,]*)(?:, (?:O|OU)=.*)? + nifi.security.identity.mapping.value.dn=$1 + nifi.security.identity.mapping.transform.dn=NONE + ... + ... +... +``` diff --git a/site/website/versioned_docs/version-0.7.0/3_tasks/3_nifi_dataflow.md b/site/website/versioned_docs/version-0.7.0/3_tasks/3_nifi_dataflow.md new file mode 100644 index 000000000..c4ae92f19 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/3_tasks/3_nifi_dataflow.md @@ -0,0 +1,125 @@ +--- +id: 3_nifi_dataflow +title: Provisioning NiFi Dataflows +sidebar_label: NiFi Dataflows +--- + +You can create NiFi dataflows either : + +- directly against the cluster through its REST API (using UI or some home made scripts), or +- via the `NifiDataflow` CRD. + +If you want more details about the design, just have a look on the [design page](../1_concepts/2_design_principes.md#dataflow-lifecycle-management) + +To deploy a [NifiDataflow] you have to start by deploying a [NifiRegistryClient] because **NiFiKop** manages dataflow using the [NiFi Registry feature](https://nifi.apache.org/registry). + +Below is an example of [NifiRegistryClient] : + +```yaml +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiRegistryClient +metadata: + name: registry-client-example + namespace: nifikop +spec: + clusterRef: + name: nc + namespace: nifikop + description: 'Registry client managed by NiFiKop' + uri: 'http://nifi.hostname.com:18080' +``` + +Once you have deployed your [NifiRegistryClient], you have the possibility of defining a configuration that you will apply to your [NifiDataflow]. + +This configuration is defined using the [NifiParameterContext] CRD, which NiFiKop will convert into a [Parameter context](https://nifi.apache.org/docs/nifi-docs/html/user-guide.html#parameter-contexts). 
+ +Below is an example of [NifiParameterContext] : + +```yaml +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiParameterContext +metadata: + name: dataflow-lifecycle + namespace: demo +spec: + description: 'It is a test' + clusterRef: + name: nc + namespace: nifikop + secretRefs: + - name: secret-params + namespace: nifikop + parameters: + - name: test + value: toto + description: tutu + - name: test2 + value: toto + description: toto +``` + +As you can see, in the [NifiParameterContext] you can refer to some secrets that will be converted into [sensitive parameter](https://nifi.apache.org/docs/nifi-docs/html/user-guide.html#using-parameters-with-sensitive-properties). + +Here is an example of secret that you can create that will be used by the configuration above : + +```console +kubectl create secret generic secret-params \ + --from-literal=secret1=yop \ + --from-literal=secret2=yep \ + -n nifikop +``` + +:::warning +As a sensitive value cannot be retrieved through the Rest API, to update the value of a sensitive parameter, you have to : + +- remove it from the secret +- wait for the next loop +- insert the parameter with the new value inside the secret + +or you can simply create a new [NifiParameterContext] and refer it into your [NifiDataflow]. 
+::: + +You can now deploy your [NifiDataflow] by referencing the previous objects : + +```yaml +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiDataflow +metadata: + name: dataflow-lifecycle +spec: + parentProcessGroupID: '16cfd2ec-0174-1000-0000-00004b9b35cc' + bucketId: '01ced6cc-0378-4893-9403-f6c70d080d4f' + flowId: '9b2fb465-fb45-49e7-94fe-45b16b642ac9' + flowVersion: 2 + syncMode: always + skipInvalidControllerService: true + skipInvalidComponent: true + clusterRef: + name: nc + namespace: nifikop + registryClientRef: + name: registry-client-example + namespace: nifikop + parameterContextRef: + name: dataflow-lifecycle + namespace: demo + updateStrategy: drain +``` + +To find details about the versioned flow information required check the [official documentation](https://nifi.apache.org/docs/nifi-registry-docs/index.html) + +You have three modes of control over your dataflow by the operator : + +1 - `Spec.SyncMode == never` : The operator will deploy the dataflow as described in the resource, and never control it (unless you change the field to `always`). It is useful when you want to deploy your dataflow without starting it. + +2 - `Spec.SyncMode == once` : The operator will deploy the dataflow as described in the resource, run it once, and never control it again (unless you change the field to `always`). It is useful when you want to deploy your dataflow in a dev environment, and you want to update the dataflow. + +3 - `Spec.SyncMode == always` : The operator will deploy and ensure the dataflow lifecycle, it will avoid all manual modification directly from the Cluster (e.g remove the process group, remove the versioning, update the parent process group, make some local changes ...). If you want to perform update, rollback or stuff like this, you have to simply update the [NifiDataflow] resource.
+ +:::important +More information about `Spec.UpdateStrategy` [here](../5_references/5_nifi_dataflow.md#dataflowupdatestrategy) +::: + +[nifidataflow]: ../5_references/5_nifi_dataflow.md +[nifiregistryclient]: ../5_references/3_nifi_registry_client.md +[nifiparametercontext]: ../5_references/4_nifi_parameter_context.md diff --git a/site/website/versioned_docs/version-0.7.0/3_tasks/4_nifi_user_group.md b/site/website/versioned_docs/version-0.7.0/3_tasks/4_nifi_user_group.md new file mode 100644 index 000000000..f981d9bc5 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/3_tasks/4_nifi_user_group.md @@ -0,0 +1,168 @@ +--- +id: 4_nifi_user_group +title: Provisioning NiFi Users and Groups +sidebar_label: NiFi Users and Groups +--- + +## User management + +The `NifiUser` resource was already introduced for the [SSL credentials](./2_security/1_ssl.md#create-ssl-credentials) concerns. +What we are covering here is the NiFi user management part introduced in this resource. + +When you create a `NifiUser` resource the operator will : + +1. Try to check if a user already exists with the same name on the NiFi cluster, if it does, the operator will set [NifiUser.Status.Id](./2_security/1_ssl.md#create-ssl-credentials) to bind it with the kubernetes resource. +2. If no user is found, the operator will create and manage it (i.e it will ensure the synchronisation with the NiFi Cluster). + +```yaml +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiUser +metadata: + name: aguitton +spec: + # identity field is use to define the user identity on NiFi cluster side, + # it use full when the user's name doesn't suite with Kubernetes resource name. + identity: alexandre.guitton@orange.com + # Contains the reference to the NifiCluster with the one the registry client is linked. 
+ clusterRef: + name: nc + namespace: nifikop + # Whether or not the operator also includes a Java keystore format (JKS) with your secret + includeJKS: false + # Whether or not a certificate will be created for this user. + createCert: false + # defines the list of access policies that will be granted to the group. + accessPolicies: + # defines the kind of access policy, could be "global" or "component". + - type: component + # defines the kind of action that will be granted, could be "read" or "write" + action: read + # resource defines the kind of resource targeted by this access policy, please refer to the following page : + # https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#access-policies + resource: /data + # componentType is used if the type is "component", it allows defining the kind of component on which is the + # access policy + componentType: 'process-groups' + # componentId is used if the type is "component", it allows defining the id of the component on which is the + # access policy + componentId: '' +``` + +By default the user name that will be used is the name of the resource. + +But as there are some constraints on this name (e.g [RFC 1123](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names)) that doesn't match with those applied on NiFi, you can override it with the `NifiUser.Spec.Identity` field which is more permissive. +In the example above the kubernetes resource name will be `aguitton` but the NiFi user created on the cluster will be `alexandre.guitton@orange.com`. + +In the case the user will not authenticate himself using TLS authentication, the operator doesn't have to create a certificate, so just set `NifiUser.Spec.CreateCert` to false. + +For each user, you have the ability to define a list of [AccessPolicies](../5_references/2_nifi_user.md#accesspolicy) to give a list of access to your user.
+In the example above we are giving to user `alexandre.guitton@orange.com` the right to view metadata and content for the root process group in flowfile queues in outbound connections and through provenance events. + +## UserGroup management + +To simplify the access management Apache NiFi allows to define groups containing a list of users, on which we apply a list of access policies. +This part is supported by the operator using the `NifiUserGroup` resource : + +```yaml +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiUserGroup +metadata: + name: group-test +spec: + # Contains the reference to the NifiCluster with the one the registry client is linked. + clusterRef: + name: nc + namespace: nifikop + # contains the list of references to NifiUsers that are part of the group. + usersRef: + - name: nc-0-node.nc-headless.nifikop.svc.cluster.local + # namespace: nifikop + - name: nc-controller.nifikop.mgt.cluster.local + # defines the list of access policies that will be granted to the group. + accessPolicies: + # defines the kind of access policy, could be "global" or "component". + - type: global + # defines the kind of action that will be granted, could be "read" or "write" + action: read + # resource defines the kind of resource targeted by this access policy, please refer to the following page : + # https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#access-policies + resource: /counters +# # componentType is used if the type is "component", it allows defining the kind of component on which is the +# # access policy +# componentType: "process-groups" +# # componentId is used if the type is "component", it allows defining the id of the component on which is the +# # access policy +# componentId: "" +``` + +When you create a `NifiUserGroup` resource, the operator will create and manage a group named `${resource namespace}-${resource name}` in Nifi.
+To declare the users that are part of this group, you just have to declare them in the [NifiUserGroup.UsersRef](../5_references/6_nifi_usergroup.md#userreference) field. + +:::important +The [NifiUserGroup.UsersRef](../5_references/6_nifi_usergroup.md#userreference) requires to declare the name and namespace of a `NifiUser` resource, so it is previously required to declare the resource. + +It's required to create the resource even if the user is already declared in NiFi Cluster (In that case the operator will just sync the kubernetes resource). +::: + +Like for `NifiUser` you can declare a list of [AccessPolicies](../5_references/2_nifi_user.md#accesspolicy) to give a list of access to your user. + +In the example above we are giving to users `nc-0-node.nc-headless.nifikop.svc.cluster.local` and `nc-controller.nifikop.mgt.cluster.local` the right to view the counters informations. + +## Managed groups for simple setup + +In some case these two features could be heavy to define, for example when you have 10 dataflows with one cluster for each of them, it will lead in a lot of `.yaml` files ... +To simplify this, we implement in the operator 2 `managed groups` : + +- **Admins :** a group giving access to everything on the NiFi Cluster, +- **Readers :** a group giving access as viewer on the NiFi Cluster. + +You can directly define the list of users who belong to each of them in the `NifiCluster.Spec` field : + +```yaml +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiCluster +metadata: + name: mynifi +spec: + ... + oneNifiNodePerNode: false + # + propagateLabels: true + managedAdminUsers: + - identity : "alexandre.guitton@orange.com" + name: "aguitton" + - identity : "nifiuser@orange.com" + name: "nifiuser" + managedReaderUsers: + - identity : "toto@orange.com" + name: "toto" + ... 
+``` + +In this example the operator will create and manage 3 `NifiUsers` : + +- **aguitton**, with the identity : `alexandre.guitton@orange.com` +- **nifiuser**, with the identity : `nifiuser@orange.com` +- **toto**, with the identity : `toto@orange.com` + +And create and manage two groups : + +- **managed-admins :** that will contain 3 users (**aguitton**, **nifiuser**, **nc-controller.nifikop.mgt.cluster.local** which is the controller user). +- **managed-readers :** that will contain 1 user (**toto**) + +And the rest of the stuff will be reconciled and managed as described for `NifiUsers` and `NifiUserGroups`. + +:::note +There is one more group that is created and managed by the operator, this is the **managed-nodes** group, for each node a `NifiUser` is created, and we automatically add them to this group to give them the right list of accesses. + +To get the list of managed groups just check the list of `NifiUserGroup` : + +```console +kubectl get -n nifikop nifiusergroups.nifi.orange.com +NAME AGE +managed-admins 6d7h +managed-nodes 6d7h +managed-readers 6d7h +``` + +::: diff --git a/site/website/versioned_docs/version-0.7.0/4_examples/1_simple_nifi_cluster.md b/site/website/versioned_docs/version-0.7.0/4_examples/1_simple_nifi_cluster.md new file mode 100644 index 000000000..871df5974 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/4_examples/1_simple_nifi_cluster.md @@ -0,0 +1,5 @@ +--- +id: 1_simple_nifi_cluster +title: Simple NiFi cluster +sidebar_label: Simple NiFi cluster +--- diff --git a/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/1_nifi_cluster.md b/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/1_nifi_cluster.md new file mode 100644 index 000000000..aa0959478 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/1_nifi_cluster.md @@ -0,0 +1,171 @@ +--- +id: 1_nifi_cluster +title: NiFi cluster +sidebar_label: NiFi cluster +--- + +`NifiCluster` 
describes the desired state of the NiFi cluster we want to setup through the operator. + +```yaml +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiCluster +metadata: + name: simplenifi +spec: + service: + headlessEnabled: true + zkAddress: 'zookeepercluster-client.zookeeper:2181' + zkPath: '/simplenifi' + clusterImage: 'apache/nifi:1.11.3' + oneNifiNodePerNode: false + nodeConfigGroups: + default_group: + isNode: true + storageConfigs: + - mountPath: '/opt/nifi/nifi-current/logs' + name: logs + pvcSpec: + accessModes: + - ReadWriteOnce + storageClassName: 'standard' + resources: + requests: + storage: 10Gi + serviceAccountName: 'default' + resourcesRequirements: + limits: + cpu: '2' + memory: 3Gi + requests: + cpu: '1' + memory: 1Gi + nodes: + - id: 1 + nodeConfigGroup: 'default_group' + - id: 2 + nodeConfigGroup: 'default_group' + propagateLabels: true + nifiClusterTaskSpec: + retryDurationMinutes: 10 + listenersConfig: + internalListeners: + - type: 'http' + name: 'http' + containerPort: 8080 + - type: 'cluster' + name: 'cluster' + containerPort: 6007 + - type: 's2s' + name: 's2s' + containerPort: 10000 + externalServices: + - name: 'clusterip' + spec: + type: ClusterIP + portConfigs: + - port: 8080 + internalListenerName: 'http' + serviceAnnotations: + toto: tata +``` + +## NifiCluster + +| Field | Type | Description | Required | Default | +| -------- | ----------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------- | -------- | ------- | +| metadata | [ObjectMetadata](https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ObjectMeta) | is metadata that all persisted resources must have, which includes all objects users must create. | No | nil | +| spec | [NifiClusterSpec](#nificlusterspec) | defines the desired state of NifiCluster. | No | nil | +| status | [NifiClusterStatus](#nificlusterstatus) | defines the observed state of NifiCluster. 
| No | nil | + +## NifiClusterSpec + +| Field | Type | Description | Required | Default | +| ------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------------------------------- | ---------------- | ---------- | +| clientType | Enum={"tls","basic"} | defines if the operator will use basic or tls authentication to query the NiFi cluster. | No | `tls` | +| type | Enum={"external","internal"} | defines if the cluster is internal (i.e. managed by the operator) or external. | No | `internal` | +| nodeURITemplate | string | used to dynamically compute node uri. | if external type | - | +| nifiURI | string | used to access the cluster through a LB uri. | if external type | - | +| rootProcessGroupId | string | contains the uuid of the root process group for this cluster. | if external type | - | +| secretRef | \[ \][SecretReference](./4_nifi_parameter_context#secretreference) | reference the secret containing the information required to authenticate to the cluster. | if external type | - | +| proxyUrl | string | defines the proxy required to query the NiFi cluster. | if external type | - | +|service|[ServicePolicy](#servicepolicy)| defines the policy for services owned by NiFiKop operator. |No| - | +|pod|[PodPolicy](#podpolicy)| defines the policy for pod owned by NiFiKop operator. |No| - | +|zkAddress|string| specifies the ZooKeeper connection string in the form hostname:port where host and port are those of a Zookeeper server.|No|""| +|zkPath|string| specifies the Zookeeper chroot path as part of its Zookeeper connection string which puts its data under same path in the global ZooKeeper namespace.|Yes|"/"| +|initContainerImage|string| can override the default image used into the init container to check if ZooKeeper server is reachable. |Yes|"busybox"| +|initContainers|\[ \]string| defines additional initContainers configurations.
|No|\[ \]| +|clusterImage|string| can specify the whole nificluster image in one place. |No|""| +|oneNifiNodePerNode|boolean|if set to true every nifi node is started on a new node, if there is not enough node to do that it will stay in pending state. If set to false the operator also tries to schedule the nifi node to a unique node but if the node number is insufficient the nifi node will be scheduled to a node where a nifi node is already running.|No| nil | +|propagateLabels|boolean| - |Yes|false| +|managedAdminUsers|\[ \][ManagedUser](#managedusers)| contains the list of users that will be added to the managed admin group (with all rights). |No|[]| +|managedReaderUsers|\[ \][ManagedUser](#managedusers)| contains the list of users that will be added to the managed reader group (with read-only rights). |No|[]| +|readOnlyConfig|[ReadOnlyConfig](./2_read_only_config.md)| specifies the read-only type Nifi config cluster wide, all these will be merged with node specified readOnly configurations, so it can be overwritten per node.|No| nil | +|nodeConfigGroups|map\[string\][NodeConfig](./3_node_config.md)| specifies multiple node configs with unique name|No| nil | +|nodes|\[ \][Node](./3_node_config.md)| specifies the list of cluster nodes, all node requires an image, unique id, and storageConfigs settings|Yes| nil | +|disruptionBudget|[DisruptionBudget](#disruptionbudget)| defines the configuration for PodDisruptionBudget.|No| nil | +|ldapConfiguration|[LdapConfiguration](#ldapconfiguration)| specifies the configuration if you want to use LDAP.|No| nil | +|nifiClusterTaskSpec|[NifiClusterTaskSpec](#nificlustertaskspec)| specifies the configuration of the nifi cluster Tasks.|No| nil | +|listenersConfig|[ListenersConfig](./6_listeners_config.md)| specifies nifi's listener specific configs.|No| - | +|sidecarConfigs|\[ \][Container](https://godoc.org/k8s.io/api/core/v1#Container)|Defines additional sidecar configurations.
[Check documentation for more informations]| +|externalServices|\[ \][ExternalServiceConfigs](./7_external_service_config.md)| specifies settings required to access nifi externally.|No| - | + +## NifiClusterStatus + +| Field | Type | Description | Required | Default | +| ------------------ | ------------------------------------------- | ------------------------------------------------------------- | -------- | ------- | +| nodesState | map\[string\][NodeState](./5_node_state.md) | Store the state of each nifi node. | No | - | +| State | [ClusterState](#clusterstate) | Store the state of each nifi node. | Yes | - | +| rootProcessGroupId | string | contains the uuid of the root process group for this cluster. | No | - | + +## ServicePolicy + +| Field | Type | Description | Required | Default | +| --------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | ------- | +| headlessEnabled | boolean | specifies if the cluster should use headlessService for Nifi or individual services using service per nodes may come an handy case of service mesh. 
| Yes | false | +| annotations | map\[string\]string | Annotations specifies the annotations to attach to services the NiFiKop operator creates | No | - | + +## PodPolicy + +| Field | Type | Description | Required | Default | +| ----------- | ------------------- | ------------------------------------------------------------------------------------ | -------- | ------- | +| annotations | map\[string\]string | Annotations specifies the annotations to attach to pods the NiFiKop operator creates | No | - | + +## ManagedUsers + +| Field | Type | Description | Required | Default | +| ------ | ------ | ----------------------------------------------------------------------------------------------------------------------------------- | -------- | ------- | +| identity | string | identity field is used to define the user identity on NiFi cluster side, it is useful when the user's name doesn't suit the Kubernetes resource name. | No | - | +| name | string | name field is used to name the NifiUser resource, if no identity is provided it will be used to name the user on NiFi cluster side. | Yes | - | + +## DisruptionBudget + +| Field | Type | Description | Required | Default | +| -------- | ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | ------- | +| create | bool | if set to true, will create a podDisruptionBudget. | No | - | +| budget | string | the budget to set for the PDB, can either be static number or a percentage. | Yes | - | + +## LdapConfiguration + +| Field | Type | Description | Required | Default | +| ------------ | ------- | ----------------------------------------------------------------------------------------------------------------------------------------- | -------- | ------- | +| enabled | boolean | if set to true, we will enable ldap usage into nifi.properties configuration.
| No | false | +| url | string | space-separated list of URLs of the LDAP servers (i.e. ldap://${hostname}:${port}). | No | "" | +| searchBase | string | base DN for searching for users (i.e. CN=Users,DC=example,DC=com). | No | "" | +| searchFilter | string | Filter for searching for users against the 'User Search Base'. (i.e. sAMAccountName={0}). The user specified name is inserted into '{0}'. | No | "" | + +## NifiClusterTaskSpec + +| Field | Type | Description | Required | Default | +| -------------------- | ---- | ------------------------------------------------------------- | -------- | ------- | +| retryDurationMinutes | int | describes the amount of time the Operator waits for the task. | Yes | 5 | + +## ClusterState + +| Name | Value | Description | +| --------------------------- | ----------------------- | ------------------------------------------------------ | +| NifiClusterInitializing | ClusterInitializing | states that the cluster is still in initializing stage | +| NifiClusterInitialized | ClusterInitialized | states that the cluster is initialized | +| NifiClusterReconciling | ClusterReconciling | states that the cluster is still in reconciling stage | +| NifiClusterRollingUpgrading | ClusterRollingUpgrading | states that the cluster is rolling upgrading | +| NifiClusterRunning | ClusterRunning | states that the cluster is in running state | diff --git a/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/2_read_only_config.md b/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/2_read_only_config.md new file mode 100644 index 000000000..96ff27b8f --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/2_read_only_config.md @@ -0,0 +1,190 @@ +--- +id: 2_read_only_config +title: Read only configurations +sidebar_label: Read only configurations +--- + +ReadOnlyConfig object specifies the read-only type Nifi config cluster wide, all theses will be merged with node specified readOnly 
configurations, so it can be overwritten per node. + +```yaml +readOnlyConfig: + # MaximumTimerDrivenThreadCount define the maximum number of threads for timer driven processors available to the system. + maximumTimerDrivenThreadCount: 30 + # Logback configuration that will be applied to the node + logbackConfig: + # logback.xml configuration that will replace the one produced based on template + replaceConfigMap: + # The key of the value,in data content, that we want use. + data: logback.xml + # Name of the configmap that we want to refer. + name: raw + # Namespace where is located the secret that we want to refer. + namespace: nifikop + # logback.xml configuration that will replace the one produced based on template and overrideConfigMap + replaceSecretConfig: + # The key of the value,in data content, that we want use. + data: logback.xml + # Name of the configmap that we want to refer. + name: raw + # Namespace where is located the secret that we want to refer. + namespace: nifikop + # NifiProperties configuration that will be applied to the node. + nifiProperties: + # Additionnals nifi.properties configuration that will override the one produced based on template and + # configuration + overrideConfigMap: + # The key of the value,in data content, that we want use. + data: nifi.properties + # Name of the configmap that we want to refer. + name: raw + # Namespace where is located the secret that we want to refer. + namespace: nifikop. + # Additionnals nifi.properties configuration that will override the one produced based + # on template, configurations, overrideConfigMap and overrideConfigs. + overrideSecretConfig: + # The key of the value,in data content, that we want use. + data: nifi.properties + # Name of the configmap that we want to refer. + name: raw + # Namespace where is located the secret that we want to refer. 
+ namespace: nifikop + # Additionnals nifi.properties configuration that will override the one produced based + # on template, configurations and overrideConfigMap + overrideConfigs: | + nifi.ui.banner.text=NiFiKop by Orange + # A comma separated list of allowed HTTP Host header values to consider when NiFi + # is running securely and will be receiving requests to a different host[:port] than it is bound to. + # https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#web-properties + # webProxyHosts: + # Nifi security client auth + needClientAuth: false + # Indicates which of the configured authorizers in the authorizers.xml file to use + # https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#authorizer-configuration + # authorizer: + # ZookeeperProperties configuration that will be applied to the node. + zookeeperProperties: + # # Additionnals zookeeeper.properties configuration that will override the one produced based on template and + # # configuration + # overrideConfigMap: + # # The key of the value,in data content, that we want use. + # data: zookeeeper.properties + # # Name of the configmap that we want to refer. + # name: raw + # # Namespace where is located the secret that we want to refer. + # namespace: nifikop. + # # Additionnals zookeeeper.properties configuration that will override the one produced based + # # on template, configurations, overrideConfigMap and overrideConfigs. + # overrideSecretConfig: + # # The key of the value,in data content, that we want use. + # data: zookeeeper.properties + # # Name of the configmap that we want to refer. + # name: raw + # # Namespace where is located the secret that we want to refer. + # namespace: nifikop + # Additionnals zookeeper.properties configuration that will override the one produced based + # on template and configurations. 
+ overrideConfigs: | + initLimit=15 + autopurge.purgeInterval=24 + syncLimit=5 + tickTime=2000 + dataDir=./state/zookeeper + autopurge.snapRetainCount=30 + # BootstrapProperties configuration that will be applied to the node. + bootstrapProperties: + # # Additionnals bootstrap.properties configuration that will override the one produced based on template and + # # configuration + # overrideConfigMap: + # # The key of the value,in data content, that we want use. + # data: bootstrap.properties + # # Name of the configmap that we want to refer. + # name: raw + # # Namespace where is located the secret that we want to refer. + # namespace: nifikop. + # # Additionnals bootstrap.properties configuration that will override the one produced based + # # on template, configurations, overrideConfigMap and overrideConfigs. + # overrideSecretConfig: + # # The key of the value,in data content, that we want use. + # data: bootstrap.properties + # # Name of the configmap that we want to refer. + # name: raw + # # Namespace where is located the secret that we want to refer. + # namespace: nifikop + # JVM memory settings + nifiJvmMemory: '512m' + # Additionnals bootstrap.properties configuration that will override the one produced based + # on template and configurations. + # https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#bootstrap_properties + overrideConfigs: | + # java.arg.4=-Djava.net.preferIPv4Stack=true +``` + +## ReadOnlyConfig + +| Field | Type | Description | Required | Default | +| ----------------------------------- | ------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------- | -------- | ------- | +| maximumTimerDrivenThreadCount | int32 | define the maximum number of threads for timer driven processors available to the system. 
| No | nil | +| additionalSharedEnvs | \[ \][corev1.EnvVar](https://pkg.go.dev/k8s.io/api/core/v1#EnvVar) | define a set of additional env variables that will shared between all init containers and ontainers in the pod.. | No | \[ \] | +| nifiProperties | [NifiProperties](#nifiproperties) | nifi.properties configuration that will be applied to the node. | No | nil | +| zookeeperProperties | [ZookeeperProperties](#zookeeperproperties) | zookeeper.properties configuration that will be applied to the node. | No | nil | +| bootstrapProperties | [BootstrapProperties](#bootstrapproperties) | bootstrap.conf configuration that will be applied to the node. | No | nil | +| logbackConfig | [LogbackConfig](#logbackconfig) | logback.xml configuration that will be applied to the node. | No | nil | +| bootstrapNotificationServicesConfig | [BootstrapNotificationServices](#bootstrapnotificationservices) | bootstrap_notification_services.xml configuration that will be applied to the node. | No | nil | + +## NifiProperties + +| Field | Type | Description | Required | Default | +| -------------------- | ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | -------------------- | +| overrideConfigMap | [ConfigmapReference](#configmapreference) | Additionnals nifi.properties configuration that will override the one produced based on template and configuration. | No | nil | +| overrideConfigs | string | Additionnals nifi.properties configuration that will override the one produced based on template, configurations and overrideConfigMap. 
| No | "" | +| overrideSecretConfig | [SecretConfigReference](#secretconfigreference) | Additionnals nifi.properties configuration that will override the one produced based on template, configurations, overrideConfigMap and overrideConfigs. | No | nil | +| webProxyHosts | \[ \]string | A list of allowed HTTP Host header values to consider when NiFi is running securely and will be receiving requests to a different host[:port] than it is bound to. [web-properties](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#web-properties) | No | "" | +| needClientAuth | boolean | Nifi security client auth. | No | false | +| authorizer | string | Indicates which of the configured authorizers in the authorizers.xml file to use [authorizer-configuration](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#authorizer-configuration) | No | "managed-authorizer" | + +## ZookeeperProperties + +| Field | Type | Description | Required | Default | +| -------------------- | ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | ------- | +| overrideConfigMap | [ConfigmapReference](#configmapreference) | Additionnals zookeeper.properties configuration that will override the one produced based on template and configuration. | No | nil | +| overrideConfigs | string | Additionnals zookeeper.properties configuration that will override the one produced based on template, configurations and overrideConfigMap. | No | "" | +| overrideSecretConfig | [SecretConfigReference](#secretconfigreference) | Additionnals zookeeper.properties configuration that will override the one produced based on template, configurations, overrideConfigMap and overrideConfigs. 
| No | nil | + +## BootstrapProperties + +| Field | Type | Description | Required | Default | +| -------------------- | ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | ------- | +| overrideConfigMap | [ConfigmapReference](#configmapreference) | Additionnals bootstrap.properties configuration that will override the one produced based on template and configuration. | No | nil | +| overrideConfigs | string | Additionnals bootstrap.properties configuration that will override the one produced based on template, configurations and overrideConfigMap. | No | "" | +| overrideSecretConfig | [SecretConfigReference](#secretconfigreference) | Additionnals bootstrap.properties configuration that will override the one produced based on template, configurations, overrideConfigMap and overrideConfigs. | No | nil | +| NifiJvmMemory | string | JVM memory settings. | No | "512m" | + +## LogbackConfig + +| Field | Type | Description | Required | Default | +| ------------------- | ----------------------------------------------- | ----------------------------------------------------------------------------------------------------- | -------- | ------- | +| replaceConfigMap | [ConfigmapReference](#configmapreference) | logback.xml configuration that will replace the one produced based on template. | No | nil | +| replaceSecretConfig | [SecretConfigReference](#secretconfigreference) | logback.xml configuration that will replace the one produced based on template and overrideConfigMap. 
| No | nil | + +## BootstrapNotificationServicesConfig + +| Field | Type | Description | Required | Default | +| ------------------- | ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | -------- | ------- | +| replaceConfigMap | [ConfigmapReference](#configmapreference) | bootstrap_notifications_services.xml configuration that will replace the one produced based on template. | No | nil | +| replaceSecretConfig | [SecretConfigReference](#secretconfigreference) | bootstrap_notifications_services.xml configuration that will replace the one produced based on template and overrideConfigMap. | No | nil | + +## ConfigmapReference + +| Field | Type | Description | Required | Default | +| --------- | ------ | --------------------------------------------------------------- | -------- | ------- | +| name | string | Name of the configmap that we want to refer to. | Yes | "" | +| namespace | string | Namespace where the configmap that we want to refer to is located. | No | "" | +| data | string | The key of the value, in data content, that we want to use. | Yes | "" | + +## SecretConfigReference + +| Field | Type | Description | Required | Default | +| --------- | ------ | ------------------------------------------------------------ | -------- | ------- | +| name | string | Name of the secret that we want to refer to. | Yes | "" | +| namespace | string | Namespace where the secret that we want to refer to is located. | No | "" | +| data | string | The key of the value, in data content, that we want to use.
| Yes | "" | diff --git a/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/3_node_config.md b/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/3_node_config.md new file mode 100644 index 000000000..ea9a97d93 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/3_node_config.md @@ -0,0 +1,79 @@ +--- +id: 3_node_config +title: Node configuration +sidebar_label: Node configuration +--- + +NodeConfig defines the node configuration + +```yaml +default_group: + # provenanceStorage allow to specify the maximum amount of data provenance information to store at a time + # https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#write-ahead-provenance-repository-properties + provenanceStorage: '10 GB' + #RunAsUser define the id of the user to run in the Nifi image + # +kubebuilder:validation:Minimum=1 + runAsUser: 1000 + # Set this to true if the instance is a node in a cluster. + # https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#basic-cluster-setup + isNode: true + # Docker image used by the operator to create the node associated + # https://hub.docker.com/r/apache/nifi/ + # image: "apache/nifi:1.11.2" + # nodeAffinity can be specified, operator populates this value if new pvc added later to node + # https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity + # nodeAffinity: + # imagePullPolicy define the pull policy for NiFi cluster docker image + imagePullPolicy: IfNotPresent + # storageConfigs specifies the node related configs + storageConfigs: + # Name of the storage config, used to name PV to reuse into sidecars for example. + - name: provenance-repository + # Path where the volume will be mount into the main nifi container inside the pod. 
+ mountPath: '/opt/nifi/provenance_repository' + # Kubernetes PVC spec + # https://kubernetes.io/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolumeclaim + pvcSpec: + accessModes: + - ReadWriteOnce + storageClassName: 'standard' + resources: + requests: + storage: 10Gi + - mountPath: '/opt/nifi/nifi-current/logs' + name: logs + pvcSpec: + accessModes: + - ReadWriteOnce + storageClassName: 'standard' + resources: + requests: + storage: 10Gi +``` + +## NodeConfig + +| Field | Type | Description | Required | Default | +| --------------------- | ------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | --------- | +| provenanceStorage | string | provenanceStorage allow to specify the maximum amount of data provenance information to store at a time: [write-ahead-provenance-repository-properties](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#write-ahead-provenance-repository-properties) | No | "8 GB" | +| runAsUser | int64 | define the id of the user to run in the Nifi image | No | 1000 | +| fsGroup | int64 | define the id of the group for each volumes in Nifi image | No | 1000 | +| isNode | boolean | Set this to true if the instance is a node in a cluster: [basic-cluster-setup](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#basic-cluster-setup) | No | true | +| image | string | Docker image used by the operator to create the node associated. [Nifi docker registry](https://hub.docker.com/r/apache/nifi/) | No | "" | +| imagePullPolicy | [PullPolicy](https://godoc.org/k8s.io/api/core/v1#PullPolicy) | define the pull policy for NiFi cluster docker image.) 
| No | "" | +| nodeAffinity | string | operator populates this value if new pvc added later to node [node-affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity) | No | nil | +| storageConfigs | \[ \][StorageConfig](#storageconfig) | specifies the node related configs. | No | nil | +| serviceAccountName | string | specifies the serviceAccount used for this specific node. | No | "default" | +| resourcesRequirements | [ResourceRequirements](https://godoc.org/k8s.io/api/core/v1#ResourceRequirements) | works exactly like Container resources, the user can specify the limit and the requests through this property [manage-compute-resources-container](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/). | No | nil | +| imagePullSecrets | \[ \][LocalObjectReference](https://godoc.org/k8s.io/api/core/v1#TypedLocalObjectReference) | specifies the secret to use when using private registry. | No | nil | +| nodeSelector | map\[string\]string | nodeSelector can be specified, which set the pod to fit on a node [nodeselector](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) | No | nil | +| tolerations | \[ \][Toleration](https://godoc.org/k8s.io/api/core/v1#Toleration) | tolerations can be specified, which set the pod's tolerations [taint-and-toleration](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/#concepts). | No | nil | +| nodeAnnotations | map\[string\]string | Additionnal annotation to attach to the pod associated [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set). 
| No | nil | + +## StorageConfig + +| Field | Type | Description | Required | Default | +| --------- | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------- | ------- | +| name | string | Name of the storage config, used to name PV to reuse into sidecars for example. | Yes | - | +| mountPath | string | Path where the volume will be mount into the main nifi container inside the pod. | Yes | - | +| pvcSpec | [PersistentVolumeClaimSpec](https://godoc.org/k8s.io/api/core/v1#PersistentVolumeClaimSpec) | Kubernetes PVC spec. [create-a-persistentvolumeclaim](https://kubernetes.io/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolumeclaim). | Yes | - | diff --git a/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/4_node.md b/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/4_node.md new file mode 100644 index 000000000..cc5ae4d85 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/4_node.md @@ -0,0 +1,59 @@ +--- +id: 4_node +title: Node +sidebar_label: Node +--- + +Node defines the nifi node basic configuration + +```yaml +- id: 0 + # nodeConfigGroup can be used to ease the node configuration, if set only the id is required + nodeConfigGroup: 'default_group' + # readOnlyConfig can be used to pass Nifi node config + # which has type read-only these config changes will trigger rolling upgrade + readOnlyConfig: + nifiProperties: + overrideConfigs: | + nifi.ui.banner.text=NiFiKop by Orange - Node 0 + # node configuration +# nodeConfig: +- id: 2 + # readOnlyConfig can be used to pass Nifi node config + # which has type read-only these config changes will trigger rolling upgrade + readOnlyConfig: + overrideConfigs: | + 
nifi.ui.banner.text=NiFiKop by Orange - Node 2 + # node configuration + nodeConfig: + resourcesRequirements: + limits: + cpu: '2' + memory: 3Gi + requests: + cpu: '1' + memory: 1Gi + storageConfigs: + # Name of the storage config, used to name PV to reuse into sidecars for example. + - name: provenance-repository + # Path where the volume will be mount into the main nifi container inside the pod. + mountPath: '/opt/nifi/provenance_repository' + # Kubernetes PVC spec + # https://kubernetes.io/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolumeclaim + pvcSpec: + accessModes: + - ReadWriteOnce + storageClassName: 'standard' + resources: + requests: + storage: 8Gi +``` + +## Node + +| Field | Type | Description | Required | Default | +| --------------- | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -------- | ------- | +| id | int32 | unique Node id. | Yes | - | +| nodeConfigGroup | string | can be used to ease the node configuration, if set only the id is required | No | "" | +| readOnlyConfig | [ReadOnlyConfig](./2_read_only_config.md) | readOnlyConfig can be used to pass Nifi node config which has type read-only these config changes will trigger rolling upgrade. | No | nil | +| nodeConfig | [NodeConfig](./3_node_config.md) | node configuration. 
| No | nil | diff --git a/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/5_node_state.md b/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/5_node_state.md new file mode 100644 index 000000000..ac0ee3ade --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/5_node_state.md @@ -0,0 +1,71 @@ +--- +id: 5_node_state +title: Node state +sidebar_label: Node state +--- + +Holds information about nifi state + +## NodeState + +| Field | Type | Description | Required | Default | +| ------------------- | ------------------------------------------- | ------------------------------------------------------- | -------- | ------- | +| gracefulActionState | [GracefulActionState](#gracefulactionstate) | holds info about nifi cluster action status. | - | - | +| configurationState | [ConfigurationState](#configurationstate) | holds info about the config. | - | - | +| initClusterNode | [InitClusterNode](#initclusternode) | contains if this nodes was part of the initial cluster. | - | - | + +## GracefulActionState + +| Field | Type | Description | Required | Default | +| ------------ | ------------------------- | ------------------------------------------------------ | -------- | ------- | +| errorMessage | string | holds the information what happened with Nifi Cluster. | - | "" | +| actionStep | [ActionStep](#actionstep) | holds info about the action step ran. | No | nil | +| taskStarted | string | hold the time when the execution started. | No | "" | +| actionState | [State](#state) | holds the information about Action state. 
| No | nil | + +## ConfigurationState + +| Name | Value | Description | +| --------------- | --------------- | ----------------------------------------------------------------- | +| ConfigInSync | ConfigInSync | states that the generated nodeConfig is in sync with the Node | +| ConfigOutOfSync | ConfigOutOfSync | states that the generated nodeConfig is out of sync with the Node | + +## InitClusterNode + +| Name | Value | Description | +| ------------------ | ----- | ---------------------------------------------------- | +| IsInitClusterNode | true | states the node is part of initial cluster setup | +| NotInitClusterNode | false | states the node is not part of initial cluster setup | + +## State + +### Upscale + +| Name | Value | Description | +| ------------------------ | ------------------------ | --------------------------------------------------- | +| GracefulUpscaleRequired | GracefulUpscaleRequired | states that a node upscale is required. | +| GracefulUpscaleRunning | GracefulUpscaleRunning | states that the node upscale task is still running. | +| GracefulUpscaleSucceeded | GracefulUpscaleSucceeded | states the node is updated gracefully. | + +### Downscale + +| Name | Value | Description | +| -------------------------- | -------------------------- | ------------------------------------------------- | +| GracefulDownscaleRequired | GracefulDownscaleRequired | states that a node downscale is required. | +| GracefulDownscaleRunning | GracefulDownscaleRunning | states that the node downscale is still running. | +| GracefulDownscaleSucceeded | GracefulDownscaleSucceeded | states the node was downscaled gracefully. | + +## ActionStep + +| Name | Value | Description | +| -------------------- | ------------- | ------------------------------------------------------------------------------- | +| DisconnectNodeAction | DISCONNECTING | states that the NiFi node is disconnecting from NiFi Cluster.
| +| DisconnectStatus | DISCONNECTED | states that the NiFi node is disconnected from NiFi Cluster. | +| OffloadNodeAction | OFFLOADING | states that the NiFi node is offloading data to NiFi Cluster. | +| OffloadStatus | OFFLOADED | states that the NiFi node offloaded data to NiFi Cluster. | +| RemovePodAction | POD_REMOVING | states that the NiFi node pod and object related are removing by operator. | +| RemovePodStatus | POD_REMOVED | states that the NiFi node pod and object related have been removed by operator. | +| RemoveNodeAction | REMOVING | states that the NiFi node is removing from NiFi Cluster. | +| RemoveStatus | REMOVED | states that the NiFi node is removed from NiFi Cluster. | +| ConnectNodeAction | CONNECTING | states that the NiFi node is connecting to the NiFi Cluster. | +| ConnectStatus | CONNECTED | states that the NiFi node is connected to the NiFi Cluster. | diff --git a/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/6_listeners_config.md b/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/6_listeners_config.md new file mode 100644 index 000000000..d0dd28a46 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/6_listeners_config.md @@ -0,0 +1,54 @@ +--- +id: 6_listeners_config +title: Listeners Config +sidebar_label: Listeners Config +--- + +ListenersConfig defines the Nifi listener types : + +```yaml +listenersConfig: + internalListeners: + - type: 'https' + name: 'https' + containerPort: 8443 + - type: 'cluster' + name: 'cluster' + containerPort: 6007 + - type: 's2s' + name: 's2s' + containerPort: 10000 + - type: 'prometheus' + name: 'prometheus' + containerPort: 9090 + sslSecrets: + tlsSecretName: 'test-nifikop' + create: true +``` + +## ListenersConfig + +| Field | Type | Description | Required | Default | +| ----------------- | ------------------------------------------ | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | --------------- | +| internalListeners | \[ \][InternalListener](#internallistener) | specifies settings required to access nifi internally. | Yes | - | +| sslSecrets | [SSLSecrets](#sslsecrets) | contains information about ssl related kubernetes secrets if one of the listener setting type set to ssl these fields must be populated to. | Yes | nil | +| clusterDomain | string | allow to override the default cluster domain which is "cluster.local". | Yes | `cluster.local` | +| useExternalDNS | string | allow to manage externalDNS usage by limiting the DNS names associated to each nodes and load balancer: `-node-...` | Yes | false | + +## InternalListener + +| Field | Type | Description | Required | Default | +| ------------- | ------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | ------- | +| type | enum{ "cluster", "http", "https", "s2s", "prometheus"} | allow to specify if we are in a specific nifi listener it's allowing to define some required information such as Cluster Port, Http Port, Https Port, S2S or Prometheus port | Yes | - | +| name | string | an identifier for the port which will be configured. | Yes | - | +| containerPort | int32 | the containerPort. 
| Yes | - | + +## SSLSecrets + +| Field | Type | Description | Required | Default | +| ------------- | --------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | -------- | ------- | +| tlsSecretName | string | should contain all ssl certs required by nifi including: caCert, caKey, clientCert, clientKey serverCert, serverKey, peerCert, peerKey. | Yes | - | +| create | boolean | tells the installed cert manager to create the required certs keys. | Yes | - | +| clusterScoped | boolean | defines if the Issuer created is cluster or namespace scoped. | Yes | - | +| issuerRef | [ObjectReference](https://docs.cert-manager.io/en/release-0.9/reference/api-docs/index.html#objectreference-v1alpha1) | cIssuerRef allow to use an existing issuer to act as CA: https://cert-manager.io/docs/concepts/issuer/ | No | - | +| pkiBackend | enum{"cert-manager"} | | Yes | - | diff --git a/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/7_external_service_config.md b/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/7_external_service_config.md new file mode 100644 index 000000000..49f215d0d --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/5_references/1_nifi_cluster/7_external_service_config.md @@ -0,0 +1,46 @@ +--- +id: 7_external_service_config +title: External Service Config +sidebar_label: External Service Config +--- + +ListenersConfig defines the Nifi listener types : + +```yaml +externalServices: + - name: 'clusterip' + spec: + type: ClusterIP + portConfigs: + - port: 8080 + internalListenerName: 'http' + serviceAnnotations: + toto: tata +``` + +## ExternalServiceConfig + +| Field | Type | Description | Required | Default | +| ------------------ | ------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------- | -------- | ------- | +| name | string | must be unique within a namespace. Name is primarily intended for creation idempotence and configuration. | Yes | - | +| serviceAnnotations | map\[string\]string | Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadat | No | - | +| spec | [ExternalServiceSpec](#externalservicespec) | defines the behavior of a service. | Yes | | + +## ExternalServiceSpec + +| Field | Type | Description | Required | Default | +| ------------------------ | --------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------ | ------- | --- | +| portConfigs | | \[ \][PortConfig](#portconfig) | Contains the list port for the service and the associated listener | Yes | - | +| clusterIP | string | More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies | No | - | +| type | [ServiceType](https://godoc.org/k8s.io/api/core/v1#ServiceType) | type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. | No | - | +| externalIPs | \[ \]string | externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes | No | - | +| loadBalancerIP | string | Only applies to Service Type: LoadBalancer. LoadBalancer will get created with the IP specified in this field. 
| No | - | +| loadBalancerSourceRanges | \[ \]string | If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs | No | - | +| externalName | string | externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. | No | - | + +## PortConfig + +| Field | Type | Description | Required | Default | +| -------------------- | ------ | ---------------------------------------------------------------- | -------- | ------- | +| port | int32 | The port that will be exposed by this service. | Yes | - | +| internalListenerName | string | The name of the listener which will be used as target container. | Yes | - | diff --git a/site/website/versioned_docs/version-0.7.0/5_references/2_nifi_user.md b/site/website/versioned_docs/version-0.7.0/5_references/2_nifi_user.md new file mode 100644 index 000000000..356885c1b --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/5_references/2_nifi_user.md @@ -0,0 +1,100 @@ +--- +id: 2_nifi_user +title: NiFi User +sidebar_label: NiFi User +--- + +`NifiUser` is the Schema for the nifi users API. + +```yaml +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiUser +metadata: + name: aguitton +spec: + identity: alexandre.guitton@orange.com + clusterRef: + name: nc + namespace: nifikop + createCert: false +``` + +## NifiUser + +| Field | Type | Description | Required | Default | +| -------- | ----------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------- | -------- | ------- | +| metadata | [ObjectMetadata](https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ObjectMeta) | is metadata that all persisted resources must have, which includes all objects users must create. 
| No | nil | +| spec | [NifiUserSpec](#nifiuserspec) | defines the desired state of NifiUser. | No | nil | +| status | [NifiUserStatus](#nifiuserstatus) | defines the observed state of NifiUser. | No | nil | + +## NifiUserSpec + +| Field | Type | Description | Required | Default | +| -------------- | ------------------------------------- | --------------------------------------------------------------------------------------------------------------------------- | -------- | ------- | +| identity | string | used to define the user identity on NiFi cluster side, when the user's name doesn't suit the Kubernetes resource name. | No | - | +| secretName | string | name of the secret where all cert resources will be stored. | No | - | +| clusterRef | [ClusterReference](#clusterreference) | contains the reference to the NifiCluster with the one the user is linked. | Yes | - | +| DNSNames | \[ \]string | list of DNSNames that the user will use to request the NifiCluster (allowing the creation of the associated certificates). | Yes | - | +| includeJKS | boolean | whether or not the operator also includes a Java keystore format (JKS) in your secret. | Yes | - | +| createCert | boolean | whether or not a certificate will be created for this user. | No | - | +| accessPolicies | \[ \][AccessPolicy](#accesspolicy) | defines the list of access policies that will be granted to the group. | No | [] | + +## NifiUserStatus + +| Field | Type | Description | Required | Default | +| ------- | ------ | --------------------------------------------------- | -------- | ------- | +| id | string | the nifi user's node id. | Yes | - | +| version | string | the last nifi user's node revision version caught. | Yes | - | + +## ClusterReference + +| Field | Type | Description | Required | Default | +| --------- | ------ | ----------------------------------- | -------- | ------- | +| name | string | name of the NifiCluster.
| Yes | - | +| namespace | string | the NifiCluster namespace location. | Yes | - | + +## AccessPolicy + +| Field | Type | Description | Required | Default | +| ------------- | --------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | ------- | +| type | [AccessPolicyType](#accesspolicytype) | defines the kind of access policy, could be "global" or "component". | Yes | - | +| action | [AccessPolicyAction](#accesspolicyaction) | defines the kind of action that will be granted, could be "read" or "write". | Yes | - | +| resource | [AccessPolicyResource](#accesspolicyresource) | defines the kind of resource targeted by this access policies, please refer to the following page : https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#access-policies | Yes | - | +| componentType | string | used if the type is "component", it allows to define the kind of component on which is the access policy. | No | - | +| componentId | string | used if the type is "component", it allows to define the id of the component on which is the access policy. 
| No | - | + +## AccessPolicyType + +| Name | Value | Description | +| ------------------------- | --------- | ----------------------------------------------------------------------------------- | +| GlobalAccessPolicyType | global | Global access policies govern the following system level authorizations | +| ComponentAccessPolicyType | component | Component level access policies govern the following component level authorizations | + +## AccessPolicyAction + +| Name | Value | Description | +| ----------------------- | ----- | ---------------------- | +| ReadAccessPolicyAction | read | Allows users to view | +| WriteAccessPolicyAction | write | Allows users to modify | + +## AccessPolicyResource + +| Name | Value | Description | +| ---------------------------------------- | ---------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| FlowAccessPolicyResource | /flow | About the UI | +| ControllerAccessPolicyResource | /controller | about the controller including Reporting Tasks, Controller Services, Parameter Contexts and Nodes in the Cluster | +| ParameterContextAccessPolicyResource | /parameter-context | About the Parameter Contexts. Access to Parameter Contexts are inherited from the "access the controller" policies unless overridden. | +| ProvenanceAccessPolicyResource | /provenance | Allows users to submit a Provenance Search and request Event Lineage | +| RestrictedComponentsAccessPolicyResource | /restricted-components | About the restricted components assuming other permissions are sufficient. The restricted components may indicate which specific permissions are required. 
Permissions can be granted for specific restrictions or be granted regardless of restrictions. If permission is granted regardless of restrictions, the user can create/modify all restricted components. | +| PoliciesAccessPolicyResource | /policies | About the policies for all components | +| TenantsAccessPolicyResource | /tenants | About the users and user groups | +| SiteToSiteAccessPolicyResource | /site-to-site | Allows other NiFi instances to retrieve Site-To-Site details | +| SystemAccessPolicyResource | /system | Allows users to view System Diagnostics | +| ProxyAccessPolicyResource | /proxy | Allows proxy machines to send requests on the behalf of others | +| CountersAccessPolicyResource | /counters | About counters | +| ComponentsAccessPolicyResource | / | About the component configuration details | +| OperationAccessPolicyResource | /operation | to operate components by changing component run status (start/stop/enable/disable), remote port transmission status, or terminating processor threads | +| ProvenanceDataAccessPolicyResource | /provenance-data | to view provenance events generated by this component | +| DataAccessPolicyResource | /data | About metadata and content for this component in flowfile queues in outbound connections and through provenance events | +| PoliciesComponentAccessPolicyResource | /policies | - | +| DataTransferAccessPolicyResource | /data-transfer | Allows a port to receive data from NiFi instances | diff --git a/site/website/versioned_docs/version-0.7.0/5_references/3_nifi_registry_client.md b/site/website/versioned_docs/version-0.7.0/5_references/3_nifi_registry_client.md new file mode 100644 index 000000000..06ee74351 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/5_references/3_nifi_registry_client.md @@ -0,0 +1,43 @@ +--- +id: 3_nifi_registry_client +title: NiFi Registry Client +sidebar_label: NiFi Registry Client +--- + +`NifiRegistryClient` is the Schema for the NiFi registry client API. 
+ +```yaml +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiRegistryClient +metadata: + name: squidflow +spec: + clusterRef: + name: nc + namespace: nifikop + description: 'Squidflow demo' + uri: 'http://nifi-registry:18080' +``` + +## NifiRegistryClient + +| Field | Type | Description | Required | Default | +| -------- | ----------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ | -------- | ------- | +| metadata | [ObjectMetadata](https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ObjectMeta) | is metadata that all persisted resources must have, which includes all objects registry clients must create. | No | nil | +| spec | [NifiRegistryClientSpec](#nifiregistryclientspec) | defines the desired state of NifiRegistryClient. | No | nil | +| status | [NifiRegistryClientStatus](#nifiregistryclientstatus) | defines the observed state of NifiRegistryClient. | No | nil | + +## NifiRegistryClientsSpec + +| Field | Type | Description | Required | Default | +| ----------- | ----------------------------------------------------- | -------------------------------------------------------------------------- | -------- | ------- | +| description | string | describes the Registry client. | No | - | +| uri | string | URI of the NiFi registry that should be used for pulling the flow. | Yes | - | +| clusterRef | [ClusterReference](./2_nifi_user.md#clusterreference) | contains the reference to the NifiCluster with the one the user is linked. | Yes | - | + +## NifiRegistryClientStatus + +| Field | Type | Description | Required | Default | +| ------- | ------ | ------------------------------------------------------- | -------- | ------- | +| id | string | nifi registry client's id. | Yes | - | +| version | int64 | the last nifi registry client revision version catched. 
| Yes | - | diff --git a/site/website/versioned_docs/version-0.7.0/5_references/4_nifi_parameter_context.md b/site/website/versioned_docs/version-0.7.0/5_references/4_nifi_parameter_context.md new file mode 100644 index 000000000..b56150e37 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/5_references/4_nifi_parameter_context.md @@ -0,0 +1,83 @@ +--- +id: 4_nifi_parameter_context +title: NiFi Parameter Context +sidebar_label: NiFi Parameter Context +--- + +`NifiParameterContext` is the Schema for the NiFi parameter context API. + +```yaml +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiParameterContext +metadata: + name: dataflow-lifecycle +spec: + description: 'It is a test' + clusterRef: + name: nc + namespace: nifikop + secretRefs: + - name: secret-params + namespace: nifikop + parameters: + - name: test + value: toto + description: tutu + - name: test2 + value: toto + description: toto +``` + +## NifiParameterContext + +| Field | Type | Description | Required | Default | +| -------- | ----------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | -------- | ------- | +| metadata | [ObjectMetadata](https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ObjectMeta) | is metadata that all persisted resources must have, which includes all objects parameter contexts must create. | No | nil | +| spec | [NifiParameterContextSpec](#NifiParameterContextspec) | defines the desired state of NifiParameterContext. | No | nil | +| status | [NifiParameterContextStatus](#NifiParameterContextstatus) | defines the observed state of NifiParameterContext. 
| No | nil |
+
+## NifiParameterContextSpec
+
+| Field | Type | Description | Required | Default |
+| ----------- | ----------------------------------------------------- | ---------------------------------------------------------------------------------------------- | -------- | ------- |
+| description | string | describes the Parameter Context. | No | - |
+| parameters | \[ \][Parameter](#parameter) | a list of non-sensitive Parameters. | No | - |
+| secretRefs | \[ \][SecretReference](#secretreference) | a list of secrets containing sensitive parameters (the key will be the name of the parameter). | No | - |
+| clusterRef | [ClusterReference](./2_nifi_user.md#clusterreference) | contains the reference to the NifiCluster with the one the user is linked. | Yes | - |
+
+## NifiParameterContextStatus
+
+| Field | Type | Description | Required | Default |
+| ------------------- | --------------------------------------------------------------- | -------------------------------------------------------- | -------- | ------- |
+| id | string | nifi parameter context's id. | Yes | - |
+| version | int64 | the last nifi parameter context revision version caught. | Yes | - |
+| latestUpdateRequest | [ParameterContextUpdateRequest](#parametercontextupdaterequest) | the latest update request. | Yes | - |
+
+## Parameter
+
+| Field | Type | Description | Required | Default |
+| ----------- | ------ | --------------------------------- | -------- | ------- |
+| name | string | the name of the Parameter. | Yes | - |
+| value | string | the value of the Parameter. | Yes | - |
+| description | string | the description of the Parameter. | No | - |
+
+## SecretReference
+
+| Field | Type | Description | Required | Default |
+| --------- | ------ | ------------------------------ | -------- | ------- |
+| name | string | name of the secret. | Yes | - |
+| namespace | string | the secret namespace location.
| Yes | - | + +## ParameterContextUpdateRequest + +| Field | Type | Description | Required | Default | +| ---------------- | ------ | --------------------------------------------------------------------------------- | -------- | ------- | +| id | string | the id of the update request. | Yes | - | +| uri | string | the uri for this request. | Yes | - | +| submissionTime | string | the timestamp of when the request was submitted This property is read only. | Yes | - | +| lastUpdated | string | the timestamp of when the request was submitted This property is read only. | Yes | - | +| complete | bool | whether or not this request has completed. | Yes | false | +| failureReason | string | an explication of why the request failed, or null if this request has not failed. | Yes | - | +| percentCompleted | int32 | the percentage complete of the request, between 0 and 100. | Yes | - | +| state | string | the state of the request. | Yes | - | diff --git a/site/website/versioned_docs/version-0.7.0/5_references/5_nifi_dataflow.md b/site/website/versioned_docs/version-0.7.0/5_references/5_nifi_dataflow.md new file mode 100644 index 000000000..ea7417658 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/5_references/5_nifi_dataflow.md @@ -0,0 +1,124 @@ +--- +id: 5_nifi_dataflow +title: NiFi Dataflow +sidebar_label: NiFi Dataflow +--- + +`NifiDataflow` is the Schema for the NiFi dataflow API. 
+ +```yaml +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiDataflow +metadata: + name: dataflow-lifecycle +spec: + parentProcessGroupID: '16cfd2ec-0174-1000-0000-00004b9b35cc' + bucketId: '01ced6cc-0378-4893-9403-f6c70d080d4f' + flowId: '9b2fb465-fb45-49e7-94fe-45b16b642ac9' + flowVersion: 2 + syncMode: always + skipInvalidControllerService: true + skipInvalidComponent: true + clusterRef: + name: nc + namespace: nifikop + registryClientRef: + name: squidflow + namespace: nifikop + parameterContextRef: + name: dataflow-lifecycle + namespace: nifikop + updateStrategy: drain +``` + +## NifiDataflow + +| Field | Type | Description | Required | Default | +| -------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | -------- | ------- | +| metadata | [ObjectMetadata](https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ObjectMeta) | is metadata that all persisted resources must have, which includes all objects dataflows must create. | No | nil | +| spec | [NifiDataflowSpec](#NifiDataflowspec) | defines the desired state of NifiDataflow. | No | nil | +| status | [NifiDataflowStatus](#NifiDataflowstatus) | defines the observed state of NifiDataflow. | No | nil | + +## NifiDataflowsSpec + +| Field | Type | Description | Required | Default | +| ---------------------------- | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------- | -------- | ------- | +| parentProcessGroupID | string | the UUID of the parent process group where you want to deploy your dataflow, if not set deploy at root level. | No | - | +| bucketId | string | the UUID of the Bucket containing the flow. | Yes | - | +| flowId | string | the UUID of the flow to run. 
| Yes | - |
+| flowVersion | \*int32 | the version of the flow to run, if not present or equals to -1, then the latest version of flow will be used. | Yes | - |
+| syncMode | Enum={"never","always","once"} | if the flow will be synchronized once, continuously or never. | No | always |
+| skipInvalidControllerService | bool | whether the flow is considered as ran if some controller services are still invalid or not. | Yes | false |
+| skipInvalidComponent | bool | whether the flow is considered as ran if some components are still invalid or not. | Yes | false |
+| updateStrategy | [DataflowUpdateStrategy](#dataflowupdatestrategy) | describes the way the operator will deal with data when a dataflow will be updated : Drop or Drain | Yes | drain |
+| clusterRef | [ClusterReference](./2_nifi_user.md#clusterreference) | contains the reference to the NifiCluster with the one the user is linked. | Yes | - |
+| parameterContextRef | [ParameterContextReference](./4_nifi_parameter_context.md#parametercontextreference) | contains the reference to the ParameterContext with the one the dataflow is linked. | No | - |
+| registryClientRef | [RegistryClientReference](./3_nifi_registry_client.md#registryclientreference) | contains the reference to the NifiRegistry with the one the dataflow is linked. | Yes | - |
+
+## NifiDataflowStatus
+
+| Field | Type | Description | Required | Default |
+| ------------------- | ------------------------------- | ----------------------------------- | -------- | ------- |
+| processGroupID | string | process Group ID. | Yes | - |
+| state | [DataflowState](#dataflowstate) | the dataflow current state. | Yes | - |
+| latestUpdateRequest | [UpdateRequest](#updaterequest) | the latest update request sent. | Yes | - |
+| latestDropRequest | [DropRequest](#droprequest) | the latest queue drop request sent.
| Yes | - |
+
+## DataflowUpdateStrategy
+
+| Name | Value | Description |
+| ------------- | ----- | ------------------------------------------------------------------------------------------------------------------------------------- |
+| DrainStrategy | drain | leads to shutting down only input components (Input processors, remote input process group) and dropping all flowfiles from the flow. |
+| DropStrategy | drop | leads to shutting down all components and dropping all flowfiles from the flow. |
+
+## DataflowState
+
+| Name | Value | Description |
+| ---------------------- | --------- | ------------------------------------------------------ |
+| DataflowStateCreated | Created | describes the status of a NifiDataflow as created. |
+| DataflowStateStarting | Starting | describes the status of a NifiDataflow as starting. |
+| DataflowStateRan | Ran | describes the status of a NifiDataflow as running. |
+| DataflowStateOutOfSync | OutOfSync | describes the status of a NifiDataflow as out of sync. |
+| DataflowStateInSync | InSync | describes the status of a NifiDataflow as in sync. |
+
+## UpdateRequest
+
+| Field | Type | Description | Required | Default |
+| ---------------- | ------------------------------------------------------- | --------------------------------------------------------------------------------- | -------- | ------- |
+| type | [DataflowUpdateRequestType](#dataflowupdaterequesttype) | defines the type of versioned flow update request. | Yes | - |
+| id | string | the id of the update request. | Yes | - |
+| uri | string | the uri for this request. | Yes | - |
+| lastUpdated | string | the last time this request was updated. | Yes | - |
+| complete | bool | whether or not this request has completed. | Yes | false |
+| failureReason | string | an explanation of why the request failed, or null if this request has not failed. | Yes | - |
+| percentCompleted | int32 | the percentage complete of the request, between 0 and 100.
| Yes | 0 | +| state | string | the state of the request. | Yes | - | + +## DropRequest + +| Field | Type | Description | Required | Default | +| ---------------- | ------ | --------------------------------------------------------------------------------- | -------- | ------- | +| connectionId | string | the connection id. | Yes | - | +| id | string | the id for this drop request. | Yes | - | +| uri | string | the uri for this request. | Yes | - | +| lastUpdated | string | the last time this request was updated. | Yes | - | +| finished | bool | whether the request has finished. | Yes | false | +| failureReason | string | an explication of why the request failed, or null if this request has not failed. | Yes | - | +| percentCompleted | int32 | the percentage complete of the request, between 0 and 100. | Yes | 0 | +| currentCount | int32 | the number of flow files currently queued. | Yes | 0 | +| currentSize | int64 | the size of flow files currently queued in bytes. | Yes | 0 | +| current | string | the count and size of flow files currently queued. | Yes | - | +| originalCount | int32 | the number of flow files to be dropped as a result of this request. | Yes | 0 | +| originalSize | int64 | the size of flow files to be dropped as a result of this request in bytes. | Yes | 0 | +| original | string | the count and size of flow files to be dropped as a result of this request. | Yes | - | +| droppedCount | int32 | the number of flow files that have been dropped thus far. | Yes | 0 | +| droppedSize | int64 | the size of flow files currently queued in bytes. | Yes | 0 | +| Dropped | string | the count and size of flow files that have been dropped thus far. | Yes | - | +| state | string | the state of the request. | Yes | - | + +## DataflowUpdateRequestType + +| Name | Value | Description | +| ----------------- | ------ | ---------------------------------- | +| RevertRequestType | Revert | defines a revert changes request. 
| +| UpdateRequestType | Update | defines an update version request. | diff --git a/site/website/versioned_docs/version-0.7.0/5_references/6_nifi_usergroup.md b/site/website/versioned_docs/version-0.7.0/5_references/6_nifi_usergroup.md new file mode 100644 index 000000000..eb49ab2a9 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/5_references/6_nifi_usergroup.md @@ -0,0 +1,55 @@ +--- +id: 6_nifi_usergroup +title: NiFi UserGroup +sidebar_label: NiFi UserGroup +--- + +`NifiUserGroup` is the Schema for the nifi user groups API. + +```yaml +apiVersion: nifi.orange.com/v1alpha1 +kind: NifiUserGroup +metadata: + name: group-test +spec: + clusterRef: + name: nc + namespace: nifikop + usersRef: + - name: nc-0-node.nc-headless.nifikop.svc.cluster.local + - name: nc-controller.nifikop.mgt.cluster.local + accessPolicies: + - type: global + action: read + resource: /counters +``` + +## NifiUser + +| Field | Type | Description | Required | Default | +| -------- | ----------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------ | -------- | ------- | +| metadata | [ObjectMetadata](https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ObjectMeta) | is metadata that all persisted resources must have, which includes all objects usergroups must create. | No | nil | +| spec | [NifiUserGroupSpec](#nifiusergroupspec) | defines the desired state of NifiUserGroup. | No | nil | +| status | [NifiUserGroupStatus](#nifiusergroupstatus) | defines the observed state of NifiUserGroup. 
| No | nil |
+
+## NifiUserGroupSpec
+
+| Field | Type | Description | Required | Default |
+| -------------- | ----------------------------------------------------- | -------------------------------------------------------------------------- | -------- | ------- |
+| clusterRef | [ClusterReference](./2_nifi_user.md#clusterreference) | contains the reference to the NifiCluster with the one the user is linked. | Yes | - |
+| usersRef | \[ \][UserReference](#userreference) | contains the list of reference to NifiUsers that are part of the group. | No | [] |
+| accessPolicies | \[ \][AccessPolicy](./2_nifi_user.md#accesspolicy) | defines the list of access policies that will be granted to the group. | No | [] |
+
+## NifiUserGroupStatus
+
+| Field | Type | Description | Required | Default |
+| ------- | ------ | ------------------------------------------------------- | -------- | ------- |
+| id | string | the nifi usergroup's node id. | Yes | - |
+| version | string | the last nifi usergroup's node revision version caught. | Yes | - |
+
+## UserReference
+
+| Field | Type | Description | Required | Default |
+| --------- | ------ | -------------------------------- | -------- | ------- |
+| name | string | name of the NifiUser. | Yes | - |
+| namespace | string | the NifiUser namespace location.
+- [docker](https://docs.docker.com/get-docker/) version 18.09+ +- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) version v1.16+ +- [Helm](https://helm.sh/) version v3.4.2 +- [Operator sdk](https://github.com/operator-framework/operator-sdk) version v1.5.0 + +### Initial setup + +Checkout the project. + +```bash +git clone https://github.com/Orange-OpenSource/nifikop.git +cd nifikop +``` + +### Operator sdk + +The full list of command is available here : https://sdk.operatorframework.io/docs/upgrading-sdk-version/v1.0.0/#cli-changes + +### Build NiFiKop + +#### Local environment + +If you prefer working directly with your local go environment you can simply uses : + +```bash +make build +``` + +### Run NiFiKop + +We can quickly run NiFiKop in development mode (on your local host), then it will use your kubectl configuration file to connect to your kubernetes cluster. + +There are several ways to execute your operator : + +- Using your IDE directly +- Executing directly the Go binary +- deploying using the Helm charts + +If you want to configure your development IDE, you need to give it environment variables so that it will uses to connect to kubernetes. + +```bash +KUBECONFIG={path/to/your/kubeconfig} +WATCH_NAMESPACE={namespace_to_watch} +POD_NAME={name for operator pod} +LOG_LEVEL=Debug +OPERATOR_NAME=ide +``` + +#### Run the Operator Locally with the Go Binary + +This method can be used to run the operator locally outside of the cluster. This method may be preferred during development as it facilitates faster deployment and testing. + +Set the name of the operator in an environment variable + +```bash + export OPERATOR_NAME=nifi-operator +``` + +Deploy the CRDs. 
+
+```bash
+kubectl apply -f config/crd/bases/nifi.orange.com_nificlusters.yaml
+kubectl apply -f config/crd/bases/nifi.orange.com_nifidataflows.yaml
+kubectl apply -f config/crd/bases/nifi.orange.com_nifiparametercontexts.yaml
+kubectl apply -f config/crd/bases/nifi.orange.com_nifiregistryclients.yaml
+kubectl apply -f config/crd/bases/nifi.orange.com_nifiusers.yaml
+```
+
+And deploy the operator.
+
+```bash
+make run
+```
+
+This will run the operator in the `default` namespace using the default Kubernetes config file at `$HOME/.kube/config`.
+
+#### Deploy using the Helm Charts
+
+This section provides instructions for running the operator Helm charts with an image that is built from the local branch.
+
+Build the image from the current branch.
+
+```bash
+export DOCKER_REPO_BASE={your-docker-repo}
+make docker-build
+```
+
+Push the image to docker hub (or to whichever repo you want to use)
+
+```bash
+$ make docker-push
+```
+
+:::info
+The image tag is a combination of the version as defined in `version/version.go` and the branch name.
+:::
+
+Install the Helm chart.
+
+```bash
+helm install skeleton ./helm/nifikop \
+  --set image.tag=v0.5.1-release \
+  --namespace nifikop
+```
+
+:::important
+The `image.repository` and `image.tag` template variables have to match the names from the image that we pushed in the previous step.
+:::
+
+:::info
+We set the chart name to the branch, but it can be anything.
+:::
+
+Lastly, verify that the operator is running.
+
+```console
+$ kubectl get pods -n nifikop
+NAME                               READY   STATUS    RESTARTS   AGE
+skeleton-nifikop-8946b89dc-4cfs9   1/1     Running   0          7m45s
+```
+
+## Helm
+
+The NiFiKop operator is released in the `orange-incubator` helm repository.
+
+In order to package the chart you need to run the following command.
+ +```bash +make helm-package +``` diff --git a/site/website/versioned_docs/version-0.7.0/6_contributing/2_reporting_bugs.md b/site/website/versioned_docs/version-0.7.0/6_contributing/2_reporting_bugs.md new file mode 100644 index 000000000..84c992203 --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/6_contributing/2_reporting_bugs.md @@ -0,0 +1,25 @@ +--- +id: 2_reporting_bugs +title: Reporting bugs +sidebar_label: Reporting bugs +--- + +If any part of the NiFiKop project has bugs or documentation mistakes, please let us know by [opening an issue](https://github.com/Orange-OpenSource/nifikop/issues/new). We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist. + +To make the bug report accurate and easy to understand, please try to create bug reports that are: + +- Specific. Include as much details as possible: which version, what environment, what configuration, etc. + +- Reproducible. Include the steps to reproduce the problem. We understand some issues might be hard to reproduce, please include the steps that might lead to the problem. + +- Isolated. Please try to isolate and reproduce the bug with minimum dependencies. It would significantly slow down the speed to fix a bug if too many dependencies are involved in a bug report. Debugging external systems that rely on operator-sdk is out of scope, but we are happy to provide guidance in the right direction or help with using operator-sdk itself. + +- Unique. Do not duplicate existing bug report. + +- Scoped. One bug per report. Do not follow up with another bug inside one report. + +It may be worthwhile to read [Elika Etemad’s article on filing good bug reports][filing-good-bugs] before creating a bug report. + +We might ask for further information to locate a bug. A duplicated bug report will be closed. 
+ +[filing-good-bugs]: http://fantasai.inkedblade.net/style/talks/filing-good-bugs/ diff --git a/site/website/versioned_docs/version-0.7.0/6_contributing/3_credits.md b/site/website/versioned_docs/version-0.7.0/6_contributing/3_credits.md new file mode 100644 index 000000000..fb6bcc81b --- /dev/null +++ b/site/website/versioned_docs/version-0.7.0/6_contributing/3_credits.md @@ -0,0 +1,11 @@ +--- +id: 3_credits +title: Credits +sidebar_label: Credits +--- + +This implementation is based on other Open-Source project, and lot of the community ideas. Particular thanks to : + +- Operator implementation based on [banzaicloud/kafka-operator](https://github.com/banzaicloud/kafka-operator) +- NiFi kubernetes setup configuration inspired from [cetic/helm-nifi](https://github.com/cetic/helm-nifi) +- Implementation is based on [Operator SDK](https://github.com/operator-framework/operator-sdk) diff --git a/site/website/versioned_sidebars/version-0.7.0-sidebars.json b/site/website/versioned_sidebars/version-0.7.0-sidebars.json new file mode 100644 index 000000000..015dcdac6 --- /dev/null +++ b/site/website/versioned_sidebars/version-0.7.0-sidebars.json @@ -0,0 +1,197 @@ +{ + "version-0.7.0/docs": [ + { + "collapsed": true, + "type": "category", + "label": "Concepts", + "items": [ + { + "type": "doc", + "id": "version-0.7.0/1_concepts/1_introduction" + }, + { + "type": "doc", + "id": "version-0.7.0/1_concepts/2_design_principes" + }, + { + "type": "doc", + "id": "version-0.7.0/1_concepts/3_features" + }, + { + "type": "doc", + "id": "version-0.7.0/1_concepts/4_roadmap" + } + ] + }, + { + "collapsed": true, + "type": "category", + "label": "Setup", + "items": [ + { + "type": "doc", + "id": "version-0.7.0/2_setup/1_getting_started" + }, + { + "collapsed": true, + "type": "category", + "label": "Platform Setup", + "items": [ + { + "type": "doc", + "id": "version-0.7.0/2_setup/2_platform_setup/1_gke" + }, + { + "type": "doc", + "id": 
"version-0.7.0/2_setup/2_platform_setup/2_minikube" + } + ] + }, + { + "collapsed": true, + "type": "category", + "label": "Install", + "items": [ + { + "type": "doc", + "id": "version-0.7.0/2_setup/3_install/1_customizable_install_with_helm" + } + ] + } + ] + }, + { + "collapsed": true, + "type": "category", + "label": "Tasks", + "items": [ + { + "collapsed": true, + "type": "category", + "label": "NiFi Cluster", + "items": [ + { + "type": "doc", + "id": "version-0.7.0/3_tasks/1_nifi_cluster/2_cluster_scaling" + }, + { + "type": "doc", + "id": "version-0.7.0/3_tasks/1_nifi_cluster/4_external_cluster" + } + ] + }, + { + "collapsed": true, + "type": "category", + "label": "Security", + "items": [ + { + "type": "doc", + "id": "version-0.7.0/3_tasks/2_security/1_ssl" + }, + { + "collapsed": true, + "type": "category", + "label": "Authentication", + "items": [ + { + "type": "doc", + "id": "version-0.7.0/3_tasks/2_security/2_authentication/1_oidc" + } + ] + } + ] + }, + { + "type": "doc", + "id": "version-0.7.0/3_tasks/3_nifi_dataflow" + }, + { + "type": "doc", + "id": "version-0.7.0/3_tasks/4_nifi_user_group" + } + ] + }, + { + "collapsed": true, + "type": "category", + "label": "Reference", + "items": [ + { + "collapsed": true, + "type": "category", + "label": "NiFi Cluster", + "items": [ + { + "type": "doc", + "id": "version-0.7.0/5_references/1_nifi_cluster/1_nifi_cluster" + }, + { + "type": "doc", + "id": "version-0.7.0/5_references/1_nifi_cluster/2_read_only_config" + }, + { + "type": "doc", + "id": "version-0.7.0/5_references/1_nifi_cluster/3_node_config" + }, + { + "type": "doc", + "id": "version-0.7.0/5_references/1_nifi_cluster/4_node" + }, + { + "type": "doc", + "id": "version-0.7.0/5_references/1_nifi_cluster/5_node_state" + }, + { + "type": "doc", + "id": "version-0.7.0/5_references/1_nifi_cluster/6_listeners_config" + }, + { + "type": "doc", + "id": "version-0.7.0/5_references/1_nifi_cluster/7_external_service_config" + } + ] + }, + { + "type": "doc", + 
"id": "version-0.7.0/5_references/2_nifi_user" + }, + { + "type": "doc", + "id": "version-0.7.0/5_references/3_nifi_registry_client" + }, + { + "type": "doc", + "id": "version-0.7.0/5_references/4_nifi_parameter_context" + }, + { + "type": "doc", + "id": "version-0.7.0/5_references/5_nifi_dataflow" + }, + { + "type": "doc", + "id": "version-0.7.0/5_references/6_nifi_usergroup" + } + ] + }, + { + "collapsed": true, + "type": "category", + "label": "Contributing", + "items": [ + { + "type": "doc", + "id": "version-0.7.0/6_contributing/1_developer_guide" + }, + { + "type": "doc", + "id": "version-0.7.0/6_contributing/2_reporting_bugs" + }, + { + "type": "doc", + "id": "version-0.7.0/6_contributing/3_credits" + } + ] + } + ] +} diff --git a/site/website/versions.json b/site/website/versions.json index 4a9b9d01f..6c76cc6e4 100644 --- a/site/website/versions.json +++ b/site/website/versions.json @@ -1,8 +1 @@ -[ - "v0.6.3", - "v0.6.2", - "v0.6.1", - "v0.6.0", - "v0.5.3", - "v0.4.3" -] +["0.7.0", "v0.6.3", "v0.6.2", "v0.6.1", "v0.6.0", "v0.5.3", "v0.4.3"] From 04fca5fdc1cd0b98dd4e3b565376e517ef899f03 Mon Sep 17 00:00:00 2001 From: erdrix Date: Tue, 12 Oct 2021 23:00:56 +0200 Subject: [PATCH 18/18] bump version --- config/manager/manager.yaml | 54 ++++++------ helm/nifikop/Chart.yaml | 6 +- helm/nifikop/README.md | 67 +++++++-------- helm/nifikop/values.yaml | 4 +- site/docs/2_setup/1_getting_started.md | 22 ++--- .../1_customizable_install_with_helm.md | 82 ++++++++++--------- .../2_setup/1_getting_started.md | 4 +- .../1_customizable_install_with_helm.md | 2 +- .../2_setup/1_getting_started.md | 22 ++--- .../1_customizable_install_with_helm.md | 82 ++++++++++--------- version/version.go | 2 +- 11 files changed, 177 insertions(+), 170 deletions(-) diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index ce76a2dce..57e7a408a 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -25,32 +25,32 @@ spec: securityContext: runAsUser: 
65532 containers: - - command: - - /manager - args: - - --leader-elect - image: orangeopensource/nifikop:v0.6.3-release - name: nifikop - securityContext: - allowPrivilegeEscalation: false - livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 15 - periodSeconds: 20 - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 5 - periodSeconds: 10 - resources: - limits: - cpu: 100m - memory: 30Mi - requests: - cpu: 100m - memory: 20Mi + - command: + - /manager + args: + - --leader-elect + image: orangeopensource/nifikop:v0.7.0-release + name: nifikop + securityContext: + allowPrivilegeEscalation: false + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + limits: + cpu: 100m + memory: 30Mi + requests: + cpu: 100m + memory: 20Mi terminationGracePeriodSeconds: 10 serviceAccountName: controller-manager diff --git a/helm/nifikop/Chart.yaml b/helm/nifikop/Chart.yaml index 0f2423a28..008ea79c1 100644 --- a/helm/nifikop/Chart.yaml +++ b/helm/nifikop/Chart.yaml @@ -4,8 +4,8 @@ name: nifikop home: https://github.com/Orange-OpenSource/nifikop sources: - https://github.com/Orange-OpenSource/nifikop -version: 0.6.3 -appVersion: 0.6.3-release +version: 0.7.0 +appVersion: 0.7.0-release icon: maintainers: - name: erdrix @@ -16,4 +16,4 @@ keywords: - operator - nifi - nifikop - - data \ No newline at end of file + - data diff --git a/helm/nifikop/README.md b/helm/nifikop/README.md index 0a760ebda..34662adeb 100644 --- a/helm/nifikop/README.md +++ b/helm/nifikop/README.md @@ -1,16 +1,16 @@ # NiFiKop - Nifi Kubernetes operator Helm chart -This Helm chart install NiFiKop the Orange's Nifi Kubernetes operator to create/configure/manage NiFi +This Helm chart install NiFiKop the Orange's Nifi Kubernetes operator to create/configure/manage NiFi clusters in a Kubernetes 
Namespace. It will use Custom Ressources Definition CRDs: - -- `nificlusters.nifi.orange.com`, -- `nifiusers.nifi.orange.com`, -- `nifiusergroups.nifi.orange.com`, -- `nifiregistryclients.nifi.orange.com`, -- `nifiparametercontexts.nifi.orange.com`, -- `nifidataflows.nifi.orange.com`, + +- `nificlusters.nifi.orange.com`, +- `nifiusers.nifi.orange.com`, +- `nifiusergroups.nifi.orange.com`, +- `nifiregistryclients.nifi.orange.com`, +- `nifiparametercontexts.nifi.orange.com`, +- `nifidataflows.nifi.orange.com`, which implements kubernetes custom ressource definition. @@ -20,26 +20,25 @@ which implements kubernetes custom ressource definition. The following tables lists the configurable parameters of the NiFi Operator Helm chart and their default values. -| Parameter | Description | Default | -|----------------------------------|--------------------------------------------------|-------------------------------------------| -| `image.repository` | Image | `orangeopensource/nifikop` | -| `image.tag` | Image tag | `v0.6.3-release` | -| `image.pullPolicy` | Image pull policy | `Always` | -| `image.imagePullSecrets.enabled` | Enable tue use of secret for docker image | `false` | -| `image.imagePullSecrets.name` | Name of the secret to connect to docker registry | - | -| `certManager.enabled` | Enable cert-manager integration | `true` | -| `rbacEnable` | If true, create & use RBAC resources | `true` | -| `resources` | Pod resource requests & limits | `{}` | -| `metricService` | deploy service for metrics | `false` | -| `debug.enabled` | activate DEBUG log level | `false` | -| `certManager.clusterScoped` | If true setup cluster scoped resources | `false` | -| `namespaces` | List of namespaces where Operator watches for custom resources. Make sure the operator ServiceAccount is granted `get` permissions on this `Node` resource when using limited RBACs.| `""` i.e. 
all namespaces | -| `nodeSelector` | Node selector configuration for operator pod | `{}` | -| `affinity` | Node affinity configuration for operator pod | `{}` | -| `tolerations` | Toleration configuration for operator pod | `{}` | -| `serviceAccount.create` | Whether the SA creation is delegated to the chart or not | `true` | -| `serviceAccount.name` | Name of the SA used for NiFiKop deployment | release name | - +| Parameter | Description | Default | +| -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------- | +| `image.repository` | Image | `orangeopensource/nifikop` | +| `image.tag` | Image tag | `v0.7.0-release` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.imagePullSecrets.enabled` | Enable tue use of secret for docker image | `false` | +| `image.imagePullSecrets.name` | Name of the secret to connect to docker registry | - | +| `certManager.enabled` | Enable cert-manager integration | `true` | +| `rbacEnable` | If true, create & use RBAC resources | `true` | +| `resources` | Pod resource requests & limits | `{}` | +| `metricService` | deploy service for metrics | `false` | +| `debug.enabled` | activate DEBUG log level | `false` | +| `certManager.clusterScoped` | If true setup cluster scoped resources | `false` | +| `namespaces` | List of namespaces where Operator watches for custom resources. Make sure the operator ServiceAccount is granted `get` permissions on this `Node` resource when using limited RBACs. | `""` i.e. 
all namespaces | +| `nodeSelector` | Node selector configuration for operator pod | `{}` | +| `affinity` | Node affinity configuration for operator pod | `{}` | +| `tolerations` | Toleration configuration for operator pod | `{}` | +| `serviceAccount.create` | Whether the SA creation is delegated to the chart or not | `true` | +| `serviceAccount.name` | Name of the SA used for NiFiKop deployment | release name | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, @@ -66,7 +65,7 @@ kubectl apply -f https://raw.githubusercontent.com/Orange-OpenSource/nifikop/mas You can make a dry run of the chart before deploying : -```console +```console helm install nifikop orange-incubator/nifikop \ --dry-run \ --debug.enabled \ @@ -83,12 +82,11 @@ $ helm install nifikop orange-incubator/nifikop --set namespaces={"nifikop"} We can surcharge default parameters using `--set` flag : ```console -$ helm install nifikop orange-incubator/nifikop --replace --set image.tag=asyncronous +$ helm install nifikop orange-incubator/nifikop --replace --set image.tag=asyncronous ``` > the `--replace` flag allow you to reuses a charts release name - ### Listing deployed charts ``` @@ -103,7 +101,7 @@ helm status nifikop ## Uninstaling the Charts -If you want to delete the operator from your Kubernetes cluster, the operator deployment +If you want to delete the operator from your Kubernetes cluster, the operator deployment should be deleted. ``` @@ -133,7 +131,6 @@ kubectl delete crd nifidataflows.nifi.orange.com > > Please never delete a CRD without very very good care - Helm always keeps records of what releases happened. Need to see the deleted releases? `helm ls --deleted` shows those, and `helm ls --all` shows all of the releases (deleted and currently deployed, as well as releases that failed): @@ -144,8 +141,6 @@ resources.) Note that because releases are preserved in this way, you can rollback a deleted resource, and have it re-activate. 
- - To purge a release ```console @@ -163,4 +158,4 @@ In this case there is a parameter to say to not install the CRDs : ``` $ helm install --name nifikop ./helm/nifikop --set namespaces={"nifikop"} --skip-crds -``` \ No newline at end of file +``` diff --git a/helm/nifikop/values.yaml b/helm/nifikop/values.yaml index 6ce920530..041fbb91c 100644 --- a/helm/nifikop/values.yaml +++ b/helm/nifikop/values.yaml @@ -2,7 +2,7 @@ ## image: repository: orangeopensource/nifikop - tag: v0.6.3-release + tag: v0.7.0-release pullPolicy: Always imagePullSecrets: enabled: false @@ -34,7 +34,7 @@ rbacEnable: true ## If true, create serviceAccount ## serviceAccount: - create: true + create: true ## if true deploy service for metrics access metricService: false diff --git a/site/docs/2_setup/1_getting_started.md b/site/docs/2_setup/1_getting_started.md index c297996bc..620820e1b 100644 --- a/site/docs/2_setup/1_getting_started.md +++ b/site/docs/2_setup/1_getting_started.md @@ -3,6 +3,7 @@ id: 1_getting_started title: Getting Started sidebar_label: Getting Started --- + import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; @@ -16,7 +17,7 @@ As a pre-requisite it needs a Kubernetes cluster. Also, NiFi requires Zookeeper > We believe in the `separation of concerns` principle, thus the NiFi operator does not install nor manage Zookeeper. -## Prerequisites +## Prerequisites ### Install Zookeeper @@ -35,7 +36,7 @@ helm install zookeeper bitnami/zookeeper \ --set resources.limits.cpu=250m \ --set global.storageClass=standard \ --set networkPolicy.enabled=true \ - --set replicaCount=3 + --set replicaCount=3 ``` :::warning @@ -47,11 +48,11 @@ Replace the `storageClass` parameter value with your own. The NiFiKop operator uses `cert-manager` for issuing certificates to users and and nodes, so you'll need to have it setup in case you want to deploy a secured cluster with authentication enabled. 
@@ -60,6 +61,7 @@ The NiFiKop operator uses `cert-manager` for issuing certificates to users and a kubectl apply -f \ https://github.com/jetstack/cert-manager/releases/download/v1.2.0/cert-manager.yaml ``` + @@ -114,8 +116,8 @@ Now deploy the helm chart : helm install nifikop \ orange-incubator/nifikop \ --namespace=nifi \ - --version 0.6.3 \ - --set image.tag=v0.6.3-release \ + --version 0.7.0 \ + --set image.tag=v0.7.0-release \ --set resources.requests.memory=256Mi \ --set resources.requests.cpu=250m \ --set resources.limits.memory=256Mi \ @@ -154,4 +156,4 @@ And after you can deploy a simple NiFi cluster. ```bash # Add your zookeeper svc name to the configuration kubectl create -n nifi -f config/samples/simplenificluster.yaml -``` \ No newline at end of file +``` diff --git a/site/docs/2_setup/3_install/1_customizable_install_with_helm.md b/site/docs/2_setup/3_install/1_customizable_install_with_helm.md index ea00795a4..d76f1b9a3 100644 --- a/site/docs/2_setup/3_install/1_customizable_install_with_helm.md +++ b/site/docs/2_setup/3_install/1_customizable_install_with_helm.md @@ -3,6 +3,7 @@ id: 1_customizable_install_with_helm title: Customizable install with Helm sidebar_label: Customizable install with Helm --- + import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; @@ -13,41 +14,41 @@ import TabItem from '@theme/TabItem'; ## Introduction -This Helm chart install NiFiKop the Orange's Nifi Kubernetes operator to create/configure/manage NiFi +This Helm chart install NiFiKop the Orange's Nifi Kubernetes operator to create/configure/manage NiFi clusters in a Kubernetes Namespace. 
It will use Custom Ressources Definition CRDs: - -- `nificlusters.nifi.orange.com`, -- `nifiusers.nifi.orange.com`, -- `nifiusergroups.nifi.orange.com`, -- `nifiregistryclients.nifi.orange.com`, -- `nifiparametercontexts.nifi.orange.com`, -- `nifidataflows.nifi.orange.com`, + +- `nificlusters.nifi.orange.com`, +- `nifiusers.nifi.orange.com`, +- `nifiusergroups.nifi.orange.com`, +- `nifiregistryclients.nifi.orange.com`, +- `nifiparametercontexts.nifi.orange.com`, +- `nifidataflows.nifi.orange.com`, ### Configuration The following tables lists the configurable parameters of the NiFi Operator Helm chart and their default values. -| Parameter | Description | Default | -|----------------------------------|--------------------------------------------------|-------------------------------------------| -| `image.repository` | Image | `orangeopensource/nifikop` | -| `image.tag` | Image tag | `v0.6.3-release` | -| `image.pullPolicy` | Image pull policy | `Always` | -| `image.imagePullSecrets.enabled` | Enable tue use of secret for docker image | `false` | -| `image.imagePullSecrets.name` | Name of the secret to connect to docker registry | - | -| `certManager.enabled` | Enable cert-manager integration | `true` | -| `rbacEnable` | If true, create & use RBAC resources | `true` | -| `resources` | Pod resource requests & limits | `{}` | -| `metricService` | deploy service for metrics | `false` | -| `debug.enabled` | activate DEBUG log level | `false` | -| `certManager.clusterScoped` | If true setup cluster scoped resources | `false` | -| `namespaces` | List of namespaces where Operator watches for custom resources. Make sure the operator ServiceAccount is granted `get` permissions on this `Node` resource when using limited RBACs.| `""` i.e. 
all namespaces | -| `nodeSelector` | Node selector configuration for operator pod | `{}` | -| `affinity` | Node affinity configuration for operator pod | `{}` | -| `tolerations` | Toleration configuration for operator pod | `{}` | -| `serviceAccount.create` | Whether the SA creation is delegated to the chart or not | `true` | -| `serviceAccount.name` | Name of the SA used for NiFiKop deployment | release name | +| Parameter | Description | Default | +| -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------- | +| `image.repository` | Image | `orangeopensource/nifikop` | +| `image.tag` | Image tag | `v0.7.0-release` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.imagePullSecrets.enabled` | Enable tue use of secret for docker image | `false` | +| `image.imagePullSecrets.name` | Name of the secret to connect to docker registry | - | +| `certManager.enabled` | Enable cert-manager integration | `true` | +| `rbacEnable` | If true, create & use RBAC resources | `true` | +| `resources` | Pod resource requests & limits | `{}` | +| `metricService` | deploy service for metrics | `false` | +| `debug.enabled` | activate DEBUG log level | `false` | +| `certManager.clusterScoped` | If true setup cluster scoped resources | `false` | +| `namespaces` | List of namespaces where Operator watches for custom resources. Make sure the operator ServiceAccount is granted `get` permissions on this `Node` resource when using limited RBACs. | `""` i.e. 
all namespaces | +| `nodeSelector` | Node selector configuration for operator pod | `{}` | +| `affinity` | Node affinity configuration for operator pod | `{}` | +| `tolerations` | Toleration configuration for operator pod | `{}` | +| `serviceAccount.create` | Whether the SA creation is delegated to the chart or not | `true` | +| `serviceAccount.name` | Name of the SA used for NiFiKop deployment | release name | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, @@ -73,15 +74,15 @@ kubectl apply -f https://raw.githubusercontent.com/Orange-OpenSource/nifikop/mas kubectl apply -f https://raw.githubusercontent.com/Orange-OpenSource/nifikop/master/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml ``` -::: +::: @@ -92,12 +93,14 @@ helm install nifikop orange-incubator/nifikop \ --set debug.enabled=true \ --set namespaces={"nifikop"} ``` + ```bash -helm install orange-incubator/nifikop +helm install orange-incubator/nifikop ``` + @@ -105,6 +108,7 @@ helm install orange-incubator/nifikop ```bash helm install nifikop orange-incubator/nifikop --set namespaces={"nifikop"} ``` + @@ -124,7 +128,7 @@ helm status nifikop ## Uninstaling the Charts -If you want to delete the operator from your Kubernetes cluster, the operator deployment +If you want to delete the operator from your Kubernetes cluster, the operator deployment should be deleted. ``` @@ -154,7 +158,7 @@ It will delete **ALL** Clusters that has been created using this CRD!!! Please never delete a CRD without very good care ::: -Helm always keeps records of what releases happened. Need to see the deleted releases ? +Helm always keeps records of what releases happened. Need to see the deleted releases ? ```bash helm list --deleted @@ -163,7 +167,7 @@ helm list --deleted Need to see all of the releases (deleted and currently deployed, as well as releases that failed) ? 
-```bash +```bash helm list --all ``` @@ -190,4 +194,4 @@ In this case there is a parameter to say to not install the CRDs : ``` $ helm install --name nifikop ./helm/nifikop --set namespaces={"nifikop"} --skip-crds -``` \ No newline at end of file +``` diff --git a/site/website/versioned_docs/version-0.7.0/2_setup/1_getting_started.md b/site/website/versioned_docs/version-0.7.0/2_setup/1_getting_started.md index 81d2923e2..620820e1b 100644 --- a/site/website/versioned_docs/version-0.7.0/2_setup/1_getting_started.md +++ b/site/website/versioned_docs/version-0.7.0/2_setup/1_getting_started.md @@ -116,8 +116,8 @@ Now deploy the helm chart : helm install nifikop \ orange-incubator/nifikop \ --namespace=nifi \ - --version 0.6.3 \ - --set image.tag=v0.6.3-release \ + --version 0.7.0 \ + --set image.tag=v0.7.0-release \ --set resources.requests.memory=256Mi \ --set resources.requests.cpu=250m \ --set resources.limits.memory=256Mi \ diff --git a/site/website/versioned_docs/version-0.7.0/2_setup/3_install/1_customizable_install_with_helm.md b/site/website/versioned_docs/version-0.7.0/2_setup/3_install/1_customizable_install_with_helm.md index bde2d30e9..d76f1b9a3 100644 --- a/site/website/versioned_docs/version-0.7.0/2_setup/3_install/1_customizable_install_with_helm.md +++ b/site/website/versioned_docs/version-0.7.0/2_setup/3_install/1_customizable_install_with_helm.md @@ -33,7 +33,7 @@ The following tables lists the configurable parameters of the NiFi Operator Helm | Parameter | Description | Default | | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------- | | `image.repository` | Image | `orangeopensource/nifikop` | -| `image.tag` | Image tag | `v0.6.3-release` | +| `image.tag` | Image tag | `v0.7.0-release` | | `image.pullPolicy` | Image pull policy | `Always` | | 
`image.imagePullSecrets.enabled` | Enable tue use of secret for docker image | `false` | | `image.imagePullSecrets.name` | Name of the secret to connect to docker registry | - | diff --git a/site/website/versioned_docs/version-v0.6.3/2_setup/1_getting_started.md b/site/website/versioned_docs/version-v0.6.3/2_setup/1_getting_started.md index c297996bc..620820e1b 100644 --- a/site/website/versioned_docs/version-v0.6.3/2_setup/1_getting_started.md +++ b/site/website/versioned_docs/version-v0.6.3/2_setup/1_getting_started.md @@ -3,6 +3,7 @@ id: 1_getting_started title: Getting Started sidebar_label: Getting Started --- + import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; @@ -16,7 +17,7 @@ As a pre-requisite it needs a Kubernetes cluster. Also, NiFi requires Zookeeper > We believe in the `separation of concerns` principle, thus the NiFi operator does not install nor manage Zookeeper. -## Prerequisites +## Prerequisites ### Install Zookeeper @@ -35,7 +36,7 @@ helm install zookeeper bitnami/zookeeper \ --set resources.limits.cpu=250m \ --set global.storageClass=standard \ --set networkPolicy.enabled=true \ - --set replicaCount=3 + --set replicaCount=3 ``` :::warning @@ -47,11 +48,11 @@ Replace the `storageClass` parameter value with your own. The NiFiKop operator uses `cert-manager` for issuing certificates to users and and nodes, so you'll need to have it setup in case you want to deploy a secured cluster with authentication enabled. 
@@ -60,6 +61,7 @@ The NiFiKop operator uses `cert-manager` for issuing certificates to users and a kubectl apply -f \ https://github.com/jetstack/cert-manager/releases/download/v1.2.0/cert-manager.yaml ``` + @@ -114,8 +116,8 @@ Now deploy the helm chart : helm install nifikop \ orange-incubator/nifikop \ --namespace=nifi \ - --version 0.6.3 \ - --set image.tag=v0.6.3-release \ + --version 0.7.0 \ + --set image.tag=v0.7.0-release \ --set resources.requests.memory=256Mi \ --set resources.requests.cpu=250m \ --set resources.limits.memory=256Mi \ @@ -154,4 +156,4 @@ And after you can deploy a simple NiFi cluster. ```bash # Add your zookeeper svc name to the configuration kubectl create -n nifi -f config/samples/simplenificluster.yaml -``` \ No newline at end of file +``` diff --git a/site/website/versioned_docs/version-v0.6.3/2_setup/3_install/1_customizable_install_with_helm.md b/site/website/versioned_docs/version-v0.6.3/2_setup/3_install/1_customizable_install_with_helm.md index ea00795a4..d76f1b9a3 100644 --- a/site/website/versioned_docs/version-v0.6.3/2_setup/3_install/1_customizable_install_with_helm.md +++ b/site/website/versioned_docs/version-v0.6.3/2_setup/3_install/1_customizable_install_with_helm.md @@ -3,6 +3,7 @@ id: 1_customizable_install_with_helm title: Customizable install with Helm sidebar_label: Customizable install with Helm --- + import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; @@ -13,41 +14,41 @@ import TabItem from '@theme/TabItem'; ## Introduction -This Helm chart install NiFiKop the Orange's Nifi Kubernetes operator to create/configure/manage NiFi +This Helm chart install NiFiKop the Orange's Nifi Kubernetes operator to create/configure/manage NiFi clusters in a Kubernetes Namespace. 
It will use Custom Ressources Definition CRDs: - -- `nificlusters.nifi.orange.com`, -- `nifiusers.nifi.orange.com`, -- `nifiusergroups.nifi.orange.com`, -- `nifiregistryclients.nifi.orange.com`, -- `nifiparametercontexts.nifi.orange.com`, -- `nifidataflows.nifi.orange.com`, + +- `nificlusters.nifi.orange.com`, +- `nifiusers.nifi.orange.com`, +- `nifiusergroups.nifi.orange.com`, +- `nifiregistryclients.nifi.orange.com`, +- `nifiparametercontexts.nifi.orange.com`, +- `nifidataflows.nifi.orange.com`, ### Configuration The following tables lists the configurable parameters of the NiFi Operator Helm chart and their default values. -| Parameter | Description | Default | -|----------------------------------|--------------------------------------------------|-------------------------------------------| -| `image.repository` | Image | `orangeopensource/nifikop` | -| `image.tag` | Image tag | `v0.6.3-release` | -| `image.pullPolicy` | Image pull policy | `Always` | -| `image.imagePullSecrets.enabled` | Enable tue use of secret for docker image | `false` | -| `image.imagePullSecrets.name` | Name of the secret to connect to docker registry | - | -| `certManager.enabled` | Enable cert-manager integration | `true` | -| `rbacEnable` | If true, create & use RBAC resources | `true` | -| `resources` | Pod resource requests & limits | `{}` | -| `metricService` | deploy service for metrics | `false` | -| `debug.enabled` | activate DEBUG log level | `false` | -| `certManager.clusterScoped` | If true setup cluster scoped resources | `false` | -| `namespaces` | List of namespaces where Operator watches for custom resources. Make sure the operator ServiceAccount is granted `get` permissions on this `Node` resource when using limited RBACs.| `""` i.e. 
all namespaces | -| `nodeSelector` | Node selector configuration for operator pod | `{}` | -| `affinity` | Node affinity configuration for operator pod | `{}` | -| `tolerations` | Toleration configuration for operator pod | `{}` | -| `serviceAccount.create` | Whether the SA creation is delegated to the chart or not | `true` | -| `serviceAccount.name` | Name of the SA used for NiFiKop deployment | release name | +| Parameter | Description | Default | +| -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------- | +| `image.repository` | Image | `orangeopensource/nifikop` | +| `image.tag` | Image tag | `v0.7.0-release` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.imagePullSecrets.enabled` | Enable tue use of secret for docker image | `false` | +| `image.imagePullSecrets.name` | Name of the secret to connect to docker registry | - | +| `certManager.enabled` | Enable cert-manager integration | `true` | +| `rbacEnable` | If true, create & use RBAC resources | `true` | +| `resources` | Pod resource requests & limits | `{}` | +| `metricService` | deploy service for metrics | `false` | +| `debug.enabled` | activate DEBUG log level | `false` | +| `certManager.clusterScoped` | If true setup cluster scoped resources | `false` | +| `namespaces` | List of namespaces where Operator watches for custom resources. Make sure the operator ServiceAccount is granted `get` permissions on this `Node` resource when using limited RBACs. | `""` i.e. 
all namespaces | +| `nodeSelector` | Node selector configuration for operator pod | `{}` | +| `affinity` | Node affinity configuration for operator pod | `{}` | +| `tolerations` | Toleration configuration for operator pod | `{}` | +| `serviceAccount.create` | Whether the SA creation is delegated to the chart or not | `true` | +| `serviceAccount.name` | Name of the SA used for NiFiKop deployment | release name | Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, @@ -73,15 +74,15 @@ kubectl apply -f https://raw.githubusercontent.com/Orange-OpenSource/nifikop/mas kubectl apply -f https://raw.githubusercontent.com/Orange-OpenSource/nifikop/master/config/crd/bases/nifi.orange.com_nifiregistryclients.yaml ``` -::: +::: @@ -92,12 +93,14 @@ helm install nifikop orange-incubator/nifikop \ --set debug.enabled=true \ --set namespaces={"nifikop"} ``` + ```bash -helm install orange-incubator/nifikop +helm install orange-incubator/nifikop ``` + @@ -105,6 +108,7 @@ helm install orange-incubator/nifikop ```bash helm install nifikop orange-incubator/nifikop --set namespaces={"nifikop"} ``` + @@ -124,7 +128,7 @@ helm status nifikop ## Uninstaling the Charts -If you want to delete the operator from your Kubernetes cluster, the operator deployment +If you want to delete the operator from your Kubernetes cluster, the operator deployment should be deleted. ``` @@ -154,7 +158,7 @@ It will delete **ALL** Clusters that has been created using this CRD!!! Please never delete a CRD without very good care ::: -Helm always keeps records of what releases happened. Need to see the deleted releases ? +Helm always keeps records of what releases happened. Need to see the deleted releases ? ```bash helm list --deleted @@ -163,7 +167,7 @@ helm list --deleted Need to see all of the releases (deleted and currently deployed, as well as releases that failed) ? 
-```bash +```bash helm list --all ``` @@ -190,4 +194,4 @@ In this case there is a parameter to say to not install the CRDs : ``` $ helm install --name nifikop ./helm/nifikop --set namespaces={"nifikop"} --skip-crds -``` \ No newline at end of file +``` diff --git a/version/version.go b/version/version.go index c2f653ad1..ada5776e6 100644 --- a/version/version.go +++ b/version/version.go @@ -1,5 +1,5 @@ package version var ( - Version = "0.6.3" + Version = "0.7.0" )