diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index a71a020d..0201eb74 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -59,18 +59,6 @@ rules: - patch - update - watch -- apiGroups: - - batch - resources: - - jobs - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - apiGroups: - k8s.cni.cncf.io resources: diff --git a/controllers/ansibletest_controller.go b/controllers/ansibletest_controller.go index b80d5849..af487818 100644 --- a/controllers/ansibletest_controller.go +++ b/controllers/ansibletest_controller.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "strconv" - "time" "reflect" @@ -30,12 +29,10 @@ import ( "github.com/openstack-k8s-operators/lib-common/modules/common/condition" "github.com/openstack-k8s-operators/lib-common/modules/common/env" "github.com/openstack-k8s-operators/lib-common/modules/common/helper" - "github.com/openstack-k8s-operators/lib-common/modules/common/job" common_rbac "github.com/openstack-k8s-operators/lib-common/modules/common/rbac" - "github.com/openstack-k8s-operators/test-operator/api/v1beta1" testv1beta1 "github.com/openstack-k8s-operators/test-operator/api/v1beta1" + v1beta1 "github.com/openstack-k8s-operators/test-operator/api/v1beta1" "github.com/openstack-k8s-operators/test-operator/pkg/ansibletest" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" k8s_errors "k8s.io/apimachinery/pkg/api/errors" ctrl "sigs.k8s.io/controller-runtime" @@ -54,7 +51,6 @@ func (r *AnsibleTestReconciler) GetLogger(ctx context.Context) logr.Logger { // +kubebuilder:rbac:groups=test.openstack.org,resources=ansibletests,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=test.openstack.org,resources=ansibletests/status,verbs=get;update;patch // +kubebuilder:rbac:groups=test.openstack.org,resources=ansibletests/finalizers,verbs=update;patch -// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;patch;update;delete; // +kubebuilder:rbac:groups=k8s.cni.cncf.io,resources=network-attachment-definitions,verbs=get;list;watch // +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=roles,verbs=get;list;watch;create;update;patch // +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=rolebindings,verbs=get;list;watch;create;update;patch @@ -143,12 +139,12 @@ func (r *AnsibleTestReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, err case Wait: - Log.Info(InfoWaitingOnJob) + Log.Info(InfoWaitingOnPod) return ctrl.Result{RequeueAfter: RequeueAfterValue}, nil case EndTesting: - // All jobs created by the instance were completed. Release the lock - // so that other instances can spawn their jobs. + // All pods created by the instance were completed. Release the lock + // so that other instances can spawn their pods. 
if lockReleased, err := r.ReleaseLock(ctx, instance); !lockReleased { Log.Info(fmt.Sprintf(InfoCanNotReleaseLock, testOperatorLockName)) return ctrl.Result{RequeueAfter: RequeueAfterValue}, err } @@ -161,7 +157,7 @@ func (r *AnsibleTestReconciler) Reconcile(ctx context.Context, req ctrl.Request) Log.Info(InfoTestingCompleted) return ctrl.Result{}, nil - case CreateFirstJob: + case CreateFirstPod: lockAcquired, err := r.AcquireLock(ctx, instance, helper, false) if !lockAcquired { Log.Info(fmt.Sprintf(InfoCanNotAcquireLock, testOperatorLockName)) return ctrl.Result{RequeueAfter: RequeueAfterValue}, err } Log.Info(fmt.Sprintf(InfoCreatingFirstPod, nextWorkflowStep)) - case CreateNextJob: + case CreateNextPod: // Confirm that we still hold the lock. This is useful to check if for // example somebody / something deleted the lock and it got claimed by // another instance. This is considered to be an error state. @@ -211,9 +207,9 @@ func (r *AnsibleTestReconciler) Reconcile(ctx context.Context, req ctrl.Request) instance.Status.Conditions.MarkTrue(condition.ServiceConfigReadyCondition, condition.ServiceConfigReadyMessage) - // Create a new job + // Create a new pod mountCerts := r.CheckSecretExists(ctx, instance, "combined-ca-bundle") - jobName := r.GetJobName(instance, nextWorkflowStep) + podName := r.GetPodName(instance, nextWorkflowStep) envVars, workflowOverrideParams := r.PrepareAnsibleEnv(instance, nextWorkflowStep) logsPVCName := r.GetPVCLogsName(instance, 0) containerImage, err := r.GetContainerImage(ctx, workflowOverrideParams["ContainerImage"], instance) @@ -249,11 +245,10 @@ func (r *AnsibleTestReconciler) Reconcile(ctx context.Context, req ctrl.Request) return rbacResult, nil } // Service account, role, binding - end - - jobDef := ansibletest.Job( + podDef := ansibletest.Pod( instance, serviceLabels, - jobName, + podName, logsPVCName, mountCerts, envVars, @@ -262,19 +257,12 @@ func (r *AnsibleTestReconciler) Reconcile(ctx context.Context, req ctrl.Request) containerImage, privileged, ) - ansibleTestsJob := job.NewJob( - jobDef, - testv1beta1.ConfigHash, - true, - time.Duration(5)*time.Second, - "", - ) - ctrlResult, err = ansibleTestsJob.DoJob(ctx, helper) + ctrlResult, err = r.CreatePod(ctx, *helper, podDef) if err != nil { - // Creation of the ansibleTests job was not successfull. + // Creation of the ansibleTests pod was not successful. // Release the lock and allow other controllers to spawn - // a job. + // a pod. if lockReleased, err := r.ReleaseLock(ctx, instance); !lockReleased { return ctrl.Result{}, err } @@ -294,7 +282,7 @@ func (r *AnsibleTestReconciler) Reconcile(ctx context.Context, req ctrl.Request) condition.DeploymentReadyRunningMessage)) return ctrlResult, nil } - // Create a new job - end + // Create a new pod - end Log.Info("Reconciled Service successfully") return ctrl.Result{}, nil } @@ -303,7 +291,7 @@ func (r *AnsibleTestReconciler) Reconcile(ctx context.Context, req ctrl.Request) func (r *AnsibleTestReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&testv1beta1.AnsibleTest{}). - Owns(&batchv1.Job{}). + Owns(&corev1.Pod{}). Owns(&corev1.Secret{}). Owns(&corev1.ConfigMap{}).
Complete(r) diff --git a/controllers/common.go b/controllers/common.go index 4abed53b..794904f2 100644 --- a/controllers/common.go +++ b/controllers/common.go @@ -17,7 +17,6 @@ import ( "github.com/openstack-k8s-operators/lib-common/modules/common/util" v1beta1 "github.com/openstack-k8s-operators/test-operator/api/v1beta1" "gopkg.in/yaml.v3" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" k8s_errors "k8s.io/apimachinery/pkg/api/errors" @@ -27,11 +26,12 @@ import ( "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) const ( workflowNameSuffix = "-workflow-counter" - jobNameStepInfix = "-workflow-step-" + podNameStepInfix = "-workflow-step-" envVarsConfigMapinfix = "-env-vars-step-" customDataConfigMapinfix = "-custom-data-step-" workflowStepNumInvalid = -1 @@ -51,7 +51,7 @@ const ( ) const ( - InfoWaitingOnJob = "Waiting on either job to finish or release of the lock." + InfoWaitingOnPod = "Waiting on either pod to finish or release of the lock." InfoTestingCompleted = "Testing completed. All pods spawned by the test-operator finished." InfoCreatingFirstPod = "Creating first test pod (workflow step %d)." InfoCreatingNextPod = "Creating next test pod (workflow step %d)." @@ -80,15 +80,15 @@ const ( // to change Wait = iota - // CreateFirstJob indicates that the Reconcile loop should create the first job + // CreateFirstPod indicates that the Reconcile loop should create the first pod // either specified in the .Spec section or in the .Spec.Workflow section. - CreateFirstJob + CreateFirstPod - // CreateNextJob indicates that the Reconcile loop should create a next job + // CreateNextPod indicates that the Reconcile loop should create the next pod // specified in the .Spec.Workflow section (if .Spec.Workflow is defined) - CreateNextJob + CreateNextPod - // EndTesting indicates that all jobs have already finished. The Reconcile + // EndTesting indicates that all pods have already finished. The Reconcile // loop should end the testing and release resources that are required to // be released (e.g., global lock) EndTesting @@ -97,6 +97,47 @@ const ( Failure ) +// GetPod returns the pod that has a specific name (podName) in a given namespace +// (podNamespace). +func (r *Reconciler) GetPod( + ctx context.Context, + podName string, + podNamespace string, +) (*corev1.Pod, error) { + pod := &corev1.Pod{} + objectKey := client.ObjectKey{Namespace: podNamespace, Name: podName} + if err := r.Client.Get(ctx, objectKey, pod); err != nil { + return pod, err + } + + return pod, nil +} + +// CreatePod creates a pod based on the spec provided via the podSpec parameter. +func (r *Reconciler) CreatePod( + ctx context.Context, + h helper.Helper, + podSpec *corev1.Pod, +) (ctrl.Result, error) { + _, err := r.GetPod(ctx, podSpec.Name, podSpec.Namespace) + if err == nil { + return ctrl.Result{}, nil + } else if !k8s_errors.IsNotFound(err) { + return ctrl.Result{}, err + } + + err = controllerutil.SetControllerReference(h.GetBeforeObject(), podSpec, r.GetScheme()) + if err != nil { + return ctrl.Result{}, err + } + + if err := r.Client.Create(ctx, podSpec); err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + // NextAction indicates what action needs to be performed by the Reconcile loop // based on the current state of the OpenShift cluster.
func (r *Reconciler) NextAction( @@ -104,92 +145,92 @@ ctx context.Context, instance client.Object, workflowLength int, ) (NextAction, int, error) { - // Get the latest job. The latest job is job with the highest value stored + // Get the latest pod. The latest pod is the pod with the highest value stored // in workflowStep label workflowStepIdx := 0 - lastJob, err := r.GetLastJob(ctx, instance) + lastPod, err := r.GetLastPod(ctx, instance) if err != nil { return Failure, workflowStepIdx, err } - // If there is a job associated with the current instance. - if lastJob != nil { - workflowStepIdx, err := strconv.Atoi(lastJob.Labels[workflowStepLabel]) + // If there is a pod associated with the current instance. + if lastPod != nil { + workflowStepIdx, err := strconv.Atoi(lastPod.Labels[workflowStepLabel]) if err != nil { return Failure, workflowStepIdx, err } - // If the last job is not in Failed or Succeded state -> Wait - lastJobFinished := (lastJob.Status.Failed + lastJob.Status.Succeeded) > 0 - if !lastJobFinished { + // If the last pod is not in Failed or Succeeded state -> Wait + lastPodFinished := lastPod.Status.Phase == corev1.PodFailed || lastPod.Status.Phase == corev1.PodSucceeded + if !lastPodFinished { return Wait, workflowStepIdx, nil } - // If the last job is in Failed or Succeeded state and it is NOT the last - // job which was supposed to be created -> CreateNextJob - if lastJobFinished && !isLastJobIndex(workflowStepIdx, workflowLength) { + // If the last pod is in Failed or Succeeded state and it is NOT the last - // pod which was supposed to be created -> CreateNextPod + if lastPodFinished && !isLastPodIndex(workflowStepIdx, workflowLength) { workflowStepIdx++ - return CreateNextJob, workflowStepIdx, nil + return CreateNextPod, workflowStepIdx, nil } - // Otherwise if the job is in Failed or Succeded stated and it IS the - // last job -> EndTesting - if lastJobFinished && isLastJobIndex(workflowStepIdx, workflowLength) { + // Otherwise if the pod is in Failed or Succeeded state and it IS the + // last pod -> EndTesting + if lastPodFinished && isLastPodIndex(workflowStepIdx, workflowLength) { return EndTesting, workflowStepIdx, nil } } - // If there is not any job associated with the instance -> createFirstJob - if lastJob == nil { - return CreateFirstJob, workflowStepIdx, nil + // If there is no pod associated with the instance -> CreateFirstPod + if lastPod == nil { + return CreateFirstPod, workflowStepIdx, nil } return Failure, workflowStepIdx, nil } -// isLastJobIndex returns true when jobIndex is the index of the last job that +// isLastPodIndex returns true when podIndex is the index of the last pod that // should be executed. Otherwise the return value is false.
-func isLastJobIndex(jobIndex int, workflowLength int) bool { +func isLastPodIndex(podIndex int, workflowLength int) bool { switch workflowLength { case 0: - return jobIndex == workflowLength + return podIndex == workflowLength default: - return jobIndex == (workflowLength - 1) + return podIndex == (workflowLength - 1) } } -// GetLastJob returns job associated with an instance which has the highest value +// GetLastPod returns the pod associated with an instance which has the highest value // stored in the workflowStep label -func (r *Reconciler) GetLastJob( +func (r *Reconciler) GetLastPod( ctx context.Context, instance client.Object, -) (*batchv1.Job, error) { +) (*corev1.Pod, error) { labels := map[string]string{instanceNameLabel: instance.GetName()} namespaceListOpt := client.InNamespace(instance.GetNamespace()) labelsListOpt := client.MatchingLabels(labels) - jobList := &batchv1.JobList{} - err := r.Client.List(ctx, jobList, namespaceListOpt, labelsListOpt) + podList := &corev1.PodList{} + err := r.Client.List(ctx, podList, namespaceListOpt, labelsListOpt) if err != nil { return nil, err } - var maxJob *batchv1.Job - maxJobWorkflowStep := 0 + var maxPod *corev1.Pod + maxPodWorkflowStep := 0 - for _, job := range jobList.Items { - workflowStep, err := strconv.Atoi(job.Labels[workflowStepLabel]) + for _, pod := range podList.Items { + workflowStep, err := strconv.Atoi(pod.Labels[workflowStepLabel]) if err != nil { - return &batchv1.Job{}, err + return &corev1.Pod{}, err } - if workflowStep >= maxJobWorkflowStep { - maxJobWorkflowStep = workflowStep - newMaxJob := job - maxJob = &newMaxJob + if workflowStep >= maxPodWorkflowStep { + maxPodWorkflowStep = workflowStep + newMaxPod := pod + maxPod = &newMaxPod } } - return maxJob, nil + return maxPod, nil } func GetEnvVarsConfigMapName(instance interface{}, workflowStepNum int) string { @@ -307,7 +348,7 @@ func (r *Reconciler) GetContainerImage( return "", nil } -func (r *Reconciler) GetJobName(instance interface{}, workflowStepNum int) string { +func (r *Reconciler) GetPodName(instance interface{}, workflowStepNum int) string { if typedInstance, ok := instance.(*v1beta1.Tobiko); ok { if len(typedInstance.Spec.Workflow) == 0 || workflowStepNum == workflowStepNumInvalid { return typedInstance.Name @@ -318,7 +359,7 @@ func (r *Reconciler) GetJobName(instance interface{}, workflowStepNum int) strin workflowStepName = typedInstance.Spec.Workflow[workflowStepNum].StepName } - return typedInstance.Name + jobNameStepInfix + fmt.Sprintf("%02d", workflowStepNum) + "-" + workflowStepName + return typedInstance.Name + podNameStepInfix + fmt.Sprintf("%02d", workflowStepNum) + "-" + workflowStepName } else if typedInstance, ok := instance.(*v1beta1.Tempest); ok { if len(typedInstance.Spec.Workflow) == 0 || workflowStepNum == workflowStepNumInvalid { return typedInstance.Name @@ -329,7 +370,7 @@ func (r *Reconciler) GetJobName(instance interface{}, workflowStepNum int) strin workflowStepName = typedInstance.Spec.Workflow[workflowStepNum].StepName } - return typedInstance.Name + jobNameStepInfix + fmt.Sprintf("%02d", workflowStepNum) + "-" + workflowStepName + return typedInstance.Name + podNameStepInfix + fmt.Sprintf("%02d", workflowStepNum) + "-" + workflowStepName } else if typedInstance, ok := instance.(*v1beta1.HorizonTest); ok { return typedInstance.Name } else if typedInstance, ok := instance.(*v1beta1.AnsibleTest); ok { @@ -342,7 +383,7 @@ func (r *Reconciler) GetJobName(instance interface{}, workflowStepNum int) strin workflowStepName =
typedInstance.Spec.Workflow[workflowStepNum].StepName } - return typedInstance.Name + jobNameStepInfix + fmt.Sprintf("%02d", workflowStepNum) + "-" + workflowStepName + return typedInstance.Name + podNameStepInfix + fmt.Sprintf("%02d", workflowStepNum) + "-" + workflowStepName } return workflowStepNameInvalid @@ -552,11 +593,11 @@ func (r *Reconciler) ReleaseLock(ctx context.Context, instance client.Object) (b return false, errors.New("failed to delete test-operator-lock") } -func (r *Reconciler) JobExists(ctx context.Context, instance client.Object, workflowStepNum int) bool { - job := &batchv1.Job{} - jobName := r.GetJobName(instance, workflowStepNum) - objectKey := client.ObjectKey{Namespace: instance.GetNamespace(), Name: jobName} - err := r.Client.Get(ctx, objectKey, job) +func (r *Reconciler) PodExists(ctx context.Context, instance client.Object, workflowStepNum int) bool { + pod := &corev1.Pod{} + podName := r.GetPodName(instance, workflowStepNum) + objectKey := client.ObjectKey{Namespace: instance.GetNamespace(), Name: podName} + err := r.Client.Get(ctx, objectKey, pod) if err != nil && k8s_errors.IsNotFound(err) { return false } diff --git a/controllers/horizontest_controller.go b/controllers/horizontest_controller.go index 62b8e021..4bca8325 100644 --- a/controllers/horizontest_controller.go +++ b/controllers/horizontest_controller.go @@ -20,18 +20,15 @@ import ( "context" "errors" "fmt" - "time" "github.com/go-logr/logr" "github.com/openstack-k8s-operators/lib-common/modules/common" "github.com/openstack-k8s-operators/lib-common/modules/common/condition" "github.com/openstack-k8s-operators/lib-common/modules/common/env" "github.com/openstack-k8s-operators/lib-common/modules/common/helper" - "github.com/openstack-k8s-operators/lib-common/modules/common/job" common_rbac "github.com/openstack-k8s-operators/lib-common/modules/common/rbac" testv1beta1 "github.com/openstack-k8s-operators/test-operator/api/v1beta1" "github.com/openstack-k8s-operators/test-operator/pkg/horizontest" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" k8s_errors "k8s.io/apimachinery/pkg/api/errors" ctrl "sigs.k8s.io/controller-runtime" @@ -51,7 +48,6 @@ func (r *HorizonTestReconciler) GetLogger(ctx context.Context) logr.Logger { // +kubebuilder:rbac:groups=test.openstack.org,resources=horizontests,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=test.openstack.org,resources=horizontests/status,verbs=get;update;patch // +kubebuilder:rbac:groups=test.openstack.org,resources=horizontests/finalizers,verbs=update;patch -// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;patch;update;delete; // +kubebuilder:rbac:groups=k8s.cni.cncf.io,resources=network-attachment-definitions,verbs=get;list;watch // +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=roles,verbs=get;list;watch;create;update;patch // +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=rolebindings,verbs=get;list;watch;create;update;patch @@ -136,12 +132,12 @@ func (r *HorizonTestReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, err case Wait: - Log.Info(InfoWaitingOnJob) + Log.Info(InfoWaitingOnPod) return ctrl.Result{RequeueAfter: RequeueAfterValue}, nil case EndTesting: - // All jobs created by the instance were completed. Release the lock - // so that other instances can spawn their jobs. + // All pods created by the instance were completed. Release the lock + // so that other instances can spawn their pods. 
if lockReleased, err := r.ReleaseLock(ctx, instance); !lockReleased { Log.Info(fmt.Sprintf(InfoCanNotReleaseLock, testOperatorLockName)) return ctrl.Result{RequeueAfter: RequeueAfterValue}, err @@ -154,7 +150,7 @@ func (r *HorizonTestReconciler) Reconcile(ctx context.Context, req ctrl.Request) Log.Info(InfoTestingCompleted) return ctrl.Result{}, nil - case CreateFirstJob: + case CreateFirstPod: lockAcquired, err := r.AcquireLock(ctx, instance, helper, instance.Spec.Parallel) if !lockAcquired { Log.Info(fmt.Sprintf(InfoCanNotAcquireLock, testOperatorLockName)) @@ -163,7 +159,7 @@ func (r *HorizonTestReconciler) Reconcile(ctx context.Context, req ctrl.Request) Log.Info(fmt.Sprintf(InfoCreatingFirstPod, nextWorkflowStep)) - case CreateNextJob: + case CreateNextPod: // Confirm that we still hold the lock. This is useful to check if for // example somebody / something deleted the lock and it got claimed by // another instance. This is considered to be an error state. @@ -224,7 +220,7 @@ func (r *HorizonTestReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Prepare HorizonTest env vars envVars := r.PrepareHorizonTestEnvVars(instance) - jobName := r.GetJobName(instance, 0) + podName := r.GetPodName(instance, 0) logsPVCName := r.GetPVCLogsName(instance, 0) containerImage, err := r.GetContainerImage(ctx, instance.Spec.ContainerImage, instance) if err != nil { @@ -240,11 +236,10 @@ func (r *HorizonTestReconciler) Reconcile(ctx context.Context, req ctrl.Request) return rbacResult, nil } // Service account, role, binding - end - - jobDef := horizontest.Job( + podDef := horizontest.Pod( instance, serviceLabels, - jobName, + podName, logsPVCName, mountCerts, mountKeys, @@ -252,15 +247,8 @@ func (r *HorizonTestReconciler) Reconcile(ctx context.Context, req ctrl.Request) envVars, containerImage, ) - horizontestJob := job.NewJob( - jobDef, - testv1beta1.ConfigHash, - true, - time.Duration(5)*time.Second, - "", - ) - ctrlResult, err = horizontestJob.DoJob(ctx, helper) + ctrlResult, err = r.CreatePod(ctx, *helper, podDef) if err != nil { instance.Status.Conditions.Set(condition.FalseCondition( condition.DeploymentReadyCondition, @@ -286,7 +274,7 @@ func (r *HorizonTestReconciler) Reconcile(ctx context.Context, req ctrl.Request) func (r *HorizonTestReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&testv1beta1.HorizonTest{}). - Owns(&batchv1.Job{}). + Owns(&corev1.Pod{}). Owns(&corev1.Secret{}). Owns(&corev1.ConfigMap{}). 
Complete(r) diff --git a/controllers/tempest_controller.go b/controllers/tempest_controller.go index 512e6a0f..0bc0414d 100644 --- a/controllers/tempest_controller.go +++ b/controllers/tempest_controller.go @@ -29,14 +29,12 @@ import ( "github.com/openstack-k8s-operators/lib-common/modules/common/condition" "github.com/openstack-k8s-operators/lib-common/modules/common/configmap" "github.com/openstack-k8s-operators/lib-common/modules/common/helper" - "github.com/openstack-k8s-operators/lib-common/modules/common/job" "github.com/openstack-k8s-operators/lib-common/modules/common/labels" nad "github.com/openstack-k8s-operators/lib-common/modules/common/networkattachment" common_rbac "github.com/openstack-k8s-operators/lib-common/modules/common/rbac" "github.com/openstack-k8s-operators/lib-common/modules/common/util" testv1beta1 "github.com/openstack-k8s-operators/test-operator/api/v1beta1" "github.com/openstack-k8s-operators/test-operator/pkg/tempest" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" k8s_errors "k8s.io/apimachinery/pkg/api/errors" ctrl "sigs.k8s.io/controller-runtime" @@ -56,7 +54,6 @@ func (r *TempestReconciler) GetLogger(ctx context.Context) logr.Logger { // +kubebuilder:rbac:groups=test.openstack.org,resources=tempests,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=test.openstack.org,resources=tempests/status,verbs=get;update;patch // +kubebuilder:rbac:groups=test.openstack.org,resources=tempests/finalizers,verbs=update;patch -// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;patch;update;delete; // +kubebuilder:rbac:groups=k8s.cni.cncf.io,resources=network-attachment-definitions,verbs=get;list;watch // +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=roles,verbs=get;list;watch;create;update;patch // +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=rolebindings,verbs=get;list;watch;create;update;patch @@ -160,12 +157,12 @@ func (r *TempestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re return ctrl.Result{}, err case Wait: - Log.Info(InfoWaitingOnJob) + Log.Info(InfoWaitingOnPod) return ctrl.Result{RequeueAfter: RequeueAfterValue}, nil case EndTesting: - // All jobs created by the instance were completed. Release the lock - // so that other instances can spawn their jobs. + // All pods created by the instance were completed. Release the lock + // so that other instances can spawn their pods. if lockReleased, err := r.ReleaseLock(ctx, instance); !lockReleased { Log.Info(fmt.Sprintf(InfoCanNotReleaseLock, testOperatorLockName)) return ctrl.Result{RequeueAfter: RequeueAfterValue}, err @@ -178,7 +175,7 @@ func (r *TempestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re Log.Info(InfoTestingCompleted) return ctrl.Result{}, nil - case CreateFirstJob: + case CreateFirstPod: lockAcquired, err := r.AcquireLock(ctx, instance, helper, instance.Spec.Parallel) if !lockAcquired { Log.Info(fmt.Sprintf(InfoCanNotAcquireLock, testOperatorLockName)) @@ -187,7 +184,7 @@ func (r *TempestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re Log.Info(fmt.Sprintf(InfoCreatingFirstPod, nextWorkflowStep)) - case CreateNextJob: + case CreateNextPod: // Confirm that we still hold the lock. This is useful to check if for // example somebody / something deleted the lock and it got claimed by // another instance. This is considered to be an error state. 
@@ -287,7 +284,7 @@ func (r *TempestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re } // NetworkAttachments - if r.JobExists(ctx, instance, nextWorkflowStep) { + if r.PodExists(ctx, instance, nextWorkflowStep) { networkReady, networkAttachmentStatus, err := nad.VerifyNetworkStatusFromAnnotation( ctx, helper, @@ -319,11 +316,11 @@ func (r *TempestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re } // NetworkAttachments - end - // Create a new job + // Create a new pod mountCerts := r.CheckSecretExists(ctx, instance, "combined-ca-bundle") customDataConfigMapName := GetCustomDataConfigMapName(instance, nextWorkflowStep) EnvVarsConfigMapName := GetEnvVarsConfigMapName(instance, nextWorkflowStep) - jobName := r.GetJobName(instance, nextWorkflowStep) + podName := r.GetPodName(instance, nextWorkflowStep) logsPVCName := r.GetPVCLogsName(instance, workflowStepNum) containerImage, err := r.GetContainerImage(ctx, instance.Spec.ContainerImage, instance) if err != nil { @@ -360,11 +357,11 @@ func (r *TempestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re } } - jobDef := tempest.Job( + podDef := tempest.Pod( instance, serviceLabels, serviceAnnotations, - jobName, + podName, EnvVarsConfigMapName, customDataConfigMapName, logsPVCName, @@ -372,19 +369,12 @@ func (r *TempestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re mountSSHKey, containerImage, ) - tempestJob := job.NewJob( - jobDef, - testv1beta1.ConfigHash, - true, - time.Duration(5)*time.Second, - "", - ) - ctrlResult, err = tempestJob.DoJob(ctx, helper) + ctrlResult, err = r.CreatePod(ctx, *helper, podDef) if err != nil { - // Creation of the tempest job was not successfull. + // Creation of the tempest pod was not successful. // Release the lock and allow other controllers to spawn - // a job. + // a pod. if lockReleased, lockErr := r.ReleaseLock(ctx, instance); lockReleased { return ctrl.Result{RequeueAfter: RequeueAfterValue}, lockErr } @@ -404,7 +394,7 @@ func (r *TempestReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re condition.DeploymentReadyRunningMessage)) return ctrlResult, nil } - // Create a new job - end + // Create a new pod - end return ctrl.Result{}, nil } @@ -429,7 +419,7 @@ func (r *TempestReconciler) reconcileDelete( func (r *TempestReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&testv1beta1.Tempest{}). - Owns(&batchv1.Job{}). + Owns(&corev1.Pod{}). Owns(&corev1.Secret{}). Owns(&corev1.ConfigMap{}).
Complete(r) @@ -508,7 +498,7 @@ func (r *TempestReconciler) setTempestConfigVars(envVars map[string]string, envVars["TEMPEST_EXTERNAL_PLUGIN_REFSPEC"] += externalPluginDictionary.ChangeRefspec + "," } - envVars["TEMPEST_WORKFLOW_STEP_DIR_NAME"] = r.GetJobName(instance, workflowStepNum) + envVars["TEMPEST_WORKFLOW_STEP_DIR_NAME"] = r.GetPodName(instance, workflowStepNum) extraImages := mergeWithWorkflow(tRun.ExtraImages, wtRun.ExtraImages) for _, extraImageDict := range extraImages { diff --git a/controllers/tobiko_controller.go b/controllers/tobiko_controller.go index 7d1e9e46..6ebe1ea9 100644 --- a/controllers/tobiko_controller.go +++ b/controllers/tobiko_controller.go @@ -30,13 +30,11 @@ import ( "github.com/openstack-k8s-operators/lib-common/modules/common/configmap" "github.com/openstack-k8s-operators/lib-common/modules/common/env" "github.com/openstack-k8s-operators/lib-common/modules/common/helper" - "github.com/openstack-k8s-operators/lib-common/modules/common/job" nad "github.com/openstack-k8s-operators/lib-common/modules/common/networkattachment" common_rbac "github.com/openstack-k8s-operators/lib-common/modules/common/rbac" "github.com/openstack-k8s-operators/lib-common/modules/common/util" testv1beta1 "github.com/openstack-k8s-operators/test-operator/api/v1beta1" "github.com/openstack-k8s-operators/test-operator/pkg/tobiko" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" k8s_errors "k8s.io/apimachinery/pkg/api/errors" ctrl "sigs.k8s.io/controller-runtime" @@ -56,7 +54,6 @@ func (r *TobikoReconciler) GetLogger(ctx context.Context) logr.Logger { // +kubebuilder:rbac:groups=test.openstack.org,resources=tobikoes,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=test.openstack.org,resources=tobikoes/status,verbs=get;update;patch // +kubebuilder:rbac:groups=test.openstack.org,resources=tobikoes/finalizers,verbs=update;patch -// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;patch;update;delete; // +kubebuilder:rbac:groups=k8s.cni.cncf.io,resources=network-attachment-definitions,verbs=get;list;watch // +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=roles,verbs=get;list;watch;create;update;patch // +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=rolebindings,verbs=get;list;watch;create;update;patch @@ -147,12 +144,12 @@ func (r *TobikoReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res return ctrl.Result{}, err case Wait: - Log.Info(InfoWaitingOnJob) + Log.Info(InfoWaitingOnPod) return ctrl.Result{RequeueAfter: RequeueAfterValue}, nil case EndTesting: - // All jobs created by the instance were completed. Release the lock - // so that other instances can spawn their jobs. + // All pods created by the instance were completed. Release the lock + // so that other instances can spawn their pods. 
if lockReleased, err := r.ReleaseLock(ctx, instance); !lockReleased { Log.Info(fmt.Sprintf(InfoCanNotReleaseLock, testOperatorLockName)) return ctrl.Result{RequeueAfter: RequeueAfterValue}, err } @@ -165,7 +162,7 @@ func (r *TobikoReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res Log.Info(InfoTestingCompleted) return ctrl.Result{}, nil - case CreateFirstJob: + case CreateFirstPod: lockAcquired, err := r.AcquireLock(ctx, instance, helper, instance.Spec.Parallel) if !lockAcquired { Log.Info(fmt.Sprintf(InfoCanNotAcquireLock, testOperatorLockName)) return ctrl.Result{RequeueAfter: RequeueAfterValue}, err } Log.Info(fmt.Sprintf(InfoCreatingFirstPod, nextWorkflowStep)) - case CreateNextJob: + case CreateNextPod: // Confirm that we still hold the lock. This needs to be checked in order // to prevent a situation where somebody / something deleted the lock and it // got claimed by another instance. @@ -261,7 +258,7 @@ func (r *TobikoReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res } // NetworkAttachments - if r.JobExists(ctx, instance, nextWorkflowStep) { + if r.PodExists(ctx, instance, nextWorkflowStep) { networkReady, networkAttachmentStatus, err := nad.VerifyNetworkStatusFromAnnotation( ctx, helper, @@ -310,7 +307,7 @@ func (r *TobikoReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res // Prepare Tobiko env vars envVars := r.PrepareTobikoEnvVars(ctx, serviceLabels, instance, helper, nextWorkflowStep) - jobName := r.GetJobName(instance, nextWorkflowStep) + podName := r.GetPodName(instance, nextWorkflowStep) logsPVCName := r.GetPVCLogsName(instance, workflowStepNum) containerImage, err := r.GetContainerImage(ctx, instance.Spec.ContainerImage, instance) privileged := r.OverwriteValueWithWorkflow(instance.Spec, "Privileged", "pbool", nextWorkflowStep).(bool) @@ -328,11 +325,11 @@ func (r *TobikoReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res } // Service account, role, binding - end - jobDef := tobiko.Job( + podDef := tobiko.Pod( instance, serviceLabels, serviceAnnotations, - jobName, + podName, logsPVCName, mountCerts, mountKeys, @@ -341,15 +338,8 @@ func (r *TobikoReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res containerImage, privileged, ) - tobikoJob := job.NewJob( - jobDef, - testv1beta1.ConfigHash, - true, - time.Duration(5)*time.Second, - "", - ) - ctrlResult, err = tobikoJob.DoJob(ctx, helper) + ctrlResult, err = r.CreatePod(ctx, *helper, podDef) if err != nil { instance.Status.Conditions.Set(condition.FalseCondition( condition.DeploymentReadyCondition, @@ -375,7 +365,7 @@ func (r *TobikoReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res func (r *TobikoReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&testv1beta1.Tobiko{}). - Owns(&batchv1.Job{}). + Owns(&corev1.Pod{}). Owns(&corev1.Secret{}). Owns(&corev1.ConfigMap{}).
Complete(r) @@ -414,7 +404,7 @@ func (r *TobikoReconciler) PrepareTobikoEnvVars( // Prepare env vars envVars := make(map[string]env.Setter) envVars["USE_EXTERNAL_FILES"] = env.SetValue("True") - envVars["TOBIKO_LOGS_DIR_NAME"] = env.SetValue(r.GetJobName(instance, step)) + envVars["TOBIKO_LOGS_DIR_NAME"] = env.SetValue(r.GetPodName(instance, step)) testenv := r.OverwriteValueWithWorkflow(instance.Spec, "Testenv", "string", step).(string) envVars["TOBIKO_TESTENV"] = env.SetValue(testenv) diff --git a/pkg/ansibletest/job.go b/pkg/ansibletest/job.go deleted file mode 100644 index 0a500341..00000000 --- a/pkg/ansibletest/job.go +++ /dev/null @@ -1,90 +0,0 @@ -package ansibletest - -import ( - "github.com/openstack-k8s-operators/lib-common/modules/common/env" - - testv1beta1 "github.com/openstack-k8s-operators/test-operator/api/v1beta1" - util "github.com/openstack-k8s-operators/test-operator/pkg/util" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Job - prepare job to run AnsibleTests tests -func Job( - instance *testv1beta1.AnsibleTest, - labels map[string]string, - jobName string, - logsPVCName string, - mountCerts bool, - envVars map[string]env.Setter, - workflowOverrideParams map[string]string, - externalWorkflowCounter int, - containerImage string, - privileged bool, -) *batchv1.Job { - - runAsUser := int64(227) - runAsGroup := int64(227) - parallelism := int32(1) - completions := int32(1) - - capabilities := []corev1.Capability{"NET_ADMIN", "NET_RAW"} - securityContext := util.GetSecurityContext(runAsUser, capabilities, privileged) - - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: jobName, - Namespace: instance.Namespace, - Labels: labels, - }, - Spec: batchv1.JobSpec{ - Parallelism: &parallelism, - Completions: &completions, - BackoffLimit: instance.Spec.BackoffLimit, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: corev1.PodSpec{ - AutomountServiceAccountToken: &instance.Spec.Privileged, - RestartPolicy: corev1.RestartPolicyNever, - ServiceAccountName: instance.RbacResourceName(), - SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: &runAsUser, - RunAsGroup: &runAsGroup, - FSGroup: &runAsGroup, - }, - Tolerations: instance.Spec.Tolerations, - NodeSelector: instance.Spec.NodeSelector, - Containers: []corev1.Container{ - { - Name: instance.Name, - Image: containerImage, - Args: []string{}, - Env: env.MergeEnvs([]corev1.EnvVar{}, envVars), - VolumeMounts: GetVolumeMounts(mountCerts, instance, externalWorkflowCounter), - SecurityContext: &securityContext, - Resources: instance.Spec.Resources, - }, - }, - Volumes: GetVolumes( - instance, - logsPVCName, - mountCerts, - workflowOverrideParams, - externalWorkflowCounter, - ), - }, - }, - }, - } - - if len(instance.Spec.SELinuxLevel) > 0 { - job.Spec.Template.Spec.SecurityContext.SELinuxOptions = &corev1.SELinuxOptions{ - Level: instance.Spec.SELinuxLevel, - } - } - - return job -} diff --git a/pkg/ansibletest/pod.go b/pkg/ansibletest/pod.go new file mode 100644 index 00000000..50c68f65 --- /dev/null +++ b/pkg/ansibletest/pod.go @@ -0,0 +1,77 @@ +package ansibletest + +import ( + "github.com/openstack-k8s-operators/lib-common/modules/common/env" + + testv1beta1 "github.com/openstack-k8s-operators/test-operator/api/v1beta1" + util "github.com/openstack-k8s-operators/test-operator/pkg/util" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Pod - prepare pod to run
AnsibleTests tests +func Pod( + instance *testv1beta1.AnsibleTest, + labels map[string]string, + podName string, + logsPVCName string, + mountCerts bool, + envVars map[string]env.Setter, + workflowOverrideParams map[string]string, + externalWorkflowCounter int, + containerImage string, + privileged bool, +) *corev1.Pod { + + runAsUser := int64(227) + runAsGroup := int64(227) + + capabilities := []corev1.Capability{"NET_ADMIN", "NET_RAW"} + securityContext := util.GetSecurityContext(runAsUser, capabilities, privileged) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: instance.Namespace, + Labels: labels, + }, + Spec: corev1.PodSpec{ + AutomountServiceAccountToken: &instance.Spec.Privileged, + RestartPolicy: corev1.RestartPolicyNever, + ServiceAccountName: instance.RbacResourceName(), + Tolerations: instance.Spec.Tolerations, + NodeSelector: instance.Spec.NodeSelector, + SecurityContext: &corev1.PodSecurityContext{ + RunAsUser: &runAsUser, + RunAsGroup: &runAsGroup, + FSGroup: &runAsGroup, + }, + Containers: []corev1.Container{ + { + Name: instance.Name, + Image: containerImage, + Args: []string{}, + Env: env.MergeEnvs([]corev1.EnvVar{}, envVars), + VolumeMounts: GetVolumeMounts(mountCerts, instance, externalWorkflowCounter), + SecurityContext: &securityContext, + Resources: instance.Spec.Resources, + }, + }, + Volumes: GetVolumes( + instance, + logsPVCName, + mountCerts, + workflowOverrideParams, + externalWorkflowCounter, + ), + }, + } + + if len(instance.Spec.SELinuxLevel) > 0 { + pod.Spec.SecurityContext.SELinuxOptions = &corev1.SELinuxOptions{ + Level: instance.Spec.SELinuxLevel, + } + } + + return pod +} diff --git a/pkg/horizontest/job.go b/pkg/horizontest/job.go deleted file mode 100644 index 749e7c7d..00000000 --- a/pkg/horizontest/job.go +++ /dev/null @@ -1,88 +0,0 @@ -package horizontest - -import ( - "github.com/openstack-k8s-operators/lib-common/modules/common/env" - - testv1beta1 "github.com/openstack-k8s-operators/test-operator/api/v1beta1" - util "github.com/openstack-k8s-operators/test-operator/pkg/util" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Job - prepare job to run Horizon tests -func Job( - instance *testv1beta1.HorizonTest, - labels map[string]string, - jobName string, - logsPVCName string, - mountCerts bool, - mountKeys bool, - mountKubeconfig bool, - envVars map[string]env.Setter, - containerImage string, -) *batchv1.Job { - - runAsUser := int64(42455) - runAsGroup := int64(42455) - parallelism := int32(1) - completions := int32(1) - - capabilities := []corev1.Capability{"NET_ADMIN", "NET_RAW"} - securityContext := util.GetSecurityContext(runAsUser, capabilities, instance.Spec.Privileged) - - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: jobName, - Namespace: instance.Namespace, - Labels: labels, - }, - Spec: batchv1.JobSpec{ - Parallelism: &parallelism, - Completions: &completions, - BackoffLimit: instance.Spec.BackoffLimit, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: corev1.PodSpec{ - AutomountServiceAccountToken: &instance.Spec.Privileged, - RestartPolicy: corev1.RestartPolicyNever, - ServiceAccountName: instance.RbacResourceName(), - SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: &runAsUser, - RunAsGroup: &runAsGroup, - FSGroup: &runAsGroup, - }, - Tolerations: instance.Spec.Tolerations, - NodeSelector: instance.Spec.NodeSelector, - Containers: []corev1.Container{ - { - Name: instance.Name, - Image: containerImage, -
Args: []string{}, - Env: env.MergeEnvs([]corev1.EnvVar{}, envVars), - VolumeMounts: GetVolumeMounts(mountCerts, mountKeys, mountKubeconfig, instance), - SecurityContext: &securityContext, - Resources: instance.Spec.Resources, - }, - }, - Volumes: GetVolumes( - instance, - logsPVCName, - mountCerts, - mountKubeconfig, - ), - }, - }, - }, - } - - if len(instance.Spec.SELinuxLevel) > 0 { - job.Spec.Template.Spec.SecurityContext.SELinuxOptions = &corev1.SELinuxOptions{ - Level: instance.Spec.SELinuxLevel, - } - } - - return job -} diff --git a/pkg/horizontest/pod.go b/pkg/horizontest/pod.go new file mode 100644 index 00000000..9660fce7 --- /dev/null +++ b/pkg/horizontest/pod.go @@ -0,0 +1,75 @@ +package horizontest + +import ( + "github.com/openstack-k8s-operators/lib-common/modules/common/env" + + testv1beta1 "github.com/openstack-k8s-operators/test-operator/api/v1beta1" + util "github.com/openstack-k8s-operators/test-operator/pkg/util" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Pod - prepare pod to run Horizon tests +func Pod( + instance *testv1beta1.HorizonTest, + labels map[string]string, + podName string, + logsPVCName string, + mountCerts bool, + mountKeys bool, + mountKubeconfig bool, + envVars map[string]env.Setter, + containerImage string, +) *corev1.Pod { + + runAsUser := int64(42455) + runAsGroup := int64(42455) + + capabilities := []corev1.Capability{"NET_ADMIN", "NET_RAW"} + securityContext := util.GetSecurityContext(runAsUser, capabilities, instance.Spec.Privileged) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: instance.Namespace, + Labels: labels, + }, + Spec: corev1.PodSpec{ + AutomountServiceAccountToken: &instance.Spec.Privileged, + RestartPolicy: corev1.RestartPolicyNever, + ServiceAccountName: instance.RbacResourceName(), + Tolerations: instance.Spec.Tolerations, + NodeSelector: instance.Spec.NodeSelector, + SecurityContext: &corev1.PodSecurityContext{ + RunAsUser: &runAsUser, + RunAsGroup: &runAsGroup, + FSGroup: &runAsGroup, + }, + Containers: []corev1.Container{ + { + Name: instance.Name, + Image: containerImage, + Args: []string{}, + Env: env.MergeEnvs([]corev1.EnvVar{}, envVars), + VolumeMounts: GetVolumeMounts(mountCerts, mountKeys, mountKubeconfig, instance), + SecurityContext: &securityContext, + Resources: instance.Spec.Resources, + }, + }, + Volumes: GetVolumes( + instance, + logsPVCName, + mountCerts, + mountKubeconfig, + ), + }, + } + + if len(instance.Spec.SELinuxLevel) > 0 { + pod.Spec.SecurityContext.SELinuxOptions = &corev1.SELinuxOptions{ + Level: instance.Spec.SELinuxLevel, + } + } + + return pod +} diff --git a/pkg/tempest/job.go b/pkg/tempest/job.go deleted file mode 100644 index b3c91f32..00000000 --- a/pkg/tempest/job.go +++ /dev/null @@ -1,102 +0,0 @@ -package tempest - -import ( - "github.com/openstack-k8s-operators/lib-common/modules/common/env" - - testv1beta1 "github.com/openstack-k8s-operators/test-operator/api/v1beta1" - util "github.com/openstack-k8s-operators/test-operator/pkg/util" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Job - prepare job to run Tempest tests -func Job( - instance *testv1beta1.Tempest, - labels map[string]string, - annotations map[string]string, - jobName string, - envVarsConfigMapName string, - customDataConfigMapName string, - logsPVCName string, - mountCerts bool, - mountSSHKey bool, - containerImage string, -) *batchv1.Job { - - envVars := map[string]env.Setter{} - runAsUser := int64(42480) - runAsGroup :=
int64(42480) - securityContext := util.GetSecurityContext(runAsUser, []corev1.Capability{}, instance.Spec.Privileged) - - job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: jobName, - Namespace: instance.Namespace, - Labels: labels, - }, - Spec: batchv1.JobSpec{ - BackoffLimit: instance.Spec.BackoffLimit, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: annotations, - Labels: labels, - }, - Spec: corev1.PodSpec{ - AutomountServiceAccountToken: &instance.Spec.Privileged, - RestartPolicy: corev1.RestartPolicyNever, - ServiceAccountName: instance.RbacResourceName(), - SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: &runAsUser, - RunAsGroup: &runAsGroup, - FSGroup: &runAsGroup, - }, - Tolerations: instance.Spec.Tolerations, - NodeSelector: instance.Spec.NodeSelector, - Containers: []corev1.Container{ - { - Name: instance.Name + "-tests-runner", - Image: containerImage, - Args: []string{}, - Env: env.MergeEnvs([]corev1.EnvVar{}, envVars), - VolumeMounts: GetVolumeMounts(mountCerts, mountSSHKey, instance), - SecurityContext: &securityContext, - Resources: instance.Spec.Resources, - EnvFrom: []corev1.EnvFromSource{ - { - ConfigMapRef: &corev1.ConfigMapEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: customDataConfigMapName, - }, - }, - }, - { - ConfigMapRef: &corev1.ConfigMapEnvSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: envVarsConfigMapName, - }, - }, - }, - }, - }, - }, - Volumes: GetVolumes( - instance, - customDataConfigMapName, - logsPVCName, - mountCerts, - mountSSHKey, - ), - }, - }, - }, - } - - if len(instance.Spec.SELinuxLevel) > 0 { - job.Spec.Template.Spec.SecurityContext.SELinuxOptions = &corev1.SELinuxOptions{ - Level: instance.Spec.SELinuxLevel, - } - } - - return job -} diff --git a/pkg/tempest/pod.go b/pkg/tempest/pod.go new file mode 100644 index 00000000..5b92c385 --- /dev/null +++ b/pkg/tempest/pod.go @@ -0,0 +1,93 @@ +package tempest + +import ( + "github.com/openstack-k8s-operators/lib-common/modules/common/env" + + testv1beta1 "github.com/openstack-k8s-operators/test-operator/api/v1beta1" + util "github.com/openstack-k8s-operators/test-operator/pkg/util" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Pod - prepare pod to run Tempest tests +func Pod( + instance *testv1beta1.Tempest, + labels map[string]string, + annotations map[string]string, + podName string, + envVarsConfigMapName string, + customDataConfigMapName string, + logsPVCName string, + mountCerts bool, + mountSSHKey bool, + containerImage string, +) *corev1.Pod { + + envVars := map[string]env.Setter{} + runAsUser := int64(42480) + runAsGroup := int64(42480) + securityContext := util.GetSecurityContext(runAsUser, []corev1.Capability{}, instance.Spec.Privileged) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: annotations, + Name: podName, + Namespace: instance.Namespace, + Labels: labels, + }, + Spec: corev1.PodSpec{ + AutomountServiceAccountToken: &instance.Spec.Privileged, + RestartPolicy: corev1.RestartPolicyNever, + ServiceAccountName: instance.RbacResourceName(), + Tolerations: instance.Spec.Tolerations, + NodeSelector: instance.Spec.NodeSelector, + SecurityContext: &corev1.PodSecurityContext{ + RunAsUser: &runAsUser, + RunAsGroup: &runAsGroup, + FSGroup: &runAsGroup, + }, + Containers: []corev1.Container{ + { + Name: instance.Name + "-tests-runner", + Image: containerImage, + Args: []string{}, + Env: env.MergeEnvs([]corev1.EnvVar{}, envVars), + VolumeMounts: GetVolumeMounts(mountCerts,
mountSSHKey, instance), + SecurityContext: &securityContext, + Resources: instance.Spec.Resources, + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: customDataConfigMapName, + }, + }, + }, + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: envVarsConfigMapName, + }, + }, + }, + }, + }, + }, + Volumes: GetVolumes( + instance, + customDataConfigMapName, + logsPVCName, + mountCerts, + mountSSHKey, + ), + }, + } + + if len(instance.Spec.SELinuxLevel) > 0 { + pod.Spec.SecurityContext.SELinuxOptions = &corev1.SELinuxOptions{ + Level: instance.Spec.SELinuxLevel, + } + } + + return pod +} diff --git a/pkg/tobiko/job.go b/pkg/tobiko/job.go deleted file mode 100644 index 2ba14e68..00000000 --- a/pkg/tobiko/job.go +++ /dev/null @@ -1,94 +0,0 @@ -package tobiko - -import ( - "github.com/openstack-k8s-operators/lib-common/modules/common/env" - - testv1beta1 "github.com/openstack-k8s-operators/test-operator/api/v1beta1" - util "github.com/openstack-k8s-operators/test-operator/pkg/util" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Job - prepare job to run Tempest tests -func Job( - instance *testv1beta1.Tobiko, - labels map[string]string, - annotations map[string]string, - jobName string, - logsPVCName string, - mountCerts bool, - mountKeys bool, - mountKubeconfig bool, - envVars map[string]env.Setter, - containerImage string, - privileged bool, -) *batchv1.Job { - - runAsUser := int64(42495) - runAsGroup := int64(42495) - parallelism := int32(1) - completions := int32(1) - - capabilities := []corev1.Capability{"NET_ADMIN", "NET_RAW"} - securityContext := util.GetSecurityContext(runAsUser, capabilities, privileged) - - // Note(lpiwowar): Once the webhook is implemented move all the logic of merging - // the workflows there. 
- job := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: jobName, - Namespace: instance.Namespace, - Labels: labels, - }, - Spec: batchv1.JobSpec{ - Parallelism: &parallelism, - Completions: &completions, - BackoffLimit: instance.Spec.BackoffLimit, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: annotations, - Labels: labels, - }, - Spec: corev1.PodSpec{ - AutomountServiceAccountToken: &instance.Spec.Privileged, - RestartPolicy: corev1.RestartPolicyNever, - ServiceAccountName: instance.RbacResourceName(), - SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: &runAsUser, - RunAsGroup: &runAsGroup, - FSGroup: &runAsGroup, - }, - Tolerations: instance.Spec.Tolerations, - NodeSelector: instance.Spec.NodeSelector, - Containers: []corev1.Container{ - { - Name: instance.Name, - Image: containerImage, - Args: []string{}, - Env: env.MergeEnvs([]corev1.EnvVar{}, envVars), - VolumeMounts: GetVolumeMounts(mountCerts, mountKeys, mountKubeconfig, instance), - SecurityContext: &securityContext, - Resources: instance.Spec.Resources, - }, - }, - Volumes: GetVolumes( - instance, - logsPVCName, - mountCerts, - mountKeys, - mountKubeconfig, - ), - }, - }, - }, - } - - if len(instance.Spec.SELinuxLevel) > 0 { - job.Spec.Template.Spec.SecurityContext.SELinuxOptions = &corev1.SELinuxOptions{ - Level: instance.Spec.SELinuxLevel, - } - } - - return job -} diff --git a/pkg/tobiko/pod.go b/pkg/tobiko/pod.go new file mode 100644 index 00000000..05767c7a --- /dev/null +++ b/pkg/tobiko/pod.go @@ -0,0 +1,79 @@ +package tobiko + +import ( + "github.com/openstack-k8s-operators/lib-common/modules/common/env" + + testv1beta1 "github.com/openstack-k8s-operators/test-operator/api/v1beta1" + util "github.com/openstack-k8s-operators/test-operator/pkg/util" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Pod - prepare pod to run Tobiko tests +func Pod( + instance *testv1beta1.Tobiko, + labels map[string]string, + annotations map[string]string, + podName string, + logsPVCName string, + mountCerts bool, + mountKeys bool, + mountKubeconfig bool, + envVars map[string]env.Setter, + containerImage string, + privileged bool, +) *corev1.Pod { + + runAsUser := int64(42495) + runAsGroup := int64(42495) + + capabilities := []corev1.Capability{"NET_ADMIN", "NET_RAW"} + securityContext := util.GetSecurityContext(runAsUser, capabilities, privileged) + + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: annotations, + Name: podName, + Namespace: instance.Namespace, + Labels: labels, + }, + Spec: corev1.PodSpec{ + AutomountServiceAccountToken: &instance.Spec.Privileged, + RestartPolicy: corev1.RestartPolicyNever, + ServiceAccountName: instance.RbacResourceName(), + Tolerations: instance.Spec.Tolerations, + NodeSelector: instance.Spec.NodeSelector, + SecurityContext: &corev1.PodSecurityContext{ + RunAsUser: &runAsUser, + RunAsGroup: &runAsGroup, + FSGroup: &runAsGroup, + }, + Containers: []corev1.Container{ + { + Name: instance.Name, + Image: containerImage, + Args: []string{}, + Env: env.MergeEnvs([]corev1.EnvVar{}, envVars), + VolumeMounts: GetVolumeMounts(mountCerts, mountKeys, mountKubeconfig, instance), + SecurityContext: &securityContext, + Resources: instance.Spec.Resources, + }, + }, + Volumes: GetVolumes( + instance, + logsPVCName, + mountCerts, + mountKeys, + mountKubeconfig, + ), + }, + } + + if len(instance.Spec.SELinuxLevel) > 0 { + pod.Spec.SecurityContext.SELinuxOptions = &corev1.SELinuxOptions{ + Level: instance.Spec.SELinuxLevel, + } + } + + return pod +}
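Reviewer note: the following is a minimal usage sketch, not part of the change above. It shows how the new helpers from controllers/common.go compose, using the AnsibleTest variant as an example; the argument names follow the Pod() signature introduced above and the variables already present in the controller's Reconcile, so treat them as placeholders.

    // Build the pod definition and hand it to CreatePod, which is idempotent:
    // it returns cleanly when a pod with the same name already exists.
    podDef := ansibletest.Pod(instance, serviceLabels, podName, logsPVCName,
        mountCerts, envVars, workflowOverrideParams, externalWorkflowCounter,
        containerImage, privileged)
    if _, err := r.CreatePod(ctx, *helper, podDef); err != nil {
        return ctrl.Result{}, err
    }

    // Completion is now read from the pod phase (the same test NextAction
    // applies), replacing the old batchv1 Job Succeeded/Failed counters.
    pod, err := r.GetPod(ctx, podDef.Name, podDef.Namespace)
    if err != nil {
        return ctrl.Result{}, err
    }
    finished := pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed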