diff --git a/api/v1alpha1/gatling_types.go b/api/v1alpha1/gatling_types.go
index 10d066a..e1b1977 100644
--- a/api/v1alpha1/gatling_types.go
+++ b/api/v1alpha1/gatling_types.go
@@ -79,6 +79,14 @@ type PodSpec struct {
 	// Affinity specification
 	// +kubebuilder:validation:Optional
 	Affinity corev1.Affinity `json:"affinity,omitempty"`
+
+	// Tolerations specification
+	// +kubebuilder:validation:Optional
+	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+
+	// ServiceAccountName specification
+	// +kubebuilder:validation:Optional
+	ServiceAccountName string `json:"serviceAccountName,omitempty"`
 }
 
 // TestScenarioSpec defines the load testing scenario
@@ -128,7 +136,7 @@ type TestScenarioSpec struct {
 
 type CloudStorageSpec struct {
 	// Provider specifies the cloud provider that will be used.
-	// Supported providers: aws
+	// Supported providers: aws, gcp
 	// +kubebuilder:validation:Optional
 	Provider string `json:"provider"`
 
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 6e8cfea..b7d2272 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -1,4 +1,3 @@
-//go:build !ignore_autogenerated
 // +build !ignore_autogenerated
 
 /*
@@ -161,6 +160,13 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
 	*out = *in
 	in.Resources.DeepCopyInto(&out.Resources)
 	in.Affinity.DeepCopyInto(&out.Affinity)
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]v1.Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec.
diff --git a/config/crd/bases/gatling-operator.tech.zozo.com_gatlings.yaml b/config/crd/bases/gatling-operator.tech.zozo.com_gatlings.yaml
index d1c6121..045544f 100644
--- a/config/crd/bases/gatling-operator.tech.zozo.com_gatlings.yaml
+++ b/config/crd/bases/gatling-operator.tech.zozo.com_gatlings.yaml
@@ -841,6 +841,50 @@ spec:
                           to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
                         type: object
                     type: object
+                  serviceAccountName:
+                    description: ServiceAccountName specification
+                    type: string
+                  tolerations:
+                    description: Tolerations specification
+                    items:
+                      description: The pod this Toleration is attached to tolerates
+                        any taint that matches the triple <key,value,effect> using
+                        the matching operator <operator>.
+                      properties:
+                        effect:
+                          description: Effect indicates the taint effect to match.
+                            Empty means match all taint effects. When specified, allowed
+                            values are NoSchedule, PreferNoSchedule and NoExecute.
+                          type: string
+                        key:
+                          description: Key is the taint key that the toleration applies
+                            to. Empty means match all taint keys. If the key is empty,
+                            operator must be Exists; this combination means to match
+                            all values and all keys.
+                          type: string
+                        operator:
+                          description: Operator represents a key's relationship to
+                            the value. Valid operators are Exists and Equal. Defaults
+                            to Equal. Exists is equivalent to wildcard for value,
+                            so that a pod can tolerate all taints of a particular
+                            category.
+                          type: string
+                        tolerationSeconds:
+                          description: TolerationSeconds represents the period of
+                            time the toleration (which must be of effect NoExecute,
+                            otherwise this field is ignored) tolerates the taint.
+                            By default, it is not set, which means tolerate the taint
+                            forever (do not evict). Zero and negative values will
+                            be treated as 0 (evict immediately) by the system.
+                          format: int64
+                          type: integer
+                        value:
+                          description: Value is the taint value the toleration matches
+                            to. If the operator is Exists, the value should be empty,
+                            otherwise just a regular string.
+                          type: string
+                      type: object
+                    type: array
                 type: object
               testScenarioSpec:
                 description: Test Scenario specification
diff --git a/config/samples/gatling-operator_v1alpha1_gatling01.yaml b/config/samples/gatling-operator_v1alpha1_gatling01.yaml
index 8374265..3ae34a7 100644
--- a/config/samples/gatling-operator_v1alpha1_gatling01.yaml
+++ b/config/samples/gatling-operator_v1alpha1_gatling01.yaml
@@ -22,8 +22,13 @@ spec:
                   operator: In
                   values:
                     - linux
+    tolerations:
+      - key: "node-type"
+        operator: "Equal"
+        value: "non-kube-system"
+        effect: "NoSchedule"
   cloudStorageSpec:
-    provider: "aws" # Provider specifies the cloud provider that will be used. Supported providers: "aws"
+    provider: "aws" # Provider specifies the cloud provider that will be used. Supported providers: "aws", "gcp"
     bucket: "gatling-operator-reports" # Bucket name in cloud storage service, on which Gatlilng report files are stored
     region: "ap-northeast-1" # Optional. Default: "ap-northeast-1" for aws provider. Region name
     #env: # Optional. Environment variables to be used for connecting to the cloud providers
diff --git a/config/samples/gatling-operator_v1alpha1_gatling02.yaml b/config/samples/gatling-operator_v1alpha1_gatling02.yaml
index f08e184..2ca98b5 100644
--- a/config/samples/gatling-operator_v1alpha1_gatling02.yaml
+++ b/config/samples/gatling-operator_v1alpha1_gatling02.yaml
@@ -23,7 +23,7 @@ spec:
                   values:
                     - linux
   cloudStorageSpec:
-    provider: "aws" # Provider specifies the cloud provider that will be used. Supported providers: "aws"
+    provider: "aws" # Provider specifies the cloud provider that will be used. Supported providers: "aws", "gcp"
     bucket: "gatling-operator-reports" # Bucket name in cloud storage service, on which Gatlilng report files are stored
     region: "ap-northeast-1" # Optional. Default: "ap-northeast-1" for aws provider. Region name
     #env: # Optional. Environment variables to be used for connecting to the cloud providers
diff --git a/controllers/cloudstorage.go b/controllers/cloudstorage.go
index 8b57d98..8b0f70d 100644
--- a/controllers/cloudstorage.go
+++ b/controllers/cloudstorage.go
@@ -9,8 +9,8 @@ func getCloudStoragePath(provider string, bucket string, gatlingName string, sub
 	case "aws":
 		// Format s3:<bucket>/<gatling-name>/<sub-dir>
 		return fmt.Sprintf("s3:%s/%s/%s", bucket, gatlingName, subDir)
-	case "gcp": //not supported yet
-		return ""
+	case "gcp":
+		return fmt.Sprintf("gs://%s/%s/%s", bucket, gatlingName, subDir)
 	case "azure": //not supported yet
 		return ""
 	default:
@@ -26,7 +26,7 @@ func getCloudStorageReportURL(provider string, bucket string, gatlingName string
 	case "gcp": //not supported yet
 		// Format http(s)://<bucket>.storage.googleapis.com/<gatling-name>/<sub-dir>/index.html
 		// or http(s)://storage.googleapis.com/<bucket>/<gatling-name>/<sub-dir>/index.html
-		return ""
+		return fmt.Sprintf("https://storage.googleapis.com/%s/%s/%s/index.html", bucket, gatlingName, subDir)
 	case "azure": //not supported yet
 		// Format https://<bucket>.blob.core.windows.net/<gatling-name>/<sub-dir>/index.html
 		return ""
diff --git a/controllers/cloudstorage_test.go b/controllers/cloudstorage_test.go
index 67366b2..d1d80c1 100644
--- a/controllers/cloudstorage_test.go
+++ b/controllers/cloudstorage_test.go
@@ -41,8 +41,8 @@ var _ = Describe("getCloudStoragePath", func() {
 		BeforeEach(func() {
 			provider = "gcp"
 		})
-		It("path is empty", func() {
-			Expect(getCloudStoragePath(provider, bucket, gatlingName, subDir)).To(Equal(""))
+		It("path is gcp gcs bucket", func() {
+			Expect(getCloudStoragePath(provider, bucket, gatlingName, subDir)).To(Equal("gs://testBucket/testGatling/subDir"))
 		})
 	})
 
@@ -92,8 +92,8 @@ var _ = Describe("getCloudStorageReportURL", func() {
 		BeforeEach(func() {
 			provider = "gcp"
 		})
-		It("path is empty", func() {
-			Expect(getCloudStorageReportURL(provider, bucket, gatlingName, subDir)).To(Equal(""))
+		It("path is gcp gcs bucket", func() {
+			Expect(getCloudStorageReportURL(provider, bucket, gatlingName, subDir)).To(Equal("https://storage.googleapis.com/testBucket/testGatling/subDir/index.html"))
 		})
 	})
 
diff --git a/controllers/commands.go b/controllers/commands.go
index 68ba693..383240d 100644
--- a/controllers/commands.go
+++ b/controllers/commands.go
@@ -61,8 +61,17 @@ do
 done
 `
 		return fmt.Sprintf(template, resultsDirectoryPath, region, storagePath)
-	case "gcp": //not supported yet
-		return ""
+	case "gcp":
+		template := `
+RESULTS_DIR_PATH=%s
+rclone config create gs "google cloud storage" --gcs-anonymous
+# assumes each pod only contain single gatling log file but use for loop to use find command result
+for source in $(find ${RESULTS_DIR_PATH} -type f -name *.log)
+do
+  rclone copyto ${source} %s/${HOSTNAME}.log
+done
+`
+		return fmt.Sprintf(template, resultsDirectoryPath, storagePath)
 	case "azure": //not supported yet
 		return ""
 	default:
@@ -79,8 +88,13 @@ rclone config create s3 s3 env_auth=true region %s
 rclone copy --s3-no-check-bucket --s3-env-auth %s ${GATLING_AGGREGATE_DIR}
 `
 		return fmt.Sprintf(template, resultsDirectoryPath, region, storagePath)
-	case "gcp": //not supported yet
-		return ""
+	case "gcp":
+		template := `
+GATLING_AGGREGATE_DIR=%s
+rclone config create gs "google cloud storage" --gcs-anonymous
+rclone copy %s ${GATLING_AGGREGATE_DIR}
+`
+		return fmt.Sprintf(template, resultsDirectoryPath, storagePath)
 	case "azure": //not supported yet
 		return ""
 	default:
@@ -107,8 +121,13 @@ rclone config create s3 s3 env_auth=true region %s
 rclone copy ${GATLING_AGGREGATE_DIR} --exclude "*.log" --s3-no-check-bucket --s3-env-auth %s
 `
 		return fmt.Sprintf(template, resultsDirectoryPath, region, storagePath)
-	case "gcp": //not supported yet
-		return ""
+	case "gcp":
+		template := `
+GATLING_AGGREGATE_DIR=%s
+rclone config create gs "google cloud storage" --gcs-anonymous
+rclone copy ${GATLING_AGGREGATE_DIR} --exclude "*.log" %s
+`
+		return fmt.Sprintf(template, resultsDirectoryPath, storagePath)
 	case "azure": //not supported yet
 		return ""
 	default:
diff --git a/controllers/commands_test.go b/controllers/commands_test.go
index 9c7b2fe..b87f49b 100644
--- a/controllers/commands_test.go
+++ b/controllers/commands_test.go
@@ -97,7 +97,15 @@ done
 	Context("Provider is gcp", func() {
 		BeforeEach(func() {
 			provider = "gcp"
-			expectedValue = ""
+			expectedValue = `
+RESULTS_DIR_PATH=testResultsDirectoryPath
+rclone config create gs "google cloud storage" --gcs-anonymous
+# assumes each pod only contain single gatling log file but use for loop to use find command result
+for source in $(find ${RESULTS_DIR_PATH} -type f -name *.log)
+do
+  rclone copyto ${source} testStoragePath/${HOSTNAME}.log
+done
+`
 		})
 		It("Provider is gcp", func() {
 			Expect(getGatlingTransferResultCommand(resultsDirectoryPath, provider, region, storagePath)).To(Equal(expectedValue))
@@ -157,7 +165,11 @@ rclone copy --s3-no-check-bucket --s3-env-auth testStoragePath ${GATLING_AGGREGA
 	Context("Provider is gcp", func() {
 		BeforeEach(func() {
 			provider = "gcp"
-			expectedValue = ""
+			expectedValue = `
+GATLING_AGGREGATE_DIR=testResultsDirectoryPath
+rclone config create gs "google cloud storage" --gcs-anonymous
+rclone copy testStoragePath ${GATLING_AGGREGATE_DIR}
+`
 		})
 		It("Provider is gcp", func() {
 			Expect(getGatlingAggregateResultCommand(resultsDirectoryPath, provider, region, storagePath)).To(Equal(expectedValue))
@@ -238,7 +250,11 @@ rclone copy ${GATLING_AGGREGATE_DIR} --exclude "*.log" --s3-no-check-bucket --s3
 	Context("Provider is gcp", func() {
 		BeforeEach(func() {
 			provider = "gcp"
-			expectedValue = ""
+			expectedValue = `
+GATLING_AGGREGATE_DIR=testResultsDirectoryPath
+rclone config create gs "google cloud storage" --gcs-anonymous
+rclone copy ${GATLING_AGGREGATE_DIR} --exclude "*.log" testStoragePath
+`
 		})
 		It("Provider is gcp", func() {
 			Expect(getGatlingTransferReportCommand(resultsDirectoryPath, provider, region, storagePath)).To(Equal(expectedValue))
diff --git a/controllers/gatling_controller.go b/controllers/gatling_controller.go
index a4024a8..708c154 100644
--- a/controllers/gatling_controller.go
+++ b/controllers/gatling_controller.go
@@ -274,7 +274,7 @@ func (r *GatlingReconciler) gatlingRunnerReconcile(ctx context.Context, req ctrl
 // Implementation of reconciler logic for the reporter job
 func (r *GatlingReconciler) gatlingReporterReconcile(ctx context.Context, req ctrl.Request, gatling *gatlingv1alpha1.Gatling, log logr.Logger) (bool, error) {
 	// Check if cloud storage info is given, and skip the reporter job if prerequistes are not made
-	if r.getCloudStorageProvider(gatling) == "" || r.getCloudStorageRegion(gatling) == "" || r.getCloudStorageBucket(gatling) == "" {
+	if r.getCloudStorageProvider(gatling) == "" || (r.getCloudStorageRegion(gatling) == "" && r.getCloudStorageProvider(gatling) == "aws") || r.getCloudStorageBucket(gatling) == "" {
 		log.Error(nil, "Minimum cloud storage info is not given, thus skip reporting reconcile, and requeue")
 		gatling.Status.ReportCompleted = true
 		gatling.Status.NotificationCompleted = false
@@ -452,7 +452,9 @@ func (r *GatlingReconciler) newGatlingRunnerJobForCR(gatling *gatlingv1alpha1.Ga
 			Completions: r.getGatlingRunnerJobParallelism(gatling),
 			Template: corev1.PodTemplateSpec{
 				Spec: corev1.PodSpec{
-					Affinity: r.getPodAffinity(gatling),
+					Affinity:           r.getPodAffinity(gatling),
+					Tolerations:        r.getPodTolerations(gatling),
+					ServiceAccountName: r.getPodServiceAccountName(gatling),
 					InitContainers: []corev1.Container{
 						{
 							Name: "gatling-runner",
@@ -498,7 +500,9 @@ func (r *GatlingReconciler) newGatlingRunnerJobForCR(gatling *gatlingv1alpha1.Ga
 			Completions: &gatling.Spec.TestScenarioSpec.Parallelism,
 			Template: corev1.PodTemplateSpec{
 				Spec: corev1.PodSpec{
-					Affinity: r.getPodAffinity(gatling),
+					Affinity:           r.getPodAffinity(gatling),
+					Tolerations:        r.getPodTolerations(gatling),
+					ServiceAccountName: r.getPodServiceAccountName(gatling),
 					Containers: []corev1.Container{
 						{
 							Name: "gatling-runner",
@@ -553,7 +557,9 @@ func (r *GatlingReconciler) newGatlingReporterJobForCR(gatling *gatlingv1alpha1.
 		Spec: batchv1.JobSpec{
 			Template: corev1.PodTemplateSpec{
 				Spec: corev1.PodSpec{
-					Affinity: r.getPodAffinity(gatling),
+					Affinity:           r.getPodAffinity(gatling),
+					Tolerations:        r.getPodTolerations(gatling),
+					ServiceAccountName: r.getPodServiceAccountName(gatling),
 					InitContainers: []corev1.Container{
 						{
 							Name: "gatling-result-aggregator",
@@ -893,6 +899,22 @@ func (r *GatlingReconciler) getPodAffinity(gatling *gatlingv1alpha1.Gatling) *co
 	return &affinity
 }
 
+func (r *GatlingReconciler) getPodTolerations(gatling *gatlingv1alpha1.Gatling) []corev1.Toleration {
+	tolerations := []corev1.Toleration{}
+	if &gatling.Spec.PodSpec != nil && gatling.Spec.PodSpec.Tolerations != nil {
+		tolerations = gatling.Spec.PodSpec.Tolerations
+	}
+	return tolerations
+}
+
+func (r *GatlingReconciler) getPodServiceAccountName(gatling *gatlingv1alpha1.Gatling) string {
+	serviceAccountName := ""
+	if &gatling.Spec.PodSpec != nil && gatling.Spec.PodSpec.ServiceAccountName != "" {
+		serviceAccountName = gatling.Spec.PodSpec.ServiceAccountName
+	}
+	return serviceAccountName
+}
+
 func (r *GatlingReconciler) getSimulationsDirectoryPath(gatling *gatlingv1alpha1.Gatling) string {
 	path := defaultSimulationsDirectoryPath
 	if &gatling.Spec.TestScenarioSpec != nil && gatling.Spec.TestScenarioSpec.SimulationsDirectoryPath != "" {
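
Below is a minimal, hypothetical Gatling manifest (not part of the diff above) sketching how the pieces introduced here fit together: podSpec.tolerations and podSpec.serviceAccountName from the API change, plus cloudStorageSpec.provider set to "gcp" so getCloudStoragePath writes results under gs://<bucket>/<gatling-name>/<sub-dir>. The service account, bucket, and simulation class names are placeholders, not values taken from this diff.

# Hypothetical usage sketch; every name below is a placeholder.
apiVersion: gatling-operator.tech.zozo.com/v1alpha1
kind: Gatling
metadata:
  name: gatling-sample-gcp
spec:
  generateReport: true
  podSpec:
    serviceAccountName: "gatling-worker"   # pod-level service account used by the runner/reporter jobs
    tolerations:                           # same shape as the sample added to gatling01.yaml
      - key: "node-type"
        operator: "Equal"
        value: "non-kube-system"
        effect: "NoSchedule"
  cloudStorageSpec:
    provider: "gcp"                        # region may be omitted; the reporter only requires it for "aws"
    bucket: "gatling-operator-reports"
  testScenarioSpec:
    simulationClass: "MyBasicSimulation"   # placeholder simulation class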