Skip to content

Commit

Permalink
Merge pull request #2 from st-tech/feature/add-gcs-implementation-as-…
Browse files Browse the repository at this point in the history
…a-cloud-storage

add GCS related implementation / add toleration, serviceAccountName to PodSpec
  • Loading branch information
kayamin authored Jan 14, 2022
2 parents 89c663f + e70efa1 commit 8111bee
Show file tree
Hide file tree
Showing 10 changed files with 141 additions and 24 deletions.
10 changes: 9 additions & 1 deletion api/v1alpha1/gatling_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,14 @@ type PodSpec struct {
// Affinity specification
// +kubebuilder:validation:Optional
Affinity corev1.Affinity `json:"affinity,omitempty"`

// Tolerations specification
// +kubebuilder:validation:Optional
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`

// ServiceAccountName specification
// +kubebuilder:validation:Optional
ServiceAccountName string `json:"serviceAccountName,omitempty"`
}

// TestScenarioSpec defines the load testing scenario
Expand Down Expand Up @@ -128,7 +136,7 @@ type TestScenarioSpec struct {

type CloudStorageSpec struct {
// Provider specifies the cloud provider that will be used.
// Supported providers: aws
// Supported providers: aws, gcp
// +kubebuilder:validation:Optional
Provider string `json:"provider"`

Expand Down
8 changes: 7 additions & 1 deletion api/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

41 changes: 41 additions & 0 deletions config/crd/bases/gatling-operator.tech.zozo.com_gatlings.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -841,6 +841,47 @@ spec:
to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
type: object
type: object
tolerations:
description: Tolerations specification
items:
description: The pod this Toleration is attached to tolerates
any taint that matches the triple <key,value,effect> using
the matching operator <operator>.
properties:
effect:
description: Effect indicates the taint effect to match.
Empty means match all taint effects. When specified, allowed
values are NoSchedule, PreferNoSchedule and NoExecute.
type: string
key:
description: Key is the taint key that the toleration applies
to. Empty means match all taint keys. If the key is empty,
operator must be Exists; this combination means to match
all values and all keys.
type: string
operator:
description: Operator represents a key's relationship to
the value. Valid operators are Exists and Equal. Defaults
to Equal. Exists is equivalent to wildcard for value,
so that a pod can tolerate all taints of a particular
category.
type: string
tolerationSeconds:
description: TolerationSeconds represents the period of
time the toleration (which must be of effect NoExecute,
otherwise this field is ignored) tolerates the taint.
By default, it is not set, which means tolerate the taint
forever (do not evict). Zero and negative values will
be treated as 0 (evict immediately) by the system.
format: int64
type: integer
value:
description: Value is the taint value the toleration matches
to. If the operator is Exists, the value should be empty,
otherwise just a regular string.
type: string
type: object
type: array
type: object
testScenarioSpec:
description: Test Scenario specification
Expand Down
7 changes: 6 additions & 1 deletion config/samples/gatling-operator_v1alpha1_gatling01.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,13 @@ spec:
operator: In
values:
- linux
tolerations:
- key: "node-type"
operator: "Equal"
value: "non-kube-system"
effect: "NoSchedule"
cloudStorageSpec:
provider: "aws" # Provider specifies the cloud provider that will be used. Supported providers: "aws"
provider: "aws" # Provider specifies the cloud provider that will be used. Supported providers: "aws", "gcp"
bucket: "gatling-operator-reports" # Bucket name in cloud storage service, on which Gatling report files are stored
region: "ap-northeast-1" # Optional. Default: "ap-northeast-1" for aws provider. Region name
#env: # Optional. Environment variables to be used for connecting to the cloud providers
Expand Down
2 changes: 1 addition & 1 deletion config/samples/gatling-operator_v1alpha1_gatling02.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ spec:
values:
- linux
cloudStorageSpec:
provider: "aws" # Provider specifies the cloud provider that will be used. Supported providers: "aws"
provider: "aws" # Provider specifies the cloud provider that will be used. Supported providers: "aws", "gcp"
bucket: "gatling-operator-reports" # Bucket name in cloud storage service, on which Gatling report files are stored
region: "ap-northeast-1" # Optional. Default: "ap-northeast-1" for aws provider. Region name
#env: # Optional. Environment variables to be used for connecting to the cloud providers
Expand Down
6 changes: 3 additions & 3 deletions controllers/cloudstorage.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,8 @@ func getCloudStoragePath(provider string, bucket string, gatlingName string, sub
case "aws":
// Format s3:<bucket>/<gatling-name>/<sub-dir>
return fmt.Sprintf("s3:%s/%s/%s", bucket, gatlingName, subDir)
case "gcp": //not supported yet
return ""
case "gcp":
return fmt.Sprintf("gs://%s/%s/%s", bucket, gatlingName, subDir)
case "azure": //not supported yet
return ""
default:
Expand All @@ -26,7 +26,7 @@ func getCloudStorageReportURL(provider string, bucket string, gatlingName string
case "gcp":
// Format http(s)://<bucket>.storage.googleapis.com/<gatling-name>/<sub-dir>/index.html
// or http(s)://storage.googleapis.com/<bucket>/<gatling-name>/<sub-dir>/index.html
return ""
return fmt.Sprintf("https://storage.googleapis.com/%s/%s/%s/index.html", bucket, gatlingName, subDir)
case "azure": //not supported yet
// Format https://<bucket>.blob.core.windows.net/<gatling-name>/<sub-dir>/index.html
return ""
Expand Down
8 changes: 4 additions & 4 deletions controllers/cloudstorage_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,8 @@ var _ = Describe("getCloudStoragePath", func() {
BeforeEach(func() {
provider = "gcp"
})
It("path is empty", func() {
Expect(getCloudStoragePath(provider, bucket, gatlingName, subDir)).To(Equal(""))
It("path is gcp gcs bucket", func() {
Expect(getCloudStoragePath(provider, bucket, gatlingName, subDir)).To(Equal("gs://testBucket/testGatling/subDir"))
})
})

Expand Down Expand Up @@ -92,8 +92,8 @@ var _ = Describe("getCloudStorageReportURL", func() {
BeforeEach(func() {
provider = "gcp"
})
It("path is empty", func() {
Expect(getCloudStorageReportURL(provider, bucket, gatlingName, subDir)).To(Equal(""))
It("path is gcp gcs bucket", func() {
Expect(getCloudStorageReportURL(provider, bucket, gatlingName, subDir)).To(Equal("https://storage.googleapis.com/testBucket/testGatling/subDir/index.html"))
})
})

Expand Down
31 changes: 25 additions & 6 deletions controllers/commands.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,8 +61,17 @@ do
done
`
return fmt.Sprintf(template, resultsDirectoryPath, region, storagePath)
case "gcp": //not supported yet
return ""
case "gcp":
template := `
RESULTS_DIR_PATH=%s
rclone config create gs "google cloud storage" --gcs-anonymous
# assumes each pod only contain single gatling log file but use for loop to use find command result
for source in $(find ${RESULTS_DIR_PATH} -type f -name *.log)
do
rclone copyto ${source} %s/${HOSTNAME}.log
done
`
return fmt.Sprintf(template, resultsDirectoryPath, storagePath)
case "azure": //not supported yet
return ""
default:
Expand All @@ -79,8 +88,13 @@ rclone config create s3 s3 env_auth=true region %s
rclone copy --s3-no-check-bucket --s3-env-auth %s ${GATLING_AGGREGATE_DIR}
`
return fmt.Sprintf(template, resultsDirectoryPath, region, storagePath)
case "gcp": //not supported yet
return ""
case "gcp":
template := `
GATLING_AGGREGATE_DIR=%s
rclone config create gs "google cloud storage" --gcs-anonymous
rclone copy %s ${GATLING_AGGREGATE_DIR}
`
return fmt.Sprintf(template, resultsDirectoryPath, storagePath)
case "azure": //not supported yet
return ""
default:
Expand All @@ -107,8 +121,13 @@ rclone config create s3 s3 env_auth=true region %s
rclone copy ${GATLING_AGGREGATE_DIR} --exclude "*.log" --s3-no-check-bucket --s3-env-auth %s
`
return fmt.Sprintf(template, resultsDirectoryPath, region, storagePath)
case "gcp": //not supported yet
return ""
case "gcp":
template := `
GATLING_AGGREGATE_DIR=%s
rclone config create gs "google cloud storage" --gcs-anonymous
rclone copy ${GATLING_AGGREGATE_DIR} --exclude "*.log" %s
`
return fmt.Sprintf(template, resultsDirectoryPath, storagePath)
case "azure": //not supported yet
return ""
default:
Expand Down
22 changes: 19 additions & 3 deletions controllers/commands_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,15 @@ done
Context("Provider is gcp", func() {
BeforeEach(func() {
provider = "gcp"
expectedValue = ""
expectedValue = `
RESULTS_DIR_PATH=testResultsDirectoryPath
rclone config create gs "google cloud storage" --gcs-anonymous
# assumes each pod only contain single gatling log file but use for loop to use find command result
for source in $(find ${RESULTS_DIR_PATH} -type f -name *.log)
do
rclone copyto ${source} testStoragePath/${HOSTNAME}.log
done
`
})
It("Provider is gcp", func() {
Expect(getGatlingTransferResultCommand(resultsDirectoryPath, provider, region, storagePath)).To(Equal(expectedValue))
Expand Down Expand Up @@ -157,7 +165,11 @@ rclone copy --s3-no-check-bucket --s3-env-auth testStoragePath ${GATLING_AGGREGA
Context("Provider is gcp", func() {
BeforeEach(func() {
provider = "gcp"
expectedValue = ""
expectedValue = `
GATLING_AGGREGATE_DIR=testResultsDirectoryPath
rclone config create gs "google cloud storage" --gcs-anonymous
rclone copy testStoragePath ${GATLING_AGGREGATE_DIR}
`
})
It("Provider is gcp", func() {
Expect(getGatlingAggregateResultCommand(resultsDirectoryPath, provider, region, storagePath)).To(Equal(expectedValue))
Expand Down Expand Up @@ -238,7 +250,11 @@ rclone copy ${GATLING_AGGREGATE_DIR} --exclude "*.log" --s3-no-check-bucket --s3
Context("Provider is gcp", func() {
BeforeEach(func() {
provider = "gcp"
expectedValue = ""
expectedValue = `
GATLING_AGGREGATE_DIR=testResultsDirectoryPath
rclone config create gs "google cloud storage" --gcs-anonymous
rclone copy ${GATLING_AGGREGATE_DIR} --exclude "*.log" testStoragePath
`
})
It("Provider is gcp", func() {
Expect(getGatlingTransferReportCommand(resultsDirectoryPath, provider, region, storagePath)).To(Equal(expectedValue))
Expand Down
30 changes: 26 additions & 4 deletions controllers/gatling_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -274,7 +274,7 @@ func (r *GatlingReconciler) gatlingRunnerReconcile(ctx context.Context, req ctrl
// Implementation of reconciler logic for the reporter job
func (r *GatlingReconciler) gatlingReporterReconcile(ctx context.Context, req ctrl.Request, gatling *gatlingv1alpha1.Gatling, log logr.Logger) (bool, error) {
// Check if cloud storage info is given, and skip the reporter job if prerequisites are not met
if r.getCloudStorageProvider(gatling) == "" || r.getCloudStorageRegion(gatling) == "" || r.getCloudStorageBucket(gatling) == "" {
if r.getCloudStorageProvider(gatling) == "" || (r.getCloudStorageRegion(gatling) == "" && r.getCloudStorageProvider(gatling) == "aws") || r.getCloudStorageBucket(gatling) == "" {
log.Error(nil, "Minimum cloud storage info is not given, thus skip reporting reconcile, and requeue")
gatling.Status.ReportCompleted = true
gatling.Status.NotificationCompleted = false
Expand Down Expand Up @@ -452,7 +452,9 @@ func (r *GatlingReconciler) newGatlingRunnerJobForCR(gatling *gatlingv1alpha1.Ga
Completions: r.getGatlingRunnerJobParallelism(gatling),
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Affinity: r.getPodAffinity(gatling),
Affinity: r.getPodAffinity(gatling),
Tolerations: r.getPodTolerations(gatling),
ServiceAccountName: r.getPodServiceAccountName(gatling),
InitContainers: []corev1.Container{
{
Name: "gatling-runner",
Expand Down Expand Up @@ -498,7 +500,9 @@ func (r *GatlingReconciler) newGatlingRunnerJobForCR(gatling *gatlingv1alpha1.Ga
Completions: &gatling.Spec.TestScenarioSpec.Parallelism,
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Affinity: r.getPodAffinity(gatling),
Affinity: r.getPodAffinity(gatling),
Tolerations: r.getPodTolerations(gatling),
ServiceAccountName: r.getPodServiceAccountName(gatling),
Containers: []corev1.Container{
{
Name: "gatling-runner",
Expand Down Expand Up @@ -553,7 +557,9 @@ func (r *GatlingReconciler) newGatlingReporterJobForCR(gatling *gatlingv1alpha1.
Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Affinity: r.getPodAffinity(gatling),
Affinity: r.getPodAffinity(gatling),
Tolerations: r.getPodTolerations(gatling),
ServiceAccountName: r.getPodServiceAccountName(gatling),
InitContainers: []corev1.Container{
{
Name: "gatling-result-aggregator",
Expand Down Expand Up @@ -893,6 +899,22 @@ func (r *GatlingReconciler) getPodAffinity(gatling *gatlingv1alpha1.Gatling) *co
return &affinity
}

// getPodTolerations returns the tolerations configured on the Gatling CR's
// PodSpec, or a non-nil empty slice when none are set.
//
// Note: the previous implementation compared &gatling.Spec.PodSpec and
// &gatling.Spec.PodSpec.Tolerations against nil; taking the address of a
// struct field never yields nil, so those guards were always true (go vet
// reports this). Check the slice itself instead.
func (r *GatlingReconciler) getPodTolerations(gatling *gatlingv1alpha1.Gatling) []corev1.Toleration {
	if gatling.Spec.PodSpec.Tolerations != nil {
		return gatling.Spec.PodSpec.Tolerations
	}
	// Preserve the original contract of returning a non-nil empty slice.
	return []corev1.Toleration{}
}

// getPodServiceAccountName returns the serviceAccountName configured on the
// Gatling CR's PodSpec. The zero value "" is returned when unset, which lets
// Kubernetes fall back to the namespace's default service account.
//
// The previous &field != nil guards were vacuous (the address of a struct
// field is never nil; go vet flags this), so this is a direct field read
// with identical behavior.
func (r *GatlingReconciler) getPodServiceAccountName(gatling *gatlingv1alpha1.Gatling) string {
	return gatling.Spec.PodSpec.ServiceAccountName
}

func (r *GatlingReconciler) getSimulationsDirectoryPath(gatling *gatlingv1alpha1.Gatling) string {
path := defaultSimulationsDirectoryPath
if &gatling.Spec.TestScenarioSpec != nil && gatling.Spec.TestScenarioSpec.SimulationsDirectoryPath != "" {
Expand Down

0 comments on commit 8111bee

Please sign in to comment.