diff --git a/.chainsaw.yaml b/.chainsaw.yaml index 2a75d98..38cac6e 100644 --- a/.chainsaw.yaml +++ b/.chainsaw.yaml @@ -3,13 +3,14 @@ kind: Configuration metadata: name: custom-config spec: + # namespace: chainsaw timeouts: apply: 120s assert: 400s cleanup: 120s delete: 240s error: 120s - exec: 45s - skipDelete: false + exec: 200s + # skipDelete: true failFast: true parallel: 1 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index aef8e9b..46d4033 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -33,6 +33,7 @@ jobs: strategy: matrix: k8s-version: ['1.26.15', '1.27.16'] + product-version: ['3.7.1', '3.8.0'] steps: - name: Clone the code uses: actions/checkout@v4 @@ -61,4 +62,5 @@ jobs: KINDTEST_K8S_VERSION: ${{ matrix.k8s-version }} KUBECONFIG: kind-kubeconfig-${{ matrix.k8s-version }} KIND_KUBECONFIG: kind-kubeconfig-${{ matrix.k8s-version }} + PRODUCT_VERSION: ${{ matrix.product-version }} run: make chainsaw-test diff --git a/Dockerfile b/Dockerfile index b4d4588..3499579 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM golang:1.23 as builder +FROM quay.io/zncdatadev/go-devel:1.23.2-kubedoop0.0.0-dev AS builder ARG TARGETOS ARG TARGETARCH diff --git a/Makefile b/Makefile index 9189c53..ac8b318 100644 --- a/Makefile +++ b/Makefile @@ -261,6 +261,7 @@ endif # Tool Versions KINDTEST_K8S_VERSION ?= 1.26.15 CHAINSAW_VERSION ?= v0.2.11 +PRODUCT_VERSION ?= 3.7.1 KIND_IMAGE ?= kindest/node:v${KINDTEST_K8S_VERSION} KIND_KUBECONFIG ?= ./kind-kubeconfig-$(KINDTEST_K8S_VERSION) @@ -314,7 +315,7 @@ chainsaw-setup: ## Run the chainsaw setup .PHONY: chainsaw-test chainsaw-test: chainsaw ## Run the chainsaw test - KUBECONFIG=$(KIND_KUBECONFIG) $(CHAINSAW) test --cluster cluster-1=$(KIND_KUBECONFIG) --test-dir ./test/e2e/ + echo "product_version: $(PRODUCT_VERSION)" | KUBECONFIG=$(KIND_KUBECONFIG) $(CHAINSAW) test --cluster cluster-1=$(KIND_KUBECONFIG) --test-dir ./test/e2e/ --values - .PHONY: chainsaw-cleanup chainsaw-cleanup: ## Run the chainsaw cleanup diff --git a/api/v1alpha1/kafkacluster_types.go b/api/v1alpha1/kafkacluster_types.go index 4da31d4..6d7c8ab 100644 --- a/api/v1alpha1/kafkacluster_types.go +++ b/api/v1alpha1/kafkacluster_types.go @@ -46,7 +46,7 @@ const ( const ( ImageRepository = "quay.io/zncdatadev/kafka" ImageTag = "3.7.1-kubedoop0.0.0-dev" - ImagePullPolicy = corev1.PullAlways + ImagePullPolicy = corev1.PullIfNotPresent KubedoopKafkaDataDirName = "data" // kafka log dirs KubedoopLogConfigDirName = "log-config" diff --git a/go.mod b/go.mod index 4dbefe1..1dbf746 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/zncdatadev/kafka-operator -go 1.23.2 +go 1.23.4 require ( emperror.dev/errors v0.8.1 diff --git a/internal/controller/configmap.go b/internal/controller/configmap.go index f8434a3..ad8c4f4 100644 --- a/internal/controller/configmap.go +++ b/internal/controller/configmap.go @@ -44,12 +44,17 @@ func NewConfigMap( } } func (c *ConfigMapReconciler) Build(ctx context.Context) (client.Object, error) { + var loggingConfigSpec *kafkav1alpha1.LoggingConfigSpec + if c.MergedCfg.Config != nil && c.MergedCfg.Config.Logging != nil && c.MergedCfg.Config.Logging.Broker != nil { + loggingConfigSpec = c.MergedCfg.Config.Logging.Broker + } + builder := common.ConfigMapBuilder{ Name: common.CreateConfigName(c.Instance.GetName(), c.GroupName), Namespace: c.Instance.Namespace, Labels: c.Labels, ConfigGenerators: []common.ConfigGenerator{ - &config.Log4jConfGenerator{LoggingSpec: 
c.MergedCfg.Config.Logging.Broker, Container: string(common.Kafka)}, + &config.Log4jConfGenerator{LoggingSpec: loggingConfigSpec, Container: string(common.Kafka)}, &config.SecurityConfGenerator{}, &config.KafkaServerConfGenerator{KafkaTlsSecurity: c.KafkaTlsSecurity}, // &KafkaConfGenerator{sslSpec: c.MergedCfg.Config.Ssl}, diff --git a/internal/controller/statefulset.go b/internal/controller/statefulset.go index c1e0806..ba81c6e 100644 --- a/internal/controller/statefulset.go +++ b/internal/controller/statefulset.go @@ -128,6 +128,7 @@ func (s *StatefulSetReconciler) makeKafkaContainer() []corev1.Container { groupSvcName := svc.CreateGroupServiceName(s.Instance.GetName(), s.GroupName) builder := container.NewKafkaContainerBuilder(imageName, util.ImagePullPolicy(imageSpec), zNode, resourceSpec, s.KafkaTlsSecurity, s.Instance.Namespace, groupSvcName) kafkaContainer := builder.Build(builder) + return []corev1.Container{ kafkaContainer, } diff --git a/internal/controller/svc/cluster.go b/internal/controller/svc/cluster.go index b621c02..bfb3f14 100644 --- a/internal/controller/svc/cluster.go +++ b/internal/controller/svc/cluster.go @@ -21,7 +21,7 @@ func NewClusterService( headlessServiceType := common.Service serviceType := corev1.ServiceTypeNodePort builder := common.NewServiceBuilder( - CreateGroupServiceName(instance.GetName(), ""), + CreateClusterServiceName(instance.GetName()), instance.GetNamespace(), labels, makePorts(tlsSecurity), diff --git a/internal/controller/svc/pod.go b/internal/controller/svc/pod.go index 084f5c4..cba44fa 100644 --- a/internal/controller/svc/pod.go +++ b/internal/controller/svc/pod.go @@ -133,7 +133,7 @@ func (p *PodServiceReconciler) ServicePorts() []corev1.ServicePort { { Name: p.KafkaTlsSecurity.ClientPortName(), Port: int32(p.KafkaTlsSecurity.ClientPort()), - TargetPort: intstr.FromString(kafkav1alpha1.ClientPortName), + TargetPort: intstr.FromString(p.KafkaTlsSecurity.ClientPortName()), }, { Name: kafkav1alpha1.MetricsPortName, diff --git a/internal/security/tls.go b/internal/security/tls.go index 4c94195..6b06cf9 100644 --- a/internal/security/tls.go +++ b/internal/security/tls.go @@ -2,6 +2,7 @@ package security import ( "fmt" + "strings" kafkav1alpha1 "github.com/zncdatadev/kafka-operator/api/v1alpha1" "github.com/zncdatadev/kafka-operator/internal/util" @@ -169,7 +170,6 @@ func (k *KafkaTlsSecurity) KcatClientSsl(certDirectory string) []string { func (k *KafkaTlsSecurity) AddVolumeAndVolumeMounts(sts *appsv1.StatefulSet) { kafkaContainer := k.getContainer(sts.Spec.Template.Spec.Containers, "kafka") if tlsServerSecretClass := k.TlsServerSecretClass(); tlsServerSecretClass != "" { - k.AddVolume(sts, CreateTlsVolume(KubedoopTLSCertServerDirName, tlsServerSecretClass, k.SSLStorePassword)) // cbKcatProber.AddVolumeMount(KubedoopTLSCertServerDirName, KubedoopTLSCertServerDir) todo k.AddVolume(sts, CreateTlsKeystoreVolume(KubedoopTLSKeyStoreServerDirName, tlsServerSecretClass, k.SSLStorePassword)) k.AddVolumeMount(kafkaContainer, KubedoopTLSKeyStoreServerDirName, KubedoopTLSKeyStoreServerDir) @@ -235,28 +235,18 @@ func (k *KafkaTlsSecurity) ConfigSettings() map[string]string { config[InterSSLClientAuth] = "required" } // common - config[InterBrokerListenerName] = "internal" + config[InterBrokerListenerName] = "INTERNAL" return config } -func CreateTlsVolume(volumeName, secretClass, sslStorePassword string) corev1.Volume { - builder := util.SecretVolumeBuilder{VolumeName: volumeName} - builder.SetAnnotations(map[string]string{ - 
constants.AnnotationSecretsClass: secretClass,
-		constants.AnnotationSecretsScope: fmt.Sprintf("%s,%s", constants.PodScope, constants.NodeScope),
-	})
-	if sslStorePassword != "" {
-		builder.AddAnnotation(constants.AnnotationSecretsPKCS12Password, sslStorePassword)
-	}
-	return builder.Build()
-}
-
 //
 // CreateTlsKeystoreVolume creates ephemeral volumes to mount the SecretClass into the Pods as keystores
 func CreateTlsKeystoreVolume(volumeName, secretClass, sslStorePassword string) corev1.Volume {
 	builder := util.SecretVolumeBuilder{VolumeName: volumeName}
+	svcScope := fmt.Sprintf("%s=%s", constants.ServiceScope, "kafkacluster-sample")
+	secretScopes := []string{svcScope, string(constants.PodScope), string(constants.NodeScope)}
 	builder.SetAnnotations(map[string]string{
 		constants.AnnotationSecretsClass:  secretClass,
-		constants.AnnotationSecretsScope:  fmt.Sprintf("%s,%s", constants.PodScope, constants.NodeScope),
+		constants.AnnotationSecretsScope:  strings.Join(secretScopes, constants.CommonDelimiter),
 		constants.AnnotationSecretsFormat: string(constants.TLSP12),
 	})
 	if sslStorePassword != "" {
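For reviewers: with the scope handling above, the keystore volume's ephemeral PVC should come out annotated roughly as below (a sketch assuming constants.CommonDelimiter renders as "," and using the tls SecretClass the e2e tests pass in; note the service scope is currently hardcoded to "kafkacluster-sample"):

    secrets.kubedoop.dev/class: tls
    secrets.kubedoop.dev/scope: service=kafkacluster-sample,pod,node
    secrets.kubedoop.dev/format: tls-p12

The client-tls-access test pod later in this diff requests its own keystore with the same class/scope/format annotations, which is how the test client ends up with certificates the broker trusts.
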
diff --git a/test/e2e/default/chainsaw-test.yaml b/test/e2e/default/chainsaw-test.yaml
deleted file mode 100644
index 7b90229..0000000
--- a/test/e2e/default/chainsaw-test.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-apiVersion: chainsaw.kyverno.io/v1alpha1
-kind: Test
-metadata:
-  name: default
-spec:
-  steps:
-  - try:
-    - apply:
-        file: ../setup/zookeeper.yaml
-    - assert:
-        file: ../setup/zookeeper-assert.yaml
-    cleanup:
-    - sleep:
-        duration: 30s
-  - try:
-    - apply:
-        file: kafka.yaml
-    - assert:
-        file: kafka-assert.yaml
-    cleanup:
-    - sleep:
-        duration: 10s
-    catch:
-    - sleep:
-        duration: 10s
-    - script:
-        env:
-        - name: NAMESPACE
-          value: ($namespace)
-        content: |
-          set -ex
-          free -h
-          df -h
-          kubectl -n $NAMESPACE get pods
-    - describe:
-        apiVersion: v1
-        kind: Pod
-        selector: app.kubernetes.io/component=broker
-    - podLogs:
-        selector: app.kubernetes.io/component=broker
-        tail: -1
diff --git a/test/e2e/logging/chainsaw-test.yaml b/test/e2e/logging/chainsaw-test.yaml
new file mode 100644
index 0000000..ac9b441
--- /dev/null
+++ b/test/e2e/logging/chainsaw-test.yaml
@@ -0,0 +1,104 @@
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  name: logging
+spec:
+  bindings:
+  steps:
+    - name: install vector-aggregator
+      try:
+        - script:
+            content: >-
+              helm upgrade --install vector-aggregator vector
+              --namespace $NAMESPACE
+              --version 0.36.1
+              --repo https://helm.vector.dev
+              --values vector-aggregator-values.yaml
+        - apply:
+            file: vector-aggregator.yaml
+        - assert:
+            file: vector-aggregator-assert.yaml
+    - name: install zookeeper cluster
+      try:
+        - apply:
+            file: ../setup/zookeeper.yaml
+        - assert:
+            file: ../setup/zookeeper-assert.yaml
+      cleanup:
+        - sleep:
+            duration: 30s
+    - name: install kafka cluster
+      try:
+        - apply:
+            file: kafka.yaml
+        - assert:
+            file: kafka-assert.yaml
+    - name: assert kafka logs
+      try:
+        - sleep:
+            duration: 50s
+        - script:
+            env:
+              - name: NAMESPACE
+                value: ($namespace)
+            content: |
+              #!/bin/bash
+              # Get logs from vector-aggregator-0 and check for specific log pattern
+              kubectl -n $NAMESPACE logs statefulset/vector-aggregator -c vector | \
+                grep -q '"cluster":"kafkacluster-sample","container":"kafka","errors":\[\],"file":"kafka.log4j.xml"'
+              exit_code=$?
+
+              if [ $exit_code -eq 0 ]; then
+                echo "Found expected log pattern"
+                exit 0
+              else
+                echo "Did not find expected log pattern"
+                exit 1
+              fi
+      cleanup:
+        - sleep:
+            duration: 50s
+    - name: access kafka
+      try:
+        - apply:
+            file: client-access-configmap.yaml
+        - script:
+            env:
+              - name: NAMESPACE
+                value: ($namespace)
+            content: |
+              export KAFKA_IMAGE=$(
+                kubectl -n $NAMESPACE get statefulset kafkacluster-sample-default-broker \
+                  -o jsonpath='{.spec.template.spec.containers[?(.name=="kafka")].image}'
+              )
+              export KAFKA_DISCOVERY=$(
+                kubectl -n $NAMESPACE get kafkaclusters.kafka.kubedoop.dev \
+                  -o jsonpath='{.items[0].metadata.name}'
+              )
+              echo "KAFKA_IMAGE=$KAFKA_IMAGE"
+              echo "KAFKA_DISCOVERY=$KAFKA_DISCOVERY"
+
+              envsubst '${KAFKA_IMAGE} ${KAFKA_DISCOVERY}' < client-access-pod.txt | kubectl -n $NAMESPACE apply -f -
+        - assert:
+            timeout: 150s
+            resource:
+              apiVersion: v1
+              kind: Pod
+              metadata:
+                name: client-access
+              status:
+                phase: Succeeded
+      catch:
+        - podLogs:
+            name: client-access
+            container: client-access
+        - describe:
+            apiVersion: v1
+            kind: Pod
+            name: client-access
+        - script:
+            env:
+              - name: NAMESPACE
+                value: ($namespace)
+            content: |
+              kubectl -n $NAMESPACE get pod client-access -o yaml
diff --git a/test/e2e/logging/client-access-configmap.yaml b/test/e2e/logging/client-access-configmap.yaml
new file mode 100755
index 0000000..ea9cef5
--- /dev/null
+++ b/test/e2e/logging/client-access-configmap.yaml
@@ -0,0 +1,65 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: client-access
+data:
+  client-access.sh: |
+    #!/usr/bin/env bash
+
+    # to be safe
+    unset TOPIC
+    unset BAD_TOPIC
+
+    echo "Connecting to bootstrap address $KAFKA"
+
+    echo "Start client access testing..."
+    ############################################################################
+    # Test the secured connection
+    ############################################################################
+    # create random topics
+    TOPIC=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 10)
+    BAD_TOPIC=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 10)
+
+    if /kubedoop/kafka/bin/kafka-topics.sh --create --topic "$TOPIC" --bootstrap-server "$KAFKA"
+    then
+      echo "[SUCCESS] client topic created!"
+    else
+      echo "[ERROR] client topic creation failed!"
+      exit 1
+    fi
+
+    if /kubedoop/kafka/bin/kafka-topics.sh --list --topic "$TOPIC" --bootstrap-server "$KAFKA" | grep "$TOPIC"
+    then
+      echo "[SUCCESS] client topic read!"
+    else
+      echo "[ERROR] client topic read failed!"
+      exit 1
+    fi
+
+    ############################################################################
+    # Test the connection with bad host name
+    ############################################################################
+    if /kubedoop/kafka/bin/kafka-topics.sh --create --topic "$BAD_TOPIC" --bootstrap-server localhost:9093 &> /dev/null
+    then
+      echo "[ERROR] client topic created with bad host name!"
+      exit 1
+    else
+      echo "[SUCCESS] client topic creation failed with bad host name - 'localhost:9093' !"
+    fi
+
+    ############################################################################
+    # Test producer and consumer
+    ############################################################################
+    # echo "Start producer and consumer testing..."
+    # echo "Hello, World!" | /kubedoop/kafka/bin/kafka-console-producer.sh --topic "$TOPIC" --bootstrap-server "$KAFKA"
+
+    # if /kubedoop/kafka/bin/kafka-console-consumer.sh --topic "$TOPIC" --from-beginning --bootstrap-server "$KAFKA" --max-messages 1 --timeout-ms 20000 | grep "Hello, World!"
+    # then
+    #   echo "[SUCCESS] producer and consumer testing!"
+    # else
+    #   echo "[ERROR] producer and consumer testing failed!"
+    #   exit 1
+    # fi
+
+    echo "All tests successful!"
+ exit 0 diff --git a/test/e2e/logging/client-access-pod.txt b/test/e2e/logging/client-access-pod.txt new file mode 100644 index 0000000..ac61cb5 --- /dev/null +++ b/test/e2e/logging/client-access-pod.txt @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Pod +metadata: + name: client-access + labels: + name: client-access +spec: + restartPolicy: Never + containers: + - name: client-access + securityContext: + runAsUser: 0 + image: ${KAFKA_IMAGE} + env: + - name: KAFKA + valueFrom: + configMapKeyRef: + key: KAFKA + name: ${KAFKA_DISCOVERY} + - name: KAFKA_HEAP_OPTS + value: "-Xmx819m" + command: + - /bin/sh + args: + + - -c + - bash /kubedoop/test/client-access.sh + resources: + limits: + memory: "1.5Gi" + cpu: "300m" + volumeMounts: + - name: test-scripts + mountPath: /kubedoop/test + volumes: + - name: test-scripts + configMap: + name: client-access diff --git a/test/e2e/vector/kafka-assert.yaml b/test/e2e/logging/kafka-assert.yaml similarity index 75% rename from test/e2e/vector/kafka-assert.yaml rename to test/e2e/logging/kafka-assert.yaml index 6b4d2c5..bb233d2 100644 --- a/test/e2e/vector/kafka-assert.yaml +++ b/test/e2e/logging/kafka-assert.yaml @@ -4,8 +4,9 @@ metadata: name: kafkacluster-sample-default-broker status: replicas: 3 + availableReplicas: 3 --- apiVersion: v1 kind: ConfigMap metadata: - name: kafkacluster-sample \ No newline at end of file + name: kafkacluster-sample diff --git a/test/e2e/vector/kafka.yaml b/test/e2e/logging/kafka.yaml similarity index 84% rename from test/e2e/vector/kafka.yaml rename to test/e2e/logging/kafka.yaml index 280f9b2..bc53b84 100644 --- a/test/e2e/vector/kafka.yaml +++ b/test/e2e/logging/kafka.yaml @@ -9,11 +9,9 @@ metadata: app.kubernetes.io/created-by: kafka-operator name: kafkacluster-sample spec: + image: + productVersion: ($values.product_version) clusterConfig: - tls: - sslStorePassword: "123456" - internalSecretClass: tls - serverSecretClass: tls zookeeperConfigMapName: kafka-znode vectorAggregatorConfigMapName: vector-aggregator-discovery brokers: @@ -23,7 +21,7 @@ spec: config: resources: cpu: - min: 300m + min: 200m max: 600m memory: limit: "1Gi" diff --git a/test/e2e/vector/aggragator-assert.yaml b/test/e2e/logging/vector-aggregator-assert.yaml similarity index 57% rename from test/e2e/vector/aggragator-assert.yaml rename to test/e2e/logging/vector-aggregator-assert.yaml index bfefcd6..903ff12 100644 --- a/test/e2e/vector/aggragator-assert.yaml +++ b/test/e2e/logging/vector-aggregator-assert.yaml @@ -1,20 +1,10 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: vector-aggregator ---- -apiVersion: v1 -kind: Service -metadata: - name: vector-aggregator -spec: - type: ClusterIP ---- + apiVersion: apps/v1 kind: StatefulSet metadata: name: vector-aggregator status: + readyReplicas: 1 replicas: 1 --- apiVersion: v1 @@ -23,4 +13,3 @@ data: kind: ConfigMap metadata: name: vector-aggregator-discovery - diff --git a/test/e2e/logging/vector-aggregator-values.yaml b/test/e2e/logging/vector-aggregator-values.yaml new file mode 100644 index 0000000..a496633 --- /dev/null +++ b/test/e2e/logging/vector-aggregator-values.yaml @@ -0,0 +1,16 @@ +api: + enabled: true +log_schema: + host_key: "pod" +sinks: + debug_console: + type: "console" + inputs: + - vector + encoding: + codec: "json" +sources: + vector: + address: 0.0.0.0:6000 + type: vector + version: "2" diff --git a/test/e2e/logging/vector-aggregator.yaml b/test/e2e/logging/vector-aggregator.yaml new file mode 100644 index 0000000..e2b7066 --- /dev/null +++ b/test/e2e/logging/vector-aggregator.yaml 
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: vector-aggregator-discovery
+data:
+  ADDRESS: "vector-aggregator:6000"
diff --git a/test/e2e/tls/chainsaw-test.yaml b/test/e2e/tls/chainsaw-test.yaml
new file mode 100644
index 0000000..178f5c4
--- /dev/null
+++ b/test/e2e/tls/chainsaw-test.yaml
@@ -0,0 +1,55 @@
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  name: tls
+spec:
+  bindings:
+  steps:
+    - name: install zookeeper cluster
+      try:
+        - apply:
+            file: ../setup/zookeeper.yaml
+        - assert:
+            file: ../setup/zookeeper-assert.yaml
+      cleanup:
+        - sleep:
+            duration: 30s
+    - name: install kafka cluster
+      try:
+        - apply:
+            file: kafka.yaml
+        - assert:
+            file: kafka-assert.yaml
+    - name: tls client access test
+      try:
+        - apply:
+            file: client-tls-access-configmap.yaml
+        - script:
+            env:
+              - name: NAMESPACE
+                value: ($namespace)
+            content: |
+              export KAFKA_IMAGE=$(
+                kubectl -n $NAMESPACE get statefulset kafkacluster-sample-default-broker \
+                  -o jsonpath='{.spec.template.spec.containers[?(.name=="kafka")].image}'
+              )
+              export KAFKA_DISCOVERY=$(
+                kubectl -n $NAMESPACE get kafkaclusters.kafka.kubedoop.dev \
+                  -o jsonpath='{.items[0].metadata.name}'
+              )
+              echo "KAFKA_IMAGE=$KAFKA_IMAGE"
+              echo "KAFKA_DISCOVERY=$KAFKA_DISCOVERY"
+
+              envsubst '${KAFKA_IMAGE} ${KAFKA_DISCOVERY}' < client-tls-access-pod.txt | kubectl -n $NAMESPACE apply -f -
+        - assert:
+            resource:
+              apiVersion: v1
+              kind: Pod
+              metadata:
+                name: client-tls-access
+              status:
+                phase: Succeeded
+      catch:
+        - podLogs:
+            name: client-tls-access
+            container: client-tls-access
diff --git a/test/e2e/tls/client-tls-access-configmap.yaml b/test/e2e/tls/client-tls-access-configmap.yaml
new file mode 100755
index 0000000..438371d
--- /dev/null
+++ b/test/e2e/tls/client-tls-access-configmap.yaml
@@ -0,0 +1,65 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: client-tls-access
+data:
+  client-tls-access.sh: |
+    #!/usr/bin/env bash
+
+    # to be safe
+    unset TOPIC
+    unset BAD_TOPIC
+
+    echo "Connecting to bootstrap address $KAFKA"
+
+    echo "Start client TLS testing..."
+    ############################################################################
+    # Test the secured connection
+    ############################################################################
+    # create random topics
+    TOPIC=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 10)
+    BAD_TOPIC=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 10)
+
+    # write client SSL config; keystore/truststore are mounted at /kubedoop/client-tls
+    echo -e "security.protocol=SSL\nssl.keystore.location=/kubedoop/client-tls/keystore.p12\nssl.keystore.password=123456\nssl.truststore.location=/kubedoop/client-tls/truststore.p12\nssl.truststore.password=123456" > /tmp/client.config
+
+    if /kubedoop/kafka/bin/kafka-topics.sh --create --topic "$TOPIC" --bootstrap-server "$KAFKA" --command-config /tmp/client.config
+    then
+      echo "[SUCCESS] Secure client topic created!"
+    else
+      echo "[ERROR] Secure client topic creation failed!"
+      exit 1
+    fi
+
+    if /kubedoop/kafka/bin/kafka-topics.sh --list --topic "$TOPIC" --bootstrap-server "$KAFKA" --command-config /tmp/client.config | grep "$TOPIC"
+    then
+      echo "[SUCCESS] Secure client topic read!"
+    else
+      echo "[ERROR] Secure client topic read failed!"
+      exit 1
+    fi
+
+    ############################################################################
+    # Test the connection without certificates
+    ############################################################################
+    if /kubedoop/kafka/bin/kafka-topics.sh --create --topic "$BAD_TOPIC" --bootstrap-server "$KAFKA" &> /dev/null
+    then
+      echo "[ERROR] Secure client topic created without certificates!"
+      exit 1
+    else
+      echo "[SUCCESS] Secure client topic creation failed without certificates!"
+ fi + + ############################################################################ + # Test the connection with bad host name + ############################################################################ + if /kubedoop/kafka/bin/kafka-topics.sh --create --topic "$BAD_TOPIC" --bootstrap-server localhost:9093 --command-config /tmp/client.config &> /dev/null + then + echo "[ERROR] Secure client topic created with bad host name!" + exit 1 + else + echo "[SUCCESS] Secure client topic creation failed with bad host name!" + fi + + echo "All client TLS tests successful!" + exit 0 diff --git a/test/e2e/tls/client-tls-access-pod.txt b/test/e2e/tls/client-tls-access-pod.txt new file mode 100644 index 0000000..b79b729 --- /dev/null +++ b/test/e2e/tls/client-tls-access-pod.txt @@ -0,0 +1,56 @@ +apiVersion: v1 +kind: Pod +metadata: + name: client-tls-access + labels: + name: client-tls-access +spec: + restartPolicy: Never + containers: + - name: client-tls-access + securityContext: + runAsUser: 0 + image: ${KAFKA_IMAGE} + env: + - name: KAFKA + valueFrom: + configMapKeyRef: + key: KAFKA + name: ${KAFKA_DISCOVERY} + - name: KAFKA_HEAP_OPTS + value: "-Xmx819m" + command: + - /bin/sh + args: + - -c + - bash /kubedoop/test/client-tls-access.sh + resources: + limits: + memory: "1.5Gi" + cpu: "300m" + volumeMounts: + - name: client-tls + mountPath: /kubedoop/client-tls + - name: test-scripts + mountPath: /kubedoop/test + volumes: + - name: test-scripts + configMap: + name: client-tls-access + - name: client-tls + ephemeral: + volumeClaimTemplate: + metadata: + annotations: + secrets.kubedoop.dev/class: tls + secrets.kubedoop.dev/format: tls-p12 + secrets.kubedoop.dev/scope: pod,node,service=kafkacluster-sample + secrets.kubedoop.dev/tlsPKCS12Password: "123456" + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Mi + storageClassName: secrets.kubedoop.dev + volumeMode: Filesystem diff --git a/test/e2e/default/kafka-assert.yaml b/test/e2e/tls/kafka-assert.yaml similarity index 75% rename from test/e2e/default/kafka-assert.yaml rename to test/e2e/tls/kafka-assert.yaml index 6b4d2c5..bb233d2 100644 --- a/test/e2e/default/kafka-assert.yaml +++ b/test/e2e/tls/kafka-assert.yaml @@ -4,8 +4,9 @@ metadata: name: kafkacluster-sample-default-broker status: replicas: 3 + availableReplicas: 3 --- apiVersion: v1 kind: ConfigMap metadata: - name: kafkacluster-sample \ No newline at end of file + name: kafkacluster-sample diff --git a/test/e2e/default/kafka.yaml b/test/e2e/tls/kafka.yaml similarity index 74% rename from test/e2e/default/kafka.yaml rename to test/e2e/tls/kafka.yaml index 8e8bf48..88294b8 100644 --- a/test/e2e/default/kafka.yaml +++ b/test/e2e/tls/kafka.yaml @@ -9,6 +9,8 @@ metadata: app.kubernetes.io/created-by: kafka-operator name: kafkacluster-sample spec: + image: + productVersion: ($values.product_version) clusterConfig: tls: sslStorePassword: "123456" @@ -23,15 +25,6 @@ spec: resources: cpu: min: 200m - max: 600m + max: 400m memory: limit: "1Gi" - logging: - broker: - loggers: - test: - level: DEBUG - console: - level: WARN - file: - level: ERROR diff --git a/test/e2e/vector/aggragator.yaml b/test/e2e/vector/aggragator.yaml deleted file mode 100644 index 001aabd..0000000 --- a/test/e2e/vector/aggragator.yaml +++ /dev/null @@ -1,176 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: elasticsearch - labels: - app: elasticsearch -spec: - containers: - - name: es7 - image: docker.elastic.co/elasticsearch/elasticsearch:7.17.7 - env: - - name: discovery.type - value: 
"single-node" - - name: bootstrap.memory_lock - value: "true" - - name: ES_JAVA_OPTS - value: "-Xms512m -Xmx512m" - - name: transport.host - value: "127.0.0.1" - - name: xpack.security.enabled - value: "false" - ports: - - containerPort: 9200 - name: http - - name: kibana7 - image: docker.elastic.co/kibana/kibana:7.17.7 - env: - - name: ELASTICSEARCH_HOSTS - value: "http://localhost:9200" - - name: i18n.locale - value: "zh-CN" - - name: xpack.security.enabled - value: "false" - ports: - - containerPort: 5601 - name: kibana ---- -## create svc for elasticsearch -apiVersion: v1 -kind: Service -metadata: - name: elasticsearch -spec: - ports: - - name: http - port: 9200 - protocol: TCP - targetPort: 9200 - selector: - app: elasticsearch - type: ClusterIP - ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: vector-aggregator -data: - vector.yaml: | - api: - enabled: true - log_schema: - host_key: "pod" - sinks: - debug_console: - type: "console" - inputs: - - vector - encoding: - codec: "json" - essink: - type: elasticsearch - inputs: - - vector - api_version: auto - compression: none - doc_type: _doc - endpoints: - - http://elasticsearch:9200 - mode: bulk - # query: - # X-Powered-By: Vector - sources: - vector: - address: 0.0.0.0:6000 - type: vector - version: "2" ---- -apiVersion: v1 -kind: Service -metadata: - name: vector-aggregator -spec: - clusterIP: "" - internalTrafficPolicy: Cluster - ipFamilies: - - IPv4 - ipFamilyPolicy: SingleStack - ports: - - name: vector - port: 6000 - protocol: TCP - targetPort: 6000 - selector: - app.kubernetes.io/component: Aggregator - app.kubernetes.io/instance: vector-aggregator - app.kubernetes.io/name: vector - sessionAffinity: None - type: ClusterIP ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - generation: 1 - labels: - app.kubernetes.io/component: Aggregator - app.kubernetes.io/instance: vector-aggregator - app.kubernetes.io/name: vector - name: vector-aggregator -spec: - persistentVolumeClaimRetentionPolicy: - whenDeleted: Retain - whenScaled: Retain - podManagementPolicy: OrderedReady - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - app.kubernetes.io/component: Aggregator - app.kubernetes.io/instance: vector-aggregator - app.kubernetes.io/name: vector - serviceName: vector-aggregator-headless - template: - metadata: - labels: - app.kubernetes.io/component: Aggregator - app.kubernetes.io/instance: vector-aggregator - app.kubernetes.io/name: vector - vector.dev/exclude: "true" - spec: - containers: - - args: - - --config-dir - - /etc/vector/ - image: timberio/vector:0.38.0-alpine - imagePullPolicy: IfNotPresent - name: vector - ports: - - containerPort: 6000 - name: vector - protocol: TCP - resources: {} - volumeMounts: - - mountPath: /vector-data-dir - name: data - - mountPath: /etc/vector/ - name: config - readOnly: true - dnsPolicy: ClusterFirst - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - terminationGracePeriodSeconds: 60 - volumes: - - emptyDir: {} - name: data - - name: config - configMap: - name: vector-aggregator ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: vector-aggregator-discovery -data: - ADDRESS: "vector-aggregator:6000" \ No newline at end of file diff --git a/test/e2e/vector/chainsaw-test.yaml b/test/e2e/vector/chainsaw-test.yaml deleted file mode 100644 index 6f75631..0000000 --- a/test/e2e/vector/chainsaw-test.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: chainsaw.kyverno.io/v1alpha1 -kind: Test -metadata: - name: vector -spec: - steps: - - try: 
- - apply: - file: aggragator.yaml - - assert: - file: aggragator-assert.yaml - - try: - - apply: - file: ../setup/zookeeper.yaml - - assert: - file: ../setup/zookeeper-assert.yaml - cleanup: - - sleep: - duration: 30s - - try: - - apply: - file: kafka.yaml - - assert: - file: kafka-assert.yaml - cleanup: - - sleep: - duration: 50s
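A quick way to run one cell of the new CI matrix locally (a sketch using only the Makefile targets and variables added above; the version values come from the workflow matrix):

    make chainsaw-setup KINDTEST_K8S_VERSION=1.27.16
    make chainsaw-test KINDTEST_K8S_VERSION=1.27.16 PRODUCT_VERSION=3.8.0
    make chainsaw-cleanup KINDTEST_K8S_VERSION=1.27.16

chainsaw-test pipes "product_version: $(PRODUCT_VERSION)" to chainsaw on stdin via --values -, and the kafka.yaml manifests read it back through the ($values.product_version) binding, so the same tests exercise both 3.7.1 and 3.8.0.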