test(e2e): refactor e2e test case for test coverage #131

Merged 4 commits on Jan 12, 2025.
Changes from all commits:

5 changes: 3 additions & 2 deletions .chainsaw.yaml
@@ -3,13 +3,14 @@ kind: Configuration
 metadata:
   name: custom-config
 spec:
   # namespace: chainsaw
   timeouts:
     apply: 120s
     assert: 400s
     cleanup: 120s
     delete: 240s
     error: 120s
-    exec: 45s
-  skipDelete: false
+    exec: 200s
+  # skipDelete: true
+  failFast: true
   parallel: 1
2 changes: 2 additions & 0 deletions .github/workflows/test.yml
@@ -33,6 +33,7 @@ jobs:
     strategy:
       matrix:
         k8s-version: ['1.26.15', '1.27.16']
+        product-version: ['3.7.1', '3.8.0']
     steps:
       - name: Clone the code
         uses: actions/checkout@v4
@@ -61,4 +62,5 @@ jobs:
           KINDTEST_K8S_VERSION: ${{ matrix.k8s-version }}
           KUBECONFIG: kind-kubeconfig-${{ matrix.k8s-version }}
           KIND_KUBECONFIG: kind-kubeconfig-${{ matrix.k8s-version }}
+          PRODUCT_VERSION: ${{ matrix.product-version }}
         run: make chainsaw-test
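With both axes in place, the matrix fans the e2e job out into four combinations (two Kubernetes versions times two Kafka product versions). For orientation, a sketch of how one of those jobs resolves the new variables, assuming standard GitHub Actions matrix expansion:

```yaml
# Resolved environment for the (k8s 1.27.16, Kafka 3.8.0) combination; illustrative only
env:
  KINDTEST_K8S_VERSION: "1.27.16"
  KUBECONFIG: kind-kubeconfig-1.27.16
  KIND_KUBECONFIG: kind-kubeconfig-1.27.16
  PRODUCT_VERSION: "3.8.0"
```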
2 changes: 1 addition & 1 deletion Dockerfile
@@ -1,5 +1,5 @@
 # Build the manager binary
-FROM golang:1.23 as builder
+FROM quay.io/zncdatadev/go-devel:1.23.2-kubedoop0.0.0-dev AS builder
 ARG TARGETOS
 ARG TARGETARCH
 
3 changes: 2 additions & 1 deletion Makefile
@@ -261,6 +261,7 @@ endif
 # Tool Versions
 KINDTEST_K8S_VERSION ?= 1.26.15
 CHAINSAW_VERSION ?= v0.2.11
+PRODUCT_VERSION ?= 3.7.1
 
 KIND_IMAGE ?= kindest/node:v${KINDTEST_K8S_VERSION}
 KIND_KUBECONFIG ?= ./kind-kubeconfig-$(KINDTEST_K8S_VERSION)
@@ -314,7 +315,7 @@ chainsaw-setup: ## Run the chainsaw setup
 
 .PHONY: chainsaw-test
 chainsaw-test: chainsaw ## Run the chainsaw test
-	KUBECONFIG=$(KIND_KUBECONFIG) $(CHAINSAW) test --cluster cluster-1=$(KIND_KUBECONFIG) --test-dir ./test/e2e/
+	echo "product_version: $(PRODUCT_VERSION)" | KUBECONFIG=$(KIND_KUBECONFIG) $(CHAINSAW) test --cluster cluster-1=$(KIND_KUBECONFIG) --test-dir ./test/e2e/ --values -
 
 .PHONY: chainsaw-cleanup
 chainsaw-cleanup: ## Run the chainsaw cleanup
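The chainsaw recipe now pipes a one-line YAML document into `--values -`, so the product version chosen by CI is exposed to every test through chainsaw's `$values` binding. A minimal sketch of how a test could consume it; the test name and the kafka.yaml templating note are illustrative assumptions, not files from this PR:

```yaml
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  name: values-example          # hypothetical test, for illustration only
spec:
  bindings:
    - name: product_version
      value: ($values.product_version)   # "3.7.1" or "3.8.0" from the piped document
  steps:
    - try:
        - apply:
            # kafka.yaml could reference ($product_version) in templated fields,
            # for example to pin the broker image tag to the version under test
            file: kafka.yaml
```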
2 changes: 1 addition & 1 deletion api/v1alpha1/kafkacluster_types.go
@@ -46,7 +46,7 @@ const (
 const (
 	ImageRepository = "quay.io/zncdatadev/kafka"
 	ImageTag        = "3.7.1-kubedoop0.0.0-dev"
-	ImagePullPolicy = corev1.PullAlways
+	ImagePullPolicy = corev1.PullIfNotPresent
 
 	KubedoopKafkaDataDirName = "data" // kafka log dirs
 	KubedoopLogConfigDirName = "log-config"
2 changes: 1 addition & 1 deletion go.mod
@@ -1,6 +1,6 @@
 module github.com/zncdatadev/kafka-operator
 
-go 1.23.2
+go 1.23.4
 
 require (
 	emperror.dev/errors v0.8.1
7 changes: 6 additions & 1 deletion internal/controller/configmap.go
@@ -44,12 +44,17 @@ func NewConfigMap(
 	}
 }
 func (c *ConfigMapReconciler) Build(ctx context.Context) (client.Object, error) {
+	var loggingConfigSpec *kafkav1alpha1.LoggingConfigSpec
+	if c.MergedCfg.Config != nil && c.MergedCfg.Config.Logging != nil && c.MergedCfg.Config.Logging.Broker != nil {
+		loggingConfigSpec = c.MergedCfg.Config.Logging.Broker
+	}
+
 	builder := common.ConfigMapBuilder{
 		Name:             common.CreateConfigName(c.Instance.GetName(), c.GroupName),
 		Namespace:        c.Instance.Namespace,
 		Labels:           c.Labels,
 		ConfigGenerators: []common.ConfigGenerator{
-			&config.Log4jConfGenerator{LoggingSpec: c.MergedCfg.Config.Logging.Broker, Container: string(common.Kafka)},
+			&config.Log4jConfGenerator{LoggingSpec: loggingConfigSpec, Container: string(common.Kafka)},
 			&config.SecurityConfGenerator{},
 			&config.KafkaServerConfGenerator{KafkaTlsSecurity: c.KafkaTlsSecurity},
 			// &KafkaConfGenerator{sslSpec: c.MergedCfg.Config.Ssl},
1 change: 1 addition & 0 deletions internal/controller/statefulset.go
@@ -128,6 +128,7 @@ func (s *StatefulSetReconciler) makeKafkaContainer() []corev1.Container {
 	groupSvcName := svc.CreateGroupServiceName(s.Instance.GetName(), s.GroupName)
 	builder := container.NewKafkaContainerBuilder(imageName, util.ImagePullPolicy(imageSpec), zNode, resourceSpec, s.KafkaTlsSecurity, s.Instance.Namespace, groupSvcName)
 	kafkaContainer := builder.Build(builder)
+
 	return []corev1.Container{
 		kafkaContainer,
 	}
2 changes: 1 addition & 1 deletion internal/controller/svc/cluster.go
@@ -21,7 +21,7 @@ func NewClusterService(
 	headlessServiceType := common.Service
 	serviceType := corev1.ServiceTypeNodePort
 	builder := common.NewServiceBuilder(
-		CreateGroupServiceName(instance.GetName(), ""),
+		CreateClusterServiceName(instance.GetName()),
 		instance.GetNamespace(),
 		labels,
 		makePorts(tlsSecurity),
2 changes: 1 addition & 1 deletion internal/controller/svc/pod.go
@@ -133,7 +133,7 @@ func (p *PodServiceReconciler) ServicePorts() []corev1.ServicePort {
 		{
 			Name:       p.KafkaTlsSecurity.ClientPortName(),
 			Port:       int32(p.KafkaTlsSecurity.ClientPort()),
-			TargetPort: intstr.FromString(kafkav1alpha1.ClientPortName),
+			TargetPort: intstr.FromString(p.KafkaTlsSecurity.ClientPortName()),
 		},
 		{
 			Name: kafkav1alpha1.MetricsPortName,
20 changes: 5 additions & 15 deletions internal/security/tls.go
@@ -2,6 +2,7 @@ package security
 
 import (
 	"fmt"
+	"strings"
 
 	kafkav1alpha1 "github.com/zncdatadev/kafka-operator/api/v1alpha1"
 	"github.com/zncdatadev/kafka-operator/internal/util"
@@ -169,7 +170,6 @@ func (k *KafkaTlsSecurity) KcatClientSsl(certDirectory string) []string {
 func (k *KafkaTlsSecurity) AddVolumeAndVolumeMounts(sts *appsv1.StatefulSet) {
 	kafkaContainer := k.getContainer(sts.Spec.Template.Spec.Containers, "kafka")
 	if tlsServerSecretClass := k.TlsServerSecretClass(); tlsServerSecretClass != "" {
-		k.AddVolume(sts, CreateTlsVolume(KubedoopTLSCertServerDirName, tlsServerSecretClass, k.SSLStorePassword))
 		// cbKcatProber.AddVolumeMount(KubedoopTLSCertServerDirName, KubedoopTLSCertServerDir) todo
 		k.AddVolume(sts, CreateTlsKeystoreVolume(KubedoopTLSKeyStoreServerDirName, tlsServerSecretClass, k.SSLStorePassword))
 		k.AddVolumeMount(kafkaContainer, KubedoopTLSKeyStoreServerDirName, KubedoopTLSKeyStoreServerDir)
@@ -235,28 +235,18 @@ func (k *KafkaTlsSecurity) ConfigSettings() map[string]string {
 		config[InterSSLClientAuth] = "required"
 	}
 	// common
-	config[InterBrokerListenerName] = "internal"
+	config[InterBrokerListenerName] = "INTERNAL"
 	return config
 }
 
-func CreateTlsVolume(volumeName, secretClass, sslStorePassword string) corev1.Volume {
-	builder := util.SecretVolumeBuilder{VolumeName: volumeName}
-	builder.SetAnnotations(map[string]string{
-		constants.AnnotationSecretsClass: secretClass,
-		constants.AnnotationSecretsScope: fmt.Sprintf("%s,%s", constants.PodScope, constants.NodeScope),
-	})
-	if sslStorePassword != "" {
-		builder.AddAnnotation(constants.AnnotationSecretsPKCS12Password, sslStorePassword)
-	}
-	return builder.Build()
-}
-
 // // CreateTlsKeystoreVolume creates ephemeral volumes to mount the SecretClass into the Pods as keystores
 func CreateTlsKeystoreVolume(volumeName, secretClass, sslStorePassword string) corev1.Volume {
 	builder := util.SecretVolumeBuilder{VolumeName: volumeName}
+	svcScope := fmt.Sprintf("%s=%s", constants.ServiceScope, "kafkacluster-sample")
+	secretScopes := []string{svcScope, string(constants.PodScope), string(constants.NodeScope)}
 	builder.SetAnnotations(map[string]string{
 		constants.AnnotationSecretsClass:  secretClass,
-		constants.AnnotationSecretsScope:  fmt.Sprintf("%s,%s", constants.PodScope, constants.NodeScope),
+		constants.AnnotationSecretsScope:  strings.Join(secretScopes, constants.CommonDelimiter),
 		constants.AnnotationSecretsFormat: string(constants.TLSP12),
 	})
 	if sslStorePassword != "" {
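The net effect of the keystore-volume change is that the secret scope annotation now carries a service scope joined to the pod and node scopes. A hypothetical rendering of the resulting volume annotations, assuming a comma as CommonDelimiter and `service`/`pod`/`node` as the scope literals; the real annotation keys and values come from the operator-go constants package and are not shown in this PR:

```yaml
# Illustrative only: annotation keys, scope literals, and format value are assumptions
metadata:
  annotations:
    secrets.kubedoop.dev/class: tls-kafka                              # secretClass passed in
    secrets.kubedoop.dev/scope: service=kafkacluster-sample,pod,node   # strings.Join(secretScopes, ",")
    secrets.kubedoop.dev/format: tls-p12
```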
41 changes: 0 additions & 41 deletions test/e2e/default/chainsaw-test.yaml

This file was deleted.

104 changes: 104 additions & 0 deletions test/e2e/logging/chainsaw-test.yaml
@@ -0,0 +1,104 @@
apiVersion: chainsaw.kyverno.io/v1alpha1
kind: Test
metadata:
  name: logging
spec:
  bindings:
  steps:
    - name: install vector-aggregator
      try:
        - script:
            content: >-
              helm upgrade --install vector-aggregator vector
              --namespace $NAMESPACE
              --version 0.36.1
              --repo https://helm.vector.dev
              --values vector-aggregator-values.yaml
        - apply:
            file: vector-aggregator.yaml
        - assert:
            file: vector-aggregator-assert.yaml
    - name: install zookeeper cluster
      try:
        - apply:
            file: ../setup/zookeeper.yaml
        - assert:
            file: ../setup/zookeeper-assert.yaml
      cleanup:
        - sleep:
            duration: 30s
    - name: install kafka cluster
      try:
        - apply:
            file: kafka.yaml
        - assert:
            file: kafka-assert.yaml
    - name: assert kafka logs
      try:
        - sleep:
            duration: 50s
        - script:
            env:
              - name: NAMESPACE
                value: ($namespace)
            content: |
              #!/bin/bash
              # Get logs from vector-aggregator-0 and check for specific log pattern
              kubectl -n $NAMESPACE logs statefulset/vector-aggregator -c vector | \
                grep -q '"cluster":"kafkacluster-sample","container":"kafka","errors":\[\],"file":"kafka.log4j.xml"'
              exit_code=$?

              if [ $exit_code -eq 0 ]; then
                echo "Found expected log pattern"
                exit 0
              else
                echo "Did not find expected log pattern"
                exit 1
              fi
      cleanup:
        - sleep:
            duration: 50s
    - name: access kafka
      try:
        - apply:
            file: client-access-configmap.yaml
        - script:
            env:
              - name: NAMESPACE
                value: ($namespace)
            content: |
              export KAFKA_IMAGE=$(
                kubectl -n $NAMESPACE get statefulset kafkacluster-sample-default-broker \
                  -o jsonpath='{.spec.template.spec.containers[?(.name=="kafka")].image}'
              )
              export KAFKA_DISCOVERY=$(
                kubectl -n $NAMESPACE get kafkaclusters.kafka.kubedoop.dev \
                  -o jsonpath='{.items[0].metadata.name}'
              )
              echo "KAFKA_IMAGE=$KAFKA_IMAGE"
              echo "KAFKA_DISCOVERY=$KAFKA_DISCOVERY"

              envsubst '${KAFKA_IMAGE} ${KAFKA_DISCOVERY}' < client-access-pod.txt | kubectl -n $NAMESPACE apply -f -
        - assert:
            timeout: 150s
            resource:
              apiVersion: v1
              kind: Pod
              metadata:
                name: client-access
              status:
                phase: Succeeded
      catch:
        - podLogs:
            name: client-access
            container: client-access
        - describe:
            apiVersion: v1
            kind: Pod
            name: client-access
        - script:
            env:
              - name: NAMESPACE
                value: ($namespace)
            content: |
              kubectl -n $NAMESPACE get pod client-access -o yaml
65 changes: 65 additions & 0 deletions test/e2e/logging/client-access-configmap.yaml
@@ -0,0 +1,65 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: client-access
data:
  client-access.sh: |
    #!/usr/bin/env bash

    # to be safe
    unset TOPIC
    unset BAD_TOPIC

echo "Connecting to boostrap address $KAFKA"

echo "Start client access testing..."
############################################################################
# Test the secured connection
############################################################################
# create random topics
TOPIC=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 20 ; echo '')
BAD_TOPIC=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 20 ; echo '')

if /kubedoop/kafka/bin/kafka-topics.sh --create --topic "$TOPIC" --bootstrap-server "$KAFKA"
then
echo "[SUCCESS] client topic created!"
else
echo "[ERROR] client topic creation failed!"
exit 1
fi

if /kubedoop/kafka/bin/kafka-topics.sh --list --topic "$TOPIC" --bootstrap-server "$KAFKA" | grep "$TOPIC"
then
echo "[SUCCESS] client topic read!"
else
echo "[ERROR] client topic read failed!"
exit 1
fi

############################################################################
# Test the connection with bad host name
############################################################################
if /kubedoop/kafka/bin/kafka-topics.sh --create --topic "$BAD_TOPIC" --bootstrap-server localhost:9093 &> /dev/null
then
echo "[ERROR] client topic created with bad host name!"
exit 1
else
echo "[SUCCESS] client topic creation failed with bad host name - 'localhost:9093' !"
fi

############################################################################
# Test producer and consumer
############################################################################
# echo "Start producer and consumer testing..."
# echo "Hello, World!" | /kubedoop/kafka/bin/kafka-console-producer.sh --topic "$TOPIC" --bootstrap-server "$KAFKA"

# if /kubedoop/kafka/bin/kafka-console-consumer.sh --topic "$TOPIC" --from-beginning --bootstrap-server "$KAFKA" --max-messages 1 --timeout-ms 20000 | grep "Hello, World!"
# then
# echo "[SUCCESS] producer and consumer testing!"
# else
# echo "[ERROR] producer and consumer testing failed!"
# exit 1
# fi

echo "All tests successful!"
exit 0
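The "access kafka" step above renders client-access-pod.txt with envsubst before applying it; that template belongs to the PR but is not shown in this diff. A hypothetical sketch of what such a Pod template could look like; apart from the Pod, container, and ConfigMap names, which the test asserts on, every field here is an assumption for illustration:

```yaml
# Hypothetical client-access-pod.txt (NOT the file from this PR)
# ${KAFKA_IMAGE} and ${KAFKA_DISCOVERY} are filled in by envsubst in the test step.
apiVersion: v1
kind: Pod
metadata:
  name: client-access
spec:
  restartPolicy: Never
  containers:
    - name: client-access
      image: ${KAFKA_IMAGE}
      command: ["bash", "/kubedoop/scripts/client-access.sh"]   # assumed mount path
      env:
        - name: KAFKA
          valueFrom:
            configMapKeyRef:
              name: ${KAFKA_DISCOVERY}   # assumed: discovery ConfigMap exposing the bootstrap address
              key: KAFKA                 # assumed key name
      volumeMounts:
        - name: script
          mountPath: /kubedoop/scripts
  volumes:
    - name: script
      configMap:
        name: client-access
```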