From 063fa5966ced9813b01e133df974582e00ffbb50 Mon Sep 17 00:00:00 2001
From: whg517
Date: Mon, 30 Dec 2024 14:45:03 +0800
Subject: [PATCH] test(e2e): refactor e2e

---
 .chainsaw.yaml                                |   1 +
 Makefile                                      |   3 +-
 cmd/csi_driver/main.go                        |   2 +
 config/csi/daemonset.yaml                     |   6 +-
 config/rbac/role.yaml                         |   8 +
 internal/controller/secretclass_controller.go |   1 +
 internal/csi/backend/ca/ca_manager.go         |  18 +--
 internal/csi/controller.go                    |   6 +-
 internal/csi/driver.go                        |   6 +-
 internal/csi/node.go                          |  14 +-
 internal/csi/server.go                        |   2 +-
 pkg/kerberos/kadmin.go                        |   6 +-
 pkg/pod_info/pod_info.go                      |   2 +-
 pkg/util/log.go                               |  14 +-
 pkg/volume/volume.go                          |   2 +-
 test/e2e/krb5/krb5.yaml                       |   2 +-
 test/e2e/krb5/node-scope-assert.yaml          |   4 +-
 test/e2e/krb5/node-scope.yaml                 |  27 +---
 test/e2e/krb5/pod-scope-assert.yaml           |   9 +-
 test/e2e/krb5/pod-scope.yaml                  |  28 ++--
 test/e2e/krb5/service-scope-assert.yaml       |   4 +-
 test/e2e/krb5/service-scope.yaml              |  34 ++---
 test/e2e/tls/chainsaw-test.yaml               |  44 +++++-
 test/e2e/tls/tls-pkcs12-assert.yaml           |  11 ++
 test/e2e/tls/tls-pkcs12.yaml                  | 123 ++++++++++++++
 test/e2e/tls/tls-scope-assert.yaml            |  11 ++
 test/e2e/tls/tls-scope.yaml                   | 140 ++++++++++++++++
 test/e2e/tls/tls-will-expires.yaml            |  59 +++++++
 28 files changed, 467 insertions(+), 120 deletions(-)
 create mode 100644 test/e2e/tls/tls-pkcs12-assert.yaml
 create mode 100644 test/e2e/tls/tls-pkcs12.yaml
 create mode 100644 test/e2e/tls/tls-scope-assert.yaml
 create mode 100644 test/e2e/tls/tls-scope.yaml
 create mode 100644 test/e2e/tls/tls-will-expires.yaml

diff --git a/.chainsaw.yaml b/.chainsaw.yaml
index 41e9964..128002d 100644
--- a/.chainsaw.yaml
+++ b/.chainsaw.yaml
@@ -10,6 +10,7 @@ spec:
     delete: 120s
     error: 10s
     exec: 45s
+    forceTerminationGracePeriod: 10s
   # skipDelete: true
   failFast: true
   parallel: 1 # use 1 concurrent to test, to voide multiple csi driver conflict
diff --git a/Makefile b/Makefile
index 1207475..d001875 100644
--- a/Makefile
+++ b/Makefile
@@ -339,9 +339,8 @@ $(CHAINSAW): $(LOCALBIN)
 
 .PHONY: chainsaw-setup
 chainsaw-setup: ## Run the chainsaw setup
-	make docker-build
 	make csi-docker-build
-	$(KIND) --name $(KIND_CLUSTER_NAME) load docker-image $(IMG) $(CSIDRIVER_IMG)
+	$(KIND) --name $(KIND_CLUSTER_NAME) load docker-image $(CSIDRIVER_IMG)
 	KUBECONFIG=$(KIND_KUBECONFIG) make helm-install-depends
 	KUBECONFIG=$(KIND_KUBECONFIG) make deploy
 
diff --git a/cmd/csi_driver/main.go b/cmd/csi_driver/main.go
index 45ab5b3..eccdf9b 100644
--- a/cmd/csi_driver/main.go
+++ b/cmd/csi_driver/main.go
@@ -29,6 +29,7 @@ import (
 	// to ensure that exec-entrypoint and run can make use of them.
_ "k8s.io/client-go/plugin/pkg/client/auth" + listenerv1alpha1 "github.com/zncdatadev/operator-go/pkg/apis/listeners/v1alpha1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" @@ -55,6 +56,7 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(listenerv1alpha1.AddToScheme(scheme)) utilruntime.Must(secretv1alpha1.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme diff --git a/config/csi/daemonset.yaml b/config/csi/daemonset.yaml index 92d5de5..517cc6b 100644 --- a/config/csi/daemonset.yaml +++ b/config/csi/daemonset.yaml @@ -27,7 +27,7 @@ spec: securityContext: {} containers: - - name: secret-operator + - name: secret-csi-driver securityContext: privileged: true runAsUser: 0 @@ -35,8 +35,8 @@ spec: imagePullPolicy: IfNotPresent resources: limits: - cpu: 100m - memory: 128Mi + cpu: 1000m + memory: 512Mi requests: cpu: 100m memory: 128Mi diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 406af05..2dceb70 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -38,6 +38,14 @@ rules: - patch - update - watch +- apiGroups: + - listeners.kubedoop.dev + resources: + - listeners + verbs: + - get + - list + - watch - apiGroups: - secrets.kubedoop.dev resources: diff --git a/internal/controller/secretclass_controller.go b/internal/controller/secretclass_controller.go index de5182e..bff3e1e 100644 --- a/internal/controller/secretclass_controller.go +++ b/internal/controller/secretclass_controller.go @@ -44,6 +44,7 @@ type SecretClassReconciler struct { // +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch // +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch // +kubebuilder:rbac:groups=core,resources=persistentvolumes,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=listeners.kubedoop.dev,resources=listeners,verbs=get;list;watch // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. 
diff --git a/internal/csi/backend/ca/ca_manager.go b/internal/csi/backend/ca/ca_manager.go index 04ef085..aff37c9 100644 --- a/internal/csi/backend/ca/ca_manager.go +++ b/internal/csi/backend/ca/ca_manager.go @@ -95,7 +95,7 @@ func (c *CertificateManager) secretCreateIfDoesNotExist(ctx context.Context) err return err } - logger.V(1).Info("Created a new secret", "name", c.name, "namespace", c.namespace, "auto", c.auto) + logger.V(1).Info("created a new secret", "name", c.name, "namespace", c.namespace, "auto", c.auto) return nil } @@ -115,7 +115,7 @@ func (c CertificateManager) getPEMKeyPairsFromSecret(ctx context.Context) ([]PEM } } - logger.V(0).Info("Get certificate authorities PEM key pairs from secret", "name", c.name, "namespace", c.namespace, "len", len(keyPairs)) + logger.V(0).Info("got certificate authorities PEM key pairs from secret", "name", c.name, "namespace", c.namespace, "len", len(keyPairs)) return keyPairs, nil } @@ -171,7 +171,7 @@ func (c *CertificateManager) getCertificateAuthorities(pemKeyPairs []PEMkeyPair) return nil, err } if ca.Certificate.NotAfter.Before(time.Now()) { - logger.V(0).Info("Certificate authority is expired, skip it.", "serialNumber", ca.SerialNumber(), "notAfter", ca.Certificate.NotAfter) + logger.V(0).Info("certificate authority is expired, skip it.", "serialNumber", ca.SerialNumber(), "notAfter", ca.Certificate.NotAfter) continue } cas = append(cas, ca) @@ -187,7 +187,7 @@ func (c *CertificateManager) getCertificateAuthorities(pemKeyPairs []PEMkeyPair) ) } - logger.V(0).Info("Could not find any certificate authorities, created a new self-signed certificate authority", "name", c.name, "namespace", c.namespace, "auto", c.auto) + logger.V(0).Info("could not find any certificate authorities, created a new self-signed certificate authority", "name", c.name, "namespace", c.namespace, "auto", c.auto) ca, err := c.createSelfSignedCertificateAuthority() if err != nil { return nil, err @@ -213,7 +213,7 @@ func (c *CertificateManager) createSelfSignedCertificateAuthority() (*Certificat if err != nil { return nil, err } - logger.V(0).Info("Created new self-signed certificate authority", "serialNumber", ca.SerialNumber(), "notAfter", ca.Certificate.NotAfter) + logger.V(0).Info("created new self-signed certificate authority", "serialNumber", ca.SerialNumber(), "notAfter", ca.Certificate.NotAfter) return ca, nil } @@ -248,19 +248,19 @@ func (c *CertificateManager) rotateCertificateAuthority(cas []*CertificateAuthor if err != nil { return nil, err } - logger.V(0).Info("Rotated certificate authority, because the old ca is about to expire", + logger.V(0).Info("rotated certificate authority, because the old ca is about to expire", "serialNumber", newestCA.SerialNumber(), "notAfter", newCA.Certificate.NotAfter, ) cas = append(cas, newCA) } else { - logger.V(0).Info("Certificate authority is about to expire, but auto-generate is disabled, please rotate manually.", + logger.V(0).Info("certificate authority is about to expire, but auto-generate is disabled, please rotate manually.", "serialNumber", newestCA.SerialNumber(), "notAfter", newestCA.Certificate.NotAfter, ) } } else { - logger.V(0).Info("Certificate authority is still valid, no need to rotate", + logger.V(0).Info("certificate authority is still valid, no need to rotate", "serialNumber", newestCA.SerialNumber(), "notAfter", newestCA.Certificate.NotAfter, ) @@ -283,7 +283,7 @@ func (c *CertificateManager) getAliveCertificateAuthority(atAfter time.Time, cas } return 0 }) - logger.V(0).Info("Get alive certificate 
authority", "serialNumber", oldestCA.SerialNumber(), "notAfter", oldestCA.Certificate.NotAfter) + logger.V(0).Info("got alive certificate authority", "serialNumber", oldestCA.SerialNumber(), "notAfter", oldestCA.Certificate.NotAfter) return oldestCA } diff --git a/internal/csi/controller.go b/internal/csi/controller.go index 084c34b..85141f6 100644 --- a/internal/csi/controller.go +++ b/internal/csi/controller.go @@ -62,7 +62,7 @@ func (c *ControllerServer) CreateVolume(ctx context.Context, request *csi.Create c.volumes[request.Name] = requiredCap if request.Parameters["secretFinalizer"] == "true" { - logger.V(1).Info("Finalizer is true") + logger.V(1).Info("finalizer is true") } // requests.parameters is StorageClass.Parameters, which is set by user when creating PVC. @@ -185,13 +185,13 @@ func (c *ControllerServer) DeleteVolume(ctx context.Context, request *csi.Delete } if !dynamic { - logger.V(5).Info("Volume is not dynamic, skip delete volume") + logger.V(5).Info("volume is not dynamic, skip delete volume") return &csi.DeleteVolumeResponse{}, nil } if _, ok := c.volumes[request.VolumeId]; !ok { // return nil, status.Errorf(codes.NotFound, "Volume ID: %q", request.VolumeId) - logger.V(1).Info("Volume not found, skip delete volume") + logger.V(1).Info("volume not found, skip delete volume") } return &csi.DeleteVolumeResponse{}, nil diff --git a/internal/csi/driver.go b/internal/csi/driver.go index 47123d2..46652b7 100644 --- a/internal/csi/driver.go +++ b/internal/csi/driver.go @@ -47,7 +47,7 @@ func NewDriver( func (d *Driver) Run(ctx context.Context, testMode bool) error { - logger.V(1).Info("Driver information", "versionInfo", version.GetVersion(d.name)) + logger.V(1).Info("driver information", "versionInfo", version.GetVersion(d.name)) // check node id if d.nodeID == "" { @@ -56,7 +56,7 @@ func (d *Driver) Run(ctx context.Context, testMode bool) error { ns := NewNodeServer( d.nodeID, - mount.New(""), + mount.New("secret-csi"), d.client, ) @@ -72,7 +72,7 @@ func (d *Driver) Run(ctx context.Context, testMode bool) error { }() d.server.Wait() - logger.Info("Server stopped") + logger.Info("csi driver stopped") return nil } diff --git a/internal/csi/node.go b/internal/csi/node.go index 6cdbf5c..2313be5 100644 --- a/internal/csi/node.go +++ b/internal/csi/node.go @@ -144,7 +144,7 @@ func (n *NodeServer) updatePod(ctx context.Context, pod *corev1.Pod, volumeID st } patch := client.MergeFrom(pod.DeepCopy()) if expiresTime == nil { - logger.V(5).Info("Expiration time is nil, skip update pod annotation", "pod", pod.Name) + logger.V(5).Info("expiration time is nil, skip update pod annotation", "pod", pod.Name) return nil } @@ -157,14 +157,14 @@ func (n *NodeServer) updatePod(ctx context.Context, pod *corev1.Pod, volumeID st annotationExpiresName := constants.PrefixLabelRestarterExpiresAt + hex.EncodeToString(volumeTag) expiresTimeStr := expiresTime.Format(time.RFC3339) - logger.V(5).Info("Update pod annotation", "pod", pod.Name, "key", annotationExpiresName, "value", expiresTimeStr) + logger.V(5).Info("update pod annotation", "pod", pod.Name, "key", annotationExpiresName, "value", expiresTimeStr) pod.Annotations[annotationExpiresName] = expiresTimeStr if err := n.client.Patch(ctx, pod, patch); err != nil { return err } - logger.V(5).Info("Pod patched", "pod", pod.Name) + logger.V(5).Info("pod patched", "pod", pod.Name) return nil } @@ -177,9 +177,9 @@ func (n *NodeServer) writeData(targetPath string, data map[string]string) error if err := os.WriteFile(fileName, []byte(content), 
fs.FileMode(0644)); err != nil {
 			return err
 		}
-		logger.V(5).Info("File written", "file", fileName)
+		logger.V(5).Info("file written", "file", fileName)
 	}
 
-	logger.V(5).Info("Data written", "target", targetPath)
+	logger.V(5).Info("data written", "target", targetPath)
 	return nil
 }
 
@@ -218,7 +218,7 @@ func (n *NodeServer) mount(targetPath string) error {
 	if err := n.mounter.Mount("tmpfs", targetPath, "tmpfs", opts); err != nil {
 		return status.Error(codes.Internal, err.Error())
 	}
-	logger.V(1).Info("Volume mounted", "source", "tmpfs", "target", targetPath, "fsType", "tmpfs", "options", opts)
+	logger.V(1).Info("volume mounted", "source", "tmpfs", "target", targetPath, "fsType", "tmpfs", "options", opts)
 	return nil
 }
 
@@ -239,7 +239,7 @@ func (n *NodeServer) NodeUnpublishVolume(ctx context.Context, request *csi.NodeU
 	if err := n.mounter.Unmount(targetPath); err != nil {
 		// FIXME: use status.Error to return error
 		// return nil, status.Error(codes.Internal, err.Error())
-		logger.V(0).Info("Volume not found, skip delete volume")
+		logger.V(0).Info("failed to unmount volume, skip unmount", "target", targetPath, "error", err.Error())
 	}
 
 	// remove the target path
diff --git a/internal/csi/server.go b/internal/csi/server.go
index 5583539..896f164 100644
--- a/internal/csi/server.go
+++ b/internal/csi/server.go
@@ -72,7 +72,7 @@ func (s *nonBlockingServer) serveGrpc(endpoint string, ids csi.IdentityServer, c
 	if proto == "unix" {
 		addr = "/" + addr
 		if err := os.Remove(addr); err != nil && !os.IsNotExist(err) {
-			logger.V(0).Info("Failed to remove", "addr", addr, "error", err.Error())
+			logger.V(0).Info("failed to remove", "addr", addr, "error", err.Error())
 		}
 	}
 
diff --git a/pkg/kerberos/kadmin.go b/pkg/kerberos/kadmin.go
index 4b9e17e..ed470ed 100644
--- a/pkg/kerberos/kadmin.go
+++ b/pkg/kerberos/kadmin.go
@@ -118,7 +118,7 @@ func (k *Kadmin) Query(query string) (result string, err error) {
 		kadminLogger.Error(err, "Failed to execute kadmin query", "cmd", cmd.String(), "output", result)
 		return "", err
 	}
-	kadminLogger.Info("Executed kadmin query", "cmd", cmd.String(), "output", result)
+	kadminLogger.Info("executed kadmin query", "cmd", cmd.String(), "output", result)
 
 	return result, nil
 
@@ -149,7 +149,7 @@ func (k *Kadmin) Ktadd(principals ...string) ([]byte, error) {
 		return nil, err
 	}
 
-	kadminLogger.Info("Saved keytab", "principal", principals, "keytab", keytab, "output", output)
+	kadminLogger.Info("saved keytab", "principal", principals, "keytab", keytab, "output", output)
 
 	return os.ReadFile(keytab)
 }
@@ -186,7 +186,7 @@ func (k *Kadmin) AddPrincipal(principal string) error {
 		return err
 	}
 
-	kadminLogger.Info("Added principal", "principal", principal, "output", output)
+	kadminLogger.Info("created a new principal", "principal", principal, "output", output)
 
 	return nil
 }
diff --git a/pkg/pod_info/pod_info.go b/pkg/pod_info/pod_info.go
index fc689cb..20f03f5 100644
--- a/pkg/pod_info/pod_info.go
+++ b/pkg/pod_info/pod_info.go
@@ -76,7 +76,7 @@ func (p *PodInfo) getNodeAddresses(ctx context.Context) ([]Address, error) {
 		}
 	}
 
-	logger.V(1).Info("get node ip filter by internal and external", "pod", p.getPodName(),
+	logger.V(1).Info("got node addresses, filtered by internal and external", "pod", p.getPodName(),
 		"namespace", p.getPodNamespace(), "addresses", addresses)
 	return addresses, nil
 }
diff --git a/pkg/util/log.go b/pkg/util/log.go
index 6e4ab01..c8b8f36 100644
--- a/pkg/util/log.go
+++ b/pkg/util/log.go
@@ -14,30 +14,32 @@ var (
 	log = ctrl.Log.WithName("csi-grpc")
 )
 
-func GetLogLevel(method string) int {
+func getLogLevel(method string) int {
+	v := ctrl.Log.GetV()
+
 	if method == "/csi.v1.Identity/Probe" || method == "/csi.v1.Node/NodeGetCapabilities" || method == "/csi.v1.Node/NodeGetVolumeStats" {
 		return 8
 	}
 
-	return 2
+	return v
 }
 
 func LogGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
-	level := GetLogLevel(info.FullMethod)
-	log.V(level).Info("GRPC calling", "method", info.FullMethod, "request", protosanitizer.StripSecrets(req))
+	level := getLogLevel(info.FullMethod)
+	log.V(level).Info("gRPC calling", "method", info.FullMethod, "request", protosanitizer.StripSecrets(req))
 
 	resp, err := handler(ctx, req)
 	if err != nil {
-		log.Error(err, "GRPC called error", "method", info.FullMethod)
+		log.Error(err, "gRPC call error", "method", info.FullMethod)
 		if level >= 5 {
 			stack := debug.Stack()
 			errStack := fmt.Errorf("\n%s", stack)
 			log.Error(err, "GRPC called error", errStack.Error())
 		}
 	} else {
-		log.V(level).Info("GRPC called", "method", info.FullMethod, "response", protosanitizer.StripSecrets(resp))
+		log.V(level).Info("gRPC called", "method", info.FullMethod, "response", protosanitizer.StripSecrets(resp))
 	}
 	return resp, err
 }
diff --git a/pkg/volume/volume.go b/pkg/volume/volume.go
index d01d098..5b37451 100644
--- a/pkg/volume/volume.go
+++ b/pkg/volume/volume.go
@@ -188,7 +188,7 @@ func NewvolumeContextFromMap(parameters map[string]string) (*SecretVolumeContext
 		case VolumeKubernetesStorageProvisioner:
 			v.Provisioner = value
 		case DeprecatedVolumeKubernetesStorageProvisioner:
-			logger.V(0).Info("Deprecated key since v1.23, please use new key",
+			logger.V(0).Info("deprecated key since v1.23, please use new key",
 				"key", key,
 				"value", value,
 				"new key", VolumeKubernetesStorageProvisioner,
diff --git a/test/e2e/krb5/krb5.yaml b/test/e2e/krb5/krb5.yaml
index 9688116..a344bda 100644
--- a/test/e2e/krb5/krb5.yaml
+++ b/test/e2e/krb5/krb5.yaml
@@ -13,7 +13,7 @@ spec:
     spec:
       containers:
         - name: krb5
-          image: quay.io/zncdatadev-test/krb5:dev
+          image: quay.io/zncdatadev/krb5:1.21.1-kubedoop0.0.0-dev
           args:
             - -r
             - ($relam) # chainsaw bindings value
diff --git a/test/e2e/krb5/node-scope-assert.yaml b/test/e2e/krb5/node-scope-assert.yaml
index c6dc451..491eaf6 100644
--- a/test/e2e/krb5/node-scope-assert.yaml
+++ b/test/e2e/krb5/node-scope-assert.yaml
@@ -4,9 +4,7 @@ kind: Pod
 metadata:
   name: krb5-node-scope
 status:
-  phase: Running
+  phase: Succeeded
   containerStatuses:
     - name: main
-      ready: true
       restartCount: 0
-      started: true
diff --git a/test/e2e/krb5/node-scope.yaml b/test/e2e/krb5/node-scope.yaml
index 610c214..460d326 100644
--- a/test/e2e/krb5/node-scope.yaml
+++ b/test/e2e/krb5/node-scope.yaml
@@ -5,39 +5,34 @@ metadata:
   labels:
     name: krb5-node-scope
 spec:
+  restartPolicy: Never
   containers:
     - name: main
-      image: rockylinux/rockylinux:9
+      image: quay.io/zncdatadev/testing-tools:0.1.0-kubedoop0.0.0-dev
       command:
         - "sh"
         - "-c"
        - |
-          set -ex
-          dnf install krb5-workstation nginx -y
-          echo "Loop is running... (Press Ctrl+C or send SIGTERM to exit)"
-          while ! test -f /opt/secret/keytab; do
+          while ! test -f /kubedoop/secret/keytab; do
             sleep 1
             echo "Waiting for content..."
           done
 
-          KERBEROS_REALM=$(grep -oP 'default_realm = \K.*' /opt/secret//krb5.conf)
+          KERBEROS_REALM=$(grep -oP 'default_realm = \K.*' /kubedoop/secret/krb5.conf)
 
-          klist -kt /opt/secret/keytab
+          klist -kt /kubedoop/secret/keytab
 
-          kinit -kt /opt/secret/keytab foo/$NODE_NAME@$KERBEROS_REALM
+          kinit -kt /kubedoop/secret/keytab foo/$NODE_NAME@$KERBEROS_REALM
 
           klist -e
-
-          echo start nginx server
-          nginx -g "daemon off;"
       env:
         - name: NODE_NAME
           valueFrom:
             fieldRef:
               fieldPath: spec.nodeName
         - name: KRB5_CONFIG
-          value: /opt/secret//krb5.conf
+          value: /kubedoop/secret/krb5.conf
       resources:
         limits:
           memory: "526Mi"
@@ -48,13 +43,7 @@ spec:
           name: http
       volumeMounts:
         - name: secret
-          mountPath: /opt/secret
-      readinessProbe:
-        httpGet:
-          port: http
-        initialDelaySeconds: 5
-        periodSeconds: 5
-
+          mountPath: /kubedoop/secret
   volumes:
     - name: secret
       ephemeral:
diff --git a/test/e2e/krb5/pod-scope-assert.yaml b/test/e2e/krb5/pod-scope-assert.yaml
index 833384a..ebdace7 100644
--- a/test/e2e/krb5/pod-scope-assert.yaml
+++ b/test/e2e/krb5/pod-scope-assert.yaml
@@ -1,17 +1,10 @@
 ---
 apiVersion: v1
-kind: Service
-metadata:
-  name: krb5-pod-scope-service
----
-apiVersion: v1
 kind: Pod
 metadata:
   name: krb5-pod-scope
 status:
-  phase: Running
+  phase: Succeeded
   containerStatuses:
     - name: main
-      ready: true
       restartCount: 0
-      started: true
diff --git a/test/e2e/krb5/pod-scope.yaml b/test/e2e/krb5/pod-scope.yaml
index e414013..2dc6b6c 100644
--- a/test/e2e/krb5/pod-scope.yaml
+++ b/test/e2e/krb5/pod-scope.yaml
@@ -12,39 +12,34 @@ spec:
 ---
 apiVersion: v1
 kind: Pod
 metadata:
   name: krb5-pod-scope
   labels:
     name: krb5-pod-scope
 spec:
+  restartPolicy: Never
   containers:
     - name: main
-      image: rockylinux/rockylinux:9
+      image: quay.io/zncdatadev/testing-tools:0.1.0-kubedoop0.0.0-dev
       command:
         - "sh"
         - "-c"
        - |
-          set -ex
-          dnf install krb5-workstation nginx -y
-          echo "Loop is running... (Press Ctrl+C or send SIGTERM to exit)"
-          while ! test -f /opt/secret/keytab; do
+          while ! test -f /kubedoop/secret/keytab; do
             sleep 1
             echo "Waiting for content..."
           done
 
-          KERBEROS_REALM=$(grep -oP 'default_realm = \K.*' /opt/secret//krb5.conf)
+          KERBEROS_REALM=$(grep -oP 'default_realm = \K.*' /kubedoop/secret/krb5.conf)
 
-          klist -kt /opt/secret/keytab
+          klist -kt /kubedoop/secret/keytab
 
-          kinit -kt /opt/secret/keytab foo/krb5-pod-scope-service.$NAMESPACE.svc.cluster.local@$KERBEROS_REALM
-          kinit -kt /opt/secret/keytab foo/krb5-pod-scope.krb5-pod-scope-service.$NAMESPACE.svc.cluster.local@$KERBEROS_REALM
+          kinit -kt /kubedoop/secret/keytab foo/krb5-pod-scope-service.$NAMESPACE.svc.cluster.local@$KERBEROS_REALM
+          kinit -kt /kubedoop/secret/keytab foo/krb5-pod-scope.krb5-pod-scope-service.$NAMESPACE.svc.cluster.local@$KERBEROS_REALM
 
           klist -e
-
-          echo start nginx server
-          nginx -g "daemon off;"
       env:
         - name: NAMESPACE
           valueFrom:
@@ -55,7 +53,7 @@
             fieldRef:
               fieldPath: spec.nodeName
         - name: KRB5_CONFIG
-          value: /opt/secret//krb5.conf
+          value: /kubedoop/secret/krb5.conf
       resources:
         limits:
           memory: "526Mi"
@@ -67,12 +65,7 @@
       volumeMounts:
         - name: secret
-          mountPath: /opt/secret
-      readinessProbe:
-        httpGet:
-          port: http
-        initialDelaySeconds: 5
-        periodSeconds: 5
+          mountPath: /kubedoop/secret
   subdomain: krb5-pod-scope-service
   volumes:
     - name: secret
diff --git a/test/e2e/krb5/service-scope-assert.yaml b/test/e2e/krb5/service-scope-assert.yaml
index 66ccb43..ed08850 100644
--- a/test/e2e/krb5/service-scope-assert.yaml
+++ b/test/e2e/krb5/service-scope-assert.yaml
@@ -10,9 +10,7 @@ kind: Pod
 metadata:
   name: krb5-svc-scope
 status:
-  phase: Running
+  phase: Succeeded
   containerStatuses:
     - name: main
-      ready: true
       restartCount: 0
-      started: true
diff --git a/test/e2e/krb5/service-scope.yaml b/test/e2e/krb5/service-scope.yaml
index 6463e41..719ba88 100644
--- a/test/e2e/krb5/service-scope.yaml
+++ b/test/e2e/krb5/service-scope.yaml
@@ -9,7 +9,6 @@ spec:
   ports:
     - port: 80
       targetPort: http
-
 ---
 apiVersion: v1
 kind: Pod
@@ -18,32 +17,28 @@ metadata:
   labels:
     name: krb5-svc-scope
 spec:
+  restartPolicy: Never
   containers:
     - name: main
-      image: rockylinux/rockylinux:9
+      image: quay.io/zncdatadev/testing-tools:0.1.0-kubedoop0.0.0-dev
       command:
         - "sh"
         - "-c"
        - |
-          set -ex
-          dnf install krb5-workstation nginx -y
-          echo "Loop is running... (Press Ctrl+C or send SIGTERM to exit)"
-          while ! test -f /opt/secret/keytab; do
+          while ! test -f /kubedoop/secret/keytab; do
             sleep 1
             echo "Waiting for content..."
           done
 
-          KERBEROS_REALM=$(grep -oP 'default_realm = \K.*' /opt/secret//krb5.conf)
+          KERBEROS_REALM=$(grep -oP 'default_realm = \K.*' /kubedoop/secret/krb5.conf)
 
-          klist -kt /opt/secret/keytab
+          klist -kt /kubedoop/secret/keytab
 
-          kinit -kt /opt/secret/keytab foo/krb5-svc-scope-service.$NAMESPACE.svc.cluster.local@$KERBEROS_REALM
+          kinit -kt /kubedoop/secret/keytab foo/krb5-svc-scope-service.$NAMESPACE.svc.cluster.local@$KERBEROS_REALM
 
           klist -e
 
-          echo start nginx server
-          nginx -g "daemon off;"
       env:
         - name: NAMESPACE
           valueFrom:
@@ -54,23 +49,14 @@
             fieldRef:
               fieldPath: spec.nodeName
         - name: KRB5_CONFIG
-          value: /opt/secret//krb5.conf
+          value: /kubedoop/secret/krb5.conf
       resources:
         limits:
-          memory: "526Mi"
-          cpu: "500m"
-      ports:
-        - containerPort: 80
-          protocol: TCP
-          name: http
+          memory: "200Mi"
+          cpu: "256m"
       volumeMounts:
         - name: secret
-          mountPath: /opt/secret
-      readinessProbe:
-        httpGet:
-          port: http
-        initialDelaySeconds: 5
-        periodSeconds: 5
+          mountPath: /kubedoop/secret
   volumes:
     - name: secret
       ephemeral:
diff --git a/test/e2e/tls/chainsaw-test.yaml b/test/e2e/tls/chainsaw-test.yaml
index 7a8edec..051bc00 100644
--- a/test/e2e/tls/chainsaw-test.yaml
+++ b/test/e2e/tls/chainsaw-test.yaml
@@ -1,6 +1,6 @@
 apiVersion: chainsaw.kyverno.io/v1alpha1
 kind: Test
 metadata:
-  name: tls
+  name: autotls-pkcs12
 spec:
   steps:
@@ -30,3 +30,39 @@ spec:
-  - podLogs:
-      selector: name=auto-tls
-      tail: -1
+  # tls smoke test case with pkcs12
+  - try:
+    - apply:
+        file: tls-pkcs12.yaml
+    - assert:
+        file: tls-pkcs12-assert.yaml
+  # tls certificate expiry case
+  - try:
+    - apply:
+        file: tls-will-expires.yaml
+    - assert:
+        resource:
+          apiVersion: apps/v1
+          kind: StatefulSet
+          metadata:
+            name: tls-will-expires
+          status:
+            availableReplicas: 1
+            readyReplicas: 1
+    - sleep:
+        duration: 80s
+    - script:
+        content: |
+          # Count the 'Successfully assigned ...' scheduling events for the pod.
+          # A count greater than 1 means the pod was rescheduled after its certificate expired.
+          restart_count=$(kubectl -n "$NAMESPACE" get events --field-selector involvedObject.name=tls-will-expires-0 --no-headers | grep -c 'Successfully assigned')
+          if [ "$restart_count" -gt 1 ]; then
+            echo "Pod tls-will-expires-0 was restarted when the certificate expired"
+          else
+            echo "Pod tls-will-expires-0 was not restarted when the certificate expired"
+            exit 1
+          fi
+        check:
+          ($error == null): true
+  - try:
+    - apply:
+        file: tls-scope.yaml
+    - assert:
+        file: tls-scope-assert.yaml
diff --git a/test/e2e/tls/tls-pkcs12-assert.yaml b/test/e2e/tls/tls-pkcs12-assert.yaml
new file mode 100644
index 0000000..6f8b584
--- /dev/null
+++ b/test/e2e/tls/tls-pkcs12-assert.yaml
@@ -0,0 +1,11 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: auto-tls
+  labels:
+    name: auto-tls
+status:
+  availableReplicas: 3
+  readyReplicas: 3
+  replicas: 3
+  updatedReplicas: 3
diff --git a/test/e2e/tls/tls-pkcs12.yaml b/test/e2e/tls/tls-pkcs12.yaml
new file mode 100644
index 0000000..f2fa56a
--- /dev/null
+++ b/test/e2e/tls/tls-pkcs12.yaml
@@ -0,0 +1,123 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: auto-tls
+  labels:
+    name: auto-tls
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      name: auto-tls
+  template:
+    metadata:
+      labels:
+        name: auto-tls
+    spec:
+      containers:
+        - name: auto-tls
+          image: quay.io/zncdatadev/testing-tools:0.1.0-kubedoop0.0.0-dev
+          command:
+            - /bin/bash
+            - -c
+            - |
+              KEYSTORE_FILE=/kubedoop/tls/keystore.p12
+              TRUSTSTORE_FILE=/kubedoop/tls/truststore.p12
+              SERVER_PEM_FILE=/tmp/server.pem  # extracted cert goes somewhere writable
+
+              # Arguments:
+              # Returns:
+              #   0: success
+              #   1: fail
+              function checkTLSPKCS12 () {
+                # Check if keystore and truststore exist
+                if [ ! -f $KEYSTORE_FILE ] || [ ! -f $TRUSTSTORE_FILE ]; then
+                  echo "Keystore or truststore does not exist." >&2
+                  return 1
+                fi
+
+                # Check if keystore contains more than 0 entries
+                entryCount=$(keytool -list -keystore $KEYSTORE_FILE -storepass $P12PASSWORD | grep 'Your keystore contains' | awk '{print $4}')
+                entryCount=${entryCount:-0}
+                if [ $entryCount -gt 0 ]; then
+                  echo "Keystore contains more than 0 entries." >&2
+                else
+                  echo "Keystore contains 0 entries." >&2
+                  return 1
+                fi
+
+                # Check that the server certificate in the keystore stays valid for at least another 23 hours
+                EXPIRESLIFE=$((23 * 60 * 60))  # 23 hours, in seconds
+                openssl pkcs12 -in "$KEYSTORE_FILE" -passin pass:"$P12PASSWORD" -nokeys -clcerts -out "$SERVER_PEM_FILE"
+                if ! openssl x509 -checkend $EXPIRESLIFE -noout -in "$SERVER_PEM_FILE"; then
+                  echo "Server certificate in keystore expires within $EXPIRESLIFE seconds." >&2
+                  return 1
+                fi
+
+                echo "All checks passed." >&2
+                return 0
+              }
+
+              # Continuous check with 10 seconds interval until the function returns 0
+              while true; do
+                if checkTLSPKCS12; then
+                  break
+                fi
+                sleep 10
+              done
+
+              # save assert result success to /kubedoop/assert/success
+              echo "success" > /kubedoop/assert/success
+
+              sleep infinity
+
+          resources:
+            limits:
+              memory: "512Mi"
+              cpu: "300m"
+          securityContext:
+            runAsUser: 0
+            runAsGroup: 0
+          env:
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: P12PASSWORD
+              value: changeit
+          ports:
+            - containerPort: 80
+              name: web
+          readinessProbe:
+            exec:
+              command:
+                - test
+                - -f
+                - /kubedoop/assert/success
+            initialDelaySeconds: 5
+            periodSeconds: 5
+            timeoutSeconds: 5
+          volumeMounts:
+            - name: tls
+              mountPath: /kubedoop/tls
+            - name: assert
+              mountPath: /kubedoop/assert
+      volumes:
+        - name: assert
+          emptyDir: {}
+        - name: tls
+          ephemeral:
+            volumeClaimTemplate:
+              metadata:
+                annotations:
+                  secrets.kubedoop.dev/class: tls
+                  secrets.kubedoop.dev/format: tls-p12
+                  secrets.kubedoop.dev/scope: pod,node
+                  secrets.kubedoop.dev/tlsPKCS12Password: changeit
+              spec:
+                accessModes: ["ReadWriteOnce"]
+                storageClassName: secrets.kubedoop.dev
+                resources:
+                  requests:
+                    storage: 1Mi
diff --git a/test/e2e/tls/tls-scope-assert.yaml b/test/e2e/tls/tls-scope-assert.yaml
new file mode 100644
index 0000000..b94ad97
--- /dev/null
+++ b/test/e2e/tls/tls-scope-assert.yaml
@@ -0,0 +1,11 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tls-scope
+  labels:
+    name: tls-scope
+status:
+  availableReplicas: 3
+  readyReplicas: 3
+  replicas: 3
+  updatedReplicas: 3
diff --git a/test/e2e/tls/tls-scope.yaml b/test/e2e/tls/tls-scope.yaml
new file mode 100644
index 0000000..3975862
--- /dev/null
+++ b/test/e2e/tls/tls-scope.yaml
@@ -0,0 +1,140 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tls-scope
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: tls-scope
+  template:
+    metadata:
+      labels:
+        app: tls-scope
+    spec:
+      containers:
+        - name: tls-scope
+          image: quay.io/zncdatadev/testing-tools:0.1.0-kubedoop0.0.0-dev
+          env:
+            - name: NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          resources:
+            limits:
+              memory: "128Mi"
+              cpu: "500m"
+          command:
+            - /bin/bash
+            - -c
+            - |
+              # check tls.crt san has secret scope value
+              # Arguments:
+              # Returns:
+              #   0: success
+              #   1: fail
+              function checkTLSSAN () {
+                # check /kubedoop/tls/tls.crt exists
+                if [ ! -f /kubedoop/tls/tls.crt ]; then
+                  echo "/kubedoop/tls/tls.crt does not exist" >&2
+                  return 1
+                fi
+
+                # TLS SAN check, e.g.:
+                #   X509v3 Subject Alternative Name: critical
+                #       DNS:secret-operator-1.26.15-control-plane, DNS:tls-scope-https.default.svc.cluster.local, DNS:tls-scope-74c794dc64-88mh8-tls-scope-listener-eph-volume.default.svc.cluster.local, IP Address:172.18.0.2
+                tls_san=$(openssl x509 -in /kubedoop/tls/tls.crt -noout -text | grep -A 1 "Subject Alternative Name")
+                echo -e "tls_san:\n$tls_san\n" >&2
+
+                # check tls_san contains the expected values
+                svc_san="tls-scope-https.$NAMESPACE.svc.cluster.local"
+                listener_san="tls-scope-listener-eph-volume.$NAMESPACE.svc.cluster.local"
+                check_lists=("$svc_san" "$listener_san")
+
+                for check in "${check_lists[@]}"; do
+                  if [[ $tls_san != *$check* ]]; then
+                    echo "tls_san does not contain $check" >&2
+                    return 1
+                  fi
+                done
+
+                return 0
+              }
+
+              # Continuous check with 10 seconds interval until the function returns 0
+              while true; do
+                checkTLSSAN
+                if [ $? -eq 0 ]; then
+                  echo "tls_san check success"
+                  break
+                fi
+                sleep 10
+              done
+
+              # save assert result success to /kubedoop/assert/success
+              echo "success" > /kubedoop/assert/success
+              sleep infinity
+          ports:
+            - containerPort: 443
+              name: https
+          readinessProbe:
+            exec:
+              command:
+                - test
+                - -f
+                - /kubedoop/assert/success
+            initialDelaySeconds: 5
+            periodSeconds: 10
+            timeoutSeconds: 5
+            successThreshold: 1
+            failureThreshold: 5
+          volumeMounts:
+            - name: tls
+              mountPath: /kubedoop/tls
+            - name: tls-scope-listener-eph-volume
+              mountPath: /kubedoop/listener
+            - name: assert
+              mountPath: /kubedoop/assert
+      volumes:
+        - name: assert
+          emptyDir: {}
+        - name: tls
+          ephemeral:
+            volumeClaimTemplate:
+              metadata:
+                annotations:
+                  secrets.kubedoop.dev/class: tls
+                  secrets.kubedoop.dev/format: pem
+                  secrets.kubedoop.dev/scope: pod,node,service=tls-scope-https,listener-volume=tls-scope-listener-eph-volume
+                  secrets.kubedoop.dev/tlsPKCS12Password: changeit
+              spec:
+                accessModes: ["ReadWriteOnce"]
+                storageClassName: secrets.kubedoop.dev
+                resources:
+                  requests:
+                    storage: 1Mi
+        - name: tls-scope-listener-eph-volume
+          ephemeral:
+            volumeClaimTemplate:
+              metadata:
+                annotations:
+                  listeners.kubedoop.dev/class: cluster-internal # this is service ClusterIP
+                  # listeners.kubedoop.dev/class: external-unstable # this is service NodePort
+              spec:
+                accessModes: ["ReadWriteOnce"]
+                storageClassName: listeners.kubedoop.dev
+                resources:
+                  requests:
+                    storage: 1Mi
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: tls-scope-https
+spec:
+  selector:
+    app: tls-scope
+  ports:
+    - port: 443
+      targetPort: https
diff --git a/test/e2e/tls/tls-will-expires.yaml b/test/e2e/tls/tls-will-expires.yaml
new file mode 100644
index 0000000..fc66625
--- /dev/null
+++ b/test/e2e/tls/tls-will-expires.yaml
@@ -0,0 +1,59 @@
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: tls-will-expires
+  labels:
+    name: tls-will-expires
+spec:
+  replicas: 1
+  serviceName: default
+  selector:
+    matchLabels:
+      name: tls-will-expires
+  template:
+    metadata:
+      labels:
+        name: tls-will-expires
+    spec:
+      containers:
+        - name: tls-will-expires
+          image: caddy:2
+          resources:
+            limits:
+              memory: "100Mi"
+              cpu: "100m"
+          ports:
+            - containerPort: 80
+              name: http
+          readinessProbe:
+            httpGet:
+              path: /
+              port: http
+            initialDelaySeconds: 5
+            periodSeconds: 5
+            timeoutSeconds: 5
+          volumeMounts:
+            - name: tls
+              mountPath: /opt/security/tls
+      volumes:
+        - name: tls
+          ephemeral:
+            volumeClaimTemplate:
+              metadata:
+                annotations:
+                  secrets.kubedoop.dev/class: tls
+                  secrets.kubedoop.dev/format: pem
+                  secrets.kubedoop.dev/scope: pod,node
+                  secrets.kubedoop.dev/tlsPKCS12Password: changeit
+                  # Golang duration string is a possibly signed sequence of decimal numbers,
+                  # each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m".
+                  # Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+                  secrets.kubedoop.dev/autoTlsCertLifetime: 60s
+                  secrets.kubedoop.dev/autoTlsCertRestartBuffer: 10s
+              spec:
+                accessModes: ["ReadWriteOnce"]
+                storageClassName: secrets.kubedoop.dev
+                resources:
+                  requests:
+                    storage: 1Mi
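
The autoTlsCertLifetime and autoTlsCertRestartBuffer annotations above use Go duration syntax, so their interplay with the 80s sleep in chainsaw-test.yaml can be sanity-checked offline. A minimal sketch, assuming the operator schedules a restart at roughly issuance + lifetime - buffer (the variable names here are illustrative, not the operator's actual code):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Values from the volumeClaimTemplate annotations above.
	lifetime, err := time.ParseDuration("60s") // secrets.kubedoop.dev/autoTlsCertLifetime
	if err != nil {
		panic(err)
	}
	buffer, err := time.ParseDuration("10s") // secrets.kubedoop.dev/autoTlsCertRestartBuffer
	if err != nil {
		panic(err)
	}

	// With a 60s certificate lifetime and a 10s restart buffer, a restart is
	// due about 50s after issuance, which is why the e2e test sleeps 80s
	// before counting the pod's scheduling events.
	issuedAt := time.Now()
	restartAt := issuedAt.Add(lifetime - buffer)
	fmt.Println("expect pod restart by:", restartAt.Format(time.RFC3339))
}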