diff --git a/hack/run-e2e.sh b/hack/run-e2e.sh
index e458aa9..cbb20cb 100755
--- a/hack/run-e2e.sh
+++ b/hack/run-e2e.sh
@@ -31,7 +31,7 @@ nodes:
 - role: worker
 - role: worker
 EOF
-
+
 # Retrieve cluster information
 kubectl cluster-info --context kind-kind
 kubectl get nodes
@@ -48,25 +48,26 @@ function run_test {
     --output-dir ${ARTIFACTS}/results/ \
     --focus "${FOCUS}" \
     --skip "${SKIP}" \
-    $EXTRA_ARGS| tee /tmp/test.log
-
+    --namespace "${NAMESPACE}" \
+    $EXTRA_ARGS | tee /tmp/test.log
+
   # Check if $CHECK_DURATION is set to true
   if [[ ${CHECK_DURATION} == "true" ]]; then
     # Check duration
     DURATION=$(grep -oP 'Ran \d+ of \d+ Specs in \K[0-9.]+(?= seconds)' /tmp/test.log | cut -d. -f1)
-
+
     if [[ ${DRYRUN} == "true" ]]; then
-      if [[ ${DURATION} -gt ${DRYRUN_THRESHOLD} ]]; then 
+      if [[ ${DURATION} -gt ${DRYRUN_THRESHOLD} ]]; then
        echo "Focused test took too long to run. Expected less than ${DRYRUN_THRESHOLD} seconds, got ${DURATION} seconds"
        exit 1
      fi
    else
-      if [[ ${DURATION} -lt ${DRYRUN_THRESHOLD} ]]; then 
+      if [[ ${DURATION} -lt ${DRYRUN_THRESHOLD} ]]; then
        echo "Focused test exited too quickly, check if dry-run is enabled. Expected more than ${DRYRUN_THRESHOLD} seconds, got ${DURATION} seconds"
        exit 1
      fi
    fi
-  fi 
+  fi
 
   # If EXPECTED_NUM_TESTS is set, run the evaluate_test_num function
   if [[ ! -z ${EXPECTED_NUM_TESTS+x} ]]; then
@@ -95,6 +96,7 @@ DRYRUN_THRESHOLD=${DRYRUN_DURATION:-5}
 FOCUS=${FOCUS:-""}
 SKIP=${SKIP:-""}
 DRYRUN=${DRYRUN:-"false"}
+NAMESPACE=${NAMESPACE:-""}
 CONFORMANCE=${CONFORMANCE:-"false"}
 EXTRA_ARGS=${EXTRA_ARGS:-""}
 CHECK_DURATION=${CHECK_DURATION:-"false"}
diff --git a/pkg/client/download.go b/pkg/client/download.go
index ada15f6..5af83b6 100644
--- a/pkg/client/download.go
+++ b/pkg/client/download.go
@@ -21,6 +21,7 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"strings"
 
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -69,7 +70,7 @@ func downloadFile(ctx context.Context, config *rest.Config, clientset *kubernete
 		Stderr: &stderr,
 	})
 	if err != nil {
-		return fmt.Errorf("download failed: %w (stderr: %s)", err, stderr.String())
+		return fmt.Errorf("download failed: %w (stderr: %s)", err, strings.TrimSpace(stderr.String()))
 	}
 
 	return nil
diff --git a/pkg/service/init.go b/pkg/service/init.go
index 8df220f..8306cfb 100644
--- a/pkg/service/init.go
+++ b/pkg/service/init.go
@@ -72,11 +72,17 @@ func GetKubeConfig(kubeconfig string) string {
 	return kubeconfig
 }
 
+func namespacedName(basename string) string {
+	return fmt.Sprintf("%s:%s", basename, viper.GetString("namespace"))
+}
+
 // RunE2E sets up the necessary resources and runs E2E conformance tests.
 func RunE2E(ctx context.Context, clientset *kubernetes.Clientset) error {
+	namespace := viper.GetString("namespace")
+
 	conformanceNS := v1.Namespace{
 		ObjectMeta: metav1.ObjectMeta{
-			Name: viper.GetString("namespace"),
+			Name: namespace,
 		},
 	}
 
@@ -86,7 +92,7 @@ func RunE2E(ctx context.Context, clientset *kubernetes.Clientset) error {
 				"component": "conformance",
 			},
 			Name:      common.ServiceAccountName,
-			Namespace: conformanceNS.Name,
+			Namespace: namespace,
 		},
 	}
 
@@ -95,7 +101,7 @@ func RunE2E(ctx context.Context, clientset *kubernetes.Clientset) error {
 			Labels: map[string]string{
 				"component": "conformance",
 			},
-			Name: common.ClusterRoleName,
+			Name: namespacedName(common.ClusterRoleName),
 		},
 		Rules: []rbac.PolicyRule{
 			{
@@ -115,18 +121,18 @@
 			Labels: map[string]string{
 				"component": "conformance",
 			},
-			Name: common.ClusterRoleBindingName,
+			Name: namespacedName(common.ClusterRoleBindingName),
 		},
 		RoleRef: rbac.RoleRef{
 			APIGroup: "rbac.authorization.k8s.io",
 			Kind:     "ClusterRole",
-			Name:     "conformance-serviceaccount",
+			Name:     namespacedName(common.ClusterRoleName),
 		},
 		Subjects: []rbac.Subject{
 			{
 				Kind:      "ServiceAccount",
-				Name:      "conformance-serviceaccount",
-				Namespace: conformanceNS.Name,
+				Name:      common.ServiceAccountName,
+				Namespace: namespace,
 			},
 		},
 	}
@@ -134,7 +140,7 @@
 	conformancePod := v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "e2e-conformance-test",
-			Namespace: conformanceNS.Name,
+			Namespace: namespace,
 		},
 		Spec: v1.PodSpec{
 			Containers: []v1.Container{
@@ -200,7 +206,7 @@
 				},
 			},
 			RestartPolicy:      v1.RestartPolicyNever,
-			ServiceAccountName: "conformance-serviceaccount",
+			ServiceAccountName: common.ServiceAccountName,
 			Tolerations: []v1.Toleration{
 				{
 					// An empty key with operator Exists matches all keys,
@@ -272,7 +278,7 @@
 	configMap := &v1.ConfigMap{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "repo-list-config",
-			Namespace: ns.Name,
+			Namespace: namespace,
 		},
 		Data: map[string]string{
 			"repo-list.yaml": string(RepoListData),
@@ -338,51 +344,34 @@ func RunE2E(ctx context.Context, clientset *kubernetes.Clientset) error {
 
 // Cleanup removes all resources created during E2E tests.
 func Cleanup(ctx context.Context, clientset *kubernetes.Clientset) error {
-	namespace := viper.GetString("namespace")
-
-	err := clientset.CoreV1().Pods(namespace).Delete(ctx, common.PodName, metav1.DeleteOptions{})
-	if err != nil {
-		if !errors.IsNotFound(err) {
-			return err
-		}
-	} else {
-		log.Printf("Deleted Pod %s.", common.PodName)
-	}
-
-	err = clientset.RbacV1().ClusterRoleBindings().Delete(ctx, common.ClusterRoleBindingName, metav1.DeleteOptions{})
-	if err != nil {
-		if !errors.IsNotFound(err) {
-			return err
-		}
-	} else {
-		log.Printf("Deleted ClusterRoleBinding %s.", common.ClusterRoleBindingName)
-	}
-
-	err = clientset.RbacV1().ClusterRoles().Delete(ctx, common.ClusterRoleName, metav1.DeleteOptions{})
+	name := namespacedName(common.ClusterRoleBindingName)
+	err := clientset.RbacV1().ClusterRoleBindings().Delete(ctx, name, metav1.DeleteOptions{})
 	if err != nil {
 		if !errors.IsNotFound(err) {
 			return err
 		}
 	} else {
-		log.Printf("Deleted ClusterRole %s.", common.ClusterRoleName)
+		log.Printf("Deleted ClusterRoleBinding %s.", name)
 	}
 
-	err = clientset.CoreV1().ServiceAccounts(namespace).Delete(ctx, common.ServiceAccountName, metav1.DeleteOptions{})
+	name = namespacedName(common.ClusterRoleName)
+	err = clientset.RbacV1().ClusterRoles().Delete(ctx, name, metav1.DeleteOptions{})
 	if err != nil {
 		if !errors.IsNotFound(err) {
 			return err
 		}
 	} else {
-		log.Printf("Deleted ServiceAccount %s.", common.ServiceAccountName)
+		log.Printf("Deleted ClusterRole %s.", name)
 	}
 
-	err = clientset.CoreV1().Namespaces().Delete(ctx, namespace, metav1.DeleteOptions{})
+	name = viper.GetString("namespace")
+	err = clientset.CoreV1().Namespaces().Delete(ctx, name, metav1.DeleteOptions{})
 	if err != nil {
 		if !errors.IsNotFound(err) {
 			return err
 		}
 	} else {
-		log.Printf("Deleted Namespace %s.", namespace)
+		log.Printf("Deleted Namespace %s.", name)
 	}
 
 	return nil