Merge pull request #171 from xrstf/multi-runs
append namespace to resources to allow parallel conformance tests
k8s-ci-robot committed Apr 7, 2024
2 parents f6fa8e5 + 286f12e commit a691b1b
Showing 3 changed files with 36 additions and 44 deletions.
hack/run-e2e.sh (16 changes: 9 additions & 7 deletions)
@@ -31,7 +31,7 @@ nodes:
- role: worker
- role: worker
EOF

# Retrieve cluster information
kubectl cluster-info --context kind-kind
kubectl get nodes
@@ -48,25 +48,26 @@ function run_test {
--output-dir ${ARTIFACTS}/results/ \
--focus "${FOCUS}" \
--skip "${SKIP}" \
- $EXTRA_ARGS| tee /tmp/test.log
+ --namespace "${NAMESPACE}" \
+ $EXTRA_ARGS | tee /tmp/test.log

# Check if $CHECK_DURATION is set to true
if [[ ${CHECK_DURATION} == "true" ]]; then
# Check duration
DURATION=$(grep -oP 'Ran \d+ of \d+ Specs in \K[0-9.]+(?= seconds)' /tmp/test.log | cut -d. -f1)

if [[ ${DRYRUN} == "true" ]]; then
if [[ ${DURATION} -gt ${DRYRUN_THRESHOLD} ]]; then
echo "Focused test took too long to run. Expected less than ${DRYRUN_THRESHOLD} seconds, got ${DURATION} seconds"
exit 1
fi
else
if [[ ${DURATION} -lt ${DRYRUN_THRESHOLD} ]]; then
echo "Focused test exited too quickly, check if dry-run is enabled. Expected more than ${DRYRUN_THRESHOLD} seconds, got ${DURATION} seconds"
exit 1
fi
fi
fi

# If EXPECTED_NUM_TESTS is set, run the evaluate_test_num function
if [[ ! -z ${EXPECTED_NUM_TESTS+x} ]]; then
@@ -95,6 +96,7 @@ DRYRUN_THRESHOLD=${DRYRUN_DURATION:-5}
FOCUS=${FOCUS:-""}
SKIP=${SKIP:-""}
DRYRUN=${DRYRUN:-"false"}
+ NAMESPACE=${NAMESPACE:-""}
CONFORMANCE=${CONFORMANCE:-"false"}
EXTRA_ARGS=${EXTRA_ARGS:-""}
CHECK_DURATION=${CHECK_DURATION:-"false"}
pkg/client/download.go (3 changes: 2 additions & 1 deletion)
@@ -21,6 +21,7 @@ import (
"context"
"fmt"
"io"
"strings"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -69,7 +70,7 @@ func downloadFile(ctx context.Context, config *rest.Config, clientset *kubernete
Stderr: &stderr,
})
if err != nil {
return fmt.Errorf("download failed: %w (stderr: %s)", err, stderr.String())
return fmt.Errorf("download failed: %w (stderr: %s)", err, strings.TrimSpace(stderr.String()))
}

return nil
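The strings.TrimSpace change is cosmetic: stderr captured from the remote exec usually ends in a newline, which previously left a dangling line break inside the wrapped error. A minimal standalone sketch of the effect (the stderr text is made up; only the Errorf pattern comes from the diff above):

package main

import (
	"bytes"
	"fmt"
	"strings"
)

func main() {
	// Simulated stderr from a remote exec; such streams typically end
	// with a trailing newline.
	var stderr bytes.Buffer
	stderr.WriteString("tar: removing leading '/' from member names\n")

	cause := fmt.Errorf("command terminated with exit code 1")

	// Before the change: the trailing newline ends up inside the message.
	before := fmt.Errorf("download failed: %w (stderr: %s)", cause, stderr.String())
	// After the change: TrimSpace keeps the message on a single line.
	after := fmt.Errorf("download failed: %w (stderr: %s)", cause, strings.TrimSpace(stderr.String()))

	fmt.Printf("%q\n%q\n", before.Error(), after.Error())
}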
pkg/service/init.go (61 changes: 25 additions & 36 deletions)
@@ -72,11 +72,17 @@ func GetKubeConfig(kubeconfig string) string {
return kubeconfig
}

+ func namespacedName(basename string) string {
+ return fmt.Sprintf("%s:%s", basename, viper.GetString("namespace"))
+ }

// RunE2E sets up the necessary resources and runs E2E conformance tests.
func RunE2E(ctx context.Context, clientset *kubernetes.Clientset) error {
+ namespace := viper.GetString("namespace")

conformanceNS := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
- Name: viper.GetString("namespace"),
+ Name: namespace,
},
}

@@ -86,7 +92,7 @@ func RunE2E(ctx context.Context, clientset *kubernetes.Clientset) error {
"component": "conformance",
},
Name: common.ServiceAccountName,
- Namespace: conformanceNS.Name,
+ Namespace: namespace,
},
}

@@ -95,7 +101,7 @@ func RunE2E(ctx context.Context, clientset *kubernetes.Clientset) error {
Labels: map[string]string{
"component": "conformance",
},
- Name: common.ClusterRoleName,
+ Name: namespacedName(common.ClusterRoleName),
},
Rules: []rbac.PolicyRule{
{
@@ -115,26 +121,26 @@ func RunE2E(ctx context.Context, clientset *kubernetes.Clientset) error {
Labels: map[string]string{
"component": "conformance",
},
- Name: common.ClusterRoleBindingName,
+ Name: namespacedName(common.ClusterRoleBindingName),
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "conformance-serviceaccount",
Name: namespacedName(common.ClusterRoleName),
},
Subjects: []rbac.Subject{
{
Kind: "ServiceAccount",
Name: "conformance-serviceaccount",
Namespace: conformanceNS.Name,
Name: common.ServiceAccountName,
Namespace: namespace,
},
},
}

conformancePod := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "e2e-conformance-test",
- Namespace: conformanceNS.Name,
+ Namespace: namespace,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
@@ -200,7 +206,7 @@ func RunE2E(ctx context.Context, clientset *kubernetes.Clientset) error {
},
},
RestartPolicy: v1.RestartPolicyNever,
- ServiceAccountName: "conformance-serviceaccount",
+ ServiceAccountName: common.ServiceAccountName,
Tolerations: []v1.Toleration{
{
// An empty key with operator Exists matches all keys,
@@ -272,7 +278,7 @@ func RunE2E(ctx context.Context, clientset *kubernetes.Clientset) error {
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "repo-list-config",
- Namespace: ns.Name,
+ Namespace: namespace,
},
Data: map[string]string{
"repo-list.yaml": string(RepoListData),
@@ -338,51 +344,34 @@ func RunE2E(ctx context.Context, clientset *kubernetes.Clientset) error {

// Cleanup removes all resources created during E2E tests.
func Cleanup(ctx context.Context, clientset *kubernetes.Clientset) error {
- namespace := viper.GetString("namespace")
-
- err := clientset.CoreV1().Pods(namespace).Delete(ctx, common.PodName, metav1.DeleteOptions{})
- if err != nil {
- if !errors.IsNotFound(err) {
- return err
- }
- } else {
- log.Printf("Deleted Pod %s.", common.PodName)
- }
-
- err = clientset.RbacV1().ClusterRoleBindings().Delete(ctx, common.ClusterRoleBindingName, metav1.DeleteOptions{})
- if err != nil {
- if !errors.IsNotFound(err) {
- return err
- }
- } else {
- log.Printf("Deleted ClusterRoleBinding %s.", common.ClusterRoleBindingName)
- }
-
- err = clientset.RbacV1().ClusterRoles().Delete(ctx, common.ClusterRoleName, metav1.DeleteOptions{})
+ name := namespacedName(common.ClusterRoleBindingName)
+ err := clientset.RbacV1().ClusterRoleBindings().Delete(ctx, name, metav1.DeleteOptions{})
if err != nil {
if !errors.IsNotFound(err) {
return err
}
} else {
log.Printf("Deleted ClusterRole %s.", common.ClusterRoleName)
log.Printf("Deleted ClusterRoleBinding %s.", name)
}

- err = clientset.CoreV1().ServiceAccounts(namespace).Delete(ctx, common.ServiceAccountName, metav1.DeleteOptions{})
+ name = namespacedName(common.ClusterRoleName)
+ err = clientset.RbacV1().ClusterRoles().Delete(ctx, name, metav1.DeleteOptions{})
if err != nil {
if !errors.IsNotFound(err) {
return err
}
} else {
log.Printf("Deleted ServiceAccount %s.", common.ServiceAccountName)
log.Printf("Deleted ClusterRole %s.", name)
}

- err = clientset.CoreV1().Namespaces().Delete(ctx, namespace, metav1.DeleteOptions{})
+ name = viper.GetString("namespace")
+ err = clientset.CoreV1().Namespaces().Delete(ctx, name, metav1.DeleteOptions{})
if err != nil {
if !errors.IsNotFound(err) {
return err
}
} else {
log.Printf("Deleted Namespace %s.", namespace)
log.Printf("Deleted Namespace %s.", name)
}

return nil
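Taken together, the init.go changes suffix every cluster-scoped resource name with the configured namespace, so conformance runs pointed at different namespaces no longer collide on a shared ClusterRole and ClusterRoleBinding, while namespaced objects (ServiceAccount, Pod, ConfigMap) are isolated by the namespace itself. A rough sketch of the resulting naming, with the namespace passed in explicitly and hypothetical base names standing in for the constants in the common package:

package main

import "fmt"

// namespacedName mirrors the helper added in pkg/service/init.go, but takes
// the namespace as a parameter instead of reading it from viper.
func namespacedName(basename, namespace string) string {
	return fmt.Sprintf("%s:%s", basename, namespace)
}

func main() {
	// Hypothetical base names; the real values come from the common package.
	const clusterRole = "conformance-serviceaccount"
	const clusterRoleBinding = "conformance-serviceaccount-binding"

	// Two runs configured with different namespaces now create distinctly
	// named cluster-scoped resources instead of fighting over one name.
	for _, ns := range []string{"conformance-run-a", "conformance-run-b"} {
		fmt.Println(namespacedName(clusterRole, ns))
		fmt.Println(namespacedName(clusterRoleBinding, ns))
	}
}

Colons are valid in RBAC object names (the built-in system: roles use them), so the suffixed names are accepted by the API server.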
