[Feature] Add timestamps for logs in e2e tests (#3006)
kenchung285 authored Feb 12, 2025
1 parent 3efef20 commit 39e8028
Showing 19 changed files with 181 additions and 174 deletions.
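
The changes below replace direct test.T().Logf(...) calls with a LogWithTimestamp helper so that every e2e log line carries a wall-clock timestamp. The helper itself is defined in the test support code and is not among the files loaded here; the following is a minimal sketch of what such a helper could look like (the package name, prefix format, and use of RFC 3339 are assumptions for illustration, not the commit's actual implementation):

package support

import (
	"testing"
	"time"
)

// LogWithTimestamp logs through the standard *testing.T logger but prefixes
// each message with the current wall-clock time, so e2e test output can be
// correlated with cluster-side events.
// Sketch only: the real helper's location and formatting may differ.
func LogWithTimestamp(t *testing.T, format string, args ...any) {
	t.Helper() // attribute the log line to the caller, not to this helper
	t.Logf("["+time.Now().Format(time.RFC3339)+"] "+format, args...)
}

Call sites then pass the *testing.T explicitly, e.g. LogWithTimestamp(test.T(), "Created RayCluster %s/%s successfully", ...), as seen throughout the diff.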
24 changes: 12 additions & 12 deletions ray-operator/test/e2e/raycluster_gcs_ft_test.go
@@ -64,26 +64,26 @@ func TestRayClusterGCSFaultTolerence(t *testing.T) {
rayCluster, err := test.Client().Ray().RayV1().RayClusters(namespace.Name).Apply(test.Ctx(), rayClusterAC, TestApplyOptions)

g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Created RayCluster %s/%s successfully", rayCluster.Namespace, rayCluster.Name)
LogWithTimestamp(test.T(), "Created RayCluster %s/%s successfully", rayCluster.Namespace, rayCluster.Name)

test.T().Logf("Waiting for RayCluster %s/%s to become ready", rayCluster.Namespace, rayCluster.Name)
LogWithTimestamp(test.T(), "Waiting for RayCluster %s/%s to become ready", rayCluster.Namespace, rayCluster.Name)
g.Eventually(RayCluster(test, namespace.Name, rayCluster.Name), TestTimeoutLong).
Should(WithTransform(StatusCondition(rayv1.RayClusterProvisioned), MatchCondition(metav1.ConditionTrue, rayv1.AllPodRunningAndReadyFirstTime)))

headPod, err := GetHeadPod(test, rayCluster)
g.Expect(err).NotTo(HaveOccurred())

test.T().Logf("HeadPod Name: %s", headPod.Name)
LogWithTimestamp(test.T(), "HeadPod Name: %s", headPod.Name)

rayNamespace := "testing-ray-namespace"
test.T().Logf("Ray namespace: %s", rayNamespace)
LogWithTimestamp(test.T(), "Ray namespace: %s", rayNamespace)

ExecPodCmd(test, headPod, common.RayHeadContainer, []string{"python", "samples/test_detached_actor_1.py", rayNamespace})

// [Test 1: Kill GCS process to "restart" the head Pod]
// Assertions are implemented in Python, so no further handling is needed here; the same applies to the other ExecPodCmd calls
stdout, stderr := ExecPodCmd(test, headPod, common.RayHeadContainer, []string{"pkill", "gcs_server"})
t.Logf("pkill gcs_server output - stdout: %s, stderr: %s", stdout.String(), stderr.String())
LogWithTimestamp(test.T(), "pkill gcs_server output - stdout: %s, stderr: %s", stdout.String(), stderr.String())

// The restart count should eventually become 1, rather than a new Pod being created
HeadPodRestartCount := func(p *corev1.Pod) int32 { return p.Status.ContainerStatuses[0].RestartCount }
@@ -224,7 +224,7 @@ func TestGcsFaultToleranceOptions(t *testing.T) {
defer g.Eventually(checkRedisDBSize, time.Second*30, time.Second).Should(BeEquivalentTo("0"))

if tc.createSecret {
test.T().Logf("Creating Redis password secret")
LogWithTimestamp(test.T(), "Creating Redis password secret")
_, err := test.Client().Core().CoreV1().Secrets(namespace.Name).Apply(
test.Ctx(),
corev1ac.Secret("redis-password-secret", namespace.Name).
@@ -237,13 +237,13 @@ func TestGcsFaultToleranceOptions(t *testing.T) {
rayClusterAC := tc.rayClusterFn(namespace.Name)
rayCluster, err := test.Client().Ray().RayV1().RayClusters(namespace.Name).Apply(test.Ctx(), rayClusterAC, TestApplyOptions)
g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Created RayCluster %s/%s successfully", rayCluster.Namespace, rayCluster.Name)
LogWithTimestamp(test.T(), "Created RayCluster %s/%s successfully", rayCluster.Namespace, rayCluster.Name)

test.T().Logf("Waiting for RayCluster %s/%s to become ready", rayCluster.Namespace, rayCluster.Name)
LogWithTimestamp(test.T(), "Waiting for RayCluster %s/%s to become ready", rayCluster.Namespace, rayCluster.Name)
g.Eventually(RayCluster(test, namespace.Name, rayCluster.Name), TestTimeoutMedium).
Should(WithTransform(StatusCondition(rayv1.RayClusterProvisioned), MatchCondition(metav1.ConditionTrue, rayv1.AllPodRunningAndReadyFirstTime)))

test.T().Logf("Verifying environment variables on Head Pod")
LogWithTimestamp(test.T(), "Verifying environment variables on Head Pod")
rayCluster, err = test.Client().Ray().RayV1().RayClusters(namespace.Name).Get(test.Ctx(), rayCluster.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
headPod, err := test.Client().Core().CoreV1().Pods(namespace.Name).Get(test.Ctx(), rayCluster.Status.Head.PodName, metav1.GetOptions{})
@@ -349,13 +349,13 @@ func TestGcsFaultToleranceAnnotations(t *testing.T) {
// Apply RayCluster
rayCluster, err := test.Client().Ray().RayV1().RayClusters(namespace.Name).Apply(test.Ctx(), rayClusterAC, TestApplyOptions)
g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Created RayCluster %s/%s successfully", rayCluster.Namespace, rayCluster.Name)
LogWithTimestamp(test.T(), "Created RayCluster %s/%s successfully", rayCluster.Namespace, rayCluster.Name)

test.T().Logf("Waiting for RayCluster %s/%s to become ready", rayCluster.Namespace, rayCluster.Name)
LogWithTimestamp(test.T(), "Waiting for RayCluster %s/%s to become ready", rayCluster.Namespace, rayCluster.Name)
g.Eventually(RayCluster(test, namespace.Name, rayCluster.Name), TestTimeoutMedium).
Should(WithTransform(StatusCondition(rayv1.RayClusterProvisioned), MatchCondition(metav1.ConditionTrue, rayv1.AllPodRunningAndReadyFirstTime)))

test.T().Logf("Verifying environment variables on Head Pod")
LogWithTimestamp(test.T(), "Verifying environment variables on Head Pod")
rayCluster, err = test.Client().Ray().RayV1().RayClusters(namespace.Name).Get(test.Ctx(), rayCluster.Name, metav1.GetOptions{})
g.Expect(err).NotTo(HaveOccurred())
headPod, err := test.Client().Core().CoreV1().Pods(namespace.Name).Get(test.Ctx(), rayCluster.Status.Head.PodName, metav1.GetOptions{})
20 changes: 10 additions & 10 deletions ray-operator/test/e2e/raycluster_test.go
@@ -31,9 +31,9 @@ func TestRayClusterManagedBy(t *testing.T) {

rayCluster, err := test.Client().Ray().RayV1().RayClusters(namespace.Name).Apply(test.Ctx(), rayClusterAC, TestApplyOptions)
g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Created RayCluster %s/%s successfully", rayCluster.Namespace, rayCluster.Name)
LogWithTimestamp(test.T(), "Created RayCluster %s/%s successfully", rayCluster.Namespace, rayCluster.Name)

test.T().Logf("Waiting for RayCluster %s/%s to become ready", rayCluster.Namespace, rayCluster.Name)
LogWithTimestamp(test.T(), "Waiting for RayCluster %s/%s to become ready", rayCluster.Namespace, rayCluster.Name)
g.Eventually(RayCluster(test, rayCluster.Namespace, rayCluster.Name), TestTimeoutMedium).
Should(WithTransform(RayClusterState, Equal(rayv1.Ready)))
})
@@ -47,9 +47,9 @@ func TestRayClusterManagedBy(t *testing.T) {

rayCluster, err := test.Client().Ray().RayV1().RayClusters(namespace.Name).Apply(test.Ctx(), rayClusterAC, TestApplyOptions)
g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Created RayCluster %s/%s successfully", rayCluster.Namespace, rayCluster.Name)
LogWithTimestamp(test.T(), "Created RayCluster %s/%s successfully", rayCluster.Namespace, rayCluster.Name)

test.T().Logf("RayCluster %s/%s will not become ready - not reconciled", rayCluster.Namespace, rayCluster.Name)
LogWithTimestamp(test.T(), "RayCluster %s/%s will not become ready - not reconciled", rayCluster.Namespace, rayCluster.Name)
g.Consistently(func(gg Gomega) {
rc, err := RayCluster(test, rayCluster.Namespace, rayCluster.Name)()
gg.Expect(err).NotTo(HaveOccurred())
@@ -87,9 +87,9 @@ func TestRayClusterSuspend(t *testing.T) {

rayCluster, err := test.Client().Ray().RayV1().RayClusters(namespace.Name).Apply(test.Ctx(), rayClusterAC, TestApplyOptions)
g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Created RayCluster %s/%s successfully", rayCluster.Namespace, rayCluster.Name)
LogWithTimestamp(test.T(), "Created RayCluster %s/%s successfully", rayCluster.Namespace, rayCluster.Name)

test.T().Logf("Waiting for RayCluster %s/%s to become ready", rayCluster.Namespace, rayCluster.Name)
LogWithTimestamp(test.T(), "Waiting for RayCluster %s/%s to become ready", rayCluster.Namespace, rayCluster.Name)
g.Eventually(RayCluster(test, namespace.Name, rayCluster.Name), TestTimeoutMedium).
Should(WithTransform(StatusCondition(rayv1.HeadPodReady), MatchCondition(metav1.ConditionTrue, rayv1.HeadPodRunningAndReady)))
g.Eventually(RayCluster(test, namespace.Name, rayCluster.Name), TestTimeoutMedium).
@@ -98,9 +98,9 @@ func TestRayClusterSuspend(t *testing.T) {
rayClusterAC = rayClusterAC.WithSpec(rayClusterAC.Spec.WithSuspend(true))
rayCluster, err = test.Client().Ray().RayV1().RayClusters(namespace.Name).Apply(test.Ctx(), rayClusterAC, TestApplyOptions)
g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Suspend RayCluster %s/%s successfully", rayCluster.Namespace, rayCluster.Name)
LogWithTimestamp(test.T(), "Suspend RayCluster %s/%s successfully", rayCluster.Namespace, rayCluster.Name)

test.T().Logf("Waiting for RayCluster %s/%s to be suspended", rayCluster.Namespace, rayCluster.Name)
LogWithTimestamp(test.T(), "Waiting for RayCluster %s/%s to be suspended", rayCluster.Namespace, rayCluster.Name)
g.Eventually(RayCluster(test, namespace.Name, rayCluster.Name), TestTimeoutMedium).
Should(WithTransform(StatusCondition(rayv1.RayClusterSuspended), MatchCondition(metav1.ConditionTrue, string(rayv1.RayClusterSuspended))))
g.Eventually(RayCluster(test, namespace.Name, rayCluster.Name), TestTimeoutMedium).
@@ -111,9 +111,9 @@ func TestRayClusterSuspend(t *testing.T) {
rayClusterAC = rayClusterAC.WithSpec(rayClusterAC.Spec.WithSuspend(false))
rayCluster, err = test.Client().Ray().RayV1().RayClusters(namespace.Name).Apply(test.Ctx(), rayClusterAC, TestApplyOptions)
g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Resume RayCluster %s/%s successfully", rayCluster.Namespace, rayCluster.Name)
LogWithTimestamp(test.T(), "Resume RayCluster %s/%s successfully", rayCluster.Namespace, rayCluster.Name)

test.T().Logf("Waiting for RayCluster %s/%s to be resumed", rayCluster.Namespace, rayCluster.Name)
LogWithTimestamp(test.T(), "Waiting for RayCluster %s/%s to be resumed", rayCluster.Namespace, rayCluster.Name)
g.Eventually(RayCluster(test, namespace.Name, rayCluster.Name), TestTimeoutMedium).
Should(WithTransform(StatusCondition(rayv1.RayClusterSuspended), MatchCondition(metav1.ConditionFalse, string(rayv1.RayClusterSuspended))))
g.Eventually(RayCluster(test, namespace.Name, rayCluster.Name), TestTimeoutMedium).
16 changes: 8 additions & 8 deletions ray-operator/test/e2e/rayjob_cluster_selector_test.go
@@ -24,17 +24,17 @@ func TestRayJobWithClusterSelector(t *testing.T) {
jobsAC := newConfigMap(namespace.Name, files(test, "counter.py", "fail.py"))
jobs, err := test.Client().Core().CoreV1().ConfigMaps(namespace.Name).Apply(test.Ctx(), jobsAC, TestApplyOptions)
g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Created ConfigMap %s/%s successfully", jobs.Namespace, jobs.Name)
LogWithTimestamp(test.T(), "Created ConfigMap %s/%s successfully", jobs.Namespace, jobs.Name)

// RayCluster
rayClusterAC := rayv1ac.RayCluster("raycluster", namespace.Name).
WithSpec(newRayClusterSpec(mountConfigMap[rayv1ac.RayClusterSpecApplyConfiguration](jobs, "/home/ray/jobs")))

rayCluster, err := test.Client().Ray().RayV1().RayClusters(namespace.Name).Apply(test.Ctx(), rayClusterAC, TestApplyOptions)
g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Created RayCluster %s/%s successfully", rayCluster.Namespace, rayCluster.Name)
LogWithTimestamp(test.T(), "Created RayCluster %s/%s successfully", rayCluster.Namespace, rayCluster.Name)

test.T().Logf("Waiting for RayCluster %s/%s to become ready", rayCluster.Namespace, rayCluster.Name)
LogWithTimestamp(test.T(), "Waiting for RayCluster %s/%s to become ready", rayCluster.Namespace, rayCluster.Name)
g.Eventually(RayCluster(test, rayCluster.Namespace, rayCluster.Name), TestTimeoutMedium).
Should(WithTransform(RayClusterState, Equal(rayv1.Ready)))

@@ -54,9 +54,9 @@ env_vars:

rayJob, err := test.Client().Ray().RayV1().RayJobs(namespace.Name).Apply(test.Ctx(), rayJobAC, TestApplyOptions)
g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Created RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name)
LogWithTimestamp(test.T(), "Created RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name)

test.T().Logf("Waiting for RayJob %s/%s to complete", rayJob.Namespace, rayJob.Name)
LogWithTimestamp(test.T(), "Waiting for RayJob %s/%s to complete", rayJob.Namespace, rayJob.Name)
g.Eventually(RayJob(test, rayJob.Namespace, rayJob.Name), TestTimeoutMedium).
Should(WithTransform(RayJobStatus, Satisfy(rayv1.IsJobTerminal)))

@@ -78,9 +78,9 @@ env_vars:

rayJob, err := test.Client().Ray().RayV1().RayJobs(namespace.Name).Apply(test.Ctx(), rayJobAC, TestApplyOptions)
g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Created RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name)
LogWithTimestamp(test.T(), "Created RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name)

test.T().Logf("Waiting for RayJob %s/%s to complete", rayJob.Namespace, rayJob.Name)
LogWithTimestamp(test.T(), "Waiting for RayJob %s/%s to complete", rayJob.Namespace, rayJob.Name)
g.Eventually(RayJob(test, rayJob.Namespace, rayJob.Name), TestTimeoutMedium).
Should(WithTransform(RayJobStatus, Satisfy(rayv1.IsJobTerminal)))

@@ -106,7 +106,7 @@ env_vars:

rayJob, err := test.Client().Ray().RayV1().RayJobs(namespace.Name).Apply(test.Ctx(), rayJobAC, TestApplyOptions)
g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Created RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name)
LogWithTimestamp(test.T(), "Created RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name)

// Assert the Ray job status has not been updated
g.Consistently(func(gg Gomega) {
18 changes: 9 additions & 9 deletions ray-operator/test/e2e/rayjob_lightweight_test.go
@@ -26,7 +26,7 @@ func TestRayJobLightWeightMode(t *testing.T) {
jobsAC := newConfigMap(namespace.Name, files(test, "counter.py", "fail.py", "stop.py"))
jobs, err := test.Client().Core().CoreV1().ConfigMaps(namespace.Name).Apply(test.Ctx(), jobsAC, TestApplyOptions)
g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Created ConfigMap %s/%s successfully", jobs.Namespace, jobs.Name)
LogWithTimestamp(test.T(), "Created ConfigMap %s/%s successfully", jobs.Namespace, jobs.Name)

test.T().Run("Successful RayJob", func(_ *testing.T) {
rayJobAC := rayv1ac.RayJob("counter", namespace.Name).
@@ -55,9 +55,9 @@ env_vars:

rayJob, err := test.Client().Ray().RayV1().RayJobs(namespace.Name).Apply(test.Ctx(), rayJobAC, TestApplyOptions)
g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Created RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name)
LogWithTimestamp(test.T(), "Created RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name)

test.T().Logf("Waiting for RayJob %s/%s to complete", rayJob.Namespace, rayJob.Name)
LogWithTimestamp(test.T(), "Waiting for RayJob %s/%s to complete", rayJob.Namespace, rayJob.Name)
g.Eventually(RayJob(test, rayJob.Namespace, rayJob.Name), TestTimeoutMedium).
Should(WithTransform(RayJobStatus, Satisfy(rayv1.IsJobTerminal)))

@@ -89,9 +89,9 @@ env_vars:

rayJob, err := test.Client().Ray().RayV1().RayJobs(namespace.Name).Apply(test.Ctx(), rayJobAC, TestApplyOptions)
g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Created RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name)
LogWithTimestamp(test.T(), "Created RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name)

test.T().Logf("Waiting for RayJob %s/%s to complete", rayJob.Namespace, rayJob.Name)
LogWithTimestamp(test.T(), "Waiting for RayJob %s/%s to complete", rayJob.Namespace, rayJob.Name)
g.Eventually(RayJob(test, rayJob.Namespace, rayJob.Name), TestTimeoutMedium).
Should(WithTransform(RayJobStatus, Satisfy(rayv1.IsJobTerminal)))

@@ -120,13 +120,13 @@ env_vars:

rayJob, err := test.Client().Ray().RayV1().RayJobs(namespace.Name).Apply(test.Ctx(), rayJobAC, TestApplyOptions)
g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Created RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name)
LogWithTimestamp(test.T(), "Created RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name)

test.T().Logf("Waiting for RayJob %s/%s to be 'Running'", rayJob.Namespace, rayJob.Name)
LogWithTimestamp(test.T(), "Waiting for RayJob %s/%s to be 'Running'", rayJob.Namespace, rayJob.Name)
g.Eventually(RayJob(test, rayJob.Namespace, rayJob.Name), TestTimeoutMedium).
Should(WithTransform(RayJobDeploymentStatus, Equal(rayv1.JobDeploymentStatusRunning)))

test.T().Logf("Waiting for RayJob %s/%s to be 'Complete'", rayJob.Namespace, rayJob.Name)
LogWithTimestamp(test.T(), "Waiting for RayJob %s/%s to be 'Complete'", rayJob.Namespace, rayJob.Name)
g.Eventually(RayJob(test, rayJob.Namespace, rayJob.Name), TestTimeoutMedium).
Should(WithTransform(RayJobDeploymentStatus, Equal(rayv1.JobDeploymentStatusComplete)))

@@ -136,6 +136,6 @@ env_vars:
// Delete the RayJob
err = test.Client().Ray().RayV1().RayJobs(namespace.Name).Delete(test.Ctx(), rayJob.Name, metav1.DeleteOptions{})
g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Deleted RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name)
LogWithTimestamp(test.T(), "Deleted RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name)
})
}
16 changes: 8 additions & 8 deletions ray-operator/test/e2e/rayjob_recovery_test.go
@@ -25,7 +25,7 @@ func TestRayJobRecovery(t *testing.T) {
jobsAC := newConfigMap(namespace.Name, files(test, "long_running_counter.py"))
jobs, err := test.Client().Core().CoreV1().ConfigMaps(namespace.Name).Apply(test.Ctx(), jobsAC, TestApplyOptions)
g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Created ConfigMap %s/%s successfully", jobs.Namespace, jobs.Name)
LogWithTimestamp(test.T(), "Created ConfigMap %s/%s successfully", jobs.Namespace, jobs.Name)

test.T().Run("RayJob should recover after pod deletion", func(_ *testing.T) {
rayJobAC := rayv1ac.RayJob("counter", namespace.Name).
@@ -41,14 +41,14 @@ env_vars:

rayJob, err := test.Client().Ray().RayV1().RayJobs(namespace.Name).Apply(test.Ctx(), rayJobAC, TestApplyOptions)
g.Expect(err).NotTo(HaveOccurred())
test.T().Logf("Created RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name)
LogWithTimestamp(test.T(), "Created RayJob %s/%s successfully", rayJob.Namespace, rayJob.Name)

test.T().Logf("Waiting for RayJob %s/%s to start running", rayJob.Namespace, rayJob.Name)
LogWithTimestamp(test.T(), "Waiting for RayJob %s/%s to start running", rayJob.Namespace, rayJob.Name)
g.Eventually(RayJob(test, rayJob.Namespace, rayJob.Name), TestTimeoutMedium).
Should(WithTransform(RayJobStatus, Equal(rayv1.JobStatusRunning)))
test.T().Logf("Find RayJob %s/%s running", rayJob.Namespace, rayJob.Name)
LogWithTimestamp(test.T(), "Find RayJob %s/%s running", rayJob.Namespace, rayJob.Name)
// wait for the job to run a bit
test.T().Logf("Sleep RayJob %s/%s 15 seconds", rayJob.Namespace, rayJob.Name)
LogWithTimestamp(test.T(), "Sleep RayJob %s/%s 15 seconds", rayJob.Namespace, rayJob.Name)
time.Sleep(15 * time.Second)

// get the running jobpods
@@ -60,14 +60,14 @@ env_vars:
// remove the running jobpods
propagationPolicy := metav1.DeletePropagationBackground
for _, pod := range jobpods.Items {
test.T().Logf("Delete Pod %s from namespace %s", pod.Name, rayJob.Namespace)
LogWithTimestamp(test.T(), "Delete Pod %s from namespace %s", pod.Name, rayJob.Namespace)
err = test.Client().Core().CoreV1().Pods(namespace.Name).Delete(test.Ctx(), pod.Name, metav1.DeleteOptions{
PropagationPolicy: &propagationPolicy,
})
g.Expect(err).NotTo(HaveOccurred())
}

test.T().Logf("Waiting for new pod to be created and running for RayJob %s/%s", namespace.Name, rayJob.Name)
LogWithTimestamp(test.T(), "Waiting for new pod to be created and running for RayJob %s/%s", namespace.Name, rayJob.Name)
g.Eventually(func() ([]corev1.Pod, error) {
pods, err := test.Client().Core().CoreV1().Pods(namespace.Name).List(
test.Ctx(),
@@ -86,7 +86,7 @@ env_vars:
continue
}
}
test.T().Logf("Found new running pod %s/%s", pod.Namespace, pod.Name)
LogWithTimestamp(test.T(), "Found new running pod %s/%s", pod.Namespace, pod.Name)
return true
}
}
(The remaining 14 of the 19 changed files are not shown here.)
