Merge pull request kubernetes#125627 from yt-huang/clean-up
drop deprecated PollWithContext and adopt PollUntilContextTimeout ins…
k8s-ci-robot committed Jun 26, 2024
2 parents e57f8ad + 2db1b32 commit b29dce0
Showing 27 changed files with 62 additions and 62 deletions.
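
The change is mechanical across the touched files: wait.PollWithContext is deprecated in k8s.io/apimachinery/pkg/util/wait, and wait.PollUntilContextTimeout takes the same context, interval, timeout, and condition, plus an extra immediate bool. Passing false for immediate preserves the old PollWithContext behavior of waiting one interval before the first condition check. A minimal sketch of the before/after shape (waitForReady and its condition are illustrative, not code from this commit):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func waitForReady(ctx context.Context, ready func() bool) error {
	// Before (deprecated):
	//   return wait.PollWithContext(ctx, 100*time.Millisecond, 30*time.Second,
	//       func(ctx context.Context) (bool, error) { return ready(), nil })
	//
	// After: same arguments plus immediate=false, which keeps the old
	// behavior of sleeping one interval before the first check.
	return wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 30*time.Second, false,
		func(ctx context.Context) (bool, error) {
			return ready(), nil
		})
}

func main() {
	err := waitForReady(context.Background(), func() bool { return true })
	fmt.Println(err) // <nil> once the condition reports done
}
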
2 changes: 1 addition & 1 deletion cmd/kube-controller-manager/app/testing/testserver.go
@@ -132,7 +132,7 @@ func StartTestServer(ctx context.Context, customFlags []string) (result TestServ
if err != nil {
return result, fmt.Errorf("failed to create a client: %v", err)
}
err = wait.PollWithContext(ctx, 100*time.Millisecond, 30*time.Second, func(ctx context.Context) (bool, error) {
err = wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
select {
case <-ctx.Done():
return false, ctx.Err()
@@ -496,7 +496,7 @@ func TestInformerList(t *testing.T) {
require.NoError(t, tracker.Add(object1))
require.NoError(t, tracker.Add(object2))

require.NoError(t, wait.PollWithContext(testContext, 100*time.Millisecond, 500*time.Millisecond, func(ctx context.Context) (done bool, err error) {
require.NoError(t, wait.PollUntilContextTimeout(testContext, 100*time.Millisecond, 500*time.Millisecond, false, func(ctx context.Context) (done bool, err error) {
return myController.Informer().LastSyncResourceVersion() == object2.GetResourceVersion(), nil
}))

@@ -508,7 +508,7 @@ func TestInformerList(t *testing.T) {
require.NoError(t, tracker.Delete(fakeGVR, object2.GetNamespace(), object2.GetName()))
require.NoError(t, tracker.Add(object3))

require.NoError(t, wait.PollWithContext(testContext, 100*time.Millisecond, 500*time.Millisecond, func(ctx context.Context) (done bool, err error) {
require.NoError(t, wait.PollUntilContextTimeout(testContext, 100*time.Millisecond, 500*time.Millisecond, false, func(ctx context.Context) (done bool, err error) {
return myController.Informer().LastSyncResourceVersion() == object3.GetResourceVersion(), nil
}))

@@ -519,7 +519,7 @@ func TestInformerList(t *testing.T) {
require.NoError(t, tracker.Add(namespacedObject1))
require.NoError(t, tracker.Add(namespacedObject2))

require.NoError(t, wait.PollWithContext(testContext, 100*time.Millisecond, 500*time.Millisecond, func(ctx context.Context) (done bool, err error) {
require.NoError(t, wait.PollUntilContextTimeout(testContext, 100*time.Millisecond, 500*time.Millisecond, false, func(ctx context.Context) (done bool, err error) {
return myController.Informer().LastSyncResourceVersion() == namespacedObject2.GetResourceVersion(), nil
}))
values, err = myController.Informer().Namespaced(namespacedObject1.GetNamespace()).List(labels.Everything())
2 changes: 1 addition & 1 deletion test/e2e/apimachinery/aggregator.go
@@ -748,7 +748,7 @@ func pollTimed(ctx context.Context, interval, timeout time.Duration, condition w
elapsed := time.Since(start)
framework.Logf(msg, elapsed)
}(time.Now(), msg)
return wait.PollWithContext(ctx, interval, timeout, condition)
return wait.PollUntilContextTimeout(ctx, interval, timeout, false, condition)
}

func validateErrorWithDebugInfo(ctx context.Context, f *framework.Framework, err error, pods *v1.PodList, msg string, fields ...interface{}) {
4 changes: 2 additions & 2 deletions test/e2e/apimachinery/etcd_failure.go
@@ -118,7 +118,7 @@ func checkExistingRCRecovers(ctx context.Context, f *framework.Framework) {
rcSelector := labels.Set{"name": "baz"}.AsSelector()

ginkgo.By("deleting pods from existing replication controller")
framework.ExpectNoError(wait.PollWithContext(ctx, time.Millisecond*500, time.Second*60, func(ctx context.Context) (bool, error) {
framework.ExpectNoError(wait.PollUntilContextTimeout(ctx, time.Millisecond*500, time.Second*60, false, func(ctx context.Context) (bool, error) {
options := metav1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(ctx, options)
if err != nil {
@@ -137,7 +137,7 @@ func checkExistingRCRecovers(ctx context.Context, f *framework.Framework) {
}))

ginkgo.By("waiting for replication controller to recover")
framework.ExpectNoError(wait.PollWithContext(ctx, time.Millisecond*500, time.Second*60, func(ctx context.Context) (bool, error) {
framework.ExpectNoError(wait.PollUntilContextTimeout(ctx, time.Millisecond*500, time.Second*60, false, func(ctx context.Context) (bool, error) {
options := metav1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(ctx, options)
framework.ExpectNoError(err, "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String())
2 changes: 1 addition & 1 deletion test/e2e/apimachinery/flowcontrol.go
@@ -832,7 +832,7 @@ func createFlowSchema(ctx context.Context, f *framework.Framework, flowSchemaNam
// by checking: (1) the dangling priority level reference condition in the flow
// schema status, and (2) metrics. The function times out after 30 seconds.
func waitForSteadyState(ctx context.Context, f *framework.Framework, flowSchemaName string, priorityLevelName string) {
framework.ExpectNoError(wait.PollWithContext(ctx, time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
framework.ExpectNoError(wait.PollUntilContextTimeout(ctx, time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) {
fs, err := f.ClientSet.FlowcontrolV1().FlowSchemas().Get(ctx, flowSchemaName, metav1.GetOptions{})
if err != nil {
return false, err
16 changes: 8 additions & 8 deletions test/e2e/apimachinery/garbage_collector.go
@@ -357,7 +357,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}
ginkgo.By("wait for all pods to be garbage collected")
// wait for the RCs and Pods to reach the expected numbers.
if err := wait.PollWithContext(ctx, 5*time.Second, (60*time.Second)+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, (60*time.Second)+gcInformerResyncRetryTimeout, false, func(ctx context.Context) (bool, error) {
objects := map[string]int{"ReplicationControllers": 0, "Pods": 0}
return verifyRemainingObjects(ctx, f, objects)
}); err != nil {
@@ -406,7 +406,7 @@ var _ = SIGDescribe("Garbage collector", func() {
// actual qps is less than 5. Also, the e2e tests are running in
// parallel, the GC controller might get distracted by other tests.
// According to the test logs, 120s is enough time.
if err := wait.PollWithContext(ctx, 5*time.Second, 120*time.Second+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 120*time.Second+gcInformerResyncRetryTimeout, false, func(ctx context.Context) (bool, error) {
rcs, err := rcClient.List(ctx, metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list rcs: %w", err)
@@ -663,7 +663,7 @@ var _ = SIGDescribe("Garbage collector", func() {
// owner deletion, but in practice there can be a long delay between owner
// deletion and dependent deletion processing. For now, increase the timeout
// and investigate the processing delay.
if err := wait.PollWithContext(ctx, 1*time.Second, 30*time.Second+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 30*time.Second+gcInformerResyncRetryTimeout, false, func(ctx context.Context) (bool, error) {
_, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{})
if err == nil {
pods, _ := podClient.List(ctx, metav1.ListOptions{})
@@ -755,7 +755,7 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("wait for the rc to be deleted")
// TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient.
// Tracked at https://github.com/kubernetes/kubernetes/issues/50046.
if err := wait.PollWithContext(ctx, 5*time.Second, 90*time.Second, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 90*time.Second, false, func(ctx context.Context) (bool, error) {
_, err := rcClient.Get(ctx, rc1.Name, metav1.GetOptions{})
if err == nil {
pods, _ := podClient.List(ctx, metav1.ListOptions{})
@@ -855,7 +855,7 @@ var _ = SIGDescribe("Garbage collector", func() {
var err2 error
// TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient.
// Tracked at https://github.com/kubernetes/kubernetes/issues/50046.
if err := wait.PollWithContext(ctx, 5*time.Second, 90*time.Second+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 90*time.Second+gcInformerResyncRetryTimeout, false, func(ctx context.Context) (bool, error) {
pods, err2 = podClient.List(ctx, metav1.ListOptions{})
if err2 != nil {
return false, fmt.Errorf("failed to list pods: %w", err)
@@ -985,7 +985,7 @@ var _ = SIGDescribe("Garbage collector", func() {
// Ensure the dependent is deleted.
var lastDependent *unstructured.Unstructured
var err2 error
if err := wait.PollWithContext(ctx, 5*time.Second, 60*time.Second, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) {
lastDependent, err2 = resourceClient.Get(ctx, dependentName, metav1.GetOptions{})
return apierrors.IsNotFound(err2), nil
}); err != nil {
@@ -1088,7 +1088,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}

ginkgo.By("wait for the owner to be deleted")
if err := wait.PollWithContext(ctx, 5*time.Second, 120*time.Second, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, 120*time.Second, false, func(ctx context.Context) (bool, error) {
_, err = resourceClient.Get(ctx, ownerName, metav1.GetOptions{})
if err == nil {
return false, nil
@@ -1150,7 +1150,7 @@ func waitForReplicas(ctx context.Context, rc *v1.ReplicationController, rcClient
lastObservedRC *v1.ReplicationController
err error
)
if err := wait.PollWithContext(ctx, framework.Poll, replicaSyncTimeout, func(ctx context.Context) (bool, error) {
if err := wait.PollUntilContextTimeout(ctx, framework.Poll, replicaSyncTimeout, false, func(ctx context.Context) (bool, error) {
lastObservedRC, err = rcClient.Get(ctx, rc.Name, metav1.GetOptions{})
if err != nil {
return false, err
12 changes: 6 additions & 6 deletions test/e2e/apimachinery/resource_quota.go
@@ -38,7 +38,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
utilrand "k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
watch "k8s.io/apimachinery/pkg/watch"
"k8s.io/apimachinery/pkg/watch"
quota "k8s.io/apiserver/pkg/quota/v1"
clientset "k8s.io/client-go/kubernetes"
clientscheme "k8s.io/client-go/kubernetes/scheme"
@@ -165,7 +165,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
found, unchanged := 0, 0
// On contended servers the service account controller can slow down, leading to the count changing during a run.
// Wait up to 5s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
err := wait.PollWithContext(ctx, 1*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) {
secrets, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err)
if len(secrets.Items) == found {
@@ -331,7 +331,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
found, unchanged := 0, 0
// On contended servers the service account controller can slow down, leading to the count changing during a run.
// Wait up to 15s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
err := wait.PollWithContext(ctx, 1*time.Second, time.Minute, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, 1*time.Second, time.Minute, false, func(ctx context.Context) (bool, error) {
configmaps, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err)
if len(configmaps.Items) == found {
@@ -2121,7 +2121,7 @@ func deleteResourceQuota(ctx context.Context, c clientset.Interface, namespace,
// Wait up to 5s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
func countResourceQuota(ctx context.Context, c clientset.Interface, namespace string) (int, error) {
found, unchanged := 0, 0
return found, wait.PollWithContext(ctx, 1*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
return found, wait.PollUntilContextTimeout(ctx, 1*time.Second, 30*time.Second, false, func(ctx context.Context) (bool, error) {
resourceQuotas, err := c.CoreV1().ResourceQuotas(namespace).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err)
if len(resourceQuotas.Items) == found {
@@ -2137,7 +2137,7 @@ func countResourceQuota(ctx context.Context, c clientset.Interface, namespace st

// wait for resource quota status to show the expected used resources value
func waitForResourceQuota(ctx context.Context, c clientset.Interface, ns, quotaName string, used v1.ResourceList) error {
return wait.PollWithContext(ctx, framework.Poll, resourceQuotaTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, resourceQuotaTimeout, false, func(ctx context.Context) (bool, error) {
resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{})
if err != nil {
return false, err
@@ -2160,7 +2160,7 @@ func waitForResourceQuota(ctx context.Context, c clientset.Interface, ns, quotaN
// updateResourceQuotaUntilUsageAppears updates the resource quota object until the usage is populated
// for the specific resource name.
func updateResourceQuotaUntilUsageAppears(ctx context.Context, c clientset.Interface, ns, quotaName string, resourceName v1.ResourceName) error {
return wait.PollWithContext(ctx, framework.Poll, resourceQuotaTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, resourceQuotaTimeout, false, func(ctx context.Context) (bool, error) {
resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{})
if err != nil {
return false, err
16 changes: 8 additions & 8 deletions test/e2e/apps/cronjob.go
@@ -622,7 +622,7 @@ func deleteCronJob(ctx context.Context, c clientset.Interface, ns, name string)

// Wait for at least given amount of active jobs.
func waitForActiveJobs(ctx context.Context, c clientset.Interface, ns, cronJobName string, active int) error {
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, cronJobTimeout, false, func(ctx context.Context) (bool, error) {
curr, err := getCronJob(ctx, c, ns, cronJobName)
if err != nil {
return false, err
@@ -633,7 +633,7 @@ func waitForActiveJobs(ctx context.Context, c clientset.Interface, ns, cronJobNa

// Wait till a given job actually goes away from the Active list for a given cronjob
func waitForJobNotActive(ctx context.Context, c clientset.Interface, ns, cronJobName, jobName string) error {
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, cronJobTimeout, false, func(ctx context.Context) (bool, error) {
curr, err := getCronJob(ctx, c, ns, cronJobName)
if err != nil {
return false, err
@@ -650,7 +650,7 @@ func waitForJobNotActive(ctx context.Context, c clientset.Interface, ns, cronJob

// Wait for a job to disappear by listing them explicitly.
func waitForJobToDisappear(ctx context.Context, c clientset.Interface, ns string, targetJob *batchv1.Job) error {
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, cronJobTimeout, false, func(ctx context.Context) (bool, error) {
jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
@@ -667,7 +667,7 @@ func waitForJobToDisappear(ctx context.Context, c clientset.Interface, ns string

// Wait for a pod to disappear by listing them explicitly.
func waitForJobsPodToDisappear(ctx context.Context, c clientset.Interface, ns string, targetJob *batchv1.Job) error {
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, cronJobTimeout, false, func(ctx context.Context) (bool, error) {
options := metav1.ListOptions{LabelSelector: fmt.Sprintf("controller-uid=%s", targetJob.UID)}
pods, err := c.CoreV1().Pods(ns).List(ctx, options)
if err != nil {
@@ -679,7 +679,7 @@ func waitForJobsPodToDisappear(ctx context.Context, c clientset.Interface, ns st

// Wait for a job to be replaced with a new one.
func waitForJobReplaced(ctx context.Context, c clientset.Interface, ns, previousJobName string) error {
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, cronJobTimeout, false, func(ctx context.Context) (bool, error) {
jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
@@ -698,7 +698,7 @@ func waitForJobReplaced(ctx context.Context, c clientset.Interface, ns, previous

// waitForJobsAtLeast waits for at least a number of jobs to appear.
func waitForJobsAtLeast(ctx context.Context, c clientset.Interface, ns string, atLeast int) error {
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, cronJobTimeout, false, func(ctx context.Context) (bool, error) {
jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
@@ -709,7 +709,7 @@ func waitForJobsAtLeast(ctx context.Context, c clientset.Interface, ns string, a

// waitForAnyFinishedJob waits for any completed job to appear.
func waitForAnyFinishedJob(ctx context.Context, c clientset.Interface, ns string) error {
return wait.PollWithContext(ctx, framework.Poll, cronJobTimeout, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, cronJobTimeout, false, func(ctx context.Context) (bool, error) {
jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
@@ -725,7 +725,7 @@ func waitForAnyFinishedJob(ctx context.Context, c clientset.Interface, ns string

// waitForEventWithReason waits for events with a reason within a list has occurred
func waitForEventWithReason(ctx context.Context, c clientset.Interface, ns, cronJobName string, reasons []string) error {
return wait.PollWithContext(ctx, framework.Poll, 30*time.Second, func(ctx context.Context) (bool, error) {
return wait.PollUntilContextTimeout(ctx, framework.Poll, 30*time.Second, false, func(ctx context.Context) (bool, error) {
sj, err := getCronJob(ctx, c, ns, cronJobName)
if err != nil {
return false, err
2 changes: 1 addition & 1 deletion test/e2e/apps/daemon_restart.go
@@ -112,7 +112,7 @@ func (r *RestartDaemonConfig) waitUp(ctx context.Context) {
"curl -s -o %v -I -w \"%%{http_code}\" http://localhost:%v/healthz", nullDev, r.healthzPort)

}
err := wait.PollWithContext(ctx, r.pollInterval, r.pollTimeout, func(ctx context.Context) (bool, error) {
err := wait.PollUntilContextTimeout(ctx, r.pollInterval, r.pollTimeout, false, func(ctx context.Context) (bool, error) {
result, err := e2essh.NodeExec(ctx, r.nodeName, healthzCheck, framework.TestContext.Provider)
if err != nil {
return false, err
2 changes: 1 addition & 1 deletion test/e2e/apps/deployment.go
@@ -1153,7 +1153,7 @@ func testDeploymentsControllerRef(ctx context.Context, f *framework.Framework) {
framework.ExpectNoError(err)

ginkgo.By("Wait for the ReplicaSet to be orphaned")
err = wait.PollWithContext(ctx, dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels))
err = wait.PollUntilContextTimeout(ctx, dRetryPeriod, dRetryTimeout, false, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels))
framework.ExpectNoError(err, "error waiting for Deployment ReplicaSet to be orphaned")

deploymentName = "test-adopt-deployment"
