Skip to content

Commit

Permalink
scheduler_perf: automatically delete created objects
Browse files Browse the repository at this point in the history
Automatic deletion of created objects is not relevant for namespaced objects, but
matters for the cluster-scoped ResourceClass during unit testing. The current code
works only because there is just one such unit test; it would fail as soon as a
second one is added.

Instead of passing a boolean flag down into all functions where it might be
needed, it's now a context value.
  • Loading branch information
pohly committed Mar 4, 2024
1 parent 47c92e2 commit eb6abf0
Show file tree
Hide file tree
Showing 3 changed files with 46 additions and 3 deletions.
13 changes: 13 additions & 0 deletions test/integration/scheduler_perf/create.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ import (
"fmt"
"time"

apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
Expand Down Expand Up @@ -107,6 +108,18 @@ func (c *createAny) run(tCtx ktesting.TContext) {
}
_, err = resourceClient.Create(tCtx, obj, metav1.CreateOptions{})
}
if err == nil && shouldCleanup(tCtx) {
tCtx.CleanupCtx(func(tCtx ktesting.TContext) {
del := resourceClient.Delete
if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
del = resourceClient.Namespace(c.Namespace).Delete
}
err := del(tCtx, obj.GetName(), metav1.DeleteOptions{})
if !apierrors.IsNotFound(err) {
tCtx.ExpectNoError(err, fmt.Sprintf("deleting %s.%s %s", obj.GetKind(), obj.GetAPIVersion(), klog.KObj(obj)))
}
})
}
return err
}
// Retry, some errors (like CRD just created and type not ready for use yet) are temporary.
Expand Down
34 changes: 32 additions & 2 deletions test/integration/scheduler_perf/scheduler_perf.go
Original file line number Diff line number Diff line change
Expand Up @@ -641,6 +641,30 @@ func initTestOutput(tb testing.TB) io.Writer {
return output
}

type cleanupKeyType struct{}

var cleanupKey = cleanupKeyType{}

// shouldCleanup returns true if a function should clean up resource in the
// apiserver when the test is done. This is true for unit tests (etcd and
// apiserver get reused) and false for benchmarks (each benchmark starts with a
// clean state, so cleaning up just wastes time).
//
// The default if not explicitly set in the context is true.
func shouldCleanup(ctx context.Context) bool {
val := ctx.Value(cleanupKey)
if enabled, ok := val.(bool); ok {
return enabled
}
return true
}

// withCleanup returns a context derived from tCtx which records whether code
// observing it (via shouldCleanup) should delete the objects it creates in the
// apiserver. The default, if never set, is true.
func withCleanup(tCtx ktesting.TContext, enabled bool) ktesting.TContext {
	return ktesting.WithValue(tCtx, cleanupKey, enabled)
}

// perfSchedulingLabelFilter selects which testcases BenchmarkPerfScheduling
// runs, based on their labels; see the flag's usage string for the syntax.
var perfSchedulingLabelFilter = flag.String("perf-scheduling-label-filter", "performance", "comma-separated list of labels which a testcase must have (no prefix or +) or must not have (-), used by BenchmarkPerfScheduling")

// RunBenchmarkPerfScheduling runs the scheduler performance tests.
Expand Down Expand Up @@ -695,7 +719,12 @@ func RunBenchmarkPerfScheduling(b *testing.B, outOfTreePluginRegistry frameworkr
defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, feature, flag)()
}
informerFactory, tCtx := setupClusterForWorkload(tCtx, tc.SchedulerConfigPath, tc.FeatureGates, outOfTreePluginRegistry)
results := runWorkload(tCtx, tc, w, informerFactory, false)

// No need to clean up, each benchmark testcase starts with an empty
// etcd database.
tCtx = withCleanup(tCtx, false)

results := runWorkload(tCtx, tc, w, informerFactory)
dataItems.DataItems = append(dataItems.DataItems, results...)

if len(results) > 0 {
Expand Down Expand Up @@ -799,7 +828,7 @@ func setupClusterForWorkload(tCtx ktesting.TContext, configPath string, featureG
return mustSetupCluster(tCtx, cfg, featureGates, outOfTreePluginRegistry)
}

func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFactory informers.SharedInformerFactory, cleanup bool) []DataItem {
func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFactory informers.SharedInformerFactory) []DataItem {
b, benchmarking := tCtx.TB().(*testing.B)
if benchmarking {
start := time.Now()
Expand All @@ -811,6 +840,7 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
b.ReportMetric(duration.Seconds(), "runtime_seconds")
})
}
cleanup := shouldCleanup(tCtx)

// Disable error checking of the sampling interval length in the
// throughput collector by default. When running benchmarks, report
Expand Down
2 changes: 1 addition & 1 deletion test/integration/scheduler_perf/scheduler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ func TestScheduling(t *testing.T) {
t.Skipf("disabled by label filter %q", *testSchedulingLabelFilter)
}
tCtx := ktesting.WithTB(tCtx, t)
runWorkload(tCtx, tc, w, informerFactory, true)
runWorkload(tCtx, tc, w, informerFactory)
})
}
})
Expand Down

0 comments on commit eb6abf0

Please sign in to comment.