From c9a62cc1195cc4d3a165f02ec35ade34253c8977 Mon Sep 17 00:00:00 2001 From: Jay Pipes Date: Fri, 28 Jul 2023 08:34:33 -0400 Subject: [PATCH] feature: skip-if conditional scenario skip Adds support for `skip-if` collection of evaluable conditions for a scenario. If any of these conditions fails, the test will be skipped. SkipIf contains a list of evaluable conditions that must evaluate successfully before the scenario's tests are executed. This allows test authors to specify "pre-flight checks" that should pass before attempting any of the actions in the scenario's tests. For example, let's assume you have a `gdt-kube` scenario that looks like this: ```yaml tests: - kube.create: manifests/nginx-deployment.yaml - kube: get: deployments/nginx assert: matches: status: readyReplicas: 2 - kube.delete: deployments/nginx ``` If you execute the above test and there is already an 'nginx' deployment, the `kube.create` test will fail. To prevent the scenario from proceeding with the tests if an 'nginx' deployment already exists, you could add the following ```yaml skip-if: - kube.get: deployments/nginx tests: - kube.create: manifests/nginx-deployment.yaml - kube: get: deployments/nginx assert: matches: status: readyReplicas: 2 - kube.delete: deployments/nginx ``` With the above, if an 'nginx' deployment exists already, the scenario will skip all the tests. Signed-off-by: Jay Pipes --- README.md | 3 +++ scenario/parse.go | 37 ++++++++++++++++++++++++++++-- scenario/run.go | 16 +++++++++++++ scenario/run_test.go | 39 +++++++++++++++---------------- scenario/scenario.go | 42 ++++++++++++++++++++++++++++++++++ scenario/stub_plugins_test.go | 18 +++++++++++++++ scenario/testdata/skip-if.yaml | 11 +++++++++ 7 files changed, 143 insertions(+), 23 deletions(-) create mode 100644 scenario/testdata/skip-if.yaml diff --git a/README.md b/README.md index 702e96a..61fce76 100644 --- a/README.md +++ b/README.md @@ -427,6 +427,9 @@ All `gdt` scenarios have the following fields: and configuration values for that plugin. * `fixtures`: (optional) list of strings indicating named fixtures that will be started before any of the tests in the file are run +* `skip-if`: (optional) list of [`Spec`][basespec] specializations that will be + evaluated *before* running any test in the scenario. If any of these + evaluations results in a failure, the test scenario will be skipped. * `tests`: list of [`Spec`][basespec] specializations that represent the runnable test units in the test scenario. diff --git a/scenario/parse.go b/scenario/parse.go index a5096a9..4972ae3 100644 --- a/scenario/parse.go +++ b/scenario/parse.go @@ -89,8 +89,9 @@ func (s *Scenario) UnmarshalYAML(node *yaml.Node) error { return gdterrors.ExpectedScalarAt(keyNode) } key := keyNode.Value - if key == "tests" { - valNode := node.Content[i+1] + valNode := node.Content[i+1] + switch key { + case "tests": if valNode.Kind != yaml.SequenceNode { return gdterrors.ExpectedSequenceAt(valNode) } @@ -122,6 +123,38 @@ func (s *Scenario) UnmarshalYAML(node *yaml.Node) error { return gdterrors.UnknownSpecAt(s.Path, valNode) } } + case "skip-if": + if valNode.Kind != yaml.SequenceNode { + return gdterrors.ExpectedSequenceAt(valNode) + } + for idx, testNode := range valNode.Content { + parsed := false + base := gdttypes.Spec{} + if err := testNode.Decode(&base); err != nil { + return err + } + base.Index = idx + base.Defaults = &defaults + specs := []gdttypes.Evaluable{} + for _, p := range plugins { + specs = append(specs, p.Specs()...) 
+ } + for _, sp := range specs { + if err := testNode.Decode(sp); err != nil { + if errors.Is(err, gdterrors.ErrUnknownField) { + continue + } + return err + } + sp.SetBase(base) + s.SkipIf = append(s.SkipIf, sp) + parsed = true + break + } + if !parsed { + return gdterrors.UnknownSpecAt(s.Path, valNode) + } + } } } return nil diff --git a/scenario/run.go b/scenario/run.go index 275657b..4138119 100644 --- a/scenario/run.go +++ b/scenario/run.go @@ -42,6 +42,22 @@ func (s *Scenario) Run(ctx context.Context, t *testing.T) error { if found { scDefaults = scDefaultsAny.(*Defaults) } + // If the test author has specified any pre-flight checks in the `skip-if` + // collection, evaluate those first and if any failed, skip the scenario's + // tests. + for _, skipIf := range s.SkipIf { + res := skipIf.Eval(ctx, t) + if res.HasRuntimeError() { + return res.RuntimeError() + } + for _, failure := range res.Failures() { + t.Skipf( + "skip-if check %q failed: %s", + skipIf.Base().Title(), failure, + ) + return nil + } + } t.Run(s.Title(), func(t *testing.T) { for _, spec := range s.Tests { // Create a brand new context that inherits the top-level context's diff --git a/scenario/run_test.go b/scenario/run_test.go index b760889..a114602 100644 --- a/scenario/run_test.go +++ b/scenario/run_test.go @@ -6,37 +6,17 @@ package scenario_test import ( "context" - "fmt" "os" "path/filepath" "testing" gdtcontext "github.com/gdt-dev/gdt/context" - "github.com/gdt-dev/gdt/debug" gdterrors "github.com/gdt-dev/gdt/errors" - "github.com/gdt-dev/gdt/result" "github.com/gdt-dev/gdt/scenario" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func (s *fooSpec) Eval(ctx context.Context, t *testing.T) *result.Result { - fails := []error{} - t.Run(s.Title(), func(t *testing.T) { - debug.Printf(ctx, t, "in %s Foo=%s", s.Title(), s.Foo) - // This is just a silly test to demonstrate how to write Eval() methods - // for plugin Spec specialization classes. - if s.Name == "bar" && s.Foo != "bar" { - fail := fmt.Errorf("expected s.Foo = 'bar', got %s", s.Foo) - fails = append(fails, fail) - } else if s.Name != "bar" && s.Foo != "baz" { - fail := fmt.Errorf("expected s.Foo = 'baz', got %s", s.Foo) - fails = append(fails, fail) - } - }) - return result.New(result.WithFailures(fails...)) -} - func TestRun(t *testing.T) { require := require.New(t) @@ -62,7 +42,8 @@ func TestPriorRun(t *testing.T) { require.Nil(err) require.NotNil(s) - s.Run(context.TODO(), t) + err = s.Run(context.TODO(), t) + require.Nil(err) } func TestMissingFixtures(t *testing.T) { @@ -117,3 +98,19 @@ func TestTimeoutCascade(t *testing.T) { err = s.Run(context.TODO(), t) require.Nil(err) } + +func TestSkipIf(t *testing.T) { + require := require.New(t) + + fp := filepath.Join("testdata", "skip-if.yaml") + f, err := os.Open(fp) + require.Nil(err) + + s, err := scenario.FromReader(f, scenario.WithPath(fp)) + require.Nil(err) + require.NotNil(s) + + err = s.Run(context.TODO(), t) + require.Nil(err) + require.True(t.Skipped()) +} diff --git a/scenario/scenario.go b/scenario/scenario.go index c5057c4..3c2d35f 100644 --- a/scenario/scenario.go +++ b/scenario/scenario.go @@ -28,6 +28,48 @@ type Scenario struct { Defaults map[string]interface{} `yaml:"defaults,omitempty"` // Fixtures specifies an ordered list of fixtures the test case depends on. Fixtures []string `yaml:"fixtures,omitempty"` + // SkipIf contains a list of evaluable conditions that must evaluate + // successfully before the scenario's tests are executed. 
This allows test + // authors to specify "pre-flight checks" that should pass before + // attempting any of the actions in the scenario's tests. + // + // For example, let's assume you have a `gdt-kube` scenario that looks like + // this: + // + // ```yaml + // tests: + // - kube.create: manifests/nginx-deployment.yaml + // - kube: + // get: deployments/nginx + // assert: + // matches: + // status: + // readyReplicas: 2 + // - kube.delete: deployments/nginx + // ``` + // + // If you execute the above test and there is already an 'nginx' + // deployment, the `kube.create` test will fail. To prevent the scenario + // from proceeding with the tests if an 'nginx' deployment already exists, + // you could add the following + // + // ```yaml + // skip-if: + // - kube.get: deployments/nginx + // tests: + // - kube.create: manifests/nginx-deployment.yaml + // - kube: + // get: deployments/nginx + // assert: + // matches: + // status: + // readyReplicas: 2 + // - kube.delete: deployments/nginx + // ``` + // + // With the above, if an 'nginx' deployment exists already, the scenario + // will skip all the tests. + SkipIf []gdttypes.Evaluable `yaml:"skip-if,omitempty"` // Tests is the collection of test units in this test case. These will be // the fully parsed and materialized plugin Spec structs. Tests []gdttypes.Evaluable `yaml:"tests,omitempty"` diff --git a/scenario/stub_plugins_test.go b/scenario/stub_plugins_test.go index 97defd1..e48fba1 100644 --- a/scenario/stub_plugins_test.go +++ b/scenario/stub_plugins_test.go @@ -11,6 +11,7 @@ import ( "testing" gdtcontext "github.com/gdt-dev/gdt/context" + "github.com/gdt-dev/gdt/debug" "github.com/gdt-dev/gdt/errors" gdterrors "github.com/gdt-dev/gdt/errors" "github.com/gdt-dev/gdt/plugin" @@ -219,6 +220,23 @@ func (s *fooSpec) UnmarshalYAML(node *yaml.Node) error { return nil } +func (s *fooSpec) Eval(ctx context.Context, t *testing.T) *result.Result { + fails := []error{} + t.Run(s.Title(), func(t *testing.T) { + debug.Printf(ctx, t, "in %s Foo=%s", s.Title(), s.Foo) + // This is just a silly test to demonstrate how to write Eval() methods + // for plugin Spec specialization classes. + if s.Name == "bar" && s.Foo != "bar" { + fail := fmt.Errorf("expected s.Foo = 'bar', got %s", s.Foo) + fails = append(fails, fail) + } else if s.Name != "bar" && s.Foo != "baz" { + fail := fmt.Errorf("expected s.Foo = 'baz', got %s", s.Foo) + fails = append(fails, fail) + } + }) + return result.New(result.WithFailures(fails...)) +} + type fooPlugin struct{} func (p *fooPlugin) Info() gdttypes.PluginInfo { diff --git a/scenario/testdata/skip-if.yaml b/scenario/testdata/skip-if.yaml new file mode 100644 index 0000000..0e45489 --- /dev/null +++ b/scenario/testdata/skip-if.yaml @@ -0,0 +1,11 @@ +name: skip-if +description: a scenario with a skip-if condition +skip-if: + - foo: bar + # This causes the evaluation to fail (expects name=bar when foo=bar) + name: buzzy +tests: + - foo: bar + # Normally this would cause the test to fail, but this will be skipped due + # to the skip-if above. + name: bizzy
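
The new `scenario/testdata/skip-if.yaml` fixture above exercises only the skip path: its `skip-if` evaluation fails, so the scenario is skipped. For contrast, below is a minimal sketch of the pass-through case using the same stub `foo` plugin from `scenario/stub_plugins_test.go`. The file name and values are hypothetical and not part of this patch; they are chosen only so that the stub plugin's `Eval()` (which expects `foo: baz` whenever `name` is not `bar`) reports no failures, meaning the `skip-if` check passes and the scenario's tests run normally.

```yaml
# Hypothetical companion fixture, e.g. scenario/testdata/skip-if-pass.yaml
# (not included in this patch).
name: skip-if-pass
description: a scenario whose skip-if condition passes, so its tests are run
skip-if:
  - foo: baz
    # name is not "bar", so the stub foo plugin expects foo to be "baz";
    # this evaluation succeeds and the scenario is NOT skipped
    name: buzzy
tests:
  - foo: baz
    # this also evaluates successfully, so the scenario passes
    name: bizzy
```

A test exercising it would mirror `TestSkipIf` above, except it would assert that the scenario was not skipped (for example, `require.False(t.Skipped())` after `s.Run()` returns nil).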