Commit 138b6fe

Check samples in TestDNSResolver instead of log messages
This should hopefully be more reliable, and avoid the flakiness we've
seen on Windows[1], even after #1974:

scheduler_ext_test.go:1130:
    	Error Trace:	D:/a/k6/k6/execution/scheduler_ext_test.go:1130
    	Error:      	Should be true
    	Test:       	TestDNSResolver/cache/3s
    	Messages:   	expected error to contain one of the list of messages

It doesn't get rid of the reliance on timings, but since the way we check DNS
TTLs also relies on time, we can't easily avoid that. Switching to the
MiniRunner instead of js.Runner might let us drop the sleep() in the script,
but part of this test's value is that it calls the k6/http module directly,
which keeps it closer to a real-world script.

[1]: https://github.com/grafana/k6/actions/runs/5047828671/jobs/9071063454
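
In short, the test now asserts on the metric samples the scheduler emits instead of on captured log entries. A condensed sketch of that approach, using the names from the diff below (the surrounding test setup is elided):

    var failedRequests uint32
    samplesHandler := func(sc metrics.SampleContainer) {
        if sc == nil {
            return
        }
        for _, s := range sc.GetSamples() {
            // http_req_failed is 1 for a failed request, 0 otherwise.
            if s.Metric.Name == metrics.HTTPReqFailedName && s.Value == 1 {
                atomic.AddUint32(&failedRequests, 1)
            }
        }
    }

    // newTestScheduler forwards every drained sample to the handler, so after
    // the run the test asserts on a counter instead of on log contents:
    assert.Equal(t, tc.expFailedRequests, atomic.LoadUint32(&failedRequests))

Counting http_req_failed samples avoids matching platform-specific error strings ("connection refused", "request timeout", "dial: i/o timeout"), which is what made the log-based check flaky on Windows.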
Ivan Mirić committed May 23, 2023
1 parent bb7a7d1 commit 138b6fe
Showing 2 changed files with 37 additions and 35 deletions.
8 changes: 4 additions & 4 deletions execution/scheduler_ext_exec_test.go
@@ -83,7 +83,7 @@ func TestExecutionInfoVUSharing(t *testing.T) {
)
require.NoError(t, err)

- ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, logger, lib.Options{})
+ ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, logger, lib.Options{}, nil)
defer cancel()

type vuStat struct {
@@ -196,7 +196,7 @@ func TestExecutionInfoScenarioIter(t *testing.T) {
)
require.NoError(t, err)

- ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, logger, lib.Options{})
+ ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, logger, lib.Options{}, nil)
defer cancel()

errCh := make(chan error, 1)
@@ -278,7 +278,7 @@ func TestSharedIterationsStable(t *testing.T) {
)
require.NoError(t, err)

- ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, logger, lib.Options{})
+ ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, logger, lib.Options{}, nil)
defer cancel()

errCh := make(chan error, 1)
@@ -411,7 +411,7 @@ func TestExecutionInfoAll(t *testing.T) {
}, nil)
require.NoError(t, err)

- ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, logger, lib.Options{})
+ ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, logger, lib.Options{}, nil)
defer cancel()

errCh := make(chan error, 1)
64 changes: 33 additions & 31 deletions execution/scheduler_ext_test.go
@@ -8,7 +8,6 @@ import (
"net"
"net/url"
"reflect"
"strings"
"sync/atomic"
"testing"
"time"
@@ -59,6 +58,7 @@ func getTestRunState(

func newTestScheduler(
t *testing.T, runner lib.Runner, logger logrus.FieldLogger, opts lib.Options,
+ samplesHandler func(metrics.SampleContainer),
) (ctx context.Context, cancel func(), execScheduler *execution.Scheduler, samples chan metrics.SampleContainer) {
if runner == nil {
runner = &minirunner.MiniRunner{}
@@ -81,7 +81,10 @@ func newTestScheduler(
go func() {
for {
select {
- case <-samples:
+ case s := <-samples:
+ if samplesHandler != nil {
+ samplesHandler(s)
+ }
case <-ctx.Done():
return
}
@@ -100,7 +103,7 @@ func newTestScheduler(

func TestSchedulerRun(t *testing.T) {
t.Parallel()
- ctx, cancel, execScheduler, samples := newTestScheduler(t, nil, nil, lib.Options{})
+ ctx, cancel, execScheduler, samples := newTestScheduler(t, nil, nil, lib.Options{}, nil)
defer cancel()

err := make(chan error, 1)
@@ -727,7 +730,7 @@ func TestSchedulerSetupTeardownRun(t *testing.T) {
return nil
},
}
- ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, nil, lib.Options{})
+ ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, nil, lib.Options{}, nil)

err := make(chan error, 1)
go func() { err <- execScheduler.Run(ctx, ctx, samples) }()
@@ -743,7 +746,7 @@ func TestSchedulerSetupTeardownRun(t *testing.T) {
return nil, errors.New("setup error")
},
}
- ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, nil, lib.Options{})
+ ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, nil, lib.Options{}, nil)
defer cancel()
assert.EqualError(t, execScheduler.Run(ctx, ctx, samples), "setup error")
})
@@ -761,7 +764,7 @@ func TestSchedulerSetupTeardownRun(t *testing.T) {
NoSetup: null.BoolFrom(true),
VUs: null.IntFrom(1),
Iterations: null.IntFrom(1),
- })
+ }, nil)
defer cancel()
assert.EqualError(t, execScheduler.Run(ctx, ctx, samples), "teardown error")
})
@@ -779,7 +782,7 @@ func TestSchedulerSetupTeardownRun(t *testing.T) {
ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, nil, lib.Options{
VUs: null.IntFrom(1),
Iterations: null.IntFrom(1),
- })
+ }, nil)
defer cancel()

assert.EqualError(t, execScheduler.Run(ctx, ctx, samples), "teardown error")
@@ -798,7 +801,7 @@ func TestSchedulerSetupTeardownRun(t *testing.T) {
NoTeardown: null.BoolFrom(true),
VUs: null.IntFrom(1),
Iterations: null.IntFrom(1),
- })
+ }, nil)
defer cancel()
assert.NoError(t, execScheduler.Run(ctx, ctx, samples))
})
@@ -843,7 +846,7 @@ func TestSchedulerStages(t *testing.T) {
ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, nil, lib.Options{
VUs: null.IntFrom(1),
Stages: data.Stages,
- })
+ }, nil)
defer cancel()
assert.NoError(t, execScheduler.Run(ctx, ctx, samples))
assert.True(t, execScheduler.GetState().GetCurrentTestRunDuration() >= data.Duration)
@@ -862,7 +865,7 @@ func TestSchedulerEndTime(t *testing.T) {
ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, nil, lib.Options{
VUs: null.IntFrom(10),
Duration: types.NullDurationFrom(1 * time.Second),
- })
+ }, nil)
defer cancel()

endTime, isFinal := lib.GetEndOffset(execScheduler.GetExecutionPlan())
@@ -889,7 +892,7 @@ func TestSchedulerRuntimeErrors(t *testing.T) {
},
}
logger, hook := logtest.NewNullLogger()
- ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, logger, lib.Options{})
+ ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, logger, lib.Options{}, nil)
defer cancel()

endTime, isFinal := lib.GetEndOffset(execScheduler.GetExecutionPlan())
@@ -926,7 +929,7 @@ func TestSchedulerEndErrors(t *testing.T) {
},
}
logger, hook := logtest.NewNullLogger()
- ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, logger, lib.Options{})
+ ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, logger, lib.Options{}, nil)
defer cancel()

endTime, isFinal := lib.GetEndOffset(execScheduler.GetExecutionPlan())
@@ -1010,7 +1013,7 @@ func TestSchedulerIsRunning(t *testing.T) {
return nil
},
}
- ctx, cancel, execScheduler, _ := newTestScheduler(t, runner, nil, lib.Options{})
+ ctx, cancel, execScheduler, _ := newTestScheduler(t, runner, nil, lib.Options{}, nil)
state := execScheduler.GetState()

err := make(chan error)
@@ -1046,8 +1049,8 @@ func TestDNSResolverCache(t *testing.T) {
}`)

testCases := map[string]struct {
- opts lib.Options
- expLogEntries int
+ opts lib.Options
+ expFailedRequests uint32
}{
"default": { // IPs are cached for 5m
lib.Options{DNS: types.DefaultDNSConfig()}, 0,
@@ -1079,11 +1082,9 @@ func TestDNSResolverCache(t *testing.T) {
tc := tc
t.Run(name, func(t *testing.T) {
t.Parallel()

logger := logrus.New()
logger.SetOutput(io.Discard)
- logHook := testutils.NewLogHook(logrus.WarnLevel)
- logger.AddHook(logHook)

registry := metrics.NewRegistry()
builtinMetrics := metrics.RegisterBuiltinMetrics(registry)
runner, err := js.New(
@@ -1100,7 +1101,19 @@ func TestDNSResolverCache(t *testing.T) {
mr := mockresolver.New(nil, net.LookupIP)
runner.ActualResolver = mr.LookupIPAll

- ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, logger, tc.opts)
+ var failedRequests uint32
+ samplesHandler := func(sc metrics.SampleContainer) {
+ if sc == nil {
+ return
+ }
+ for _, s := range sc.GetSamples() {
+ if s.Metric.Name == metrics.HTTPReqFailedName && s.Value == 1 {
+ atomic.AddUint32(&failedRequests, 1)
+ }
+ }
+ }
+
+ ctx, cancel, execScheduler, samples := newTestScheduler(t, runner, logger, tc.opts, samplesHandler)
defer cancel()

mr.Set("myhost", sr("HTTPBIN_IP"))
@@ -1115,18 +1128,7 @@ func TestDNSResolverCache(t *testing.T) {
select {
case err := <-errCh:
require.NoError(t, err)
- entries := logHook.Drain()
- require.Len(t, entries, tc.expLogEntries)
- for _, entry := range entries {
- require.IsType(t, &url.Error{}, entry.Data["error"])
- err, ok := entry.Data["error"].(*url.Error)
- assert.True(t, ok)
- errMsg := err.Error()
- contains := strings.Contains(errMsg, "connection refused") ||
- strings.Contains(errMsg, "request timeout") ||
- strings.Contains(errMsg, "dial: i/o timeout")
- assert.Truef(t, contains, "expected error to contain one of the list of messages")
- }
+ assert.Equal(t, tc.expFailedRequests, atomic.LoadUint32(&failedRequests))
case <-time.After(10 * time.Second):
t.Fatal("timed out")
}
