diff --git a/go.mod b/go.mod index 29489aa9..6a52fc97 100644 --- a/go.mod +++ b/go.mod @@ -59,6 +59,7 @@ require ( github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect github.com/muesli/cancelreader v0.2.2 // indirect github.com/muesli/termenv v0.15.2 // indirect + github.com/otiai10/copy v1.14.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rivo/uniseg v0.4.6 // indirect github.com/zclconf/go-cty v1.13.0 // indirect diff --git a/go.sum b/go.sum index a466b03d..054aa4c2 100644 --- a/go.sum +++ b/go.sum @@ -430,6 +430,8 @@ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGV github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= +github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= github.com/packer-community/winrmcp v0.0.0-20180921211025-c76d91c1e7db/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0= diff --git a/internal/app/app.go b/internal/app/app.go index 0dfc2b0f..1d67f476 100644 --- a/internal/app/app.go +++ b/internal/app/app.go @@ -8,6 +8,7 @@ import ( "fmt" "io" "os" + "sync" tea "github.com/charmbracelet/bubbletea" "github.com/leg100/pug/internal" @@ -21,25 +22,8 @@ import ( "github.com/leg100/pug/internal/workspace" ) -type app struct { - modules *module.Service - workspaces *workspace.Service - states *state.Service - runs *run.Service - tasks *task.Service - logger *logging.Logger - cfg config -} - -type sender interface { - Send(tea.Msg) -} - // Start the app. func Start(stdout, stderr io.Writer, args []string) error { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - // Parse configuration from env vars and flags cfg, err := parse(stderr, args) if err != nil { @@ -51,10 +35,12 @@ func Start(stdout, stderr io.Writer, args []string) error { return nil } - app, model, err := newApp(ctx, cfg) + // Start daemons and create event subscriptions. + app, err := startApp(cfg, stdout) if err != nil { return err } + defer app.cleanup() // Log some info useful to the user app.logger.Info("loaded config", @@ -66,7 +52,7 @@ func Start(stdout, stderr io.Writer, args []string) error { ) p := tea.NewProgram( - model, + app.model, // Use the full size of the terminal with its "alternate screen buffer" tea.WithAltScreen(), // Enabling mouse cell motion removes the ability to "blackboard" text @@ -77,20 +63,28 @@ func Start(stdout, stderr io.Writer, args []string) error { // tea.WithMouseCellMotion(), ) - // Start daemons and relay events. When the app exits, use the returned func - // to wait for any running tasks to complete. - cleanup := app.start(ctx, stdout, p) + // Relay events to TUI + app.relay(p) // Blocks until user quits if _, err := p.Run(); err != nil { return err } - return cleanup() + return nil +} + +type app struct { + model tea.Model + ch chan tea.Msg + logger *logging.Logger + cleanup func() } -// newApp constructs an instance of the app and the top-level TUI model. 
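// Taken together, the refactor splits what newApp+start used to do into three
// pieces that Start composes: startApp wires services, daemons and event
// subscriptions; relay pumps buffered events into the bubbletea program; and
// cleanup (deferred) closes subscriptions, drains the relays and waits for
// running tasks. A condensed view of the ordering, mirroring the Start
// function above rather than adding anything new:
//
//	app, _ := startApp(cfg, stdout) // start daemons, subscribe to events
//	defer app.cleanup()             // close subscriptions, drain relays, wait for tasks
//	p := tea.NewProgram(app.model, tea.WithAltScreen())
//	app.relay(p)                    // forward app.ch to p.Send in the background
//	_, _ = p.Run()                  // blocks until the user quits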
-func newApp(ctx context.Context, cfg config) (*app, tea.Model, error) { +// startApp starts the application, constructing services, starting daemons and +// subscribing to events. The returned app is used for constructing the TUI and +// relaying events. The app's cleanup function should be called when finished. +func startApp(cfg config, stdout io.Writer) (*app, error) { // Setup logging logger := logging.NewLogger(cfg.loggingOptions) @@ -98,14 +92,15 @@ func newApp(ctx context.Context, cfg config) (*app, tea.Model, error) { // defined types. workdir, err := internal.NewWorkdir(cfg.WorkDir) if err != nil { - return nil, nil, err + return nil, err } // Instantiate services tasks := task.NewService(task.ServiceOptions{ - Program: cfg.Program, - Logger: logger, - Workdir: workdir, + Program: cfg.Program, + Logger: logger, + Workdir: workdir, + UserEnvs: cfg.Envs, }) modules := module.NewService(module.ServiceOptions{ TaskService: tasks, @@ -118,8 +113,7 @@ func newApp(ctx context.Context, cfg config) (*app, tea.Model, error) { ModuleService: modules, Logger: logger, }) - // TODO: separate auto-state pull code - states := state.NewService(ctx, state.ServiceOptions{ + states := state.NewService(state.ServiceOptions{ ModuleService: modules, WorkspaceService: workspaces, TaskService: tasks, @@ -135,17 +129,6 @@ func newApp(ctx context.Context, cfg config) (*app, tea.Model, error) { Logger: logger, }) - // Shutdown services once context is done. - go func() { - <-ctx.Done() - logger.Shutdown() - tasks.Shutdown() - modules.Shutdown() - workspaces.Shutdown() - states.Shutdown() - runs.Shutdown() - }() - // Construct top-level TUI model. model, err := top.New(top.Options{ TaskService: tasks, @@ -160,86 +143,123 @@ func newApp(ctx context.Context, cfg config) (*app, tea.Model, error) { Debug: cfg.Debug, }) if err != nil { - return nil, nil, err + return nil, err } - app := &app{ - modules: modules, - workspaces: workspaces, - runs: runs, - states: states, - tasks: tasks, - cfg: cfg, - logger: logger, - } - return app, model, nil -} + ctx, cancel := context.WithCancel(context.Background()) -// start starts the app daemons and relays events to the TUI, returning a func -// to wait for running tasks to complete. -func (a *app) start(ctx context.Context, stdout io.Writer, s sender) func() error { // Start daemons - task.StartEnqueuer(a.tasks) - run.StartScheduler(a.runs, a.workspaces) - waitfn := task.StartRunner(ctx, a.logger, a.tasks, a.cfg.MaxTasks) + task.StartEnqueuer(tasks) + run.StartScheduler(runs, workspaces) + waitTasks := task.StartRunner(ctx, logger, tasks, cfg.MaxTasks) // Automatically load workspaces whenever modules are loaded. - a.workspaces.LoadWorkspacesUponModuleLoad(a.modules) + workspaces.LoadWorkspacesUponModuleLoad(modules) // Relay resource events to TUI. Deliberately set up subscriptions *before* // any events are triggered, to ensure the TUI receives all messages. 
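// The six relay goroutines that follow all have the same shape: forward every
// event from a subscription channel into the shared ch, and mark the
// WaitGroup done once the subscription closes, so cleanup knows when it is
// safe to close ch. A generic helper capturing that pattern might look like
// this (forward is illustrative only; the change spells each goroutine out
// explicitly):
func forward[T any](wg *sync.WaitGroup, in <-chan T, out chan<- tea.Msg) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		for ev := range in {
			out <- ev // tea.Msg is an empty interface, so any event type satisfies it
		}
	}()
}

// Usage, in place of one of the hand-written relays below:
//
//	forward(&wg, logger.Subscribe(), ch)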
- logEvents := a.logger.Subscribe() + ch := make(chan tea.Msg) + wg := sync.WaitGroup{} // sync closure of subscriptions + + logEvents := logger.Subscribe() + wg.Add(1) go func() { for ev := range logEvents { - s.Send(ev) + ch <- ev } + wg.Done() }() - modEvents := a.modules.Subscribe() + + modEvents := modules.Subscribe() + wg.Add(1) go func() { for ev := range modEvents { - s.Send(ev) + ch <- ev } + wg.Done() }() - wsEvents := a.workspaces.Subscribe() + + wsEvents := workspaces.Subscribe() + wg.Add(1) go func() { for ev := range wsEvents { - s.Send(ev) + ch <- ev } + wg.Done() }() - stateEvents := a.states.Subscribe() + + stateEvents := states.Subscribe() + wg.Add(1) go func() { for ev := range stateEvents { - s.Send(ev) + ch <- ev } + wg.Done() }() - runEvents := a.runs.Subscribe() + + runEvents := runs.Subscribe() + wg.Add(1) go func() { for ev := range runEvents { - s.Send(ev) + ch <- ev } + wg.Done() }() - taskEvents := a.tasks.Subscribe() + + taskEvents := tasks.Subscribe() + wg.Add(1) go func() { for ev := range taskEvents { - s.Send(ev) + ch <- ev } + wg.Done() }() - // Return cleanup function to be invoked when app is terminated. - return func() error { - // Cancel any running processes - running := a.tasks.List(task.ListOptions{Status: []task.Status{task.Running}}) - if len(running) > 0 { - fmt.Fprintf(stdout, "terminating %d terraform processes\n", len(running)) - } - for _, task := range running { - a.tasks.Cancel(task.ID) - } + // cleanup function to be invoked when app is terminated. + cleanup := func() { + // Cancel context + cancel() + + // Close subscriptions + logger.Shutdown() + tasks.Shutdown() + modules.Shutdown() + workspaces.Shutdown() + states.Shutdown() + runs.Shutdown() + + // Wait for relays to finish before closing channel, to avoid sends + // to a closed channel, which would result in a panic. + wg.Wait() + close(ch) + // Remove all run artefacts (plan files etc,...) - for _, run := range a.runs.List(run.ListOptions{}) { + for _, run := range runs.List(run.ListOptions{}) { _ = os.RemoveAll(run.ArtefactsPath()) } - // Wait for canceled tasks to terminate. - return waitfn() + + // Wait for running tasks to terminate. Canceling the context (above) + // sends each task a termination signal so each task's process should + // shut itself down. + waitTasks() } + return &app{ + model: model, + ch: ch, + cleanup: cleanup, + logger: logger, + }, nil +} + +type tui interface { + Send(tea.Msg) +} + +// relay events to TUI. +func (a *app) relay(s tui) { + go func() { + for msg := range a.ch { + s.Send(msg) + } + }() } diff --git a/internal/app/config.go b/internal/app/config.go index 506c4838..d0696026 100644 --- a/internal/app/config.go +++ b/internal/app/config.go @@ -23,6 +23,7 @@ type config struct { DisableReloadAfterApply bool WorkDir string DataDir string + Envs []string loggingOptions logging.Options version bool @@ -44,6 +45,7 @@ func parse(stderr io.Writer, args []string) (config, error) { fs.StringVar(&cfg.WorkDir, 'w', "workdir", ".", "The working directory containing modules.") fs.IntVar(&cfg.MaxTasks, 't', "max-tasks", 2*runtime.NumCPU(), "The maximum number of parallel tasks.") fs.StringVar(&cfg.DataDir, 0, "data-dir", defaultDataDir, "Directory in which to store plan files.") + fs.StringListVar(&cfg.Envs, 'e', "env", "Environment variable to pass to terraform process. 
Can set more than once.") fs.StringEnumVar(&cfg.FirstPage, 'f', "first-page", "The first page to open on startup.", "modules", "workspaces", "runs", "tasks", "logs") fs.BoolVar(&cfg.Debug, 'd', "debug", "Log bubbletea messages to messages.log") fs.BoolVar(&cfg.version, 'v', "version", "Print version.") diff --git a/internal/app/config_test.go b/internal/app/config_test.go index 48332f72..610d9177 100644 --- a/internal/app/config_test.go +++ b/internal/app/config_test.go @@ -129,6 +129,27 @@ func TestConfig(t *testing.T) { assert.Equal(t, got.FirstPage, "runs") }, }, + { + "set terraform process environment variable", + "", + []string{"-e", "TF_LOG=DEBUG"}, + nil, + func(t *testing.T, got config) { + assert.Equal(t, got.Envs, []string{"TF_LOG=DEBUG"}) + }, + }, + { + "set multiple terraform process environment variables", + "", + []string{"-e", "TF_LOG=DEBUG", "-e", "TF_IGNORE=TRACE", "-e", "TF_PLUGIN_CACHE_DIR=/tmp"}, + nil, + func(t *testing.T, got config) { + assert.Len(t, got.Envs, 3) + assert.Contains(t, got.Envs, "TF_LOG=DEBUG") + assert.Contains(t, got.Envs, "TF_IGNORE=TRACE") + assert.Contains(t, got.Envs, "TF_PLUGIN_CACHE_DIR=/tmp") + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/internal/app/helpers_test.go b/internal/app/helpers_test.go index 8931896b..6f8111e8 100644 --- a/internal/app/helpers_test.go +++ b/internal/app/helpers_test.go @@ -1,10 +1,10 @@ package app import ( - "context" "io" - "io/fs" - "os" + + cp "github.com/otiai10/copy" + "path/filepath" "regexp" "strings" @@ -14,7 +14,6 @@ import ( "github.com/charmbracelet/x/exp/teatest" "github.com/leg100/pug/internal" "github.com/leg100/pug/internal/logging" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -38,30 +37,29 @@ func setup(t *testing.T, workdir string, sopts ...setupOption) *teatest.TestMode fn(&opts) } - // Clean up any leftover artefacts from previous tests (previous tests - // neglect to clean up artefacts if they end with a panic). - cleanupArtefacts(workdir, opts) - - // And clean up artefacts once test finishes - t.Cleanup(func() { - cleanupArtefacts(workdir, opts) - }) - - // Cancel context once test finishes. - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) + // Copy workdir to a dedicated directory for this test, to ensure any + // artefacts created in workdir are done so in isolation from other + // tests that are run in parallel, and to ensure artefacts don't persist to + // future invocations of this test. + target := t.TempDir() + err := cp.Copy(workdir, target) + require.NoError(t, err) + workdir = target - // Setup provider mirror - setupProviderMirror(t) + // Get absolute path to terraform cli config. The config sets up terraform + // to use a provider filesystem mirror, which ensures tests avoid any + // network roundtrips to retrieve or query providers. 
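// Entries in cfg.Envs flow through task.ServiceOptions.UserEnvs into the
// task's exec.Cmd.Env, which expects each variable as a single "KEY=VALUE"
// string, so a variable set this way takes the form:
//
//	Envs: []string{"TF_CLI_CONFIG_FILE=" + mirrorConfigPath},
//
// Note that exec.Cmd.Env keeps only the last value for a duplicated key, so
// with the append order used in task.go (userEnvs, then opts.Env, then
// os.Environ()), a variable already present in pug's own environment takes
// precedence over a --env value of the same name.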
+ mirrorConfigPath, err := filepath.Abs("../../mirror/mirror.tfrc") + require.NoError(t, err) - app, m, err := newApp( - ctx, + app, err := startApp( config{ FirstPage: "modules", Program: "terraform", WorkDir: workdir, MaxTasks: 3, DataDir: t.TempDir(), + Envs: []string{"TF_CLI_CONFIG_FILE", mirrorConfigPath}, loggingOptions: logging.Options{ Level: "debug", AdditionalWriters: []io.Writer{ @@ -69,58 +67,24 @@ func setup(t *testing.T, workdir string, sopts ...setupOption) *teatest.TestMode }, }, }, + io.Discard, ) require.NoError(t, err) + t.Cleanup(app.cleanup) tm := teatest.NewTestModel( t, - m, + app.model, teatest.WithInitialTermSize(100, 50), ) - cleanup := app.start(ctx, io.Discard, tm) t.Cleanup(func() { - err := cleanup() - assert.NoError(t, err, "cleaning up app resources") + tm.Quit() }) - return tm -} + // Relay events to TUI + app.relay(tm) -// cleanupArtefacts removes all the detritus that terraform leaves behind. -func cleanupArtefacts(workdir string, opts setupOptions) { - _ = filepath.WalkDir(workdir, func(path string, d fs.DirEntry, walkerr error) error { - if walkerr != nil { - return walkerr - } - if d.IsDir() && d.Name() == ".terraform" { - os.RemoveAll(path) - return fs.SkipDir - } - // TODO: consider leaving lock file; it prevents a warning message cropping - // up. - if filepath.Base(path) == ".terraform.lock.hcl" { - os.Remove(path) - } - if !opts.keepState && filepath.Base(path) == "terraform.tfstate" { - os.Remove(path) - } - if strings.HasSuffix(filepath.Base(path), ".backup") { - os.Remove(path) - } - return nil - }) - -} - -// setupProviderMirror configures a dedicated provider filesystem mirror for -// a test. -func setupProviderMirror(t *testing.T) { - t.Helper() - - abs, err := filepath.Abs("../../mirror/mirror.tfrc") - require.NoError(t, err) - - t.Setenv("TF_CLI_CONFIG_FILE", abs) + return tm } // testLogger relays pug log records to the go test logger diff --git a/internal/app/module_list_test.go b/internal/app/module_list_test.go index 22481167..c298a8a6 100644 --- a/internal/app/module_list_test.go +++ b/internal/app/module_list_test.go @@ -8,6 +8,8 @@ import ( ) func TestModuleList(t *testing.T) { + t.Parallel() + tm := setup(t, "./testdata/module_list") // Expect three modules to be listed @@ -69,6 +71,8 @@ func TestModuleList(t *testing.T) { } func TestModuleList_Reload(t *testing.T) { + t.Parallel() + tm := setup(t, "./testdata/module_list") // Expect message to inform user that three modules have been loaded @@ -87,6 +91,8 @@ func TestModuleList_Reload(t *testing.T) { } func TestModuleList_ReloadWorkspaces(t *testing.T) { + t.Parallel() + tm := setup(t, "./testdata/module_list") // Expect message to inform user that three modules have been loaded @@ -118,6 +124,8 @@ func TestModuleList_ReloadWorkspaces(t *testing.T) { // then attempting to create a run on each. Pug should de-select those // selections which are not initialized / have no current workspace. func TestModuleList_CreateRun(t *testing.T) { + t.Parallel() + tm := setup(t, "./testdata/module_list") // Expect message to inform user that modules have been loaded @@ -162,6 +170,8 @@ func TestModuleList_CreateRun(t *testing.T) { // should de-select those selections which have no current run in a planned // state. 
func TestModuleList_ApplyCurrentRun(t *testing.T) { + t.Parallel() + tm := setup(t, "./testdata/module_list") // Expect message to inform user that modules have been loaded diff --git a/internal/app/module_test.go b/internal/app/module_test.go index 4e5a394a..a30ac911 100644 --- a/internal/app/module_test.go +++ b/internal/app/module_test.go @@ -8,7 +8,9 @@ import ( "github.com/leg100/pug/internal" ) -func TestModule(t *testing.T) { +func TestModule_Init(t *testing.T) { + t.Parallel() + tm := setup(t, "./testdata/module_list") // Expect module to be listed @@ -26,6 +28,8 @@ func TestModule(t *testing.T) { } func TestModule_SetCurrentWorkspace(t *testing.T) { + t.Parallel() + tm := setup(t, "./testdata/module_list") // Wait for module to be loaded @@ -63,6 +67,8 @@ func TestModule_SetCurrentWorkspace(t *testing.T) { } func TestModule_ReloadWorkspaces(t *testing.T) { + t.Parallel() + tm := setup(t, "./testdata/module_list") // Wait for module to be loaded @@ -95,6 +101,8 @@ func TestModule_ReloadWorkspaces(t *testing.T) { // TestModule_Destroy demonstrates creating a destroy plan on a module. func TestModule_Destroy(t *testing.T) { + t.Parallel() + // Setup test with pre-existing state tm := setup(t, "./testdata/module_destroy", keepState()) diff --git a/internal/app/quit_test.go b/internal/app/quit_test.go index 4126d6f0..9c529cca 100644 --- a/internal/app/quit_test.go +++ b/internal/app/quit_test.go @@ -10,6 +10,8 @@ import ( ) func TestQuit(t *testing.T) { + t.Parallel() + tm := setup(t, "./testdata/module_list") tm.Send(tea.KeyMsg{ diff --git a/internal/app/run_list_test.go b/internal/app/run_list_test.go index 69c77369..e8f9cc6e 100644 --- a/internal/app/run_list_test.go +++ b/internal/app/run_list_test.go @@ -10,6 +10,8 @@ import ( // TestRunList_Single tests interacting with a single run in the run list view. func TestRunList_Single(t *testing.T) { + t.Parallel() + tm := setup(t, "./testdata/module_list") // Wait for module to be loaded @@ -74,11 +76,13 @@ func TestRunList_Single(t *testing.T) { // TestRunList_Multiple demonstrates interacting with multiple runs on the run // list page. func TestRunList_Multiple(t *testing.T) { + t.Parallel() + tm := setup(t, "./testdata/module_list") // Expect message to inform user that modules have been loaded waitFor(t, tm, func(s string) bool { - return strings.Contains(s, "loaded 3 modules") + return matchPattern(t, "(?s)modules/a.*modules/b.*modules/c", s) }) // Select all modules and init diff --git a/internal/app/run_test.go b/internal/app/run_test.go index eba079db..18ecbccd 100644 --- a/internal/app/run_test.go +++ b/internal/app/run_test.go @@ -8,6 +8,8 @@ import ( ) func TestRun(t *testing.T) { + t.Parallel() + tm := setup(t, "./testdata/module_list") // Initialize and apply run on modules/a @@ -17,6 +19,8 @@ func TestRun(t *testing.T) { // TestRun_Stale tests that a planned run is placed into the 'stale' state when // a succeeding run is created. func TestRun_Stale(t *testing.T) { + t.Parallel() + tm := setup(t, "./testdata/module_list") // Wait for module to be loaded @@ -80,6 +84,8 @@ func TestRun_Stale(t *testing.T) { } func TestRun_WithVars(t *testing.T) { + t.Parallel() + tm := setup(t, "./testdata/run_with_vars") // Wait for module to be loaded @@ -106,12 +112,14 @@ func TestRun_WithVars(t *testing.T) { // User should now be taken to the run page... 
- // Expect to see summary of changes + // Expect to see summary of changes, and the run should be in the planned + // state waitFor(t, tm, func(s string) bool { // Remove formatting s = internal.StripAnsi(s) return strings.Contains(s, "Changes to Outputs:") && - strings.Contains(s, `+ foo = "override"`) + strings.Contains(s, `+ foo = "override"`) && + strings.Contains(s, "planned") }) // Apply plan and provide confirmation diff --git a/internal/app/testdata/workspace_resources/modules/a/main.tf b/internal/app/testdata/workspace_resources/modules/a/main.tf new file mode 100644 index 00000000..a250b310 --- /dev/null +++ b/internal/app/testdata/workspace_resources/modules/a/main.tf @@ -0,0 +1,15 @@ +terraform { + backend "local" {} +} + +resource "random_pet" "pet" { + count = 10 + + keepers = { + now = timestamp() + } +} + +output "pets" { + value = random_pet.pet[*].id +} diff --git a/internal/app/testdata/workspace_resources/modules/a/terraform.tfstate b/internal/app/testdata/workspace_resources/modules/a/terraform.tfstate new file mode 100644 index 00000000..8287612b --- /dev/null +++ b/internal/app/testdata/workspace_resources/modules/a/terraform.tfstate @@ -0,0 +1,188 @@ +{ + "version": 4, + "terraform_version": "1.6.2", + "serial": 11, + "lineage": "863a9ba0-33ee-c92d-f7ff-5c6ebaa14a53", + "outputs": { + "pets": { + "value": [ + "vast-marten", + "brave-dolphin", + "usable-roughy", + "upright-camel", + "up-gobbler", + "elegant-dane", + "engaged-buck", + "hip-ringtail", + "exact-orca", + "thorough-caiman" + ], + "type": [ + "tuple", + [ + "string", + "string", + "string", + "string", + "string", + "string", + "string", + "string", + "string", + "string" + ] + ] + } + }, + "resources": [ + { + "mode": "managed", + "type": "random_pet", + "name": "pet", + "provider": "provider[\"registry.terraform.io/hashicorp/random\"]", + "instances": [ + { + "index_key": 0, + "schema_version": 0, + "attributes": { + "id": "vast-marten", + "keepers": { + "now": "2024-04-28T08:41:47Z" + }, + "length": 2, + "prefix": null, + "separator": "-" + }, + "sensitive_attributes": [] + }, + { + "index_key": 1, + "schema_version": 0, + "attributes": { + "id": "brave-dolphin", + "keepers": { + "now": "2024-04-28T08:41:47Z" + }, + "length": 2, + "prefix": null, + "separator": "-" + }, + "sensitive_attributes": [] + }, + { + "index_key": 2, + "schema_version": 0, + "attributes": { + "id": "usable-roughy", + "keepers": { + "now": "2024-04-28T08:41:47Z" + }, + "length": 2, + "prefix": null, + "separator": "-" + }, + "sensitive_attributes": [] + }, + { + "index_key": 3, + "schema_version": 0, + "attributes": { + "id": "upright-camel", + "keepers": { + "now": "2024-04-28T08:41:47Z" + }, + "length": 2, + "prefix": null, + "separator": "-" + }, + "sensitive_attributes": [] + }, + { + "index_key": 4, + "schema_version": 0, + "attributes": { + "id": "up-gobbler", + "keepers": { + "now": "2024-04-28T08:41:47Z" + }, + "length": 2, + "prefix": null, + "separator": "-" + }, + "sensitive_attributes": [] + }, + { + "index_key": 5, + "schema_version": 0, + "attributes": { + "id": "elegant-dane", + "keepers": { + "now": "2024-04-28T08:41:47Z" + }, + "length": 2, + "prefix": null, + "separator": "-" + }, + "sensitive_attributes": [] + }, + { + "index_key": 6, + "schema_version": 0, + "attributes": { + "id": "engaged-buck", + "keepers": { + "now": "2024-04-28T08:41:47Z" + }, + "length": 2, + "prefix": null, + "separator": "-" + }, + "sensitive_attributes": [] + }, + { + "index_key": 7, + "schema_version": 0, + "attributes": { + "id": 
"hip-ringtail", + "keepers": { + "now": "2024-04-28T08:41:47Z" + }, + "length": 2, + "prefix": null, + "separator": "-" + }, + "sensitive_attributes": [] + }, + { + "index_key": 8, + "schema_version": 0, + "attributes": { + "id": "exact-orca", + "keepers": { + "now": "2024-04-28T08:41:47Z" + }, + "length": 2, + "prefix": null, + "separator": "-" + }, + "sensitive_attributes": [] + }, + { + "index_key": 9, + "schema_version": 0, + "attributes": { + "id": "thorough-caiman", + "keepers": { + "now": "2024-04-28T08:41:47Z" + }, + "length": 2, + "prefix": null, + "separator": "-" + }, + "sensitive_attributes": [] + } + ] + } + ], + "check_results": null +} diff --git a/internal/app/workspace_list_test.go b/internal/app/workspace_list_test.go index cfed907d..a54f4018 100644 --- a/internal/app/workspace_list_test.go +++ b/internal/app/workspace_list_test.go @@ -10,6 +10,8 @@ import ( ) func TestWorkspaceList_SetCurrentWorkspace(t *testing.T) { + t.Parallel() + tm := setup(t, "./testdata/module_list") // Wait for module to be loaded @@ -50,11 +52,15 @@ func TestWorkspaceList_SetCurrentWorkspace(t *testing.T) { } func TestWorkspaceList_CreateRun(t *testing.T) { + t.Parallel() + tm := setup(t, "./testdata/module_list") - // Expect message to inform user that modules have been loaded + // Expect all four modules to be listed waitFor(t, tm, func(s string) bool { - return strings.Contains(s, "loaded 3 modules") + return matchPattern(t, `modules/a`, s) && + matchPattern(t, `modules/b`, s) && + matchPattern(t, `modules/c`, s) }) // Select all modules and init @@ -64,16 +70,24 @@ func TestWorkspaceList_CreateRun(t *testing.T) { // Wait for each module to be initialized, and to have its current workspace // set (should be "default") waitFor(t, tm, func(s string) bool { - return strings.Count(s, "default") == 3 + return matchPattern(t, `modules/a.*default`, s) && + matchPattern(t, `modules/b.*default`, s) && + matchPattern(t, `modules/c.*default`, s) }) // Clear selection tm.Send(tea.KeyMsg{Type: tea.KeyCtrlBackslash}) - // Go to global workspaces page + // Go to global workspaces listing tm.Type("W") - // Create run on first workspace + // Wait for modules/a's default workspace to be listed. This should be the + // first workspace listed. + waitFor(t, tm, func(s string) bool { + return matchPattern(t, `modules/a.*default`, s) + }) + + // Create run on modules/a default tm.Type("p") // Expect to be taken to the run's page @@ -84,9 +98,13 @@ func TestWorkspaceList_CreateRun(t *testing.T) { // Return to global workspaces page tm.Type("W") - // Wait for all four workspaces to be listed + // Wait for all four workspaces to be listed. The current run for the + // default workspace on modules/a should be in the planned state. 
waitFor(t, tm, func(s string) bool { - return matchPattern(t, `(?s)WORKSPACE.*default.*dev.*default.*default`, s) + return matchPattern(t, `modules/a.*default.*planned`, s) && + matchPattern(t, `modules/a.*dev`, s) && + matchPattern(t, `modules/b.*default`, s) && + matchPattern(t, `modules/c.*default`, s) }) // Create run on all four workspaces @@ -95,12 +113,17 @@ func TestWorkspaceList_CreateRun(t *testing.T) { // Expect all four workspaces' current run to enter the planned state waitFor(t, tm, func(s string) bool { - return strings.Count(s, "planned") == 4 + return matchPattern(t, `modules/a.*default.*planned`, s) && + matchPattern(t, `modules/a.*dev.*planned`, s) && + matchPattern(t, `modules/b.*default.*planned`, s) && + matchPattern(t, `modules/c.*default.*planned`, s) }) } func TestWorkspaceList_ApplyCurrentRun(t *testing.T) { + t.Parallel() + tm := setup(t, "./testdata/module_list") // Expect message to inform user that modules have been loaded diff --git a/internal/app/workspace_resources_test.go b/internal/app/workspace_resources_test.go index 2fb05e08..51cee305 100644 --- a/internal/app/workspace_resources_test.go +++ b/internal/app/workspace_resources_test.go @@ -9,21 +9,31 @@ import ( ) func TestWorkspace_Resources(t *testing.T) { - tm := setup(t, "./testdata/module_list") + t.Parallel() - // Initialize and apply run on modules/a - initAndApplyModuleA(t, tm) + // Setup test with pre-existing state + tm := setup(t, "./testdata/workspace_resources") + + // Wait for module to be loaded + waitFor(t, tm, func(s string) bool { + return strings.Contains(s, "modules/a") + }) + + // Initialize module + tm.Type("i") + waitFor(t, tm, func(s string) bool { + return strings.Contains(s, "Terraform has been successfully initialized!") + }) // Go to workspaces tm.Type("W") // Wait for workspace to be loaded waitFor(t, tm, func(s string) bool { - return strings.Contains(s, "default") + return matchPattern(t, `modules/a.*default`, s) }) - // Select the default workspace (should be the first and only workspace - // listed) + // Go to workspace's page tm.Send(tea.KeyMsg{Type: tea.KeyEnter}) // Expect resources tab title @@ -80,11 +90,13 @@ func TestWorkspace_Resources(t *testing.T) { }) tm.Type("y") - // Expect to no longer see first three pets listed + // Expect only 7 resources. Note we can't test that the three deleted + // resources are NOT listed because waitFor accumulates all the string + // output since it was called, which is likely to include resources from + // both before and after the deletion. So instead we check for the presence + // of a new total number of resources. 
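// The helper's accumulation behaviour is what rules out negative assertions
// here. Assuming waitFor wraps teatest.WaitFor roughly as sketched below, the
// condition receives every byte read from the TUI's output since the call
// began, not a snapshot of the current frame, so frames rendered before the
// deletion still contain the removed resources:
func waitFor(t *testing.T, tm *teatest.TestModel, cond func(s string) bool) {
	t.Helper()
	teatest.WaitFor(t, tm.Output(), func(b []byte) bool {
		return cond(string(b))
	},
		teatest.WithCheckInterval(100*time.Millisecond), // illustrative intervals
		teatest.WithDuration(10*time.Second),
	)
}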
waitFor(t, tm, func(s string) bool { - return !strings.Contains(s, "random_pet.pet[0]") && - !strings.Contains(s, "random_pet.pet[1]") && - !strings.Contains(s, "random_pet.pet[2]") + return strings.Contains(s, "resources (7)") }) // Select several resources and create targeted plan diff --git a/internal/state/service.go b/internal/state/service.go index cefb2d2d..f04e51da 100644 --- a/internal/state/service.go +++ b/internal/state/service.go @@ -1,7 +1,6 @@ package state import ( - "context" "encoding/json" "errors" "slices" @@ -33,7 +32,7 @@ type ServiceOptions struct { Logger logging.Interface } -func NewService(ctx context.Context, opts ServiceOptions) *Service { +func NewService(opts ServiceOptions) *Service { broker := pubsub.NewBroker[*State](opts.Logger) svc := &Service{ modules: opts.ModuleService, diff --git a/internal/task/enqueuer_test.go b/internal/task/enqueuer_test.go index d1585559..b7ccd06d 100644 --- a/internal/task/enqueuer_test.go +++ b/internal/task/enqueuer_test.go @@ -9,6 +9,8 @@ import ( ) func TestEnqueuer(t *testing.T) { + t.Parallel() + mod1 := resource.New(resource.Module, resource.GlobalResource) ws1 := resource.New(resource.Workspace, mod1) diff --git a/internal/task/reader_test.go b/internal/task/reader_test.go index 5776a6da..53313818 100644 --- a/internal/task/reader_test.go +++ b/internal/task/reader_test.go @@ -10,6 +10,8 @@ import ( ) func TestBuffer_singleRead(t *testing.T) { + t.Parallel() + buf := newBuffer() r := &reader{buf: buf} _, err := buf.Write([]byte("hello world")) @@ -22,6 +24,8 @@ func TestBuffer_singleRead(t *testing.T) { } func TestBuffer_multiRead(t *testing.T) { + t.Parallel() + buf := newBuffer() r := &reader{buf: buf} _, err := buf.Write([]byte("hello world")) @@ -50,6 +54,8 @@ func TestBuffer_multiRead(t *testing.T) { } func TestBuffer_multiWrite(t *testing.T) { + t.Parallel() + buf := newBuffer() r := &reader{buf: buf} _, err := buf.Write([]byte("hello")) @@ -69,6 +75,8 @@ func TestBuffer_multiWrite(t *testing.T) { } func TestBuffer_blockRead(t *testing.T) { + t.Parallel() + buf := newBuffer() r := &reader{buf: buf} diff --git a/internal/task/runner.go b/internal/task/runner.go index 8cfb8a69..b487df5f 100644 --- a/internal/task/runner.go +++ b/internal/task/runner.go @@ -2,9 +2,9 @@ package task import ( "context" + "sync" "github.com/leg100/pug/internal/logging" - "golang.org/x/sync/errgroup" ) // Runner is the global task Runner that provides a couple of invariants: @@ -15,13 +15,15 @@ type runner struct { tasks taskLister } -func StartRunner(ctx context.Context, logger logging.Interface, tasks *Service, maxTasks int) func() error { +// StartRunner starts the task runner and returns a function that waits for +// running tasks to finish. +func StartRunner(ctx context.Context, logger logging.Interface, tasks *Service, maxTasks int) func() { sub := tasks.Subscribe() r := &runner{ max: maxTasks, tasks: tasks, } - g, ctx := errgroup.WithContext(ctx) + g := sync.WaitGroup{} // On each task event, get a list of tasks to be run, start them, and wait // for them to complete in the background. 
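// The hunk below swaps errgroup for a plain sync.WaitGroup: the goroutines
// only call waitfn() and never return an error, so the group's error plumbing
// was unused. The pattern it moves to, in isolation (trackTask is an
// illustrative name, not part of the change):
func trackTask(g *sync.WaitGroup, waitfn func()) {
	g.Add(1)
	go func() {
		defer g.Done()
		waitfn()
	}()
}

// StartRunner can then hand the caller a plain func() (g.Wait, or a wrapper
// around it) that blocks during shutdown until every task's wait function has
// returned, matching the new func() return type in the signature above.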
@@ -33,10 +35,11 @@ func StartRunner(ctx context.Context, logger logging.Interface, tasks *Service, logger.Error("starting task", "error", err.Error(), "task", task) } else { logger.Debug("started task", "task", task) - g.Go(func() error { + g.Add(1) + go func() { waitfn() - return nil - }) + g.Done() + }() } } } diff --git a/internal/task/runner_test.go b/internal/task/runner_test.go index ede94db0..1a984c61 100644 --- a/internal/task/runner_test.go +++ b/internal/task/runner_test.go @@ -9,6 +9,8 @@ import ( ) func TestRunner_runnable(t *testing.T) { + t.Parallel() + mod1 := resource.New(resource.Module, resource.GlobalResource) t1 := &Task{Resource: resource.New(resource.Task, mod1)} diff --git a/internal/task/service.go b/internal/task/service.go index 7492dcd6..bfdf299e 100644 --- a/internal/task/service.go +++ b/internal/task/service.go @@ -19,9 +19,10 @@ type Service struct { } type ServiceOptions struct { - Program string - Logger logging.Interface - Workdir internal.Workdir + Program string + Logger logging.Interface + Workdir internal.Workdir + UserEnvs []string } func NewService(opts ServiceOptions) *Service { @@ -33,6 +34,7 @@ func NewService(opts ServiceOptions) *Service { counter: &counter, program: opts.Program, workdir: opts.Workdir, + userEnvs: opts.UserEnvs, } svc := &Service{ diff --git a/internal/task/service_test.go b/internal/task/service_test.go index 29378d1d..58d36794 100644 --- a/internal/task/service_test.go +++ b/internal/task/service_test.go @@ -8,6 +8,8 @@ import ( ) func TestService_List(t *testing.T) { + t.Parallel() + mod1 := resource.New(resource.Module, resource.GlobalResource) pending := &Task{Resource: resource.New(resource.Task, mod1), State: Pending} diff --git a/internal/task/sort_test.go b/internal/task/sort_test.go deleted file mode 100644 index b028dfdf..00000000 --- a/internal/task/sort_test.go +++ /dev/null @@ -1 +0,0 @@ -package task diff --git a/internal/task/task.go b/internal/task/task.go index d981c474..79e6c6e4 100644 --- a/internal/task/task.go +++ b/internal/task/task.go @@ -79,6 +79,8 @@ type factory struct { program string publisher resource.Publisher[*Task] workdir internal.Workdir + // Additional user-supplied environment variables. + userEnvs []string } type CreateOptions struct { @@ -129,7 +131,7 @@ func (f *factory) newTask(opts CreateOptions) (*Task, error) { Command: opts.Command, Path: filepath.Join(f.workdir.String(), opts.Path), Args: opts.Args, - Env: append(os.Environ(), opts.Env...), + Env: append(append(f.userEnvs, opts.Env...), os.Environ()...), JSON: opts.JSON, Blocking: opts.Blocking, exclusive: opts.Exclusive, @@ -234,7 +236,13 @@ func (t *Task) cancel() { } func (t *Task) start(ctx context.Context) (func(), error) { + // Use the provided context to kill the program if the context becomes done, + // but also to prevent the program from starting if the context becomes done. cmd := exec.CommandContext(ctx, t.program, append(t.Command, t.Args...)...) 
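// By default CommandContext kills the process with os.Kill the moment ctx is
// done; overriding cmd.Cancel, as the following line does, swaps that for an
// interrupt so terraform gets a chance to release state locks and exit
// cleanly. If a hard upper bound on shutdown were wanted, cmd.WaitDelay could
// force-kill a process that ignores the interrupt; that is a possible
// extension, not part of this change. Note also that os.Interrupt cannot be
// sent to a process on Windows:
//
//	cmd.Cancel = func() error {
//		return cmd.Process.Signal(os.Interrupt)
//	}
//	cmd.WaitDelay = 30 * time.Second // illustrative bound before a forced kill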
+ cmd.Cancel = func() error { + // Kill program gracefully + return cmd.Process.Signal(os.Interrupt) + } cmd.Dir = t.Path cmd.Stdout = t.buf cmd.Stderr = t.buf diff --git a/internal/task/task_test.go b/internal/task/task_test.go index ea633e36..7553ca69 100644 --- a/internal/task/task_test.go +++ b/internal/task/task_test.go @@ -13,6 +13,8 @@ import ( ) func TestTask_stdout(t *testing.T) { + t.Parallel() + f := factory{ counter: internal.Int(0), program: "./testdata/task", @@ -42,6 +44,8 @@ func TestTask_stdout(t *testing.T) { } func TestTask_cancel(t *testing.T) { + t.Parallel() + f := factory{ counter: internal.Int(0), program: "./testdata/killme",