From 7ec3f96e3aa46367a2d0762b8835fc18c5d2f996 Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Thu, 25 Oct 2018 15:41:00 +0200 Subject: [PATCH 001/149] command/state: update and fix the state mv command --- command/state_meta.go | 20 ++ command/state_mv.go | 251 +++++++++---- command/state_mv_test.go | 53 ++- command/state_rm.go | 32 +- command/state_rm_test.go | 10 +- states/state_filter.go | 36 +- states/state_filter_test.go | 5 +- terraform/state_add.go | 374 ------------------- terraform/state_add_test.go | 695 ------------------------------------ 9 files changed, 278 insertions(+), 1198 deletions(-) delete mode 100644 terraform/state_add.go delete mode 100644 terraform/state_add_test.go diff --git a/command/state_meta.go b/command/state_meta.go index 4cf4034c0f48..247e66207b52 100644 --- a/command/state_meta.go +++ b/command/state_meta.go @@ -2,6 +2,7 @@ package command import ( "fmt" + "sort" "time" "github.com/hashicorp/terraform/addrs" @@ -111,6 +112,25 @@ func (c *StateMeta) filter(state *states.State, args []string) ([]*states.Filter } } + // Sort the results + sort.Slice(results, func(i, j int) bool { + a, b := results[i], results[j] + + // If the length is different, sort on the length so that the + // best match is the first result. 
+ if len(a.Address.String()) != len(b.Address.String()) { + return len(a.Address.String()) < len(b.Address.String()) + } + + // If the addresses are different it is just lexographic sorting + if a.Address.String() != b.Address.String() { + return a.Address.String() < b.Address.String() + } + + // Addresses are the same, which means it matters on the type + return a.SortedType() < b.SortedType() + }) + return results, nil } diff --git a/command/state_mv.go b/command/state_mv.go index 85a759b3e6cf..db6ae74a0d0f 100644 --- a/command/state_mv.go +++ b/command/state_mv.go @@ -4,6 +4,7 @@ import ( "fmt" "strings" + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/states" "github.com/mitchellh/cli" ) @@ -22,7 +23,9 @@ func (c *StateMvCommand) Run(args []string) int { // We create two metas to track the two states var backupPathOut, statePathOut string + var dryRun bool cmdFlags := c.Meta.flagSet("state mv") + cmdFlags.BoolVar(&dryRun, "dry-run", false, "dry run") cmdFlags.StringVar(&c.backupPath, "backup", "-", "backup") cmdFlags.StringVar(&c.statePath, "state", "", "path") cmdFlags.StringVar(&backupPathOut, "backup-out", "-", "backup") @@ -37,127 +40,228 @@ func (c *StateMvCommand) Run(args []string) int { } // Read the from state - stateFrom, err := c.State() + stateFromMgr, err := c.State() if err != nil { c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) return 1 } - - if err := stateFrom.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + if err := stateFromMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to refresh state: %s", err)) return 1 } - stateFromReal := stateFrom.State() - if stateFromReal == nil { + stateFrom := stateFromMgr.State() + if stateFrom == nil { c.Ui.Error(fmt.Sprintf(errStateNotFound)) return 1 } // Read the destination state + stateToMgr := stateFromMgr stateTo := stateFrom - stateToReal := stateFromReal if statePathOut != "" { c.statePath = statePathOut c.backupPath 
= backupPathOut - stateTo, err = c.State() + + stateToMgr, err = c.State() if err != nil { c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) return 1 } - - if err := stateTo.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + if err := stateToMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to refresh state: %s", err)) return 1 } - stateToReal = stateTo.State() - if stateToReal == nil { - stateToReal = states.NewState() + stateTo = stateToMgr.State() + if stateTo == nil { + stateTo = states.NewState() } } - c.Ui.Error("state mv command not yet updated for new state types") - return 1 - /* - // Filter what we're moving - filter := &terraform.StateFilter{State: stateFromReal} - results, err := filter.Filter(args[0]) - if err != nil { - c.Ui.Error(fmt.Sprintf(errStateMv, err)) - return cli.RunResultHelp - } - if len(results) == 0 { - c.Ui.Output(fmt.Sprintf("Item to move doesn't exist: %s", args[0])) - return 1 + // Filter what we are moving. + results, err := c.filter(stateFrom, []string{args[0]}) + if err != nil { + c.Ui.Error(fmt.Sprintf(errStateFilter, err)) + return cli.RunResultHelp + } + + // If we have no results, exit early as we're not going to do anything. 
+ if len(results) == 0 { + if dryRun { + c.Ui.Output("Would have moved nothing.") + } else { + c.Ui.Output("No matching objects found.") } + return 0 + } - // Get the item to add to the state - add := c.addableResult(results) + prefix := "Move" + if dryRun { + prefix = "Would move" + } - // Do the actual move - if err := stateFromReal.Remove(args[0]); err != nil { - c.Ui.Error(fmt.Sprintf(errStateMv, err)) - return 1 - } + var moved int + ssFrom := stateFrom.SyncWrapper() + for _, result := range c.moveableResult(results) { + switch addrFrom := result.Address.(type) { + case addrs.ModuleInstance: + search, err := addrs.ParseModuleInstanceStr(args[0]) + if err != nil { + c.Ui.Error(fmt.Sprintf(errStateMv, err)) + return 1 + } + addrTo, err := addrs.ParseModuleInstanceStr(args[1]) + if err != nil { + c.Ui.Error(fmt.Sprintf(errStateMv, err)) + return 1 + } - if err := stateToReal.Add(args[0], args[1], add); err != nil { - c.Ui.Error(fmt.Sprintf(errStateMv, err)) - return 1 - } + if len(search) < len(addrFrom) { + addrTo = append(addrTo, addrFrom[len(search):]...) + } - // Write the new state - if err := stateTo.WriteState(stateToReal); err != nil { - c.Ui.Error(fmt.Sprintf(errStateMvPersist, err)) - return 1 - } + if stateTo.Module(addrTo) != nil { + c.Ui.Error(fmt.Sprintf(errStateMv, "destination module already exists")) + return 1 + } - if err := stateTo.PersistState(); err != nil { - c.Ui.Error(fmt.Sprintf(errStateMvPersist, err)) - return 1 - } + moved++ + c.Ui.Output(fmt.Sprintf("%s %q to %q", prefix, addrFrom.String(), addrTo.String())) + if !dryRun { + ssFrom.RemoveModule(addrFrom) - // Write the old state if it is different - if stateTo != stateFrom { - if err := stateFrom.WriteState(stateFromReal); err != nil { - c.Ui.Error(fmt.Sprintf(errStateMvPersist, err)) + // Update the address before adding it to the state. 
+ m := result.Value.(*states.Module) + m.Addr = addrTo + stateTo.Modules[addrTo.String()] = m + } + + case addrs.AbsResource: + addrTo, err := addrs.ParseAbsResourceStr(args[1]) + if err != nil { + c.Ui.Error(fmt.Sprintf(errStateMv, err)) return 1 } - if err := stateFrom.PersistState(); err != nil { - c.Ui.Error(fmt.Sprintf(errStateMvPersist, err)) + if addrFrom.Resource.Type != addrTo.Resource.Type { + c.Ui.Error(fmt.Sprintf( + errStateMv, "resource types do not match")) + return 1 + } + if stateTo.Module(addrTo.Module) == nil { + c.Ui.Error(fmt.Sprintf( + errStateMv, "destination module does not exist")) return 1 } + if stateTo.Resource(addrTo) != nil { + c.Ui.Error(fmt.Sprintf( + errStateMv, "destination resource already exists")) + return 1 + } + + moved++ + c.Ui.Output(fmt.Sprintf("%s %q to %q", prefix, addrFrom.String(), addrTo.String())) + if !dryRun { + ssFrom.RemoveResource(addrFrom) + + // Update the address before adding it to the state. + rs := result.Value.(*states.Resource) + rs.Addr = addrTo.Resource + stateTo.Module(addrTo.Module).Resources[addrTo.Resource.String()] = rs + } + + case addrs.AbsResourceInstance: + addrTo, err := addrs.ParseAbsResourceInstanceStr(args[1]) + if err != nil { + c.Ui.Error(fmt.Sprintf(errStateMv, err)) + return 1 + } + + if stateTo.Module(addrTo.Module) == nil { + c.Ui.Error(fmt.Sprintf( + errStateMv, "destination module does not exist")) + return 1 + } + if stateTo.Resource(addrTo.ContainingResource()) == nil { + c.Ui.Error(fmt.Sprintf( + errStateMv, "destination resource does not exist")) + return 1 + } + if stateTo.ResourceInstance(addrTo) != nil { + c.Ui.Error(fmt.Sprintf( + errStateMv, "destination resource instance already exists")) + return 1 + } + + moved++ + c.Ui.Output(fmt.Sprintf("%s %q to %q", prefix, addrFrom.String(), args[1])) + if !dryRun { + ssFrom.ForgetResourceInstanceAll(addrFrom) + ssFrom.RemoveResourceIfEmpty(addrFrom.ContainingResource()) + + rs := stateTo.Resource(addrTo.ContainingResource()) + 
rs.Instances[addrTo.Resource.Key] = result.Value.(*states.ResourceInstance) + } + } + } + + if dryRun { + if moved == 0 { + c.Ui.Output("Would have moved nothing.") } - */ + return 0 // This is as far as we go in dry-run mode + } - c.Ui.Output(fmt.Sprintf( - "Moved %s to %s", args[0], args[1])) + // Write the new state + if err := stateToMgr.WriteState(stateTo); err != nil { + c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) + return 1 + } + if err := stateToMgr.PersistState(); err != nil { + c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) + return 1 + } + + // Write the old state if it is different + if stateTo != stateFrom { + if err := stateFromMgr.WriteState(stateFrom); err != nil { + c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) + return 1 + } + if err := stateFromMgr.PersistState(); err != nil { + c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) + return 1 + } + } + + if moved == 0 { + c.Ui.Output("No matching objects found.") + } else { + c.Ui.Output(fmt.Sprintf("Successfully moved %d object(s).", moved)) + } return 0 } -// addableResult takes the result from a filter operation and returns what to -// call State.Add with. The reason we do this is because in the module case +// moveableResult takes the result from a filter operation and returns what +// object(s) to move. The reason we do this is because in the module case // we must add the list of all modules returned versus just the root module. -func (c *StateMvCommand) addableResult(results []*states.FilterResult) interface{} { - switch v := results[0].Value.(type) { - case *states.Module: - // If a state module then we should add the full list of modules - result := []*states.Module{v} - if len(results) > 1 { +func (c *StateMvCommand) moveableResult(results []*states.FilterResult) []*states.FilterResult { + result := results[:1] + + if len(results) > 1 { + // If a state module then we should add the full list of modules. 
+ if _, ok := result[0].Address.(addrs.ModuleInstance); ok { for _, r := range results[1:] { - if ms, ok := r.Value.(*states.Module); ok { - result = append(result, ms) + if _, ok := r.Address.(addrs.ModuleInstance); ok { + result = append(result, r) } } } - return result - - default: - // By default just add the first result - return v } + + return result } func (c *StateMvCommand) Help() string { @@ -182,6 +286,9 @@ Usage: terraform state mv [options] SOURCE DESTINATION Options: + -dry-run If set, prints out what would've been moved but doesn't + actually move anything. + -backup=PATH Path where Terraform should write the backup for the original state. This can't be disabled. If not set, Terraform will write it to the same path as the statefile with @@ -209,7 +316,7 @@ func (c *StateMvCommand) Synopsis() string { return "Move an item in the state" } -const errStateMv = `Error moving state: %[1]s +const errStateMv = `Error moving state: %s Please ensure your addresses and state paths are valid. No state was persisted. 
Your existing states are untouched.` diff --git a/command/state_mv_test.go b/command/state_mv_test.go index 2e00059f8dff..06c26c8f8239 100644 --- a/command/state_mv_test.go +++ b/command/state_mv_test.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "testing" "github.com/mitchellh/cli" @@ -74,6 +75,48 @@ func TestStateMv(t *testing.T) { testStateOutput(t, backups[0], testStateMvOutputOriginal) } +func TestStateMv_differentResourceTypes(t *testing.T) { + state := states.BuildState(func(s *states.SyncState) { + s.SetResourceInstanceCurrent( + addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_instance", + Name: "foo", + }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte(`{"id":"bar","foo":"value","bar":"value"}`), + Status: states.ObjectReady, + }, + addrs.ProviderConfig{Type: "test"}.Absolute(addrs.RootModuleInstance), + ) + }) + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + c := &StateMvCommand{ + StateMeta{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + }, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + "test_network.bar", + } + if code := c.Run(args); code == 0 { + t.Fatalf("expected error output, got:\n%s", ui.OutputWriter.String()) + } + + if !strings.Contains(ui.ErrorWriter.String(), "resource types do not match") { + t.Fatalf("expected initialization error, got:\n%s", ui.ErrorWriter.String()) + } +} + // don't modify backend state is we supply a -state flag func TestStateMv_explicitWithBackend(t *testing.T) { td := tempDir(t) @@ -152,10 +195,6 @@ func TestStateMv_explicitWithBackend(t *testing.T) { } func TestStateMv_backupExplicit(t *testing.T) { - td := tempDir(t) - defer os.RemoveAll(td) - backupPath := filepath.Join(td, "backup") - state := states.BuildState(func(s *states.SyncState) { s.SetResourceInstanceCurrent( addrs.Resource{ @@ -183,6 +222,7 @@ func 
TestStateMv_backupExplicit(t *testing.T) { ) }) statePath := testStateFile(t, state) + backupPath := statePath + ".backup.test" p := testProvider() ui := new(cli.MockUi) @@ -913,8 +953,6 @@ test_instance.foo.10: const testStateMvNestedModule_stateOut = ` -module.bar: - module.bar.child1: test_instance.foo: ID = bar @@ -935,8 +973,6 @@ const testStateMvNestedModule_stateOutSrc = ` const testStateMvNestedModule_stateOutOriginal = ` -module.foo: - module.foo.child1: test_instance.foo: ID = bar @@ -983,6 +1019,7 @@ test_instance.bar: foo = value test_instance.qux: ID = bar + provider = provider.test ` const testStateMvExisting_stateSrcOriginal = ` diff --git a/command/state_rm.go b/command/state_rm.go index e3b2afd62015..0c57a33c1425 100644 --- a/command/state_rm.go +++ b/command/state_rm.go @@ -5,10 +5,9 @@ import ( "sort" "strings" - "github.com/mitchellh/cli" - "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/states" + "github.com/mitchellh/cli" ) // StateRmCommand is a Command implementation that shows a single resource. @@ -22,18 +21,19 @@ func (c *StateRmCommand) Run(args []string) int { return 1 } + var dryRun bool cmdFlags := c.Meta.flagSet("state show") + cmdFlags.BoolVar(&dryRun, "dry-run", false, "dry run") cmdFlags.StringVar(&c.backupPath, "backup", "-", "backup") cmdFlags.StringVar(&c.statePath, "state", "", "path") - dryRun := cmdFlags.Bool("dry-run", false, "dry run") if err := cmdFlags.Parse(args); err != nil { return cli.RunResultHelp } args = cmdFlags.Args() if len(args) < 1 { - c.Ui.Error("At least one resource address is required.") - return 1 + c.Ui.Error("At least one address is required.\n") + return cli.RunResultHelp } // Get the state @@ -53,18 +53,16 @@ func (c *StateRmCommand) Run(args []string) int { return 1 } + // Filter what we are removing. 
results, err := c.filter(state, args) if err != nil { c.Ui.Error(fmt.Sprintf(errStateFilter, err)) return cli.RunResultHelp } - // If we have no results, just exit early, we're not going to do anything. - // While what happens below is fairly fast, this is an important early - // exit since the prune below might modify the state more and we don't - // want to modify the state if we don't have to. + // If we have no results, exit early as we're not going to do anything. if len(results) == 0 { - if *dryRun { + if dryRun { c.Ui.Output("Would have removed nothing.") } else { c.Ui.Output("No matching resources found.") @@ -73,7 +71,7 @@ func (c *StateRmCommand) Run(args []string) int { } prefix := "Remove resource " - if *dryRun { + if dryRun { prefix = "Would remove resource " } @@ -92,7 +90,7 @@ func (c *StateRmCommand) Run(args []string) int { if len(output) > 0 { c.Ui.Output(strings.Join(sort.StringSlice(output), "\n")) } - if !*dryRun { + if !dryRun { ss.RemoveModule(addr) } @@ -105,29 +103,27 @@ func (c *StateRmCommand) Run(args []string) int { if len(output) > 0 { c.Ui.Output(strings.Join(sort.StringSlice(output), "\n")) } - if !*dryRun { + if !dryRun { ss.RemoveResource(addr) } case addrs.AbsResourceInstance: isCount++ c.Ui.Output(prefix + addr.String()) - if !*dryRun { + if !dryRun { ss.ForgetResourceInstanceAll(addr) + ss.RemoveResourceIfEmpty(addr.ContainingResource()) } } } - if *dryRun { + if dryRun { if isCount == 0 { c.Ui.Output("Would have removed nothing.") } return 0 // This is as far as we go in dry-run mode } - // Prune the state before writing and persisting it. 
- state.PruneResourceHusks() - if err := stateMgr.WriteState(state); err != nil { c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) return 1 diff --git a/command/state_rm_test.go b/command/state_rm_test.go index 7274c8f532d1..fa5c9d769a9a 100644 --- a/command/state_rm_test.go +++ b/command/state_rm_test.go @@ -115,11 +115,11 @@ func TestStateRmNoArgs(t *testing.T) { args := []string{ "-state", statePath, } - if code := c.Run(args); code != 1 { - t.Errorf("wrong exit status %d; want %d", code, 1) + if code := c.Run(args); code == 0 { + t.Errorf("expected non-zero exit code, got: %d", code) } - if msg := ui.ErrorWriter.String(); !strings.Contains(msg, "At least one resource address") { + if msg := ui.ErrorWriter.String(); !strings.Contains(msg, "At least one address") { t.Errorf("not the error we were looking for:\n%s", msg) } @@ -207,7 +207,7 @@ func TestStateRm_backupExplicit(t *testing.T) { ) }) statePath := testStateFile(t, state) - backupPath := statePath + ".mybackup" + backupPath := statePath + ".backup.test" p := testProvider() ui := new(cli.MockUi) @@ -251,7 +251,7 @@ func TestStateRm_noState(t *testing.T) { }, } - args := []string{} + args := []string{"foo"} if code := c.Run(args); code != 1 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) } diff --git a/states/state_filter.go b/states/state_filter.go index bec5f57f2164..0f10a58dc687 100644 --- a/states/state_filter.go +++ b/states/state_filter.go @@ -63,8 +63,19 @@ func (f *Filter) Filter(fs ...string) ([]*FilterResult, error) { results = append(results, v) } - // Sort them and return - sort.Sort(FilterResultSlice(results)) + // Sort the results + sort.Slice(results, func(i, j int) bool { + a, b := results[i], results[j] + + // If the addresses are different it is just lexographic sorting + if a.Address.String() != b.Address.String() { + return a.Address.String() < b.Address.String() + } + + // Addresses are the same, which means it matters on the type + return a.SortedType() < b.SortedType() + }) 
+ return results, nil } @@ -155,7 +166,7 @@ func (r *FilterResult) String() string { return fmt.Sprintf("%T: %s", r.Value, r.Address) } -func (r *FilterResult) sortedType() int { +func (r *FilterResult) SortedType() int { switch r.Value.(type) { case *Module: return 0 @@ -167,22 +178,3 @@ func (r *FilterResult) sortedType() int { return 50 } } - -// FilterResultSlice is a slice of results that implements -// sort.Interface. The sorting goal is what is most appealing to -// human output. -type FilterResultSlice []*FilterResult - -func (s FilterResultSlice) Len() int { return len(s) } -func (s FilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s FilterResultSlice) Less(i, j int) bool { - a, b := s[i], s[j] - - // If the addresses are different it is just lexographic sorting - if a.Address.String() != b.Address.String() { - return a.Address.String() < b.Address.String() - } - - // Addresses are the same, which means it matters on the type - return a.sortedType() < b.sortedType() -} diff --git a/states/state_filter_test.go b/states/state_filter_test.go index 5303ce653683..bba2b915b433 100644 --- a/states/state_filter_test.go +++ b/states/state_filter_test.go @@ -399,7 +399,7 @@ func testStateSmall() *State { root := addrs.RootModuleInstance boot, _ := addrs.ParseModuleInstanceStr("module.boot") - state := BuildState(func(s *SyncState) { + return BuildState(func(s *SyncState) { s.SetResourceInstanceCurrent( addrs.Resource{ Mode: addrs.ManagedResourceMode, @@ -457,9 +457,6 @@ func testStateSmall() *State { }.Absolute(boot), ) }) - // fmt.Printf("mods: %#v\n", state.Modules) - // fmt.Printf("boot: %#+v\n", state.Modules["module.boot"]) - return state } // testStateSmallTestInstance returns a test State structure. 
diff --git a/terraform/state_add.go b/terraform/state_add.go deleted file mode 100644 index dc642dc03c49..000000000000 --- a/terraform/state_add.go +++ /dev/null @@ -1,374 +0,0 @@ -package terraform - -import "fmt" - -// Add adds the item in the state at the given address. -// -// The item can be a ModuleState, ResourceState, or InstanceState. Depending -// on the item type, the address may or may not be valid. For example, a -// module cannot be moved to a resource address, however a resource can be -// moved to a module address (it retains the same name, under that resource). -// -// The item can also be a []*ModuleState, which is the case for nested -// modules. In this case, Add will expect the zero-index to be the top-most -// module to add and will only nest children from there. For semantics, this -// is equivalent to module => module. -// -// The full semantics of Add: -// -// ┌───────────────────┬───────────────────┬───────────────────┐ -// │ Module Address │ Resource Address │ Instance Address │ -// ┌─────────────────┼───────────────────┼───────────────────┼───────────────────┤ -// │ ModuleState │ ✓ │ x │ x │ -// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤ -// │ ResourceState │ ✓ │ ✓ │ maybe* │ -// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤ -// │ Instance State │ ✓ │ ✓ │ ✓ │ -// └─────────────────┴───────────────────┴───────────────────┴───────────────────┘ -// -// *maybe - Resources can be added at an instance address only if the resource -// represents a single instance (primary). 
Example: -// "aws_instance.foo" can be moved to "aws_instance.bar.tainted" -// -func (s *State) Add(fromAddrRaw string, toAddrRaw string, raw interface{}) error { - // Parse the address - - toAddr, err := ParseResourceAddress(toAddrRaw) - if err != nil { - return err - } - - // Parse the from address - fromAddr, err := ParseResourceAddress(fromAddrRaw) - if err != nil { - return err - } - - // Determine the types - from := detectValueAddLoc(raw) - to := detectAddrAddLoc(toAddr) - - // Find the function to do this - fromMap, ok := stateAddFuncs[from] - if !ok { - return fmt.Errorf("invalid source to add to state: %T", raw) - } - f, ok := fromMap[to] - if !ok { - return fmt.Errorf("invalid destination: %s (%d)", toAddr, to) - } - - // Call the migrator - if err := f(s, fromAddr, toAddr, raw); err != nil { - return err - } - - // Prune the state - s.prune() - return nil -} - -func stateAddFunc_Module_Module(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error { - // raw can be either *ModuleState or []*ModuleState. The former means - // we're moving just one module. The latter means we're moving a module - // and children. 
- root := raw - var rest []*ModuleState - if list, ok := raw.([]*ModuleState); ok { - // We need at least one item - if len(list) == 0 { - return fmt.Errorf("module move with no value to: %s", addr) - } - - // The first item is always the root - root = list[0] - if len(list) > 1 { - rest = list[1:] - } - } - - // Get the actual module state - src := root.(*ModuleState).deepcopy() - - // If the target module exists, it is an error - path := normalizeModulePath(addr.Path) - if s.ModuleByPath(path) != nil { - return fmt.Errorf("module target is not empty: %s", addr) - } - - // Create it and copy our outputs and dependencies - mod := s.AddModule(path) - mod.Outputs = src.Outputs - mod.Dependencies = src.Dependencies - - // Go through the resources perform an add for each of those - for k, v := range src.Resources { - resourceKey, err := ParseResourceStateKey(k) - if err != nil { - return err - } - - // Update the resource address for this - addrCopy := *addr - addrCopy.Type = resourceKey.Type - addrCopy.Name = resourceKey.Name - addrCopy.Index = resourceKey.Index - addrCopy.Mode = resourceKey.Mode - - // Perform an add - if err := s.Add(fromAddr.String(), addrCopy.String(), v); err != nil { - return err - } - } - - // Add all the children if we have them - for _, item := range rest { - // If item isn't a descendent of our root, then ignore it - if !src.IsDescendent(item) { - continue - } - - // It is! Strip the leading prefix and attach that to our address - extra := item.Path[len(src.Path):] - addrCopy := addr.Copy() - addrCopy.Path = append(addrCopy.Path, extra...) 
- - // Add it - s.Add(fromAddr.String(), addrCopy.String(), item) - } - - return nil -} - -func stateAddFunc_Resource_Module( - s *State, from, to *ResourceAddress, raw interface{}) error { - // Build the more specific to addr - addr := *to - addr.Type = from.Type - addr.Name = from.Name - - return s.Add(from.String(), addr.String(), raw) -} - -func stateAddFunc_Resource_Resource(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error { - // raw can be either *ResourceState or []*ResourceState. The former means - // we're moving just one resource. The latter means we're moving a count - // of resources. - if list, ok := raw.([]*ResourceState); ok { - // We need at least one item - if len(list) == 0 { - return fmt.Errorf("resource move with no value to: %s", addr) - } - - // If there is an index, this is an error since we can't assign - // a set of resources to a single index - if addr.Index >= 0 && len(list) > 1 { - return fmt.Errorf( - "multiple resources can't be moved to a single index: "+ - "%s => %s", fromAddr, addr) - } - - // Add each with a specific index - for i, rs := range list { - addrCopy := addr.Copy() - addrCopy.Index = i - - if err := s.Add(fromAddr.String(), addrCopy.String(), rs); err != nil { - return err - } - } - - return nil - } - - src := raw.(*ResourceState).deepcopy() - - // Initialize the resource - resourceRaw, exists := stateAddInitAddr(s, addr) - if exists { - return fmt.Errorf("resource exists and not empty: %s", addr) - } - resource := resourceRaw.(*ResourceState) - resource.Type = src.Type - resource.Dependencies = src.Dependencies - resource.Provider = src.Provider - - // Move the primary - if src.Primary != nil { - addrCopy := *addr - addrCopy.InstanceType = TypePrimary - addrCopy.InstanceTypeSet = true - if err := s.Add(fromAddr.String(), addrCopy.String(), src.Primary); err != nil { - return err - } - } - - // Move all deposed - if len(src.Deposed) > 0 { - resource.Deposed = src.Deposed - } - - return nil -} - -func 
stateAddFunc_Instance_Instance(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error { - src := raw.(*InstanceState).DeepCopy() - - // Create the instance - instanceRaw, _ := stateAddInitAddr(s, addr) - instance := instanceRaw.(*InstanceState) - - // Set it - instance.Set(src) - - return nil -} - -func stateAddFunc_Instance_Module( - s *State, from, to *ResourceAddress, raw interface{}) error { - addr := *to - addr.Type = from.Type - addr.Name = from.Name - - return s.Add(from.String(), addr.String(), raw) -} - -func stateAddFunc_Instance_Resource( - s *State, from, to *ResourceAddress, raw interface{}) error { - addr := *to - addr.InstanceType = TypePrimary - addr.InstanceTypeSet = true - - return s.Add(from.String(), addr.String(), raw) -} - -// stateAddFunc is the type of function for adding an item to a state -type stateAddFunc func(s *State, from, to *ResourceAddress, item interface{}) error - -// stateAddFuncs has the full matrix mapping of the state adders. -var stateAddFuncs map[stateAddLoc]map[stateAddLoc]stateAddFunc - -func init() { - stateAddFuncs = map[stateAddLoc]map[stateAddLoc]stateAddFunc{ - stateAddModule: { - stateAddModule: stateAddFunc_Module_Module, - }, - stateAddResource: { - stateAddModule: stateAddFunc_Resource_Module, - stateAddResource: stateAddFunc_Resource_Resource, - }, - stateAddInstance: { - stateAddInstance: stateAddFunc_Instance_Instance, - stateAddModule: stateAddFunc_Instance_Module, - stateAddResource: stateAddFunc_Instance_Resource, - }, - } -} - -// stateAddLoc is an enum to represent the location where state is being -// moved from/to. We use this for quick lookups in a function map. -type stateAddLoc uint - -const ( - stateAddInvalid stateAddLoc = iota - stateAddModule - stateAddResource - stateAddInstance -) - -// detectAddrAddLoc detects the state type for the given address. 
This -// function is specifically not unit tested since we consider the State.Add -// functionality to be comprehensive enough to cover this. -func detectAddrAddLoc(addr *ResourceAddress) stateAddLoc { - if addr.Name == "" { - return stateAddModule - } - - if !addr.InstanceTypeSet { - return stateAddResource - } - - return stateAddInstance -} - -// detectValueAddLoc determines the stateAddLoc value from the raw value -// that is some State structure. -func detectValueAddLoc(raw interface{}) stateAddLoc { - switch raw.(type) { - case *ModuleState: - return stateAddModule - case []*ModuleState: - return stateAddModule - case *ResourceState: - return stateAddResource - case []*ResourceState: - return stateAddResource - case *InstanceState: - return stateAddInstance - default: - return stateAddInvalid - } -} - -// stateAddInitAddr takes a ResourceAddress and creates the non-existing -// resources up to that point, returning the empty (or existing) interface -// at that address. -func stateAddInitAddr(s *State, addr *ResourceAddress) (interface{}, bool) { - addType := detectAddrAddLoc(addr) - - // Get the module - path := normalizeModulePath(addr.Path) - exists := true - mod := s.ModuleByPath(path) - if mod == nil { - mod = s.AddModule(path) - exists = false - } - if addType == stateAddModule { - return mod, exists - } - - // Add the resource - resourceKey := (&ResourceStateKey{ - Name: addr.Name, - Type: addr.Type, - Index: addr.Index, - Mode: addr.Mode, - }).String() - exists = true - resource, ok := mod.Resources[resourceKey] - if !ok { - resource = &ResourceState{Type: addr.Type} - resource.init() - mod.Resources[resourceKey] = resource - exists = false - } - if addType == stateAddResource { - return resource, exists - } - - // Get the instance - exists = true - instance := &InstanceState{} - switch addr.InstanceType { - case TypePrimary, TypeTainted: - if v := resource.Primary; v != nil { - instance = resource.Primary - } else { - exists = false - } - case 
TypeDeposed: - idx := addr.Index - if addr.Index < 0 { - idx = 0 - } - if len(resource.Deposed) > idx { - instance = resource.Deposed[idx] - } else { - resource.Deposed = append(resource.Deposed, instance) - exists = false - } - } - - return instance, exists -} diff --git a/terraform/state_add_test.go b/terraform/state_add_test.go deleted file mode 100644 index a91dee5897fb..000000000000 --- a/terraform/state_add_test.go +++ /dev/null @@ -1,695 +0,0 @@ -package terraform - -import ( - "fmt" - "testing" -) - -func TestStateAdd(t *testing.T) { - cases := []struct { - Name string - Err bool - From, To string - Value interface{} - One, Two *State - }{ - { - "ModuleState => Module Addr (new)", - false, - "", - "module.foo", - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "test_instance.bar": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - - &State{}, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root", "foo"}, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "test_instance.bar": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - }, - - { - "ModuleState => Nested Module Addr (new)", - false, - "", - "module.foo.module.bar", - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "test_instance.bar": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - - &State{}, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root", "foo", "bar"}, - Resources: 
map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "test_instance.bar": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - }, - - { - "ModuleState w/ outputs and deps => Module Addr (new)", - false, - "", - "module.foo", - &ModuleState{ - Path: rootModulePath, - Outputs: map[string]*OutputState{ - "foo": &OutputState{ - Type: "string", - Sensitive: false, - Value: "bar", - }, - }, - Dependencies: []string{"foo"}, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "test_instance.bar": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - - &State{}, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root", "foo"}, - Outputs: map[string]*OutputState{ - "foo": &OutputState{ - Type: "string", - Sensitive: false, - Value: "bar", - }, - }, - Dependencies: []string{"foo"}, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "test_instance.bar": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - }, - - { - "ModuleState => Module Addr (existing)", - true, - "", - "module.foo", - &ModuleState{}, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root", "foo"}, - Resources: map[string]*ResourceState{ - "test_instance.baz": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - nil, - }, - - { - "ModuleState with children => Module Addr (new)", - false, - "module.foo", - "module.bar", - - []*ModuleState{ - &ModuleState{ - Path: []string{"root", "foo"}, - Resources: map[string]*ResourceState{}, - }, - - 
&ModuleState{ - Path: []string{"root", "foo", "child1"}, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - - &ModuleState{ - Path: []string{"root", "foo", "child2"}, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - - // Should be ignored - &ModuleState{ - Path: []string{"root", "baz", "child2"}, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - - &State{}, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root", "bar"}, - Resources: map[string]*ResourceState{}, - }, - - &ModuleState{ - Path: []string{"root", "bar", "child1"}, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - - &ModuleState{ - Path: []string{"root", "bar", "child2"}, - Resources: map[string]*ResourceState{ - "test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - }, - - { - "ResourceState => Resource Addr (new)", - false, - "aws_instance.bar", - "aws_instance.foo", - &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - &State{}, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root"}, - Resources: map[string]*ResourceState{ - "aws_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - }, - - { - "ResourceState w/ deps, provider => Resource Addr (new)", - false, - "aws_instance.bar", - "aws_instance.foo", - &ResourceState{ - Type: "test_instance", - Provider: "foo", - Dependencies: []string{"bar"}, - 
Primary: &InstanceState{ - ID: "foo", - }, - }, - - &State{}, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root"}, - Resources: map[string]*ResourceState{ - "aws_instance.foo": &ResourceState{ - Type: "test_instance", - Provider: "foo", - Dependencies: []string{"bar"}, - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - }, - - { - "ResourceState tainted => Resource Addr (new)", - false, - "aws_instance.bar", - "aws_instance.foo", - &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - Tainted: true, - }, - }, - - &State{}, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root"}, - Resources: map[string]*ResourceState{ - "aws_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - Tainted: true, - }, - }, - }, - }, - }, - }, - }, - - { - "ResourceState with count unspecified => Resource Addr (new)", - false, - "aws_instance.bar", - "aws_instance.foo", - []*ResourceState{ - &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "bar", - }, - }, - }, - - &State{}, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root"}, - Resources: map[string]*ResourceState{ - "aws_instance.foo.0": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - "aws_instance.foo.1": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "bar", - }, - }, - }, - }, - }, - }, - }, - - { - "ResourceState with count unspecified => Resource Addr (new with count)", - true, - "aws_instance.bar", - "aws_instance.foo[0]", - []*ResourceState{ - &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "bar", - }, - }, - }, - - &State{}, - nil, - }, - - { - 
"ResourceState with single count unspecified => Resource Addr (new with count)", - false, - "aws_instance.bar", - "aws_instance.foo[0]", - []*ResourceState{ - &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - - &State{}, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root"}, - Resources: map[string]*ResourceState{ - "aws_instance.foo.0": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - }, - - { - "ResourceState => Resource Addr (new with count)", - false, - "aws_instance.bar", - "aws_instance.foo[0]", - &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - &State{}, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root"}, - Resources: map[string]*ResourceState{ - "aws_instance.foo.0": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - }, - - { - "ResourceState => Resource Addr (existing)", - true, - "aws_instance.bar", - "aws_instance.foo", - &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root"}, - Resources: map[string]*ResourceState{ - "aws_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - nil, - }, - - { - "ResourceState => Module (new)", - false, - "aws_instance.bar", - "module.foo", - &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - - &State{}, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root", "foo"}, - Resources: map[string]*ResourceState{ - "aws_instance.bar": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - }, - - { - "InstanceState => Resource (new)", - false, - 
"aws_instance.bar.primary", - "aws_instance.baz", - &InstanceState{ - ID: "foo", - }, - - &State{}, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root"}, - Resources: map[string]*ResourceState{ - "aws_instance.baz": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - }, - - { - "InstanceState => Module (new)", - false, - "aws_instance.bar.primary", - "module.foo", - &InstanceState{ - ID: "foo", - }, - - &State{}, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root", "foo"}, - Resources: map[string]*ResourceState{ - "aws_instance.bar": &ResourceState{ - Type: "aws_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - }, - - { - "ModuleState => Module Addr (new with data source)", - false, - "", - "module.foo", - &ModuleState{ - Path: rootModulePath, - Resources: map[string]*ResourceState{ - "data.test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - - &State{}, - &State{ - Modules: []*ModuleState{ - &ModuleState{ - Path: []string{"root", "foo"}, - Resources: map[string]*ResourceState{ - "data.test_instance.foo": &ResourceState{ - Type: "test_instance", - Primary: &InstanceState{ - ID: "foo", - }, - }, - }, - }, - }, - }, - }, - } - - for i, tc := range cases { - t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { - // Make sure they're both initialized as normal - tc.One.init() - if tc.Two != nil { - tc.Two.init() - } - - // Add the value - err := tc.One.Add(tc.From, tc.To, tc.Value) - if (err != nil) != tc.Err { - t.Fatal(err) - } - if tc.Err { - return - } - - // Prune them both to be sure - tc.One.prune() - tc.Two.prune() - - // Verify equality - if !tc.One.Equal(tc.Two) { - //t.Fatalf("Bad: %s\n\n%#v\n\n%#v", k, tc.One, tc.Two) - t.Fatalf("Bad: \n\n%s\n\n%s", tc.One.String(), tc.Two.String()) - } - }) - } -} From 
5458a91985e3dee49307e70913c622e4eb34ce39 Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Fri, 26 Oct 2018 19:08:46 +0200 Subject: [PATCH 002/149] command/state: update and fix the state show command --- command/show.go | 1 - command/state_meta.go | 6 -- command/state_push.go | 4 +- command/state_show.go | 145 +++++++++++++++++++++++-------------- command/state_show_test.go | 73 +++++++++++++++---- states/state_filter.go | 2 +- 6 files changed, 151 insertions(+), 80 deletions(-) diff --git a/command/show.go b/command/show.go index 6d970f1bbb14..bb1106b5974a 100644 --- a/command/show.go +++ b/command/show.go @@ -23,7 +23,6 @@ type ShowCommand struct { } func (c *ShowCommand) Run(args []string) int { - args, err := c.Meta.process(args, false) if err != nil { return 1 diff --git a/command/state_meta.go b/command/state_meta.go index 247e66207b52..f823de880520 100644 --- a/command/state_meta.go +++ b/command/state_meta.go @@ -133,9 +133,3 @@ func (c *StateMeta) filter(state *states.State, args []string) ([]*states.Filter return results, nil } - -const errStateMultiple = `Multiple instances found for the given pattern! - -This command requires that the pattern match exactly one instance -of a resource. To view the matched instances, use "terraform state list". -Please modify the pattern to match only a single instance.` diff --git a/command/state_push.go b/command/state_push.go index ff79049149d1..9de232abca42 100644 --- a/command/state_push.go +++ b/command/state_push.go @@ -32,8 +32,8 @@ func (c *StatePushCommand) Run(args []string) int { args = cmdFlags.Args() if len(args) != 1 { - c.Ui.Error("Exactly one argument expected: path to state to push") - return 1 + c.Ui.Error("Exactly one argument expected.\n") + return cli.RunResultHelp } // Determine our reader for the input state. 
This is the filepath diff --git a/command/state_show.go b/command/state_show.go index db59cbca1f15..8f501dccee73 100644 --- a/command/state_show.go +++ b/command/state_show.go @@ -2,8 +2,13 @@ package command import ( "fmt" + "os" "strings" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/states" "github.com/mitchellh/cli" ) @@ -20,11 +25,15 @@ func (c *StateShowCommand) Run(args []string) int { } cmdFlags := c.Meta.flagSet("state show") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") + cmdFlags.StringVar(&c.Meta.statePath, "state", "", "path") if err := cmdFlags.Parse(args); err != nil { return cli.RunResultHelp } args = cmdFlags.Args() + if len(args) != 1 { + c.Ui.Error("Exactly one argument expected.\n") + return cli.RunResultHelp + } // Load the backend b, backendDiags := c.Backend(nil) @@ -33,73 +42,85 @@ func (c *StateShowCommand) Run(args []string) int { return 1 } + // We require a local backend + local, ok := b.(backend.Local) + if !ok { + c.Ui.Error(ErrUnsupportedLocalOp) + return 1 + } + + // Check if the address can be parsed + addr, addrDiags := addrs.ParseAbsResourceInstanceStr(args[0]) + if addrDiags.HasErrors() { + c.Ui.Error(fmt.Sprintf(errParsingAddress, args[0])) + return 1 + } + + // We expect the config dir to always be the cwd + cwd, err := os.Getwd() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error getting cwd: %s", err)) + return 1 + } + + // Build the operation (required to get the schemas) + opReq := c.Operation(b) + opReq.ConfigDir = cwd + opReq.ConfigLoader, err = c.initConfigLoader() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error initializing config loader: %s", err)) + return 1 + } + + // Get the context (required to get the schemas) + ctx, _, ctxDiags := local.Context(opReq) + if ctxDiags.HasErrors() { + c.showDiagnostics(ctxDiags) + return 1 + } + + // Get the schemas from the 
context + schemas := ctx.Schemas() + // Get the state env := c.Workspace() - state, err := b.StateMgr(env) + stateMgr, err := b.StateMgr(env) if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) return 1 } - if err := state.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + if err := stateMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to refresh state: %s", err)) return 1 } - stateReal := state.State() - if stateReal == nil { + state := stateMgr.State() + if state == nil { c.Ui.Error(fmt.Sprintf(errStateNotFound)) return 1 } - c.Ui.Error("state show not yet updated for new state types") - return 1 - - /* - filter := &terraform.StateFilter{State: stateReal} - results, err := filter.Filter(args...) - if err != nil { - c.Ui.Error(fmt.Sprintf(errStateFilter, err)) - return 1 - } - - if len(results) == 0 { - return 0 - } - - instance, err := c.filterInstance(results) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - if instance == nil { - return 0 - } - - is := instance.Value.(*terraform.InstanceState) - - // Sort the keys - var keys []string - for k, _ := range is.Attributes { - keys = append(keys, k) - } - sort.Strings(keys) - - // Build the output - var output []string - output = append(output, fmt.Sprintf("id | %s", is.ID)) - for _, k := range keys { - if k != "id" { - output = append(output, fmt.Sprintf("%s | %s", k, is.Attributes[k])) - } - } - - // Output - config := columnize.DefaultConfig() - config.Glue = " = " - c.Ui.Output(columnize.Format(output, config)) - return 0 - */ + is := state.ResourceInstance(addr) + if !is.HasCurrent() { + c.Ui.Error(errNoInstanceFound) + return 1 + } + + singleInstance := states.NewState() + singleInstance.EnsureModule(addr.Module).SetResourceInstanceCurrent( + addr.Resource, + is.Current, + addr.Resource.Resource.DefaultProviderConfig().Absolute(addr.Module), + ) + + output := 
format.State(&format.StateOpts{ + State: singleInstance, + Color: c.Colorize(), + Schemas: schemas, + }) + c.Ui.Output(output[strings.Index(output, "#"):]) + + return 0 } func (c *StateShowCommand) Help() string { @@ -125,3 +146,15 @@ Options: func (c *StateShowCommand) Synopsis() string { return "Show a resource in the state" } + +const errNoInstanceFound = `No instance found for the given address! + +This command requires that the address references one specific instance. +To view the available instances, use "terraform state list". Please modify +the address to reference a specific instance.` + +const errParsingAddress = `Error parsing instance address: %s + +This command requires that the address references one specific instance. +To view the available instances, use "terraform state list". Please modify +the address to reference a specific instance.` diff --git a/command/state_show_test.go b/command/state_show_test.go index c2f56c871374..928a306f3965 100644 --- a/command/state_show_test.go +++ b/command/state_show_test.go @@ -4,10 +4,12 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" - "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" ) func TestStateShow(t *testing.T) { @@ -28,6 +30,18 @@ func TestStateShow(t *testing.T) { statePath := testStateFile(t, state) p := testProvider() + p.GetSchemaReturn = &terraform.ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + }, + }, + }, + } + ui := new(cli.MockUi) c := &StateShowCommand{ Meta: Meta{ @@ -45,7 +59,7 @@ func TestStateShow(t *testing.T) { } // Test that outputs were 
displayed - expected := strings.TrimSpace(testStateShowOutput) + "\n" + expected := strings.TrimSpace(testStateShowOutput) + "\n\n\n" actual := ui.OutputWriter.String() if actual != expected { t.Fatalf("Expected:\n%q\n\nTo equal: %q", actual, expected) @@ -53,6 +67,7 @@ func TestStateShow(t *testing.T) { } func TestStateShow_multi(t *testing.T) { + submod, _ := addrs.ParseModuleInstanceStr("module.sub") state := states.BuildState(func(s *states.SyncState) { s.SetResourceInstanceCurrent( addrs.Resource{ @@ -70,18 +85,30 @@ func TestStateShow_multi(t *testing.T) { addrs.Resource{ Mode: addrs.ManagedResourceMode, Type: "test_instance", - Name: "bar", - }.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance), + Name: "foo", + }.Instance(addrs.NoKey).Absolute(submod), &states.ResourceInstanceObjectSrc{ AttrsJSON: []byte(`{"id":"foo","foo":"value","bar":"value"}`), Status: states.ObjectReady, }, - addrs.ProviderConfig{Type: "test"}.Absolute(addrs.RootModuleInstance), + addrs.ProviderConfig{Type: "test"}.Absolute(submod), ) }) statePath := testStateFile(t, state) p := testProvider() + p.GetSchemaReturn = &terraform.ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Optional: true, Computed: true}, + "foo": {Type: cty.String, Optional: true}, + "bar": {Type: cty.String, Optional: true}, + }, + }, + }, + } + ui := new(cli.MockUi) c := &StateShowCommand{ Meta: Meta{ @@ -94,9 +121,16 @@ func TestStateShow_multi(t *testing.T) { "-state", statePath, "test_instance.foo", } - if code := c.Run(args); code != 1 { + if code := c.Run(args); code != 0 { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) } + + // Test that outputs were displayed + expected := strings.TrimSpace(testStateShowOutput) + "\n\n\n" + actual := ui.OutputWriter.String() + if actual != expected { + t.Fatalf("Expected:\n%q\n\nTo equal: %q", actual, expected) + } } func TestStateShow_noState(t 
*testing.T) { @@ -112,9 +146,14 @@ func TestStateShow_noState(t *testing.T) { }, } - args := []string{} + args := []string{ + "test_instance.foo", + } if code := c.Run(args); code != 1 { - t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + t.Fatalf("bad: %d", code) + } + if !strings.Contains(ui.ErrorWriter.String(), "No state file was found!") { + t.Fatalf("expected a no state file error, got: %s", ui.ErrorWriter.String()) } } @@ -135,13 +174,19 @@ func TestStateShow_emptyState(t *testing.T) { "-state", statePath, "test_instance.foo", } - if code := c.Run(args); code != 0 { - t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d", code) + } + if !strings.Contains(ui.ErrorWriter.String(), "No instance found for the given address!") { + t.Fatalf("expected a no instance found error, got: %s", ui.ErrorWriter.String()) } } const testStateShowOutput = ` -id = bar -bar = value -foo = value +# test_instance.foo: +resource "test_instance" "foo" { + bar = "value" + foo = "value" + id = "bar" +} ` diff --git a/states/state_filter.go b/states/state_filter.go index 0f10a58dc687..de5595cfa536 100644 --- a/states/state_filter.go +++ b/states/state_filter.go @@ -40,7 +40,7 @@ func (f *Filter) Filter(fs ...string) ([]*FilterResult, error) { as[i] = addr continue } - return nil, fmt.Errorf("Error parsing address '%s'", v) + return nil, fmt.Errorf("Error parsing address: %s", v) } // If we weren't given any filters, then we list all From 2b075bbf8e322b8e2b40b9cc4901d10c95281651 Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Sat, 27 Oct 2018 15:21:42 +0200 Subject: [PATCH 003/149] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 99da946ae77e..4dbc392e3b9e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ IMPROVEMENTS: * backend/s3: Support `credential_source` if specified in AWS configuration file [GH-19190] +* command/state: 
Update and enable the `state mv` command [GH-19197] * command/state: Update and enable the `state rm` command [GH-19178] BUG FIXES: From 5fa624c55eaf1e1037323e9b321f221c617662b1 Mon Sep 17 00:00:00 2001 From: Kristin Laemmert Date: Mon, 29 Oct 2018 09:22:21 -0700 Subject: [PATCH 004/149] website: update terraform_remote_state syntax in backend docs --- website/docs/backends/types/artifactory.html.md | 2 +- website/docs/backends/types/azurerm.html.md | 2 +- website/docs/backends/types/consul.html.md | 2 +- website/docs/backends/types/etcd.html.md | 2 +- website/docs/backends/types/etcdv3.html.md | 2 +- website/docs/backends/types/gcs.html.md | 2 +- website/docs/backends/types/http.html.md | 2 +- website/docs/backends/types/local.html.md | 2 +- website/docs/backends/types/manta.html.md | 2 +- website/docs/backends/types/remote.html.md | 2 +- website/docs/backends/types/s3.html.md | 2 +- website/docs/backends/types/swift.html.md | 2 +- website/docs/backends/types/terraform-enterprise.html.md | 2 +- 13 files changed, 13 insertions(+), 13 deletions(-) diff --git a/website/docs/backends/types/artifactory.html.md b/website/docs/backends/types/artifactory.html.md index e9634443123a..ea4e33cd0c27 100644 --- a/website/docs/backends/types/artifactory.html.md +++ b/website/docs/backends/types/artifactory.html.md @@ -38,7 +38,7 @@ terraform { ```hcl data "terraform_remote_state" "foo" { backend = "artifactory" - config { + config = { username = "SheldonCooper" password = "AmyFarrahFowler" url = "https://custom.artifactoryonline.com/artifactory" diff --git a/website/docs/backends/types/azurerm.html.md b/website/docs/backends/types/azurerm.html.md index 1d1126b9ab50..4ecc57375def 100644 --- a/website/docs/backends/types/azurerm.html.md +++ b/website/docs/backends/types/azurerm.html.md @@ -33,7 +33,7 @@ Note that for the access credentials we recommend using a ```hcl data "terraform_remote_state" "foo" { backend = "azurerm" - config { + config = { storage_account_name = 
"terraform123abc" container_name = "terraform-state" key = "prod.terraform.tfstate" diff --git a/website/docs/backends/types/consul.html.md b/website/docs/backends/types/consul.html.md index 79d726fb979d..5e1577269d61 100644 --- a/website/docs/backends/types/consul.html.md +++ b/website/docs/backends/types/consul.html.md @@ -34,7 +34,7 @@ Note that for the access credentials we recommend using a ```hcl data "terraform_remote_state" "foo" { backend = "consul" - config { + config = { path = "full/path" } } diff --git a/website/docs/backends/types/etcd.html.md b/website/docs/backends/types/etcd.html.md index 2e4cf089f178..cc731c94026e 100644 --- a/website/docs/backends/types/etcd.html.md +++ b/website/docs/backends/types/etcd.html.md @@ -28,7 +28,7 @@ terraform { ```hcl data "terraform_remote_state" "foo" { backend = "etcd" - config { + config = { path = "path/to/terraform.tfstate" endpoints = "http://one:4001 http://two:4001" } diff --git a/website/docs/backends/types/etcdv3.html.md b/website/docs/backends/types/etcdv3.html.md index 19f27c13c7c3..1be49373fdb5 100644 --- a/website/docs/backends/types/etcdv3.html.md +++ b/website/docs/backends/types/etcdv3.html.md @@ -34,7 +34,7 @@ Note that for the access credentials we recommend using a ```hcl data "terraform_remote_state" "foo" { backend = "etcdv3" - config { + config = { endpoints = ["etcd-1:2379", "etcd-2:2379", "etcd-3:2379"] lock = true prefix = "terraform-state/" diff --git a/website/docs/backends/types/gcs.html.md b/website/docs/backends/types/gcs.html.md index 93ae55870455..3349c304eefb 100644 --- a/website/docs/backends/types/gcs.html.md +++ b/website/docs/backends/types/gcs.html.md @@ -28,7 +28,7 @@ terraform { ```hcl data "terraform_remote_state" "foo" { backend = "gcs" - config { + config = { bucket = "terraform-state" prefix = "prod" } diff --git a/website/docs/backends/types/http.html.md b/website/docs/backends/types/http.html.md index f0dce897ac68..19254ce6968b 100644 --- 
a/website/docs/backends/types/http.html.md +++ b/website/docs/backends/types/http.html.md @@ -35,7 +35,7 @@ terraform { ```hcl data "terraform_remote_state" "foo" { backend = "http" - config { + config = { address = "http://my.rest.api.com" } } diff --git a/website/docs/backends/types/local.html.md b/website/docs/backends/types/local.html.md index 280f4118c3e1..a98787634d2b 100644 --- a/website/docs/backends/types/local.html.md +++ b/website/docs/backends/types/local.html.md @@ -29,7 +29,7 @@ terraform { data "terraform_remote_state" "foo" { backend = "local" - config { + config = { path = "${path.module}/../../terraform.tfstate" } } diff --git a/website/docs/backends/types/manta.html.md b/website/docs/backends/types/manta.html.md index 386f08af7e2e..018009422eba 100644 --- a/website/docs/backends/types/manta.html.md +++ b/website/docs/backends/types/manta.html.md @@ -31,7 +31,7 @@ Note that for the access credentials we recommend using a ```hcl data "terraform_remote_state" "foo" { backend = "manta" - config { + config = { path = "random/path" object_name = "terraform.tfstate" } diff --git a/website/docs/backends/types/remote.html.md b/website/docs/backends/types/remote.html.md index 576c3b22c667..25f08b1ec484 100644 --- a/website/docs/backends/types/remote.html.md +++ b/website/docs/backends/types/remote.html.md @@ -102,7 +102,7 @@ terraform { data "terraform_remote_state" "foo" { backend = "remote" - config { + config = { organization = "company" workspaces { diff --git a/website/docs/backends/types/s3.html.md b/website/docs/backends/types/s3.html.md index 50a7220ca430..92bd1155127c 100644 --- a/website/docs/backends/types/s3.html.md +++ b/website/docs/backends/types/s3.html.md @@ -104,7 +104,7 @@ source](/docs/providers/terraform/d/remote_state.html). 
```hcl data "terraform_remote_state" "network" { backend = "s3" - config { + config = { bucket = "terraform-state-prod" key = "network/terraform.tfstate" region = "us-east-1" diff --git a/website/docs/backends/types/swift.html.md b/website/docs/backends/types/swift.html.md index 9e66cc03b718..192e5852931c 100644 --- a/website/docs/backends/types/swift.html.md +++ b/website/docs/backends/types/swift.html.md @@ -35,7 +35,7 @@ For the access credentials we recommend using a ```hcl data "terraform_remote_state" "foo" { backend = "swift" - config { + config = { path = "terraform_state" } } diff --git a/website/docs/backends/types/terraform-enterprise.html.md b/website/docs/backends/types/terraform-enterprise.html.md index ecd87425ce24..2351d3a62b23 100644 --- a/website/docs/backends/types/terraform-enterprise.html.md +++ b/website/docs/backends/types/terraform-enterprise.html.md @@ -46,7 +46,7 @@ omitting the access token, which can be provided as an environment variable. ```hcl data "terraform_remote_state" "foo" { backend = "atlas" - config { + config = { name = "example_corp/networking-prod" } } From ab88f8ca0f80bb819ffa071ccd94403457fb73ff Mon Sep 17 00:00:00 2001 From: Nick Fagerlund Date: Mon, 29 Oct 2018 11:00:24 -0700 Subject: [PATCH 005/149] website: Update and link the page about remote backend operations (#19203) --- website/docs/backends/operations.html.md | 33 +++++++++++++++++------- website/layouts/docs.erb | 4 +++ 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/website/docs/backends/operations.html.md b/website/docs/backends/operations.html.md index 948e7c965cf5..690f1fe17e7b 100644 --- a/website/docs/backends/operations.html.md +++ b/website/docs/backends/operations.html.md @@ -1,19 +1,32 @@ --- layout: "docs" -page_title: "Backends: Operations (refresh, plan, apply, etc.)" -sidebar_current: "docs-backends-ops" +page_title: "Backends: Remote Operations (plan, apply, etc.)" +sidebar_current: "docs-backends-operations" description: |- Some 
backends support the ability to run operations (`refresh`, `plan`, `apply`, etc.) remotely. Terraform will continue to look and behave as if they're running locally while they in fact run on a remote machine. --- -# Operations (plan, apply, etc.) +# Remote Operations (plan, apply, etc.) -Some backends support the ability to run operations (`refresh`, `plan`, `apply`, -etc.) remotely. Terraform will continue to look and behave as if they're -running locally while they in fact run on a remote machine. +Most backends run all operations on the local system — although Terraform stores +its state remotely with these backends, it still executes its logic locally and +makes API requests directly from the system where it was invoked. -Backends should not modify the actual infrastructure change behavior of -these commands. They will only modify how they're invoked. +This is simple to understand and work with, but when many people are +collaborating on the same Terraform configurations, it requires everyone's +execution environment to be similar. This includes sharing access to +infrastructure provider credentials, keeping Terraform versions in sync, +keeping Terraform variables in sync, and installing any extra software required +by Terraform providers. This becomes more burdensome as teams get larger. -At the time of writing, no backends support this. This shouldn't be linked -in the sidebar yet! +Some backends can run operations (`plan`, `apply`, etc.) on a remote machine, +while appearing to execute locally. This enables a more consistent execution +environment and more powerful access controls, without disrupting workflows +for users who are already comfortable with running Terraform. + +Currently, [the `remote` backend](./types/remote.html) is the only backend to +support remote operations, and [Terraform Enterprise](/docs/enterprise/index.html) +is the only remote execution environment that supports it. 
For more information, see: + +- [The `remote` backend](./types/remote.html) +- [Terraform Enterprise's CLI-driven run workflow](/docs/enterprise/run/cli.html) diff --git a/website/layouts/docs.erb b/website/layouts/docs.erb index 6452e293f0b5..c9d6a757a267 100644 --- a/website/layouts/docs.erb +++ b/website/layouts/docs.erb @@ -308,6 +308,10 @@ State Storage & Locking + > + Remote Operations + + > Backend Types From 6dad121e7035f8b06aa6fccd0813205ac6d13294 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Mon, 29 Oct 2018 16:11:19 -0400 Subject: [PATCH 006/149] insert resource timeouts into the config schema Resource timeouts were a separate config block, but did not exist in the resource schema. Insert any defined timeouts when generating the configshema.Block so that the fields can be accepted and validated by core. --- helper/plugin/grpc_provider.go | 2 +- helper/plugin/grpc_provider_test.go | 46 ++++++++++++++++++++++++++-- helper/schema/core_schema.go | 47 +++++++++++++++++++++++++++++ terraform/resource.go | 2 +- 4 files changed, 93 insertions(+), 4 deletions(-) diff --git a/helper/plugin/grpc_provider.go b/helper/plugin/grpc_provider.go index c8f329268998..16885bc76835 100644 --- a/helper/plugin/grpc_provider.go +++ b/helper/plugin/grpc_provider.go @@ -463,7 +463,7 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl } priorState.Meta = priorPrivate - // turn the propsed state into a legacy configuration + // turn the proposed state into a legacy configuration config := terraform.NewResourceConfigShimmed(proposedNewStateVal, block) diff, err := s.provider.SimpleDiff(info, priorState, config) diff --git a/helper/plugin/grpc_provider_test.go b/helper/plugin/grpc_provider_test.go index 5162d7d5725b..03f059a0b405 100644 --- a/helper/plugin/grpc_provider_test.go +++ b/helper/plugin/grpc_provider_test.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" "testing" + "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" 
@@ -388,8 +389,8 @@ func TestApplyResourceChange(t *testing.T) { t.Fatal(err) } - // A propsed state with only the ID unknown will produce a nil diff, and - // should return the propsed state value. + // A proposed state with only the ID unknown will produce a nil diff, and + // should return the proposed state value. plannedVal, err := schema.CoerceValue(cty.ObjectVal(map[string]cty.Value{ "id": cty.UnknownVal(cty.String), })) @@ -595,3 +596,44 @@ func TestPrepareProviderConfig(t *testing.T) { }) } } + +func TestGetSchemaTimeouts(t *testing.T) { + r := &schema.Resource{ + SchemaVersion: 4, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(time.Second), + Read: schema.DefaultTimeout(2 * time.Second), + Update: schema.DefaultTimeout(3 * time.Second), + Default: schema.DefaultTimeout(10 * time.Second), + }, + Schema: map[string]*schema.Schema{ + "foo": { + Type: schema.TypeInt, + Optional: true, + }, + }, + } + + // verify that the timeouts appear in the schema as defined + block := r.CoreConfigSchema() + timeoutsBlock := block.BlockTypes["timeouts"] + if timeoutsBlock == nil { + t.Fatal("missing timeouts in schema") + } + + if timeoutsBlock.Attributes["create"] == nil { + t.Fatal("missing create timeout in schema") + } + if timeoutsBlock.Attributes["read"] == nil { + t.Fatal("missing read timeout in schema") + } + if timeoutsBlock.Attributes["update"] == nil { + t.Fatal("missing update timeout in schema") + } + if d := timeoutsBlock.Attributes["delete"]; d != nil { + t.Fatalf("unexpected delete timeout in schema: %#v", d) + } + if timeoutsBlock.Attributes["default"] == nil { + t.Fatal("missing default timeout in schema") + } +} diff --git a/helper/schema/core_schema.go b/helper/schema/core_schema.go index f16cf3e64e53..a01653b80334 100644 --- a/helper/schema/core_schema.go +++ b/helper/schema/core_schema.go @@ -172,6 +172,53 @@ func (r *Resource) CoreConfigSchema() *configschema.Block { } } + // insert configured timeout values into the schema + 
if r.Timeouts != nil { + timeouts := configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + } + + if r.Timeouts.Create != nil { + timeouts.Attributes["create"] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Read != nil { + timeouts.Attributes["read"] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Update != nil { + timeouts.Attributes["update"] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Delete != nil { + timeouts.Attributes["delete"] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Default != nil { + timeouts.Attributes["default"] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + block.BlockTypes["timeouts"] = &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: timeouts, + } + } + return block } diff --git a/terraform/resource.go b/terraform/resource.go index 3e87d719b30e..473d73d8f018 100644 --- a/terraform/resource.go +++ b/terraform/resource.go @@ -276,7 +276,7 @@ func newResourceConfigShimmedComputedKeys(obj cty.Value, schema *configschema.Bl } blockVal := obj.GetAttr(typeName) - if !blockVal.IsKnown() || blockVal.IsNull() { + if blockVal.IsNull() || !blockVal.IsKnown() { continue } From 121c9c127ff465b9f229d45a29261cb6bc8a81ec Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 30 Oct 2018 12:58:29 -0400 Subject: [PATCH 007/149] add timeout tests to the test provider --- builtin/providers/test/provider.go | 1 + builtin/providers/test/resource_timeout.go | 120 ++++++++++++++++++ .../providers/test/resource_timeout_test.go | 91 +++++++++++++ 3 files changed, 212 insertions(+) create mode 100644 builtin/providers/test/resource_timeout.go create mode 100644 builtin/providers/test/resource_timeout_test.go diff --git a/builtin/providers/test/provider.go b/builtin/providers/test/provider.go index 6c71bfb18be1..1f005b24f5e9 
100644 --- a/builtin/providers/test/provider.go +++ b/builtin/providers/test/provider.go @@ -20,6 +20,7 @@ func Provider() terraform.ResourceProvider { "test_resource": testResource(), "test_resource_gh12183": testResourceGH12183(), "test_resource_with_custom_diff": testResourceCustomDiff(), + "test_resource_timeout": testResourceTimeout(), }, DataSourcesMap: map[string]*schema.Resource{ "test_data_source": testDataSource(), diff --git a/builtin/providers/test/resource_timeout.go b/builtin/providers/test/resource_timeout.go new file mode 100644 index 000000000000..cf32bcaf5a6a --- /dev/null +++ b/builtin/providers/test/resource_timeout.go @@ -0,0 +1,120 @@ +package test + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform/helper/schema" +) + +func testResourceTimeout() *schema.Resource { + return &schema.Resource{ + Create: testResourceTimeoutCreate, + Read: testResourceTimeoutRead, + Update: testResourceTimeoutUpdate, + Delete: testResourceTimeoutDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(time.Second), + Update: schema.DefaultTimeout(time.Second), + Delete: schema.DefaultTimeout(time.Second), + }, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "create_delay": { + Type: schema.TypeString, + Optional: true, + }, + "read_delay": { + Type: schema.TypeString, + Optional: true, + }, + "update_delay": { + Type: schema.TypeString, + Optional: true, + }, + "delete_delay": { + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func testResourceTimeoutCreate(d *schema.ResourceData, meta interface{}) error { + delayString := d.Get("create_delay").(string) + var delay time.Duration + var err error + if delayString != "" { + delay, err = time.ParseDuration(delayString) + if err != nil { + return err + } + } + + if delay > d.Timeout(schema.TimeoutCreate) { + return fmt.Errorf("timeout while creating resource") + } + + d.SetId("testId") + + 
return testResourceRead(d, meta) +} + +func testResourceTimeoutRead(d *schema.ResourceData, meta interface{}) error { + delayString := d.Get("read_delay").(string) + var delay time.Duration + var err error + if delayString != "" { + delay, err = time.ParseDuration(delayString) + if err != nil { + return err + } + } + + if delay > d.Timeout(schema.TimeoutRead) { + return fmt.Errorf("timeout while reading resource") + } + + return nil +} + +func testResourceTimeoutUpdate(d *schema.ResourceData, meta interface{}) error { + delayString := d.Get("update_delay").(string) + var delay time.Duration + var err error + if delayString != "" { + delay, err = time.ParseDuration(delayString) + if err != nil { + return err + } + } + + if delay > d.Timeout(schema.TimeoutUpdate) { + return fmt.Errorf("timeout while updating resource") + } + return nil +} + +func testResourceTimeoutDelete(d *schema.ResourceData, meta interface{}) error { + delayString := d.Get("delete_delay").(string) + var delay time.Duration + var err error + if delayString != "" { + delay, err = time.ParseDuration(delayString) + if err != nil { + return err + } + } + + if delay > d.Timeout(schema.TimeoutDelete) { + return fmt.Errorf("timeout while deleting resource") + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/test/resource_timeout_test.go b/builtin/providers/test/resource_timeout_test.go new file mode 100644 index 000000000000..9edbba89a04c --- /dev/null +++ b/builtin/providers/test/resource_timeout_test.go @@ -0,0 +1,91 @@ +package test + +import ( + "regexp" + "strings" + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestResourceTimeout_create(t *testing.T) { + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_timeout" "foo" { + create_delay = "2s" + timeouts { + create = "1s" + } +} + `), + 
ExpectError: regexp.MustCompile("timeout while creating resource"), + }, + }, + }) +} +func TestResourceTimeout_update(t *testing.T) { + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_timeout" "foo" { + update_delay = "1s" + timeouts { + update = "1s" + } +} + `), + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_timeout" "foo" { + update_delay = "2s" + timeouts { + update = "1s" + } +} + `), + ExpectError: regexp.MustCompile("timeout while updating resource"), + }, + }, + }) +} + +func TestResourceTimeout_read(t *testing.T) { + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_timeout" "foo" { +} + `), + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_timeout" "foo" { + read_delay = "30m" +} + `), + ExpectError: regexp.MustCompile("timeout while reading resource"), + }, + // we need to remove the read_delay so that the resource can be + // destroyed in the final step, but expect an error here from the + // pre-existing delay. + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_timeout" "foo" { +} + `), + ExpectError: regexp.MustCompile("timeout while reading resource"), + }, + }, + }) +} From e38a5a769d4b0f072618ade958bf8c3a83287dec Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 30 Oct 2018 12:59:45 -0400 Subject: [PATCH 008/149] copy timouts into plan and apply state helper/schema will remove "timeouts" from the config, and stash them in the diff.Meta map. Terraform sees "timeouts" as a regular config block, so needs them to be present in the state in order to not show a diff. 
Have the GRPCProviderServer shim copy all timeout values into any state it returns to provide consistent diffs in core. --- helper/plugin/grpc_provider.go | 57 ++++++++++++++++++++++++++----- helper/resource/testing_config.go | 2 +- 2 files changed, 49 insertions(+), 10 deletions(-) diff --git a/helper/plugin/grpc_provider.go b/helper/plugin/grpc_provider.go index 16885bc76835..136336e456d9 100644 --- a/helper/plugin/grpc_provider.go +++ b/helper/plugin/grpc_provider.go @@ -412,20 +412,22 @@ func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadReso // helper/schema should always copy the ID over, but do it again just to be safe newInstanceState.Attributes["id"] = newInstanceState.ID - newConfigVal, err := hcl2shim.HCL2ValueFromFlatmap(newInstanceState.Attributes, block.ImpliedType()) + newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(newInstanceState.Attributes, block.ImpliedType()) if err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) return resp, nil } - newConfigMP, err := msgpack.Marshal(newConfigVal, block.ImpliedType()) + newStateVal = copyTimeoutValues(newStateVal, stateVal) + + newStateMP, err := msgpack.Marshal(newStateVal, block.ImpliedType()) if err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) return resp, nil } resp.NewState = &proto.DynamicValue{ - Msgpack: newConfigMP, + Msgpack: newStateMP, } return resp, nil @@ -461,6 +463,7 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl return resp, nil } } + priorState.Meta = priorPrivate // turn the proposed state into a legacy configuration @@ -488,6 +491,8 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl return resp, nil } + plannedStateVal = copyTimeoutValues(plannedStateVal, proposedNewStateVal) + plannedMP, err := msgpack.Marshal(plannedStateVal, block.ImpliedType()) if err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) @@ 
-498,12 +503,14 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl } // the Meta field gets encoded into PlannedPrivate - plannedPrivate, err := json.Marshal(diff.Meta) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) - return resp, nil + if diff.Meta != nil { + plannedPrivate, err := json.Marshal(diff.Meta) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.PlannedPrivate = plannedPrivate } - resp.PlannedPrivate = plannedPrivate // collect the attributes that require instance replacement, and convert // them to cty.Paths. @@ -594,7 +601,10 @@ func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.A Meta: make(map[string]interface{}), } } - diff.Meta = private + + if private != nil { + diff.Meta = private + } newInstanceState, err := s.provider.Apply(info, priorState, diff) if err != nil { @@ -614,6 +624,8 @@ func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.A } } + newStateVal = copyTimeoutValues(newStateVal, plannedStateVal) + newStateMP, err := msgpack.Marshal(newStateVal, block.ImpliedType()) if err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) @@ -726,6 +738,8 @@ func (s *GRPCProviderServer) ReadDataSource(_ context.Context, req *proto.ReadDa return resp, nil } + newStateVal = copyTimeoutValues(newStateVal, configVal) + newStateMP, err := msgpack.Marshal(newStateVal, block.ImpliedType()) if err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) @@ -770,3 +784,28 @@ func pathToAttributePath(path cty.Path) *proto.AttributePath { return &proto.AttributePath{Steps: steps} } + +// helper/schema throws away timeout values from the config and stores them in +// the Private/Meta fields. we need to copy those values into the planned state +// so that core doesn't see a perpetual diff with the timeout block. 
+func copyTimeoutValues(to cty.Value, from cty.Value) cty.Value { + // if `from` is null, then there are no attributes, and if `to` is null we + // are planning to remove it altogether. + if from.IsNull() || to.IsNull() { + return to + } + + fromAttrs := from.AsValueMap() + timeouts, ok := fromAttrs[schema.TimeoutsConfigKey] + + // no timeouts to copy + // timeouts shouldn't be unknown, but don't copy possibly invalid values + if !ok || timeouts.IsNull() || !timeouts.IsWhollyKnown() { + return to + } + + toAttrs := to.AsValueMap() + toAttrs[schema.TimeoutsConfigKey] = timeouts + + return cty.ObjectVal(toAttrs) +} diff --git a/helper/resource/testing_config.go b/helper/resource/testing_config.go index ea1f412989f5..c8cc587bd4d0 100644 --- a/helper/resource/testing_config.go +++ b/helper/resource/testing_config.go @@ -77,7 +77,7 @@ func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep, } // We need to keep a copy of the state prior to destroying - // such that destroy steps can verify their behaviour in the check + // such that destroy steps can verify their behavior in the check // function stateBeforeApplication := state.DeepCopy() From f153720a36bcf3c0823eb0f4e5a9b6e7495a82b5 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 30 Oct 2018 14:14:23 -0400 Subject: [PATCH 009/149] add checks for timeouts attributes and blocks Don't overwrite anything the provider defined, in order to maintain existing behavior. 
Change strings to pre-defined constants --- helper/schema/core_schema.go | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/helper/schema/core_schema.go b/helper/schema/core_schema.go index a01653b80334..85e8b8c5b3e4 100644 --- a/helper/schema/core_schema.go +++ b/helper/schema/core_schema.go @@ -172,48 +172,52 @@ func (r *Resource) CoreConfigSchema() *configschema.Block { } } - // insert configured timeout values into the schema - if r.Timeouts != nil { + _, timeoutsAttr := block.Attributes[TimeoutsConfigKey] + _, timeoutsBlock := block.BlockTypes[TimeoutsConfigKey] + + // Insert configured timeout values into the schema, as long as the schema + // didn't define anything else by that name. + if r.Timeouts != nil && !timeoutsAttr && !timeoutsBlock { timeouts := configschema.Block{ Attributes: map[string]*configschema.Attribute{}, } if r.Timeouts.Create != nil { - timeouts.Attributes["create"] = &configschema.Attribute{ + timeouts.Attributes[TimeoutCreate] = &configschema.Attribute{ Type: cty.String, Optional: true, } } if r.Timeouts.Read != nil { - timeouts.Attributes["read"] = &configschema.Attribute{ + timeouts.Attributes[TimeoutRead] = &configschema.Attribute{ Type: cty.String, Optional: true, } } if r.Timeouts.Update != nil { - timeouts.Attributes["update"] = &configschema.Attribute{ + timeouts.Attributes[TimeoutUpdate] = &configschema.Attribute{ Type: cty.String, Optional: true, } } if r.Timeouts.Delete != nil { - timeouts.Attributes["delete"] = &configschema.Attribute{ + timeouts.Attributes[TimeoutDelete] = &configschema.Attribute{ Type: cty.String, Optional: true, } } if r.Timeouts.Default != nil { - timeouts.Attributes["default"] = &configschema.Attribute{ + timeouts.Attributes[TimeoutDefault] = &configschema.Attribute{ Type: cty.String, Optional: true, } } - block.BlockTypes["timeouts"] = &configschema.NestedBlock{ + block.BlockTypes[TimeoutsConfigKey] = &configschema.NestedBlock{ Nesting: configschema.NestingSingle, 
Block: timeouts, } From 36cede09f7d8f664efdc542e41b147fa4108d0da Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 30 Oct 2018 14:53:02 -0400 Subject: [PATCH 010/149] add provider tests for SuppressDiffFunc --- builtin/providers/test/provider.go | 1 + .../providers/test/resource_diff_suppress.go | 57 +++++++++++++++++++ .../test/resource_diff_suppress_test.go | 47 +++++++++++++++ 3 files changed, 105 insertions(+) create mode 100644 builtin/providers/test/resource_diff_suppress.go create mode 100644 builtin/providers/test/resource_diff_suppress_test.go diff --git a/builtin/providers/test/provider.go b/builtin/providers/test/provider.go index 1f005b24f5e9..d69ae95f9808 100644 --- a/builtin/providers/test/provider.go +++ b/builtin/providers/test/provider.go @@ -21,6 +21,7 @@ func Provider() terraform.ResourceProvider { "test_resource_gh12183": testResourceGH12183(), "test_resource_with_custom_diff": testResourceCustomDiff(), "test_resource_timeout": testResourceTimeout(), + "test_resource_diff_suppress": testResourceDiffSuppress(), }, DataSourcesMap: map[string]*schema.Resource{ "test_data_source": testDataSource(), diff --git a/builtin/providers/test/resource_diff_suppress.go b/builtin/providers/test/resource_diff_suppress.go new file mode 100644 index 000000000000..5c01a1d09d5e --- /dev/null +++ b/builtin/providers/test/resource_diff_suppress.go @@ -0,0 +1,57 @@ +package test + +import ( + "strings" + + "github.com/hashicorp/terraform/helper/schema" +) + +func testResourceDiffSuppress() *schema.Resource { + return &schema.Resource{ + Create: testResourceDiffSuppressCreate, + Read: testResourceDiffSuppressRead, + Update: testResourceDiffSuppressUpdate, + Delete: testResourceDiffSuppressDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "val_to_upper": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: func(val interface{}) string { + return 
strings.ToUpper(val.(string)) + }, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + return strings.ToUpper(old) == strings.ToUpper(new) + }, + }, + "optional": { + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func testResourceDiffSuppressCreate(d *schema.ResourceData, meta interface{}) error { + d.SetId("testId") + + return testResourceRead(d, meta) +} + +func testResourceDiffSuppressRead(d *schema.ResourceData, meta interface{}) error { + return nil +} + +func testResourceDiffSuppressUpdate(d *schema.ResourceData, meta interface{}) error { + return nil +} + +func testResourceDiffSuppressDelete(d *schema.ResourceData, meta interface{}) error { + d.SetId("") + return nil +} diff --git a/builtin/providers/test/resource_diff_suppress_test.go b/builtin/providers/test/resource_diff_suppress_test.go new file mode 100644 index 000000000000..59490e3584f8 --- /dev/null +++ b/builtin/providers/test/resource_diff_suppress_test.go @@ -0,0 +1,47 @@ +package test + +import ( + "strings" + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestResourceDiffSuppress_create(t *testing.T) { + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_diff_suppress" "foo" { + val_to_upper = "foo" +} + `), + }, + }, + }) +} +func TestResourceDiffSuppress_update(t *testing.T) { + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_diff_suppress" "foo" { + val_to_upper = "foo" +} + `), + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_diff_suppress" "foo" { + val_to_upper = "bar" + optional = "more" +} + `), + }, + }, + }) +} From 
e0ea2a5d069eb0837b124131ef3796291d5c8f57 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 30 Oct 2018 15:58:00 -0400 Subject: [PATCH 011/149] if there is no plan diff, prefer the prior state The prior state may contain customizations made by the provider. If there is no prior state, then take the proposed state. --- helper/plugin/grpc_provider.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/helper/plugin/grpc_provider.go b/helper/plugin/grpc_provider.go index 136336e456d9..7669cb628218 100644 --- a/helper/plugin/grpc_provider.go +++ b/helper/plugin/grpc_provider.go @@ -476,11 +476,16 @@ } if diff == nil { - // schema.Provider.Diff returns nil if it ends up making a diff with - // no changes, but our new interface wants us to return an actual - // change description that _shows_ there are no changes, so we return - // the proposed change that produces no diff. - resp.PlannedState = req.ProposedNewState + // schema.Provider.Diff returns nil if it ends up making a diff with no + // changes, but our new interface wants us to return an actual change + // description that _shows_ there are no changes. This is usually the + // PriorState, however if there was no prior state and no diff, then we + // use the ProposedNewState.
+ if !priorStateVal.IsNull() { + resp.PlannedState = req.PriorState + } else { + resp.PlannedState = req.ProposedNewState + } return resp, nil } From af50f0e228b5f984f67da9f6d10e13e032412656 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Wed, 31 Oct 2018 01:07:43 +0000 Subject: [PATCH 012/149] changelog: clean up after v0.12.0-alpha2 release --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4dbc392e3b9e..2da5f6e779cf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,8 @@ ## 0.12.0-beta1 (Unreleased) + +## 0.12.0-alpha2 (October 30, 2018) + IMPROVEMENTS: * backend/s3: Support `credential_source` if specified in AWS configuration file [GH-19190] @@ -9,6 +12,10 @@ IMPROVEMENTS: BUG FIXES: * lang: Fix crash in `lookup` function [GH-19161] +* Hostnames inside module registry source strings may now contain segments that begin with digits, due to an upstream fix in the IDNA parsing library. [GH-18039] +* helper/schema: Fix panic when null values appear for nested blocks [GH-19201] +* helper/schema: Restore handling of the special "timeouts" block in certain resource types. [GH-19222] +* helper/schema: Restore handling of DiffSuppressFunc and StateFunc. [GH-19226] ## 0.12.0-alpha1 (October 19, 2018) From f959b560a2cc9863d1f8ce01e0383c1488808b33 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 31 Oct 2018 13:40:01 -0400 Subject: [PATCH 013/149] trim index steps from RequiresNew paths Only GetAttrSteps can actually trigger RequiresNew, but the flatmaps paths will point to the indexed value that caused the change. 
--- config/hcl2shim/paths.go | 28 +++++++ config/hcl2shim/paths_test.go | 146 ++++++++++++++++++++++++++++++++++ 2 files changed, 174 insertions(+) diff --git a/config/hcl2shim/paths.go b/config/hcl2shim/paths.go index 5d2fb02d959b..99437cbb1766 100644 --- a/config/hcl2shim/paths.go +++ b/config/hcl2shim/paths.go @@ -28,6 +28,10 @@ func RequiresReplace(attrs []string, ty cty.Type) ([]cty.Path, error) { paths = append(paths, p) } + // now trim off any trailing paths that aren't GetAttrSteps, since only an + // attribute itself can require replacement + paths = trimPaths(paths) + // There may be redundant paths due to set elements or index attributes // Do some ugly n^2 filtering, but these are always fairly small sets. for i := 0; i < len(paths)-1; i++ { @@ -44,6 +48,30 @@ func RequiresReplace(attrs []string, ty cty.Type) ([]cty.Path, error) { return paths, nil } +// trimPaths removes any trailing steps that aren't of type GetAttrSet, since +// only an attribute itself can require replacement +func trimPaths(paths []cty.Path) []cty.Path { + var trimmed []cty.Path + for _, path := range paths { + path = trimPath(path) + if len(path) > 0 { + trimmed = append(trimmed, path) + } + } + return trimmed +} + +func trimPath(path cty.Path) cty.Path { + for len(path) > 0 { + _, isGetAttr := path[len(path)-1].(cty.GetAttrStep) + if isGetAttr { + break + } + path = path[:len(path)-1] + } + return path +} + // requiresReplacePath takes a key from a flatmap along with the cty.Type // describing the structure, and returns the cty.Path that would be used to // reference the nested value in the data structure. 
diff --git a/config/hcl2shim/paths_test.go b/config/hcl2shim/paths_test.go index ff52c92d9752..cffbe6b5aaa1 100644 --- a/config/hcl2shim/paths_test.go +++ b/config/hcl2shim/paths_test.go @@ -6,9 +6,18 @@ import ( "strings" "testing" + "github.com/google/go-cmp/cmp/cmpopts" + + "github.com/google/go-cmp/cmp" + "github.com/zclconf/go-cty/cty" ) +var ( + ignoreUnexported = cmpopts.IgnoreUnexported(cty.GetAttrStep{}, cty.IndexStep{}) + valueComparer = cmp.Comparer(cty.Value.RawEquals) +) + func TestPathFromFlatmap(t *testing.T) { tests := []struct { Flatmap string @@ -221,3 +230,140 @@ func TestPathFromFlatmap(t *testing.T) { }) } } + +func TestRequiresReplace(t *testing.T) { + for _, tc := range []struct { + name string + attrs []string + expected []cty.Path + ty cty.Type + }{ + { + name: "basic", + attrs: []string{ + "foo", + }, + ty: cty.Object(map[string]cty.Type{ + "foo": cty.String, + }), + expected: []cty.Path{ + cty.Path{cty.GetAttrStep{Name: "foo"}}, + }, + }, + { + name: "two", + attrs: []string{ + "foo", + "bar", + }, + ty: cty.Object(map[string]cty.Type{ + "foo": cty.String, + "bar": cty.String, + }), + expected: []cty.Path{ + cty.Path{cty.GetAttrStep{Name: "foo"}}, + cty.Path{cty.GetAttrStep{Name: "bar"}}, + }, + }, + { + name: "nested object", + attrs: []string{ + "foo.bar", + }, + ty: cty.Object(map[string]cty.Type{ + "foo": cty.Object(map[string]cty.Type{ + "bar": cty.String, + }), + }), + expected: []cty.Path{ + cty.Path{cty.GetAttrStep{Name: "foo"}, cty.GetAttrStep{Name: "bar"}}, + }, + }, + { + name: "nested objects", + attrs: []string{ + "foo.bar.baz", + }, + ty: cty.Object(map[string]cty.Type{ + "foo": cty.Object(map[string]cty.Type{ + "bar": cty.Object(map[string]cty.Type{ + "baz": cty.String, + }), + }), + }), + expected: []cty.Path{ + cty.Path{cty.GetAttrStep{Name: "foo"}, cty.GetAttrStep{Name: "bar"}, cty.GetAttrStep{Name: "baz"}}, + }, + }, + { + name: "nested map", + attrs: []string{ + "foo.%", + "foo.bar", + }, + ty: 
cty.Object(map[string]cty.Type{ + "foo": cty.Map(cty.String), + }), + expected: []cty.Path{ + cty.Path{cty.GetAttrStep{Name: "foo"}}, + }, + }, + { + name: "nested list", + attrs: []string{ + "foo.#", + "foo.1", + }, + ty: cty.Object(map[string]cty.Type{ + "foo": cty.Map(cty.String), + }), + expected: []cty.Path{ + cty.Path{cty.GetAttrStep{Name: "foo"}}, + }, + }, + { + name: "object in map", + attrs: []string{ + "foo.bar.baz", + }, + ty: cty.Object(map[string]cty.Type{ + "foo": cty.Map(cty.Object( + map[string]cty.Type{ + "baz": cty.String, + }, + )), + }), + expected: []cty.Path{ + cty.Path{cty.GetAttrStep{Name: "foo"}, cty.IndexStep{Key: cty.StringVal("bar")}, cty.GetAttrStep{Name: "baz"}}, + }, + }, + { + name: "object in list", + attrs: []string{ + "foo.1.baz", + }, + ty: cty.Object(map[string]cty.Type{ + "foo": cty.List(cty.Object( + map[string]cty.Type{ + "baz": cty.String, + }, + )), + }), + expected: []cty.Path{ + cty.Path{cty.GetAttrStep{Name: "foo"}, cty.IndexStep{Key: cty.NumberIntVal(1)}, cty.GetAttrStep{Name: "baz"}}, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + rp, err := RequiresReplace(tc.attrs, tc.ty) + if err != nil { + t.Fatal(err) + } + if !cmp.Equal(tc.expected, rp, ignoreUnexported, valueComparer) { + t.Fatalf("\nexpected: %#v\ngot: %#v\n", tc.expected, rp) + } + }) + + } + +} From 8212a6a9d0c486128bbea764166225e24120e38d Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 31 Oct 2018 13:42:28 -0400 Subject: [PATCH 014/149] add provider tests for force-new with a map Adding and removing a single map that requires a new resource can cause empty diffs, relying on the core proposed state values for destruction. 
--- builtin/providers/test/provider.go | 1 + builtin/providers/test/resource_force_new.go | 39 +++++++++ .../providers/test/resource_force_new_test.go | 79 +++++++++++++++++++ builtin/providers/test/resource_test.go | 30 +++++++ 4 files changed, 149 insertions(+) create mode 100644 builtin/providers/test/resource_force_new.go create mode 100644 builtin/providers/test/resource_force_new_test.go diff --git a/builtin/providers/test/provider.go b/builtin/providers/test/provider.go index d69ae95f9808..e8b6cf2287b9 100644 --- a/builtin/providers/test/provider.go +++ b/builtin/providers/test/provider.go @@ -22,6 +22,7 @@ func Provider() terraform.ResourceProvider { "test_resource_with_custom_diff": testResourceCustomDiff(), "test_resource_timeout": testResourceTimeout(), "test_resource_diff_suppress": testResourceDiffSuppress(), + "test_resource_force_new": testResourceForceNew(), }, DataSourcesMap: map[string]*schema.Resource{ "test_data_source": testDataSource(), diff --git a/builtin/providers/test/resource_force_new.go b/builtin/providers/test/resource_force_new.go new file mode 100644 index 000000000000..81a06736c468 --- /dev/null +++ b/builtin/providers/test/resource_force_new.go @@ -0,0 +1,39 @@ +package test + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func testResourceForceNew() *schema.Resource { + return &schema.Resource{ + Create: testResourceForceNewCreate, + Read: testResourceForceNewRead, + Delete: testResourceForceNewDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "triggers": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func testResourceForceNewCreate(d *schema.ResourceData, meta interface{}) error { + d.SetId("testId") + return testResourceForceNewRead(d, meta) +} + +func testResourceForceNewRead(d *schema.ResourceData, meta interface{}) error { + return nil +} + +func testResourceForceNewDelete(d 
*schema.ResourceData, meta interface{}) error { + d.SetId("") + return nil +} diff --git a/builtin/providers/test/resource_force_new_test.go b/builtin/providers/test/resource_force_new_test.go new file mode 100644 index 000000000000..3e0bf19c344d --- /dev/null +++ b/builtin/providers/test/resource_force_new_test.go @@ -0,0 +1,79 @@ +package test + +import ( + "strings" + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestResourceForceNew_create(t *testing.T) { + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_force_new" "foo" { + triggers = { + "a" = "foo" + } +}`), + }, + }, + }) +} +func TestResourceForceNew_update(t *testing.T) { + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_force_new" "foo" { + triggers = { + "a" = "foo" + } +}`), + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_force_new" "foo" { + triggers = { + "a" = "bar" + } +}`), + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_force_new" "foo" { + triggers = { + "b" = "bar" + } +}`), + }, + }, + }) +} + +func TestResourceForceNew_remove(t *testing.T) { + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_force_new" "foo" { + triggers = { + "a" = "bar" + } +}`), + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_force_new" "foo" { +} `), + }, + }, + }) +} diff --git a/builtin/providers/test/resource_test.go b/builtin/providers/test/resource_test.go index 
2d0168fc72ee..dd33783ffc25 100644 --- a/builtin/providers/test/resource_test.go +++ b/builtin/providers/test/resource_test.go @@ -443,3 +443,33 @@ output "value_from_map_from_list" { func testAccCheckResourceDestroy(s *terraform.State) error { return nil } + +func TestResource_removeForceNew(t *testing.T) { + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource" "foo" { + required = "yep" + required_map = { + key = "value" + } + optional_force_new = "here" +} + `), + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource" "foo" { + required = "yep" + required_map = { + key = "value" + } +} + `), + }, + }, + }) +} From 718a3c400a59f0852d5cffc9b843e30fe3dce6f4 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 31 Oct 2018 13:43:50 -0400 Subject: [PATCH 015/149] fix state variable name --- helper/plugin/grpc_provider.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/helper/plugin/grpc_provider.go b/helper/plugin/grpc_provider.go index 7669cb628218..a25aa67459a9 100644 --- a/helper/plugin/grpc_provider.go +++ b/helper/plugin/grpc_provider.go @@ -399,12 +399,12 @@ func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadReso // The old provider API used an empty id to signal that the remote // object appears to have been deleted, but our new protocol expects // to see a null value (in the cty sense) in that case. 
- newConfigMP, err := msgpack.Marshal(cty.NullVal(block.ImpliedType()), block.ImpliedType()) + newStateMP, err := msgpack.Marshal(cty.NullVal(block.ImpliedType()), block.ImpliedType()) if err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) } resp.NewState = &proto.DynamicValue{ - Msgpack: newConfigMP, + Msgpack: newStateMP, } return resp, nil } From 4635ebc61aafbe769a83c7d02114cf1629b599d4 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 31 Oct 2018 13:44:21 -0400 Subject: [PATCH 016/149] create a new proposed value when replacing When replacing an instance, calculate a new proposed value from the null state and the config. This ensures that all unknown values are properly set. --- terraform/eval_diff.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/terraform/eval_diff.go b/terraform/eval_diff.go index 0b6c4aff3831..50bb5ea3b79a 100644 --- a/terraform/eval_diff.go +++ b/terraform/eval_diff.go @@ -310,11 +310,15 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) { // from known prior values to unknown values, unless the provider is // able to predict new values for any of these computed attributes. nullPriorVal := cty.NullVal(schema.ImpliedType()) + + // create a new proposed value from the null state and the config + proposedNewVal = objchange.ProposedNewObject(schema, nullPriorVal, configVal) + resp = provider.PlanResourceChange(providers.PlanResourceChangeRequest{ TypeName: n.Addr.Resource.Type, Config: configVal, PriorState: nullPriorVal, - ProposedNewState: configVal, + ProposedNewState: proposedNewVal, PriorPrivate: plannedPrivate, }) // We need to tread carefully here, since if there are any warnings From c9e7346bfd9352397c21e5f0884a200ff8f37724 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 31 Oct 2018 13:44:21 -0400 Subject: [PATCH 017/149] create a new proposed value when replacing When replacing an instance, calculate a new proposed value from the null state and the config. 
This ensures that all unknown values are properly set. --- terraform/eval_diff.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/terraform/eval_diff.go b/terraform/eval_diff.go index 0b6c4aff3831..50bb5ea3b79a 100644 --- a/terraform/eval_diff.go +++ b/terraform/eval_diff.go @@ -310,11 +310,15 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) { // from known prior values to unknown values, unless the provider is // able to predict new values for any of these computed attributes. nullPriorVal := cty.NullVal(schema.ImpliedType()) + + // create a new proposed value from the null state and the config + proposedNewVal = objchange.ProposedNewObject(schema, nullPriorVal, configVal) + resp = provider.PlanResourceChange(providers.PlanResourceChangeRequest{ TypeName: n.Addr.Resource.Type, Config: configVal, PriorState: nullPriorVal, - ProposedNewState: configVal, + ProposedNewState: proposedNewVal, PriorPrivate: plannedPrivate, }) // We need to tread carefully here, since if there are any warnings From a5ef403dfd46121baadef1c69c7b9b83fccdcda5 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 31 Oct 2018 14:17:23 -0400 Subject: [PATCH 018/149] skip resource tests for now These aren't going to be fixed in the immediate future, and are preventing the CI tests from being helpful. 
--- helper/resource/testing_import_state_test.go | 16 ++++++------ helper/resource/testing_test.go | 26 ++++++++++---------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/helper/resource/testing_import_state_test.go b/helper/resource/testing_import_state_test.go index a9a11b9b1010..9b2acc3c964d 100644 --- a/helper/resource/testing_import_state_test.go +++ b/helper/resource/testing_import_state_test.go @@ -9,7 +9,7 @@ import ( ) func TestTest_importState(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") mp := testProvider() mp.ImportStateReturn = []*terraform.InstanceState{ @@ -61,7 +61,7 @@ func TestTest_importState(t *testing.T) { } func TestTest_importStateFail(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") mp := testProvider() mp.ImportStateReturn = []*terraform.InstanceState{ @@ -113,7 +113,7 @@ func TestTest_importStateFail(t *testing.T) { } func TestTest_importStateDetectId(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") mp := testProvider() mp.DiffReturn = nil @@ -189,7 +189,7 @@ func TestTest_importStateDetectId(t *testing.T) { } func TestTest_importStateIdPrefix(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") mp := testProvider() mp.DiffReturn = nil @@ -266,7 +266,7 @@ func TestTest_importStateIdPrefix(t *testing.T) { } func TestTest_importStateVerify(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") mp := testProvider() mp.DiffReturn = nil @@ -338,7 +338,7 @@ func TestTest_importStateVerify(t *testing.T) { } func TestTest_importStateVerifyFail(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider 
implementation") mp := testProvider() mp.DiffReturn = nil @@ -403,7 +403,7 @@ func TestTest_importStateVerifyFail(t *testing.T) { } func TestTest_importStateIdFunc(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") mp := testProvider() mp.ImportStateFn = func( @@ -463,7 +463,7 @@ func TestTest_importStateIdFunc(t *testing.T) { } func TestTest_importStateIdFuncFail(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") mp := testProvider() mp.ImportStateFn = func( diff --git a/helper/resource/testing_test.go b/helper/resource/testing_test.go index db15ba155f45..448c2abb3f93 100644 --- a/helper/resource/testing_test.go +++ b/helper/resource/testing_test.go @@ -55,7 +55,7 @@ func TestParallelTest(t *testing.T) { } func TestTest(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") mp := &resetProvider{ MockResourceProvider: testProvider(), @@ -138,7 +138,7 @@ func TestTest(t *testing.T) { } func TestTest_plan_only(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") mp := testProvider() mp.ApplyReturn = &terraform.InstanceState{ @@ -192,7 +192,7 @@ STATE: } func TestTest_idRefresh(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") // Refresh count should be 3: // 1.) initial Ref/Plan/Apply @@ -246,7 +246,7 @@ func TestTest_idRefresh(t *testing.T) { } func TestTest_idRefreshCustomName(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") // Refresh count should be 3: // 1.) 
initial Ref/Plan/Apply @@ -300,7 +300,7 @@ func TestTest_idRefreshCustomName(t *testing.T) { } func TestTest_idRefreshFail(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") // Refresh count should be 3: // 1.) initial Ref/Plan/Apply @@ -364,7 +364,7 @@ func TestTest_idRefreshFail(t *testing.T) { } func TestTest_empty(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") destroyCalled := false checkDestroyFn := func(*terraform.State) error { @@ -386,7 +386,7 @@ func TestTest_empty(t *testing.T) { } func TestTest_noEnv(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") // Unset the variable if err := os.Setenv(TestEnvVar, ""); err != nil { @@ -403,7 +403,7 @@ func TestTest_noEnv(t *testing.T) { } func TestTest_preCheck(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") called := false @@ -418,7 +418,7 @@ func TestTest_preCheck(t *testing.T) { } func TestTest_skipFunc(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") preCheckCalled := false skipped := false @@ -460,7 +460,7 @@ func TestTest_skipFunc(t *testing.T) { } func TestTest_stepError(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") mp := testProvider() mp.ApplyReturn = &terraform.InstanceState{ @@ -530,7 +530,7 @@ func TestTest_factoryError(t *testing.T) { } func TestTest_resetError(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") mp := &resetProvider{ MockResourceProvider: testProvider(), @@ -555,7 +555,7 @@ func TestTest_resetError(t *testing.T) { } func TestTest_expectError(t *testing.T) { - 
t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") cases := []struct { name string @@ -953,7 +953,7 @@ func mockSweeperFunc(s string) error { } func TestTest_Taint(t *testing.T) { - t.Fatal("test requires new provider implementation") + t.Skip("test requires new provider implementation") mp := testProvider() mp.DiffFn = func( From 21064771ea3d9c2fcebd364997f1d25eec3ec191 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 31 Oct 2018 16:41:36 -0400 Subject: [PATCH 019/149] add failing test for required output value The required value from an output is nil when it should be unknown --- terraform/context_plan_test.go | 65 +++++++++++++++++++ .../plan-required-output/main.tf | 7 ++ .../plan-required-output/mod/main.tf | 7 ++ 3 files changed, 79 insertions(+) create mode 100644 terraform/test-fixtures/plan-required-output/main.tf create mode 100644 terraform/test-fixtures/plan-required-output/mod/main.tf diff --git a/terraform/context_plan_test.go b/terraform/context_plan_test.go index 581a0f4292a0..3a9b826be812 100644 --- a/terraform/context_plan_test.go +++ b/terraform/context_plan_test.go @@ -5513,3 +5513,68 @@ func objectVal(t *testing.T, schema *configschema.Block, m map[string]cty.Value) } return v } + +func TestContext2Plan_requiredModuleOutput(t *testing.T) { + m := testModule(t, "plan-required-output") + p := testProvider("test") + p.GetSchemaReturn = &ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "required": {Type: cty.String, Required: true}, + }, + }, + }, + } + p.DiffFn = testDiffFn + + ctx := testContext2(t, &ContextOpts{ + Config: m, + ProviderResolver: providers.ResolverFixed( + map[string]providers.Factory{ + "test": testProviderFuncFixed(p), + }, + ), + }) + + plan, diags := ctx.Plan() + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + 
} + + schema := p.GetSchemaReturn.ResourceTypes["test_resource"] + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 2 { + t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + var expected cty.Value + switch i := ric.Addr.String(); i { + case "test_resource.root": + expected = objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "required": cty.UnknownVal(cty.String), + }) + case "module.mod.test_resource.for_output": + expected = objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "required": cty.StringVal("val"), + }) + default: + t.Fatal("unknown instance:", i) + } + + checkVals(t, expected, ric.After) + } +} diff --git a/terraform/test-fixtures/plan-required-output/main.tf b/terraform/test-fixtures/plan-required-output/main.tf new file mode 100644 index 000000000000..227b5c1530ce --- /dev/null +++ b/terraform/test-fixtures/plan-required-output/main.tf @@ -0,0 +1,7 @@ +resource "test_resource" "root" { + required = module.mod.object.id +} + +module "mod" { + source = "./mod" +} diff --git a/terraform/test-fixtures/plan-required-output/mod/main.tf b/terraform/test-fixtures/plan-required-output/mod/main.tf new file mode 100644 index 000000000000..772f1645f3e8 --- /dev/null +++ b/terraform/test-fixtures/plan-required-output/mod/main.tf @@ -0,0 +1,7 @@ +resource "test_resource" "for_output" { + required = "val" +} + +output "object" { + value = test_resource.for_output +} From 46c36b5e72b5c01d6362bb030710aa2880b63e66 Mon Sep 17 00:00:00 2001 From: cgriggs01 Date: Wed, 31 Oct 2018 13:53:22 -0700 Subject: [PATCH 020/149] adding two community --- .../type/community-index.html.markdown | 49 ++++++++++--------- 1 file changed, 27 insertions(+), 22 deletions(-) diff --git 
a/website/docs/providers/type/community-index.html.markdown b/website/docs/providers/type/community-index.html.markdown index 8b0f32e15d63..bd5cd6e43d84 100644 --- a/website/docs/providers/type/community-index.html.markdown +++ b/website/docs/providers/type/community-index.html.markdown @@ -28,118 +28,123 @@ please fill out this [community providers form](https://docs.google.com/forms/d/ Aviatrix + Azure Devops CloudAMQP - CloudKarafka + CloudKarafka CloudMQTT Consul ACL - CoreOS Container Linux Configs + CoreOS Container Linux Configs CouchDB Databricks - Dead Man's Snitch + Dead Man's Snitch Digital Rebar Docker Machine - Drone + Drone Dropbox EfficientIP - ElephantSQL + ElephantSQL ESXI Gandi - Generic Rest API + Generic Rest API Glue GoCD - Google Calendar + Google Calendar Google G Suite Helm - Hiera + Hiera HTTP File Upload> HP OneView - IBM Cloud + IBM Cloud IIJ GIO Infoblox - Jira + Jira JumpCloud Kafka - Keboola + Keboola Kibana Kong - LXD + LXD Manifold Matchbox - MongoDB Atlas + MongoDB Atlas NSX-V Nutanix - Online.net + Online.net Open Day Light OpenAPI - OpenFaaS + OpenFaaS Pass Puppet CA - PuppetDB + PuppetDB QingCloud Redshift - RKE + RKE Rollbar SakuraCloud - SCVMM + SCVMM Sentry Sewan - SignalFx + SignalFx + Smartronix Snowflake + + Stateful Stripe + Transloadit - Transloadit Updown.io Uptimerobot + vRealize Automation - vRealize Automation Win DNS YAML + Venafi - Venafi ZeroTier + From 56a16e57a972d194a53e6de9e32594ad2fe1facc Mon Sep 17 00:00:00 2001 From: cgriggs01 Date: Wed, 31 Oct 2018 14:25:30 -0700 Subject: [PATCH 021/149] another provider and edit --- .../type/community-index.html.markdown | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/website/docs/providers/type/community-index.html.markdown b/website/docs/providers/type/community-index.html.markdown index bd5cd6e43d84..66d0cc187aa2 100644 --- a/website/docs/providers/type/community-index.html.markdown +++ 
b/website/docs/providers/type/community-index.html.markdown @@ -68,7 +68,7 @@ please fill out this [community providers form](https://docs.google.com/forms/d/ Hiera - HTTP File Upload> + HTTP File Upload HP OneView @@ -104,47 +104,47 @@ please fill out this [community providers form](https://docs.google.com/forms/d/ OpenFaaS Pass - Puppet CA + Pingdom + Puppet CA PuppetDB QingCloud - Redshift + Redshift RKE Rollbar - SakuraCloud + SakuraCloud SCVMM Sentry - Sewan + Sewan SignalFx Smartronix - Snowflake + Snowflake Stateful Stripe - Transloadit + Transloadit Updown.io Uptimerobot - vRealize Automation + vRealize Automation Win DNS YAML - Venafi + Venafi ZeroTier - From 06a74a2dac8cda7f038b89e4690279a596d0e6d8 Mon Sep 17 00:00:00 2001 From: cgriggs01 Date: Wed, 31 Oct 2018 14:33:22 -0700 Subject: [PATCH 022/149] fixit --- website/docs/providers/type/community-index.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/providers/type/community-index.html.markdown b/website/docs/providers/type/community-index.html.markdown index 66d0cc187aa2..ae24076b2fb7 100644 --- a/website/docs/providers/type/community-index.html.markdown +++ b/website/docs/providers/type/community-index.html.markdown @@ -104,7 +104,7 @@ please fill out this [community providers form](https://docs.google.com/forms/d/ OpenFaaS Pass - Pingdom + Pingdom Puppet CA From 5944e8e34fa435ab1b5323a8c42a8a4f86ef676c Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Thu, 1 Nov 2018 19:59:07 +0100 Subject: [PATCH 023/149] Fix the ability to ask for and save user input --- command/meta_backend.go | 2 +- command/meta_config.go | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/command/meta_backend.go b/command/meta_backend.go index 7b39257897ba..2208d4df900f 100644 --- a/command/meta_backend.go +++ b/command/meta_backend.go @@ -869,7 +869,7 @@ func (m *Meta) backendInitFromConfig(c *configs.Backend) (backend.Backend, cty.V b := f() schema := 
b.ConfigSchema() - decSpec := schema.DecoderSpec() + decSpec := schema.NoneRequired().DecoderSpec() configVal, hclDiags := hcldec.Decode(c.Config, decSpec, nil) diags = diags.Append(hclDiags) if hclDiags.HasErrors() { diff --git a/command/meta_config.go b/command/meta_config.go index 061e50f43ebc..b7619dccf2f8 100644 --- a/command/meta_config.go +++ b/command/meta_config.go @@ -231,12 +231,10 @@ func (m *Meta) inputForSchema(given cty.Value, schema *configschema.Block) (cty. return given, nil } - givenVals := given.AsValueMap() - retVals := make(map[string]cty.Value, len(givenVals)) + retVals := given.AsValueMap() names := make([]string, 0, len(schema.Attributes)) for name, attrS := range schema.Attributes { - retVals[name] = givenVals[name] - if givenVal := givenVals[name]; attrS.Required && givenVal.IsNull() && attrS.Type.IsPrimitiveType() { + if attrS.Required && retVals[name].IsNull() && attrS.Type.IsPrimitiveType() { names = append(names, name) } } From d50008a126dc057ba30aba6afe13ceafd37a4ce6 Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Thu, 1 Nov 2018 20:22:37 +0100 Subject: [PATCH 024/149] Update CHANGELOG.md --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2da5f6e779cf..4b18747e7e74 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,8 @@ ## 0.12.0-beta1 (Unreleased) +IMPROVEMENTS: + +* command/state: Update and enable the `state show` command [GH-19200] ## 0.12.0-alpha2 (October 30, 2018) From e91f381cc4c527283279c458528e74d087fb0fb0 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Thu, 1 Nov 2018 16:11:19 -0400 Subject: [PATCH 025/149] test case for optional bools in schema Booleans in the legacy form were stored as strings, and can appear as the incorrect type in the new type system. Unset fields in sets also might show up erroneously in diffs, with equal old and new values. 
--- builtin/providers/test/provider.go | 1 + builtin/providers/test/resource_nested.go | 61 ++++++++++ .../providers/test/resource_nested_test.go | 104 ++++++++++++++++++ 3 files changed, 166 insertions(+) create mode 100644 builtin/providers/test/resource_nested.go create mode 100644 builtin/providers/test/resource_nested_test.go diff --git a/builtin/providers/test/provider.go b/builtin/providers/test/provider.go index e8b6cf2287b9..1c0fc574d305 100644 --- a/builtin/providers/test/provider.go +++ b/builtin/providers/test/provider.go @@ -23,6 +23,7 @@ func Provider() terraform.ResourceProvider { "test_resource_timeout": testResourceTimeout(), "test_resource_diff_suppress": testResourceDiffSuppress(), "test_resource_force_new": testResourceForceNew(), + "test_resource_nested": testResourceNested(), }, DataSourcesMap: map[string]*schema.Resource{ "test_data_source": testDataSource(), diff --git a/builtin/providers/test/resource_nested.go b/builtin/providers/test/resource_nested.go new file mode 100644 index 000000000000..54e78d7fda34 --- /dev/null +++ b/builtin/providers/test/resource_nested.go @@ -0,0 +1,61 @@ +package test + +import ( + "fmt" + "math/rand" + + "github.com/hashicorp/terraform/helper/schema" +) + +func testResourceNested() *schema.Resource { + return &schema.Resource{ + Create: testResourceNestedCreate, + Read: testResourceNestedRead, + Delete: testResourceNestedDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "optional": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "nested": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "string": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "optional": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + }, + } +} + +func testResourceNestedCreate(d *schema.ResourceData, 
meta interface{}) error { + d.SetId(fmt.Sprintf("%x", rand.Int63())) + return testResourceNestedRead(d, meta) +} + +func testResourceNestedRead(d *schema.ResourceData, meta interface{}) error { + return nil +} + +func testResourceNestedDelete(d *schema.ResourceData, meta interface{}) error { + d.SetId("") + return nil +} diff --git a/builtin/providers/test/resource_nested_test.go b/builtin/providers/test/resource_nested_test.go new file mode 100644 index 000000000000..541dfa02fcbf --- /dev/null +++ b/builtin/providers/test/resource_nested_test.go @@ -0,0 +1,104 @@ +package test + +import ( + "errors" + "strings" + "testing" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestResourceNested_basic(t *testing.T) { + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested" "foo" { + nested { + string = "val" + } +} + `), + }, + }, + }) +} + +func TestResourceNested_addRemove(t *testing.T) { + var id string + checkFunc := func(s *terraform.State) error { + root := s.ModuleByPath(addrs.RootModuleInstance) + res := root.Resources["test_resource_nested.foo"] + if res.Primary.ID == id { + return errors.New("expected new resource") + } + id = res.Primary.ID + return nil + } + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested" "foo" { +} + `), + Check: checkFunc, + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested" "foo" { + nested { + string = "val" + } +} + `), + Check: checkFunc, + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested" "foo" { + optional = true 
+ nested { + string = "val" + } +} + `), + Check: checkFunc, + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested" "foo" { + nested { + string = "val" + } +} + `), + Check: checkFunc, + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested" "foo" { + nested { + string = "val" + optional = true + } +} + `), + Check: checkFunc, + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested" "foo" { +} + `), + Check: checkFunc, + }, + }, + }) +} From 7e4f09c7871cfff3e54555185f2de9d0e86df1ef Mon Sep 17 00:00:00 2001 From: James Bardin Date: Thu, 1 Nov 2018 16:14:23 -0400 Subject: [PATCH 026/149] don't apply unchanged attributes from legacy diffs If a legacy diff has equal old and new values, don't apply the diff. These would show up in sets, because of the overall change in set key. --- terraform/diff.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/terraform/diff.go b/terraform/diff.go index 1ac43910e09f..92b575086ed1 100644 --- a/terraform/diff.go +++ b/terraform/diff.go @@ -457,6 +457,11 @@ func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) continue } + // sometimes helper/schema gives us values that aren't really a diff + if diff.Old == diff.New { + continue + } + attrs[attr] = diff.New } From d73b2d778fe15cbd0cee7e5ea1bf9077c5106197 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Thu, 1 Nov 2018 17:32:30 -0700 Subject: [PATCH 027/149] core: TestContext2Plan_requiredModuleOutput to use t.Run This allows us to see the results of the tests for all resources even if one of them fails. 
--- terraform/context_plan_test.go | 48 ++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/terraform/context_plan_test.go b/terraform/context_plan_test.go index 3a9b826be812..03dee235ecf0 100644 --- a/terraform/context_plan_test.go +++ b/terraform/context_plan_test.go @@ -5551,30 +5551,32 @@ func TestContext2Plan_requiredModuleOutput(t *testing.T) { } for _, res := range plan.Changes.Resources { - if res.Action != plans.Create { - t.Fatalf("expected resource creation, got %s", res.Action) - } - ric, err := res.Decode(ty) - if err != nil { - t.Fatal(err) - } + t.Run(fmt.Sprintf("%s %s", res.Action, res.Addr), func(t *testing.T) { + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } - var expected cty.Value - switch i := ric.Addr.String(); i { - case "test_resource.root": - expected = objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "required": cty.UnknownVal(cty.String), - }) - case "module.mod.test_resource.for_output": - expected = objectVal(t, schema, map[string]cty.Value{ - "id": cty.UnknownVal(cty.String), - "required": cty.StringVal("val"), - }) - default: - t.Fatal("unknown instance:", i) - } + var expected cty.Value + switch i := ric.Addr.String(); i { + case "test_resource.root": + expected = objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "required": cty.UnknownVal(cty.String), + }) + case "module.mod.test_resource.for_output": + expected = objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "required": cty.StringVal("val"), + }) + default: + t.Fatal("unknown instance:", i) + } - checkVals(t, expected, ric.After) + checkVals(t, expected, ric.After) + }) } } From bbf8dacac808ee3dc2a9ff27adb2073a155b0a63 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Thu, 1 Nov 2018 17:33:10 -0700 Subject: [PATCH 028/149] 
plans: OutputChange.Encode must preserve Addr field --- plans/changes.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plans/changes.go b/plans/changes.go index cc6e58263ab5..d7e0dcdb897a 100644 --- a/plans/changes.go +++ b/plans/changes.go @@ -254,6 +254,7 @@ func (oc *OutputChange) Encode() (*OutputChangeSrc, error) { return nil, err } return &OutputChangeSrc{ + Addr: oc.Addr, ChangeSrc: *cs, Sensitive: oc.Sensitive, }, err From 21577a5f158adffc4e87d6734a708b452b9c70d5 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Thu, 1 Nov 2018 17:41:35 -0700 Subject: [PATCH 029/149] core: Whole-module evaluation must consider planned output values Just as when we resolve single output values we must check to see if there is a planned new value for an output before using the value in state, because the planned new value might contain unknowns that can't be represented directly in the state (and would thus be incorrectly returned as null). --- terraform/context_plan_test.go | 67 +++++++++++++++++++ terraform/evaluate.go | 31 +++++++-- .../plan-required-whole-mod/main.tf | 17 +++++ .../plan-required-whole-mod/mod/main.tf | 7 ++ 4 files changed, 115 insertions(+), 7 deletions(-) create mode 100644 terraform/test-fixtures/plan-required-whole-mod/main.tf create mode 100644 terraform/test-fixtures/plan-required-whole-mod/mod/main.tf diff --git a/terraform/context_plan_test.go b/terraform/context_plan_test.go index 03dee235ecf0..e7f0e7ab2b76 100644 --- a/terraform/context_plan_test.go +++ b/terraform/context_plan_test.go @@ -5580,3 +5580,70 @@ func TestContext2Plan_requiredModuleOutput(t *testing.T) { }) } } + +func TestContext2Plan_requiredModuleObject(t *testing.T) { + m := testModule(t, "plan-required-whole-mod") + p := testProvider("test") + p.GetSchemaReturn = &ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + "required": {Type: cty.String, 
Required: true}, + }, + }, + }, + } + p.DiffFn = testDiffFn + + ctx := testContext2(t, &ContextOpts{ + Config: m, + ProviderResolver: providers.ResolverFixed( + map[string]providers.Factory{ + "test": testProviderFuncFixed(p), + }, + ), + }) + + plan, diags := ctx.Plan() + if diags.HasErrors() { + t.Fatalf("unexpected errors: %s", diags.Err()) + } + + schema := p.GetSchemaReturn.ResourceTypes["test_resource"] + ty := schema.ImpliedType() + + if len(plan.Changes.Resources) != 2 { + t.Fatal("expected 2 changes, got", len(plan.Changes.Resources)) + } + + for _, res := range plan.Changes.Resources { + t.Run(fmt.Sprintf("%s %s", res.Action, res.Addr), func(t *testing.T) { + if res.Action != plans.Create { + t.Fatalf("expected resource creation, got %s", res.Action) + } + ric, err := res.Decode(ty) + if err != nil { + t.Fatal(err) + } + + var expected cty.Value + switch i := ric.Addr.String(); i { + case "test_resource.root": + expected = objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "required": cty.UnknownVal(cty.String), + }) + case "module.mod.test_resource.for_output": + expected = objectVal(t, schema, map[string]cty.Value{ + "id": cty.UnknownVal(cty.String), + "required": cty.StringVal("val"), + }) + default: + t.Fatal("unknown instance:", i) + } + + checkVals(t, expected, ric.After) + }) + } +} diff --git a/terraform/evaluate.go b/terraform/evaluate.go index bed3a6cc8c0e..918df2e8755f 100644 --- a/terraform/evaluate.go +++ b/terraform/evaluate.go @@ -305,14 +305,31 @@ func (d *evaluationStateData) GetModuleInstance(addr addrs.ModuleCallInstance, r vals := map[string]cty.Value{} for n := range outputConfigs { addr := addrs.OutputValue{Name: n}.Absolute(moduleAddr) - os := d.Evaluator.State.OutputValue(addr) - if os == nil { - // Not evaluated yet? - vals[n] = cty.DynamicVal - continue - } - vals[n] = os.Value + // If a pending change is present in our current changeset then its value + // takes priority over what's in state. 
(It will usually be the same but + // will differ if the new value is unknown during planning.) + if changeSrc := d.Evaluator.Changes.GetOutputChange(addr); changeSrc != nil { + change, err := changeSrc.Decode() + if err != nil { + // This should happen only if someone has tampered with a plan + // file, so we won't bother with a pretty error for it. + diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", addr, err)) + vals[n] = cty.DynamicVal + continue + } + // We care only about the "after" value, which is the value this output + // will take on after the plan is applied. + vals[n] = change.After + } else { + os := d.Evaluator.State.OutputValue(addr) + if os == nil { + // Not evaluated yet? + vals[n] = cty.DynamicVal + continue + } + vals[n] = os.Value + } } return cty.ObjectVal(vals), diags } diff --git a/terraform/test-fixtures/plan-required-whole-mod/main.tf b/terraform/test-fixtures/plan-required-whole-mod/main.tf new file mode 100644 index 000000000000..9deb3c5a162b --- /dev/null +++ b/terraform/test-fixtures/plan-required-whole-mod/main.tf @@ -0,0 +1,17 @@ +resource "test_resource" "root" { + required = local.object.id +} + +locals { + # This indirection is here to force the evaluator to produce the whole + # module object here rather than just fetching the single "object" output. + # This makes this fixture different than plan-required-output, which just + # accesses module.mod.object.id directly and thus visits a different + # codepath in the evaluator. 
+ mod = module.mod + object = local.mod.object +} + +module "mod" { + source = "./mod" +} diff --git a/terraform/test-fixtures/plan-required-whole-mod/mod/main.tf b/terraform/test-fixtures/plan-required-whole-mod/mod/main.tf new file mode 100644 index 000000000000..772f1645f3e8 --- /dev/null +++ b/terraform/test-fixtures/plan-required-whole-mod/mod/main.tf @@ -0,0 +1,7 @@ +resource "test_resource" "for_output" { + required = "val" +} + +output "object" { + value = test_resource.for_output +} From 47d5d62bdbf67f712217979442a69b8c97df9d6c Mon Sep 17 00:00:00 2001 From: James Bardin Date: Fri, 2 Nov 2018 11:26:23 -0400 Subject: [PATCH 030/149] update CHANGELOG.md --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b18747e7e74..256dc5750031 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,11 @@ IMPROVEMENTS: * command/state: Update and enable the `state show` command [GH-19200] +BUG FIXES: + +* helper/schema: Prevent the insertion of empty diff values when converting legacy diffs [GH-19253] +* core: Fix inconsistent plans when replacing instances. 
[GH-19233] + ## 0.12.0-alpha2 (October 30, 2018) IMPROVEMENTS: From 178ec8f7b42f1bbe5eb3c449da811d2872eacc6d Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Fri, 2 Nov 2018 17:58:38 +0100 Subject: [PATCH 031/149] Remove support for the -module-depth flag # Conflicts: # backend/backend.go --- command/graph.go | 15 +++--- command/meta.go | 19 ------- command/meta_test.go | 53 ------------------- command/plan.go | 6 --- command/show.go | 3 -- contrib/fish-completion/terraform.fish | 3 +- .../commands/environment-variables.html.md | 10 ---- website/docs/commands/graph.html.markdown | 3 ++ website/docs/commands/plan.html.markdown | 4 -- website/docs/commands/show.html.markdown | 3 -- website/docs/modules/usage.html.markdown | 9 +--- 11 files changed, 15 insertions(+), 113 deletions(-) diff --git a/command/graph.go b/command/graph.go index 03612f6a165a..d923ce437b48 100644 --- a/command/graph.go +++ b/command/graph.go @@ -31,7 +31,7 @@ func (c *GraphCommand) Run(args []string) int { } cmdFlags := flag.NewFlagSet("graph", flag.ContinueOnError) - c.addModuleDepthFlag(cmdFlags, &moduleDepth) + cmdFlags.IntVar(&moduleDepth, "module-depth", -1, "module-depth") cmdFlags.BoolVar(&verbose, "verbose", false, "verbose") cmdFlags.BoolVar(&drawCycles, "draw-cycles", false, "draw-cycles") cmdFlags.StringVar(&graphTypeStr, "type", "", "type") @@ -181,13 +181,16 @@ Usage: terraform graph [options] [DIR] Options: - -draw-cycles Highlight any cycles in the graph with colored edges. - This helps when diagnosing cycle errors. + -draw-cycles Highlight any cycles in the graph with colored edges. + This helps when diagnosing cycle errors. - -no-color If specified, output won't contain any color. + -module-depth=n Specifies the depth of modules to show in the output. + By default this is -1, which will expand all. - -type=plan Type of graph to output. Can be: plan, plan-destroy, apply, - validate, input, refresh. + -no-color If specified, output won't contain any color. 
+ + -type=plan Type of graph to output. Can be: plan, plan-destroy, apply, + validate, input, refresh. ` diff --git a/command/meta.go b/command/meta.go index da6d1c09b1ad..dbc1437d02b6 100644 --- a/command/meta.go +++ b/command/meta.go @@ -513,25 +513,6 @@ func (m *Meta) showDiagnostics(vals ...interface{}) { } } -const ( - // ModuleDepthDefault is the default value for - // module depth, which can be overridden by flag - // or env var - ModuleDepthDefault = -1 - - // ModuleDepthEnvVar is the name of the environment variable that can be used to set module depth. - ModuleDepthEnvVar = "TF_MODULE_DEPTH" -) - -func (m *Meta) addModuleDepthFlag(flags *flag.FlagSet, moduleDepth *int) { - flags.IntVar(moduleDepth, "module-depth", ModuleDepthDefault, "module-depth") - if envVar := os.Getenv(ModuleDepthEnvVar); envVar != "" { - if md, err := strconv.Atoi(envVar); err == nil { - *moduleDepth = md - } - } -} - // outputShadowError outputs the error from ctx.ShadowError. If the // error is nil then nothing happens. If output is false then it isn't // outputted to the user (you can define logic to guard against outputting). 
diff --git a/command/meta_test.go b/command/meta_test.go index 63ab0fb28b09..b42312924ced 100644 --- a/command/meta_test.go +++ b/command/meta_test.go @@ -1,7 +1,6 @@ package command import ( - "flag" "io/ioutil" "os" "path/filepath" @@ -226,58 +225,6 @@ func TestMeta_initStatePaths(t *testing.T) { } } -func TestMeta_addModuleDepthFlag(t *testing.T) { - old := os.Getenv(ModuleDepthEnvVar) - defer os.Setenv(ModuleDepthEnvVar, old) - - cases := map[string]struct { - EnvVar string - Args []string - Expected int - }{ - "env var sets value when no flag present": { - EnvVar: "4", - Args: []string{}, - Expected: 4, - }, - "flag overrides envvar": { - EnvVar: "4", - Args: []string{"-module-depth=-1"}, - Expected: -1, - }, - "negative envvar works": { - EnvVar: "-1", - Args: []string{}, - Expected: -1, - }, - "invalid envvar is ignored": { - EnvVar: "-#", - Args: []string{}, - Expected: ModuleDepthDefault, - }, - "empty envvar is okay too": { - EnvVar: "", - Args: []string{}, - Expected: ModuleDepthDefault, - }, - } - - for tn, tc := range cases { - m := new(Meta) - var moduleDepth int - flags := flag.NewFlagSet("test", flag.ContinueOnError) - os.Setenv(ModuleDepthEnvVar, tc.EnvVar) - m.addModuleDepthFlag(flags, &moduleDepth) - err := flags.Parse(tc.Args) - if err != nil { - t.Fatalf("%s: err: %#v", tn, err) - } - if moduleDepth != tc.Expected { - t.Fatalf("%s: expected: %#v, got: %#v", tn, tc.Expected, moduleDepth) - } - } -} - func TestMeta_Env(t *testing.T) { td := tempDir(t) os.MkdirAll(td, 0755) diff --git a/command/plan.go b/command/plan.go index 6f45650a8b18..2f3411286cfa 100644 --- a/command/plan.go +++ b/command/plan.go @@ -19,7 +19,6 @@ type PlanCommand struct { func (c *PlanCommand) Run(args []string) int { var destroy, refresh, detailed bool var outPath string - var moduleDepth int args, err := c.Meta.process(args, true) if err != nil { @@ -29,7 +28,6 @@ func (c *PlanCommand) Run(args []string) int { cmdFlags := c.Meta.flagSet("plan") cmdFlags.BoolVar(&destroy, 
"destroy", false, "destroy") cmdFlags.BoolVar(&refresh, "refresh", true, "refresh") - c.addModuleDepthFlag(cmdFlags, &moduleDepth) cmdFlags.StringVar(&outPath, "out", "", "path") cmdFlags.IntVar( &c.Meta.parallelism, "parallelism", DefaultParallelism, "parallelism") @@ -221,10 +219,6 @@ Options: -lock-timeout=0s Duration to retry a state lock. - -module-depth=n Specifies the depth of modules to show in the output. - This does not affect the plan itself, only the output - shown. By default, this is -1, which will expand all. - -no-color If specified, output won't contain any color. -out=path Write a plan file to the given path. This can be used as diff --git a/command/show.go b/command/show.go index bb1106b5974a..66140a4a3653 100644 --- a/command/show.go +++ b/command/show.go @@ -173,9 +173,6 @@ Usage: terraform show [options] [path] Options: - -module-depth=n Specifies the depth of modules to show in the output. - By default this is -1, which will expand all. - -no-color If specified, output won't contain any color. 
` diff --git a/contrib/fish-completion/terraform.fish b/contrib/fish-completion/terraform.fish index 41f3660f7339..0c5646230865 100644 --- a/contrib/fish-completion/terraform.fish +++ b/contrib/fish-completion/terraform.fish @@ -59,6 +59,7 @@ complete -f -c terraform -n '__fish_seen_subcommand_from get' -o no-color -d 'If ### graph complete -f -c terraform -n '__fish_use_subcommand' -a graph -d 'Create a visual graph of Terraform resources' complete -f -c terraform -n '__fish_seen_subcommand_from graph' -o draw-cycles -d 'Highlight any cycles in the graph' +complete -f -c terraform -n '__fish_seen_subcommand_from graph' -o module-depth -d 'Depth of modules to show in the output' complete -f -c terraform -n '__fish_seen_subcommand_from graph' -o no-color -d 'If specified, output won\'t contain any color' complete -f -c terraform -n '__fish_seen_subcommand_from graph' -o type -d 'Type of graph to output' @@ -101,7 +102,6 @@ complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o detailed-exitc complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o input -d 'Ask for input for variables if not directly set' complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o lock -d 'Lock the state file when locking is supported' complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o lock-timeout -d 'Duration to retry a state lock' -complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o module-depth -d 'Depth of modules to show in the output' complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o no-color -d 'If specified, output won\'t contain any color' complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o out -d 'Write a plan file to the given path' complete -f -c terraform -n '__fish_seen_subcommand_from plan' -o parallelism -d 'Limit the number of concurrent operations' @@ -138,7 +138,6 @@ complete -f -c terraform -n '__fish_seen_subcommand_from refresh' -o var-file -d ### show complete -f -c terraform 
-n '__fish_use_subcommand' -a show -d 'Inspect Terraform state or plan' -complete -f -c terraform -n '__fish_seen_subcommand_from show' -o module-depth -d 'Depth of modules to show in the output' complete -f -c terraform -n '__fish_seen_subcommand_from show' -o no-color -d 'If specified, output won\'t contain any color' ### taint diff --git a/website/docs/commands/environment-variables.html.md b/website/docs/commands/environment-variables.html.md index e534288069c3..50098a6cb1a3 100644 --- a/website/docs/commands/environment-variables.html.md +++ b/website/docs/commands/environment-variables.html.md @@ -48,16 +48,6 @@ If set to "false" or "0", causes terraform commands to behave as if the `-input= export TF_INPUT=0 ``` -## TF_MODULE_DEPTH - -When given a value, causes terraform commands to behave as if the `-module-depth=VALUE` flag was specified. By setting this to 0, for example, you enable commands such as [plan](/docs/commands/plan.html) and [graph](/docs/commands/graph.html) to display more compressed information. - -```shell -export TF_MODULE_DEPTH=0 -``` - -For more information regarding modules, check out the section on [Using Modules](/docs/modules/usage.html). - ## TF_VAR_name Environment variables can be used to set variables. The environment variables must be in the format `TF_VAR_name` and this will be checked last for a value. For example: diff --git a/website/docs/commands/graph.html.markdown b/website/docs/commands/graph.html.markdown index b7d677c7bc94..6be2f49e7a6a 100644 --- a/website/docs/commands/graph.html.markdown +++ b/website/docs/commands/graph.html.markdown @@ -36,6 +36,9 @@ Options: * `-draw-cycles` - Highlight any cycles in the graph with colored edges. This helps when diagnosing cycle errors. +* `-module-depth=n` - Specifies the depth of modules to show in the output. + By default this is -1, which will expand all. + * `-no-color` - If specified, output won't contain any color. * `-type=plan` - Type of graph to output. 
Can be: plan, plan-destroy, apply, legacy. diff --git a/website/docs/commands/plan.html.markdown b/website/docs/commands/plan.html.markdown index 00d705a4bc33..41bf7d2954ca 100644 --- a/website/docs/commands/plan.html.markdown +++ b/website/docs/commands/plan.html.markdown @@ -52,10 +52,6 @@ The command-line flags are all optional. The list of available flags are: * `-lock-timeout=0s` - Duration to retry a state lock. -* `-module-depth=n` - Specifies the depth of modules to show in the output. - This does not affect the plan itself, only the output shown. By default, - this is -1, which will expand all. - * `-no-color` - Disables output with coloring. * `-out=path` - The path to save the generated execution plan. This plan diff --git a/website/docs/commands/show.html.markdown b/website/docs/commands/show.html.markdown index f2b9edc5ee83..18196bc22a4d 100644 --- a/website/docs/commands/show.html.markdown +++ b/website/docs/commands/show.html.markdown @@ -22,8 +22,5 @@ file. If no path is specified, the current state will be shown. The command-line flags are all optional. The list of available flags are: -* `-module-depth=n` - Specifies the depth of modules to show in the output. - By default this is -1, which will expand all. - * `-no-color` - Disables output with coloring diff --git a/website/docs/modules/usage.html.markdown b/website/docs/modules/usage.html.markdown index 20e01ce26a71..1f39c1e55e20 100644 --- a/website/docs/modules/usage.html.markdown +++ b/website/docs/modules/usage.html.markdown @@ -390,9 +390,8 @@ several regions or datacenters. ## Summarizing Modules in the UI -By default, commands such as the [plan command](/docs/commands/plan.html) and -[graph command](/docs/commands/graph.html) will show each resource in a nested -module to represent the full scope of the configuration. For more complex +By default the [graph command](/docs/commands/graph.html) will show each resource +in a nested module to represent the full scope of the configuration. 
For more complex configurations, the `-module-depth` option may be useful to summarize some or all of the modules as single objects. @@ -405,10 +404,6 @@ If we instead set `-module-depth=0`, the graph will look like this: ![Terraform Module Graph](docs/module_graph.png) -Other commands work similarly with modules. Note that `-module-depth` only -affects how modules are presented in the UI; it does not affect how modules -and their contained resources are processed by Terraform operations. - ## Tainting resources within a module The [taint command](/docs/commands/taint.html) can be used to _taint_ specific From b7cf7737f6dd21d53c1b1c45acad5865691a3c5d Mon Sep 17 00:00:00 2001 From: Sean Carolan Date: Fri, 2 Nov 2018 20:26:52 -0400 Subject: [PATCH 032/149] website: use a clearer header describing the CLI config file (#19263) Using a / could be confusing to users who assume it is a directory. --- website/docs/commands/cli-config.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/commands/cli-config.html.markdown b/website/docs/commands/cli-config.html.markdown index 3ca368782344..eb420fd82ae5 100644 --- a/website/docs/commands/cli-config.html.markdown +++ b/website/docs/commands/cli-config.html.markdown @@ -7,7 +7,7 @@ description: |- configuration file. --- -# CLI Configuration File (`.terraformrc`/`terraform.rc`) +# CLI Configuration File (`.terraformrc` or `terraform.rc`) The CLI configuration file configures per-user settings for CLI behaviors, which apply across all Terraform working directories. 
This is separate from From a7b8cc8fe31bdd117e7484ff705f160e08063f05 Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Mon, 5 Nov 2018 11:20:46 +0100 Subject: [PATCH 033/149] Do not clear out a previous set state when refreshing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the case when no state existed remotely and a new one was created locally, we don’t want to blow away the new local state when refreshing. --- state/remote/state.go | 1 - 1 file changed, 1 deletion(-) diff --git a/state/remote/state.go b/state/remote/state.go index 6101e82f8244..5ead38e82b98 100644 --- a/state/remote/state.go +++ b/state/remote/state.go @@ -69,7 +69,6 @@ func (s *State) refreshState() error { // no remote state is OK if payload == nil { s.readState = nil - s.state = nil s.lineage = "" s.serial = 0 return nil From 186a6dcc388aeefaa295b853aa57fbda08dd4fa5 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 5 Nov 2018 12:21:37 +0000 Subject: [PATCH 034/149] helper/schema: Add test for wrong timeout type --- helper/schema/provider_test.go | 49 ++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/helper/schema/provider_test.go b/helper/schema/provider_test.go index 6bf8c5bdf6bb..39d16acbd249 100644 --- a/helper/schema/provider_test.go +++ b/helper/schema/provider_test.go @@ -3,6 +3,7 @@ package schema import ( "fmt" "reflect" + "strings" "testing" "time" @@ -277,6 +278,54 @@ func TestProviderValidate(t *testing.T) { } } +func TestProviderDiff_timeoutInvalidType(t *testing.T) { + p := &Provider{ + ResourcesMap: map[string]*Resource{ + "blah": &Resource{ + Schema: map[string]*Schema{ + "foo": { + Type: TypeInt, + Optional: true, + }, + }, + Timeouts: &ResourceTimeout{ + Create: DefaultTimeout(10 * time.Minute), + }, + }, + }, + } + + invalidCfg := map[string]interface{}{ + "foo": 42, + "timeouts": []map[string]interface{}{ + map[string]interface{}{ + "create": "40m", + }, + }, + } + ic, err := 
config.NewRawConfig(invalidCfg) + if err != nil { + t.Fatalf("err: %s", err) + } + + _, err = p.Diff( + &terraform.InstanceInfo{ + Type: "blah", + }, + nil, + terraform.NewResourceConfig(ic), + ) + if err == nil { + t.Fatal("Expected provider.Diff to fail with invalid timeout type") + } + expectedErrMsg := "Invalid Timeout structure" + if !strings.Contains(err.Error(), expectedErrMsg) { + t.Fatalf("Unexpected error message: %q\nExpected message to contain %q", + err.Error(), + expectedErrMsg) + } +} + func TestProviderValidateResource(t *testing.T) { cases := []struct { P *Provider From 2fe3f16cb325f7cab6f629743e1ad73ff95f81b5 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 5 Nov 2018 12:28:56 +0000 Subject: [PATCH 035/149] helper/schema: Return error on invalid timeout type --- helper/schema/resource_timeout.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/helper/schema/resource_timeout.go b/helper/schema/resource_timeout.go index 445819f0f956..f558b177c030 100644 --- a/helper/schema/resource_timeout.go +++ b/helper/schema/resource_timeout.go @@ -110,7 +110,8 @@ func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig) } } } else { - log.Printf("[WARN] Invalid Timeout structure found, skipping timeouts") + log.Printf("[ERROR] Invalid timeout structure: %T", raw) + return fmt.Errorf("Invalid Timeout structure found") } } From 82a77f9bb504221f263a0c81b197b9342bf80b74 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 5 Nov 2018 12:16:11 +0000 Subject: [PATCH 036/149] helper/schema: Add test for invalid timeout value --- helper/schema/provider_test.go | 46 ++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/helper/schema/provider_test.go b/helper/schema/provider_test.go index 39d16acbd249..bebe1f19b77e 100644 --- a/helper/schema/provider_test.go +++ b/helper/schema/provider_test.go @@ -326,6 +326,52 @@ func TestProviderDiff_timeoutInvalidType(t *testing.T) { } } +func 
TestProviderDiff_timeoutInvalidValue(t *testing.T) { + p := &Provider{ + ResourcesMap: map[string]*Resource{ + "blah": &Resource{ + Schema: map[string]*Schema{ + "foo": { + Type: TypeInt, + Optional: true, + }, + }, + Timeouts: &ResourceTimeout{ + Create: DefaultTimeout(10 * time.Minute), + }, + }, + }, + } + + invalidCfg := map[string]interface{}{ + "foo": 42, + "timeouts": map[string]interface{}{ + "create": "invalid", + }, + } + ic, err := config.NewRawConfig(invalidCfg) + if err != nil { + t.Fatalf("err: %s", err) + } + + _, err = p.Diff( + &terraform.InstanceInfo{ + Type: "blah", + }, + nil, + terraform.NewResourceConfig(ic), + ) + if err == nil { + t.Fatal("Expected provider.Diff to fail with invalid timeout value") + } + expectedErrMsg := "time: invalid duration invalid" + if !strings.Contains(err.Error(), expectedErrMsg) { + t.Fatalf("Unexpected error message: %q\nExpected message to contain %q", + err.Error(), + expectedErrMsg) + } +} + func TestProviderValidateResource(t *testing.T) { cases := []struct { P *Provider From 1cb8f1df80a493f4aae09f37b491c15ce3da5cf3 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 5 Nov 2018 10:51:22 +0000 Subject: [PATCH 037/149] helper/schema: Fix timeout parsing in ResourceTimeout.ConfigDecode --- helper/schema/resource_test.go | 9 ++- helper/schema/resource_timeout.go | 76 ++++++++++++-------------- helper/schema/resource_timeout_test.go | 37 ++++++------- helper/schema/shims_test.go | 7 +-- 4 files changed, 59 insertions(+), 70 deletions(-) diff --git a/helper/schema/resource_test.go b/helper/schema/resource_test.go index 0b366d4d64c9..65ebf0c01ac0 100644 --- a/helper/schema/resource_test.go +++ b/helper/schema/resource_test.go @@ -220,10 +220,9 @@ func TestResourceDiff_Timeout_diff(t *testing.T) { raw, err := config.NewRawConfig( map[string]interface{}{ "foo": 42, - "timeouts": []map[string]interface{}{ - map[string]interface{}{ - "create": "2h", - }}, + TimeoutsConfigKey: map[string]interface{}{ + "create": "2h", + 
}, }) if err != nil { t.Fatalf("err: %s", err) @@ -256,7 +255,7 @@ func TestResourceDiff_Timeout_diff(t *testing.T) { } if !reflect.DeepEqual(actual, expected) { - t.Fatalf("Not equal in Timeout Diff:\n\texpected: %#v\n\tactual: %#v", expected.Meta, actual.Meta) + t.Fatalf("Not equal Meta in Timeout Diff:\n\texpected: %#v\n\tactual: %#v", expected.Meta, actual.Meta) } } diff --git a/helper/schema/resource_timeout.go b/helper/schema/resource_timeout.go index f558b177c030..33cbce1854d1 100644 --- a/helper/schema/resource_timeout.go +++ b/helper/schema/resource_timeout.go @@ -62,52 +62,48 @@ func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig) } if raw, ok := c.Config[TimeoutsConfigKey]; ok { - if configTimeouts, ok := raw.([]map[string]interface{}); ok { - for _, timeoutValues := range configTimeouts { - // loop through each Timeout given in the configuration and validate they - // the Timeout defined in the resource - for timeKey, timeValue := range timeoutValues { - // validate that we're dealing with the normal CRUD actions - var found bool - for _, key := range timeoutKeys() { - if timeKey == key { - found = true - break - } - } - - if !found { - return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey) + if timeoutValues, ok := raw.(map[string]interface{}); ok { + for timeKey, timeValue := range timeoutValues { + // validate that we're dealing with the normal CRUD actions + var found bool + for _, key := range timeoutKeys() { + if timeKey == key { + found = true + break } + } - // Get timeout - rt, err := time.ParseDuration(timeValue.(string)) - if err != nil { - return fmt.Errorf("Error parsing Timeout for (%s): %s", timeKey, err) - } + if !found { + return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey) + } - var timeout *time.Duration - switch timeKey { - case TimeoutCreate: - timeout = t.Create - case TimeoutUpdate: - timeout = t.Update - case TimeoutRead: - timeout = t.Read - case 
TimeoutDelete: - timeout = t.Delete - case TimeoutDefault: - timeout = t.Default - } + // Get timeout + rt, err := time.ParseDuration(timeValue.(string)) + if err != nil { + return fmt.Errorf("Error parsing %q timeout: %s", timeKey, err) + } - // If the resource has not delcared this in the definition, then error - // with an unsupported message - if timeout == nil { - return unsupportedTimeoutKeyError(timeKey) - } + var timeout *time.Duration + switch timeKey { + case TimeoutCreate: + timeout = t.Create + case TimeoutUpdate: + timeout = t.Update + case TimeoutRead: + timeout = t.Read + case TimeoutDelete: + timeout = t.Delete + case TimeoutDefault: + timeout = t.Default + } - *timeout = rt + // If the resource has not delcared this in the definition, then error + // with an unsupported message + if timeout == nil { + return unsupportedTimeoutKeyError(timeKey) } + + *timeout = rt } } else { log.Printf("[ERROR] Invalid timeout structure: %T", raw) diff --git a/helper/schema/resource_timeout_test.go b/helper/schema/resource_timeout_test.go index bef98071b7fd..f48ba883be8a 100644 --- a/helper/schema/resource_timeout_test.go +++ b/helper/schema/resource_timeout_test.go @@ -16,7 +16,7 @@ func TestResourceTimeout_ConfigDecode_badkey(t *testing.T) { // what the resource has defined in source ResourceDefaultTimeout *ResourceTimeout // configuration provider by user in tf file - Config []map[string]interface{} + Config map[string]interface{} // what we expect the parsed ResourceTimeout to be Expected *ResourceTimeout // Should we have an error (key not defined in source) @@ -46,10 +46,9 @@ func TestResourceTimeout_ConfigDecode_badkey(t *testing.T) { { Name: "Use something besides 'minutes'", ResourceDefaultTimeout: timeoutForValues(10, 0, 5, 0, 3), - Config: []map[string]interface{}{ - map[string]interface{}{ - "create": "2h", - }}, + Config: map[string]interface{}{ + "create": "2h", + }, Expected: timeoutForValues(120, 0, 5, 0, 3), ShouldErr: false, }, @@ -87,7 +86,7 @@ 
func TestResourceTimeout_ConfigDecode_badkey(t *testing.T) { } if !reflect.DeepEqual(c.Expected, timeout) { - t.Fatalf("ConfigDecode match error case (%d), expected:\n%#v\ngot:\n%#v", i, c.Expected, timeout) + t.Fatalf("ConfigDecode match error case (%d).\nExpected:\n%#v\nGot:\n%#v", i, c.Expected, timeout) } }) } @@ -104,13 +103,9 @@ func TestResourceTimeout_ConfigDecode(t *testing.T) { raw, err := config.NewRawConfig( map[string]interface{}{ "foo": "bar", - TimeoutsConfigKey: []map[string]interface{}{ - map[string]interface{}{ - "create": "2m", - }, - map[string]interface{}{ - "update": "1m", - }, + TimeoutsConfigKey: map[string]interface{}{ + "create": "2m", + "update": "1m", }, }) if err != nil { @@ -130,7 +125,7 @@ func TestResourceTimeout_ConfigDecode(t *testing.T) { } if !reflect.DeepEqual(timeout, expected) { - t.Fatalf("bad timeout decode, expected (%#v), got (%#v)", expected, timeout) + t.Fatalf("bad timeout decode.\nExpected:\n%#v\nGot:\n%#v\n", expected, timeout) } } @@ -329,24 +324,24 @@ func expectedForValues(create, read, update, del, def int) map[string]interface{ return ex } -func expectedConfigForValues(create, read, update, delete, def int) []map[string]interface{} { - ex := make([]map[string]interface{}, 0) +func expectedConfigForValues(create, read, update, delete, def int) map[string]interface{} { + ex := make(map[string]interface{}, 0) if create != 0 { - ex = append(ex, map[string]interface{}{"create": fmt.Sprintf("%dm", create)}) + ex["create"] = fmt.Sprintf("%dm", create) } if read != 0 { - ex = append(ex, map[string]interface{}{"read": fmt.Sprintf("%dm", read)}) + ex["read"] = fmt.Sprintf("%dm", read) } if update != 0 { - ex = append(ex, map[string]interface{}{"update": fmt.Sprintf("%dm", update)}) + ex["update"] = fmt.Sprintf("%dm", update) } if delete != 0 { - ex = append(ex, map[string]interface{}{"delete": fmt.Sprintf("%dm", delete)}) + ex["delete"] = fmt.Sprintf("%dm", delete) } if def != 0 { - ex = append(ex, 
map[string]interface{}{"default": fmt.Sprintf("%dm", def)}) + ex["default"] = fmt.Sprintf("%dm", def) } return ex } diff --git a/helper/schema/shims_test.go b/helper/schema/shims_test.go index e08126893cfd..68186497bc12 100644 --- a/helper/schema/shims_test.go +++ b/helper/schema/shims_test.go @@ -286,10 +286,9 @@ func TestShimResourceDiff_Timeout_diff(t *testing.T) { raw, err := config.NewRawConfig( map[string]interface{}{ "foo": 42, - "timeouts": []map[string]interface{}{ - map[string]interface{}{ - "create": "2h", - }}, + TimeoutsConfigKey: map[string]interface{}{ + "create": "2h", + }, }) if err != nil { t.Fatalf("err: %s", err) From b62a22ab62cee7ffea9036a232d4704bb1df4232 Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Mon, 5 Nov 2018 18:08:05 +0100 Subject: [PATCH 038/149] Add a VariableSourceType for named .tfvars files This new source type should be used for variables loaded from .tfvars files that were explicitly passed as command line arguments (e.g. -var-file=foo.tfvars) --- backend/unparsed_value.go | 2 +- command/meta_vars.go | 9 ++++----- terraform/semantics.go | 2 +- terraform/variables.go | 12 ++++++++---- 4 files changed, 14 insertions(+), 11 deletions(-) diff --git a/backend/unparsed_value.go b/backend/unparsed_value.go index eec2998b6f05..0c246f6dd98f 100644 --- a/backend/unparsed_value.go +++ b/backend/unparsed_value.go @@ -45,7 +45,7 @@ func ParseVariableValues(vv map[string]UnparsedVariableValue, decls map[string]* if !declared { switch val.SourceType { - case terraform.ValueFromConfig, terraform.ValueFromFile: + case terraform.ValueFromConfig, terraform.ValueFromAutoFile, terraform.ValueFromNamedFile: // These source types have source ranges, so we can produce // a nice error message with good context. 
diags = diags.Append(&hcl.Diagnostic{ diff --git a/command/meta_vars.go b/command/meta_vars.go index 772dc867dea5..b11974cf72bf 100644 --- a/command/meta_vars.go +++ b/command/meta_vars.go @@ -9,7 +9,6 @@ import ( "github.com/hashicorp/hcl2/hcl" "github.com/hashicorp/hcl2/hcl/hclsyntax" hcljson "github.com/hashicorp/hcl2/hcl/json" - "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/terraform" @@ -59,12 +58,12 @@ func (m *Meta) collectVariableValues() (map[string]backend.UnparsedVariableValue // (DefaultVarsFilename) along with the later-added search for all files // ending in .auto.tfvars. if _, err := os.Stat(DefaultVarsFilename); err == nil { - moreDiags := m.addVarsFromFile(DefaultVarsFilename, terraform.ValueFromFile, ret) + moreDiags := m.addVarsFromFile(DefaultVarsFilename, terraform.ValueFromAutoFile, ret) diags = diags.Append(moreDiags) } const defaultVarsFilenameJSON = DefaultVarsFilename + ".json" if _, err := os.Stat(defaultVarsFilenameJSON); err == nil { - moreDiags := m.addVarsFromFile(defaultVarsFilenameJSON, terraform.ValueFromFile, ret) + moreDiags := m.addVarsFromFile(defaultVarsFilenameJSON, terraform.ValueFromAutoFile, ret) diags = diags.Append(moreDiags) } if infos, err := ioutil.ReadDir("."); err == nil { @@ -74,7 +73,7 @@ func (m *Meta) collectVariableValues() (map[string]backend.UnparsedVariableValue if !isAutoVarFile(name) { continue } - moreDiags := m.addVarsFromFile(name, terraform.ValueFromFile, ret) + moreDiags := m.addVarsFromFile(name, terraform.ValueFromAutoFile, ret) diags = diags.Append(moreDiags) } } @@ -106,7 +105,7 @@ func (m *Meta) collectVariableValues() (map[string]backend.UnparsedVariableValue } case "-var-file": - moreDiags := m.addVarsFromFile(rawFlag.Value, terraform.ValueFromFile, ret) + moreDiags := m.addVarsFromFile(rawFlag.Value, terraform.ValueFromNamedFile, ret) diags = diags.Append(moreDiags) default: diff --git a/terraform/semantics.go 
b/terraform/semantics.go index 6c583dc64250..3f6189a98134 100644 --- a/terraform/semantics.go +++ b/terraform/semantics.go @@ -79,7 +79,7 @@ func checkInputVariables(vcs map[string]*configs.Variable, vs InputValues) tfdia _, err := convert.Convert(val.Value, wantType) if err != nil { switch val.SourceType { - case ValueFromConfig, ValueFromFile: + case ValueFromConfig, ValueFromAutoFile, ValueFromNamedFile: // We have source location information for these. diags = diags.Append(&hcl.Diagnostic{ Severity: hcl.DiagError, diff --git a/terraform/variables.go b/terraform/variables.go index a4318f7aa84c..e54e27563930 100644 --- a/terraform/variables.go +++ b/terraform/variables.go @@ -32,10 +32,14 @@ const ( // e.g. the default value defined for a variable. ValueFromConfig ValueSourceType = 'C' - // ValueFromFile indicates that a value came from a "values file", like - // a .tfvars file, either passed explicitly on the command line or - // implicitly loaded by naming convention. - ValueFromFile ValueSourceType = 'F' + // ValueFromAutoFile indicates that a value came from a "values file", like + // a .tfvars file, that was implicitly loaded by naming convention. + ValueFromAutoFile ValueSourceType = 'F' + + // ValueFromNamedFile indicates that a value came from a named "values file", + // like a .tfvars file, that was passed explicitly on the command line (e.g. + // -var-file=foo.tfvars). + ValueFromNamedFile ValueSourceType = 'N' // ValueFromCLIArg indicates that the value was provided directly in // a CLI argument. 
The name of this argument is not recorded and so it must From ab62b330c16ce38ea453d925813b4521c65ce8a6 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Mon, 5 Nov 2018 16:02:45 -0800 Subject: [PATCH 039/149] core: Allow planned output changes to be updated during apply If plan and apply are both run against the same context then we still have the planned output values in memory while we're doing the apply walk, so we need to make sure to update them along with the state as we learn the final known values of each output. There were actually two different bugs here: - We weren't removing any existing planned change for an output when setting a new one. In retrospect a map would've been a better data structure for the output changes, rather than a slice to mimic what we do for resource instance objects, but for now we'll leave the structures alone and clean up as needed. (The set of outputs should be small for any reasonable configuration, so the main impact of this is some ugly code in RemoveOutputChange.) - RemoveOutputChange itself had a bug where it was iterating over the resource changes rather than the output changes. This didn't matter before because we weren't actually using that function, but now we are. This fix is confirmed by restoring various existing context apply tests back to passing again. 
--- plans/changes_sync.go | 4 ++-- terraform/eval_output.go | 20 ++++++++++++++++---- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/plans/changes_sync.go b/plans/changes_sync.go index e9305eaf9491..6b4ff98fffa3 100644 --- a/plans/changes_sync.go +++ b/plans/changes_sync.go @@ -133,8 +133,8 @@ func (cs *ChangesSync) RemoveOutputChange(addr addrs.AbsOutputValue) { defer cs.lock.Unlock() addrStr := addr.String() - for i, r := range cs.changes.Resources { - if r.Addr.String() != addrStr { + for i, o := range cs.changes.Outputs { + if o.Addr.String() != addrStr { continue } copy(cs.changes.Outputs[i:], cs.changes.Outputs[i+1:]) diff --git a/terraform/eval_output.go b/terraform/eval_output.go index 6829934f0e9d..10573971fbe4 100644 --- a/terraform/eval_output.go +++ b/terraform/eval_output.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" ) // EvalDeleteOutput is an EvalNode implementation that deletes an output @@ -61,22 +62,33 @@ func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { // if we're continuing, make sure the output is included, and // marked as unknown. If the evaluator was able to find a type // for the value in spite of the error then we'll use it. - state.SetOutputValue(addr, cty.UnknownVal(val.Type()), n.Sensitive) + n.setValue(addr, state, changes, cty.UnknownVal(val.Type())) return nil, EvalEarlyExitError{} } return nil, diags.Err() } + n.setValue(addr, state, changes, val) + + return nil, nil +} + +func (n *EvalWriteOutput) setValue(addr addrs.AbsOutputValue, state *states.SyncState, changes *plans.ChangesSync, val cty.Value) { if val.IsKnown() && !val.IsNull() { // The state itself doesn't represent unknown values, so we null them // out here and then we'll save the real unknown value in the planned // changeset below, if we have one on this graph walk. 
+ log.Printf("[TRACE] EvalWriteOutput: Saving value for %s in state", addr) stateVal := cty.UnknownAsNull(val) state.SetOutputValue(addr, stateVal, n.Sensitive) } else { + log.Printf("[TRACE] EvalWriteOutput: Removing %s from state (it is now null)", addr) state.RemoveOutputValue(addr) } + // If we also have an active changeset then we'll replicate the value in + // there. This is used in preference to the state where present, since it + // *is* able to represent unknowns, while the state cannot. if changes != nil { // For the moment we are not properly tracking changes to output // values, and just marking them always as "Create" or "Destroy" @@ -116,8 +128,8 @@ func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { // Should never happen, since we just constructed this right above panic(fmt.Sprintf("planned change for %s could not be encoded: %s", addr, err)) } - changes.AppendOutputChange(cs) + log.Printf("[TRACE] EvalWriteOutput: Saving %s change for %s in changeset", change.Action, addr) + changes.RemoveOutputChange(addr) // remove any existing planned change, if present + changes.AppendOutputChange(cs) // add the new planned change } - - return nil, nil } From 3b723dd4d175e90732d9f47158030047883b1f2b Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Mon, 5 Nov 2018 16:34:51 -0800 Subject: [PATCH 040/149] Truncate CHANGELOG back to v0.9.0 The changelog is getting too long for convenient browsing and editing, so here we cut it off at the v0.9 series and link to the rest of the history via the v0.8.8 tag. 
--- CHANGELOG.md | 3251 +------------------------------------------------- 1 file changed, 2 insertions(+), 3249 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 256dc5750031..a003c915231b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1603,3253 +1603,6 @@ BUG FIXES: * provider/datadog: Default notify_no_data on datadog_monitor to false ([#11903](https://github.com/hashicorp/terraform/issues/11903)) -## 0.8.8 (March 2, 2017) +---- -BACKWARDS INCOMPATIBILITIES / NOTES: - * provider/aws: Potential breaking change for `root_block_device` ([#12379](https://github.com/hashicorp/terraform/issues/12379)) - -FEATURES: - - * **New Provider:** `spotinst` ([#5001](https://github.com/hashicorp/terraform/issues/5001)) - * **New Interpolation:** `slice` ([#9729](https://github.com/hashicorp/terraform/issues/9729)) - * **New Data Source:** `aws_sns_topic` ([#11752](https://github.com/hashicorp/terraform/issues/11752)) - * **New Data Source:** `openstack_images_image_v2` ([#12097](https://github.com/hashicorp/terraform/issues/12097)) - * **New Resource:** `aws_elastic_beanstalk_application_version` ([#5770](https://github.com/hashicorp/terraform/issues/5770)) - * **New Resource:** `aws_cloudwatch_log_destination` ([#11940](https://github.com/hashicorp/terraform/issues/11940)) - * **New Resource:** `aws_cloudwatch_log_destination_policy` ([#11940](https://github.com/hashicorp/terraform/issues/11940)) - * **New Resource:** `aws_codepipeline` ([#11814](https://github.com/hashicorp/terraform/issues/11814)) - * **New Resource:** `aws_egress_only_internet_gateway` ([#10538](https://github.com/hashicorp/terraform/issues/10538)) - * **New Resource:** `datadog_user` ([#12268](https://github.com/hashicorp/terraform/issues/12268)) - * **New Resource:** `digitalocean_loadbalancer` ([#12077](https://github.com/hashicorp/terraform/issues/12077)) - * **New Resource:** `openstack_images_image_v2` ([#11942](https://github.com/hashicorp/terraform/issues/11942)) - * **New Resource:** 
`openstack_compute_floatingip_associate_v2` ([#12190](https://github.com/hashicorp/terraform/issues/12190)) - -IMPROVEMENTS: - - * provider/aws: Add support for AWS EBS Elastic Volumes ([#11981](https://github.com/hashicorp/terraform/issues/11981)) - * provider/aws: Allow aws_instances to be resized rather than forcing a new instance ([#11998](https://github.com/hashicorp/terraform/issues/11998)) - * provider/aws: Report bucket name in S3 Error message ([#12122](https://github.com/hashicorp/terraform/issues/12122)) - * provider/aws: Implement IPV6 Support for ec2 / VPC ([#10538](https://github.com/hashicorp/terraform/issues/10538)) - * provider/aws: Add support for import of aws_elasticsearch_domain ([#12330](https://github.com/hashicorp/terraform/issues/12330)) - * provider/aws: improve redshift cluster validation ([#12313](https://github.com/hashicorp/terraform/issues/12313)) - * provider/aws: Support IAM role attachment and replacement for existing EC2 instance ([#11852](https://github.com/hashicorp/terraform/issues/11852)) - * provider/azurerm: Auto base64encode virtual_machine custom data ([#12164](https://github.com/hashicorp/terraform/issues/12164)) - * provider/datadog: add support for new host delay to the datadog_monitor resource ([#11975](https://github.com/hashicorp/terraform/issues/11975)) - * provider/datadog: Upgrade to Datadog API v2 ([#12098](https://github.com/hashicorp/terraform/issues/12098)) - * provider/fastly: Make Backends optional if used in VCL ([#12025](https://github.com/hashicorp/terraform/issues/12025)) - * provider/fastly: Add support for custom `response_object` ([#12032](https://github.com/hashicorp/terraform/issues/12032)) - * provider/google: Add support for maintenance window in `sql_database_instance` ([#12042](https://github.com/hashicorp/terraform/issues/12042)) - * provider/google: google_project supports billing account ([#11653](https://github.com/hashicorp/terraform/issues/11653)) - * provider/openstack: Don't allow 
floating IP and port ([#12099](https://github.com/hashicorp/terraform/issues/12099)) - * provider/openstack: Enable HTTP Logging ([#12089](https://github.com/hashicorp/terraform/issues/12089)) - * provider/openstack: Add Additional Targets for LBaaS v1 Member ([#12266](https://github.com/hashicorp/terraform/issues/12266)) - * provider/openstack: Redesign openstack_blockstorage_volume_attach_v2 ([#12071](https://github.com/hashicorp/terraform/issues/12071)) - * provider/pagerduty: Import support for service integrations ([#12141](https://github.com/hashicorp/terraform/issues/12141)) - * provider/pagerduty: Updated implementation of pagerduty_vendor & pagerduty_service_integration ([#12357](https://github.com/hashicorp/terraform/issues/12357)) - * provider/random_id: Add prefix attribute ([#12016](https://github.com/hashicorp/terraform/issues/12016)) - * provider/statuscake: Add support for Port in statuscake_test ([#11966](https://github.com/hashicorp/terraform/issues/11966)) - -BUG FIXES: - - * core: Fix a hang that could occur at the end of a Terraform command with custom plugins used ([#12048](https://github.com/hashicorp/terraform/issues/12048)) - * command/fmt: Fix incorrect formatting with single line object following complex object ([#12049](https://github.com/hashicorp/terraform/issues/12049)) - * command/state: `-backup` flags work with `mv` and `rm` ([#12156](https://github.com/hashicorp/terraform/issues/12156)) - * provider/aws: add bucket name to delete error notification ([#11952](https://github.com/hashicorp/terraform/issues/11952)) - * provider/aws: Use proper Set for source.Auth in resource_aws_codebuild_project ([#11741](https://github.com/hashicorp/terraform/issues/11741)) - * provider/aws: aws_ecs_service should output service name along with err ([#12072](https://github.com/hashicorp/terraform/issues/12072)) - * provider/aws: Add VRRP to allowed protocols in network ACL rules ([#12107](https://github.com/hashicorp/terraform/issues/12107)) - * 
provider/aws: Add owner_account option to aws_redshift_cluster ([#12062](https://github.com/hashicorp/terraform/issues/12062)) - * provider/aws: Update of inspector_assessment_target should use ARN not Name ([#12115](https://github.com/hashicorp/terraform/issues/12115)) - * provider/aws: Fix the panic in ssm_association with parameters ([#12215](https://github.com/hashicorp/terraform/issues/12215)) - * provider/aws: Fix update of environment_variable in codebuild_project ([#12169](https://github.com/hashicorp/terraform/issues/12169)) - * provider/aws: Refresh aws_autoscaling_schedule from state when autoscaling_group not found ([#12312](https://github.com/hashicorp/terraform/issues/12312)) - * provider/aws: No longer ForceNew resource on lambda_function runtime update ([#12329](https://github.com/hashicorp/terraform/issues/12329)) - * provider/aws: reading multiple pages of aws_efs_file_system tags ([#12328](https://github.com/hashicorp/terraform/issues/12328)) - * provider/aws: Refresh cloudwatch log subscription filter on 404 ([#12333](https://github.com/hashicorp/terraform/issues/12333)) - * provider/aws: more details on which s3 bucket had an error ([#12314](https://github.com/hashicorp/terraform/issues/12314)) - * provider/azurerm: Ignore case on protocol and allocation types ([#12176](https://github.com/hashicorp/terraform/issues/12176)) - * provider/cloudflare: add validation for proxied record types ([#11993](https://github.com/hashicorp/terraform/issues/11993)) - * provider/datadog: Adding default values to datadog_monitor ([#12168](https://github.com/hashicorp/terraform/issues/12168)) - * provider/google: make local_traffic_selector computed ([#11631](https://github.com/hashicorp/terraform/issues/11631)) - * provider/google: Write the raw disk encryption key in the state file to avoid diffs on plan ([#12068](https://github.com/hashicorp/terraform/issues/12068)) - * provider/google: fix url map test and update logic 
([#12317](https://github.com/hashicorp/terraform/issues/12317)) - * provider/openstack: Rename provider to loadbalancer_provider ([#12239](https://github.com/hashicorp/terraform/issues/12239)) - * provider/pagerduty: Setting incident_urgency_rule as optional ([#12211](https://github.com/hashicorp/terraform/issues/12211)) - * provider/profitbricks: Fixing how primary_nic is added to profitbricks server ([#12197](https://github.com/hashicorp/terraform/issues/12197)) - * state/azure: add environment option for non-public cloud usage ([#12364](https://github.com/hashicorp/terraform/issues/12364)) - -## 0.8.7 (February 15, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/aws: `kinesis_endpoint` & `dynamodb_endpoint` fields in the provider schema were deprecated in favour of `kinesis` & `dynamodb` inside the `endpoints` block. Deprecated fields will be removed in 0.9 ([#11768](https://github.com/hashicorp/terraform/issues/11768)) - -FEATURES: - - * **New Interpolation:** `slice` ([#9729](https://github.com/hashicorp/terraform/issues/9729)) - * **New Provider:** `arukas` ([#11171](https://github.com/hashicorp/terraform/issues/11171)) - * **New Data Source:** `aws_db_instance` ([#11717](https://github.com/hashicorp/terraform/issues/11717)) - * **New Data Source:** `aws_vpn_gateway` ([#11886](https://github.com/hashicorp/terraform/issues/11886)) - * **New Data Source:** `consul_agent_self`, `consul_catalog_service`, `consul_catalog_services`, `consul_catalog_nodes` ([#11729](https://github.com/hashicorp/terraform/pull/11729)) - * **New Data Source:** `google_compute_zones` ([#11954](https://github.com/hashicorp/terraform/issues/11954)) - * **New Resource:** `aws_elasticsearch_domain_policy` ([#8648](https://github.com/hashicorp/terraform/issues/8648)) - * **New Resource:** `aws_vpc_peering_connection_accepter` ([#11505](https://github.com/hashicorp/terraform/issues/11505)) - * **New Resource:** `aws_config_config_rule` 
([#5850](https://github.com/hashicorp/terraform/issues/5850)) - * **New Resource:** `aws_config_configuration_recorder` ([#5850](https://github.com/hashicorp/terraform/issues/5850)) - * **New Resource:** `aws_config_configuration_recorder_status` ([#5850](https://github.com/hashicorp/terraform/issues/5850)) - * **New Resource:** `aws_config_delivery_channel` ([#5850](https://github.com/hashicorp/terraform/issues/5850)) - * **New Resource:** `azurerm_container_service` ([#10820](https://github.com/hashicorp/terraform/issues/10820)) - * **New Resource:** `vault_policy` ([#10980](https://github.com/hashicorp/terraform/issues/10980)) - -IMPROVEMENTS: - - * provider/aws: Update aws_ssm_document to include `document_type`, `latest_version` and `default_version` ([#11671](https://github.com/hashicorp/terraform/issues/11671)) - * provider/aws: Support import of aws_opsworks_instance ([#11783](https://github.com/hashicorp/terraform/issues/11783)) - * provider/aws Add S3 bucket object tag support ([#11344](https://github.com/hashicorp/terraform/issues/11344)) - * provider/aws: Add validation for aws_iam_role ([#11915](https://github.com/hashicorp/terraform/issues/11915)) - * provider/fastly Allows for conditional settings across fastly ([#11843](https://github.com/hashicorp/terraform/issues/11843)) - * provider/openstack: Allow OpenStack SSL certs + keys to take path or content ([#10271](https://github.com/hashicorp/terraform/issues/10271)) - * provider/pagerduty: Add support for `incident_urgency_rule`, `support_hours` and `scheduled_actions` to `pagerduty_service` ([#11856](https://github.com/hashicorp/terraform/issues/11856)) - * provider/rancher: parse Rancher client cli.json config file ([#11658](https://github.com/hashicorp/terraform/issues/11658)) - * provider/vault: Use Vault api.DefaultConfig() ([#11523](https://github.com/hashicorp/terraform/issues/11523)) - -Bug FIXES: - - * core: resources that depend on create-before-destroy resources don't create cycles 
([#11753](https://github.com/hashicorp/terraform/issues/11753)) - * core: create-before-destroy resources with a count > 1 create proper edges ([#11753](https://github.com/hashicorp/terraform/issues/11753)) - * core: fix "diffs didn't match issue" for removing or empty collections that force new ([#11732](https://github.com/hashicorp/terraform/issues/11732)) - * core: module sources ended in archive extensions without a "." won't be treated as archives ([#11438](https://github.com/hashicorp/terraform/issues/11438)) - * core: destroy ordering of resources within modules is correct ([#11765](https://github.com/hashicorp/terraform/issues/11765)) - * core: Fix crash if count interpolates into a non-int ([#11864](https://github.com/hashicorp/terraform/issues/11864)) - * core: Targeting a module will properly exclude untargeted module outputs ([#11921](https://github.com/hashicorp/terraform/issues/11921)) - * state/remote/s3: Fix Bug with Assume Role for Federated IAM Account ([#10067](https://github.com/hashicorp/terraform/issues/10067)) - * provider/aws: Fix security_group_rule resource timeout errors ([#11809](https://github.com/hashicorp/terraform/issues/11809)) - * provider/aws: Fix diff suppress function for aws_db_instance ([#11909](https://github.com/hashicorp/terraform/issues/11909)) - * provider/aws: Fix default values for AMI volume size ([#11842](https://github.com/hashicorp/terraform/issues/11842)) - * provider/aws: Fix aws_db_event_subscription import ([#11744](https://github.com/hashicorp/terraform/issues/11744)) - * provider/aws: Respect 400 returned from AWS API on RDS Cluster termination ([#11795](https://github.com/hashicorp/terraform/issues/11795)) - * provider/aws: Raise the codebuild_project create timeout ([#11777](https://github.com/hashicorp/terraform/issues/11777)) - * provider/aws: Make aws_dms_endpoint database_name optional ([#11792](https://github.com/hashicorp/terraform/issues/11792)) - * provider/aws: Bump Create and Delete timeouts to 60 
mins on directory_service ([#11793](https://github.com/hashicorp/terraform/issues/11793)) - * provider/aws: aws_codecommit_trigger fix typo that causes serialization to fail when events is non-empty ([#11839](https://github.com/hashicorp/terraform/issues/11839)) - * provider/aws: Fix bug to allow update of maintenance_window in elasticache_replication_group ([#11850](https://github.com/hashicorp/terraform/issues/11850)) - * provider/azurerm: Don't push an empty set of ssh keys to virtual machine or they cannot be ammended ([#11804](https://github.com/hashicorp/terraform/issues/11804)) - * provider/azurerm: Refresh from state when VM Extension Resource not found ([#11894](https://github.com/hashicorp/terraform/issues/11894)) - * provider/cloudstack: Ensure consistent hashes of `cloudstack_port_forward` forward items. ([#11546](https://github.com/hashicorp/terraform/issues/11546)) - * provider/google: set additional_zones to computed and disallow the original zone from appearing in the list ([#11650](https://github.com/hashicorp/terraform/issues/11650)) - * provider/google: set subnetwork_project to computed ([#11646](https://github.com/hashicorp/terraform/issues/11646)) - * provider/openstack BlockStorage v1 availability_zone Fix ([#11949](https://github.com/hashicorp/terraform/issues/11949)) - -## 0.8.6 (07 February 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/aws: `aws_appautoscaling_policy` no longer has default values for `scalable_dimension` and `service_namespace` - - -FEATURES: - - * **New Data Source:** `aws_kms_secret` ([#11460](https://github.com/hashicorp/terraform/issues/11460)) - * **New Data Source:** `aws_ecs_task_definition` ([#8509](https://github.com/hashicorp/terraform/issues/8509)) - * **New Data Source:** `aws_ecs_cluster` ([#11558](https://github.com/hashicorp/terraform/issues/11558)) - * **New Data Source:** `aws_partition` ([#11675](https://github.com/hashicorp/terraform/issues/11675)) - * **New Data Source:** 
`pagerduty_escalation_policy` ([#11616](https://github.com/hashicorp/terraform/issues/11616)) - * **New Data Source:** `pagerduty_schedule` ([#11614](https://github.com/hashicorp/terraform/issues/11614)) - * **New Data Source:** `profitbricks_datacenter` ([#11520](https://github.com/hashicorp/terraform/issues/11520)) - * **New Data Source:** `profitbricks_location` ([#11520](https://github.com/hashicorp/terraform/issues/11520)) - * **New Data Source:** `profitbricks_image` ([#11520](https://github.com/hashicorp/terraform/issues/11520)) - * **New Resource:** `aws_sfn_activity` ([#11420](https://github.com/hashicorp/terraform/issues/11420)) - * **New Resource:** `aws_sfn_state_machine` ([#11420](https://github.com/hashicorp/terraform/issues/11420)) - * **New Resource:** `aws_codebuild_project` ([#11560](https://github.com/hashicorp/terraform/issues/11560)) - * **New Resource:** `aws_dms_certificate` ([#11122](https://github.com/hashicorp/terraform/issues/11122)) - * **New Resource:** `aws_dms_endpoint` ([#11122](https://github.com/hashicorp/terraform/issues/11122)) - * **New Resource:** `aws_dms_replication_instance` ([#11122](https://github.com/hashicorp/terraform/issues/11122)) - * **New Resource:** `aws_dms_replication_subnet_group` ([#11122](https://github.com/hashicorp/terraform/issues/11122)) - * **New Resource:** `aws_dms_replication_subnet_group` ([#11122](https://github.com/hashicorp/terraform/issues/11122)) - * **New Resource:** `pagerduty_addon` ([#11620](https://github.com/hashicorp/terraform/issues/11620)) - - -IMPROVEMENTS: - - * core: Interaction with Atlas now supports the `ATLAS_TLS_NOVERIFY` environment variable ([#11576](https://github.com/hashicorp/terraform/issues/11576)) - * provider/aws: Add EBS Volume support for EMR Instance Groups ([#11411](https://github.com/hashicorp/terraform/issues/11411)) - * provider/aws: Add support for policy to AWS provider assume_role ([#11501](https://github.com/hashicorp/terraform/issues/11501)) - * provider/aws: 
Add support for more sns_topic_subscription parameters on import command ([#10408](https://github.com/hashicorp/terraform/issues/10408)) - * provider/aws: Add support for Sever Side Encryption with default S3 KMS key to `aws_s3_bucket_object` ([#11261](https://github.com/hashicorp/terraform/issues/11261)) - * provider/aws: Add support for Cross Region RDS Cluster Replica ([#11428](https://github.com/hashicorp/terraform/issues/11428)) - * provider/aws: Add sensitive attribute in master_password ([#11584](https://github.com/hashicorp/terraform/issues/11584)) - * provider/aws: Application Auto Scaling now supports scaling an Amazon EC2 Spot fleet ([#8697](https://github.com/hashicorp/terraform/issues/8697)) - * provider/aws: Add tag support to DynamoDb tables ([#11617](https://github.com/hashicorp/terraform/issues/11617)) - * provider/aws: Provide the certificate ID in the aws data source ([#11693](https://github.com/hashicorp/terraform/issues/11693)) - * provider/aws: Wait for instance_profile creation to complete ([#11678](https://github.com/hashicorp/terraform/issues/11678)) - * provider/azurerm: Add support for scale sets overprovision ([#11516](https://github.com/hashicorp/terraform/issues/11516)) - * provider/azurerm: support import for load balancer and sub resources ([#11610](https://github.com/hashicorp/terraform/issues/11610)) - * provider/fastly: Adds papertrail logging ([#11491](https://github.com/hashicorp/terraform/issues/11491)) - * provider/fastly: Adds format_version for s3logging ([#11725](https://github.com/hashicorp/terraform/issues/11725)) - * provider/fastly: Adds healthcheck service ([#11709](https://github.com/hashicorp/terraform/issues/11709)) - * provider/google: allow instance group managers in region other than project ([#11294](https://github.com/hashicorp/terraform/issues/11294)) - * provider/google: Add second generation disk specification options ([#11571](https://github.com/hashicorp/terraform/issues/11571)) - * provider/google: 
remote_traffic_selector for google_compute_vpn_tunnel ([#11020](https://github.com/hashicorp/terraform/issues/11020)) - * provider/nomad: Update jobspec dependency to allow parsing parameterized nomad jobfiles ([#11691](https://github.com/hashicorp/terraform/issues/11691)) - * provider/google: No default root user for SQL ([#11590](https://github.com/hashicorp/terraform/issues/11590)) - * provider/opsgenie: Descriptions for Teams ([#11391](https://github.com/hashicorp/terraform/issues/11391)) - * provider/rancher: rancher_registration_token add image parameter ([#11551](https://github.com/hashicorp/terraform/issues/11551)) - * provider/rancher: allow for importing resources using environment ID to target ([#11688](https://github.com/hashicorp/terraform/issues/11688)) - -BUG FIXES: - - * core: Remove missed subfields when parent list is removed ([#11498](https://github.com/hashicorp/terraform/issues/11498)) - * command/fmt: Trailing blocks of comments at the end of files are formatted properly ([#11585](https://github.com/hashicorp/terraform/issues/11585)) - * provider/aws: Fix issue with `path` not updated when modifying AWS API Gateway Resource ([#11443](https://github.com/hashicorp/terraform/issues/11443)) - * provider/aws: Fix AWS Lambda Qualifier Regexp for `aws_lambda_permission` ([#11383](https://github.com/hashicorp/terraform/issues/11383)) - * provider/aws: allow destroy of LB stickiness policy with missing LB ([#11462](https://github.com/hashicorp/terraform/issues/11462)) - * provider/aws: ECS Placement constraints fix ([#11475](https://github.com/hashicorp/terraform/issues/11475)) - * provider/aws: retry kms_key CreateKey if arn in policy not yet seen ([#11509](https://github.com/hashicorp/terraform/issues/11509)) - * provider/aws: Fix ALB Listener Rule Import ([#1174](https://github.com/hashicorp/terraform/issues/1174)) - * provider/aws: Fix issue with ECS Placement Strat. 
and type casing ([#11565](https://github.com/hashicorp/terraform/issues/11565)) - * provider/aws: aws_route53_record import error processing ([#11603](https://github.com/hashicorp/terraform/issues/11603)) - * provider/aws: Fix panic in aws_rds_cluster missing parameter error message ([#11600](https://github.com/hashicorp/terraform/issues/11600)) - * provider/aws: Succeed creating aws_volume_attachment if identical attachment exists ([#11060](https://github.com/hashicorp/terraform/issues/11060)) - * provider/aws: Guard against panic in aws_vpc_endpoint_association ([#11613](https://github.com/hashicorp/terraform/issues/11613)) - * provider/aws: Allow root volume size changes in aws_instance ([#11619](https://github.com/hashicorp/terraform/issues/11619)) - * provider/aws: Fix spot instance request block device configs ([#11649](https://github.com/hashicorp/terraform/issues/11649)) - * provider/aws: Fix validation issues for onceAWeek and onceADay validation functions ([#11679](https://github.com/hashicorp/terraform/issues/11679)) - * provider/aws: Return route_table_id from aws_route_table data source ([#11703](https://github.com/hashicorp/terraform/issues/11703)) - * provider/aws: validate aws_alb_target_group name is less than 32 characters ([#11699](https://github.com/hashicorp/terraform/issues/11699)) - * provider/azurerm: Scale Sets Load balancer pools should not be computed ([#11516](https://github.com/hashicorp/terraform/issues/11516)) - * provider/azurerm: Scale Sets ip configuration handling and update support for load balancer backend pools. 
([#11516](https://github.com/hashicorp/terraform/issues/11516)) - * provider/azurerm: check if lb sub resources exist when reading ([#11553](https://github.com/hashicorp/terraform/issues/11553)) - * provider/google: Fix master_instance_name to prevent slave rebuilds ([#11477](https://github.com/hashicorp/terraform/issues/11477)) - * provider/google: Refresh google_compute_instance machine_type on read ([#11645](https://github.com/hashicorp/terraform/issues/11645)) - * provider/google: Added forceNew on accessConfig in google_compute_instance_template ([#11548](https://github.com/hashicorp/terraform/issues/11548)) - * provider/ignition: Allow to add authorized keys without user creation ([#11406](https://github.com/hashicorp/terraform/issues/11406)) - * provider/ignition: mount and path are mutually exclusive ([#11409](https://github.com/hashicorp/terraform/issues/11409)) - * provider/ns1: Fix "use_client_subnet" in ns1_record ([#11368](https://github.com/hashicorp/terraform/issues/11368)) - * provider/openstack: Remove Default Security Group Rules on Create ([#11466](https://github.com/hashicorp/terraform/issues/11466)) - * provider/pagerduty: Allow timeouts to be disabled (pagerduty_service) ([#11483](https://github.com/hashicorp/terraform/issues/11483)) - * provider/rancher: Use environment specific client for accessing resources ([#11503](https://github.com/hashicorp/terraform/issues/11503)) - * provider/rancher: Refresh rancher stack from state on delete ([#11539](https://github.com/hashicorp/terraform/issues/11539)) - * provider/rancher: Refresh rancher token and registry from state on not found ([#11543](https://github.com/hashicorp/terraform/issues/11543)) - * provider/rancher: return error when Rancher template not found ([#11544](https://github.com/hashicorp/terraform/issues/11544)) - * provider/rancher: rancher_stack set docker_compose and rancher_compose ([#11550](https://github.com/hashicorp/terraform/issues/11550)) - * provider/rancher: Handle 
deleted/purged resources from Rancher ([#11607](https://github.com/hashicorp/terraform/issues/11607)) - * provider/statuscake: Remove computed from statuscake_test timeout parameter ([#11541](https://github.com/hashicorp/terraform/issues/11541)) - * provider/vsphere: vSphere virtual machine don't ignore VM power on errors ([#11604](https://github.com/hashicorp/terraform/issues/11604)) - * provisioner/remote-exec: Revert change in 0.8.5 that treated each line as a script since that doesn't work for stateful scripts. ([#11692](https://github.com/hashicorp/terraform/issues/11692)) - * provisioner/chef: Attributes JSON coming from computed source validates ([#11502](https://github.com/hashicorp/terraform/issues/11502)) - -## 0.8.5 (26 January 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/aws: We no longer prefix an ECR repository address with `https://` - * provider/google: `google_project` has undergone significant changes. Existing configs and state should continue to work as they always have, but new configs and state will exhibit some new behaviour, including actually creating and deleting projects, instead of just referencing them. See https://www.terraform.io/docs/providers/google/r/google_project.html for more details. 
- -FEATURES: - - * **New Data Source:** `aws_autoscaling_groups` ([#11303](https://github.com/hashicorp/terraform/issues/11303)) - * **New Data Source:** `aws_elb_hosted_zone_id ` ([#11027](https://github.com/hashicorp/terraform/issues/11027)) - * **New Data Source:** `aws_instance` ([#11272](https://github.com/hashicorp/terraform/issues/11272)) - * **New Data Source:** `aws_canonical_user_id` ([#11332](https://github.com/hashicorp/terraform/issues/11332)) - * **New Data Source:** `aws_vpc_endpoint` ([#11323](https://github.com/hashicorp/terraform/issues/11323)) - * **New Provider:** `profitbricks` ([#7943](https://github.com/hashicorp/terraform/issues/7943)) - * **New Provider:** `alicloud` ([#11235](https://github.com/hashicorp/terraform/issues/11235)) - * **New Provider:** `ns1` ([#10782](https://github.com/hashicorp/terraform/issues/10782)) - * **New Resource:** `aws_inspector_assessment_target` ([#11217](https://github.com/hashicorp/terraform/issues/11217)) - * **New Resource:** `aws_inspector_assessment_template` ([#11217](https://github.com/hashicorp/terraform/issues/11217)) - * **New Resource:** `aws_inspector_resource_group` ([#11217](https://github.com/hashicorp/terraform/issues/11217)) - * **New Resource:** `google_project_iam_policy` ([#10425](https://github.com/hashicorp/terraform/issues/10425)) - * **New Resource:** `google_project_services` ([#10425](https://github.com/hashicorp/terraform/issues/10425)) - * **New Interpolation Function:** `pathexpand()` ([#11277](https://github.com/hashicorp/terraform/issues/11277)) - -IMPROVEMENTS: - - * command/fmt: Single line objects (such as `variable "foo" {}`) aren't separated by newlines - * provider/aws: Add 'route_table_id' to route_table data source ([#11157](https://github.com/hashicorp/terraform/pull/11157)) - * provider/aws: Add Support for aws_cloudwatch_metric_alarm extended statistic ([#11193](https://github.com/hashicorp/terraform/issues/11193)) - * provider/aws: Make the type of a route53_record 
modifiable without recreating the resource ([#11164](https://github.com/hashicorp/terraform/issues/11164)) - * provider/aws: Add Placement Strategy to aws_ecs_service resource ([#11201](https://github.com/hashicorp/terraform/issues/11201)) - * provider/aws: Add support for placement_constraint to aws_ecs_service ([#11242](https://github.com/hashicorp/terraform/issues/11242)) - * provider/aws: allow ALB target group stickiness to be enabled/disabled ([#11251](https://github.com/hashicorp/terraform/issues/11251)) - * provider/aws: ALBs now wait for provisioning to complete before proceeding ([#11333](https://github.com/hashicorp/terraform/issues/11333)) - * provider/aws: Add support for setting MSSQL Timezone in aws_db_instance ([#11247](https://github.com/hashicorp/terraform/issues/11247)) - * provider/aws: CloudFormation YAML template support ([#11121](https://github.com/hashicorp/terraform/issues/11121)) - * provider/aws: Remove hardcoded https from the ecr repository ([#11307](https://github.com/hashicorp/terraform/issues/11307)) - * provider/aws: Implement CloudFront Lambda Function Associations ([#11291](https://github.com/hashicorp/terraform/issues/11291)) - * provider/aws: Remove MaxFrameRate default on ElasticTranscoderPreset ([#11340](https://github.com/hashicorp/terraform/issues/11340)) - * provider/aws: Allow ARN Identifier to be set for different partitions ([#11359](https://github.com/hashicorp/terraform/issues/11359)) - * provider/aws: Allow bypassing region validation ([#11358](https://github.com/hashicorp/terraform/issues/11358)) - * provider/aws: Added a s3_bucket domain name attribute ([#10088](https://github.com/hashicorp/terraform/issues/10088)) - * provider/aws: Add DiffSupressFunction to aws_db_instance's engine_version ([#11369](https://github.com/hashicorp/terraform/issues/11369)) - * provider/archive: Adding support for multiple source contents ([#11271](https://github.com/hashicorp/terraform/issues/11271)) - * provider/azurerm: add caching 
support for virtual_machine data_disks ([#11142](https://github.com/hashicorp/terraform/issues/11142)) - * provider/azurerm: make lb sub resources idempotent ([#11128](https://github.com/hashicorp/terraform/issues/11128)) - * provider/cloudflare: Add verification for record types and content ([#11197](https://github.com/hashicorp/terraform/issues/11197)) - * provider/datadog: Add aggregator method to timeboard graph resource ([#11206](https://github.com/hashicorp/terraform/issues/11206)) - * provider/fastly Add request_condition to backend definition ([#11238](https://github.com/hashicorp/terraform/issues/11238)) - * provider/google: Add subnetwork_project field to enable cross-project networking in instance templates ([#11110](https://github.com/hashicorp/terraform/issues/11110)) - * provider/google: Add support for encrypting a disk ([#11167](https://github.com/hashicorp/terraform/issues/11167)) - * provider/google: Add support for session_affinity to google_compute_region_backend_service ([#11228](https://github.com/hashicorp/terraform/issues/11228)) - * provider/google: Allow additional zones to be configured in GKE ([#11018](https://github.com/hashicorp/terraform/issues/11018)) - * provider/ignition: Allow empty dropin and content for systemd_units ([#11327](https://github.com/hashicorp/terraform/issues/11327)) - * provider/openstack: LoadBalancer Security Groups ([#11074](https://github.com/hashicorp/terraform/issues/11074)) - * provider/openstack: Volume Attachment Updates ([#11285](https://github.com/hashicorp/terraform/issues/11285)) - * provider/scaleway improve bootscript data source ([#11183](https://github.com/hashicorp/terraform/issues/11183)) - * provider/statuscake: Add support for StatusCake confirmation servers ([#11179](https://github.com/hashicorp/terraform/issues/11179)) - * provider/statuscake: Add support for Updating StatusCake contact_ids ([#7115](https://github.com/hashicorp/terraform/issues/7115)) - * provisioner/chef: Add support for 
named run-lists when using policyfiles ([#11215](https://github.com/hashicorp/terraform/issues/11215)) - * core: Add basic HTTP Auth for remote state backend ([#11301](https://github.com/hashicorp/terraform/issues/11301)) - -BUG FIXES: - - * command/fmt: Multiple `#` comments won't be separated by newlines. ([#11209](https://github.com/hashicorp/terraform/issues/11209)) - * command/fmt: Lists with a heredoc element that starts on the same line as the opening brace is formatted properly. ([#11208](https://github.com/hashicorp/terraform/issues/11208)) - * command/import: Provider configuration inheritance into modules works properly ([#11393](https://github.com/hashicorp/terraform/issues/11393)) - * command/import: Update help text to note that `-var` and `-var-file` work - * provider/aws: Fix panic when querying VPC's main route table via data source ([#11134](https://github.com/hashicorp/terraform/issues/11134)) - * provider/aws: Allow creating aws_codecommit repository outside of us-east-1 ([#11177](https://github.com/hashicorp/terraform/issues/11177)) - * provider/aws: Fix issue destroying or updating CloudFront due to missing Lambda Function Associations parameters ([#11291](https://github.com/hashicorp/terraform/issues/11291)) - * provider/aws: Correct error messages are now returned if an `aws_autoscaling_lifecycle_hook` fails during creation ([#11360](https://github.com/hashicorp/terraform/issues/11360)) - * provider/aws: Fix issue updating/destroying Spot Fleet requests when using `terminate_instances_with_expiration` ([#10953](https://github.com/hashicorp/terraform/issues/10953)) - * provider/azurerm: use configured environment for storage clients ([#11159](https://github.com/hashicorp/terraform/issues/11159)) - * provider/google: removes region param from google_compute_backend_service ([#10903](https://github.com/hashicorp/terraform/issues/10903)) - * provider/ignition: allowing empty systemd.content when a dropin is provided 
([#11216](https://github.com/hashicorp/terraform/issues/11216)) - * provider/openstack: Increase deletion timeout for router interfaces ([#11250](https://github.com/hashicorp/terraform/issues/11250)) - * provider/openstack: Fix Instance Metadata Deletion ([#11252](https://github.com/hashicorp/terraform/issues/11252)) - * provider/scaleway: Rename Scaleway provider parameters to match more closely to the API ([#10874](https://github.com/hashicorp/terraform/issues/10874)) - * provider/vault: Remove user input for optional vault provider fields ([#11082](https://github.com/hashicorp/terraform/issues/11082)) - * provider/vsphere: Set deviceID to 0 if one 1 network interface in vsphere_virtual_machine ([#8276](https://github.com/hashicorp/terraform/issues/8276)) - * provisioner/remote-exec: fail on first inline script with bad exit code ([#11155](https://github.com/hashicorp/terraform/issues/11155)) - -## 0.8.4 (January 11, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * We have removed the `Arukas` provider that was added in v0.8.3 for this release. Unfortunately we found the - new provider included a dependency that would not compile and run on Windows operating systems. For now the - provider has been removed and we hope to work to reintroduce it for all platforms in the near future. Going forward we will also be taking additional steps in our build testing to ensure Terraform builds on all platforms before release. 
- -## 0.8.3 (January 10, 2017) - -FEATURES: - - * **New Provider:** `Arukas` ([#10862](https://github.com/hashicorp/terraform/issues/10862)) - * **New Provider:** `Ignition` ([#6189](https://github.com/hashicorp/terraform/issues/6189)) - * **New Provider:** `OpsGenie` ([#11012](https://github.com/hashicorp/terraform/issues/11012)) - * **New Data Source:** `aws_vpc_peering_connection` ([#10913](https://github.com/hashicorp/terraform/issues/10913)) - * **New Resource:** `aws_codedeploy_deployment_config` ([#11062](https://github.com/hashicorp/terraform/issues/11062)) - * **New Resource:** `azurerm_container_registry` ([#10973](https://github.com/hashicorp/terraform/issues/10973)) - * **New Resource:** `azurerm_eventhub_authorization_rule` ([#10971](https://github.com/hashicorp/terraform/issues/10971)) - * **New Resource:** `azurerm_eventhub_consumer_group` ([#9902](https://github.com/hashicorp/terraform/issues/9902)) - -IMPROVEMENTS: - - * command/fmt: Show filename on parse error ([#10923](https://github.com/hashicorp/terraform/issues/10923)) - * provider/archive: `archive_file` now exports `output_md5` attribute in addition to existing SHA1 and Base64 SHA256 hashes. 
([#10851](https://github.com/hashicorp/terraform/issues/10851)) - * provider/aws: Add `most_recent` to the `ebs_snapshot` data source ([#10986](https://github.com/hashicorp/terraform/issues/10986)) - * provider/aws: Add support for instance tenancy in `aws_opsworks_instance` ([#10885](https://github.com/hashicorp/terraform/issues/10885)) - * provider/aws: Added a validation for security group rule types ([#10864](https://github.com/hashicorp/terraform/issues/10864)) - * provider/aws: Add support for updating aws_emr_cluster parameters ([#11008](https://github.com/hashicorp/terraform/issues/11008)) - * provider/aws: Add Placement Constraints to `aws_ecs_task_definition` ([#11030](https://github.com/hashicorp/terraform/issues/11030)) - * provider/aws: Increasing timeout for redshift cluster creation to 75 minutes ([#11041](https://github.com/hashicorp/terraform/issues/11041)) - * provider/aws: Add support for content_handling to aws_api_gateway_integration_response ([#11002](https://github.com/hashicorp/terraform/issues/11002)) - * provider/aws: Add S3 bucket name validation ([#11116](https://github.com/hashicorp/terraform/issues/11116)) - * provider/aws: Add Route53 Record type validation ([#11119](https://github.com/hashicorp/terraform/issues/11119)) - * provider/azurerm: support non public clouds ([#11026](https://github.com/hashicorp/terraform/issues/11026)) - * provider/azurerm: Azure resource providers which are already registered are no longer re-registered. ([#10991](https://github.com/hashicorp/terraform/issues/10991)) - * provider/docker: Add network create --internal flag support ([#10932](https://github.com/hashicorp/terraform/issues/10932)) - * provider/docker: Add support for a list of pull_triggers within the docker_image resource. 
([#10845](https://github.com/hashicorp/terraform/issues/10845)) - * provider/pagerduty Add delete support to `pagerduty_service_integration` ([#10891](https://github.com/hashicorp/terraform/issues/10891)) - * provider/postgresql Add permissions support to `postgresql_schema` as nested `policy` attributes ([#10808](https://github.com/hashicorp/terraform/issues/10808)) - -BUG FIXES: - - * core: Properly expand sets as lists from a flatmap ([#11042](https://github.com/hashicorp/terraform/issues/11042)) - * core: Disallow root modules named "root" as a temporary workaround ([#11099](https://github.com/hashicorp/terraform/issues/11099)) - * command/fmt: Lists of heredocs format properly ([#10947](https://github.com/hashicorp/terraform/issues/10947)) - * command/graph: Fix crash when `-type=legacy` ([#11095](https://github.com/hashicorp/terraform/issues/11095)) - * provider/aws: Guard against nil change output in `route53_zone` that causes panic ([#10798](https://github.com/hashicorp/terraform/issues/10798)) - * provider/aws: Reworked validateArn function to handle empty values ([#10833](https://github.com/hashicorp/terraform/issues/10833)) - * provider/aws: Set `aws_autoscaling_policy` `metric_aggregation_type` to be Computed ([#10904](https://github.com/hashicorp/terraform/issues/10904)) - * provider/aws: `storage_class` is now correctly treated as optional when configuring replication for `aws_s3_bucket` resources. ([#10921](https://github.com/hashicorp/terraform/issues/10921)) - * provider/aws: `user_data` on `aws_launch_configuration` resources is only base 64 encoded if the value provided is not already base 64 encoded. 
([#10871](https://github.com/hashicorp/terraform/issues/10871)) - * provider/aws: Add snapshotting to the list of pending state for elasticache ([#10965](https://github.com/hashicorp/terraform/issues/10965)) - * provider/aws: Add support for updating tags in aws_emr_cluster ([#11003](https://github.com/hashicorp/terraform/issues/11003)) - * provider/aws: Fix the normalization of AWS policy statements ([#11009](https://github.com/hashicorp/terraform/issues/11009)) - * provider/aws: data_source_aws_iam_server_certificate latest should be bool not string causes panic ([#11016](https://github.com/hashicorp/terraform/issues/11016)) - * provider/aws: Fix typo in aws_redshift_cluster causing security groups to not allow update ([#11025](https://github.com/hashicorp/terraform/issues/11025)) - * provider/aws: Set `key_name` in `aws_key_pair` if omitted in configuration ([#10987](https://github.com/hashicorp/terraform/issues/10987)) - * provider/aws: Updating the aws_efs_mount_target dns_name ([#11023](https://github.com/hashicorp/terraform/issues/11023)) - * provider/aws: Validate window time format for snapshot times and backup windows on RDS and ElastiCache resources ([#11089](https://github.com/hashicorp/terraform/issues/11089)) - * provider/aws: aws_db_instance restored from snapshot had problem with subnet_group ([#11050](https://github.com/hashicorp/terraform/issues/11050)) - * provider/aws: Allow disabled access_log in ELB ([#11120](https://github.com/hashicorp/terraform/issues/11120)) - * provider/azurerm: fix update protocol for lb_probe ([#11125](https://github.com/hashicorp/terraform/issues/11125)) - * provider/google: Fix backwards incompatibility around create_timeout in instances ([#10858](https://github.com/hashicorp/terraform/issues/10858)) - * provider/google: google_compute_instance_group_manager update_strategy not properly read ([#10174](https://github.com/hashicorp/terraform/issues/10174)) - * provider/openstack: Handle `PENDING_UPDATE` status with LBaaS 
v2 members ([#10875](https://github.com/hashicorp/terraform/issues/10875)) - * provider/rancher: Add 'finishing-upgrade' state to rancher stack ([#11019](https://github.com/hashicorp/terraform/issues/11019)) - - -## 0.8.2 (December 21, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * `aws_lambda_function` Please note that `runtime` is now a required field as AWS have deprecated the use of nodejs 0.10 in lambda functions ([#9724](https://github.com/hashicorp/terraform/issues/9724)) - -FEATURES: - - * **New Provider:** `New Relic` ([#10317](https://github.com/hashicorp/terraform/issues/10317)) - * **New Resource:** `aws_ses_configuration_set` ([#10735](https://github.com/hashicorp/terraform/issues/10735)) - * **New Resource:** `aws_ses_event_destination` ([#10735](https://github.com/hashicorp/terraform/issues/10735)) - * **New Resource:** `azurerm_redis_cache` ([#10184](https://github.com/hashicorp/terraform/issues/10184)) - * **New Resource:** `ultradns_dirpool` ([#9788](https://github.com/hashicorp/terraform/issues/9788)) - * **New Resource:** `ultradns_probe_http` ([#9788](https://github.com/hashicorp/terraform/issues/9788)) - * **New Resource:** `ultradns_probe_ping` ([#9788](https://github.com/hashicorp/terraform/issues/9788)) - * **New Resource:** `ultradns_record` ([#9788](https://github.com/hashicorp/terraform/issues/9788)) - * **New Resource:** `ultradns_tcpool` ([#9788](https://github.com/hashicorp/terraform/issues/9788)) - * **New Data Source:** `aws_iam_account_alias` ([#10804](https://github.com/hashicorp/terraform/issues/10804)) - -IMPROVEMENTS: - - * provider/aws: Add support for BinaryMediaTypes and ContentHandling to AWS API Gateway ([#10776](https://github.com/hashicorp/terraform/issues/10776)) - * provider/aws: Deprecated aws_lambda_function nodejs runtime in favor of nodejs4.3 ([#9724](https://github.com/hashicorp/terraform/issues/9724)) - * provider/aws: Support updating of aws_db_instance db_subnet_group_name 
([#10818](https://github.com/hashicorp/terraform/issues/10818)) - * provider/aws: Allow update to RDS password when restoring from snapshot ([#8622](https://github.com/hashicorp/terraform/issues/8622)) - * provider/azurerm: add support for tags to dns_zone ([#10750](https://github.com/hashicorp/terraform/issues/10750)) - * provider/pagerduty: pagerduty_schedule - support for start_day_of_week (schedule restriction) ([#10069](https://github.com/hashicorp/terraform/issues/10069)) - * state/remote/swift: add support for token authentication ([#10866](https://github.com/hashicorp/terraform/issues/10866)) - -BUG FIXES: - - * core: Improve validation for provider aliases to allow inheritance in modules. ([#10807](https://github.com/hashicorp/terraform/issues/10807)) - * core: Math operations always prefer floating point if an argument is floating point. ([#10886](https://github.com/hashicorp/terraform/issues/10886)) - * core: Strings are implicitly converted to integers/floats for comparison. ([#10886](https://github.com/hashicorp/terraform/issues/10886)) - * provider/aws: Fixed crash in `data_source_ami` with empty `owner` value ([#10763](https://github.com/hashicorp/terraform/issues/10763)) - * provider/aws: Require `master_username` and `master_password` if no snapshot given in Redshift Cluster ([#9837](https://github.com/hashicorp/terraform/issues/9837)) - * provider/azurerm: fix network_interface.ip_configuration hash for load balancers ([#10834](https://github.com/hashicorp/terraform/issues/10834)) - * provider/docker: Fix regression, 'cert_path' stopped working ([#10801](https://github.com/hashicorp/terraform/issues/10801)) - * provider/google: Use node_version during google_container_cluster creation ([#10817](https://github.com/hashicorp/terraform/issues/10817)) - * provider/openstack: Handle Volume Creation Errors ([#10821](https://github.com/hashicorp/terraform/issues/10821)) - -## 0.8.1 (December 14, 2016) - -IMPROVEMENTS: - - * provider/aws: Support eu-west-2 
([#10470](https://github.com/hashicorp/terraform/issues/10470)) - * provider/aws: Improved the SNS topic subscription protocols validation ([#10704](https://github.com/hashicorp/terraform/issues/10704)) - * providers/google: Add subnetwork_project field to enable cross-project networking ([#9662](https://github.com/hashicorp/terraform/issues/9662)) - * provider/pagerduty: Allow 'team_responder' role for pagerduty_user resource ([#10728](https://github.com/hashicorp/terraform/issues/10728)) - -BUG FIXES: - - * core: Handle whitespace around the key in the `-var` flag. ([#10717](https://github.com/hashicorp/terraform/issues/10717)) - * core: `terraform` block works in the presence of `_override` files ([#10715](https://github.com/hashicorp/terraform/issues/10715)) - * core: Fix error when a provider in a module only referenced a variable ([#10719](https://github.com/hashicorp/terraform/issues/10719)) - * core: Destroy ordering for resources that depend on each other across modules is correct ([#745](https://github.com/hashicorp/terraform/issues/745)) - -DEPRECATION REMOVALS: - - * provider/aws: Removed deprecated `parameter_group` from `aws_rds_cluster` ([#10733](https://github.com/hashicorp/terraform/issues/10733)) - -## 0.8.0 (December 13, 2016) - -**This is the complete 0.7.13 to 0.8 CHANGELOG. Below this section we -also have a 0.8.0-rc3 to 0.8.0 final CHANGELOG.** - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * `template_file` _inline_ templates must escape their variable usage. What - was previously `${foo}` must now be `$${foo}`. Note that this is only - for _inline_ templates. Templates read from files are unchanged. ([#9698](https://github.com/hashicorp/terraform/issues/9698)) - * Escape sequences used to require double-escaping when used within interpolations. - You now must only escape once (which is the expected/typical behavior). - For example: `${replace(var.foo, "\\", "\\\\")}` is correct. Before, - that would cause very strange behavior. 
However, this may break existing - configurations which found a level of escape sequences to work. Check - `terraform plan` for incorrect output. - * Math operators now follow the standard order of operations: *, /, % followed - by +, -. See the updated interpolation docs for more information. You can - continue to force ordering with parentheses. - * Strings in configuration can no longer contain unescaped newlines. For - unescaped newlines, heredocs must be used - - * provider/aws: Anywhere where we can specify kms_key_id must now be a valid KMS Key ID ARN to stop continual diffs - * provider/chef: The chef provider now accepts `key_material` as an alternative to - `private_key_pem`. The `private_key_pem` attribute will be deprecated in a - future release - * provider/postgres: `ssl_mode` has been renamed `sslmode` to match common usage ([#10682](https://github.com/hashicorp/terraform/issues/10682)) - -DEPRECATION REMOVALS: - - * The `template_file` resource no longer accepts a direct file path for the - `template` attribute. You may either specify a path wrapped in a `file` - function or specify a file path with the `filepath` attribute. This was - deprecated during 0.7.x. - -FEATURES: - - * **New command:** `terraform console`, an interactive console for experimenting - with and using interpolations. ([#10093](https://github.com/hashicorp/terraform/issues/10093)) - * **Terraform version requirement in configuration.** You can now specify - a Terraform version requirement in configuration and modules. ([#10080](https://github.com/hashicorp/terraform/issues/10080)) - * **Conditional values:** You can now use conditionals to determine the values - of attributes. For example: `count = "${var.env == "prod" ? 1 : 0}"`. - * **`depends_on` can reference modules.** This allows a resource or output - to depend on everything within a module. 
([#10076](https://github.com/hashicorp/terraform/issues/10076)) - * **`output` supports `depends_on`.** This is useful when the output depends - on a certain ordering to happen that can't be represented with interpolations. - ([#10072](https://github.com/hashicorp/terraform/issues/10072)) - * Providers and resources are now notified by Terraform core to "stop" when - an interrupt is received, allowing resources to gracefully exit much, much - faster. ([#9607](https://github.com/hashicorp/terraform/issues/9607)) - * The `import` command can now specify a provider alias to use. ([#10310](https://github.com/hashicorp/terraform/issues/10310)) - * The `import` command will now read provider configuration from Terraform - configuration files (including loading tfvars files and so on). - ([#9809](https://github.com/hashicorp/terraform/issues/9809)) - - * **New Provider:** `external` ([#8768](https://github.com/hashicorp/terraform/issues/8768)) - * **New Provider:** `nomad` ([#9538](https://github.com/hashicorp/terraform/issues/9538)) - * **New Provider:** `rancher` ([#9173](https://github.com/hashicorp/terraform/issues/9173)) - * **New Provider:** `vault` ([#9158](https://github.com/hashicorp/terraform/issues/9158)) - * **New Provider:** `Icinga2` ([#8306](https://github.com/hashicorp/terraform/issues/8306)) - * **New Resource:** `aws_ebs_snapshot` ([#10017](https://github.com/hashicorp/terraform/issues/10017)) - * **New Resource:** `aws_lightsail_domain` ([#10637](https://github.com/hashicorp/terraform/issues/10637)) - * **New Resource:** `aws_lightsail_key_pair` ([#10583](https://github.com/hashicorp/terraform/issues/10583)) - * **New Resource:** `aws_lightsail_instance` ([#10473](https://github.com/hashicorp/terraform/issues/10473)) - * **New Resource:** `aws_opsworks_rds_db_instance` ([#10294](https://github.com/hashicorp/terraform/issues/10294)) - * **New Resource:** `aws_snapshot_create_volume_permission` ([#9891](https://github.com/hashicorp/terraform/issues/9891)) 
- * **New Resource:** `aws_vpc_endpoint_route_table_association` ([#10137](https://github.com/hashicorp/terraform/issues/10137)) - * **New Resource:** `google_compute_health_check` ([#10453](https://github.com/hashicorp/terraform/issues/10453)) - * **New Resource:** `google_compute_region_backend_service` ([#10453](https://github.com/hashicorp/terraform/issues/10453)) - * **New Resource:** `openstack_blockstorage_volume_attach_v2` ([#10259](https://github.com/hashicorp/terraform/issues/10259)) - * **New Resource:** `openstack_compute_volume_attach_v2` ([#10260](https://github.com/hashicorp/terraform/issues/10260)) - * **New Data Source:** `aws_ebs_snapshot` ([#10017](https://github.com/hashicorp/terraform/issues/10017)) - * **New Data Source:** `aws_eip` ([#9833](https://github.com/hashicorp/terraform/issues/9833)) - * **New Data Source:** `aws_iam_server_certificate` ([#10558](https://github.com/hashicorp/terraform/issues/10558)) - * **New Data Source:** `aws_route_table` ([#10301](https://github.com/hashicorp/terraform/issues/10301)) - * **New Data Source:** `aws_route53_zone` ([#9766](https://github.com/hashicorp/terraform/issues/9766)) - * **New Data Source:** `aws_vpc_endpoint_services` ([#10261](https://github.com/hashicorp/terraform/issues/10261)) - * **New Data Source:** `pagerduty_user` ([#10541](https://github.com/hashicorp/terraform/issues/10541)) - * **New Interpolation Function:** `timestamp` ([#10475](https://github.com/hashicorp/terraform/issues/10475)) - * core: allow outputs to have descriptions ([#9722](https://github.com/hashicorp/terraform/issues/9722)) - * state/azure: support passing of lease ID when writing storage blob ([#10115](https://github.com/hashicorp/terraform/issues/10115)) - -IMPROVEMENTS: - - * core: Human-friendly error when a computed count is used. ([#10060](https://github.com/hashicorp/terraform/issues/10060)) - * core: Maps across multiple input sources (files, CLI, env vars) are merged. 
([#10654](https://github.com/hashicorp/terraform/issues/10654)) - * core: SIGTERM also triggers graceful shutdown in addition to SIGINT ([#10534](https://github.com/hashicorp/terraform/issues/10534)) - * core: Plan will show deposed-only destroys for create-before-destroy resources. ([#10404](https://github.com/hashicorp/terraform/issues/10404)) - * command/plan: Show warning when a plan file is given as input to make behavior clear. ([#10639](https://github.com/hashicorp/terraform/issues/10639)) - * helper/schema: only map, list, and set elements that are actually causing - a resource to destroy/create are marked as "requires new". ([#9613](https://github.com/hashicorp/terraform/issues/9613)) - * provider/aws: Add support for AWS CA Central 1 Region ([#10618](https://github.com/hashicorp/terraform/issues/10618)) - * provider/aws: Allow importing of aws_iam_role, aws_iam_role_policy and aws_iam_policy ([#9398](https://github.com/hashicorp/terraform/issues/9398)) - * provider/aws: Added s3 bucket region attribute management ([#10482](https://github.com/hashicorp/terraform/issues/10482)) - * provider/aws: Added SQS FIFO queues ([#10614](https://github.com/hashicorp/terraform/issues/10614)) - * provider/aws: Addition of suspended_processes to aws_autoscaling_group ([#10096](https://github.com/hashicorp/terraform/issues/10096)) - * provider/aws: added auto_minor_version_upgrade on aws_rds_cluster_instance ([#10284](https://github.com/hashicorp/terraform/issues/10284)) - * provider/aws: Add JSON validation to the aws_iam_policy resource ([#10239](https://github.com/hashicorp/terraform/issues/10239)) - * provider/aws: Support MFA delete for s3 bucket versioning ([#10020](https://github.com/hashicorp/terraform/issues/10020)) - * provider/aws: Enable DeleteOnTermination in ENI when created by spot fleet ([#9922](https://github.com/hashicorp/terraform/issues/9922)) - * provider/aws: Enforced kms_key_* attributes to be ARNs 
([#10356](https://github.com/hashicorp/terraform/issues/10356)) - * provider/aws: IPv6 Support To Cloudfront ([#10332](https://github.com/hashicorp/terraform/issues/10332)) - * provider/aws: Support import of aws_iam_instance_profile ([#10436](https://github.com/hashicorp/terraform/issues/10436)) - * provider/aws: Increase `aws_emr_cluster` timeout ([#10444](https://github.com/hashicorp/terraform/issues/10444)) - * provider/aws: Support Automatic Rollback of CodeDeploy deployments and CloudWatch Alarms for a Deployment Group ([#9039](https://github.com/hashicorp/terraform/issues/9039)) - * provider/aws: Add support for termination protection and autotermination to EMR ([#10252](https://github.com/hashicorp/terraform/issues/10252)) - * provider/aws: Add "no_device" support to ephemeral block devices ([#10547](https://github.com/hashicorp/terraform/issues/10547)) - * provider/aws: Added S3 Bucket replication ([#10552](https://github.com/hashicorp/terraform/issues/10552)) - * provider/aws: Add `pgp_key` to `aws_iam_access_key` to protect key. 
([#10615](https://github.com/hashicorp/terraform/issues/10615)) - * provider/azurerm: make DiskSizeGB optional for azurerm_virtual_machine data_disks ([#10232](https://github.com/hashicorp/terraform/issues/10232)) - * provider/azurerm support `license_type` virtual_machine property ([#10539](https://github.com/hashicorp/terraform/issues/10539)) - * provider/azurerm: support import of routes, fix route_table ([#10389](https://github.com/hashicorp/terraform/issues/10389)) - * provider/azurerm: enable import of more resources ([#10195](https://github.com/hashicorp/terraform/issues/10195)) - * provider/azurerm: create common schema for location field, add diff suppress ([#10409](https://github.com/hashicorp/terraform/issues/10409)) - * provider/chef: Migrate Chef to use KEY_MATERIAL rather than using a Pem file ([#10105](https://github.com/hashicorp/terraform/issues/10105)) - * provider/cloudstack: Add option to set a custom `network_domain` for `cloudstack_network` ([#10638](https://github.com/hashicorp/terraform/issues/10638)) - * provider/cloudstack: Support using secondary IP addresses with the `cloudstack_static_nat` resource ([#10420](https://github.com/hashicorp/terraform/issues/10420)) - * provider/cloudstack: Support using secondary IP addresses with the `cloudstack_port_forward` resource ([#10638](https://github.com/hashicorp/terraform/issues/10638)) - * provider/datadog: Make monitor thresholds optional. 
([#10526](https://github.com/hashicorp/terraform/issues/10526)) - * provider/datadog: Improve datadog timeboard support ([#10027](https://github.com/hashicorp/terraform/issues/10027)) - * provider/docker: Upload files into container before first start ([#9520](https://github.com/hashicorp/terraform/issues/9520)) - * provider/docker: authentication via values instead of files ([#10151](https://github.com/hashicorp/terraform/issues/10151)) - * provider/fastly add origin shielding ([#10677](https://github.com/hashicorp/terraform/issues/10677)) - * provider/fastly: add ssl_hostname option ([#9629](https://github.com/hashicorp/terraform/issues/9629)) - * provider/github: supports importing resources ([#10382](https://github.com/hashicorp/terraform/issues/10382)) - * provider/google: Add support for Internal Load Balancing ([#10453](https://github.com/hashicorp/terraform/issues/10453)) - * provider/google: Add Service Accounts resource ([#9946](https://github.com/hashicorp/terraform/issues/9946)) - * provider/google: Instances and templates now both support `metadata_startup_script` and `metadata.startup-script`. 
([#10537](https://github.com/hashicorp/terraform/issues/10537)) - * provider/google: Added support for session affinity to compute_backend_service ([#10387](https://github.com/hashicorp/terraform/issues/10387)) - * provider/google: Projects are now importable ([#10469](https://github.com/hashicorp/terraform/issues/10469)) - * provider/google: SSL certificates can now specify prefix instead of a full name ([#10684](https://github.com/hashicorp/terraform/issues/10684)) - * provider/openstack: Add Swauth/Swift Authentication ([#9943](https://github.com/hashicorp/terraform/issues/9943)) - * provider/openstack: Detect Region for Importing Resources ([#10509](https://github.com/hashicorp/terraform/issues/10509)) - * provider/postgresql: Improved support for many PostgreSQL resources ([#10682](https://github.com/hashicorp/terraform/issues/10682)) - * provider/postgresql: Added 'connect_timeout' argument to provider 'postgresql' ([#10380](https://github.com/hashicorp/terraform/issues/10380)) - * provider/rundeck: enable validation for multiple values in an array ([#8913](https://github.com/hashicorp/terraform/issues/8913)) - * provider/rundeck: Add support for scheduler to rundeck_job ([#9449](https://github.com/hashicorp/terraform/issues/9449)) - * state/remote/swift: Add support for versioning state file in swift and expiring versioned state ([#10055](https://github.com/hashicorp/terraform/issues/10055)) - -BUG FIXES: - - * core: Escape sequences in interpolations work in every case. ([#8709](https://github.com/hashicorp/terraform/issues/8709)) - * core: Maps in outputs with computed values are no longer removed. ([#9549](https://github.com/hashicorp/terraform/issues/9549)) - * core: Direct indexing into a computed list no longer errors. ([#10657](https://github.com/hashicorp/terraform/issues/10657)) - * core: Validate fails on invalid keys in `variable` blocks. 
([#10658](https://github.com/hashicorp/terraform/issues/10658)) - * core: Validate that only a single `lifecycle` block exists per resource. ([#10656](https://github.com/hashicorp/terraform/issues/10656)) - * core: When destroying, the resources of a provider that depends on another resource are destroyed first. ([#10659](https://github.com/hashicorp/terraform/issues/10659)) - * core: Catch parse errors for null characters mid-file ([#9134](https://github.com/hashicorp/terraform/issues/9134)) - * core: Remove extra dot from state command backup files ([#10300](https://github.com/hashicorp/terraform/issues/10300)) - * core: Validate data sources do not have provisioners ([#10318](https://github.com/hashicorp/terraform/issues/10318)) - * core: Disable checkpoint settings take effect ([#10206](https://github.com/hashicorp/terraform/issues/10206)) - * core: Changed attribute console output shows up on Windows. ([#10417](https://github.com/hashicorp/terraform/issues/10417)) - * core: Destroying deposed resources in create before destroy waits until the creation step of its specific index. (0.8 regression) ([#10416](https://github.com/hashicorp/terraform/issues/10416)) - * core: Certain invalid configurations will no longer print "illegal". ([#10448](https://github.com/hashicorp/terraform/issues/10448)) - * core: Fix a crash that could occur when multiple deposed instances exist. ([#10504](https://github.com/hashicorp/terraform/issues/10504)) - * core: Fix a diff mismatch error that could happen when a resource depends on a count resource being decreased. ([#10522](https://github.com/hashicorp/terraform/issues/10522)) - * core: On Unix machines if `getent` is not available, fall back to shell to find home dir. ([#10515](https://github.com/hashicorp/terraform/issues/10515)) - * command/fmt: Multiline comments aren't indented every fmt. 
([#6524](https://github.com/hashicorp/terraform/issues/6524)) - * communicator/ssh: Avoid race that could cause parallel remote execs on the same host to overwrite each other ([#10549](https://github.com/hashicorp/terraform/issues/10549)) - * provider/aws: Added Lambda function guard when needed attributes are not set ([#10663](https://github.com/hashicorp/terraform/issues/10663)) - * provider/aws: Allow import of aws_security_groups with more than one source_security_group_id rule ([#9477](https://github.com/hashicorp/terraform/issues/9477)) - * provider/aws: Allow setting the DB Instance name when restoring from a snapshot ([#10664](https://github.com/hashicorp/terraform/issues/10664)) - * provider/aws: Fix issue importing `aws_vpc_peering_connection` ([#10635](https://github.com/hashicorp/terraform/issues/10635)) - * provider/aws: Fixed deletion of aws_api_gateway_base_path_mapping with empty path ([#10177](https://github.com/hashicorp/terraform/issues/10177)) - * provider/aws: Fix issue removing Lambda environment variables ([#10492](https://github.com/hashicorp/terraform/issues/10492)) - * provider/aws: Skip VPC endpoint routes when removing default route table's routes ([#10303](https://github.com/hashicorp/terraform/issues/10303)) - * provider/aws: Do not return a root device for instance store backed AMIs. 
([#9483](https://github.com/hashicorp/terraform/issues/9483)) - * provider/aws: resource_aws_opsworks_application does not accept document_root parameter ([#10477](https://github.com/hashicorp/terraform/issues/10477)) - * provider/aws: bug fix when specifying level on aws_opsworks_permission ([#10394](https://github.com/hashicorp/terraform/issues/10394)) - * provider/aws: cloudfront distribution 404 should mark as gone ([#10281](https://github.com/hashicorp/terraform/issues/10281)) - * provider/aws: Assign correct number of core instances (n-1) to aws-emr-cluster on update ([#10529](https://github.com/hashicorp/terraform/issues/10529)) - * provider/aws: Allow update of Service role on a CodeDeploy deployment group ([#9866](https://github.com/hashicorp/terraform/issues/9866)) - * provider/aws: fixed the api_gw_domain_name replace operation ([#10179](https://github.com/hashicorp/terraform/issues/10179)) - * provider/aws: Forces the API GW domain name certificates to recreate the resource ([#10588](https://github.com/hashicorp/terraform/issues/10588)) - * provider/aws: Validate `effect` in aws_iam_policy_document data source ([#10021](https://github.com/hashicorp/terraform/issues/10021)) - * provider/azurerm: fix virtual_machine reading plan as the wrong type ([#10626](https://github.com/hashicorp/terraform/issues/10626)) - * provider/azurerm: Prevent null reference when reading boot_diagnostics settings in azurerm_virtual_machine ([#10283](https://github.com/hashicorp/terraform/issues/10283)) - * provider/azurerm: azurerm_availability_set not is ForceNew for UpdateDomain and FaultDomain ([#10545](https://github.com/hashicorp/terraform/issues/10545)) - * provider/azurerm: fix servicebus_topic max_size_in_megabytes for premium namespaces ([#10611](https://github.com/hashicorp/terraform/issues/10611)) - * provider/azurerm: set ForceNew for storage image and OS disk of virtual_machine ([#10340](https://github.com/hashicorp/terraform/issues/10340)) - * provider/datadog: 
Refactor monitor tags to a list instead of a map. ([#10570](https://github.com/hashicorp/terraform/issues/10570)) - * provider/datadog 9869: Validate credentials when initialising client. ([#10567](https://github.com/hashicorp/terraform/issues/10567)) - * provider/openstack: More Import and Region Fixes ([#10662](https://github.com/hashicorp/terraform/issues/10662)) - * provider/openstack: Fix Ordering of Port Allowed Address Pairs ([#10250](https://github.com/hashicorp/terraform/issues/10250)) - * provider/template: No file path error when setting template to `/` ([#10297](https://github.com/hashicorp/terraform/issues/10297)) - -## 0.8.0 from 0.8.0-rc3 (December 13, 2016) - -**This only includes changes from 0.8.0-rc3 to 0.8.0 final. The section above -has the complete 0.7.x to 0.8.0 CHANGELOG.** - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/postgres: `ssl_mode` has been renamed `sslmode` to match common usage ([#10682](https://github.com/hashicorp/terraform/issues/10682)) - -FEATURES: - - * **New Provider:** `Icinga2` ([#8306](https://github.com/hashicorp/terraform/issues/8306)) - * **New Resource:** `aws_lightsail_domain` ([#10637](https://github.com/hashicorp/terraform/issues/10637)) - * **New Resource:** `aws_lightsail_key_pair` ([#10583](https://github.com/hashicorp/terraform/issues/10583)) - * **New Resource:** `aws_snapshot_create_volume_permission` ([#9891](https://github.com/hashicorp/terraform/issues/9891)) - * **New Resource:** `google_compute_health_check` ([#10453](https://github.com/hashicorp/terraform/issues/10453)) - * **New Resource:** `google_compute_region_backend_service` ([#10453](https://github.com/hashicorp/terraform/issues/10453)) - * **New Data Source:** `aws_eip` ([#9833](https://github.com/hashicorp/terraform/issues/9833)) - * **New Data Source:** `aws_route53_zone` ([#9766](https://github.com/hashicorp/terraform/issues/9766)) - * **New Data Source:** `aws_vpc_endpoint_services` 
([#10261](https://github.com/hashicorp/terraform/issues/10261)) - -IMPROVEMENTS: - - * command/plan: Show warning when a plan file is given as input to make behavior clear. ([#10639](https://github.com/hashicorp/terraform/issues/10639)) - * core: Maps across multiple input sources (files, CLI, env vars) are merged. ([#10654](https://github.com/hashicorp/terraform/issues/10654)) - * provider/aws: Add support for AWS CA Central 1 Region ([#10618](https://github.com/hashicorp/terraform/issues/10618)) - * provider/aws: Added SQS FIFO queues ([#10614](https://github.com/hashicorp/terraform/issues/10614)) - * provider/aws: Support MFA delete for s3 bucket versioning ([#10020](https://github.com/hashicorp/terraform/issues/10020)) - * provider/aws: Enable DeleteOnTermination in ENI when created by spot fleet ([#9922](https://github.com/hashicorp/terraform/issues/9922)) - * provider/cloudstack: Add option to set a custom `network_domain` for `cloudstack_network` ([#10638](https://github.com/hashicorp/terraform/issues/10638)) - * provider/cloudstack: Support using secondary IP addresses with the `cloudstack_port_forward` resource ([#10638](https://github.com/hashicorp/terraform/issues/10638)) - * provider/fastly add origin shielding ([#10677](https://github.com/hashicorp/terraform/issues/10677)) - * provider/google: Add support for Internal Load Balancing ([#10453](https://github.com/hashicorp/terraform/issues/10453)) - * provider/google: SSL certificates can now specify prefix instead of a full name ([#10684](https://github.com/hashicorp/terraform/issues/10684)) - * provider/postgresql: Improved support for many PostgreSQL resources ([#10682](https://github.com/hashicorp/terraform/issues/10682)) - * provider/rundeck: enable validation for multiple values in an array ([#8913](https://github.com/hashicorp/terraform/issues/8913)) - * provider/rundeck: Add support for scheduler to rundeck_job ([#9449](https://github.com/hashicorp/terraform/issues/9449)) - -BUG FIXES: - - * 
core: Direct indexing into a computed list no longer errors. ([#10657](https://github.com/hashicorp/terraform/issues/10657)) - * core: Validate fails on invalid keys in `variable` blocks. ([#10658](https://github.com/hashicorp/terraform/issues/10658)) - * core: Validate that only a single `lifecycle` block exists per resource. ([#10656](https://github.com/hashicorp/terraform/issues/10656)) - * core: When destroying, the resources of a provider that depends on another resource are destroyed first. ([#10659](https://github.com/hashicorp/terraform/issues/10659)) - * provider/aws: Added Lambda function guard when needed attributes are not set ([#10663](https://github.com/hashicorp/terraform/issues/10663)) - * provider/aws: Allow import of aws_security_groups with more than one source_security_group_id rule ([#9477](https://github.com/hashicorp/terraform/issues/9477)) - * provider/aws: Allow setting the DB Instance name when restoring from a snapshot ([#10664](https://github.com/hashicorp/terraform/issues/10664)) - * provider/aws: Fix issue importing `aws_vpc_peering_connection` ([#10635](https://github.com/hashicorp/terraform/issues/10635)) - * provider/aws: Fixed deletion of aws_api_gateway_base_path_mapping with empty path ([#10177](https://github.com/hashicorp/terraform/issues/10177)) - * provider/aws: Fix issue removing Lambda environment variables ([#10492](https://github.com/hashicorp/terraform/issues/10492)) - * provider/azurerm: fix virtual_machine reading plan as the wrong type ([#10626](https://github.com/hashicorp/terraform/issues/10626)) - * provider/azurerm: set ForceNew for storage image and OS disk of virtual_machine ([#10340](https://github.com/hashicorp/terraform/issues/10340)) - * provider/openstack: More Import and Region Fixes ([#10662](https://github.com/hashicorp/terraform/issues/10662)) - -## 0.8.0-rc3 (December 8, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * Variable, resource, provider, and module names may no longer start with - a number 
or hyphen. Please see the upgrade guide for more information. - -FEATURES: - - * **New Provider:** `external` ([#8768](https://github.com/hashicorp/terraform/issues/8768)) - * **New Provider:** `Rancher` ([#9173](https://github.com/hashicorp/terraform/issues/9173)) - * **New Data Source:** `aws_iam_server_certificate` ([#10558](https://github.com/hashicorp/terraform/issues/10558)) - * **New Data Source:** `pagerduty_user` ([#10541](https://github.com/hashicorp/terraform/issues/10541)) - * **New Resource:** `aws_opsworks_rds_db_instance` ([#10294](https://github.com/hashicorp/terraform/issues/10294)) - * **New Resource:** `aws_vpc_endpoint_route_table_association` ([#10137](https://github.com/hashicorp/terraform/issues/10137)) - * **New Resource:** `aws_lightsail_instance` ([#10473](https://github.com/hashicorp/terraform/issues/10473)) - -IMPROVEMENTS: - - * core: SIGTERM also triggers graceful shutdown in addition to SIGINT ([#10534](https://github.com/hashicorp/terraform/issues/10534)) - * provider/aws: Add support for termination protection and autotermination to EMR ([#10252](https://github.com/hashicorp/terraform/issues/10252)) - * provider/aws: Add "no_device" support to ephemeral block devices ([#10547](https://github.com/hashicorp/terraform/issues/10547)) - * provider/aws: Added S3 Bucket replication ([#10552](https://github.com/hashicorp/terraform/issues/10552)) - * provider/aws: Add `pgp_key` to `aws_iam_access_key` to protect key. ([#10615](https://github.com/hashicorp/terraform/issues/10615)) - * provider/azurerm: make DiskSizeGB optional for azurerm_virtual_machine data_disks ([#10232](https://github.com/hashicorp/terraform/issues/10232)) - * provider/azurerm support `license_type` virtual_machine property ([#10539](https://github.com/hashicorp/terraform/issues/10539)) - * provider/datadog: Make monitor thresholds optional. 
([#10526](https://github.com/hashicorp/terraform/issues/10526)) - * provider/datadog: Improve datadog timeboard support ([#10027](https://github.com/hashicorp/terraform/issues/10027)) - * provider/docker: Upload files into container before first start ([#9520](https://github.com/hashicorp/terraform/issues/9520)) - * provider/fastly: add ssl_hostname option ([#9629](https://github.com/hashicorp/terraform/issues/9629)) - * provider/openstack: Detect Region for Importing Resources ([#10509](https://github.com/hashicorp/terraform/issues/10509)) - * provider/google: Instances and templates now both support `metadata_startup_script` and `metadata.startup-script`. ([#10537](https://github.com/hashicorp/terraform/issues/10537)) - -BUG FIXES: - - * core: Fix a diff mismatch error that could happen when a resource depends on a count resource being decreased. ([#10522](https://github.com/hashicorp/terraform/issues/10522)) - * core: On Unix machines if `getent` is not available, fall back to shell to find home dir. 
([#10515](https://github.com/hashicorp/terraform/issues/10515)) - * communicator/ssh: Avoid race that could cause parallel remote execs on the same host to overwrite each other ([#10549](https://github.com/hashicorp/terraform/issues/10549)) - * provider/aws: cloudfront distribution 404 should mark as gone ([#10281](https://github.com/hashicorp/terraform/issues/10281)) - * provider/aws: Assign correct number of core instances (n-1) to aws-emr-cluster on update ([#10529](https://github.com/hashicorp/terraform/issues/10529)) - * provider/aws: Allow update of Service role on a CodeDeploy deployment group ([#9866](https://github.com/hashicorp/terraform/issues/9866)) - * provider/aws: fixed the api_gw_domain_name replace operation ([#10179](https://github.com/hashicorp/terraform/issues/10179)) - * provider/aws: Forces the API GW domain name certificates to recreate the resource ([#10588](https://github.com/hashicorp/terraform/issues/10588)) - * provider/aws: Validate `effect` in aws_iam_policy_document data source ([#10021](https://github.com/hashicorp/terraform/issues/10021)) - * provider/azurerm: azurerm_availability_set is now ForceNew for UpdateDomain and FaultDomain ([#10545](https://github.com/hashicorp/terraform/issues/10545)) - * provider/azurerm: fix servicebus_topic max_size_in_megabytes for premium namespaces ([#10611](https://github.com/hashicorp/terraform/issues/10611)) - * provider/datadog: Refactor monitor tags to a list instead of a map. ([#10570](https://github.com/hashicorp/terraform/issues/10570)) - * provider/datadog 9869: Validate credentials when initialising client. ([#10567](https://github.com/hashicorp/terraform/issues/10567)) - * provider/openstack: Fix Ordering of Port Allowed Address Pairs ([#10250](https://github.com/hashicorp/terraform/issues/10250)) - -## 0.8.0-rc2 (December 2, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * Strings in configuration can no longer contain unescaped newlines. 
For unescaped newlines, heredocs must be used - * provider/aws: Anywhere where we can specify kms_key_id must now be a valid KMS Key ID ARN to stop continual diffs - -FEATURES: - - * **New DataSource:** `aws_route_table` ([#10301](https://github.com/hashicorp/terraform/issues/10301)) - * **New Interpolation Function:** `timestamp` ([#10475](https://github.com/hashicorp/terraform/issues/10475)) - -IMPROVEMENTS: - - * core: Plan will show deposed-only destroys for create-before-destroy resources. ([#10404](https://github.com/hashicorp/terraform/issues/10404)) - * provider/aws: Enforced kms_key_* attributes to be ARNs ([#10356](https://github.com/hashicorp/terraform/issues/10356)) - * provider/aws: IPv6 Support To Cloudfront ([#10332](https://github.com/hashicorp/terraform/issues/10332)) - * provider/aws: Support import of aws_iam_instance_profile ([#10436](https://github.com/hashicorp/terraform/issues/10436)) - * provider/aws: Increase `aws_emr_cluster` timeout ([#10444](https://github.com/hashicorp/terraform/issues/10444)) - * provider/aws: Support Automatic Rollback of CodeDeploy deployments and CloudWatch Alarms for a Deployment Group ([#9039](https://github.com/hashicorp/terraform/issues/9039)) - * provider/aws: Allow importing of aws_iam_role, aws_iam_role_policy and aws_iam_policy ([#9398](https://github.com/hashicorp/terraform/issues/9398)) - * provider/aws: Added s3 bucket region attribute management ([#10482](https://github.com/hashicorp/terraform/issues/10482)) - * provider/azurerm: support import of routes, fix route_table ([#10389](https://github.com/hashicorp/terraform/issues/10389)) - * provider/azurerm: create common schema for location field, add diff suppress ([#10409](https://github.com/hashicorp/terraform/issues/10409)) - * provider/github: supports importing resources ([#10382](https://github.com/hashicorp/terraform/issues/10382)) - * provider/postgresql: Added 'connect_timeout' argument to provider 'postgresql' 
([#10380](https://github.com/hashicorp/terraform/issues/10380)) - * provider/cloudstack: Support using secondary IP addresses with the `cloudstack_static_nat` resource ([#10420](https://github.com/hashicorp/terraform/issues/10420)) - * provider/google: Added support for session affinity to compute_backend_service ([#10387](https://github.com/hashicorp/terraform/issues/10387)) - * provider/google: Projects are now importable ([#10469](https://github.com/hashicorp/terraform/issues/10469)) - -BUG FIXES: - - * core: Changed attribute console output shows up on Windows. ([#10417](https://github.com/hashicorp/terraform/issues/10417)) - * core: Destroying deposed resources in create before destroy waits until the creation step of its specific index. (0.8 regression) ([#10416](https://github.com/hashicorp/terraform/issues/10416)) - * core: Certain invalid configurations will no longer print "illegal". ([#10448](https://github.com/hashicorp/terraform/issues/10448)) - * core: Fix a crash that could occur when multiple deposed instances exist. ([#10504](https://github.com/hashicorp/terraform/issues/10504)) - * command/console: variable access works ([#10446](https://github.com/hashicorp/terraform/issues/10446)) - * provider/aws: Do not return a root device for instance store backed AMIs. ([#9483](https://github.com/hashicorp/terraform/issues/9483)) - * provider/aws: resource_aws_opsworks_application does not accept document_root parameter ([#10477](https://github.com/hashicorp/terraform/issues/10477)) - * provider/aws: bug fix when specifying level on aws_opsworks_permission ([#10394](https://github.com/hashicorp/terraform/issues/10394)) - -## 0.8.0-rc1 (November 23, 2016) - -BASED ON: 0.7.13 (includes any changes up to that point as well) - -**Please read prior beta notes, as those are also included. 
The 0.8 changes -will be coalesced for a 0.8 final, but will remain separate for the pre-release -period.** - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * The chef provider now accepts `key_material` as an alternative to `private_key_pem`. The `private_key_pem` attribute will be deprecated in a future release - * The `template_file` resource no longer accepts a direct file path for the `template` attribute. You may either specify a path wrapped in a `file` function or specify a file path with the `filepath` attribute. This was deprecated during 0.7.x. - -FEATURES: - * core: allow outputs to have descriptions ([#9722](https://github.com/hashicorp/terraform/issues/9722)) - * state/azure: support passing of lease ID when writing storage blob ([#10115](https://github.com/hashicorp/terraform/issues/10115)) - * **New Resource:** `aws_ebs_snapshot` ([#10017](https://github.com/hashicorp/terraform/issues/10017)) - * **New Resource:** `openstack_blockstorage_volume_attach_v2` ([#10259](https://github.com/hashicorp/terraform/issues/10259)) - * **New Resource:** `openstack_compute_volume_attach_v2` ([#10260](https://github.com/hashicorp/terraform/issues/10260)) - * **New Data Source:** `aws_ebs_snapshot` ([#10017](https://github.com/hashicorp/terraform/issues/10017)) - * The `import` command can now specify a provider alias to use. 
([#10310](https://github.com/hashicorp/terraform/issues/10310)) - -IMPROVEMENTS: - - * provider/aws: Addition of suspended_processes to aws_autoscaling_group ([#10096](https://github.com/hashicorp/terraform/issues/10096)) - * provider/aws: added auto_minor_version_upgrade on aws_rds_cluster_instance ([#10284](https://github.com/hashicorp/terraform/issues/10284)) - * provider/aws: Add JSON validation to the aws_iam_policy resource ([#10239](https://github.com/hashicorp/terraform/issues/10239)) - * provider/azurerm: enable import of more resources ([#10195](https://github.com/hashicorp/terraform/issues/10195)) - * provider/chef: Migrate Chef to use KEY_MATERIAL rather than using a Pem file ([#10105](https://github.com/hashicorp/terraform/issues/10105)) - * provider/docker: authentication via values instead of files ([#10151](https://github.com/hashicorp/terraform/issues/10151)) - * provider/google: Add Service Accounts resource ([#9946](https://github.com/hashicorp/terraform/issues/9946)) - * provider/nomad: Update to support Nomad 0.5.0 - * provider/openstack: Add Swauth/Swift Authentication ([#9943](https://github.com/hashicorp/terraform/issues/9943)) - * state/remote/swift: Add support for versioning state file in swift and expiring versioned state ([#10055](https://github.com/hashicorp/terraform/issues/10055)) - -BUG FIXES: - - * core: Catch parse errors for null characters mid-file ([#9134](https://github.com/hashicorp/terraform/issues/9134)) - * core: escape sequence for " works (0.8 beta regression) ([#10236](https://github.com/hashicorp/terraform/issues/10236)) - * core: Terraform starts on Windows (0.8 beta2 regression) ([#10266](https://github.com/hashicorp/terraform/issues/10266)) - * core: Remove extra dot from state command backup files ([#10300](https://github.com/hashicorp/terraform/issues/10300)) - * core: Validate data sources do not have provisioners ([#10318](https://github.com/hashicorp/terraform/issues/10318)) - * core: Disable checkpoint 
settings take effect ([#10206](https://github.com/hashicorp/terraform/issues/10206)) - * provider/aws: Skip VPC endpoint routes when removing default route table's routes ([#10303](https://github.com/hashicorp/terraform/issues/10303)) - * provider/azurerm: Prevent null reference when reading boot_diagnostics settings in azurerm_virtual_machine ([#10283](https://github.com/hashicorp/terraform/issues/10283)) - * provider/template: No file path error when setting template to `/` ([#10297](https://github.com/hashicorp/terraform/issues/10297)) - -PLUGIN CHANGES: - - * The protocol version has been incremented, requiring all plugins for - 0.8 to be built with 0.8 sources (or newer). This should only require - a simple recompile for compatibility. - -## 0.8.0-beta2 (November 16, 2016) - -BASED ON: 0.7.11 (includes any changes up to that point as well) - -**Please read prior beta notes, as those are also included. The 0.8 changes -will be coalesced for a 0.8 final, but will remain separate for the pre-release -period.** - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * Math operators now follow the standard order of operations: *, /, % followed - by +, -. See the updated interpolation docs for more information. You can - continue to force ordering with parentheses. - -FEATURES: - - * **New command:** `terraform console`, an interactive console for experimenting - with and using interpolations. ([#10093](https://github.com/hashicorp/terraform/issues/10093)) - * **Terraform version requirement in configuration.** You can now specify - a Terraform version requirement in configuration and modules. ([#10080](https://github.com/hashicorp/terraform/issues/10080)) - * **`depends_on` can reference modules.** This allows a resource or output - to depend on everything within a module. 
([#10076](https://github.com/hashicorp/terraform/issues/10076)) - * **`output` supports `depends_on`.** This is useful when the output depends - on a certain ordering to happen that can't be represented with interpolations. - ([#10072](https://github.com/hashicorp/terraform/issues/10072)) - -## 0.8.0-beta1 (November 11, 2016) - -BASED ON: 0.7.10 (includes any changes up to that point as well) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * `template_file` _inline_ templates must escape their variable usage. What - was previously `${foo}` must now be `$${foo}`. Note that this is only - for _inline_ templates. Templates read from files are unchanged. ([#9698](https://github.com/hashicorp/terraform/issues/9698)) - * Escape sequences used to require double-escaping when used within interpolations. - You now must only escape once (which is the expected/typical behavior). - For example: `${replace(var.foo, "\\", "\\\\")}` is correct. Before, - that would cause very strange behavior. However, this may break existing - configurations which found a level of escape sequences to work. Check - `terraform plan` for incorrect output. - -FEATURES: - - * **New provider:** `nomad` ([#9538](https://github.com/hashicorp/terraform/issues/9538)) - * **New provider:** `vault` ([#9158](https://github.com/hashicorp/terraform/issues/9158)) - * The `import` command will now read provider configuration from Terraform - configuration files (including loading tfvars files and so on). ([#9809](https://github.com/hashicorp/terraform/issues/9809)) - * Providers and resources are now notified by Terraform core to "stop" when - an interrupt is received, allowing resources to gracefully exit much, much - faster. ([#9607](https://github.com/hashicorp/terraform/issues/9607)) - -IMPROVEMENTS: - - * core: Human-friendly error when a computed count is used. 
([#10060](https://github.com/hashicorp/terraform/issues/10060)) - * helper/schema: only map, list, and set elements that are actually causing - a resource to destroy/create are marked as "requires new". ([#9613](https://github.com/hashicorp/terraform/issues/9613)) - -BUG FIXES: - - * core: Escape sequences in interpolations work in every case. ([#8709](https://github.com/hashicorp/terraform/issues/8709)) - * core: Maps in outputs with computed values are no longer removed. ([#9549](https://github.com/hashicorp/terraform/issues/9549)) - * command/fmt: Multiline comments aren't indented every fmt. ([#6524](https://github.com/hashicorp/terraform/issues/6524)) - -## 0.7.13 (November 23, 2016) - -BUG FIXES: - - * core: New graph records dependencies for explicit self references ([#10319](https://github.com/hashicorp/terraform/issues/10319)) - -## 0.7.12 (November 22, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/cloudstack: `cloudstack_static_nat` has now deprecated `network_id` ([#10204](https://github.com/hashicorp/terraform/issues/10204)) - -FEATURES: - - * *New Data Source:* `aws_alb_listener` ([#10181](https://github.com/hashicorp/terraform/issues/10181)) - * *New Resource:* `github_label` ([#10213](https://github.com/hashicorp/terraform/issues/10213)) - -IMPROVEMENTS: - - * core: Experimental feature failures are less verbose. 
([#10276](https://github.com/hashicorp/terraform/issues/10276)) - * provider/aws: Add name_prefix to aws_iam_policy ([#10178](https://github.com/hashicorp/terraform/issues/10178)) - * provider/aws: Add ability to select aws_prefix_list data source by name ([#10248](https://github.com/hashicorp/terraform/issues/10248)) - * provider/aws Return service CIDR blocks from aws_vpc_endpoint resource ([#10254](https://github.com/hashicorp/terraform/issues/10254)) - * provider/aws: Added `environment` configuration for AWS Lambda Functions ([#10275](https://github.com/hashicorp/terraform/issues/10275)) - -BUG FIXES: - - * core: Fix potential crashing race condition on state write ([#10277](https://github.com/hashicorp/terraform/issues/10277)) - * core: Data sources in modules lose their `data.` prefix when moved within the state ([#9996](https://github.com/hashicorp/terraform/issues/9996)) - * provider/aws: Fixed issue with `enable_dns_support` on creation in `aws_vpc` ([#10171](https://github.com/hashicorp/terraform/issues/10171)) - * provider/aws: Add CertificateNotFound retry waiter to aws_alb_listener ([#10180](https://github.com/hashicorp/terraform/issues/10180)) - * provider/aws: Remove IAM user's MFA devices with `force_destroy` ([#10262](https://github.com/hashicorp/terraform/issues/10262)) - * provider/scaleway: improve volume attachment ([#10084](https://github.com/hashicorp/terraform/issues/10084)) - -## 0.7.11 (November 15, 2016) - -FEATURES: - -IMPROVEMENTS: - - * provider/aws: Expose RDS DB Instance HostedZoneId attribute ([#10000](https://github.com/hashicorp/terraform/issues/10000)) - * provider/aws: Ignore AWS internal tags ([#7454](https://github.com/hashicorp/terraform/issues/7454)) - * provider/aws: Exposed aws_iam_role create_date attribute ([#10091](https://github.com/hashicorp/terraform/issues/10091)) - * provider/aws: Added aws_api_gateway_api_key created_date & last_updated_date attributes ([#9530](https://github.com/hashicorp/terraform/issues/9530)) 
- * provider/aws: Added aws_api_gateway_rest_api created_date attribute ([#9532](https://github.com/hashicorp/terraform/issues/9532)) - * provider/aws: Exposed aws_api_gateway_deployment.created_date attribute ([#9534](https://github.com/hashicorp/terraform/issues/9534)) - * provider/aws: Added `retry_duration` to `redshift_configuration` in `kinesis_firehose_delivery_stream` ([#10113](https://github.com/hashicorp/terraform/issues/10113)) - * provider/azurerm: allow updating load balancer sub-resources ([#10016](https://github.com/hashicorp/terraform/issues/10016)) - * provider/openstack: Instance `user_data` will now detect if input is already Base64-encode ([#9966](https://github.com/hashicorp/terraform/issues/9966)) - -BUG FIXES: - - * core: Fix diff mismatch error on "Destroy: true to false" scenarios. ([#10139](https://github.com/hashicorp/terraform/issues/10139)) - * core: New destroy graph `-target` includes dependencies. ([#10036](https://github.com/hashicorp/terraform/issues/10036)) - * core: New destroy graph creates proper edges through module outputs ([#10068](https://github.com/hashicorp/terraform/issues/10068)) - * core: Fix shadow error when using uuid() ([#10106](https://github.com/hashicorp/terraform/issues/10106)) - * core: Fix an issue where applies with data sources could hang ([#10134](https://github.com/hashicorp/terraform/issues/10134)) - * core: Fix plan operation diff mismatch for computed keys in slices ([#10118](https://github.com/hashicorp/terraform/issues/10118)) - * provider/aws: fix the validation of aws_redshift_cluster database_name ([#10019](https://github.com/hashicorp/terraform/issues/10019)) - * provider/aws: Fix panic in aws_acm_certificate datasource ([#10051](https://github.com/hashicorp/terraform/issues/10051)) - * provider/aws: increase aws_lambda_function timeout ([#10116](https://github.com/hashicorp/terraform/issues/10116)) - * provider/aws: Fixed ES buffering_interval option in `kinesis_firehose_delivery_stream` 
([#10112](https://github.com/hashicorp/terraform/issues/10112)) - -## 0.7.10 (November 9, 2016) - -FEATURES: - - * **New Resource:** `azurerm_eventhub` ([#9889](https://github.com/hashicorp/terraform/issues/9889)) - * **New Resource:** `azurerm_virtual_machine_extension` ([#9962](https://github.com/hashicorp/terraform/issues/9962)) - * **Experimental new plan graph:** `terraform plan` is getting a new graph - creation process for 0.8. This is now available behind a flag `-Xnew-apply` - (on any command). This will become the default in 0.8. There may still be - bugs. ([#9973](https://github.com/hashicorp/terraform/issues/9973)) - -IMPROVEMENTS: - - * provider/aws: Add support for Service Access Security Group in `aws_emr_cluster` ([#9600](https://github.com/hashicorp/terraform/issues/9600)) - * provider/aws: Add Enhanced VPC routing to Redshift ([#9950](https://github.com/hashicorp/terraform/issues/9950)) - * provider/aws: Add key_name_prefix argument to aws_key_pair resource ([#9993](https://github.com/hashicorp/terraform/issues/9993)) - * provider/openstack: Add `value_specs` to `openstack_fw_policy_v1` resource, allowing vendor information ([#9835](https://github.com/hashicorp/terraform/issues/9835)) - * provider/openstack: Add `value_specs` to `openstack_fw_firewall_v1` resource, allowing vendor information ([#9836](https://github.com/hashicorp/terraform/issues/9836)) - * provider/random: The `b64` attribute on `random_id` resources is deprecated, replaced by `b64_url` and `b64_std` ([#9903](https://github.com/hashicorp/terraform/issues/9903)) - -BUG FIXES: - - * core: Splat variables (`foo.*.bar`) are now ordered by count index for deterministic ordering. ([#9883](https://github.com/hashicorp/terraform/issues/9883)) - * core: Prune orphan outputs (in the config but not in the state). ([#9971](https://github.com/hashicorp/terraform/issues/9971)) - * core: New apply graph doesn't prune module variables as aggressively. 
([#9898](https://github.com/hashicorp/terraform/issues/9898)) - * core: New apply graph properly configures providers with aliases. ([#9894](https://github.com/hashicorp/terraform/issues/9894)) - * core: New destroy graph doesn't create edge loops to destroy nodes that reference themselves. ([#9968](https://github.com/hashicorp/terraform/issues/9968)) - * provider/aws: Fix crash when adding EBS volumes to spot fleet request. ([#9857](https://github.com/hashicorp/terraform/issues/9857)) - * provider/aws: Ignore NoSuchEntity error when IAM user does not have login profile ([#9900](https://github.com/hashicorp/terraform/issues/9900)) - * provider/aws: Setting static_routes_only on import of vpn_connection ([#9802](https://github.com/hashicorp/terraform/issues/9802)) - * provider/aws: aws_alb_target_group arn_suffix missing the targetgroup ([#9911](https://github.com/hashicorp/terraform/issues/9911)) - * provider/aws: Fix the validateFunc of aws_elasticache_replication_group ([#9918](https://github.com/hashicorp/terraform/issues/9918)) - * provider/aws: removing toLower when setting aws_db_parameter_group options ([#9820](https://github.com/hashicorp/terraform/issues/9820)) - * provider/aws: Fix panic when passing statuses to aws_acm_certificate ([#9990](https://github.com/hashicorp/terraform/issues/9990)) - * provider/aws: AWS IAM, User and Role allow + in the name ([#9991](https://github.com/hashicorp/terraform/issues/9991)) - * provider/scaleway: retry volume attachment ([#9972](https://github.com/hashicorp/terraform/issues/9972)) - * provider/scaleway: fix `scaleway_image` datasource returning unknown images ([#9899](https://github.com/hashicorp/terraform/issues/9899)) - * provider/google: fix crash when mistakenly configuring disks ([#9942](https://github.com/hashicorp/terraform/issues/9942)) - -## 0.7.9 (November 4, 2016) - -FEATURES: - - * **New Data Source:** `aws_acm_certificate` ([#8359](https://github.com/hashicorp/terraform/issues/8359)) - * **New 
Resource:** `aws_autoscaling_attachment` ([#9146](https://github.com/hashicorp/terraform/issues/9146)) - * **New Resource:** `postgresql_extension` ([#9210](https://github.com/hashicorp/terraform/issues/9210)) - -IMPROVEMENTS: - - * core: Improve shadow graph robustness by catching panics during graph evaluation. ([#9852](https://github.com/hashicorp/terraform/issues/9852)) - * provider/aws: Provide the option to skip_destroy on aws_volume_attachment ([#9792](https://github.com/hashicorp/terraform/issues/9792)) - * provider/aws: Allows aws_alb security_groups to be updated ([#9804](https://github.com/hashicorp/terraform/issues/9804)) - * provider/aws: Add the enable_sni attribute for Route53 health checks. ([#9822](https://github.com/hashicorp/terraform/issues/9822)) - * provider/openstack: Add `value_specs` to openstack_fw_rule_v1 resource, allowing vendor information ([#9834](https://github.com/hashicorp/terraform/issues/9834)) - * state/remote/swift: Enable OpenStack Identity/Keystone v3 authentication ([#9769](https://github.com/hashicorp/terraform/issues/9769)) - * state/remote/swift: Now supports all login/config options that the OpenStack Provider supports ([#9777](https://github.com/hashicorp/terraform/issues/9777)) - -BUG FIXES: - - * core: Provisioners in modules do not crash during `apply` (regression). 
([#9846](https://github.com/hashicorp/terraform/issues/9846)) - * core: Computed bool fields with non-bool values will not crash ([#9812](https://github.com/hashicorp/terraform/issues/9812)) - * core: `formatlist` interpolation function accepts an empty list ([#9795](https://github.com/hashicorp/terraform/issues/9795)) - * core: Validate outputs have a name ([#9823](https://github.com/hashicorp/terraform/issues/9823)) - * core: Validate variables have a name ([#9818](https://github.com/hashicorp/terraform/issues/9818)) - * command/apply: If a partial set of required variables are provided with `-var`, ask for the remainder ([#9794](https://github.com/hashicorp/terraform/issues/9794)) - * command/fmt: Multiline strings aren't erroneously indented ([#9859](https://github.com/hashicorp/terraform/issues/9859)) - * provider/aws: Fix issue setting `certificate_upload_date` in `aws_api_gateway_domain_name` ([#9815](https://github.com/hashicorp/terraform/issues/9815)) - * provider/azurerm: allow storage_account resource with name "$root" ([#9813](https://github.com/hashicorp/terraform/issues/9813)) - * provider/google: fix for looking up project image families ([#9243](https://github.com/hashicorp/terraform/issues/9243)) - * provider/openstack: Don't pass `shared` in FWaaS Policy unless it's set ([#9830](https://github.com/hashicorp/terraform/issues/9830)) - * provider/openstack: openstack_fw_firewall_v1 `admin_state_up` should default to true ([#9832](https://github.com/hashicorp/terraform/issues/9832)) - -PLUGIN CHANGES: - - * Fields in resources can now have both `Optional` and `ConflictsWith` ([#9825](https://github.com/hashicorp/terraform/issues/9825)) - -## 0.7.8 (November 1, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/openstack: The OpenStack provider has switched to the new Gophercloud SDK. - No front-facing changes were made, but please be aware that there might be bugs. - Please report any if found. 
- * `archive_file` is now a data source, instead of a resource ([#8492](https://github.com/hashicorp/terraform/issues/8492)) - -FEATURES: - - * **Experimental new apply graph:** `terraform apply` is getting a new graph - creation process for 0.8. This is now available behind a flag `-Xnew-apply` - (on any command). This will become the default in 0.8. There may still be - bugs. ([#9388](https://github.com/hashicorp/terraform/issues/9388)) - * **Experimental new destroy graph:** `terraform destroy` is also getting - a new graph creation process for 0.8. This is now available behind a flag - `-Xnew-destroy`. This will become the default in 0.8. ([#9527](https://github.com/hashicorp/terraform/issues/9527)) - * **New Provider:** `pagerduty` ([#9022](https://github.com/hashicorp/terraform/issues/9022)) - * **New Resource:** `aws_iam_user_login_profile` ([#9605](https://github.com/hashicorp/terraform/issues/9605)) - * **New Resource:** `aws_waf_ipset` ([#8852](https://github.com/hashicorp/terraform/issues/8852)) - * **New Resource:** `aws_waf_rule` ([#8852](https://github.com/hashicorp/terraform/issues/8852)) - * **New Resource:** `aws_waf_web_acl` ([#8852](https://github.com/hashicorp/terraform/issues/8852)) - * **New Resource:** `aws_waf_byte_match_set` ([#9681](https://github.com/hashicorp/terraform/issues/9681)) - * **New Resource:** `aws_waf_size_constraint_set` ([#9689](https://github.com/hashicorp/terraform/issues/9689)) - * **New Resource:** `aws_waf_sql_injection_match_set` ([#9709](https://github.com/hashicorp/terraform/issues/9709)) - * **New Resource:** `aws_waf_xss_match_set` ([#9710](https://github.com/hashicorp/terraform/issues/9710)) - * **New Resource:** `aws_ssm_activation` ([#9111](https://github.com/hashicorp/terraform/issues/9111)) - * **New Resource:** `azurerm_key_vault` ([#9478](https://github.com/hashicorp/terraform/issues/9478)) - * **New Resource:** `azurerm_storage_share` ([#8674](https://github.com/hashicorp/terraform/issues/8674)) - * **New 
Resource:** `azurerm_eventhub_namespace` ([#9297](https://github.com/hashicorp/terraform/issues/9297)) - * **New Resource:** `cloudstack_security_group` ([#9103](https://github.com/hashicorp/terraform/issues/9103)) - * **New Resource:** `cloudstack_security_group_rule` ([#9645](https://github.com/hashicorp/terraform/issues/9645)) - * **New Resource:** `cloudstack_private_gateway` ([#9637](https://github.com/hashicorp/terraform/issues/9637)) - * **New Resource:** `cloudstack_static_route` ([#9637](https://github.com/hashicorp/terraform/issues/9637)) - * **New DataSource:** `aws_ebs_volume` ([#9753](https://github.com/hashicorp/terraform/issues/9753)) - * **New DataSource:** `aws_prefix_list` ([#9566](https://github.com/hashicorp/terraform/issues/9566)) - * **New DataSource:** `aws_security_group` ([#9604](https://github.com/hashicorp/terraform/issues/9604)) - * **New DataSource:** `azurerm_client_config` ([#9478](https://github.com/hashicorp/terraform/issues/9478)) - * **New Interpolation Function:** `ceil` ([#9692](https://github.com/hashicorp/terraform/issues/9692)) - * **New Interpolation Function:** `floor` ([#9692](https://github.com/hashicorp/terraform/issues/9692)) - * **New Interpolation Function:** `min` ([#9692](https://github.com/hashicorp/terraform/issues/9692)) - * **New Interpolation Function:** `max` ([#9692](https://github.com/hashicorp/terraform/issues/9692)) - * **New Interpolation Function:** `title` ([#9087](https://github.com/hashicorp/terraform/issues/9087)) - * **New Interpolation Function:** `zipmap` ([#9627](https://github.com/hashicorp/terraform/issues/9627)) - -IMPROVEMENTS: - - * provider/aws: No longer require `route_table_ids` list in `aws_vpc_endpoint` resources ([#9357](https://github.com/hashicorp/terraform/issues/9357)) - * provider/aws: Allow `description` in `aws_redshift_subnet_group` to be modified ([#9515](https://github.com/hashicorp/terraform/issues/9515)) - * provider/aws: Add tagging support to aws_redshift_subnet_group 
([#9504](https://github.com/hashicorp/terraform/issues/9504)) - * provider/aws: Add validation to IAM User and Group Name ([#9584](https://github.com/hashicorp/terraform/issues/9584)) - * provider/aws: Add Ability To Enable / Disable ALB AccessLogs ([#9290](https://github.com/hashicorp/terraform/issues/9290)) - * provider/aws: Add support for `AutoMinorVersionUpgrade` to aws_elasticache_replication_group resource. ([#9657](https://github.com/hashicorp/terraform/issues/9657)) - * provider/aws: Fix import of RouteTable with destination prefixes ([#9686](https://github.com/hashicorp/terraform/issues/9686)) - * provider/aws: Add support for reference_name to aws_route53_health_check ([#9737](https://github.com/hashicorp/terraform/issues/9737)) - * provider/aws: Expose ARN suffix on ALB Target Group ([#9734](https://github.com/hashicorp/terraform/issues/9734)) - * provider/azurerm: add account_kind and access_tier to storage_account ([#9408](https://github.com/hashicorp/terraform/issues/9408)) - * provider/azurerm: write load_balanacer attributes to network_interface_card hash ([#9207](https://github.com/hashicorp/terraform/issues/9207)) - * provider/azurerm: Add disk_size_gb param to VM storage_os_disk ([#9200](https://github.com/hashicorp/terraform/issues/9200)) - * provider/azurerm: support importing of subnet resource ([#9646](https://github.com/hashicorp/terraform/issues/9646)) - * provider/azurerm: Add support for *all* of the Azure regions e.g. Germany, China and Government ([#9765](https://github.com/hashicorp/terraform/issues/9765)) - * provider/digitalocean: Allow resizing DigitalOcean Droplets without increasing disk size. 
([#9573](https://github.com/hashicorp/terraform/issues/9573)) - * provider/google: enhance service scope list ([#9442](https://github.com/hashicorp/terraform/issues/9442)) - * provider/google Change default MySQL instance version to 5.6 ([#9674](https://github.com/hashicorp/terraform/issues/9674)) - * provider/google Support MySQL 5.7 instances ([#9673](https://github.com/hashicorp/terraform/issues/9673)) - * provider/google: Add support for using source_disk to google_compute_image ([#9614](https://github.com/hashicorp/terraform/issues/9614)) - * provider/google: Add support for default-internet-gateway alias for google_compute_route ([#9676](https://github.com/hashicorp/terraform/issues/9676)) - * provider/openstack: Added value_specs to openstack_networking_port_v2, allowing vendor information ([#9551](https://github.com/hashicorp/terraform/issues/9551)) - * provider/openstack: Added value_specs to openstack_networking_floatingip_v2, allowing vendor information ([#9552](https://github.com/hashicorp/terraform/issues/9552)) - * provider/openstack: Added value_specs to openstack_compute_keypair_v2, allowing vendor information ([#9554](https://github.com/hashicorp/terraform/issues/9554)) - * provider/openstack: Allow any protocol in openstack_fw_rule_v1 ([#9617](https://github.com/hashicorp/terraform/issues/9617)) - * provider/openstack: expose LoadBalancer v2 VIP Port ID ([#9727](https://github.com/hashicorp/terraform/issues/9727)) - * provider/openstack: Openstack Provider enhancements including environment variables ([#9725](https://github.com/hashicorp/terraform/issues/9725)) - * provider/scaleway: update sdk for ams1 region ([#9687](https://github.com/hashicorp/terraform/issues/9687)) - * provider/scaleway: server volume property ([#9695](https://github.com/hashicorp/terraform/issues/9695)) - -BUG FIXES: - - * core: Resources suffixed with 'panic' won't falsely trigger crash detection. 
([#9395](https://github.com/hashicorp/terraform/issues/9395)) - * core: Validate lifecycle options don't contain interpolations. ([#9576](https://github.com/hashicorp/terraform/issues/9576)) - * core: Tainted resources will not process `ignore_changes`. ([#7855](https://github.com/hashicorp/terraform/issues/7855)) - * core: Boolean looking values passed in via `-var` no longer cause type errors. ([#9642](https://github.com/hashicorp/terraform/issues/9642)) - * core: Computed primitives in certain cases no longer cause diff mismatch errors. ([#9618](https://github.com/hashicorp/terraform/issues/9618)) - * core: Empty arrays for list vars in JSON work ([#8886](https://github.com/hashicorp/terraform/issues/8886)) - * core: Boolean types in tfvars work propertly ([#9751](https://github.com/hashicorp/terraform/issues/9751)) - * core: Deposed resource destruction is accounted for properly in `apply` counts. ([#9731](https://github.com/hashicorp/terraform/issues/9731)) - * core: Check for graph cycles on resource expansion to catch cycles between self-referenced resources. ([#9728](https://github.com/hashicorp/terraform/issues/9728)) - * core: `prevent_destroy` prevents decreasing count ([#9707](https://github.com/hashicorp/terraform/issues/9707)) - * core: removed optional items will trigger "requires new" if necessary ([#9699](https://github.com/hashicorp/terraform/issues/9699)) - * command/apply: `-backup` and `-state-out` work with plan files ([#9706](https://github.com/hashicorp/terraform/issues/9706)) - * command/fmt: Cleaner formatting for multiline standalone comments above resources - * command/validate: respond to `--help` ([#9660](https://github.com/hashicorp/terraform/issues/9660)) - * provider/archive: Converting to datasource. 
([#8492](https://github.com/hashicorp/terraform/issues/8492)) - * provider/aws: Fix issue importing AWS Instances and setting the correct `associate_public_ip_address` value ([#9453](https://github.com/hashicorp/terraform/issues/9453)) - * provider/aws: Fix issue with updating ElasticBeanstalk environment variables ([#9259](https://github.com/hashicorp/terraform/issues/9259)) - * provider/aws: Allow zero value for `scaling_adjustment` in `aws_autoscaling_policy` when using `SimpleScaling` ([#8893](https://github.com/hashicorp/terraform/issues/8893)) - * provider/aws: Increase ECS service drain timeout ([#9521](https://github.com/hashicorp/terraform/issues/9521)) - * provider/aws: Remove VPC Endpoint from state if it's not found ([#9561](https://github.com/hashicorp/terraform/issues/9561)) - * provider/aws: Delete Loging Profile from IAM User on force_destroy ([#9583](https://github.com/hashicorp/terraform/issues/9583)) - * provider/aws: Exposed aws_api_gw_domain_name.certificate_upload_date attribute ([#9533](https://github.com/hashicorp/terraform/issues/9533)) - * provider/aws: fix aws_elasticache_replication_group for Redis in cluster mode ([#9601](https://github.com/hashicorp/terraform/issues/9601)) - * provider/aws: Validate regular expression passed via the ami data_source `name_regex` attribute. 
([#9622](https://github.com/hashicorp/terraform/issues/9622)) - * provider/aws: Bug fix for NoSuckBucket on Destroy of aws_s3_bucket_policy ([#9641](https://github.com/hashicorp/terraform/issues/9641)) - * provider/aws: Refresh aws_autoscaling_schedule from state on 404 ([#9659](https://github.com/hashicorp/terraform/issues/9659)) - * provider/aws: Allow underscores in IAM user and group names ([#9684](https://github.com/hashicorp/terraform/issues/9684)) - * provider/aws: aws_ami: handle deletion of AMIs ([#9721](https://github.com/hashicorp/terraform/issues/9721)) - * provider/aws: Fix aws_route53_record alias perpetual diff ([#9704](https://github.com/hashicorp/terraform/issues/9704)) - * provider/aws: Allow `active` state while waiting for the VPC Peering Connection. ([#9754](https://github.com/hashicorp/terraform/issues/9754)) - * provider/aws: Normalize all-principals wildcard in `aws_iam_policy_document` ([#9720](https://github.com/hashicorp/terraform/issues/9720)) - * provider/azurerm: Fix Azure RM loadbalancer rules validation ([#9468](https://github.com/hashicorp/terraform/issues/9468)) - * provider/azurerm: Fix servicebus_topic values when using the Update func to stop perpetual diff ([#9323](https://github.com/hashicorp/terraform/issues/9323)) - * provider/azurerm: lower servicebus_topic max size to Azure limit ([#9649](https://github.com/hashicorp/terraform/issues/9649)) - * provider/azurerm: Fix VHD deletion when VM and Storage account are in separate resource groups ([#9631](https://github.com/hashicorp/terraform/issues/9631)) - * provider/azurerm: Guard against panic when importing arm_virtual_network ([#9739](https://github.com/hashicorp/terraform/issues/9739)) - * provider/azurerm: fix sql_database resource reading tags ([#9767](https://github.com/hashicorp/terraform/issues/9767)) - * provider/cloudflare: update client library to stop connection closed issues ([#9715](https://github.com/hashicorp/terraform/issues/9715)) - * provider/consul: Change 
to consul_service resource to introduce a `service_id` parameter ([#9366](https://github.com/hashicorp/terraform/issues/9366)) - * provider/datadog: Ignore float/int diffs on thresholds ([#9466](https://github.com/hashicorp/terraform/issues/9466)) - * provider/docker: Fixes for docker_container host object and documentation ([#9367](https://github.com/hashicorp/terraform/issues/9367)) - * provider/scaleway improve the performance of server deletion ([#9491](https://github.com/hashicorp/terraform/issues/9491)) - * provider/scaleway: fix scaleway_volume_attachment with count > 1 ([#9493](https://github.com/hashicorp/terraform/issues/9493)) - - -## 0.7.7 (October 18, 2016) - -FEATURES: - - * **New Data Source:** `scaleway_bootsscript`. ([#9386](https://github.com/hashicorp/terraform/issues/9386)) - * **New Data Source:** `scaleway_image`. ([#9386](https://github.com/hashicorp/terraform/issues/9386)) - -IMPROVEMENTS: - - * core: When the environment variable TF_LOG_PATH is specified, debug logs are now appended to the specified file instead of being truncated. ([#9440](https://github.com/hashicorp/terraform/pull/9440)) - * provider/aws: Expose ARN for `aws_lambda_alias`. ([#9390](https://github.com/hashicorp/terraform/issues/9390)) - * provider/aws: Add support for AWS US East (Ohio) region. ([#9414](https://github.com/hashicorp/terraform/issues/9414)) - * provider/scaleway: `scaleway_ip`, `scaleway_security_group`, `scalway_server` and `scaleway_volume` resources can now be imported. ([#9387](https://github.com/hashicorp/terraform/issues/9387)) - -BUG FIXES: - - * core: List and map indexes support arithmetic. ([#9372](https://github.com/hashicorp/terraform/issues/9372)) - * core: List and map indexes are implicitly converted to the correct type if possible. ([#9372](https://github.com/hashicorp/terraform/issues/9372)) - * provider/aws: Read back `associate_public_ip_address` in `aws_launch_configuration` resources to enable importing. 
([#9399](https://github.com/hashicorp/terraform/issues/9399)) - * provider/aws: Remove `aws_route` resources from state if their associated `aws_route_table` has been removed. ([#9431](https://github.com/hashicorp/terraform/issues/9431)) - * provider/azurerm: Load balancer resources now have their `id` attribute set to the resource URI instead of the load balancer URI. ([#9401](https://github.com/hashicorp/terraform/issues/9401)) - * provider/google: Fix a bug causing a crash when migrating `google_compute_target_pool` resources from 0.6.x releases. ([#9370](https://github.com/hashicorp/terraform/issues/9370)) - -## 0.7.6 (October 14, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - * `azurerm_virtual_machine` has deprecated the use of `diagnostics_profile` in favour of `boot_diagnostics`. ([#9122](https://github.com/hashicorp/terraform/issues/9122)) - * The deprecated `key_file` and `bastion_key_file` arguments to Provisioner Connections have been removed ([#9340](https://github.com/hashicorp/terraform/issues/9340)) - -FEATURES: - * **New Data Source:** `aws_billing_service_account` ([#8701](https://github.com/hashicorp/terraform/issues/8701)) - * **New Data Source:** `aws_availability_zone` ([#6819](https://github.com/hashicorp/terraform/issues/6819)) - * **New Data Source:** `aws_region` ([#6819](https://github.com/hashicorp/terraform/issues/6819)) - * **New Data Source:** `aws_subnet` ([#6819](https://github.com/hashicorp/terraform/issues/6819)) - * **New Data Source:** `aws_vpc` ([#6819](https://github.com/hashicorp/terraform/issues/6819)) - * **New Resource:** `azurerm_lb` ([#9199](https://github.com/hashicorp/terraform/issues/9199)) - * **New Resource:** `azurerm_lb_backend_address_pool` ([#9199](https://github.com/hashicorp/terraform/issues/9199)) - * **New Resource:** `azurerm_lb_nat_rule` ([#9199](https://github.com/hashicorp/terraform/issues/9199)) - * **New Resource:** `azurerm_lb_nat_pool` ([#9199](https://github.com/hashicorp/terraform/issues/9199)) - 
* **New Resource:** `azurerm_lb_probe` ([#9199](https://github.com/hashicorp/terraform/issues/9199)) - * **New Resource:** `azurerm_lb_rule` ([#9199](https://github.com/hashicorp/terraform/issues/9199)) - * **New Resource:** `github_repository` ([#9327](https://github.com/hashicorp/terraform/issues/9327)) - -IMPROVEMENTS: - * core-validation: create validation package to provide common validation functions ([#8103](https://github.com/hashicorp/terraform/issues/8103)) - * provider/aws: Support Import of OpsWorks Custom Layers ([#9252](https://github.com/hashicorp/terraform/issues/9252)) - * provider/aws: Automatically constructed ARNs now support partitions other than `aws`, allowing operation with `aws-cn` and `aws-us-gov` ([#9273](https://github.com/hashicorp/terraform/issues/9273)) - * provider/aws: Retry setTags operation for EC2 resources ([#7890](https://github.com/hashicorp/terraform/issues/7890)) - * provider/aws: Support refresh of EC2 instance `user_data` ([#6736](https://github.com/hashicorp/terraform/issues/6736)) - * provider/aws: Poll to confirm delete of `resource_aws_customer_gateway` ([#9346](https://github.com/hashicorp/terraform/issues/9346)) - * provider/azurerm: expose default keys for `servicebus_namespace` ([#9242](https://github.com/hashicorp/terraform/issues/9242)) - * provider/azurerm: add `enable_blob_encryption` to `azurerm_storage_account` resource ([#9233](https://github.com/hashicorp/terraform/issues/9233)) - * provider/azurerm: set `resource_group_name` on resource import across the provider ([#9073](https://github.com/hashicorp/terraform/issues/9073)) - * provider/azurerm: `azurerm_cdn_profile` resources can now be imported ([#9306](https://github.com/hashicorp/terraform/issues/9306)) - * provider/datadog: add support for Datadog dashboard "type" and "style" options ([#9228](https://github.com/hashicorp/terraform/issues/9228)) - * provider/scaleway: `region` is now supported for provider configuration - -BUG FIXES: - * core: Local 
state can now be refreshed when no resources exist ([#7320](https://github.com/hashicorp/terraform/issues/7320)) - * core: Orphaned nested (depth 2+) modules will inherit provider configs ([#9318](https://github.com/hashicorp/terraform/issues/9318)) - * core: Fix crash when a map key contains an interpolation function ([#9282](https://github.com/hashicorp/terraform/issues/9282)) - * core: Numeric variable values were incorrectly converted to numbers ([#9263](https://github.com/hashicorp/terraform/issues/9263)) - * core: Fix input and output of map variables from HCL ([#9268](https://github.com/hashicorp/terraform/issues/9268)) - * core: Crash when interpolating a map value with a function in the key ([#9282](https://github.com/hashicorp/terraform/issues/9282)) - * core: Crash when copying a nil value in an InstanceState ([#9356](https://github.com/hashicorp/terraform/issues/9356)) - * command/fmt: Bare comment groups no longer have superfluous newlines - * command/fmt: Leading comments on list items are formatted properly - * provider/aws: Return correct AMI image when `most_recent` is set to `true`. ([#9277](https://github.com/hashicorp/terraform/issues/9277)) - * provider/aws: Fix issue with diff on import of `aws_eip` in EC2 Classic ([#9009](https://github.com/hashicorp/terraform/issues/9009)) - * provider/aws: Handle EC2 tags related errors in CloudFront Distribution resource. 
([#9298](https://github.com/hashicorp/terraform/issues/9298)) - * provider/aws: Fix cause error when using `etag` and `kms_key_id` with `aws_s3_bucket_object` ([#9168](https://github.com/hashicorp/terraform/issues/9168)) - * provider/aws: Fix issue reassigning EIP instances appropriately ([#7686](https://github.com/hashicorp/terraform/issues/7686)) - * provider/azurerm: removing resources from state when the API returns a 404 for them ([#8859](https://github.com/hashicorp/terraform/issues/8859)) - * provider/azurerm: Fixed a panic in `azurerm_virtual_machine` when using `diagnostic_profile` ([#9122](https://github.com/hashicorp/terraform/issues/9122)) - -## 0.7.5 (October 6, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - * `tls_cert_request` is now a managed resource instead of a data source, restoring the pre-Terraform 0.7 behaviour ([#9035](https://github.com/hashicorp/terraform/issues/9035)) - -FEATURES: - * **New Provider:** `bitbucket` ([#7405](https://github.com/hashicorp/terraform/issues/7405)) - * **New Resource:** `aws_api_gateway_client_certificate` ([#8775](https://github.com/hashicorp/terraform/issues/8775)) - * **New Resource:** `azurerm_servicebus_topic` ([#9151](https://github.com/hashicorp/terraform/issues/9151)) - * **New Resource:** `azurerm_servicebus_subscription` ([#9185](https://github.com/hashicorp/terraform/issues/9185)) - * **New Resource:** `aws_emr_cluster` ([#9106](https://github.com/hashicorp/terraform/issues/9106)) - * **New Resource:** `aws_emr_instance_group` ([#9106](https://github.com/hashicorp/terraform/issues/9106)) - -IMPROVEMENTS: - * helper/schema: Adding of MinItems as a validation to Lists and Maps ([#9216](https://github.com/hashicorp/terraform/issues/9216)) - * provider/aws: Add JSON validation to the `aws_cloudwatch_event_rule` resource ([#8897](https://github.com/hashicorp/terraform/issues/8897)) - * provider/aws: S3 bucket policies are imported as separate resources 
([#8915](https://github.com/hashicorp/terraform/issues/8915)) - * provider/aws: S3 bucket policies can now be removed via the `aws_s3_bucket` resource ([#8915](https://github.com/hashicorp/terraform/issues/8915)) - * provider/aws: Added a `cluster_address` attribute to aws elasticache ([#8935](https://github.com/hashicorp/terraform/issues/8935)) - * provider/aws: Add JSON validation to the `aws_elasticsearch_domain resource`. ([#8898](https://github.com/hashicorp/terraform/issues/8898)) - * provider/aws: Add JSON validation to the `aws_kms_key resource`. ([#8900](https://github.com/hashicorp/terraform/issues/8900)) - * provider/aws: Add JSON validation to the `aws_s3_bucket_policy resource`. ([#8901](https://github.com/hashicorp/terraform/issues/8901)) - * provider/aws: Add JSON validation to the `aws_sns_topic resource`. ([#8902](https://github.com/hashicorp/terraform/issues/8902)) - * provider/aws: Add JSON validation to the `aws_sns_topic_policy resource`. ([#8903](https://github.com/hashicorp/terraform/issues/8903)) - * provider/aws: Add JSON validation to the `aws_sqs_queue resource`. ([#8904](https://github.com/hashicorp/terraform/issues/8904)) - * provider/aws: Add JSON validation to the `aws_sqs_queue_policy resource`. ([#8905](https://github.com/hashicorp/terraform/issues/8905)) - * provider/aws: Add JSON validation to the `aws_vpc_endpoint resource`. ([#8906](https://github.com/hashicorp/terraform/issues/8906)) - * provider/aws: Update `aws_cloudformation_stack` data source with new helper function. ([#8907](https://github.com/hashicorp/terraform/issues/8907)) - * provider/aws: Add JSON validation to the `aws_s3_bucket` resource. 
([#8908](https://github.com/hashicorp/terraform/issues/8908)) - * provider/aws: Add support for `cloudwatch_logging_options` to Firehose Delivery Streams ([#8671](https://github.com/hashicorp/terraform/issues/8671)) - * provider/aws: Add HTTP/2 support via the http_version parameter to CloudFront distribution ([#8777](https://github.com/hashicorp/terraform/issues/8777)) - * provider/aws: Add `query_string_cache_keys` to allow for selective caching of CloudFront keys ([#8777](https://github.com/hashicorp/terraform/issues/8777)) - * provider/aws: Support Import `aws_elasticache_cluster` ([#9010](https://github.com/hashicorp/terraform/issues/9010)) - * provider/aws: Add support for tags to `aws_cloudfront_distribution` ([#9011](https://github.com/hashicorp/terraform/issues/9011)) - * provider/aws: Support Import `aws_opsworks_stack` ([#9124](https://github.com/hashicorp/terraform/issues/9124)) - * provider/aws: Support Import `aws_elasticache_replication_groups` ([#9140](https://github.com/hashicorp/terraform/issues/9140)) - * provider/aws: Add new aws api-gateway integration types ([#9213](https://github.com/hashicorp/terraform/issues/9213)) - * provider/aws: Import `aws_db_event_subscription` ([#9220](https://github.com/hashicorp/terraform/issues/9220)) - * provider/azurerm: Add normalizeJsonString and validateJsonString functions ([#8909](https://github.com/hashicorp/terraform/issues/8909)) - * provider/azurerm: Support AzureRM Sql Database DataWarehouse ([#9196](https://github.com/hashicorp/terraform/issues/9196)) - * provider/openstack: Use proxy environment variables for communication with services ([#8948](https://github.com/hashicorp/terraform/issues/8948)) - * provider/vsphere: Adding `detach_unknown_disks_on_delete` flag for VM resource ([#8947](https://github.com/hashicorp/terraform/issues/8947)) - * provisioner/chef: Add `skip_register` attribute to allow skipping the registering steps ([#9127](https://github.com/hashicorp/terraform/issues/9127)) - -BUG 
FIXES: - * core: Fixed variables not being in scope for destroy -target on modules ([#9021](https://github.com/hashicorp/terraform/issues/9021)) - * core: Fixed issue that prevented diffs from being properly generated in a specific resource schema scenario ([#8891](https://github.com/hashicorp/terraform/issues/8891)) - * provider/aws: Remove support for `ah` and `esp` literals in Security Group Ingress/Egress rules; you must use the actual protocol number for protocols other than `tcp`, `udp`, `icmp`, or `all` ([#8975](https://github.com/hashicorp/terraform/issues/8975)) - * provider/aws: Do not report drift for effect values differing only by case in AWS policies ([#9139](https://github.com/hashicorp/terraform/issues/9139)) - * provider/aws: VPC ID, Port, Protocol and Name change on aws_alb_target_group will ForceNew resource ([#8989](https://github.com/hashicorp/terraform/issues/8989)) - * provider/aws: Wait for Spot Fleet to drain before removing from state ([#8938](https://github.com/hashicorp/terraform/issues/8938)) - * provider/aws: Fix issue when importing `aws_eip` resources by IP address ([#8970](https://github.com/hashicorp/terraform/issues/8970)) - * provider/aws: Ensure that origin_access_identity is a required value within the CloudFront distribution s3_config block ([#8777](https://github.com/hashicorp/terraform/issues/8777)) - * provider/aws: Corrected Seoul S3 Website Endpoint format ([#9032](https://github.com/hashicorp/terraform/issues/9032)) - * provider/aws: Fix failed remove S3 lifecycle_rule ([#9031](https://github.com/hashicorp/terraform/issues/9031)) - * provider/aws: Fix crashing bug in `aws_ami` data source when using `name_regex` ([#9033](https://github.com/hashicorp/terraform/issues/9033)) - * provider/aws: Fix reading dimensions on cloudwatch alarms ([#9029](https://github.com/hashicorp/terraform/issues/9029)) - * provider/aws: Changing snapshot_identifier on aws_db_instance resource should force… 
([#8806](https://github.com/hashicorp/terraform/issues/8806)) - * provider/aws: Refresh AWS EIP association from state when not found ([#9056](https://github.com/hashicorp/terraform/issues/9056)) - * provider/aws: Make encryption in Aurora instances computed-only ([#9060](https://github.com/hashicorp/terraform/issues/9060)) - * provider/aws: Make sure that VPC Peering Connection in a failed state returns an error. ([#9038](https://github.com/hashicorp/terraform/issues/9038)) - * provider/aws: guard against aws_route53_record delete panic ([#9049](https://github.com/hashicorp/terraform/issues/9049)) - * provider/aws: aws_db_option_group flattenOptions failing due to missing values ([#9052](https://github.com/hashicorp/terraform/issues/9052)) - * provider/aws: Add retry logic to the aws_ecr_repository delete func ([#9050](https://github.com/hashicorp/terraform/issues/9050)) - * provider/aws: Modifying the parameter_group_name of aws_elasticache_replication_group caused a panic ([#9101](https://github.com/hashicorp/terraform/issues/9101)) - * provider/aws: Fix issue with updating ELB subnets for subnets in the same AZ ([#9131](https://github.com/hashicorp/terraform/issues/9131)) - * provider/aws: aws_route53_record alias refresh manually updated record ([#9125](https://github.com/hashicorp/terraform/issues/9125)) - * provider/aws: Fix issue detaching volumes that were already detached ([#9023](https://github.com/hashicorp/terraform/issues/9023)) - * provider/aws: Add retry to the `aws_ssm_document` delete func ([#9188](https://github.com/hashicorp/terraform/issues/9188)) - * provider/aws: Fix issue updating `search_string` in aws_cloudwatch_metric_alarm ([#9230](https://github.com/hashicorp/terraform/issues/9230)) - * provider/aws: Update EFS resource to read performance mode and creation_token ([#9234](https://github.com/hashicorp/terraform/issues/9234)) - * provider/azurerm: fix resource ID parsing for subscriptions resources 
([#9163](https://github.com/hashicorp/terraform/issues/9163)) - * provider/librato: Mandatory name and conditions attributes weren't being sent on Update unless changed ([#8984](https://github.com/hashicorp/terraform/issues/8984)) - * provisioner/chef: Fix an error with parsing certain `vault_json` content ([#9114](https://github.com/hashicorp/terraform/issues/9114)) - * provisioner/chef: Change the order in which to clean up the user key so this is done before the Chef run starts ([#9114](https://github.com/hashicorp/terraform/issues/9114)) - -## 0.7.4 (September 19, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - * In previous releases, the `private_key` field in the connection provisioner - inadvertently accepted a path argument and would read the file contents. - This functionality has been removed in this release ([#8577](https://github.com/hashicorp/terraform/issues/8577)), and the documented - method of using the `file()` interpolation function should be used to load - the key from a file. 
- -FEATURES: - * **New Resource:** `aws_codecommit_trigger` ([#8751](https://github.com/hashicorp/terraform/issues/8751)) - * **New Resource:** `aws_default_security_group` ([#8861](https://github.com/hashicorp/terraform/issues/8861)) - * **New Remote State Backend:** `manta` ([#8830](https://github.com/hashicorp/terraform/issues/8830)) - -IMPROVEMENTS: - * provider/aws: Support 'publish' attribute in `lambda_function` ([#8653](https://github.com/hashicorp/terraform/issues/8653)) - * provider/aws: Add `reader_endpoint` RDS Clusters ([#8884](https://github.com/hashicorp/terraform/issues/8884)) - * provider/aws: Export AWS ELB service account ARN ([#8700](https://github.com/hashicorp/terraform/issues/8700)) - * provider/aws: Allow `aws_alb` to have the name auto-generated ([#8673](https://github.com/hashicorp/terraform/issues/8673)) - * provider/aws: Expose `arn_suffix` on `aws_alb` ([#8833](https://github.com/hashicorp/terraform/issues/8833)) - * provider/aws: Add JSON validation to the `aws_cloudformation_stack` resource ([#8896](https://github.com/hashicorp/terraform/issues/8896)) - * provider/aws: Add JSON validation to the `aws_glacier_vault` resource ([#8899](https://github.com/hashicorp/terraform/issues/8899)) - * provider/azurerm: support Diagnostics Profile ([#8277](https://github.com/hashicorp/terraform/issues/8277)) - * provider/google: Resources depending on the `network` attribute can now reference the network by `self_link` or `name` ([#8639](https://github.com/hashicorp/terraform/issues/8639)) - * provider/postgresql: The standard environment variables PGHOST, PGUSER, PGPASSWORD and PGSSLMODE are now supported for provider configuration ([#8666](https://github.com/hashicorp/terraform/issues/8666)) - * helper/resource: Add timeout duration to timeout error message ([#8773](https://github.com/hashicorp/terraform/issues/8773)) - * provisioner/chef: Support recreating Chef clients by setting `recreate_client=true` 
([#8577](https://github.com/hashicorp/terraform/issues/8577)) - * provisioner/chef: Support encrypting existing Chef-Vaults for newly created clients ([#8577](https://github.com/hashicorp/terraform/issues/8577)) - -BUG FIXES: - * core: Fix regression when loading variables from json ([#8820](https://github.com/hashicorp/terraform/issues/8820)) - * provider/aws: Prevent crash creating an `aws_sns_topic` with an empty policy ([#8834](https://github.com/hashicorp/terraform/issues/8834)) - * provider/aws: Bump `aws_elasticsearch_domain` timeout values ([#672](https://github.com/hashicorp/terraform/issues/672)) - * provider/aws: `aws_nat_gateways` will now recreate on `failed` state ([#8689](https://github.com/hashicorp/terraform/issues/8689)) - * provider/aws: Prevent crash on account ID validation ([#8731](https://github.com/hashicorp/terraform/issues/8731)) - * provider/aws: `aws_db_instance` unexpected state when configuring enhanced monitoring ([#8707](https://github.com/hashicorp/terraform/issues/8707)) - * provider/aws: Remove region condition from `aws_codecommit_repository` ([#8778](https://github.com/hashicorp/terraform/issues/8778)) - * provider/aws: Support Policy DiffSuppression in `aws_kms_key` policy ([#8675](https://github.com/hashicorp/terraform/issues/8675)) - * provider/aws: Fix issue updating Elastic Beanstalk Environment variables ([#8848](https://github.com/hashicorp/terraform/issues/8848)) - * provider/scaleway: Fix `security_group_rule` identification ([#8661](https://github.com/hashicorp/terraform/issues/8661)) - * provider/cloudstack: Fix renaming a VPC with the `cloudstack_vpc` resource ([#8784](https://github.com/hashicorp/terraform/issues/8784)) - -## 0.7.3 (September 5, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - * Terraform now validates the uniqueness of variable and output names in your configurations. In prior versions certain ways of duplicating variable names would work. 
This is now a configuration error (and should've always been). If you get an error running Terraform you may need to remove the duplicates. Done right, this should not affect the behavior of Terraform. - * The internal structure of `.terraform/modules` changed slightly. For configurations with modules, you'll need to run `terraform get` again. - -FEATURES: - * **New Provider:** `rabbitmq` ([#7694](https://github.com/hashicorp/terraform/issues/7694)) - * **New Data Source:** `aws_cloudformation_stack` ([#8640](https://github.com/hashicorp/terraform/issues/8640)) - * **New Resource:** `aws_cloudwatch_log_stream` ([#8626](https://github.com/hashicorp/terraform/issues/8626)) - * **New Resource:** `aws_default_route_table` ([#8323](https://github.com/hashicorp/terraform/issues/8323)) - * **New Resource:** `aws_spot_datafeed_subscription` ([#8640](https://github.com/hashicorp/terraform/issues/8640)) - * **New Resource:** `aws_s3_bucket_policy` ([#8615](https://github.com/hashicorp/terraform/issues/8615)) - * **New Resource:** `aws_sns_topic_policy` ([#8654](https://github.com/hashicorp/terraform/issues/8654)) - * **New Resource:** `aws_sqs_queue_policy` ([#8657](https://github.com/hashicorp/terraform/issues/8657)) - * **New Resource:** `aws_ssm_association` ([#8376](https://github.com/hashicorp/terraform/issues/8376)) - * **New Resource:** `cloudstack_affinity_group` ([#8360](https://github.com/hashicorp/terraform/issues/8360)) - * **New Resource:** `librato_alert` ([#8170](https://github.com/hashicorp/terraform/issues/8170)) - * **New Resource:** `librato_service` ([#8170](https://github.com/hashicorp/terraform/issues/8170)) - * **New Remote State Backend:** `local` ([#8647](https://github.com/hashicorp/terraform/issues/8647)) - * Data source blocks can now have a count associated with them ([#8635](https://github.com/hashicorp/terraform/issues/8635)) - * The count of a resource can now be referenced for interpolations: `self.count` and `type.name.count` work 
([#8581](https://github.com/hashicorp/terraform/issues/8581)) - * Provisioners now support connection using IPv6 in addition to IPv4 ([#6616](https://github.com/hashicorp/terraform/issues/6616)) - -IMPROVEMENTS: - * core: Add wildcard (match all) support to `ignore_changes` ([#8599](https://github.com/hashicorp/terraform/issues/8599)) - * core: HTTP module sources can now use netrc files for auth - * core: Show last resource state in a timeout error message ([#8510](https://github.com/hashicorp/terraform/issues/8510)) - * helper/schema: Add diff suppression callback ([#8585](https://github.com/hashicorp/terraform/issues/8585)) - * provider/aws: API Gateway Custom Authorizer ([#8535](https://github.com/hashicorp/terraform/issues/8535)) - * provider/aws: Add MemoryReservation To `aws_ecs_container_definition` data source ([#8437](https://github.com/hashicorp/terraform/issues/8437)) - * provider/aws: Add ability Enable/Disable For ELB Access logs ([#8438](https://github.com/hashicorp/terraform/issues/8438)) - * provider/aws: Add support for assuming a role prior to performing API operations ([#8638](https://github.com/hashicorp/terraform/issues/8638)) - * provider/aws: Export `arn` of `aws_autoscaling_group` ([#8503](https://github.com/hashicorp/terraform/issues/8503)) - * provider/aws: More robust handling of Lambda function archives hosted on S3 ([#6860](https://github.com/hashicorp/terraform/issues/6860)) - * provider/aws: Spurious diffs of `aws_s3_bucket` policy attributes due to JSON field ordering are reduced ([#8615](https://github.com/hashicorp/terraform/issues/8615)) - * provider/aws: `name_regex` attribute for local post-filtering of `aws_ami` data source results ([#8403](https://github.com/hashicorp/terraform/issues/8403)) - * provider/aws: Support for lifecycle hooks at ASG creation ([#5620](https://github.com/hashicorp/terraform/issues/5620)) - * provider/consul: Make provider settings truly optional 
([#8551](https://github.com/hashicorp/terraform/issues/8551)) - * provider/statuscake: Add support for contact-group id in statuscake test ([#8417](https://github.com/hashicorp/terraform/issues/8417)) - -BUG FIXES: - * core: Changing a module source from file to VCS no longer errors ([#8398](https://github.com/hashicorp/terraform/issues/8398)) - * core: Configuration is now validated prior to input, fixing an obscure parse error when attempting to interpolate a count ([#8591](https://github.com/hashicorp/terraform/issues/8591)) - * core: JSON configuration with resources with a single key parse properly ([#8485](https://github.com/hashicorp/terraform/issues/8485)) - * core: States with duplicate modules are detected and an error is shown ([#8463](https://github.com/hashicorp/terraform/issues/8463)) - * core: Validate uniqueness of variables/outputs in a module ([#8482](https://github.com/hashicorp/terraform/issues/8482)) - * core: `-var` flag inputs starting with `/` work - * core: `-var` flag inputs starting with a number work and was fixed in such a way that this should overall be a lot more resilient to inputs ([#8044](https://github.com/hashicorp/terraform/issues/8044)) - * provider/aws: Add AWS error message to retry APIGateway account update ([#8533](https://github.com/hashicorp/terraform/issues/8533)) - * provider/aws: Do not set empty string to state for `aws_vpn_gateway` availability zone ([#8645](https://github.com/hashicorp/terraform/issues/8645)) - * provider/aws: Fix. Adjust create and destroy timeout in aws_vpn_gateway_attachment. ([#8636](https://github.com/hashicorp/terraform/issues/8636)) - * provider/aws: Handle missing EFS mount target in `aws_efs_mount_target` ([#8529](https://github.com/hashicorp/terraform/issues/8529)) - * provider/aws: If an `aws_security_group` was used in Lambda function it may have prevented you from destroying such SG due to dangling ENIs created by Lambda service. 
These ENIs are now automatically cleaned up prior to SG deletion ([#8033](https://github.com/hashicorp/terraform/issues/8033)) - * provider/aws: Increase `aws_route_table` timeouts from 1 min to 2 mins ([#8465](https://github.com/hashicorp/terraform/issues/8465)) - * provider/aws: Increase aws_rds_cluster timeout to 40 minutes ([#8623](https://github.com/hashicorp/terraform/issues/8623)) - * provider/aws: Refresh `aws_route` from state if `aws_route_table` not found ([#8443](https://github.com/hashicorp/terraform/issues/8443)) - * provider/aws: Remove `aws_elasticsearch_domain` from state if it doesn't exist ([#8643](https://github.com/hashicorp/terraform/issues/8643)) - * provider/aws: Remove unsafe ptr dereferencing from ECS/ECR ([#8514](https://github.com/hashicorp/terraform/issues/8514)) - * provider/aws: Set `apply_method` to state in `aws_db_parameter_group` ([#8603](https://github.com/hashicorp/terraform/issues/8603)) - * provider/aws: Stop `aws_instance` `source_dest_check` triggering an API call on each terraform run ([#8450](https://github.com/hashicorp/terraform/issues/8450)) - * provider/aws: Wait for `aws_route_53_record` to be in-sync after a delete ([#8646](https://github.com/hashicorp/terraform/issues/8646)) - * provider/aws: `aws_volume_attachment` detachment errors are caught ([#8479](https://github.com/hashicorp/terraform/issues/8479)) - * provider/aws: adds resource retry to `aws_spot_instance_request` ([#8516](https://github.com/hashicorp/terraform/issues/8516)) - * provider/aws: Add validation of Health Check target to aws_elb. 
([#8578](https://github.com/hashicorp/terraform/issues/8578)) - * provider/aws: Skip detaching when aws_internet_gateway not found ([#8454](https://github.com/hashicorp/terraform/issues/8454)) - * provider/aws: Handle all kinds of CloudFormation stack failures ([#5606](https://github.com/hashicorp/terraform/issues/5606)) - * provider/azurerm: Reordering the checks after an Azure API Get ([#8607](https://github.com/hashicorp/terraform/issues/8607)) - * provider/chef: Fix "invalid header" errors that could occur ([#8382](https://github.com/hashicorp/terraform/issues/8382)) - * provider/github: Remove unsafe ptr dereferencing ([#8512](https://github.com/hashicorp/terraform/issues/8512)) - * provider/librato: Refresh space from state when not found ([#8596](https://github.com/hashicorp/terraform/issues/8596)) - * provider/mysql: Fix breakage in parsing MySQL version string ([#8571](https://github.com/hashicorp/terraform/issues/8571)) - * provider/template: `template_file` vars can be floating point ([#8590](https://github.com/hashicorp/terraform/issues/8590)) - * provider/triton: Fix bug where the ID of a `triton_key` was used prior to being set ([#8563](https://github.com/hashicorp/terraform/issues/8563)) - -## 0.7.2 (August 25, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - * provider/openstack: changes were made to how volumes attached to instances are detected. If you attached a volume to an instance out of band to Terraform, it will be detached upon the next apply. You can resolve this by adding a `volume` entry for the attached volume. 
- * provider/aws: `aws_spot_fleet_request` has changed the `associate_public_ip_address` default from `true` to `false` - -FEATURES: - * **New Resource:** `aws_api_gateway_base_path_mapping` ([#8353](https://github.com/hashicorp/terraform/issues/8353)) - * **New Resource:** `aws_api_gateway_domain_name` ([#8353](https://github.com/hashicorp/terraform/issues/8353)) - * **New Resource:** `aws_ssm_document` ([#8460](https://github.com/hashicorp/terraform/issues/8460)) - -IMPROVEMENTS: - * core: Names generated with a unique prefix are now sortable based on age ([#8249](https://github.com/hashicorp/terraform/issues/8249)) - * provider/aws: Add Primary Endpoint Address attribute for `aws_elasticache_replication_group` ([#8385](https://github.com/hashicorp/terraform/issues/8385)) - * provider/aws: Add support for `network_mode` to `aws_ecs_task_definition` ([#8391](https://github.com/hashicorp/terraform/issues/8391)) - * provider/aws: Add support for LB target group to ECS service ([#8190](https://github.com/hashicorp/terraform/issues/8190)) - * provider/aws: Support Tags for `aws_alb` and `aws_alb_target_group` resources ([#8422](https://github.com/hashicorp/terraform/issues/8422)) - * provider/aws: Support `snapshot_name` for ElastiCache Cluster and Replication Groups ([#8419](https://github.com/hashicorp/terraform/issues/8419)) - * provider/aws: Add support to `aws_redshift_cluster` for restoring from snapshot ([#8414](https://github.com/hashicorp/terraform/issues/8414)) - * provider/aws: Add validation for master_password in `aws_redshift_cluster` ([#8434](https://github.com/hashicorp/terraform/issues/8434)) - * provider/openstack: Add `allowed_address_pairs` to `openstack_networking_port_v2` ([#8257](https://github.com/hashicorp/terraform/issues/8257)) - -BUG FIXES: - * core: fix crash case when malformed JSON given ([#8295](https://github.com/hashicorp/terraform/issues/8295)) - * core: when asking for input, spaces are allowed 
([#8394](https://github.com/hashicorp/terraform/issues/8394)) - * core: module sources with URL encodings in the local file path won't error ([#8418](https://github.com/hashicorp/terraform/issues/8418)) - * command/apply: prefix destroying resources with module path ([#8396](https://github.com/hashicorp/terraform/issues/8396)) - * command/import: can import into specific indexes ([#8335](https://github.com/hashicorp/terraform/issues/8335)) - * command/push: -upload-modules=false works ([#8456](https://github.com/hashicorp/terraform/issues/8456)) - * command/state mv: nested modules can be moved ([#8304](https://github.com/hashicorp/terraform/issues/8304)) - * command/state mv: resources with a count > 1 can be moved ([#8304](https://github.com/hashicorp/terraform/issues/8304)) - * provider/aws: Refresh `aws_lambda_event_source_mapping` from state when NotFound ([#8378](https://github.com/hashicorp/terraform/issues/8378)) - * provider/aws: `aws_elasticache_replication_group_id` validation change ([#8381](https://github.com/hashicorp/terraform/issues/8381)) - * provider/aws: Fix possible crash if using duplicate Route53 records ([#8399](https://github.com/hashicorp/terraform/issues/8399)) - * provider/aws: Refresh `aws_autoscaling_policy` from state on 404 ([#8430](https://github.com/hashicorp/terraform/issues/8430)) - * provider/aws: Fix crash with VPC Peering connection accept/requests ([#8432](https://github.com/hashicorp/terraform/issues/8432)) - * provider/aws: AWS SpotFleet Requests now works with Subnets and AZs ([#8320](https://github.com/hashicorp/terraform/issues/8320)) - * provider/aws: Refresh `aws_cloudwatch_event_target` from state on `ResourceNotFoundException` ([#8442](https://github.com/hashicorp/terraform/issues/8442)) - * provider/aws: Validate `aws_iam_policy_attachment` Name parameter to stop being empty ([#8441](https://github.com/hashicorp/terraform/issues/8441)) - * provider/aws: Fix segmentation fault in `aws_api_gateway_base_path_mapping` 
resource ([#8466](https://github.com/hashicorp/terraform/issues/8466)) - * provider/google: fix crash regression from Terraform 0.7.1 on `google_compute_firewall` resource ([#8390](https://github.com/hashicorp/terraform/issues/8390)) - * provider/openstack: Volume Attachment and Detachment Fixes ([#8172](https://github.com/hashicorp/terraform/issues/8172)) - -## 0.7.1 (August 19, 2016) - -FEATURES: - * **New Command:** `terraform state rm` ([#8200](https://github.com/hashicorp/terraform/issues/8200)) - * **New Provider:** `archive` ([#7322](https://github.com/hashicorp/terraform/issues/7322)) - * **New Resource:** `aws_alb` ([#8254](https://github.com/hashicorp/terraform/issues/8254)) - * **New Resource:** `aws_alb_listener` ([#8269](https://github.com/hashicorp/terraform/issues/8269)) - * **New Resource:** `aws_alb_target_group` ([#8254](https://github.com/hashicorp/terraform/issues/8254)) - * **New Resource:** `aws_alb_target_group_attachment` ([#8254](https://github.com/hashicorp/terraform/issues/8254)) - * **New Resource:** `aws_alb_target_group_rule` ([#8321](https://github.com/hashicorp/terraform/issues/8321)) - * **New Resource:** `aws_vpn_gateway_attachment` ([#7870](https://github.com/hashicorp/terraform/issues/7870)) - * **New Resource:** `aws_load_balancer_policy` ([#7458](https://github.com/hashicorp/terraform/issues/7458)) - * **New Resource:** `aws_load_balancer_backend_server_policy` ([#7458](https://github.com/hashicorp/terraform/issues/7458)) - * **New Resource:** `aws_load_balancer_listener_policy` ([#7458](https://github.com/hashicorp/terraform/issues/7458)) - * **New Resource:** `aws_lb_ssl_negotiation_policy` ([#8084](https://github.com/hashicorp/terraform/issues/8084)) - * **New Resource:** `aws_elasticache_replication_groups` ([#8275](https://github.com/hashicorp/terraform/issues/8275)) - * **New Resource:** `azurerm_virtual_network_peering` ([#8168](https://github.com/hashicorp/terraform/issues/8168)) - * **New Resource:** 
`azurerm_servicebus_namespace` ([#8195](https://github.com/hashicorp/terraform/issues/8195)) - * **New Resource:** `google_compute_image` ([#7960](https://github.com/hashicorp/terraform/issues/7960)) - * **New Resource:** `packet_volume` ([#8142](https://github.com/hashicorp/terraform/issues/8142)) - * **New Resource:** `consul_prepared_query` ([#7474](https://github.com/hashicorp/terraform/issues/7474)) - * **New Data Source:** `aws_ip_ranges` ([#7984](https://github.com/hashicorp/terraform/issues/7984)) - * **New Data Source:** `fastly_ip_ranges` ([#7984](https://github.com/hashicorp/terraform/issues/7984)) - * **New Data Source:** `aws_caller_identity` ([#8206](https://github.com/hashicorp/terraform/issues/8206)) - * **New Data Source:** `aws_elb_service_account` ([#8221](https://github.com/hashicorp/terraform/issues/8221)) - * **New Data Source:** `aws_redshift_service_account` ([#8224](https://github.com/hashicorp/terraform/issues/8224)) - -IMPROVEMENTS - * provider/archive support folders in output_path ([#8278](https://github.com/hashicorp/terraform/issues/8278)) - * provider/aws: Introduce `aws_elasticsearch_domain` `elasticsearch_version` field (to specify ES version) ([#7860](https://github.com/hashicorp/terraform/issues/7860)) - * provider/aws: Add support for TargetGroups (`aws_alb_target_groups`) to `aws_autoscaling_group` [8327] - * provider/aws: CloudWatch Metrics are now supported for `aws_route53_health_check` resources ([#8319](https://github.com/hashicorp/terraform/issues/8319)) - * provider/aws: Query all pages of group membership ([#6726](https://github.com/hashicorp/terraform/issues/6726)) - * provider/aws: Query all pages of IAM Policy attachments ([#7779](https://github.com/hashicorp/terraform/issues/7779)) - * provider/aws: Change the way ARNs are built ([#7151](https://github.com/hashicorp/terraform/issues/7151)) - * provider/aws: Add support for Elasticsearch destination to firehose delivery streams 
([#7839](https://github.com/hashicorp/terraform/issues/7839)) - * provider/aws: Retry AttachInternetGateway and increase timeout on `aws_internet_gateway` ([#7891](https://github.com/hashicorp/terraform/issues/7891)) - * provider/aws: Add support for Enhanced monitoring to `aws_rds_cluster_instance` ([#8038](https://github.com/hashicorp/terraform/issues/8038)) - * provider/aws: Add ability to set Requests Payer in `aws_s3_bucket` ([#8065](https://github.com/hashicorp/terraform/issues/8065)) - * provider/aws: Add ability to set canned ACL in `aws_s3_bucket_object` ([#8091](https://github.com/hashicorp/terraform/issues/8091)) - * provider/aws: Allow skipping credentials validation, requesting Account ID and/or metadata API check ([#7874](https://github.com/hashicorp/terraform/issues/7874)) - * provider/aws: API gateway request/response parameters can now be specified as map, original `*_in_json` parameters deprecated ([#7794](https://github.com/hashicorp/terraform/issues/7794)) - * provider/aws: Add support for `promotion_tier` to `aws_rds_cluster_instance` ([#8087](https://github.com/hashicorp/terraform/issues/8087)) - * provider/aws: Allow specifying custom S3 endpoint and enforcing S3 path style URLs via new provider options ([#7871](https://github.com/hashicorp/terraform/issues/7871)) - * provider/aws: Add ability to set Storage Class in `aws_s3_bucket_object` ([#8174](https://github.com/hashicorp/terraform/issues/8174)) - * provider/aws: Treat `aws_lambda_function` w/ empty `subnet_ids` & `security_groups_ids` in `vpc_config` as VPC-disabled function ([#6191](https://github.com/hashicorp/terraform/issues/6191)) - * provider/aws: Allow `source_ids` in `aws_db_event_subscription` to be Updatable ([#7892](https://github.com/hashicorp/terraform/issues/7892)) - * provider/aws: Make `aws_efs_mount_target` creation fail for 2+ targets per AZ ([#8205](https://github.com/hashicorp/terraform/issues/8205)) - * provider/aws: Add `force_destroy` option to `aws_route53_zone` 
([#8239](https://github.com/hashicorp/terraform/issues/8239)) - * provider/aws: Support import of `aws_s3_bucket` ([#8262](https://github.com/hashicorp/terraform/issues/8262)) - * provider/aws: Increase timeout for retrying creation of IAM role ([#7733](https://github.com/hashicorp/terraform/issues/7733)) - * provider/aws: Add ability to set peering options in aws_vpc_peering_connection. ([#8310](https://github.com/hashicorp/terraform/issues/8310)) - * provider/azure: add custom_data argument for azure_instance resource ([#8158](https://github.com/hashicorp/terraform/issues/8158)) - * provider/azurerm: Adds support for uploading blobs to azure storage from local source ([#7994](https://github.com/hashicorp/terraform/issues/7994)) - * provider/azurerm: Storage blob contents can be copied from an existing blob ([#8126](https://github.com/hashicorp/terraform/issues/8126)) - * provider/datadog: Allow `tags` to be configured for monitor resources. ([#8284](https://github.com/hashicorp/terraform/issues/8284)) - * provider/google: allows atomic Cloud DNS record changes ([#6575](https://github.com/hashicorp/terraform/issues/6575)) - * provider/google: Move URLMap hosts to TypeSet from TypeList ([#7472](https://github.com/hashicorp/terraform/issues/7472)) - * provider/google: Support static private IP addresses in `resource_compute_instance` ([#6310](https://github.com/hashicorp/terraform/issues/6310)) - * provider/google: Add support for using a GCP Image Family ([#8083](https://github.com/hashicorp/terraform/issues/8083)) - * provider/openstack: Support updating the External Gateway assigned to a Neutron router ([#8070](https://github.com/hashicorp/terraform/issues/8070)) - * provider/openstack: Support for `value_specs` param on `openstack_networking_network_v2` ([#8155](https://github.com/hashicorp/terraform/issues/8155)) - * provider/openstack: Add `value_specs` param on `openstack_networking_subnet_v2` ([#8181](https://github.com/hashicorp/terraform/issues/8181)) - * 
provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` ([#7908](https://github.com/hashicorp/terraform/issues/7908)) - * provider/vsphere: Adding disk type of `Thick Lazy` to `vsphere_virtual_disk` and `vsphere_virtual_machine` ([#7916](https://github.com/hashicorp/terraform/issues/7916)) - * provider/vsphere: Standardizing datastore references to use builtin Path func ([#8075](https://github.com/hashicorp/terraform/issues/8075)) - * provider/consul: add tls config support to consul provider ([#7015](https://github.com/hashicorp/terraform/issues/7015)) - * remote/consul: Support setting datacenter when using consul remote state ([#8102](https://github.com/hashicorp/terraform/issues/8102)) - * provider/google: Support import of `google_compute_instance_template` ([#8147](https://github.com/hashicorp/terraform/issues/8147)), `google_compute_firewall` ([#8236](https://github.com/hashicorp/terraform/issues/8236)), `google_compute_target_pool` ([#8133](https://github.com/hashicorp/terraform/issues/8133)), `google_compute_forwarding_rule` ([#8122](https://github.com/hashicorp/terraform/issues/8122)), `google_compute_http_health_check` ([#8121](https://github.com/hashicorp/terraform/issues/8121)), `google_compute_autoscaler` ([#8115](https://github.com/hashicorp/terraform/issues/8115)) - -BUG FIXES: - * core: Fix issue preventing `taint` from working with resources that had no other attributes in their diff ([#8167](https://github.com/hashicorp/terraform/issues/8167)) - * core: CLI will only run exact match commands ([#7983](https://github.com/hashicorp/terraform/issues/7983)) - * core: Fix panic when resources end up null in state file ([#8120](https://github.com/hashicorp/terraform/issues/8120)) - * core: Fix panic when validating a count with an unprefixed variable ([#8243](https://github.com/hashicorp/terraform/issues/8243)) - * core: Divide by zero in interpolations no longer panics
([#7701](https://github.com/hashicorp/terraform/issues/7701)) - * core: Fix panic on some invalid interpolation syntax ([#5672](https://github.com/hashicorp/terraform/issues/5672)) - * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` ([#7966](https://github.com/hashicorp/terraform/issues/7966)) - * provider/aws: `aws_cloudformation_stack` now respects `timeout_in_minutes` field when waiting for CF API to finish an update operation ([#7997](https://github.com/hashicorp/terraform/issues/7997)) - * provider/aws: Prevent errors when `aws_s3_bucket` `acceleration_status` is not available in a given region ([#7999](https://github.com/hashicorp/terraform/issues/7999)) - * provider/aws: Add state filter to `aws_availability_zone`s data source ([#7965](https://github.com/hashicorp/terraform/issues/7965)) - * provider/aws: Handle lack of snapshot ID for a volume in `ami_copy` ([#7995](https://github.com/hashicorp/terraform/issues/7995)) - * provider/aws: Retry association of IAM Role & instance profile ([#7938](https://github.com/hashicorp/terraform/issues/7938)) - * provider/aws: Fix `aws_s3_bucket` resource `redirect_all_requests_to` action ([#7883](https://github.com/hashicorp/terraform/issues/7883)) - * provider/aws: Fix issue updating ElasticBeanstalk Environment Settings ([#7777](https://github.com/hashicorp/terraform/issues/7777)) - * provider/aws: `aws_rds_cluster` creation timeout bumped to 40 minutes ([#8052](https://github.com/hashicorp/terraform/issues/8052)) - * provider/aws: Update ElasticTranscoder to allow empty notifications, removing notifications, etc ([#8207](https://github.com/hashicorp/terraform/issues/8207)) - * provider/aws: Fix line ending errors/diffs with IAM Server Certs ([#8074](https://github.com/hashicorp/terraform/issues/8074)) - * provider/aws: Fixing IAM data source policy generation to prevent spurious diffs ([#6956](https://github.com/hashicorp/terraform/issues/6956)) - * provider/aws: Correct how CORS rules 
are handled in `aws_s3_bucket` ([#8096](https://github.com/hashicorp/terraform/issues/8096)) - * provider/aws: allow numeric characters in RedshiftClusterDbName ([#8178](https://github.com/hashicorp/terraform/issues/8178)) - * provider/aws: `aws_security_group` now creates tags as early as possible in the process ([#7849](https://github.com/hashicorp/terraform/issues/7849)) - * provider/aws: Defensively code around `db_security_group` ingress rules ([#7893](https://github.com/hashicorp/terraform/issues/7893)) - * provider/aws: `aws_spot_fleet_request` throws panic on missing subnet_id or availability_zone ([#8217](https://github.com/hashicorp/terraform/issues/8217)) - * provider/aws: Terraform fails during Redshift delete if FinalSnapshot is being taken. ([#8270](https://github.com/hashicorp/terraform/issues/8270)) - * provider/azurerm: `azurerm_storage_account` will interrupt for Ctrl-C ([#8215](https://github.com/hashicorp/terraform/issues/8215)) - * provider/azurerm: Public IP - Setting idle timeout value caused panic. 
([#8283](https://github.com/hashicorp/terraform/issues/8283)) - * provider/digitalocean: trim whitespace from ssh key ([#8173](https://github.com/hashicorp/terraform/issues/8173)) - * provider/digitalocean: Enforce Lowercase on IPV6 Addresses ([#7652](https://github.com/hashicorp/terraform/issues/7652)) - * provider/google: Use resource specific project when making queries/changes ([#7029](https://github.com/hashicorp/terraform/issues/7029)) - * provider/google: Fix read for the backend service resource ([#7476](https://github.com/hashicorp/terraform/issues/7476)) - * provider/mysql: `mysql_user` works with MySQL versions before 5.7.6 ([#8251](https://github.com/hashicorp/terraform/issues/8251)) - * provider/openstack: Fix typo in OpenStack LBaaSv2 pool resource ([#8179](https://github.com/hashicorp/terraform/issues/8179)) - * provider/vsphere: Fix for IPv6 only environment creation ([#7643](https://github.com/hashicorp/terraform/issues/7643)) - * provider/google: Correct update process for authorized networks in `google_sql_database_instance` ([#8290](https://github.com/hashicorp/terraform/issues/8290)) - -## 0.7.0 (August 2, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * Terraform Core - * Terraform's built-in plugins are now distributed as part of the main Terraform binary, and use the go-plugin framework. Overrides are still available using separate binaries, but will need recompiling against Terraform 0.7. - * The `terraform plan` command no longer persists state. This makes the command much safer to run, since it is now side-effect free. The `refresh` and `apply` commands still persist state to local and remote storage. Any automation that assumes that `terraform plan` persists state will need to be reworked to explicitly call `terraform refresh` to get the equivalent side-effect. (The `terraform plan` command no longer has the `-state-out` or `-backup` flags due to this change.) - * The `concat()` interpolation function can no longer be used to join strings.
- * Quotation marks may no longer be escaped in HIL expressions ([#7201](https://github.com/hashicorp/terraform/issues/7201)) - * Lists materialized using splat syntax, for example `aws_instance.foo.*.id` are now ordered by the count index rather than lexicographically sorted. If this produces a large number of undesirable differences, you can use the new `sort()` interpolation function to produce the previous behaviour. - * You now access the values of maps using the syntax `var.map["key"]` or the `lookup` function instead of `var.map.key`. - * Outputs on `terraform_remote_state` resources are now top level attributes rather than inside the `output` map. In order to access outputs, use the syntax: `terraform_remote_state.name.outputname`. Currently outputs cannot be named `config` or `backend`. - * AWS Provider - * `aws_elb` now defaults `cross_zone_load_balancing` to `true` - * `aws_instance`: EC2 Classic users may continue to use `security_groups` to reference Security Groups by their `name`. Users who are managing Instances inside VPCs will need to use `vpc_security_group_ids` instead, and reference the security groups by their `id`. Ref https://github.com/hashicorp/terraform/issues/6416#issuecomment-219145065 - * `aws_kinesis_firehose_delivery_stream`: AWS Kinesis Firehose has been refactored to support Redshift as a destination in addition to S3. As a result, the configuration has changed and users will need to update their configuration to match the new `s3_configuration` block. Check out the documentation on [AWS Kinesis Firehose](https://www.terraform.io/docs/providers/aws/r/kinesis_firehose_delivery_stream.html) for more information ([#7375](https://github.com/hashicorp/terraform/issues/7375)) - * `aws_route53_record`: `latency_routing_policy`, `geolocation_routing_policy`, and `failover_routing_policy` block options have been added.
With these additions we’ve renamed the `weight` attribute to `weighted_routing_policy`, and it has changed from a string to a block to match the others. Please see the updated documentation on using `weighted_routing_policy`: https://www.terraform.io/docs/providers/aws/r/route53_record.html . ([#6954](https://github.com/hashicorp/terraform/issues/6954)) - * `aws_db_instance` now defaults `publicly_accessible` to false - * Microsoft Azure Provider - * In documentation, the "Azure (Resource Manager)" provider has been renamed to the "Microsoft Azure" provider. - * `azurerm_dns_cname_record` now accepts a single record rather than a list of records - * `azurerm_virtual_machine` computer_name now Required - * Openstack Provider - * `openstack_networking_subnet_v2` now defaults to turning DHCP on. - * `openstack_fw_policy_v1` now correctly applies rules in the order they are specified. Upon the next apply, current rules might be re-ordered. - * The `member` attribute of `openstack_lb_pool_v1` has been deprecated. Please use the new `openstack_lb_member_v1` resource. - * Docker Provider - * `keep_updated` parameter removed from `docker_image` - This parameter never did what it was supposed to do. See relevant docs, specifically `pull_trigger` & new `docker_registry_image` data source to understand how to keep your `docker_image` updated. - * Atlas Provider - * `atlas_artifact` resource has been deprecated. Please use the new `atlas_artifact` Data Source. - * CloudStack Provider - * All deprecated parameters are removed from all `CloudStack` resources - -FEATURES: - - * **Data sources** are a new kind of primitive in Terraform. Attributes for data sources are refreshed and available during the planning stage. ([#6598](https://github.com/hashicorp/terraform/issues/6598)) - * **Lists and maps** can now be used as first class types for variables and may also be passed between modules.
([#6322](https://github.com/hashicorp/terraform/issues/6322)) - * **State management CLI commands** provide a variety of state manipulation functions for advanced use cases. This should be used where possible instead of manually modifying state files. ([#5811](https://github.com/hashicorp/terraform/issues/5811)) - * **State Import** allows a way to import existing resources into Terraform state for many types of resource. Initial coverage of AWS is quite high, and it is straightforward to add support for new resources. - * **New Command:** `terraform state` to provide access to a variety of state manipulation functions ([#5811](https://github.com/hashicorp/terraform/issues/5811)) - * **New Option:** `terraform output` now supports the `-json` flag to print a machine-readable representation of outputs ([#7608](https://github.com/hashicorp/terraform/issues/7608)) - * **New Data Source:** `aws_ami` ([#6911](https://github.com/hashicorp/terraform/issues/6911)) - * **New Data Source:** `aws_availability_zones` ([#6805](https://github.com/hashicorp/terraform/issues/6805)) - * **New Data Source:** `aws_iam_policy_document` ([#6881](https://github.com/hashicorp/terraform/issues/6881)) - * **New Data Source:** `aws_s3_bucket_object` ([#6946](https://github.com/hashicorp/terraform/issues/6946)) - * **New Data Source:** `aws_ecs_container_definition` ([#7230](https://github.com/hashicorp/terraform/issues/7230)) - * **New Data Source:** `atlas_artifact` ([#7419](https://github.com/hashicorp/terraform/issues/7419)) - * **New Data Source:** `docker_registry_image` ([#7000](https://github.com/hashicorp/terraform/issues/7000)) - * **New Data Source:** `consul_keys` ([#7678](https://github.com/hashicorp/terraform/issues/7678)) - * **New Interpolation Function:** `sort` ([#7128](https://github.com/hashicorp/terraform/issues/7128)) - * **New Interpolation Function:** `distinct` ([#7174](https://github.com/hashicorp/terraform/issues/7174)) - * **New Interpolation Function:** `list` 
([#7528](https://github.com/hashicorp/terraform/issues/7528)) - * **New Interpolation Function:** `map` ([#7832](https://github.com/hashicorp/terraform/issues/7832)) - * **New Provider:** `grafana` ([#6206](https://github.com/hashicorp/terraform/issues/6206)) - * **New Provider:** `logentries` ([#7067](https://github.com/hashicorp/terraform/issues/7067)) - * **New Provider:** `scaleway` ([#7331](https://github.com/hashicorp/terraform/issues/7331)) - * **New Provider:** `random` - allows generation of random values without constantly generating diffs ([#6672](https://github.com/hashicorp/terraform/issues/6672)) - * **New Remote State Provider:** - `gcs` - Google Cloud Storage ([#6814](https://github.com/hashicorp/terraform/issues/6814)) - * **New Remote State Provider:** - `azure` - Microsoft Azure Storage ([#7064](https://github.com/hashicorp/terraform/issues/7064)) - * **New Resource:** `aws_elb_attachment` ([#6879](https://github.com/hashicorp/terraform/issues/6879)) - * **New Resource:** `aws_elastictranscoder_preset` ([#6965](https://github.com/hashicorp/terraform/issues/6965)) - * **New Resource:** `aws_elastictranscoder_pipeline` ([#6965](https://github.com/hashicorp/terraform/issues/6965)) - * **New Resource:** `aws_iam_group_policy_attachment` ([#6858](https://github.com/hashicorp/terraform/issues/6858)) - * **New Resource:** `aws_iam_role_policy_attachment` ([#6858](https://github.com/hashicorp/terraform/issues/6858)) - * **New Resource:** `aws_iam_user_policy_attachment` ([#6858](https://github.com/hashicorp/terraform/issues/6858)) - * **New Resource:** `aws_rds_cluster_parameter_group` ([#5269](https://github.com/hashicorp/terraform/issues/5269)) - * **New Resource:** `aws_spot_fleet_request` ([#7243](https://github.com/hashicorp/terraform/issues/7243)) - * **New Resource:** `aws_ses_active_receipt_rule_set` ([#5387](https://github.com/hashicorp/terraform/issues/5387)) - * **New Resource:** `aws_ses_receipt_filter` 
([#5387](https://github.com/hashicorp/terraform/issues/5387)) - * **New Resource:** `aws_ses_receipt_rule` ([#5387](https://github.com/hashicorp/terraform/issues/5387)) - * **New Resource:** `aws_ses_receipt_rule_set` ([#5387](https://github.com/hashicorp/terraform/issues/5387)) - * **New Resource:** `aws_simpledb_domain` ([#7600](https://github.com/hashicorp/terraform/issues/7600)) - * **New Resource:** `aws_opsworks_user_profile` ([#6304](https://github.com/hashicorp/terraform/issues/6304)) - * **New Resource:** `aws_opsworks_permission` ([#6304](https://github.com/hashicorp/terraform/issues/6304)) - * **New Resource:** `aws_ami_launch_permission` ([#7365](https://github.com/hashicorp/terraform/issues/7365)) - * **New Resource:** `aws_appautoscaling_policy` ([#7663](https://github.com/hashicorp/terraform/issues/7663)) - * **New Resource:** `aws_appautoscaling_target` ([#7663](https://github.com/hashicorp/terraform/issues/7663)) - * **New Resource:** `openstack_blockstorage_volume_v2` ([#6693](https://github.com/hashicorp/terraform/issues/6693)) - * **New Resource:** `openstack_lb_loadbalancer_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012)) - * **New Resource:** `openstack_lb_listener_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012)) - * **New Resource:** `openstack_lb_pool_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012)) - * **New Resource:** `openstack_lb_member_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012)) - * **New Resource:** `openstack_lb_monitor_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012)) - * **New Resource:** `vsphere_virtual_disk` ([#6273](https://github.com/hashicorp/terraform/issues/6273)) - * **New Resource:** `github_repository_collaborator` ([#6861](https://github.com/hashicorp/terraform/issues/6861)) - * **New Resource:** `datadog_timeboard` ([#6900](https://github.com/hashicorp/terraform/issues/6900)) - * **New Resource:** `digitalocean_tag` 
([#7500](https://github.com/hashicorp/terraform/issues/7500)) - * **New Resource:** `digitalocean_volume` ([#7560](https://github.com/hashicorp/terraform/issues/7560)) - * **New Resource:** `consul_agent_service` ([#7508](https://github.com/hashicorp/terraform/issues/7508)) - * **New Resource:** `consul_catalog_entry` ([#7508](https://github.com/hashicorp/terraform/issues/7508)) - * **New Resource:** `consul_node` ([#7508](https://github.com/hashicorp/terraform/issues/7508)) - * **New Resource:** `consul_service` ([#7508](https://github.com/hashicorp/terraform/issues/7508)) - * **New Resource:** `mysql_grant` ([#7656](https://github.com/hashicorp/terraform/issues/7656)) - * **New Resource:** `mysql_user` ([#7656](https://github.com/hashicorp/terraform/issues/7656)) - * **New Resource:** `azurerm_storage_table` ([#7327](https://github.com/hashicorp/terraform/issues/7327)) - * **New Resource:** `azurerm_virtual_machine_scale_set` ([#6711](https://github.com/hashicorp/terraform/issues/6711)) - * **New Resource:** `azurerm_traffic_manager_endpoint` ([#7826](https://github.com/hashicorp/terraform/issues/7826)) - * **New Resource:** `azurerm_traffic_manager_profile` ([#7826](https://github.com/hashicorp/terraform/issues/7826)) - * core: Tainted resources now show up in the plan and respect dependency ordering ([#6600](https://github.com/hashicorp/terraform/issues/6600)) - * core: The `lookup` interpolation function can now have a default fall-back value specified ([#6884](https://github.com/hashicorp/terraform/issues/6884)) - * core: The `terraform plan` command no longer persists state. ([#6811](https://github.com/hashicorp/terraform/issues/6811)) - -IMPROVEMENTS: - - * core: The `jsonencode` interpolation function now supports encoding lists and maps ([#6749](https://github.com/hashicorp/terraform/issues/6749)) - * core: Add the ability for resource definitions to mark attributes as "sensitive" which will omit them from UI output. 
([#6923](https://github.com/hashicorp/terraform/issues/6923)) - * core: Support `.` in map keys ([#7654](https://github.com/hashicorp/terraform/issues/7654)) - * core: Enhance interpolation functions to account for first class maps and lists ([#7832](https://github.com/hashicorp/terraform/issues/7832)) ([#7834](https://github.com/hashicorp/terraform/issues/7834)) - * command: Remove second DefaultDataDirectory const ([#7666](https://github.com/hashicorp/terraform/issues/7666)) - * provider/aws: Add `dns_name` to `aws_efs_mount_target` ([#7428](https://github.com/hashicorp/terraform/issues/7428)) - * provider/aws: Add `force_destroy` to `aws_iam_user` for force-deleting access keys assigned to the user ([#7766](https://github.com/hashicorp/terraform/issues/7766)) - * provider/aws: Add `option_settings` to `aws_db_option_group` ([#6560](https://github.com/hashicorp/terraform/issues/6560)) - * provider/aws: Add more explicit support for Skipping Final Snapshot in RDS Cluster ([#6795](https://github.com/hashicorp/terraform/issues/6795)) - * provider/aws: Add support for S3 Bucket Acceleration ([#6628](https://github.com/hashicorp/terraform/issues/6628)) - * provider/aws: Add support for `kms_key_id` to `aws_db_instance` ([#6651](https://github.com/hashicorp/terraform/issues/6651)) - * provider/aws: Specifying more than one health check on an `aws_elb` fails with an error prior to making an API request ([#7489](https://github.com/hashicorp/terraform/issues/7489)) - * provider/aws: Add support to `aws_redshift_cluster` for `iam_roles` ([#6647](https://github.com/hashicorp/terraform/issues/6647)) - * provider/aws: SQS use raw policy string if compact fails ([#6724](https://github.com/hashicorp/terraform/issues/6724)) - * provider/aws: Set default description to "Managed by Terraform" ([#6104](https://github.com/hashicorp/terraform/issues/6104)) - * provider/aws: Support for Redshift Cluster encryption using a KMS key 
([#6712](https://github.com/hashicorp/terraform/issues/6712)) - * provider/aws: Support tags for AWS redshift cluster ([#5356](https://github.com/hashicorp/terraform/issues/5356)) - * provider/aws: Add `iam_arn` to aws_cloudfront_origin_access_identity ([#6955](https://github.com/hashicorp/terraform/issues/6955)) - * provider/aws: Add `cross_zone_load_balancing` on `aws_elb` default to true ([#6897](https://github.com/hashicorp/terraform/issues/6897)) - * provider/aws: Add support for `character_set_name` to `aws_db_instance` ([#4861](https://github.com/hashicorp/terraform/issues/4861)) - * provider/aws: Add support for DB parameter group with RDS Cluster Instances (Aurora) ([#6865](https://github.com/hashicorp/terraform/issues/6865)) - * provider/aws: Add `name_prefix` to `aws_iam_instance_profile` and `aws_iam_role` ([#6939](https://github.com/hashicorp/terraform/issues/6939)) - * provider/aws: Allow authentication & credentials validation for federated IAM Roles and EC2 instance profiles ([#6536](https://github.com/hashicorp/terraform/issues/6536)) - * provider/aws: Rename parameter_group_name to db_cluster_parameter_group_name ([#7083](https://github.com/hashicorp/terraform/issues/7083)) - * provider/aws: Retry RouteTable Route/Association creation ([#7156](https://github.com/hashicorp/terraform/issues/7156)) - * provider/aws: `delegation_set_id` conflicts w/ `vpc_id` in `aws_route53_zone` as delegation sets can only be used for public zones ([#7213](https://github.com/hashicorp/terraform/issues/7213)) - * provider/aws: Support Elastic Beanstalk scheduledaction ([#7376](https://github.com/hashicorp/terraform/issues/7376)) - * provider/aws: Add support for NewInstancesProtectedFromScaleIn to `aws_autoscaling_group` ([#6490](https://github.com/hashicorp/terraform/issues/6490)) - * provider/aws: Added support for `snapshot_identifier` parameter in aws_rds_cluster ([#7158](https://github.com/hashicorp/terraform/issues/7158)) - * provider/aws: Add inplace edit/update
DB Security Group Rule Ingress ([#7245](https://github.com/hashicorp/terraform/issues/7245)) - * provider/aws: Added support for redshift destination to firehose delivery streams ([#7375](https://github.com/hashicorp/terraform/issues/7375)) - * provider/aws: Allow `aws_redshift_security_group` ingress rules to change ([#5939](https://github.com/hashicorp/terraform/issues/5939)) - * provider/aws: Add support for `encryption` and `kms_key_id` to `aws_ami` ([#7181](https://github.com/hashicorp/terraform/issues/7181)) - * provider/aws: AWS prefix lists to enable security group egress to a VPC Endpoint ([#7511](https://github.com/hashicorp/terraform/issues/7511)) - * provider/aws: Retry creation of IAM role depending on new IAM user ([#7324](https://github.com/hashicorp/terraform/issues/7324)) - * provider/aws: Allow `port` on `aws_db_instance` to be updated ([#7441](https://github.com/hashicorp/terraform/issues/7441)) - * provider/aws: Allow VPC Classic Linking in Autoscaling Launch Configs ([#7470](https://github.com/hashicorp/terraform/issues/7470)) - * provider/aws: Support `task_role_arn` on `aws_ecs_task_definition` ([#7653](https://github.com/hashicorp/terraform/issues/7653)) - * provider/aws: Support Tags on `aws_rds_cluster` ([#7695](https://github.com/hashicorp/terraform/issues/7695)) - * provider/aws: Support kms_key_id for `aws_rds_cluster` ([#7662](https://github.com/hashicorp/terraform/issues/7662)) - * provider/aws: Allow setting a `poll_interval` on `aws_elastic_beanstalk_environment` ([#7523](https://github.com/hashicorp/terraform/issues/7523)) - * provider/aws: Add support for Kinesis streams shard-level metrics ([#7684](https://github.com/hashicorp/terraform/issues/7684)) - * provider/aws: Support create / update greater than twenty db parameters in `aws_db_parameter_group` ([#7364](https://github.com/hashicorp/terraform/issues/7364)) - * provider/aws: expose network interface id in `aws_instance`
([#6751](https://github.com/hashicorp/terraform/issues/6751)) - * provider/aws: Adding passthrough behavior for API Gateway integration ([#7801](https://github.com/hashicorp/terraform/issues/7801)) - * provider/aws: Enable Redshift Cluster Logging ([#7813](https://github.com/hashicorp/terraform/issues/7813)) - * provider/aws: Add ability to set Performance Mode in `aws_efs_file_system` ([#7791](https://github.com/hashicorp/terraform/issues/7791)) - * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` ([#6807](https://github.com/hashicorp/terraform/issues/6807)) - * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys ([#6742](https://github.com/hashicorp/terraform/issues/6742)) - * provider/azurerm: The Azure SDK now exposes better error messages ([#6976](https://github.com/hashicorp/terraform/issues/6976)) - * provider/azurerm: `azurerm_dns_zone` now returns `name_servers` ([#7434](https://github.com/hashicorp/terraform/issues/7434)) - * provider/azurerm: dump entire Request/Response in autorest Decorator ([#7719](https://github.com/hashicorp/terraform/issues/7719)) - * provider/azurerm: add option to delete VMs Data disks on termination ([#7793](https://github.com/hashicorp/terraform/issues/7793)) - * provider/clc: Add support for hyperscale and bareMetal server types and package installation - * provider/clc: Fix optional server password ([#6414](https://github.com/hashicorp/terraform/issues/6414)) - * provider/cloudstack: Add support for affinity groups to `cloudstack_instance` ([#6898](https://github.com/hashicorp/terraform/issues/6898)) - * provider/cloudstack: Enable swapping of ACLs without having to rebuild the network tier ([#6741](https://github.com/hashicorp/terraform/issues/6741)) - * provider/cloudstack: Improve ACL swapping ([#7315](https://github.com/hashicorp/terraform/issues/7315)) - * provider/cloudstack: Add project support to `cloudstack_network_acl` and 
`cloudstack_network_acl_rule` ([#7612](https://github.com/hashicorp/terraform/issues/7612)) - * provider/cloudstack: Add option to set `root_disk_size` to `cloudstack_instance` ([#7070](https://github.com/hashicorp/terraform/issues/7070)) - * provider/cloudstack: Do no longer force a new `cloudstack_instance` resource when updating `user_data` ([#7074](https://github.com/hashicorp/terraform/issues/7074)) - * provider/cloudstack: Add option to set `security_group_names` to `cloudstack_instance` ([#7240](https://github.com/hashicorp/terraform/issues/7240)) - * provider/cloudstack: Add option to set `affinity_group_names` to `cloudstack_instance` ([#7242](https://github.com/hashicorp/terraform/issues/7242)) - * provider/datadog: Add support for 'require full window' and 'locked' ([#6738](https://github.com/hashicorp/terraform/issues/6738)) - * provider/docker: Docker Container DNS Setting Enhancements ([#7392](https://github.com/hashicorp/terraform/issues/7392)) - * provider/docker: Add `destroy_grace_seconds` option to stop container before delete ([#7513](https://github.com/hashicorp/terraform/issues/7513)) - * provider/docker: Add `pull_trigger` option to `docker_image` to trigger pulling layers of a given image ([#7000](https://github.com/hashicorp/terraform/issues/7000)) - * provider/fastly: Add support for Cache Settings ([#6781](https://github.com/hashicorp/terraform/issues/6781)) - * provider/fastly: Add support for Service Request Settings on `fastly_service_v1` resources ([#6622](https://github.com/hashicorp/terraform/issues/6622)) - * provider/fastly: Add support for custom VCL configuration ([#6662](https://github.com/hashicorp/terraform/issues/6662)) - * provider/google: Support optional uuid naming for Instance Template ([#6604](https://github.com/hashicorp/terraform/issues/6604)) - * provider/openstack: Add support for client certificate authentication ([#6279](https://github.com/hashicorp/terraform/issues/6279)) - * provider/openstack: Allow 
Neutron-based Floating IP to target a specific tenant ([#6454](https://github.com/hashicorp/terraform/issues/6454)) - * provider/openstack: Enable DHCP By Default ([#6838](https://github.com/hashicorp/terraform/issues/6838)) - * provider/openstack: Implement fixed_ip on Neutron floating ip allocations ([#6837](https://github.com/hashicorp/terraform/issues/6837)) - * provider/openstack: Increase timeouts for image resize, subnets, and routers ([#6764](https://github.com/hashicorp/terraform/issues/6764)) - * provider/openstack: Add `lb_provider` argument to `lb_pool_v1` resource ([#6919](https://github.com/hashicorp/terraform/issues/6919)) - * provider/openstack: Enforce `ForceNew` on Instance Block Device ([#6921](https://github.com/hashicorp/terraform/issues/6921)) - * provider/openstack: Can now stop instances before destroying them ([#7184](https://github.com/hashicorp/terraform/issues/7184)) - * provider/openstack: Disassociate LBaaS v1 Monitors from Pool Before Deletion ([#6997](https://github.com/hashicorp/terraform/issues/6997)) - * provider/powerdns: Add support for PowerDNS 4 API ([#7819](https://github.com/hashicorp/terraform/issues/7819)) - * provider/triton: add `triton_machine` `domain names` ([#7149](https://github.com/hashicorp/terraform/issues/7149)) - * provider/vsphere: Add support for `controller_type` to `vsphere_virtual_machine` ([#6785](https://github.com/hashicorp/terraform/issues/6785)) - * provider/vsphere: Fix bug with `vsphere_virtual_machine` wait for ip ([#6377](https://github.com/hashicorp/terraform/issues/6377)) - * provider/vsphere: Virtual machine update disk ([#6619](https://github.com/hashicorp/terraform/issues/6619)) - * provider/vsphere: `vsphere_virtual_machine` adding controller creation logic ([#6853](https://github.com/hashicorp/terraform/issues/6853)) - * provider/vsphere: `vsphere_virtual_machine` added support for `mac address` on `network_interface` ([#6966](https://github.com/hashicorp/terraform/issues/6966)) - * 
provider/vsphere: Enhanced `vsphere` logging capabilities ([#6893](https://github.com/hashicorp/terraform/issues/6893)) - * provider/vsphere: Add DiskEnableUUID option to `vsphere_virtual_machine` ([#7088](https://github.com/hashicorp/terraform/issues/7088)) - * provider/vsphere: Virtual Machine and File resources handle Read errors properly ([#7220](https://github.com/hashicorp/terraform/issues/7220)) - * provider/vsphere: set uuid as `vsphere_virtual_machine` output ([#4382](https://github.com/hashicorp/terraform/issues/4382)) - * provider/vsphere: Add support for `keep_on_remove` to `vsphere_virtual_machine` ([#7169](https://github.com/hashicorp/terraform/issues/7169)) - * provider/vsphere: Add support for additional `vsphere_virtual_machine` SCSI controller types ([#7525](https://github.com/hashicorp/terraform/issues/7525)) - * provisioner/file: File provisioners may now have file content set as an attribute ([#7561](https://github.com/hashicorp/terraform/issues/7561)) - -BUG FIXES: - - * core: Correct the previous fix for a bug causing "attribute not found" messages during destroy, as it was insufficient ([#6599](https://github.com/hashicorp/terraform/issues/6599)) - * core: Fix issue causing syntax errors interpolating count attribute when value passed between modules ([#6833](https://github.com/hashicorp/terraform/issues/6833)) - * core: Fix "diffs didn't match during apply" error for computed sets ([#7205](https://github.com/hashicorp/terraform/issues/7205)) - * core: Fix issue where `terraform init .` would truncate existing files ([#7273](https://github.com/hashicorp/terraform/issues/7273)) - * core: Don't compare diffs between maps with computed values ([#7249](https://github.com/hashicorp/terraform/issues/7249)) - * core: Don't copy existing files over themselves when fetching modules ([#7273](https://github.com/hashicorp/terraform/issues/7273)) - * core: Always increment the state serial number when upgrading the version 
([#7402](https://github.com/hashicorp/terraform/issues/7402)) - * core: Fix a crash during eval when we're upgrading an empty state ([#7403](https://github.com/hashicorp/terraform/issues/7403)) - * core: Honor the `-state-out` flag when applying with a plan file ([#7443](https://github.com/hashicorp/terraform/issues/7443)) - * core: Fix a panic when a `terraform_remote_state` data source doesn't exist ([#7464](https://github.com/hashicorp/terraform/issues/7464)) - * core: Fix issue where `ignore_changes` caused incorrect diffs on dependent resources ([#7563](https://github.com/hashicorp/terraform/issues/7563)) - * provider/aws: Manual changes to `aws_codedeploy_deployment_group` resources are now detected ([#7530](https://github.com/hashicorp/terraform/issues/7530)) - * provider/aws: Changing keys in `aws_dynamodb_table` correctly force new resources ([#6829](https://github.com/hashicorp/terraform/issues/6829)) - * provider/aws: Fix a bug where CloudWatch alarms are created repeatedly if the user does not have permission to use the DescribeAlarms operation ([#7227](https://github.com/hashicorp/terraform/issues/7227)) - * provider/aws: Fix crash in `aws_elasticache_parameter_group` occurring following edits in the console ([#6687](https://github.com/hashicorp/terraform/issues/6687)) - * provider/aws: Fix issue reattaching a VPN gateway to a VPC ([#6987](https://github.com/hashicorp/terraform/issues/6987)) - * provider/aws: Fix issue with Root Block Devices and encrypted flag in Launch Configurations ([#6512](https://github.com/hashicorp/terraform/issues/6512)) - * provider/aws: If more ENIs are attached to `aws_instance`, the one w/ DeviceIndex `0` is always used in context of `aws_instance` (previously unpredictable) ([#6761](https://github.com/hashicorp/terraform/issues/6761)) - * provider/aws: Increased lambda event mapping creation timeout ([#7657](https://github.com/hashicorp/terraform/issues/7657)) - * provider/aws: Handle spurious failures in 
resourceAwsSecurityGroupRuleRead ([#7377](https://github.com/hashicorp/terraform/issues/7377)) - * provider/aws: Make 'stage_name' required in api_gateway_deployment ([#6797](https://github.com/hashicorp/terraform/issues/6797)) - * provider/aws: Mark Lambda function as gone when it's gone ([#6924](https://github.com/hashicorp/terraform/issues/6924)) - * provider/aws: Trim trailing `.` from `name` in `aws_route53_record` resources to prevent spurious diffs ([#6592](https://github.com/hashicorp/terraform/issues/6592)) - * provider/aws: Update Lambda functions on name change ([#7081](https://github.com/hashicorp/terraform/issues/7081)) - * provider/aws: Updating state when `aws_sns_topic_subscription` is missing ([#6629](https://github.com/hashicorp/terraform/issues/6629)) - * provider/aws: `aws_codedeploy_deployment_group` panic when setting `on_premises_instance_tag_filter` ([#6617](https://github.com/hashicorp/terraform/issues/6617)) - * provider/aws: `aws_db_instance` now defaults `publicly_accessible` to false ([#7117](https://github.com/hashicorp/terraform/issues/7117)) - * provider/aws: `aws_opsworks_application.app_source` SSH key is write-only ([#6649](https://github.com/hashicorp/terraform/issues/6649)) - * provider/aws: fix Elastic Beanstalk `cname_prefix` continual plans ([#6653](https://github.com/hashicorp/terraform/issues/6653)) - * provider/aws: Bundle IOPs and Allocated Storage update for DB Instances ([#7203](https://github.com/hashicorp/terraform/issues/7203)) - * provider/aws: Fix case when instanceId is absent in network interfaces ([#6851](https://github.com/hashicorp/terraform/issues/6851)) - * provider/aws: fix aws_security_group_rule refresh ([#6730](https://github.com/hashicorp/terraform/issues/6730)) - * provider/aws: Fix issue with Elastic Beanstalk and invalid settings ([#7222](https://github.com/hashicorp/terraform/issues/7222)) - * provider/aws: Fix issue where aws_app_cookie_stickiness_policy fails on destroy if LoadBalancer doesn't 
exist ([#7166](https://github.com/hashicorp/terraform/issues/7166)) - * provider/aws: Stickiness Policy exists, but isn't assigned to the ELB ([#7188](https://github.com/hashicorp/terraform/issues/7188)) - * provider/aws: Fix issue with `manage_bundler` on `aws_opsworks_layers` ([#7219](https://github.com/hashicorp/terraform/issues/7219)) - * provider/aws: Set Elastic Beanstalk stack name back to state ([#7445](https://github.com/hashicorp/terraform/issues/7445)) - * provider/aws: Allow recreation of VPC Peering Connection when state is rejected ([#7466](https://github.com/hashicorp/terraform/issues/7466)) - * provider/aws: Remove EFS File System from State when NotFound ([#7437](https://github.com/hashicorp/terraform/issues/7437)) - * provider/aws: `aws_customer_gateway` refreshing from state on deleted state ([#7482](https://github.com/hashicorp/terraform/issues/7482)) - * provider/aws: Retry finding `aws_route` after creating it ([#7463](https://github.com/hashicorp/terraform/issues/7463)) - * provider/aws: Refresh CloudWatch Group from state on 404 ([#7576](https://github.com/hashicorp/terraform/issues/7576)) - * provider/aws: Adding in additional retry logic due to latency with delete of `db_option_group` ([#7312](https://github.com/hashicorp/terraform/issues/7312)) - * provider/aws: Safely get ELB values ([#7585](https://github.com/hashicorp/terraform/issues/7585)) - * provider/aws: Fix bug for recurring plans on ec2-classic and vpc in beanstalk ([#6491](https://github.com/hashicorp/terraform/issues/6491)) - * provider/aws: Bump rds_cluster timeout to 15 mins ([#7604](https://github.com/hashicorp/terraform/issues/7604)) - * provider/aws: Fix ICMP fields in `aws_network_acl_rule` to allow ICMP code 0 (echo reply) to be configured ([#7669](https://github.com/hashicorp/terraform/issues/7669)) - * provider/aws: Fix bug with Updating `aws_autoscaling_group` `enabled_metrics` ([#7698](https://github.com/hashicorp/terraform/issues/7698)) - * provider/aws: Ignore 
IOPS on non io1 AWS root_block_device ([#7783](https://github.com/hashicorp/terraform/issues/7783)) - * provider/aws: Ignore missing ENI attachment when trying to detach ENI ([#7185](https://github.com/hashicorp/terraform/issues/7185)) - * provider/aws: Fix issue updating ElasticBeanstalk Environment templates ([#7811](https://github.com/hashicorp/terraform/issues/7811)) - * provider/aws: Restore Defaults to SQS Queues ([#7818](https://github.com/hashicorp/terraform/issues/7818)) - * provider/aws: Don't delete Lambda function from state on initial call of the Read func ([#7829](https://github.com/hashicorp/terraform/issues/7829)) - * provider/aws: `aws_vpn_gateway` should be removed from state when in deleted state ([#7861](https://github.com/hashicorp/terraform/issues/7861)) - * provider/aws: Fix aws_route53_record 0-2 migration ([#7907](https://github.com/hashicorp/terraform/issues/7907)) - * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` ([#6766](https://github.com/hashicorp/terraform/issues/6766)) - * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources ([#6790](https://github.com/hashicorp/terraform/issues/6790)) - * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` ([#6768](https://github.com/hashicorp/terraform/issues/6768)) - * provider/azurerm: Add support for storage container name validation ([#6852](https://github.com/hashicorp/terraform/issues/6852)) - * provider/azurerm: Remove storage containers and blobs when storage accounts are not found ([#6855](https://github.com/hashicorp/terraform/issues/6855)) - * provider/azurerm: `azurerm_virtual_machine` fix `additional_unattend_rm` Windows config option ([#7105](https://github.com/hashicorp/terraform/issues/7105)) - * provider/azurerm: Fix `azurerm_virtual_machine` windows_config ([#7123](https://github.com/hashicorp/terraform/issues/7123)) - * 
provider/azurerm: `azurerm_dns_cname_record` can create CNAME records again ([#7113](https://github.com/hashicorp/terraform/issues/7113)) - * provider/azurerm: `azurerm_network_security_group` now waits for the provisioning state of `ready` before proceeding ([#7307](https://github.com/hashicorp/terraform/issues/7307)) - * provider/azurerm: `computer_name` is now required for `azurerm_virtual_machine` resources ([#7308](https://github.com/hashicorp/terraform/issues/7308)) - * provider/azurerm: destroy azurerm_virtual_machine OS Disk VHD on deletion ([#7584](https://github.com/hashicorp/terraform/issues/7584)) - * provider/azurerm: catch `azurerm_template_deployment` erroring silently ([#7644](https://github.com/hashicorp/terraform/issues/7644)) - * provider/azurerm: changing the name of an `azurerm_virtual_machine` now forces a new resource ([#7646](https://github.com/hashicorp/terraform/issues/7646)) - * provider/azurerm: azurerm_storage_account now returns storage keys value instead of their names ([#7674](https://github.com/hashicorp/terraform/issues/7674)) - * provider/azurerm: `azurerm_virtual_machine` computer_name now Required ([#7308](https://github.com/hashicorp/terraform/issues/7308)) - * provider/azurerm: Change of `availability_set_id` on `azurerm_virtual_machine` should ForceNew ([#7650](https://github.com/hashicorp/terraform/issues/7650)) - * provider/azurerm: Wait for `azurerm_storage_account` to be available ([#7329](https://github.com/hashicorp/terraform/issues/7329)) - * provider/cloudflare: Fix issue upgrading CloudFlare Records created before v0.6.15 ([#6969](https://github.com/hashicorp/terraform/issues/6969)) - * provider/cloudstack: Fix using `cloudstack_network_acl` within a project ([#6743](https://github.com/hashicorp/terraform/issues/6743)) - * provider/cloudstack: Fix refreshing `cloudstack_network_acl_rule` when the associated ACL is deleted ([#7612](https://github.com/hashicorp/terraform/issues/7612)) - * provider/cloudstack: Fix 
refreshing `cloudstack_port_forward` when the associated IP address is no longer associated ([#7612](https://github.com/hashicorp/terraform/issues/7612)) - * provider/cloudstack: Fix creating `cloudstack_network` with offerings that do not support specifying IP ranges ([#7612](https://github.com/hashicorp/terraform/issues/7612)) - * provider/digitalocean: Stop `digitalocean_droplet` forcing new resource on uppercase region ([#7044](https://github.com/hashicorp/terraform/issues/7044)) - * provider/digitalocean: Reassign Floating IP when droplet changes ([#7411](https://github.com/hashicorp/terraform/issues/7411)) - * provider/google: Fix a bug causing an error attempting to delete an already-deleted `google_compute_disk` ([#6689](https://github.com/hashicorp/terraform/issues/6689)) - * provider/mysql: Specifying empty provider credentials no longer causes a panic ([#7211](https://github.com/hashicorp/terraform/issues/7211)) - * provider/openstack: Reassociate Floating IP on network changes ([#6579](https://github.com/hashicorp/terraform/issues/6579)) - * provider/openstack: Ensure CIDRs Are Lower Case ([#6864](https://github.com/hashicorp/terraform/issues/6864)) - * provider/openstack: Rebuild Instances On Network Changes ([#6844](https://github.com/hashicorp/terraform/issues/6844)) - * provider/openstack: Firewall rules are applied in the correct order ([#7194](https://github.com/hashicorp/terraform/issues/7194)) - * provider/openstack: Fix Security Group EOF Error when Adding / Removing Multiple Groups ([#7468](https://github.com/hashicorp/terraform/issues/7468)) - * provider/openstack: Fixing boot volumes interfering with block storage volumes list ([#7649](https://github.com/hashicorp/terraform/issues/7649)) - * provider/vsphere: `gateway` and `ipv6_gateway` are now read from `vsphere_virtual_machine` resources ([#6522](https://github.com/hashicorp/terraform/issues/6522)) - * provider/vsphere: `ipv*_gateway` parameters won't force a new `vsphere_virtual_machine` 
([#6635](https://github.com/hashicorp/terraform/issues/6635)) - * provider/vsphere: adding a `vsphere_virtual_machine` migration ([#7023](https://github.com/hashicorp/terraform/issues/7023)) - * provider/vsphere: Don't require vsphere debug paths to be set ([#7027](https://github.com/hashicorp/terraform/issues/7027)) - * provider/vsphere: Fix bug where `enable_disk_uuid` was not set on `vsphere_virtual_machine` resources ([#7275](https://github.com/hashicorp/terraform/issues/7275)) - * provider/vsphere: Make `vsphere_virtual_machine` `product_key` optional ([#7410](https://github.com/hashicorp/terraform/issues/7410)) - * provider/vsphere: Refreshing devices list after adding a disk or cdrom controller ([#7167](https://github.com/hashicorp/terraform/issues/7167)) - * provider/vsphere: `vsphere_virtual_machine` no longer has to be powered on to delete ([#7206](https://github.com/hashicorp/terraform/issues/7206)) - * provider/vSphere: Fixes the hasBootableVmdk flag when attaching multiple disks ([#7804](https://github.com/hashicorp/terraform/issues/7804)) - * provisioner/remote-exec: Properly seed random script paths so they are not deterministic across runs ([#7413](https://github.com/hashicorp/terraform/issues/7413)) - -## 0.6.16 (May 9, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/aws: `aws_eip` field `private_ip` is now a computed value, and cannot be set in your configuration. - Use `associate_with_private_ip` instead. 
See ([#6521](https://github.com/hashicorp/terraform/issues/6521)) - -FEATURES: - - * **New provider:** `librato` ([#3371](https://github.com/hashicorp/terraform/issues/3371)) - * **New provider:** `softlayer` ([#4327](https://github.com/hashicorp/terraform/issues/4327)) - * **New resource:** `aws_api_gateway_account` ([#6321](https://github.com/hashicorp/terraform/issues/6321)) - * **New resource:** `aws_api_gateway_authorizer` ([#6320](https://github.com/hashicorp/terraform/issues/6320)) - * **New resource:** `aws_db_event_subscription` ([#6367](https://github.com/hashicorp/terraform/issues/6367)) - * **New resource:** `aws_db_option_group` ([#4401](https://github.com/hashicorp/terraform/issues/4401)) - * **New resource:** `aws_eip_association` ([#6552](https://github.com/hashicorp/terraform/issues/6552)) - * **New resource:** `openstack_networking_secgroup_rule_v2` ([#6410](https://github.com/hashicorp/terraform/issues/6410)) - * **New resource:** `openstack_networking_secgroup_v2` ([#6410](https://github.com/hashicorp/terraform/issues/6410)) - * **New resource:** `vsphere_file` ([#6401](https://github.com/hashicorp/terraform/issues/6401)) - -IMPROVEMENTS: - - * core: update HCL dependency to improve whitespace handling in `terraform fmt` ([#6347](https://github.com/hashicorp/terraform/issues/6347)) - * core: Add support for marking outputs as sensitive ([#6559](https://github.com/hashicorp/terraform/issues/6559)) - * provider/aws: Add agent_version argument to `aws_opswork_stack` ([#6493](https://github.com/hashicorp/terraform/issues/6493)) - * provider/aws: Add support for request parameters to `api_gateway_method` & `api_gateway_integration` ([#6501](https://github.com/hashicorp/terraform/issues/6501)) - * provider/aws: Add support for response parameters to `api_gateway_method_response` & `api_gateway_integration_response` ([#6344](https://github.com/hashicorp/terraform/issues/6344)) - * provider/aws: Allow empty S3 config in Cloudfront Origin 
([#6487](https://github.com/hashicorp/terraform/issues/6487)) - * provider/aws: Improve error handling in IAM Server Certificates ([#6442](https://github.com/hashicorp/terraform/issues/6442)) - * provider/aws: Use `sts:GetCallerIdentity` as additional method for getting AWS account ID ([#6385](https://github.com/hashicorp/terraform/issues/6385)) - * provider/aws: `aws_redshift_cluster` `automated_snapshot_retention_period` didn't allow 0 value ([#6537](https://github.com/hashicorp/terraform/issues/6537)) - * provider/aws: Add CloudFront `hosted_zone_id` attribute ([#6530](https://github.com/hashicorp/terraform/issues/6530)) - * provider/azurerm: Increase timeout for ARM Template deployments to 40 minutes ([#6319](https://github.com/hashicorp/terraform/issues/6319)) - * provider/azurerm: Make `private_ip_address` an exported field on `azurerm_network_interface` ([#6538](https://github.com/hashicorp/terraform/issues/6538)) - * provider/azurerm: Add support for `tags` to `azurerm_virtual_machine` ([#6556](https://github.com/hashicorp/terraform/issues/6556)) - * provider/azurerm: Add `os_type` and `image_uri` in `azurerm_virtual_machine` ([#6553](https://github.com/hashicorp/terraform/issues/6553)) - * provider/cloudflare: Add proxied option to `cloudflare_record` ([#5508](https://github.com/hashicorp/terraform/issues/5508)) - * provider/docker: Add ability to keep docker image locally on terraform destroy ([#6376](https://github.com/hashicorp/terraform/issues/6376)) - * provider/fastly: Add S3 Log Streaming to Fastly Service ([#6378](https://github.com/hashicorp/terraform/issues/6378)) - * provider/fastly: Add Conditions to Fastly Service ([#6481](https://github.com/hashicorp/terraform/issues/6481)) - * provider/github: Add support for Github Enterprise via base_url configuration option ([#6434](https://github.com/hashicorp/terraform/issues/6434)) - * provider/triton: Add support for specifying network interfaces on `triton machine` resources 
([#6418](https://github.com/hashicorp/terraform/issues/6418)) - * provider/triton: Deleted firewall rules no longer prevent refresh ([#6529](https://github.com/hashicorp/terraform/issues/6529)) - * provider/vsphere: Add `skip_customization` option to `vsphere_virtual_machine` resources ([#6355](https://github.com/hashicorp/terraform/issues/6355)) - * provider/vsphere: Add ability to specify and mount bootable vmdk in `vsphere_virtual_machine` ([#6146](https://github.com/hashicorp/terraform/issues/6146)) - * provider/vsphere: Add support for IPV6 to `vsphere_virtual_machine` ([#6457](https://github.com/hashicorp/terraform/issues/6457)) - * provider/vsphere: Add support for `memory_reservation` to `vsphere_virtual_machine` ([#6036](https://github.com/hashicorp/terraform/issues/6036)) - * provider/vsphere: Checking for empty diskPath in `vsphere_virtual_machine` before creating ([#6400](https://github.com/hashicorp/terraform/issues/6400)) - * provider/vsphere: Support updates to vcpu and memory on `vsphere_virtual_machine` ([#6356](https://github.com/hashicorp/terraform/issues/6356)) - * remote/s3: Logic for loading credentials now follows the same [conventions as AWS provider](https://www.terraform.io/docs/providers/aws/index.html#authentication) which means it also supports EC2 role auth and session token (e.g. 
assumed IAM Roles) ([#5270](https://github.com/hashicorp/terraform/issues/5270)) - -BUG FIXES: - - * core: Boolean values in diffs are normalized to `true` and `false`, eliminating some erroneous diffs ([#6499](https://github.com/hashicorp/terraform/issues/6499)) - * core: Fix a bug causing "attribute not found" messages during destroy ([#6557](https://github.com/hashicorp/terraform/issues/6557)) - * provider/aws: Allow account ID checks on EC2 instances & w/ federated accounts ([#5030](https://github.com/hashicorp/terraform/issues/5030)) - * provider/aws: Fix an eventually consistent issue aws_security_group_rule and possible duplications ([#6325](https://github.com/hashicorp/terraform/issues/6325)) - * provider/aws: Fix bug where `aws_elastic_beanstalk_environment` ignored `wait_for_ready_timeout` ([#6358](https://github.com/hashicorp/terraform/issues/6358)) - * provider/aws: Fix bug where `aws_elastic_beanstalk_environment` update config template didn't work ([#6342](https://github.com/hashicorp/terraform/issues/6342)) - * provider/aws: Fix issue in updating CloudFront distribution LoggingConfig ([#6407](https://github.com/hashicorp/terraform/issues/6407)) - * provider/aws: Fix issue in upgrading AutoScaling Policy to use `min_adjustment_magnitude` ([#6440](https://github.com/hashicorp/terraform/issues/6440)) - * provider/aws: Fix issue replacing Network ACL Relationship ([#6421](https://github.com/hashicorp/terraform/issues/6421)) - * provider/aws: Fix issue with KMS Alias keys and name prefixes ([#6328](https://github.com/hashicorp/terraform/issues/6328)) - * provider/aws: Fix issue with encrypted snapshots of block devices in `aws_launch_configuration` resources ([#6452](https://github.com/hashicorp/terraform/issues/6452)) - * provider/aws: Fix read of `aws_cloudwatch_log_group` after an update is applied ([#6384](https://github.com/hashicorp/terraform/issues/6384)) - * provider/aws: Fix updating `number_of_nodes` on `aws_redshift_cluster` 
([#6333](https://github.com/hashicorp/terraform/issues/6333)) - * provider/aws: Omit `aws_cloudfront_distribution` custom_error fields when not explicitly set ([#6382](https://github.com/hashicorp/terraform/issues/6382)) - * provider/aws: Refresh state on `aws_sqs_queue` not found ([#6381](https://github.com/hashicorp/terraform/issues/6381)) - * provider/aws: Respect `selection_pattern` in `aws_api_gateway_integration_response` (previously ignored field) ([#5893](https://github.com/hashicorp/terraform/issues/5893)) - * provider/aws: `aws_cloudfront_distribution` resources now require the `cookies` argument ([#6505](https://github.com/hashicorp/terraform/issues/6505)) - * provider/aws: `aws_route` crash when used with `aws_vpc_endpoint` ([#6338](https://github.com/hashicorp/terraform/issues/6338)) - * provider/aws: validate `cluster_id` length for `aws_elasticache_cluster` ([#6330](https://github.com/hashicorp/terraform/issues/6330)) - * provider/azurerm: `ssh_keys` can now be set for `azurerm_virtual_machine` resources, allowing provisioning ([#6541](https://github.com/hashicorp/terraform/issues/6541)) - * provider/azurerm: Fix issue that updating `azurerm_virtual_machine` was failing due to empty adminPassword ([#6528](https://github.com/hashicorp/terraform/issues/6528)) - * provider/azurerm: `storage_data_disk` settings now work correctly on `azurerm_virtual_machine` resources ([#6543](https://github.com/hashicorp/terraform/issues/6543)) - * provider/cloudflare: can manage apex records ([#6449](https://github.com/hashicorp/terraform/issues/6449)) - * provider/cloudflare: won't refresh with incorrect record if names match ([#6449](https://github.com/hashicorp/terraform/issues/6449)) - * provider/datadog: `notify_no_data` and `no_data_timeframe` are set correctly for `datadog_monitor` resources ([#6509](https://github.com/hashicorp/terraform/issues/6509)) - * provider/docker: Fix crash when using empty string in the `command` list in `docker_container` resources 
([#6424](https://github.com/hashicorp/terraform/issues/6424)) - * provider/vsphere: Memory reservations are now set correctly in `vsphere_virtual_machine` resources ([#6482](https://github.com/hashicorp/terraform/issues/6482)) - -## 0.6.15 (April 22, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - * `aws_instance` - if you still use `security_groups` field for SG IDs - i.e. inside VPC, this will generate diffs during `plan` and `apply` will **recreate** the resource. Terraform expects IDs (VPC SGs) inside `vpc_security_group_ids`. - -FEATURES: - - * **New command:** `terraform fmt` to automatically normalize config file style ([#4955](https://github.com/hashicorp/terraform/issues/4955)) - * **New interpolation function:** `jsonencode` ([#5890](https://github.com/hashicorp/terraform/issues/5890)) - * **New provider:** `cobbler` ([#5969](https://github.com/hashicorp/terraform/issues/5969)) - * **New provider:** `fastly` ([#5814](https://github.com/hashicorp/terraform/issues/5814)) - * **New resource:** `aws_cloudfront_distribution` ([#5221](https://github.com/hashicorp/terraform/issues/5221)) - * **New resource:** `aws_cloudfront_origin_access_identity` ([#5221](https://github.com/hashicorp/terraform/issues/5221)) - * **New resource:** `aws_iam_user_ssh_key` ([#5774](https://github.com/hashicorp/terraform/issues/5774)) - * **New resource:** `aws_s3_bucket_notification` ([#5473](https://github.com/hashicorp/terraform/issues/5473)) - * **New resource:** `cloudstack_static_nat` ([#6004](https://github.com/hashicorp/terraform/issues/6004)) - * **New resource:** `consul_key_prefix` ([#5988](https://github.com/hashicorp/terraform/issues/5988)) - * **New resource:** `aws_default_network_acl` ([#6165](https://github.com/hashicorp/terraform/issues/6165)) - * **New resource:** `triton_fabric` ([#5920](https://github.com/hashicorp/terraform/issues/5920)) - * **New resource:** `triton_vlan` ([#5920](https://github.com/hashicorp/terraform/issues/5920)) - * **New resource:** 
`aws_opsworks_application` ([#4419](https://github.com/hashicorp/terraform/issues/4419)) - * **New resource:** `aws_opsworks_instance` ([#4276](https://github.com/hashicorp/terraform/issues/4276)) - * **New resource:** `aws_cloudwatch_log_subscription_filter` ([#5996](https://github.com/hashicorp/terraform/issues/5996)) - * **New resource:** `openstack_networking_router_route_v2` ([#6207](https://github.com/hashicorp/terraform/issues/6207)) - -IMPROVEMENTS: - - * command/apply: Output will now show periodic status updates of slow resources. ([#6163](https://github.com/hashicorp/terraform/issues/6163)) - * core: Variables passed between modules are now type checked ([#6185](https://github.com/hashicorp/terraform/issues/6185)) - * core: Smaller release binaries by stripping debug information ([#6238](https://github.com/hashicorp/terraform/issues/6238)) - * provider/aws: Add support for Step Scaling in `aws_autoscaling_policy` ([#4277](https://github.com/hashicorp/terraform/issues/4277)) - * provider/aws: Add support for `cname_prefix` to `aws_elastic_beanstalk_environment` resource ([#5966](https://github.com/hashicorp/terraform/issues/5966)) - * provider/aws: Add support for trigger_configuration to `aws_codedeploy_deployment_group` ([#5599](https://github.com/hashicorp/terraform/issues/5599)) - * provider/aws: Adding outputs for elastic_beanstalk_environment resource ([#5915](https://github.com/hashicorp/terraform/issues/5915)) - * provider/aws: Adds `wait_for_ready_timeout` option to `aws_elastic_beanstalk_environment` ([#5967](https://github.com/hashicorp/terraform/issues/5967)) - * provider/aws: Allow `aws_db_subnet_group` description to be updated ([#5921](https://github.com/hashicorp/terraform/issues/5921)) - * provider/aws: Allow multiple EIPs to associate to single ENI ([#6070](https://github.com/hashicorp/terraform/issues/6070)) - * provider/aws: Change `aws_elb` access_logs to list type ([#5065](https://github.com/hashicorp/terraform/issues/5065)) - * 
provider/aws: Check that InternetGateway exists before returning from creation ([#6105](https://github.com/hashicorp/terraform/issues/6105)) - * provider/aws: Don't Base64-encode EC2 userdata if it is already Base64 encoded ([#6140](https://github.com/hashicorp/terraform/issues/6140)) - * provider/aws: Making the Cloudwatch Event Rule Target `target_id` optional ([#5787](https://github.com/hashicorp/terraform/issues/5787)) - * provider/aws: Timeouts for `elasticsearch_domain` are increased ([#5910](https://github.com/hashicorp/terraform/issues/5910)) - * provider/aws: `aws_codecommit_repository` set `default_branch` only if defined ([#5904](https://github.com/hashicorp/terraform/issues/5904)) - * provider/aws: `aws_redshift_cluster` allows usernames with underscore in it ([#5935](https://github.com/hashicorp/terraform/issues/5935)) - * provider/aws: normalise json for `aws_sns_topic` ([#6089](https://github.com/hashicorp/terraform/issues/6089)) - * provider/aws: normalize json for `aws_cloudwatch_event_rule` ([#6025](https://github.com/hashicorp/terraform/issues/6025)) - * provider/aws: increase timeout for aws_redshift_cluster ([#6305](https://github.com/hashicorp/terraform/issues/6305)) - * provider/aws: Opsworks layers now support `custom_json` argument ([#4272](https://github.com/hashicorp/terraform/issues/4272)) - * provider/aws: Added migration for `tier` attribute in `aws_elastic_beanstalk_environment` ([#6167](https://github.com/hashicorp/terraform/issues/6167)) - * provider/aws: Use resource.Retry for route creation and deletion ([#6225](https://github.com/hashicorp/terraform/issues/6225)) - * provider/aws: Add support S3 Bucket Lifecycle Rule ([#6220](https://github.com/hashicorp/terraform/issues/6220)) - * provider/clc: Override default `account` alias in provider config ([#5785](https://github.com/hashicorp/terraform/issues/5785)) - * provider/cloudstack: Deprecate `ipaddress` in favour of `ip_address` in all resources 
([#6010](https://github.com/hashicorp/terraform/issues/6010)) - * provider/cloudstack: Deprecate allowing names (instead of IDs) for parameters that reference other resources ([#6123](https://github.com/hashicorp/terraform/issues/6123)) - * provider/datadog: Add heredoc support to message, escalation_message, and query ([#5788](https://github.com/hashicorp/terraform/issues/5788)) - * provider/docker: Add support for docker run --user option ([#5300](https://github.com/hashicorp/terraform/issues/5300)) - * provider/github: Add support for privacy to `github_team` ([#6116](https://github.com/hashicorp/terraform/issues/6116)) - * provider/google: Accept GOOGLE_CLOUD_KEYFILE_JSON env var for credentials ([#6007](https://github.com/hashicorp/terraform/issues/6007)) - * provider/google: Add "project" argument and attribute to all GCP compute resources which inherit from the provider's value ([#6112](https://github.com/hashicorp/terraform/issues/6112)) - * provider/google: Make "project" attribute on provider configuration optional ([#6112](https://github.com/hashicorp/terraform/issues/6112)) - * provider/google: Read more common configuration values from the environment and clarify precedence ordering ([#6114](https://github.com/hashicorp/terraform/issues/6114)) - * provider/google: `addons_config` and `subnetwork` added as attributes to `google_container_cluster` ([#5871](https://github.com/hashicorp/terraform/issues/5871)) - * provider/fastly: Add support for Request Headers ([#6197](https://github.com/hashicorp/terraform/issues/6197)) - * provider/fastly: Add support for Gzip rules ([#6247](https://github.com/hashicorp/terraform/issues/6247)) - * provider/openstack: Add value_specs argument and attribute for routers ([#4898](https://github.com/hashicorp/terraform/issues/4898)) - * provider/openstack: Allow subnets with no gateway ([#6060](https://github.com/hashicorp/terraform/issues/6060)) - * provider/openstack: Enable Token Authentication 
([#6081](https://github.com/hashicorp/terraform/issues/6081)) - * provider/postgresql: New `ssl_mode` argument allowing different SSL usage tradeoffs ([#6008](https://github.com/hashicorp/terraform/issues/6008)) - * provider/vsphere: Support for linked clones and Windows-specific guest config options ([#6087](https://github.com/hashicorp/terraform/issues/6087)) - * provider/vsphere: Checking for Powered Off State before `vsphere_virtual_machine` deletion ([#6283](https://github.com/hashicorp/terraform/issues/6283)) - * provider/vsphere: Support mounting ISO images to virtual cdrom drives ([#4243](https://github.com/hashicorp/terraform/issues/4243)) - * provider/vsphere: Fix missing ssh connection info ([#4283](https://github.com/hashicorp/terraform/issues/4283)) - * provider/google: Deprecate unused "region" attribute in `global_forwarding_rule`; this attribute was never used anywhere in the computation of the resource ([#6112](https://github.com/hashicorp/terraform/issues/6112)) - * provider/cloudstack: Add group attribute to `cloudstack_instance` resource ([#6023](https://github.com/hashicorp/terraform/issues/6023)) - * provider/azurerm: Provide meaningful error message when credentials not correct ([#6290](https://github.com/hashicorp/terraform/issues/6290)) - * provider/cloudstack: Improve support for using projects ([#6282](https://github.com/hashicorp/terraform/issues/6282)) - -BUG FIXES: - - * core: Providers are now correctly inherited down a nested module tree ([#6186](https://github.com/hashicorp/terraform/issues/6186)) - * provider/aws: Convert protocols to standard format for Security Groups ([#5881](https://github.com/hashicorp/terraform/issues/5881)) - * provider/aws: Fix Lambda VPC integration (missing `vpc_id` field in schema) ([#6157](https://github.com/hashicorp/terraform/issues/6157)) - * provider/aws: Fix `aws_route panic` when destination CIDR block is nil ([#5781](https://github.com/hashicorp/terraform/issues/5781)) - * provider/aws: Fix 
issue re-creating deleted VPC peering connections ([#5959](https://github.com/hashicorp/terraform/issues/5959)) - * provider/aws: Fix issue with changing iops when also changing storage type to io1 on RDS ([#5676](https://github.com/hashicorp/terraform/issues/5676)) - * provider/aws: Fix issue with retrying deletion of Network ACLs ([#5954](https://github.com/hashicorp/terraform/issues/5954)) - * provider/aws: Fix potential crash when receiving malformed `aws_route` API responses ([#5867](https://github.com/hashicorp/terraform/issues/5867)) - * provider/aws: Guard against empty responses from Lambda Permissions ([#5838](https://github.com/hashicorp/terraform/issues/5838)) - * provider/aws: Normalize and compact SQS Redrive, Policy JSON ([#5888](https://github.com/hashicorp/terraform/issues/5888)) - * provider/aws: Fix issue updating ElasticBeanstalk Configuration Templates ([#6307](https://github.com/hashicorp/terraform/issues/6307)) - * provider/aws: Remove CloudTrail Trail from state if not found ([#6024](https://github.com/hashicorp/terraform/issues/6024)) - * provider/aws: Fix crash in AWS S3 Bucket when website index/error is empty ([#6269](https://github.com/hashicorp/terraform/issues/6269)) - * provider/aws: Report better error message in `aws_route53_record` when `set_identifier` is required ([#5777](https://github.com/hashicorp/terraform/issues/5777)) - * provider/aws: Show human-readable error message when failing to read an EBS volume ([#6038](https://github.com/hashicorp/terraform/issues/6038)) - * provider/aws: set ASG `health_check_grace_period` default to 300 ([#5830](https://github.com/hashicorp/terraform/issues/5830)) - * provider/aws: Fix issue with Opsworks and empty Custom Cook Book sources ([#6078](https://github.com/hashicorp/terraform/issues/6078)) - * provider/aws: wait for IAM instance profile to propagate when creating Opsworks stacks ([#6049](https://github.com/hashicorp/terraform/issues/6049)) - * provider/aws: Don't read back 
`aws_opsworks_stack` cookbooks source password ([#6203](https://github.com/hashicorp/terraform/issues/6203)) - * provider/aws: Resolves DefaultOS and ConfigurationManager conflict on `aws_opsworks_stack` ([#6244](https://github.com/hashicorp/terraform/issues/6244)) - * provider/aws: Renaming `aws_elastic_beanstalk_configuration_template` `option_settings` to `setting` ([#6043](https://github.com/hashicorp/terraform/issues/6043)) - * provider/aws: `aws_customer_gateway` will properly populate `bgp_asn` on refresh. [no issue] - * provider/aws: Refresh state on `aws_directory_service_directory` not found ([#6294](https://github.com/hashicorp/terraform/issues/6294)) - * provider/aws: `aws_elb` `cross_zone_load_balancing` is not refreshed in the state file ([#6295](https://github.com/hashicorp/terraform/issues/6295)) - * provider/aws: `aws_autoscaling_group` will properly populate `tag` on refresh. [no issue] - * provider/azurerm: Fix detection of `azurerm_storage_account` resources removed manually ([#5878](https://github.com/hashicorp/terraform/issues/5878)) - * provider/docker: Docker Image will be deleted on destroy ([#5801](https://github.com/hashicorp/terraform/issues/5801)) - * provider/openstack: Fix Disabling DHCP on Subnets ([#6052](https://github.com/hashicorp/terraform/issues/6052)) - * provider/openstack: Fix resizing when Flavor Name changes ([#6020](https://github.com/hashicorp/terraform/issues/6020)) - * provider/openstack: Fix Access Address Detection ([#6181](https://github.com/hashicorp/terraform/issues/6181)) - * provider/openstack: Fix admin_state_up on openstack_lb_member_v1 ([#6267](https://github.com/hashicorp/terraform/issues/6267)) - * provider/triton: Firewall status on `triton_machine` resources is reflected correctly ([#6119](https://github.com/hashicorp/terraform/issues/6119)) - * provider/triton: Fix time out when applying updates to Triton machine metadata ([#6149](https://github.com/hashicorp/terraform/issues/6149)) - * 
provider/vsphere: Add error handling to `vsphere_folder` ([#6095](https://github.com/hashicorp/terraform/issues/6095)) - * provider/cloudstack: Fix marshalling errors when using CloudStack 4.7.x (or newer) [GH-#226] - -## 0.6.14 (March 21, 2016) - -FEATURES: - - * **New provider:** `triton` - Manage Joyent Triton public cloud or on-premise installations ([#5738](https://github.com/hashicorp/terraform/issues/5738)) - * **New provider:** `clc` - Manage CenturyLink Cloud resources ([#4893](https://github.com/hashicorp/terraform/issues/4893)) - * **New provider:** `github` - Manage GitHub Organization permissions with Terraform config ([#5194](https://github.com/hashicorp/terraform/issues/5194)) - * **New provider:** `influxdb` - Manage InfluxDB databases ([#3478](https://github.com/hashicorp/terraform/issues/3478)) - * **New provider:** `ultradns` - Manage UltraDNS records ([#5716](https://github.com/hashicorp/terraform/issues/5716)) - * **New resource:** `aws_cloudwatch_log_metric_filter` ([#5444](https://github.com/hashicorp/terraform/issues/5444)) - * **New resource:** `azurerm_virtual_machine` ([#5514](https://github.com/hashicorp/terraform/issues/5514)) - * **New resource:** `azurerm_template_deployment` ([#5758](https://github.com/hashicorp/terraform/issues/5758)) - * **New interpolation function:** `uuid` ([#5575](https://github.com/hashicorp/terraform/issues/5575)) - -IMPROVEMENTS: - - * core: provisioners connecting via WinRM now respect HTTPS settings ([#5761](https://github.com/hashicorp/terraform/issues/5761)) - * provider/aws: `aws_db_instance` now makes `identifier` optional and generates a unique ID when it is omitted ([#5723](https://github.com/hashicorp/terraform/issues/5723)) - * provider/aws: `aws_redshift_cluster` now allows `publicly_accessible` to be modified ([#5721](https://github.com/hashicorp/terraform/issues/5721)) - * provider/aws: `aws_kms_alias` now allows name to be auto-generated with a `name_prefix` 
([#5594](https://github.com/hashicorp/terraform/issues/5594)) - -BUG FIXES: - - * core: Color output is now shown correctly when running Terraform on Windows ([#5718](https://github.com/hashicorp/terraform/issues/5718)) - * core: HEREDOCs can now be indented in line with configuration using `<<-` and hanging indent is removed ([#5740](https://github.com/hashicorp/terraform/issues/5740)) - * core: Invalid HCL syntax of nested object blocks no longer causes a crash ([#5740](https://github.com/hashicorp/terraform/issues/5740)) - * core: Local directory-based modules now use junctions instead of symbolic links on Windows ([#5739](https://github.com/hashicorp/terraform/issues/5739)) - * core: Modules sourced from a Mercurial repository now work correctly on Windows ([#5739](https://github.com/hashicorp/terraform/issues/5739)) - * core: Address some issues with ignore_changes ([#5635](https://github.com/hashicorp/terraform/issues/5635)) - * core: Add a lock to fix an interpolation issue caught by the Go 1.6 concurrent map access detector ([#5772](https://github.com/hashicorp/terraform/issues/5772)) - * provider/aws: Fix crash when an `aws_rds_cluster_instance` is removed outside of Terraform ([#5717](https://github.com/hashicorp/terraform/issues/5717)) - * provider/aws: `aws_cloudformation_stack` use `timeout_in_minutes` for retry timeout to prevent unnecessary timeouts ([#5712](https://github.com/hashicorp/terraform/issues/5712)) - * provider/aws: `aws_lambda_function` resources no longer error on refresh if deleted externally to Terraform ([#5668](https://github.com/hashicorp/terraform/issues/5668)) - * provider/aws: `aws_vpn_connection` resources deleted via the console no longer cause a crash ([#5747](https://github.com/hashicorp/terraform/issues/5747)) - * provider/aws: Fix crasher in Elastic Beanstalk Configuration when using options ([#5756](https://github.com/hashicorp/terraform/issues/5756)) - * provider/aws: Fix issue preventing `aws_opsworks_stack` from working 
with Windows set as the OS ([#5724](https://github.com/hashicorp/terraform/issues/5724)) - * provider/digitalocean: `digitalocean_ssh_key` resources no longer cause a panic if there is no network connectivity ([#5748](https://github.com/hashicorp/terraform/issues/5748)) - * provider/google: Default description `google_dns_managed_zone` resources to "Managed By Terraform" ([#5428](https://github.com/hashicorp/terraform/issues/5428)) - * provider/google: Fix error message on invalid instance URL for `google_compute_instance_group` ([#5715](https://github.com/hashicorp/terraform/issues/5715)) - * provider/vsphere: provide `host` to provisioner connections ([#5558](https://github.com/hashicorp/terraform/issues/5558)) - * provisioner/remote-exec: Address race condition introduced with script cleanup step introduced in 0.6.13 ([#5751](https://github.com/hashicorp/terraform/issues/5751)) - -## 0.6.13 (March 16, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/aws: `aws_s3_bucket_object` field `etag` is now trimming off quotes (returns raw MD5 hash) ([#5305](https://github.com/hashicorp/terraform/issues/5305)) - * provider/aws: `aws_autoscaling_group` now supports metrics collection, so a diff installing the default value of `1Minute` for the `metrics_granularity` field is expected. This diff should resolve in the next `terraform apply` w/ no AWS API calls ([#4688](https://github.com/hashicorp/terraform/issues/4688)) - * provider/consul: `consul_keys` `key` blocks now respect `delete` flag for removing individual blocks. Previously keys would be deleted only when the entire resource was removed. 
- * provider/google: `next_hop_network` on `google_compute_route` is now read-only, to mirror the behavior in the official docs ([#5564](https://github.com/hashicorp/terraform/issues/5564)) - * state/remote/http: PUT requests for this backend will now have `Content-Type: application/json` instead of `application/octet-stream` ([#5499](https://github.com/hashicorp/terraform/issues/5499)) - -FEATURES: - - * **New command:** `terraform untaint` ([#5527](https://github.com/hashicorp/terraform/issues/5527)) - * **New resource:** `aws_api_gateway_api_key` ([#4295](https://github.com/hashicorp/terraform/issues/4295)) - * **New resource:** `aws_api_gateway_deployment` ([#4295](https://github.com/hashicorp/terraform/issues/4295)) - * **New resource:** `aws_api_gateway_integration_response` ([#4295](https://github.com/hashicorp/terraform/issues/4295)) - * **New resource:** `aws_api_gateway_integration` ([#4295](https://github.com/hashicorp/terraform/issues/4295)) - * **New resource:** `aws_api_gateway_method_response` ([#4295](https://github.com/hashicorp/terraform/issues/4295)) - * **New resource:** `aws_api_gateway_method` ([#4295](https://github.com/hashicorp/terraform/issues/4295)) - * **New resource:** `aws_api_gateway_model` ([#4295](https://github.com/hashicorp/terraform/issues/4295)) - * **New resource:** `aws_api_gateway_resource` ([#4295](https://github.com/hashicorp/terraform/issues/4295)) - * **New resource:** `aws_api_gateway_rest_api` ([#4295](https://github.com/hashicorp/terraform/issues/4295)) - * **New resource:** `aws_elastic_beanstalk_application` ([#3157](https://github.com/hashicorp/terraform/issues/3157)) - * **New resource:** `aws_elastic_beanstalk_configuration_template` ([#3157](https://github.com/hashicorp/terraform/issues/3157)) - * **New resource:** `aws_elastic_beanstalk_environment` ([#3157](https://github.com/hashicorp/terraform/issues/3157)) - * **New resource:** `aws_iam_account_password_policy` 
([#5029](https://github.com/hashicorp/terraform/issues/5029)) - * **New resource:** `aws_kms_alias` ([#3928](https://github.com/hashicorp/terraform/issues/3928)) - * **New resource:** `aws_kms_key` ([#3928](https://github.com/hashicorp/terraform/issues/3928)) - * **New resource:** `google_compute_instance_group` ([#4087](https://github.com/hashicorp/terraform/issues/4087)) - -IMPROVEMENTS: - - * provider/aws: Add `repository_link` as a computed field for `aws_ecr_repository` ([#5524](https://github.com/hashicorp/terraform/issues/5524)) - * provider/aws: Add ability to update Route53 zone comments ([#5318](https://github.com/hashicorp/terraform/issues/5318)) - * provider/aws: Add support for Metrics Collection to `aws_autoscaling_group` ([#4688](https://github.com/hashicorp/terraform/issues/4688)) - * provider/aws: Add support for `description` to `aws_network_interface` ([#5523](https://github.com/hashicorp/terraform/issues/5523)) - * provider/aws: Add support for `storage_encrypted` to `aws_rds_cluster` ([#5520](https://github.com/hashicorp/terraform/issues/5520)) - * provider/aws: Add support for routing rules on `aws_s3_bucket` resources ([#5327](https://github.com/hashicorp/terraform/issues/5327)) - * provider/aws: Enable updates & versioning for `aws_s3_bucket_object` ([#5305](https://github.com/hashicorp/terraform/issues/5305)) - * provider/aws: Guard against Nil Reference in Redshift Endpoints ([#5593](https://github.com/hashicorp/terraform/issues/5593)) - * provider/aws: Lambda S3 object version defaults to `$LATEST` if unspecified ([#5370](https://github.com/hashicorp/terraform/issues/5370)) - * provider/aws: Retry DB Creation on IAM propagation error ([#5515](https://github.com/hashicorp/terraform/issues/5515)) - * provider/aws: Support KMS encryption of S3 objects ([#5453](https://github.com/hashicorp/terraform/issues/5453)) - * provider/aws: `aws_autoscaling_lifecycle_hook` now has `notification_target_arn` and `role_arn` as optional 
([#5616](https://github.com/hashicorp/terraform/issues/5616)) - * provider/aws: `aws_ecs_service` validates number of `load_balancer`s before creation/updates ([#5605](https://github.com/hashicorp/terraform/issues/5605)) - * provider/aws: send Terraform version in User-Agent ([#5621](https://github.com/hashicorp/terraform/issues/5621)) - * provider/cloudflare: Change `cloudflare_record` type to ForceNew ([#5353](https://github.com/hashicorp/terraform/issues/5353)) - * provider/consul: `consul_keys` now detects drift and supports deletion of individual `key` blocks ([#5210](https://github.com/hashicorp/terraform/issues/5210)) - * provider/digitalocean: Guard against Nil reference in `digitalocean_droplet` ([#5588](https://github.com/hashicorp/terraform/issues/5588)) - * provider/docker: Add support for `unless-stopped` to docker container `restart_policy` ([#5337](https://github.com/hashicorp/terraform/issues/5337)) - * provider/google: Mark `next_hop_network` as read-only on `google_compute_route` ([#5564](https://github.com/hashicorp/terraform/issues/5564)) - * provider/google: Validate VPN tunnel peer_ip at plan time ([#5501](https://github.com/hashicorp/terraform/issues/5501)) - * provider/openstack: Add Support for Domain ID and Domain Name environment variables ([#5355](https://github.com/hashicorp/terraform/issues/5355)) - * provider/openstack: Add support for instances to have multiple ephemeral disks. 
([#5131](https://github.com/hashicorp/terraform/issues/5131)) - * provider/openstack: Re-Add server.AccessIPv4 and server.AccessIPv6 ([#5366](https://github.com/hashicorp/terraform/issues/5366)) - * provider/vsphere: Add support for disk init types ([#4284](https://github.com/hashicorp/terraform/issues/4284)) - * provisioner/remote-exec: Clear out scripts after uploading ([#5577](https://github.com/hashicorp/terraform/issues/5577)) - * state/remote/http: Change content type of PUT requests to the more appropriate `application/json` ([#5499](https://github.com/hashicorp/terraform/issues/5499)) - -BUG FIXES: - - * core: Disallow negative indices in the element() interpolation function, preventing crash ([#5263](https://github.com/hashicorp/terraform/issues/5263)) - * core: Fix issue that caused tainted resource destroys to be improperly filtered out when using -target and a plan file ([#5516](https://github.com/hashicorp/terraform/issues/5516)) - * core: Fix several issues with retry logic causing spurious "timeout while waiting for state to become ..." 
errors and unnecessary retry loops ([#5460](https://github.com/hashicorp/terraform/issues/5460)), ([#5538](https://github.com/hashicorp/terraform/issues/5538)), ([#5543](https://github.com/hashicorp/terraform/issues/5543)), ([#5553](https://github.com/hashicorp/terraform/issues/5553)) - * core: Includes upstream HCL fix to properly detect unbalanced braces and throw an error ([#5400](https://github.com/hashicorp/terraform/issues/5400)) - * provider/aws: Allow recovering from failed CloudWatch Event Target creation ([#5395](https://github.com/hashicorp/terraform/issues/5395)) - * provider/aws: Fix EC2 Classic SG Rule issue when referencing rules by name ([#5533](https://github.com/hashicorp/terraform/issues/5533)) - * provider/aws: Fix `aws_cloudformation_stack` update for `parameters` & `capabilities` if unmodified ([#5603](https://github.com/hashicorp/terraform/issues/5603)) - * provider/aws: Fix a bug where AWS Kinesis Stream includes closed shards in the shard_count ([#5401](https://github.com/hashicorp/terraform/issues/5401)) - * provider/aws: Fix a bug where ElasticSearch Domain tags were not being set correctly ([#5361](https://github.com/hashicorp/terraform/issues/5361)) - * provider/aws: Fix a bug where `aws_route` would show continual changes in the plan when not computed ([#5321](https://github.com/hashicorp/terraform/issues/5321)) - * provider/aws: Fix a bug where `publicly_accessible` wasn't being set to state in `aws_db_instance` ([#5535](https://github.com/hashicorp/terraform/issues/5535)) - * provider/aws: Fix a bug where listener protocol on `aws_elb` resources was case insensitive ([#5376](https://github.com/hashicorp/terraform/issues/5376)) - * provider/aws: Fix a bug which caused panics creating rules on security groups in EC2 Classic ([#5329](https://github.com/hashicorp/terraform/issues/5329)) - * provider/aws: Fix crash when `aws_lambda_function` VpcId is nil ([#5182](https://github.com/hashicorp/terraform/issues/5182)) - * provider/aws: Fix 
error with parsing JSON in `aws_s3_bucket` policy attribute ([#5474](https://github.com/hashicorp/terraform/issues/5474)) - * provider/aws: `aws_lambda_function` can be properly updated, either via `s3_object_version` or via `filename` & `source_code_hash` as described in docs ([#5239](https://github.com/hashicorp/terraform/issues/5239)) - * provider/google: Fix managed instance group preemptible instance creation ([#4834](https://github.com/hashicorp/terraform/issues/4834)) - * provider/openstack: Account for a 403 reply when os-tenant-networks is disabled ([#5432](https://github.com/hashicorp/terraform/issues/5432)) - * provider/openstack: Fix crashing during certain network updates in instances ([#5365](https://github.com/hashicorp/terraform/issues/5365)) - * provider/openstack: Fix create/delete statuses in load balancing resources ([#5557](https://github.com/hashicorp/terraform/issues/5557)) - * provider/openstack: Fix race condition between instance deletion and volume detachment ([#5359](https://github.com/hashicorp/terraform/issues/5359)) - * provider/template: Warn when `template` attribute specified as path ([#5563](https://github.com/hashicorp/terraform/issues/5563)) - -INTERNAL IMPROVEMENTS: - - * helper/schema: `MaxItems` attribute on schema lists and sets ([#5218](https://github.com/hashicorp/terraform/issues/5218)) - -## 0.6.12 (February 24, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * The `publicly_accessible` attribute on `aws_redshift_cluster` resources now defaults to true - -FEATURES: - - * **New command:** `validate` to perform syntax validation ([#3783](https://github.com/hashicorp/terraform/issues/3783)) - * **New provider:** `datadog` ([#5251](https://github.com/hashicorp/terraform/issues/5251)) - * **New interpolation function:** `md5` ([#5267](https://github.com/hashicorp/terraform/issues/5267)) - * **New interpolation function:** `signum` ([#4854](https://github.com/hashicorp/terraform/issues/4854)) - * **New resource:** 
`aws_cloudwatch_event_rule` ([#4986](https://github.com/hashicorp/terraform/issues/4986)) - * **New resource:** `aws_cloudwatch_event_target` ([#4986](https://github.com/hashicorp/terraform/issues/4986)) - * **New resource:** `aws_lambda_permission` ([#4826](https://github.com/hashicorp/terraform/issues/4826)) - * **New resource:** `azurerm_dns_a_record` ([#5013](https://github.com/hashicorp/terraform/issues/5013)) - * **New resource:** `azurerm_dns_aaaa_record` ([#5013](https://github.com/hashicorp/terraform/issues/5013)) - * **New resource:** `azurerm_dns_cname_record` ([#5013](https://github.com/hashicorp/terraform/issues/5013)) - * **New resource:** `azurerm_dns_mx_record` ([#5041](https://github.com/hashicorp/terraform/issues/5041)) - * **New resource:** `azurerm_dns_ns_record` ([#5041](https://github.com/hashicorp/terraform/issues/5041)) - * **New resource:** `azurerm_dns_srv_record` ([#5041](https://github.com/hashicorp/terraform/issues/5041)) - * **New resource:** `azurerm_dns_txt_record` ([#5041](https://github.com/hashicorp/terraform/issues/5041)) - * **New resource:** `azurerm_dns_zone` ([#4979](https://github.com/hashicorp/terraform/issues/4979)) - * **New resource:** `azurerm_search_service` ([#5203](https://github.com/hashicorp/terraform/issues/5203)) - * **New resource:** `azurerm_sql_database` ([#5003](https://github.com/hashicorp/terraform/issues/5003)) - * **New resource:** `azurerm_sql_firewall_rule` ([#5057](https://github.com/hashicorp/terraform/issues/5057)) - * **New resource:** `azurerm_sql_server` ([#4991](https://github.com/hashicorp/terraform/issues/4991)) - * **New resource:** `google_compute_subnetwork` ([#5130](https://github.com/hashicorp/terraform/issues/5130)) - -IMPROVEMENTS: - - * core: Backend names are now down cased during `init` in the same manner as `remote config` ([#5012](https://github.com/hashicorp/terraform/issues/5012)) - * core: Upgrade resource name validation warning to an error as planned 
([#5272](https://github.com/hashicorp/terraform/issues/5272)) - * core: output "diffs didn't match" error details ([#5276](https://github.com/hashicorp/terraform/issues/5276)) - * provider/aws: Add `is_multi_region_trail` option to CloudTrail ([#4939](https://github.com/hashicorp/terraform/issues/4939)) - * provider/aws: Add support for HTTP(S) endpoints that auto confirm SNS subscription ([#4711](https://github.com/hashicorp/terraform/issues/4711)) - * provider/aws: Add support for Tags to CloudTrail ([#5135](https://github.com/hashicorp/terraform/issues/5135)) - * provider/aws: Add support for Tags to ElasticSearch ([#4973](https://github.com/hashicorp/terraform/issues/4973)) - * provider/aws: Add support for deployment configuration to `aws_ecs_service` ([#5220](https://github.com/hashicorp/terraform/issues/5220)) - * provider/aws: Add support for log validation + KMS encryption to `aws_cloudtrail` ([#5051](https://github.com/hashicorp/terraform/issues/5051)) - * provider/aws: Allow name-prefix and auto-generated names for IAM Server Cert ([#5178](https://github.com/hashicorp/terraform/issues/5178)) - * provider/aws: Expose additional VPN Connection attributes ([#5032](https://github.com/hashicorp/terraform/issues/5032)) - * provider/aws: Return an error if no matching route is found for an AWS Route ([#5155](https://github.com/hashicorp/terraform/issues/5155)) - * provider/aws: Support custom endpoints for AWS EC2 ELB and IAM ([#5114](https://github.com/hashicorp/terraform/issues/5114)) - * provider/aws: The `cluster_type` on `aws_redshift_cluster` resources is now computed ([#5238](https://github.com/hashicorp/terraform/issues/5238)) - * provider/aws: `aws_lambda_function` resources now support VPC configuration ([#5149](https://github.com/hashicorp/terraform/issues/5149)) - * provider/aws: Add support for Enhanced Monitoring to RDS Instances ([#4945](https://github.com/hashicorp/terraform/issues/4945)) - * provider/aws: Improve vpc cidr_block err message 
([#5255](https://github.com/hashicorp/terraform/issues/5255)) - * provider/aws: Implement Retention Period for `aws_kinesis_stream` ([#5223](https://github.com/hashicorp/terraform/issues/5223)) - * provider/aws: Enable `stream_arn` output for DynamoDB Table when streams are enabled ([#5271](https://github.com/hashicorp/terraform/issues/5271)) - * provider/digitalocean: `digitalocean_record` resources now export a computed `fqdn` attribute ([#5071](https://github.com/hashicorp/terraform/issues/5071)) - * provider/google: Add assigned IP Address to CloudSQL Instance `google_sql_database_instance` ([#5245](https://github.com/hashicorp/terraform/issues/5245)) - * provider/openstack: Add support for Distributed Routers ([#4878](https://github.com/hashicorp/terraform/issues/4878)) - * provider/openstack: Add support for optional cacert_file parameter ([#5106](https://github.com/hashicorp/terraform/issues/5106)) - -BUG FIXES: - - * core: Fix bug detecting deeply nested module orphans ([#5022](https://github.com/hashicorp/terraform/issues/5022)) - * core: Fix bug where `ignore_changes` could produce "diffs didn't match during apply" errors ([#4965](https://github.com/hashicorp/terraform/issues/4965)) - * core: Fix race condition when handling tainted resource destroys ([#5026](https://github.com/hashicorp/terraform/issues/5026)) - * core: Improve handling of Provisioners in the graph, fixing "Provisioner already initialized" errors ([#4877](https://github.com/hashicorp/terraform/issues/4877)) - * core: Skip `create_before_destroy` processing during a `terraform destroy`, solving several issues preventing `destroy` - from working properly with CBD resources ([#5096](https://github.com/hashicorp/terraform/issues/5096)) - * core: Error instead of panic on self var in wrong scope ([#5273](https://github.com/hashicorp/terraform/issues/5273)) - * provider/aws: Fix Copy of Tags to DB Instance when created from Snapshot ([#5197](https://github.com/hashicorp/terraform/issues/5197)) 
- * provider/aws: Fix DynamoDB Table Refresh to ensure deleted tables are removed from state ([#4943](https://github.com/hashicorp/terraform/issues/4943)) - * provider/aws: Fix ElasticSearch `domain_name` validation ([#4973](https://github.com/hashicorp/terraform/issues/4973)) - * provider/aws: Fix issue applying security group changes in EC2 Classic RDS for aws_db_instance ([#4969](https://github.com/hashicorp/terraform/issues/4969)) - * provider/aws: Fix reading auto scaling group availability zones ([#5044](https://github.com/hashicorp/terraform/issues/5044)) - * provider/aws: Fix reading auto scaling group load balancers ([#5045](https://github.com/hashicorp/terraform/issues/5045)) - * provider/aws: Fix `aws_redshift_cluster` to allow `publicly_accessible` to be false ([#5262](https://github.com/hashicorp/terraform/issues/5262)) - * provider/aws: Wait longer for internet gateways to detach ([#5120](https://github.com/hashicorp/terraform/issues/5120)) - * provider/aws: Fix issue reading auto scaling group termination policies ([#5101](https://github.com/hashicorp/terraform/issues/5101)) - * provider/cloudflare: `ttl` no longer shows a change on each plan on `cloudflare_record` resources ([#5042](https://github.com/hashicorp/terraform/issues/5042)) - * provider/docker: Fix the default docker_host value ([#5088](https://github.com/hashicorp/terraform/issues/5088)) - * provider/google: Fix backend service max_utilization attribute ([#5075](https://github.com/hashicorp/terraform/issues/5075)) - * provider/google: Fix reading of `google_compute_vpn_gateway` without an explicit ([#5125](https://github.com/hashicorp/terraform/issues/5125)) - * provider/google: Fix crash when setting `ack_deadline_seconds` on `google_pubsub_subscription` ([#5110](https://github.com/hashicorp/terraform/issues/5110)) - * provider/openstack: Fix crash when `access_network` was not defined in instances ([#4966](https://github.com/hashicorp/terraform/issues/4966)) - * provider/powerdns: Fix 
refresh of `powerdns_record` no longer fails if the record name contains a `-` ([#5228](https://github.com/hashicorp/terraform/issues/5228)) - * provider/vcd: Wait for DHCP assignment when creating `vcd_vapp` resources with no static IP assignment ([#5195](https://github.com/hashicorp/terraform/issues/5195)) - -## 0.6.11 (February 1, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * The `max_size`, `min_size` and `desired_capacity` attributes on `aws_autoscaling_schedule` resources now default to 0 - -FEATURES: - - * **New provider: `powerdns` - PowerDNS REST API** ([#4885](https://github.com/hashicorp/terraform/issues/4885)) - * **New builtin function:** `trimspace` for trimming whitespaces ([#4910](https://github.com/hashicorp/terraform/issues/4910)) - * **New builtin function:** `base64sha256` for base64 encoding raw sha256 sum of a given string ([#4899](https://github.com/hashicorp/terraform/issues/4899)) - * **New resource:** `openstack_lb_member_v1` ([#4359](https://github.com/hashicorp/terraform/issues/4359)) - -IMPROVEMENTS: - - * provider/template: Remove unnecessary mime-type validation from `template_cloudinit_config` resources ([#4873](https://github.com/hashicorp/terraform/issues/4873)) - * provider/template: Correct spelling of "Boundary" in the part separator of rendered `template_cloudinit_config` resources ([#4873](https://github.com/hashicorp/terraform/issues/4873)) - * provider/aws: Provide a better message if no AWS creds are found ([#4869](https://github.com/hashicorp/terraform/issues/4869)) - * provider/openstack: Ability to specify per-network Floating IPs ([#4812](https://github.com/hashicorp/terraform/issues/4812)) - -BUG FIXES: - - * provider/aws: `aws_autoscale_schedule` 0 values ([#4693](https://github.com/hashicorp/terraform/issues/4693)) - * provider/aws: Fix regression with VPCs and ClassicLink for regions that do not support it ([#4879](https://github.com/hashicorp/terraform/issues/4879)) - * provider/aws: Change VPC ClassicLink to 
be computed ([#4933](https://github.com/hashicorp/terraform/issues/4933)) - * provider/aws: Fix SNS Topic Refresh to ensure deleted topics are removed from state ([#4891](https://github.com/hashicorp/terraform/issues/4891)) - * provider/aws: Refactor Route53 record to fix regression in deleting records created in previous versions of Terraform ([#4892](https://github.com/hashicorp/terraform/issues/4892)) - * provider/azurerm: Fix panic if no creds supplied ([#4902](https://github.com/hashicorp/terraform/issues/4902)) - * provider/openstack: Changing the port resource to mark the ip_address as optional ([#4850](https://github.com/hashicorp/terraform/issues/4850)) - * provider/docker: Catch potential custom network errors in docker ([#4918](https://github.com/hashicorp/terraform/issues/4918)) - - - -## 0.6.10 (January 27, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * The `-module-depth` flag available on `plan`, `apply`, `show`, and `graph` now defaults to `-1`, causing - resources within modules to be expanded in command output. This is only a cosmetic change; it does not affect - any behavior. - * This release includes a bugfix for `$${}` interpolation escaping. These strings are now properly converted to `${}` - during interpolation. This may cause diffs on existing configurations in certain cases. - * Users of `consul_keys` should note that the `value` sub-attribute of `key` will no longer be updated with the remote value of the key. It should be only used to _set_ a key in Consul K/V. To reference key values, use the `var` attribute. - * The 0.6.9 release contained a regression in `aws_autoscaling_group` capacity waiting behavior for configs where `min_elb_capacity != desired_capacity` or `min_size != desired_capacity`. This release remedies that regression by un-deprecating `min_elb_capacity` and restoring the prior behavior. 
- * Users of `aws_security_group` may notice new diffs in initial plans with 0.6.10 due to a bugfix that fixes drift detection on nested security group rules. These new diffs should reflect the actual state of the resources, which Terraform previously was unable to see. - - -FEATURES: - - * **New resource: `aws_lambda_alias`** ([#4664](https://github.com/hashicorp/terraform/issues/4664)) - * **New resource: `aws_redshift_cluster`** ([#3862](https://github.com/hashicorp/terraform/issues/3862)) - * **New resource: `aws_redshift_parameter_group`** ([#3862](https://github.com/hashicorp/terraform/issues/3862)) - * **New resource: `aws_redshift_security_group`** ([#3862](https://github.com/hashicorp/terraform/issues/3862)) - * **New resource: `aws_redshift_subnet_group`** ([#3862](https://github.com/hashicorp/terraform/issues/3862)) - * **New resource: `azurerm_cdn_endpoint`** ([#4759](https://github.com/hashicorp/terraform/issues/4759)) - * **New resource: `azurerm_cdn_profile`** ([#4740](https://github.com/hashicorp/terraform/issues/4740)) - * **New resource: `azurerm_network_interface`** ([#4598](https://github.com/hashicorp/terraform/issues/4598)) - * **New resource: `azurerm_network_security_rule`** ([#4586](https://github.com/hashicorp/terraform/issues/4586)) - * **New resource: `azurerm_route_table`** ([#4602](https://github.com/hashicorp/terraform/issues/4602)) - * **New resource: `azurerm_route`** ([#4604](https://github.com/hashicorp/terraform/issues/4604)) - * **New resource: `azurerm_storage_account`** ([#4698](https://github.com/hashicorp/terraform/issues/4698)) - * **New resource: `azurerm_storage_blob`** ([#4862](https://github.com/hashicorp/terraform/issues/4862)) - * **New resource: `azurerm_storage_container`** ([#4862](https://github.com/hashicorp/terraform/issues/4862)) - * **New resource: `azurerm_storage_queue`** ([#4862](https://github.com/hashicorp/terraform/issues/4862)) - * **New resource: `azurerm_subnet`** 
([#4595](https://github.com/hashicorp/terraform/issues/4595)) - * **New resource: `docker_network`** ([#4483](https://github.com/hashicorp/terraform/issues/4483)) - * **New resource: `docker_volume`** ([#4483](https://github.com/hashicorp/terraform/issues/4483)) - * **New resource: `google_sql_user`** ([#4669](https://github.com/hashicorp/terraform/issues/4669)) - -IMPROVEMENTS: - - * core: Add `sha256()` interpolation function ([#4704](https://github.com/hashicorp/terraform/issues/4704)) - * core: Validate lifecycle keys to show helpful error messages when they are mistyped ([#4745](https://github.com/hashicorp/terraform/issues/4745)) - * core: Default `module-depth` parameter to `-1`, which expands resources within modules in command output ([#4763](https://github.com/hashicorp/terraform/issues/4763)) - * core: Variable types may now be specified explicitly using the `type` argument ([#4795](https://github.com/hashicorp/terraform/issues/4795)) - * provider/aws: Add new parameters `az_mode` and `availability_zone(s)` in ElastiCache ([#4631](https://github.com/hashicorp/terraform/issues/4631)) - * provider/aws: Allow ap-northeast-2 (Seoul) as valid region ([#4637](https://github.com/hashicorp/terraform/issues/4637)) - * provider/aws: Limit SNS Topic Subscription protocols ([#4639](https://github.com/hashicorp/terraform/issues/4639)) - * provider/aws: Add support for configuring logging on `aws_s3_bucket` resources ([#4482](https://github.com/hashicorp/terraform/issues/4482)) - * provider/aws: Add AWS Classiclink for AWS VPC resource ([#3994](https://github.com/hashicorp/terraform/issues/3994)) - * provider/aws: Supporting New AWS Route53 HealthCheck additions ([#4564](https://github.com/hashicorp/terraform/issues/4564)) - * provider/aws: Store instance state ([#3261](https://github.com/hashicorp/terraform/issues/3261)) - * provider/aws: Add support for updating ELB availability zones and subnets ([#4597](https://github.com/hashicorp/terraform/issues/4597)) - * 
provider/aws: Enable specifying aws s3 redirect protocol ([#4098](https://github.com/hashicorp/terraform/issues/4098)) - * provider/aws: Added support for `encrypted` on `ebs_block_devices` in Launch Configurations ([#4481](https://github.com/hashicorp/terraform/issues/4481)) - * provider/aws: Retry Listener Creation for ELBs ([#4825](https://github.com/hashicorp/terraform/issues/4825)) - * provider/aws: Add support for creating Managed Microsoft Active Directory - and Directory Connectors ([#4388](https://github.com/hashicorp/terraform/issues/4388)) - * provider/aws: Mark some `aws_db_instance` fields as optional ([#3138](https://github.com/hashicorp/terraform/issues/3138)) - * provider/digitalocean: Add support for reassigning `digitalocean_floating_ip` resources ([#4476](https://github.com/hashicorp/terraform/issues/4476)) - * provider/dme: Add support for Global Traffic Director locations on `dme_record` resources ([#4305](https://github.com/hashicorp/terraform/issues/4305)) - * provider/docker: Add support for adding host entries on `docker_container` resources ([#3463](https://github.com/hashicorp/terraform/issues/3463)) - * provider/docker: Add support for mounting named volumes on `docker_container` resources ([#4480](https://github.com/hashicorp/terraform/issues/4480)) - * provider/google: Add content field to bucket object ([#3893](https://github.com/hashicorp/terraform/issues/3893)) - * provider/google: Add support for `named_port` blocks on `google_compute_instance_group_manager` resources ([#4605](https://github.com/hashicorp/terraform/issues/4605)) - * provider/openstack: Add "personality" support to instance resource ([#4623](https://github.com/hashicorp/terraform/issues/4623)) - * provider/packet: Handle external state changes for Packet resources gracefully ([#4676](https://github.com/hashicorp/terraform/issues/4676)) - * provider/tls: `tls_private_key` now exports attributes with public key in both PEM and OpenSSH format 
([#4606](https://github.com/hashicorp/terraform/issues/4606)) - * provider/vcd: Add `allow_unverified_ssl` for connections to vCloud API ([#4811](https://github.com/hashicorp/terraform/issues/4811)) - * state/remote: Allow KMS Key Encryption to be used with S3 backend ([#2903](https://github.com/hashicorp/terraform/issues/2903)) - -BUG FIXES: - - * core: Fix handling of literals with escaped interpolations `$${var}` ([#4747](https://github.com/hashicorp/terraform/issues/4747)) - * core: Fix diff mismatch when RequiresNew field and list both change ([#4749](https://github.com/hashicorp/terraform/issues/4749)) - * core: Respect module target path argument on `terraform init` ([#4753](https://github.com/hashicorp/terraform/issues/4753)) - * core: Write planfile even on empty plans ([#4766](https://github.com/hashicorp/terraform/issues/4766)) - * core: Add validation error when output is missing value field ([#4762](https://github.com/hashicorp/terraform/issues/4762)) - * core: Fix improper handling of orphan resources when targeting ([#4574](https://github.com/hashicorp/terraform/issues/4574)) - * core: Properly handle references to computed set attributes ([#4840](https://github.com/hashicorp/terraform/issues/4840)) - * config: Detect a specific JSON edge case and show a helpful workaround ([#4746](https://github.com/hashicorp/terraform/issues/4746)) - * provider/openstack: Ensure valid Security Group Rule attribute combination ([#4466](https://github.com/hashicorp/terraform/issues/4466)) - * provider/openstack: Don't put fixed_ip in port creation request if not defined ([#4617](https://github.com/hashicorp/terraform/issues/4617)) - * provider/google: Clarify SQL Database Instance recent name restriction ([#4577](https://github.com/hashicorp/terraform/issues/4577)) - * provider/google: Split Instance network interface into two fields ([#4265](https://github.com/hashicorp/terraform/issues/4265)) - * provider/aws: Error with empty list item on security group 
([#4140](https://github.com/hashicorp/terraform/issues/4140)) - * provider/aws: Fix issue with detecting drift in AWS Security Groups rules ([#4779](https://github.com/hashicorp/terraform/issues/4779)) - * provider/aws: Trap Instance error from mismatched SG IDs and Names ([#4240](https://github.com/hashicorp/terraform/issues/4240)) - * provider/aws: EBS optimised to force new resource in AWS Instance ([#4627](https://github.com/hashicorp/terraform/issues/4627)) - * provider/aws: Wait for NACL rule to be visible ([#4734](https://github.com/hashicorp/terraform/issues/4734)) - * provider/aws: `default_result` on `aws_autoscaling_lifecycle_hook` resources is now computed ([#4695](https://github.com/hashicorp/terraform/issues/4695)) - * provider/aws: fix ASG capacity waiting regression by un-deprecating `min_elb_capacity` ([#4864](https://github.com/hashicorp/terraform/issues/4864)) - * provider/consul: fix several bugs surrounding update behavior ([#4787](https://github.com/hashicorp/terraform/issues/4787)) - * provider/mailgun: Handle the fact that the domain destroy API is eventually consistent ([#4777](https://github.com/hashicorp/terraform/issues/4777)) - * provider/template: Fix race causing sporadic crashes in template_file with count > 1 ([#4694](https://github.com/hashicorp/terraform/issues/4694)) - * provider/template: Add support for updating `template_cloudinit_config` resources ([#4757](https://github.com/hashicorp/terraform/issues/4757)) - * provisioner/chef: Add ENV['no_proxy'] to chef provisioner if no_proxy is detected ([#4661](https://github.com/hashicorp/terraform/issues/4661)) - -## 0.6.9 (January 8, 2016) - -FEATURES: - - * **New provider: `vcd` - VMware vCloud Director** ([#3785](https://github.com/hashicorp/terraform/issues/3785)) - * **New provider: `postgresql` - Create PostgreSQL databases and roles** ([#3653](https://github.com/hashicorp/terraform/issues/3653)) - * **New provider: `chef` - Create chef environments, roles, etc** 
([#3084](https://github.com/hashicorp/terraform/issues/3084)) - * **New provider: `azurerm` - Preliminary support for Azure Resource Manager** ([#4226](https://github.com/hashicorp/terraform/issues/4226)) - * **New provider: `mysql` - Create MySQL databases** ([#3122](https://github.com/hashicorp/terraform/issues/3122)) - * **New resource: `aws_autoscaling_schedule`** ([#4256](https://github.com/hashicorp/terraform/issues/4256)) - * **New resource: `aws_nat_gateway`** ([#4381](https://github.com/hashicorp/terraform/issues/4381)) - * **New resource: `aws_network_acl_rule`** ([#4286](https://github.com/hashicorp/terraform/issues/4286)) - * **New resources: `aws_ecr_repository` and `aws_ecr_repository_policy`** ([#4415](https://github.com/hashicorp/terraform/issues/4415)) - * **New resource: `google_pubsub_topic`** ([#3671](https://github.com/hashicorp/terraform/issues/3671)) - * **New resource: `google_pubsub_subscription`** ([#3671](https://github.com/hashicorp/terraform/issues/3671)) - * **New resource: `template_cloudinit_config`** ([#4095](https://github.com/hashicorp/terraform/issues/4095)) - * **New resource: `tls_locally_signed_cert`** ([#3930](https://github.com/hashicorp/terraform/issues/3930)) - * **New remote state backend: `artifactory`** ([#3684](https://github.com/hashicorp/terraform/issues/3684)) - -IMPROVEMENTS: - - * core: Change set internals for performance improvements ([#3992](https://github.com/hashicorp/terraform/issues/3992)) - * core: Support HTTP basic auth in consul remote state ([#4166](https://github.com/hashicorp/terraform/issues/4166)) - * core: Improve error message on resource arity mismatch ([#4244](https://github.com/hashicorp/terraform/issues/4244)) - * core: Add support for unary operators + and - to the interpolation syntax ([#3621](https://github.com/hashicorp/terraform/issues/3621)) - * core: Add SSH agent support for Windows ([#4323](https://github.com/hashicorp/terraform/issues/4323)) - * core: Add `sha1()` interpolation 
function ([#4450](https://github.com/hashicorp/terraform/issues/4450)) - * provider/aws: Add `placement_group` as an option for `aws_autoscaling_group` ([#3704](https://github.com/hashicorp/terraform/issues/3704)) - * provider/aws: Add support for DynamoDB Table StreamSpecifications ([#4208](https://github.com/hashicorp/terraform/issues/4208)) - * provider/aws: Add `name_prefix` to Security Groups ([#4167](https://github.com/hashicorp/terraform/issues/4167)) - * provider/aws: Add support for removing nodes to `aws_elasticache_cluster` ([#3809](https://github.com/hashicorp/terraform/issues/3809)) - * provider/aws: Add support for `skip_final_snapshot` to `aws_db_instance` ([#3853](https://github.com/hashicorp/terraform/issues/3853)) - * provider/aws: Adding support for Tags to DB SecurityGroup ([#4260](https://github.com/hashicorp/terraform/issues/4260)) - * provider/aws: Adding Tag support for DB Param Groups ([#4259](https://github.com/hashicorp/terraform/issues/4259)) - * provider/aws: Fix issue with updated route ids for VPC Endpoints ([#4264](https://github.com/hashicorp/terraform/issues/4264)) - * provider/aws: Added measure_latency option to Route 53 Health Check resource ([#3688](https://github.com/hashicorp/terraform/issues/3688)) - * provider/aws: Validate IOPs for EBS Volumes ([#4146](https://github.com/hashicorp/terraform/issues/4146)) - * provider/aws: DB Subnet group arn output ([#4261](https://github.com/hashicorp/terraform/issues/4261)) - * provider/aws: Get full Kinesis streams view with pagination ([#4368](https://github.com/hashicorp/terraform/issues/4368)) - * provider/aws: Allow changing private IPs for ENIs ([#4307](https://github.com/hashicorp/terraform/issues/4307)) - * provider/aws: Retry MalformedPolicy errors due to newly created principals in S3 Buckets ([#4315](https://github.com/hashicorp/terraform/issues/4315)) - * provider/aws: Validate `name` on `db_subnet_group` against AWS requirements 
([#4340](https://github.com/hashicorp/terraform/issues/4340)) - * provider/aws: wait for ASG capacity on update ([#3947](https://github.com/hashicorp/terraform/issues/3947)) - * provider/aws: Add validation for ECR repository name ([#4431](https://github.com/hashicorp/terraform/issues/4431)) - * provider/cloudstack: performance improvements ([#4150](https://github.com/hashicorp/terraform/issues/4150)) - * provider/docker: Add support for setting the entry point on `docker_container` resources ([#3761](https://github.com/hashicorp/terraform/issues/3761)) - * provider/docker: Add support for setting the restart policy on `docker_container` resources ([#3761](https://github.com/hashicorp/terraform/issues/3761)) - * provider/docker: Add support for setting memory, swap and CPU shares on `docker_container` resources ([#3761](https://github.com/hashicorp/terraform/issues/3761)) - * provider/docker: Add support for setting labels on `docker_container` resources ([#3761](https://github.com/hashicorp/terraform/issues/3761)) - * provider/docker: Add support for setting log driver and options on `docker_container` resources ([#3761](https://github.com/hashicorp/terraform/issues/3761)) - * provider/docker: Add support for setting network mode on `docker_container` resources ([#4475](https://github.com/hashicorp/terraform/issues/4475)) - * provider/heroku: Improve handling of Applications within an Organization ([#4495](https://github.com/hashicorp/terraform/issues/4495)) - * provider/vsphere: Add support for custom vm params on `vsphere_virtual_machine` ([#3867](https://github.com/hashicorp/terraform/issues/3867)) - * provider/vsphere: Rename vcenter_server config parameter to something clearer ([#3718](https://github.com/hashicorp/terraform/issues/3718)) - * provider/vsphere: Make allow_unverified_ssl configurable on the provider ([#3933](https://github.com/hashicorp/terraform/issues/3933)) - * provider/vsphere: Add folder handling for folder-qualified vm names 
([#3939](https://github.com/hashicorp/terraform/issues/3939)) - * provider/vsphere: Change ip_address parameter for ipv6 support ([#4035](https://github.com/hashicorp/terraform/issues/4035)) - * provider/openstack: Increase instance timeout from 10 to 30 minutes ([#4223](https://github.com/hashicorp/terraform/issues/4223)) - * provider/google: Add `restart_policy` attribute to `google_managed_instance_group` ([#3892](https://github.com/hashicorp/terraform/issues/3892)) - -BUG FIXES: - - * core: skip provider input for deprecated fields ([#4193](https://github.com/hashicorp/terraform/issues/4193)) - * core: Fix issue which could cause fields that become empty to retain old values in the state ([#3257](https://github.com/hashicorp/terraform/issues/3257)) - * provider/docker: Fix an issue running with Docker Swarm by looking up containers by ID instead of name ([#4148](https://github.com/hashicorp/terraform/issues/4148)) - * provider/openstack: Better handling of load balancing resource state changes ([#3926](https://github.com/hashicorp/terraform/issues/3926)) - * provider/aws: Treat `INACTIVE` ECS cluster as deleted ([#4364](https://github.com/hashicorp/terraform/issues/4364)) - * provider/aws: Skip `source_security_group_id` determination logic for Classic ELBs ([#4075](https://github.com/hashicorp/terraform/issues/4075)) - * provider/aws: Fix issue destroying Route 53 zone/record if it no longer exists ([#4198](https://github.com/hashicorp/terraform/issues/4198)) - * provider/aws: Fix issue force destroying a versioned S3 bucket ([#4168](https://github.com/hashicorp/terraform/issues/4168)) - * provider/aws: Update DB Replica to honor storage type ([#4155](https://github.com/hashicorp/terraform/issues/4155)) - * provider/aws: Fix issue creating AWS RDS replicas across regions ([#4215](https://github.com/hashicorp/terraform/issues/4215)) - * provider/aws: Fix issue with Route53 and zero weighted records ([#4427](https://github.com/hashicorp/terraform/issues/4427)) - * 
provider/aws: Fix issue with iam_profile in aws_instance when a path is specified ([#3663](https://github.com/hashicorp/terraform/issues/3663)) - * provider/aws: Refactor AWS Authentication chain to fix issue with authentication and IAM ([#4254](https://github.com/hashicorp/terraform/issues/4254)) - * provider/aws: Fix issue with finding S3 Hosted Zone ID for eu-central-1 region ([#4236](https://github.com/hashicorp/terraform/issues/4236)) - * provider/aws: Fix missing AMI issue with Launch Configurations ([#4242](https://github.com/hashicorp/terraform/issues/4242)) - * provider/aws: Opsworks stack SSH key is write-only ([#4241](https://github.com/hashicorp/terraform/issues/4241)) - * provider/aws: Update VPC Endpoint to correctly set route table ids ([#4392](https://github.com/hashicorp/terraform/issues/4392)) - * provider/aws: Fix issue with ElasticSearch Domain `access_policies` always appear changed ([#4245](https://github.com/hashicorp/terraform/issues/4245)) - * provider/aws: Fix issue with nil parameter group value causing panic in `aws_db_parameter_group` ([#4318](https://github.com/hashicorp/terraform/issues/4318)) - * provider/aws: Fix issue with Elastic IPs not recognizing when they have been unassigned manually ([#4387](https://github.com/hashicorp/terraform/issues/4387)) - * provider/aws: Use body or URL for all CloudFormation stack updates ([#4370](https://github.com/hashicorp/terraform/issues/4370)) - * provider/aws: Fix template_url/template_body conflict ([#4540](https://github.com/hashicorp/terraform/issues/4540)) - * provider/aws: Fix bug w/ changing ECS svc/ELB association ([#4366](https://github.com/hashicorp/terraform/issues/4366)) - * provider/aws: Fix RDS unexpected state config ([#4490](https://github.com/hashicorp/terraform/issues/4490)) - * provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic ([#4214](https://github.com/hashicorp/terraform/issues/4214)) - * provider/google: Fix project metadata 
sshKeys from showing up and causing unnecessary diffs ([#4512](https://github.com/hashicorp/terraform/issues/4512)) - * provider/heroku: Retry drain create until log channel is assigned ([#4823](https://github.com/hashicorp/terraform/issues/4823)) - * provider/openstack: Handle volumes in "deleting" state ([#4204](https://github.com/hashicorp/terraform/issues/4204)) - * provider/rundeck: Tolerate Rundeck server not returning project name when reading a job ([#4301](https://github.com/hashicorp/terraform/issues/4301)) - * provider/vsphere: Create and attach additional disks before bootup ([#4196](https://github.com/hashicorp/terraform/issues/4196)) - * provider/openstack: Convert block_device from a Set to a List ([#4288](https://github.com/hashicorp/terraform/issues/4288)) - * provider/google: Terraform identifies deleted resources and handles them appropriately on Read ([#3913](https://github.com/hashicorp/terraform/issues/3913)) - -## 0.6.8 (December 2, 2015) - -FEATURES: - - * **New provider: `statuscake`** ([#3340](https://github.com/hashicorp/terraform/issues/3340)) - * **New resource: `digitalocean_floating_ip`** ([#3748](https://github.com/hashicorp/terraform/issues/3748)) - * **New resource: `aws_lambda_event_source_mapping`** ([#4093](https://github.com/hashicorp/terraform/issues/4093)) - -IMPROVEMENTS: - - * provider/cloudstack: Reduce the number of network calls required for common operations ([#4051](https://github.com/hashicorp/terraform/issues/4051)) - * provider/aws: Make `publically_accessible` on an `aws_db_instance` update existing instances instead of forcing new ones ([#3895](https://github.com/hashicorp/terraform/issues/3895)) - * provider/aws: Allow `block_duration_minutes` to be set for spot instance requests ([#4071](https://github.com/hashicorp/terraform/issues/4071)) - * provider/aws: Make setting `acl` on S3 buckets update existing buckets instead of forcing new ones ([#4080](https://github.com/hashicorp/terraform/issues/4080)) - * 
provider/aws: Make updates to `assume_role_policy` modify existing IAM roles instead of forcing new ones ([#4107](https://github.com/hashicorp/terraform/issues/4107)) - -BUG FIXES: - - * core: Fix a bug which prevented HEREDOC syntax being used in lists ([#4078](https://github.com/hashicorp/terraform/issues/4078)) - * core: Fix a bug which prevented HEREDOC syntax where the anchor ends in a number ([#4128](https://github.com/hashicorp/terraform/issues/4128)) - * core: Fix a bug which prevented HEREDOC syntax being used with Windows line endings ([#4069](https://github.com/hashicorp/terraform/issues/4069)) - * provider/aws: Fix a bug which could result in a panic when reading EC2 metadata ([#4024](https://github.com/hashicorp/terraform/issues/4024)) - * provider/aws: Fix issue recreating security group rule if it has been destroyed ([#4050](https://github.com/hashicorp/terraform/issues/4050)) - * provider/aws: Fix issue with some attributes in Spot Instance Requests returning as nil ([#4132](https://github.com/hashicorp/terraform/issues/4132)) - * provider/aws: Fix issue where SPF records in Route 53 could show differences with no modification to the configuration ([#4108](https://github.com/hashicorp/terraform/issues/4108)) - * provisioner/chef: Fix issue with path separators breaking the Chef provisioner on Windows ([#4041](https://github.com/hashicorp/terraform/issues/4041)) - -## 0.6.7 (November 23, 2015) - -FEATURES: - - * **New provider: `tls`** - A utility provider for generating TLS keys/self-signed certificates for development and testing ([#2778](https://github.com/hashicorp/terraform/issues/2778)) - * **New provider: `dyn`** - Manage DNS records on Dyn - * **New resource: `aws_cloudformation_stack`** ([#2636](https://github.com/hashicorp/terraform/issues/2636)) - * **New resource: `aws_cloudtrail`** ([#3094](https://github.com/hashicorp/terraform/issues/3094)), ([#4010](https://github.com/hashicorp/terraform/issues/4010)) - * **New resource: `aws_route`** 
([#3548](https://github.com/hashicorp/terraform/issues/3548)) - * **New resource: `aws_codecommit_repository`** ([#3274](https://github.com/hashicorp/terraform/issues/3274)) - * **New resource: `aws_kinesis_firehose_delivery_stream`** ([#3833](https://github.com/hashicorp/terraform/issues/3833)) - * **New resource: `google_sql_database` and `google_sql_database_instance`** ([#3617](https://github.com/hashicorp/terraform/issues/3617)) - * **New resource: `google_compute_global_address`** ([#3701](https://github.com/hashicorp/terraform/issues/3701)) - * **New resource: `google_compute_https_health_check`** ([#3883](https://github.com/hashicorp/terraform/issues/3883)) - * **New resource: `google_compute_ssl_certificate`** ([#3723](https://github.com/hashicorp/terraform/issues/3723)) - * **New resource: `google_compute_url_map`** ([#3722](https://github.com/hashicorp/terraform/issues/3722)) - * **New resource: `google_compute_target_http_proxy`** ([#3727](https://github.com/hashicorp/terraform/issues/3727)) - * **New resource: `google_compute_target_https_proxy`** ([#3728](https://github.com/hashicorp/terraform/issues/3728)) - * **New resource: `google_compute_global_forwarding_rule`** ([#3702](https://github.com/hashicorp/terraform/issues/3702)) - * **New resource: `openstack_networking_port_v2`** ([#3731](https://github.com/hashicorp/terraform/issues/3731)) - * New interpolation function: `coalesce` ([#3814](https://github.com/hashicorp/terraform/issues/3814)) - -IMPROVEMENTS: - - * core: Improve message to list only resources which will be destroyed when using `--target` ([#3859](https://github.com/hashicorp/terraform/issues/3859)) - * connection/ssh: Accept `private_key` contents instead of paths ([#3846](https://github.com/hashicorp/terraform/issues/3846)) - * provider/google: `preemptible` option for instance_template ([#3667](https://github.com/hashicorp/terraform/issues/3667)) - * provider/google: Accurate Terraform Version 
([#3554](https://github.com/hashicorp/terraform/issues/3554)) - * provider/google: Simplified auth (DefaultClient support) ([#3553](https://github.com/hashicorp/terraform/issues/3553)) - * provider/google: `automatic_restart`, `preemptible`, `on_host_maintenance` options ([#3643](https://github.com/hashicorp/terraform/issues/3643)) - * provider/google: Read credentials as contents instead of path ([#3901](https://github.com/hashicorp/terraform/issues/3901)) - * null_resource: Enhance and document ([#3244](https://github.com/hashicorp/terraform/issues/3244), [#3659](https://github.com/hashicorp/terraform/issues/3659)) - * provider/aws: Add CORS settings to S3 bucket ([#3387](https://github.com/hashicorp/terraform/issues/3387)) - * provider/aws: Add notification topic ARN for ElastiCache clusters ([#3674](https://github.com/hashicorp/terraform/issues/3674)) - * provider/aws: Add `kinesis_endpoint` for configuring Kinesis ([#3255](https://github.com/hashicorp/terraform/issues/3255)) - * provider/aws: Add a computed ARN for S3 Buckets ([#3685](https://github.com/hashicorp/terraform/issues/3685)) - * provider/aws: Add S3 support for Lambda Function resource ([#3794](https://github.com/hashicorp/terraform/issues/3794)) - * provider/aws: Add `name_prefix` option to launch configurations ([#3802](https://github.com/hashicorp/terraform/issues/3802)) - * provider/aws: Add support for group name and path changes with IAM group update function ([#3237](https://github.com/hashicorp/terraform/issues/3237)) - * provider/aws: Provide `source_security_group_id` for ELBs inside a VPC ([#3780](https://github.com/hashicorp/terraform/issues/3780)) - * provider/aws: Add snapshot window and retention limits for ElastiCache (Redis) ([#3707](https://github.com/hashicorp/terraform/issues/3707)) - * provider/aws: Add username updates for `aws_iam_user` ([#3227](https://github.com/hashicorp/terraform/issues/3227)) - * provider/aws: Add AutoMinorVersionUpgrade to RDS Instances 
([#3677](https://github.com/hashicorp/terraform/issues/3677)) - * provider/aws: Add `access_logs` to ELB resource ([#3756](https://github.com/hashicorp/terraform/issues/3756)) - * provider/aws: Add a retry function to rescue an error in creating Autoscaling Lifecycle Hooks ([#3694](https://github.com/hashicorp/terraform/issues/3694)) - * provider/aws: `engine_version` is now optional for DB Instance ([#3744](https://github.com/hashicorp/terraform/issues/3744)) - * provider/aws: Add configuration to enable copying RDS tags to final snapshot ([#3529](https://github.com/hashicorp/terraform/issues/3529)) - * provider/aws: RDS Cluster additions (`backup_retention_period`, `preferred_backup_window`, `preferred_maintenance_window`) ([#3757](https://github.com/hashicorp/terraform/issues/3757)) - * provider/aws: Document and validate ELB `ssl_certificate_id` and protocol requirements ([#3887](https://github.com/hashicorp/terraform/issues/3887)) - * provider/azure: Read `publish_settings` as contents instead of path ([#3899](https://github.com/hashicorp/terraform/issues/3899)) - * provider/openstack: Use IPv4 as the default IP version for subnets ([#3091](https://github.com/hashicorp/terraform/issues/3091)) - * provider/aws: Apply security group after restoring `db_instance` from snapshot ([#3513](https://github.com/hashicorp/terraform/issues/3513)) - * provider/aws: Make the AutoScalingGroup `name` optional ([#3710](https://github.com/hashicorp/terraform/issues/3710)) - * provider/openstack: Add "delete on termination" boot-from-volume option ([#3232](https://github.com/hashicorp/terraform/issues/3232)) - * provider/digitalocean: Make `user_data` force a new droplet ([#3740](https://github.com/hashicorp/terraform/issues/3740)) - * provider/vsphere: Do not add network interfaces by default ([#3652](https://github.com/hashicorp/terraform/issues/3652)) - * provider/openstack: Configure Fixed IPs through ports ([#3772](https://github.com/hashicorp/terraform/issues/3772)) - * 
provider/openstack: Specify a port ID on a Router Interface ([#3903](https://github.com/hashicorp/terraform/issues/3903)) - * provider/openstack: Make LBaaS Virtual IP computed ([#3927](https://github.com/hashicorp/terraform/issues/3927)) - -BUG FIXES: - - * `terraform remote config`: update `--help` output ([#3632](https://github.com/hashicorp/terraform/issues/3632)) - * core: Modules on Git branches now update properly ([#1568](https://github.com/hashicorp/terraform/issues/1568)) - * core: Fix issue preventing input prompts for unset variables during plan ([#3843](https://github.com/hashicorp/terraform/issues/3843)) - * core: Fix issue preventing input prompts for unset variables during refresh ([#4017](https://github.com/hashicorp/terraform/issues/4017)) - * core: Orphan resources can now be targets ([#3912](https://github.com/hashicorp/terraform/issues/3912)) - * helper/schema: Skip StateFunc when value is nil ([#4002](https://github.com/hashicorp/terraform/issues/4002)) - * provider/google: Timeout when deleting large `instance_group_manager` ([#3591](https://github.com/hashicorp/terraform/issues/3591)) - * provider/aws: Fix issue with order of Termination Policies in AutoScaling Groups. 
- This will introduce plans on upgrade to this version, in order to correct the ordering ([#2890](https://github.com/hashicorp/terraform/issues/2890)) - * provider/aws: Allow cluster name, not only ARN for `aws_ecs_service` ([#3668](https://github.com/hashicorp/terraform/issues/3668)) - * provider/aws: Fix a bug where a non-lower-cased `maintenance_window` can cause unnecessary planned changes ([#4020](https://github.com/hashicorp/terraform/issues/4020)) - * provider/aws: Only set `weight` on an `aws_route53_record` if it has been set in configuration ([#3900](https://github.com/hashicorp/terraform/issues/3900)) - * provider/aws: Ignore association not existing on route table destroy ([#3615](https://github.com/hashicorp/terraform/issues/3615)) - * provider/aws: Fix policy encoding issue with SNS Topics ([#3700](https://github.com/hashicorp/terraform/issues/3700)) - * provider/aws: Correctly export ARN in `aws_iam_saml_provider` ([#3827](https://github.com/hashicorp/terraform/issues/3827)) - * provider/aws: Fix issue deleting users who are attached to a group ([#4005](https://github.com/hashicorp/terraform/issues/4005)) - * provider/aws: Fix crash in Route53 Record if Zone not found ([#3945](https://github.com/hashicorp/terraform/issues/3945)) - * provider/aws: Retry deleting IAM Server Cert on dependency violation ([#3898](https://github.com/hashicorp/terraform/issues/3898)) - * provider/aws: Update Spot Instance request to provide connection information ([#3940](https://github.com/hashicorp/terraform/issues/3940)) - * provider/aws: Fix typo in error checking for IAM Policy Attachments ([#3970](https://github.com/hashicorp/terraform/issues/3970)) - * provider/aws: Fix issue with LB Cookie Stickiness and empty expiration period ([#3908](https://github.com/hashicorp/terraform/issues/3908)) - * provider/aws: Tolerate ElastiCache clusters being deleted outside Terraform ([#3767](https://github.com/hashicorp/terraform/issues/3767)) - * provider/aws: Downcase Route 53 
record names in state file to match API output ([#3574](https://github.com/hashicorp/terraform/issues/3574)) - * provider/aws: Fix issue that could occur if no ECS Cluster was found for a given name ([#3829](https://github.com/hashicorp/terraform/issues/3829)) - * provider/aws: Fix issue with SNS topic policy if omitted ([#3777](https://github.com/hashicorp/terraform/issues/3777)) - * provider/aws: Support scratch volumes in `aws_ecs_task_definition` ([#3810](https://github.com/hashicorp/terraform/issues/3810)) - * provider/aws: Treat `aws_ecs_service` w/ Status==INACTIVE as deleted ([#3828](https://github.com/hashicorp/terraform/issues/3828)) - * provider/aws: Expand ~ to homedir in `aws_s3_bucket_object.source` ([#3910](https://github.com/hashicorp/terraform/issues/3910)) - * provider/aws: Fix issue with updating the `aws_ecs_task_definition` where `aws_ecs_service` didn't wait for a new computed ARN ([#3924](https://github.com/hashicorp/terraform/issues/3924)) - * provider/aws: Prevent crashing when deleting `aws_ecs_service` that is already gone ([#3914](https://github.com/hashicorp/terraform/issues/3914)) - * provider/aws: Allow spaces in `aws_db_subnet_group.name` (undocumented in the API) ([#3955](https://github.com/hashicorp/terraform/issues/3955)) - * provider/aws: Make VPC ID required on subnets ([#4021](https://github.com/hashicorp/terraform/issues/4021)) - * provider/azure: Various bug fixes ([#3695](https://github.com/hashicorp/terraform/issues/3695)) - * provider/digitalocean: Fix issue preventing SSH fingerprints from working ([#3633](https://github.com/hashicorp/terraform/issues/3633)) - * provider/digitalocean: Fix the DigitalOcean Droplet 404 potential on refresh of state ([#3768](https://github.com/hashicorp/terraform/issues/3768)) - * provider/openstack: Fix several issues causing unresolvable diffs ([#3440](https://github.com/hashicorp/terraform/issues/3440)) - * provider/openstack: Safely delete security groups 
([#3696](https://github.com/hashicorp/terraform/issues/3696)) - * provider/openstack: Ignore order of `security_groups` in instance ([#3651](https://github.com/hashicorp/terraform/issues/3651)) - * provider/vsphere: Fix d.SetConnInfo error in case of a missing IP address ([#3636](https://github.com/hashicorp/terraform/issues/3636)) - * provider/openstack: Fix boot from volume ([#3206](https://github.com/hashicorp/terraform/issues/3206)) - * provider/openstack: Fix crashing when image is no longer accessible ([#2189](https://github.com/hashicorp/terraform/issues/2189)) - * provider/openstack: Better handling of network resource state changes ([#3712](https://github.com/hashicorp/terraform/issues/3712)) - * provider/openstack: Fix crashing when no security group is specified ([#3801](https://github.com/hashicorp/terraform/issues/3801)) - * provider/packet: Fix issue that could cause errors when provisioning many devices at once ([#3847](https://github.com/hashicorp/terraform/issues/3847)) - * provider/packet: Fix connection information for devices, allowing provisioners to run ([#3948](https://github.com/hashicorp/terraform/issues/3948)) - * provider/openstack: Fix issue preventing security group rules from being removed ([#3796](https://github.com/hashicorp/terraform/issues/3796)) - * provider/template: `template_file`: source contents instead of path ([#3909](https://github.com/hashicorp/terraform/issues/3909)) - -## 0.6.6 (October 23, 2015) - -FEATURES: - - * New interpolation functions: `cidrhost`, `cidrnetmask` and `cidrsubnet` ([#3127](https://github.com/hashicorp/terraform/issues/3127)) - -IMPROVEMENTS: - - * "forces new resource" now highlighted in plan output ([#3136](https://github.com/hashicorp/terraform/issues/3136)) - -BUG FIXES: - - * helper/schema: Better error message for assigning list/map to string ([#3009](https://github.com/hashicorp/terraform/issues/3009)) - * remote/state/atlas: Additional remote state conflict handling for semantically neutral 
state changes ([#3603](https://github.com/hashicorp/terraform/issues/3603)) - -## 0.6.5 (October 21, 2015) - -FEATURES: - - * **New resources: `aws_codeploy_app` and `aws_codeploy_deployment_group`** ([#2783](https://github.com/hashicorp/terraform/issues/2783)) - * New remote state backend: `etcd` ([#3487](https://github.com/hashicorp/terraform/issues/3487)) - * New interpolation functions: `upper` and `lower` ([#3558](https://github.com/hashicorp/terraform/issues/3558)) - -BUG FIXES: - - * core: Fix remote state conflicts caused by ambiguity in ordering of deeply nested modules ([#3573](https://github.com/hashicorp/terraform/issues/3573)) - * core: Fix remote state conflicts caused by state metadata differences ([#3569](https://github.com/hashicorp/terraform/issues/3569)) - * core: Avoid using http.DefaultClient ([#3532](https://github.com/hashicorp/terraform/issues/3532)) - -INTERNAL IMPROVEMENTS: - - * provider/digitalocean: use official Go client ([#3333](https://github.com/hashicorp/terraform/issues/3333)) - * core: extract module fetching to external library ([#3516](https://github.com/hashicorp/terraform/issues/3516)) - -## 0.6.4 (October 15, 2015) - -FEATURES: - - * **New provider: `rundeck`** ([#2412](https://github.com/hashicorp/terraform/issues/2412)) - * **New provider: `packet`** ([#2260](https://github.com/hashicorp/terraform/issues/2260)), ([#3472](https://github.com/hashicorp/terraform/issues/3472)) - * **New provider: `vsphere`**: Initial support for a VM resource ([#3419](https://github.com/hashicorp/terraform/issues/3419)) - * **New resource: `cloudstack_loadbalancer_rule`** ([#2934](https://github.com/hashicorp/terraform/issues/2934)) - * **New resource: `google_compute_project_metadata`** ([#3065](https://github.com/hashicorp/terraform/issues/3065)) - * **New resources: `aws_ami`, `aws_ami_copy`, `aws_ami_from_instance`** ([#2784](https://github.com/hashicorp/terraform/issues/2784)) - * **New resources: `aws_cloudwatch_log_group`** 
([#2415](https://github.com/hashicorp/terraform/issues/2415)) - * **New resource: `google_storage_bucket_object`** ([#3192](https://github.com/hashicorp/terraform/issues/3192)) - * **New resources: `google_compute_vpn_gateway`, `google_compute_vpn_tunnel`** ([#3213](https://github.com/hashicorp/terraform/issues/3213)) - * **New resources: `google_storage_bucket_acl`, `google_storage_object_acl`** ([#3272](https://github.com/hashicorp/terraform/issues/3272)) - * **New resource: `aws_iam_saml_provider`** ([#3156](https://github.com/hashicorp/terraform/issues/3156)) - * **New resources: `aws_efs_file_system` and `aws_efs_mount_target`** ([#2196](https://github.com/hashicorp/terraform/issues/2196)) - * **New resources: `aws_opsworks_*`** ([#2162](https://github.com/hashicorp/terraform/issues/2162)) - * **New resource: `aws_elasticsearch_domain`** ([#3443](https://github.com/hashicorp/terraform/issues/3443)) - * **New resource: `aws_directory_service_directory`** ([#3228](https://github.com/hashicorp/terraform/issues/3228)) - * **New resource: `aws_autoscaling_lifecycle_hook`** ([#3351](https://github.com/hashicorp/terraform/issues/3351)) - * **New resource: `aws_placement_group`** ([#3457](https://github.com/hashicorp/terraform/issues/3457)) - * **New resource: `aws_glacier_vault`** ([#3491](https://github.com/hashicorp/terraform/issues/3491)) - * **New lifecycle flag: `ignore_changes`** ([#2525](https://github.com/hashicorp/terraform/issues/2525)) - -IMPROVEMENTS: - - * core: Add a function to find the index of an element in a list. ([#2704](https://github.com/hashicorp/terraform/issues/2704)) - * core: Print all outputs when `terraform output` is called with no arguments ([#2920](https://github.com/hashicorp/terraform/issues/2920)) - * core: In plan output summary, count resource replacement as Add/Remove instead of Change ([#3173](https://github.com/hashicorp/terraform/issues/3173)) - * core: Add interpolation functions for base64 encoding and decoding. 
([#3325](https://github.com/hashicorp/terraform/issues/3325)) - * core: Expose parallelism as a CLI option instead of a hard-coding the default of 10 ([#3365](https://github.com/hashicorp/terraform/issues/3365)) - * core: Add interpolation function `compact`, to remove empty elements from a list. ([#3239](https://github.com/hashicorp/terraform/issues/3239)), ([#3479](https://github.com/hashicorp/terraform/issues/3479)) - * core: Allow filtering of log output by level, using e.g. ``TF_LOG=INFO`` ([#3380](https://github.com/hashicorp/terraform/issues/3380)) - * provider/aws: Add `instance_initiated_shutdown_behavior` to AWS Instance ([#2887](https://github.com/hashicorp/terraform/issues/2887)) - * provider/aws: Support IAM role names (previously just ARNs) in `aws_ecs_service.iam_role` ([#3061](https://github.com/hashicorp/terraform/issues/3061)) - * provider/aws: Add update method to RDS Subnet groups, can modify subnets without recreating ([#3053](https://github.com/hashicorp/terraform/issues/3053)) - * provider/aws: Paginate notifications returned for ASG Notifications ([#3043](https://github.com/hashicorp/terraform/issues/3043)) - * provider/aws: Adds additional S3 Bucket Object inputs ([#3265](https://github.com/hashicorp/terraform/issues/3265)) - * provider/aws: add `ses_smtp_password` to `aws_iam_access_key` ([#3165](https://github.com/hashicorp/terraform/issues/3165)) - * provider/aws: read `iam_instance_profile` for `aws_instance` and save to state ([#3167](https://github.com/hashicorp/terraform/issues/3167)) - * provider/aws: allow `instance` to be computed in `aws_eip` ([#3036](https://github.com/hashicorp/terraform/issues/3036)) - * provider/aws: Add `versioning` option to `aws_s3_bucket` ([#2942](https://github.com/hashicorp/terraform/issues/2942)) - * provider/aws: Add `configuration_endpoint` to `aws_elasticache_cluster` ([#3250](https://github.com/hashicorp/terraform/issues/3250)) - * provider/aws: Add validation for 
`app_cookie_stickiness_policy.name` ([#3277](https://github.com/hashicorp/terraform/issues/3277)) - * provider/aws: Add validation for `db_parameter_group.name` ([#3279](https://github.com/hashicorp/terraform/issues/3279)) - * provider/aws: Set DynamoDB Table ARN after creation ([#3500](https://github.com/hashicorp/terraform/issues/3500)) - * provider/aws: `aws_s3_bucket_object` allows interpolated content to be set with new `content` attribute. ([#3200](https://github.com/hashicorp/terraform/issues/3200)) - * provider/aws: Allow tags for `aws_kinesis_stream` resource. ([#3397](https://github.com/hashicorp/terraform/issues/3397)) - * provider/aws: Configurable capacity waiting duration for ASGs ([#3191](https://github.com/hashicorp/terraform/issues/3191)) - * provider/aws: Allow non-persistent Spot Requests ([#3311](https://github.com/hashicorp/terraform/issues/3311)) - * provider/aws: Support tags for AWS DB subnet group ([#3138](https://github.com/hashicorp/terraform/issues/3138)) - * provider/cloudstack: Add `project` parameter to `cloudstack_vpc`, `cloudstack_network`, `cloudstack_ipaddress` and `cloudstack_disk` ([#3035](https://github.com/hashicorp/terraform/issues/3035)) - * provider/openstack: add functionality to attach FloatingIP to Port ([#1788](https://github.com/hashicorp/terraform/issues/1788)) - * provider/google: Can now do multi-region deployments without using multiple providers ([#3258](https://github.com/hashicorp/terraform/issues/3258)) - * remote/s3: Allow canned ACLs to be set on state objects. 
([#3233](https://github.com/hashicorp/terraform/issues/3233)) - * remote/s3: Remote state is stored in S3 with `Content-Type: application/json` ([#3385](https://github.com/hashicorp/terraform/issues/3385)) - -BUG FIXES: - - * core: Fix problems referencing list attributes in interpolations ([#2157](https://github.com/hashicorp/terraform/issues/2157)) - * core: don't error on computed value during input walk ([#2988](https://github.com/hashicorp/terraform/issues/2988)) - * core: Ignore missing variables during destroy phase ([#3393](https://github.com/hashicorp/terraform/issues/3393)) - * provider/google: Crashes with interface conversion in GCE Instance Template ([#3027](https://github.com/hashicorp/terraform/issues/3027)) - * provider/google: Convert int to int64 when building the GKE cluster.NodeConfig struct ([#2978](https://github.com/hashicorp/terraform/issues/2978)) - * provider/google: google_compute_instance_template.network_interface.network should be a URL ([#3226](https://github.com/hashicorp/terraform/issues/3226)) - * provider/aws: Retry creation of `aws_ecs_service` if IAM policy isn't ready yet ([#3061](https://github.com/hashicorp/terraform/issues/3061)) - * provider/aws: Fix issue with mixed capitalization for RDS Instances ([#3053](https://github.com/hashicorp/terraform/issues/3053)) - * provider/aws: Fix issue with RDS to allow major version upgrades ([#3053](https://github.com/hashicorp/terraform/issues/3053)) - * provider/aws: Fix shard_count in `aws_kinesis_stream` ([#2986](https://github.com/hashicorp/terraform/issues/2986)) - * provider/aws: Fix issue with `key_name` and using VPCs with spot instance requests ([#2954](https://github.com/hashicorp/terraform/issues/2954)) - * provider/aws: Fix unresolvable diffs coming from `aws_elasticache_cluster` names being downcased - by AWS ([#3120](https://github.com/hashicorp/terraform/issues/3120)) - * provider/aws: Read instance source_dest_check and save to state 
([#3152](https://github.com/hashicorp/terraform/issues/3152)) - * provider/aws: Allow `weight = 0` in Route53 records ([#3196](https://github.com/hashicorp/terraform/issues/3196)) - * provider/aws: Normalize aws_elasticache_cluster id to lowercase, allowing convergence. ([#3235](https://github.com/hashicorp/terraform/issues/3235)) - * provider/aws: Fix ValidateAccountId for IAM Instance Profiles ([#3313](https://github.com/hashicorp/terraform/issues/3313)) - * provider/aws: Update Security Group Rules to Version 2 ([#3019](https://github.com/hashicorp/terraform/issues/3019)) - * provider/aws: Migrate KeyPair to version 1, fixing issue with using `file()` ([#3470](https://github.com/hashicorp/terraform/issues/3470)) - * provider/aws: Fix force_delete on autoscaling groups ([#3485](https://github.com/hashicorp/terraform/issues/3485)) - * provider/aws: Fix crash with VPC Peering connections ([#3490](https://github.com/hashicorp/terraform/issues/3490)) - * provider/aws: fix bug with reading GSIs from dynamodb ([#3300](https://github.com/hashicorp/terraform/issues/3300)) - * provider/docker: Fix issue preventing private images from being referenced ([#2619](https://github.com/hashicorp/terraform/issues/2619)) - * provider/digitalocean: Fix issue causing unnecessary diffs based on droplet slugsize case ([#3284](https://github.com/hashicorp/terraform/issues/3284)) - * provider/openstack: add state 'downloading' to list of expected states in - `blockstorage_volume_v1` creation ([#2866](https://github.com/hashicorp/terraform/issues/2866)) - * provider/openstack: remove security groups (by name) before adding security - groups (by id) ([#2008](https://github.com/hashicorp/terraform/issues/2008)) - -INTERNAL IMPROVEMENTS: - - * core: Makefile target "plugin-dev" for building just one plugin. ([#3229](https://github.com/hashicorp/terraform/issues/3229)) - * helper/schema: Don't allow ``Update`` func if no attributes can actually be updated, per schema. 
([#3288](https://github.com/hashicorp/terraform/issues/3288)) - * helper/schema: Default hashing function for sets ([#3018](https://github.com/hashicorp/terraform/issues/3018)) - * helper/multierror: Remove in favor of [github.com/hashicorp/go-multierror](http://github.com/hashicorp/go-multierror). ([#3336](https://github.com/hashicorp/terraform/issues/3336)) - -## 0.6.3 (August 11, 2015) - -BUG FIXES: - - * core: Skip all descendants after error, not just children; helps prevent confusing - additional errors/crashes after initial failure ([#2963](https://github.com/hashicorp/terraform/issues/2963)) - * core: fix deadlock possibility when both a module and a dependent resource are - removed in the same run ([#2968](https://github.com/hashicorp/terraform/issues/2968)) - * provider/aws: Fix issue with authenticating when using IAM profiles ([#2959](https://github.com/hashicorp/terraform/issues/2959)) - -## 0.6.2 (August 6, 2015) - -FEATURES: - - * **New resource: `google_compute_instance_group_manager`** ([#2868](https://github.com/hashicorp/terraform/issues/2868)) - * **New resource: `google_compute_autoscaler`** ([#2868](https://github.com/hashicorp/terraform/issues/2868)) - * **New resource: `aws_s3_bucket_object`** ([#2898](https://github.com/hashicorp/terraform/issues/2898)) - -IMPROVEMENTS: - - * core: Add resource IDs to errors coming from `apply`/`refresh` ([#2815](https://github.com/hashicorp/terraform/issues/2815)) - * provider/aws: Validate credentials before walking the graph ([#2730](https://github.com/hashicorp/terraform/issues/2730)) - * provider/aws: Added website_domain for S3 buckets ([#2210](https://github.com/hashicorp/terraform/issues/2210)) - * provider/aws: ELB names are now optional, and generated by Terraform if omitted ([#2571](https://github.com/hashicorp/terraform/issues/2571)) - * provider/aws: Downcase RDS engine names to prevent continuous diffs ([#2745](https://github.com/hashicorp/terraform/issues/2745)) - * provider/aws: Added 
`source_dest_check` attribute to the aws_network_interface ([#2741](https://github.com/hashicorp/terraform/issues/2741)) - * provider/aws: Clean up externally removed Launch Configurations ([#2806](https://github.com/hashicorp/terraform/issues/2806)) - * provider/aws: Allow configuration of the DynamoDB Endpoint ([#2825](https://github.com/hashicorp/terraform/issues/2825)) - * provider/aws: Compute private ip addresses of ENIs if they are not specified ([#2743](https://github.com/hashicorp/terraform/issues/2743)) - * provider/aws: Add `arn` attribute for DynamoDB tables ([#2924](https://github.com/hashicorp/terraform/issues/2924)) - * provider/aws: Fail silently when account validation fails while using an instance profile ([#3001](https://github.com/hashicorp/terraform/issues/3001)) - * provider/azure: Allow `settings_file` to accept XML string ([#2922](https://github.com/hashicorp/terraform/issues/2922)) - * provider/azure: Provide a simpler error when using a Platform Image without a - Storage Service ([#2861](https://github.com/hashicorp/terraform/issues/2861)) - * provider/google: `account_file` is now expected to be JSON. Paths are still supported for - backwards compatibility. 
([#2839](https://github.com/hashicorp/terraform/issues/2839)) - -BUG FIXES: - - * core: Prevent error duplication in `apply` ([#2815](https://github.com/hashicorp/terraform/issues/2815)) - * core: Fix crash when a provider validation adds a warning ([#2878](https://github.com/hashicorp/terraform/issues/2878)) - * provider/aws: Fix issue with toggling monitoring in AWS Instances ([#2794](https://github.com/hashicorp/terraform/issues/2794)) - * provider/aws: Fix issue with Spot Instance Requests and cancellation ([#2805](https://github.com/hashicorp/terraform/issues/2805)) - * provider/aws: Fix issue with checking for ElastiCache cluster cache node status ([#2842](https://github.com/hashicorp/terraform/issues/2842)) - * provider/aws: Fix issue when unable to find a Root Block Device name of an Instance Backed - AMI ([#2646](https://github.com/hashicorp/terraform/issues/2646)) - * provider/dnsimple: Domain and type should force new records ([#2777](https://github.com/hashicorp/terraform/issues/2777)) - * provider/aws: Fix issue with IAM Server Certificates and Chains ([#2871](https://github.com/hashicorp/terraform/issues/2871)) - * provider/aws: Fix issue with IAM Server Certificates when using `path` ([#2871](https://github.com/hashicorp/terraform/issues/2871)) - * provider/aws: Fix issue in Security Group Rules when the Security Group is not found ([#2897](https://github.com/hashicorp/terraform/issues/2897)) - * provider/aws: allow external ENI attachments ([#2943](https://github.com/hashicorp/terraform/issues/2943)) - * provider/aws: Fix issue with S3 Buckets, and throwing an error when not found ([#2925](https://github.com/hashicorp/terraform/issues/2925)) - -## 0.6.1 (July 20, 2015) - -FEATURES: - - * **New resource: `google_container_cluster`** ([#2357](https://github.com/hashicorp/terraform/issues/2357)) - * **New resource: `aws_vpc_endpoint`** ([#2695](https://github.com/hashicorp/terraform/issues/2695)) - -IMPROVEMENTS: - - * connection/ssh: Print SSH bastion 
host details to output ([#2684](https://github.com/hashicorp/terraform/issues/2684)) - * provider/aws: Create RDS databases from snapshots ([#2062](https://github.com/hashicorp/terraform/issues/2062)) - * provider/aws: Add support for restoring from Redis backup stored in S3 ([#2634](https://github.com/hashicorp/terraform/issues/2634)) - * provider/aws: Add `maintenance_window` to ElastiCache cluster ([#2642](https://github.com/hashicorp/terraform/issues/2642)) - * provider/aws: Availability Zones are optional when specifying VPC Zone Identifiers in - Auto Scaling Groups updates ([#2724](https://github.com/hashicorp/terraform/issues/2724)) - * provider/google: Add metadata_startup_script to google_compute_instance ([#2375](https://github.com/hashicorp/terraform/issues/2375)) - -BUG FIXES: - - * core: Don't prompt for variables with defaults ([#2613](https://github.com/hashicorp/terraform/issues/2613)) - * core: Return correct number of planned updates ([#2620](https://github.com/hashicorp/terraform/issues/2620)) - * core: Fix "provider not found" error that can occur while running - a destroy plan with grandchildren modules ([#2755](https://github.com/hashicorp/terraform/issues/2755)) - * core: Fix UUID showing up in diff for computed splat (`foo.*.bar`) - variables. 
([#2788](https://github.com/hashicorp/terraform/issues/2788)) - * core: Orphan modules that contain no resources (only other modules) - are properly destroyed up to arbitrary depth ([#2786](https://github.com/hashicorp/terraform/issues/2786)) - * core: Fix "attribute not available" during destroy plans in - cases where the parameter is passed between modules ([#2775](https://github.com/hashicorp/terraform/issues/2775)) - * core: Record schema version when destroy fails ([#2923](https://github.com/hashicorp/terraform/issues/2923)) - * connection/ssh: fix issue on machines with an SSH Agent available - preventing `key_file` from being read without explicitly - setting `agent = false` ([#2615](https://github.com/hashicorp/terraform/issues/2615)) - * provider/aws: Allow uppercase characters in `aws_elb.name` ([#2580](https://github.com/hashicorp/terraform/issues/2580)) - * provider/aws: Allow underscores in `aws_db_subnet_group.name` (undocumented by AWS) ([#2604](https://github.com/hashicorp/terraform/issues/2604)) - * provider/aws: Allow dots in `aws_db_subnet_group.name` (undocumented by AWS) ([#2665](https://github.com/hashicorp/terraform/issues/2665)) - * provider/aws: Fix issue with pending Spot Instance requests ([#2640](https://github.com/hashicorp/terraform/issues/2640)) - * provider/aws: Fix issue in AWS Classic environment with referencing external - Security Groups ([#2644](https://github.com/hashicorp/terraform/issues/2644)) - * provider/aws: Bump internet gateway detach timeout ([#2669](https://github.com/hashicorp/terraform/issues/2669)) - * provider/aws: Fix issue with detecting differences in DB Parameters ([#2728](https://github.com/hashicorp/terraform/issues/2728)) - * provider/aws: `ecs_cluster` rename (recreation) and deletion is handled correctly ([#2698](https://github.com/hashicorp/terraform/issues/2698)) - * provider/aws: `aws_route_table` ignores routes generated for VPC endpoints ([#2695](https://github.com/hashicorp/terraform/issues/2695)) - 
* provider/aws: Fix issue with Launch Configurations and enable_monitoring ([#2735](https://github.com/hashicorp/terraform/issues/2735)) - * provider/openstack: allow empty api_key and endpoint_type ([#2626](https://github.com/hashicorp/terraform/issues/2626)) - * provisioner/chef: Fix permission denied error with ohai hints ([#2781](https://github.com/hashicorp/terraform/issues/2781)) - -## 0.6.0 (June 30, 2015) - -BACKWARDS INCOMPATIBILITIES: - - * command/push: If a variable is already set within Atlas, it won't be - updated unless the `-overwrite` flag is present ([#2373](https://github.com/hashicorp/terraform/issues/2373)) - * connection/ssh: The `agent` field now defaults to `true` if - the `SSH_AGENT_SOCK` environment variable is present. In other words, - `ssh-agent` support is now opt-out instead of opt-in functionality. ([#2408](https://github.com/hashicorp/terraform/issues/2408)) - * provider/aws: If you were setting access and secret key to blank ("") - to force Terraform to load credentials from another source such as the - EC2 role, this will now error. Remove the blank lines and Terraform - will load from other sources. 
- * `concat()` has been repurposed to combine lists instead of strings (old behavior - of joining strings is maintained in this version but is deprecated, strings - should be combined using interpolation syntax, like "${var.foo}${var.bar}") - ([#1790](https://github.com/hashicorp/terraform/issues/1790)) - -FEATURES: - - * **New provider: `azure`** ([#2052](https://github.com/hashicorp/terraform/issues/2052), [#2053](https://github.com/hashicorp/terraform/issues/2053), [#2372](https://github.com/hashicorp/terraform/issues/2372), [#2380](https://github.com/hashicorp/terraform/issues/2380), [#2394](https://github.com/hashicorp/terraform/issues/2394), [#2515](https://github.com/hashicorp/terraform/issues/2515), [#2530](https://github.com/hashicorp/terraform/issues/2530), [#2562](https://github.com/hashicorp/terraform/issues/2562)) - * **New resource: `aws_autoscaling_notification`** ([#2197](https://github.com/hashicorp/terraform/issues/2197)) - * **New resource: `aws_autoscaling_policy`** ([#2201](https://github.com/hashicorp/terraform/issues/2201)) - * **New resource: `aws_cloudwatch_metric_alarm`** ([#2201](https://github.com/hashicorp/terraform/issues/2201)) - * **New resource: `aws_dynamodb_table`** ([#2121](https://github.com/hashicorp/terraform/issues/2121)) - * **New resource: `aws_ecs_cluster`** ([#1803](https://github.com/hashicorp/terraform/issues/1803)) - * **New resource: `aws_ecs_service`** ([#1803](https://github.com/hashicorp/terraform/issues/1803)) - * **New resource: `aws_ecs_task_definition`** ([#1803](https://github.com/hashicorp/terraform/issues/1803), [#2402](https://github.com/hashicorp/terraform/issues/2402)) - * **New resource: `aws_elasticache_parameter_group`** ([#2276](https://github.com/hashicorp/terraform/issues/2276)) - * **New resource: `aws_flow_log`** ([#2384](https://github.com/hashicorp/terraform/issues/2384)) - * **New resource: `aws_iam_group_association`** ([#2273](https://github.com/hashicorp/terraform/issues/2273)) - * **New 
resource: `aws_iam_policy_attachment`** ([#2395](https://github.com/hashicorp/terraform/issues/2395)) - * **New resource: `aws_lambda_function`** ([#2170](https://github.com/hashicorp/terraform/issues/2170)) - * **New resource: `aws_route53_delegation_set`** ([#1999](https://github.com/hashicorp/terraform/issues/1999)) - * **New resource: `aws_route53_health_check`** ([#2226](https://github.com/hashicorp/terraform/issues/2226)) - * **New resource: `aws_spot_instance_request`** ([#2263](https://github.com/hashicorp/terraform/issues/2263)) - * **New resource: `cloudstack_ssh_keypair`** ([#2004](https://github.com/hashicorp/terraform/issues/2004)) - * **New remote state backend: `swift`**: You can now store remote state in - a OpenStack Swift. ([#2254](https://github.com/hashicorp/terraform/issues/2254)) - * command/output: support display of module outputs ([#2102](https://github.com/hashicorp/terraform/issues/2102)) - * core: `keys()` and `values()` funcs for map variables ([#2198](https://github.com/hashicorp/terraform/issues/2198)) - * connection/ssh: SSH bastion host support and ssh-agent forwarding ([#2425](https://github.com/hashicorp/terraform/issues/2425)) - -IMPROVEMENTS: - - * core: HTTP remote state now accepts `skip_cert_verification` - option to ignore TLS cert verification. 
([#2214](https://github.com/hashicorp/terraform/issues/2214)) - * core: S3 remote state now accepts the 'encrypt' option for SSE ([#2405](https://github.com/hashicorp/terraform/issues/2405)) - * core: `plan` now reports sum of resources to be changed/created/destroyed ([#2458](https://github.com/hashicorp/terraform/issues/2458)) - * core: Change string list representation so we can distinguish empty, single - element lists ([#2504](https://github.com/hashicorp/terraform/issues/2504)) - * core: Properly close provider and provisioner plugin connections ([#2406](https://github.com/hashicorp/terraform/issues/2406), [#2527](https://github.com/hashicorp/terraform/issues/2527)) - * provider/aws: AutoScaling groups now support updating Load Balancers without - recreation ([#2472](https://github.com/hashicorp/terraform/issues/2472)) - * provider/aws: Allow more in-place updates for ElastiCache cluster without recreating - ([#2469](https://github.com/hashicorp/terraform/issues/2469)) - * provider/aws: ElastiCache Subnet Groups can be updated - without destroying first ([#2191](https://github.com/hashicorp/terraform/issues/2191)) - * provider/aws: Normalize `certificate_chain` in `aws_iam_server_certificate` to - prevent unnecessary replacement. 
([#2411](https://github.com/hashicorp/terraform/issues/2411)) - * provider/aws: `aws_instance` supports `monitoring` ([#2489](https://github.com/hashicorp/terraform/issues/2489)) - * provider/aws: `aws_launch_configuration` now supports `enable_monitoring` ([#2410](https://github.com/hashicorp/terraform/issues/2410)) - * provider/aws: Show outputs after `terraform refresh` ([#2347](https://github.com/hashicorp/terraform/issues/2347)) - * provider/aws: Add backoff/throttling during DynamoDB creation ([#2462](https://github.com/hashicorp/terraform/issues/2462)) - * provider/aws: Add validation for aws_vpc.cidr_block ([#2514](https://github.com/hashicorp/terraform/issues/2514)) - * provider/aws: Add validation for aws_db_subnet_group.name ([#2513](https://github.com/hashicorp/terraform/issues/2513)) - * provider/aws: Add validation for aws_db_instance.identifier ([#2516](https://github.com/hashicorp/terraform/issues/2516)) - * provider/aws: Add validation for aws_elb.name ([#2517](https://github.com/hashicorp/terraform/issues/2517)) - * provider/aws: Add validation for aws_security_group (name+description) ([#2518](https://github.com/hashicorp/terraform/issues/2518)) - * provider/aws: Add validation for aws_launch_configuration ([#2519](https://github.com/hashicorp/terraform/issues/2519)) - * provider/aws: Add validation for aws_autoscaling_group.name ([#2520](https://github.com/hashicorp/terraform/issues/2520)) - * provider/aws: Add validation for aws_iam_role.name ([#2521](https://github.com/hashicorp/terraform/issues/2521)) - * provider/aws: Add validation for aws_iam_role_policy.name ([#2552](https://github.com/hashicorp/terraform/issues/2552)) - * provider/aws: Add validation for aws_iam_instance_profile.name ([#2553](https://github.com/hashicorp/terraform/issues/2553)) - * provider/aws: aws_auto_scaling_group.default_cooldown no longer requires - resource replacement ([#2510](https://github.com/hashicorp/terraform/issues/2510)) - * provider/aws: add AH and ESP 
protocol integers ([#2321](https://github.com/hashicorp/terraform/issues/2321)) - * provider/docker: `docker_container` has the `privileged` - option. ([#2227](https://github.com/hashicorp/terraform/issues/2227)) - * provider/openstack: allow `OS_AUTH_TOKEN` environment variable - to set the openstack `api_key` field ([#2234](https://github.com/hashicorp/terraform/issues/2234)) - * provider/openstack: Can now configure endpoint type (public, admin, - internal) ([#2262](https://github.com/hashicorp/terraform/issues/2262)) - * provider/cloudstack: `cloudstack_instance` now supports projects ([#2115](https://github.com/hashicorp/terraform/issues/2115)) - * provisioner/chef: Added a `os_type` to specifically specify the target OS ([#2483](https://github.com/hashicorp/terraform/issues/2483)) - * provisioner/chef: Added a `ohai_hints` option to upload hint files ([#2487](https://github.com/hashicorp/terraform/issues/2487)) - -BUG FIXES: - - * core: lifecycle `prevent_destroy` can be any value that can be - coerced into a bool ([#2268](https://github.com/hashicorp/terraform/issues/2268)) - * core: matching provider types in sibling modules won't override - each other's config. 
([#2464](https://github.com/hashicorp/terraform/issues/2464)) - * core: computed provider configurations now properly validate ([#2457](https://github.com/hashicorp/terraform/issues/2457)) - * core: orphan (commented out) resource dependencies are destroyed in - the correct order ([#2453](https://github.com/hashicorp/terraform/issues/2453)) - * core: validate object types in plugins are actually objects ([#2450](https://github.com/hashicorp/terraform/issues/2450)) - * core: fix `-no-color` flag in subcommands ([#2414](https://github.com/hashicorp/terraform/issues/2414)) - * core: Fix error of 'attribute not found for variable' when a computed - resource attribute is used as a parameter to a module ([#2477](https://github.com/hashicorp/terraform/issues/2477)) - * core: moduled orphans will properly inherit provider configs ([#2476](https://github.com/hashicorp/terraform/issues/2476)) - * core: modules with provider aliases work properly if the parent - doesn't implement those aliases ([#2475](https://github.com/hashicorp/terraform/issues/2475)) - * core: unknown resource attributes passed in as parameters to modules - now error ([#2478](https://github.com/hashicorp/terraform/issues/2478)) - * core: better error messages for missing variables ([#2479](https://github.com/hashicorp/terraform/issues/2479)) - * core: removed set items now properly appear in diffs and applies ([#2507](https://github.com/hashicorp/terraform/issues/2507)) - * core: '*' will not be added as part of the variable name when you - attempt multiplication without a space ([#2505](https://github.com/hashicorp/terraform/issues/2505)) - * core: fix target dependency calculation across module boundaries ([#2555](https://github.com/hashicorp/terraform/issues/2555)) - * command/*: fixed bug where variable input was not asked for unset - vars if terraform.tfvars existed ([#2502](https://github.com/hashicorp/terraform/issues/2502)) - * command/apply: prevent output duplication when reporting errors 
([#2267](https://github.com/hashicorp/terraform/issues/2267)) - * command/apply: destroyed orphan resources are properly counted ([#2506](https://github.com/hashicorp/terraform/issues/2506)) - * provider/aws: loading credentials from the environment (vars, EC2 role, - etc.) is more robust and will not ask for credentials from stdin ([#1841](https://github.com/hashicorp/terraform/issues/1841)) - * provider/aws: fix panic when route has no `cidr_block` ([#2215](https://github.com/hashicorp/terraform/issues/2215)) - * provider/aws: fix issue preventing destruction of IAM Roles ([#2177](https://github.com/hashicorp/terraform/issues/2177)) - * provider/aws: fix issue where Security Group Rules could collide and fail - to save to the state file correctly ([#2376](https://github.com/hashicorp/terraform/issues/2376)) - * provider/aws: fix issue preventing destruction self referencing Securtity - Group Rules ([#2305](https://github.com/hashicorp/terraform/issues/2305)) - * provider/aws: fix issue causing perpetual diff on ELB listeners - when non-lowercase protocol strings were used ([#2246](https://github.com/hashicorp/terraform/issues/2246)) - * provider/aws: corrected frankfurt S3 website region ([#2259](https://github.com/hashicorp/terraform/issues/2259)) - * provider/aws: `aws_elasticache_cluster` port is required ([#2160](https://github.com/hashicorp/terraform/issues/2160)) - * provider/aws: Handle AMIs where RootBlockDevice does not appear in the - BlockDeviceMapping, preventing root_block_device from working ([#2271](https://github.com/hashicorp/terraform/issues/2271)) - * provider/aws: fix `terraform show` with remote state ([#2371](https://github.com/hashicorp/terraform/issues/2371)) - * provider/aws: detect `instance_type` drift on `aws_instance` ([#2374](https://github.com/hashicorp/terraform/issues/2374)) - * provider/aws: fix crash when `security_group_rule` referenced non-existent - security group ([#2434](https://github.com/hashicorp/terraform/issues/2434)) 
- * provider/aws: `aws_launch_configuration` retries if IAM instance - profile is not ready yet. ([#2452](https://github.com/hashicorp/terraform/issues/2452)) - * provider/aws: `fqdn` is populated during creation for `aws_route53_record` ([#2528](https://github.com/hashicorp/terraform/issues/2528)) - * provider/aws: retry VPC delete on DependencyViolation due to eventual - consistency ([#2532](https://github.com/hashicorp/terraform/issues/2532)) - * provider/aws: VPC peering connections in "failed" state are deleted ([#2544](https://github.com/hashicorp/terraform/issues/2544)) - * provider/aws: EIP deletion works if it was manually disassociated ([#2543](https://github.com/hashicorp/terraform/issues/2543)) - * provider/aws: `elasticache_subnet_group.subnet_ids` is now a required argument ([#2534](https://github.com/hashicorp/terraform/issues/2534)) - * provider/aws: handle nil response from VPN connection describes ([#2533](https://github.com/hashicorp/terraform/issues/2533)) - * provider/cloudflare: manual record deletion doesn't cause error ([#2545](https://github.com/hashicorp/terraform/issues/2545)) - * provider/digitalocean: handle case where droplet is deleted outside of - terraform ([#2497](https://github.com/hashicorp/terraform/issues/2497)) - * provider/dme: No longer an error if record deleted manually ([#2546](https://github.com/hashicorp/terraform/issues/2546)) - * provider/docker: Fix issues when using containers with links ([#2327](https://github.com/hashicorp/terraform/issues/2327)) - * provider/openstack: fix panic case if API returns nil network ([#2448](https://github.com/hashicorp/terraform/issues/2448)) - * provider/template: fix issue causing "unknown variable" rendering errors - when an existing set of template variables is changed ([#2386](https://github.com/hashicorp/terraform/issues/2386)) - * provisioner/chef: improve the decoding logic to prevent parameter not found errors ([#2206](https://github.com/hashicorp/terraform/issues/2206)) - 
-## 0.5.3 (June 1, 2015) - -IMPROVEMENTS: - - * **New resource: `aws_kinesis_stream`** ([#2110](https://github.com/hashicorp/terraform/issues/2110)) - * **New resource: `aws_iam_server_certificate`** ([#2086](https://github.com/hashicorp/terraform/issues/2086)) - * **New resource: `aws_sqs_queue`** ([#1939](https://github.com/hashicorp/terraform/issues/1939)) - * **New resource: `aws_sns_topic`** ([#1974](https://github.com/hashicorp/terraform/issues/1974)) - * **New resource: `aws_sns_topic_subscription`** ([#1974](https://github.com/hashicorp/terraform/issues/1974)) - * **New resource: `aws_volume_attachment`** ([#2050](https://github.com/hashicorp/terraform/issues/2050)) - * **New resource: `google_storage_bucket`** ([#2060](https://github.com/hashicorp/terraform/issues/2060)) - * provider/aws: support ec2 termination protection ([#1988](https://github.com/hashicorp/terraform/issues/1988)) - * provider/aws: support for RDS Read Replicas ([#1946](https://github.com/hashicorp/terraform/issues/1946)) - * provider/aws: `aws_s3_bucket` add support for `policy` ([#1992](https://github.com/hashicorp/terraform/issues/1992)) - * provider/aws: `aws_ebs_volume` add support for `tags` ([#2135](https://github.com/hashicorp/terraform/issues/2135)) - * provider/aws: `aws_elasticache_cluster` Confirm node status before reporting - available - * provider/aws: `aws_network_acl` Add support for ICMP Protocol ([#2148](https://github.com/hashicorp/terraform/issues/2148)) - * provider/aws: New `force_destroy` parameter for S3 buckets, to destroy - Buckets that contain objects ([#2007](https://github.com/hashicorp/terraform/issues/2007)) - * provider/aws: switching `health_check_type` on ASGs no longer requires - resource refresh ([#2147](https://github.com/hashicorp/terraform/issues/2147)) - * provider/aws: ignore empty `vpc_security_group_ids` on `aws_instance` ([#2311](https://github.com/hashicorp/terraform/issues/2311)) - -BUG FIXES: - - * provider/aws: Correctly handle AWS 
keypairs which no longer exist ([#2032](https://github.com/hashicorp/terraform/issues/2032)) - * provider/aws: Fix issue with restoring an Instance from snapshot ID ([#2120](https://github.com/hashicorp/terraform/issues/2120)) - * provider/template: store relative path in the state ([#2038](https://github.com/hashicorp/terraform/issues/2038)) - * provisioner/chef: fix interpolation in the Chef provisioner ([#2168](https://github.com/hashicorp/terraform/issues/2168)) - * provisioner/remote-exec: Don't prepend shebang on scripts that already - have one ([#2041](https://github.com/hashicorp/terraform/issues/2041)) - -## 0.5.2 (May 15, 2015) - -FEATURES: - - * **Chef provisioning**: You can now provision new hosts (both Linux and - Windows) with [Chef](https://chef.io) using a native provisioner ([#1868](https://github.com/hashicorp/terraform/issues/1868)) - -IMPROVEMENTS: - - * **New config function: `formatlist`** - Format lists in a similar way to `format`. - Useful for creating URLs from a list of IPs. 
([#1829](https://github.com/hashicorp/terraform/issues/1829)) - * **New resource: `aws_route53_zone_association`** - * provider/aws: `aws_autoscaling_group` can wait for capacity in ELB - via `min_elb_capacity` ([#1970](https://github.com/hashicorp/terraform/issues/1970)) - * provider/aws: `aws_db_instances` supports `license_model` ([#1966](https://github.com/hashicorp/terraform/issues/1966)) - * provider/aws: `aws_elasticache_cluster` add support for Tags ([#1965](https://github.com/hashicorp/terraform/issues/1965)) - * provider/aws: `aws_network_acl` Network ACLs can be applied to multiple subnets ([#1931](https://github.com/hashicorp/terraform/issues/1931)) - * provider/aws: `aws_s3_bucket` exports `hosted_zone_id` and `region` ([#1865](https://github.com/hashicorp/terraform/issues/1865)) - * provider/aws: `aws_s3_bucket` add support for website `redirect_all_requests_to` ([#1909](https://github.com/hashicorp/terraform/issues/1909)) - * provider/aws: `aws_route53_record` exports `fqdn` ([#1847](https://github.com/hashicorp/terraform/issues/1847)) - * provider/aws: `aws_route53_zone` can create private hosted zones ([#1526](https://github.com/hashicorp/terraform/issues/1526)) - * provider/google: `google_compute_instance` `scratch` attribute added ([#1920](https://github.com/hashicorp/terraform/issues/1920)) - -BUG FIXES: - - * core: fix "resource not found" for interpolation issues with modules - * core: fix unflattenable error for orphans ([#1922](https://github.com/hashicorp/terraform/issues/1922)) - * core: fix deadlock with create-before-destroy + modules ([#1949](https://github.com/hashicorp/terraform/issues/1949)) - * core: fix "no roots found" error with create-before-destroy ([#1953](https://github.com/hashicorp/terraform/issues/1953)) - * core: variables set with environment variables won't validate as - not set without a default ([#1930](https://github.com/hashicorp/terraform/issues/1930)) - * core: resources with a blank ID in the state are now 
assumed to not exist ([#1905](https://github.com/hashicorp/terraform/issues/1905)) - * command/push: local vars override remote ones ([#1881](https://github.com/hashicorp/terraform/issues/1881)) - * provider/aws: Mark `aws_security_group` description as `ForceNew` ([#1871](https://github.com/hashicorp/terraform/issues/1871)) - * provider/aws: `aws_db_instance` ARN value is correct ([#1910](https://github.com/hashicorp/terraform/issues/1910)) - * provider/aws: `aws_db_instance` only submit modify request if there - is a change. ([#1906](https://github.com/hashicorp/terraform/issues/1906)) - * provider/aws: `aws_elasticache_cluster` export missing information on cluster nodes ([#1965](https://github.com/hashicorp/terraform/issues/1965)) - * provider/aws: bad AMI on a launch configuration won't block refresh ([#1901](https://github.com/hashicorp/terraform/issues/1901)) - * provider/aws: `aws_security_group` + `aws_subnet` - destroy timeout increased - to prevent DependencyViolation errors. ([#1886](https://github.com/hashicorp/terraform/issues/1886)) - * provider/google: `google_compute_instance` Local SSDs no-longer cause crash - ([#1088](https://github.com/hashicorp/terraform/issues/1088)) - * provider/google: `google_http_health_check` Defaults now driven from Terraform, - avoids errors on update ([#1894](https://github.com/hashicorp/terraform/issues/1894)) - * provider/google: `google_compute_template` Update Instance Template network - definition to match changes to Instance ([#980](https://github.com/hashicorp/terraform/issues/980)) - * provider/template: Fix infinite diff ([#1898](https://github.com/hashicorp/terraform/issues/1898)) - -## 0.5.1 (never released) - -This version was never released since we accidentally skipped it! - -## 0.5.0 (May 7, 2015) - -BACKWARDS INCOMPATIBILITIES: - - * provider/aws: Terraform now remove the default egress rule created by AWS in - a new security group. 
- -FEATURES: - - * **Multi-provider (a.k.a multi-region)**: Multiple instances of a single - provider can be configured so resources can apply to different settings. - As an example, this allows Terraform to manage multiple regions with AWS. - * **Environmental variables to set variables**: Environment variables can be - used to set variables. The environment variables must be in the format - `TF_VAR_name` and this will be checked last for a value. - * **New remote state backend: `s3`**: You can now store remote state in - an S3 bucket. ([#1723](https://github.com/hashicorp/terraform/issues/1723)) - * **Automatic AWS retries**: This release includes a lot of improvement - around automatic retries of transient errors in AWS. The number of - retry attempts is also configurable. - * **Templates**: A new `template_file` resource allows long strings needing - variable interpolation to be moved into files. ([#1778](https://github.com/hashicorp/terraform/issues/1778)) - * **Provision with WinRM**: Provisioners can now run remote commands on - Windows hosts. ([#1483](https://github.com/hashicorp/terraform/issues/1483)) - -IMPROVEMENTS: - - * **New config function: `length`** - Get the length of a string or a list. - Useful in conjunction with `split`. 
([#1495](https://github.com/hashicorp/terraform/issues/1495)) - * **New resource: `aws_app_cookie_stickiness_policy`** - * **New resource: `aws_customer_gateway`** - * **New resource: `aws_ebs_volume`** - * **New resource: `aws_elasticache_cluster`** - * **New resource: `aws_elasticache_security_group`** - * **New resource: `aws_elasticache_subnet_group`** - * **New resource: `aws_iam_access_key`** - * **New resource: `aws_iam_group_policy`** - * **New resource: `aws_iam_group`** - * **New resource: `aws_iam_instance_profile`** - * **New resource: `aws_iam_policy`** - * **New resource: `aws_iam_role_policy`** - * **New resource: `aws_iam_role`** - * **New resource: `aws_iam_user_policy`** - * **New resource: `aws_iam_user`** - * **New resource: `aws_lb_cookie_stickiness_policy`** - * **New resource: `aws_proxy_protocol_policy`** - * **New resource: `aws_security_group_rule`** - * **New resource: `aws_vpc_dhcp_options_association`** - * **New resource: `aws_vpc_dhcp_options`** - * **New resource: `aws_vpn_connection_route`** - * **New resource: `google_dns_managed_zone`** - * **New resource: `google_dns_record_set`** - * **Migrate to upstream AWS SDK:** Migrate the AWS provider to - [awslabs/aws-sdk-go](https://github.com/awslabs/aws-sdk-go), - the official `awslabs` library. Previously we had forked the library for - stability while `awslabs` refactored. Now that work has completed, and we've - migrated back to the upstream version. - * core: Improve error message on diff mismatch ([#1501](https://github.com/hashicorp/terraform/issues/1501)) - * provisioner/file: expand `~` in source path ([#1569](https://github.com/hashicorp/terraform/issues/1569)) - * provider/aws: Better retry logic, now retries up to 11 times by default - with exponentional backoff. This number is configurable. 
([#1787](https://github.com/hashicorp/terraform/issues/1787)) - * provider/aws: Improved credential detection ([#1470](https://github.com/hashicorp/terraform/issues/1470)) - * provider/aws: Can specify a `token` via the config file ([#1601](https://github.com/hashicorp/terraform/issues/1601)) - * provider/aws: Added new `vpc_security_group_ids` attribute for AWS - Instances. If using a VPC, you can now modify the security groups for that - Instance without destroying it ([#1539](https://github.com/hashicorp/terraform/issues/1539)) - * provider/aws: White or blacklist account IDs that can be used to - protect against accidents. ([#1595](https://github.com/hashicorp/terraform/issues/1595)) - * provider/aws: Add a subset of IAM resources ([#939](https://github.com/hashicorp/terraform/issues/939)) - * provider/aws: `aws_autoscaling_group` retries deletes through "in progress" - errors ([#1840](https://github.com/hashicorp/terraform/issues/1840)) - * provider/aws: `aws_autoscaling_group` waits for healthy capacity during - ASG creation ([#1839](https://github.com/hashicorp/terraform/issues/1839)) - * provider/aws: `aws_instance` supports placement groups ([#1358](https://github.com/hashicorp/terraform/issues/1358)) - * provider/aws: `aws_eip` supports network interface attachment ([#1681](https://github.com/hashicorp/terraform/issues/1681)) - * provider/aws: `aws_elb` supports in-place changing of listeners ([#1619](https://github.com/hashicorp/terraform/issues/1619)) - * provider/aws: `aws_elb` supports connection draining settings ([#1502](https://github.com/hashicorp/terraform/issues/1502)) - * provider/aws: `aws_elb` increase default idle timeout to 60s ([#1646](https://github.com/hashicorp/terraform/issues/1646)) - * provider/aws: `aws_key_pair` name can be omitted and generated ([#1751](https://github.com/hashicorp/terraform/issues/1751)) - * provider/aws: `aws_network_acl` improved validation for network ACL ports - and protocols 
([#1798](https://github.com/hashicorp/terraform/issues/1798)) ([#1808](https://github.com/hashicorp/terraform/issues/1808)) - * provider/aws: `aws_route_table` can target network interfaces ([#968](https://github.com/hashicorp/terraform/issues/968)) - * provider/aws: `aws_route_table` can specify propagating VGWs ([#1516](https://github.com/hashicorp/terraform/issues/1516)) - * provider/aws: `aws_route53_record` supports weighted sets ([#1578](https://github.com/hashicorp/terraform/issues/1578)) - * provider/aws: `aws_route53_zone` exports nameservers ([#1525](https://github.com/hashicorp/terraform/issues/1525)) - * provider/aws: `aws_s3_bucket` website support ([#1738](https://github.com/hashicorp/terraform/issues/1738)) - * provider/aws: `aws_security_group` name becomes optional and can be - automatically set to a unique identifier; this helps with - `create_before_destroy` scenarios ([#1632](https://github.com/hashicorp/terraform/issues/1632)) - * provider/aws: `aws_security_group` description becomes optional with a - static default value ([#1632](https://github.com/hashicorp/terraform/issues/1632)) - * provider/aws: automatically set the private IP as the SSH address - if not specified and no public IP is available ([#1623](https://github.com/hashicorp/terraform/issues/1623)) - * provider/aws: `aws_elb` exports `source_security_group` field ([#1708](https://github.com/hashicorp/terraform/issues/1708)) - * provider/aws: `aws_route53_record` supports alias targeting ([#1775](https://github.com/hashicorp/terraform/issues/1775)) - * provider/aws: Remove default AWS egress rule for newly created Security Groups ([#1765](https://github.com/hashicorp/terraform/issues/1765)) - * provider/consul: add `scheme` configuration argument ([#1838](https://github.com/hashicorp/terraform/issues/1838)) - * provider/docker: `docker_container` can specify links ([#1564](https://github.com/hashicorp/terraform/issues/1564)) - * provider/google: `resource_compute_disk` supports 
snapshots ([#1426](https://github.com/hashicorp/terraform/issues/1426)) - * provider/google: `resource_compute_instance` supports specifying the - device name ([#1426](https://github.com/hashicorp/terraform/issues/1426)) - * provider/openstack: Floating IP support for LBaaS ([#1550](https://github.com/hashicorp/terraform/issues/1550)) - * provider/openstack: Add AZ to `openstack_blockstorage_volume_v1` ([#1726](https://github.com/hashicorp/terraform/issues/1726)) - -BUG FIXES: - - * core: Fix graph cycle issues surrounding modules ([#1582](https://github.com/hashicorp/terraform/issues/1582)) ([#1637](https://github.com/hashicorp/terraform/issues/1637)) - * core: math on arbitrary variables works if first operand isn't a - numeric primitive. ([#1381](https://github.com/hashicorp/terraform/issues/1381)) - * core: avoid unnecessary cycles by pruning tainted destroys from - graph if there are no tainted resources ([#1475](https://github.com/hashicorp/terraform/issues/1475)) - * core: fix issue where destroy nodes weren't pruned in specific - edge cases around matching prefixes, which could cause cycles ([#1527](https://github.com/hashicorp/terraform/issues/1527)) - * core: fix issue causing diff mismatch errors in certain scenarios during - resource replacement ([#1515](https://github.com/hashicorp/terraform/issues/1515)) - * core: dependencies on resources with a different index work when - count > 1 ([#1540](https://github.com/hashicorp/terraform/issues/1540)) - * core: don't panic if variable default type is invalid ([#1344](https://github.com/hashicorp/terraform/issues/1344)) - * core: fix perpetual diff issue for computed maps that are empty ([#1607](https://github.com/hashicorp/terraform/issues/1607)) - * core: validation added to check for `self` variables in modules ([#1609](https://github.com/hashicorp/terraform/issues/1609)) - * core: fix edge case where validation didn't pick up unknown fields - if the value was computed 
([#1507](https://github.com/hashicorp/terraform/issues/1507)) - * core: Fix issue where values in sets on resources couldn't contain - hyphens. ([#1641](https://github.com/hashicorp/terraform/issues/1641)) - * core: Outputs removed from the config are removed from the state ([#1714](https://github.com/hashicorp/terraform/issues/1714)) - * core: Validate against the worst-case graph during plan phase to catch cycles - that would previously only show up during apply ([#1655](https://github.com/hashicorp/terraform/issues/1655)) - * core: Referencing invalid module output in module validates ([#1448](https://github.com/hashicorp/terraform/issues/1448)) - * command: remote states with uppercase types work ([#1356](https://github.com/hashicorp/terraform/issues/1356)) - * provider/aws: Support `AWS_SECURITY_TOKEN` env var again ([#1785](https://github.com/hashicorp/terraform/issues/1785)) - * provider/aws: Don't save "instance" for EIP if association fails ([#1776](https://github.com/hashicorp/terraform/issues/1776)) - * provider/aws: launch configuration ID set after create success ([#1518](https://github.com/hashicorp/terraform/issues/1518)) - * provider/aws: Fixed an issue with creating ELBs without any tags ([#1580](https://github.com/hashicorp/terraform/issues/1580)) - * provider/aws: Fix issue in Security Groups with empty IPRanges ([#1612](https://github.com/hashicorp/terraform/issues/1612)) - * provider/aws: manually deleted S3 buckets are refreshed properly ([#1574](https://github.com/hashicorp/terraform/issues/1574)) - * provider/aws: only check for EIP allocation ID in VPC ([#1555](https://github.com/hashicorp/terraform/issues/1555)) - * provider/aws: raw protocol numbers work in `aws_network_acl` ([#1435](https://github.com/hashicorp/terraform/issues/1435)) - * provider/aws: Block devices can be encrypted ([#1718](https://github.com/hashicorp/terraform/issues/1718)) - * provider/aws: ASG health check grace period can be updated in-place 
([#1682](https://github.com/hashicorp/terraform/issues/1682)) - * provider/aws: ELB security groups can be updated in-place ([#1662](https://github.com/hashicorp/terraform/issues/1662)) - * provider/aws: `aws_main_route_table +For earlier versions, see [the changelog as of v0.8.8](https://github.com/hashicorp/terraform/blob/v0.8.8/CHANGELOG.md). From 5b6eee76137e744b56fed990bb0b9efa78ad0e4f Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Mon, 5 Nov 2018 16:35:50 -0800 Subject: [PATCH 041/149] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a003c915231b..93ffcb8d9f49 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ BUG FIXES: * helper/schema: Prevent the insertion of empty diff values when converting legacy diffs [GH-19253] * core: Fix inconsistent plans when replacing instances. [GH-19233] +* core: Correct handling of unknown values in module outputs during planning and final resolution of them during apply. [GH-19237] ## 0.12.0-alpha2 (October 30, 2018) From c795302ab2d844a6d56d520e32639af3d0801763 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 6 Nov 2018 11:19:34 +0000 Subject: [PATCH 042/149] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 93ffcb8d9f49..92fcfe365632 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ IMPROVEMENTS: BUG FIXES: * helper/schema: Prevent the insertion of empty diff values when converting legacy diffs [GH-19253] +* helper/schema: Fix timeout parsing during Provider.Diff (plan) [GH-19286] * core: Fix inconsistent plans when replacing instances. [GH-19233] * core: Correct handling of unknown values in module outputs during planning and final resolution of them during apply. 
[GH-19237] From 52a1b22f7a20a760455c36891b497f251b63f01d Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Wed, 31 Oct 2018 16:45:03 +0100 Subject: [PATCH 043/149] Implement the remote enhanced backend This is a refactored version of the `remote` backend that was initially added to Terraform v0.11.8 which should now be compatible with v0.12.0. --- backend/atlas/backend.go | 11 +- backend/atlas/backend_test.go | 4 +- backend/atlas/state_client_test.go | 2 +- backend/backend.go | 44 +- backend/init/init.go | 66 +- backend/init/init_test.go | 2 + backend/local/backend.go | 27 +- backend/local/backend_apply.go | 8 +- backend/local/backend_plan.go | 17 +- backend/local/backend_test.go | 24 +- backend/local/testing.go | 114 +- backend/remote-state/artifactory/backend.go | 6 +- backend/remote-state/backend.go | 6 +- backend/remote-state/etcdv2/backend.go | 6 +- backend/remote-state/http/backend.go | 6 +- backend/remote-state/manta/backend.go | 10 +- backend/remote-state/manta/backend_test.go | 12 +- backend/remote-state/manta/client_test.go | 12 +- backend/remote-state/swift/backend_state.go | 6 +- backend/remote/backend.go | 677 ++++++++++++ backend/remote/backend_apply.go | 237 +++++ backend/remote/backend_apply_test.go | 880 +++++++++++++++ backend/remote/backend_common.go | 334 ++++++ backend/remote/backend_mock.go | 998 ++++++++++++++++++ backend/remote/backend_plan.go | 304 ++++++ backend/remote/backend_plan_test.go | 561 ++++++++++ backend/remote/backend_state.go | 181 ++++ backend/remote/backend_state_test.go | 58 + backend/remote/backend_test.go | 234 ++++ backend/remote/cli.go | 14 + backend/remote/colorize.go | 47 + .../test-fixtures/apply-destroy/apply.log | 4 + .../test-fixtures/apply-destroy/main.tf | 1 + .../test-fixtures/apply-destroy/plan.log | 22 + .../test-fixtures/apply-no-changes/main.tf | 1 + .../test-fixtures/apply-no-changes/plan.log | 17 + .../apply-policy-hard-failed/main.tf | 1 + .../apply-policy-hard-failed/plan.log | 21 + 
.../apply-policy-hard-failed/policy.log | 12 + .../apply-policy-passed/apply.log | 4 + .../test-fixtures/apply-policy-passed/main.tf | 1 + .../apply-policy-passed/plan.log | 21 + .../apply-policy-passed/policy.log | 12 + .../apply-policy-soft-failed/apply.log | 4 + .../apply-policy-soft-failed/main.tf | 1 + .../apply-policy-soft-failed/plan.log | 21 + .../apply-policy-soft-failed/policy.log | 12 + .../test-fixtures/apply-variables/apply.log | 4 + .../test-fixtures/apply-variables/main.tf | 4 + .../test-fixtures/apply-variables/plan.log | 21 + .../test-fixtures/apply-with-error/main.tf | 5 + .../test-fixtures/apply-with-error/plan.log | 10 + backend/remote/test-fixtures/apply/apply.log | 4 + backend/remote/test-fixtures/apply/main.tf | 1 + backend/remote/test-fixtures/apply/plan.log | 21 + backend/remote/test-fixtures/empty/.gitignore | 0 .../plan-policy-hard-failed/main.tf | 1 + .../plan-policy-hard-failed/plan.log | 21 + .../plan-policy-hard-failed/policy.log | 12 + .../test-fixtures/plan-policy-passed/main.tf | 1 + .../test-fixtures/plan-policy-passed/plan.log | 21 + .../plan-policy-passed/policy.log | 12 + .../plan-policy-soft-failed/main.tf | 1 + .../plan-policy-soft-failed/plan.log | 21 + .../plan-policy-soft-failed/policy.log | 12 + .../test-fixtures/plan-variables/main.tf | 4 + .../test-fixtures/plan-variables/plan.log | 21 + .../test-fixtures/plan-with-error/main.tf | 5 + .../test-fixtures/plan-with-error/plan.log | 10 + .../terraform/main.tf | 1 + .../terraform/plan.log | 21 + backend/remote/test-fixtures/plan/main.tf | 1 + backend/remote/test-fixtures/plan/plan.log | 21 + backend/remote/testing.go | 182 ++++ backend/testing.go | 53 +- .../providers/terraform/data_source_state.go | 8 +- builtin/providers/terraform/provider_test.go | 7 +- command/apply.go | 8 +- command/command_test.go | 11 +- command/init.go | 17 +- command/meta.go | 5 - command/meta_backend.go | 135 ++- command/meta_backend_migrate.go | 100 +- command/meta_backend_test.go | 211 +++- 
command/plan.go | 4 +- command/state_meta.go | 5 +- .../main.tf | 2 +- .../main.tf | 2 +- go.mod | 4 + go.sum | 8 + main.go | 3 +- registry/client_test.go | 18 +- website/docs/backends/types/manta.html.md | 1 - .../types/terraform-enterprise.html.md | 3 + 94 files changed, 5807 insertions(+), 299 deletions(-) create mode 100644 backend/remote/backend.go create mode 100644 backend/remote/backend_apply.go create mode 100644 backend/remote/backend_apply_test.go create mode 100644 backend/remote/backend_common.go create mode 100644 backend/remote/backend_mock.go create mode 100644 backend/remote/backend_plan.go create mode 100644 backend/remote/backend_plan_test.go create mode 100644 backend/remote/backend_state.go create mode 100644 backend/remote/backend_state_test.go create mode 100644 backend/remote/backend_test.go create mode 100644 backend/remote/cli.go create mode 100644 backend/remote/colorize.go create mode 100644 backend/remote/test-fixtures/apply-destroy/apply.log create mode 100644 backend/remote/test-fixtures/apply-destroy/main.tf create mode 100644 backend/remote/test-fixtures/apply-destroy/plan.log create mode 100644 backend/remote/test-fixtures/apply-no-changes/main.tf create mode 100644 backend/remote/test-fixtures/apply-no-changes/plan.log create mode 100644 backend/remote/test-fixtures/apply-policy-hard-failed/main.tf create mode 100644 backend/remote/test-fixtures/apply-policy-hard-failed/plan.log create mode 100644 backend/remote/test-fixtures/apply-policy-hard-failed/policy.log create mode 100644 backend/remote/test-fixtures/apply-policy-passed/apply.log create mode 100644 backend/remote/test-fixtures/apply-policy-passed/main.tf create mode 100644 backend/remote/test-fixtures/apply-policy-passed/plan.log create mode 100644 backend/remote/test-fixtures/apply-policy-passed/policy.log create mode 100644 backend/remote/test-fixtures/apply-policy-soft-failed/apply.log create mode 100644 backend/remote/test-fixtures/apply-policy-soft-failed/main.tf 
create mode 100644 backend/remote/test-fixtures/apply-policy-soft-failed/plan.log create mode 100644 backend/remote/test-fixtures/apply-policy-soft-failed/policy.log create mode 100644 backend/remote/test-fixtures/apply-variables/apply.log create mode 100644 backend/remote/test-fixtures/apply-variables/main.tf create mode 100644 backend/remote/test-fixtures/apply-variables/plan.log create mode 100644 backend/remote/test-fixtures/apply-with-error/main.tf create mode 100644 backend/remote/test-fixtures/apply-with-error/plan.log create mode 100644 backend/remote/test-fixtures/apply/apply.log create mode 100644 backend/remote/test-fixtures/apply/main.tf create mode 100644 backend/remote/test-fixtures/apply/plan.log create mode 100644 backend/remote/test-fixtures/empty/.gitignore create mode 100644 backend/remote/test-fixtures/plan-policy-hard-failed/main.tf create mode 100644 backend/remote/test-fixtures/plan-policy-hard-failed/plan.log create mode 100644 backend/remote/test-fixtures/plan-policy-hard-failed/policy.log create mode 100644 backend/remote/test-fixtures/plan-policy-passed/main.tf create mode 100644 backend/remote/test-fixtures/plan-policy-passed/plan.log create mode 100644 backend/remote/test-fixtures/plan-policy-passed/policy.log create mode 100644 backend/remote/test-fixtures/plan-policy-soft-failed/main.tf create mode 100644 backend/remote/test-fixtures/plan-policy-soft-failed/plan.log create mode 100644 backend/remote/test-fixtures/plan-policy-soft-failed/policy.log create mode 100644 backend/remote/test-fixtures/plan-variables/main.tf create mode 100644 backend/remote/test-fixtures/plan-variables/plan.log create mode 100644 backend/remote/test-fixtures/plan-with-error/main.tf create mode 100644 backend/remote/test-fixtures/plan-with-error/plan.log create mode 100644 backend/remote/test-fixtures/plan-with-working-directory/terraform/main.tf create mode 100644 backend/remote/test-fixtures/plan-with-working-directory/terraform/plan.log create mode 100644 
backend/remote/test-fixtures/plan/main.tf create mode 100644 backend/remote/test-fixtures/plan/plan.log create mode 100644 backend/remote/testing.go diff --git a/backend/atlas/backend.go b/backend/atlas/backend.go index d29123c463f4..e4f2474559dd 100644 --- a/backend/atlas/backend.go +++ b/backend/atlas/backend.go @@ -52,6 +52,11 @@ type Backend struct { var _ backend.Backend = (*Backend)(nil) +// New returns a new initialized Atlas backend. +func New() *Backend { + return &Backend{} +} + func (b *Backend) ConfigSchema() *configschema.Block { return &configschema.Block{ Attributes: map[string]*configschema.Attribute{ @@ -163,16 +168,16 @@ func (b *Backend) Configure(obj cty.Value) tfdiags.Diagnostics { } func (b *Backend) Workspaces() ([]string, error) { - return nil, backend.ErrNamedStatesNotSupported + return nil, backend.ErrWorkspacesNotSupported } func (b *Backend) DeleteWorkspace(name string) error { - return backend.ErrNamedStatesNotSupported + return backend.ErrWorkspacesNotSupported } func (b *Backend) StateMgr(name string) (state.State, error) { if name != backend.DefaultStateName { - return nil, backend.ErrNamedStatesNotSupported + return nil, backend.ErrWorkspacesNotSupported } return &remote.State{Client: b.stateClient}, nil diff --git a/backend/atlas/backend_test.go b/backend/atlas/backend_test.go index d42418865e9a..b85eb3404297 100644 --- a/backend/atlas/backend_test.go +++ b/backend/atlas/backend_test.go @@ -18,7 +18,7 @@ func TestConfigure_envAddr(t *testing.T) { defer os.Setenv("ATLAS_ADDRESS", os.Getenv("ATLAS_ADDRESS")) os.Setenv("ATLAS_ADDRESS", "http://foo.com") - b := &Backend{} + b := New() diags := b.Configure(cty.ObjectVal(map[string]cty.Value{ "name": cty.StringVal("foo/bar"), "address": cty.NullVal(cty.String), @@ -37,7 +37,7 @@ func TestConfigure_envToken(t *testing.T) { defer os.Setenv("ATLAS_TOKEN", os.Getenv("ATLAS_TOKEN")) os.Setenv("ATLAS_TOKEN", "foo") - b := &Backend{} + b := New() diags := 
b.Configure(cty.ObjectVal(map[string]cty.Value{ "name": cty.StringVal("foo/bar"), "address": cty.NullVal(cty.String), diff --git a/backend/atlas/state_client_test.go b/backend/atlas/state_client_test.go index 355a537f04f7..28a2c701c1a8 100644 --- a/backend/atlas/state_client_test.go +++ b/backend/atlas/state_client_test.go @@ -29,7 +29,7 @@ func testStateClient(t *testing.T, c map[string]string) remote.Client { } synthBody := configs.SynthBody("", vals) - b := backend.TestBackendConfig(t, &Backend{}, synthBody) + b := backend.TestBackendConfig(t, New(), synthBody) raw, err := b.StateMgr(backend.DefaultStateName) if err != nil { t.Fatalf("err: %s", err) diff --git a/backend/backend.go b/backend/backend.go index ef1da1885ebc..5259af8ad125 100644 --- a/backend/backend.go +++ b/backend/backend.go @@ -9,8 +9,6 @@ import ( "errors" "time" - "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/command/clistate" "github.com/hashicorp/terraform/configs" @@ -22,24 +20,35 @@ import ( "github.com/hashicorp/terraform/states/statemgr" "github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" ) // DefaultStateName is the name of the default, initial state that every // backend must have. This state cannot be deleted. const DefaultStateName = "default" -// ErrWorkspacesNotSupported is an error returned when a caller attempts -// to perform an operation on a workspace other than "default" for a -// backend that doesn't support multiple workspaces. -// -// The caller can detect this to do special fallback behavior or produce -// a specific, helpful error message. -var ErrWorkspacesNotSupported = errors.New("workspaces not supported") +var ( + // ErrDefaultWorkspaceNotSupported is returned when an operation does not + // support using the default workspace, but requires a named workspace to + // be selected. 
+ ErrDefaultWorkspaceNotSupported = errors.New("default workspace not supported\n" + + "You can create a new workspace with the \"workspace new\" command.") -// ErrNamedStatesNotSupported is an older name for ErrWorkspacesNotSupported. -// -// Deprecated: Use ErrWorkspacesNotSupported instead. -var ErrNamedStatesNotSupported = ErrWorkspacesNotSupported + // ErrOperationNotSupported is returned when an unsupported operation + // is detected by the configured backend. + ErrOperationNotSupported = errors.New("operation not supported") + + // ErrWorkspacesNotSupported is an error returned when a caller attempts + // to perform an operation on a workspace other than "default" for a + // backend that doesn't support multiple workspaces. + // + // The caller can detect this to do special fallback behavior or produce + // a specific, helpful error message. + ErrWorkspacesNotSupported = errors.New("workspaces not supported") +) + +// InitFn is used to initialize a new backend. +type InitFn func() Backend // Backend is the minimal interface that must be implemented to enable Terraform. type Backend interface { @@ -179,11 +188,12 @@ type Operation struct { // The options below are more self-explanatory and affect the runtime // behavior of the operation. + AutoApprove bool Destroy bool + DestroyForce bool + Parallelism int Targets []addrs.Targetable Variables map[string]UnparsedVariableValue - AutoApprove bool - DestroyForce bool // Input/output/control options. UIIn terraform.UIInput @@ -244,10 +254,6 @@ type RunningOperation struct { // operation has completed. Result OperationResult - // ExitCode can be used to set a custom exit code. This enables enhanced - // backends to set specific exit codes that miror any remote exit codes. - ExitCode int - // PlanEmpty is populated after a Plan operation completes without error // to note whether a plan is empty or has changes. 
PlanEmpty bool diff --git a/backend/init/init.go b/backend/init/init.go index 81286406b9ae..0e4f7188b1ae 100644 --- a/backend/init/init.go +++ b/backend/init/init.go @@ -3,27 +3,28 @@ package init import ( + "os" "sync" "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/svchost/disco" "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" - backendatlas "github.com/hashicorp/terraform/backend/atlas" - backendlocal "github.com/hashicorp/terraform/backend/local" - backendartifactory "github.com/hashicorp/terraform/backend/remote-state/artifactory" + backendAtlas "github.com/hashicorp/terraform/backend/atlas" + backendLocal "github.com/hashicorp/terraform/backend/local" + backendRemote "github.com/hashicorp/terraform/backend/remote" + backendArtifactory "github.com/hashicorp/terraform/backend/remote-state/artifactory" backendAzure "github.com/hashicorp/terraform/backend/remote-state/azure" - backendconsul "github.com/hashicorp/terraform/backend/remote-state/consul" - backendetcdv2 "github.com/hashicorp/terraform/backend/remote-state/etcdv2" - backendetcdv3 "github.com/hashicorp/terraform/backend/remote-state/etcdv3" + backendConsul "github.com/hashicorp/terraform/backend/remote-state/consul" + backendEtcdv2 "github.com/hashicorp/terraform/backend/remote-state/etcdv2" + backendEtcdv3 "github.com/hashicorp/terraform/backend/remote-state/etcdv3" backendGCS "github.com/hashicorp/terraform/backend/remote-state/gcs" - backendhttp "github.com/hashicorp/terraform/backend/remote-state/http" - backendinmem "github.com/hashicorp/terraform/backend/remote-state/inmem" + backendHTTP "github.com/hashicorp/terraform/backend/remote-state/http" + backendInmem "github.com/hashicorp/terraform/backend/remote-state/inmem" backendManta "github.com/hashicorp/terraform/backend/remote-state/manta" backendS3 "github.com/hashicorp/terraform/backend/remote-state/s3" backendSwift "github.com/hashicorp/terraform/backend/remote-state/swift" - - 
"github.com/zclconf/go-cty/cty" ) // backends is the list of available backends. This is a global variable @@ -37,27 +38,40 @@ import ( // complex structures and supporting that over the plugin system is currently // prohibitively difficult. For those wanting to implement a custom backend, // they can do so with recompilation. -var backends map[string]func() backend.Backend +var backends map[string]backend.InitFn var backendsLock sync.Mutex +// Init initializes the backends map with all our hardcoded backends. func Init(services *disco.Disco) { - // Our hardcoded backends. We don't need to acquire a lock here - // since init() code is serial and can't spawn goroutines. - backends = map[string]func() backend.Backend{ - "artifactory": func() backend.Backend { return backendartifactory.New() }, - "atlas": func() backend.Backend { return &backendatlas.Backend{} }, - "http": func() backend.Backend { return backendhttp.New() }, - "local": func() backend.Backend { return &backendlocal.Local{} }, - "consul": func() backend.Backend { return backendconsul.New() }, - "inmem": func() backend.Backend { return backendinmem.New() }, - "swift": func() backend.Backend { return backendSwift.New() }, - "s3": func() backend.Backend { return backendS3.New() }, + backendsLock.Lock() + defer backendsLock.Unlock() + + backends = map[string]backend.InitFn{ + // Enhanced backends. + "local": func() backend.Backend { return backendLocal.New() }, + "remote": func() backend.Backend { + b := backendRemote.New(services) + if os.Getenv("TF_FORCE_LOCAL_BACKEND") != "" { + return backendLocal.NewWithBackend(b) + } + return b + }, + + // Remote State backends. 
+ "artifactory": func() backend.Backend { return backendArtifactory.New() }, + "atlas": func() backend.Backend { return backendAtlas.New() }, "azurerm": func() backend.Backend { return backendAzure.New() }, - "etcd": func() backend.Backend { return backendetcdv2.New() }, - "etcdv3": func() backend.Backend { return backendetcdv3.New() }, + "consul": func() backend.Backend { return backendConsul.New() }, + "etcd": func() backend.Backend { return backendEtcdv2.New() }, + "etcdv3": func() backend.Backend { return backendEtcdv3.New() }, "gcs": func() backend.Backend { return backendGCS.New() }, + "http": func() backend.Backend { return backendHTTP.New() }, + "inmem": func() backend.Backend { return backendInmem.New() }, "manta": func() backend.Backend { return backendManta.New() }, + "s3": func() backend.Backend { return backendS3.New() }, + "swift": func() backend.Backend { return backendSwift.New() }, + // Deprecated backends. "azure": func() backend.Backend { return deprecateBackend( backendAzure.New(), @@ -69,7 +83,7 @@ func Init(services *disco.Disco) { // Backend returns the initialization factory for the given backend, or // nil if none exists. -func Backend(name string) func() backend.Backend { +func Backend(name string) backend.InitFn { backendsLock.Lock() defer backendsLock.Unlock() return backends[name] @@ -82,7 +96,7 @@ func Backend(name string) func() backend.Backend { // This method sets this backend globally and care should be taken to do // this only before Terraform is executing to prevent odd behavior of backends // changing mid-execution. 
-func Set(name string, f func() backend.Backend) { +func Set(name string, f backend.InitFn) { backendsLock.Lock() defer backendsLock.Unlock() diff --git a/backend/init/init_test.go b/backend/init/init_test.go index 804033bdfcfa..02eacb63831a 100644 --- a/backend/init/init_test.go +++ b/backend/init/init_test.go @@ -17,6 +17,7 @@ func TestInit_backend(t *testing.T) { Type string }{ {"local", "*local.Local"}, + {"remote", "*remote.Remote"}, {"atlas", "*atlas.Backend"}, {"azurerm", "*azure.Backend"}, {"consul", "*consul.Backend"}, @@ -53,6 +54,7 @@ func TestInit_forceLocalBackend(t *testing.T) { Type string }{ {"local", "nil"}, + {"remote", "*remote.Remote"}, } // Set the TF_FORCE_LOCAL_BACKEND flag so all enhanced backends will diff --git a/backend/local/backend.go b/backend/local/backend.go index 601c0289bfc4..ce4edc0c3564 100644 --- a/backend/local/backend.go +++ b/backend/local/backend.go @@ -12,14 +12,13 @@ import ( "strings" "sync" - "github.com/hashicorp/terraform/tfdiags" - "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/command/clistate" "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/states/statemgr" "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" "github.com/mitchellh/colorstring" "github.com/zclconf/go-cty/cty" @@ -96,12 +95,25 @@ type Local struct { // exact commands that are being run. RunningInAutomation bool + // opLock locks operations opLock sync.Mutex - once sync.Once } var _ backend.Backend = (*Local)(nil) +// New returns a new initialized local backend. +func New() *Local { + return NewWithBackend(nil) +} + +// NewWithBackend returns a new local backend initialized with a +// dedicated backend for non-enhanced behavior. 
+func NewWithBackend(backend backend.Backend) *Local { + return &Local{ + Backend: backend, + } +} + func (b *Local) ConfigSchema() *configschema.Block { if b.Backend != nil { return b.Backend.ConfigSchema() @@ -116,8 +128,6 @@ func (b *Local) ConfigSchema() *configschema.Block { Type: cty.String, Optional: true, }, - // environment_dir was previously a deprecated alias for - // workspace_dir, but now removed. }, } } @@ -342,7 +352,7 @@ func (b *Local) Operation(ctx context.Context, op *backend.Operation) (*backend. return runningOp, nil } -// opWait wats for the operation to complete, and a stop signal or a +// opWait waits for the operation to complete, and a stop signal or a // cancelation signal. func (b *Local) opWait( doneCh <-chan struct{}, @@ -416,7 +426,10 @@ func (b *Local) ReportResult(op *backend.RunningOperation, diags tfdiags.Diagnos // Shouldn't generally happen, but if it does then we'll at least // make some noise in the logs to help us spot it. if len(diags) != 0 { - log.Printf("[ERROR] Local backend needs to report diagnostics but ShowDiagnostics callback is not set: %s", diags.ErrWithWarnings()) + log.Printf( + "[ERROR] Local backend needs to report diagnostics but ShowDiagnostics is not set:\n%s", + diags.ErrWithWarnings(), + ) } } } diff --git a/backend/local/backend_apply.go b/backend/local/backend_apply.go index 66c437971342..ce118a863f3f 100644 --- a/backend/local/backend_apply.go +++ b/backend/local/backend_apply.go @@ -8,7 +8,6 @@ import ( "log" "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/states/statefile" @@ -25,7 +24,6 @@ func (b *Local) opApply( log.Printf("[INFO] backend/local: starting Apply operation") var diags tfdiags.Diagnostics - var err error // If we have a nil module at this point, then set it to an empty tree // to avoid any potential crashes. 
@@ -33,7 +31,9 @@ func (b *Local) opApply( diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, "No configuration files", - "Apply requires configuration to be present. Applying without a configuration would mark everything for destruction, which is normally not what is desired. If you would like to destroy everything, run 'terraform destroy' instead.", + "Apply requires configuration to be present. Applying without a configuration "+ + "would mark everything for destruction, which is normally not what is desired. "+ + "If you would like to destroy everything, run 'terraform destroy' instead.", )) b.ReportResult(runningOp, diags) return @@ -155,7 +155,7 @@ func (b *Local) opApply( // Store the final state runningOp.State = applyState - err = statemgr.WriteAndPersist(opState, applyState) + err := statemgr.WriteAndPersist(opState, applyState) if err != nil { diags = diags.Append(b.backupStateForError(applyState, err)) b.ReportResult(runningOp, diags) diff --git a/backend/local/backend_plan.go b/backend/local/backend_plan.go index 553d68e3a487..950d83b77621 100644 --- a/backend/local/backend_plan.go +++ b/backend/local/backend_plan.go @@ -26,13 +26,13 @@ func (b *Local) opPlan( log.Printf("[INFO] backend/local: starting Plan operation") var diags tfdiags.Diagnostics - var err error - if b.CLI != nil && op.PlanFile != nil { + if op.PlanFile != nil { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, "Can't re-plan a saved plan", - "The plan command was given a saved plan file as its input. This command generates a new plan, and so it requires a configuration directory as its argument.", + "The plan command was given a saved plan file as its input. This command generates "+ + "a new plan, and so it requires a configuration directory as its argument.", )) b.ReportResult(runningOp, diags) return @@ -43,7 +43,10 @@ func (b *Local) opPlan( diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, "No configuration files", - "Plan requires configuration to be present. 
Planning without a configuration would mark everything for destruction, which is normally not what is desired. If you would like to destroy everything, run plan with the -destroy option. Otherwise, create a Terraform configuration file (.tf file) and try again.", + "Plan requires configuration to be present. Planning without a configuration would "+ + "mark everything for destruction, which is normally not what is desired. If you "+ + "would like to destroy everything, run plan with the -destroy option. Otherwise, "+ + "create a Terraform configuration file (.tf file) and try again.", )) b.ReportResult(runningOp, diags) return @@ -122,7 +125,9 @@ func (b *Local) opPlan( if op.PlanOutBackend == nil { // This is always a bug in the operation caller; it's not valid // to set PlanOutPath without also setting PlanOutBackend. - diags = diags.Append(fmt.Errorf("PlanOutPath set without also setting PlanOutBackend (this is a bug in Terraform)")) + diags = diags.Append(fmt.Errorf( + "PlanOutPath set without also setting PlanOutBackend (this is a bug in Terraform)"), + ) b.ReportResult(runningOp, diags) return } @@ -134,7 +139,7 @@ func (b *Local) opPlan( plannedStateFile := statemgr.PlannedStateUpdate(opState, baseState) log.Printf("[INFO] backend/local: writing plan output to: %s", path) - err = planfile.Create(path, configSnap, plannedStateFile, plan) + err := planfile.Create(path, configSnap, plannedStateFile, plan) if err != nil { diags = diags.Append(tfdiags.Sourceless( tfdiags.Error, diff --git a/backend/local/backend_test.go b/backend/local/backend_test.go index de3f75c07ae1..863020efaeb2 100644 --- a/backend/local/backend_test.go +++ b/backend/local/backend_test.go @@ -15,14 +15,14 @@ import ( ) func TestLocal_impl(t *testing.T) { - var _ backend.Enhanced = new(Local) - var _ backend.Local = new(Local) - var _ backend.CLI = new(Local) + var _ backend.Enhanced = New() + var _ backend.Local = New() + var _ backend.CLI = New() } func TestLocal_backend(t *testing.T) { 
defer testTmpDir(t)() - b := &Local{} + b := New() backend.TestBackendStates(t, b) backend.TestBackendStateLocks(t, b, b) } @@ -49,7 +49,7 @@ func checkState(t *testing.T, path, expected string) { } func TestLocal_StatePaths(t *testing.T) { - b := &Local{} + b := New() // Test the defaults path, out, back := b.StatePaths("") @@ -94,7 +94,7 @@ func TestLocal_addAndRemoveStates(t *testing.T) { dflt := backend.DefaultStateName expectedStates := []string{dflt} - b := &Local{} + b := New() states, err := b.Workspaces() if err != nil { t.Fatal(err) @@ -207,13 +207,11 @@ func (b *testDelegateBackend) DeleteWorkspace(name string) error { // verify that the MultiState methods are dispatched to the correct Backend. func TestLocal_multiStateBackend(t *testing.T) { // assign a separate backend where we can read the state - b := &Local{ - Backend: &testDelegateBackend{ - stateErr: true, - statesErr: true, - deleteErr: true, - }, - } + b := NewWithBackend(&testDelegateBackend{ + stateErr: true, + statesErr: true, + deleteErr: true, + }) if _, err := b.StateMgr("test"); err != errTestDelegateState { t.Fatal("expected errTestDelegateState, got:", err) diff --git a/backend/local/testing.go b/backend/local/testing.go index bc833dc999d6..239706057059 100644 --- a/backend/local/testing.go +++ b/backend/local/testing.go @@ -21,30 +21,30 @@ import ( // public fields without any locks. func TestLocal(t *testing.T) (*Local, func()) { t.Helper() - tempDir := testTempDir(t) - var local *Local - local = &Local{ - StatePath: filepath.Join(tempDir, "state.tfstate"), - StateOutPath: filepath.Join(tempDir, "state.tfstate"), - StateBackupPath: filepath.Join(tempDir, "state.tfstate.bak"), - StateWorkspaceDir: filepath.Join(tempDir, "state.tfstate.d"), - ContextOpts: &terraform.ContextOpts{}, - ShowDiagnostics: func(vals ...interface{}) { - var diags tfdiags.Diagnostics - diags = diags.Append(vals...) 
- for _, diag := range diags { - // NOTE: Since the caller here is not directly the TestLocal - // function, t.Helper doesn't apply and so the log source - // isn't correctly shown in the test log output. This seems - // unavoidable as long as this is happening so indirectly. - t.Log(diag.Description().Summary) - if local.CLI != nil { - local.CLI.Error(diag.Description().Summary) - } + + local := New() + local.StatePath = filepath.Join(tempDir, "state.tfstate") + local.StateOutPath = filepath.Join(tempDir, "state.tfstate") + local.StateBackupPath = filepath.Join(tempDir, "state.tfstate.bak") + local.StateWorkspaceDir = filepath.Join(tempDir, "state.tfstate.d") + local.ContextOpts = &terraform.ContextOpts{} + + local.ShowDiagnostics = func(vals ...interface{}) { + var diags tfdiags.Diagnostics + diags = diags.Append(vals...) + for _, diag := range diags { + // NOTE: Since the caller here is not directly the TestLocal + // function, t.Helper doesn't apply and so the log source + // isn't correctly shown in the test log output. This seems + // unavoidable as long as this is happening so indirectly. + t.Log(diag.Description().Summary) + if local.CLI != nil { + local.CLI.Error(diag.Description().Summary) } - }, + } } + cleanup := func() { if err := os.RemoveAll(tempDir); err != nil { t.Fatal("error cleanup up test:", err) @@ -86,36 +86,80 @@ func TestLocalProvider(t *testing.T, b *Local, name string, schema *terraform.Pr } -// TestNewLocalSingle is a factory for creating a TestLocalSingleState. -// This function matches the signature required for backend/init. -func TestNewLocalSingle() backend.Backend { - return &TestLocalSingleState{} -} - // TestLocalSingleState is a backend implementation that wraps Local // and modifies it to only support single states (returns -// ErrNamedStatesNotSupported for multi-state operations). +// ErrWorkspacesNotSupported for multi-state operations). 
// // This isn't an actual use case, this is exported just to provide a // easy way to test that behavior. type TestLocalSingleState struct { - Local + *Local } -func (b *TestLocalSingleState) State(name string) (statemgr.Full, error) { +// TestNewLocalSingle is a factory for creating a TestLocalSingleState. +// This function matches the signature required for backend/init. +func TestNewLocalSingle() backend.Backend { + return &TestLocalSingleState{Local: New()} +} + +func (b *TestLocalSingleState) Workspaces() ([]string, error) { + return nil, backend.ErrWorkspacesNotSupported +} + +func (b *TestLocalSingleState) DeleteWorkspace(string) error { + return backend.ErrWorkspacesNotSupported +} + +func (b *TestLocalSingleState) StateMgr(name string) (statemgr.Full, error) { if name != backend.DefaultStateName { - return nil, backend.ErrNamedStatesNotSupported + return nil, backend.ErrWorkspacesNotSupported } return b.Local.StateMgr(name) } -func (b *TestLocalSingleState) States() ([]string, error) { - return nil, backend.ErrNamedStatesNotSupported +// TestLocalNoDefaultState is a backend implementation that wraps +// Local and modifies it to support named states, but not the +// default state. It returns ErrDefaultWorkspaceNotSupported when +// the DefaultStateName is used. +type TestLocalNoDefaultState struct { + *Local } -func (b *TestLocalSingleState) DeleteState(string) error { - return backend.ErrNamedStatesNotSupported +// TestNewLocalNoDefault is a factory for creating a TestLocalNoDefaultState. +// This function matches the signature required for backend/init. 
+func TestNewLocalNoDefault() backend.Backend { + return &TestLocalNoDefaultState{Local: New()} +} + +func (b *TestLocalNoDefaultState) Workspaces() ([]string, error) { + workspaces, err := b.Local.Workspaces() + if err != nil { + return nil, err + } + + filtered := workspaces[:0] + for _, name := range workspaces { + if name != backend.DefaultStateName { + filtered = append(filtered, name) + } + } + + return filtered, nil +} + +func (b *TestLocalNoDefaultState) DeleteWorkspace(name string) error { + if name == backend.DefaultStateName { + return backend.ErrDefaultWorkspaceNotSupported + } + return b.Local.DeleteWorkspace(name) +} + +func (b *TestLocalNoDefaultState) StateMgr(name string) (statemgr.Full, error) { + if name == backend.DefaultStateName { + return nil, backend.ErrDefaultWorkspaceNotSupported + } + return b.Local.StateMgr(name) } func testTempDir(t *testing.T) string { diff --git a/backend/remote-state/artifactory/backend.go b/backend/remote-state/artifactory/backend.go index c969011341df..d085f21b5a66 100644 --- a/backend/remote-state/artifactory/backend.go +++ b/backend/remote-state/artifactory/backend.go @@ -83,16 +83,16 @@ func (b *Backend) configure(ctx context.Context) error { } func (b *Backend) Workspaces() ([]string, error) { - return nil, backend.ErrNamedStatesNotSupported + return nil, backend.ErrWorkspacesNotSupported } func (b *Backend) DeleteWorkspace(string) error { - return backend.ErrNamedStatesNotSupported + return backend.ErrWorkspacesNotSupported } func (b *Backend) StateMgr(name string) (state.State, error) { if name != backend.DefaultStateName { - return nil, backend.ErrNamedStatesNotSupported + return nil, backend.ErrWorkspacesNotSupported } return &remote.State{ Client: b.client, diff --git a/backend/remote-state/backend.go b/backend/remote-state/backend.go index a47aefc0896c..ef32356d1a42 100644 --- a/backend/remote-state/backend.go +++ b/backend/remote-state/backend.go @@ -50,11 +50,11 @@ func (b *Backend) Configure(obj 
cty.Value) tfdiags.Diagnostics { } func (b *Backend) Workspaces() ([]string, error) { - return nil, backend.ErrNamedStatesNotSupported + return nil, backend.ErrWorkspacesNotSupported } func (b *Backend) DeleteWorkspace(name string) error { - return backend.ErrNamedStatesNotSupported + return backend.ErrWorkspacesNotSupported } func (b *Backend) StateMgr(name string) (statemgr.Full, error) { @@ -64,7 +64,7 @@ func (b *Backend) StateMgr(name string) (statemgr.Full, error) { } if name != backend.DefaultStateName { - return nil, backend.ErrNamedStatesNotSupported + return nil, backend.ErrWorkspacesNotSupported } s := &remote.State{Client: b.client} diff --git a/backend/remote-state/etcdv2/backend.go b/backend/remote-state/etcdv2/backend.go index 729789be3a0e..fed0d2a1b729 100644 --- a/backend/remote-state/etcdv2/backend.go +++ b/backend/remote-state/etcdv2/backend.go @@ -76,16 +76,16 @@ func (b *Backend) configure(ctx context.Context) error { } func (b *Backend) Workspaces() ([]string, error) { - return nil, backend.ErrNamedStatesNotSupported + return nil, backend.ErrWorkspacesNotSupported } func (b *Backend) DeleteWorkspace(string) error { - return backend.ErrNamedStatesNotSupported + return backend.ErrWorkspacesNotSupported } func (b *Backend) StateMgr(name string) (state.State, error) { if name != backend.DefaultStateName { - return nil, backend.ErrNamedStatesNotSupported + return nil, backend.ErrWorkspacesNotSupported } return &remote.State{ Client: &EtcdClient{ diff --git a/backend/remote-state/http/backend.go b/backend/remote-state/http/backend.go index 00014097427a..aaf2515fa8e3 100644 --- a/backend/remote-state/http/backend.go +++ b/backend/remote-state/http/backend.go @@ -151,16 +151,16 @@ func (b *Backend) configure(ctx context.Context) error { func (b *Backend) StateMgr(name string) (state.State, error) { if name != backend.DefaultStateName { - return nil, backend.ErrNamedStatesNotSupported + return nil, backend.ErrWorkspacesNotSupported } return 
&remote.State{Client: b.client}, nil } func (b *Backend) Workspaces() ([]string, error) { - return nil, backend.ErrNamedStatesNotSupported + return nil, backend.ErrWorkspacesNotSupported } func (b *Backend) DeleteWorkspace(string) error { - return backend.ErrNamedStatesNotSupported + return backend.ErrWorkspacesNotSupported } diff --git a/backend/remote-state/manta/backend.go b/backend/remote-state/manta/backend.go index d651e6bd637f..d4ec85c9c77c 100644 --- a/backend/remote-state/manta/backend.go +++ b/backend/remote-state/manta/backend.go @@ -61,18 +61,10 @@ func New() backend.Backend { Required: true, }, - "objectName": { - Type: schema.TypeString, - Optional: true, - Default: "terraform.tfstate", - Deprecated: "please use the object_name attribute", - }, - "object_name": { Type: schema.TypeString, Optional: true, - // Set this default once the objectName attribute is removed! - // Default: "terraform.tfstate", + Default: "terraform.tfstate", }, }, } diff --git a/backend/remote-state/manta/backend_test.go b/backend/remote-state/manta/backend_test.go index f10a14239577..7e53b928ec00 100644 --- a/backend/remote-state/manta/backend_test.go +++ b/backend/remote-state/manta/backend_test.go @@ -31,8 +31,8 @@ func TestBackend(t *testing.T) { keyName := "testState" b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "path": directory, - "objectName": keyName, + "path": directory, + "object_name": keyName, })).(*Backend) createMantaFolder(t, b.storageClient, directory) @@ -48,13 +48,13 @@ func TestBackendLocked(t *testing.T) { keyName := "testState" b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "path": directory, - "objectName": keyName, + "path": directory, + "object_name": keyName, })).(*Backend) b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "path": directory, - "objectName": keyName, + "path": directory, + "object_name": keyName, 
})).(*Backend) createMantaFolder(t, b1.storageClient, directory) diff --git a/backend/remote-state/manta/client_test.go b/backend/remote-state/manta/client_test.go index 4daabf021796..8a276a7802e4 100644 --- a/backend/remote-state/manta/client_test.go +++ b/backend/remote-state/manta/client_test.go @@ -21,8 +21,8 @@ func TestRemoteClient(t *testing.T) { keyName := "testState" b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "path": directory, - "objectName": keyName, + "path": directory, + "object_name": keyName, })).(*Backend) createMantaFolder(t, b.storageClient, directory) @@ -42,13 +42,13 @@ func TestRemoteClientLocks(t *testing.T) { keyName := "testState" b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "path": directory, - "objectName": keyName, + "path": directory, + "object_name": keyName, })).(*Backend) b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{ - "path": directory, - "objectName": keyName, + "path": directory, + "object_name": keyName, })).(*Backend) createMantaFolder(t, b1.storageClient, directory) diff --git a/backend/remote-state/swift/backend_state.go b/backend/remote-state/swift/backend_state.go index 42a15d614267..6f0a922555cc 100644 --- a/backend/remote-state/swift/backend_state.go +++ b/backend/remote-state/swift/backend_state.go @@ -7,16 +7,16 @@ import ( ) func (b *Backend) Workspaces() ([]string, error) { - return nil, backend.ErrNamedStatesNotSupported + return nil, backend.ErrWorkspacesNotSupported } func (b *Backend) DeleteWorkspace(name string) error { - return backend.ErrNamedStatesNotSupported + return backend.ErrWorkspacesNotSupported } func (b *Backend) StateMgr(name string) (state.State, error) { if name != backend.DefaultStateName { - return nil, backend.ErrNamedStatesNotSupported + return nil, backend.ErrWorkspacesNotSupported } client := &RemoteClient{ diff --git a/backend/remote/backend.go 
b/backend/remote/backend.go new file mode 100644 index 000000000000..47a2f1a2e7f2 --- /dev/null +++ b/backend/remote/backend.go @@ -0,0 +1,677 @@ +package remote + +import ( + "context" + "fmt" + "log" + "net/http" + "net/url" + "os" + "sort" + "strings" + "sync" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/svchost" + "github.com/hashicorp/terraform/svchost/disco" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + "github.com/hashicorp/terraform/version" + "github.com/mitchellh/cli" + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" +) + +const ( + defaultHostname = "app.terraform.io" + defaultParallelism = 10 + serviceID = "tfe.v2" +) + +// Remote is an implementation of EnhancedBackend that performs all +// operations in a remote backend. +type Remote struct { + // CLI and Colorize control the CLI output. If CLI is nil then no CLI + // output will be done. If CLIColor is nil then no coloring will be done. + CLI cli.Ui + CLIColor *colorstring.Colorize + + // ShowDiagnostics prints diagnostic messages to the UI. + ShowDiagnostics func(vals ...interface{}) + + // ContextOpts are the base context options to set when initializing a + // new Terraform context. Many of these will be overridden or merged by + // Operation. See Operation for more details. 
+ ContextOpts *terraform.ContextOpts + + // client is the remote backend API client + client *tfe.Client + + // hostname of the remote backend server + hostname string + + // organization is the organization that contains the target workspaces + organization string + + // workspace is used to map the default workspace to a remote workspace + workspace string + + // prefix is used to filter down a set of workspaces that use a single + // configuration + prefix string + + // schema defines the configuration for the backend + schema *schema.Backend + + // services is used for service discovery + services *disco.Disco + + // opLock locks operations + opLock sync.Mutex +} + +var _ backend.Backend = (*Remote)(nil) + +// New creates a new initialized remote backend. +func New(services *disco.Disco) *Remote { + return &Remote{ + services: services, + } +} + +func (b *Remote) ConfigSchema() *configschema.Block { + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "hostname": { + Type: cty.String, + Optional: true, + Description: schemaDescriptions["hostname"], + }, + "organization": { + Type: cty.String, + Required: true, + Description: schemaDescriptions["organization"], + }, + "token": { + Type: cty.String, + Optional: true, + Description: schemaDescriptions["token"], + }, + }, + + BlockTypes: map[string]*configschema.NestedBlock{ + "workspaces": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Optional: true, + Description: schemaDescriptions["name"], + }, + "prefix": { + Type: cty.String, + Optional: true, + Description: schemaDescriptions["prefix"], + }, + }, + }, + Nesting: configschema.NestingSingle, + }, + }, + } +} + +func (b *Remote) ValidateConfig(obj cty.Value) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if val := obj.GetAttr("organization"); !val.IsNull() { + if val.AsString() == "" { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + 
"Invalid organization value", + `The "organization" attribute value must not be empty.`, + cty.Path{cty.GetAttrStep{Name: "organization"}}, + )) + } + } + + var name, prefix string + if workspaces := obj.GetAttr("workspaces"); !workspaces.IsNull() { + if val := workspaces.GetAttr("name"); !val.IsNull() { + name = val.AsString() + } + if val := workspaces.GetAttr("prefix"); !val.IsNull() { + prefix = val.AsString() + } + } + + // Make sure that we have either a workspace name or a prefix. + if name == "" && prefix == "" { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid workspaces configuration", + `Either workspace "name" or "prefix" is required.`, + cty.Path{cty.GetAttrStep{Name: "workspaces"}}, + )) + } + + // Make sure that only one of workspace name or a prefix is configured. + if name != "" && prefix != "" { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid workspaces configuration", + `Only one of workspace "name" or "prefix" is allowed.`, + cty.Path{cty.GetAttrStep{Name: "workspaces"}}, + )) + } + + return diags +} + +func (b *Remote) Configure(obj cty.Value) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // Get the hostname. + if val := obj.GetAttr("hostname"); !val.IsNull() && val.AsString() != "" { + b.hostname = val.AsString() + } else { + b.hostname = defaultHostname + } + + // Get the organization. + if val := obj.GetAttr("organization"); !val.IsNull() { + b.organization = val.AsString() + } + + // Get the workspaces configuration block and retrieve the + // default workspace name and prefix. + if workspaces := obj.GetAttr("workspaces"); !workspaces.IsNull() { + if val := workspaces.GetAttr("name"); !val.IsNull() { + b.workspace = val.AsString() + } + if val := workspaces.GetAttr("prefix"); !val.IsNull() { + b.prefix = val.AsString() + } + } + + // Discover the service URL for this host to confirm that it provides + // a remote backend API and to discover the required base path. 
+ service, err := b.discover(b.hostname) + if err != nil { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + strings.ToUpper(err.Error()[:1])+err.Error()[1:], + `If you are sure the hostname is correct, this could also indicate SSL `+ + `verification issues. Please use "openssl s_client -connect " to `+ + `identify any certificate or certificate chain issues.`, + cty.Path{cty.GetAttrStep{Name: "hostname"}}, + )) + return diags + } + + // Retrieve the token for this host as configured in the credentials + // section of the CLI Config File. + token, err := b.token(b.hostname) + if err != nil { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + strings.ToUpper(err.Error()[:1])+err.Error()[1:], + `If you are sure the hostname is correct, this could also indicate SSL `+ + `verification issues. Please use "openssl s_client -connect " to `+ + `identify any certificate or certificate chain issues.`, + cty.Path{cty.GetAttrStep{Name: "hostname"}}, + )) + return diags + } + if token == "" { + if val := obj.GetAttr("token"); !val.IsNull() { + token = val.AsString() + } + } + + cfg := &tfe.Config{ + Address: service.String(), + BasePath: service.Path, + Token: token, + Headers: make(http.Header), + } + + // Set the version header to the current version. + cfg.Headers.Set(version.Header, version.Version) + + // Create the remote backend API client. + b.client, err = tfe.NewClient(cfg) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create the Terraform Enterprise client", + fmt.Sprintf( + `The "remote" backend encountered an unexpected error while creating the `+ + `Terraform Enterprise client: %s.`, err, + ), + )) + } + + return diags +} + +// discover the remote backend API service URL and token. 
+func (b *Remote) discover(hostname string) (*url.URL, error) { + host, err := svchost.ForComparison(hostname) + if err != nil { + return nil, err + } + service := b.services.DiscoverServiceURL(host, serviceID) + if service == nil { + return nil, fmt.Errorf("host %s does not provide a remote backend API", host) + } + return service, nil +} + +// token returns the token for this host as configured in the credentials +// section of the CLI Config File. If no token was configured, an empty +// string will be returned instead. +func (b *Remote) token(hostname string) (string, error) { + host, err := svchost.ForComparison(hostname) + if err != nil { + return "", err + } + creds, err := b.services.CredentialsForHost(host) + if err != nil { + log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", host, err) + return "", nil + } + if creds != nil { + return creds.Token(), nil + } + return "", nil +} + +// Workspaces returns a filtered list of remote workspace names. +func (b *Remote) Workspaces() ([]string, error) { + if b.prefix == "" { + return nil, backend.ErrWorkspacesNotSupported + } + return b.workspaces() +} + +func (b *Remote) workspaces() ([]string, error) { + // Check if the configured organization exists. + _, err := b.client.Organizations.Read(context.Background(), b.organization) + if err != nil { + if err == tfe.ErrResourceNotFound { + return nil, fmt.Errorf("organization %s does not exist", b.organization) + } + return nil, err + } + + options := tfe.WorkspaceListOptions{} + switch { + case b.workspace != "": + options.Search = tfe.String(b.workspace) + case b.prefix != "": + options.Search = tfe.String(b.prefix) + } + + // Create a slice to contain all the names. 
+ var names []string + + for { + wl, err := b.client.Workspaces.List(context.Background(), b.organization, options) + if err != nil { + return nil, err + } + + for _, w := range wl.Items { + if b.workspace != "" && w.Name == b.workspace { + names = append(names, backend.DefaultStateName) + continue + } + if b.prefix != "" && strings.HasPrefix(w.Name, b.prefix) { + names = append(names, strings.TrimPrefix(w.Name, b.prefix)) + } + } + + // Exit the loop when we've seen all pages. + if wl.CurrentPage >= wl.TotalPages { + break + } + + // Update the page number to get the next page. + options.PageNumber = wl.NextPage + } + + // Sort the result so we have consistent output. + sort.StringSlice(names).Sort() + + return names, nil +} + +// DeleteWorkspace removes the remote workspace if it exists. +func (b *Remote) DeleteWorkspace(name string) error { + if b.workspace == "" && name == backend.DefaultStateName { + return backend.ErrDefaultWorkspaceNotSupported + } + if b.prefix == "" && name != backend.DefaultStateName { + return backend.ErrWorkspacesNotSupported + } + + // Configure the remote workspace name. + switch { + case name == backend.DefaultStateName: + name = b.workspace + case b.prefix != "" && !strings.HasPrefix(name, b.prefix): + name = b.prefix + name + } + + // Check if the configured organization exists. + _, err := b.client.Organizations.Read(context.Background(), b.organization) + if err != nil { + if err == tfe.ErrResourceNotFound { + return fmt.Errorf("organization %s does not exist", b.organization) + } + return err + } + + client := &remoteClient{ + client: b.client, + organization: b.organization, + workspace: name, + } + + return client.Delete() +} + +// StateMgr returns the latest state of the given remote workspace. The +// workspace will be created if it doesn't exist. 
+func (b *Remote) StateMgr(name string) (state.State, error) { + if b.workspace == "" && name == backend.DefaultStateName { + return nil, backend.ErrDefaultWorkspaceNotSupported + } + if b.prefix == "" && name != backend.DefaultStateName { + return nil, backend.ErrWorkspacesNotSupported + } + + workspaces, err := b.workspaces() + if err != nil { + return nil, fmt.Errorf("Error retrieving workspaces: %v", err) + } + + exists := false + for _, workspace := range workspaces { + if name == workspace { + exists = true + break + } + } + + // Configure the remote workspace name. + switch { + case name == backend.DefaultStateName: + name = b.workspace + case b.prefix != "" && !strings.HasPrefix(name, b.prefix): + name = b.prefix + name + } + + if !exists { + options := tfe.WorkspaceCreateOptions{ + Name: tfe.String(name), + } + + // We only set the Terraform Version for the new workspace if this is + // a release candidate or a final release. + if version.Prerelease == "" || strings.HasPrefix(version.Prerelease, "rc") { + options.TerraformVersion = tfe.String(version.String()) + } + + _, err = b.client.Workspaces.Create(context.Background(), b.organization, options) + if err != nil { + return nil, fmt.Errorf("Error creating workspace %s: %v", name, err) + } + } + + client := &remoteClient{ + client: b.client, + organization: b.organization, + workspace: name, + + // This is optionally set during Terraform Enterprise runs. + runID: os.Getenv("TFE_RUN_ID"), + } + + return &remote.State{Client: client}, nil +} + +// Operation implements backend.Enhanced +func (b *Remote) Operation(ctx context.Context, op *backend.Operation) (*backend.RunningOperation, error) { + // Configure the remote workspace name. 
+ switch { + case op.Workspace == backend.DefaultStateName: + op.Workspace = b.workspace + case b.prefix != "" && !strings.HasPrefix(op.Workspace, b.prefix): + op.Workspace = b.prefix + op.Workspace + } + + // Determine the function to call for our operation + var f func(context.Context, context.Context, *backend.Operation) (*tfe.Run, error) + switch op.Type { + case backend.OperationTypePlan: + f = b.opPlan + case backend.OperationTypeApply: + f = b.opApply + default: + return nil, fmt.Errorf( + "\n\nThe \"remote\" backend does not support the %q operation.\n"+ + "Please use the remote backend web UI for running this operation:\n"+ + "https://%s/app/%s/%s", op.Type, b.hostname, b.organization, op.Workspace) + } + + // Lock + b.opLock.Lock() + + // Build our running operation + // the runninCtx is only used to block until the operation returns. + runningCtx, done := context.WithCancel(context.Background()) + runningOp := &backend.RunningOperation{ + Context: runningCtx, + PlanEmpty: true, + } + + // stopCtx wraps the context passed in, and is used to signal a graceful Stop. + stopCtx, stop := context.WithCancel(ctx) + runningOp.Stop = stop + + // cancelCtx is used to cancel the operation immediately, usually + // indicating that the process is exiting. + cancelCtx, cancel := context.WithCancel(context.Background()) + runningOp.Cancel = cancel + + // Do it. + go func() { + defer done() + defer stop() + defer cancel() + + defer b.opLock.Unlock() + + r, opErr := f(stopCtx, cancelCtx, op) + if opErr != nil && opErr != context.Canceled { + b.ReportResult(runningOp, opErr) + return + } + + if r != nil { + // Retrieve the run to get its current status. + r, err := b.client.Runs.Read(cancelCtx, r.ID) + if err != nil { + b.ReportResult(runningOp, generalError("Failed to retrieve run", err)) + return + } + + // Record if there are any changes. 
+ runningOp.PlanEmpty = !r.HasChanges + + if opErr == context.Canceled { + if err := b.cancel(cancelCtx, op, r); err != nil { + b.ReportResult(runningOp, generalError("Failed to retrieve run", err)) + return + } + } + + if r.Status == tfe.RunErrored { + runningOp.Result = backend.OperationFailure + } + } + }() + + // Return the running operation. + return runningOp, nil +} + +func (b *Remote) cancel(cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { + if r.Status == tfe.RunPending && r.Actions.IsCancelable { + // Only ask if the remote operation should be canceled + // if the auto approve flag is not set. + if !op.AutoApprove { + v, err := op.UIIn.Input(&terraform.InputOpts{ + Id: "cancel", + Query: "\nDo you want to cancel the pending remote operation?", + Description: "Only 'yes' will be accepted to cancel.", + }) + if err != nil { + return generalError("Failed asking to cancel", err) + } + if v != "yes" { + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(operationNotCanceled))) + } + return nil + } + } else { + if b.CLI != nil { + // Insert a blank line to separate the ouputs. + b.CLI.Output("") + } + } + + // Try to cancel the remote operation. + err := b.client.Runs.Cancel(cancelCtx, r.ID, tfe.RunCancelOptions{}) + if err != nil { + return generalError("Failed to cancel run", err) + } + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(operationCanceled))) + } + } + + return nil +} + +// ReportResult is a helper for the common chore of setting the status of +// a running operation and showing any diagnostics produced during that +// operation. +// +// If the given diagnostics contains errors then the operation's result +// will be set to backend.OperationFailure. It will be set to +// backend.OperationSuccess otherwise. It will then use b.ShowDiagnostics +// to show the given diagnostics before returning. 
+// +// Callers should feel free to do each of these operations separately in +// more complex cases where e.g. diagnostics are interleaved with other +// output, but terminating immediately after reporting error diagnostics is +// common and can be expressed concisely via this method. +func (b *Remote) ReportResult(op *backend.RunningOperation, err error) { + var diags tfdiags.Diagnostics + + diags = diags.Append(err) + if diags.HasErrors() { + op.Result = backend.OperationFailure + } else { + op.Result = backend.OperationSuccess + } + + if b.ShowDiagnostics != nil { + b.ShowDiagnostics(diags) + } else { + // Shouldn't generally happen, but if it does then we'll at least + // make some noise in the logs to help us spot it. + if len(diags) != 0 { + log.Printf( + "[ERROR] Remote backend needs to report diagnostics but ShowDiagnostics is not set:\n%s", + diags.ErrWithWarnings(), + ) + } + } +} + +// Colorize returns the Colorize structure that can be used for colorizing +// output. This is guaranteed to always return a non-nil value and so useful +// as a helper to wrap any potentially colored strings. +// func (b *Remote) Colorize() *colorstring.Colorize { +// if b.CLIColor != nil { +// return b.CLIColor +// } + +// return &colorstring.Colorize{ +// Colors: colorstring.DefaultColors, +// Disable: true, +// } +// } + +func generalError(msg string, err error) error { + var diags tfdiags.Diagnostics + + if urlErr, ok := err.(*url.Error); ok { + err = urlErr.Err + } + + switch err { + case context.Canceled: + return err + case tfe.ErrResourceNotFound: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("%s: %v", msg, err), + `The configured "remote" backend returns '404 Not Found' errors for resources `+ + `that do not exist, as well as for resources that a user doesn't have access `+ + `to. 
When the resource does exists, please check the rights for the used token.`, + )) + return diags.Err() + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("%s: %v", msg, err), + `The configured "remote" backend encountered an unexpected error. Sometimes `+ + `this is caused by network connection problems, in which case you could retry `+ + `the command. If the issue persists please open a support ticket to get help `+ + `resolving the problem.`, + )) + return diags.Err() + } +} + +const operationCanceled = ` +[reset][red]The remote operation was successfully cancelled.[reset] +` + +const operationNotCanceled = ` +[reset][red]The remote operation was not cancelled.[reset] +` + +var schemaDescriptions = map[string]string{ + "hostname": "The remote backend hostname to connect to (defaults to app.terraform.io).", + "organization": "The name of the organization containing the targeted workspace(s).", + "token": "The token used to authenticate with the remote backend. If credentials for the\n" + + "host are configured in the CLI Config File, then those will be used instead.", + "name": "A workspace name used to map the default workspace to a named remote workspace.\n" + + "When configured only the default workspace can be used. This option conflicts\n" + + "with \"prefix\"", + "prefix": "A prefix used to filter workspaces using a single configuration. New workspaces\n" + + "will automatically be prefixed with this prefix. If omitted only the default\n" + + "workspace can be used. 
This option conflicts with \"name\"", +} diff --git a/backend/remote/backend_apply.go b/backend/remote/backend_apply.go new file mode 100644 index 000000000000..ae6d1eeacf4e --- /dev/null +++ b/backend/remote/backend_apply.go @@ -0,0 +1,237 @@ +package remote + +import ( + "bufio" + "context" + "fmt" + "log" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +func (b *Remote) opApply(stopCtx, cancelCtx context.Context, op *backend.Operation) (*tfe.Run, error) { + log.Printf("[INFO] backend/remote: starting Apply operation") + + // Retrieve the workspace used to run this operation in. + w, err := b.client.Workspaces.Read(stopCtx, b.organization, op.Workspace) + if err != nil { + return nil, generalError("Failed to retrieve workspace", err) + } + + var diags tfdiags.Diagnostics + + if !w.Permissions.CanUpdate { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Insufficient rights to apply changes", + "The provided credentials have insufficient rights to apply changes. 
In order "+ + "to apply changes at least write permissions on the workspace are required.", + )) + return nil, diags.Err() + } + + if w.VCSRepo != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Apply not allowed for workspaces with a VCS connection", + "A workspace that is connected to a VCS requires the VCS-driven workflow "+ + "to ensure that the VCS remains the single source of truth.", + )) + return nil, diags.Err() + } + + if op.Parallelism != defaultParallelism { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Custom parallelism values are currently not supported", + `The "remote" backend does not support setting a custom parallelism `+ + `value at this time.`, + )) + } + + if op.PlanFile != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Applying a saved plan is currently not supported", + `The "remote" backend currently requires configuration to be present and `+ + `does not accept an existing saved plan as an argument at this time.`, + )) + } + + if !op.PlanRefresh { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Applying without refresh is currently not supported", + `Currently the "remote" backend will always do an in-memory refresh of `+ + `the Terraform state prior to generating the plan.`, + )) + } + + if op.Targets != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource targeting is currently not supported", + `The "remote" backend does not support resource targeting at this time.`, + )) + } + + variables, parseDiags := b.parseVariableValues(op) + diags = diags.Append(parseDiags) + + if len(variables) > 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Run variables are currently not supported", + fmt.Sprintf( + "The \"remote\" backend does not support setting run variables at this time. "+ + "Currently the only to way to pass variables to the remote backend is by "+ + "creating a '*.auto.tfvars' variables file. 
This file will automatically "+ + "be loaded by the \"remote\" backend when the workspace is configured to use "+ + "Terraform v0.10.0 or later.\n\nAdditionally you can also set variables on "+ + "the workspace in the web UI:\nhttps://%s/app/%s/%s/variables", + b.hostname, b.organization, op.Workspace, + ), + )) + } + + if !op.HasConfig() && !op.Destroy { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No configuration files found", + `Apply requires configuration to be present. Applying without a configuration `+ + `would mark everything for destruction, which is normally not what is desired. `+ + `If you would like to destroy everything, please run 'terraform destroy' which `+ + `does not require any configuration files.`, + )) + } + + // Return if there are any errors. + if diags.HasErrors() { + return nil, diags.Err() + } + + // Run the plan phase. + r, err := b.plan(stopCtx, cancelCtx, op, w) + if err != nil { + return r, err + } + + // This check is also performed in the plan method to determine if + // the policies should be checked, but we need to check the values + // here again to determine if we are done and should return. + if !r.HasChanges || r.Status == tfe.RunErrored { + return r, nil + } + + // Retrieve the run to get its current status. + r, err = b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return r, generalError("Failed to retrieve run", err) + } + + // Return if the run cannot be confirmed. + if !w.AutoApply && !r.Actions.IsConfirmable { + return r, nil + } + + // Since we already checked the permissions before creating the run + // this should never happen. But it doesn't hurt to keep this in as + // a safeguard for any unexpected situations. + if !w.AutoApply && !r.Permissions.CanApply { + // Make sure we discard the run if possible. 
+ if r.Actions.IsDiscardable { + err = b.client.Runs.Discard(stopCtx, r.ID, tfe.RunDiscardOptions{}) + if err != nil { + if op.Destroy { + return r, generalError("Failed to discard destroy", err) + } + return r, generalError("Failed to discard apply", err) + } + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Insufficient rights to approve the pending changes", + fmt.Sprintf("There are pending changes, but the provided credentials have "+ + "insufficient rights to approve them. The run will be discarded to prevent "+ + "it from blocking the queue waiting for external approval. To queue a run "+ + "that can be approved by someone else, please use the 'Queue Plan' button in "+ + "the web UI:\nhttps://%s/app/%s/%s/runs", b.hostname, b.organization, op.Workspace), + )) + return r, diags.Err() + } + + mustConfirm := (op.UIIn != nil && op.UIOut != nil) && + ((op.Destroy && (!op.DestroyForce && !op.AutoApprove)) || (!op.Destroy && !op.AutoApprove)) + + if !w.AutoApply { + if mustConfirm { + opts := &terraform.InputOpts{Id: "approve"} + + if op.Destroy { + opts.Query = "\nDo you really want to destroy all resources in workspace \"" + op.Workspace + "\"?" + opts.Description = "Terraform will destroy all your managed infrastructure, as shown above.\n" + + "There is no undo. Only 'yes' will be accepted to confirm." + } else { + opts.Query = "\nDo you want to perform these actions in workspace \"" + op.Workspace + "\"?" + opts.Description = "Terraform will perform the actions described above.\n" + + "Only 'yes' will be accepted to approve." + } + + if err = b.confirm(stopCtx, op, opts, r, "yes"); err != nil { + return r, err + } + } + + err = b.client.Runs.Apply(stopCtx, r.ID, tfe.RunApplyOptions{}) + if err != nil { + return r, generalError("Failed to approve the apply command", err) + } + } + + // If we don't need to ask for confirmation, insert a blank + // line to separate the ouputs. 
+ if w.AutoApply || !mustConfirm { + if b.CLI != nil { + b.CLI.Output("") + } + } + + r, err = b.waitForRun(stopCtx, cancelCtx, op, "apply", r, w) + if err != nil { + return r, err + } + + logs, err := b.client.Applies.Logs(stopCtx, r.Apply.ID) + if err != nil { + return r, generalError("Failed to retrieve logs", err) + } + scanner := bufio.NewScanner(logs) + + skip := 0 + for scanner.Scan() { + // Skip the first 3 lines to prevent duplicate output. + if skip < 3 { + skip++ + continue + } + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(scanner.Text())) + } + } + if err := scanner.Err(); err != nil { + return r, generalError("Failed to read logs", err) + } + + return r, nil +} + +const applyDefaultHeader = ` +[reset][yellow]Running apply in the remote backend. Output will stream here. Pressing Ctrl-C +will cancel the remote apply if its still pending. If the apply started it +will stop streaming the logs, but will not stop the apply running remotely. +To view this run in a browser, visit: +https://%s/app/%s/%s/runs/%s[reset] +` diff --git a/backend/remote/backend_apply_test.go b/backend/remote/backend_apply_test.go new file mode 100644 index 000000000000..be7345e60701 --- /dev/null +++ b/backend/remote/backend_apply_test.go @@ -0,0 +1,880 @@ +package remote + +import ( + "context" + "os" + "os/signal" + "strings" + "syscall" + "testing" + "time" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/plans/planfile" + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/cli" +) + +func testOperationApply(t *testing.T, configDir string) (*backend.Operation, func()) { + t.Helper() + + _, configLoader, configCleanup := configload.MustLoadConfigForTests(t, configDir) + + return &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + Parallelism: defaultParallelism, + PlanRefresh: 
true, + Type: backend.OperationTypeApply, + }, configCleanup +} + +func TestRemote_applyBasic(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply") + defer configCleanup() + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("missing plan summery in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("missing apply summery in output: %s", output) + } +} + +func TestRemote_applyWithoutPermissions(t *testing.T) { + b := testBackendNoDefault(t) + + // Create a named workspace without permissions. 
+ w, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.prefix + "prod"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + w.Permissions.CanUpdate = false + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply") + defer configCleanup() + + op.UIOut = b.CLI + op.Workspace = "prod" + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + + errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, "Insufficient rights to apply changes") { + t.Fatalf("expected a permissions error, got: %v", errOutput) + } +} + +func TestRemote_applyWithVCS(t *testing.T) { + b := testBackendNoDefault(t) + + // Create a named workspace with a VCS. + _, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.prefix + "prod"), + VCSRepo: &tfe.VCSRepoOptions{}, + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply") + defer configCleanup() + + op.Workspace = "prod" + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, "not allowed for workspaces with a VCS") { + t.Fatalf("expected a VCS error, got: %v", errOutput) + } +} + +func TestRemote_applyWithParallelism(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationApply(t, 
"./test-fixtures/apply") + defer configCleanup() + + op.Parallelism = 3 + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + + errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, "parallelism values are currently not supported") { + t.Fatalf("expected a parallelism error, got: %v", errOutput) + } +} + +func TestRemote_applyWithPlan(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply") + defer configCleanup() + + op.PlanFile = &planfile.Reader{} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, "saved plan is currently not supported") { + t.Fatalf("expected a saved plan error, got: %v", errOutput) + } +} + +func TestRemote_applyWithoutRefresh(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply") + defer configCleanup() + + op.PlanRefresh = false + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + + errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, "refresh is currently not supported") { + t.Fatalf("expected a refresh error, got: %v", 
errOutput) + } +} + +func TestRemote_applyWithTarget(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply") + defer configCleanup() + + addr, _ := addrs.ParseAbsResourceStr("null_resource.foo") + + op.Targets = []addrs.Targetable{addr} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, "targeting is currently not supported") { + t.Fatalf("expected a targeting error, got: %v", errOutput) + } +} + +func TestRemote_applyWithVariables(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply-variables") + defer configCleanup() + + op.Variables = testVariables(terraform.ValueFromNamedFile, "foo", "bar") + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + + errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, "variables are currently not supported") { + t.Fatalf("expected a variables error, got: %v", errOutput) + } +} + +func TestRemote_applyNoConfig(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationApply(t, "./test-fixtures/empty") + defer configCleanup() + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + 
t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, "configuration files found") { + t.Fatalf("expected configuration files error, got: %v", errOutput) + } +} + +func TestRemote_applyNoChanges(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply-no-changes") + defer configCleanup() + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "No changes. Infrastructure is up-to-date.") { + t.Fatalf("expected no changes in plan summery: %s", output) + } +} + +func TestRemote_applyNoApprove(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply") + defer configCleanup() + + input := testInput(t, map[string]string{ + "approve": "no", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, "Apply discarded") { + t.Fatalf("expected an apply discarded error, got: %v", errOutput) + } +} + 
+func TestRemote_applyAutoApprove(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply") + defer configCleanup() + + input := testInput(t, map[string]string{ + "approve": "no", + }) + + op.AutoApprove = true + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) != 1 { + t.Fatalf("expected an unused answer, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("missing plan summery in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("missing apply summery in output: %s", output) + } +} + +func TestRemote_applyWithAutoApply(t *testing.T) { + b := testBackendNoDefault(t) + + // Create a named workspace that auto applies. 
+ _, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + AutoApply: tfe.Bool(true), + Name: tfe.String(b.prefix + "prod"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply") + defer configCleanup() + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = "prod" + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) != 1 { + t.Fatalf("expected an unused answer, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("missing plan summery in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("missing apply summery in output: %s", output) + } +} + +func TestRemote_applyLockTimeout(t *testing.T) { + b := testBackendDefault(t) + ctx := context.Background() + + // Retrieve the workspace used to run this operation in. + w, err := b.client.Workspaces.Read(ctx, b.organization, b.workspace) + if err != nil { + t.Fatalf("error retrieving workspace: %v", err) + } + + // Create a new configuration version. + c, err := b.client.ConfigurationVersions.Create(ctx, w.ID, tfe.ConfigurationVersionCreateOptions{}) + if err != nil { + t.Fatalf("error creating configuration version: %v", err) + } + + // Create a pending run to block this run. 
+ _, err = b.client.Runs.Create(ctx, tfe.RunCreateOptions{ + ConfigurationVersion: c, + Workspace: w, + }) + if err != nil { + t.Fatalf("error creating pending run: %v", err) + } + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply") + defer configCleanup() + + input := testInput(t, map[string]string{ + "cancel": "yes", + "approve": "yes", + }) + + op.StateLockTimeout = 5 * time.Second + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + _, err = b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + sigint := make(chan os.Signal, 1) + signal.Notify(sigint, syscall.SIGINT) + select { + case <-sigint: + // Stop redirecting SIGINT signals. + signal.Stop(sigint) + case <-time.After(10 * time.Second): + t.Fatalf("expected lock timeout after 5 seconds, waited 10 seconds") + } + + if len(input.answers) != 2 { + t.Fatalf("expected unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Lock timeout exceeded") { + t.Fatalf("missing lock timout error in output: %s", output) + } + if strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("unexpected plan summery in output: %s", output) + } + if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("unexpected apply summery in output: %s", output) + } +} + +func TestRemote_applyDestroy(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply-destroy") + defer configCleanup() + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.Destroy = true + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation 
failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "0 to add, 0 to change, 1 to destroy") { + t.Fatalf("missing plan summery in output: %s", output) + } + if !strings.Contains(output, "0 added, 0 changed, 1 destroyed") { + t.Fatalf("missing apply summery in output: %s", output) + } +} + +func TestRemote_applyDestroyNoConfig(t *testing.T) { + b := testBackendDefault(t) + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op, configCleanup := testOperationApply(t, "./test-fixtures/empty") + defer configCleanup() + + op.Destroy = true + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } +} + +func TestRemote_applyPolicyPass(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply-policy-passed") + defer configCleanup() + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty 
plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("missing plan summery in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: true") { + t.Fatalf("missing polic check result in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("missing apply summery in output: %s", output) + } +} + +func TestRemote_applyPolicyHardFail(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply-policy-hard-failed") + defer configCleanup() + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + if len(input.answers) != 1 { + t.Fatalf("expected an unused answers, got: %v", input.answers) + } + + errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, "hard failed") { + t.Fatalf("expected a policy check error, got: %v", errOutput) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("missing plan summery in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("missing policy check result in output: %s", output) + } + if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("unexpected apply summery in output: %s", output) + } +} + +func TestRemote_applyPolicySoftFail(t *testing.T) { + 
b := testBackendDefault(t) + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply-policy-soft-failed") + defer configCleanup() + + input := testInput(t, map[string]string{ + "override": "override", + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("missing plan summery in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("missing policy check result in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("missing apply summery in output: %s", output) + } +} + +func TestRemote_applyPolicySoftFailAutoApprove(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply-policy-soft-failed") + defer configCleanup() + + input := testInput(t, map[string]string{ + "override": "override", + }) + + op.AutoApprove = true + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + if len(input.answers) != 1 { + t.Fatalf("expected an unused answers, got: %v", input.answers) + } + 
+ errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, "soft failed") { + t.Fatalf("expected a policy check error, got: %v", errOutput) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("missing plan summery in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + t.Fatalf("missing policy check result in output: %s", output) + } + if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("unexpected apply summery in output: %s", output) + } +} + +func TestRemote_applyPolicySoftFailAutoApply(t *testing.T) { + b := testBackendDefault(t) + + // Create a named workspace that auto applies. + _, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + AutoApply: tfe.Bool(true), + Name: tfe.String(b.prefix + "prod"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply-policy-soft-failed") + defer configCleanup() + + input := testInput(t, map[string]string{ + "override": "override", + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = "prod" + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) != 1 { + t.Fatalf("expected an unused answer, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("missing plan summery in output: %s", output) + } + if !strings.Contains(output, "Sentinel Result: false") { + 
t.Fatalf("missing policy check result in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("missing apply summery in output: %s", output) + } +} + +func TestRemote_applyWithRemoteError(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply-with-error") + defer configCleanup() + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected apply operation to fail") + } + if run.Result.ExitStatus() != 1 { + t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "null_resource.foo: 1 error") { + t.Fatalf("missing apply error in output: %s", output) + } +} diff --git a/backend/remote/backend_common.go b/backend/remote/backend_common.go new file mode 100644 index 000000000000..03ba2b733371 --- /dev/null +++ b/backend/remote/backend_common.go @@ -0,0 +1,334 @@ +package remote + +import ( + "bufio" + "context" + "errors" + "fmt" + "math" + "time" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +// backoff will perform exponential backoff based on the iteration and +// limited by the provided min and max (in milliseconds) durations. 
+func backoff(min, max float64, iter int) time.Duration { + backoff := math.Pow(2, float64(iter)/5) * min + if backoff > max { + backoff = max + } + return time.Duration(backoff) * time.Millisecond +} + +func (b *Remote) waitForRun(stopCtx, cancelCtx context.Context, op *backend.Operation, opType string, r *tfe.Run, w *tfe.Workspace) (*tfe.Run, error) { + started := time.Now() + updated := started + for i := 0; ; i++ { + select { + case <-stopCtx.Done(): + return r, stopCtx.Err() + case <-cancelCtx.Done(): + return r, cancelCtx.Err() + case <-time.After(backoff(1000, 3000, i)): + // Timer up, show status + } + + // Retrieve the run to get its current status. + r, err := b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return r, generalError("Failed to retrieve run", err) + } + + // Return if the run is no longer pending. + if r.Status != tfe.RunPending && r.Status != tfe.RunConfirmed { + if i == 0 && opType == "plan" && b.CLI != nil { + b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Waiting for the %s to start...\n", opType))) + } + if i > 0 && b.CLI != nil { + // Insert a blank line to separate the ouputs. + b.CLI.Output("") + } + return r, nil + } + + // Check if 30 seconds have passed since the last update. + current := time.Now() + if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) { + updated = current + position := 0 + elapsed := "" + + // Calculate and set the elapsed time. + if i > 0 { + elapsed = fmt.Sprintf( + " (%s elapsed)", current.Sub(started).Truncate(30*time.Second)) + } + + // Retrieve the workspace used to run this operation in. + w, err = b.client.Workspaces.Read(stopCtx, b.organization, w.Name) + if err != nil { + return nil, generalError("Failed to retrieve workspace", err) + } + + // If the workspace is locked the run will not be queued and we can + // update the status without making any expensive calls. 
+ if w.Locked && w.CurrentRun != nil { + cr, err := b.client.Runs.Read(stopCtx, w.CurrentRun.ID) + if err != nil { + return r, generalError("Failed to retrieve current run", err) + } + if cr.Status == tfe.RunPending { + b.CLI.Output(b.Colorize().Color( + "Waiting for the manually locked workspace to be unlocked..." + elapsed)) + continue + } + } + + // Skip checking the workspace queue when we are the current run. + if w.CurrentRun == nil || w.CurrentRun.ID != r.ID { + found := false + options := tfe.RunListOptions{} + runlist: + for { + rl, err := b.client.Runs.List(stopCtx, w.ID, options) + if err != nil { + return r, generalError("Failed to retrieve run list", err) + } + + // Loop through all runs to calculate the workspace queue position. + for _, item := range rl.Items { + if !found { + if r.ID == item.ID { + found = true + } + continue + } + + // If the run is in a final state, ignore it and continue. + switch item.Status { + case tfe.RunApplied, tfe.RunCanceled, tfe.RunDiscarded, tfe.RunErrored: + continue + case tfe.RunPlanned: + if op.Type == backend.OperationTypePlan { + continue + } + } + + // Increase the workspace queue position. + position++ + + // Stop searching when we reached the current run. + if w.CurrentRun != nil && w.CurrentRun.ID == item.ID { + break runlist + } + } + + // Exit the loop when we've seen all pages. + if rl.CurrentPage >= rl.TotalPages { + break + } + + // Update the page number to get the next page. + options.PageNumber = rl.NextPage + } + + if position > 0 { + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "Waiting for %d run(s) to finish before being queued...%s", + position, + elapsed, + ))) + continue + } + } + + options := tfe.RunQueueOptions{} + search: + for { + rq, err := b.client.Organizations.RunQueue(stopCtx, b.organization, options) + if err != nil { + return r, generalError("Failed to retrieve queue", err) + } + + // Search through all queued items to find our run. 
+ for _, item := range rq.Items { + if r.ID == item.ID { + position = item.PositionInQueue + break search + } + } + + // Exit the loop when we've seen all pages. + if rq.CurrentPage >= rq.TotalPages { + break + } + + // Update the page number to get the next page. + options.PageNumber = rq.NextPage + } + + if position > 0 { + c, err := b.client.Organizations.Capacity(stopCtx, b.organization) + if err != nil { + return r, generalError("Failed to retrieve capacity", err) + } + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "Waiting for %d queued run(s) to finish before starting...%s", + position-c.Running, + elapsed, + ))) + continue + } + + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "Waiting for the %s to start...%s", opType, elapsed))) + } + } +} + +func (b *Remote) parseVariableValues(op *backend.Operation) (terraform.InputValues, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + result := make(terraform.InputValues) + + // Load the configuration using the caller-provided configuration loader. + config, _, configDiags := op.ConfigLoader.LoadConfigWithSnapshot(op.ConfigDir) + diags = diags.Append(configDiags) + if diags.HasErrors() { + return nil, diags + } + + variables, varDiags := backend.ParseVariableValues(op.Variables, config.Module.Variables) + diags = diags.Append(varDiags) + if diags.HasErrors() { + return nil, diags + } + + // Save only the explicitly defined variables. 
+ for k, v := range variables { + switch v.SourceType { + case terraform.ValueFromCLIArg, terraform.ValueFromNamedFile: + result[k] = v + } + } + + return result, diags +} + +func (b *Remote) checkPolicy(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { + if b.CLI != nil { + b.CLI.Output("\n------------------------------------------------------------------------\n") + } + for i, pc := range r.PolicyChecks { + logs, err := b.client.PolicyChecks.Logs(stopCtx, pc.ID) + if err != nil { + return generalError("Failed to retrieve policy check logs", err) + } + scanner := bufio.NewScanner(logs) + + // Retrieve the policy check to get its current status. + pc, err := b.client.PolicyChecks.Read(stopCtx, pc.ID) + if err != nil { + return generalError("Failed to retrieve policy check", err) + } + + var msgPrefix string + switch pc.Scope { + case tfe.PolicyScopeOrganization: + msgPrefix = "Organization policy check" + case tfe.PolicyScopeWorkspace: + msgPrefix = "Workspace policy check" + default: + msgPrefix = fmt.Sprintf("Unknown policy check (%s)", pc.Scope) + } + + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n")) + } + + for scanner.Scan() { + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(scanner.Text())) + } + } + if err := scanner.Err(); err != nil { + return generalError("Failed to read logs", err) + } + + switch pc.Status { + case tfe.PolicyPasses: + if (op.Type == backend.OperationTypeApply || i < len(r.PolicyChecks)-1) && b.CLI != nil { + b.CLI.Output("\n------------------------------------------------------------------------") + } + continue + case tfe.PolicyErrored: + return fmt.Errorf(msgPrefix + " errored.") + case tfe.PolicyHardFailed: + return fmt.Errorf(msgPrefix + " hard failed.") + case tfe.PolicySoftFailed: + if op.Type == backend.OperationTypePlan || op.UIOut == nil || op.UIIn == nil || + op.AutoApprove || !pc.Actions.IsOverridable || !pc.Permissions.CanOverride { + return fmt.Errorf(msgPrefix + " 
soft failed.") + } + default: + return fmt.Errorf("Unknown or unexpected policy state: %s", pc.Status) + } + + opts := &terraform.InputOpts{ + Id: "override", + Query: "\nDo you want to override the soft failed policy check?", + Description: "Only 'override' will be accepted to override.", + } + + if err = b.confirm(stopCtx, op, opts, r, "override"); err != nil { + return err + } + + if _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil { + return generalError("Failed to override policy check", err) + } + + if b.CLI != nil { + b.CLI.Output("------------------------------------------------------------------------") + } + } + + return nil +} + +func (b *Remote) confirm(stopCtx context.Context, op *backend.Operation, opts *terraform.InputOpts, r *tfe.Run, keyword string) error { + v, err := op.UIIn.Input(opts) + if err != nil { + return fmt.Errorf("Error asking %s: %v", opts.Id, err) + } + if v != keyword { + // Retrieve the run again to get its current status. + r, err = b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return generalError("Failed to retrieve run", err) + } + + // Make sure we discard the run if possible. + if r.Actions.IsDiscardable { + err = b.client.Runs.Discard(stopCtx, r.ID, tfe.RunDiscardOptions{}) + if err != nil { + if op.Destroy { + return generalError("Failed to discard destroy", err) + } + return generalError("Failed to discard apply", err) + } + } + + // Even if the run was disarding successfully, we still + // return an error as the apply command was cancelled. 
+ if op.Destroy { + return errors.New("Destroy discarded.") + } + return errors.New("Apply discarded.") + } + + return nil +} diff --git a/backend/remote/backend_mock.go b/backend/remote/backend_mock.go new file mode 100644 index 000000000000..eac6b68392f0 --- /dev/null +++ b/backend/remote/backend_mock.go @@ -0,0 +1,998 @@ +package remote + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "strings" + "time" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/terraform" +) + +type mockClient struct { + Applies *mockApplies + ConfigurationVersions *mockConfigurationVersions + Organizations *mockOrganizations + Plans *mockPlans + PolicyChecks *mockPolicyChecks + Runs *mockRuns + StateVersions *mockStateVersions + Workspaces *mockWorkspaces +} + +func newMockClient() *mockClient { + c := &mockClient{} + c.Applies = newMockApplies(c) + c.ConfigurationVersions = newMockConfigurationVersions(c) + c.Organizations = newMockOrganizations(c) + c.Plans = newMockPlans(c) + c.PolicyChecks = newMockPolicyChecks(c) + c.Runs = newMockRuns(c) + c.StateVersions = newMockStateVersions(c) + c.Workspaces = newMockWorkspaces(c) + return c +} + +type mockApplies struct { + client *mockClient + applies map[string]*tfe.Apply + logs map[string]string +} + +func newMockApplies(client *mockClient) *mockApplies { + return &mockApplies{ + client: client, + applies: make(map[string]*tfe.Apply), + logs: make(map[string]string), + } +} + +// create is a helper function to create a mock apply that uses the configured +// working directory to find the logfile. +func (m *mockApplies) create(cvID, workspaceID string) (*tfe.Apply, error) { + c, ok := m.client.ConfigurationVersions.configVersions[cvID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + if c.Speculative { + // Speculative means its plan-only so we don't create a Apply. 
+ return nil, nil + } + + id := generateID("apply-") + url := fmt.Sprintf("https://app.terraform.io/_archivist/%s", id) + + a := &tfe.Apply{ + ID: id, + LogReadURL: url, + Status: tfe.ApplyPending, + } + + w, ok := m.client.Workspaces.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + if w.AutoApply { + a.Status = tfe.ApplyRunning + } + + m.logs[url] = filepath.Join( + m.client.ConfigurationVersions.uploadPaths[cvID], + w.WorkingDirectory, + "apply.log", + ) + m.applies[a.ID] = a + + return a, nil +} + +func (m *mockApplies) Read(ctx context.Context, applyID string) (*tfe.Apply, error) { + a, ok := m.applies[applyID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + // Together with the mockLogReader this allows testing queued runs. + if a.Status == tfe.ApplyRunning { + a.Status = tfe.ApplyFinished + } + return a, nil +} + +func (m *mockApplies) Logs(ctx context.Context, applyID string) (io.Reader, error) { + a, err := m.Read(ctx, applyID) + if err != nil { + return nil, err + } + + logfile, ok := m.logs[a.LogReadURL] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + if _, err := os.Stat(logfile); os.IsNotExist(err) { + return bytes.NewBufferString("logfile does not exist"), nil + } + + logs, err := ioutil.ReadFile(logfile) + if err != nil { + return nil, err + } + + done := func() (bool, error) { + a, err := m.Read(ctx, applyID) + if err != nil { + return false, err + } + if a.Status != tfe.ApplyFinished { + return false, nil + } + return true, nil + } + + return &mockLogReader{ + done: done, + logs: bytes.NewBuffer(logs), + }, nil +} + +type mockConfigurationVersions struct { + client *mockClient + configVersions map[string]*tfe.ConfigurationVersion + uploadPaths map[string]string + uploadURLs map[string]*tfe.ConfigurationVersion +} + +func newMockConfigurationVersions(client *mockClient) *mockConfigurationVersions { + return &mockConfigurationVersions{ + client: client, + configVersions: 
make(map[string]*tfe.ConfigurationVersion), + uploadPaths: make(map[string]string), + uploadURLs: make(map[string]*tfe.ConfigurationVersion), + } +} + +func (m *mockConfigurationVersions) List(ctx context.Context, workspaceID string, options tfe.ConfigurationVersionListOptions) (*tfe.ConfigurationVersionList, error) { + cvl := &tfe.ConfigurationVersionList{} + for _, cv := range m.configVersions { + cvl.Items = append(cvl.Items, cv) + } + + cvl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(cvl.Items), + } + + return cvl, nil +} + +func (m *mockConfigurationVersions) Create(ctx context.Context, workspaceID string, options tfe.ConfigurationVersionCreateOptions) (*tfe.ConfigurationVersion, error) { + id := generateID("cv-") + url := fmt.Sprintf("https://app.terraform.io/_archivist/%s", id) + + cv := &tfe.ConfigurationVersion{ + ID: id, + Status: tfe.ConfigurationPending, + UploadURL: url, + } + + m.configVersions[cv.ID] = cv + m.uploadURLs[url] = cv + + return cv, nil +} + +func (m *mockConfigurationVersions) Read(ctx context.Context, cvID string) (*tfe.ConfigurationVersion, error) { + cv, ok := m.configVersions[cvID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + return cv, nil +} + +func (m *mockConfigurationVersions) Upload(ctx context.Context, url, path string) error { + cv, ok := m.uploadURLs[url] + if !ok { + return errors.New("404 not found") + } + m.uploadPaths[cv.ID] = path + cv.Status = tfe.ConfigurationUploaded + return nil +} + +// mockInput is a mock implementation of terraform.UIInput. 
+type mockInput struct { + answers map[string]string +} + +func (m *mockInput) Input(opts *terraform.InputOpts) (string, error) { + v, ok := m.answers[opts.Id] + if !ok { + return "", fmt.Errorf("unexpected input request in test: %s", opts.Id) + } + delete(m.answers, opts.Id) + return v, nil +} + +type mockOrganizations struct { + client *mockClient + organizations map[string]*tfe.Organization +} + +func newMockOrganizations(client *mockClient) *mockOrganizations { + return &mockOrganizations{ + client: client, + organizations: make(map[string]*tfe.Organization), + } +} + +func (m *mockOrganizations) List(ctx context.Context, options tfe.OrganizationListOptions) (*tfe.OrganizationList, error) { + orgl := &tfe.OrganizationList{} + for _, org := range m.organizations { + orgl.Items = append(orgl.Items, org) + } + + orgl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(orgl.Items), + } + + return orgl, nil +} + +// mockLogReader is a mock logreader that enables testing queued runs. 
+type mockLogReader struct { + done func() (bool, error) + logs *bytes.Buffer +} + +func (m *mockLogReader) Read(l []byte) (int, error) { + for { + if written, err := m.read(l); err != io.ErrNoProgress { + return written, err + } + time.Sleep(500 * time.Millisecond) + } +} + +func (m *mockLogReader) read(l []byte) (int, error) { + done, err := m.done() + if err != nil { + return 0, err + } + if !done { + return 0, io.ErrNoProgress + } + return m.logs.Read(l) +} + +func (m *mockOrganizations) Create(ctx context.Context, options tfe.OrganizationCreateOptions) (*tfe.Organization, error) { + org := &tfe.Organization{Name: *options.Name} + m.organizations[org.Name] = org + return org, nil +} + +func (m *mockOrganizations) Read(ctx context.Context, name string) (*tfe.Organization, error) { + org, ok := m.organizations[name] + if !ok { + return nil, tfe.ErrResourceNotFound + } + return org, nil +} + +func (m *mockOrganizations) Update(ctx context.Context, name string, options tfe.OrganizationUpdateOptions) (*tfe.Organization, error) { + org, ok := m.organizations[name] + if !ok { + return nil, tfe.ErrResourceNotFound + } + org.Name = *options.Name + return org, nil + +} + +func (m *mockOrganizations) Delete(ctx context.Context, name string) error { + delete(m.organizations, name) + return nil +} + +func (m *mockOrganizations) Capacity(ctx context.Context, name string) (*tfe.Capacity, error) { + var pending, running int + for _, r := range m.client.Runs.runs { + if r.Status == tfe.RunPending { + pending++ + continue + } + running++ + } + return &tfe.Capacity{Pending: pending, Running: running}, nil +} + +func (m *mockOrganizations) RunQueue(ctx context.Context, name string, options tfe.RunQueueOptions) (*tfe.RunQueue, error) { + rq := &tfe.RunQueue{} + + for _, r := range m.client.Runs.runs { + rq.Items = append(rq.Items, r) + } + + rq.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(rq.Items), + } + + 
return rq, nil +} + +type mockPlans struct { + client *mockClient + logs map[string]string + plans map[string]*tfe.Plan +} + +func newMockPlans(client *mockClient) *mockPlans { + return &mockPlans{ + client: client, + logs: make(map[string]string), + plans: make(map[string]*tfe.Plan), + } +} + +// create is a helper function to create a mock plan that uses the configured +// working directory to find the logfile. +func (m *mockPlans) create(cvID, workspaceID string) (*tfe.Plan, error) { + id := generateID("plan-") + url := fmt.Sprintf("https://app.terraform.io/_archivist/%s", id) + + p := &tfe.Plan{ + ID: id, + LogReadURL: url, + Status: tfe.PlanPending, + } + + w, ok := m.client.Workspaces.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + m.logs[url] = filepath.Join( + m.client.ConfigurationVersions.uploadPaths[cvID], + w.WorkingDirectory, + "plan.log", + ) + m.plans[p.ID] = p + + return p, nil +} + +func (m *mockPlans) Read(ctx context.Context, planID string) (*tfe.Plan, error) { + p, ok := m.plans[planID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + // Together with the mockLogReader this allows testing queued runs. 
+ if p.Status == tfe.PlanRunning { + p.Status = tfe.PlanFinished + } + return p, nil +} + +func (m *mockPlans) Logs(ctx context.Context, planID string) (io.Reader, error) { + p, err := m.Read(ctx, planID) + if err != nil { + return nil, err + } + + logfile, ok := m.logs[p.LogReadURL] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + if _, err := os.Stat(logfile); os.IsNotExist(err) { + return bytes.NewBufferString("logfile does not exist"), nil + } + + logs, err := ioutil.ReadFile(logfile) + if err != nil { + return nil, err + } + + done := func() (bool, error) { + p, err := m.Read(ctx, planID) + if err != nil { + return false, err + } + if p.Status != tfe.PlanFinished { + return false, nil + } + return true, nil + } + + return &mockLogReader{ + done: done, + logs: bytes.NewBuffer(logs), + }, nil +} + +type mockPolicyChecks struct { + client *mockClient + checks map[string]*tfe.PolicyCheck + logs map[string]string +} + +func newMockPolicyChecks(client *mockClient) *mockPolicyChecks { + return &mockPolicyChecks{ + client: client, + checks: make(map[string]*tfe.PolicyCheck), + logs: make(map[string]string), + } +} + +// create is a helper function to create a mock policy check that uses the +// configured working directory to find the logfile. 
+func (m *mockPolicyChecks) create(cvID, workspaceID string) (*tfe.PolicyCheck, error) { + id := generateID("pc-") + + pc := &tfe.PolicyCheck{ + ID: id, + Actions: &tfe.PolicyActions{}, + Permissions: &tfe.PolicyPermissions{}, + Scope: tfe.PolicyScopeOrganization, + Status: tfe.PolicyPending, + } + + w, ok := m.client.Workspaces.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + logfile := filepath.Join( + m.client.ConfigurationVersions.uploadPaths[cvID], + w.WorkingDirectory, + "policy.log", + ) + + if _, err := os.Stat(logfile); os.IsNotExist(err) { + return nil, nil + } + + m.logs[pc.ID] = logfile + m.checks[pc.ID] = pc + + return pc, nil +} + +func (m *mockPolicyChecks) List(ctx context.Context, runID string, options tfe.PolicyCheckListOptions) (*tfe.PolicyCheckList, error) { + _, ok := m.client.Runs.runs[runID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + pcl := &tfe.PolicyCheckList{} + for _, pc := range m.checks { + pcl.Items = append(pcl.Items, pc) + } + + pcl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(pcl.Items), + } + + return pcl, nil +} + +func (m *mockPolicyChecks) Read(ctx context.Context, policyCheckID string) (*tfe.PolicyCheck, error) { + pc, ok := m.checks[policyCheckID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + logfile, ok := m.logs[pc.ID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + if _, err := os.Stat(logfile); os.IsNotExist(err) { + return nil, fmt.Errorf("logfile does not exist") + } + + logs, err := ioutil.ReadFile(logfile) + if err != nil { + return nil, err + } + + switch { + case bytes.Contains(logs, []byte("Sentinel Result: true")): + pc.Status = tfe.PolicyPasses + case bytes.Contains(logs, []byte("Sentinel Result: false")): + switch { + case bytes.Contains(logs, []byte("hard-mandatory")): + pc.Status = tfe.PolicyHardFailed + case bytes.Contains(logs, []byte("soft-mandatory")): + 
pc.Actions.IsOverridable = true + pc.Permissions.CanOverride = true + pc.Status = tfe.PolicySoftFailed + } + default: + // As this is an unexpected state, we say the policy errored. + pc.Status = tfe.PolicyErrored + } + + return pc, nil +} + +func (m *mockPolicyChecks) Override(ctx context.Context, policyCheckID string) (*tfe.PolicyCheck, error) { + pc, ok := m.checks[policyCheckID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + pc.Status = tfe.PolicyOverridden + return pc, nil +} + +func (m *mockPolicyChecks) Logs(ctx context.Context, policyCheckID string) (io.Reader, error) { + pc, ok := m.checks[policyCheckID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + logfile, ok := m.logs[pc.ID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + if _, err := os.Stat(logfile); os.IsNotExist(err) { + return bytes.NewBufferString("logfile does not exist"), nil + } + + logs, err := ioutil.ReadFile(logfile) + if err != nil { + return nil, err + } + + switch { + case bytes.Contains(logs, []byte("Sentinel Result: true")): + pc.Status = tfe.PolicyPasses + case bytes.Contains(logs, []byte("Sentinel Result: false")): + switch { + case bytes.Contains(logs, []byte("hard-mandatory")): + pc.Status = tfe.PolicyHardFailed + case bytes.Contains(logs, []byte("soft-mandatory")): + pc.Actions.IsOverridable = true + pc.Permissions.CanOverride = true + pc.Status = tfe.PolicySoftFailed + } + default: + // As this is an unexpected state, we say the policy errored. 
+ pc.Status = tfe.PolicyErrored + } + + return bytes.NewBuffer(logs), nil +} + +type mockRuns struct { + client *mockClient + runs map[string]*tfe.Run + workspaces map[string][]*tfe.Run +} + +func newMockRuns(client *mockClient) *mockRuns { + return &mockRuns{ + client: client, + runs: make(map[string]*tfe.Run), + workspaces: make(map[string][]*tfe.Run), + } +} + +func (m *mockRuns) List(ctx context.Context, workspaceID string, options tfe.RunListOptions) (*tfe.RunList, error) { + w, ok := m.client.Workspaces.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + rl := &tfe.RunList{} + for _, r := range m.workspaces[w.ID] { + rl.Items = append(rl.Items, r) + } + + rl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(rl.Items), + } + + return rl, nil +} + +func (m *mockRuns) Create(ctx context.Context, options tfe.RunCreateOptions) (*tfe.Run, error) { + a, err := m.client.Applies.create(options.ConfigurationVersion.ID, options.Workspace.ID) + if err != nil { + return nil, err + } + + p, err := m.client.Plans.create(options.ConfigurationVersion.ID, options.Workspace.ID) + if err != nil { + return nil, err + } + + pc, err := m.client.PolicyChecks.create(options.ConfigurationVersion.ID, options.Workspace.ID) + if err != nil { + return nil, err + } + + r := &tfe.Run{ + ID: generateID("run-"), + Actions: &tfe.RunActions{IsCancelable: true}, + Apply: a, + HasChanges: false, + Permissions: &tfe.RunPermissions{}, + Plan: p, + Status: tfe.RunPending, + } + + if pc != nil { + r.PolicyChecks = []*tfe.PolicyCheck{pc} + } + + if options.IsDestroy != nil { + r.IsDestroy = *options.IsDestroy + } + + w, ok := m.client.Workspaces.workspaceIDs[options.Workspace.ID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + if w.CurrentRun == nil { + w.CurrentRun = r + } + + m.runs[r.ID] = r + m.workspaces[options.Workspace.ID] = append(m.workspaces[options.Workspace.ID], r) + + return r, nil +} 
+ +func (m *mockRuns) Read(ctx context.Context, runID string) (*tfe.Run, error) { + r, ok := m.runs[runID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + pending := false + for _, r := range m.runs { + if r.ID != runID && r.Status == tfe.RunPending { + pending = true + break + } + } + + if !pending && r.Status == tfe.RunPending { + // Only update the status if there are no other pending runs. + r.Status = tfe.RunPlanning + r.Plan.Status = tfe.PlanRunning + } + + logs, _ := ioutil.ReadFile(m.client.Plans.logs[r.Plan.LogReadURL]) + if r.Plan.Status == tfe.PlanFinished { + if r.IsDestroy || bytes.Contains(logs, []byte("1 to add, 0 to change, 0 to destroy")) { + r.Actions.IsCancelable = false + r.Actions.IsConfirmable = true + r.HasChanges = true + r.Permissions.CanApply = true + } + + if bytes.Contains(logs, []byte("null_resource.foo: 1 error")) { + r.Actions.IsCancelable = false + r.HasChanges = false + r.Status = tfe.RunErrored + } + } + + return r, nil +} + +func (m *mockRuns) Apply(ctx context.Context, runID string, options tfe.RunApplyOptions) error { + r, ok := m.runs[runID] + if !ok { + return tfe.ErrResourceNotFound + } + if r.Status != tfe.RunPending { + // Only update the status if the run is not pending anymore. 
+ r.Status = tfe.RunApplying + r.Apply.Status = tfe.ApplyRunning + } + return nil +} + +func (m *mockRuns) Cancel(ctx context.Context, runID string, options tfe.RunCancelOptions) error { + panic("not implemented") +} + +func (m *mockRuns) ForceCancel(ctx context.Context, runID string, options tfe.RunForceCancelOptions) error { + panic("not implemented") +} + +func (m *mockRuns) Discard(ctx context.Context, runID string, options tfe.RunDiscardOptions) error { + panic("not implemented") +} + +type mockStateVersions struct { + client *mockClient + states map[string][]byte + stateVersions map[string]*tfe.StateVersion + workspaces map[string][]string +} + +func newMockStateVersions(client *mockClient) *mockStateVersions { + return &mockStateVersions{ + client: client, + states: make(map[string][]byte), + stateVersions: make(map[string]*tfe.StateVersion), + workspaces: make(map[string][]string), + } +} + +func (m *mockStateVersions) List(ctx context.Context, options tfe.StateVersionListOptions) (*tfe.StateVersionList, error) { + svl := &tfe.StateVersionList{} + for _, sv := range m.stateVersions { + svl.Items = append(svl.Items, sv) + } + + svl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(svl.Items), + } + + return svl, nil +} + +func (m *mockStateVersions) Create(ctx context.Context, workspaceID string, options tfe.StateVersionCreateOptions) (*tfe.StateVersion, error) { + id := generateID("sv-") + runID := os.Getenv("TFE_RUN_ID") + url := fmt.Sprintf("https://app.terraform.io/_archivist/%s", id) + + if runID != "" && (options.Run == nil || runID != options.Run.ID) { + return nil, fmt.Errorf("option.Run.ID does not contain the ID exported by TFE_RUN_ID") + } + + sv := &tfe.StateVersion{ + ID: id, + DownloadURL: url, + Serial: *options.Serial, + } + + state, err := base64.StdEncoding.DecodeString(*options.State) + if err != nil { + return nil, err + } + + m.states[sv.DownloadURL] = state + 
m.stateVersions[sv.ID] = sv + m.workspaces[workspaceID] = append(m.workspaces[workspaceID], sv.ID) + + return sv, nil +} + +func (m *mockStateVersions) Read(ctx context.Context, svID string) (*tfe.StateVersion, error) { + sv, ok := m.stateVersions[svID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + return sv, nil +} + +func (m *mockStateVersions) Current(ctx context.Context, workspaceID string) (*tfe.StateVersion, error) { + w, ok := m.client.Workspaces.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + svs, ok := m.workspaces[w.ID] + if !ok || len(svs) == 0 { + return nil, tfe.ErrResourceNotFound + } + + sv, ok := m.stateVersions[svs[len(svs)-1]] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + return sv, nil +} + +func (m *mockStateVersions) Download(ctx context.Context, url string) ([]byte, error) { + state, ok := m.states[url] + if !ok { + return nil, tfe.ErrResourceNotFound + } + return state, nil +} + +type mockWorkspaces struct { + client *mockClient + workspaceIDs map[string]*tfe.Workspace + workspaceNames map[string]*tfe.Workspace +} + +func newMockWorkspaces(client *mockClient) *mockWorkspaces { + return &mockWorkspaces{ + client: client, + workspaceIDs: make(map[string]*tfe.Workspace), + workspaceNames: make(map[string]*tfe.Workspace), + } +} + +func (m *mockWorkspaces) List(ctx context.Context, organization string, options tfe.WorkspaceListOptions) (*tfe.WorkspaceList, error) { + dummyWorkspaces := 10 + wl := &tfe.WorkspaceList{} + + // Get the prefix from the search options. + prefix := "" + if options.Search != nil { + prefix = *options.Search + } + + // Get all the workspaces that match the prefix. + var ws []*tfe.Workspace + for _, w := range m.workspaceIDs { + if strings.HasPrefix(w.Name, prefix) { + ws = append(ws, w) + } + } + + // Return an empty result if we have no matches. 
+ if len(ws) == 0 { + wl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + } + return wl, nil + } + + // Return dummy workspaces for the first page to test pagination. + if options.PageNumber <= 1 { + for i := 0; i < dummyWorkspaces; i++ { + wl.Items = append(wl.Items, &tfe.Workspace{ + ID: generateID("ws-"), + Name: fmt.Sprintf("dummy-workspace-%d", i), + }) + } + + wl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 2, + TotalPages: 2, + TotalCount: len(wl.Items) + len(ws), + } + + return wl, nil + } + + // Return the actual workspaces that matched as the second page. + wl.Items = ws + wl.Pagination = &tfe.Pagination{ + CurrentPage: 2, + PreviousPage: 1, + TotalPages: 2, + TotalCount: len(wl.Items) + dummyWorkspaces, + } + + return wl, nil +} + +func (m *mockWorkspaces) Create(ctx context.Context, organization string, options tfe.WorkspaceCreateOptions) (*tfe.Workspace, error) { + w := &tfe.Workspace{ + ID: generateID("ws-"), + Name: *options.Name, + Permissions: &tfe.WorkspacePermissions{ + CanQueueRun: true, + CanUpdate: true, + }, + } + if options.AutoApply != nil { + w.AutoApply = *options.AutoApply + } + if options.VCSRepo != nil { + w.VCSRepo = &tfe.VCSRepo{} + } + m.workspaceIDs[w.ID] = w + m.workspaceNames[w.Name] = w + return w, nil +} + +func (m *mockWorkspaces) Read(ctx context.Context, organization, workspace string) (*tfe.Workspace, error) { + w, ok := m.workspaceNames[workspace] + if !ok { + return nil, tfe.ErrResourceNotFound + } + return w, nil +} + +func (m *mockWorkspaces) Update(ctx context.Context, organization, workspace string, options tfe.WorkspaceUpdateOptions) (*tfe.Workspace, error) { + w, ok := m.workspaceNames[workspace] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + if options.Name != nil { + w.Name = *options.Name + } + if options.TerraformVersion != nil { + w.TerraformVersion = *options.TerraformVersion + } + if options.WorkingDirectory != nil { + w.WorkingDirectory = *options.WorkingDirectory + } + + 
delete(m.workspaceNames, workspace) + m.workspaceNames[w.Name] = w + + return w, nil +} + +func (m *mockWorkspaces) Delete(ctx context.Context, organization, workspace string) error { + if w, ok := m.workspaceNames[workspace]; ok { + delete(m.workspaceIDs, w.ID) + } + delete(m.workspaceNames, workspace) + return nil +} + +func (m *mockWorkspaces) Lock(ctx context.Context, workspaceID string, options tfe.WorkspaceLockOptions) (*tfe.Workspace, error) { + w, ok := m.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + w.Locked = true + return w, nil +} + +func (m *mockWorkspaces) Unlock(ctx context.Context, workspaceID string) (*tfe.Workspace, error) { + w, ok := m.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + w.Locked = false + return w, nil +} + +func (m *mockWorkspaces) AssignSSHKey(ctx context.Context, workspaceID string, options tfe.WorkspaceAssignSSHKeyOptions) (*tfe.Workspace, error) { + panic("not implemented") +} + +func (m *mockWorkspaces) UnassignSSHKey(ctx context.Context, workspaceID string) (*tfe.Workspace, error) { + panic("not implemented") +} + +const alphanumeric = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + +func generateID(s string) string { + b := make([]byte, 16) + for i := range b { + b[i] = alphanumeric[rand.Intn(len(alphanumeric))] + } + return s + string(b) +} diff --git a/backend/remote/backend_plan.go b/backend/remote/backend_plan.go new file mode 100644 index 000000000000..2fdea7781950 --- /dev/null +++ b/backend/remote/backend_plan.go @@ -0,0 +1,304 @@ +package remote + +import ( + "bufio" + "context" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "syscall" + "time" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/tfdiags" +) + +func (b *Remote) opPlan(stopCtx, cancelCtx context.Context, op *backend.Operation) (*tfe.Run, error) { + log.Printf("[INFO] 
backend/remote: starting Plan operation") + + // Retrieve the workspace used to run this operation in. + w, err := b.client.Workspaces.Read(stopCtx, b.organization, op.Workspace) + if err != nil { + return nil, generalError("Failed to retrieve workspace", err) + } + + var diags tfdiags.Diagnostics + + if !w.Permissions.CanQueueRun { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Insufficient rights to generate a plan", + "The provided credentials have insufficient rights to generate a plan. In order "+ + "to generate plans, at least plan permissions on the workspace are required.", + )) + return nil, diags.Err() + } + + if op.Parallelism != defaultParallelism { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Custom parallelism values are currently not supported", + `The "remote" backend does not support setting a custom parallelism `+ + `value at this time.`, + )) + } + + if op.PlanFile != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Displaying a saved plan is currently not supported", + `The "remote" backend currently requires configuration to be present and `+ + `does not accept an existing saved plan as an argument at this time.`, + )) + } + + if op.PlanOutPath != "" { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Saving a generated plan is currently not supported", + `The "remote" backend does not support saving the generated execution `+ + `plan locally at this time.`, + )) + } + + if !op.PlanRefresh { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Planning without refresh is currently not supported", + `Currently the "remote" backend will always do an in-memory refresh of `+ + `the Terraform state prior to generating the plan.`, + )) + } + + if op.Targets != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource targeting is currently not supported", + `The "remote" backend does not support resource targeting at this time.`, + )) + } + + variables, 
parseDiags := b.parseVariableValues(op) + diags = diags.Append(parseDiags) + + if len(variables) > 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Run variables are currently not supported", + fmt.Sprintf( + "The \"remote\" backend does not support setting run variables at this time. "+ + "Currently the only to way to pass variables to the remote backend is by "+ + "creating a '*.auto.tfvars' variables file. This file will automatically "+ + "be loaded by the \"remote\" backend when the workspace is configured to use "+ + "Terraform v0.10.0 or later.\n\nAdditionally you can also set variables on "+ + "the workspace in the web UI:\nhttps://%s/app/%s/%s/variables", + b.hostname, b.organization, op.Workspace, + ), + )) + } + + if !op.HasConfig() && !op.Destroy { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No configuration files found", + `Plan requires configuration to be present. Planning without a configuration `+ + `would mark everything for destruction, which is normally not what is desired. `+ + `If you would like to destroy everything, please run plan with the "-destroy" `+ + `flag or create a single empty configuration file. Otherwise, please create `+ + `a Terraform configuration file in the path being executed and try again.`, + )) + } + + // Return if there are any errors. 
+ if diags.HasErrors() { + return nil, diags.Err() + } + + return b.plan(stopCtx, cancelCtx, op, w) +} + +func (b *Remote) plan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { + configOptions := tfe.ConfigurationVersionCreateOptions{ + AutoQueueRuns: tfe.Bool(false), + Speculative: tfe.Bool(op.Type == backend.OperationTypePlan), + } + + cv, err := b.client.ConfigurationVersions.Create(stopCtx, w.ID, configOptions) + if err != nil { + return nil, generalError("Failed to create configuration version", err) + } + + var configDir string + if op.ConfigDir != "" { + // Make sure to take the working directory into account by removing + // the working directory from the current path. This will result in + // a path that points to the expected root of the workspace. + configDir = filepath.Clean(strings.TrimSuffix( + filepath.Clean(op.ConfigDir), + filepath.Clean(w.WorkingDirectory), + )) + } else { + // We did a check earlier to make sure we either have a config dir, + // or the plan is run with -destroy. So this else clause will only + // be executed when we are destroying and doesn't need the config. + configDir, err = ioutil.TempDir("", "tf") + if err != nil { + return nil, generalError("Failed to create temporary directory", err) + } + defer os.RemoveAll(configDir) + + // Make sure the configured working directory exists. 
+ err = os.MkdirAll(filepath.Join(configDir, w.WorkingDirectory), 0700) + if err != nil { + return nil, generalError( + "Failed to create temporary working directory", err) + } + } + + err = b.client.ConfigurationVersions.Upload(stopCtx, cv.UploadURL, configDir) + if err != nil { + return nil, generalError("Failed to upload configuration files", err) + } + + uploaded := false + for i := 0; i < 60 && !uploaded; i++ { + select { + case <-stopCtx.Done(): + return nil, context.Canceled + case <-cancelCtx.Done(): + return nil, context.Canceled + case <-time.After(500 * time.Millisecond): + cv, err = b.client.ConfigurationVersions.Read(stopCtx, cv.ID) + if err != nil { + return nil, generalError("Failed to retrieve configuration version", err) + } + + if cv.Status == tfe.ConfigurationUploaded { + uploaded = true + } + } + } + + if !uploaded { + return nil, generalError( + "Failed to upload configuration files", errors.New("operation timed out")) + } + + runOptions := tfe.RunCreateOptions{ + IsDestroy: tfe.Bool(op.Destroy), + Message: tfe.String("Queued manually using Terraform"), + ConfigurationVersion: cv, + Workspace: w, + } + + r, err := b.client.Runs.Create(stopCtx, runOptions) + if err != nil { + return r, generalError("Failed to create run", err) + } + + // When the lock timeout is set, + if op.StateLockTimeout > 0 { + go func() { + select { + case <-stopCtx.Done(): + return + case <-cancelCtx.Done(): + return + case <-time.After(op.StateLockTimeout): + // Retrieve the run to get its current status. + r, err := b.client.Runs.Read(cancelCtx, r.ID) + if err != nil { + log.Printf("[ERROR] error reading run: %v", err) + return + } + + if r.Status == tfe.RunPending && r.Actions.IsCancelable { + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(lockTimeoutErr))) + } + + // We abuse the auto aprove flag to indicate that we do not + // want to ask if the remote operation should be canceled. 
+ op.AutoApprove = true + + p, err := os.FindProcess(os.Getpid()) + if err != nil { + log.Printf("[ERROR] error searching process ID: %v", err) + return + } + p.Signal(syscall.SIGINT) + } + } + }() + } + + if b.CLI != nil { + header := planDefaultHeader + if op.Type == backend.OperationTypeApply { + header = applyDefaultHeader + } + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(fmt.Sprintf( + header, b.hostname, b.organization, op.Workspace, r.ID)) + "\n")) + } + + r, err = b.waitForRun(stopCtx, cancelCtx, op, "plan", r, w) + if err != nil { + return r, err + } + + logs, err := b.client.Plans.Logs(stopCtx, r.Plan.ID) + if err != nil { + return r, generalError("Failed to retrieve logs", err) + } + scanner := bufio.NewScanner(logs) + + for scanner.Scan() { + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(scanner.Text())) + } + } + if err := scanner.Err(); err != nil { + return r, generalError("Failed to read logs", err) + } + + // Retrieve the run to get its current status. + r, err = b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return r, generalError("Failed to retrieve run", err) + } + + // Return if there are no changes or the run errored. We return + // without an error, even if the run errored, as the error is + // already displayed by the output of the remote run. + if !r.HasChanges || r.Status == tfe.RunErrored { + return r, nil + } + + // Check any configured sentinel policies. + if len(r.PolicyChecks) > 0 { + err = b.checkPolicy(stopCtx, cancelCtx, op, r) + if err != nil { + return r, err + } + } + + return r, nil +} + +const planDefaultHeader = ` +[reset][yellow]Running plan in the remote backend. Output will stream here. Pressing Ctrl-C +will stop streaming the logs, but will not stop the plan running remotely. +To view this run in a browser, visit: +https://%s/app/%s/%s/runs/%s[reset] +` + +// The newline in this error is to make it look good in the CLI! 
+const lockTimeoutErr = ` +[reset][red]Lock timeout exceeded, sending interrupt to cancel the remote operation. +[reset] +` diff --git a/backend/remote/backend_plan_test.go b/backend/remote/backend_plan_test.go new file mode 100644 index 000000000000..6ce609d4318d --- /dev/null +++ b/backend/remote/backend_plan_test.go @@ -0,0 +1,561 @@ +package remote + +import ( + "context" + "os" + "os/signal" + "strings" + "syscall" + "testing" + "time" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/plans/planfile" + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/cli" +) + +func testOperationPlan(t *testing.T, configDir string) (*backend.Operation, func()) { + t.Helper() + + _, configLoader, configCleanup := configload.MustLoadConfigForTests(t, configDir) + + return &backend.Operation{ + ConfigDir: configDir, + ConfigLoader: configLoader, + Parallelism: defaultParallelism, + PlanRefresh: true, + Type: backend.OperationTypePlan, + }, configCleanup +} + +func TestRemote_planBasic(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationPlan(t, "./test-fixtures/plan") + defer configCleanup() + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatal("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("missing plan summery in output: %s", output) + } +} + +func TestRemote_planWithoutPermissions(t *testing.T) { + b := testBackendNoDefault(t) + + // Create a named workspace without 
permissions. + w, err := b.client.Workspaces.Create( + context.Background(), + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.prefix + "prod"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + w.Permissions.CanQueueRun = false + + op, configCleanup := testOperationPlan(t, "./test-fixtures/plan") + defer configCleanup() + + op.Workspace = "prod" + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + + errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, "Insufficient rights to generate a plan") { + t.Fatalf("expected a permissions error, got: %v", errOutput) + } +} + +func TestRemote_planWithParallelism(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationPlan(t, "./test-fixtures/plan") + defer configCleanup() + + op.Parallelism = 3 + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + + errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, "parallelism values are currently not supported") { + t.Fatalf("expected a parallelism error, got: %v", errOutput) + } +} + +func TestRemote_planWithPlan(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationPlan(t, "./test-fixtures/plan") + defer configCleanup() + + op.PlanFile = &planfile.Reader{} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected 
plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, "saved plan is currently not supported") { + t.Fatalf("expected a saved plan error, got: %v", errOutput) + } +} + +func TestRemote_planWithPath(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationPlan(t, "./test-fixtures/plan") + defer configCleanup() + + op.PlanOutPath = "./test-fixtures/plan" + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, "generated plan is currently not supported") { + t.Fatalf("expected a generated plan error, got: %v", errOutput) + } +} + +func TestRemote_planWithoutRefresh(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationPlan(t, "./test-fixtures/plan") + defer configCleanup() + + op.PlanRefresh = false + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + + errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, "refresh is currently not supported") { + t.Fatalf("expected a refresh error, got: %v", errOutput) + } +} + +func TestRemote_planWithTarget(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationPlan(t, "./test-fixtures/plan") + defer configCleanup() + + addr, _ := addrs.ParseAbsResourceStr("null_resource.foo") + + op.Targets 
= []addrs.Targetable{addr} + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, "targeting is currently not supported") { + t.Fatalf("expected a targeting error, got: %v", errOutput) + } +} + +func TestRemote_planWithVariables(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationPlan(t, "./test-fixtures/plan-variables") + defer configCleanup() + + op.Variables = testVariables(terraform.ValueFromCLIArg, "foo", "bar") + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + + errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, "variables are currently not supported") { + t.Fatalf("expected a variables error, got: %v", errOutput) + } +} + +func TestRemote_planNoConfig(t *testing.T) { + b := testBackendDefault(t) + + op, configCleanup := testOperationPlan(t, "./test-fixtures/empty") + defer configCleanup() + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if !run.PlanEmpty { + t.Fatalf("expected plan to be empty") + } + + errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String() + if !strings.Contains(errOutput, "configuration files found") { + t.Fatalf("expected configuration files 
error, got: %v", errOutput) + } +} + +func TestRemote_planLockTimeout(t *testing.T) { + b := testBackendDefault(t) + ctx := context.Background() + + // Retrieve the workspace used to run this operation in. + w, err := b.client.Workspaces.Read(ctx, b.organization, b.workspace) + if err != nil { + t.Fatalf("error retrieving workspace: %v", err) + } + + // Create a new configuration version. + c, err := b.client.ConfigurationVersions.Create(ctx, w.ID, tfe.ConfigurationVersionCreateOptions{}) + if err != nil { + t.Fatalf("error creating configuration version: %v", err) + } + + // Create a pending run to block this run. + _, err = b.client.Runs.Create(ctx, tfe.RunCreateOptions{ + ConfigurationVersion: c, + Workspace: w, + }) + if err != nil { + t.Fatalf("error creating pending run: %v", err) + } + + op, configCleanup := testOperationPlan(t, "./test-fixtures/plan") + defer configCleanup() + + input := testInput(t, map[string]string{ + "cancel": "yes", + "approve": "yes", + }) + + op.StateLockTimeout = 5 * time.Second + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + _, err = b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + sigint := make(chan os.Signal, 1) + signal.Notify(sigint, syscall.SIGINT) + select { + case <-sigint: + // Stop redirecting SIGINT signals. 
+		signal.Stop(sigint)
+	case <-time.After(10 * time.Second):
+		t.Fatalf("expected lock timeout after 5 seconds, waited 10 seconds")
+	}
+
+	if len(input.answers) != 2 {
+		t.Fatalf("expected unused answers, got: %v", input.answers)
+	}
+
+	output := b.CLI.(*cli.MockUi).OutputWriter.String()
+	if !strings.Contains(output, "Lock timeout exceeded") {
+		t.Fatalf("missing lock timeout error in output: %s", output)
+	}
+	if strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
+		t.Fatalf("unexpected plan summary in output: %s", output)
+	}
+}
+
+func TestRemote_planDestroy(t *testing.T) {
+	b := testBackendDefault(t)
+
+	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
+	defer configCleanup()
+
+	op.Destroy = true
+	op.Workspace = backend.DefaultStateName
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result != backend.OperationSuccess {
+		t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+	}
+	if run.PlanEmpty {
+		t.Fatalf("expected a non-empty plan")
+	}
+}
+
+func TestRemote_planDestroyNoConfig(t *testing.T) {
+	b := testBackendDefault(t)
+
+	op, configCleanup := testOperationPlan(t, "./test-fixtures/empty")
+	defer configCleanup()
+
+	op.Destroy = true
+	op.Workspace = backend.DefaultStateName
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result != backend.OperationSuccess {
+		t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+	}
+	if run.PlanEmpty {
+		t.Fatalf("expected a non-empty plan")
+	}
+}
+
+func TestRemote_planWithWorkingDirectory(t *testing.T) {
+	b := testBackendDefault(t)
+
+	options := tfe.WorkspaceUpdateOptions{
+		WorkingDirectory: tfe.String("terraform"),
+	}
+
+	// Configure the workspace to use a custom working directory.
+	_, err := b.client.Workspaces.Update(context.Background(), b.organization, b.workspace, options)
+	if err != nil {
+		t.Fatalf("error configuring working directory: %v", err)
+	}
+
+	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan-with-working-directory/terraform")
+	defer configCleanup()
+
+	op.Workspace = backend.DefaultStateName
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result != backend.OperationSuccess {
+		t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+	}
+	if run.PlanEmpty {
+		t.Fatalf("expected a non-empty plan")
+	}
+
+	output := b.CLI.(*cli.MockUi).OutputWriter.String()
+	if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
+		t.Fatalf("missing plan summary in output: %s", output)
+	}
+}
+
+func TestRemote_planPolicyPass(t *testing.T) {
+	b := testBackendDefault(t)
+
+	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan-policy-passed")
+	defer configCleanup()
+
+	op.Workspace = backend.DefaultStateName
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result != backend.OperationSuccess {
+		t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String())
+	}
+	if run.PlanEmpty {
+		t.Fatalf("expected a non-empty plan")
+	}
+
+	output := b.CLI.(*cli.MockUi).OutputWriter.String()
+	if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
+		t.Fatalf("missing plan summary in output: %s", output)
+	}
+	if !strings.Contains(output, "Sentinel Result: true") {
+		t.Fatalf("missing policy check result in output: %s", output)
+	}
+}
+
+func TestRemote_planPolicyHardFail(t *testing.T) {
+	b := testBackendDefault(t)
+
+	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan-policy-hard-failed")
+	defer configCleanup()
+
+	op.Workspace = backend.DefaultStateName
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result == backend.OperationSuccess {
+		t.Fatal("expected plan operation to fail")
+	}
+	if !run.PlanEmpty {
+		t.Fatalf("expected plan to be empty")
+	}
+
+	errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String()
+	if !strings.Contains(errOutput, "hard failed") {
+		t.Fatalf("expected a policy check error, got: %v", errOutput)
+	}
+
+	output := b.CLI.(*cli.MockUi).OutputWriter.String()
+	if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
+		t.Fatalf("missing plan summary in output: %s", output)
+	}
+	if !strings.Contains(output, "Sentinel Result: false") {
+		t.Fatalf("missing policy check result in output: %s", output)
+	}
+}
+
+func TestRemote_planPolicySoftFail(t *testing.T) {
+	b := testBackendDefault(t)
+
+	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan-policy-soft-failed")
+	defer configCleanup()
+
+	op.Workspace = backend.DefaultStateName
+
+	run, err := b.Operation(context.Background(), op)
+	if err != nil {
+		t.Fatalf("error starting operation: %v", err)
+	}
+
+	<-run.Done()
+	if run.Result == backend.OperationSuccess {
+		t.Fatal("expected plan operation to fail")
+	}
+	if !run.PlanEmpty {
+		t.Fatalf("expected plan to be empty")
+	}
+
+	errOutput := b.CLI.(*cli.MockUi).ErrorWriter.String()
+	if !strings.Contains(errOutput, "soft failed") {
+		t.Fatalf("expected a policy check error, got: %v", errOutput)
+	}
+
+	output := b.CLI.(*cli.MockUi).OutputWriter.String()
+	if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
+		t.Fatalf("missing plan summary in output: %s", output)
+	}
+	if !strings.Contains(output, "Sentinel Result: false") {
+		t.Fatalf("missing policy check result in output: %s", output)
+	}
+}
+
+func TestRemote_planWithRemoteError(t *testing.T) {
+	b := testBackendDefault(t)
+
+	op, configCleanup := testOperationPlan(t, "./test-fixtures/plan-with-error")
+ defer configCleanup() + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result == backend.OperationSuccess { + t.Fatal("expected plan operation to fail") + } + if run.Result.ExitStatus() != 1 { + t.Fatalf("expected exit code 1, got %d", run.Result.ExitStatus()) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "null_resource.foo: 1 error") { + t.Fatalf("missing plan error in output: %s", output) + } +} diff --git a/backend/remote/backend_state.go b/backend/remote/backend_state.go new file mode 100644 index 000000000000..99b795b4ea3d --- /dev/null +++ b/backend/remote/backend_state.go @@ -0,0 +1,181 @@ +package remote + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/base64" + "fmt" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/states/statefile" +) + +type remoteClient struct { + client *tfe.Client + lockInfo *state.LockInfo + organization string + runID string + workspace string +} + +// Get the remote state. +func (r *remoteClient) Get() (*remote.Payload, error) { + ctx := context.Background() + + // Retrieve the workspace for which to create a new state. + w, err := r.client.Workspaces.Read(ctx, r.organization, r.workspace) + if err != nil { + if err == tfe.ErrResourceNotFound { + // If no state exists, then return nil. + return nil, nil + } + return nil, fmt.Errorf("Error retrieving workspace: %v", err) + } + + sv, err := r.client.StateVersions.Current(ctx, w.ID) + if err != nil { + if err == tfe.ErrResourceNotFound { + // If no state exists, then return nil. 
+ return nil, nil + } + return nil, fmt.Errorf("Error retrieving remote state: %v", err) + } + + state, err := r.client.StateVersions.Download(ctx, sv.DownloadURL) + if err != nil { + return nil, fmt.Errorf("Error downloading remote state: %v", err) + } + + // If the state is empty, then return nil. + if len(state) == 0 { + return nil, nil + } + + // Get the MD5 checksum of the state. + sum := md5.Sum(state) + + return &remote.Payload{ + Data: state, + MD5: sum[:], + }, nil +} + +// Put the remote state. +func (r *remoteClient) Put(state []byte) error { + ctx := context.Background() + + // Retrieve the workspace for which to create a new state. + w, err := r.client.Workspaces.Read(ctx, r.organization, r.workspace) + if err != nil { + return fmt.Errorf("Error retrieving workspace: %v", err) + } + + // Read the raw state into a Terraform state. + stateFile, err := statefile.Read(bytes.NewReader(state)) + if err != nil { + return fmt.Errorf("Error reading state: %s", err) + } + + options := tfe.StateVersionCreateOptions{ + Lineage: tfe.String(stateFile.Lineage), + Serial: tfe.Int64(int64(stateFile.Serial)), + MD5: tfe.String(fmt.Sprintf("%x", md5.Sum(state))), + State: tfe.String(base64.StdEncoding.EncodeToString(state)), + } + + // If we have a run ID, make sure to add it to the options + // so the state will be properly associated with the run. + if r.runID != "" { + options.Run = &tfe.Run{ID: r.runID} + } + + // Create the new state. + _, err = r.client.StateVersions.Create(ctx, w.ID, options) + if err != nil { + return fmt.Errorf("Error creating remote state: %v", err) + } + + return nil +} + +// Delete the remote state. +func (r *remoteClient) Delete() error { + err := r.client.Workspaces.Delete(context.Background(), r.organization, r.workspace) + if err != nil && err != tfe.ErrResourceNotFound { + return fmt.Errorf("Error deleting workspace %s: %v", r.workspace, err) + } + + return nil +} + +// Lock the remote state. 
+func (r *remoteClient) Lock(info *state.LockInfo) (string, error) { + ctx := context.Background() + + lockErr := &state.LockError{Info: r.lockInfo} + + // Retrieve the workspace to lock. + w, err := r.client.Workspaces.Read(ctx, r.organization, r.workspace) + if err != nil { + lockErr.Err = err + return "", lockErr + } + + // Check if the workspace is already locked. + if w.Locked { + lockErr.Err = fmt.Errorf( + "remote state already\nlocked (lock ID: \"%s/%s\")", r.organization, r.workspace) + return "", lockErr + } + + // Lock the workspace. + w, err = r.client.Workspaces.Lock(ctx, w.ID, tfe.WorkspaceLockOptions{ + Reason: tfe.String("Locked by Terraform"), + }) + if err != nil { + lockErr.Err = err + return "", lockErr + } + + r.lockInfo = info + + return r.lockInfo.ID, nil +} + +// Unlock the remote state. +func (r *remoteClient) Unlock(id string) error { + ctx := context.Background() + + lockErr := &state.LockError{Info: r.lockInfo} + + // Verify the expected lock ID. + if r.lockInfo != nil && r.lockInfo.ID != id { + lockErr.Err = fmt.Errorf("lock ID does not match existing lock") + return lockErr + } + + // Verify the optional force-unlock lock ID. + if r.lockInfo == nil && r.organization+"/"+r.workspace != id { + lockErr.Err = fmt.Errorf("lock ID does not match existing lock") + return lockErr + } + + // Retrieve the workspace to lock. + w, err := r.client.Workspaces.Read(ctx, r.organization, r.workspace) + if err != nil { + lockErr.Err = err + return lockErr + } + + // Unlock the workspace. 
+ w, err = r.client.Workspaces.Unlock(ctx, w.ID) + if err != nil { + lockErr.Err = err + return lockErr + } + + return nil +} diff --git a/backend/remote/backend_state_test.go b/backend/remote/backend_state_test.go new file mode 100644 index 000000000000..c68f5e9c87e5 --- /dev/null +++ b/backend/remote/backend_state_test.go @@ -0,0 +1,58 @@ +package remote + +import ( + "bytes" + "os" + "testing" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/terraform" +) + +func TestRemoteClient_impl(t *testing.T) { + var _ remote.Client = new(remoteClient) +} + +func TestRemoteClient(t *testing.T) { + client := testRemoteClient(t) + remote.TestClient(t, client) +} + +func TestRemoteClient_stateLock(t *testing.T) { + b := testBackendDefault(t) + + s1, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + s2, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client) +} + +func TestRemoteClient_withRunID(t *testing.T) { + // Set the TFE_RUN_ID environment variable before creating the client! + if err := os.Setenv("TFE_RUN_ID", generateID("run-")); err != nil { + t.Fatalf("error setting env var TFE_RUN_ID: %v", err) + } + + // Create a new test client. + client := testRemoteClient(t) + + // Create a new empty state. + state := bytes.NewBuffer(nil) + if err := terraform.WriteState(terraform.NewState(), state); err != nil { + t.Fatalf("expected no error, got: %v", err) + } + + // Store the new state to verify (this will be done + // by the mock that is used) that the run ID is set. 
+ if err := client.Put(state.Bytes()); err != nil { + t.Fatalf("expected no error, got %v", err) + } +} diff --git a/backend/remote/backend_test.go b/backend/remote/backend_test.go new file mode 100644 index 000000000000..6e348d397c9a --- /dev/null +++ b/backend/remote/backend_test.go @@ -0,0 +1,234 @@ +package remote + +import ( + "reflect" + "strings" + "testing" + + "github.com/hashicorp/terraform/backend" + "github.com/zclconf/go-cty/cty" +) + +func TestRemote(t *testing.T) { + var _ backend.Enhanced = New(nil) + var _ backend.CLI = New(nil) +} + +func TestRemote_backendDefault(t *testing.T) { + b := testBackendDefault(t) + backend.TestBackendStates(t, b) + backend.TestBackendStateLocks(t, b, b) + backend.TestBackendStateForceUnlock(t, b, b) +} + +func TestRemote_backendNoDefault(t *testing.T) { + b := testBackendNoDefault(t) + backend.TestBackendStates(t, b) +} + +func TestRemote_config(t *testing.T) { + cases := map[string]struct { + config cty.Value + confErr string + valErr string + }{ + "with_a_name": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }), + }, + "with_a_prefix": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "prefix": cty.StringVal("my-app-"), + }), + }), + }, + "without_either_a_name_and_a_prefix": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "prefix": cty.NullVal(cty.String), + 
}), + }), + valErr: `Either workspace "name" or "prefix" is required`, + }, + "with_both_a_name_and_a_prefix": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.StringVal("my-app-"), + }), + }), + valErr: `Only one of workspace "name" or "prefix" is allowed`, + }, + "with_an_unknown_host": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal("nonexisting.local"), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }), + confErr: "Host nonexisting.local does not provide a remote backend API", + }, + } + + for name, tc := range cases { + s := testServer(t) + b := New(testDisco(s)) + + // Validate + valDiags := b.ValidateConfig(tc.config) + if (valDiags.Err() == nil && tc.valErr != "") || + (valDiags.Err() != nil && !strings.Contains(valDiags.Err().Error(), tc.valErr)) { + t.Fatalf("%s: unexpected validation result: %v", name, valDiags.Err()) + } + + // Configure + confDiags := b.Configure(tc.config) + if (confDiags.Err() == nil && tc.confErr != "") || + (confDiags.Err() != nil && !strings.Contains(confDiags.Err().Error(), tc.confErr)) { + t.Fatalf("%s: unexpected configure result: %v", name, valDiags.Err()) + } + } +} + +func TestRemote_nonexistingOrganization(t *testing.T) { + msg := "does not exist" + + b := testBackendNoDefault(t) + b.organization = "nonexisting" + + if _, err := b.StateMgr("prod"); err == nil || !strings.Contains(err.Error(), msg) { + t.Fatalf("expected %q error, got: %v", msg, err) + } + + if err := b.DeleteWorkspace("prod"); err == nil || !strings.Contains(err.Error(), msg) { + t.Fatalf("expected %q error, got: %v", msg, err) + } + + if _, err := 
b.Workspaces(); err == nil || !strings.Contains(err.Error(), msg) { + t.Fatalf("expected %q error, got: %v", msg, err) + } +} + +func TestRemote_addAndRemoveWorkspacesDefault(t *testing.T) { + b := testBackendDefault(t) + if _, err := b.Workspaces(); err != backend.ErrWorkspacesNotSupported { + t.Fatalf("expected error %v, got %v", backend.ErrWorkspacesNotSupported, err) + } + + if _, err := b.StateMgr(backend.DefaultStateName); err != nil { + t.Fatalf("expected no error, got %v", err) + } + + if _, err := b.StateMgr("prod"); err != backend.ErrWorkspacesNotSupported { + t.Fatalf("expected error %v, got %v", backend.ErrWorkspacesNotSupported, err) + } + + if err := b.DeleteWorkspace(backend.DefaultStateName); err != nil { + t.Fatalf("expected no error, got %v", err) + } + + if err := b.DeleteWorkspace("prod"); err != backend.ErrWorkspacesNotSupported { + t.Fatalf("expected error %v, got %v", backend.ErrWorkspacesNotSupported, err) + } +} + +func TestRemote_addAndRemoveWorkspacesNoDefault(t *testing.T) { + b := testBackendNoDefault(t) + states, err := b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedWorkspaces := []string(nil) + if !reflect.DeepEqual(states, expectedWorkspaces) { + t.Fatalf("expected states %#+v, got %#+v", expectedWorkspaces, states) + } + + if _, err := b.StateMgr(backend.DefaultStateName); err != backend.ErrDefaultWorkspaceNotSupported { + t.Fatalf("expected error %v, got %v", backend.ErrDefaultWorkspaceNotSupported, err) + } + + expectedA := "test_A" + if _, err := b.StateMgr(expectedA); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedWorkspaces = append(expectedWorkspaces, expectedA) + if !reflect.DeepEqual(states, expectedWorkspaces) { + t.Fatalf("expected %#+v, got %#+v", expectedWorkspaces, states) + } + + expectedB := "test_B" + if _, err := b.StateMgr(expectedB); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + 
} + + expectedWorkspaces = append(expectedWorkspaces, expectedB) + if !reflect.DeepEqual(states, expectedWorkspaces) { + t.Fatalf("expected %#+v, got %#+v", expectedWorkspaces, states) + } + + if err := b.DeleteWorkspace(backend.DefaultStateName); err != backend.ErrDefaultWorkspaceNotSupported { + t.Fatalf("expected error %v, got %v", backend.ErrDefaultWorkspaceNotSupported, err) + } + + if err := b.DeleteWorkspace(expectedA); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedWorkspaces = []string{expectedB} + if !reflect.DeepEqual(states, expectedWorkspaces) { + t.Fatalf("expected %#+v got %#+v", expectedWorkspaces, states) + } + + if err := b.DeleteWorkspace(expectedB); err != nil { + t.Fatal(err) + } + + states, err = b.Workspaces() + if err != nil { + t.Fatal(err) + } + + expectedWorkspaces = []string(nil) + if !reflect.DeepEqual(states, expectedWorkspaces) { + t.Fatalf("expected %#+v, got %#+v", expectedWorkspaces, states) + } +} diff --git a/backend/remote/cli.go b/backend/remote/cli.go new file mode 100644 index 000000000000..a6aa1103fba2 --- /dev/null +++ b/backend/remote/cli.go @@ -0,0 +1,14 @@ +package remote + +import ( + "github.com/hashicorp/terraform/backend" +) + +// CLIInit implements backend.CLI +func (b *Remote) CLIInit(opts *backend.CLIOpts) error { + b.CLI = opts.CLI + b.CLIColor = opts.CLIColor + b.ShowDiagnostics = opts.ShowDiagnostics + b.ContextOpts = opts.ContextOpts + return nil +} diff --git a/backend/remote/colorize.go b/backend/remote/colorize.go new file mode 100644 index 000000000000..0f877c0077e6 --- /dev/null +++ b/backend/remote/colorize.go @@ -0,0 +1,47 @@ +package remote + +import ( + "regexp" + + "github.com/mitchellh/colorstring" +) + +// colorsRe is used to find ANSI escaped color codes. +var colorsRe = regexp.MustCompile("\033\\[\\d{1,3}m") + +// Colorer is the interface that must be implemented to colorize strings. 
+type Colorer interface { + Color(v string) string +} + +// Colorize is used to print output when the -no-color flag is used. It will +// strip all ANSI escaped color codes which are set while the operation was +// executed in Terraform Enterprise. +// +// When Terraform Enterprise supports run specific variables, this code can be +// removed as we can then pass the CLI flag to the backend and prevent the color +// codes from being written to the output. +type Colorize struct { + cliColor *colorstring.Colorize +} + +// Color will strip all ANSI escaped color codes and return a uncolored string. +func (c *Colorize) Color(v string) string { + return colorsRe.ReplaceAllString(c.cliColor.Color(v), "") +} + +// Colorize returns the Colorize structure that can be used for colorizing +// output. This is guaranteed to always return a non-nil value and so is useful +// as a helper to wrap any potentially colored strings. +func (b *Remote) Colorize() Colorer { + if b.CLIColor != nil && !b.CLIColor.Disable { + return b.CLIColor + } + if b.CLIColor != nil { + return &Colorize{cliColor: b.CLIColor} + } + return &Colorize{cliColor: &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, + }} +} diff --git a/backend/remote/test-fixtures/apply-destroy/apply.log b/backend/remote/test-fixtures/apply-destroy/apply.log new file mode 100644 index 000000000000..34adfcd6bd5a --- /dev/null +++ b/backend/remote/test-fixtures/apply-destroy/apply.log @@ -0,0 +1,4 @@ +null_resource.hello: Destroying... (ID: 8657651096157629581) +null_resource.hello: Destruction complete after 0s + +Apply complete! Resources: 0 added, 0 changed, 1 destroyed. 
diff --git a/backend/remote/test-fixtures/apply-destroy/main.tf b/backend/remote/test-fixtures/apply-destroy/main.tf new file mode 100644 index 000000000000..3911a2a9b2db --- /dev/null +++ b/backend/remote/test-fixtures/apply-destroy/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/backend/remote/test-fixtures/apply-destroy/plan.log b/backend/remote/test-fixtures/apply-destroy/plan.log new file mode 100644 index 000000000000..1d38d4168923 --- /dev/null +++ b/backend/remote/test-fixtures/apply-destroy/plan.log @@ -0,0 +1,22 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +null_resource.hello: Refreshing state... (ID: 8657651096157629581) + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + - destroy + +Terraform will perform the following actions: + + - null_resource.hello + + +Plan: 0 to add, 0 to change, 1 to destroy. diff --git a/backend/remote/test-fixtures/apply-no-changes/main.tf b/backend/remote/test-fixtures/apply-no-changes/main.tf new file mode 100644 index 000000000000..3911a2a9b2db --- /dev/null +++ b/backend/remote/test-fixtures/apply-no-changes/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/backend/remote/test-fixtures/apply-no-changes/plan.log b/backend/remote/test-fixtures/apply-no-changes/plan.log new file mode 100644 index 000000000000..70416815133f --- /dev/null +++ b/backend/remote/test-fixtures/apply-no-changes/plan.log @@ -0,0 +1,17 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... 
+The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +null_resource.hello: Refreshing state... (ID: 8657651096157629581) + +------------------------------------------------------------------------ + +No changes. Infrastructure is up-to-date. + +This means that Terraform did not detect any differences between your +configuration and real physical resources that exist. As a result, no +actions need to be performed. diff --git a/backend/remote/test-fixtures/apply-policy-hard-failed/main.tf b/backend/remote/test-fixtures/apply-policy-hard-failed/main.tf new file mode 100644 index 000000000000..3911a2a9b2db --- /dev/null +++ b/backend/remote/test-fixtures/apply-policy-hard-failed/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/backend/remote/test-fixtures/apply-policy-hard-failed/plan.log b/backend/remote/test-fixtures/apply-policy-hard-failed/plan.log new file mode 100644 index 000000000000..5849e57595ef --- /dev/null +++ b/backend/remote/test-fixtures/apply-policy-hard-failed/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/backend/remote/test-fixtures/apply-policy-hard-failed/policy.log b/backend/remote/test-fixtures/apply-policy-hard-failed/policy.log new file mode 100644 index 000000000000..5d6e6935b937 --- /dev/null +++ b/backend/remote/test-fixtures/apply-policy-hard-failed/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: false + +Sentinel evaluated to false because one or more Sentinel policies evaluated +to false. This false was not due to an undefined value or runtime error. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (hard-mandatory) + +Result: false + +FALSE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/backend/remote/test-fixtures/apply-policy-passed/apply.log b/backend/remote/test-fixtures/apply-policy-passed/apply.log new file mode 100644 index 000000000000..89c0dbc42d1e --- /dev/null +++ b/backend/remote/test-fixtures/apply-policy-passed/apply.log @@ -0,0 +1,4 @@ +null_resource.hello: Creating... +null_resource.hello: Creation complete after 0s (ID: 8657651096157629581) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. diff --git a/backend/remote/test-fixtures/apply-policy-passed/main.tf b/backend/remote/test-fixtures/apply-policy-passed/main.tf new file mode 100644 index 000000000000..3911a2a9b2db --- /dev/null +++ b/backend/remote/test-fixtures/apply-policy-passed/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/backend/remote/test-fixtures/apply-policy-passed/plan.log b/backend/remote/test-fixtures/apply-policy-passed/plan.log new file mode 100644 index 000000000000..5849e57595ef --- /dev/null +++ b/backend/remote/test-fixtures/apply-policy-passed/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. 
+ +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. diff --git a/backend/remote/test-fixtures/apply-policy-passed/policy.log b/backend/remote/test-fixtures/apply-policy-passed/policy.log new file mode 100644 index 000000000000..b0cb1e598592 --- /dev/null +++ b/backend/remote/test-fixtures/apply-policy-passed/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: true + +This result means that Sentinel policies returned true and the protected +behavior is allowed by Sentinel policies. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (soft-mandatory) + +Result: true + +TRUE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/backend/remote/test-fixtures/apply-policy-soft-failed/apply.log b/backend/remote/test-fixtures/apply-policy-soft-failed/apply.log new file mode 100644 index 000000000000..89c0dbc42d1e --- /dev/null +++ b/backend/remote/test-fixtures/apply-policy-soft-failed/apply.log @@ -0,0 +1,4 @@ +null_resource.hello: Creating... +null_resource.hello: Creation complete after 0s (ID: 8657651096157629581) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. 
diff --git a/backend/remote/test-fixtures/apply-policy-soft-failed/main.tf b/backend/remote/test-fixtures/apply-policy-soft-failed/main.tf new file mode 100644 index 000000000000..3911a2a9b2db --- /dev/null +++ b/backend/remote/test-fixtures/apply-policy-soft-failed/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/backend/remote/test-fixtures/apply-policy-soft-failed/plan.log b/backend/remote/test-fixtures/apply-policy-soft-failed/plan.log new file mode 100644 index 000000000000..5849e57595ef --- /dev/null +++ b/backend/remote/test-fixtures/apply-policy-soft-failed/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. diff --git a/backend/remote/test-fixtures/apply-policy-soft-failed/policy.log b/backend/remote/test-fixtures/apply-policy-soft-failed/policy.log new file mode 100644 index 000000000000..3e4ebedf6179 --- /dev/null +++ b/backend/remote/test-fixtures/apply-policy-soft-failed/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: false + +Sentinel evaluated to false because one or more Sentinel policies evaluated +to false. This false was not due to an undefined value or runtime error. + +1 policies evaluated. 
+ +## Policy 1: Passthrough.sentinel (soft-mandatory) + +Result: false + +FALSE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/backend/remote/test-fixtures/apply-variables/apply.log b/backend/remote/test-fixtures/apply-variables/apply.log new file mode 100644 index 000000000000..89c0dbc42d1e --- /dev/null +++ b/backend/remote/test-fixtures/apply-variables/apply.log @@ -0,0 +1,4 @@ +null_resource.hello: Creating... +null_resource.hello: Creation complete after 0s (ID: 8657651096157629581) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. diff --git a/backend/remote/test-fixtures/apply-variables/main.tf b/backend/remote/test-fixtures/apply-variables/main.tf new file mode 100644 index 000000000000..955e8b4c09af --- /dev/null +++ b/backend/remote/test-fixtures/apply-variables/main.tf @@ -0,0 +1,4 @@ +variable "foo" {} +variable "bar" {} + +resource "null_resource" "foo" {} diff --git a/backend/remote/test-fixtures/apply-variables/plan.log b/backend/remote/test-fixtures/apply-variables/plan.log new file mode 100644 index 000000000000..5849e57595ef --- /dev/null +++ b/backend/remote/test-fixtures/apply-variables/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/backend/remote/test-fixtures/apply-with-error/main.tf b/backend/remote/test-fixtures/apply-with-error/main.tf new file mode 100644 index 000000000000..bc45f28f5637 --- /dev/null +++ b/backend/remote/test-fixtures/apply-with-error/main.tf @@ -0,0 +1,5 @@ +resource "null_resource" "foo" { + triggers { + random = "${guid()}" + } +} diff --git a/backend/remote/test-fixtures/apply-with-error/plan.log b/backend/remote/test-fixtures/apply-with-error/plan.log new file mode 100644 index 000000000000..4344a3722905 --- /dev/null +++ b/backend/remote/test-fixtures/apply-with-error/plan.log @@ -0,0 +1,10 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... + +Error: null_resource.foo: 1 error(s) occurred: + +* null_resource.foo: 1:3: unknown function called: guid in: + +${guid()} diff --git a/backend/remote/test-fixtures/apply/apply.log b/backend/remote/test-fixtures/apply/apply.log new file mode 100644 index 000000000000..89c0dbc42d1e --- /dev/null +++ b/backend/remote/test-fixtures/apply/apply.log @@ -0,0 +1,4 @@ +null_resource.hello: Creating... +null_resource.hello: Creation complete after 0s (ID: 8657651096157629581) + +Apply complete! Resources: 1 added, 0 changed, 0 destroyed. diff --git a/backend/remote/test-fixtures/apply/main.tf b/backend/remote/test-fixtures/apply/main.tf new file mode 100644 index 000000000000..3911a2a9b2db --- /dev/null +++ b/backend/remote/test-fixtures/apply/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/backend/remote/test-fixtures/apply/plan.log b/backend/remote/test-fixtures/apply/plan.log new file mode 100644 index 000000000000..5849e57595ef --- /dev/null +++ b/backend/remote/test-fixtures/apply/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... 
+The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. diff --git a/backend/remote/test-fixtures/empty/.gitignore b/backend/remote/test-fixtures/empty/.gitignore new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/remote/test-fixtures/plan-policy-hard-failed/main.tf b/backend/remote/test-fixtures/plan-policy-hard-failed/main.tf new file mode 100644 index 000000000000..3911a2a9b2db --- /dev/null +++ b/backend/remote/test-fixtures/plan-policy-hard-failed/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/backend/remote/test-fixtures/plan-policy-hard-failed/plan.log b/backend/remote/test-fixtures/plan-policy-hard-failed/plan.log new file mode 100644 index 000000000000..5849e57595ef --- /dev/null +++ b/backend/remote/test-fixtures/plan-policy-hard-failed/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/backend/remote/test-fixtures/plan-policy-hard-failed/policy.log b/backend/remote/test-fixtures/plan-policy-hard-failed/policy.log new file mode 100644 index 000000000000..5d6e6935b937 --- /dev/null +++ b/backend/remote/test-fixtures/plan-policy-hard-failed/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: false + +Sentinel evaluated to false because one or more Sentinel policies evaluated +to false. This false was not due to an undefined value or runtime error. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (hard-mandatory) + +Result: false + +FALSE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/backend/remote/test-fixtures/plan-policy-passed/main.tf b/backend/remote/test-fixtures/plan-policy-passed/main.tf new file mode 100644 index 000000000000..3911a2a9b2db --- /dev/null +++ b/backend/remote/test-fixtures/plan-policy-passed/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/backend/remote/test-fixtures/plan-policy-passed/plan.log b/backend/remote/test-fixtures/plan-policy-passed/plan.log new file mode 100644 index 000000000000..5849e57595ef --- /dev/null +++ b/backend/remote/test-fixtures/plan-policy-passed/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/backend/remote/test-fixtures/plan-policy-passed/policy.log b/backend/remote/test-fixtures/plan-policy-passed/policy.log new file mode 100644 index 000000000000..b0cb1e598592 --- /dev/null +++ b/backend/remote/test-fixtures/plan-policy-passed/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: true + +This result means that Sentinel policies returned true and the protected +behavior is allowed by Sentinel policies. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (soft-mandatory) + +Result: true + +TRUE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/backend/remote/test-fixtures/plan-policy-soft-failed/main.tf b/backend/remote/test-fixtures/plan-policy-soft-failed/main.tf new file mode 100644 index 000000000000..3911a2a9b2db --- /dev/null +++ b/backend/remote/test-fixtures/plan-policy-soft-failed/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/backend/remote/test-fixtures/plan-policy-soft-failed/plan.log b/backend/remote/test-fixtures/plan-policy-soft-failed/plan.log new file mode 100644 index 000000000000..5849e57595ef --- /dev/null +++ b/backend/remote/test-fixtures/plan-policy-soft-failed/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/backend/remote/test-fixtures/plan-policy-soft-failed/policy.log b/backend/remote/test-fixtures/plan-policy-soft-failed/policy.log new file mode 100644 index 000000000000..3e4ebedf6179 --- /dev/null +++ b/backend/remote/test-fixtures/plan-policy-soft-failed/policy.log @@ -0,0 +1,12 @@ +Sentinel Result: false + +Sentinel evaluated to false because one or more Sentinel policies evaluated +to false. This false was not due to an undefined value or runtime error. + +1 policies evaluated. + +## Policy 1: Passthrough.sentinel (soft-mandatory) + +Result: false + +FALSE - Passthrough.sentinel:1:1 - Rule "main" diff --git a/backend/remote/test-fixtures/plan-variables/main.tf b/backend/remote/test-fixtures/plan-variables/main.tf new file mode 100644 index 000000000000..955e8b4c09af --- /dev/null +++ b/backend/remote/test-fixtures/plan-variables/main.tf @@ -0,0 +1,4 @@ +variable "foo" {} +variable "bar" {} + +resource "null_resource" "foo" {} diff --git a/backend/remote/test-fixtures/plan-variables/plan.log b/backend/remote/test-fixtures/plan-variables/plan.log new file mode 100644 index 000000000000..5849e57595ef --- /dev/null +++ b/backend/remote/test-fixtures/plan-variables/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/backend/remote/test-fixtures/plan-with-error/main.tf b/backend/remote/test-fixtures/plan-with-error/main.tf new file mode 100644 index 000000000000..bc45f28f5637 --- /dev/null +++ b/backend/remote/test-fixtures/plan-with-error/main.tf @@ -0,0 +1,5 @@ +resource "null_resource" "foo" { + triggers { + random = "${guid()}" + } +} diff --git a/backend/remote/test-fixtures/plan-with-error/plan.log b/backend/remote/test-fixtures/plan-with-error/plan.log new file mode 100644 index 000000000000..4344a3722905 --- /dev/null +++ b/backend/remote/test-fixtures/plan-with-error/plan.log @@ -0,0 +1,10 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... + +Error: null_resource.foo: 1 error(s) occurred: + +* null_resource.foo: 1:3: unknown function called: guid in: + +${guid()} diff --git a/backend/remote/test-fixtures/plan-with-working-directory/terraform/main.tf b/backend/remote/test-fixtures/plan-with-working-directory/terraform/main.tf new file mode 100644 index 000000000000..3911a2a9b2db --- /dev/null +++ b/backend/remote/test-fixtures/plan-with-working-directory/terraform/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/backend/remote/test-fixtures/plan-with-working-directory/terraform/plan.log b/backend/remote/test-fixtures/plan-with-working-directory/terraform/plan.log new file mode 100644 index 000000000000..5849e57595ef --- /dev/null +++ b/backend/remote/test-fixtures/plan-with-working-directory/terraform/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. 
+Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. diff --git a/backend/remote/test-fixtures/plan/main.tf b/backend/remote/test-fixtures/plan/main.tf new file mode 100644 index 000000000000..3911a2a9b2db --- /dev/null +++ b/backend/remote/test-fixtures/plan/main.tf @@ -0,0 +1 @@ +resource "null_resource" "foo" {} diff --git a/backend/remote/test-fixtures/plan/plan.log b/backend/remote/test-fixtures/plan/plan.log new file mode 100644 index 000000000000..5849e57595ef --- /dev/null +++ b/backend/remote/test-fixtures/plan/plan.log @@ -0,0 +1,21 @@ +Terraform v0.11.7 + +Configuring remote state backend... +Initializing Terraform configuration... +Refreshing Terraform state in-memory prior to plan... +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. + +------------------------------------------------------------------------ + +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: + + create + +Terraform will perform the following actions: + + + null_resource.foo + id: + + +Plan: 1 to add, 0 to change, 0 to destroy. 
diff --git a/backend/remote/testing.go b/backend/remote/testing.go new file mode 100644 index 000000000000..0bb8d66c963e --- /dev/null +++ b/backend/remote/testing.go @@ -0,0 +1,182 @@ +package remote + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "testing" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/svchost" + "github.com/hashicorp/terraform/svchost/auth" + "github.com/hashicorp/terraform/svchost/disco" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" +) + +const ( + testCred = "test-auth-token" +) + +var ( + tfeHost = svchost.Hostname(defaultHostname) + credsSrc = auth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{ + tfeHost: {"token": testCred}, + }) +) + +func testInput(t *testing.T, answers map[string]string) *mockInput { + return &mockInput{answers: answers} +} + +func testBackendDefault(t *testing.T) *Remote { + obj := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }) + return testBackend(t, obj) +} + +func testBackendNoDefault(t *testing.T) *Remote { + obj := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "prefix": cty.StringVal("my-app-"), + }), + }) + return testBackend(t, obj) +} + +func testRemoteClient(t *testing.T) remote.Client { + b := testBackendDefault(t) + raw, err := b.StateMgr(backend.DefaultStateName) + 
if err != nil { + t.Fatalf("error: %v", err) + } + s := raw.(*remote.State) + return s.Client +} + +func testBackend(t *testing.T, obj cty.Value) *Remote { + s := testServer(t) + b := New(testDisco(s)) + + // Configure the backend so the client is created. + valDiags := b.ValidateConfig(obj) + if len(valDiags) != 0 { + t.Fatal(valDiags.ErrWithWarnings()) + } + + confDiags := b.Configure(obj) + if len(confDiags) != 0 { + t.Fatal(confDiags.ErrWithWarnings()) + } + + // Get a new mock client. + mc := newMockClient() + + // Replace the services we use with our mock services. + b.CLI = cli.NewMockUi() + b.client.Applies = mc.Applies + b.client.ConfigurationVersions = mc.ConfigurationVersions + b.client.Organizations = mc.Organizations + b.client.Plans = mc.Plans + b.client.PolicyChecks = mc.PolicyChecks + b.client.Runs = mc.Runs + b.client.StateVersions = mc.StateVersions + b.client.Workspaces = mc.Workspaces + + b.ShowDiagnostics = func(vals ...interface{}) { + var diags tfdiags.Diagnostics + for _, diag := range diags.Append(vals...) { + b.CLI.Error(diag.Description().Summary) + } + } + + ctx := context.Background() + + // Create the organization. + _, err := b.client.Organizations.Create(ctx, tfe.OrganizationCreateOptions{ + Name: tfe.String(b.organization), + }) + if err != nil { + t.Fatalf("error: %v", err) + } + + // Create the default workspace if required. + if b.workspace != "" { + _, err = b.client.Workspaces.Create(ctx, b.organization, tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.workspace), + }) + if err != nil { + t.Fatalf("error: %v", err) + } + } + + return b +} + +// testServer returns a *httptest.Server used for local testing. +func testServer(t *testing.T) *httptest.Server { + mux := http.NewServeMux() + + // Respond to service discovery calls. 
+ mux.HandleFunc("/well-known/terraform.json", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + io.WriteString(w, `{"tfe.v2":"/api/v2/"}`) + }) + + return httptest.NewServer(mux) +} + +// testDisco returns a *disco.Disco mapping app.terraform.io and +// localhost to a local test server. +func testDisco(s *httptest.Server) *disco.Disco { + services := map[string]interface{}{ + "tfe.v2": fmt.Sprintf("%s/api/v2/", s.URL), + } + d := disco.NewWithCredentialsSource(credsSrc) + + d.ForceHostServices(svchost.Hostname(defaultHostname), services) + d.ForceHostServices(svchost.Hostname("localhost"), services) + return d +} + +type unparsedVariableValue struct { + value string + source terraform.ValueSourceType +} + +func (v *unparsedVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) { + return &terraform.InputValue{ + Value: cty.StringVal(v.value), + SourceType: v.source, + }, tfdiags.Diagnostics{} +} + +// testVariable returns a backend.UnparsedVariableValue used for testing. 
+func testVariables(s terraform.ValueSourceType, vs ...string) map[string]backend.UnparsedVariableValue { + vars := make(map[string]backend.UnparsedVariableValue, len(vs)) + for _, v := range vs { + vars[v] = &unparsedVariableValue{ + value: v, + source: s, + } + } + return vars +} diff --git a/backend/testing.go b/backend/testing.go index 7fd85c74ec9f..d44073ad91f1 100644 --- a/backend/testing.go +++ b/backend/testing.go @@ -43,13 +43,13 @@ func TestBackendConfig(t *testing.T, b Backend, c hcl.Body) Backend { diags = diags.Append(valDiags.InConfigBody(c)) if len(diags) != 0 { - t.Fatal(diags) + t.Fatal(diags.ErrWithWarnings()) } confDiags := b.Configure(obj) if len(confDiags) != 0 { confDiags = confDiags.InConfigBody(c) - t.Fatal(confDiags) + t.Fatal(confDiags.ErrWithWarnings()) } return b @@ -69,19 +69,31 @@ func TestWrapConfig(raw map[string]interface{}) hcl.Body { // TestBackend will test the functionality of a Backend. The backend is // assumed to already be configured. This will test state functionality. // If the backend reports it doesn't support multi-state by returning the -// error ErrNamedStatesNotSupported, then it will not test that. +// error ErrWorkspacesNotSupported, then it will not test that. 
func TestBackendStates(t *testing.T, b Backend) { t.Helper() + noDefault := false + if _, err := b.StateMgr(DefaultStateName); err != nil { + if err == ErrDefaultWorkspaceNotSupported { + noDefault = true + } else { + t.Fatalf("error: %v", err) + } + } + workspaces, err := b.Workspaces() - if err == ErrNamedStatesNotSupported { - t.Logf("TestBackend: workspaces not supported in %T, skipping", b) - return + if err != nil { + if err == ErrWorkspacesNotSupported { + t.Logf("TestBackend: workspaces not supported in %T, skipping", b) + return + } + t.Fatalf("error: %v", err) } // Test it starts with only the default - if len(workspaces) != 1 || workspaces[0] != DefaultStateName { - t.Fatalf("should only have default to start: %#v", workspaces) + if !noDefault && (len(workspaces) != 1 || workspaces[0] != DefaultStateName) { + t.Fatalf("should only default to start: %#v", workspaces) } // Create a couple states @@ -111,8 +123,8 @@ func TestBackendStates(t *testing.T, b Backend) { { // We'll use two distinct states here and verify that changing one // does not also change the other. 
- barState := states.NewState() fooState := states.NewState() + barState := states.NewState() // write a known state to foo if err := foo.WriteState(fooState); err != nil { @@ -171,7 +183,7 @@ func TestBackendStates(t *testing.T, b Backend) { t.Fatal("after writing a resource to bar and re-reading foo, foo now has resources too") } - // fetch the bar again from the backend + // fetch the bar again from the backend bar, err = b.StateMgr("bar") if err != nil { t.Fatal("error re-fetching state:", err) @@ -190,11 +202,14 @@ func TestBackendStates(t *testing.T, b Backend) { // we determined that named stated are supported earlier workspaces, err := b.Workspaces() if err != nil { - t.Fatal(err) + t.Fatalf("err: %s", err) } sort.Strings(workspaces) expected := []string{"bar", "default", "foo"} + if noDefault { + expected = []string{"bar", "foo"} + } if !reflect.DeepEqual(workspaces, expected) { t.Fatalf("wrong workspaces list\ngot: %#v\nwant: %#v", workspaces, expected) } @@ -230,16 +245,18 @@ func TestBackendStates(t *testing.T, b Backend) { // Verify deletion { - states, err := b.Workspaces() - if err == ErrWorkspacesNotSupported { - t.Logf("TestBackend: named states not supported in %T, skipping", b) - return + workspaces, err := b.Workspaces() + if err != nil { + t.Fatalf("err: %s", err) } - sort.Strings(states) + sort.Strings(workspaces) expected := []string{"bar", "default"} - if !reflect.DeepEqual(states, expected) { - t.Fatalf("bad: %#v", states) + if noDefault { + expected = []string{"bar"} + } + if !reflect.DeepEqual(workspaces, expected) { + t.Fatalf("wrong workspaces list\ngot: %#v\nwant: %#v", workspaces, expected) } } } diff --git a/builtin/providers/terraform/data_source_state.go b/builtin/providers/terraform/data_source_state.go index 7f88591fca28..b075fa16fd19 100644 --- a/builtin/providers/terraform/data_source_state.go +++ b/builtin/providers/terraform/data_source_state.go @@ -4,13 +4,13 @@ import ( "fmt" "log" - "github.com/zclconf/go-cty/cty" - 
"github.com/hashicorp/terraform/backend" - backendinit "github.com/hashicorp/terraform/backend/init" "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/providers" "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + + backendInit "github.com/hashicorp/terraform/backend/init" ) func dataSourceRemoteStateGetSchema() providers.Schema { @@ -58,7 +58,7 @@ func dataSourceRemoteStateRead(d *cty.Value) (cty.Value, tfdiags.Diagnostics) { // Create the client to access our remote state log.Printf("[DEBUG] Initializing remote state backend: %s", backendType) - f := backendinit.Backend(backendType) + f := backendInit.Backend(backendType) if f == nil { diags = diags.Append(tfdiags.AttributeValue( tfdiags.Error, diff --git a/builtin/providers/terraform/provider_test.go b/builtin/providers/terraform/provider_test.go index 2baa338d3f17..2a3a2bfe9df6 100644 --- a/builtin/providers/terraform/provider_test.go +++ b/builtin/providers/terraform/provider_test.go @@ -3,19 +3,22 @@ package terraform import ( "testing" - backendinit "github.com/hashicorp/terraform/backend/init" "github.com/hashicorp/terraform/providers" + + backendInit "github.com/hashicorp/terraform/backend/init" ) var testAccProviders map[string]*Provider var testAccProvider *Provider func init() { + // Initialize the backends + backendInit.Init(nil) + testAccProvider = NewProvider() testAccProviders = map[string]*Provider{ "terraform": testAccProvider, } - backendinit.Init(nil) } func TestProvider_impl(t *testing.T) { diff --git a/command/apply.go b/command/apply.go index 5a317b8d6859..0ce426b00dea 100644 --- a/command/apply.go +++ b/command/apply.go @@ -8,7 +8,6 @@ import ( "strings" "github.com/hashicorp/go-getter" - "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/config/hcl2shim" @@ -178,18 +177,19 @@ func (c *ApplyCommand) Run(args []string) int { // Build the operation opReq := c.Operation(be) 
opReq.AutoApprove = autoApprove - opReq.Destroy = c.Destroy opReq.ConfigDir = configPath + opReq.Destroy = c.Destroy + opReq.DestroyForce = destroyForce opReq.PlanFile = planFile opReq.PlanRefresh = refresh opReq.Type = backend.OperationTypeApply - opReq.AutoApprove = autoApprove - opReq.DestroyForce = destroyForce + opReq.ConfigLoader, err = c.initConfigLoader() if err != nil { c.showDiagnostics(err) return 1 } + { var moreDiags tfdiags.Diagnostics opReq.Variables, moreDiags = c.collectVariableValues() diff --git a/command/command_test.go b/command/command_test.go index b473e9664eb1..f5a17cd6333a 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -19,10 +19,7 @@ import ( "syscall" "testing" - "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/addrs" - backendinit "github.com/hashicorp/terraform/backend/init" "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/configs/configload" "github.com/hashicorp/terraform/configs/configschema" @@ -36,6 +33,9 @@ import ( "github.com/hashicorp/terraform/states/statemgr" "github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/version" + "github.com/zclconf/go-cty/cty" + + backendInit "github.com/hashicorp/terraform/backend/init" ) // This is the directory where our test fixtures are. @@ -47,6 +47,9 @@ var testingDir string func init() { test = true + // Initialize the backends + backendInit.Init(nil) + // Expand the fixture dir on init because we change the working // directory in some tests. var err error @@ -74,7 +77,7 @@ func TestMain(m *testing.M) { } // Make sure backend init is initialized, since our tests tend to assume it. 
- backendinit.Init(nil) + backendInit.Init(nil) os.Exit(m.Run()) } diff --git a/command/init.go b/command/init.go index f14c6e127cd6..0dd6c79f547c 100644 --- a/command/init.go +++ b/command/init.go @@ -12,7 +12,6 @@ import ( "github.com/zclconf/go-cty/cty" "github.com/hashicorp/terraform/backend" - backendinit "github.com/hashicorp/terraform/backend/init" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/configs/configschema" @@ -21,6 +20,8 @@ import ( "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/tfdiags" + + backendInit "github.com/hashicorp/terraform/backend/init" ) // InitCommand is a Command implementation that takes a Terraform @@ -153,10 +154,12 @@ func (c *InitCommand) Run(args []string) int { // If our directory is empty, then we're done. We can't get or setup // the backend with an empty directory. - if empty, err := config.IsEmptyDir(path); err != nil { + empty, err := config.IsEmptyDir(path) + if err != nil { diags = diags.Append(fmt.Errorf("Error checking configuration: %s", err)) return 1 - } else if empty { + } + if empty { c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitEmpty))) return 0 } @@ -212,7 +215,7 @@ func (c *InitCommand) Run(args []string) int { c.Ui.Output(c.Colorize().Color(fmt.Sprintf("\n[reset][bold]Initializing the backend..."))) backendType := config.Backend.Type - bf := backendinit.Backend(backendType) + bf := backendInit.Backend(backendType) if bf == nil { diags = diags.Append(&hcl.Diagnostic{ Severity: hcl.DiagError, @@ -275,14 +278,12 @@ func (c *InitCommand) Run(args []string) int { if back != nil { sMgr, err := back.StateMgr(c.Workspace()) if err != nil { - c.Ui.Error(fmt.Sprintf( - "Error loading state: %s", err)) + c.Ui.Error(fmt.Sprintf("Error loading state: %s", err)) return 1 } if err := sMgr.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf( - "Error refreshing state: %s", 
err)) + c.Ui.Error(fmt.Sprintf("Error refreshing state: %s", err)) return 1 } diff --git a/command/meta.go b/command/meta.go index dbc1437d02b6..f8a637be0ace 100644 --- a/command/meta.go +++ b/command/meta.go @@ -26,7 +26,6 @@ import ( "github.com/hashicorp/terraform/helper/wrappedstreams" "github.com/hashicorp/terraform/providers" "github.com/hashicorp/terraform/provisioners" - "github.com/hashicorp/terraform/svchost/auth" "github.com/hashicorp/terraform/svchost/disco" "github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/tfdiags" @@ -52,10 +51,6 @@ type Meta struct { // "terraform-native' services running at a specific user-facing hostname. Services *disco.Disco - // Credentials provides access to credentials for "terraform-native" - // services, which are accessed by a service hostname. - Credentials auth.CredentialsSource - // RunningInAutomation indicates that commands are being run by an // automated system rather than directly at a command prompt. // diff --git a/command/meta_backend.go b/command/meta_backend.go index 2208d4df900f..8f9e51580445 100644 --- a/command/meta_backend.go +++ b/command/meta_backend.go @@ -10,23 +10,24 @@ import ( "fmt" "log" "path/filepath" + "strconv" "strings" "github.com/hashicorp/errwrap" "github.com/hashicorp/hcl2/hcl" "github.com/hashicorp/hcl2/hcldec" - "github.com/zclconf/go-cty/cty" - ctyjson "github.com/zclconf/go-cty/cty/json" - "github.com/hashicorp/terraform/backend" - backendinit "github.com/hashicorp/terraform/backend/init" - backendlocal "github.com/hashicorp/terraform/backend/local" "github.com/hashicorp/terraform/command/clistate" "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/plans" "github.com/hashicorp/terraform/state" "github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + backendInit "github.com/hashicorp/terraform/backend/init" + backendLocal 
"github.com/hashicorp/terraform/backend/local" ) // BackendOpts are the options used to initialize a backend.Backend. @@ -91,7 +92,7 @@ func (m *Meta) Backend(opts *BackendOpts) (backend.Enhanced, tfdiags.Diagnostics log.Printf("[INFO] command: backend initialized: %T", b) } - // Setup the CLI opts we pass into backends that support it + // Setup the CLI opts we pass into backends that support it. cliOpts := m.backendCLIOpts() cliOpts.Validation = true @@ -122,7 +123,7 @@ func (m *Meta) Backend(opts *BackendOpts) (backend.Enhanced, tfdiags.Diagnostics } // Build the local backend - local := &backendlocal.Local{Backend: b} + local := backendLocal.NewWithBackend(b) if err := local.CLIInit(cliOpts); err != nil { // Local backend isn't allowed to fail. It would be a bug. panic(err) @@ -163,7 +164,7 @@ func (m *Meta) Backend(opts *BackendOpts) (backend.Enhanced, tfdiags.Diagnostics func (m *Meta) BackendForPlan(settings plans.Backend) (backend.Enhanced, tfdiags.Diagnostics) { var diags tfdiags.Diagnostics - f := backendinit.Backend(settings.Type) + f := backendInit.Backend(settings.Type) if f == nil { diags = diags.Append(fmt.Errorf(strings.TrimSpace(errBackendSavedUnknown), settings.Type)) return nil, diags @@ -209,7 +210,7 @@ func (m *Meta) BackendForPlan(settings plans.Backend) (backend.Enhanced, tfdiags // to cause any operations to be run locally. cliOpts := m.backendCLIOpts() cliOpts.Validation = false // don't validate here in case config contains file(...) calls where the file doesn't exist - local := &backendlocal.Local{Backend: b} + local := backendLocal.NewWithBackend(b) if err := local.CLIInit(cliOpts); err != nil { // Local backend should never fail, so this is always a bug. panic(err) @@ -238,7 +239,7 @@ func (m *Meta) backendCLIOpts() *backend.CLIOpts { // for some checks that require a remote backend. func (m *Meta) IsLocalBackend(b backend.Backend) bool { // Is it a local backend? 
- bLocal, ok := b.(*backendlocal.Local) + bLocal, ok := b.(*backendLocal.Local) // If it is, does it not have an alternate state backend? if ok { @@ -267,6 +268,7 @@ func (m *Meta) Operation(b backend.Backend) *backend.Operation { return &backend.Operation{ PlanOutBackend: planOutBackend, + Parallelism: m.parallelism, Targets: m.targets, UIIn: m.UIInput(), UIOut: m.Ui, @@ -303,7 +305,7 @@ func (m *Meta) backendConfig(opts *BackendOpts) (*configs.Backend, int, tfdiags. return nil, 0, nil } - bf := backendinit.Backend(c.Type) + bf := backendInit.Backend(c.Type) if bf == nil { diags = diags.Append(&hcl.Diagnostic{ Severity: hcl.DiagError, @@ -598,22 +600,31 @@ func (m *Meta) backend_C_r_s(c *configs.Backend, cHash int, sMgr *state.LocalSta return nil, diags } - workspace := m.Workspace() - - localState, err := localB.StateMgr(workspace) + workspaces, err := localB.Workspaces() if err != nil { diags = diags.Append(fmt.Errorf(errBackendLocalRead, err)) return nil, diags } - if err := localState.RefreshState(); err != nil { - diags = diags.Append(fmt.Errorf(errBackendLocalRead, err)) - return nil, diags + + var localStates []state.State + for _, workspace := range workspaces { + localState, err := localB.StateMgr(workspace) + if err != nil { + diags = diags.Append(fmt.Errorf(errBackendLocalRead, err)) + return nil, diags + } + if err := localState.RefreshState(); err != nil { + diags = diags.Append(fmt.Errorf(errBackendLocalRead, err)) + return nil, diags + } + + // We only care about non-empty states. 
+ if localS := localState.State(); !localS.Empty() { + localStates = append(localStates, localState) + } } - // If the local state is not empty, we need to potentially do a - // state migration to the new backend (with user permission), unless the - // destination is also "local" - if localS := localState.State(); !localS.Empty() { + if len(localStates) > 0 { // Perform the migration err = m.backendMigrateState(&backendMigrateOpts{ OneType: "local", @@ -631,8 +642,8 @@ func (m *Meta) backend_C_r_s(c *configs.Backend, cHash int, sMgr *state.LocalSta // can get us here too. Don't delete our state if the old and new paths // are the same. erase := true - if newLocalB, ok := b.(*backendlocal.Local); ok { - if localB, ok := localB.(*backendlocal.Local); ok { + if newLocalB, ok := b.(*backendLocal.Local); ok { + if localB, ok := localB.(*backendLocal.Local); ok { if newLocalB.StatePath == localB.StatePath { erase = false } @@ -640,14 +651,16 @@ func (m *Meta) backend_C_r_s(c *configs.Backend, cHash int, sMgr *state.LocalSta } if erase { - // We always delete the local state, unless that was our new state too. - if err := localState.WriteState(nil); err != nil { - diags = diags.Append(fmt.Errorf(errBackendMigrateLocalDelete, err)) - return nil, diags - } - if err := localState.PersistState(); err != nil { - diags = diags.Append(fmt.Errorf(errBackendMigrateLocalDelete, err)) - return nil, diags + for _, localState := range localStates { + // We always delete the local state, unless that was our new state too. 
+ if err := localState.WriteState(nil); err != nil { + diags = diags.Append(fmt.Errorf(errBackendMigrateLocalDelete, err)) + return nil, diags + } + if err := localState.PersistState(); err != nil { + diags = diags.Append(fmt.Errorf(errBackendMigrateLocalDelete, err)) + return nil, diags + } } } } @@ -687,6 +700,13 @@ func (m *Meta) backend_C_r_s(c *configs.Backend, cHash int, sMgr *state.LocalSta return nil, diags } + // It's possible that the currently selected workspace is not migrated, + so we call selectWorkspace to ensure a valid workspace is selected. + if err := m.selectWorkspace(b); err != nil { + diags = diags.Append(err) + return nil, diags + } + m.Ui.Output(m.Colorize().Color(fmt.Sprintf( "[reset][green]\n"+strings.TrimSpace(successBackendSet), s.Backend.Type))) @@ -694,6 +714,53 @@ func (m *Meta) backend_C_r_s(c *configs.Backend, cHash int, sMgr *state.LocalSta return b, diags } +// selectWorkspace gets a list of migrated workspaces and then checks +// if the currently selected workspace is valid. If not, it will ask +// the user to select a workspace from the list. +func (m *Meta) selectWorkspace(b backend.Backend) error { + workspaces, err := b.Workspaces() + if err != nil { + return fmt.Errorf("Failed to get migrated workspaces: %s", err) + } + if len(workspaces) == 0 { + return fmt.Errorf(errBackendNoMigratedWorkspaces) + } + + // Get the currently selected workspace. + workspace := m.Workspace() + + // Check if any of the migrated workspaces match the selected workspace + // and create a numbered list with migrated workspaces. + var list strings.Builder + for i, w := range workspaces { + if w == workspace { + return nil + } + fmt.Fprintf(&list, "%d. %s\n", i+1, w) + } + + // If the selected workspace is not migrated, ask the user to select + // a workspace from the list of migrated workspaces. 
+ v, err := m.UIInput().Input(&terraform.InputOpts{ + Id: "select-workspace", + Query: fmt.Sprintf( + "[reset][bold][yellow]The currently selected workspace (%s) is not migrated.[reset]", + workspace), + Description: fmt.Sprintf( + strings.TrimSpace(inputBackendSelectWorkspace), list.String()), + }) + if err != nil { + return fmt.Errorf("Error asking to select workspace: %s", err) + } + + idx, err := strconv.Atoi(v) + if err != nil || (idx < 1 || idx > len(workspaces)) { + return fmt.Errorf("Error selecting workspace: input not a valid number") + } + + return m.SetWorkspace(workspaces[idx-1]) +} + // Changing a previously saved backend. func (m *Meta) backend_C_r_S_changed(c *configs.Backend, cHash int, sMgr *state.LocalState, output bool) (backend.Backend, tfdiags.Diagnostics) { if output { @@ -797,7 +864,7 @@ func (m *Meta) backend_C_r_S_unchanged(c *configs.Backend, cHash int, sMgr *stat } // Get the backend - f := backendinit.Backend(s.Backend.Type) + f := backendInit.Backend(s.Backend.Type) if f == nil { diags = diags.Append(fmt.Errorf(strings.TrimSpace(errBackendSavedUnknown), s.Backend.Type)) return nil, diags @@ -861,7 +928,7 @@ func (m *Meta) backendInitFromConfig(c *configs.Backend) (backend.Backend, cty.V var diags tfdiags.Diagnostics // Get the backend - f := backendinit.Backend(c.Type) + f := backendInit.Backend(c.Type) if f == nil { diags = diags.Append(fmt.Errorf(strings.TrimSpace(errBackendNewUnknown), c.Type)) return nil, cty.NilVal, diags @@ -901,7 +968,7 @@ func (m *Meta) backendInitFromSaved(s *terraform.BackendState) (backend.Backend, var diags tfdiags.Diagnostics // Get the backend - f := backendinit.Backend(s.Type) + f := backendInit.Backend(s.Type) if f == nil { diags = diags.Append(fmt.Errorf(strings.TrimSpace(errBackendSavedUnknown), s.Type)) return nil, diags diff --git a/command/meta_backend_migrate.go b/command/meta_backend_migrate.go index 2976cd63b786..06e46883baf5 100644 --- a/command/meta_backend_migrate.go +++ 
b/command/meta_backend_migrate.go @@ -10,15 +10,25 @@ import ( "sort" "strings" - "github.com/hashicorp/terraform/states" - "github.com/hashicorp/terraform/states/statemgr" - "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/command/clistate" "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" "github.com/hashicorp/terraform/terraform" ) +type backendMigrateOpts struct { + OneType, TwoType string + One, Two backend.Backend + + // Fields below are set internally when migrate is called + + oneEnv string // source env + twoEnv string // dest env + force bool // if true, won't ask for confirmation +} + // backendMigrateState handles migrating (copying) state from one backend // to another. This function handles asking the user for confirmation // as well as the copy itself. @@ -212,7 +222,47 @@ func (m *Meta) backendMigrateState_s_s(opts *backendMigrateOpts) error { errMigrateSingleLoadDefault), opts.OneType, err) } + // Do not migrate workspaces without state. + if stateOne.State().Empty() { + return nil + } + stateTwo, err := opts.Two.StateMgr(opts.twoEnv) + if err == backend.ErrDefaultWorkspaceNotSupported { + // If the backend doesn't support using the default state, we ask the user + // for a new name and migrate the default state to the given named state. + stateTwo, err = func() (statemgr.Full, error) { + name, err := m.UIInput().Input(&terraform.InputOpts{ + Id: "new-state-name", + Query: fmt.Sprintf( + "[reset][bold][yellow]The %q backend configuration only allows "+ + "named workspaces![reset]", + opts.TwoType), + Description: strings.TrimSpace(inputBackendNewWorkspaceName), + }) + if err != nil { + return nil, fmt.Errorf("Error asking for new state name: %s", err) + } + + // Update the name of the target state. 
+ opts.twoEnv = name + + stateTwo, err := opts.Two.StateMgr(opts.twoEnv) + if err != nil { + return nil, err + } + + // If the currently selected workspace is the default workspace, then set + // the named workspace as the new selected workspace. + if m.Workspace() == backend.DefaultStateName { + if err := m.SetWorkspace(opts.twoEnv); err != nil { + return nil, fmt.Errorf("Failed to set new workspace: %s", err) + } + } + + return stateTwo, nil + }() + } if err != nil { return fmt.Errorf(strings.TrimSpace( errMigrateSingleLoadDefault), opts.TwoType, err) @@ -381,17 +431,6 @@ func (m *Meta) backendMigrateNonEmptyConfirm( return m.confirm(inputOpts) } -type backendMigrateOpts struct { - OneType, TwoType string - One, Two backend.Backend - - // Fields below are set internally when migrate is called - - oneEnv string // source env - twoEnv string // dest env - force bool // if true, won't ask for confirmation -} - const errMigrateLoadStates = ` Error inspecting states in the %q backend: %s @@ -414,8 +453,8 @@ above error and try again. ` const errMigrateMulti = ` -Error migrating the workspace %q from the previous %q backend to the newly -configured %q backend: +Error migrating the workspace %q from the previous %q backend +to the newly configured %q backend: %s Terraform copies workspaces in alphabetical order. Any workspaces @@ -428,13 +467,22 @@ This will attempt to copy (with permission) all workspaces again. ` const errBackendStateCopy = ` -Error copying state from the previous %q backend to the newly configured %q backend: +Error copying state from the previous %q backend to the newly configured +%q backend: %s The state in the previous backend remains intact and unmodified. Please resolve the error above and try again. ` +const errBackendNoMigratedWorkspaces = ` +No workspaces are migrated. Use the "terraform workspace" command to create +and select a new workspace. 
+ +If the backend already contains existing workspaces, you may need to update +the workspace name or prefix in the backend configuration. +` + const inputBackendMigrateEmpty = ` Pre-existing state was found while migrating the previous %q backend to the newly configured %q backend. No existing state was found in the newly @@ -466,9 +514,9 @@ up, or cancel altogether, answer "no" and Terraform will abort. ` const inputBackendMigrateMultiToMulti = ` -Both the existing %[1]q backend and the newly configured %[2]q backend support -workspaces. When migrating between backends, Terraform will copy all -workspaces (with the same names). THIS WILL OVERWRITE any conflicting +Both the existing %[1]q backend and the newly configured %[2]q backend +support workspaces. When migrating between backends, Terraform will copy +all workspaces (with the same names). THIS WILL OVERWRITE any conflicting states in the destination. Terraform initialization doesn't currently migrate only select workspaces. @@ -478,3 +526,15 @@ pull and push those states. If you answer "yes", Terraform will migrate all states. If you answer "no", Terraform will abort. ` + +const inputBackendNewWorkspaceName = ` +Please provide a new workspace name (e.g. dev, test) that will be used +to migrate the existing default workspace. +` + +const inputBackendSelectWorkspace = ` +This is expected behavior when the selected workspace did not have an +existing non-empty state. 
Please enter a number to select a workspace: + +%s +` diff --git a/command/meta_backend_test.go b/command/meta_backend_test.go index ab8e58e4e349..e3a59cdd58da 100644 --- a/command/meta_backend_test.go +++ b/command/meta_backend_test.go @@ -8,12 +8,7 @@ import ( "sort" "testing" - "github.com/mitchellh/cli" - "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/terraform/backend" - backendinit "github.com/hashicorp/terraform/backend/init" - backendlocal "github.com/hashicorp/terraform/backend/local" "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/helper/copy" "github.com/hashicorp/terraform/plans" @@ -22,6 +17,11 @@ import ( "github.com/hashicorp/terraform/states/statefile" "github.com/hashicorp/terraform/states/statemgr" "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" + + backendInit "github.com/hashicorp/terraform/backend/init" + backendLocal "github.com/hashicorp/terraform/backend/local" ) // Test empty directory with no config/state creates a local state. 
@@ -745,8 +745,8 @@ func TestMetaBackend_reconfigureChange(t *testing.T) { defer testChdir(t, td)() // Register the single-state backend - backendinit.Set("local-single", backendlocal.TestNewLocalSingle) - defer backendinit.Set("local-single", nil) + backendInit.Set("local-single", backendLocal.TestNewLocalSingle) + defer backendInit.Set("local-single", nil) // Setup the meta m := testMetaBackend(t, nil) @@ -844,12 +844,11 @@ func TestMetaBackend_configuredChangeCopy_singleState(t *testing.T) { defer testChdir(t, td)() // Register the single-state backend - backendinit.Set("local-single", backendlocal.TestNewLocalSingle) - defer backendinit.Set("local-single", nil) + backendInit.Set("local-single", backendLocal.TestNewLocalSingle) + defer backendInit.Set("local-single", nil) // Ask input defer testInputMap(t, map[string]string{ - "backend-migrate-to-new": "yes", "backend-migrate-copy-to-empty": "yes", })() @@ -900,12 +899,11 @@ func TestMetaBackend_configuredChangeCopy_multiToSingleDefault(t *testing.T) { defer testChdir(t, td)() // Register the single-state backend - backendinit.Set("local-single", backendlocal.TestNewLocalSingle) - defer backendinit.Set("local-single", nil) + backendInit.Set("local-single", backendLocal.TestNewLocalSingle) + defer backendInit.Set("local-single", nil) // Ask input defer testInputMap(t, map[string]string{ - "backend-migrate-to-new": "yes", "backend-migrate-copy-to-empty": "yes", })() @@ -955,12 +953,11 @@ func TestMetaBackend_configuredChangeCopy_multiToSingle(t *testing.T) { defer testChdir(t, td)() // Register the single-state backend - backendinit.Set("local-single", backendlocal.TestNewLocalSingle) - defer backendinit.Set("local-single", nil) + backendInit.Set("local-single", backendLocal.TestNewLocalSingle) + defer backendInit.Set("local-single", nil) // Ask input defer testInputMap(t, map[string]string{ - "backend-migrate-to-new": "yes", "backend-migrate-multistate-to-single": "yes", "backend-migrate-copy-to-empty": "yes", 
})() @@ -1001,7 +998,7 @@ func TestMetaBackend_configuredChangeCopy_multiToSingle(t *testing.T) { } // Verify existing workspaces exist - envPath := filepath.Join(backendlocal.DefaultWorkspaceDir, "env2", backendlocal.DefaultStateFilename) + envPath := filepath.Join(backendLocal.DefaultWorkspaceDir, "env2", backendLocal.DefaultStateFilename) if _, err := os.Stat(envPath); err != nil { t.Fatal("env should exist") } @@ -1022,12 +1019,11 @@ func TestMetaBackend_configuredChangeCopy_multiToSingleCurrentEnv(t *testing.T) defer testChdir(t, td)() // Register the single-state backend - backendinit.Set("local-single", backendlocal.TestNewLocalSingle) - defer backendinit.Set("local-single", nil) + backendInit.Set("local-single", backendLocal.TestNewLocalSingle) + defer backendInit.Set("local-single", nil) // Ask input defer testInputMap(t, map[string]string{ - "backend-migrate-to-new": "yes", "backend-migrate-multistate-to-single": "yes", "backend-migrate-copy-to-empty": "yes", })() @@ -1073,7 +1069,7 @@ func TestMetaBackend_configuredChangeCopy_multiToSingleCurrentEnv(t *testing.T) } // Verify existing workspaces exist - envPath := filepath.Join(backendlocal.DefaultWorkspaceDir, "env2", backendlocal.DefaultStateFilename) + envPath := filepath.Join(backendLocal.DefaultWorkspaceDir, "env2", backendLocal.DefaultStateFilename) if _, err := os.Stat(envPath); err != nil { t.Fatal("env should exist") } @@ -1090,7 +1086,6 @@ func TestMetaBackend_configuredChangeCopy_multiToMulti(t *testing.T) { // Ask input defer testInputMap(t, map[string]string{ - "backend-migrate-to-new": "yes", "backend-migrate-multistate-to-multistate": "yes", })() @@ -1104,15 +1099,15 @@ func TestMetaBackend_configuredChangeCopy_multiToMulti(t *testing.T) { } // Check resulting states - states, err := b.Workspaces() + workspaces, err := b.Workspaces() if err != nil { t.Fatalf("unexpected error: %s", err) } - sort.Strings(states) + sort.Strings(workspaces) expected := []string{"default", "env2"} - if 
!reflect.DeepEqual(states, expected) { - t.Fatalf("bad: %#v", states) + if !reflect.DeepEqual(workspaces, expected) { + t.Fatalf("bad: %#v", workspaces) } { @@ -1158,7 +1153,159 @@ func TestMetaBackend_configuredChangeCopy_multiToMulti(t *testing.T) { { // Verify existing workspaces exist - envPath := filepath.Join(backendlocal.DefaultWorkspaceDir, "env2", backendlocal.DefaultStateFilename) + envPath := filepath.Join(backendLocal.DefaultWorkspaceDir, "env2", backendLocal.DefaultStateFilename) + if _, err := os.Stat(envPath); err != nil { + t.Fatal("env should exist") + } + } + + { + // Verify new workspaces exist + envPath := filepath.Join("envdir-new", "env2", backendLocal.DefaultStateFilename) + if _, err := os.Stat(envPath); err != nil { + t.Fatal("env should exist") + } + } +} + +// Changing a configured backend that supports multi-state to a +// backend that also supports multi-state, but doesn't allow a +// default state while the default state is non-empty. +func TestMetaBackend_configuredChangeCopy_multiToNoDefaultWithDefault(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + copy.CopyDir(testFixturePath("backend-change-multi-to-no-default-with-default"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + // Register the single-state backend + backendInit.Set("local-no-default", backendLocal.TestNewLocalNoDefault) + defer backendInit.Set("local-no-default", nil) + + // Ask input + defer testInputMap(t, map[string]string{ + "backend-migrate-multistate-to-multistate": "yes", + "new-state-name": "env1", + })() + + // Setup the meta + m := testMetaBackend(t, nil) + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check resulting states + workspaces, err := b.Workspaces() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + sort.Strings(workspaces) + expected := []string{"env1", "env2"} + if 
!reflect.DeepEqual(workspaces, expected) { + t.Fatalf("bad: %#v", workspaces) + } + + { + // Check the renamed default state + s, err := b.StateMgr("env1") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state == nil { + t.Fatal("state should not be nil") + } + if testStateMgrCurrentLineage(s) != "backend-change-env1" { + t.Fatalf("bad: %#v", state) + } + } + + { + // Verify existing workspaces exist + envPath := filepath.Join(backendLocal.DefaultWorkspaceDir, "env2", backendLocal.DefaultStateFilename) + if _, err := os.Stat(envPath); err != nil { + t.Fatal("env should exist") + } + } + + { + // Verify new workspaces exist + envPath := filepath.Join("envdir-new", "env2", backendLocal.DefaultStateFilename) + if _, err := os.Stat(envPath); err != nil { + t.Fatal("env should exist") + } + } +} + +// Changing a configured backend that supports multi-state to a +// backend that also supports multi-state, but doesn't allow a +// default state while the default state is empty. 
+func TestMetaBackend_configuredChangeCopy_multiToNoDefaultWithoutDefault(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + copy.CopyDir(testFixturePath("backend-change-multi-to-no-default-without-default"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + // Register the single-state backend + backendInit.Set("local-no-default", backendLocal.TestNewLocalNoDefault) + defer backendInit.Set("local-no-default", nil) + + // Ask input + defer testInputMap(t, map[string]string{ + "backend-migrate-multistate-to-multistate": "yes", + "select-workspace": "1", + })() + + // Setup the meta + m := testMetaBackend(t, nil) + + // Get the backend + b, diags := m.Backend(&BackendOpts{Init: true}) + if diags.HasErrors() { + t.Fatal(diags.Err()) + } + + // Check resulting states + workspaces, err := b.Workspaces() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + sort.Strings(workspaces) + expected := []string{"default", "env2"} + if !reflect.DeepEqual(workspaces, expected) { + t.Fatalf("bad: %#v", workspaces) + } + + { + // Check the named state + s, err := b.StateMgr("env2") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := s.RefreshState(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + state := s.State() + if state == nil { + t.Fatal("state should not be nil") + } + if testStateMgrCurrentLineage(s) != "backend-change-env2" { + t.Fatalf("bad: %#v", state) + } + } + + { + // Verify existing workspaces exist + envPath := filepath.Join(backendLocal.DefaultWorkspaceDir, "env2", backendLocal.DefaultStateFilename) if _, err := os.Stat(envPath); err != nil { t.Fatal("env should exist") } @@ -1166,7 +1313,7 @@ func TestMetaBackend_configuredChangeCopy_multiToMulti(t *testing.T) { { // Verify new workspaces exist - envPath := filepath.Join("envdir-new", "env2", backendlocal.DefaultStateFilename) + envPath := filepath.Join("envdir-new", "env2", backendLocal.DefaultStateFilename) if _, 
err := os.Stat(envPath); err != nil { t.Fatal("env should exist") } @@ -1611,7 +1758,7 @@ func TestMetaBackend_configureWithExtra(t *testing.T) { } // Check the state - s := testDataStateRead(t, filepath.Join(DefaultDataDir, backendlocal.DefaultStateFilename)) + s := testDataStateRead(t, filepath.Join(DefaultDataDir, backendLocal.DefaultStateFilename)) if s.Backend.Hash != cHash { t.Fatal("mismatched state and config backend hashes") } @@ -1627,7 +1774,7 @@ func TestMetaBackend_configureWithExtra(t *testing.T) { } // Check the state - s = testDataStateRead(t, filepath.Join(DefaultDataDir, backendlocal.DefaultStateFilename)) + s = testDataStateRead(t, filepath.Join(DefaultDataDir, backendLocal.DefaultStateFilename)) if s.Backend.Hash != cHash { t.Fatal("mismatched state and config backend hashes") } @@ -1694,7 +1841,7 @@ func TestMetaBackend_configToExtra(t *testing.T) { } // Check the state - s := testDataStateRead(t, filepath.Join(DefaultDataDir, backendlocal.DefaultStateFilename)) + s := testDataStateRead(t, filepath.Join(DefaultDataDir, backendLocal.DefaultStateFilename)) backendHash := s.Backend.Hash // init again but remove the path option from the config @@ -1715,7 +1862,7 @@ func TestMetaBackend_configToExtra(t *testing.T) { t.Fatal(diags.Err()) } - s = testDataStateRead(t, filepath.Join(DefaultDataDir, backendlocal.DefaultStateFilename)) + s = testDataStateRead(t, filepath.Join(DefaultDataDir, backendLocal.DefaultStateFilename)) if s.Backend.Hash == backendHash { t.Fatal("state.Backend.Hash was not updated") diff --git a/command/plan.go b/command/plan.go index 2f3411286cfa..1e1664609b33 100644 --- a/command/plan.go +++ b/command/plan.go @@ -95,17 +95,19 @@ func (c *PlanCommand) Run(args []string) int { // Build the operation opReq := c.Operation(b) - opReq.Destroy = destroy opReq.ConfigDir = configPath + opReq.Destroy = destroy opReq.PlanRefresh = refresh opReq.PlanOutPath = outPath opReq.PlanRefresh = refresh opReq.Type = backend.OperationTypePlan + 
opReq.ConfigLoader, err = c.initConfigLoader() if err != nil { c.showDiagnostics(err) return 1 } + { var moreDiags tfdiags.Diagnostics opReq.Variables, moreDiags = c.collectVariableValues() diff --git a/command/state_meta.go b/command/state_meta.go index f823de880520..02a49934ae4e 100644 --- a/command/state_meta.go +++ b/command/state_meta.go @@ -6,10 +6,11 @@ import ( "time" "github.com/hashicorp/terraform/addrs" - backendlocal "github.com/hashicorp/terraform/backend/local" "github.com/hashicorp/terraform/state" "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/states/statemgr" + + backendLocal "github.com/hashicorp/terraform/backend/local" ) // StateMeta is the meta struct that should be embedded in state subcommands. @@ -49,7 +50,7 @@ func (c *StateMeta) State() (state.State, error) { // This should never fail panic(backendDiags.Err()) } - localB := localRaw.(*backendlocal.Local) + localB := localRaw.(*backendLocal.Local) _, stateOutPath, _ = localB.StatePaths(workspace) if err != nil { return nil, err diff --git a/command/test-fixtures/backend-change-multi-to-no-default-with-default/main.tf b/command/test-fixtures/backend-change-multi-to-no-default-with-default/main.tf index 93c5bced0929..6328a4fb9aa9 100644 --- a/command/test-fixtures/backend-change-multi-to-no-default-with-default/main.tf +++ b/command/test-fixtures/backend-change-multi-to-no-default-with-default/main.tf @@ -1,5 +1,5 @@ terraform { backend "local-no-default" { - environment_dir = "envdir-new" + workspace_dir = "envdir-new" } } diff --git a/command/test-fixtures/backend-change-multi-to-no-default-without-default/main.tf b/command/test-fixtures/backend-change-multi-to-no-default-without-default/main.tf index 93c5bced0929..6328a4fb9aa9 100644 --- a/command/test-fixtures/backend-change-multi-to-no-default-without-default/main.tf +++ b/command/test-fixtures/backend-change-multi-to-no-default-without-default/main.tf @@ -1,5 +1,5 @@ terraform { backend "local-no-default" { - 
environment_dir = "envdir-new" + workspace_dir = "envdir-new" } } diff --git a/go.mod b/go.mod index e22e340e0226..3a7705311120 100644 --- a/go.mod +++ b/go.mod @@ -45,6 +45,7 @@ require ( github.com/golang/protobuf v1.2.0 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c // indirect github.com/google/go-cmp v0.2.0 + github.com/google/go-querystring v1.0.0 // indirect github.com/googleapis/gax-go v0.0.0-20161107002406-da06d194a00e // indirect github.com/gophercloud/gophercloud v0.0.0-20170524130959-3027adb1ce72 github.com/gopherjs/gopherjs v0.0.0-20181004151105-1babbf986f6f // indirect @@ -65,7 +66,9 @@ require ( github.com/hashicorp/go-retryablehttp v0.0.0-20160930035102-6e85be8fee1d github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 github.com/hashicorp/go-safetemp v0.0.0-20180326211150-b1a1dbde6fdc // indirect + github.com/hashicorp/go-slug v0.1.0 // indirect github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86 // indirect + github.com/hashicorp/go-tfe v0.2.6 github.com/hashicorp/go-uuid v1.0.0 github.com/hashicorp/go-version v0.0.0-20180322230233-23480c066577 github.com/hashicorp/golang-lru v0.5.0 // indirect @@ -118,6 +121,7 @@ require ( github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a // indirect github.com/soheilhy/cmux v0.1.4 // indirect github.com/spf13/afero v1.0.2 + github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d // indirect github.com/terraform-providers/terraform-provider-aws v1.41.0 github.com/terraform-providers/terraform-provider-openstack v0.0.0-20170616075611-4080a521c6ea github.com/terraform-providers/terraform-provider-template v1.0.0 // indirect diff --git a/go.sum b/go.sum index 4d5a3d3c381c..576fed37ad96 100644 --- a/go.sum +++ b/go.sum @@ -97,6 +97,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCy github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/googleapis/gax-go v0.0.0-20161107002406-da06d194a00e h1:CYRpN206UTHUinz3VJoLaBdy1gEGeJNsqT0mvswDcMw= github.com/googleapis/gax-go v0.0.0-20161107002406-da06d194a00e/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/gophercloud/gophercloud v0.0.0-20170524130959-3027adb1ce72 h1:I0ssFkBxJw27fhEVIBVjGQVMqKj5HyzfvfIhdr5Tx2E= @@ -141,8 +143,12 @@ github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:9HVkPxOp github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg= github.com/hashicorp/go-safetemp v0.0.0-20180326211150-b1a1dbde6fdc h1:wAa9fGALVHfjYxZuXRnmuJG2CnwRpJYOTvY6YdErAh0= github.com/hashicorp/go-safetemp v0.0.0-20180326211150-b1a1dbde6fdc/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-slug v0.1.0 h1:MJGEiOwRGrQCBmMMZABHqIESySFJ4ajrsjgDI4/aFI0= +github.com/hashicorp/go-slug v0.1.0/go.mod h1:+zDycQOzGqOqMW7Kn2fp9vz/NtqpMLQlgb9JUF+0km4= github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86 h1:7YOlAIO2YWnJZkQp7B5eFykaIY7C9JndqAFQyVV5BhM= github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-tfe v0.2.6 h1:o2ryV7ZS0BgaLfNvzWz+A/6J70UETMy+wFL+DQlUy/M= +github.com/hashicorp/go-tfe v0.2.6/go.mod h1:nJs7lSMcNPGQQtjyPG6en099CQ/f83+hfeeSqehl2Fg= github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v0.0.0-20180322230233-23480c066577 
h1:at4+18LrM8myamuV7/vT6x2s1JNXp2k4PsSbt4I02X4= @@ -277,6 +283,8 @@ github.com/spf13/afero v1.0.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d h1:Z4EH+5EffvBEhh37F0C0DnpklTMh00JOkjW5zK3ofBI= +github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d/go.mod h1:BSTlc8jOjh0niykqEGVXOLXdi9o0r0kR8tCYiMvjFgw= github.com/terraform-providers/terraform-provider-aws v1.41.0 h1:ZOuxMXREOtJ+SHMX5SnbZbiqYhl9GNfZDl4f0H6CaOM= github.com/terraform-providers/terraform-provider-aws v1.41.0/go.mod h1:uvqaeKnm2ydZ2LuKuW1NDNBu6heC/7IDGXWm36/6oKs= github.com/terraform-providers/terraform-provider-openstack v0.0.0-20170616075611-4080a521c6ea h1:IfuzHOI3XwwYZS2Xw8SQbxOtGXlIUrKtXtuDCTNxmsQ= diff --git a/main.go b/main.go index 2333bbc33204..8caf7414bda5 100644 --- a/main.go +++ b/main.go @@ -12,7 +12,6 @@ import ( "sync" "github.com/hashicorp/go-plugin" - backendInit "github.com/hashicorp/terraform/backend/init" "github.com/hashicorp/terraform/command/format" "github.com/hashicorp/terraform/helper/logging" "github.com/hashicorp/terraform/svchost/disco" @@ -22,6 +21,8 @@ import ( "github.com/mitchellh/colorstring" "github.com/mitchellh/panicwrap" "github.com/mitchellh/prefixedio" + + backendInit "github.com/hashicorp/terraform/backend/init" ) const ( diff --git a/registry/client_test.go b/registry/client_test.go index ea5d1e5ef484..0796f5f33a8e 100644 --- a/registry/client_test.go +++ b/registry/client_test.go @@ -85,26 +85,26 @@ func TestRegistryAuth(t *testing.T) { t.Fatal(err) } - // both should fail without auth _, err = client.ModuleVersions(mod) - if err == nil { - t.Fatal("expected error") + if err != nil { + t.Fatal(err) } _, err = 
client.ModuleLocation(mod, "1.0.0") - if err == nil { - t.Fatal("expected error") + if err != nil { + t.Fatal(err) } // Also test without a credentials source client.services.SetCredentialsSource(nil) + // both should fail without auth _, err = client.ModuleVersions(mod) - if err != nil { - t.Fatal(err) + if err == nil { + t.Fatal("expected error") } _, err = client.ModuleLocation(mod, "1.0.0") - if err != nil { - t.Fatal(err) + if err == nil { + t.Fatal("expected error") } } diff --git a/website/docs/backends/types/manta.html.md b/website/docs/backends/types/manta.html.md index 018009422eba..583e7c108e30 100644 --- a/website/docs/backends/types/manta.html.md +++ b/website/docs/backends/types/manta.html.md @@ -49,5 +49,4 @@ The following configuration options are supported: * `key_id` - (Required) This is the fingerprint of the public key matching the key specified in key_path. It can be obtained via the command ssh-keygen -l -E md5 -f /path/to/key. Can be set via the `SDC_KEY_ID` or `TRITON_KEY_ID` environment variables. * `insecure_skip_tls_verify` - (Optional) This allows skipping TLS verification of the Triton endpoint. It is useful when connecting to a temporary Triton installation such as Cloud-On-A-Laptop which does not generally use a certificate signed by a trusted root CA. Defaults to `false`. * `path` - (Required) The path relative to your private storage directory (`/$MANTA_USER/stor`) where the state file will be stored. **Please Note:** If this path does not exist, then the backend will create this folder location as part of backend creation. - * `objectName` - (Optional, Deprecated) Use `object_name` instead. 
* `object_name` - (Optional) The name of the state file (defaults to `terraform.tfstate`) diff --git a/website/docs/backends/types/terraform-enterprise.html.md b/website/docs/backends/types/terraform-enterprise.html.md index 2351d3a62b23..96de2041617a 100644 --- a/website/docs/backends/types/terraform-enterprise.html.md +++ b/website/docs/backends/types/terraform-enterprise.html.md @@ -8,6 +8,9 @@ description: |- # terraform enterprise +-> **Deprecated** Please use the new enhanced [remote](/docs/backends/types/remote.html) +backend for storing state and running remote operations in Terraform Enterprise. + **Kind: Standard (with no locking)** Reads and writes state from a [Terraform Enterprise](/docs/enterprise/index.html) From 8f578c365f49662c70c5f089ff4fcd2efd7dce10 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Mon, 5 Nov 2018 17:13:05 -0800 Subject: [PATCH 044/149] lang/funcs: Permit object types in the "length" function The implementation already allowed this, so this was just an oversight in the type checking function. This fixes #19278. 
--- lang/funcs/collection.go | 2 +- lang/funcs/collection_test.go | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/lang/funcs/collection.go b/lang/funcs/collection.go index cf0958f44ba4..4d70b2c08505 100644 --- a/lang/funcs/collection.go +++ b/lang/funcs/collection.go @@ -87,7 +87,7 @@ var LengthFunc = function.New(&function.Spec{ Type: func(args []cty.Value) (cty.Type, error) { collTy := args[0].Type() switch { - case collTy == cty.String || collTy.IsTupleType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType: + case collTy == cty.String || collTy.IsTupleType() || collTy.IsObjectType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType: return cty.Number, nil default: return cty.Number, fmt.Errorf("argument must be a string, a collection type, or a structural type") diff --git a/lang/funcs/collection_test.go b/lang/funcs/collection_test.go index e0c3e33f8c0c..a3215d9d509a 100644 --- a/lang/funcs/collection_test.go +++ b/lang/funcs/collection_test.go @@ -163,10 +163,26 @@ func TestLength(t *testing.T) { cty.EmptyTupleVal, cty.NumberIntVal(0), }, + { + cty.UnknownVal(cty.EmptyTuple), + cty.NumberIntVal(0), + }, { cty.TupleVal([]cty.Value{cty.True}), cty.NumberIntVal(1), }, + { + cty.EmptyObjectVal, + cty.NumberIntVal(0), + }, + { + cty.UnknownVal(cty.EmptyObject), + cty.NumberIntVal(0), + }, + { + cty.ObjectVal(map[string]cty.Value{"true": cty.True}), + cty.NumberIntVal(1), + }, { cty.UnknownVal(cty.List(cty.Bool)), cty.UnknownVal(cty.Number), From 8c01cf72939972d4a7235a6af9ada1f8d6b429dc Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Mon, 5 Nov 2018 17:17:28 -0800 Subject: [PATCH 045/149] lang/funcs: Fix broken test for lookup function When the value we're looking in has an object type, we need to know the key in order to decide the result type. 
Therefore an object lookup with an unknown key must produce cty.DynamicVal, not an unknown value with a known type. --- lang/funcs/collection_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lang/funcs/collection_test.go b/lang/funcs/collection_test.go index a3215d9d509a..202275ddc76f 100644 --- a/lang/funcs/collection_test.go +++ b/lang/funcs/collection_test.go @@ -1287,7 +1287,7 @@ func TestLookup(t *testing.T) { }), cty.UnknownVal(cty.String), }, - cty.UnknownVal(cty.String), + cty.DynamicVal, // if the key is unknown then we don't know which object attribute and thus can't know the type false, }, } From ecc42b838ced6884b6716695aa07b165242712a2 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Mon, 5 Nov 2018 18:03:40 -0800 Subject: [PATCH 046/149] lang/funcs: Fix crash and improve precision of keys/values functions The "values" function wasn't producing consistently-ordered keys in its result, leading to crashes. This fixes #19204. While working on these functions anyway, this also improves slightly their precision when working with object types, where we can produce a more complete result for unknown values because the attribute names are part of the type. We can also produce results for known maps that have unknown elements; these unknowns will also appear in the values(...) result, allowing them to propagate through expressions. Finally, this adds a few more test cases to try different permutations of empty and unknown values. 
--- lang/funcs/collection.go | 121 ++++++++++++++++++++++------------ lang/funcs/collection_test.go | 74 ++++++++++++++++++--- 2 files changed, 144 insertions(+), 51 deletions(-) diff --git a/lang/funcs/collection.go b/lang/funcs/collection.go index 4d70b2c08505..79e11d708de9 100644 --- a/lang/funcs/collection.go +++ b/lang/funcs/collection.go @@ -419,27 +419,69 @@ func flattener(finalList []cty.Value, flattenList cty.Value) []cty.Value { var KeysFunc = function.New(&function.Spec{ Params: []function.Parameter{ { - Name: "inputMap", - Type: cty.DynamicPseudoType, + Name: "inputMap", + Type: cty.DynamicPseudoType, + AllowUnknown: true, }, }, - Type: function.StaticReturnType(cty.List(cty.String)), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - var keys []cty.Value + Type: func(args []cty.Value) (cty.Type, error) { ty := args[0].Type() - - if !ty.IsObjectType() && !ty.IsMapType() { - return cty.NilVal, fmt.Errorf("keys() requires a map") + switch { + case ty.IsMapType(): + return cty.List(cty.String), nil + case ty.IsObjectType(): + atys := ty.AttributeTypes() + if len(atys) == 0 { + return cty.EmptyTuple, nil + } + // All of our result elements will be strings, and atys just + // decides how many there are. + etys := make([]cty.Type, len(atys)) + for i := range etys { + etys[i] = cty.String + } + return cty.Tuple(etys), nil + default: + return cty.DynamicPseudoType, function.NewArgErrorf(0, "must have map or object type") } + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + m := args[0] + var keys []cty.Value - for it := args[0].ElementIterator(); it.Next(); { - k, _ := it.Element() - keys = append(keys, k) - if err != nil { - return cty.ListValEmpty(cty.String), err + switch { + case m.Type().IsObjectType(): + // In this case we allow unknown values so we must work only with + // the attribute _types_, not with the value itself. 
+ var names []string + for name := range m.Type().AttributeTypes() { + names = append(names, name) + } + sort.Strings(names) // same ordering guaranteed by cty's ElementIterator + if len(names) == 0 { + return cty.EmptyTupleVal, nil + } + keys = make([]cty.Value, len(names)) + for i, name := range names { + keys[i] = cty.StringVal(name) } + return cty.TupleVal(keys), nil + default: + if !m.IsKnown() { + return cty.UnknownVal(retType), nil + } + + // cty guarantees that ElementIterator will iterate in lexicographical + // order by key. + for it := args[0].ElementIterator(); it.Next(); { + k, _ := it.Element() + keys = append(keys, k) + } + if len(keys) == 0 { + return cty.ListValEmpty(cty.String), nil + } + return cty.ListVal(keys), nil } - return cty.ListVal(keys), nil }, }) @@ -891,9 +933,23 @@ var ValuesFunc = function.New(&function.Spec{ if ty.IsMapType() { return cty.List(ty.ElementType()), nil } else if ty.IsObjectType() { - var tys []cty.Type - for _, v := range ty.AttributeTypes() { - tys = append(tys, v) + // The result is a tuple type with all of the same types as our + // object type's attributes, sorted in lexicographical order by the + // keys. (This matches the sort order guaranteed by ElementIterator + // on a cty object value.) 
+ atys := ty.AttributeTypes() + if len(atys) == 0 { + return cty.EmptyTuple, nil + } + attrNames := make([]string, 0, len(atys)) + for name := range atys { + attrNames = append(attrNames, name) + } + sort.Strings(attrNames) + + tys := make([]cty.Type, len(attrNames)) + for i, name := range attrNames { + tys[i] = atys[name] } return cty.Tuple(tys), nil } @@ -902,33 +958,12 @@ var ValuesFunc = function.New(&function.Spec{ Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { mapVar := args[0] - if !mapVar.IsWhollyKnown() { - return cty.UnknownVal(retType), nil - } - - if mapVar.LengthInt() == 0 { - return cty.ListValEmpty(retType.ElementType()), nil - } - - keys, err := Keys(mapVar) - if err != nil { - return cty.NilVal, err - } - + // We can just iterate the map/object value here because cty guarantees + // that these types always iterate in key lexicographical order. var values []cty.Value - - for it := keys.ElementIterator(); it.Next(); { - _, key := it.Element() - k := key.AsString() - if mapVar.Type().IsObjectType() { - if mapVar.Type().HasAttribute(k) { - value := mapVar.GetAttr(k) - values = append(values, value) - } - } else { - value := mapVar.Index(cty.StringVal(k)) - values = append(values, value) - } + for it := mapVar.ElementIterator(); it.Next(); { + _, val := it.Element() + values = append(values, val) } if retType.IsTupleType() { diff --git a/lang/funcs/collection_test.go b/lang/funcs/collection_test.go index 202275ddc76f..83b8a3cbfa80 100644 --- a/lang/funcs/collection_test.go +++ b/lang/funcs/collection_test.go @@ -1018,22 +1018,56 @@ func TestKeys(t *testing.T) { "hello": cty.NumberIntVal(1), "goodbye": cty.StringVal("adieu"), }), - cty.ListVal([]cty.Value{ + cty.TupleVal([]cty.Value{ cty.StringVal("goodbye"), cty.StringVal("hello"), }), false, }, - { // Not a map - cty.StringVal("foo"), - cty.NilVal, - true, + { // for an unknown object we can still return the keys, since they are part of the type + 
cty.UnknownVal(cty.Object(map[string]cty.Type{ + "hello": cty.Number, + "goodbye": cty.String, + })), + cty.TupleVal([]cty.Value{ + cty.StringVal("goodbye"), + cty.StringVal("hello"), + }), + false, }, - { // Unknown map + { // an empty object has no keys + cty.EmptyObjectVal, + cty.EmptyTupleVal, + false, + }, + { // an empty map has no keys, but the result should still be properly typed + cty.MapValEmpty(cty.Number), + cty.ListValEmpty(cty.String), + false, + }, + { // Unknown map has unknown keys cty.UnknownVal(cty.Map(cty.String)), cty.UnknownVal(cty.List(cty.String)), false, }, + { // Not a map at all, so invalid + cty.StringVal("foo"), + cty.NilVal, + true, + }, + { // Can't get keys from a null object + cty.NullVal(cty.Object(map[string]cty.Type{ + "hello": cty.Number, + "goodbye": cty.String, + })), + cty.NilVal, + true, + }, + { // Can't get keys from a null map + cty.NullVal(cty.Map(cty.Number)), + cty.NilVal, + true, + }, } for _, test := range tests { @@ -2015,8 +2049,8 @@ func TestValues(t *testing.T) { }, { cty.ObjectVal(map[string]cty.Value{ - "hello": cty.StringVal("world"), "what's": cty.StringVal("up"), + "hello": cty.StringVal("world"), }), cty.TupleVal([]cty.Value{ cty.StringVal("world"), @@ -2024,6 +2058,22 @@ func TestValues(t *testing.T) { }), false, }, + { // empty object + cty.EmptyObjectVal, + cty.EmptyTupleVal, + false, + }, + { + cty.UnknownVal(cty.Object(map[string]cty.Type{ + "what's": cty.String, + "hello": cty.Bool, + })), + cty.UnknownVal(cty.Tuple([]cty.Type{ + cty.Bool, + cty.String, + })), + false, + }, { // note ordering: keys are sorted first cty.MapVal(map[string]cty.Value{ "hello": cty.NumberIntVal(1), @@ -2051,7 +2101,10 @@ func TestValues(t *testing.T) { "hello": cty.ListVal([]cty.Value{cty.StringVal("world")}), "what's": cty.UnknownVal(cty.List(cty.String)), }), - cty.UnknownVal(cty.List(cty.List(cty.String))), + cty.ListVal([]cty.Value{ + cty.ListVal([]cty.Value{cty.StringVal("world")}), + 
cty.UnknownVal(cty.List(cty.String)), + }), false, }, { // empty m @@ -2059,6 +2112,11 @@ func TestValues(t *testing.T) { cty.ListValEmpty(cty.DynamicPseudoType), false, }, + { // unknown m + cty.UnknownVal(cty.Map(cty.String)), + cty.UnknownVal(cty.List(cty.String)), + false, + }, } for _, test := range tests { From 027b10726853c6f01cb17099c0ff19edb98c7056 Mon Sep 17 00:00:00 2001 From: Doug Goldstein Date: Thu, 4 Oct 2018 13:49:43 -0500 Subject: [PATCH 047/149] website: update deprecated backend/swift docs Update the examples and docs to not directly reference deprecated fields. --- website/docs/backends/types/swift.html.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/docs/backends/types/swift.html.md b/website/docs/backends/types/swift.html.md index 192e5852931c..711ce994385a 100644 --- a/website/docs/backends/types/swift.html.md +++ b/website/docs/backends/types/swift.html.md @@ -12,14 +12,14 @@ description: |- Stores the state as an artifact in [Swift](http://docs.openstack.org/developer/swift/). -~> Warning! It is highly recommended that you enable [Object Versioning](https://docs.openstack.org/developer/swift/overview_object_versioning.html) by setting the [`expire_after`](https://www.terraform.io/docs/backends/types/swift.html#archive_path) configuration. This allows for state recovery in the case of accidental deletions and human error. +~> Warning! It is highly recommended that you enable [Object Versioning](https://docs.openstack.org/developer/swift/overview_object_versioning.html) by setting the [`expire_after`](https://www.terraform.io/docs/backends/types/swift.html#archive_container) configuration. This allows for state recovery in the case of accidental deletions and human error. 
## Example Configuration ```hcl terraform { backend "swift" { - path = "terraform-state" + container = "terraform-state" } } ``` @@ -36,7 +36,7 @@ For the access credentials we recommend using a data "terraform_remote_state" "foo" { backend = "swift" config = { - path = "terraform_state" + container = "terraform_state" } } ``` @@ -105,5 +105,5 @@ The following configuration options are supported: The path to store archived copied of `terraform.tfstate`. If specified, Swift [object versioning](https://docs.openstack.org/developer/swift/overview_object_versioning.html) is enabled on the container created at `path`. - * `expire_after` - (Optional) How long should the `terraform.tfstate` created at `path` + * `expire_after` - (Optional) How long should the `terraform.tfstate` created at `container` be retained for? Supported durations: `m` - Minutes, `h` - Hours, `d` - Days. From b31aab446969213a400a4b37ae7fd1dad401188d Mon Sep 17 00:00:00 2001 From: Doug Goldstein Date: Thu, 4 Oct 2018 13:52:40 -0500 Subject: [PATCH 048/149] website: fix backend/swift links to Swift docs The links to the OpenStack Swift documentation were broken due to changes on the OpenStack website. --- website/docs/backends/types/swift.html.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/docs/backends/types/swift.html.md b/website/docs/backends/types/swift.html.md index 711ce994385a..b89702ca3ce4 100644 --- a/website/docs/backends/types/swift.html.md +++ b/website/docs/backends/types/swift.html.md @@ -10,9 +10,9 @@ description: |- **Kind: Standard (with no locking)** -Stores the state as an artifact in [Swift](http://docs.openstack.org/developer/swift/). +Stores the state as an artifact in [Swift](http://docs.openstack.org/developer/swift/latest/). -~> Warning! 
It is highly recommended that you enable [Object Versioning](https://docs.openstack.org/developer/swift/overview_object_versioning.html) by setting the [`expire_after`](https://www.terraform.io/docs/backends/types/swift.html#archive_container) configuration. This allows for state recovery in the case of accidental deletions and human error. +~> Warning! It is highly recommended that you enable [Object Versioning](https://docs.openstack.org/developer/swift/latest/overview_object_versioning.html) by setting the [`expire_after`](https://www.terraform.io/docs/backends/types/swift.html#archive_container) configuration. This allows for state recovery in the case of accidental deletions and human error. ## Example Configuration @@ -99,11 +99,11 @@ The following configuration options are supported: If omitted the `OS_KEY` environment variable is used. * `archive_container` - (Optional) The container to create to store archived copies - of the Terraform state file. If specified, Swift [object versioning](https://docs.openstack.org/developer/swift/overview_object_versioning.html) is enabled on the container created at `container`. + of the Terraform state file. If specified, Swift [object versioning](https://docs.openstack.org/developer/swift/latest/overview_object_versioning.html) is enabled on the container created at `container`. * `archive_path` - (Optional) DEPRECATED: Use `archive_container` instead. The path to store archived copied of `terraform.tfstate`. If specified, - Swift [object versioning](https://docs.openstack.org/developer/swift/overview_object_versioning.html) is enabled on the container created at `path`. + Swift [object versioning](https://docs.openstack.org/developer/swift/latest/overview_object_versioning.html) is enabled on the container created at `path`. * `expire_after` - (Optional) How long should the `terraform.tfstate` created at `container` be retained for? Supported durations: `m` - Minutes, `h` - Hours, `d` - Days. 
From ddc30b6546a91048218638c1a0e81bfb58545fc5 Mon Sep 17 00:00:00 2001 From: Doug Goldstein Date: Thu, 4 Oct 2018 14:22:27 -0500 Subject: [PATCH 049/149] website: backend/swift add docs link for expire_after Provide a link to the OpenStack Swift docs for the object expiration feature that is used by the `expire_after` field. --- website/docs/backends/types/swift.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/backends/types/swift.html.md b/website/docs/backends/types/swift.html.md index b89702ca3ce4..afcd9fff751d 100644 --- a/website/docs/backends/types/swift.html.md +++ b/website/docs/backends/types/swift.html.md @@ -106,4 +106,4 @@ The following configuration options are supported: Swift [object versioning](https://docs.openstack.org/developer/swift/latest/overview_object_versioning.html) is enabled on the container created at `path`. * `expire_after` - (Optional) How long should the `terraform.tfstate` created at `container` - be retained for? Supported durations: `m` - Minutes, `h` - Hours, `d` - Days. + be retained for? If specified, Swift [expiring object support](https://docs.openstack.org/developer/swift/latest/overview_expiring_objects.html) is enabled on the state. Supported durations: `m` - Minutes, `h` - Hours, `d` - Days. From 58cb47d108348dc013b00ece1377aa27597e9a5c Mon Sep 17 00:00:00 2001 From: Doug Goldstein Date: Thu, 4 Oct 2018 14:25:15 -0500 Subject: [PATCH 050/149] website: backend/swift fix bad link for object versioning The displayed link said `expire_after` but really is a link to `archive_container` so update the link to read the right data. 
fixes #19005 --- website/docs/backends/types/swift.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/backends/types/swift.html.md b/website/docs/backends/types/swift.html.md index afcd9fff751d..9e33c276cd7f 100644 --- a/website/docs/backends/types/swift.html.md +++ b/website/docs/backends/types/swift.html.md @@ -12,7 +12,7 @@ description: |- Stores the state as an artifact in [Swift](http://docs.openstack.org/developer/swift/latest/). -~> Warning! It is highly recommended that you enable [Object Versioning](https://docs.openstack.org/developer/swift/latest/overview_object_versioning.html) by setting the [`expire_after`](https://www.terraform.io/docs/backends/types/swift.html#archive_container) configuration. This allows for state recovery in the case of accidental deletions and human error. +~> Warning! It is highly recommended that you enable [Object Versioning](https://docs.openstack.org/developer/swift/latest/overview_object_versioning.html) by setting the [`archive_container`](https://www.terraform.io/docs/backends/types/swift.html#archive_container) configuration. This allows for state recovery in the case of accidental deletions and human error. ## Example Configuration From 817be7b23f230dd4abde3b4a5faaad4c64a7773a Mon Sep 17 00:00:00 2001 From: Doug Goldstein Date: Thu, 4 Oct 2018 14:26:10 -0500 Subject: [PATCH 051/149] website: update backend/swift examples to use versioning Since object versioning is a best practice the docs should have all the examples containing it by default. 
--- website/docs/backends/types/swift.html.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/website/docs/backends/types/swift.html.md b/website/docs/backends/types/swift.html.md index 9e33c276cd7f..001dea0a1020 100644 --- a/website/docs/backends/types/swift.html.md +++ b/website/docs/backends/types/swift.html.md @@ -19,11 +19,12 @@ Stores the state as an artifact in [Swift](http://docs.openstack.org/developer/s ```hcl terraform { backend "swift" { - container = "terraform-state" + container = "terraform-state" + archive_container = "terraform-state-archive" } } ``` -This will create a container called `terraform-state` and an object within that container called `tfstate.tf`. +This will create a container called `terraform-state` and an object within that container called `tfstate.tf`. It will enable versioning using the `terraform-state-archive` container to contain the older version. -> Note: Currently, the object name is statically defined as 'tfstate.tf'. Therefore Swift [pseudo-folders](https://docs.openstack.org/user-guide/cli-swift-pseudo-hierarchical-folders-directories.html) are not currently supported. @@ -36,7 +37,8 @@ For the access credentials we recommend using a data "terraform_remote_state" "foo" { backend = "swift" config = { - container = "terraform_state" + container = "terraform_state" + archive_container = "terraform_state-archive" } } ``` From 49c42b98300a8ced3c470ac272ae34b8c8fb679e Mon Sep 17 00:00:00 2001 From: Tom Harvey Date: Mon, 29 Oct 2018 15:08:27 -0500 Subject: [PATCH 052/149] docs: add warning to backend/swift about auto-expire If the user uses the auto-expire value in the backend/swift settings then swift will automatically delete their Statefile which is likely something the user doesn't want given how Terraform works. 
--- website/docs/backends/types/swift.html.md | 1 + 1 file changed, 1 insertion(+) diff --git a/website/docs/backends/types/swift.html.md b/website/docs/backends/types/swift.html.md index 001dea0a1020..2e11444c73a9 100644 --- a/website/docs/backends/types/swift.html.md +++ b/website/docs/backends/types/swift.html.md @@ -109,3 +109,4 @@ The following configuration options are supported: * `expire_after` - (Optional) How long should the `terraform.tfstate` created at `container` be retained for? If specified, Swift [expiring object support](https://docs.openstack.org/developer/swift/latest/overview_expiring_objects.html) is enabled on the state. Supported durations: `m` - Minutes, `h` - Hours, `d` - Days. + ~> **NOTE:** Since Terraform is inherently stateful - we'd strongly recommend against auto-expiring Statefiles. From a63c9d7db5ecebdaa79dbfc4ed29606fd2b573d2 Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Wed, 7 Nov 2018 08:50:16 +0100 Subject: [PATCH 053/149] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 92fcfe365632..8f0a5b4ca081 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ IMPROVEMENTS: * command/state: Update and enable the `state show` command [GH-19200] +* backend/remote: Implement the remote enhanced backend [GH-19299] BUG FIXES: From 0cbf745e5a7fda69eca0177431aec3011b9830bf Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 7 Nov 2018 15:28:16 +0000 Subject: [PATCH 054/149] helper/schema: Avoid erroring out on undefined timeouts --- helper/schema/resource_timeout.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/helper/schema/resource_timeout.go b/helper/schema/resource_timeout.go index 33cbce1854d1..b7d63fafa56d 100644 --- a/helper/schema/resource_timeout.go +++ b/helper/schema/resource_timeout.go @@ -5,6 +5,7 @@ import ( "log" "time" + "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/terraform" 
"github.com/mitchellh/copystructure" ) @@ -105,10 +106,16 @@ func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig) *timeout = rt } - } else { - log.Printf("[ERROR] Invalid timeout structure: %T", raw) - return fmt.Errorf("Invalid Timeout structure found") + return nil + } + if v, ok := raw.(string); ok && v == config.UnknownVariableValue { + // Timeout is not defined in the config + // Defaults will be used instead + return nil } + + log.Printf("[ERROR] Invalid timeout structure: %T", raw) + return fmt.Errorf("Invalid Timeout structure found") } return nil From 3b02214d8cc78659d034de20208391fb5c4a271e Mon Sep 17 00:00:00 2001 From: Nick Fagerlund Date: Wed, 7 Nov 2018 14:01:58 -0800 Subject: [PATCH 055/149] website: clarify behavior of terraform_remote_state data sources (#19227) --- .../terraform/d/remote_state.html.md | 41 ++++++++++++------- 1 file changed, 27 insertions(+), 14 deletions(-) diff --git a/website/docs/providers/terraform/d/remote_state.html.md b/website/docs/providers/terraform/d/remote_state.html.md index d31507339986..37680c8b2a8e 100644 --- a/website/docs/providers/terraform/d/remote_state.html.md +++ b/website/docs/providers/terraform/d/remote_state.html.md @@ -8,7 +8,16 @@ description: |- # remote_state -Retrieves state meta data from a remote backend +[backends]: /docs/backends/index.html + +Retrieves state data from a [Terraform backend][backends]. This allows you to +use the root-level outputs of one or more Terraform configurations as input data +for another configuration. + +Although this data source uses Terraform's [backends][], it doesn't have the +same limitations as the main backend configuration. You can use any number of +`remote_state` data sources with differently configured backends, and you can +use interpolations when configuring them. ## Example Usage @@ -31,10 +40,15 @@ resource "aws_instance" "foo" { The following arguments are supported: * `backend` - (Required) The remote backend to use. 
-* `workspace` - (Optional) The Terraform workspace to use. -* `config` - (Optional) The configuration of the remote backend. -* `defaults` - (Optional) default value for outputs in case state file is empty or it does not have the output. - * Remote state config docs can be found [here](/docs/backends/types/terraform-enterprise.html) +* `workspace` - (Optional) The Terraform workspace to use, if the backend + supports workspaces. +* `config` - (Optional; block) The configuration of the remote backend. The + `config` block can use any arguments that would be valid in the equivalent + `terraform { backend "" { ... } }` block. See + [the documentation of your chosen backend](/docs/backends/types/index.html) + for details. +* `defaults` - (Optional; block) Default values for outputs, in case the state + file is empty or lacks a required output. ## Attributes Reference @@ -42,18 +56,17 @@ The following attributes are exported: * `backend` - See Argument Reference above. * `config` - See Argument Reference above. - -In addition, each output in the remote state appears as a top level attribute -on the `terraform_remote_state` resource. +* `` - Each root-level [output](/docs/configuration/outputs.html) + in the remote state appears as a top level attribute on the data source. ## Root Outputs Only -Only the root level outputs from the remote state are accessible. Outputs from -modules within the state cannot be accessed. If you want a module output to be -accessible via a remote state, you must thread the output through to a root -output. +Only the root-level outputs from the remote state are accessible. Outputs from +modules within the state cannot be accessed. If you want a module output or a +resource attribute to be accessible via a remote state, you must thread the +output through to a root output. 
-An example is shown below: +For example: ```hcl module "app" { @@ -66,5 +79,5 @@ output "app_value" { ``` In this example, the output `value` from the "app" module is available as -"app_value". If this root level output hadn't been created, then a remote state +`app_value`. If this root level output hadn't been created, then a remote state resource wouldn't be able to access the `value` output on the module. From 0ea8aa6fe521af9c8967f28146451be2fcea5569 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 7 Nov 2018 10:49:10 -0800 Subject: [PATCH 056/149] command: Fix TestWorkspace_createWithState The State.Equal function is now more precise than this test needs. It's only trying to distinguish between an empty state and a non-empty state, so the string representation of state is good enough to get that done while disregarding other subtle differences. --- command/workspace_command_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/command/workspace_command_test.go b/command/workspace_command_test.go index 4503850fd0d5..ea645e00627b 100644 --- a/command/workspace_command_test.go +++ b/command/workspace_command_test.go @@ -276,8 +276,8 @@ func TestWorkspace_createWithState(t *testing.T) { newState := sMgr.State() - if !originalState.Equal(newState) { - t.Fatalf("states not equal\norig: %s\nnew: %s", originalState, newState) + if got, want := newState.String(), originalState.String(); got != want { + t.Fatalf("states not equal\ngot: %s\nwant: %s", got, want) } } From fcf3f643cef42de8ac789d9b410164e3566ac6f6 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 7 Nov 2018 14:48:55 -0800 Subject: [PATCH 057/149] command: Fix TestPlan_shutdown Comments here indicate that this was erroneously returning an error but we accepted it anyway to get the tests passing again after other work. The tests over in the "terraform" package agree that cancelling should be a successful outcome rather than an error. 
I think that cancelling _should_ actually be an error, since Terraform did not complete the operation it set out to complete, but that's a change we'd need to make cautiously since automation wrapper scripts may be depending on the success-on-cancel behavior. Therefore this just fixes the command package test to agree with the Terraform package tests and adds some FIXME notes to capture the potential that we might want to update this later. --- command/plan_test.go | 12 +++++++----- terraform/hook_stop.go | 4 ++++ 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/command/plan_test.go b/command/plan_test.go index 2af32adf1fc2..b646ce7e4547 100644 --- a/command/plan_test.go +++ b/command/plan_test.go @@ -761,16 +761,18 @@ func TestPlan_shutdown(t *testing.T) { "-state=nonexistent.tfstate", testFixturePath("apply-shutdown"), }) - if code != 1 { - // FIXME: we should be able to avoid the error during evaluation - // the early exit isn't caught before the interpolation is evaluated - t.Fatalf("wrong exit code %d; want 1\noutput:\n%s", code, ui.OutputWriter.String()) + if code != 0 { + // FIXME: In retrospect cancellation ought to be an unsuccessful exit + // case, but we need to do that cautiously in case it impacts automation + // wrappers. See the note about this in the terraform.stopHook + // implementation for more. 
+ t.Errorf("wrong exit code %d; want 0\noutput:\n%s", code, ui.OutputWriter.String()) } select { case <-cancelled: default: - t.Fatal("command not cancelled") + t.Error("command not cancelled") } } diff --git a/terraform/hook_stop.go b/terraform/hook_stop.go index 72c004d3349e..811fb337cc37 100644 --- a/terraform/hook_stop.go +++ b/terraform/hook_stop.go @@ -76,6 +76,10 @@ func (h *stopHook) PostStateUpdate(new *states.State) (HookAction, error) { func (h *stopHook) hook() (HookAction, error) { if h.Stopped() { + // FIXME: This should really return an error since stopping partway + // through is not a successful run-to-completion, but we'll need to + // introduce that cautiously since existing automation solutions may + // be depending on this behavior. return HookActionHalt, nil } From e20346bf4f7d09e2d2ac181ecba925fe3735bc4a Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 7 Nov 2018 15:45:06 -0800 Subject: [PATCH 058/149] command: fix TestMeta_process The mission of this process method used to include dealing with auto-loaded tfvars files, but it doesn't do that anymore. It does still deal with the -no-color option, but the test wasn't exercising that part before. Now the test here focuses on the -no-color behavior. The process method still has a "vars" flag argument which is no longer used. Since this is an unexported method we could potentially address this but this commit is intentionally limited only to fixing the test. --- command/meta.go | 4 +- command/meta_test.go | 88 +++++++++++++++++++++++++++++--------------- 2 files changed, 62 insertions(+), 30 deletions(-) diff --git a/command/meta.go b/command/meta.go index f8a637be0ace..b73989c35616 100644 --- a/command/meta.go +++ b/command/meta.go @@ -413,7 +413,9 @@ func (m *Meta) moduleStorage(root string, mode module.GetMode) *module.Storage { // will potentially modify the args in-place. It will return the resulting // slice. // -// vars says whether or not we support variables. 
+// vars is now ignored. It used to control whether to process variables, but +// that is no longer the responsibility of this function. (That happens +// instead in Meta.collectVariableValues.) func (m *Meta) process(args []string, vars bool) ([]string, error) { // We do this so that we retain the ability to technically call // process multiple times, even if we have no plans to do so diff --git a/command/meta_test.go b/command/meta_test.go index b42312924ced..b03fdff647fd 100644 --- a/command/meta_test.go +++ b/command/meta_test.go @@ -1,12 +1,15 @@ package command import ( + "fmt" "io/ioutil" "os" "path/filepath" "reflect" "testing" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/terraform" ) @@ -269,7 +272,13 @@ func TestMeta_process(t *testing.T) { defer os.RemoveAll(d) defer testChdir(t, d)() - // Create two vars files + // At one point it was the responsibility of this process function to + // insert fake additional -var-file options into the command line + // if the automatic tfvars files were present. This is no longer the + // responsibility of process (it happens in collectVariableValues instead) + // but we're still testing with these files in place to verify that + // they _aren't_ being interpreted by process, since that could otherwise + // cause them to be added more than once and mess up the precedence order. 
defaultVarsfile := "terraform.tfvars" err := ioutil.WriteFile( filepath.Join(d, defaultVarsfile), @@ -304,33 +313,54 @@ func TestMeta_process(t *testing.T) { t.Fatalf("err: %s", err) } - m := new(Meta) - args := []string{} - args, err = m.process(args, true) - if err != nil { - t.Fatalf("err: %s", err) - } - - if len(args) != 6 { - t.Fatalf("expected 6 args, got %v", args) - } - - if args[0] != "-var-file-default" { - t.Fatalf("expected %q, got %q", "-var-file-default", args[0]) - } - if args[1] != defaultVarsfile { - t.Fatalf("expected %q, got %q", defaultVarsfile, args[1]) - } - if args[2] != "-var-file-default" { - t.Fatalf("expected %q, got %q", "-var-file-default", args[2]) - } - if args[3] != fileFirstAlphabetical { - t.Fatalf("expected %q, got %q", fileFirstAlphabetical, args[3]) - } - if args[4] != "-var-file-default" { - t.Fatalf("expected %q, got %q", "-var-file-default", args[4]) - } - if args[5] != fileLastAlphabetical { - t.Fatalf("expected %q, got %q", fileLastAlphabetical, args[5]) + tests := []struct { + GivenArgs []string + FilteredArgs []string + ExtraCheck func(*testing.T, *Meta) + }{ + { + []string{}, + []string{}, + func(t *testing.T, m *Meta) { + if got, want := m.color, true; got != want { + t.Errorf("wrong m.color value %#v; want %#v", got, want) + } + if got, want := m.Color, true; got != want { + t.Errorf("wrong m.Color value %#v; want %#v", got, want) + } + }, + }, + { + []string{"-no-color"}, + []string{}, + func(t *testing.T, m *Meta) { + if got, want := m.color, false; got != want { + t.Errorf("wrong m.color value %#v; want %#v", got, want) + } + if got, want := m.Color, false; got != want { + t.Errorf("wrong m.Color value %#v; want %#v", got, want) + } + }, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s", test.GivenArgs), func(t *testing.T) { + m := new(Meta) + m.Color = true // this is the default also for normal use, overridden by -no-color + args := test.GivenArgs + args, err = m.process(args, true) + if err != 
nil { + t.Fatalf("err: %s", err) + } + + if !cmp.Equal(test.FilteredArgs, args) { + t.Errorf("wrong filtered arguments\n%s", cmp.Diff(test.FilteredArgs, args)) + } + + if test.ExtraCheck != nil { + test.ExtraCheck(t, m) + } + }) } } From c3d11b762b19df3a3bee0def34a8166ef60a8c02 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 7 Nov 2018 17:20:46 -0800 Subject: [PATCH 059/149] command: Fix testBackendState The hashing function for cached backend configuration is different now, so our hard-coded hash of the HTTP backend address wasn't working anymore. Here we update the hash so that tests using this test backend will work again. Rather than leaving it hard-coded, we'll instead compute it the same way as "terraform init" would. In practice only one test is actually using this function right now, so we also update the test fixture for that test (TestPlan_outBackend) to match the new expectations, though as of this commit it's still failing with an unrelated error. --- command/command_test.go | 23 ++++++++++++++++++- .../test-fixtures/plan-out-backend/main.tf | 7 +++--- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/command/command_test.go b/command/command_test.go index f5a17cd6333a..b7921caa7bf5 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -664,6 +664,19 @@ func testInputMap(t *testing.T, answers map[string]string) func() { // testBackendState is used to make a test HTTP server to test a configured // backend. This returns the complete state that can be saved. Use // `testStateFileRemote` to write the returned state. 
+// +// When using this function, the configuration fixture for the test must +// include an empty configuration block for the HTTP backend, like this: +// +// terraform { +// backend "http" { +// } +// } +// +// If such a block isn't present, or if it isn't empty, then an error will +// be returned about the backend configuration having changed and that +// "terraform init" must be run, since the test backend config cache created +// by this function contains the hash for an empty configuration. func testBackendState(t *testing.T, s *terraform.State, c int) (*terraform.State, *httptest.Server) { t.Helper() @@ -696,11 +709,19 @@ func testBackendState(t *testing.T, s *terraform.State, c int) (*terraform.State srv := httptest.NewServer(http.HandlerFunc(cb)) + backendConfig := &configs.Backend{ + Type: "http", + Config: configs.SynthBody("", map[string]cty.Value{}), + } + b := backendinit.Backend("http")() + configSchema := b.ConfigSchema() + hash := backendConfig.Hash(configSchema) + state := terraform.NewState() state.Backend = &terraform.BackendState{ Type: "http", ConfigRaw: json.RawMessage(fmt.Sprintf(`{"address":%q}`, srv.URL)), - Hash: 2529831861221416334, + Hash: hash, } return state, srv diff --git a/command/test-fixtures/plan-out-backend/main.tf b/command/test-fixtures/plan-out-backend/main.tf index e1be95fa8d0c..38ba171da4c2 100644 --- a/command/test-fixtures/plan-out-backend/main.tf +++ b/command/test-fixtures/plan-out-backend/main.tf @@ -1,9 +1,8 @@ terraform { - backend "http" { - test = true - } + backend "http" { + } } resource "test_instance" "foo" { - ami = "bar" + ami = "bar" } From 1e45d30036aeb25ff1bd9065169b895f75c2cfff Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 7 Nov 2018 17:43:46 -0800 Subject: [PATCH 060/149] command: Fix TestPlan_outBackend We missed fixing this up during the big updates for the new plan/state models since the failures were being masked by testBackendState being broken. 
This is the same sort of update made to many other tests: add schema to the mock provider, adjust for the new plan/state types, and make allowances for the new built-in diffing behavior in core. --- command/plan_test.go | 49 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 44 insertions(+), 5 deletions(-) diff --git a/command/plan_test.go b/command/plan_test.go index b646ce7e4547..1abd6c9f375a 100644 --- a/command/plan_test.go +++ b/command/plan_test.go @@ -5,7 +5,6 @@ import ( "io/ioutil" "os" "path/filepath" - "reflect" "strings" "sync" "testing" @@ -16,6 +15,7 @@ import ( "github.com/zclconf/go-cty/cty" "github.com/hashicorp/terraform/addrs" + backendinit "github.com/hashicorp/terraform/backend/init" "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/helper/copy" "github.com/hashicorp/terraform/plans" @@ -290,6 +290,9 @@ func TestPlan_outBackend(t *testing.T) { Type: "test_instance", Primary: &terraform.InstanceState{ ID: "bar", + Attributes: map[string]string{ + "ami": "bar", + }, }, }, }, @@ -305,6 +308,27 @@ func TestPlan_outBackend(t *testing.T) { outPath := "foo" p := testProvider() + p.GetSchemaReturn = &terraform.ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_instance": { + Attributes: map[string]*configschema.Attribute{ + "id": { + Type: cty.String, + Computed: true, + }, + "ami": { + Type: cty.String, + Optional: true, + }, + }, + }, + }, + } + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + return providers.PlanResourceChangeResponse{ + PlannedState: req.ProposedNewState, + } + } ui := cli.NewMockUi() c := &PlanCommand{ Meta: Meta{ @@ -326,11 +350,26 @@ func TestPlan_outBackend(t *testing.T) { t.Fatalf("Expected empty plan to be written to plan file, got: %s", spew.Sdump(plan)) } - if plan.Backend.Type == "" || plan.Backend.Config == nil { - t.Fatal("should have backend info") + if got, want := plan.Backend.Type, 
"http"; got != want { + t.Errorf("wrong backend type %q; want %q", got, want) + } + if got, want := plan.Backend.Workspace, "default"; got != want { + t.Errorf("wrong backend workspace %q; want %q", got, want) } - if !reflect.DeepEqual(plan.Backend, dataState.Backend) { - t.Fatalf("wrong backend config in plan\ngot: %swant: %s", spew.Sdump(plan.Backend), spew.Sdump(dataState.Backend)) + { + httpBackend := backendinit.Backend("http")() + schema := httpBackend.ConfigSchema() + got, err := plan.Backend.Config.Decode(schema.ImpliedType()) + if err != nil { + t.Fatalf("failed to decode backend config in plan: %s", err) + } + want, err := dataState.Backend.Config(schema) + if err != nil { + t.Fatalf("failed to decode cached config: %s", err) + } + if !want.RawEquals(got) { + t.Errorf("wrong backend config\ngot: %#v\nwant: %#v", got, want) + } } } From 6806d7cba5b7f80b20adc25f448aa6587dd320a5 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Thu, 8 Nov 2018 13:13:13 -0800 Subject: [PATCH 061/149] vendor: catch up vendoring We've missed a few recent additions to go.mod in the vendor directory. We need to keep this updated for the moment until all of the surrounding tooling is ready to go all-in with Go 1.11 modules. 
--- .../github.com/google/go-querystring/LICENSE | 27 + .../google/go-querystring/query/encode.go | 320 +++++++++ .../github.com/hashicorp/go-slug/.gitignore | 12 + vendor/github.com/hashicorp/go-slug/LICENSE | 373 ++++++++++ vendor/github.com/hashicorp/go-slug/README.md | 70 ++ vendor/github.com/hashicorp/go-slug/slug.go | 215 ++++++ vendor/github.com/hashicorp/go-tfe/LICENSE | 354 +++++++++ vendor/github.com/hashicorp/go-tfe/README.md | 131 ++++ vendor/github.com/hashicorp/go-tfe/apply.go | 132 ++++ .../hashicorp/go-tfe/configuration_version.go | 199 +++++ .../github.com/hashicorp/go-tfe/logreader.go | 138 ++++ .../hashicorp/go-tfe/oauth_client.go | 199 +++++ .../hashicorp/go-tfe/oauth_token.go | 150 ++++ .../hashicorp/go-tfe/organization.go | 310 ++++++++ .../hashicorp/go-tfe/organization_token.go | 99 +++ vendor/github.com/hashicorp/go-tfe/plan.go | 133 ++++ vendor/github.com/hashicorp/go-tfe/policy.go | 282 ++++++++ .../hashicorp/go-tfe/policy_check.go | 220 ++++++ vendor/github.com/hashicorp/go-tfe/run.go | 311 ++++++++ vendor/github.com/hashicorp/go-tfe/ssh_key.go | 198 +++++ .../hashicorp/go-tfe/state_version.go | 216 ++++++ vendor/github.com/hashicorp/go-tfe/team.go | 165 +++++ .../hashicorp/go-tfe/team_access.go | 184 +++++ .../hashicorp/go-tfe/team_member.go | 139 ++++ .../github.com/hashicorp/go-tfe/team_token.go | 99 +++ vendor/github.com/hashicorp/go-tfe/tfe.go | 420 +++++++++++ .../hashicorp/go-tfe/type_helpers.go | 46 ++ vendor/github.com/hashicorp/go-tfe/user.go | 93 +++ .../hashicorp/go-tfe/validations.go | 19 + .../github.com/hashicorp/go-tfe/variable.go | 243 +++++++ .../github.com/hashicorp/go-tfe/workspace.go | 447 ++++++++++++ .../svanharmelen/jsonapi/.gitignore | 1 + .../svanharmelen/jsonapi/.travis.yml | 7 + .../github.com/svanharmelen/jsonapi/LICENSE | 21 + .../github.com/svanharmelen/jsonapi/README.md | 457 ++++++++++++ .../svanharmelen/jsonapi/constants.go | 55 ++ vendor/github.com/svanharmelen/jsonapi/doc.go | 70 ++ 
.../github.com/svanharmelen/jsonapi/errors.go | 55 ++ .../github.com/svanharmelen/jsonapi/node.go | 121 ++++ .../svanharmelen/jsonapi/request.go | 680 ++++++++++++++++++ .../svanharmelen/jsonapi/response.go | 539 ++++++++++++++ .../svanharmelen/jsonapi/runtime.go | 103 +++ vendor/modules.txt | 8 + 43 files changed, 8061 insertions(+) create mode 100644 vendor/github.com/google/go-querystring/LICENSE create mode 100644 vendor/github.com/google/go-querystring/query/encode.go create mode 100644 vendor/github.com/hashicorp/go-slug/.gitignore create mode 100644 vendor/github.com/hashicorp/go-slug/LICENSE create mode 100644 vendor/github.com/hashicorp/go-slug/README.md create mode 100644 vendor/github.com/hashicorp/go-slug/slug.go create mode 100644 vendor/github.com/hashicorp/go-tfe/LICENSE create mode 100644 vendor/github.com/hashicorp/go-tfe/README.md create mode 100644 vendor/github.com/hashicorp/go-tfe/apply.go create mode 100644 vendor/github.com/hashicorp/go-tfe/configuration_version.go create mode 100644 vendor/github.com/hashicorp/go-tfe/logreader.go create mode 100644 vendor/github.com/hashicorp/go-tfe/oauth_client.go create mode 100644 vendor/github.com/hashicorp/go-tfe/oauth_token.go create mode 100644 vendor/github.com/hashicorp/go-tfe/organization.go create mode 100644 vendor/github.com/hashicorp/go-tfe/organization_token.go create mode 100644 vendor/github.com/hashicorp/go-tfe/plan.go create mode 100644 vendor/github.com/hashicorp/go-tfe/policy.go create mode 100644 vendor/github.com/hashicorp/go-tfe/policy_check.go create mode 100644 vendor/github.com/hashicorp/go-tfe/run.go create mode 100644 vendor/github.com/hashicorp/go-tfe/ssh_key.go create mode 100644 vendor/github.com/hashicorp/go-tfe/state_version.go create mode 100644 vendor/github.com/hashicorp/go-tfe/team.go create mode 100644 vendor/github.com/hashicorp/go-tfe/team_access.go create mode 100644 vendor/github.com/hashicorp/go-tfe/team_member.go create mode 100644 
vendor/github.com/hashicorp/go-tfe/team_token.go create mode 100644 vendor/github.com/hashicorp/go-tfe/tfe.go create mode 100644 vendor/github.com/hashicorp/go-tfe/type_helpers.go create mode 100644 vendor/github.com/hashicorp/go-tfe/user.go create mode 100644 vendor/github.com/hashicorp/go-tfe/validations.go create mode 100644 vendor/github.com/hashicorp/go-tfe/variable.go create mode 100644 vendor/github.com/hashicorp/go-tfe/workspace.go create mode 100644 vendor/github.com/svanharmelen/jsonapi/.gitignore create mode 100644 vendor/github.com/svanharmelen/jsonapi/.travis.yml create mode 100644 vendor/github.com/svanharmelen/jsonapi/LICENSE create mode 100644 vendor/github.com/svanharmelen/jsonapi/README.md create mode 100644 vendor/github.com/svanharmelen/jsonapi/constants.go create mode 100644 vendor/github.com/svanharmelen/jsonapi/doc.go create mode 100644 vendor/github.com/svanharmelen/jsonapi/errors.go create mode 100644 vendor/github.com/svanharmelen/jsonapi/node.go create mode 100644 vendor/github.com/svanharmelen/jsonapi/request.go create mode 100644 vendor/github.com/svanharmelen/jsonapi/response.go create mode 100644 vendor/github.com/svanharmelen/jsonapi/runtime.go diff --git a/vendor/github.com/google/go-querystring/LICENSE b/vendor/github.com/google/go-querystring/LICENSE new file mode 100644 index 000000000000..ae121a1e46df --- /dev/null +++ b/vendor/github.com/google/go-querystring/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 Google. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/go-querystring/query/encode.go b/vendor/github.com/google/go-querystring/query/encode.go new file mode 100644 index 000000000000..37080b19b5d9 --- /dev/null +++ b/vendor/github.com/google/go-querystring/query/encode.go @@ -0,0 +1,320 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package query implements encoding of structs into URL query parameters. +// +// As a simple example: +// +// type Options struct { +// Query string `url:"q"` +// ShowAll bool `url:"all"` +// Page int `url:"page"` +// } +// +// opt := Options{ "foo", true, 2 } +// v, _ := query.Values(opt) +// fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2" +// +// The exact mapping between Go values and url.Values is described in the +// documentation for the Values() function. 
+package query + +import ( + "bytes" + "fmt" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +var timeType = reflect.TypeOf(time.Time{}) + +var encoderType = reflect.TypeOf(new(Encoder)).Elem() + +// Encoder is an interface implemented by any type that wishes to encode +// itself into URL values in a non-standard way. +type Encoder interface { + EncodeValues(key string, v *url.Values) error +} + +// Values returns the url.Values encoding of v. +// +// Values expects to be passed a struct, and traverses it recursively using the +// following encoding rules. +// +// Each exported struct field is encoded as a URL parameter unless +// +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option +// +// The empty values are false, 0, any nil pointer or interface value, any array +// slice, map, or string of length zero, and any time.Time that returns true +// for IsZero(). +// +// The URL parameter name defaults to the struct field name but can be +// specified in the struct field's tag value. The "url" key in the struct +// field's tag value is the key name, followed by an optional comma and +// options. For example: +// +// // Field is ignored by this package. +// Field int `url:"-"` +// +// // Field appears as URL parameter "myName". +// Field int `url:"myName"` +// +// // Field appears as URL parameter "myName" and the field is omitted if +// // its value is empty +// Field int `url:"myName,omitempty"` +// +// // Field appears as URL parameter "Field" (the default), but the field +// // is skipped if empty. Note the leading comma. +// Field int `url:",omitempty"` +// +// For encoding individual field values, the following type-dependent rules +// apply: +// +// Boolean values default to encoding as the strings "true" or "false". +// Including the "int" option signals that the field should be encoded as the +// strings "1" or "0". +// +// time.Time values default to encoding as RFC3339 timestamps. 
Including the +// "unix" option signals that the field should be encoded as a Unix time (see +// time.Unix()) +// +// Slice and Array values default to encoding as multiple URL values of the +// same name. Including the "comma" option signals that the field should be +// encoded as a single comma-delimited value. Including the "space" option +// similarly encodes the value as a single space-delimited string. Including +// the "semicolon" option will encode the value as a semicolon-delimited string. +// Including the "brackets" option signals that the multiple URL values should +// have "[]" appended to the value name. "numbered" will append a number to +// the end of each incidence of the value name, example: +// name0=value0&name1=value1, etc. +// +// Anonymous struct fields are usually encoded as if their inner exported +// fields were fields in the outer struct, subject to the standard Go +// visibility rules. An anonymous struct field with a name given in its URL +// tag is treated as having that name, rather than being anonymous. +// +// Non-nil pointer values are encoded as the value pointed to. +// +// Nested structs are encoded including parent fields in value names for +// scoping. e.g: +// +// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO" +// +// All other values are encoded using their default string representation. +// +// Multiple fields that encode to the same URL parameter name will be included +// as multiple URL values of the same name. +func Values(v interface{}) (url.Values, error) { + values := make(url.Values) + val := reflect.ValueOf(v) + for val.Kind() == reflect.Ptr { + if val.IsNil() { + return values, nil + } + val = val.Elem() + } + + if v == nil { + return values, nil + } + + if val.Kind() != reflect.Struct { + return nil, fmt.Errorf("query: Values() expects struct input. 
Got %v", val.Kind()) + } + + err := reflectValue(values, val, "") + return values, err +} + +// reflectValue populates the values parameter from the struct fields in val. +// Embedded structs are followed recursively (using the rules defined in the +// Values function documentation) breadth-first. +func reflectValue(values url.Values, val reflect.Value, scope string) error { + var embedded []reflect.Value + + typ := val.Type() + for i := 0; i < typ.NumField(); i++ { + sf := typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + + sv := val.Field(i) + tag := sf.Tag.Get("url") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if name == "" { + if sf.Anonymous && sv.Kind() == reflect.Struct { + // save embedded struct for later processing + embedded = append(embedded, sv) + continue + } + + name = sf.Name + } + + if scope != "" { + name = scope + "[" + name + "]" + } + + if opts.Contains("omitempty") && isEmptyValue(sv) { + continue + } + + if sv.Type().Implements(encoderType) { + if !reflect.Indirect(sv).IsValid() { + sv = reflect.New(sv.Type().Elem()) + } + + m := sv.Interface().(Encoder) + if err := m.EncodeValues(name, &values); err != nil { + return err + } + continue + } + + if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array { + var del byte + if opts.Contains("comma") { + del = ',' + } else if opts.Contains("space") { + del = ' ' + } else if opts.Contains("semicolon") { + del = ';' + } else if opts.Contains("brackets") { + name = name + "[]" + } + + if del != 0 { + s := new(bytes.Buffer) + first := true + for i := 0; i < sv.Len(); i++ { + if first { + first = false + } else { + s.WriteByte(del) + } + s.WriteString(valueString(sv.Index(i), opts)) + } + values.Add(name, s.String()) + } else { + for i := 0; i < sv.Len(); i++ { + k := name + if opts.Contains("numbered") { + k = fmt.Sprintf("%s%d", name, i) + } + values.Add(k, valueString(sv.Index(i), opts)) + } + } + continue + } + + for sv.Kind() == reflect.Ptr 
{ + if sv.IsNil() { + break + } + sv = sv.Elem() + } + + if sv.Type() == timeType { + values.Add(name, valueString(sv, opts)) + continue + } + + if sv.Kind() == reflect.Struct { + reflectValue(values, sv, name) + continue + } + + values.Add(name, valueString(sv, opts)) + } + + for _, f := range embedded { + if err := reflectValue(values, f, scope); err != nil { + return err + } + } + + return nil +} + +// valueString returns the string representation of a value. +func valueString(v reflect.Value, opts tagOptions) string { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + return "" + } + v = v.Elem() + } + + if v.Kind() == reflect.Bool && opts.Contains("int") { + if v.Bool() { + return "1" + } + return "0" + } + + if v.Type() == timeType { + t := v.Interface().(time.Time) + if opts.Contains("unix") { + return strconv.FormatInt(t.Unix(), 10) + } + return t.Format(time.RFC3339) + } + + return fmt.Sprint(v.Interface()) +} + +// isEmptyValue checks if a value should be considered empty for the purposes +// of omitting fields with the "omitempty" option. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + + if v.Type() == timeType { + return v.Interface().(time.Time).IsZero() + } + + return false +} + +// tagOptions is the string following a comma in a struct field's "url" tag, or +// the empty string. It does not include the leading comma. +type tagOptions []string + +// parseTag splits a struct field's url tag into its name and comma-separated +// options. 
+func parseTag(tag string) (string, tagOptions) { + s := strings.Split(tag, ",") + return s[0], s[1:] +} + +// Contains checks whether the tagOptions contains the specified option. +func (o tagOptions) Contains(option string) bool { + for _, s := range o { + if s == option { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/go-slug/.gitignore b/vendor/github.com/hashicorp/go-slug/.gitignore new file mode 100644 index 000000000000..f1c181ec9c5c --- /dev/null +++ b/vendor/github.com/hashicorp/go-slug/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/hashicorp/go-slug/LICENSE b/vendor/github.com/hashicorp/go-slug/LICENSE new file mode 100644 index 000000000000..a612ad9813b0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-slug/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. 
For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-slug/README.md b/vendor/github.com/hashicorp/go-slug/README.md new file mode 100644 index 000000000000..978314f1b1f3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-slug/README.md @@ -0,0 +1,70 @@ +# go-slug + +[![Build Status](https://travis-ci.org/hashicorp/go-slug.svg?branch=master)](https://travis-ci.org/hashicorp/go-slug) +[![GitHub license](https://img.shields.io/github/license/hashicorp/go-slug.svg)](https://github.com/hashicorp/go-slug/blob/master/LICENSE) +[![GoDoc](https://godoc.org/github.com/hashicorp/go-slug?status.svg)](https://godoc.org/github.com/hashicorp/go-slug) +[![Go Report Card](https://goreportcard.com/badge/github.com/hashicorp/go-slug)](https://goreportcard.com/report/github.com/hashicorp/go-slug) +[![GitHub issues](https://img.shields.io/github/issues/hashicorp/go-slug.svg)](https://github.com/hashicorp/go-slug/issues) + +Package `go-slug` offers functions for packing and unpacking Terraform Enterprise +compatible slugs. Slugs are gzip compressed tar files containing Terraform configuration files. + +## Installation + +Installation can be done with a normal `go get`: + +``` +go get -u github.com/hashicorp/go-slug +``` + +## Documentation + +For the complete usage of `go-slug`, see the full [package docs](https://godoc.org/github.com/hashicorp/go-slug). + +## Example + +Packing or unpacking a slug is pretty straightforward as shown in the +following example: + +```go +package main + +import ( + "bytes" + "io/ioutil" + "log" + "os" + + slug "github.com/hashicorp/go-slug" +) + +func main() { + // First create a buffer for storing the slug. 
+ slug := bytes.NewBuffer(nil) + + // Then call the Pack function with a directory path containing the + // configuration files and an io.Writer to write the slug to. + if _, err := slug.Pack("test-fixtures/archive-dir", buf); err != nil { + log.Fatal(err) + } + + // Create a directory to unpack the slug contents into. + dst, err := ioutil.TempDir("", "slug") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(dst) + + // Unpacking a slug is done by calling the Unpack function with an + // io.Reader to read the slug from and a directory path of an existing + // directory to store the unpacked configuration files. + if err := slug.Unpack(buf, dst); err != nil { + log.Fatal(err) + } +} +``` + +## Issues and Contributing + +If you find an issue with this package, please report an issue. If you'd like, +we welcome any contributions. Fork this repository and submit a pull request. diff --git a/vendor/github.com/hashicorp/go-slug/slug.go b/vendor/github.com/hashicorp/go-slug/slug.go new file mode 100644 index 000000000000..b7a62c963061 --- /dev/null +++ b/vendor/github.com/hashicorp/go-slug/slug.go @@ -0,0 +1,215 @@ +package slug + +import ( + "archive/tar" + "compress/gzip" + "fmt" + "io" + "os" + "path/filepath" +) + +// Meta provides detailed information about a slug. +type Meta struct { + // The list of files contained in the slug. + Files []string + + // Total size of the slug in bytes. + Size int64 +} + +// Pack creates a slug from a directory src, and writes the new +// slug to w. Returns metadata about the slug and any error. +func Pack(src string, w io.Writer) (*Meta, error) { + // Gzip compress all the output data + gzipW := gzip.NewWriter(w) + + // Tar the file contents + tarW := tar.NewWriter(gzipW) + + // Track the metadata details as we go. 
+ meta := &Meta{} + + // Walk the tree of files + err := filepath.Walk(src, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Check the file type and if we need to write the body + keepFile, writeBody := checkFileMode(info.Mode()) + if !keepFile { + return nil + } + + // Get the relative path from the unpack directory + subpath, err := filepath.Rel(src, path) + if err != nil { + return fmt.Errorf("Failed to get relative path for file %q: %v", path, err) + } + if subpath == "." { + return nil + } + + // Read the symlink target. We don't track the error because + // it doesn't matter if there is an error. + target, _ := os.Readlink(path) + + // Build the file header for the tar entry + header, err := tar.FileInfoHeader(info, target) + if err != nil { + return fmt.Errorf("Failed creating archive header for file %q: %v", path, err) + } + + // Modify the header to properly be the full subpath + header.Name = subpath + if info.IsDir() { + header.Name += "/" + } + + // Write the header first to the archive. + if err := tarW.WriteHeader(header); err != nil { + return fmt.Errorf("Failed writing archive header for file %q: %v", path, err) + } + + // Account for the file in the list + meta.Files = append(meta.Files, header.Name) + + // Skip writing file data for certain file types (above). + if !writeBody { + return nil + } + + // Add the size since we are going to write the body. 
+ meta.Size += info.Size() + + f, err := os.Open(path) + if err != nil { + return fmt.Errorf("Failed opening file %q for archiving: %v", path, err) + } + defer f.Close() + + if _, err = io.Copy(tarW, f); err != nil { + return fmt.Errorf("Failed copying file %q to archive: %v", path, err) + } + + return nil + }) + if err != nil { + return nil, err + } + + // Flush the tar writer + if err := tarW.Close(); err != nil { + return nil, fmt.Errorf("Failed to close the tar archive: %v", err) + } + + // Flush the gzip writer + if err := gzipW.Close(); err != nil { + return nil, fmt.Errorf("Failed to close the gzip writer: %v", err) + } + + return meta, nil +} + +// Unpack is used to read and extract the contents of a slug to +// directory dst. Returns any error. +func Unpack(r io.Reader, dst string) error { + // Decompress as we read + uncompressed, err := gzip.NewReader(r) + if err != nil { + return fmt.Errorf("Failed to uncompress slug: %v", err) + } + + // Untar as we read + untar := tar.NewReader(uncompressed) + + // Unpackage all the contents into the directory + for { + header, err := untar.Next() + if err == io.EOF { + break + } + if err != nil { + return fmt.Errorf("Failed to untar slug: %v", err) + } + + // Get rid of absolute paths + path := header.Name + if path[0] == '/' { + path = path[1:] + } + path = filepath.Join(dst, path) + + // Make the directories to the path + dir := filepath.Dir(path) + if err := os.MkdirAll(dir, 0755); err != nil { + return fmt.Errorf("Failed to create directory %q: %v", dir, err) + } + + // If we have a symlink, just link it. 
+ if header.Typeflag == tar.TypeSymlink { + if err := os.Symlink(header.Linkname, path); err != nil { + return fmt.Errorf("Failed creating symlink %q => %q: %v", + path, header.Linkname, err) + } + continue + } + + // Only unpack regular files from this point on + if header.Typeflag == tar.TypeDir { + continue + } else if header.Typeflag != tar.TypeReg && header.Typeflag != tar.TypeRegA { + return fmt.Errorf("Failed creating %q: unsupported type %c", path, + header.Typeflag) + } + + // Open a handle to the destination + fh, err := os.Create(path) + if err != nil { + // This mimics tar's behavior wrt the tar file containing duplicate files + // and it allowing later ones to clobber earlier ones even if the file + // has perms that don't allow overwriting + if os.IsPermission(err) { + os.Chmod(path, 0600) + fh, err = os.Create(path) + } + + if err != nil { + return fmt.Errorf("Failed creating file %q: %v", path, err) + } + } + + // Copy the contents + _, err = io.Copy(fh, untar) + fh.Close() + if err != nil { + return fmt.Errorf("Failed to copy slug file %q: %v", path, err) + } + + // Restore the file mode. We have to do this after writing the file, + // since it is possible we have a read-only mode. + mode := header.FileInfo().Mode() + if err := os.Chmod(path, mode); err != nil { + return fmt.Errorf("Failed setting permissions on %q: %v", path, err) + } + } + return nil +} + +// checkFileMode is used to examine an os.FileMode and determine if it should +// be included in the archive, and if it has a data body which needs writing. 
+func checkFileMode(m os.FileMode) (keep, body bool) { + switch { + case m.IsRegular(): + return true, true + + case m.IsDir(): + return true, false + + case m&os.ModeSymlink != 0: + return true, false + } + + return false, false +} diff --git a/vendor/github.com/hashicorp/go-tfe/LICENSE b/vendor/github.com/hashicorp/go-tfe/LICENSE new file mode 100644 index 000000000000..c33dcc7c928c --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. 
“Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. 
under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. 
Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. 
Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. 
If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-tfe/README.md b/vendor/github.com/hashicorp/go-tfe/README.md new file mode 100644 index 000000000000..05bbc78d8a3e --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/README.md @@ -0,0 +1,131 @@ +Terraform Enterprise Go Client +============================== + +[![Build Status](https://travis-ci.org/hashicorp/go-tfe.svg?branch=master)](https://travis-ci.org/hashicorp/go-tfe) +[![GitHub license](https://img.shields.io/github/license/hashicorp/go-tfe.svg)](https://github.com/hashicorp/go-tfe/blob/master/LICENSE) +[![GoDoc](https://godoc.org/github.com/hashicorp/go-tfe?status.svg)](https://godoc.org/github.com/hashicorp/go-tfe) +[![Go Report Card](https://goreportcard.com/badge/github.com/hashicorp/go-tfe)](https://goreportcard.com/report/github.com/hashicorp/go-tfe) +[![GitHub issues](https://img.shields.io/github/issues/hashicorp/go-tfe.svg)](https://github.com/hashicorp/go-tfe/issues) + +This is an API client for [Terraform Enterprise](https://www.hashicorp.com/products/terraform). + +## NOTE + +The Terraform Enterprise API endpoints are in beta and are subject to change! +So that means this API client is also in beta and is also subject to change. We +will indicate any breaking changes by releasing new versions. Until the release +of v1.0, any minor version changes will indicate possible breaking changes. 
Patch +version changes will be used for both bugfixes and non-breaking changes. + +## Coverage + +Currently the following endpoints are supported: + +- [x] [Accounts](https://www.terraform.io/docs/enterprise/api/account.html) +- [x] [Configuration Versions](https://www.terraform.io/docs/enterprise/api/configuration-versions.html) +- [x] [OAuth Clients](https://www.terraform.io/docs/enterprise/api/oauth-clients.html) +- [x] [OAuth Tokens](https://www.terraform.io/docs/enterprise/api/oauth-tokens.html) +- [x] [Organizations](https://www.terraform.io/docs/enterprise/api/organizations.html) +- [x] [Organization Tokens](https://www.terraform.io/docs/enterprise/api/organization-tokens.html) +- [x] [Policies](https://www.terraform.io/docs/enterprise/api/policies.html) +- [x] [Policy Checks](https://www.terraform.io/docs/enterprise/api/policy-checks.html) +- [ ] [Registry Modules](https://www.terraform.io/docs/enterprise/api/modules.html) +- [x] [Runs](https://www.terraform.io/docs/enterprise/api/run.html) +- [x] [SSH Keys](https://www.terraform.io/docs/enterprise/api/ssh-keys.html) +- [x] [State Versions](https://www.terraform.io/docs/enterprise/api/state-versions.html) +- [x] [Team Access](https://www.terraform.io/docs/enterprise/api/team-access.html) +- [x] [Team Memberships](https://www.terraform.io/docs/enterprise/api/team-members.html) +- [x] [Team Tokens](https://www.terraform.io/docs/enterprise/api/team-tokens.html) +- [x] [Teams](https://www.terraform.io/docs/enterprise/api/teams.html) +- [x] [Variables](https://www.terraform.io/docs/enterprise/api/variables.html) +- [x] [Workspaces](https://www.terraform.io/docs/enterprise/api/workspaces.html) +- [ ] [Admin](https://www.terraform.io/docs/enterprise/api/admin/index.html) + +## Installation + +Installation can be done with a normal `go get`: + +``` +go get -u github.com/hashicorp/go-tfe +``` + +## Documentation + +For complete usage of the API client, see the full [package 
docs](https://godoc.org/github.com/hashicorp/go-tfe). + +## Usage + +```go +import tfe "github.com/hashicorp/go-tfe" +``` + +Construct a new TFE client, then use the various endpoints on the client to +access different parts of the Terraform Enterprise API. For example, to list +all organizations: + +```go +config := &tfe.Config{ + Token: "insert-your-token-here", +} + +client, err := tfe.NewClient(config) +if err != nil { + log.Fatal(err) +} + +orgs, err := client.Organizations.List(context.Background(), OrganizationListOptions{}) +if err != nil { + log.Fatal(err) +} +``` + +## Examples + +The [examples](https://github.com/hashicorp/go-tfe/tree/master/examples) directory +contains a couple of examples. One of which is listed here as well: + +```go +package main + +import ( + "log" + + tfe "github.com/hashicorp/go-tfe" +) + +func main() { + config := &tfe.Config{ + Token: "insert-your-token-here", + } + + client, err := tfe.NewClient(config) + if err != nil { + log.Fatal(err) + } + + // Create a context + ctx := context.Background() + + // Create a new organization + options := tfe.OrganizationCreateOptions{ + Name: tfe.String("example"), + Email: tfe.String("info@example.com"), + } + + org, err := client.Organizations.Create(ctx, options) + if err != nil { + log.Fatal(err) + } + + // Delete an organization + err = client.Organizations.Delete(ctx, org.Name) + if err != nil { + log.Fatal(err) + } +} +``` + +## Issues and Contributing + +If you find an issue with this package, please report an issue. If you'd like, +we welcome any contributions. Fork this repository and submit a pull request. diff --git a/vendor/github.com/hashicorp/go-tfe/apply.go b/vendor/github.com/hashicorp/go-tfe/apply.go new file mode 100644 index 000000000000..d99eaab53396 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/apply.go @@ -0,0 +1,132 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "io" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. 
+var _ Applies = (*applies)(nil) + +// Applies describes all the apply related methods that the Terraform +// Enterprise API supports. +// +// TFE API docs: https://www.terraform.io/docs/enterprise/api/apply.html +type Applies interface { + // Read an apply by its ID. + Read(ctx context.Context, applyID string) (*Apply, error) + + // Logs retrieves the logs of an apply. + Logs(ctx context.Context, applyID string) (io.Reader, error) +} + +// applies implements Applys. +type applies struct { + client *Client +} + +// ApplyStatus represents an apply state. +type ApplyStatus string + +//List all available apply statuses. +const ( + ApplyCanceled ApplyStatus = "canceled" + ApplyCreated ApplyStatus = "created" + ApplyErrored ApplyStatus = "errored" + ApplyFinished ApplyStatus = "finished" + ApplyMFAWaiting ApplyStatus = "mfa_waiting" + ApplyPending ApplyStatus = "pending" + ApplyQueued ApplyStatus = "queued" + ApplyRunning ApplyStatus = "running" + ApplyUnreachable ApplyStatus = "unreachable" +) + +// Apply represents a Terraform Enterprise apply. +type Apply struct { + ID string `jsonapi:"primary,applies"` + LogReadURL string `jsonapi:"attr,log-read-url"` + ResourceAdditions int `jsonapi:"attr,resource-additions"` + ResourceChanges int `jsonapi:"attr,resource-changes"` + ResourceDestructions int `jsonapi:"attr,resource-destructions"` + Status ApplyStatus `jsonapi:"attr,status"` + StatusTimestamps *ApplyStatusTimestamps `jsonapi:"attr,status-timestamps"` +} + +// ApplyStatusTimestamps holds the timestamps for individual apply statuses. +type ApplyStatusTimestamps struct { + CanceledAt time.Time `json:"canceled-at"` + ErroredAt time.Time `json:"errored-at"` + FinishedAt time.Time `json:"finished-at"` + ForceCanceledAt time.Time `json:"force-canceled-at"` + QueuedAt time.Time `json:"queued-at"` + StartedAt time.Time `json:"started-at"` +} + +// Read an apply by its ID. 
+func (s *applies) Read(ctx context.Context, applyID string) (*Apply, error) { + if !validStringID(&applyID) { + return nil, errors.New("Invalid value for apply ID") + } + + u := fmt.Sprintf("applies/%s", url.QueryEscape(applyID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + a := &Apply{} + err = s.client.do(ctx, req, a) + if err != nil { + return nil, err + } + + return a, nil +} + +// Logs retrieves the logs of an apply. +func (s *applies) Logs(ctx context.Context, applyID string) (io.Reader, error) { + if !validStringID(&applyID) { + return nil, errors.New("Invalid value for apply ID") + } + + // Get the apply to make sure it exists. + a, err := s.Read(ctx, applyID) + if err != nil { + return nil, err + } + + // Return an error if the log URL is empty. + if a.LogReadURL == "" { + return nil, fmt.Errorf("Apply %s does not have a log URL", applyID) + } + + u, err := url.Parse(a.LogReadURL) + if err != nil { + return nil, fmt.Errorf("Invalid log URL: %v", err) + } + + done := func() (bool, error) { + a, err := s.Read(ctx, a.ID) + if err != nil { + return false, err + } + + switch a.Status { + case ApplyCanceled, ApplyErrored, ApplyFinished, ApplyUnreachable: + return true, nil + default: + return false, nil + } + } + + return &LogReader{ + client: s.client, + ctx: ctx, + done: done, + logURL: u, + }, nil +} diff --git a/vendor/github.com/hashicorp/go-tfe/configuration_version.go b/vendor/github.com/hashicorp/go-tfe/configuration_version.go new file mode 100644 index 000000000000..168c1c6dd478 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/configuration_version.go @@ -0,0 +1,199 @@ +package tfe + +import ( + "bytes" + "context" + "errors" + "fmt" + "net/url" + "time" + + slug "github.com/hashicorp/go-slug" +) + +// Compile-time proof of interface implementation. 
+var _ ConfigurationVersions = (*configurationVersions)(nil) + +// ConfigurationVersions describes all the configuration version related +// methods that the Terraform Enterprise API supports. +// +// TFE API docs: +// https://www.terraform.io/docs/enterprise/api/configuration-versions.html +type ConfigurationVersions interface { + // List returns all configuration versions of a workspace. + List(ctx context.Context, workspaceID string, options ConfigurationVersionListOptions) (*ConfigurationVersionList, error) + + // Create is used to create a new configuration version. The created + // configuration version will be usable once data is uploaded to it. + Create(ctx context.Context, workspaceID string, options ConfigurationVersionCreateOptions) (*ConfigurationVersion, error) + + // Read a configuration version by its ID. + Read(ctx context.Context, cvID string) (*ConfigurationVersion, error) + + // Upload packages and uploads Terraform configuration files. It requires + // the upload URL from a configuration version and the full path to the + // configuration files on disk. + Upload(ctx context.Context, url string, path string) error +} + +// configurationVersions implements ConfigurationVersions. +type configurationVersions struct { + client *Client +} + +// ConfigurationStatus represents a configuration version status. +type ConfigurationStatus string + +//List all available configuration version statuses. +const ( + ConfigurationErrored ConfigurationStatus = "errored" + ConfigurationPending ConfigurationStatus = "pending" + ConfigurationUploaded ConfigurationStatus = "uploaded" +) + +// ConfigurationSource represents a source of a configuration version. +type ConfigurationSource string + +// List all available configuration version sources. 
+const ( + ConfigurationSourceAPI ConfigurationSource = "tfe-api" + ConfigurationSourceBitbucket ConfigurationSource = "bitbucket" + ConfigurationSourceGithub ConfigurationSource = "github" + ConfigurationSourceGitlab ConfigurationSource = "gitlab" + ConfigurationSourceTerraform ConfigurationSource = "terraform" +) + +// ConfigurationVersionList represents a list of configuration versions. +type ConfigurationVersionList struct { + *Pagination + Items []*ConfigurationVersion +} + +// ConfigurationVersion is a representation of an uploaded or ingressed +// Terraform configuration in TFE. A workspace must have at least one +// configuration version before any runs may be queued on it. +type ConfigurationVersion struct { + ID string `jsonapi:"primary,configuration-versions"` + AutoQueueRuns bool `jsonapi:"attr,auto-queue-runs"` + Error string `jsonapi:"attr,error"` + ErrorMessage string `jsonapi:"attr,error-message"` + Source ConfigurationSource `jsonapi:"attr,source"` + Speculative bool `jsonapi:"attr,speculative "` + Status ConfigurationStatus `jsonapi:"attr,status"` + StatusTimestamps *CVStatusTimestamps `jsonapi:"attr,status-timestamps"` + UploadURL string `jsonapi:"attr,upload-url"` +} + +// CVStatusTimestamps holds the timestamps for individual configuration version +// statuses. +type CVStatusTimestamps struct { + FinishedAt time.Time `json:"finished-at"` + QueuedAt time.Time `json:"queued-at"` + StartedAt time.Time `json:"started-at"` +} + +// ConfigurationVersionListOptions represents the options for listing +// configuration versions. +type ConfigurationVersionListOptions struct { + ListOptions +} + +// List returns all configuration versions of a workspace. 
+func (s *configurationVersions) List(ctx context.Context, workspaceID string, options ConfigurationVersionListOptions) (*ConfigurationVersionList, error) { + if !validStringID(&workspaceID) { + return nil, errors.New("Invalid value for workspace ID") + } + + u := fmt.Sprintf("workspaces/%s/configuration-versions", url.QueryEscape(workspaceID)) + req, err := s.client.newRequest("GET", u, &options) + if err != nil { + return nil, err + } + + cvl := &ConfigurationVersionList{} + err = s.client.do(ctx, req, cvl) + if err != nil { + return nil, err + } + + return cvl, nil +} + +// ConfigurationVersionCreateOptions represents the options for creating a +// configuration version. +type ConfigurationVersionCreateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,configuration-versions"` + + // When true, runs are queued automatically when the configuration version + // is uploaded. + AutoQueueRuns *bool `jsonapi:"attr,auto-queue-runs,omitempty"` + + // When true, this configuration version can only be used for planning. + Speculative *bool `jsonapi:"attr,speculative,omitempty"` +} + +// Create is used to create a new configuration version. The created +// configuration version will be usable once data is uploaded to it. +func (s *configurationVersions) Create(ctx context.Context, workspaceID string, options ConfigurationVersionCreateOptions) (*ConfigurationVersion, error) { + if !validStringID(&workspaceID) { + return nil, errors.New("Invalid value for workspace ID") + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("workspaces/%s/configuration-versions", url.QueryEscape(workspaceID)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return nil, err + } + + cv := &ConfigurationVersion{} + err = s.client.do(ctx, req, cv) + if err != nil { + return nil, err + } + + return cv, nil +} + +// Read a configuration version by its ID. 
+func (s *configurationVersions) Read(ctx context.Context, cvID string) (*ConfigurationVersion, error) { + if !validStringID(&cvID) { + return nil, errors.New("Invalid value for configuration version ID") + } + + u := fmt.Sprintf("configuration-versions/%s", url.QueryEscape(cvID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + cv := &ConfigurationVersion{} + err = s.client.do(ctx, req, cv) + if err != nil { + return nil, err + } + + return cv, nil +} + +// Upload packages and uploads Terraform configuration files. It requires the +// upload URL from a configuration version and the path to the configuration +// files on disk. +func (s *configurationVersions) Upload(ctx context.Context, url, path string) error { + body := bytes.NewBuffer(nil) + + _, err := slug.Pack(path, body) + if err != nil { + return err + } + + req, err := s.client.newRequest("PUT", url, body) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/logreader.go b/vendor/github.com/hashicorp/go-tfe/logreader.go new file mode 100644 index 000000000000..cdc1aad9b4cf --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/logreader.go @@ -0,0 +1,138 @@ +package tfe + +import ( + "context" + "fmt" + "io" + "math" + "net/http" + "net/url" + "time" +) + +// LogReader implements io.Reader for streaming logs. +type LogReader struct { + client *Client + ctx context.Context + done func() (bool, error) + logURL *url.URL + offset int64 + reads int + startOfText bool + endOfText bool +} + +// backoff will perform exponential backoff based on the iteration and +// limited by the provided min and max (in milliseconds) durations. 
+func backoff(min, max float64, iter int) time.Duration { + backoff := math.Pow(2, float64(iter)/5) * min + if backoff > max { + backoff = max + } + return time.Duration(backoff) * time.Millisecond +} + +func (r *LogReader) Read(l []byte) (int, error) { + if written, err := r.read(l); err != io.ErrNoProgress { + return written, err + } + + // Loop until we can any data, the context is canceled or the + // run is finsished. If we would return right away without any + // data, we could and up causing a io.ErrNoProgress error. + for r.reads = 1; ; r.reads++ { + select { + case <-r.ctx.Done(): + return 0, r.ctx.Err() + case <-time.After(backoff(500, 2000, r.reads)): + if written, err := r.read(l); err != io.ErrNoProgress { + return written, err + } + } + } +} + +func (r *LogReader) read(l []byte) (int, error) { + // Update the query string. + r.logURL.RawQuery = fmt.Sprintf("limit=%d&offset=%d", len(l), r.offset) + + // Create a new request. + req, err := http.NewRequest("GET", r.logURL.String(), nil) + if err != nil { + return 0, err + } + req = req.WithContext(r.ctx) + + // Retrieve the next chunk. + resp, err := r.client.http.Do(req) + if err != nil { + return 0, err + } + defer resp.Body.Close() + + // Basic response checking. + if err := checkResponseCode(resp); err != nil { + return 0, err + } + + // Read the retrieved chunk. + written, err := resp.Body.Read(l) + if err != nil && err != io.EOF { + // Ignore io.EOF errors returned when reading from the response + // body as this indicates the end of the chunk and not the end + // of the logfile. + return written, err + } + + if written > 0 { + // Check for an STX (Start of Text) ASCII control marker. + if !r.startOfText && l[0] == byte(2) { + r.startOfText = true + + // Remove the STX marker from the received chunk. + copy(l[:written-1], l[1:]) + l[written-1] = byte(0) + r.offset++ + written-- + + // Return early if we only received the STX marker. 
+ if written == 0 { + return 0, io.ErrNoProgress + } + } + + // If we found an STX ASCII control character, start looking for + // the ETX (End of Text) control character. + if r.startOfText && l[written-1] == byte(3) { + r.endOfText = true + + // Remove the ETX marker from the received chunk. + l[written-1] = byte(0) + r.offset++ + written-- + } + } + + // Check if we need to continue the loop and wait 500 miliseconds + // before checking if there is a new chunk available or that the + // run is finished and we are done reading all chunks. + if written == 0 { + if (r.startOfText && r.endOfText) || // The logstream finished without issues. + (r.startOfText && r.reads%10 == 0) || // The logstream terminated unexpectedly. + (!r.startOfText && r.reads > 1) { // The logstream doesn't support STX/ETX. + done, err := r.done() + if err != nil { + return 0, err + } + if done { + return 0, io.EOF + } + } + return 0, io.ErrNoProgress + } + + // Update the offset for the next read. + r.offset += int64(written) + + return written, nil +} diff --git a/vendor/github.com/hashicorp/go-tfe/oauth_client.go b/vendor/github.com/hashicorp/go-tfe/oauth_client.go new file mode 100644 index 000000000000..be31fd8e35dc --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/oauth_client.go @@ -0,0 +1,199 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ OAuthClients = (*oAuthClients)(nil) + +// OAuthClients describes all the OAuth client related methods that the +// Terraform Enterprise API supports. +// +// TFE API docs: +// https://www.terraform.io/docs/enterprise/api/oauth-clients.html +type OAuthClients interface { + // List all the OAuth clients for a given organization. + List(ctx context.Context, organization string, options OAuthClientListOptions) (*OAuthClientList, error) + + // Create an OAuth client to connect an organization and a VCS provider. 
+ Create(ctx context.Context, organization string, options OAuthClientCreateOptions) (*OAuthClient, error) + + // Read an OAuth client by its ID. + Read(ctx context.Context, oAuthClientID string) (*OAuthClient, error) + + // Delete an OAuth client by its ID. + Delete(ctx context.Context, oAuthClientID string) error +} + +// oAuthClients implements OAuthClients. +type oAuthClients struct { + client *Client +} + +// ServiceProviderType represents a VCS type. +type ServiceProviderType string + +// List of available VCS types. +const ( + ServiceProviderBitbucket ServiceProviderType = "bitbucket_hosted" + ServiceProviderBitbucketServer ServiceProviderType = "bitbucket_server" + ServiceProviderGithub ServiceProviderType = "github" + ServiceProviderGithubEE ServiceProviderType = "github_enterprise" + ServiceProviderGitlab ServiceProviderType = "gitlab_hosted" + ServiceProviderGitlabCE ServiceProviderType = "gitlab_community_edition" + ServiceProviderGitlabEE ServiceProviderType = "gitlab_enterprise_edition" +) + +// OAuthClientList represents a list of OAuth clients. +type OAuthClientList struct { + *Pagination + Items []*OAuthClient +} + +// OAuthClient represents a connection between an organization and a VCS +// provider. 
+type OAuthClient struct { + ID string `jsonapi:"primary,oauth-clients"` + APIURL string `jsonapi:"attr,api-url"` + CallbackURL string `jsonapi:"attr,callback-url"` + ConnectPath string `jsonapi:"attr,connect-path"` + CreatedAt time.Time `jsonapi:"attr,created-at,iso8601"` + HTTPURL string `jsonapi:"attr,http-url"` + Key string `jsonapi:"attr,key"` + RSAPublicKey string `jsonapi:"attr,rsa-public-key"` + ServiceProvider ServiceProviderType `jsonapi:"attr,service-provider"` + ServiceProviderName string `jsonapi:"attr,service-provider-display-name"` + + // Relations + Organization *Organization `jsonapi:"relation,organization"` + OAuthTokens []*OAuthToken `jsonapi:"relation,oauth-tokens"` +} + +// OAuthClientListOptions represents the options for listing +// OAuth clients. +type OAuthClientListOptions struct { + ListOptions +} + +// List all the OAuth clients for a given organization. +func (s *oAuthClients) List(ctx context.Context, organization string, options OAuthClientListOptions) (*OAuthClientList, error) { + if !validStringID(&organization) { + return nil, errors.New("Invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/oauth-clients", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, &options) + if err != nil { + return nil, err + } + + ocl := &OAuthClientList{} + err = s.client.do(ctx, req, ocl) + if err != nil { + return nil, err + } + + return ocl, nil +} + +// OAuthClientCreateOptions represents the options for creating an OAuth client. +type OAuthClientCreateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,oauth-clients"` + + // The base URL of your VCS provider's API. + APIURL *string `jsonapi:"attr,api-url"` + + // The homepage of your VCS provider. + HTTPURL *string `jsonapi:"attr,http-url"` + + // The token string you were given by your VCS provider. + OAuthToken *string `jsonapi:"attr,oauth-token-string"` + + // The VCS provider being connected with. 
+ ServiceProvider *ServiceProviderType `jsonapi:"attr,service-provider"` +} + +func (o OAuthClientCreateOptions) valid() error { + if !validString(o.APIURL) { + return errors.New("APIURL is required") + } + if !validString(o.HTTPURL) { + return errors.New("HTTPURL is required") + } + if !validString(o.OAuthToken) { + return errors.New("OAuthToken is required") + } + if o.ServiceProvider == nil { + return errors.New("ServiceProvider is required") + } + return nil +} + +// Create an OAuth client to connect an organization and a VCS provider. +func (s *oAuthClients) Create(ctx context.Context, organization string, options OAuthClientCreateOptions) (*OAuthClient, error) { + if !validStringID(&organization) { + return nil, errors.New("Invalid value for organization") + } + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("organizations/%s/oauth-clients", url.QueryEscape(organization)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return nil, err + } + + oc := &OAuthClient{} + err = s.client.do(ctx, req, oc) + if err != nil { + return nil, err + } + + return oc, nil +} + +// Read an OAuth client by its ID. +func (s *oAuthClients) Read(ctx context.Context, oAuthClientID string) (*OAuthClient, error) { + if !validStringID(&oAuthClientID) { + return nil, errors.New("Invalid value for OAuth client ID") + } + + u := fmt.Sprintf("oauth-clients/%s", url.QueryEscape(oAuthClientID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + oc := &OAuthClient{} + err = s.client.do(ctx, req, oc) + if err != nil { + return nil, err + } + + return oc, err +} + +// Delete an OAuth client by its ID. 
+func (s *oAuthClients) Delete(ctx context.Context, oAuthClientID string) error { + if !validStringID(&oAuthClientID) { + return errors.New("Invalid value for OAuth client ID") + } + + u := fmt.Sprintf("oauth-clients/%s", url.QueryEscape(oAuthClientID)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/oauth_token.go b/vendor/github.com/hashicorp/go-tfe/oauth_token.go new file mode 100644 index 000000000000..2367a1e3b9ef --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/oauth_token.go @@ -0,0 +1,150 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ OAuthTokens = (*oAuthTokens)(nil) + +// OAuthTokens describes all the OAuth token related methods that the +// Terraform Enterprise API supports. +// +// TFE API docs: +// https://www.terraform.io/docs/enterprise/api/oauth-tokens.html +type OAuthTokens interface { + // List all the OAuth tokens for a given organization. + List(ctx context.Context, organization string, options OAuthTokenListOptions) (*OAuthTokenList, error) + // Read a OAuth token by its ID. + Read(ctx context.Context, oAuthTokenID string) (*OAuthToken, error) + + // Update an existing OAuth token. + Update(ctx context.Context, oAuthTokenID string, options OAuthTokenUpdateOptions) (*OAuthToken, error) + + // Delete a OAuth token by its ID. + Delete(ctx context.Context, oAuthTokenID string) error +} + +// oAuthTokens implements OAuthTokens. +type oAuthTokens struct { + client *Client +} + +// OAuthTokenList represents a list of OAuth tokens. 
+type OAuthTokenList struct { + *Pagination + Items []*OAuthToken +} + +// OAuthToken represents a VCS configuration including the associated +// OAuth token +type OAuthToken struct { + ID string `jsonapi:"primary,oauth-tokens"` + UID string `jsonapi:"attr,uid"` + CreatedAt time.Time `jsonapi:"attr,created-at,iso8601"` + HasSSHKey bool `jsonapi:"attr,has-ssh-key"` + ServiceProviderUser string `jsonapi:"attr,service-provider-user"` + + // Relations + OAuthClient *OAuthClient `jsonapi:"relation,oauth-client"` +} + +// OAuthTokenListOptions represents the options for listing +// OAuth tokens. +type OAuthTokenListOptions struct { + ListOptions +} + +// List all the OAuth tokens for a given organization. +func (s *oAuthTokens) List(ctx context.Context, organization string, options OAuthTokenListOptions) (*OAuthTokenList, error) { + if !validStringID(&organization) { + return nil, errors.New("Invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/oauth-tokens", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, &options) + if err != nil { + return nil, err + } + + otl := &OAuthTokenList{} + err = s.client.do(ctx, req, otl) + if err != nil { + return nil, err + } + + return otl, nil +} + +// Read an OAuth token by its ID. +func (s *oAuthTokens) Read(ctx context.Context, oAuthTokenID string) (*OAuthToken, error) { + if !validStringID(&oAuthTokenID) { + return nil, errors.New("Invalid value for OAuth token ID") + } + + u := fmt.Sprintf("oauth-tokens/%s", url.QueryEscape(oAuthTokenID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + ot := &OAuthToken{} + err = s.client.do(ctx, req, ot) + if err != nil { + return nil, err + } + + return ot, err +} + +// OAuthTokenUpdateOptions represents the options for updating an OAuth token. +type OAuthTokenUpdateOptions struct { + // For internal use only! 
+ ID string `jsonapi:"primary,oauth-tokens"` + + // A private SSH key to be used for git clone operations. + PrivateSSHKey *string `jsonapi:"attr,ssh-key"` +} + +// Update an existing OAuth token. +func (s *oAuthTokens) Update(ctx context.Context, oAuthTokenID string, options OAuthTokenUpdateOptions) (*OAuthToken, error) { + if !validStringID(&oAuthTokenID) { + return nil, errors.New("Invalid value for OAuth token ID") + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("oauth-tokens/%s", url.QueryEscape(oAuthTokenID)) + req, err := s.client.newRequest("PATCH", u, &options) + if err != nil { + return nil, err + } + + ot := &OAuthToken{} + err = s.client.do(ctx, req, ot) + if err != nil { + return nil, err + } + + return ot, err +} + +// Delete an OAuth token by its ID. +func (s *oAuthTokens) Delete(ctx context.Context, oAuthTokenID string) error { + if !validStringID(&oAuthTokenID) { + return errors.New("Invalid value for OAuth token ID") + } + + u := fmt.Sprintf("oauth-tokens/%s", url.QueryEscape(oAuthTokenID)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/organization.go b/vendor/github.com/hashicorp/go-tfe/organization.go new file mode 100644 index 000000000000..f4759a231f0d --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/organization.go @@ -0,0 +1,310 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ Organizations = (*organizations)(nil) + +// Organizations describes all the organization related methods that the +// Terraform Enterprise API supports. +// +// TFE API docs: +// https://www.terraform.io/docs/enterprise/api/organizations.html +type Organizations interface { + // List all the organizations visible to the current user. 
+ List(ctx context.Context, options OrganizationListOptions) (*OrganizationList, error) + + // Create a new organization with the given options. + Create(ctx context.Context, options OrganizationCreateOptions) (*Organization, error) + + // Read an organization by its name. + Read(ctx context.Context, organization string) (*Organization, error) + + // Update attributes of an existing organization. + Update(ctx context.Context, organization string, options OrganizationUpdateOptions) (*Organization, error) + + // Delete an organization by its name. + Delete(ctx context.Context, organization string) error + + // Capacity shows the current run capacity of an organization. + Capacity(ctx context.Context, organization string) (*Capacity, error) + + // RunQueue shows the current run queue of an organization. + RunQueue(ctx context.Context, organization string, options RunQueueOptions) (*RunQueue, error) +} + +// organizations implements Organizations. +type organizations struct { + client *Client +} + +// AuthPolicyType represents an authentication policy type. +type AuthPolicyType string + +// List of available authentication policies. +const ( + AuthPolicyPassword AuthPolicyType = "password" + AuthPolicyTwoFactor AuthPolicyType = "two_factor_mandatory" +) + +// EnterprisePlanType represents an enterprise plan type. +type EnterprisePlanType string + +// List of available enterprise plan types. +const ( + EnterprisePlanDisabled EnterprisePlanType = "disabled" + EnterprisePlanPremium EnterprisePlanType = "premium" + EnterprisePlanPro EnterprisePlanType = "pro" + EnterprisePlanTrial EnterprisePlanType = "trial" +) + +// OrganizationList represents a list of organizations. +type OrganizationList struct { + *Pagination + Items []*Organization +} + +// Organization represents a Terraform Enterprise organization. 
+type Organization struct { + Name string `jsonapi:"primary,organizations"` + CollaboratorAuthPolicy AuthPolicyType `jsonapi:"attr,collaborator-auth-policy"` + CreatedAt time.Time `jsonapi:"attr,created-at,iso8601"` + Email string `jsonapi:"attr,email"` + EnterprisePlan EnterprisePlanType `jsonapi:"attr,enterprise-plan"` + OwnersTeamSamlRoleID string `jsonapi:"attr,owners-team-saml-role-id"` + Permissions *OrganizationPermissions `jsonapi:"attr,permissions"` + SAMLEnabled bool `jsonapi:"attr,saml-enabled"` + SessionRemember int `jsonapi:"attr,session-remember"` + SessionTimeout int `jsonapi:"attr,session-timeout"` + TrialExpiresAt time.Time `jsonapi:"attr,trial-expires-at,iso8601"` + TwoFactorConformant bool `jsonapi:"attr,two-factor-conformant"` +} + +// Capacity represents the current run capacity of an organization. +type Capacity struct { + Organization string `jsonapi:"primary,organization-capacity"` + Pending int `jsonapi:"attr,pending"` + Running int `jsonapi:"attr,running"` +} + +// RunQueue represents the current run queue of an organization. +type RunQueue struct { + *Pagination + Items []*Run +} + +// OrganizationPermissions represents the organization permissions. +type OrganizationPermissions struct { + CanCreateTeam bool `json:"can-create-team"` + CanCreateWorkspace bool `json:"can-create-workspace"` + CanCreateWorkspaceMigration bool `json:"can-create-workspace-migration"` + CanDestroy bool `json:"can-destroy"` + CanTraverse bool `json:"can-traverse"` + CanUpdate bool `json:"can-update"` + CanUpdateAPIToken bool `json:"can-update-api-token"` + CanUpdateOAuth bool `json:"can-update-oauth"` + CanUpdateSentinel bool `json:"can-update-sentinel"` +} + +// OrganizationListOptions represents the options for listing organizations. +type OrganizationListOptions struct { + ListOptions +} + +// List all the organizations visible to the current user. 
+func (s *organizations) List(ctx context.Context, options OrganizationListOptions) (*OrganizationList, error) { + req, err := s.client.newRequest("GET", "organizations", &options) + if err != nil { + return nil, err + } + + orgl := &OrganizationList{} + err = s.client.do(ctx, req, orgl) + if err != nil { + return nil, err + } + + return orgl, nil +} + +// OrganizationCreateOptions represents the options for creating an organization. +type OrganizationCreateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,organizations"` + + // Name of the organization. + Name *string `jsonapi:"attr,name"` + + // Admin email address. + Email *string `jsonapi:"attr,email"` +} + +func (o OrganizationCreateOptions) valid() error { + if !validString(o.Name) { + return errors.New("Name is required") + } + if !validStringID(o.Name) { + return errors.New("Invalid value for name") + } + if !validString(o.Email) { + return errors.New("Email is required") + } + return nil +} + +// Create a new organization with the given options. +func (s *organizations) Create(ctx context.Context, options OrganizationCreateOptions) (*Organization, error) { + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + req, err := s.client.newRequest("POST", "organizations", &options) + if err != nil { + return nil, err + } + + org := &Organization{} + err = s.client.do(ctx, req, org) + if err != nil { + return nil, err + } + + return org, nil +} + +// Read an organization by its name. 
+func (s *organizations) Read(ctx context.Context, organization string) (*Organization, error) { + if !validStringID(&organization) { + return nil, errors.New("Invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + org := &Organization{} + err = s.client.do(ctx, req, org) + if err != nil { + return nil, err + } + + return org, nil +} + +// OrganizationUpdateOptions represents the options for updating an organization. +type OrganizationUpdateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,organizations"` + + // New name for the organization. + Name *string `jsonapi:"attr,name,omitempty"` + + // New admin email address. + Email *string `jsonapi:"attr,email,omitempty"` + + // Session expiration (minutes). + SessionRemember *int `jsonapi:"attr,session-remember,omitempty"` + + // Session timeout after inactivity (minutes). + SessionTimeout *int `jsonapi:"attr,session-timeout,omitempty"` + + // Authentication policy. + CollaboratorAuthPolicy *AuthPolicyType `jsonapi:"attr,collaborator-auth-policy,omitempty"` +} + +// Update attributes of an existing organization. +func (s *organizations) Update(ctx context.Context, organization string, options OrganizationUpdateOptions) (*Organization, error) { + if !validStringID(&organization) { + return nil, errors.New("Invalid value for organization") + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("organizations/%s", url.QueryEscape(organization)) + req, err := s.client.newRequest("PATCH", u, &options) + if err != nil { + return nil, err + } + + org := &Organization{} + err = s.client.do(ctx, req, org) + if err != nil { + return nil, err + } + + return org, nil +} + +// Delete an organization by its name. 
+func (s *organizations) Delete(ctx context.Context, organization string) error { + if !validStringID(&organization) { + return errors.New("Invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s", url.QueryEscape(organization)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// Capacity shows the currently used capacity of an organization. +func (s *organizations) Capacity(ctx context.Context, organization string) (*Capacity, error) { + if !validStringID(&organization) { + return nil, errors.New("Invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/capacity", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + c := &Capacity{} + err = s.client.do(ctx, req, c) + if err != nil { + return nil, err + } + + return c, nil +} + +// RunQueueOptions represents the options for showing the queue. +type RunQueueOptions struct { + ListOptions +} + +// RunQueue shows the current run queue of an organization. 
+func (s *organizations) RunQueue(ctx context.Context, organization string, options RunQueueOptions) (*RunQueue, error) { + if !validStringID(&organization) { + return nil, errors.New("Invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/runs/queue", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, &options) + if err != nil { + return nil, err + } + + rq := &RunQueue{} + err = s.client.do(ctx, req, rq) + if err != nil { + return nil, err + } + + return rq, nil +} diff --git a/vendor/github.com/hashicorp/go-tfe/organization_token.go b/vendor/github.com/hashicorp/go-tfe/organization_token.go new file mode 100644 index 000000000000..33368da0ba45 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/organization_token.go @@ -0,0 +1,99 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ OrganizationTokens = (*organizationTokens)(nil) + +// OrganizationTokens describes all the organization token related methods +// that the Terraform Enterprise API supports. +// +// TFE API docs: +// https://www.terraform.io/docs/enterprise/api/organization-tokens.html +type OrganizationTokens interface { + // Generate a new organization token, replacing any existing token. + Generate(ctx context.Context, organization string) (*OrganizationToken, error) + + // Read an organization token. + Read(ctx context.Context, organization string) (*OrganizationToken, error) + + // Delete an organization token. + Delete(ctx context.Context, organization string) error +} + +// organizationTokens implements OrganizationTokens. +type organizationTokens struct { + client *Client +} + +// OrganizationToken represents a Terraform Enterprise organization token. 
+type OrganizationToken struct { + ID string `jsonapi:"primary,authentication-tokens"` + CreatedAt time.Time `jsonapi:"attr,created-at,iso8601"` + Description string `jsonapi:"attr,description"` + LastUsedAt time.Time `jsonapi:"attr,last-used-at,iso8601"` + Token string `jsonapi:"attr,token"` +} + +// Generate a new organization token, replacing any existing token. +func (s *organizationTokens) Generate(ctx context.Context, organization string) (*OrganizationToken, error) { + if !validStringID(&organization) { + return nil, errors.New("Invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/authentication-token", url.QueryEscape(organization)) + req, err := s.client.newRequest("POST", u, nil) + if err != nil { + return nil, err + } + + ot := &OrganizationToken{} + err = s.client.do(ctx, req, ot) + if err != nil { + return nil, err + } + + return ot, err +} + +// Read an organization token. +func (s *organizationTokens) Read(ctx context.Context, organization string) (*OrganizationToken, error) { + if !validStringID(&organization) { + return nil, errors.New("Invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/authentication-token", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + ot := &OrganizationToken{} + err = s.client.do(ctx, req, ot) + if err != nil { + return nil, err + } + + return ot, err +} + +// Delete an organization token. 
+func (s *organizationTokens) Delete(ctx context.Context, organization string) error { + if !validStringID(&organization) { + return errors.New("Invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/authentication-token", url.QueryEscape(organization)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/plan.go b/vendor/github.com/hashicorp/go-tfe/plan.go new file mode 100644 index 000000000000..194ce65fba33 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/plan.go @@ -0,0 +1,133 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "io" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ Plans = (*plans)(nil) + +// Plans describes all the plan related methods that the Terraform Enterprise +// API supports. +// +// TFE API docs: https://www.terraform.io/docs/enterprise/api/plan.html +type Plans interface { + // Read a plan by its ID. + Read(ctx context.Context, planID string) (*Plan, error) + + // Logs retrieves the logs of a plan. + Logs(ctx context.Context, planID string) (io.Reader, error) +} + +// plans implements Plans. +type plans struct { + client *Client +} + +// PlanStatus represents a plan state. +type PlanStatus string + +//List all available plan statuses. +const ( + PlanCanceled PlanStatus = "canceled" + PlanCreated PlanStatus = "created" + PlanErrored PlanStatus = "errored" + PlanFinished PlanStatus = "finished" + PlanMFAWaiting PlanStatus = "mfa_waiting" + PlanPending PlanStatus = "pending" + PlanQueued PlanStatus = "queued" + PlanRunning PlanStatus = "running" + PlanUnreachable PlanStatus = "unreachable" +) + +// Plan represents a Terraform Enterprise plan. 
+type Plan struct { + ID string `jsonapi:"primary,plans"` + HasChanges bool `jsonapi:"attr,has-changes"` + LogReadURL string `jsonapi:"attr,log-read-url"` + ResourceAdditions int `jsonapi:"attr,resource-additions"` + ResourceChanges int `jsonapi:"attr,resource-changes"` + ResourceDestructions int `jsonapi:"attr,resource-destructions"` + Status PlanStatus `jsonapi:"attr,status"` + StatusTimestamps *PlanStatusTimestamps `jsonapi:"attr,status-timestamps"` +} + +// PlanStatusTimestamps holds the timestamps for individual plan statuses. +type PlanStatusTimestamps struct { + CanceledAt time.Time `json:"canceled-at"` + ErroredAt time.Time `json:"errored-at"` + FinishedAt time.Time `json:"finished-at"` + ForceCanceledAt time.Time `json:"force-canceled-at"` + QueuedAt time.Time `json:"queued-at"` + StartedAt time.Time `json:"started-at"` +} + +// Read a plan by its ID. +func (s *plans) Read(ctx context.Context, planID string) (*Plan, error) { + if !validStringID(&planID) { + return nil, errors.New("Invalid value for plan ID") + } + + u := fmt.Sprintf("plans/%s", url.QueryEscape(planID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + p := &Plan{} + err = s.client.do(ctx, req, p) + if err != nil { + return nil, err + } + + return p, nil +} + +// Logs retrieves the logs of a plan. +func (s *plans) Logs(ctx context.Context, planID string) (io.Reader, error) { + if !validStringID(&planID) { + return nil, errors.New("Invalid value for plan ID") + } + + // Get the plan to make sure it exists. + p, err := s.Read(ctx, planID) + if err != nil { + return nil, err + } + + // Return an error if the log URL is empty. 
+ if p.LogReadURL == "" { + return nil, fmt.Errorf("Plan %s does not have a log URL", planID) + } + + u, err := url.Parse(p.LogReadURL) + if err != nil { + return nil, fmt.Errorf("Invalid log URL: %v", err) + } + + done := func() (bool, error) { + p, err := s.Read(ctx, p.ID) + if err != nil { + return false, err + } + + switch p.Status { + case PlanCanceled, PlanErrored, PlanFinished, PlanUnreachable: + return true, nil + default: + return false, nil + } + } + + return &LogReader{ + client: s.client, + ctx: ctx, + done: done, + logURL: u, + }, nil +} diff --git a/vendor/github.com/hashicorp/go-tfe/policy.go b/vendor/github.com/hashicorp/go-tfe/policy.go new file mode 100644 index 000000000000..80926af370f4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/policy.go @@ -0,0 +1,282 @@ +package tfe + +import ( + "bytes" + "context" + "errors" + "fmt" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ Policies = (*policies)(nil) + +// Policies describes all the policy related methods that the Terraform +// Enterprise API supports. +// +// TFE API docs: https://www.terraform.io/docs/enterprise/api/policies.html +type Policies interface { + // List all the policies for a given organization + List(ctx context.Context, organization string, options PolicyListOptions) (*PolicyList, error) + + // Create a policy and associate it with an organization. + Create(ctx context.Context, organization string, options PolicyCreateOptions) (*Policy, error) + + // Read a policy by its ID. + Read(ctx context.Context, policyID string) (*Policy, error) + + // Update an existing policy. + Update(ctx context.Context, policyID string, options PolicyUpdateOptions) (*Policy, error) + + // Delete a policy by its ID. + Delete(ctx context.Context, policyID string) error + + // Upload the policy content of the policy. + Upload(ctx context.Context, policyID string, content []byte) error + + // Upload the policy content of the policy. 
+ Download(ctx context.Context, policyID string) ([]byte, error) +} + +// policies implements Policies. +type policies struct { + client *Client +} + +// EnforcementLevel represents an enforcement level. +type EnforcementLevel string + +// List the available enforcement types. +const ( + EnforcementAdvisory EnforcementLevel = "advisory" + EnforcementHard EnforcementLevel = "hard-mandatory" + EnforcementSoft EnforcementLevel = "soft-mandatory" +) + +// PolicyList represents a list of policies.. +type PolicyList struct { + *Pagination + Items []*Policy +} + +// Policy represents a Terraform Enterprise policy. +type Policy struct { + ID string `jsonapi:"primary,policies"` + Name string `jsonapi:"attr,name"` + Enforce []*Enforcement `jsonapi:"attr,enforce"` + UpdatedAt time.Time `jsonapi:"attr,updated-at,iso8601"` +} + +// Enforcement describes a enforcement. +type Enforcement struct { + Path string `json:"path"` + Mode EnforcementLevel `json:"mode"` +} + +// PolicyListOptions represents the options for listing policies. +type PolicyListOptions struct { + ListOptions +} + +// List all the policies for a given organization +func (s *policies) List(ctx context.Context, organization string, options PolicyListOptions) (*PolicyList, error) { + if !validStringID(&organization) { + return nil, errors.New("Invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/policies", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, &options) + if err != nil { + return nil, err + } + + pl := &PolicyList{} + err = s.client.do(ctx, req, pl) + if err != nil { + return nil, err + } + + return pl, nil +} + +// PolicyCreateOptions represents the options for creating a new policy. +type PolicyCreateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,policies"` + + // The name of the policy. + Name *string `jsonapi:"attr,name"` + + // The enforcements of the policy. 
+ Enforce []*EnforcementOptions `jsonapi:"attr,enforce"` +} + +// EnforcementOptions represents the enforcement options of a policy. +type EnforcementOptions struct { + Path *string `json:"path,omitempty"` + Mode *EnforcementLevel `json:"mode"` +} + +func (o PolicyCreateOptions) valid() error { + if !validString(o.Name) { + return errors.New("Name is required") + } + if !validStringID(o.Name) { + return errors.New("Invalid value for name") + } + if o.Enforce == nil { + return errors.New("Enforce is required") + } + for _, e := range o.Enforce { + if !validString(e.Path) { + return errors.New("Enforcement path is required") + } + if e.Mode == nil { + return errors.New("Enforcement mode is required") + } + } + return nil +} + +// Create a policy and associate it with an organization. +func (s *policies) Create(ctx context.Context, organization string, options PolicyCreateOptions) (*Policy, error) { + if !validStringID(&organization) { + return nil, errors.New("Invalid value for organization") + } + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("organizations/%s/policies", url.QueryEscape(organization)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return nil, err + } + + p := &Policy{} + err = s.client.do(ctx, req, p) + if err != nil { + return nil, err + } + + return p, err +} + +// Read a policy by its ID. +func (s *policies) Read(ctx context.Context, policyID string) (*Policy, error) { + if !validStringID(&policyID) { + return nil, errors.New("Invalid value for policy ID") + } + + u := fmt.Sprintf("policies/%s", url.QueryEscape(policyID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + p := &Policy{} + err = s.client.do(ctx, req, p) + if err != nil { + return nil, err + } + + return p, err +} + +// PolicyUpdateOptions represents the options for updating a policy. 
+type PolicyUpdateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,policies"` + + // The enforcements of the policy. + Enforce []*EnforcementOptions `jsonapi:"attr,enforce"` +} + +func (o PolicyUpdateOptions) valid() error { + if o.Enforce == nil { + return errors.New("Enforce is required") + } + return nil +} + +// Update an existing policy. +func (s *policies) Update(ctx context.Context, policyID string, options PolicyUpdateOptions) (*Policy, error) { + if !validStringID(&policyID) { + return nil, errors.New("Invalid value for policy ID") + } + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("policies/%s", url.QueryEscape(policyID)) + req, err := s.client.newRequest("PATCH", u, &options) + if err != nil { + return nil, err + } + + p := &Policy{} + err = s.client.do(ctx, req, p) + if err != nil { + return nil, err + } + + return p, err +} + +// Delete a policy by its ID. +func (s *policies) Delete(ctx context.Context, policyID string) error { + if !validStringID(&policyID) { + return errors.New("Invalid value for policy ID") + } + + u := fmt.Sprintf("policies/%s", url.QueryEscape(policyID)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// Upload the policy content of the policy. +func (s *policies) Upload(ctx context.Context, policyID string, content []byte) error { + if !validStringID(&policyID) { + return errors.New("Invalid value for policy ID") + } + + u := fmt.Sprintf("policies/%s/upload", url.QueryEscape(policyID)) + req, err := s.client.newRequest("PUT", u, content) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// Download the policy content of the policy. 
+func (s *policies) Download(ctx context.Context, policyID string) ([]byte, error) { + if !validStringID(&policyID) { + return nil, errors.New("Invalid value for policy ID") + } + + u := fmt.Sprintf("policies/%s/download", url.QueryEscape(policyID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + err = s.client.do(ctx, req, &buf) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} diff --git a/vendor/github.com/hashicorp/go-tfe/policy_check.go b/vendor/github.com/hashicorp/go-tfe/policy_check.go new file mode 100644 index 000000000000..d5417e300632 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/policy_check.go @@ -0,0 +1,220 @@ +package tfe + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ PolicyChecks = (*policyChecks)(nil) + +// PolicyChecks describes all the policy check related methods that the +// Terraform Enterprise API supports. +// +// TFE API docs: +// https://www.terraform.io/docs/enterprise/api/policy-checks.html +type PolicyChecks interface { + // List all policy checks of the given run. + List(ctx context.Context, runID string, options PolicyCheckListOptions) (*PolicyCheckList, error) + + // Read a policy check by its ID. + Read(ctx context.Context, policyCheckID string) (*PolicyCheck, error) + + // Override a soft-mandatory or warning policy. + Override(ctx context.Context, policyCheckID string) (*PolicyCheck, error) + + // Logs retrieves the logs of a policy check. + Logs(ctx context.Context, policyCheckID string) (io.Reader, error) +} + +// policyChecks implements PolicyChecks. +type policyChecks struct { + client *Client +} + +// PolicyScope represents a policy scope. +type PolicyScope string + +// List all available policy scopes. 
+const ( + PolicyScopeOrganization PolicyScope = "organization" + PolicyScopeWorkspace PolicyScope = "workspace" +) + +// PolicyStatus represents a policy check state. +type PolicyStatus string + +//List all available policy check statuses. +const ( + PolicyErrored PolicyStatus = "errored" + PolicyHardFailed PolicyStatus = "hard_failed" + PolicyOverridden PolicyStatus = "overridden" + PolicyPasses PolicyStatus = "passed" + PolicyPending PolicyStatus = "pending" + PolicyQueued PolicyStatus = "queued" + PolicySoftFailed PolicyStatus = "soft_failed" + PolicyUnreachable PolicyStatus = "unreachable" +) + +// PolicyCheckList represents a list of policy checks. +type PolicyCheckList struct { + *Pagination + Items []*PolicyCheck +} + +// PolicyCheck represents a Terraform Enterprise policy check.. +type PolicyCheck struct { + ID string `jsonapi:"primary,policy-checks"` + Actions *PolicyActions `jsonapi:"attr,actions"` + Permissions *PolicyPermissions `jsonapi:"attr,permissions"` + Result *PolicyResult `jsonapi:"attr,result"` + Scope PolicyScope `jsonapi:"attr,scope"` + Status PolicyStatus `jsonapi:"attr,status"` + StatusTimestamps *PolicyStatusTimestamps `jsonapi:"attr,status-timestamps"` +} + +// PolicyActions represents the policy check actions. +type PolicyActions struct { + IsOverridable bool `json:"is-overridable"` +} + +// PolicyPermissions represents the policy check permissions. +type PolicyPermissions struct { + CanOverride bool `json:"can-override"` +} + +// PolicyResult represents the complete policy check result, +type PolicyResult struct { + AdvisoryFailed int `json:"advisory-failed"` + Duration int `json:"duration"` + HardFailed int `json:"hard-failed"` + Passed int `json:"passed"` + Result bool `json:"result"` + // Sentinel *sentinel.EvalResult `json:"sentinel"` + SoftFailed int `json:"soft-failed"` + TotalFailed int `json:"total-failed"` +} + +// PolicyStatusTimestamps holds the timestamps for individual policy check +// statuses. 
+type PolicyStatusTimestamps struct { + ErroredAt time.Time `json:"errored-at"` + HardFailedAt time.Time `json:"hard-failed-at"` + PassedAt time.Time `json:"passed-at"` + QueuedAt time.Time `json:"queued-at"` + SoftFailedAt time.Time `json:"soft-failed-at"` +} + +// PolicyCheckListOptions represents the options for listing policy checks. +type PolicyCheckListOptions struct { + ListOptions +} + +// List all policy checks of the given run. +func (s *policyChecks) List(ctx context.Context, runID string, options PolicyCheckListOptions) (*PolicyCheckList, error) { + if !validStringID(&runID) { + return nil, errors.New("Invalid value for run ID") + } + + u := fmt.Sprintf("runs/%s/policy-checks", url.QueryEscape(runID)) + req, err := s.client.newRequest("GET", u, &options) + if err != nil { + return nil, err + } + + pcl := &PolicyCheckList{} + err = s.client.do(ctx, req, pcl) + if err != nil { + return nil, err + } + + return pcl, nil +} + +// Read a policy check by its ID. +func (s *policyChecks) Read(ctx context.Context, policyCheckID string) (*PolicyCheck, error) { + if !validStringID(&policyCheckID) { + return nil, errors.New("Invalid value for policy check ID") + } + + u := fmt.Sprintf("policy-checks/%s", url.QueryEscape(policyCheckID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + pc := &PolicyCheck{} + err = s.client.do(ctx, req, pc) + if err != nil { + return nil, err + } + + return pc, nil +} + +// Override a soft-mandatory or warning policy. 
+func (s *policyChecks) Override(ctx context.Context, policyCheckID string) (*PolicyCheck, error) { + if !validStringID(&policyCheckID) { + return nil, errors.New("Invalid value for policy check ID") + } + + u := fmt.Sprintf("policy-checks/%s/actions/override", url.QueryEscape(policyCheckID)) + req, err := s.client.newRequest("POST", u, nil) + if err != nil { + return nil, err + } + + pc := &PolicyCheck{} + err = s.client.do(ctx, req, pc) + if err != nil { + return nil, err + } + + return pc, nil +} + +// Logs retrieves the logs of a policy check. +func (s *policyChecks) Logs(ctx context.Context, policyCheckID string) (io.Reader, error) { + if !validStringID(&policyCheckID) { + return nil, errors.New("Invalid value for policy check ID") + } + + // Loop until the context is canceled or the policy check is finished + // running. The policy check logs are not streamed and so only available + // once the check is finished. + for { + pc, err := s.Read(ctx, policyCheckID) + if err != nil { + return nil, err + } + + switch pc.Status { + case PolicyPending, PolicyQueued: + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(500 * time.Millisecond): + continue + } + } + + u := fmt.Sprintf("policy-checks/%s/output", url.QueryEscape(policyCheckID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + logs := bytes.NewBuffer(nil) + err = s.client.do(ctx, req, logs) + if err != nil { + return nil, err + } + + return logs, nil + } +} diff --git a/vendor/github.com/hashicorp/go-tfe/run.go b/vendor/github.com/hashicorp/go-tfe/run.go new file mode 100644 index 000000000000..59501e6ad82a --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/run.go @@ -0,0 +1,311 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ Runs = (*runs)(nil) + +// Runs describes all the run related methods that the Terraform Enterprise +// API supports. 
+// +// TFE API docs: https://www.terraform.io/docs/enterprise/api/run.html +type Runs interface { + // List all the runs of the given workspace. + List(ctx context.Context, workspaceID string, options RunListOptions) (*RunList, error) + + // Create a new run with the given options. + Create(ctx context.Context, options RunCreateOptions) (*Run, error) + + // Read a run by its ID. + Read(ctx context.Context, runID string) (*Run, error) + + // Apply a run by its ID. + Apply(ctx context.Context, runID string, options RunApplyOptions) error + + // Cancel a run by its ID. + Cancel(ctx context.Context, runID string, options RunCancelOptions) error + + // Force-cancel a run by its ID. + ForceCancel(ctx context.Context, runID string, options RunForceCancelOptions) error + + // Discard a run by its ID. + Discard(ctx context.Context, runID string, options RunDiscardOptions) error +} + +// runs implements Runs. +type runs struct { + client *Client +} + +// RunStatus represents a run state. +type RunStatus string + +//List all available run statuses. +const ( + RunApplied RunStatus = "applied" + RunApplying RunStatus = "applying" + RunCanceled RunStatus = "canceled" + RunConfirmed RunStatus = "confirmed" + RunDiscarded RunStatus = "discarded" + RunErrored RunStatus = "errored" + RunPending RunStatus = "pending" + RunPlanned RunStatus = "planned" + RunPlannedAndFinished RunStatus = "planned_and_finished" + RunPlanning RunStatus = "planning" + RunPolicyChecked RunStatus = "policy_checked" + RunPolicyChecking RunStatus = "policy_checking" + RunPolicyOverride RunStatus = "policy_override" + RunPolicySoftFailed RunStatus = "policy_soft_failed" +) + +// RunSource represents a source type of a run. +type RunSource string + +// List all available run sources. +const ( + RunSourceAPI RunSource = "tfe-api" + RunSourceConfigurationVersion RunSource = "tfe-configuration-version" + RunSourceUI RunSource = "tfe-ui" +) + +// RunList represents a list of runs. 
+type RunList struct { + *Pagination + Items []*Run +} + +// Run represents a Terraform Enterprise run. +type Run struct { + ID string `jsonapi:"primary,runs"` + Actions *RunActions `jsonapi:"attr,actions"` + CreatedAt time.Time `jsonapi:"attr,created-at,iso8601"` + ForceCancelAvailableAt time.Time `jsonapi:"attr,force-cancel-available-at,iso8601"` + HasChanges bool `jsonapi:"attr,has-changes"` + IsDestroy bool `jsonapi:"attr,is-destroy"` + Message string `jsonapi:"attr,message"` + Permissions *RunPermissions `jsonapi:"attr,permissions"` + PositionInQueue int `jsonapi:"attr,position-in-queue"` + Source RunSource `jsonapi:"attr,source"` + Status RunStatus `jsonapi:"attr,status"` + StatusTimestamps *RunStatusTimestamps `jsonapi:"attr,status-timestamps"` + + // Relations + Apply *Apply `jsonapi:"relation,apply"` + ConfigurationVersion *ConfigurationVersion `jsonapi:"relation,configuration-version"` + Plan *Plan `jsonapi:"relation,plan"` + PolicyChecks []*PolicyCheck `jsonapi:"relation,policy-checks"` + Workspace *Workspace `jsonapi:"relation,workspace"` +} + +// RunActions represents the run actions. +type RunActions struct { + IsCancelable bool `json:"is-cancelable"` + IsConfirmable bool `json:"is-confirmable"` + IsDiscardable bool `json:"is-discardable"` + IsForceCancelable bool `json:"is-force-cancelable"` +} + +// RunPermissions represents the run permissions. +type RunPermissions struct { + CanApply bool `json:"can-apply"` + CanCancel bool `json:"can-cancel"` + CanDiscard bool `json:"can-discard"` + CanForceCancel bool `json:"can-force-cancel"` + CanForceExecute bool `json:"can-force-execute"` +} + +// RunStatusTimestamps holds the timestamps for individual run statuses. +type RunStatusTimestamps struct { + ErroredAt time.Time `json:"errored-at"` + FinishedAt time.Time `json:"finished-at"` + QueuedAt time.Time `json:"queued-at"` + StartedAt time.Time `json:"started-at"` +} + +// RunListOptions represents the options for listing runs. 
+type RunListOptions struct { + ListOptions +} + +// List all the runs of the given workspace. +func (s *runs) List(ctx context.Context, workspaceID string, options RunListOptions) (*RunList, error) { + if !validStringID(&workspaceID) { + return nil, errors.New("Invalid value for workspace ID") + } + + u := fmt.Sprintf("workspaces/%s/runs", url.QueryEscape(workspaceID)) + req, err := s.client.newRequest("GET", u, &options) + if err != nil { + return nil, err + } + + rl := &RunList{} + err = s.client.do(ctx, req, rl) + if err != nil { + return nil, err + } + + return rl, nil +} + +// RunCreateOptions represents the options for creating a new run. +type RunCreateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,runs"` + + // Specifies if this plan is a destroy plan, which will destroy all + // provisioned resources. + IsDestroy *bool `jsonapi:"attr,is-destroy,omitempty"` + + // Specifies the message to be associated with this run. + Message *string `jsonapi:"attr,message,omitempty"` + + // Specifies the configuration version to use for this run. If the + // configuration version object is omitted, the run will be created using the + // workspace's latest configuration version. + ConfigurationVersion *ConfigurationVersion `jsonapi:"relation,configuration-version"` + + // Specifies the workspace where the run will be executed. + Workspace *Workspace `jsonapi:"relation,workspace"` +} + +func (o RunCreateOptions) valid() error { + if o.Workspace == nil { + return errors.New("Workspace is required") + } + return nil +} + +// Create a new run with the given options. +func (s *runs) Create(ctx context.Context, options RunCreateOptions) (*Run, error) { + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. 
+ options.ID = "" + + req, err := s.client.newRequest("POST", "runs", &options) + if err != nil { + return nil, err + } + + r := &Run{} + err = s.client.do(ctx, req, r) + if err != nil { + return nil, err + } + + return r, nil +} + +// Read a run by its ID. +func (s *runs) Read(ctx context.Context, runID string) (*Run, error) { + if !validStringID(&runID) { + return nil, errors.New("Invalid value for run ID") + } + + u := fmt.Sprintf("runs/%s", url.QueryEscape(runID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + r := &Run{} + err = s.client.do(ctx, req, r) + if err != nil { + return nil, err + } + + return r, nil +} + +// RunApplyOptions represents the options for applying a run. +type RunApplyOptions struct { + // An optional comment about the run. + Comment *string `json:"comment,omitempty"` +} + +// Apply a run by its ID. +func (s *runs) Apply(ctx context.Context, runID string, options RunApplyOptions) error { + if !validStringID(&runID) { + return errors.New("Invalid value for run ID") + } + + u := fmt.Sprintf("runs/%s/actions/apply", url.QueryEscape(runID)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// RunCancelOptions represents the options for canceling a run. +type RunCancelOptions struct { + // An optional explanation for why the run was canceled. + Comment *string `json:"comment,omitempty"` +} + +// Cancel a run by its ID. +func (s *runs) Cancel(ctx context.Context, runID string, options RunCancelOptions) error { + if !validStringID(&runID) { + return errors.New("Invalid value for run ID") + } + + u := fmt.Sprintf("runs/%s/actions/cancel", url.QueryEscape(runID)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// RunCancelOptions represents the options for force-canceling a run. 
+type RunForceCancelOptions struct { + // An optional comment explaining the reason for the force-cancel. + Comment *string `json:"comment,omitempty"` +} + +// ForceCancel is used to forcefully cancel a run by its ID. +func (s *runs) ForceCancel(ctx context.Context, runID string, options RunForceCancelOptions) error { + if !validStringID(&runID) { + return errors.New("Invalid value for run ID") + } + + u := fmt.Sprintf("runs/%s/actions/force-cancel", url.QueryEscape(runID)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// RunDiscardOptions represents the options for discarding a run. +type RunDiscardOptions struct { + // An optional explanation for why the run was discarded. + Comment *string `json:"comment,omitempty"` +} + +// Discard a run by its ID. +func (s *runs) Discard(ctx context.Context, runID string, options RunDiscardOptions) error { + if !validStringID(&runID) { + return errors.New("Invalid value for run ID") + } + + u := fmt.Sprintf("runs/%s/actions/discard", url.QueryEscape(runID)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/ssh_key.go b/vendor/github.com/hashicorp/go-tfe/ssh_key.go new file mode 100644 index 000000000000..b76a170db3fe --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/ssh_key.go @@ -0,0 +1,198 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" +) + +// Compile-time proof of interface implementation. +var _ SSHKeys = (*sshKeys)(nil) + +// SSHKeys describes all the SSH key related methods that the Terraform +// Enterprise API supports. 
+// +// TFE API docs: +// https://www.terraform.io/docs/enterprise/api/ssh-keys.html +type SSHKeys interface { + // List all the SSH keys for a given organization + List(ctx context.Context, organization string, options SSHKeyListOptions) (*SSHKeyList, error) + + // Create an SSH key and associate it with an organization. + Create(ctx context.Context, organization string, options SSHKeyCreateOptions) (*SSHKey, error) + + // Read an SSH key by its ID. + Read(ctx context.Context, sshKeyID string) (*SSHKey, error) + + // Update an SSH key by its ID. + Update(ctx context.Context, sshKeyID string, options SSHKeyUpdateOptions) (*SSHKey, error) + + // Delete an SSH key by its ID. + Delete(ctx context.Context, sshKeyID string) error +} + +// sshKeys implements SSHKeys. +type sshKeys struct { + client *Client +} + +// SSHKeyList represents a list of SSH keys. +type SSHKeyList struct { + *Pagination + Items []*SSHKey +} + +// SSHKey represents a SSH key. +type SSHKey struct { + ID string `jsonapi:"primary,ssh-keys"` + Name string `jsonapi:"attr,name"` +} + +// SSHKeyListOptions represents the options for listing SSH keys. +type SSHKeyListOptions struct { + ListOptions +} + +// List all the SSH keys for a given organization +func (s *sshKeys) List(ctx context.Context, organization string, options SSHKeyListOptions) (*SSHKeyList, error) { + if !validStringID(&organization) { + return nil, errors.New("Invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/ssh-keys", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, &options) + if err != nil { + return nil, err + } + + kl := &SSHKeyList{} + err = s.client.do(ctx, req, kl) + if err != nil { + return nil, err + } + + return kl, nil +} + +// SSHKeyCreateOptions represents the options for creating an SSH key. +type SSHKeyCreateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,ssh-keys"` + + // A name to identify the SSH key. 
+ Name *string `jsonapi:"attr,name"` + + // The content of the SSH private key. + Value *string `jsonapi:"attr,value"` +} + +func (o SSHKeyCreateOptions) valid() error { + if !validString(o.Name) { + return errors.New("Name is required") + } + if !validString(o.Value) { + return errors.New("Value is required") + } + return nil +} + +// Create an SSH key and associate it with an organization. +func (s *sshKeys) Create(ctx context.Context, organization string, options SSHKeyCreateOptions) (*SSHKey, error) { + if !validStringID(&organization) { + return nil, errors.New("Invalid value for organization") + } + + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("organizations/%s/ssh-keys", url.QueryEscape(organization)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return nil, err + } + + k := &SSHKey{} + err = s.client.do(ctx, req, k) + if err != nil { + return nil, err + } + + return k, nil +} + +// Read an SSH key by its ID. +func (s *sshKeys) Read(ctx context.Context, sshKeyID string) (*SSHKey, error) { + if !validStringID(&sshKeyID) { + return nil, errors.New("Invalid value for SSH key ID") + } + + u := fmt.Sprintf("ssh-keys/%s", url.QueryEscape(sshKeyID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + k := &SSHKey{} + err = s.client.do(ctx, req, k) + if err != nil { + return nil, err + } + + return k, nil +} + +// SSHKeyUpdateOptions represents the options for updating an SSH key. +type SSHKeyUpdateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,ssh-keys"` + + // A new name to identify the SSH key. + Name *string `jsonapi:"attr,name,omitempty"` + + // Updated content of the SSH private key. + Value *string `jsonapi:"attr,value,omitempty"` +} + +// Update an SSH key by its ID. 
+func (s *sshKeys) Update(ctx context.Context, sshKeyID string, options SSHKeyUpdateOptions) (*SSHKey, error) { + if !validStringID(&sshKeyID) { + return nil, errors.New("Invalid value for SSH key ID") + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("ssh-keys/%s", url.QueryEscape(sshKeyID)) + req, err := s.client.newRequest("PATCH", u, &options) + if err != nil { + return nil, err + } + + k := &SSHKey{} + err = s.client.do(ctx, req, k) + if err != nil { + return nil, err + } + + return k, nil +} + +// Delete an SSH key by its ID. +func (s *sshKeys) Delete(ctx context.Context, sshKeyID string) error { + if !validStringID(&sshKeyID) { + return errors.New("Invalid value for SSH key ID") + } + + u := fmt.Sprintf("ssh-keys/%s", url.QueryEscape(sshKeyID)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/state_version.go b/vendor/github.com/hashicorp/go-tfe/state_version.go new file mode 100644 index 000000000000..89bcfda1c440 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/state_version.go @@ -0,0 +1,216 @@ +package tfe + +import ( + "bytes" + "context" + "errors" + "fmt" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ StateVersions = (*stateVersions)(nil) + +// StateVersions describes all the state version related methods that +// the Terraform Enterprise API supports. +// +// TFE API docs: +// https://www.terraform.io/docs/enterprise/api/state-versions.html +type StateVersions interface { + // List all the state versions for a given workspace. + List(ctx context.Context, options StateVersionListOptions) (*StateVersionList, error) + + // Create a new state version for the given workspace. + Create(ctx context.Context, workspaceID string, options StateVersionCreateOptions) (*StateVersion, error) + + // Read a state version by its ID. 
+ Read(ctx context.Context, svID string) (*StateVersion, error) + + // Current reads the latest available state from the given workspace. + Current(ctx context.Context, workspaceID string) (*StateVersion, error) + + // Download retrieves the actual stored state of a state version + Download(ctx context.Context, url string) ([]byte, error) +} + +// stateVersions implements StateVersions. +type stateVersions struct { + client *Client +} + +// StateVersionList represents a list of state versions. +type StateVersionList struct { + *Pagination + Items []*StateVersion +} + +// StateVersion represents a Terraform Enterprise state version. +type StateVersion struct { + ID string `jsonapi:"primary,state-versions"` + CreatedAt time.Time `jsonapi:"attr,created-at,iso8601"` + DownloadURL string `jsonapi:"attr,hosted-state-download-url"` + Serial int64 `jsonapi:"attr,serial"` + VCSCommitSHA string `jsonapi:"attr,vcs-commit-sha"` + VCSCommitURL string `jsonapi:"attr,vcs-commit-url"` + + // Relations + Run *Run `jsonapi:"relation,run"` +} + +// StateVersionListOptions represents the options for listing state versions. +type StateVersionListOptions struct { + ListOptions + Organization *string `url:"filter[organization][name]"` + Workspace *string `url:"filter[workspace][name]"` +} + +func (o StateVersionListOptions) valid() error { + if !validString(o.Organization) { + return errors.New("Organization is required") + } + if !validString(o.Workspace) { + return errors.New("Workspace is required") + } + return nil +} + +// List all the state versions for a given workspace. 
+func (s *stateVersions) List(ctx context.Context, options StateVersionListOptions) (*StateVersionList, error) { + if err := options.valid(); err != nil { + return nil, err + } + + req, err := s.client.newRequest("GET", "state-versions", &options) + if err != nil { + return nil, err + } + + svl := &StateVersionList{} + err = s.client.do(ctx, req, svl) + if err != nil { + return nil, err + } + + return svl, nil +} + +// StateVersionCreateOptions represents the options for creating a state version. +type StateVersionCreateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,state-versions"` + + // The lineage of the state. + Lineage *string `jsonapi:"attr,lineage,omitempty"` + + // The MD5 hash of the state version. + MD5 *string `jsonapi:"attr,md5"` + + // The serial of the state. + Serial *int64 `jsonapi:"attr,serial"` + + // The base64 encoded state. + State *string `jsonapi:"attr,state"` + + // Specifies the run to associate the state with. + Run *Run `jsonapi:"relation,run,omitempty"` +} + +func (o StateVersionCreateOptions) valid() error { + if !validString(o.MD5) { + return errors.New("MD5 is required") + } + if o.Serial == nil { + return errors.New("Serial is required") + } + if !validString(o.State) { + return errors.New("State is required") + } + return nil +} + +// Create a new state version for the given workspace. +func (s *stateVersions) Create(ctx context.Context, workspaceID string, options StateVersionCreateOptions) (*StateVersion, error) { + if !validStringID(&workspaceID) { + return nil, errors.New("Invalid value for workspace ID") + } + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. 
+ options.ID = "" + + u := fmt.Sprintf("workspaces/%s/state-versions", url.QueryEscape(workspaceID)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return nil, err + } + + sv := &StateVersion{} + err = s.client.do(ctx, req, sv) + if err != nil { + return nil, err + } + + return sv, nil +} + +// Read a state version by its ID. +func (s *stateVersions) Read(ctx context.Context, svID string) (*StateVersion, error) { + if !validStringID(&svID) { + return nil, errors.New("Invalid value for state version ID") + } + + u := fmt.Sprintf("state-versions/%s", url.QueryEscape(svID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + sv := &StateVersion{} + err = s.client.do(ctx, req, sv) + if err != nil { + return nil, err + } + + return sv, nil +} + +// Current reads the latest available state from the given workspace. +func (s *stateVersions) Current(ctx context.Context, workspaceID string) (*StateVersion, error) { + if !validStringID(&workspaceID) { + return nil, errors.New("Invalid value for workspace ID") + } + + u := fmt.Sprintf("workspaces/%s/current-state-version", url.QueryEscape(workspaceID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + sv := &StateVersion{} + err = s.client.do(ctx, req, sv) + if err != nil { + return nil, err + } + + return sv, nil +} + +// Download retrieves the actual stored state of a state version +func (s *stateVersions) Download(ctx context.Context, url string) ([]byte, error) { + req, err := s.client.newRequest("GET", url, nil) + if err != nil { + return nil, err + } + req.Header.Set("Accept", "application/json") + + var buf bytes.Buffer + err = s.client.do(ctx, req, &buf) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} diff --git a/vendor/github.com/hashicorp/go-tfe/team.go b/vendor/github.com/hashicorp/go-tfe/team.go new file mode 100644 index 000000000000..e6a69c3d2555 --- /dev/null +++ 
b/vendor/github.com/hashicorp/go-tfe/team.go @@ -0,0 +1,165 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" +) + +// Compile-time proof of interface implementation. +var _ Teams = (*teams)(nil) + +// Teams describes all the team related methods that the Terraform +// Enterprise API supports. +// +// TFE API docs: https://www.terraform.io/docs/enterprise/api/teams.html +type Teams interface { + // List all the teams of the given organization. + List(ctx context.Context, organization string, options TeamListOptions) (*TeamList, error) + + // Create a new team with the given options. + Create(ctx context.Context, organization string, options TeamCreateOptions) (*Team, error) + + // Read a team by its ID. + Read(ctx context.Context, teamID string) (*Team, error) + + // Delete a team by its ID. + Delete(ctx context.Context, teamID string) error +} + +// teams implements Teams. +type teams struct { + client *Client +} + +// TeamList represents a list of teams. +type TeamList struct { + *Pagination + Items []*Team +} + +// Team represents a Terraform Enterprise team. +type Team struct { + ID string `jsonapi:"primary,teams"` + Name string `jsonapi:"attr,name"` + Permissions *TeamPermissions `jsonapi:"attr,permissions"` + UserCount int `jsonapi:"attr,users-count"` + + // Relations + Users []*User `jsonapi:"relation,users"` +} + +// TeamPermissions represents the team permissions. +type TeamPermissions struct { + CanDestroy bool `json:"can-destroy"` + CanUpdateMembership bool `json:"can-update-membership"` +} + +// TeamListOptions represents the options for listing teams. +type TeamListOptions struct { + ListOptions +} + +// List all the teams of the given organization. 
+func (s *teams) List(ctx context.Context, organization string, options TeamListOptions) (*TeamList, error) { + if !validStringID(&organization) { + return nil, errors.New("Invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/teams", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, &options) + if err != nil { + return nil, err + } + + tl := &TeamList{} + err = s.client.do(ctx, req, tl) + if err != nil { + return nil, err + } + + return tl, nil +} + +// TeamCreateOptions represents the options for creating a team. +type TeamCreateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,teams"` + + // Name of the team. + Name *string `jsonapi:"attr,name"` +} + +func (o TeamCreateOptions) valid() error { + if !validString(o.Name) { + return errors.New("Name is required") + } + if !validStringID(o.Name) { + return errors.New("Invalid value for name") + } + return nil +} + +// Create a new team with the given options. +func (s *teams) Create(ctx context.Context, organization string, options TeamCreateOptions) (*Team, error) { + if !validStringID(&organization) { + return nil, errors.New("Invalid value for organization") + } + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("organizations/%s/teams", url.QueryEscape(organization)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return nil, err + } + + t := &Team{} + err = s.client.do(ctx, req, t) + if err != nil { + return nil, err + } + + return t, nil +} + +// Read a single team by its ID. 
+func (s *teams) Read(ctx context.Context, teamID string) (*Team, error) { + if !validStringID(&teamID) { + return nil, errors.New("Invalid value for team ID") + } + + u := fmt.Sprintf("teams/%s", url.QueryEscape(teamID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + t := &Team{} + err = s.client.do(ctx, req, t) + if err != nil { + return nil, err + } + + return t, nil +} + +// Delete a team by its ID. +func (s *teams) Delete(ctx context.Context, teamID string) error { + if !validStringID(&teamID) { + return errors.New("Invalid value for team ID") + } + + u := fmt.Sprintf("teams/%s", url.QueryEscape(teamID)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/team_access.go b/vendor/github.com/hashicorp/go-tfe/team_access.go new file mode 100644 index 000000000000..33abc322561a --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/team_access.go @@ -0,0 +1,184 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" +) + +// Compile-time proof of interface implementation. +var _ TeamAccesses = (*teamAccesses)(nil) + +// TeamAccesses describes all the team access related methods that the +// Terraform Enterprise API supports. +// +// TFE API docs: +// https://www.terraform.io/docs/enterprise/api/team-access.html +type TeamAccesses interface { + // List all the team accesses for a given workspace. + List(ctx context.Context, options TeamAccessListOptions) (*TeamAccessList, error) + + // Add team access for a workspace. + Add(ctx context.Context, options TeamAccessAddOptions) (*TeamAccess, error) + + // Read a team access by its ID. + Read(ctx context.Context, teamAccessID string) (*TeamAccess, error) + + // Remove team access from a workspace. + Remove(ctx context.Context, teamAccessID string) error +} + +// teamAccesses implements TeamAccesses. 
+type teamAccesses struct { + client *Client +} + +// AccessType represents a team access type. +type AccessType string + +// List all available team access types. +const ( + AccessAdmin AccessType = "admin" + AccessRead AccessType = "read" + AccessWrite AccessType = "write" +) + +// TeamAccessList represents a list of team accesses. +type TeamAccessList struct { + *Pagination + Items []*TeamAccess +} + +// TeamAccess represents the workspace access for a team. +type TeamAccess struct { + ID string `jsonapi:"primary,team-workspaces"` + Access AccessType `jsonapi:"attr,access"` + + // Relations + Team *Team `jsonapi:"relation,team"` + Workspace *Workspace `jsonapi:"relation,workspace"` +} + +// TeamAccessListOptions represents the options for listing team accesses. +type TeamAccessListOptions struct { + ListOptions + WorkspaceID *string `url:"filter[workspace][id],omitempty"` +} + +func (o TeamAccessListOptions) valid() error { + if !validString(o.WorkspaceID) { + return errors.New("Workspace ID is required") + } + if !validStringID(o.WorkspaceID) { + return errors.New("Invalid value for workspace ID") + } + return nil +} + +// List all the team accesses for a given workspace. +func (s *teamAccesses) List(ctx context.Context, options TeamAccessListOptions) (*TeamAccessList, error) { + if err := options.valid(); err != nil { + return nil, err + } + + req, err := s.client.newRequest("GET", "team-workspaces", &options) + if err != nil { + return nil, err + } + + tal := &TeamAccessList{} + err = s.client.do(ctx, req, tal) + if err != nil { + return nil, err + } + + return tal, nil +} + +// TeamAccessAddOptions represents the options for adding team access. +type TeamAccessAddOptions struct { + // For internal use only! + ID string `jsonapi:"primary,team-workspaces"` + + // The type of access to grant. 
+ Access *AccessType `jsonapi:"attr,access"` + + // The team to add to the workspace + Team *Team `jsonapi:"relation,team"` + + // The workspace to which the team is to be added. + Workspace *Workspace `jsonapi:"relation,workspace"` +} + +func (o TeamAccessAddOptions) valid() error { + if o.Access == nil { + return errors.New("Access is required") + } + if o.Team == nil { + return errors.New("Team is required") + } + if o.Workspace == nil { + return errors.New("Workspace is required") + } + return nil +} + +// Add team access for a workspace. +func (s *teamAccesses) Add(ctx context.Context, options TeamAccessAddOptions) (*TeamAccess, error) { + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + req, err := s.client.newRequest("POST", "team-workspaces", &options) + if err != nil { + return nil, err + } + + ta := &TeamAccess{} + err = s.client.do(ctx, req, ta) + if err != nil { + return nil, err + } + + return ta, nil +} + +// Read a team access by its ID. +func (s *teamAccesses) Read(ctx context.Context, teamAccessID string) (*TeamAccess, error) { + if !validStringID(&teamAccessID) { + return nil, errors.New("Invalid value for team access ID") + } + + u := fmt.Sprintf("team-workspaces/%s", url.QueryEscape(teamAccessID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + ta := &TeamAccess{} + err = s.client.do(ctx, req, ta) + if err != nil { + return nil, err + } + + return ta, nil +} + +// Remove team access from a workspace. 
+func (s *teamAccesses) Remove(ctx context.Context, teamAccessID string) error { + if !validStringID(&teamAccessID) { + return errors.New("Invalid value for team access ID") + } + + u := fmt.Sprintf("team-workspaces/%s", url.QueryEscape(teamAccessID)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/team_member.go b/vendor/github.com/hashicorp/go-tfe/team_member.go new file mode 100644 index 000000000000..297d58a6bc63 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/team_member.go @@ -0,0 +1,139 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" +) + +// Compile-time proof of interface implementation. +var _ TeamMembers = (*teamMembers)(nil) + +// TeamMembers describes all the team member related methods that the +// Terraform Enterprise API supports. +// +// TFE API docs: +// https://www.terraform.io/docs/enterprise/api/team-members.html +type TeamMembers interface { + // List all members of a team. + List(ctx context.Context, teamID string) ([]*User, error) + + // Add multiple users to a team. + Add(ctx context.Context, teamID string, options TeamMemberAddOptions) error + + // Remove multiple users from a team. + Remove(ctx context.Context, teamID string, options TeamMemberRemoveOptions) error +} + +// teamMembers implements TeamMembers. +type teamMembers struct { + client *Client +} + +type teamMember struct { + Username string `jsonapi:"primary,users"` +} + +// List all members of a team. 
+func (s *teamMembers) List(ctx context.Context, teamID string) ([]*User, error) { + if !validStringID(&teamID) { + return nil, errors.New("Invalid value for team ID") + } + + options := struct { + Include string `url:"include"` + }{ + Include: "users", + } + + u := fmt.Sprintf("teams/%s", url.QueryEscape(teamID)) + req, err := s.client.newRequest("GET", u, options) + if err != nil { + return nil, err + } + + t := &Team{} + err = s.client.do(ctx, req, t) + if err != nil { + return nil, err + } + + return t.Users, nil +} + +// TeamMemberAddOptions represents the options for adding team members. +type TeamMemberAddOptions struct { + Usernames []string +} + +func (o *TeamMemberAddOptions) valid() error { + if o.Usernames == nil { + return errors.New("Usernames is required") + } + if len(o.Usernames) == 0 { + return errors.New("Invalid value for usernames") + } + return nil +} + +// Add multiple users to a team. +func (s *teamMembers) Add(ctx context.Context, teamID string, options TeamMemberAddOptions) error { + if !validStringID(&teamID) { + return errors.New("Invalid value for team ID") + } + if err := options.valid(); err != nil { + return err + } + + var tms []*teamMember + for _, name := range options.Usernames { + tms = append(tms, &teamMember{Username: name}) + } + + u := fmt.Sprintf("teams/%s/relationships/users", url.QueryEscape(teamID)) + req, err := s.client.newRequest("POST", u, tms) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// TeamMemberRemoveOptions represents the options for deleting team members. +type TeamMemberRemoveOptions struct { + Usernames []string +} + +func (o *TeamMemberRemoveOptions) valid() error { + if o.Usernames == nil { + return errors.New("Usernames is required") + } + if len(o.Usernames) == 0 { + return errors.New("Invalid value for usernames") + } + return nil +} + +// Remove multiple users from a team. 
+func (s *teamMembers) Remove(ctx context.Context, teamID string, options TeamMemberRemoveOptions) error { + if !validStringID(&teamID) { + return errors.New("Invalid value for team ID") + } + if err := options.valid(); err != nil { + return err + } + + var tms []*teamMember + for _, name := range options.Usernames { + tms = append(tms, &teamMember{Username: name}) + } + + u := fmt.Sprintf("teams/%s/relationships/users", url.QueryEscape(teamID)) + req, err := s.client.newRequest("DELETE", u, tms) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/team_token.go b/vendor/github.com/hashicorp/go-tfe/team_token.go new file mode 100644 index 000000000000..baaf75789e6e --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/team_token.go @@ -0,0 +1,99 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ TeamTokens = (*teamTokens)(nil) + +// TeamTokens describes all the team token related methods that the +// Terraform Enterprise API supports. +// +// TFE API docs: +// https://www.terraform.io/docs/enterprise/api/team-tokens.html +type TeamTokens interface { + // Generate a new team token, replacing any existing token. + Generate(ctx context.Context, teamID string) (*TeamToken, error) + + // Read a team token by its ID. + Read(ctx context.Context, teamID string) (*TeamToken, error) + + // Delete a team token by its ID. + Delete(ctx context.Context, teamID string) error +} + +// teamTokens implements TeamTokens. +type teamTokens struct { + client *Client +} + +// TeamToken represents a Terraform Enterprise team token. 
+type TeamToken struct { + ID string `jsonapi:"primary,authentication-tokens"` + CreatedAt time.Time `jsonapi:"attr,created-at,iso8601"` + Description string `jsonapi:"attr,description"` + LastUsedAt time.Time `jsonapi:"attr,last-used-at,iso8601"` + Token string `jsonapi:"attr,token"` +} + +// Generate a new team token, replacing any existing token. +func (s *teamTokens) Generate(ctx context.Context, teamID string) (*TeamToken, error) { + if !validStringID(&teamID) { + return nil, errors.New("Invalid value for team ID") + } + + u := fmt.Sprintf("teams/%s/authentication-token", url.QueryEscape(teamID)) + req, err := s.client.newRequest("POST", u, nil) + if err != nil { + return nil, err + } + + tt := &TeamToken{} + err = s.client.do(ctx, req, tt) + if err != nil { + return nil, err + } + + return tt, err +} + +// Read a team token by its ID. +func (s *teamTokens) Read(ctx context.Context, teamID string) (*TeamToken, error) { + if !validStringID(&teamID) { + return nil, errors.New("Invalid value for team ID") + } + + u := fmt.Sprintf("teams/%s/authentication-token", url.QueryEscape(teamID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + tt := &TeamToken{} + err = s.client.do(ctx, req, tt) + if err != nil { + return nil, err + } + + return tt, err +} + +// Delete a team token by its ID. 
+func (s *teamTokens) Delete(ctx context.Context, teamID string) error { + if !validStringID(&teamID) { + return errors.New("Invalid value for team ID") + } + + u := fmt.Sprintf("teams/%s/authentication-token", url.QueryEscape(teamID)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/tfe.go b/vendor/github.com/hashicorp/go-tfe/tfe.go new file mode 100644 index 000000000000..127d94a93135 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/tfe.go @@ -0,0 +1,420 @@ +package tfe + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "reflect" + "strings" + + "github.com/google/go-querystring/query" + "github.com/hashicorp/go-cleanhttp" + "github.com/svanharmelen/jsonapi" +) + +const ( + // DefaultAddress of Terraform Enterprise. + DefaultAddress = "https://app.terraform.io" + // DefaultBasePath on which the API is served. + DefaultBasePath = "/api/v2/" +) + +const ( + userAgent = "go-tfe" +) + +var ( + // ErrUnauthorized is returned when a receiving a 401. + ErrUnauthorized = errors.New("unauthorized") + // ErrResourceNotFound is returned when a receiving a 404. + ErrResourceNotFound = errors.New("resource not found") +) + +// Config provides configuration details to the API client. +type Config struct { + // The address of the Terraform Enterprise API. + Address string + + // The base path on which the API is served. + BasePath string + + // API token used to access the Terraform Enterprise API. + Token string + + // Headers that will be added to every request. + Headers http.Header + + // A custom HTTP client to use. + HTTPClient *http.Client +} + +// DefaultConfig returns a default config structure. 
+func DefaultConfig() *Config { + config := &Config{ + Address: os.Getenv("TFE_ADDRESS"), + BasePath: DefaultBasePath, + Token: os.Getenv("TFE_TOKEN"), + Headers: make(http.Header), + HTTPClient: cleanhttp.DefaultPooledClient(), + } + + // Set the default address if none is given. + if config.Address == "" { + config.Address = DefaultAddress + } + + // Set the default user agent. + config.Headers.Set("User-Agent", userAgent) + + return config +} + +// Client is the Terraform Enterprise API client. It provides the basic +// connectivity and configuration for accessing the TFE API. +type Client struct { + baseURL *url.URL + token string + headers http.Header + http *http.Client + + Applies Applies + ConfigurationVersions ConfigurationVersions + OAuthClients OAuthClients + OAuthTokens OAuthTokens + Organizations Organizations + OrganizationTokens OrganizationTokens + Plans Plans + Policies Policies + PolicyChecks PolicyChecks + Runs Runs + SSHKeys SSHKeys + StateVersions StateVersions + Teams Teams + TeamAccess TeamAccesses + TeamMembers TeamMembers + TeamTokens TeamTokens + Users Users + Variables Variables + Workspaces Workspaces +} + +// NewClient creates a new Terraform Enterprise API client. +func NewClient(cfg *Config) (*Client, error) { + config := DefaultConfig() + + // Layer in the provided config for any non-blank values. + if cfg != nil { + if cfg.Address != "" { + config.Address = cfg.Address + } + if cfg.BasePath != "" { + config.BasePath = cfg.BasePath + } + if cfg.Token != "" { + config.Token = cfg.Token + } + for k, v := range cfg.Headers { + config.Headers[k] = v + } + if cfg.HTTPClient != nil { + config.HTTPClient = cfg.HTTPClient + } + } + + // Parse the address to make sure its a valid URL. 
+ baseURL, err := url.Parse(config.Address) + if err != nil { + return nil, fmt.Errorf("Invalid address: %v", err) + } + + baseURL.Path = config.BasePath + if !strings.HasSuffix(baseURL.Path, "/") { + baseURL.Path += "/" + } + + // This value must be provided by the user. + if config.Token == "" { + return nil, fmt.Errorf("Missing API token") + } + + // Create the client. + client := &Client{ + baseURL: baseURL, + token: config.Token, + headers: config.Headers, + http: config.HTTPClient, + } + + // Create the services. + client.Applies = &applies{client: client} + client.ConfigurationVersions = &configurationVersions{client: client} + client.OAuthClients = &oAuthClients{client: client} + client.OAuthTokens = &oAuthTokens{client: client} + client.Organizations = &organizations{client: client} + client.OrganizationTokens = &organizationTokens{client: client} + client.Plans = &plans{client: client} + client.Policies = &policies{client: client} + client.PolicyChecks = &policyChecks{client: client} + client.Runs = &runs{client: client} + client.SSHKeys = &sshKeys{client: client} + client.StateVersions = &stateVersions{client: client} + client.Teams = &teams{client: client} + client.TeamAccess = &teamAccesses{client: client} + client.TeamMembers = &teamMembers{client: client} + client.TeamTokens = &teamTokens{client: client} + client.Users = &users{client: client} + client.Variables = &variables{client: client} + client.Workspaces = &workspaces{client: client} + + return client, nil +} + +// ListOptions is used to specify pagination options when making API requests. +// Pagination allows breaking up large result sets into chunks, or "pages". +type ListOptions struct { + // The page number to request. The results vary based on the PageSize. + PageNumber int `url:"page[number],omitempty"` + + // The number of elements returned in a single page. + PageSize int `url:"page[size],omitempty"` +} + +// Pagination is used to return the pagination details of an API request. 
+type Pagination struct { + CurrentPage int `json:"current-page"` + PreviousPage int `json:"prev-page"` + NextPage int `json:"next-page"` + TotalPages int `json:"total-pages"` + TotalCount int `json:"total-count"` +} + +// newRequest creates an API request. A relative URL path can be provided in +// path, in which case it is resolved relative to the apiVersionPath of the +// Client. Relative URL paths should always be specified without a preceding +// slash. +// If v is supplied, the value will be JSONAPI encoded and included as the +// request body. If the method is GET, the value will be parsed and added as +// query parameters. +func (c *Client) newRequest(method, path string, v interface{}) (*http.Request, error) { + u, err := c.baseURL.Parse(path) + if err != nil { + return nil, err + } + + req := &http.Request{ + Method: method, + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + + // Set default headers. + for k, v := range c.headers { + req.Header[k] = v + } + + switch method { + case "GET": + req.Header.Set("Accept", "application/vnd.api+json") + + if v != nil { + q, err := query.Values(v) + if err != nil { + return nil, err + } + u.RawQuery = q.Encode() + } + case "DELETE", "PATCH", "POST": + req.Header.Set("Accept", "application/vnd.api+json") + req.Header.Set("Content-Type", "application/vnd.api+json") + + if v != nil { + var body bytes.Buffer + if err := jsonapi.MarshalPayloadWithoutIncluded(&body, v); err != nil { + return nil, err + } + req.Body = ioutil.NopCloser(&body) + req.ContentLength = int64(body.Len()) + } + case "PUT": + req.Header.Set("Accept", "application/json") + req.Header.Set("Content-Type", "application/octet-stream") + + if v != nil { + switch v := v.(type) { + case *bytes.Buffer: + req.Body = ioutil.NopCloser(v) + req.ContentLength = int64(v.Len()) + case []byte: + req.Body = ioutil.NopCloser(bytes.NewReader(v)) + req.ContentLength = int64(len(v)) + default: + return 
nil, fmt.Errorf("Unexpected type: %T", v) + } + } + } + + // Set the authorization header. + req.Header.Set("Authorization", "Bearer "+c.token) + + return req, nil +} + +// do sends an API request and returns the API response. The API response +// is JSONAPI decoded and the document's primary data is stored in the value +// pointed to by v, or returned as an error if an API error has occurred. + +// If v implements the io.Writer interface, the raw response body will be +// written to v, without attempting to first decode it. +// +// The provided ctx must be non-nil. If it is canceled or times out, ctx.Err() +// will be returned. +func (c *Client) do(ctx context.Context, req *http.Request, v interface{}) error { + // Add the context to the request. + req = req.WithContext(ctx) + + // Execute the request and check the response. + resp, err := c.http.Do(req) + if err != nil { + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + select { + case <-ctx.Done(): + return ctx.Err() + default: + return err + } + } + defer resp.Body.Close() + + // Basic response checking. + if err := checkResponseCode(resp); err != nil { + return err + } + + // Return here if decoding the response isn't needed. + if v == nil { + return nil + } + + // If v implements io.Writer, write the raw response body. + if w, ok := v.(io.Writer); ok { + _, err = io.Copy(w, resp.Body) + return err + } + + // Get the value of v so we can test if it's a struct. + dst := reflect.Indirect(reflect.ValueOf(v)) + + // Return an error if v is not a struct or an io.Writer. + if dst.Kind() != reflect.Struct { + return fmt.Errorf("v must be a struct or an io.Writer") + } + + // Try to get the Items and Pagination struct fields. + items := dst.FieldByName("Items") + pagination := dst.FieldByName("Pagination") + + // Unmarshal a single value if v does not contain the + // Items and Pagination struct fields. 
+ if !items.IsValid() || !pagination.IsValid() { + return jsonapi.UnmarshalPayload(resp.Body, v) + } + + // Return an error if v.Items is not a slice. + if items.Type().Kind() != reflect.Slice { + return fmt.Errorf("v.Items must be a slice") + } + + // Create a temporary buffer and copy all the read data into it. + body := bytes.NewBuffer(nil) + reader := io.TeeReader(resp.Body, body) + + // Unmarshal as a list of values as v.Items is a slice. + raw, err := jsonapi.UnmarshalManyPayload(reader, items.Type().Elem()) + if err != nil { + return err + } + + // Make a new slice to hold the results. + sliceType := reflect.SliceOf(items.Type().Elem()) + result := reflect.MakeSlice(sliceType, 0, len(raw)) + + // Add all of the results to the new slice. + for _, v := range raw { + result = reflect.Append(result, reflect.ValueOf(v)) + } + + // Pointer-swap the result. + items.Set(result) + + // As we are getting a list of values, we need to decode + // the pagination details out of the response body. + p, err := parsePagination(body) + if err != nil { + return err + } + + // Pointer-swap the decoded pagination details. + pagination.Set(reflect.ValueOf(p)) + + return nil +} + +func parsePagination(body io.Reader) (*Pagination, error) { + var raw struct { + Meta struct { + Pagination Pagination `json:"pagination"` + } `json:"meta"` + } + + // JSON decode the raw response. + if err := json.NewDecoder(body).Decode(&raw); err != nil { + return &Pagination{}, err + } + + return &raw.Meta.Pagination, nil +} + +// checkResponseCode can be used to check the status code of an HTTP request. +func checkResponseCode(r *http.Response) error { + if r.StatusCode >= 200 && r.StatusCode <= 299 { + return nil + } + + switch r.StatusCode { + case 401: + return ErrUnauthorized + case 404: + return ErrResourceNotFound + } + + // Decode the error payload. 
+ errPayload := &jsonapi.ErrorsPayload{} + err := json.NewDecoder(r.Body).Decode(errPayload) + if err != nil || len(errPayload.Errors) == 0 { + return fmt.Errorf(r.Status) + } + + // Parse and format the errors. + var errs []string + for _, e := range errPayload.Errors { + if e.Detail == "" { + errs = append(errs, e.Title) + } else { + errs = append(errs, fmt.Sprintf("%s %s", e.Title, e.Detail)) + } + } + + return fmt.Errorf(strings.Join(errs, "\n")) +} diff --git a/vendor/github.com/hashicorp/go-tfe/type_helpers.go b/vendor/github.com/hashicorp/go-tfe/type_helpers.go new file mode 100644 index 000000000000..30df01e4948a --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/type_helpers.go @@ -0,0 +1,46 @@ +package tfe + +// Access returns a pointer to the given team access type. +func Access(v AccessType) *AccessType { + return &v +} + +// AuthPolicy returns a pointer to the given authentication poliy. +func AuthPolicy(v AuthPolicyType) *AuthPolicyType { + return &v +} + +// Bool returns a pointer to the given bool +func Bool(v bool) *bool { + return &v +} + +// Category returns a pointer to the given category type. +func Category(v CategoryType) *CategoryType { + return &v +} + +// EnforcementMode returns a pointer to the given enforcement level. +func EnforcementMode(v EnforcementLevel) *EnforcementLevel { + return &v +} + +// Int returns a pointer to the given int. +func Int(v int) *int { + return &v +} + +// Int64 returns a pointer to the given int64. +func Int64(v int64) *int64 { + return &v +} + +// ServiceProvider returns a pointer to the given service provider type. +func ServiceProvider(v ServiceProviderType) *ServiceProviderType { + return &v +} + +// String returns a pointer to the given string. 
+func String(v string) *string { + return &v +} diff --git a/vendor/github.com/hashicorp/go-tfe/user.go b/vendor/github.com/hashicorp/go-tfe/user.go new file mode 100644 index 000000000000..f0ca28ee392f --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/user.go @@ -0,0 +1,93 @@ +package tfe + +import ( + "context" +) + +// Compile-time proof of interface implementation. +var _ Users = (*users)(nil) + +// Users describes all the user related methods that the Terraform +// Enterprise API supports. +// +// TFE API docs: https://www.terraform.io/docs/enterprise/api/user.html +type Users interface { + // ReadCurrent reads the details of the currently authenticated user. + ReadCurrent(ctx context.Context) (*User, error) + + // Update attributes of the currently authenticated user. + Update(ctx context.Context, options UserUpdateOptions) (*User, error) +} + +// users implements Users. +type users struct { + client *Client +} + +// User represents a Terraform Enterprise user. +type User struct { + ID string `jsonapi:"primary,users"` + AvatarURL string `jsonapi:"attr,avatar-url"` + Email string `jsonapi:"attr,email"` + IsServiceAccount bool `jsonapi:"attr,is-service-account"` + TwoFactor *TwoFactor `jsonapi:"attr,two-factor"` + UnconfirmedEmail string `jsonapi:"attr,unconfirmed-email"` + Username string `jsonapi:"attr,username"` + V2Only bool `jsonapi:"attr,v2-only"` + + // Relations + // AuthenticationTokens *AuthenticationTokens `jsonapi:"relation,authentication-tokens"` +} + +// TwoFactor represents the organization permissions. +type TwoFactor struct { + Enabled bool `json:"enabled"` + Verified bool `json:"verified"` +} + +// ReadCurrent reads the details of the currently authenticated user. 
+func (s *users) ReadCurrent(ctx context.Context) (*User, error) { + req, err := s.client.newRequest("GET", "account/details", nil) + if err != nil { + return nil, err + } + + u := &User{} + err = s.client.do(ctx, req, u) + if err != nil { + return nil, err + } + + return u, nil +} + +// UserUpdateOptions represents the options for updating a user. +type UserUpdateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,users"` + + // New username. + Username *string `jsonapi:"attr,username,omitempty"` + + // New email address (must be consumed afterwards to take effect). + Email *string `jsonapi:"attr,email,omitempty"` +} + +// Update attributes of the currently authenticated user. +func (s *users) Update(ctx context.Context, options UserUpdateOptions) (*User, error) { + // Make sure we don't send a user provided ID. + options.ID = "" + + req, err := s.client.newRequest("PATCH", "account/update", &options) + if err != nil { + return nil, err + } + + u := &User{} + err = s.client.do(ctx, req, u) + if err != nil { + return nil, err + } + + return u, nil +} diff --git a/vendor/github.com/hashicorp/go-tfe/validations.go b/vendor/github.com/hashicorp/go-tfe/validations.go new file mode 100644 index 000000000000..38d95a681e86 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/validations.go @@ -0,0 +1,19 @@ +package tfe + +import ( + "regexp" +) + +// A regular expression used to validate common string ID patterns. +var reStringID = regexp.MustCompile(`^[a-zA-Z0-9\-\._]+$`) + +// validString checks if the given input is present and non-empty. +func validString(v *string) bool { + return v != nil && *v != "" +} + +// validStringID checks if the given string pointer is non-nil and +// contains a typical string identifier. 
+func validStringID(v *string) bool { + return v != nil && reStringID.MatchString(*v) +} diff --git a/vendor/github.com/hashicorp/go-tfe/variable.go b/vendor/github.com/hashicorp/go-tfe/variable.go new file mode 100644 index 000000000000..ba28404c9fb4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/variable.go @@ -0,0 +1,243 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" +) + +// Compile-time proof of interface implementation. +var _ Variables = (*variables)(nil) + +// Variables describes all the variable related methods that the Terraform +// Enterprise API supports. +// +// TFE API docs: https://www.terraform.io/docs/enterprise/api/variables.html +type Variables interface { + // List all the variables associated with the given workspace. + List(ctx context.Context, options VariableListOptions) (*VariableList, error) + + // Create is used to create a new variable. + Create(ctx context.Context, options VariableCreateOptions) (*Variable, error) + + // Read a variable by its ID. + Read(ctx context.Context, variableID string) (*Variable, error) + + // Update values of an existing variable. + Update(ctx context.Context, variableID string, options VariableUpdateOptions) (*Variable, error) + + // Delete a variable by its ID. + Delete(ctx context.Context, variableID string) error +} + +// variables implements Variables. +type variables struct { + client *Client +} + +// CategoryType represents a category type. +type CategoryType string + +//List all available categories. +const ( + CategoryEnv CategoryType = "env" + CategoryTerraform CategoryType = "terraform" +) + +// VariableList represents a list of variables. +type VariableList struct { + *Pagination + Items []*Variable +} + +// Variable represents a Terraform Enterprise variable. 
+type Variable struct { + ID string `jsonapi:"primary,vars"` + Key string `jsonapi:"attr,key"` + Value string `jsonapi:"attr,value"` + Category CategoryType `jsonapi:"attr,category"` + HCL bool `jsonapi:"attr,hcl"` + Sensitive bool `jsonapi:"attr,sensitive"` + + // Relations + Workspace *Workspace `jsonapi:"relation,workspace"` +} + +// VariableListOptions represents the options for listing variables. +type VariableListOptions struct { + ListOptions + Organization *string `url:"filter[organization][name]"` + Workspace *string `url:"filter[workspace][name]"` +} + +func (o VariableListOptions) valid() error { + if !validString(o.Organization) { + return errors.New("Organization is required") + } + if !validString(o.Workspace) { + return errors.New("Workspace is required") + } + return nil +} + +// List all the variables associated with the given workspace. +func (s *variables) List(ctx context.Context, options VariableListOptions) (*VariableList, error) { + if err := options.valid(); err != nil { + return nil, err + } + + req, err := s.client.newRequest("GET", "vars", &options) + if err != nil { + return nil, err + } + + vl := &VariableList{} + err = s.client.do(ctx, req, vl) + if err != nil { + return nil, err + } + + return vl, nil +} + +// VariableCreateOptions represents the options for creating a new variable. +type VariableCreateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,vars"` + + // The name of the variable. + Key *string `jsonapi:"attr,key"` + + // The value of the variable. + Value *string `jsonapi:"attr,value"` + + // Whether this is a Terraform or environment variable. + Category *CategoryType `jsonapi:"attr,category"` + + // Whether to evaluate the value of the variable as a string of HCL code. + HCL *bool `jsonapi:"attr,hcl,omitempty"` + + // Whether the value is sensitive. + Sensitive *bool `jsonapi:"attr,sensitive,omitempty"` + + // The workspace that owns the variable. 
+ Workspace *Workspace `jsonapi:"relation,workspace"` +} + +func (o VariableCreateOptions) valid() error { + if !validString(o.Key) { + return errors.New("Key is required") + } + if !validString(o.Value) { + return errors.New("Value is required") + } + if o.Category == nil { + return errors.New("Category is required") + } + if o.Workspace == nil { + return errors.New("Workspace is required") + } + return nil +} + +// Create is used to create a new variable. +func (s *variables) Create(ctx context.Context, options VariableCreateOptions) (*Variable, error) { + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + req, err := s.client.newRequest("POST", "vars", &options) + if err != nil { + return nil, err + } + + v := &Variable{} + err = s.client.do(ctx, req, v) + if err != nil { + return nil, err + } + + return v, nil +} + +// Read a variable by its ID. +func (s *variables) Read(ctx context.Context, variableID string) (*Variable, error) { + if !validStringID(&variableID) { + return nil, errors.New("Invalid value for variable ID") + } + + u := fmt.Sprintf("vars/%s", url.QueryEscape(variableID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + v := &Variable{} + err = s.client.do(ctx, req, v) + if err != nil { + return nil, err + } + + return v, err +} + +// VariableUpdateOptions represents the options for updating a variable. +type VariableUpdateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,vars"` + + // The name of the variable. + Key *string `jsonapi:"attr,key,omitempty"` + + // The value of the variable. + Value *string `jsonapi:"attr,value,omitempty"` + + // Whether to evaluate the value of the variable as a string of HCL code. + HCL *bool `jsonapi:"attr,hcl,omitempty"` + + // Whether the value is sensitive. + Sensitive *bool `jsonapi:"attr,sensitive,omitempty"` +} + +// Update values of an existing variable. 
+func (s *variables) Update(ctx context.Context, variableID string, options VariableUpdateOptions) (*Variable, error) { + if !validStringID(&variableID) { + return nil, errors.New("Invalid value for variable ID") + } + + // Make sure we don't send a user provided ID. + options.ID = variableID + + u := fmt.Sprintf("vars/%s", url.QueryEscape(variableID)) + req, err := s.client.newRequest("PATCH", u, &options) + if err != nil { + return nil, err + } + + v := &Variable{} + err = s.client.do(ctx, req, v) + if err != nil { + return nil, err + } + + return v, nil +} + +// Delete a variable by its ID. +func (s *variables) Delete(ctx context.Context, variableID string) error { + if !validStringID(&variableID) { + return errors.New("Invalid value for variable ID") + } + + u := fmt.Sprintf("vars/%s", url.QueryEscape(variableID)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/workspace.go b/vendor/github.com/hashicorp/go-tfe/workspace.go new file mode 100644 index 000000000000..4d78a75d8fae --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/workspace.go @@ -0,0 +1,447 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ Workspaces = (*workspaces)(nil) + +// Workspaces describes all the workspace related methods that the Terraform +// Enterprise API supports. +// +// TFE API docs: https://www.terraform.io/docs/enterprise/api/workspaces.html +type Workspaces interface { + // List all the workspaces within an organization. + List(ctx context.Context, organization string, options WorkspaceListOptions) (*WorkspaceList, error) + + // Create is used to create a new workspace. + Create(ctx context.Context, organization string, options WorkspaceCreateOptions) (*Workspace, error) + + // Read a workspace by its name. 
+ Read(ctx context.Context, organization string, workspace string) (*Workspace, error) + + // Update settings of an existing workspace. + Update(ctx context.Context, organization string, workspace string, options WorkspaceUpdateOptions) (*Workspace, error) + + // Delete a workspace by its name. + Delete(ctx context.Context, organization string, workspace string) error + + // Lock a workspace by its ID. + Lock(ctx context.Context, workspaceID string, options WorkspaceLockOptions) (*Workspace, error) + + // Unlock a workspace by its ID. + Unlock(ctx context.Context, workspaceID string) (*Workspace, error) + + // AssignSSHKey to a workspace. + AssignSSHKey(ctx context.Context, workspaceID string, options WorkspaceAssignSSHKeyOptions) (*Workspace, error) + + // UnassignSSHKey from a workspace. + UnassignSSHKey(ctx context.Context, workspaceID string) (*Workspace, error) +} + +// workspaces implements Workspaces. +type workspaces struct { + client *Client +} + +// WorkspaceList represents a list of workspaces. +type WorkspaceList struct { + *Pagination + Items []*Workspace +} + +// Workspace represents a Terraform Enterprise workspace. 
+type Workspace struct { + ID string `jsonapi:"primary,workspaces"` + Actions *WorkspaceActions `jsonapi:"attr,actions"` + AutoApply bool `jsonapi:"attr,auto-apply"` + CanQueueDestroyPlan bool `jsonapi:"attr,can-queue-destroy-plan"` + CreatedAt time.Time `jsonapi:"attr,created-at,iso8601"` + Environment string `jsonapi:"attr,environment"` + Locked bool `jsonapi:"attr,locked"` + MigrationEnvironment string `jsonapi:"attr,migration-environment"` + Name string `jsonapi:"attr,name"` + Permissions *WorkspacePermissions `jsonapi:"attr,permissions"` + TerraformVersion string `jsonapi:"attr,terraform-version"` + VCSRepo *VCSRepo `jsonapi:"attr,vcs-repo"` + WorkingDirectory string `jsonapi:"attr,working-directory"` + + // Relations + CurrentRun *Run `jsonapi:"relation,current-run"` + Organization *Organization `jsonapi:"relation,organization"` + SSHKey *SSHKey `jsonapi:"relation,ssh-key"` +} + +// VCSRepo contains the configuration of a VCS integration. +type VCSRepo struct { + Branch string `json:"branch"` + Identifier string `json:"identifier"` + IngressSubmodules bool `json:"ingress-submodules"` + OAuthTokenID string `json:"oauth-token-id"` +} + +// WorkspaceActions represents the workspace actions. +type WorkspaceActions struct { + IsDestroyable bool `json:"is-destroyable"` +} + +// WorkspacePermissions represents the workspace permissions. +type WorkspacePermissions struct { + CanDestroy bool `json:"can-destroy"` + CanLock bool `json:"can-lock"` + CanQueueDestroy bool `json:"can-queue-destroy"` + CanQueueRun bool `json:"can-queue-run"` + CanReadSettings bool `json:"can-read-settings"` + CanUpdate bool `json:"can-update"` + CanUpdateVariable bool `json:"can-update-variable"` +} + +// WorkspaceListOptions represents the options for listing workspaces. +type WorkspaceListOptions struct { + ListOptions + + // A search string (partial workspace name) used to filter the results. 
+ Search *string `url:"search[name],omitempty"` +} + +// List all the workspaces within an organization. +func (s *workspaces) List(ctx context.Context, organization string, options WorkspaceListOptions) (*WorkspaceList, error) { + if !validStringID(&organization) { + return nil, errors.New("Invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/workspaces", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, &options) + if err != nil { + return nil, err + } + + wl := &WorkspaceList{} + err = s.client.do(ctx, req, wl) + if err != nil { + return nil, err + } + + return wl, nil +} + +// WorkspaceCreateOptions represents the options for creating a new workspace. +type WorkspaceCreateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,workspaces"` + + // Whether to automatically apply changes when a Terraform plan is successful. + AutoApply *bool `jsonapi:"attr,auto-apply,omitempty"` + + // The legacy TFE environment to use as the source of the migration, in the + // form organization/environment. Omit this unless you are migrating a legacy + // environment. + MigrationEnvironment *string `jsonapi:"attr,migration-environment,omitempty"` + + // The name of the workspace, which can only include letters, numbers, -, + // and _. This will be used as an identifier and must be unique in the + // organization. + Name *string `jsonapi:"attr,name"` + + // The version of Terraform to use for this workspace. Upon creating a + // workspace, the latest version is selected unless otherwise specified. + TerraformVersion *string `jsonapi:"attr,terraform-version,omitempty"` + + // Settings for the workspace's VCS repository. If omitted, the workspace is + // created without a VCS repo. If included, you must specify at least the + // oauth-token-id and identifier keys below. + VCSRepo *VCSRepoOptions `jsonapi:"attr,vcs-repo,omitempty"` + + // A relative path that Terraform will execute within. 
This defaults to the + // root of your repository and is typically set to a subdirectory matching the + // environment when multiple environments exist within the same repository. + WorkingDirectory *string `jsonapi:"attr,working-directory,omitempty"` +} + +// VCSRepoOptions represents the configuration options of a VCS integration. +type VCSRepoOptions struct { + Branch *string `json:"branch,omitempty"` + Identifier *string `json:"identifier,omitempty"` + IngressSubmodules *bool `json:"ingress-submodules,omitempty"` + OAuthTokenID *string `json:"oauth-token-id,omitempty"` +} + +func (o WorkspaceCreateOptions) valid() error { + if !validString(o.Name) { + return errors.New("Name is required") + } + if !validStringID(o.Name) { + return errors.New("Invalid value for name") + } + return nil +} + +// Create is used to create a new workspace. +func (s *workspaces) Create(ctx context.Context, organization string, options WorkspaceCreateOptions) (*Workspace, error) { + if !validStringID(&organization) { + return nil, errors.New("Invalid value for organization") + } + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("organizations/%s/workspaces", url.QueryEscape(organization)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return nil, err + } + + w := &Workspace{} + err = s.client.do(ctx, req, w) + if err != nil { + return nil, err + } + + return w, nil +} + +// Read a workspace by its name. 
+func (s *workspaces) Read(ctx context.Context, organization, workspace string) (*Workspace, error) { + if !validStringID(&organization) { + return nil, errors.New("Invalid value for organization") + } + if !validStringID(&workspace) { + return nil, errors.New("Invalid value for workspace") + } + + u := fmt.Sprintf( + "organizations/%s/workspaces/%s", + url.QueryEscape(organization), + url.QueryEscape(workspace), + ) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + w := &Workspace{} + err = s.client.do(ctx, req, w) + if err != nil { + return nil, err + } + + return w, nil +} + +// WorkspaceUpdateOptions represents the options for updating a workspace. +type WorkspaceUpdateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,workspaces"` + + // Whether to automatically apply changes when a Terraform plan is successful. + AutoApply *bool `jsonapi:"attr,auto-apply,omitempty"` + + // A new name for the workspace, which can only include letters, numbers, -, + // and _. This will be used as an identifier and must be unique in the + // organization. Warning: Changing a workspace's name changes its URL in the + // API and UI. + Name *string `jsonapi:"attr,name,omitempty"` + + // The version of Terraform to use for this workspace. + TerraformVersion *string `jsonapi:"attr,terraform-version,omitempty"` + + // To delete a workspace's existing VCS repo, specify null instead of an + // object. To modify a workspace's existing VCS repo, include whichever of + // the keys below you wish to modify. To add a new VCS repo to a workspace + // that didn't previously have one, include at least the oauth-token-id and + // identifier keys. VCSRepo *VCSRepo `jsonapi:"relation,vcs-repo,om-tempty"` + VCSRepo *VCSRepoOptions `jsonapi:"attr,vcs-repo,omitempty"` + + // A relative path that Terraform will execute within. 
This defaults to the + // root of your repository and is typically set to a subdirectory matching + // the environment when multiple environments exist within the same + // repository. + WorkingDirectory *string `jsonapi:"attr,working-directory,omitempty"` +} + +// Update settings of an existing workspace. +func (s *workspaces) Update(ctx context.Context, organization, workspace string, options WorkspaceUpdateOptions) (*Workspace, error) { + if !validStringID(&organization) { + return nil, errors.New("Invalid value for organization") + } + if !validStringID(&workspace) { + return nil, errors.New("Invalid value for workspace") + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf( + "organizations/%s/workspaces/%s", + url.QueryEscape(organization), + url.QueryEscape(workspace), + ) + req, err := s.client.newRequest("PATCH", u, &options) + if err != nil { + return nil, err + } + + w := &Workspace{} + err = s.client.do(ctx, req, w) + if err != nil { + return nil, err + } + + return w, nil +} + +// Delete a workspace by its name. +func (s *workspaces) Delete(ctx context.Context, organization, workspace string) error { + if !validStringID(&organization) { + return errors.New("Invalid value for organization") + } + if !validStringID(&workspace) { + return errors.New("Invalid value for workspace") + } + + u := fmt.Sprintf( + "organizations/%s/workspaces/%s", + url.QueryEscape(organization), + url.QueryEscape(workspace), + ) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// WorkspaceLockOptions represents the options for locking a workspace. +type WorkspaceLockOptions struct { + // Specifies the reason for locking the workspace. + Reason *string `json:"reason,omitempty"` +} + +// Lock a workspace by its ID. 
+func (s *workspaces) Lock(ctx context.Context, workspaceID string, options WorkspaceLockOptions) (*Workspace, error) { + if !validStringID(&workspaceID) { + return nil, errors.New("Invalid value for workspace ID") + } + + u := fmt.Sprintf("workspaces/%s/actions/lock", url.QueryEscape(workspaceID)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return nil, err + } + + w := &Workspace{} + err = s.client.do(ctx, req, w) + if err != nil { + return nil, err + } + + return w, nil +} + +// Unlock a workspace by its ID. +func (s *workspaces) Unlock(ctx context.Context, workspaceID string) (*Workspace, error) { + if !validStringID(&workspaceID) { + return nil, errors.New("Invalid value for workspace ID") + } + + u := fmt.Sprintf("workspaces/%s/actions/unlock", url.QueryEscape(workspaceID)) + req, err := s.client.newRequest("POST", u, nil) + if err != nil { + return nil, err + } + + w := &Workspace{} + err = s.client.do(ctx, req, w) + if err != nil { + return nil, err + } + + return w, nil +} + +// WorkspaceAssignSSHKeyOptions represents the options to assign an SSH key to +// a workspace. +type WorkspaceAssignSSHKeyOptions struct { + // For internal use only! + ID string `jsonapi:"primary,workspaces"` + + // The SSH key ID to assign. + SSHKeyID *string `jsonapi:"attr,id"` +} + +func (o WorkspaceAssignSSHKeyOptions) valid() error { + if !validString(o.SSHKeyID) { + return errors.New("SSH key ID is required") + } + if !validStringID(o.SSHKeyID) { + return errors.New("Invalid value for SSH key ID") + } + return nil +} + +// AssignSSHKey to a workspace. +func (s *workspaces) AssignSSHKey(ctx context.Context, workspaceID string, options WorkspaceAssignSSHKeyOptions) (*Workspace, error) { + if !validStringID(&workspaceID) { + return nil, errors.New("Invalid value for workspace ID") + } + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. 
+ options.ID = "" + + u := fmt.Sprintf("workspaces/%s/relationships/ssh-key", url.QueryEscape(workspaceID)) + req, err := s.client.newRequest("PATCH", u, &options) + if err != nil { + return nil, err + } + + w := &Workspace{} + err = s.client.do(ctx, req, w) + if err != nil { + return nil, err + } + + return w, nil +} + +// workspaceUnassignSSHKeyOptions represents the options to unassign an SSH key +// to a workspace. +type workspaceUnassignSSHKeyOptions struct { + // For internal use only! + ID string `jsonapi:"primary,workspaces"` + + // Must be nil to unset the currently assigned SSH key. + SSHKeyID *string `jsonapi:"attr,id"` +} + +// UnassignSSHKey from a workspace. +func (s *workspaces) UnassignSSHKey(ctx context.Context, workspaceID string) (*Workspace, error) { + if !validStringID(&workspaceID) { + return nil, errors.New("Invalid value for workspace ID") + } + + u := fmt.Sprintf("workspaces/%s/relationships/ssh-key", url.QueryEscape(workspaceID)) + req, err := s.client.newRequest("PATCH", u, &workspaceUnassignSSHKeyOptions{}) + if err != nil { + return nil, err + } + + w := &Workspace{} + err = s.client.do(ctx, req, w) + if err != nil { + return nil, err + } + + return w, nil +} diff --git a/vendor/github.com/svanharmelen/jsonapi/.gitignore b/vendor/github.com/svanharmelen/jsonapi/.gitignore new file mode 100644 index 000000000000..19b1e1cf07b7 --- /dev/null +++ b/vendor/github.com/svanharmelen/jsonapi/.gitignore @@ -0,0 +1 @@ +/examples/examples diff --git a/vendor/github.com/svanharmelen/jsonapi/.travis.yml b/vendor/github.com/svanharmelen/jsonapi/.travis.yml new file mode 100644 index 000000000000..d07c5f5f5fef --- /dev/null +++ b/vendor/github.com/svanharmelen/jsonapi/.travis.yml @@ -0,0 +1,7 @@ +language: go +go: + - 1.8.x + - 1.9.x + - 1.10.x + - tip +script: go test ./... 
-v diff --git a/vendor/github.com/svanharmelen/jsonapi/LICENSE b/vendor/github.com/svanharmelen/jsonapi/LICENSE new file mode 100644 index 000000000000..c97912cef06a --- /dev/null +++ b/vendor/github.com/svanharmelen/jsonapi/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Google Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/svanharmelen/jsonapi/README.md b/vendor/github.com/svanharmelen/jsonapi/README.md new file mode 100644 index 000000000000..44b0541815da --- /dev/null +++ b/vendor/github.com/svanharmelen/jsonapi/README.md @@ -0,0 +1,457 @@ +# jsonapi + +[![Build Status](https://travis-ci.org/google/jsonapi.svg?branch=master)](https://travis-ci.org/google/jsonapi) +[![Go Report Card](https://goreportcard.com/badge/github.com/google/jsonapi)](https://goreportcard.com/report/github.com/google/jsonapi) +[![GoDoc](https://godoc.org/github.com/google/jsonapi?status.svg)](http://godoc.org/github.com/google/jsonapi) + +A serializer/deserializer for JSON payloads that comply to the +[JSON API - jsonapi.org](http://jsonapi.org) spec in go. + +## Installation + +``` +go get -u github.com/google/jsonapi +``` + +Or, see [Alternative Installation](#alternative-installation). + +## Background + +You are working in your Go web application and you have a struct that is +organized similarly to your database schema. You need to send and +receive json payloads that adhere to the JSON API spec. Once you realize that +your json needed to take on this special form, you go down the path of +creating more structs to be able to serialize and deserialize JSON API +payloads. Then there are more models required with this additional +structure. Ugh! With JSON API, you can keep your model structs as is and +use [StructTags](http://golang.org/pkg/reflect/#StructTag) to indicate +to JSON API how you want your response built or your request +deserialized. What about your relationships? JSON API supports +relationships out of the box and will even put them in your response +into an `included` side-loaded slice--that contains associated records. 
+ +## Introduction + +JSON API uses [StructField](http://golang.org/pkg/reflect/#StructField) +tags to annotate the structs fields that you already have and use in +your app and then reads and writes [JSON API](http://jsonapi.org) +output based on the instructions you give the library in your JSON API +tags. Let's take an example. In your app, you most likely have structs +that look similar to these: + + +```go +type Blog struct { + ID int `json:"id"` + Title string `json:"title"` + Posts []*Post `json:"posts"` + CurrentPost *Post `json:"current_post"` + CurrentPostId int `json:"current_post_id"` + CreatedAt time.Time `json:"created_at"` + ViewCount int `json:"view_count"` +} + +type Post struct { + ID int `json:"id"` + BlogID int `json:"blog_id"` + Title string `json:"title"` + Body string `json:"body"` + Comments []*Comment `json:"comments"` +} + +type Comment struct { + Id int `json:"id"` + PostID int `json:"post_id"` + Body string `json:"body"` + Likes uint `json:"likes_count,omitempty"` +} +``` + +These structs may or may not resemble the layout of your database. But +these are the ones that you want to use right? You wouldn't want to use +structs like those that JSON API sends because it is difficult to get at +all of your data easily. + +## Example App + +[examples/app.go](https://github.com/google/jsonapi/blob/master/examples/app.go) + +This program demonstrates the implementation of a create, a show, +and a list [http.Handler](http://golang.org/pkg/net/http#Handler). It +outputs some example requests and responses as well as serialized +examples of the source/target structs to json. That is to say, I show +you that the library has successfully taken your JSON API request and +turned it into your struct types. + +To run, + +* Make sure you have [Go installed](https://golang.org/doc/install) +* Create the following directories or similar: `~/go` +* Set `GOPATH` to `PWD` in your shell session, `export GOPATH=$PWD` +* `go get github.com/google/jsonapi`. 
(Append `-u` after `get` if you + are updating.) +* `cd $GOPATH/src/github.com/google/jsonapi/examples` +* `go build && ./examples` + +## `jsonapi` Tag Reference + +### Example + +The `jsonapi` [StructTags](http://golang.org/pkg/reflect/#StructTag) +tells this library how to marshal and unmarshal your structs into +JSON API payloads and your JSON API payloads to structs, respectively. +Then Use JSON API's Marshal and Unmarshal methods to construct and read +your responses and replies. Here's an example of the structs above +using JSON API tags: + +```go +type Blog struct { + ID int `jsonapi:"primary,blogs"` + Title string `jsonapi:"attr,title"` + Posts []*Post `jsonapi:"relation,posts"` + CurrentPost *Post `jsonapi:"relation,current_post"` + CurrentPostID int `jsonapi:"attr,current_post_id"` + CreatedAt time.Time `jsonapi:"attr,created_at"` + ViewCount int `jsonapi:"attr,view_count"` +} + +type Post struct { + ID int `jsonapi:"primary,posts"` + BlogID int `jsonapi:"attr,blog_id"` + Title string `jsonapi:"attr,title"` + Body string `jsonapi:"attr,body"` + Comments []*Comment `jsonapi:"relation,comments"` +} + +type Comment struct { + ID int `jsonapi:"primary,comments"` + PostID int `jsonapi:"attr,post_id"` + Body string `jsonapi:"attr,body"` + Likes uint `jsonapi:"attr,likes-count,omitempty"` +} +``` + +### Permitted Tag Values + +#### `primary` + +``` +`jsonapi:"primary,"` +``` + +This indicates this is the primary key field for this struct type. +Tag value arguments are comma separated. The first argument must be, +`primary`, and the second must be the name that should appear in the +`type`\* field for all data objects that represent this type of model. + +\* According the [JSON API](http://jsonapi.org) spec, the plural record +types are shown in the examples, but not required. + +#### `attr` + +``` +`jsonapi:"attr,,"` +``` + +These fields' values will end up in the `attributes`hash for a record. 
+The first argument must be, `attr`, and the second should be the name +for the key to display in the `attributes` hash for that record. The optional +third argument is `omitempty` - if it is present the field will not be present +in the `"attributes"` if the field's value is equivalent to the field types +empty value (ie if the `count` field is of type `int`, `omitempty` will omit the +field when `count` has a value of `0`). Lastly, the spec indicates that +`attributes` key names should be dasherized for multiple word field names. + +#### `relation` + +``` +`jsonapi:"relation,,"` +``` + +Relations are struct fields that represent a one-to-one or one-to-many +relationship with other structs. JSON API will traverse the graph of +relationships and marshal or unmarshal records. The first argument must +be, `relation`, and the second should be the name of the relationship, +used as the key in the `relationships` hash for the record. The optional +third argument is `omitempty` - if present will prevent non existent to-one and +to-many from being serialized. + +## Methods Reference + +**All `Marshal` and `Unmarshal` methods expect pointers to struct +instance or slices of the same contained with the `interface{}`s** + +Now you have your structs prepared to be seralized or materialized, What +about the rest? + +### Create Record Example + +You can Unmarshal a JSON API payload using +[jsonapi.UnmarshalPayload](http://godoc.org/github.com/google/jsonapi#UnmarshalPayload). +It reads from an [io.Reader](https://golang.org/pkg/io/#Reader) +containing a JSON API payload for one record (but can have related +records). Then, it materializes a struct that you created and passed in +(using new or &). Again, the method supports single records only, at +the top level, in request payloads at the moment. Bulk creates and +updates are not supported yet. 
+ +After saving your record, you can use, +[MarshalOnePayload](http://godoc.org/github.com/google/jsonapi#MarshalOnePayload), +to write the JSON API response to an +[io.Writer](https://golang.org/pkg/io/#Writer). + +#### `UnmarshalPayload` + +```go +UnmarshalPayload(in io.Reader, model interface{}) +``` + +Visit [godoc](http://godoc.org/github.com/google/jsonapi#UnmarshalPayload) + +#### `MarshalPayload` + +```go +MarshalPayload(w io.Writer, models interface{}) error +``` + +Visit [godoc](http://godoc.org/github.com/google/jsonapi#MarshalPayload) + +Writes a JSON API response, with related records sideloaded, into an +`included` array. This method encodes a response for either a single record or +many records. + +##### Handler Example Code + +```go +func CreateBlog(w http.ResponseWriter, r *http.Request) { + blog := new(Blog) + + if err := jsonapi.UnmarshalPayload(r.Body, blog); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + // ...save your blog... + + w.Header().Set("Content-Type", jsonapi.MediaType) + w.WriteHeader(http.StatusCreated) + + if err := jsonapi.MarshalPayload(w, blog); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} +``` + +### Create Records Example + +#### `UnmarshalManyPayload` + +```go +UnmarshalManyPayload(in io.Reader, t reflect.Type) ([]interface{}, error) +``` + +Visit [godoc](http://godoc.org/github.com/google/jsonapi#UnmarshalManyPayload) + +Takes an `io.Reader` and a `reflect.Type` representing the uniform type +contained within the `"data"` JSON API member. 
+ +##### Handler Example Code + +```go +func CreateBlogs(w http.ResponseWriter, r *http.Request) { + // ...create many blogs at once + + blogs, err := UnmarshalManyPayload(r.Body, reflect.TypeOf(new(Blog))) + if err != nil { + t.Fatal(err) + } + + for _, blog := range blogs { + b, ok := blog.(*Blog) + // ...save each of your blogs + } + + w.Header().Set("Content-Type", jsonapi.MediaType) + w.WriteHeader(http.StatusCreated) + + if err := jsonapi.MarshalPayload(w, blogs); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} +``` + + +### Links + +If you need to include [link objects](http://jsonapi.org/format/#document-links) along with response data, implement the `Linkable` interface for document-links, and `RelationshipLinkable` for relationship links: + +```go +func (post Post) JSONAPILinks() *Links { + return &Links{ + "self": "href": fmt.Sprintf("https://example.com/posts/%d", post.ID), + "comments": Link{ + Href: fmt.Sprintf("https://example.com/api/blogs/%d/comments", post.ID), + Meta: map[string]interface{}{ + "counts": map[string]uint{ + "likes": 4, + }, + }, + }, + } +} + +// Invoked for each relationship defined on the Post struct when marshaled +func (post Post) JSONAPIRelationshipLinks(relation string) *Links { + if relation == "comments" { + return &Links{ + "related": fmt.Sprintf("https://example.com/posts/%d/comments", post.ID), + } + } + return nil +} +``` + +### Meta + + If you need to include [meta objects](http://jsonapi.org/format/#document-meta) along with response data, implement the `Metable` interface for document-meta, and `RelationshipMetable` for relationship meta: + + ```go +func (post Post) JSONAPIMeta() *Meta { + return &Meta{ + "details": "sample details here", + } +} + +// Invoked for each relationship defined on the Post struct when marshaled +func (post Post) JSONAPIRelationshipMeta(relation string) *Meta { + if relation == "comments" { + return &Meta{ + "this": map[string]interface{}{ + "can": 
map[string]interface{}{ + "go": []interface{}{ + "as", + "deep", + map[string]interface{}{ + "as": "required", + }, + }, + }, + }, + } + } + return nil +} +``` + +### Errors +This package also implements support for JSON API compatible `errors` payloads using the following types. + +#### `MarshalErrors` +```go +MarshalErrors(w io.Writer, errs []*ErrorObject) error +``` + +Writes a JSON API response using the given `[]error`. + +#### `ErrorsPayload` +```go +type ErrorsPayload struct { + Errors []*ErrorObject `json:"errors"` +} +``` + +ErrorsPayload is a serializer struct for representing a valid JSON API errors payload. + +#### `ErrorObject` +```go +type ErrorObject struct { ... } + +// Error implements the `Error` interface. +func (e *ErrorObject) Error() string { + return fmt.Sprintf("Error: %s %s\n", e.Title, e.Detail) +} +``` + +ErrorObject is an `Error` implementation as well as an implementation of the JSON API error object. + +The main idea behind this struct is that you can use it directly in your code as an error type and pass it directly to `MarshalErrors` to get a valid JSON API errors payload. + +##### Errors Example Code +```go +// An error has come up in your code, so set an appropriate status, and serialize the error. +if err := validate(&myStructToValidate); err != nil { + context.SetStatusCode(http.StatusBadRequest) // Or however you need to set a status. + jsonapi.MarshalErrors(w, []*ErrorObject{{ + Title: "Validation Error", + Detail: "Given request body was invalid.", + Status: "400", + Meta: map[string]interface{}{"field": "some_field", "error": "bad type", "expected": "string", "received": "float64"}, + }}) + return +} +``` + +## Testing + +### `MarshalOnePayloadEmbedded` + +```go +MarshalOnePayloadEmbedded(w io.Writer, model interface{}) error +``` + +Visit [godoc](http://godoc.org/github.com/google/jsonapi#MarshalOnePayloadEmbedded) + +This method is not strictly meant to for use in implementation code, +although feel free. 
It was mainly created for use in tests; in most cases, +your request payloads for create will be embedded rather than sideloaded +for related records. This method will serialize a single struct pointer +into an embedded json response. In other words, there will be no, +`included`, array in the json; all relationships will be serialized +inline with the data. + +However, in tests, you may want to construct payloads to post to create +methods that are embedded to most closely model the payloads that will +be produced by the client. This method aims to enable that. + +### Example + +```go +out := bytes.NewBuffer(nil) + +// testModel returns a pointer to a Blog +jsonapi.MarshalOnePayloadEmbedded(out, testModel()) + +h := new(BlogsHandler) + +w := httptest.NewRecorder() +r, _ := http.NewRequest(http.MethodPost, "/blogs", out) + +h.CreateBlog(w, r) + +blog := new(Blog) +jsonapi.UnmarshalPayload(w.Body, blog) + +// ... assert stuff about blog here ... +``` + +## Alternative Installation +I use git subtrees to manage dependencies rather than `go get` so that +the src is committed to my repo. + +``` +git subtree add --squash --prefix=src/github.com/google/jsonapi https://github.com/google/jsonapi.git master +``` + +To update, + +``` +git subtree pull --squash --prefix=src/github.com/google/jsonapi https://github.com/google/jsonapi.git master +``` + +This assumes that I have my repo structured with a `src` dir containing +a collection of packages and `GOPATH` is set to the root +folder--containing `src`. + +## Contributing + +Fork, Change, Pull Request *with tests*. 
diff --git a/vendor/github.com/svanharmelen/jsonapi/constants.go b/vendor/github.com/svanharmelen/jsonapi/constants.go new file mode 100644 index 000000000000..23288d311eb8 --- /dev/null +++ b/vendor/github.com/svanharmelen/jsonapi/constants.go @@ -0,0 +1,55 @@ +package jsonapi + +const ( + // StructTag annotation strings + annotationJSONAPI = "jsonapi" + annotationPrimary = "primary" + annotationClientID = "client-id" + annotationAttribute = "attr" + annotationRelation = "relation" + annotationOmitEmpty = "omitempty" + annotationISO8601 = "iso8601" + annotationSeperator = "," + + iso8601TimeFormat = "2006-01-02T15:04:05Z" + + // MediaType is the identifier for the JSON API media type + // + // see http://jsonapi.org/format/#document-structure + MediaType = "application/vnd.api+json" + + // Pagination Constants + // + // http://jsonapi.org/format/#fetching-pagination + + // KeyFirstPage is the key to the links object whose value contains a link to + // the first page of data + KeyFirstPage = "first" + // KeyLastPage is the key to the links object whose value contains a link to + // the last page of data + KeyLastPage = "last" + // KeyPreviousPage is the key to the links object whose value contains a link + // to the previous page of data + KeyPreviousPage = "prev" + // KeyNextPage is the key to the links object whose value contains a link to + // the next page of data + KeyNextPage = "next" + + // QueryParamPageNumber is a JSON API query parameter used in a page based + // pagination strategy in conjunction with QueryParamPageSize + QueryParamPageNumber = "page[number]" + // QueryParamPageSize is a JSON API query parameter used in a page based + // pagination strategy in conjunction with QueryParamPageNumber + QueryParamPageSize = "page[size]" + + // QueryParamPageOffset is a JSON API query parameter used in an offset based + // pagination strategy in conjunction with QueryParamPageLimit + QueryParamPageOffset = "page[offset]" + // QueryParamPageLimit is a JSON API 
query parameter used in an offset based + // pagination strategy in conjunction with QueryParamPageOffset + QueryParamPageLimit = "page[limit]" + + // QueryParamPageCursor is a JSON API query parameter used with a cursor-based + // strategy + QueryParamPageCursor = "page[cursor]" +) diff --git a/vendor/github.com/svanharmelen/jsonapi/doc.go b/vendor/github.com/svanharmelen/jsonapi/doc.go new file mode 100644 index 000000000000..29d7a14ba72b --- /dev/null +++ b/vendor/github.com/svanharmelen/jsonapi/doc.go @@ -0,0 +1,70 @@ +/* +Package jsonapi provides a serializer and deserializer for jsonapi.org spec payloads. + +You can keep your model structs as is and use struct field tags to indicate to jsonapi +how you want your response built or your request deserialzied. What about my relationships? +jsonapi supports relationships out of the box and will even side load them in your response +into an "included" array--that contains associated objects. + +jsonapi uses StructField tags to annotate the structs fields that you already have and use +in your app and then reads and writes jsonapi.org output based on the instructions you give +the library in your jsonapi tags. 
+ +Example structs using a Blog > Post > Comment structure, + + type Blog struct { + ID int `jsonapi:"primary,blogs"` + Title string `jsonapi:"attr,title"` + Posts []*Post `jsonapi:"relation,posts"` + CurrentPost *Post `jsonapi:"relation,current_post"` + CurrentPostID int `jsonapi:"attr,current_post_id"` + CreatedAt time.Time `jsonapi:"attr,created_at"` + ViewCount int `jsonapi:"attr,view_count"` + } + + type Post struct { + ID int `jsonapi:"primary,posts"` + BlogID int `jsonapi:"attr,blog_id"` + Title string `jsonapi:"attr,title"` + Body string `jsonapi:"attr,body"` + Comments []*Comment `jsonapi:"relation,comments"` + } + + type Comment struct { + ID int `jsonapi:"primary,comments"` + PostID int `jsonapi:"attr,post_id"` + Body string `jsonapi:"attr,body"` + } + +jsonapi Tag Reference + +Value, primary: "primary," + +This indicates that this is the primary key field for this struct type. Tag +value arguments are comma separated. The first argument must be, "primary", and +the second must be the name that should appear in the "type" field for all data +objects that represent this type of model. + +Value, attr: "attr,[,]" + +These fields' values should end up in the "attribute" hash for a record. The first +argument must be, "attr', and the second should be the name for the key to display in +the the "attributes" hash for that record. + +The following extra arguments are also supported: + +"omitempty": excludes the fields value from the "attribute" hash. +"iso8601": uses the ISO8601 timestamp format when serialising or deserialising the time.Time value. + +Value, relation: "relation," + +Relations are struct fields that represent a one-to-one or one-to-many to other structs. +jsonapi will traverse the graph of relationships and marshal or unmarshal records. The first +argument must be, "relation", and the second should be the name of the relationship, used as +the key in the "relationships" hash for the record. 
+ +Use the methods below to Marshal and Unmarshal jsonapi.org json payloads. + +Visit the readme at https://github.com/google/jsonapi +*/ +package jsonapi diff --git a/vendor/github.com/svanharmelen/jsonapi/errors.go b/vendor/github.com/svanharmelen/jsonapi/errors.go new file mode 100644 index 000000000000..ed7fa9f75d85 --- /dev/null +++ b/vendor/github.com/svanharmelen/jsonapi/errors.go @@ -0,0 +1,55 @@ +package jsonapi + +import ( + "encoding/json" + "fmt" + "io" +) + +// MarshalErrors writes a JSON API response using the given `[]error`. +// +// For more information on JSON API error payloads, see the spec here: +// http://jsonapi.org/format/#document-top-level +// and here: http://jsonapi.org/format/#error-objects. +func MarshalErrors(w io.Writer, errorObjects []*ErrorObject) error { + if err := json.NewEncoder(w).Encode(&ErrorsPayload{Errors: errorObjects}); err != nil { + return err + } + return nil +} + +// ErrorsPayload is a serializer struct for representing a valid JSON API errors payload. +type ErrorsPayload struct { + Errors []*ErrorObject `json:"errors"` +} + +// ErrorObject is an `Error` implementation as well as an implementation of the JSON API error object. +// +// The main idea behind this struct is that you can use it directly in your code as an error type +// and pass it directly to `MarshalErrors` to get a valid JSON API errors payload. +// For more information on Golang errors, see: https://golang.org/pkg/errors/ +// For more information on the JSON API spec's error objects, see: http://jsonapi.org/format/#error-objects +type ErrorObject struct { + // ID is a unique identifier for this particular occurrence of a problem. + ID string `json:"id,omitempty"` + + // Title is a short, human-readable summary of the problem that SHOULD NOT change from occurrence to occurrence of the problem, except for purposes of localization. 
+ Title string `json:"title,omitempty"` + + // Detail is a human-readable explanation specific to this occurrence of the problem. Like title, this field’s value can be localized. + Detail string `json:"detail,omitempty"` + + // Status is the HTTP status code applicable to this problem, expressed as a string value. + Status string `json:"status,omitempty"` + + // Code is an application-specific error code, expressed as a string value. + Code string `json:"code,omitempty"` + + // Meta is an object containing non-standard meta-information about the error. + Meta *map[string]interface{} `json:"meta,omitempty"` +} + +// Error implements the `Error` interface. +func (e *ErrorObject) Error() string { + return fmt.Sprintf("Error: %s %s\n", e.Title, e.Detail) +} diff --git a/vendor/github.com/svanharmelen/jsonapi/node.go b/vendor/github.com/svanharmelen/jsonapi/node.go new file mode 100644 index 000000000000..a58488c82214 --- /dev/null +++ b/vendor/github.com/svanharmelen/jsonapi/node.go @@ -0,0 +1,121 @@ +package jsonapi + +import "fmt" + +// Payloader is used to encapsulate the One and Many payload types +type Payloader interface { + clearIncluded() +} + +// OnePayload is used to represent a generic JSON API payload where a single +// resource (Node) was included as an {} in the "data" key +type OnePayload struct { + Data *Node `json:"data"` + Included []*Node `json:"included,omitempty"` + Links *Links `json:"links,omitempty"` + Meta *Meta `json:"meta,omitempty"` +} + +func (p *OnePayload) clearIncluded() { + p.Included = []*Node{} +} + +// ManyPayload is used to represent a generic JSON API payload where many +// resources (Nodes) were included in an [] in the "data" key +type ManyPayload struct { + Data []*Node `json:"data"` + Included []*Node `json:"included,omitempty"` + Links *Links `json:"links,omitempty"` + Meta *Meta `json:"meta,omitempty"` +} + +func (p *ManyPayload) clearIncluded() { + p.Included = []*Node{} +} + +// Node is used to represent a generic JSON API 
Resource +type Node struct { + Type string `json:"type"` + ID string `json:"id,omitempty"` + ClientID string `json:"client-id,omitempty"` + Attributes map[string]interface{} `json:"attributes,omitempty"` + Relationships map[string]interface{} `json:"relationships,omitempty"` + Links *Links `json:"links,omitempty"` + Meta *Meta `json:"meta,omitempty"` +} + +// RelationshipOneNode is used to represent a generic has one JSON API relation +type RelationshipOneNode struct { + Data *Node `json:"data"` + Links *Links `json:"links,omitempty"` + Meta *Meta `json:"meta,omitempty"` +} + +// RelationshipManyNode is used to represent a generic has many JSON API +// relation +type RelationshipManyNode struct { + Data []*Node `json:"data"` + Links *Links `json:"links,omitempty"` + Meta *Meta `json:"meta,omitempty"` +} + +// Links is used to represent a `links` object. +// http://jsonapi.org/format/#document-links +type Links map[string]interface{} + +func (l *Links) validate() (err error) { + // Each member of a links object is a “link”. A link MUST be represented as + // either: + // - a string containing the link’s URL. + // - an object (“link object”) which can contain the following members: + // - href: a string containing the link’s URL. + // - meta: a meta object containing non-standard meta-information about the + // link. + for k, v := range *l { + _, isString := v.(string) + _, isLink := v.(Link) + + if !(isString || isLink) { + return fmt.Errorf( + "The %s member of the links object was not a string or link object", + k, + ) + } + } + return +} + +// Link is used to represent a member of the `links` object. +type Link struct { + Href string `json:"href"` + Meta Meta `json:"meta,omitempty"` +} + +// Linkable is used to include document links in response data +// e.g. {"self": "http://example.com/posts/1"} +type Linkable interface { + JSONAPILinks() *Links +} + +// RelationshipLinkable is used to include relationship links in response data +// e.g. 
{"related": "http://example.com/posts/1/comments"} +type RelationshipLinkable interface { + // JSONAPIRelationshipLinks will be invoked for each relationship with the corresponding relation name (e.g. `comments`) + JSONAPIRelationshipLinks(relation string) *Links +} + +// Meta is used to represent a `meta` object. +// http://jsonapi.org/format/#document-meta +type Meta map[string]interface{} + +// Metable is used to include document meta in response data +// e.g. {"foo": "bar"} +type Metable interface { + JSONAPIMeta() *Meta +} + +// RelationshipMetable is used to include relationship meta in response data +type RelationshipMetable interface { + // JSONRelationshipMeta will be invoked for each relationship with the corresponding relation name (e.g. `comments`) + JSONAPIRelationshipMeta(relation string) *Meta +} diff --git a/vendor/github.com/svanharmelen/jsonapi/request.go b/vendor/github.com/svanharmelen/jsonapi/request.go new file mode 100644 index 000000000000..e3543428a5b3 --- /dev/null +++ b/vendor/github.com/svanharmelen/jsonapi/request.go @@ -0,0 +1,680 @@ +package jsonapi + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +const ( + unsuportedStructTagMsg = "Unsupported jsonapi tag annotation, %s" +) + +var ( + // ErrInvalidTime is returned when a struct has a time.Time type field, but + // the JSON value was not a unix timestamp integer. + ErrInvalidTime = errors.New("Only numbers can be parsed as dates, unix timestamps") + // ErrInvalidISO8601 is returned when a struct has a time.Time type field and includes + // "iso8601" in the tag spec, but the JSON value was not an ISO8601 timestamp string. + ErrInvalidISO8601 = errors.New("Only strings can be parsed as dates, ISO8601 timestamps") + // ErrUnknownFieldNumberType is returned when the JSON value was a float + // (numeric) but the Struct field was a non numeric type (i.e. 
not int, uint, + // float, etc) + ErrUnknownFieldNumberType = errors.New("The struct field was not of a known number type") + // ErrInvalidType is returned when the given type is incompatible with the expected type. + ErrInvalidType = errors.New("Invalid type provided") // I wish we used punctuation. + +) + +// ErrUnsupportedPtrType is returned when the Struct field was a pointer but +// the JSON value was of a different type +type ErrUnsupportedPtrType struct { + rf reflect.Value + t reflect.Type + structField reflect.StructField +} + +func (eupt ErrUnsupportedPtrType) Error() string { + typeName := eupt.t.Elem().Name() + kind := eupt.t.Elem().Kind() + if kind.String() != "" && kind.String() != typeName { + typeName = fmt.Sprintf("%s (%s)", typeName, kind.String()) + } + return fmt.Sprintf( + "jsonapi: Can't unmarshal %+v (%s) to struct field `%s`, which is a pointer to `%s`", + eupt.rf, eupt.rf.Type().Kind(), eupt.structField.Name, typeName, + ) +} + +func newErrUnsupportedPtrType(rf reflect.Value, t reflect.Type, structField reflect.StructField) error { + return ErrUnsupportedPtrType{rf, t, structField} +} + +// UnmarshalPayload converts an io into a struct instance using jsonapi tags on +// struct fields. This method supports single request payloads only, at the +// moment. Bulk creates and updates are not supported yet. +// +// Will Unmarshal embedded and sideloaded payloads. The latter is only possible if the +// object graph is complete. That is, in the "relationships" data there are type and id, +// keys that correspond to records in the "included" array. +// +// For example you could pass it, in, req.Body and, model, a BlogPost +// struct instance to populate in an http handler, +// +// func CreateBlog(w http.ResponseWriter, r *http.Request) { +// blog := new(Blog) +// +// if err := jsonapi.UnmarshalPayload(r.Body, blog); err != nil { +// http.Error(w, err.Error(), 500) +// return +// } +// +// // ...do stuff with your blog... 
+// +// w.Header().Set("Content-Type", jsonapi.MediaType) +// w.WriteHeader(201) +// +// if err := jsonapi.MarshalPayload(w, blog); err != nil { +// http.Error(w, err.Error(), 500) +// } +// } +// +// +// Visit https://github.com/google/jsonapi#create for more info. +// +// model interface{} should be a pointer to a struct. +func UnmarshalPayload(in io.Reader, model interface{}) error { + payload := new(OnePayload) + + if err := json.NewDecoder(in).Decode(payload); err != nil { + return err + } + + if payload.Included != nil { + includedMap := make(map[string]*Node) + for _, included := range payload.Included { + key := fmt.Sprintf("%s,%s", included.Type, included.ID) + includedMap[key] = included + } + + return unmarshalNode(payload.Data, reflect.ValueOf(model), &includedMap) + } + return unmarshalNode(payload.Data, reflect.ValueOf(model), nil) +} + +// UnmarshalManyPayload converts an io into a set of struct instances using +// jsonapi tags on the type's struct fields. +func UnmarshalManyPayload(in io.Reader, t reflect.Type) ([]interface{}, error) { + payload := new(ManyPayload) + + if err := json.NewDecoder(in).Decode(payload); err != nil { + return nil, err + } + + models := []interface{}{} // will be populated from the "data" + includedMap := map[string]*Node{} // will be populate from the "included" + + if payload.Included != nil { + for _, included := range payload.Included { + key := fmt.Sprintf("%s,%s", included.Type, included.ID) + includedMap[key] = included + } + } + + for _, data := range payload.Data { + model := reflect.New(t.Elem()) + err := unmarshalNode(data, model, &includedMap) + if err != nil { + return nil, err + } + models = append(models, model.Interface()) + } + + return models, nil +} + +func unmarshalNode(data *Node, model reflect.Value, included *map[string]*Node) (err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("data is not a jsonapi representation of '%v'", model.Type()) + } + }() + + modelValue := 
model.Elem() + modelType := model.Type().Elem() + + var er error + + for i := 0; i < modelValue.NumField(); i++ { + fieldType := modelType.Field(i) + tag := fieldType.Tag.Get("jsonapi") + if tag == "" { + continue + } + + fieldValue := modelValue.Field(i) + + args := strings.Split(tag, ",") + if len(args) < 1 { + er = ErrBadJSONAPIStructTag + break + } + + annotation := args[0] + + if (annotation == annotationClientID && len(args) != 1) || + (annotation != annotationClientID && len(args) < 2) { + er = ErrBadJSONAPIStructTag + break + } + + if annotation == annotationPrimary { + if data.ID == "" { + continue + } + + // Check the JSON API Type + if data.Type != args[1] { + er = fmt.Errorf( + "Trying to Unmarshal an object of type %#v, but %#v does not match", + data.Type, + args[1], + ) + break + } + + // ID will have to be transmitted as astring per the JSON API spec + v := reflect.ValueOf(data.ID) + + // Deal with PTRS + var kind reflect.Kind + if fieldValue.Kind() == reflect.Ptr { + kind = fieldType.Type.Elem().Kind() + } else { + kind = fieldType.Type.Kind() + } + + // Handle String case + if kind == reflect.String { + assign(fieldValue, v) + continue + } + + // Value was not a string... only other supported type was a numeric, + // which would have been sent as a float value. 
+ floatValue, err := strconv.ParseFloat(data.ID, 64) + if err != nil { + // Could not convert the value in the "id" attr to a float + er = ErrBadJSONAPIID + break + } + + // Convert the numeric float to one of the supported ID numeric types + // (int[8,16,32,64] or uint[8,16,32,64]) + var idValue reflect.Value + switch kind { + case reflect.Int: + n := int(floatValue) + idValue = reflect.ValueOf(&n) + case reflect.Int8: + n := int8(floatValue) + idValue = reflect.ValueOf(&n) + case reflect.Int16: + n := int16(floatValue) + idValue = reflect.ValueOf(&n) + case reflect.Int32: + n := int32(floatValue) + idValue = reflect.ValueOf(&n) + case reflect.Int64: + n := int64(floatValue) + idValue = reflect.ValueOf(&n) + case reflect.Uint: + n := uint(floatValue) + idValue = reflect.ValueOf(&n) + case reflect.Uint8: + n := uint8(floatValue) + idValue = reflect.ValueOf(&n) + case reflect.Uint16: + n := uint16(floatValue) + idValue = reflect.ValueOf(&n) + case reflect.Uint32: + n := uint32(floatValue) + idValue = reflect.ValueOf(&n) + case reflect.Uint64: + n := uint64(floatValue) + idValue = reflect.ValueOf(&n) + default: + // We had a JSON float (numeric), but our field was not one of the + // allowed numeric types + er = ErrBadJSONAPIID + break + } + + assign(fieldValue, idValue) + } else if annotation == annotationClientID { + if data.ClientID == "" { + continue + } + + fieldValue.Set(reflect.ValueOf(data.ClientID)) + } else if annotation == annotationAttribute { + attributes := data.Attributes + + if attributes == nil || len(data.Attributes) == 0 { + continue + } + + attribute := attributes[args[1]] + + // continue if the attribute was not included in the request + if attribute == nil { + continue + } + + structField := fieldType + value, err := unmarshalAttribute(attribute, args, structField, fieldValue) + if err != nil { + er = err + break + } + + assign(fieldValue, value) + continue + + } else if annotation == annotationRelation { + isSlice := fieldValue.Type().Kind() == 
reflect.Slice + + if data.Relationships == nil || data.Relationships[args[1]] == nil { + continue + } + + if isSlice { + // to-many relationship + relationship := new(RelationshipManyNode) + + buf := bytes.NewBuffer(nil) + + json.NewEncoder(buf).Encode(data.Relationships[args[1]]) + json.NewDecoder(buf).Decode(relationship) + + data := relationship.Data + models := reflect.New(fieldValue.Type()).Elem() + + for _, n := range data { + m := reflect.New(fieldValue.Type().Elem().Elem()) + + if err := unmarshalNode( + fullNode(n, included), + m, + included, + ); err != nil { + er = err + break + } + + models = reflect.Append(models, m) + } + + fieldValue.Set(models) + } else { + // to-one relationships + relationship := new(RelationshipOneNode) + + buf := bytes.NewBuffer(nil) + + json.NewEncoder(buf).Encode( + data.Relationships[args[1]], + ) + json.NewDecoder(buf).Decode(relationship) + + /* + http://jsonapi.org/format/#document-resource-object-relationships + http://jsonapi.org/format/#document-resource-object-linkage + relationship can have a data node set to null (e.g. to disassociate the relationship) + so unmarshal and set fieldValue only if data obj is not null + */ + if relationship.Data == nil { + continue + } + + m := reflect.New(fieldValue.Type().Elem()) + if err := unmarshalNode( + fullNode(relationship.Data, included), + m, + included, + ); err != nil { + er = err + break + } + + fieldValue.Set(m) + + } + + } else { + er = fmt.Errorf(unsuportedStructTagMsg, annotation) + } + } + + return er +} + +func fullNode(n *Node, included *map[string]*Node) *Node { + includedKey := fmt.Sprintf("%s,%s", n.Type, n.ID) + + if included != nil && (*included)[includedKey] != nil { + return (*included)[includedKey] + } + + return n +} + +// assign will take the value specified and assign it to the field; if +// field is expecting a ptr assign will assign a ptr. 
+func assign(field, value reflect.Value) { + value = reflect.Indirect(value) + + if field.Kind() == reflect.Ptr { + // initialize pointer so it's value + // can be set by assignValue + field.Set(reflect.New(field.Type().Elem())) + assignValue(field.Elem(), value) + } else { + assignValue(field, value) + } +} + +// assign assigns the specified value to the field, +// expecting both values not to be pointer types. +func assignValue(field, value reflect.Value) { + switch field.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, + reflect.Int32, reflect.Int64: + field.SetInt(value.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, + reflect.Uint32, reflect.Uint64, reflect.Uintptr: + field.SetUint(value.Uint()) + case reflect.Float32, reflect.Float64: + field.SetFloat(value.Float()) + case reflect.String: + field.SetString(value.String()) + case reflect.Bool: + field.SetBool(value.Bool()) + default: + field.Set(value) + } +} + +func unmarshalAttribute( + attribute interface{}, + args []string, + structField reflect.StructField, + fieldValue reflect.Value) (value reflect.Value, err error) { + value = reflect.ValueOf(attribute) + fieldType := structField.Type + + // Handle field of type []string + if fieldValue.Type() == reflect.TypeOf([]string{}) { + value, err = handleStringSlice(attribute, args, fieldType, fieldValue) + return + } + + // Handle field of type time.Time + if fieldValue.Type() == reflect.TypeOf(time.Time{}) || + fieldValue.Type() == reflect.TypeOf(new(time.Time)) { + value, err = handleTime(attribute, args, fieldType, fieldValue) + return + } + + // Handle field of type struct + if fieldValue.Type().Kind() == reflect.Struct { + value, err = handleStruct(attribute, args, fieldType, fieldValue) + return + } + + // Handle field containing slice of structs + if fieldValue.Type().Kind() == reflect.Slice { + elem := reflect.TypeOf(fieldValue.Interface()).Elem() + if elem.Kind() == reflect.Ptr { + elem = elem.Elem() + } + + if elem.Kind() == 
reflect.Struct { + value, err = handleStructSlice(attribute, args, fieldType, fieldValue) + return + } + } + + // JSON value was a float (numeric) + if value.Kind() == reflect.Float64 { + value, err = handleNumeric(attribute, args, fieldType, fieldValue) + return + } + + // Field was a Pointer type + if fieldValue.Kind() == reflect.Ptr { + value, err = handlePointer(attribute, args, fieldType, fieldValue, structField) + return + } + + // As a final catch-all, ensure types line up to avoid a runtime panic. + if fieldValue.Kind() != value.Kind() { + err = ErrInvalidType + return + } + + return +} + +func handleStringSlice( + attribute interface{}, + args []string, + fieldType reflect.Type, + fieldValue reflect.Value) (reflect.Value, error) { + v := reflect.ValueOf(attribute) + values := make([]string, v.Len()) + for i := 0; i < v.Len(); i++ { + values[i] = v.Index(i).Interface().(string) + } + + return reflect.ValueOf(values), nil +} + +func handleTime( + attribute interface{}, + args []string, + fieldType reflect.Type, + fieldValue reflect.Value) (reflect.Value, error) { + var isIso8601 bool + v := reflect.ValueOf(attribute) + + if len(args) > 2 { + for _, arg := range args[2:] { + if arg == annotationISO8601 { + isIso8601 = true + } + } + } + + if isIso8601 { + var tm string + if v.Kind() == reflect.String { + tm = v.Interface().(string) + } else { + return reflect.ValueOf(time.Now()), ErrInvalidISO8601 + } + + t, err := time.Parse(iso8601TimeFormat, tm) + if err != nil { + return reflect.ValueOf(time.Now()), ErrInvalidISO8601 + } + + if fieldValue.Kind() == reflect.Ptr { + return reflect.ValueOf(&t), nil + } + + return reflect.ValueOf(t), nil + } + + var at int64 + + if v.Kind() == reflect.Float64 { + at = int64(v.Interface().(float64)) + } else if v.Kind() == reflect.Int { + at = v.Int() + } else { + return reflect.ValueOf(time.Now()), ErrInvalidTime + } + + t := time.Unix(at, 0) + + return reflect.ValueOf(t), nil +} + +func handleNumeric( + attribute 
interface{}, + args []string, + fieldType reflect.Type, + fieldValue reflect.Value) (reflect.Value, error) { + v := reflect.ValueOf(attribute) + floatValue := v.Interface().(float64) + + var kind reflect.Kind + if fieldValue.Kind() == reflect.Ptr { + kind = fieldType.Elem().Kind() + } else { + kind = fieldType.Kind() + } + + var numericValue reflect.Value + + switch kind { + case reflect.Int: + n := int(floatValue) + numericValue = reflect.ValueOf(&n) + case reflect.Int8: + n := int8(floatValue) + numericValue = reflect.ValueOf(&n) + case reflect.Int16: + n := int16(floatValue) + numericValue = reflect.ValueOf(&n) + case reflect.Int32: + n := int32(floatValue) + numericValue = reflect.ValueOf(&n) + case reflect.Int64: + n := int64(floatValue) + numericValue = reflect.ValueOf(&n) + case reflect.Uint: + n := uint(floatValue) + numericValue = reflect.ValueOf(&n) + case reflect.Uint8: + n := uint8(floatValue) + numericValue = reflect.ValueOf(&n) + case reflect.Uint16: + n := uint16(floatValue) + numericValue = reflect.ValueOf(&n) + case reflect.Uint32: + n := uint32(floatValue) + numericValue = reflect.ValueOf(&n) + case reflect.Uint64: + n := uint64(floatValue) + numericValue = reflect.ValueOf(&n) + case reflect.Float32: + n := float32(floatValue) + numericValue = reflect.ValueOf(&n) + case reflect.Float64: + n := floatValue + numericValue = reflect.ValueOf(&n) + default: + return reflect.Value{}, ErrUnknownFieldNumberType + } + + return numericValue, nil +} + +func handlePointer( + attribute interface{}, + args []string, + fieldType reflect.Type, + fieldValue reflect.Value, + structField reflect.StructField) (reflect.Value, error) { + t := fieldValue.Type() + var concreteVal reflect.Value + + switch cVal := attribute.(type) { + case string: + concreteVal = reflect.ValueOf(&cVal) + case bool: + concreteVal = reflect.ValueOf(&cVal) + case complex64, complex128, uintptr: + concreteVal = reflect.ValueOf(&cVal) + case map[string]interface{}: + var err error + concreteVal, 
err = handleStruct(attribute, args, fieldType, fieldValue) + if err != nil { + return reflect.Value{}, newErrUnsupportedPtrType( + reflect.ValueOf(attribute), fieldType, structField) + } + return concreteVal.Elem(), err + default: + return reflect.Value{}, newErrUnsupportedPtrType( + reflect.ValueOf(attribute), fieldType, structField) + } + + if t != concreteVal.Type() { + return reflect.Value{}, newErrUnsupportedPtrType( + reflect.ValueOf(attribute), fieldType, structField) + } + + return concreteVal, nil +} + +func handleStruct( + attribute interface{}, + args []string, + fieldType reflect.Type, + fieldValue reflect.Value) (reflect.Value, error) { + model := reflect.New(fieldValue.Type()) + + data, err := json.Marshal(attribute) + if err != nil { + return model, err + } + + err = json.Unmarshal(data, model.Interface()) + + if err != nil { + return model, err + } + + return model, err +} + +func handleStructSlice( + attribute interface{}, + args []string, + fieldType reflect.Type, + fieldValue reflect.Value) (reflect.Value, error) { + models := reflect.New(fieldValue.Type()).Elem() + dataMap := reflect.ValueOf(attribute).Interface().([]interface{}) + for _, data := range dataMap { + model := reflect.New(fieldValue.Type().Elem()).Elem() + modelType := model.Type() + + value, err := handleStruct(data, []string{}, modelType, model) + + if err != nil { + continue + } + + models = reflect.Append(models, reflect.Indirect(value)) + } + + return models, nil +} diff --git a/vendor/github.com/svanharmelen/jsonapi/response.go b/vendor/github.com/svanharmelen/jsonapi/response.go new file mode 100644 index 000000000000..e8e85fa42f11 --- /dev/null +++ b/vendor/github.com/svanharmelen/jsonapi/response.go @@ -0,0 +1,539 @@ +package jsonapi + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +var ( + // ErrBadJSONAPIStructTag is returned when the Struct field's JSON API + // annotation is invalid. 
+ ErrBadJSONAPIStructTag = errors.New("Bad jsonapi struct tag format") + // ErrBadJSONAPIID is returned when the Struct JSON API annotated "id" field + // was not a valid numeric type. + ErrBadJSONAPIID = errors.New( + "id should be either string, int(8,16,32,64) or uint(8,16,32,64)") + // ErrExpectedSlice is returned when a variable or argument was expected to + // be a slice of *Structs; MarshalMany will return this error when its + // interface{} argument is invalid. + ErrExpectedSlice = errors.New("models should be a slice of struct pointers") + // ErrUnexpectedType is returned when marshalling an interface; the interface + // had to be a pointer or a slice; otherwise this error is returned. + ErrUnexpectedType = errors.New("models should be a struct pointer or slice of struct pointers") +) + +// MarshalPayload writes a jsonapi response for one or many records. The +// related records are sideloaded into the "included" array. If this method is +// given a struct pointer as an argument it will serialize in the form +// "data": {...}. If this method is given a slice of pointers, this method will +// serialize in the form "data": [...] 
+// +// One Example: you could pass it, w, your http.ResponseWriter, and, models, a +// ptr to a Blog to be written to the response body: +// +// func ShowBlog(w http.ResponseWriter, r *http.Request) { +// blog := &Blog{} +// +// w.Header().Set("Content-Type", jsonapi.MediaType) +// w.WriteHeader(http.StatusOK) +// +// if err := jsonapi.MarshalPayload(w, blog); err != nil { +// http.Error(w, err.Error(), http.StatusInternalServerError) +// } +// } +// +// Many Example: you could pass it, w, your http.ResponseWriter, and, models, a +// slice of Blog struct instance pointers to be written to the response body: +// +// func ListBlogs(w http.ResponseWriter, r *http.Request) { +// blogs := []*Blog{} +// +// w.Header().Set("Content-Type", jsonapi.MediaType) +// w.WriteHeader(http.StatusOK) +// +// if err := jsonapi.MarshalPayload(w, blogs); err != nil { +// http.Error(w, err.Error(), http.StatusInternalServerError) +// } +// } +// +func MarshalPayload(w io.Writer, models interface{}) error { + payload, err := Marshal(models) + if err != nil { + return err + } + + if err := json.NewEncoder(w).Encode(payload); err != nil { + return err + } + return nil +} + +// Marshal does the same as MarshalPayload except it just returns the payload +// and doesn't write out results. Useful if you use your own JSON rendering +// library. 
+func Marshal(models interface{}) (Payloader, error) { + switch vals := reflect.ValueOf(models); vals.Kind() { + case reflect.Slice: + m, err := convertToSliceInterface(&models) + if err != nil { + return nil, err + } + + payload, err := marshalMany(m) + if err != nil { + return nil, err + } + + if linkableModels, isLinkable := models.(Linkable); isLinkable { + jl := linkableModels.JSONAPILinks() + if er := jl.validate(); er != nil { + return nil, er + } + payload.Links = linkableModels.JSONAPILinks() + } + + if metableModels, ok := models.(Metable); ok { + payload.Meta = metableModels.JSONAPIMeta() + } + + return payload, nil + case reflect.Ptr: + // Check that the pointer was to a struct + if reflect.Indirect(vals).Kind() != reflect.Struct { + return nil, ErrUnexpectedType + } + return marshalOne(models) + default: + return nil, ErrUnexpectedType + } +} + +// MarshalPayloadWithoutIncluded writes a jsonapi response with one or many +// records, without the related records sideloaded into "included" array. +// If you want to serialize the relations into the "included" array see +// MarshalPayload. +// +// models interface{} should be either a struct pointer or a slice of struct +// pointers. +func MarshalPayloadWithoutIncluded(w io.Writer, model interface{}) error { + payload, err := Marshal(model) + if err != nil { + return err + } + payload.clearIncluded() + + if err := json.NewEncoder(w).Encode(payload); err != nil { + return err + } + return nil +} + +// marshalOne does the same as MarshalOnePayload except it just returns the +// payload and doesn't write out results. Useful is you use your JSON rendering +// library. 
+func marshalOne(model interface{}) (*OnePayload, error) { + included := make(map[string]*Node) + + rootNode, err := visitModelNode(model, &included, true) + if err != nil { + return nil, err + } + payload := &OnePayload{Data: rootNode} + + payload.Included = nodeMapValues(&included) + + return payload, nil +} + +// marshalMany does the same as MarshalManyPayload except it just returns the +// payload and doesn't write out results. Useful is you use your JSON rendering +// library. +func marshalMany(models []interface{}) (*ManyPayload, error) { + payload := &ManyPayload{ + Data: []*Node{}, + } + included := map[string]*Node{} + + for _, model := range models { + node, err := visitModelNode(model, &included, true) + if err != nil { + return nil, err + } + payload.Data = append(payload.Data, node) + } + payload.Included = nodeMapValues(&included) + + return payload, nil +} + +// MarshalOnePayloadEmbedded - This method not meant to for use in +// implementation code, although feel free. The purpose of this +// method is for use in tests. In most cases, your request +// payloads for create will be embedded rather than sideloaded for +// related records. This method will serialize a single struct +// pointer into an embedded json response. In other words, there +// will be no, "included", array in the json all relationships will +// be serailized inline in the data. +// +// However, in tests, you may want to construct payloads to post +// to create methods that are embedded to most closely resemble +// the payloads that will be produced by the client. This is what +// this method is intended for. +// +// model interface{} should be a pointer to a struct. 
+func MarshalOnePayloadEmbedded(w io.Writer, model interface{}) error { + rootNode, err := visitModelNode(model, nil, false) + if err != nil { + return err + } + + payload := &OnePayload{Data: rootNode} + + if err := json.NewEncoder(w).Encode(payload); err != nil { + return err + } + + return nil +} + +func visitModelNode(model interface{}, included *map[string]*Node, + sideload bool) (*Node, error) { + node := new(Node) + + var er error + value := reflect.ValueOf(model) + if value.IsNil() { + return nil, nil + } + + modelValue := value.Elem() + modelType := value.Type().Elem() + + for i := 0; i < modelValue.NumField(); i++ { + structField := modelValue.Type().Field(i) + tag := structField.Tag.Get(annotationJSONAPI) + if tag == "" { + continue + } + + fieldValue := modelValue.Field(i) + fieldType := modelType.Field(i) + + args := strings.Split(tag, annotationSeperator) + + if len(args) < 1 { + er = ErrBadJSONAPIStructTag + break + } + + annotation := args[0] + + if (annotation == annotationClientID && len(args) != 1) || + (annotation != annotationClientID && len(args) < 2) { + er = ErrBadJSONAPIStructTag + break + } + + if annotation == annotationPrimary { + v := fieldValue + + // Deal with PTRS + var kind reflect.Kind + if fieldValue.Kind() == reflect.Ptr { + kind = fieldType.Type.Elem().Kind() + v = reflect.Indirect(fieldValue) + } else { + kind = fieldType.Type.Kind() + } + + // Handle allowed types + switch kind { + case reflect.String: + node.ID = v.Interface().(string) + case reflect.Int: + node.ID = strconv.FormatInt(int64(v.Interface().(int)), 10) + case reflect.Int8: + node.ID = strconv.FormatInt(int64(v.Interface().(int8)), 10) + case reflect.Int16: + node.ID = strconv.FormatInt(int64(v.Interface().(int16)), 10) + case reflect.Int32: + node.ID = strconv.FormatInt(int64(v.Interface().(int32)), 10) + case reflect.Int64: + node.ID = strconv.FormatInt(v.Interface().(int64), 10) + case reflect.Uint: + node.ID = strconv.FormatUint(uint64(v.Interface().(uint)), 
10) + case reflect.Uint8: + node.ID = strconv.FormatUint(uint64(v.Interface().(uint8)), 10) + case reflect.Uint16: + node.ID = strconv.FormatUint(uint64(v.Interface().(uint16)), 10) + case reflect.Uint32: + node.ID = strconv.FormatUint(uint64(v.Interface().(uint32)), 10) + case reflect.Uint64: + node.ID = strconv.FormatUint(v.Interface().(uint64), 10) + default: + // We had a JSON float (numeric), but our field was not one of the + // allowed numeric types + er = ErrBadJSONAPIID + break + } + + node.Type = args[1] + } else if annotation == annotationClientID { + clientID := fieldValue.String() + if clientID != "" { + node.ClientID = clientID + } + } else if annotation == annotationAttribute { + var omitEmpty, iso8601 bool + + if len(args) > 2 { + for _, arg := range args[2:] { + switch arg { + case annotationOmitEmpty: + omitEmpty = true + case annotationISO8601: + iso8601 = true + } + } + } + + if node.Attributes == nil { + node.Attributes = make(map[string]interface{}) + } + + if fieldValue.Type() == reflect.TypeOf(time.Time{}) { + t := fieldValue.Interface().(time.Time) + + if t.IsZero() { + continue + } + + if iso8601 { + node.Attributes[args[1]] = t.UTC().Format(iso8601TimeFormat) + } else { + node.Attributes[args[1]] = t.Unix() + } + } else if fieldValue.Type() == reflect.TypeOf(new(time.Time)) { + // A time pointer may be nil + if fieldValue.IsNil() { + if omitEmpty { + continue + } + + node.Attributes[args[1]] = nil + } else { + tm := fieldValue.Interface().(*time.Time) + + if tm.IsZero() && omitEmpty { + continue + } + + if iso8601 { + node.Attributes[args[1]] = tm.UTC().Format(iso8601TimeFormat) + } else { + node.Attributes[args[1]] = tm.Unix() + } + } + } else { + // Dealing with a fieldValue that is not a time + emptyValue := reflect.Zero(fieldValue.Type()) + + // See if we need to omit this field + if omitEmpty && reflect.DeepEqual(fieldValue.Interface(), emptyValue.Interface()) { + continue + } + + strAttr, ok := fieldValue.Interface().(string) + if 
ok { + node.Attributes[args[1]] = strAttr + } else { + node.Attributes[args[1]] = fieldValue.Interface() + } + } + } else if annotation == annotationRelation { + var omitEmpty bool + + //add support for 'omitempty' struct tag for marshaling as absent + if len(args) > 2 { + omitEmpty = args[2] == annotationOmitEmpty + } + + isSlice := fieldValue.Type().Kind() == reflect.Slice + if omitEmpty && + (isSlice && fieldValue.Len() < 1 || + (!isSlice && fieldValue.IsNil())) { + continue + } + + if node.Relationships == nil { + node.Relationships = make(map[string]interface{}) + } + + var relLinks *Links + if linkableModel, ok := model.(RelationshipLinkable); ok { + relLinks = linkableModel.JSONAPIRelationshipLinks(args[1]) + } + + var relMeta *Meta + if metableModel, ok := model.(RelationshipMetable); ok { + relMeta = metableModel.JSONAPIRelationshipMeta(args[1]) + } + + if isSlice { + // to-many relationship + relationship, err := visitModelNodeRelationships( + fieldValue, + included, + sideload, + ) + if err != nil { + er = err + break + } + relationship.Links = relLinks + relationship.Meta = relMeta + + if sideload { + shallowNodes := []*Node{} + for _, n := range relationship.Data { + appendIncluded(included, n) + shallowNodes = append(shallowNodes, toShallowNode(n)) + } + + node.Relationships[args[1]] = &RelationshipManyNode{ + Data: shallowNodes, + Links: relationship.Links, + Meta: relationship.Meta, + } + } else { + node.Relationships[args[1]] = relationship + } + } else { + // to-one relationships + + // Handle null relationship case + if fieldValue.IsNil() { + node.Relationships[args[1]] = &RelationshipOneNode{Data: nil} + continue + } + + relationship, err := visitModelNode( + fieldValue.Interface(), + included, + sideload, + ) + if err != nil { + er = err + break + } + + if sideload { + appendIncluded(included, relationship) + node.Relationships[args[1]] = &RelationshipOneNode{ + Data: toShallowNode(relationship), + Links: relLinks, + Meta: relMeta, + } + } else 
{ + node.Relationships[args[1]] = &RelationshipOneNode{ + Data: relationship, + Links: relLinks, + Meta: relMeta, + } + } + } + + } else { + er = ErrBadJSONAPIStructTag + break + } + } + + if er != nil { + return nil, er + } + + if linkableModel, isLinkable := model.(Linkable); isLinkable { + jl := linkableModel.JSONAPILinks() + if er := jl.validate(); er != nil { + return nil, er + } + node.Links = linkableModel.JSONAPILinks() + } + + if metableModel, ok := model.(Metable); ok { + node.Meta = metableModel.JSONAPIMeta() + } + + return node, nil +} + +func toShallowNode(node *Node) *Node { + return &Node{ + ID: node.ID, + Type: node.Type, + } +} + +func visitModelNodeRelationships(models reflect.Value, included *map[string]*Node, + sideload bool) (*RelationshipManyNode, error) { + nodes := []*Node{} + + for i := 0; i < models.Len(); i++ { + n := models.Index(i).Interface() + + node, err := visitModelNode(n, included, sideload) + if err != nil { + return nil, err + } + + nodes = append(nodes, node) + } + + return &RelationshipManyNode{Data: nodes}, nil +} + +func appendIncluded(m *map[string]*Node, nodes ...*Node) { + included := *m + + for _, n := range nodes { + k := fmt.Sprintf("%s,%s", n.Type, n.ID) + + if _, hasNode := included[k]; hasNode { + continue + } + + included[k] = n + } +} + +func nodeMapValues(m *map[string]*Node) []*Node { + mp := *m + nodes := make([]*Node, len(mp)) + + i := 0 + for _, n := range mp { + nodes[i] = n + i++ + } + + return nodes +} + +func convertToSliceInterface(i *interface{}) ([]interface{}, error) { + vals := reflect.ValueOf(*i) + if vals.Kind() != reflect.Slice { + return nil, ErrExpectedSlice + } + var response []interface{} + for x := 0; x < vals.Len(); x++ { + response = append(response, vals.Index(x).Interface()) + } + return response, nil +} diff --git a/vendor/github.com/svanharmelen/jsonapi/runtime.go b/vendor/github.com/svanharmelen/jsonapi/runtime.go new file mode 100644 index 000000000000..7dc658155bad --- /dev/null +++ 
b/vendor/github.com/svanharmelen/jsonapi/runtime.go @@ -0,0 +1,103 @@ +package jsonapi + +import ( + "crypto/rand" + "fmt" + "io" + "reflect" + "time" +) + +type Event int + +const ( + UnmarshalStart Event = iota + UnmarshalStop + MarshalStart + MarshalStop +) + +type Runtime struct { + ctx map[string]interface{} +} + +type Events func(*Runtime, Event, string, time.Duration) + +var Instrumentation Events + +func NewRuntime() *Runtime { return &Runtime{make(map[string]interface{})} } + +func (r *Runtime) WithValue(key string, value interface{}) *Runtime { + r.ctx[key] = value + + return r +} + +func (r *Runtime) Value(key string) interface{} { + return r.ctx[key] +} + +func (r *Runtime) Instrument(key string) *Runtime { + return r.WithValue("instrument", key) +} + +func (r *Runtime) shouldInstrument() bool { + return Instrumentation != nil +} + +func (r *Runtime) UnmarshalPayload(reader io.Reader, model interface{}) error { + return r.instrumentCall(UnmarshalStart, UnmarshalStop, func() error { + return UnmarshalPayload(reader, model) + }) +} + +func (r *Runtime) UnmarshalManyPayload(reader io.Reader, kind reflect.Type) (elems []interface{}, err error) { + r.instrumentCall(UnmarshalStart, UnmarshalStop, func() error { + elems, err = UnmarshalManyPayload(reader, kind) + return err + }) + + return +} + +func (r *Runtime) MarshalPayload(w io.Writer, model interface{}) error { + return r.instrumentCall(MarshalStart, MarshalStop, func() error { + return MarshalPayload(w, model) + }) +} + +func (r *Runtime) instrumentCall(start Event, stop Event, c func() error) error { + if !r.shouldInstrument() { + return c() + } + + instrumentationGUID, err := newUUID() + if err != nil { + return err + } + + begin := time.Now() + Instrumentation(r, start, instrumentationGUID, time.Duration(0)) + + if err := c(); err != nil { + return err + } + + diff := time.Duration(time.Now().UnixNano() - begin.UnixNano()) + Instrumentation(r, stop, instrumentationGUID, diff) + + return nil +} + +// 
citation: http://play.golang.org/p/4FkNSiUDMg +func newUUID() (string, error) { + uuid := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, uuid); err != nil { + return "", err + } + // variant bits; see section 4.1.1 + uuid[8] = uuid[8]&^0xc0 | 0x80 + // version 4 (pseudo-random); see section 4.1.3 + uuid[6] = uuid[6]&^0xf0 | 0x40 + return fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:]), nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 7b98291790b5..09369f8fe359 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -231,6 +231,8 @@ github.com/google/go-cmp/cmp/cmpopts github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value +# github.com/google/go-querystring v1.0.0 +github.com/google/go-querystring/query # github.com/googleapis/gax-go v0.0.0-20161107002406-da06d194a00e github.com/googleapis/gax-go # github.com/gophercloud/gophercloud v0.0.0-20170524130959-3027adb1ce72 @@ -311,6 +313,10 @@ github.com/hashicorp/go-retryablehttp github.com/hashicorp/go-rootcerts # github.com/hashicorp/go-safetemp v0.0.0-20180326211150-b1a1dbde6fdc github.com/hashicorp/go-safetemp +# github.com/hashicorp/go-slug v0.1.0 +github.com/hashicorp/go-slug +# github.com/hashicorp/go-tfe v0.2.6 +github.com/hashicorp/go-tfe # github.com/hashicorp/go-uuid v1.0.0 github.com/hashicorp/go-uuid # github.com/hashicorp/go-version v0.0.0-20180322230233-23480c066577 @@ -435,6 +441,8 @@ github.com/satori/uuid # github.com/spf13/afero v1.0.2 github.com/spf13/afero github.com/spf13/afero/mem +# github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d +github.com/svanharmelen/jsonapi # github.com/terraform-providers/terraform-provider-aws v1.41.0 github.com/terraform-providers/terraform-provider-aws/aws # github.com/terraform-providers/terraform-provider-openstack v0.0.0-20170616075611-4080a521c6ea From ad276142b416155afcf9e5adf6e1d3f1c5e2acc7 Mon Sep 17 
00:00:00 2001 From: Martin Atkins Date: Thu, 8 Nov 2018 14:38:56 -0800 Subject: [PATCH 062/149] command: Fix the command package test build Some merging conflict shenanigans here led to this usage not lining up with the imported symbol name, meaning that the tests couldn't compile any more. --- command/command_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command/command_test.go b/command/command_test.go index b7921caa7bf5..7e786ffcc7fa 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -713,7 +713,7 @@ func testBackendState(t *testing.T, s *terraform.State, c int) (*terraform.State Type: "http", Config: configs.SynthBody("", map[string]cty.Value{}), } - b := backendinit.Backend("http")() + b := backendInit.Backend("http")() configSchema := b.ConfigSchema() hash := backendConfig.Hash(configSchema) From c05a556c0e24c49f0355ea7d791d11c493218c80 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Thu, 8 Nov 2018 15:05:49 -0800 Subject: [PATCH 063/149] command: Fix TestRefresh_backup This test intentionally overrides the backup file location using the -backup option, so the backup file is not in the default location for this one. 
--- command/refresh_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command/refresh_test.go b/command/refresh_test.go index 9b2037f8120c..84808f1743d7 100644 --- a/command/refresh_test.go +++ b/command/refresh_test.go @@ -566,7 +566,7 @@ func TestRefresh_backup(t *testing.T) { t.Fatalf("wrong new object\ngot: %swant: %s", spew.Sdump(actual), spew.Sdump(expected)) } - backupState := testStateRead(t, outPath+DefaultBackupExtension) + backupState := testStateRead(t, backupPath) actualStr := strings.TrimSpace(backupState.String()) expectedStr := strings.TrimSpace(state.String()) if actualStr != expectedStr { From 592850e22c6b706f49f98f92010a9f4156746759 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Thu, 8 Nov 2018 15:22:41 -0800 Subject: [PATCH 064/149] command: Fix TestApply_plan_remoteState Some over-zealous bulk updating of this test file caused this test to produce a remote state config cache file on disk when it doesn't actually need one: the backend config comes from the plan file when applying a saved plan. 
--- command/apply_test.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/command/apply_test.go b/command/apply_test.go index 6501874d0b90..e511cfea474e 100644 --- a/command/apply_test.go +++ b/command/apply_test.go @@ -651,9 +651,8 @@ func TestApply_plan_remoteState(t *testing.T) { // Create a remote state state := testState() - backendState, srv := testRemoteState(t, state, 200) + _, srv := testRemoteState(t, state, 200) defer srv.Close() - testStateFileRemote(t, backendState) _, snap := testModuleWithSnapshot(t, "apply") backendConfig := cty.ObjectVal(map[string]cty.Value{ @@ -702,8 +701,8 @@ func TestApply_plan_remoteState(t *testing.T) { } // Check that there is no remote state config - if _, err := os.Stat(remoteStatePath); err == nil { - t.Fatalf("has remote state config") + if src, err := ioutil.ReadFile(remoteStatePath); err == nil { + t.Fatalf("has %s file; should not\n%s", remoteStatePath, src) } } From 8b603e48770386fbd1387ce27e212ac9fef32d8c Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Thu, 8 Nov 2018 16:12:11 -0800 Subject: [PATCH 065/149] command: Name the Terraform Registry specifically in error message When we originally wrote this message we struggled a bit for how to refer to the releases server without writing an awkwardly-ungrammatical sentence, and so "the official repository" became a placeholder name for it. Now that we'll be looking in Terraform Registry this gives us a nice proper noun to use. This message will need to evolve more as our integration with the registry gets more sophisticated, but for now this works. --- command/init.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command/init.go b/command/init.go index 0dd6c79f547c..1614a604f106 100644 --- a/command/init.go +++ b/command/init.go @@ -700,7 +700,7 @@ suggested below. 
const errProviderNotFound = ` [reset][bold][red]Provider %[1]q not available for installation.[reset][red] -A provider named %[1]q could not be found in the official repository. +A provider named %[1]q could not be found in the Terraform Registry. This may result from mistyping the provider name, or the given provider may be a third-party provider that cannot be installed automatically. From f001cb8654d74d531eec7da1898dc707dbaff7b1 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Thu, 8 Nov 2018 16:16:00 -0800 Subject: [PATCH 066/149] command: Fix terraform init -from-module with relative paths Since our new approach here works by installing with a synthetic module configuration block, we need to treat relative paths as a special case for two reasons: - Relative paths in module addresses are relative to the file containing the call rather than the working directory, but -from-module uses the working directory (and the call is in a synthetic "file" anyway) - We need to force Terraform to pass the path through to go-getter rather than just treating it as a relative reference, since we really do want a copy of the directory in this case, even if it is local. To address both of these things, we'll detect a relative path and turn it into an absolute path before beginning installation. This is a bit hacky, but this is consistent with the general philosophy of the -from-module implementation where it does hacky things so that the rest of the installer code can be spared of dealing with its special cases. This is covered by a couple of existing tests that run init -from-module, including TestInit_fromModule_dstInSrc which now passes. 
--- configs/configload/loader_init_from_module.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/configs/configload/loader_init_from_module.go b/configs/configload/loader_init_from_module.go index 0e41ea22c3fa..c383e2f4a3e5 100644 --- a/configs/configload/loader_init_from_module.go +++ b/configs/configload/loader_init_from_module.go @@ -127,6 +127,23 @@ func (l *Loader) InitDirFromModule(rootDir, sourceAddr string, hooks InstallHook }, } + // -from-module allows relative paths but it's different than a normal + // module address where it'd be resolved relative to the module call + // (which is synthetic, here.) To address this, we'll just patch up any + // relative paths to be absolute paths before we run, ensuring we'll + // get the right result. This also, as an important side-effect, ensures + // that the result will be "downloaded" with go-getter (copied from the + // source location), rather than just recorded as a relative path. + { + maybePath := filepath.ToSlash(sourceAddr) + if maybePath == "." || strings.HasPrefix(maybePath, "./") || strings.HasPrefix(maybePath, "../") { + if wd, err := os.Getwd(); err == nil { + sourceAddr = filepath.Join(wd, sourceAddr) + log.Printf("[TRACE] -from-module relative path rewritten to absolute path %s", sourceAddr) + } + } + } + // Now we need to create an artificial root module that will seed our // installation process. fakeRootModule := &configs.Module{ From b5547f00f66f01042139f4fd9378d5e53bacb978 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Thu, 8 Nov 2018 17:08:32 -0800 Subject: [PATCH 067/149] command: backendConfig must mutate its copy, not the original Here we were going to the trouble of copying the body so we could mutate it, but then ended up mutating the original anyway and then returning the unmodified copy. Whoops! 
This fix is verified by a number of "init" command tests that exercise the -backend-config option, including TestInit_backendConfigFile and several others whose names have the prefix TestInit_backendConfig . --- command/meta_backend.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command/meta_backend.go b/command/meta_backend.go index 8f9e51580445..d4c9f0bcc9d1 100644 --- a/command/meta_backend.go +++ b/command/meta_backend.go @@ -329,7 +329,7 @@ func (m *Meta) backendConfig(opts *BackendOpts) (*configs.Backend, int, tfdiags. // We'll shallow-copy configs.Backend here so that we can replace the // body without affecting others that hold this reference. configCopy := *c - c.Config = configBody + configCopy.Config = configBody return &configCopy, configHash, diags } From 297b3b8830d93f272da8bffc42215f7ae6759464 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Thu, 8 Nov 2018 17:26:15 -0800 Subject: [PATCH 068/149] command: Fix TestInit_backendReinitConfigToExtra This test was re-using the same InitCommand value to run multiple times, which is not realistic. Since we now cache configuration source code inside command.Meta on load, it's important that we use a fresh InitCommand instance here so it'll see the modified configuration file we've left on disk. --- command/init_test.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/command/init_test.go b/command/init_test.go index f2b39fd6b477..fab65d7cc278 100644 --- a/command/init_test.go +++ b/command/init_test.go @@ -509,11 +509,23 @@ func TestInit_backendReinitConfigToExtra(t *testing.T) { t.Fatal(err) } + // We need a fresh InitCommand here because the old one now has our configuration + // file cached inside it, so it won't re-read the modification we just made. 
+ c = &InitCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(testProvider()), + Ui: ui, + }, + } + args := []string{"-input=false", "-backend-config=path=foo"} if code := c.Run(args); code != 0 { t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) } state = testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename)) + if got, want := normalizeJSON(t, state.Backend.ConfigRaw), `{"path":"foo","workspace_dir":null}`; got != want { + t.Errorf("wrong config after moving to arg\ngot: %s\nwant: %s", got, want) + } if state.Backend.Hash == backendHash { t.Fatal("state.Backend.Hash was not updated") From e10cf6dabdcf8c8dfc16edfdf0473893fc8dc5a4 Mon Sep 17 00:00:00 2001 From: Nikolai Vavilov Date: Sun, 11 Nov 2018 23:15:11 +0200 Subject: [PATCH 069/149] website: add missing article --- website/intro/getting-started/change.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/intro/getting-started/change.html.md b/website/intro/getting-started/change.html.md index 5784af60de53..d65bbe648869 100644 --- a/website/intro/getting-started/change.html.md +++ b/website/intro/getting-started/change.html.md @@ -71,7 +71,7 @@ AMI for an EC2 instance requires recreating it. Terraform handles these details for you, and the execution plan makes it clear what Terraform will do. Additionally, the execution plan shows that the AMI change is what -required resource to be replaced. Using this information, +required the resource to be replaced. Using this information, you can adjust your changes to possibly avoid destroy/create updates if they are not acceptable in some situations. 
From a2746763a1236eff634f7a9582610f5c3bef7fb4 Mon Sep 17 00:00:00 2001 From: Kristin Laemmert Date: Mon, 12 Nov 2018 09:27:10 -0500 Subject: [PATCH 070/149] Update CHANGELOG.md --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8f0a5b4ca081..cf69fd0b37bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,8 @@ ## 0.12.0-beta1 (Unreleased) +BACKWARDS INCOMPATIBILITIES / NOTES: +* command: Remove `-module-depth` flag from plan, apply, show, and graph. This flag was not widely used and the various updates and improvements to cli output should remove the need for this flag. [GH-19267] + IMPROVEMENTS: * command/state: Update and enable the `state show` command [GH-19200] From 5e9414bf037cb7ed7b0e59fcf7abe76afc097dd3 Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Mon, 12 Nov 2018 16:51:32 +0100 Subject: [PATCH 071/149] backend/migrations: only select workspaces if supported If the backend does not support workspaces, we don't have to try to select a workspace and we should not return an error. --- command/meta_backend.go | 27 ++++++++++++++++++++------- command/meta_backend_migrate.go | 8 -------- 2 files changed, 20 insertions(+), 15 deletions(-) diff --git a/command/meta_backend.go b/command/meta_backend.go index d4c9f0bcc9d1..93892d20b9ae 100644 --- a/command/meta_backend.go +++ b/command/meta_backend.go @@ -700,6 +700,10 @@ func (m *Meta) backend_C_r_s(c *configs.Backend, cHash int, sMgr *state.LocalSta return nil, diags } + // By now the backend is successfully configured. + m.Ui.Output(m.Colorize().Color(fmt.Sprintf( + "[reset][green]\n"+strings.TrimSpace(successBackendSet), s.Backend.Type))) + // Its possible that the currently selected workspace is not migrated, // so we call selectWorkspace to ensure a valid workspace is selected. 
if err := m.selectWorkspace(b); err != nil { @@ -707,9 +711,6 @@ func (m *Meta) backend_C_r_s(c *configs.Backend, cHash int, sMgr *state.LocalSta return nil, diags } - m.Ui.Output(m.Colorize().Color(fmt.Sprintf( - "[reset][green]\n"+strings.TrimSpace(successBackendSet), s.Backend.Type))) - // Return the backend return b, diags } @@ -720,10 +721,14 @@ func (m *Meta) backend_C_r_s(c *configs.Backend, cHash int, sMgr *state.LocalSta func (m *Meta) selectWorkspace(b backend.Backend) error { workspaces, err := b.Workspaces() if err != nil { + if err == backend.ErrWorkspacesNotSupported { + return nil + } return fmt.Errorf("Failed to get migrated workspaces: %s", err) } + if len(workspaces) == 0 { - return fmt.Errorf(errBackendNoMigratedWorkspaces) + return fmt.Errorf(strings.TrimSpace(errBackendNoMigratedWorkspaces)) } // Get the currently selected workspace. @@ -744,18 +749,18 @@ func (m *Meta) selectWorkspace(b backend.Backend) error { v, err := m.UIInput().Input(&terraform.InputOpts{ Id: "select-workspace", Query: fmt.Sprintf( - "[reset][bold][yellow]The currently selected workspace (%s) is not migrated.[reset]", + "\n[reset][bold][yellow]The currently selected workspace (%s) is not migrated.[reset]", workspace), Description: fmt.Sprintf( strings.TrimSpace(inputBackendSelectWorkspace), list.String()), }) if err != nil { - return fmt.Errorf("Error asking to select workspace: %s", err) + return fmt.Errorf("Failed to select workspace: %s", err) } idx, err := strconv.Atoi(v) if err != nil || (idx < 1 || idx > len(workspaces)) { - return fmt.Errorf("Error selecting workspace: input not a valid number") + return fmt.Errorf("Failed to select workspace: input not a valid number") } return m.SetWorkspace(workspaces[idx-1]) @@ -1086,6 +1091,14 @@ If you'd like to run Terraform and store state locally, you can fix this error by removing the backend configuration from your configuration. ` +const errBackendNoMigratedWorkspaces = ` +No workspaces are migrated. 
+ +Use the "terraform workspace" command to create and select a new workspace. +If the backend already contains existing workspaces, you may need to update +the backend configuration. +` + const errBackendRemoteRead = ` Error reading backend state: %s diff --git a/command/meta_backend_migrate.go b/command/meta_backend_migrate.go index 06e46883baf5..b0eee2a83afb 100644 --- a/command/meta_backend_migrate.go +++ b/command/meta_backend_migrate.go @@ -475,14 +475,6 @@ The state in the previous backend remains intact and unmodified. Please resolve the error above and try again. ` -const errBackendNoMigratedWorkspaces = ` -No workspaces are migrated. Use the "terraform workspace" command to create -and select a new workspace. - -If the backend already contains existing workspaces, you may need to update -the workspace name or prefix in the backend configuration. -` - const inputBackendMigrateEmpty = ` Pre-existing state was found while migrating the previous %q backend to the newly configured %q backend. No existing state was found in the newly From c0b7f58143c1c521e24e05e3db37b04de9ebd6b8 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 9 Nov 2018 12:13:30 -0800 Subject: [PATCH 072/149] command: Fix detection of necessary backend migration In the refactoring for new HCL this codepath stopped taking into account changes to the CLI -backend-config options when deciding if a backend migration is required. This restores that behavior in a different way than it used to be: rather than re-hashing the merged config and comparing the hashes, we instead just compare directly the configuration values, which must be exactly equal in order to skip migration. This change is covered by the test TestInit_inputFalse, although as of this commit it is still not passing due a downstream problem within the migration code itself. 
--- command/meta_backend.go | 65 ++++++++++++++++++++++++++++++++++++----- 1 file changed, 57 insertions(+), 8 deletions(-) diff --git a/command/meta_backend.go b/command/meta_backend.go index 93892d20b9ae..e1655c215595 100644 --- a/command/meta_backend.go +++ b/command/meta_backend.go @@ -450,11 +450,8 @@ func (m *Meta) backendFromConfig(opts *BackendOpts) (backend.Backend, tfdiags.Di case c != nil && !s.Backend.Empty(): // If our configuration is the same, then we're just initializing // a previously configured remote backend. - if !s.Backend.Empty() { - storedHash := s.Backend.Hash - if storedHash == cHash { - return m.backend_C_r_S_unchanged(c, cHash, sMgr) - } + if !m.backendConfigNeedsMigration(c, s.Backend) { + return m.backend_C_r_S_unchanged(c, cHash, sMgr) } if !opts.Init { @@ -466,9 +463,7 @@ func (m *Meta) backendFromConfig(opts *BackendOpts) (backend.Backend, tfdiags.Di return nil, diags } - log.Printf( - "[WARN] command: backend config change! saved: %d, new: %d", - s.Backend.Hash, cHash) + log.Printf("[WARN] backend config has changed since last init") return m.backend_C_r_S_changed(c, cHash, sMgr, true) default: @@ -929,6 +924,60 @@ func (m *Meta) backend_C_R_S_unchanged(c *configs.Backend, sMgr *state.LocalStat // Reusable helper functions for backend management //------------------------------------------------------------------- +// backendConfigNeedsMigration returns true if migration might be required to +// move from the configured backend to the given cached backend config. +// +// This must be called with the synthetic *configs.Backend that results from +// merging in any command-line options for correct behavior. +// +// If either the given configuration or cached configuration are invalid then +// this function will conservatively assume that migration is required, +// expecting that the migration code will subsequently deal with the same +// errors. 
+func (m *Meta) backendConfigNeedsMigration(c *configs.Backend, s *terraform.BackendState) bool { + if s == nil || s.Empty() { + log.Print("[TRACE] backendConfigNeedsMigration: no cached config, so migration is required") + return true + } + if c.Type != s.Type { + log.Printf("[TRACE] backendConfigNeedsMigration: type changed from %q to %q, so migration is required", s.Type, c.Type) + return true + } + + // We need the backend's schema to do our comparison here. + f := backendInit.Backend(c.Type) + if f == nil { + log.Printf("[TRACE] backendConfigNeedsMigration: no backend of type %q, which migration codepath must handle", c.Type) + return true // let the migration codepath deal with the missing backend + } + b := f() + + schema := b.ConfigSchema() + decSpec := schema.NoneRequired().DecoderSpec() + givenVal, diags := hcldec.Decode(c.Config, decSpec, nil) + if diags.HasErrors() { + log.Printf("[TRACE] backendConfigNeedsMigration: failed to decode given config; migration codepath must handle problem: %s", diags.Error()) + return true // let the migration codepath deal with these errors + } + + cachedVal, err := s.Config(schema) + if err != nil { + log.Printf("[TRACE] backendConfigNeedsMigration: failed to decode cached config; migration codepath must handle problem: %s", err) + return true // let the migration codepath deal with the error + } + + // If we get all the way down here then it's the exact equality of the + // two decoded values that decides our outcome. It's safe to use RawEquals + // here (rather than Equals) because we know that unknown values can + // never appear in backend configurations. 
+ if cachedVal.RawEquals(givenVal) { + log.Print("[TRACE] backendConfigNeedsMigration: given configuration matches cached configuration, so no migration is required") + return false + } + log.Print("[TRACE] backendConfigNeedsMigration: configuration values have changed, so migration is required") + return true +} + func (m *Meta) backendInitFromConfig(c *configs.Backend) (backend.Backend, cty.Value, tfdiags.Diagnostics) { var diags tfdiags.Diagnostics From f6d468ffd5a62097104d35b7717576e4352bad2e Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 9 Nov 2018 14:26:01 -0800 Subject: [PATCH 073/149] command: Fix TestInit_inputFalse This test was using old-style state files as its input, differing only by lineage. Since lineages are now managed within the state manager itself, the test can't use that to distinguish the two files and so we put a different output in each one instead. This also introduces some TRACE logging to the migration codepaths. There's some hard-to-follow control flow here and so this extra logging helps to understand the reason for a particular outcome, and since this codepath is visited only in "terraform init" anyway it doesn't hurt to be a bit more verbose here. 
--- command/init_test.go | 28 +++++++++++++++++++++------- command/meta_backend_migrate.go | 19 +++++++++++++++++++ 2 files changed, 40 insertions(+), 7 deletions(-) diff --git a/command/init_test.go b/command/init_test.go index fab65d7cc278..29eaa90fd49a 100644 --- a/command/init_test.go +++ b/command/init_test.go @@ -11,15 +11,18 @@ import ( "strings" "testing" - "github.com/hashicorp/terraform/configs" + "github.com/mitchellh/cli" "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/backend/local" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/helper/copy" "github.com/hashicorp/terraform/plugin/discovery" "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/cli" ) func TestInit_empty(t *testing.T) { @@ -553,13 +556,24 @@ func TestInit_inputFalse(t *testing.T) { } // write different states for foo and bar - s := terraform.NewState() - s.Lineage = "foo" - if err := (&state.LocalState{Path: "foo"}).WriteState(s); err != nil { + fooState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("foo"), + false, // not sensitive + ) + }) + if err := statemgr.NewFilesystem("foo").WriteState(fooState); err != nil { t.Fatal(err) } - s.Lineage = "bar" - if err := (&state.LocalState{Path: "bar"}).WriteState(s); err != nil { + barState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "bar"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("bar"), + false, // not sensitive + ) + }) + if err := statemgr.NewFilesystem("bar").WriteState(barState); err != nil { t.Fatal(err) } diff --git a/command/meta_backend_migrate.go b/command/meta_backend_migrate.go index b0eee2a83afb..91c24a9c64c6 100644 --- 
a/command/meta_backend_migrate.go +++ b/command/meta_backend_migrate.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io/ioutil" + "log" "os" "path/filepath" "sort" @@ -41,6 +42,7 @@ type backendMigrateOpts struct { // // This will attempt to lock both states for the migration. func (m *Meta) backendMigrateState(opts *backendMigrateOpts) error { + log.Printf("[TRACE] backendMigrateState: need to migrate from %q to %q backend config", opts.OneType, opts.TwoType) // We need to check what the named state status is. If we're converting // from multi-state to single-state for example, we need to handle that. var oneSingle, twoSingle bool @@ -126,6 +128,8 @@ func (m *Meta) backendMigrateState(opts *backendMigrateOpts) error { // Multi-state to multi-state. func (m *Meta) backendMigrateState_S_S(opts *backendMigrateOpts) error { + log.Print("[TRACE] backendMigrateState: migrating all named workspaces") + // Ask the user if they want to migrate their existing remote state migrate, err := m.confirm(&terraform.InputOpts{ Id: "backend-migrate-multistate-to-multistate", @@ -175,6 +179,8 @@ func (m *Meta) backendMigrateState_S_S(opts *backendMigrateOpts) error { // Multi-state to single state. func (m *Meta) backendMigrateState_S_s(opts *backendMigrateOpts) error { + log.Printf("[TRACE] backendMigrateState: target backend type %q does not support named workspaces", opts.TwoType) + currentEnv := m.Workspace() migrate := opts.force @@ -212,6 +218,8 @@ func (m *Meta) backendMigrateState_S_s(opts *backendMigrateOpts) error { // Single state to single state, assumed default state name. 
func (m *Meta) backendMigrateState_s_s(opts *backendMigrateOpts) error { + log.Printf("[TRACE] backendMigrateState: migrating %q workspace to %q workspace", opts.oneEnv, opts.twoEnv) + stateOne, err := opts.One.StateMgr(opts.oneEnv) if err != nil { return fmt.Errorf(strings.TrimSpace( @@ -224,6 +232,7 @@ func (m *Meta) backendMigrateState_s_s(opts *backendMigrateOpts) error { // Do not migrate workspaces without state. if stateOne.State().Empty() { + log.Print("[TRACE] backendMigrateState: source workspace has empty state, so nothing to migrate") return nil } @@ -232,6 +241,7 @@ func (m *Meta) backendMigrateState_s_s(opts *backendMigrateOpts) error { // If the backend doesn't support using the default state, we ask the user // for a new name and migrate the default state to the given named state. stateTwo, err = func() (statemgr.Full, error) { + log.Print("[TRACE] backendMigrateState: target doesn't support a default workspace, so we must prompt for a new name") name, err := m.UIInput().Input(&terraform.InputOpts{ Id: "new-state-name", Query: fmt.Sprintf( @@ -284,9 +294,11 @@ func (m *Meta) backendMigrateState_s_s(opts *backendMigrateOpts) error { sm2, _ := stateTwo.(statemgr.PersistentMeta) if one != nil && two != nil { if sm1 == nil || sm2 == nil { + log.Print("[TRACE] backendMigrateState: both source and destination workspaces have no state, so no migration is needed") return nil } if sm1.StateSnapshotMeta().Lineage == sm2.StateSnapshotMeta().Lineage { + log.Printf("[TRACE] backendMigrateState: both source and destination workspaces have equal state with lineage %q, so no migration is needed", sm1.StateSnapshotMeta().Lineage) return nil } } @@ -309,10 +321,12 @@ func (m *Meta) backendMigrateState_s_s(opts *backendMigrateOpts) error { // We now own a lock, so double check that we have the version // corresponding to the lock. 
+ log.Print("[TRACE] backendMigrateState: refreshing source workspace state") if err := stateOne.RefreshState(); err != nil { return fmt.Errorf(strings.TrimSpace( errMigrateSingleLoadDefault), opts.OneType, err) } + log.Print("[TRACE] backendMigrateState: refreshing target workspace state") if err := stateTwo.RefreshState(); err != nil { return fmt.Errorf(strings.TrimSpace( errMigrateSingleLoadDefault), opts.OneType, err) @@ -326,20 +340,24 @@ func (m *Meta) backendMigrateState_s_s(opts *backendMigrateOpts) error { switch { // No migration necessary case one.Empty() && two.Empty(): + log.Print("[TRACE] backendMigrateState: both source and destination workspaces have empty state, so no migration is required") return nil // No migration necessary if we're inheriting state. case one.Empty() && !two.Empty(): + log.Print("[TRACE] backendMigrateState: source workspace has empty state, so no migration is required") return nil // We have existing state moving into no state. Ask the user if // they'd like to do this. case !one.Empty() && two.Empty(): + log.Print("[TRACE] backendMigrateState: target workspace has empty state, so might copy source workspace state") confirmFunc = m.backendMigrateEmptyConfirm // Both states are non-empty, meaning we need to determine which // state should be used and update accordingly. case !one.Empty() && !two.Empty(): + log.Print("[TRACE] backendMigrateState: both source and destination workspaces have states, so might overwrite destination with source") confirmFunc = m.backendMigrateNonEmptyConfirm } @@ -350,6 +368,7 @@ func (m *Meta) backendMigrateState_s_s(opts *backendMigrateOpts) error { if !opts.force { // Abort if we can't ask for input. 
if !m.input { + log.Print("[TRACE] backendMigrateState: can't prompt for input, so aborting migration") return errors.New("error asking for state migration action: input disabled") } From 9ba399bca88a2971d6fc5df457c3f8c79dafeeed Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 9 Nov 2018 15:06:00 -0800 Subject: [PATCH 074/149] command: Fix TestInit_getProvider After all of the refactoring we were no longer checking the Terraform version field in a state file, causing this test to fail. This restores that check, though with a slightly different error message. --- command/init_test.go | 2 +- states/statefile/read.go | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/command/init_test.go b/command/init_test.go index 29eaa90fd49a..41e0d0ec6021 100644 --- a/command/init_test.go +++ b/command/init_test.go @@ -691,7 +691,7 @@ func TestInit_getProvider(t *testing.T) { } errMsg := ui.ErrorWriter.String() - if !strings.Contains(errMsg, "future Terraform version") { + if !strings.Contains(errMsg, "which is newer than current") { t.Fatal("unexpected error:", errMsg) } }) diff --git a/states/statefile/read.go b/states/statefile/read.go index 8abd3be14da2..d691c0290d4b 100644 --- a/states/statefile/read.go +++ b/states/statefile/read.go @@ -62,6 +62,15 @@ func Read(r io.Reader) (*File, error) { panic("readState returned nil state with no errors") } + if state.TerraformVersion != nil && state.TerraformVersion.GreaterThan(tfversion.SemVer) { + return state, fmt.Errorf( + "state snapshot was created by Terraform v%s, which is newer than current v%s; upgrade to Terraform v%s or greater to work with this state", + state.TerraformVersion, + tfversion.SemVer, + state.TerraformVersion, + ) + } + return state, diags.Err() } From 544c2932ce75ed8d4754b57211a46c1daa5e8dca Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 9 Nov 2018 15:08:39 -0800 Subject: [PATCH 075/149] command: Fix TestInit_checkRequiredVersion In prior refactoring we lost the 
required core version check from "terraform init", which we restore here. Additionally, this test used to have an incorrect name that suggested it was testing something in the "getProvider" codepath, but version checking happens regardless of what other options are selected. --- command/init.go | 18 ++++++++++++++++++ command/init_test.go | 2 +- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/command/init.go b/command/init.go index 1614a604f106..e8a8c24e191f 100644 --- a/command/init.go +++ b/command/init.go @@ -257,6 +257,24 @@ func (c *InitCommand) Run(args []string) int { } } + // With modules now installed, we should be able to load the whole + // configuration and check the core version constraints. + config, confDiags := c.loadConfig(path) + diags = diags.Append(confDiags) + if confDiags.HasErrors() { + // Since this may be the user's first ever interaction with Terraform, + // we'll provide some additional context in this case. + c.Ui.Error(strings.TrimSpace(errInitConfigError)) + c.showDiagnostics(diags) + return 1 + } + confDiags = terraform.CheckCoreVersionRequirements(config) + diags = diags.Append(confDiags) + if confDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + if back == nil { // If we didn't initialize a backend then we'll try to at least // instantiate one. 
This might fail if it wasn't already initalized diff --git a/command/init_test.go b/command/init_test.go index 41e0d0ec6021..d50baebd6d6d 100644 --- a/command/init_test.go +++ b/command/init_test.go @@ -959,7 +959,7 @@ func TestInit_getProviderHaveLegacyVersion(t *testing.T) { } } -func TestInit_getProviderCheckRequiredVersion(t *testing.T) { +func TestInit_checkRequiredVersion(t *testing.T) { // Create a temporary working directory that is empty td := tempDir(t) copy.CopyDir(testFixturePath("init-check-required-version"), td) From ced06a4ca333ad48e11a71b7bf122a94943aa390 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 9 Nov 2018 15:16:20 -0800 Subject: [PATCH 076/149] command: fix panic in TestStatePush_lineageMismatch This test is currently failing due to the command completing successfully, which would previously cause a panic because we didn't properly initialize the MockUi and so its error buffer is nil unless written to. (The failure this was masking will be fixed in a subsequent commit.) --- command/state_push_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command/state_push_test.go b/command/state_push_test.go index 437da55d72a4..4240dffff15d 100644 --- a/command/state_push_test.go +++ b/command/state_push_test.go @@ -116,7 +116,7 @@ func TestStatePush_lineageMismatch(t *testing.T) { expected := testStateRead(t, "local-state.tfstate") p := testProvider() - ui := new(cli.MockUi) + ui := cli.NewMockUi() c := &StatePushCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), From 8c54da0ad26c7e6dc52c2aa122cac52ade9581b7 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 9 Nov 2018 15:18:00 -0800 Subject: [PATCH 077/149] command: TestInit_fromModule_explicitDest guard against other tests Some other test is leaving behind a terraform.tfstate after it concludes, which can cause this test to fail in a strange way due to picking up extra provider requirements from that state. 
This check doesn't fix that problem, but it at least makes the test fail in a more helpful way to avoid time wasted trying to debug this test when it's some other test that actually has the bug. --- command/init_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/command/init_test.go b/command/init_test.go index d50baebd6d6d..ade5c533c144 100644 --- a/command/init_test.go +++ b/command/init_test.go @@ -79,6 +79,14 @@ func TestInit_fromModule_explicitDest(t *testing.T) { }, } + if _, err := os.Stat(DefaultStateFilename); err == nil { + // This should never happen; it indicates a bug in another test + // is causing a terraform.tfstate to get left behind in our directory + // here, which can interfere with our init process in a way that + // isn't relevant to this test. + t.Fatalf("some other test has left terraform.tfstate behind") + } + args := []string{ "-from-module=" + testFixturePath("init"), dir, From 7edbb3c8bf89b1daa26184654b60369ab4302d2d Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Fri, 2 Nov 2018 12:20:51 -0700 Subject: [PATCH 078/149] return state even if cfg is invalid --- helper/resource/testing_config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helper/resource/testing_config.go b/helper/resource/testing_config.go index c8cc587bd4d0..9bf04cb5982a 100644 --- a/helper/resource/testing_config.go +++ b/helper/resource/testing_config.go @@ -53,7 +53,7 @@ func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep, } if stepDiags := ctx.Validate(); len(stepDiags) > 0 { if stepDiags.HasErrors() { - return nil, errwrap.Wrapf("config is invalid: {{err}}", stepDiags.Err()) + return state, errwrap.Wrapf("config is invalid: {{err}}", stepDiags.Err()) } log.Printf("[WARN] Config warnings:\n%s", stepDiags) From d0c320f1485a2d28d32344965575308aad891e3f Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Tue, 13 Nov 2018 11:17:39 +0100 Subject: [PATCH 079/149] depencies: update `go-tfe` --- go.mod | 
10 +- go.sum | 18 +- .../github.com/hashicorp/go-cleanhttp/go.mod | 1 + .../hashicorp/go-cleanhttp/handlers.go | 43 ++ .../hashicorp/go-retryablehttp/.travis.yml | 2 +- .../hashicorp/go-retryablehttp/README.md | 13 +- .../hashicorp/go-retryablehttp/client.go | 298 +++++++++++--- vendor/github.com/hashicorp/go-tfe/README.md | 1 + vendor/github.com/hashicorp/go-tfe/apply.go | 8 +- .../hashicorp/go-tfe/configuration_version.go | 6 +- vendor/github.com/hashicorp/go-tfe/go.mod | 14 + vendor/github.com/hashicorp/go-tfe/go.sum | 20 + .../github.com/hashicorp/go-tfe/logreader.go | 7 +- .../hashicorp/go-tfe/oauth_client.go | 16 +- .../hashicorp/go-tfe/oauth_token.go | 8 +- .../hashicorp/go-tfe/organization.go | 16 +- .../hashicorp/go-tfe/organization_token.go | 6 +- vendor/github.com/hashicorp/go-tfe/plan.go | 8 +- vendor/github.com/hashicorp/go-tfe/policy.go | 58 +-- .../hashicorp/go-tfe/policy_check.go | 8 +- .../github.com/hashicorp/go-tfe/policy_set.go | 381 ++++++++++++++++++ vendor/github.com/hashicorp/go-tfe/run.go | 14 +- vendor/github.com/hashicorp/go-tfe/ssh_key.go | 14 +- .../hashicorp/go-tfe/state_version.go | 14 +- vendor/github.com/hashicorp/go-tfe/team.go | 12 +- .../hashicorp/go-tfe/team_access.go | 14 +- .../hashicorp/go-tfe/team_member.go | 14 +- .../github.com/hashicorp/go-tfe/team_token.go | 6 +- vendor/github.com/hashicorp/go-tfe/tfe.go | 204 +++++++--- .../github.com/hashicorp/go-tfe/variable.go | 18 +- .../github.com/hashicorp/go-tfe/workspace.go | 30 +- vendor/golang.org/x/time/AUTHORS | 3 + vendor/golang.org/x/time/CONTRIBUTORS | 3 + vendor/golang.org/x/time/LICENSE | 27 ++ vendor/golang.org/x/time/PATENTS | 22 + vendor/golang.org/x/time/rate/rate.go | 374 +++++++++++++++++ vendor/modules.txt | 8 +- 37 files changed, 1463 insertions(+), 256 deletions(-) create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/go.mod create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/handlers.go create mode 100644 vendor/github.com/hashicorp/go-tfe/go.mod 
create mode 100644 vendor/github.com/hashicorp/go-tfe/go.sum create mode 100644 vendor/github.com/hashicorp/go-tfe/policy_set.go create mode 100644 vendor/golang.org/x/time/AUTHORS create mode 100644 vendor/golang.org/x/time/CONTRIBUTORS create mode 100644 vendor/golang.org/x/time/LICENSE create mode 100644 vendor/golang.org/x/time/PATENTS create mode 100644 vendor/golang.org/x/time/rate/rate.go diff --git a/go.mod b/go.mod index 3a7705311120..cb722b2f5000 100644 --- a/go.mod +++ b/go.mod @@ -45,7 +45,6 @@ require ( github.com/golang/protobuf v1.2.0 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c // indirect github.com/google/go-cmp v0.2.0 - github.com/google/go-querystring v1.0.0 // indirect github.com/googleapis/gax-go v0.0.0-20161107002406-da06d194a00e // indirect github.com/gophercloud/gophercloud v0.0.0-20170524130959-3027adb1ce72 github.com/gopherjs/gopherjs v0.0.0-20181004151105-1babbf986f6f // indirect @@ -56,19 +55,18 @@ require ( github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089 github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de - github.com/hashicorp/go-cleanhttp v0.0.0-20171130225243-06c9ea3a335b + github.com/hashicorp/go-cleanhttp v0.5.0 github.com/hashicorp/go-getter v0.0.0-20180327010114-90bb99a48d86 github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa // indirect github.com/hashicorp/go-msgpack v0.0.0-20150518234257-fa3f63826f7c // indirect github.com/hashicorp/go-multierror v1.0.0 github.com/hashicorp/go-plugin v0.0.0-20181002195811-1faddcf740b6 - github.com/hashicorp/go-retryablehttp v0.0.0-20160930035102-6e85be8fee1d + github.com/hashicorp/go-retryablehttp v0.0.0-20180718195005-e651d75abec6 github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 github.com/hashicorp/go-safetemp v0.0.0-20180326211150-b1a1dbde6fdc // indirect - github.com/hashicorp/go-slug v0.1.0 // 
indirect github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86 // indirect - github.com/hashicorp/go-tfe v0.2.6 + github.com/hashicorp/go-tfe v0.2.9 github.com/hashicorp/go-uuid v1.0.0 github.com/hashicorp/go-version v0.0.0-20180322230233-23480c066577 github.com/hashicorp/golang-lru v0.5.0 // indirect @@ -121,7 +119,6 @@ require ( github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a // indirect github.com/soheilhy/cmux v0.1.4 // indirect github.com/spf13/afero v1.0.2 - github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d // indirect github.com/terraform-providers/terraform-provider-aws v1.41.0 github.com/terraform-providers/terraform-provider-openstack v0.0.0-20170616075611-4080a521c6ea github.com/terraform-providers/terraform-provider-template v1.0.0 // indirect @@ -142,7 +139,6 @@ require ( golang.org/x/net v0.0.0-20181017193950-04a2e542c03f golang.org/x/oauth2 v0.0.0-20181003184128-c57b0facaced golang.org/x/sys v0.0.0-20180925112736-b09afc3d579e // indirect - golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 // indirect google.golang.org/api v0.0.0-20181015145326-625cd1887957 google.golang.org/appengine v1.2.0 // indirect google.golang.org/grpc v1.14.0 diff --git a/go.sum b/go.sum index 576fed37ad96..e7924ef9199e 100644 --- a/go.sum +++ b/go.sum @@ -121,8 +121,8 @@ github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/U github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de h1:XDCSythtg8aWSRSO29uwhgh7b127fWr+m5SemqjSUL8= github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de/go.mod h1:xIwEieBHERyEvaeKF/TcHh1Hu+lxPM+n2vT1+g9I4m4= -github.com/hashicorp/go-cleanhttp v0.0.0-20171130225243-06c9ea3a335b h1:xrvnoavY7pMnMB/4x+cSAMgkzwjiSyilS55LZ14Ko7o= -github.com/hashicorp/go-cleanhttp v0.0.0-20171130225243-06c9ea3a335b/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= 
+github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-getter v0.0.0-20180327010114-90bb99a48d86 h1:mv3oKLM8sTaxmU/PrT39T35HRnUfchK+vtzXw6Ci9lY= github.com/hashicorp/go-getter v0.0.0-20180327010114-90bb99a48d86/go.mod h1:6rdJFnhkXnzGOJbvkrdv4t9nLwKcVA+tmbQeUlkIzrU= github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f h1:Yv9YzBlAETjy6AOX9eLBZ3nshNVRREgerT/3nvxlGho= @@ -137,8 +137,8 @@ github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uP github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-plugin v0.0.0-20181002195811-1faddcf740b6 h1:czAJ5CXRPr+6vd6RGdJelApnxNbK3dAkakgBwLEWfrc= github.com/hashicorp/go-plugin v0.0.0-20181002195811-1faddcf740b6/go.mod h1:JSqWYsict+jzcj0+xElxyrBQRPNoiWQuddnxArJ7XHQ= -github.com/hashicorp/go-retryablehttp v0.0.0-20160930035102-6e85be8fee1d h1:/T1aqTlRV/71ER/wHvhqTZaXGQW7XSO+F16mIIHw7zc= -github.com/hashicorp/go-retryablehttp v0.0.0-20160930035102-6e85be8fee1d/go.mod h1:fXcdFsQoipQa7mwORhKad5jmDCeSy/RCGzWA08PO0lM= +github.com/hashicorp/go-retryablehttp v0.0.0-20180718195005-e651d75abec6 h1:qCv4319q2q7XKn0MQbi8p37hsJ+9Xo8e6yojA73JVxk= +github.com/hashicorp/go-retryablehttp v0.0.0-20180718195005-e651d75abec6/go.mod h1:fXcdFsQoipQa7mwORhKad5jmDCeSy/RCGzWA08PO0lM= github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:9HVkPxOpo+yO93Ah4yrO67d/qh0fbLLWbKqhYjyHq9A= github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg= github.com/hashicorp/go-safetemp v0.0.0-20180326211150-b1a1dbde6fdc h1:wAa9fGALVHfjYxZuXRnmuJG2CnwRpJYOTvY6YdErAh0= @@ -147,8 +147,10 @@ github.com/hashicorp/go-slug v0.1.0 h1:MJGEiOwRGrQCBmMMZABHqIESySFJ4ajrsjgDI4/aF github.com/hashicorp/go-slug v0.1.0/go.mod 
h1:+zDycQOzGqOqMW7Kn2fp9vz/NtqpMLQlgb9JUF+0km4= github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86 h1:7YOlAIO2YWnJZkQp7B5eFykaIY7C9JndqAFQyVV5BhM= github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-tfe v0.2.6 h1:o2ryV7ZS0BgaLfNvzWz+A/6J70UETMy+wFL+DQlUy/M= -github.com/hashicorp/go-tfe v0.2.6/go.mod h1:nJs7lSMcNPGQQtjyPG6en099CQ/f83+hfeeSqehl2Fg= +github.com/hashicorp/go-tfe v0.2.7 h1:Cy0irO9Qfgdn7FmvxSoXIQrRa3iM/kFmp/c0oCboCow= +github.com/hashicorp/go-tfe v0.2.7/go.mod h1:WJgjAJVdnXYPOWF6j66VI20djUGfeFjeayIgUDhohsU= +github.com/hashicorp/go-tfe v0.2.9 h1:CmxjF5zBKh5XBf2fMseJPaSKxKIauIIS4r+6+hNX8JM= +github.com/hashicorp/go-tfe v0.2.9/go.mod h1:WJgjAJVdnXYPOWF6j66VI20djUGfeFjeayIgUDhohsU= github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v0.0.0-20180322230233-23480c066577 h1:at4+18LrM8myamuV7/vT6x2s1JNXp2k4PsSbt4I02X4= @@ -343,8 +345,8 @@ golang.org/x/sys v0.0.0-20180925112736-b09afc3d579e h1:LSlw/Dbj0MkNvPYAAkGinYmGl golang.org/x/sys v0.0.0-20180925112736-b09afc3d579e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= 
google.golang.org/api v0.0.0-20181015145326-625cd1887957 h1:jwCmWUTrTFfjsobRuGurnCQeW4NZKijaIf6yAXwLR0E= google.golang.org/api v0.0.0-20181015145326-625cd1887957/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= diff --git a/vendor/github.com/hashicorp/go-cleanhttp/go.mod b/vendor/github.com/hashicorp/go-cleanhttp/go.mod new file mode 100644 index 000000000000..310f07569fc4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go new file mode 100644 index 000000000000..7eda3777f3c2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go @@ -0,0 +1,43 @@ +package cleanhttp + +import ( + "net/http" + "strings" + "unicode" +) + +// HandlerInput provides input options to cleanhttp's handlers +type HandlerInput struct { + ErrStatus int +} + +// PrintablePathCheckHandler is a middleware that ensures the request path +// contains only printable runes. 
+func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler { + // Nil-check on input to make it optional + if input == nil { + input = &HandlerInput{ + ErrStatus: http.StatusBadRequest, + } + } + + // Default to http.StatusBadRequest on error + if input.ErrStatus == 0 { + input.ErrStatus = http.StatusBadRequest + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Check URL path for non-printable characters + idx := strings.IndexFunc(r.URL.Path, func(c rune) bool { + return !unicode.IsPrint(c) + }) + + if idx != -1 { + w.WriteHeader(input.ErrStatus) + return + } + + next.ServeHTTP(w, r) + return + }) +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml b/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml index 54a6c7a221b8..2df4e7dfaf74 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml +++ b/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml @@ -3,7 +3,7 @@ sudo: false language: go go: - - 1.6.3 + - 1.8.1 branches: only: diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md index 0d6f9ed40afd..ccdc7e87cad7 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/README.md +++ b/vendor/github.com/hashicorp/go-retryablehttp/README.md @@ -14,13 +14,16 @@ makes `retryablehttp` very easy to drop into existing programs. `retryablehttp` performs automatic retries under certain conditions. Mainly, if an error is returned by the client (connection errors, etc.), or if a 500-range -response code is received, then a retry is invoked after a wait period. -Otherwise, the response is returned and left to the caller to interpret. +response code is received (except 501), then a retry is invoked after a wait +period. Otherwise, the response is returned and left to the caller to +interpret. The main difference from `net/http` is that requests which take a request body -(POST/PUT et. 
al) require an `io.ReadSeeker` to be provided. This enables the -request body to be "rewound" if the initial request fails so that the full -request can be attempted again. +(POST/PUT et. al) can have the body provided in a number of ways (some more or +less efficient) that allow "rewinding" the request body if the initial request +fails so that the full request can be attempted again. See the +[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp) for more +details. Example Use =========== diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go index 198779bdf978..21f45e5ed647 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/client.go +++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go @@ -8,18 +8,28 @@ // response is received, then a retry is invoked. Otherwise, the response is // returned and left to the caller to interpret. // -// The main difference from net/http is that requests which take a request body -// (POST/PUT et. al) require an io.ReadSeeker to be provided. This enables the -// request body to be "rewound" if the initial request fails so that the full -// request can be attempted again. +// Requests which take a request body should provide a non-nil function +// parameter. The best choice is to provide either a function satisfying +// ReaderFunc which provides multiple io.Readers in an efficient manner, a +// *bytes.Buffer (the underlying raw byte slice will be used) or a raw byte +// slice. As it is a reference type, and we will wrap it as needed by readers, +// we can efficiently re-use the request body without needing to copy it. If an +// io.Reader (such as a *bytes.Reader) is provided, the full body will be read +// prior to the first request, and will be efficiently re-used for any retries. 
+// ReadSeeker can be used, but some users have observed occasional data races +// between the net/http library and the Seek functionality of some +// implementations of ReadSeeker, so should be avoided if possible. package retryablehttp import ( + "bytes" + "context" "fmt" "io" "io/ioutil" "log" "math" + "math/rand" "net/http" "net/url" "os" @@ -44,6 +54,9 @@ var ( respReadLimit = int64(4096) ) +// ReaderFunc is the type of function that can be given natively to NewRequest +type ReaderFunc func() (io.Reader, error) + // LenReader is an interface implemented by many in-memory io.Reader's. Used // for automatically sending the right Content-Length header when possible. type LenReader interface { @@ -54,32 +67,118 @@ type LenReader interface { type Request struct { // body is a seekable reader over the request body payload. This is // used to rewind the request data in between retries. - body io.ReadSeeker + body ReaderFunc // Embed an HTTP request directly. This makes a *Request act exactly // like an *http.Request so that all meta methods are supported. *http.Request } +// WithContext returns wrapped Request with a shallow copy of underlying *http.Request +// with its context changed to ctx. The provided ctx must be non-nil. +func (r *Request) WithContext(ctx context.Context) *Request { + r.Request = r.Request.WithContext(ctx) + return r +} + // NewRequest creates a new wrapped request. -func NewRequest(method, url string, body io.ReadSeeker) (*Request, error) { - // Wrap the body in a noop ReadCloser if non-nil. This prevents the - // reader from being closed by the HTTP client. - var rcBody io.ReadCloser - if body != nil { - rcBody = ioutil.NopCloser(body) +func NewRequest(method, url string, rawBody interface{}) (*Request, error) { + var err error + var body ReaderFunc + var contentLength int64 + + if rawBody != nil { + switch rawBody.(type) { + // If they gave us a function already, great! Use it. 
+ case ReaderFunc: + body = rawBody.(ReaderFunc) + tmp, err := body() + if err != nil { + return nil, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + case func() (io.Reader, error): + body = rawBody.(func() (io.Reader, error)) + tmp, err := body() + if err != nil { + return nil, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + // If a regular byte slice, we can read it over and over via new + // readers + case []byte: + buf := rawBody.([]byte) + body = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // If a bytes.Buffer we can read the underlying byte slice over and + // over + case *bytes.Buffer: + buf := rawBody.(*bytes.Buffer) + body = func() (io.Reader, error) { + return bytes.NewReader(buf.Bytes()), nil + } + contentLength = int64(buf.Len()) + + // We prioritize *bytes.Reader here because we don't really want to + // deal with it seeking so want it to match here instead of the + // io.ReadSeeker case. 
+ case *bytes.Reader: + buf, err := ioutil.ReadAll(rawBody.(*bytes.Reader)) + if err != nil { + return nil, err + } + body = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // Compat case + case io.ReadSeeker: + raw := rawBody.(io.ReadSeeker) + body = func() (io.Reader, error) { + raw.Seek(0, 0) + return ioutil.NopCloser(raw), nil + } + if lr, ok := raw.(LenReader); ok { + contentLength = int64(lr.Len()) + } + + // Read all in so we can reset + case io.Reader: + buf, err := ioutil.ReadAll(rawBody.(io.Reader)) + if err != nil { + return nil, err + } + body = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + default: + return nil, fmt.Errorf("cannot handle type %T", rawBody) + } } - // Make the request with the noop-closer for the body. - httpReq, err := http.NewRequest(method, url, rcBody) + httpReq, err := http.NewRequest(method, url, nil) if err != nil { return nil, err } - - // Check if we can set the Content-Length automatically. - if lr, ok := body.(LenReader); ok { - httpReq.ContentLength = int64(lr.Len()) - } + httpReq.ContentLength = contentLength return &Request{body, httpReq}, nil } @@ -105,7 +204,18 @@ type ResponseLogHook func(*log.Logger, *http.Response) // Client will close any response body when retrying, but if the retry is // aborted it is up to the CheckResponse callback to properly close any // response body before returning. -type CheckRetry func(resp *http.Response, err error) (bool, error) +type CheckRetry func(ctx context.Context, resp *http.Response, err error) (bool, error) + +// Backoff specifies a policy for how long to wait between retries. +// It is called after a failing request to determine the amount of time +// that should pass before trying again. 
+type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration + +// ErrorHandler is called if retries are expired, containing the last status +// from the http library. If not specified, default behavior for the library is +// to close the body and return an error indicating how many tries were +// attempted. If overriding this, be sure to close the body if needed. +type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error) // Client is used to make HTTP requests. It adds additional functionality // like automatic retries to tolerate minor outages. @@ -128,6 +238,12 @@ type Client struct { // CheckRetry specifies the policy for handling retries, and is called // after each request. The default policy is DefaultRetryPolicy. CheckRetry CheckRetry + + // Backoff specifies the policy for how long to wait between retries + Backoff Backoff + + // ErrorHandler specifies the custom error handler to use, if any + ErrorHandler ErrorHandler } // NewClient creates a new Client with default settings. @@ -139,12 +255,18 @@ func NewClient() *Client { RetryWaitMax: defaultRetryWaitMax, RetryMax: defaultRetryMax, CheckRetry: DefaultRetryPolicy, + Backoff: DefaultBackoff, } } // DefaultRetryPolicy provides a default callback for Client.CheckRetry, which // will retry on connection errors and server errors. -func DefaultRetryPolicy(resp *http.Response, err error) (bool, error) { +func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + // do not retry on context.Canceled or context.DeadlineExceeded + if ctx.Err() != nil { + return false, ctx.Err() + } + if err != nil { return true, err } @@ -152,24 +274,92 @@ func DefaultRetryPolicy(resp *http.Response, err error) (bool, error) { // the server time to recover, as 500's are typically not permanent // errors and may relate to outages on the server side. This will catch // invalid response codes as well, like 0 and 999. 
-	if resp.StatusCode == 0 || resp.StatusCode >= 500 {
+	if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) {
 		return true, nil
 	}
 
 	return false, nil
 }
 
+// DefaultBackoff provides a default callback for Client.Backoff which
+// will perform exponential backoff based on the attempt number and limited
+// by the provided minimum and maximum durations.
+func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
+	mult := math.Pow(2, float64(attemptNum)) * float64(min)
+	sleep := time.Duration(mult)
+	if float64(sleep) != mult || sleep > max {
+		sleep = max
+	}
+	return sleep
+}
+
+// LinearJitterBackoff provides a callback for Client.Backoff which will
+// perform linear backoff based on the attempt number and with jitter to
+// prevent a thundering herd.
+//
+// min and max here are *not* absolute values. The number to be multiplied by
+// the attempt number will be chosen at random from between them, thus they are
+// bounding the jitter.
+//
+// For instance:
+// * To get strictly linear backoff of one second increasing each retry, set
+// both to one second (1s, 2s, 3s, 4s, ...)
+// * To get a small amount of jitter centered around one second increasing each
+// retry, set to around one second, such as a min of 800ms and max of 1200ms
+// (892ms, 2102ms, 2945ms, 4312ms, ...)
+// * To get extreme jitter, set to a very wide spread, such as a min of 100ms
+// and a max of 20s (15382ms, 292ms, 51321ms, 35234ms, ...)
+func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + // attemptNum always starts at zero but we want to start at 1 for multiplication + attemptNum++ + + if max <= min { + // Unclear what to do here, or they are the same, so return min * + // attemptNum + return min * time.Duration(attemptNum) + } + + // Seed rand; doing this every time is fine + rand := rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) + + // Pick a random number that lies somewhere between the min and max and + // multiply by the attemptNum. attemptNum starts at zero so we always + // increment here. We first get a random percentage, then apply that to the + // difference between min and max, and add to min. + jitter := rand.Float64() * float64(max-min) + jitterMin := int64(jitter) + int64(min) + return time.Duration(jitterMin * int64(attemptNum)) +} + +// PassthroughErrorHandler is an ErrorHandler that directly passes through the +// values from the net/http library for the final request. The body is not +// closed. +func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Response, error) { + return resp, err +} + // Do wraps calling an HTTP method with retries. func (c *Client) Do(req *Request) (*http.Response, error) { - c.Logger.Printf("[DEBUG] %s %s", req.Method, req.URL) + if c.Logger != nil { + c.Logger.Printf("[DEBUG] %s %s", req.Method, req.URL) + } + + var resp *http.Response + var err error for i := 0; ; i++ { var code int // HTTP response code // Always rewind the request body when non-nil. 
 		if req.body != nil {
-			if _, err := req.body.Seek(0, 0); err != nil {
-				return nil, fmt.Errorf("failed to seek body: %v", err)
+			body, err := req.body()
+			if err != nil {
+				return resp, err
+			}
+			if c, ok := body.(io.ReadCloser); ok {
+				req.Request.Body = c
+			} else {
+				req.Request.Body = ioutil.NopCloser(body)
 			}
 		}
 
@@ -178,13 +368,18 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
 		}
 
 		// Attempt the request
-		resp, err := c.HTTPClient.Do(req.Request)
+		resp, err = c.HTTPClient.Do(req.Request)
+		if resp != nil {
+			code = resp.StatusCode
+		}
 
 		// Check if we should continue with retries.
-		checkOK, checkErr := c.CheckRetry(resp, err)
+		checkOK, checkErr := c.CheckRetry(req.Request.Context(), resp, err)
 		if err != nil {
-			c.Logger.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err)
+			if c.Logger != nil {
+				c.Logger.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err)
+			}
 		} else {
 			// Call this here to maintain the behavior of logging all requests,
 			// even if CheckRetry signals to stop.
@@ -202,25 +397,38 @@ func (c *Client) Do(req *Request) (*http.Response, error) {
 			return resp, err
 		}
 
+		// We do this before drainBody because there's no need for the I/O if
+		// we're breaking out
+		remain := c.RetryMax - i
+		if remain <= 0 {
+			break
+		}
+
 		// We're going to retry, consume any response to reuse the connection.
- if err == nil { + if err == nil && resp != nil { c.drainBody(resp.Body) } - remain := c.RetryMax - i - if remain == 0 { - break - } - wait := backoff(c.RetryWaitMin, c.RetryWaitMax, i) + wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp) desc := fmt.Sprintf("%s %s", req.Method, req.URL) if code > 0 { desc = fmt.Sprintf("%s (status: %d)", desc, code) } - c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) + if c.Logger != nil { + c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) + } time.Sleep(wait) } - // Return an error if we fall out of the retry loop + if c.ErrorHandler != nil { + return c.ErrorHandler(resp, err, c.RetryMax+1) + } + + // By default, we close the response body and return an error without + // returning the response + if resp != nil { + resp.Body.Close() + } return nil, fmt.Errorf("%s %s giving up after %d attempts", req.Method, req.URL, c.RetryMax+1) } @@ -230,7 +438,9 @@ func (c *Client) drainBody(body io.ReadCloser) { defer body.Close() _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit)) if err != nil { - c.Logger.Printf("[ERR] error reading response body: %v", err) + if c.Logger != nil { + c.Logger.Printf("[ERR] error reading response body: %v", err) + } } } @@ -263,12 +473,12 @@ func (c *Client) Head(url string) (*http.Response, error) { } // Post is a shortcut for doing a POST request without making a new client. -func Post(url, bodyType string, body io.ReadSeeker) (*http.Response, error) { +func Post(url, bodyType string, body interface{}) (*http.Response, error) { return defaultClient.Post(url, bodyType, body) } // Post is a convenience method for doing simple POST requests. 
-func (c *Client) Post(url, bodyType string, body io.ReadSeeker) (*http.Response, error) { +func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) { req, err := NewRequest("POST", url, body) if err != nil { return nil, err @@ -288,15 +498,3 @@ func PostForm(url string, data url.Values) (*http.Response, error) { func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) } - -// backoff is used to calculate how long to sleep before retrying -// after observing failures. It takes the minimum/maximum wait time and -// iteration, and returns the duration to wait. -func backoff(min, max time.Duration, iter int) time.Duration { - mult := math.Pow(2, float64(iter)) * float64(min) - sleep := time.Duration(mult) - if float64(sleep) != mult || sleep > max { - sleep = max - } - return sleep -} diff --git a/vendor/github.com/hashicorp/go-tfe/README.md b/vendor/github.com/hashicorp/go-tfe/README.md index 05bbc78d8a3e..4ca44f4fef55 100644 --- a/vendor/github.com/hashicorp/go-tfe/README.md +++ b/vendor/github.com/hashicorp/go-tfe/README.md @@ -28,6 +28,7 @@ Currently the following endpoints are supported: - [x] [Organizations](https://www.terraform.io/docs/enterprise/api/organizations.html) - [x] [Organization Tokens](https://www.terraform.io/docs/enterprise/api/organization-tokens.html) - [x] [Policies](https://www.terraform.io/docs/enterprise/api/policies.html) +- [x] [Policy Sets](https://www.terraform.io/docs/enterprise/api/policy-sets.html) - [x] [Policy Checks](https://www.terraform.io/docs/enterprise/api/policy-checks.html) - [ ] [Registry Modules](https://www.terraform.io/docs/enterprise/api/modules.html) - [x] [Runs](https://www.terraform.io/docs/enterprise/api/run.html) diff --git a/vendor/github.com/hashicorp/go-tfe/apply.go b/vendor/github.com/hashicorp/go-tfe/apply.go index d99eaab53396..2c92896bd974 100644 --- 
a/vendor/github.com/hashicorp/go-tfe/apply.go +++ b/vendor/github.com/hashicorp/go-tfe/apply.go @@ -69,7 +69,7 @@ type ApplyStatusTimestamps struct { // Read an apply by its ID. func (s *applies) Read(ctx context.Context, applyID string) (*Apply, error) { if !validStringID(&applyID) { - return nil, errors.New("Invalid value for apply ID") + return nil, errors.New("invalid value for apply ID") } u := fmt.Sprintf("applies/%s", url.QueryEscape(applyID)) @@ -90,7 +90,7 @@ func (s *applies) Read(ctx context.Context, applyID string) (*Apply, error) { // Logs retrieves the logs of an apply. func (s *applies) Logs(ctx context.Context, applyID string) (io.Reader, error) { if !validStringID(&applyID) { - return nil, errors.New("Invalid value for apply ID") + return nil, errors.New("invalid value for apply ID") } // Get the apply to make sure it exists. @@ -101,12 +101,12 @@ func (s *applies) Logs(ctx context.Context, applyID string) (io.Reader, error) { // Return an error if the log URL is empty. if a.LogReadURL == "" { - return nil, fmt.Errorf("Apply %s does not have a log URL", applyID) + return nil, fmt.Errorf("apply %s does not have a log URL", applyID) } u, err := url.Parse(a.LogReadURL) if err != nil { - return nil, fmt.Errorf("Invalid log URL: %v", err) + return nil, fmt.Errorf("invalid log URL: %v", err) } done := func() (bool, error) { diff --git a/vendor/github.com/hashicorp/go-tfe/configuration_version.go b/vendor/github.com/hashicorp/go-tfe/configuration_version.go index 168c1c6dd478..64c0db0a0ceb 100644 --- a/vendor/github.com/hashicorp/go-tfe/configuration_version.go +++ b/vendor/github.com/hashicorp/go-tfe/configuration_version.go @@ -101,7 +101,7 @@ type ConfigurationVersionListOptions struct { // List returns all configuration versions of a workspace. 
func (s *configurationVersions) List(ctx context.Context, workspaceID string, options ConfigurationVersionListOptions) (*ConfigurationVersionList, error) { if !validStringID(&workspaceID) { - return nil, errors.New("Invalid value for workspace ID") + return nil, errors.New("invalid value for workspace ID") } u := fmt.Sprintf("workspaces/%s/configuration-versions", url.QueryEscape(workspaceID)) @@ -137,7 +137,7 @@ type ConfigurationVersionCreateOptions struct { // configuration version will be usable once data is uploaded to it. func (s *configurationVersions) Create(ctx context.Context, workspaceID string, options ConfigurationVersionCreateOptions) (*ConfigurationVersion, error) { if !validStringID(&workspaceID) { - return nil, errors.New("Invalid value for workspace ID") + return nil, errors.New("invalid value for workspace ID") } // Make sure we don't send a user provided ID. @@ -161,7 +161,7 @@ func (s *configurationVersions) Create(ctx context.Context, workspaceID string, // Read a configuration version by its ID. 
func (s *configurationVersions) Read(ctx context.Context, cvID string) (*ConfigurationVersion, error) { if !validStringID(&cvID) { - return nil, errors.New("Invalid value for configuration version ID") + return nil, errors.New("invalid value for configuration version ID") } u := fmt.Sprintf("configuration-versions/%s", url.QueryEscape(cvID)) diff --git a/vendor/github.com/hashicorp/go-tfe/go.mod b/vendor/github.com/hashicorp/go-tfe/go.mod new file mode 100644 index 000000000000..8cad701d8150 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/go.mod @@ -0,0 +1,14 @@ +module github.com/hashicorp/go-tfe + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/google/go-querystring v1.0.0 + github.com/hashicorp/go-cleanhttp v0.5.0 + github.com/hashicorp/go-retryablehttp v0.0.0-20180718195005-e651d75abec6 + github.com/hashicorp/go-slug v0.1.0 + github.com/hashicorp/go-uuid v1.0.0 + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 + github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d + golang.org/x/time v0.0.0-20181108054448-85acf8d2951c +) diff --git a/vendor/github.com/hashicorp/go-tfe/go.sum b/vendor/github.com/hashicorp/go-tfe/go.sum new file mode 100644 index 000000000000..dcc948753dd7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/go.sum @@ -0,0 +1,20 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-retryablehttp v0.0.0-20180718195005-e651d75abec6 
h1:qCv4319q2q7XKn0MQbi8p37hsJ+9Xo8e6yojA73JVxk= +github.com/hashicorp/go-retryablehttp v0.0.0-20180718195005-e651d75abec6/go.mod h1:fXcdFsQoipQa7mwORhKad5jmDCeSy/RCGzWA08PO0lM= +github.com/hashicorp/go-slug v0.1.0 h1:MJGEiOwRGrQCBmMMZABHqIESySFJ4ajrsjgDI4/aFI0= +github.com/hashicorp/go-slug v0.1.0/go.mod h1:+zDycQOzGqOqMW7Kn2fp9vz/NtqpMLQlgb9JUF+0km4= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d h1:Z4EH+5EffvBEhh37F0C0DnpklTMh00JOkjW5zK3ofBI= +github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d/go.mod h1:BSTlc8jOjh0niykqEGVXOLXdi9o0r0kR8tCYiMvjFgw= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/vendor/github.com/hashicorp/go-tfe/logreader.go b/vendor/github.com/hashicorp/go-tfe/logreader.go index cdc1aad9b4cf..aee4472fedb0 100644 --- a/vendor/github.com/hashicorp/go-tfe/logreader.go +++ b/vendor/github.com/hashicorp/go-tfe/logreader.go @@ -63,8 +63,13 @@ func (r *LogReader) read(l []byte) (int, error) { } req = req.WithContext(r.ctx) + // Attach the default headers. + for k, v := range r.client.headers { + req.Header[k] = v + } + // Retrieve the next chunk. 
- resp, err := r.client.http.Do(req) + resp, err := r.client.http.HTTPClient.Do(req) if err != nil { return 0, err } diff --git a/vendor/github.com/hashicorp/go-tfe/oauth_client.go b/vendor/github.com/hashicorp/go-tfe/oauth_client.go index be31fd8e35dc..b0b16bfb26c4 100644 --- a/vendor/github.com/hashicorp/go-tfe/oauth_client.go +++ b/vendor/github.com/hashicorp/go-tfe/oauth_client.go @@ -83,7 +83,7 @@ type OAuthClientListOptions struct { // List all the OAuth clients for a given organization. func (s *oAuthClients) List(ctx context.Context, organization string, options OAuthClientListOptions) (*OAuthClientList, error) { if !validStringID(&organization) { - return nil, errors.New("Invalid value for organization") + return nil, errors.New("invalid value for organization") } u := fmt.Sprintf("organizations/%s/oauth-clients", url.QueryEscape(organization)) @@ -121,16 +121,16 @@ type OAuthClientCreateOptions struct { func (o OAuthClientCreateOptions) valid() error { if !validString(o.APIURL) { - return errors.New("APIURL is required") + return errors.New("API URL is required") } if !validString(o.HTTPURL) { - return errors.New("HTTPURL is required") + return errors.New("HTTP URL is required") } if !validString(o.OAuthToken) { - return errors.New("OAuthToken is required") + return errors.New("OAuth token is required") } if o.ServiceProvider == nil { - return errors.New("ServiceProvider is required") + return errors.New("service provider is required") } return nil } @@ -138,7 +138,7 @@ func (o OAuthClientCreateOptions) valid() error { // Create an OAuth client to connect an organization and a VCS provider. 
func (s *oAuthClients) Create(ctx context.Context, organization string, options OAuthClientCreateOptions) (*OAuthClient, error) { if !validStringID(&organization) { - return nil, errors.New("Invalid value for organization") + return nil, errors.New("invalid value for organization") } if err := options.valid(); err != nil { return nil, err @@ -165,7 +165,7 @@ func (s *oAuthClients) Create(ctx context.Context, organization string, options // Read an OAuth client by its ID. func (s *oAuthClients) Read(ctx context.Context, oAuthClientID string) (*OAuthClient, error) { if !validStringID(&oAuthClientID) { - return nil, errors.New("Invalid value for OAuth client ID") + return nil, errors.New("invalid value for OAuth client ID") } u := fmt.Sprintf("oauth-clients/%s", url.QueryEscape(oAuthClientID)) @@ -186,7 +186,7 @@ func (s *oAuthClients) Read(ctx context.Context, oAuthClientID string) (*OAuthCl // Delete an OAuth client by its ID. func (s *oAuthClients) Delete(ctx context.Context, oAuthClientID string) error { if !validStringID(&oAuthClientID) { - return errors.New("Invalid value for OAuth client ID") + return errors.New("invalid value for OAuth client ID") } u := fmt.Sprintf("oauth-clients/%s", url.QueryEscape(oAuthClientID)) diff --git a/vendor/github.com/hashicorp/go-tfe/oauth_token.go b/vendor/github.com/hashicorp/go-tfe/oauth_token.go index 2367a1e3b9ef..73ab48010b45 100644 --- a/vendor/github.com/hashicorp/go-tfe/oauth_token.go +++ b/vendor/github.com/hashicorp/go-tfe/oauth_token.go @@ -62,7 +62,7 @@ type OAuthTokenListOptions struct { // List all the OAuth tokens for a given organization. 
func (s *oAuthTokens) List(ctx context.Context, organization string, options OAuthTokenListOptions) (*OAuthTokenList, error) { if !validStringID(&organization) { - return nil, errors.New("Invalid value for organization") + return nil, errors.New("invalid value for organization") } u := fmt.Sprintf("organizations/%s/oauth-tokens", url.QueryEscape(organization)) @@ -83,7 +83,7 @@ func (s *oAuthTokens) List(ctx context.Context, organization string, options OAu // Read an OAuth token by its ID. func (s *oAuthTokens) Read(ctx context.Context, oAuthTokenID string) (*OAuthToken, error) { if !validStringID(&oAuthTokenID) { - return nil, errors.New("Invalid value for OAuth token ID") + return nil, errors.New("invalid value for OAuth token ID") } u := fmt.Sprintf("oauth-tokens/%s", url.QueryEscape(oAuthTokenID)) @@ -113,7 +113,7 @@ type OAuthTokenUpdateOptions struct { // Update an existing OAuth token. func (s *oAuthTokens) Update(ctx context.Context, oAuthTokenID string, options OAuthTokenUpdateOptions) (*OAuthToken, error) { if !validStringID(&oAuthTokenID) { - return nil, errors.New("Invalid value for OAuth token ID") + return nil, errors.New("invalid value for OAuth token ID") } // Make sure we don't send a user provided ID. @@ -137,7 +137,7 @@ func (s *oAuthTokens) Update(ctx context.Context, oAuthTokenID string, options O // Delete an OAuth token by its ID. 
func (s *oAuthTokens) Delete(ctx context.Context, oAuthTokenID string) error { if !validStringID(&oAuthTokenID) { - return errors.New("Invalid value for OAuth token ID") + return errors.New("invalid value for OAuth token ID") } u := fmt.Sprintf("oauth-tokens/%s", url.QueryEscape(oAuthTokenID)) diff --git a/vendor/github.com/hashicorp/go-tfe/organization.go b/vendor/github.com/hashicorp/go-tfe/organization.go index f4759a231f0d..32e289ed6273 100644 --- a/vendor/github.com/hashicorp/go-tfe/organization.go +++ b/vendor/github.com/hashicorp/go-tfe/organization.go @@ -147,13 +147,13 @@ type OrganizationCreateOptions struct { func (o OrganizationCreateOptions) valid() error { if !validString(o.Name) { - return errors.New("Name is required") + return errors.New("name is required") } if !validStringID(o.Name) { - return errors.New("Invalid value for name") + return errors.New("invalid value for name") } if !validString(o.Email) { - return errors.New("Email is required") + return errors.New("email is required") } return nil } @@ -184,7 +184,7 @@ func (s *organizations) Create(ctx context.Context, options OrganizationCreateOp // Read an organization by its name. func (s *organizations) Read(ctx context.Context, organization string) (*Organization, error) { if !validStringID(&organization) { - return nil, errors.New("Invalid value for organization") + return nil, errors.New("invalid value for organization") } u := fmt.Sprintf("organizations/%s", url.QueryEscape(organization)) @@ -226,7 +226,7 @@ type OrganizationUpdateOptions struct { // Update attributes of an existing organization. func (s *organizations) Update(ctx context.Context, organization string, options OrganizationUpdateOptions) (*Organization, error) { if !validStringID(&organization) { - return nil, errors.New("Invalid value for organization") + return nil, errors.New("invalid value for organization") } // Make sure we don't send a user provided ID. 
@@ -250,7 +250,7 @@ func (s *organizations) Update(ctx context.Context, organization string, options // Delete an organization by its name. func (s *organizations) Delete(ctx context.Context, organization string) error { if !validStringID(&organization) { - return errors.New("Invalid value for organization") + return errors.New("invalid value for organization") } u := fmt.Sprintf("organizations/%s", url.QueryEscape(organization)) @@ -265,7 +265,7 @@ func (s *organizations) Delete(ctx context.Context, organization string) error { // Capacity shows the currently used capacity of an organization. func (s *organizations) Capacity(ctx context.Context, organization string) (*Capacity, error) { if !validStringID(&organization) { - return nil, errors.New("Invalid value for organization") + return nil, errors.New("invalid value for organization") } u := fmt.Sprintf("organizations/%s/capacity", url.QueryEscape(organization)) @@ -291,7 +291,7 @@ type RunQueueOptions struct { // RunQueue shows the current run queue of an organization. func (s *organizations) RunQueue(ctx context.Context, organization string, options RunQueueOptions) (*RunQueue, error) { if !validStringID(&organization) { - return nil, errors.New("Invalid value for organization") + return nil, errors.New("invalid value for organization") } u := fmt.Sprintf("organizations/%s/runs/queue", url.QueryEscape(organization)) diff --git a/vendor/github.com/hashicorp/go-tfe/organization_token.go b/vendor/github.com/hashicorp/go-tfe/organization_token.go index 33368da0ba45..e2b0e0dfaa88 100644 --- a/vendor/github.com/hashicorp/go-tfe/organization_token.go +++ b/vendor/github.com/hashicorp/go-tfe/organization_token.go @@ -44,7 +44,7 @@ type OrganizationToken struct { // Generate a new organization token, replacing any existing token. 
func (s *organizationTokens) Generate(ctx context.Context, organization string) (*OrganizationToken, error) { if !validStringID(&organization) { - return nil, errors.New("Invalid value for organization") + return nil, errors.New("invalid value for organization") } u := fmt.Sprintf("organizations/%s/authentication-token", url.QueryEscape(organization)) @@ -65,7 +65,7 @@ func (s *organizationTokens) Generate(ctx context.Context, organization string) // Read an organization token. func (s *organizationTokens) Read(ctx context.Context, organization string) (*OrganizationToken, error) { if !validStringID(&organization) { - return nil, errors.New("Invalid value for organization") + return nil, errors.New("invalid value for organization") } u := fmt.Sprintf("organizations/%s/authentication-token", url.QueryEscape(organization)) @@ -86,7 +86,7 @@ func (s *organizationTokens) Read(ctx context.Context, organization string) (*Or // Delete an organization token. func (s *organizationTokens) Delete(ctx context.Context, organization string) error { if !validStringID(&organization) { - return errors.New("Invalid value for organization") + return errors.New("invalid value for organization") } u := fmt.Sprintf("organizations/%s/authentication-token", url.QueryEscape(organization)) diff --git a/vendor/github.com/hashicorp/go-tfe/plan.go b/vendor/github.com/hashicorp/go-tfe/plan.go index 194ce65fba33..31aef138df1b 100644 --- a/vendor/github.com/hashicorp/go-tfe/plan.go +++ b/vendor/github.com/hashicorp/go-tfe/plan.go @@ -70,7 +70,7 @@ type PlanStatusTimestamps struct { // Read a plan by its ID. func (s *plans) Read(ctx context.Context, planID string) (*Plan, error) { if !validStringID(&planID) { - return nil, errors.New("Invalid value for plan ID") + return nil, errors.New("invalid value for plan ID") } u := fmt.Sprintf("plans/%s", url.QueryEscape(planID)) @@ -91,7 +91,7 @@ func (s *plans) Read(ctx context.Context, planID string) (*Plan, error) { // Logs retrieves the logs of a plan. 
func (s *plans) Logs(ctx context.Context, planID string) (io.Reader, error) { if !validStringID(&planID) { - return nil, errors.New("Invalid value for plan ID") + return nil, errors.New("invalid value for plan ID") } // Get the plan to make sure it exists. @@ -102,12 +102,12 @@ func (s *plans) Logs(ctx context.Context, planID string) (io.Reader, error) { // Return an error if the log URL is empty. if p.LogReadURL == "" { - return nil, fmt.Errorf("Plan %s does not have a log URL", planID) + return nil, fmt.Errorf("plan %s does not have a log URL", planID) } u, err := url.Parse(p.LogReadURL) if err != nil { - return nil, fmt.Errorf("Invalid log URL: %v", err) + return nil, fmt.Errorf("invalid log URL: %v", err) } done := func() (bool, error) { diff --git a/vendor/github.com/hashicorp/go-tfe/policy.go b/vendor/github.com/hashicorp/go-tfe/policy.go index 80926af370f4..b558b2608d77 100644 --- a/vendor/github.com/hashicorp/go-tfe/policy.go +++ b/vendor/github.com/hashicorp/go-tfe/policy.go @@ -62,10 +62,15 @@ type PolicyList struct { // Policy represents a Terraform Enterprise policy. type Policy struct { - ID string `jsonapi:"primary,policies"` - Name string `jsonapi:"attr,name"` - Enforce []*Enforcement `jsonapi:"attr,enforce"` - UpdatedAt time.Time `jsonapi:"attr,updated-at,iso8601"` + ID string `jsonapi:"primary,policies"` + Name string `jsonapi:"attr,name"` + Description string `jsonapi:"attr,description"` + Enforce []*Enforcement `jsonapi:"attr,enforce"` + PolicySetCount int `jsonapi:"attr,policy-set-count"` + UpdatedAt time.Time `jsonapi:"attr,updated-at,iso8601"` + + // Relations + Organization *Organization `jsonapi:"relation,organization"` } // Enforcement describes a enforcement. @@ -77,12 +82,15 @@ type Enforcement struct { // PolicyListOptions represents the options for listing policies. type PolicyListOptions struct { ListOptions + + // A search string (partial policy name) used to filter the results. 
+ Search *string `url:"search[name],omitempty"` } // List all the policies for a given organization func (s *policies) List(ctx context.Context, organization string, options PolicyListOptions) (*PolicyList, error) { if !validStringID(&organization) { - return nil, errors.New("Invalid value for organization") + return nil, errors.New("invalid value for organization") } u := fmt.Sprintf("organizations/%s/policies", url.QueryEscape(organization)) @@ -108,6 +116,9 @@ type PolicyCreateOptions struct { // The name of the policy. Name *string `jsonapi:"attr,name"` + // A description of the policy's purpose. + Description *string `jsonapi:"attr,description,omitempty"` + // The enforcements of the policy. Enforce []*EnforcementOptions `jsonapi:"attr,enforce"` } @@ -120,20 +131,20 @@ type EnforcementOptions struct { func (o PolicyCreateOptions) valid() error { if !validString(o.Name) { - return errors.New("Name is required") + return errors.New("name is required") } if !validStringID(o.Name) { - return errors.New("Invalid value for name") + return errors.New("invalid value for name") } if o.Enforce == nil { - return errors.New("Enforce is required") + return errors.New("enforce is required") } for _, e := range o.Enforce { if !validString(e.Path) { - return errors.New("Enforcement path is required") + return errors.New("enforcement path is required") } if e.Mode == nil { - return errors.New("Enforcement mode is required") + return errors.New("enforcement mode is required") } } return nil @@ -142,7 +153,7 @@ func (o PolicyCreateOptions) valid() error { // Create a policy and associate it with an organization. 
func (s *policies) Create(ctx context.Context, organization string, options PolicyCreateOptions) (*Policy, error) { if !validStringID(&organization) { - return nil, errors.New("Invalid value for organization") + return nil, errors.New("invalid value for organization") } if err := options.valid(); err != nil { return nil, err @@ -169,7 +180,7 @@ func (s *policies) Create(ctx context.Context, organization string, options Poli // Read a policy by its ID. func (s *policies) Read(ctx context.Context, policyID string) (*Policy, error) { if !validStringID(&policyID) { - return nil, errors.New("Invalid value for policy ID") + return nil, errors.New("invalid value for policy ID") } u := fmt.Sprintf("policies/%s", url.QueryEscape(policyID)) @@ -192,24 +203,17 @@ type PolicyUpdateOptions struct { // For internal use only! ID string `jsonapi:"primary,policies"` - // The enforcements of the policy. - Enforce []*EnforcementOptions `jsonapi:"attr,enforce"` -} + // A description of the policy's purpose. + Description *string `jsonapi:"attr,description,omitempty"` -func (o PolicyUpdateOptions) valid() error { - if o.Enforce == nil { - return errors.New("Enforce is required") - } - return nil + // The enforcements of the policy. + Enforce []*EnforcementOptions `jsonapi:"attr,enforce,omitempty"` } // Update an existing policy. func (s *policies) Update(ctx context.Context, policyID string, options PolicyUpdateOptions) (*Policy, error) { if !validStringID(&policyID) { - return nil, errors.New("Invalid value for policy ID") - } - if err := options.valid(); err != nil { - return nil, err + return nil, errors.New("invalid value for policy ID") } // Make sure we don't send a user provided ID. @@ -233,7 +237,7 @@ func (s *policies) Update(ctx context.Context, policyID string, options PolicyUp // Delete a policy by its ID. 
func (s *policies) Delete(ctx context.Context, policyID string) error { if !validStringID(&policyID) { - return errors.New("Invalid value for policy ID") + return errors.New("invalid value for policy ID") } u := fmt.Sprintf("policies/%s", url.QueryEscape(policyID)) @@ -248,7 +252,7 @@ func (s *policies) Delete(ctx context.Context, policyID string) error { // Upload the policy content of the policy. func (s *policies) Upload(ctx context.Context, policyID string, content []byte) error { if !validStringID(&policyID) { - return errors.New("Invalid value for policy ID") + return errors.New("invalid value for policy ID") } u := fmt.Sprintf("policies/%s/upload", url.QueryEscape(policyID)) @@ -263,7 +267,7 @@ func (s *policies) Upload(ctx context.Context, policyID string, content []byte) // Download the policy content of the policy. func (s *policies) Download(ctx context.Context, policyID string) ([]byte, error) { if !validStringID(&policyID) { - return nil, errors.New("Invalid value for policy ID") + return nil, errors.New("invalid value for policy ID") } u := fmt.Sprintf("policies/%s/download", url.QueryEscape(policyID)) diff --git a/vendor/github.com/hashicorp/go-tfe/policy_check.go b/vendor/github.com/hashicorp/go-tfe/policy_check.go index d5417e300632..7196568d13aa 100644 --- a/vendor/github.com/hashicorp/go-tfe/policy_check.go +++ b/vendor/github.com/hashicorp/go-tfe/policy_check.go @@ -118,7 +118,7 @@ type PolicyCheckListOptions struct { // List all policy checks of the given run. func (s *policyChecks) List(ctx context.Context, runID string, options PolicyCheckListOptions) (*PolicyCheckList, error) { if !validStringID(&runID) { - return nil, errors.New("Invalid value for run ID") + return nil, errors.New("invalid value for run ID") } u := fmt.Sprintf("runs/%s/policy-checks", url.QueryEscape(runID)) @@ -139,7 +139,7 @@ func (s *policyChecks) List(ctx context.Context, runID string, options PolicyChe // Read a policy check by its ID. 
func (s *policyChecks) Read(ctx context.Context, policyCheckID string) (*PolicyCheck, error) { if !validStringID(&policyCheckID) { - return nil, errors.New("Invalid value for policy check ID") + return nil, errors.New("invalid value for policy check ID") } u := fmt.Sprintf("policy-checks/%s", url.QueryEscape(policyCheckID)) @@ -160,7 +160,7 @@ func (s *policyChecks) Read(ctx context.Context, policyCheckID string) (*PolicyC // Override a soft-mandatory or warning policy. func (s *policyChecks) Override(ctx context.Context, policyCheckID string) (*PolicyCheck, error) { if !validStringID(&policyCheckID) { - return nil, errors.New("Invalid value for policy check ID") + return nil, errors.New("invalid value for policy check ID") } u := fmt.Sprintf("policy-checks/%s/actions/override", url.QueryEscape(policyCheckID)) @@ -181,7 +181,7 @@ func (s *policyChecks) Override(ctx context.Context, policyCheckID string) (*Pol // Logs retrieves the logs of a policy check. func (s *policyChecks) Logs(ctx context.Context, policyCheckID string) (io.Reader, error) { if !validStringID(&policyCheckID) { - return nil, errors.New("Invalid value for policy check ID") + return nil, errors.New("invalid value for policy check ID") } // Loop until the context is canceled or the policy check is finished diff --git a/vendor/github.com/hashicorp/go-tfe/policy_set.go b/vendor/github.com/hashicorp/go-tfe/policy_set.go new file mode 100644 index 000000000000..15400da14927 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/policy_set.go @@ -0,0 +1,381 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ PolicySets = (*policySets)(nil) + +// PolicySets describes all the policy set related methods that the Terraform +// Enterprise API supports. 
+// +// TFE API docs: https://www.terraform.io/docs/enterprise/api/policies.html +type PolicySets interface { + // List all the policy sets for a given organization + List(ctx context.Context, organization string, options PolicySetListOptions) (*PolicySetList, error) + + // Create a policy set and associate it with an organization. + Create(ctx context.Context, organization string, options PolicySetCreateOptions) (*PolicySet, error) + + // Read a policy set by its ID. + Read(ctx context.Context, policySetID string) (*PolicySet, error) + + // Update an existing policy set. + Update(ctx context.Context, policySetID string, options PolicySetUpdateOptions) (*PolicySet, error) + + // Add policies to a policy set. + AddPolicies(ctx context.Context, policySetID string, options PolicySetAddPoliciesOptions) error + + // Remove policies from a policy set. + RemovePolicies(ctx context.Context, policySetID string, options PolicySetRemovePoliciesOptions) error + + // Attach a policy set to workspaces. + AttachToWorkspaces(ctx context.Context, policySetID string, options PolicySetAttachToWorkspacesOptions) error + + // Detach a policy set from workspaces. + DetachFromWorkspaces(ctx context.Context, policySetID string, options PolicySetDetachFromWorkspacesOptions) error + + // Delete a policy set by its ID. + Delete(ctx context.Context, policyID string) error +} + +// policySets implements PolicySets. +type policySets struct { + client *Client +} + +// PolicySetList represents a list of policy sets.. +type PolicySetList struct { + *Pagination + Items []*PolicySet +} + +// PolicySet represents a Terraform Enterprise policy set. 
+type PolicySet struct { + ID string `jsonapi:"primary,policy-sets"` + Name string `jsonapi:"attr,name"` + Description string `jsonapi:"attr,description"` + Global bool `jsonapi:"attr,global"` + PolicyCount int `jsonapi:"attr,policy-count"` + WorkspaceCount int `jsonapi:"attr,workspace-count"` + CreatedAt time.Time `jsonapi:"attr,created-at,iso8601"` + UpdatedAt time.Time `jsonapi:"attr,updated-at,iso8601"` + + // Relations + Organization *Organization `jsonapi:"relation,organization"` + Policies []*Policy `jsonapi:"relation,policies"` + Workspaces []*Workspace `jsonapi:"relation,workspaces"` +} + +// PolicySetListOptions represents the options for listing policy sets. +type PolicySetListOptions struct { + ListOptions + + // A search string (partial policy set name) used to filter the results. + Search *string `url:"search[name],omitempty"` +} + +// List all the policies for a given organization +func (s *policySets) List(ctx context.Context, organization string, options PolicySetListOptions) (*PolicySetList, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/policy-sets", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, &options) + if err != nil { + return nil, err + } + + psl := &PolicySetList{} + err = s.client.do(ctx, req, psl) + if err != nil { + return nil, err + } + + return psl, nil +} + +// PolicySetCreateOptions represents the options for creating a new policy set. +type PolicySetCreateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,policy-sets"` + + // The name of the policy set. + Name *string `jsonapi:"attr,name"` + + // The description of the policy set. + Description *string `jsonapi:"attr,description,omitempty"` + + // Whether or not the policy set is global. + Global *bool `jsonapi:"attr,global,omitempty"` + + // The initial members of the policy set. 
+ Policies []*Policy `jsonapi:"relation,policies,omitempty"` + + // The initial list of workspaces the policy set should be attached to. + Workspaces []*Workspace `jsonapi:"relation,workspaces,omitempty"` +} + +func (o PolicySetCreateOptions) valid() error { + if !validString(o.Name) { + return errors.New("name is required") + } + if !validStringID(o.Name) { + return errors.New("invalid value for name") + } + return nil +} + +// Create a policy set and associate it with an organization. +func (s *policySets) Create(ctx context.Context, organization string, options PolicySetCreateOptions) (*PolicySet, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("organizations/%s/policy-sets", url.QueryEscape(organization)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return nil, err + } + + ps := &PolicySet{} + err = s.client.do(ctx, req, ps) + if err != nil { + return nil, err + } + + return ps, err +} + +// Read a policy set by its ID. +func (s *policySets) Read(ctx context.Context, policySetID string) (*PolicySet, error) { + if !validStringID(&policySetID) { + return nil, errors.New("invalid value for policy set ID") + } + + u := fmt.Sprintf("policy-sets/%s", url.QueryEscape(policySetID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + ps := &PolicySet{} + err = s.client.do(ctx, req, ps) + if err != nil { + return nil, err + } + + return ps, err +} + +// PolicySetUpdateOptions represents the options for updating a policy set. +type PolicySetUpdateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,policy-sets"` + + /// The name of the policy set. + Name *string `jsonapi:"attr,name,omitempty"` + + // The description of the policy set. 
+ Description *string `jsonapi:"attr,description,omitempty"` + + // Whether or not the policy set is global. + Global *bool `jsonapi:"attr,global,omitempty"` +} + +func (o PolicySetUpdateOptions) valid() error { + if o.Name != nil && !validStringID(o.Name) { + return errors.New("invalid value for name") + } + return nil +} + +// Update an existing policy set. +func (s *policySets) Update(ctx context.Context, policySetID string, options PolicySetUpdateOptions) (*PolicySet, error) { + if !validStringID(&policySetID) { + return nil, errors.New("invalid value for policy set ID") + } + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("policy-sets/%s", url.QueryEscape(policySetID)) + req, err := s.client.newRequest("PATCH", u, &options) + if err != nil { + return nil, err + } + + ps := &PolicySet{} + err = s.client.do(ctx, req, ps) + if err != nil { + return nil, err + } + + return ps, err +} + +// PolicySetAddPoliciesOptions represents the options for adding policies to a policy set. +type PolicySetAddPoliciesOptions struct { + /// The policies to add to the policy set. 
+ Policies []*Policy +} + +func (o PolicySetAddPoliciesOptions) valid() error { + if o.Policies == nil { + return errors.New("policies is required") + } + if len(o.Policies) == 0 { + return errors.New("must provide at least one policy") + } + return nil +} + +// Add policies to a policy set +func (s *policySets) AddPolicies(ctx context.Context, policySetID string, options PolicySetAddPoliciesOptions) error { + if !validStringID(&policySetID) { + return errors.New("invalid value for policy set ID") + } + if err := options.valid(); err != nil { + return err + } + + u := fmt.Sprintf("policy-sets/%s/relationships/policies", url.QueryEscape(policySetID)) + req, err := s.client.newRequest("POST", u, options.Policies) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// PolicySetRemovePoliciesOptions represents the options for removing policies from a policy set. +type PolicySetRemovePoliciesOptions struct { + /// The policies to remove from the policy set. + Policies []*Policy +} + +func (o PolicySetRemovePoliciesOptions) valid() error { + if o.Policies == nil { + return errors.New("policies is required") + } + if len(o.Policies) == 0 { + return errors.New("must provide at least one policy") + } + return nil +} + +// Remove policies from a policy set +func (s *policySets) RemovePolicies(ctx context.Context, policySetID string, options PolicySetRemovePoliciesOptions) error { + if !validStringID(&policySetID) { + return errors.New("invalid value for policy set ID") + } + if err := options.valid(); err != nil { + return err + } + + u := fmt.Sprintf("policy-sets/%s/relationships/policies", url.QueryEscape(policySetID)) + req, err := s.client.newRequest("DELETE", u, options.Policies) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// PolicySetAttachToWorkspacesOptions represents the options for attaching a policy set to workspaces. 
+type PolicySetAttachToWorkspacesOptions struct { + /// The workspaces on which to attach the policy set. + Workspaces []*Workspace +} + +func (o PolicySetAttachToWorkspacesOptions) valid() error { + if o.Workspaces == nil { + return errors.New("workspaces is required") + } + if len(o.Workspaces) == 0 { + return errors.New("must provide at least one workspace") + } + return nil +} + +// Attach a policy set to workspaces +func (s *policySets) AttachToWorkspaces(ctx context.Context, policySetID string, options PolicySetAttachToWorkspacesOptions) error { + if !validStringID(&policySetID) { + return errors.New("invalid value for policy set ID") + } + if err := options.valid(); err != nil { + return err + } + + u := fmt.Sprintf("policy-sets/%s/relationships/workspaces", url.QueryEscape(policySetID)) + req, err := s.client.newRequest("POST", u, options.Workspaces) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// PolicySetDetachFromWorkspacesOptions represents the options for detaching a policy set from workspaces. +type PolicySetDetachFromWorkspacesOptions struct { + /// The workspaces from which to detach the policy set. 
+ Workspaces []*Workspace +} + +func (o PolicySetDetachFromWorkspacesOptions) valid() error { + if o.Workspaces == nil { + return errors.New("workspaces is required") + } + if len(o.Workspaces) == 0 { + return errors.New("must provide at least one workspace") + } + return nil +} + +// Detach a policy set from workspaces +func (s *policySets) DetachFromWorkspaces(ctx context.Context, policySetID string, options PolicySetDetachFromWorkspacesOptions) error { + if !validStringID(&policySetID) { + return errors.New("invalid value for policy set ID") + } + if err := options.valid(); err != nil { + return err + } + + u := fmt.Sprintf("policy-sets/%s/relationships/workspaces", url.QueryEscape(policySetID)) + req, err := s.client.newRequest("DELETE", u, options.Workspaces) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// Delete a policy set by its ID. +func (s *policySets) Delete(ctx context.Context, policySetID string) error { + if !validStringID(&policySetID) { + return errors.New("invalid value for policy set ID") + } + + u := fmt.Sprintf("policy-sets/%s", url.QueryEscape(policySetID)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/run.go b/vendor/github.com/hashicorp/go-tfe/run.go index 59501e6ad82a..b310bf6002cf 100644 --- a/vendor/github.com/hashicorp/go-tfe/run.go +++ b/vendor/github.com/hashicorp/go-tfe/run.go @@ -136,7 +136,7 @@ type RunListOptions struct { // List all the runs of the given workspace. 
func (s *runs) List(ctx context.Context, workspaceID string, options RunListOptions) (*RunList, error) { if !validStringID(&workspaceID) { - return nil, errors.New("Invalid value for workspace ID") + return nil, errors.New("invalid value for workspace ID") } u := fmt.Sprintf("workspaces/%s/runs", url.QueryEscape(workspaceID)) @@ -177,7 +177,7 @@ type RunCreateOptions struct { func (o RunCreateOptions) valid() error { if o.Workspace == nil { - return errors.New("Workspace is required") + return errors.New("workspace is required") } return nil } @@ -208,7 +208,7 @@ func (s *runs) Create(ctx context.Context, options RunCreateOptions) (*Run, erro // Read a run by its ID. func (s *runs) Read(ctx context.Context, runID string) (*Run, error) { if !validStringID(&runID) { - return nil, errors.New("Invalid value for run ID") + return nil, errors.New("invalid value for run ID") } u := fmt.Sprintf("runs/%s", url.QueryEscape(runID)) @@ -235,7 +235,7 @@ type RunApplyOptions struct { // Apply a run by its ID. func (s *runs) Apply(ctx context.Context, runID string, options RunApplyOptions) error { if !validStringID(&runID) { - return errors.New("Invalid value for run ID") + return errors.New("invalid value for run ID") } u := fmt.Sprintf("runs/%s/actions/apply", url.QueryEscape(runID)) @@ -256,7 +256,7 @@ type RunCancelOptions struct { // Cancel a run by its ID. func (s *runs) Cancel(ctx context.Context, runID string, options RunCancelOptions) error { if !validStringID(&runID) { - return errors.New("Invalid value for run ID") + return errors.New("invalid value for run ID") } u := fmt.Sprintf("runs/%s/actions/cancel", url.QueryEscape(runID)) @@ -277,7 +277,7 @@ type RunForceCancelOptions struct { // ForceCancel is used to forcefully cancel a run by its ID. 
func (s *runs) ForceCancel(ctx context.Context, runID string, options RunForceCancelOptions) error { if !validStringID(&runID) { - return errors.New("Invalid value for run ID") + return errors.New("invalid value for run ID") } u := fmt.Sprintf("runs/%s/actions/force-cancel", url.QueryEscape(runID)) @@ -298,7 +298,7 @@ type RunDiscardOptions struct { // Discard a run by its ID. func (s *runs) Discard(ctx context.Context, runID string, options RunDiscardOptions) error { if !validStringID(&runID) { - return errors.New("Invalid value for run ID") + return errors.New("invalid value for run ID") } u := fmt.Sprintf("runs/%s/actions/discard", url.QueryEscape(runID)) diff --git a/vendor/github.com/hashicorp/go-tfe/ssh_key.go b/vendor/github.com/hashicorp/go-tfe/ssh_key.go index b76a170db3fe..8735d07179e5 100644 --- a/vendor/github.com/hashicorp/go-tfe/ssh_key.go +++ b/vendor/github.com/hashicorp/go-tfe/ssh_key.go @@ -57,7 +57,7 @@ type SSHKeyListOptions struct { // List all the SSH keys for a given organization func (s *sshKeys) List(ctx context.Context, organization string, options SSHKeyListOptions) (*SSHKeyList, error) { if !validStringID(&organization) { - return nil, errors.New("Invalid value for organization") + return nil, errors.New("invalid value for organization") } u := fmt.Sprintf("organizations/%s/ssh-keys", url.QueryEscape(organization)) @@ -89,10 +89,10 @@ type SSHKeyCreateOptions struct { func (o SSHKeyCreateOptions) valid() error { if !validString(o.Name) { - return errors.New("Name is required") + return errors.New("name is required") } if !validString(o.Value) { - return errors.New("Value is required") + return errors.New("value is required") } return nil } @@ -100,7 +100,7 @@ func (o SSHKeyCreateOptions) valid() error { // Create an SSH key and associate it with an organization. 
func (s *sshKeys) Create(ctx context.Context, organization string, options SSHKeyCreateOptions) (*SSHKey, error) { if !validStringID(&organization) { - return nil, errors.New("Invalid value for organization") + return nil, errors.New("invalid value for organization") } if err := options.valid(); err != nil { @@ -128,7 +128,7 @@ func (s *sshKeys) Create(ctx context.Context, organization string, options SSHKe // Read an SSH key by its ID. func (s *sshKeys) Read(ctx context.Context, sshKeyID string) (*SSHKey, error) { if !validStringID(&sshKeyID) { - return nil, errors.New("Invalid value for SSH key ID") + return nil, errors.New("invalid value for SSH key ID") } u := fmt.Sprintf("ssh-keys/%s", url.QueryEscape(sshKeyID)) @@ -161,7 +161,7 @@ type SSHKeyUpdateOptions struct { // Update an SSH key by its ID. func (s *sshKeys) Update(ctx context.Context, sshKeyID string, options SSHKeyUpdateOptions) (*SSHKey, error) { if !validStringID(&sshKeyID) { - return nil, errors.New("Invalid value for SSH key ID") + return nil, errors.New("invalid value for SSH key ID") } // Make sure we don't send a user provided ID. @@ -185,7 +185,7 @@ func (s *sshKeys) Update(ctx context.Context, sshKeyID string, options SSHKeyUpd // Delete an SSH key by its ID. 
func (s *sshKeys) Delete(ctx context.Context, sshKeyID string) error { if !validStringID(&sshKeyID) { - return errors.New("Invalid value for SSH key ID") + return errors.New("invalid value for SSH key ID") } u := fmt.Sprintf("ssh-keys/%s", url.QueryEscape(sshKeyID)) diff --git a/vendor/github.com/hashicorp/go-tfe/state_version.go b/vendor/github.com/hashicorp/go-tfe/state_version.go index 89bcfda1c440..768bd5125b18 100644 --- a/vendor/github.com/hashicorp/go-tfe/state_version.go +++ b/vendor/github.com/hashicorp/go-tfe/state_version.go @@ -67,10 +67,10 @@ type StateVersionListOptions struct { func (o StateVersionListOptions) valid() error { if !validString(o.Organization) { - return errors.New("Organization is required") + return errors.New("organization is required") } if !validString(o.Workspace) { - return errors.New("Workspace is required") + return errors.New("workspace is required") } return nil } @@ -121,10 +121,10 @@ func (o StateVersionCreateOptions) valid() error { return errors.New("MD5 is required") } if o.Serial == nil { - return errors.New("Serial is required") + return errors.New("serial is required") } if !validString(o.State) { - return errors.New("State is required") + return errors.New("state is required") } return nil } @@ -132,7 +132,7 @@ func (o StateVersionCreateOptions) valid() error { // Create a new state version for the given workspace. func (s *stateVersions) Create(ctx context.Context, workspaceID string, options StateVersionCreateOptions) (*StateVersion, error) { if !validStringID(&workspaceID) { - return nil, errors.New("Invalid value for workspace ID") + return nil, errors.New("invalid value for workspace ID") } if err := options.valid(); err != nil { return nil, err @@ -159,7 +159,7 @@ func (s *stateVersions) Create(ctx context.Context, workspaceID string, options // Read a state version by its ID. 
func (s *stateVersions) Read(ctx context.Context, svID string) (*StateVersion, error) { if !validStringID(&svID) { - return nil, errors.New("Invalid value for state version ID") + return nil, errors.New("invalid value for state version ID") } u := fmt.Sprintf("state-versions/%s", url.QueryEscape(svID)) @@ -180,7 +180,7 @@ func (s *stateVersions) Read(ctx context.Context, svID string) (*StateVersion, e // Current reads the latest available state from the given workspace. func (s *stateVersions) Current(ctx context.Context, workspaceID string) (*StateVersion, error) { if !validStringID(&workspaceID) { - return nil, errors.New("Invalid value for workspace ID") + return nil, errors.New("invalid value for workspace ID") } u := fmt.Sprintf("workspaces/%s/current-state-version", url.QueryEscape(workspaceID)) diff --git a/vendor/github.com/hashicorp/go-tfe/team.go b/vendor/github.com/hashicorp/go-tfe/team.go index e6a69c3d2555..0fa2fb61dec2 100644 --- a/vendor/github.com/hashicorp/go-tfe/team.go +++ b/vendor/github.com/hashicorp/go-tfe/team.go @@ -64,7 +64,7 @@ type TeamListOptions struct { // List all the teams of the given organization. func (s *teams) List(ctx context.Context, organization string, options TeamListOptions) (*TeamList, error) { if !validStringID(&organization) { - return nil, errors.New("Invalid value for organization") + return nil, errors.New("invalid value for organization") } u := fmt.Sprintf("organizations/%s/teams", url.QueryEscape(organization)) @@ -93,10 +93,10 @@ type TeamCreateOptions struct { func (o TeamCreateOptions) valid() error { if !validString(o.Name) { - return errors.New("Name is required") + return errors.New("name is required") } if !validStringID(o.Name) { - return errors.New("Invalid value for name") + return errors.New("invalid value for name") } return nil } @@ -104,7 +104,7 @@ func (o TeamCreateOptions) valid() error { // Create a new team with the given options. 
func (s *teams) Create(ctx context.Context, organization string, options TeamCreateOptions) (*Team, error) { if !validStringID(&organization) { - return nil, errors.New("Invalid value for organization") + return nil, errors.New("invalid value for organization") } if err := options.valid(); err != nil { return nil, err @@ -131,7 +131,7 @@ func (s *teams) Create(ctx context.Context, organization string, options TeamCre // Read a single team by its ID. func (s *teams) Read(ctx context.Context, teamID string) (*Team, error) { if !validStringID(&teamID) { - return nil, errors.New("Invalid value for team ID") + return nil, errors.New("invalid value for team ID") } u := fmt.Sprintf("teams/%s", url.QueryEscape(teamID)) @@ -152,7 +152,7 @@ func (s *teams) Read(ctx context.Context, teamID string) (*Team, error) { // Delete a team by its ID. func (s *teams) Delete(ctx context.Context, teamID string) error { if !validStringID(&teamID) { - return errors.New("Invalid value for team ID") + return errors.New("invalid value for team ID") } u := fmt.Sprintf("teams/%s", url.QueryEscape(teamID)) diff --git a/vendor/github.com/hashicorp/go-tfe/team_access.go b/vendor/github.com/hashicorp/go-tfe/team_access.go index 33abc322561a..8d2d7e46c476 100644 --- a/vendor/github.com/hashicorp/go-tfe/team_access.go +++ b/vendor/github.com/hashicorp/go-tfe/team_access.go @@ -68,10 +68,10 @@ type TeamAccessListOptions struct { func (o TeamAccessListOptions) valid() error { if !validString(o.WorkspaceID) { - return errors.New("Workspace ID is required") + return errors.New("workspace ID is required") } if !validStringID(o.WorkspaceID) { - return errors.New("Invalid value for workspace ID") + return errors.New("invalid value for workspace ID") } return nil } @@ -113,13 +113,13 @@ type TeamAccessAddOptions struct { func (o TeamAccessAddOptions) valid() error { if o.Access == nil { - return errors.New("Access is required") + return errors.New("access is required") } if o.Team == nil { - return 
errors.New("Team is required") + return errors.New("team is required") } if o.Workspace == nil { - return errors.New("Workspace is required") + return errors.New("workspace is required") } return nil } @@ -150,7 +150,7 @@ func (s *teamAccesses) Add(ctx context.Context, options TeamAccessAddOptions) (* // Read a team access by its ID. func (s *teamAccesses) Read(ctx context.Context, teamAccessID string) (*TeamAccess, error) { if !validStringID(&teamAccessID) { - return nil, errors.New("Invalid value for team access ID") + return nil, errors.New("invalid value for team access ID") } u := fmt.Sprintf("team-workspaces/%s", url.QueryEscape(teamAccessID)) @@ -171,7 +171,7 @@ func (s *teamAccesses) Read(ctx context.Context, teamAccessID string) (*TeamAcce // Remove team access from a workspace. func (s *teamAccesses) Remove(ctx context.Context, teamAccessID string) error { if !validStringID(&teamAccessID) { - return errors.New("Invalid value for team access ID") + return errors.New("invalid value for team access ID") } u := fmt.Sprintf("team-workspaces/%s", url.QueryEscape(teamAccessID)) diff --git a/vendor/github.com/hashicorp/go-tfe/team_member.go b/vendor/github.com/hashicorp/go-tfe/team_member.go index 297d58a6bc63..8ab97fdac733 100644 --- a/vendor/github.com/hashicorp/go-tfe/team_member.go +++ b/vendor/github.com/hashicorp/go-tfe/team_member.go @@ -38,7 +38,7 @@ type teamMember struct { // List all members of a team. 
func (s *teamMembers) List(ctx context.Context, teamID string) ([]*User, error) { if !validStringID(&teamID) { - return nil, errors.New("Invalid value for team ID") + return nil, errors.New("invalid value for team ID") } options := struct { @@ -69,10 +69,10 @@ type TeamMemberAddOptions struct { func (o *TeamMemberAddOptions) valid() error { if o.Usernames == nil { - return errors.New("Usernames is required") + return errors.New("usernames is required") } if len(o.Usernames) == 0 { - return errors.New("Invalid value for usernames") + return errors.New("invalid value for usernames") } return nil } @@ -80,7 +80,7 @@ func (o *TeamMemberAddOptions) valid() error { // Add multiple users to a team. func (s *teamMembers) Add(ctx context.Context, teamID string, options TeamMemberAddOptions) error { if !validStringID(&teamID) { - return errors.New("Invalid value for team ID") + return errors.New("invalid value for team ID") } if err := options.valid(); err != nil { return err @@ -107,10 +107,10 @@ type TeamMemberRemoveOptions struct { func (o *TeamMemberRemoveOptions) valid() error { if o.Usernames == nil { - return errors.New("Usernames is required") + return errors.New("usernames is required") } if len(o.Usernames) == 0 { - return errors.New("Invalid value for usernames") + return errors.New("invalid value for usernames") } return nil } @@ -118,7 +118,7 @@ func (o *TeamMemberRemoveOptions) valid() error { // Remove multiple users from a team. 
func (s *teamMembers) Remove(ctx context.Context, teamID string, options TeamMemberRemoveOptions) error { if !validStringID(&teamID) { - return errors.New("Invalid value for team ID") + return errors.New("invalid value for team ID") } if err := options.valid(); err != nil { return err diff --git a/vendor/github.com/hashicorp/go-tfe/team_token.go b/vendor/github.com/hashicorp/go-tfe/team_token.go index baaf75789e6e..6763b6604b65 100644 --- a/vendor/github.com/hashicorp/go-tfe/team_token.go +++ b/vendor/github.com/hashicorp/go-tfe/team_token.go @@ -44,7 +44,7 @@ type TeamToken struct { // Generate a new team token, replacing any existing token. func (s *teamTokens) Generate(ctx context.Context, teamID string) (*TeamToken, error) { if !validStringID(&teamID) { - return nil, errors.New("Invalid value for team ID") + return nil, errors.New("invalid value for team ID") } u := fmt.Sprintf("teams/%s/authentication-token", url.QueryEscape(teamID)) @@ -65,7 +65,7 @@ func (s *teamTokens) Generate(ctx context.Context, teamID string) (*TeamToken, e // Read a team token by its ID. func (s *teamTokens) Read(ctx context.Context, teamID string) (*TeamToken, error) { if !validStringID(&teamID) { - return nil, errors.New("Invalid value for team ID") + return nil, errors.New("invalid value for team ID") } u := fmt.Sprintf("teams/%s/authentication-token", url.QueryEscape(teamID)) @@ -86,7 +86,7 @@ func (s *teamTokens) Read(ctx context.Context, teamID string) (*TeamToken, error // Delete a team token by its ID. 
func (s *teamTokens) Delete(ctx context.Context, teamID string) error { if !validStringID(&teamID) { - return errors.New("Invalid value for team ID") + return errors.New("invalid value for team ID") } u := fmt.Sprintf("teams/%s/authentication-token", url.QueryEscape(teamID)) diff --git a/vendor/github.com/hashicorp/go-tfe/tfe.go b/vendor/github.com/hashicorp/go-tfe/tfe.go index 127d94a93135..c38938564774 100644 --- a/vendor/github.com/hashicorp/go-tfe/tfe.go +++ b/vendor/github.com/hashicorp/go-tfe/tfe.go @@ -7,30 +7,37 @@ import ( "errors" "fmt" "io" - "io/ioutil" + "math/rand" "net/http" "net/url" "os" "reflect" + "strconv" "strings" + "time" "github.com/google/go-querystring/query" "github.com/hashicorp/go-cleanhttp" + retryablehttp "github.com/hashicorp/go-retryablehttp" "github.com/svanharmelen/jsonapi" + "golang.org/x/time/rate" ) const ( + userAgent = "go-tfe" + headerRateLimit = "X-RateLimit-Limit" + headerRateReset = "X-RateLimit-Reset" + // DefaultAddress of Terraform Enterprise. DefaultAddress = "https://app.terraform.io" // DefaultBasePath on which the API is served. DefaultBasePath = "/api/v2/" ) -const ( - userAgent = "go-tfe" -) - var ( + // random is used to generate pseudo-random numbers. + random = rand.New(rand.NewSource(time.Now().UnixNano())) + // ErrUnauthorized is returned when a receiving a 401. ErrUnauthorized = errors.New("unauthorized") // ErrResourceNotFound is returned when a receiving a 404. @@ -82,7 +89,8 @@ type Client struct { baseURL *url.URL token string headers http.Header - http *http.Client + http *retryablehttp.Client + limiter *rate.Limiter Applies Applies ConfigurationVersions ConfigurationVersions @@ -93,6 +101,7 @@ type Client struct { Plans Plans Policies Policies PolicyChecks PolicyChecks + PolicySets PolicySets Runs Runs SSHKeys SSHKeys StateVersions StateVersions @@ -131,7 +140,7 @@ func NewClient(cfg *Config) (*Client, error) { // Parse the address to make sure its a valid URL. 
baseURL, err := url.Parse(config.Address) if err != nil { - return nil, fmt.Errorf("Invalid address: %v", err) + return nil, fmt.Errorf("invalid address: %v", err) } baseURL.Path = config.BasePath @@ -141,7 +150,7 @@ func NewClient(cfg *Config) (*Client, error) { // This value must be provided by the user. if config.Token == "" { - return nil, fmt.Errorf("Missing API token") + return nil, fmt.Errorf("missing API token") } // Create the client. @@ -149,7 +158,20 @@ func NewClient(cfg *Config) (*Client, error) { baseURL: baseURL, token: config.Token, headers: config.Headers, - http: config.HTTPClient, + http: &retryablehttp.Client{ + Backoff: rateLimitBackoff, + CheckRetry: rateLimitRetry, + ErrorHandler: retryablehttp.PassthroughErrorHandler, + HTTPClient: config.HTTPClient, + RetryWaitMin: 100 * time.Millisecond, + RetryWaitMax: 300 * time.Millisecond, + RetryMax: 5, + }, + } + + // Configure the rate limiter. + if err := client.configureLimiter(); err != nil { + return nil, err } // Create the services. @@ -162,6 +184,7 @@ func NewClient(cfg *Config) (*Client, error) { client.Plans = &plans{client: client} client.Policies = &policies{client: client} client.PolicyChecks = &policyChecks{client: client} + client.PolicySets = &policySets{client: client} client.Runs = &runs{client: client} client.SSHKeys = &sshKeys{client: client} client.StateVersions = &stateVersions{client: client} @@ -176,6 +199,96 @@ func NewClient(cfg *Config) (*Client, error) { return client, nil } +// rateLimitRetry provides a callback for Client.CheckRetry, which will only +// retry when receiving a 429 response which indicates being rate limited. +func rateLimitRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { + // Do not retry on context.Canceled or context.DeadlineExceeded. + if ctx.Err() != nil { + return false, ctx.Err() + } + // Do not retry on any unexpected errors. + if err != nil { + return false, err + } + // Only retry when we are rate limited. 
+ if resp.StatusCode == 429 { + return true, nil + } + return false, nil +} + +// rateLimitBackoff provides a callback for Client.Backoff which will use the +// X-RateLimit_Reset header to determine the time to wait. We add some jitter +// to prevent a thundering herd. +// +// min and max are mainly used for bounding the jitter that will be added to +// the reset time retrieved from the headers. But if the final wait time is +// less then min, min will be used instead. +func rateLimitBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + // First create some jitter bounded by the min and max durations. + jitter := time.Duration(rand.Float64() * float64(max-min)) + + if resp != nil { + if v := resp.Header.Get(headerRateReset); v != "" { + if reset, _ := strconv.ParseFloat(v, 64); reset > 0 { + // Only update min if the given time to wait is longer. + if wait := time.Duration(reset * 1e9); wait > min { + min = wait + } + } + } + } + + return min + jitter +} + +// configureLimiter configures the rate limiter. +func (c *Client) configureLimiter() error { + u, err := c.baseURL.Parse("/") + if err != nil { + return err + } + + // Create a new request. + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return err + } + + // Attach the default headers. + for k, v := range c.headers { + req.Header[k] = v + } + + // Make a single request to retrieve the rate limit headers. + resp, err := c.http.HTTPClient.Do(req) + if err != nil { + return err + } + resp.Body.Close() + + // Set default values for when rate limiting is disabled. + limit := rate.Inf + burst := 0 + + if v := resp.Header.Get(headerRateLimit); v != "" { + if rateLimit, _ := strconv.ParseFloat(v, 64); rateLimit > 0 { + // Configure the limit and burst using a split of 2/3 for the limit and + // 1/3 for the burst. This enables clients to burst 1/3 of the allowed + // calls before the limiter kicks in. 
The remaining calls will then be + // spread out evenly using intervals of time.Second / limit which should + // prevent hitting the rate limit. + limit = rate.Limit(rateLimit * 0.66) + burst = int(rateLimit * 0.33) + } + } + + // Create a new limiter using the calculated values. + c.limiter = rate.NewLimiter(limit, burst) + + return nil +} + // ListOptions is used to specify pagination options when making API requests. // Pagination allows breaking up large result sets into chunks, or "pages". type ListOptions struct { @@ -202,30 +315,20 @@ type Pagination struct { // If v is supplied, the value will be JSONAPI encoded and included as the // request body. If the method is GET, the value will be parsed and added as // query parameters. -func (c *Client) newRequest(method, path string, v interface{}) (*http.Request, error) { +func (c *Client) newRequest(method, path string, v interface{}) (*retryablehttp.Request, error) { u, err := c.baseURL.Parse(path) if err != nil { return nil, err } - req := &http.Request{ - Method: method, - URL: u, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: make(http.Header), - Host: u.Host, - } - - // Set default headers. - for k, v := range c.headers { - req.Header[k] = v - } + // Create a request specific headers map. 
+ reqHeaders := make(http.Header) + reqHeaders.Set("Authorization", "Bearer "+c.token) + var body interface{} switch method { case "GET": - req.Header.Set("Accept", "application/vnd.api+json") + reqHeaders.Set("Accept", "application/vnd.api+json") if v != nil { q, err := query.Values(v) @@ -235,37 +338,36 @@ func (c *Client) newRequest(method, path string, v interface{}) (*http.Request, u.RawQuery = q.Encode() } case "DELETE", "PATCH", "POST": - req.Header.Set("Accept", "application/vnd.api+json") - req.Header.Set("Content-Type", "application/vnd.api+json") + reqHeaders.Set("Accept", "application/vnd.api+json") + reqHeaders.Set("Content-Type", "application/vnd.api+json") if v != nil { - var body bytes.Buffer - if err := jsonapi.MarshalPayloadWithoutIncluded(&body, v); err != nil { + buf := bytes.NewBuffer(nil) + if err := jsonapi.MarshalPayloadWithoutIncluded(buf, v); err != nil { return nil, err } - req.Body = ioutil.NopCloser(&body) - req.ContentLength = int64(body.Len()) + body = buf } case "PUT": - req.Header.Set("Accept", "application/json") - req.Header.Set("Content-Type", "application/octet-stream") + reqHeaders.Set("Accept", "application/json") + reqHeaders.Set("Content-Type", "application/octet-stream") + body = v + } - if v != nil { - switch v := v.(type) { - case *bytes.Buffer: - req.Body = ioutil.NopCloser(v) - req.ContentLength = int64(v.Len()) - case []byte: - req.Body = ioutil.NopCloser(bytes.NewReader(v)) - req.ContentLength = int64(len(v)) - default: - return nil, fmt.Errorf("Unexpected type: %T", v) - } - } + req, err := retryablehttp.NewRequest(method, u.String(), body) + if err != nil { + return nil, err } - // Set the authorization header. - req.Header.Set("Authorization", "Bearer "+c.token) + // Set the default headers. + for k, v := range c.headers { + req.Header[k] = v + } + + // Set the request specific headers. 
+ for k, v := range reqHeaders { + req.Header[k] = v + } return req, nil } @@ -279,7 +381,13 @@ func (c *Client) newRequest(method, path string, v interface{}) (*http.Request, // // The provided ctx must be non-nil. If it is canceled or times out, ctx.Err() // will be returned. -func (c *Client) do(ctx context.Context, req *http.Request, v interface{}) error { +func (c *Client) do(ctx context.Context, req *retryablehttp.Request, v interface{}) error { + // Wait will block until the limiter can obtain a new token + // or returns an error if the given context is canceled. + if err := c.limiter.Wait(ctx); err != nil { + return err + } + // Add the context to the request. req = req.WithContext(ctx) diff --git a/vendor/github.com/hashicorp/go-tfe/variable.go b/vendor/github.com/hashicorp/go-tfe/variable.go index ba28404c9fb4..9434cd4136fd 100644 --- a/vendor/github.com/hashicorp/go-tfe/variable.go +++ b/vendor/github.com/hashicorp/go-tfe/variable.go @@ -73,10 +73,10 @@ type VariableListOptions struct { func (o VariableListOptions) valid() error { if !validString(o.Organization) { - return errors.New("Organization is required") + return errors.New("organization is required") } if !validString(o.Workspace) { - return errors.New("Workspace is required") + return errors.New("workspace is required") } return nil } @@ -127,16 +127,16 @@ type VariableCreateOptions struct { func (o VariableCreateOptions) valid() error { if !validString(o.Key) { - return errors.New("Key is required") + return errors.New("key is required") } if !validString(o.Value) { - return errors.New("Value is required") + return errors.New("value is required") } if o.Category == nil { - return errors.New("Category is required") + return errors.New("category is required") } if o.Workspace == nil { - return errors.New("Workspace is required") + return errors.New("workspace is required") } return nil } @@ -167,7 +167,7 @@ func (s *variables) Create(ctx context.Context, options VariableCreateOptions) ( // Read a 
variable by its ID. func (s *variables) Read(ctx context.Context, variableID string) (*Variable, error) { if !validStringID(&variableID) { - return nil, errors.New("Invalid value for variable ID") + return nil, errors.New("invalid value for variable ID") } u := fmt.Sprintf("vars/%s", url.QueryEscape(variableID)) @@ -206,7 +206,7 @@ type VariableUpdateOptions struct { // Update values of an existing variable. func (s *variables) Update(ctx context.Context, variableID string, options VariableUpdateOptions) (*Variable, error) { if !validStringID(&variableID) { - return nil, errors.New("Invalid value for variable ID") + return nil, errors.New("invalid value for variable ID") } // Make sure we don't send a user provided ID. @@ -230,7 +230,7 @@ func (s *variables) Update(ctx context.Context, variableID string, options Varia // Delete a variable by its ID. func (s *variables) Delete(ctx context.Context, variableID string) error { if !validStringID(&variableID) { - return errors.New("Invalid value for variable ID") + return errors.New("invalid value for variable ID") } u := fmt.Sprintf("vars/%s", url.QueryEscape(variableID)) diff --git a/vendor/github.com/hashicorp/go-tfe/workspace.go b/vendor/github.com/hashicorp/go-tfe/workspace.go index 4d78a75d8fae..ca3b21d06545 100644 --- a/vendor/github.com/hashicorp/go-tfe/workspace.go +++ b/vendor/github.com/hashicorp/go-tfe/workspace.go @@ -112,7 +112,7 @@ type WorkspaceListOptions struct { // List all the workspaces within an organization. 
func (s *workspaces) List(ctx context.Context, organization string, options WorkspaceListOptions) (*WorkspaceList, error) { if !validStringID(&organization) { - return nil, errors.New("Invalid value for organization") + return nil, errors.New("invalid value for organization") } u := fmt.Sprintf("organizations/%s/workspaces", url.QueryEscape(organization)) @@ -173,10 +173,10 @@ type VCSRepoOptions struct { func (o WorkspaceCreateOptions) valid() error { if !validString(o.Name) { - return errors.New("Name is required") + return errors.New("name is required") } if !validStringID(o.Name) { - return errors.New("Invalid value for name") + return errors.New("invalid value for name") } return nil } @@ -184,7 +184,7 @@ func (o WorkspaceCreateOptions) valid() error { // Create is used to create a new workspace. func (s *workspaces) Create(ctx context.Context, organization string, options WorkspaceCreateOptions) (*Workspace, error) { if !validStringID(&organization) { - return nil, errors.New("Invalid value for organization") + return nil, errors.New("invalid value for organization") } if err := options.valid(); err != nil { return nil, err @@ -211,10 +211,10 @@ func (s *workspaces) Create(ctx context.Context, organization string, options Wo // Read a workspace by its name. func (s *workspaces) Read(ctx context.Context, organization, workspace string) (*Workspace, error) { if !validStringID(&organization) { - return nil, errors.New("Invalid value for organization") + return nil, errors.New("invalid value for organization") } if !validStringID(&workspace) { - return nil, errors.New("Invalid value for workspace") + return nil, errors.New("invalid value for workspace") } u := fmt.Sprintf( @@ -270,10 +270,10 @@ type WorkspaceUpdateOptions struct { // Update settings of an existing workspace. 
func (s *workspaces) Update(ctx context.Context, organization, workspace string, options WorkspaceUpdateOptions) (*Workspace, error) { if !validStringID(&organization) { - return nil, errors.New("Invalid value for organization") + return nil, errors.New("invalid value for organization") } if !validStringID(&workspace) { - return nil, errors.New("Invalid value for workspace") + return nil, errors.New("invalid value for workspace") } // Make sure we don't send a user provided ID. @@ -301,10 +301,10 @@ func (s *workspaces) Update(ctx context.Context, organization, workspace string, // Delete a workspace by its name. func (s *workspaces) Delete(ctx context.Context, organization, workspace string) error { if !validStringID(&organization) { - return errors.New("Invalid value for organization") + return errors.New("invalid value for organization") } if !validStringID(&workspace) { - return errors.New("Invalid value for workspace") + return errors.New("invalid value for workspace") } u := fmt.Sprintf( @@ -329,7 +329,7 @@ type WorkspaceLockOptions struct { // Lock a workspace by its ID. func (s *workspaces) Lock(ctx context.Context, workspaceID string, options WorkspaceLockOptions) (*Workspace, error) { if !validStringID(&workspaceID) { - return nil, errors.New("Invalid value for workspace ID") + return nil, errors.New("invalid value for workspace ID") } u := fmt.Sprintf("workspaces/%s/actions/lock", url.QueryEscape(workspaceID)) @@ -350,7 +350,7 @@ func (s *workspaces) Lock(ctx context.Context, workspaceID string, options Works // Unlock a workspace by its ID. 
func (s *workspaces) Unlock(ctx context.Context, workspaceID string) (*Workspace, error) { if !validStringID(&workspaceID) { - return nil, errors.New("Invalid value for workspace ID") + return nil, errors.New("invalid value for workspace ID") } u := fmt.Sprintf("workspaces/%s/actions/unlock", url.QueryEscape(workspaceID)) @@ -383,7 +383,7 @@ func (o WorkspaceAssignSSHKeyOptions) valid() error { return errors.New("SSH key ID is required") } if !validStringID(o.SSHKeyID) { - return errors.New("Invalid value for SSH key ID") + return errors.New("invalid value for SSH key ID") } return nil } @@ -391,7 +391,7 @@ func (o WorkspaceAssignSSHKeyOptions) valid() error { // AssignSSHKey to a workspace. func (s *workspaces) AssignSSHKey(ctx context.Context, workspaceID string, options WorkspaceAssignSSHKeyOptions) (*Workspace, error) { if !validStringID(&workspaceID) { - return nil, errors.New("Invalid value for workspace ID") + return nil, errors.New("invalid value for workspace ID") } if err := options.valid(); err != nil { return nil, err @@ -428,7 +428,7 @@ type workspaceUnassignSSHKeyOptions struct { // UnassignSSHKey from a workspace. func (s *workspaces) UnassignSSHKey(ctx context.Context, workspaceID string) (*Workspace, error) { if !validStringID(&workspaceID) { - return nil, errors.New("Invalid value for workspace ID") + return nil, errors.New("invalid value for workspace ID") } u := fmt.Sprintf("workspaces/%s/relationships/ssh-key", url.QueryEscape(workspaceID)) diff --git a/vendor/golang.org/x/time/AUTHORS b/vendor/golang.org/x/time/AUTHORS new file mode 100644 index 000000000000..15167cd746c5 --- /dev/null +++ b/vendor/golang.org/x/time/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. 
diff --git a/vendor/golang.org/x/time/CONTRIBUTORS b/vendor/golang.org/x/time/CONTRIBUTORS new file mode 100644 index 000000000000..1c4577e96806 --- /dev/null +++ b/vendor/golang.org/x/time/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE new file mode 100644 index 000000000000..6a66aea5eafe --- /dev/null +++ b/vendor/golang.org/x/time/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/time/PATENTS b/vendor/golang.org/x/time/PATENTS new file mode 100644 index 000000000000..733099041f84 --- /dev/null +++ b/vendor/golang.org/x/time/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go new file mode 100644 index 000000000000..ae93e2471974 --- /dev/null +++ b/vendor/golang.org/x/time/rate/rate.go @@ -0,0 +1,374 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rate provides a rate limiter. +package rate + +import ( + "context" + "fmt" + "math" + "sync" + "time" +) + +// Limit defines the maximum frequency of some events. +// Limit is represented as number of events per second. +// A zero Limit allows no events. +type Limit float64 + +// Inf is the infinite rate limit; it allows all events (even if burst is zero). +const Inf = Limit(math.MaxFloat64) + +// Every converts a minimum time interval between events to a Limit. +func Every(interval time.Duration) Limit { + if interval <= 0 { + return Inf + } + return 1 / Limit(interval.Seconds()) +} + +// A Limiter controls how frequently events are allowed to happen. +// It implements a "token bucket" of size b, initially full and refilled +// at rate r tokens per second. +// Informally, in any large enough time interval, the Limiter limits the +// rate to r tokens per second, with a maximum burst size of b events. +// As a special case, if r == Inf (the infinite rate), b is ignored. +// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets. 
+// +// The zero value is a valid Limiter, but it will reject all events. +// Use NewLimiter to create non-zero Limiters. +// +// Limiter has three main methods, Allow, Reserve, and Wait. +// Most callers should use Wait. +// +// Each of the three methods consumes a single token. +// They differ in their behavior when no token is available. +// If no token is available, Allow returns false. +// If no token is available, Reserve returns a reservation for a future token +// and the amount of time the caller must wait before using it. +// If no token is available, Wait blocks until one can be obtained +// or its associated context.Context is canceled. +// +// The methods AllowN, ReserveN, and WaitN consume n tokens. +type Limiter struct { + limit Limit + burst int + + mu sync.Mutex + tokens float64 + // last is the last time the limiter's tokens field was updated + last time.Time + // lastEvent is the latest time of a rate-limited event (past or future) + lastEvent time.Time +} + +// Limit returns the maximum overall event rate. +func (lim *Limiter) Limit() Limit { + lim.mu.Lock() + defer lim.mu.Unlock() + return lim.limit +} + +// Burst returns the maximum burst size. Burst is the maximum number of tokens +// that can be consumed in a single call to Allow, Reserve, or Wait, so higher +// Burst values allow more events to happen at once. +// A zero Burst allows no events, unless limit == Inf. +func (lim *Limiter) Burst() int { + return lim.burst +} + +// NewLimiter returns a new Limiter that allows events up to rate r and permits +// bursts of at most b tokens. +func NewLimiter(r Limit, b int) *Limiter { + return &Limiter{ + limit: r, + burst: b, + } +} + +// Allow is shorthand for AllowN(time.Now(), 1). +func (lim *Limiter) Allow() bool { + return lim.AllowN(time.Now(), 1) +} + +// AllowN reports whether n events may happen at time now. +// Use this method if you intend to drop / skip events that exceed the rate limit. +// Otherwise use Reserve or Wait. 
+func (lim *Limiter) AllowN(now time.Time, n int) bool { + return lim.reserveN(now, n, 0).ok +} + +// A Reservation holds information about events that are permitted by a Limiter to happen after a delay. +// A Reservation may be canceled, which may enable the Limiter to permit additional events. +type Reservation struct { + ok bool + lim *Limiter + tokens int + timeToAct time.Time + // This is the Limit at reservation time, it can change later. + limit Limit +} + +// OK returns whether the limiter can provide the requested number of tokens +// within the maximum wait time. If OK is false, Delay returns InfDuration, and +// Cancel does nothing. +func (r *Reservation) OK() bool { + return r.ok +} + +// Delay is shorthand for DelayFrom(time.Now()). +func (r *Reservation) Delay() time.Duration { + return r.DelayFrom(time.Now()) +} + +// InfDuration is the duration returned by Delay when a Reservation is not OK. +const InfDuration = time.Duration(1<<63 - 1) + +// DelayFrom returns the duration for which the reservation holder must wait +// before taking the reserved action. Zero duration means act immediately. +// InfDuration means the limiter cannot grant the tokens requested in this +// Reservation within the maximum wait time. +func (r *Reservation) DelayFrom(now time.Time) time.Duration { + if !r.ok { + return InfDuration + } + delay := r.timeToAct.Sub(now) + if delay < 0 { + return 0 + } + return delay +} + +// Cancel is shorthand for CancelAt(time.Now()). +func (r *Reservation) Cancel() { + r.CancelAt(time.Now()) + return +} + +// CancelAt indicates that the reservation holder will not perform the reserved action +// and reverses the effects of this Reservation on the rate limit as much as possible, +// considering that other reservations may have already been made. 
+func (r *Reservation) CancelAt(now time.Time) { + if !r.ok { + return + } + + r.lim.mu.Lock() + defer r.lim.mu.Unlock() + + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { + return + } + + // calculate tokens to restore + // The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved + // after r was obtained. These tokens should not be restored. + restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct)) + if restoreTokens <= 0 { + return + } + // advance time to now + now, _, tokens := r.lim.advance(now) + // calculate new number of tokens + tokens += restoreTokens + if burst := float64(r.lim.burst); tokens > burst { + tokens = burst + } + // update state + r.lim.last = now + r.lim.tokens = tokens + if r.timeToAct == r.lim.lastEvent { + prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) + if !prevEvent.Before(now) { + r.lim.lastEvent = prevEvent + } + } + + return +} + +// Reserve is shorthand for ReserveN(time.Now(), 1). +func (lim *Limiter) Reserve() *Reservation { + return lim.ReserveN(time.Now(), 1) +} + +// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen. +// The Limiter takes this Reservation into account when allowing future events. +// ReserveN returns false if n exceeds the Limiter's burst size. +// Usage example: +// r := lim.ReserveN(time.Now(), 1) +// if !r.OK() { +// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? +// return +// } +// time.Sleep(r.Delay()) +// Act() +// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. +// If you need to respect a deadline or cancel the delay, use Wait instead. +// To drop or skip events exceeding rate limit, use Allow instead. 
+func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { + r := lim.reserveN(now, n, InfDuration) + return &r +} + +// Wait is shorthand for WaitN(ctx, 1). +func (lim *Limiter) Wait(ctx context.Context) (err error) { + return lim.WaitN(ctx, 1) +} + +// WaitN blocks until lim permits n events to happen. +// It returns an error if n exceeds the Limiter's burst size, the Context is +// canceled, or the expected wait time exceeds the Context's Deadline. +// The burst limit is ignored if the rate limit is Inf. +func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + if n > lim.burst && lim.limit != Inf { + return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst) + } + // Check if ctx is already cancelled + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + // Determine wait limit + now := time.Now() + waitLimit := InfDuration + if deadline, ok := ctx.Deadline(); ok { + waitLimit = deadline.Sub(now) + } + // Reserve + r := lim.reserveN(now, n, waitLimit) + if !r.ok { + return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) + } + // Wait if necessary + delay := r.DelayFrom(now) + if delay == 0 { + return nil + } + t := time.NewTimer(delay) + defer t.Stop() + select { + case <-t.C: + // We can proceed. + return nil + case <-ctx.Done(): + // Context was canceled before we could proceed. Cancel the + // reservation, which may permit other events to proceed sooner. + r.Cancel() + return ctx.Err() + } +} + +// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit). +func (lim *Limiter) SetLimit(newLimit Limit) { + lim.SetLimitAt(time.Now(), newLimit) +} + +// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated +// or underutilized by those which reserved (using Reserve or Wait) but did not yet act +// before SetLimitAt was called. 
+func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { + lim.mu.Lock() + defer lim.mu.Unlock() + + now, _, tokens := lim.advance(now) + + lim.last = now + lim.tokens = tokens + lim.limit = newLimit +} + +// reserveN is a helper method for AllowN, ReserveN, and WaitN. +// maxFutureReserve specifies the maximum reservation wait duration allowed. +// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. +func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { + lim.mu.Lock() + + if lim.limit == Inf { + lim.mu.Unlock() + return Reservation{ + ok: true, + lim: lim, + tokens: n, + timeToAct: now, + } + } + + now, last, tokens := lim.advance(now) + + // Calculate the remaining number of tokens resulting from the request. + tokens -= float64(n) + + // Calculate the wait duration + var waitDuration time.Duration + if tokens < 0 { + waitDuration = lim.limit.durationFromTokens(-tokens) + } + + // Decide result + ok := n <= lim.burst && waitDuration <= maxFutureReserve + + // Prepare reservation + r := Reservation{ + ok: ok, + lim: lim, + limit: lim.limit, + } + if ok { + r.tokens = n + r.timeToAct = now.Add(waitDuration) + } + + // Update state + if ok { + lim.last = now + lim.tokens = tokens + lim.lastEvent = r.timeToAct + } else { + lim.last = last + } + + lim.mu.Unlock() + return r +} + +// advance calculates and returns an updated state for lim resulting from the passage of time. +// lim is not changed. +func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { + last := lim.last + if now.Before(last) { + last = now + } + + // Avoid making delta overflow below when last is very old. + maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens) + elapsed := now.Sub(last) + if elapsed > maxElapsed { + elapsed = maxElapsed + } + + // Calculate the new number of tokens, due to time that passed. 
+ delta := lim.limit.tokensFromDuration(elapsed) + tokens := lim.tokens + delta + if burst := float64(lim.burst); tokens > burst { + tokens = burst + } + + return now, last, tokens +} + +// durationFromTokens is a unit conversion function from the number of tokens to the duration +// of time it takes to accumulate them at a rate of limit tokens per second. +func (limit Limit) durationFromTokens(tokens float64) time.Duration { + seconds := tokens / float64(limit) + return time.Nanosecond * time.Duration(1e9*seconds) +} + +// tokensFromDuration is a unit conversion function from a time duration to the number of tokens +// which could be accumulated during that duration at a rate of limit tokens per second. +func (limit Limit) tokensFromDuration(d time.Duration) float64 { + return d.Seconds() * float64(limit) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 09369f8fe359..ee153009144f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -296,7 +296,7 @@ github.com/hashicorp/consul/testutil/retry github.com/hashicorp/errwrap # github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de github.com/hashicorp/go-checkpoint -# github.com/hashicorp/go-cleanhttp v0.0.0-20171130225243-06c9ea3a335b +# github.com/hashicorp/go-cleanhttp v0.5.0 github.com/hashicorp/go-cleanhttp # github.com/hashicorp/go-getter v0.0.0-20180327010114-90bb99a48d86 github.com/hashicorp/go-getter @@ -307,7 +307,7 @@ github.com/hashicorp/go-hclog github.com/hashicorp/go-multierror # github.com/hashicorp/go-plugin v0.0.0-20181002195811-1faddcf740b6 github.com/hashicorp/go-plugin -# github.com/hashicorp/go-retryablehttp v0.0.0-20160930035102-6e85be8fee1d +# github.com/hashicorp/go-retryablehttp v0.0.0-20180718195005-e651d75abec6 github.com/hashicorp/go-retryablehttp # github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 github.com/hashicorp/go-rootcerts @@ -315,7 +315,7 @@ github.com/hashicorp/go-rootcerts github.com/hashicorp/go-safetemp # 
github.com/hashicorp/go-slug v0.1.0 github.com/hashicorp/go-slug -# github.com/hashicorp/go-tfe v0.2.6 +# github.com/hashicorp/go-tfe v0.2.9 github.com/hashicorp/go-tfe # github.com/hashicorp/go-uuid v1.0.0 github.com/hashicorp/go-uuid @@ -543,6 +543,8 @@ golang.org/x/text/encoding/unicode golang.org/x/text/internal/tag golang.org/x/text/internal/utf8internal golang.org/x/text/runes +# golang.org/x/time v0.0.0-20181108054448-85acf8d2951c +golang.org/x/time/rate # google.golang.org/api v0.0.0-20181015145326-625cd1887957 google.golang.org/api/iterator google.golang.org/api/option From c4d0be8a52f969398d04d99ce569f732bb2a1fff Mon Sep 17 00:00:00 2001 From: James Bardin Date: Thu, 8 Nov 2018 12:15:06 -0500 Subject: [PATCH 080/149] failing test for schemas with a single set attr Resources with certain combinations of attributes in a nested single set fail to properly coerce their shimmed values. --- builtin/providers/test/provider.go | 1 + builtin/providers/test/resource_nested_set.go | 82 +++++++++++ .../test/resource_nested_set_test.go | 129 ++++++++++++++++++ 3 files changed, 212 insertions(+) create mode 100644 builtin/providers/test/resource_nested_set.go create mode 100644 builtin/providers/test/resource_nested_set_test.go diff --git a/builtin/providers/test/provider.go b/builtin/providers/test/provider.go index 1c0fc574d305..59dd550855e9 100644 --- a/builtin/providers/test/provider.go +++ b/builtin/providers/test/provider.go @@ -24,6 +24,7 @@ func Provider() terraform.ResourceProvider { "test_resource_diff_suppress": testResourceDiffSuppress(), "test_resource_force_new": testResourceForceNew(), "test_resource_nested": testResourceNested(), + "test_resource_nested_set": testResourceNestedSet(), }, DataSourcesMap: map[string]*schema.Resource{ "test_data_source": testDataSource(), diff --git a/builtin/providers/test/resource_nested_set.go b/builtin/providers/test/resource_nested_set.go new file mode 100644 index 000000000000..862c6c95b3f3 --- /dev/null +++ 
b/builtin/providers/test/resource_nested_set.go @@ -0,0 +1,82 @@ +package test + +import ( + "fmt" + "math/rand" + + "github.com/hashicorp/terraform/helper/schema" +) + +func testResourceNestedSet() *schema.Resource { + return &schema.Resource{ + Create: testResourceNestedSetCreate, + Read: testResourceNestedSetRead, + Delete: testResourceNestedSetDelete, + Update: testResourceNestedSetUpdate, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "optional": { + Type: schema.TypeBool, + Optional: true, + }, + "single": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "value": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + + "optional": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func testResourceNestedSetCreate(d *schema.ResourceData, meta interface{}) error { + id := fmt.Sprintf("%x", rand.Int63()) + d.SetId(id) + + // replicate some awkward handling of a computed value in a set + set := d.Get("single").(*schema.Set) + l := set.List() + if len(l) == 1 { + if s, ok := l[0].(map[string]interface{}); ok { + if v, _ := s["optional"].(string); v == "" { + s["optional"] = id + } + } + } + + d.Set("single", set) + + return testResourceNestedRead(d, meta) +} + +func testResourceNestedSetRead(d *schema.ResourceData, meta interface{}) error { + return nil +} + +func testResourceNestedSetDelete(d *schema.ResourceData, meta interface{}) error { + d.SetId("") + return nil +} + +func testResourceNestedSetUpdate(d *schema.ResourceData, meta interface{}) error { + return nil +} diff --git a/builtin/providers/test/resource_nested_set_test.go b/builtin/providers/test/resource_nested_set_test.go new file mode 100644 index 000000000000..56dd04bfc3a2 --- /dev/null +++ b/builtin/providers/test/resource_nested_set_test.go @@ -0,0 +1,129 @@ 
+package test + +import ( + "errors" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestResourceNestedSet_basic(t *testing.T) { + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { + single { + value = "bar" + } +} + `), + }, + }, + }) +} + +// The set should not be generated because of it's computed value +func TestResourceNestedSet_noSet(t *testing.T) { + checkFunc := func(s *terraform.State) error { + root := s.ModuleByPath(addrs.RootModuleInstance) + res := root.Resources["test_resource_nested_set.foo"] + for k, v := range res.Primary.Attributes { + if strings.HasPrefix(k, "single") && k != "single.#" { + return fmt.Errorf("unexpected set value: %s:%s", k, v) + } + } + return nil + } + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { +} + `), + Check: checkFunc, + }, + }, + }) +} + +func TestResourceNestedSet_addRemove(t *testing.T) { + var id string + checkFunc := func(s *terraform.State) error { + root := s.ModuleByPath(addrs.RootModuleInstance) + res := root.Resources["test_resource_nested_set.foo"] + if res.Primary.ID == id { + return errors.New("expected new resource") + } + id = res.Primary.ID + return nil + } + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { +} + `), + Check: checkFunc, + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource 
"test_resource_nested_set" "foo" { + single { + value = "bar" + } +} + `), + Check: checkFunc, + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { +} + `), + Check: checkFunc, + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { + single { + value = "bar" + } +} + `), + Check: checkFunc, + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { + single { + value = "bar" + optional = "baz" + } +} + `), + Check: checkFunc, + }, + + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { +} + `), + Check: checkFunc, + }, + }, + }) +} From ebc9745788760bfb537241b89c40b907bb734d4f Mon Sep 17 00:00:00 2001 From: James Bardin Date: Thu, 8 Nov 2018 12:28:18 -0500 Subject: [PATCH 081/149] fix "too many items" error message --- configs/configschema/coerce_value.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/configs/configschema/coerce_value.go b/configs/configschema/coerce_value.go index e6b163b9f784..bae5733df711 100644 --- a/configs/configschema/coerce_value.go +++ b/configs/configschema/coerce_value.go @@ -113,7 +113,7 @@ func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("insufficient items for attribute %q; must have at least %d", typeName, blockS.MinItems) } if l > blockS.MaxItems && blockS.MaxItems > 0 { - return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("too many items for attribute %q; must have at least %d", typeName, blockS.MinItems) + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("too many items for attribute %q; cannot have more than %d", typeName, blockS.MaxItems) } if l == 0 { attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType()) @@ -161,7 +161,7 @@ func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { return 
cty.UnknownVal(b.ImpliedType()), path.NewErrorf("insufficient items for attribute %q; must have at least %d", typeName, blockS.MinItems) } if l > blockS.MaxItems && blockS.MaxItems > 0 { - return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("too many items for attribute %q; must have at least %d", typeName, blockS.MinItems) + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("too many items for attribute %q; cannot have more than %d", typeName, blockS.MaxItems) } if l == 0 { attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType()) From 6fee1f24abc87ff2f386676429a2fe3cecea75d7 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Thu, 8 Nov 2018 18:01:39 -0500 Subject: [PATCH 082/149] don't add duplicate unknowns to a set The flatmap shim was lazily adding duplicate items and letting cty.Set clear them out, but if those duplicates contain unknown values they can't be checked for equality and will end up remaining in the set. --- config/hcl2shim/flatmap.go | 12 +++++++++ config/hcl2shim/flatmap_test.go | 44 +++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/config/hcl2shim/flatmap.go b/config/hcl2shim/flatmap.go index 2f7954d76c1e..bcecb30df747 100644 --- a/config/hcl2shim/flatmap.go +++ b/config/hcl2shim/flatmap.go @@ -356,6 +356,11 @@ func hcl2ValueFromFlatmapSet(m map[string]string, prefix string, ty cty.Type) (c return cty.UnknownVal(ty), nil } + // Keep track of keys we've seen, so we don't add the same set value + // multiple times. The cty.Set will normally de-duplicate values, but we may + // have unknown values that would not show as equivalent. 
+ seen := map[string]bool{} + for fullKey := range m { if !strings.HasPrefix(fullKey, prefix) { continue @@ -370,6 +375,12 @@ func hcl2ValueFromFlatmapSet(m map[string]string, prefix string, ty cty.Type) (c key = fullKey[:dot+len(prefix)] } + if seen[key] { + continue + } + + seen[key] = true + // The flatmap format doesn't allow us to distinguish between keys // that contain periods and nested objects, so by convention a // map is only ever of primitive type in flatmap, and we just assume @@ -386,5 +397,6 @@ func hcl2ValueFromFlatmapSet(m map[string]string, prefix string, ty cty.Type) (c if len(vals) == 0 { return cty.SetValEmpty(ety), nil } + return cty.SetVal(vals), nil } diff --git a/config/hcl2shim/flatmap_test.go b/config/hcl2shim/flatmap_test.go index 56c06c3dc588..07f93ac26547 100644 --- a/config/hcl2shim/flatmap_test.go +++ b/config/hcl2shim/flatmap_test.go @@ -635,6 +635,50 @@ func TestHCL2ValueFromFlatmap(t *testing.T) { }), }), }, + { + Flatmap: map[string]string{ + "single.#": "1", + "single.~1.value": "a", + "single.~1.optional": UnknownVariableValue, + "two.#": "2", + "two.~2381914684.value": "a", + "two.~2381914684.optional": UnknownVariableValue, + "two.~2798940671.value": "b", + "two.~2798940671.optional": UnknownVariableValue, + }, + Type: cty.Object(map[string]cty.Type{ + "single": cty.Set( + cty.Object(map[string]cty.Type{ + "value": cty.String, + "optional": cty.String, + }), + ), + "two": cty.Set( + cty.Object(map[string]cty.Type{ + "optional": cty.String, + "value": cty.String, + }), + ), + }), + Want: cty.ObjectVal(map[string]cty.Value{ + "single": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("a"), + "optional": cty.UnknownVal(cty.String), + }), + }), + "two": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("a"), + "optional": cty.UnknownVal(cty.String), + }), + cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal("b"), + "optional": 
cty.UnknownVal(cty.String), + }), + }), + }), + }, } for _, test := range tests { From ce5d7ff6d0d6f266148647a67ee47e7244a88180 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 13 Nov 2018 08:26:56 -0500 Subject: [PATCH 083/149] spelling --- helper/resource/state_shim.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helper/resource/state_shim.go b/helper/resource/state_shim.go index 9ad30d3ac9eb..9c698d16a4dd 100644 --- a/helper/resource/state_shim.go +++ b/helper/resource/state_shim.go @@ -71,7 +71,7 @@ func shimNewState(newState *states.State, schemas *terraform.Schemas) (*terrafor } if resSchema == nil { - return nil, fmt.Errorf("mising resource schema for %q in %q", resType, providerType) + return nil, fmt.Errorf("missing resource schema for %q in %q", resType, providerType) } for key, i := range res.Instances { From d2bd41c2600818a498c235722b671002939f477f Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 13 Nov 2018 18:50:47 -0500 Subject: [PATCH 084/149] add a nested set test --- builtin/providers/test/resource_nested_set.go | 31 ++++++++ .../test/resource_nested_set_test.go | 76 +++++++++++++++++++ 2 files changed, 107 insertions(+) diff --git a/builtin/providers/test/resource_nested_set.go b/builtin/providers/test/resource_nested_set.go index 862c6c95b3f3..c555265c3d0c 100644 --- a/builtin/providers/test/resource_nested_set.go +++ b/builtin/providers/test/resource_nested_set.go @@ -44,6 +44,37 @@ func testResourceNestedSet() *schema.Resource { }, }, }, + "multi": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "set": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "required": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "optional_int": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + + "optional": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + 
}, + }, + }, + }, }, } } diff --git a/builtin/providers/test/resource_nested_set_test.go b/builtin/providers/test/resource_nested_set_test.go index 56dd04bfc3a2..7a27b75a2457 100644 --- a/builtin/providers/test/resource_nested_set_test.go +++ b/builtin/providers/test/resource_nested_set_test.go @@ -127,3 +127,79 @@ resource "test_resource_nested_set" "foo" { }, }) } +func TestResourceNestedSet_multi(t *testing.T) { + checkFunc := func(s *terraform.State) error { + return nil + } + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { +} + `), + Check: checkFunc, + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { + multi { + optional = "bar" + } +} + `), + Check: checkFunc, + }, + + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { +} + `), + Check: checkFunc, + }, + + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { + multi { + set { + required = "val" + } + } +} + `), + Check: checkFunc, + }, + + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { + multi { + set { + required = "new" + } + } +} + `), + Check: checkFunc, + }, + + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { + multi { + set { + required = "new" + optional_int = 3 + } + } +} + `), + Check: checkFunc, + }, + }, + }) +} From 634430ebb2149d0dbb8fb44acd46554688d101e2 Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Thu, 15 Nov 2018 13:17:15 +0100 Subject: [PATCH 085/149] Fix wildcard dependencies when upgrading states Fixes #19347 --- .../testdata/roundtrip/v3-simple.in.tfstate | 19 ++++++++++++++++- .../testdata/roundtrip/v3-simple.out.tfstate | 21 ++++++++++++++++++- 
states/statefile/version3_upgrade.go | 7 ++++++- terraform/valuesourcetype_string.go | 15 +++++++------ 4 files changed, 53 insertions(+), 9 deletions(-) diff --git a/states/statefile/testdata/roundtrip/v3-simple.in.tfstate b/states/statefile/testdata/roundtrip/v3-simple.in.tfstate index f4c5bde369b3..b9d9889efb34 100644 --- a/states/statefile/testdata/roundtrip/v3-simple.in.tfstate +++ b/states/statefile/testdata/roundtrip/v3-simple.in.tfstate @@ -19,7 +19,8 @@ "null_resource.bar": { "type": "null_resource", "depends_on": [ - "null_resource.foo" + "null_resource.foo.*", + "null_resource.foobar" ], "primary": { "id": "5388490630832483079", @@ -65,6 +66,22 @@ }, "deposed": [], "provider": "" + }, + "null_resource.foobar": { + "type": "null_resource", + "depends_on": [], + "primary": { + "id": "7388490630832483079", + "attributes": { + "id": "7388490630832483079", + "triggers.%": "1", + "triggers.whaaat": "0,1" + }, + "meta": {}, + "tainted": false + }, + "deposed": [], + "provider": "" } }, "depends_on": [] diff --git a/states/statefile/testdata/roundtrip/v3-simple.out.tfstate b/states/statefile/testdata/roundtrip/v3-simple.out.tfstate index 0d45bd97dcc8..38f325d66daa 100644 --- a/states/statefile/testdata/roundtrip/v3-simple.out.tfstate +++ b/states/statefile/testdata/roundtrip/v3-simple.out.tfstate @@ -23,7 +23,10 @@ "triggers.%": "1", "triggers.whaaat": "0,1" }, - "depends_on": ["null_resource.foo"] + "depends_on": [ + "null_resource.foo", + "null_resource.foobar" + ] } ] }, @@ -53,6 +56,22 @@ } } ] + }, + { + "mode": "managed", + "type": "null_resource", + "name": "foobar", + "provider": "provider.null", + "instances": [ + { + "schema_version": 0, + "attributes_flat": { + "id": "7388490630832483079", + "triggers.%": "1", + "triggers.whaaat": "0,1" + } + } + ] } ] } diff --git a/states/statefile/version3_upgrade.go b/states/statefile/version3_upgrade.go index da6dd295893e..7ec1c9465938 100644 --- a/states/statefile/version3_upgrade.go +++ 
b/states/statefile/version3_upgrade.go @@ -299,12 +299,17 @@ func upgradeInstanceObjectV3ToV4(rsOld *resourceStateV2, isOld *instanceStateV2, } } + dependencies := make([]string, len(rsOld.Dependencies)) + for i, v := range rsOld.Dependencies { + dependencies[i] = strings.TrimSuffix(v, ".*") + } + return &instanceObjectStateV4{ IndexKey: instKeyRaw, Status: status, Deposed: string(deposedKey), AttributesFlat: attributes, - Dependencies: rsOld.Dependencies, + Dependencies: dependencies, SchemaVersion: schemaVersion, PrivateRaw: privateJSON, }, nil diff --git a/terraform/valuesourcetype_string.go b/terraform/valuesourcetype_string.go index e3218b41457f..1380c851f910 100644 --- a/terraform/valuesourcetype_string.go +++ b/terraform/valuesourcetype_string.go @@ -8,14 +8,15 @@ const ( _ValueSourceType_name_0 = "ValueFromUnknown" _ValueSourceType_name_1 = "ValueFromCLIArg" _ValueSourceType_name_2 = "ValueFromConfig" - _ValueSourceType_name_3 = "ValueFromEnvVarValueFromFile" + _ValueSourceType_name_3 = "ValueFromEnvVarValueFromAutoFile" _ValueSourceType_name_4 = "ValueFromInput" - _ValueSourceType_name_5 = "ValueFromPlan" - _ValueSourceType_name_6 = "ValueFromCaller" + _ValueSourceType_name_5 = "ValueFromNamedFile" + _ValueSourceType_name_6 = "ValueFromPlan" + _ValueSourceType_name_7 = "ValueFromCaller" ) var ( - _ValueSourceType_index_3 = [...]uint8{0, 15, 28} + _ValueSourceType_index_3 = [...]uint8{0, 15, 32} ) func (i ValueSourceType) String() string { @@ -31,10 +32,12 @@ func (i ValueSourceType) String() string { return _ValueSourceType_name_3[_ValueSourceType_index_3[i]:_ValueSourceType_index_3[i+1]] case i == 73: return _ValueSourceType_name_4 - case i == 80: + case i == 78: return _ValueSourceType_name_5 - case i == 83: + case i == 80: return _ValueSourceType_name_6 + case i == 83: + return _ValueSourceType_name_7 default: return "ValueSourceType(" + strconv.FormatInt(int64(i), 10) + ")" } From 547006d9432fa0946dd0dda7a3d09196d3e3dc3a Mon Sep 17 00:00:00 2001 From: 
Radek Simko Date: Wed, 14 Nov 2018 11:45:04 +0000 Subject: [PATCH 086/149] tfdiags: Restructure tests into subtests --- tfdiags/contextual_test.go | 206 ++++++++++++++++++++----------------- 1 file changed, 111 insertions(+), 95 deletions(-) diff --git a/tfdiags/contextual_test.go b/tfdiags/contextual_test.go index 45c678cb1fe3..f50de2c9d36f 100644 --- a/tfdiags/contextual_test.go +++ b/tfdiags/contextual_test.go @@ -1,6 +1,7 @@ package tfdiags import ( + "fmt" "reflect" "testing" @@ -33,109 +34,124 @@ baz "b" { t.Fatal(parseDiags) } - body := f.Body - var diags Diagnostics - diags = diags.Append(AttributeValue( - Error, - "foo[0].bar", - "detail", - cty.Path{ - cty.GetAttrStep{Name: "foo"}, - cty.IndexStep{Key: cty.NumberIntVal(0)}, - cty.GetAttrStep{Name: "bar"}, - }, - )) - diags = diags.Append(AttributeValue( - Error, - "foo[1].bar", - "detail", - cty.Path{ - cty.GetAttrStep{Name: "foo"}, - cty.IndexStep{Key: cty.NumberIntVal(1)}, - cty.GetAttrStep{Name: "bar"}, - }, - )) - diags = diags.Append(AttributeValue( - Error, - "bar.bar", - "detail", - cty.Path{ - cty.GetAttrStep{Name: "bar"}, - cty.GetAttrStep{Name: "bar"}, - }, - )) - diags = diags.Append(AttributeValue( - Error, - `baz["a"].bar`, - "detail", - cty.Path{ - cty.GetAttrStep{Name: "baz"}, - cty.IndexStep{Key: cty.StringVal("a")}, - cty.GetAttrStep{Name: "bar"}, - }, - )) - diags = diags.Append(AttributeValue( - Error, - `baz["b"].bar`, - "detail", - cty.Path{ - cty.GetAttrStep{Name: "baz"}, - cty.IndexStep{Key: cty.StringVal("b")}, - cty.GetAttrStep{Name: "bar"}, + testCases := []struct { + Diag Diagnostic + ExpectedRange *SourceRange + }{ + { + AttributeValue( + Error, + "foo[0].bar", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "foo"}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + cty.GetAttrStep{Name: "bar"}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 3, Column: 9, Byte: 15}, + End: SourcePos{Line: 3, Column: 13, Byte: 19}, + }, }, - )) - // Attribute value with 
subject already populated should not be disturbed. - // (in a real case, this might've been passed through from a deeper function - // in the call stack, for example.) - diags = diags.Append(&attributeDiagnostic{ - diagnosticBase: diagnosticBase{ - summary: "preexisting", - detail: "detail", + { + AttributeValue( + Error, + "foo[1].bar", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "foo"}, + cty.IndexStep{Key: cty.NumberIntVal(1)}, + cty.GetAttrStep{Name: "bar"}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 6, Column: 9, Byte: 36}, + End: SourcePos{Line: 6, Column: 14, Byte: 41}, + }, }, - subject: &SourceRange{ - Filename: "somewhere_else.tf", + { + AttributeValue( + Error, + "bar.bar", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "bar"}, + cty.GetAttrStep{Name: "bar"}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 9, Column: 9, Byte: 58}, + End: SourcePos{Line: 9, Column: 15, Byte: 64}, + }, }, - }) - - gotDiags := diags.InConfigBody(body) - - wantRanges := map[string]*SourceRange{ - `foo[0].bar`: { - Filename: "test.tf", - Start: SourcePos{Line: 3, Column: 9, Byte: 15}, - End: SourcePos{Line: 3, Column: 13, Byte: 19}, + { + AttributeValue( + Error, + `baz["a"].bar`, + "detail", + cty.Path{ + cty.GetAttrStep{Name: "baz"}, + cty.IndexStep{Key: cty.StringVal("a")}, + cty.GetAttrStep{Name: "bar"}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 12, Column: 9, Byte: 85}, + End: SourcePos{Line: 12, Column: 15, Byte: 91}, + }, }, - `foo[1].bar`: { - Filename: "test.tf", - Start: SourcePos{Line: 6, Column: 9, Byte: 36}, - End: SourcePos{Line: 6, Column: 14, Byte: 41}, + { + AttributeValue( + Error, + `baz["b"].bar`, + "detail", + cty.Path{ + cty.GetAttrStep{Name: "baz"}, + cty.IndexStep{Key: cty.StringVal("b")}, + cty.GetAttrStep{Name: "bar"}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 15, Column: 9, Byte: 112}, + End: SourcePos{Line: 15, Column: 15, 
Byte: 118}, + }, }, - `bar.bar`: { - Filename: "test.tf", - Start: SourcePos{Line: 9, Column: 9, Byte: 58}, - End: SourcePos{Line: 9, Column: 15, Byte: 64}, - }, - `baz["a"].bar`: { - Filename: "test.tf", - Start: SourcePos{Line: 12, Column: 9, Byte: 85}, - End: SourcePos{Line: 12, Column: 15, Byte: 91}, - }, - `baz["b"].bar`: { - Filename: "test.tf", - Start: SourcePos{Line: 15, Column: 9, Byte: 112}, - End: SourcePos{Line: 15, Column: 15, Byte: 118}, - }, - `preexisting`: { - Filename: "somewhere_else.tf", + { + // Attribute value with subject already populated should not be disturbed. + // (in a real case, this might've been passed through from a deeper function + // in the call stack, for example.) + &attributeDiagnostic{ + diagnosticBase: diagnosticBase{ + summary: "preexisting", + detail: "detail", + }, + subject: &SourceRange{ + Filename: "somewhere_else.tf", + }, + }, + &SourceRange{ + Filename: "somewhere_else.tf", + }, }, } - gotRanges := make(map[string]*SourceRange) - for _, diag := range gotDiags { - gotRanges[diag.Description().Summary] = diag.Source().Subject - } - for _, problem := range deep.Equal(gotRanges, wantRanges) { - t.Error(problem) + for i, tc := range testCases { + t.Run(fmt.Sprintf("%d:%s", i, tc.Diag.Description()), func(t *testing.T) { + var diags Diagnostics + diags = diags.Append(tc.Diag) + gotDiags := diags.InConfigBody(f.Body) + gotRange := gotDiags[0].Source().Subject + + for _, problem := range deep.Equal(gotRange, tc.ExpectedRange) { + t.Error(problem) + } + }) } } From 9e0c6ab5e00b9a239cd81ae9ca705b8a676c8364 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Thu, 15 Nov 2018 14:37:45 +0000 Subject: [PATCH 087/149] tfdiags: Add more test cases --- tfdiags/contextual_test.go | 384 +++++++++++++++++++++++++++++++++++++ 1 file changed, 384 insertions(+) diff --git a/tfdiags/contextual_test.go b/tfdiags/contextual_test.go index f50de2c9d36f..d98dd1d5b23d 100644 --- a/tfdiags/contextual_test.go +++ b/tfdiags/contextual_test.go @@ 
-28,11 +28,32 @@ baz "a" { baz "b" { bar = "boop" } +parent { + nested_str = "hello" + nested_str_tuple = ["aa", "bbb", "cccc"] + nested_num_tuple = [1, 9863, 22] + nested_map = { + first_key = "first_value" + second_key = "2nd value" + } +} +tuple_of_one = ["one"] +tuple_of_two = ["first", "22222"] +root_map = { + first = "1st" + second = "2nd" +} +simple_attr = "val" ` f, parseDiags := hclsyntax.ParseConfig([]byte(testConfig), "test.tf", hcl.Pos{Line: 1, Column: 1}) if len(parseDiags) != 0 { t.Fatal(parseDiags) } + emptySrcRng := &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 33, Column: 1, Byte: 440}, + End: SourcePos{Line: 33, Column: 1, Byte: 440}, + } testCases := []struct { Diag Diagnostic @@ -72,6 +93,19 @@ baz "b" { End: SourcePos{Line: 6, Column: 14, Byte: 41}, }, }, + { + AttributeValue( + Error, + "foo[99].bar", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "foo"}, + cty.IndexStep{Key: cty.NumberIntVal(99)}, + cty.GetAttrStep{Name: "bar"}, + }, + ), + emptySrcRng, + }, { AttributeValue( Error, @@ -122,11 +156,25 @@ baz "b" { End: SourcePos{Line: 15, Column: 15, Byte: 118}, }, }, + { + AttributeValue( + Error, + `baz["not_exists"].bar`, + "detail", + cty.Path{ + cty.GetAttrStep{Name: "baz"}, + cty.IndexStep{Key: cty.StringVal("not_exists")}, + cty.GetAttrStep{Name: "bar"}, + }, + ), + emptySrcRng, + }, { // Attribute value with subject already populated should not be disturbed. // (in a real case, this might've been passed through from a deeper function // in the call stack, for example.) 
&attributeDiagnostic{ + attrPath: cty.Path{cty.GetAttrStep{Name: "foo"}}, diagnosticBase: diagnosticBase{ summary: "preexisting", detail: "detail", @@ -139,6 +187,342 @@ baz "b" { Filename: "somewhere_else.tf", }, }, + { + // Missing path + &attributeDiagnostic{ + diagnosticBase: diagnosticBase{ + summary: "missing path", + }, + }, + nil, + }, + + // Nested attributes + { + AttributeValue( + Error, + "parent.nested_str", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "parent"}, + cty.GetAttrStep{Name: "nested_str"}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 18, Column: 16, Byte: 145}, + End: SourcePos{Line: 18, Column: 23, Byte: 152}, + }, + }, + { + AttributeValue( + Error, + "parent.nested_str_tuple[99]", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "parent"}, + cty.GetAttrStep{Name: "nested_str_tuple"}, + cty.IndexStep{Key: cty.NumberIntVal(99)}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 19, Column: 3, Byte: 155}, + End: SourcePos{Line: 19, Column: 19, Byte: 171}, + }, + }, + { + AttributeValue( + Error, + "parent.nested_str_tuple[0]", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "parent"}, + cty.GetAttrStep{Name: "nested_str_tuple"}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 19, Column: 23, Byte: 175}, + End: SourcePos{Line: 19, Column: 27, Byte: 179}, + }, + }, + { + AttributeValue( + Error, + "parent.nested_str_tuple[2]", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "parent"}, + cty.GetAttrStep{Name: "nested_str_tuple"}, + cty.IndexStep{Key: cty.NumberIntVal(2)}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 19, Column: 36, Byte: 188}, + End: SourcePos{Line: 19, Column: 42, Byte: 194}, + }, + }, + { + AttributeValue( + Error, + "parent.nested_num_tuple[0]", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "parent"}, + cty.GetAttrStep{Name: "nested_num_tuple"}, + cty.IndexStep{Key: 
cty.NumberIntVal(0)}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 20, Column: 23, Byte: 218}, + End: SourcePos{Line: 20, Column: 24, Byte: 219}, + }, + }, + { + AttributeValue( + Error, + "parent.nested_num_tuple[1]", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "parent"}, + cty.GetAttrStep{Name: "nested_num_tuple"}, + cty.IndexStep{Key: cty.NumberIntVal(1)}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 20, Column: 26, Byte: 221}, + End: SourcePos{Line: 20, Column: 30, Byte: 225}, + }, + }, + { + AttributeValue( + Error, + "parent.nested_map.first_key", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "parent"}, + cty.GetAttrStep{Name: "nested_map"}, + cty.IndexStep{Key: cty.StringVal("first_key")}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 22, Column: 19, Byte: 266}, + End: SourcePos{Line: 22, Column: 30, Byte: 277}, + }, + }, + { + AttributeValue( + Error, + "parent.nested_map.second_key", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "parent"}, + cty.GetAttrStep{Name: "nested_map"}, + cty.IndexStep{Key: cty.StringVal("second_key")}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 23, Column: 19, Byte: 297}, + End: SourcePos{Line: 23, Column: 28, Byte: 306}, + }, + }, + { + AttributeValue( + Error, + "parent.nested_map.undefined_key", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "parent"}, + cty.GetAttrStep{Name: "nested_map"}, + cty.IndexStep{Key: cty.StringVal("undefined_key")}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 21, Column: 3, Byte: 233}, + End: SourcePos{Line: 21, Column: 13, Byte: 243}, + }, + }, + + // Root attributes of complex types + { + AttributeValue( + Error, + "tuple_of_one[0]", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "tuple_of_one"}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 26, Column: 17, Byte: 330}, + 
End: SourcePos{Line: 26, Column: 22, Byte: 335}, + }, + }, + { + AttributeValue( + Error, + "tuple_of_two[0]", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "tuple_of_two"}, + cty.IndexStep{Key: cty.NumberIntVal(0)}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 27, Column: 17, Byte: 353}, + End: SourcePos{Line: 27, Column: 24, Byte: 360}, + }, + }, + { + AttributeValue( + Error, + "tuple_of_two[1]", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "tuple_of_two"}, + cty.IndexStep{Key: cty.NumberIntVal(1)}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 27, Column: 26, Byte: 362}, + End: SourcePos{Line: 27, Column: 33, Byte: 369}, + }, + }, + { + AttributeValue( + Error, + "tuple_of_one[null]", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "tuple_of_one"}, + cty.IndexStep{Key: cty.NullVal(cty.Number)}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 26, Column: 1, Byte: 314}, + End: SourcePos{Line: 26, Column: 13, Byte: 326}, + }, + }, + { + // index out of range + AttributeValue( + Error, + "tuple_of_two[99]", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "tuple_of_two"}, + cty.IndexStep{Key: cty.NumberIntVal(99)}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 27, Column: 1, Byte: 337}, + End: SourcePos{Line: 27, Column: 13, Byte: 349}, + }, + }, + { + AttributeValue( + Error, + "root_map.first", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "root_map"}, + cty.IndexStep{Key: cty.StringVal("first")}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 29, Column: 13, Byte: 396}, + End: SourcePos{Line: 29, Column: 16, Byte: 399}, + }, + }, + { + AttributeValue( + Error, + "root_map.second", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "root_map"}, + cty.IndexStep{Key: cty.StringVal("second")}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 30, Column: 13, Byte: 413}, + End: SourcePos{Line: 30, Column: 
16, Byte: 416}, + }, + }, + { + AttributeValue( + Error, + "root_map.undefined_key", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "root_map"}, + cty.IndexStep{Key: cty.StringVal("undefined_key")}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 28, Column: 1, Byte: 371}, + End: SourcePos{Line: 28, Column: 9, Byte: 379}, + }, + }, + { + AttributeValue( + Error, + "simple_attr", + "detail", + cty.Path{ + cty.GetAttrStep{Name: "simple_attr"}, + }, + ), + &SourceRange{ + Filename: "test.tf", + Start: SourcePos{Line: 32, Column: 15, Byte: 434}, + End: SourcePos{Line: 32, Column: 20, Byte: 439}, + }, + }, + { + // This should never happen as error should always point to an attribute + // or index of an attribute, but we should not crash if it does + AttributeValue( + Error, + "key", + "index_step", + cty.Path{ + cty.IndexStep{Key: cty.StringVal("key")}, + }, + ), + emptySrcRng, + }, + { + // This should never happen as error should always point to an attribute + // or index of an attribute, but we should not crash if it does + AttributeValue( + Error, + "key.another", + "index_step", + cty.Path{ + cty.IndexStep{Key: cty.StringVal("key")}, + cty.IndexStep{Key: cty.StringVal("another")}, + }, + ), + emptySrcRng, + }, } for i, tc := range testCases { From ec127e4a7b3361d1c397ea922f4319766c4c74a1 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Thu, 15 Nov 2018 14:55:00 +0000 Subject: [PATCH 088/149] tfdiags: Detect more complex cfgs in ElaborateFromConfigBody --- tfdiags/contextual.go | 153 ++++++++++++++++++++++++++++-------------- 1 file changed, 103 insertions(+), 50 deletions(-) diff --git a/tfdiags/contextual.go b/tfdiags/contextual.go index a7e4e7d9aa7e..25b21403723a 100644 --- a/tfdiags/contextual.go +++ b/tfdiags/contextual.go @@ -131,11 +131,70 @@ func (d *attributeDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic // propagated to every place in Terraform, and this happens only in the // presence of errors where performance 
isn't a concern. - traverse := d.attrPath[:len(d.attrPath)-1] + traverse := d.attrPath[:] final := d.attrPath[len(d.attrPath)-1] - // If we have more than one step then we'll first try to traverse to - // a child body corresponding to the requested path. + // Index should never be the first step + // as indexing of top blocks (such as resources & data sources) + // is handled elsewhere + if _, isIdxStep := traverse[0].(cty.IndexStep); isIdxStep { + subject := SourceRangeFromHCL(body.MissingItemRange()) + ret.subject = &subject + return &ret + } + + // Process index separately + idxStep, hasIdx := final.(cty.IndexStep) + if hasIdx { + final = d.attrPath[len(d.attrPath)-2] + traverse = d.attrPath[:len(d.attrPath)-1] + } + + // If we have more than one step after removing index + // then we'll first try to traverse to a child body + // corresponding to the requested path. + if len(traverse) > 1 { + body = traversePathSteps(traverse, body) + } + + // Default is to indicate a missing item in the deepest body we reached + // while traversing. + subject := SourceRangeFromHCL(body.MissingItemRange()) + ret.subject = &subject + + // Once we get here, "final" should be a GetAttr step that maps to an + // attribute in our current body. 
+ finalStep, isAttr := final.(cty.GetAttrStep) + if !isAttr { + return &ret + } + + content, _, contentDiags := body.PartialContent(&hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: finalStep.Name, + Required: true, + }, + }, + }) + if contentDiags.HasErrors() { + return &ret + } + + if attr, ok := content.Attributes[finalStep.Name]; ok { + hclRange := attr.Expr.Range() + if hasIdx { + // Try to be more precise by finding index range + hclRange = hclRangeFromIndexStepAndAttribute(idxStep, attr) + } + subject = SourceRangeFromHCL(hclRange) + ret.subject = &subject + } + + return &ret +} + +func traversePathSteps(traverse []cty.PathStep, body hcl.Body) hcl.Body { for i := 0; i < len(traverse); i++ { step := traverse[i] @@ -173,9 +232,7 @@ func (d *attributeDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic }, }) if contentDiags.HasErrors() { - subject := SourceRangeFromHCL(body.MissingItemRange()) - ret.subject = &subject - return &ret + return body } filtered := make([]*hcl.Block, 0, len(content.Blocks)) for _, block := range content.Blocks { @@ -184,23 +241,21 @@ func (d *attributeDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic } } if len(filtered) == 0 { + // Step doesn't refer to a block + continue } switch indexType { case cty.NilType: // no index at all if len(filtered) != 1 { - subject := SourceRangeFromHCL(body.MissingItemRange()) - ret.subject = &subject - return &ret + return body } body = filtered[0].Body case cty.Number: var idx int err := gocty.FromCtyValue(indexVal, &idx) if err != nil || idx >= len(filtered) { - subject := SourceRangeFromHCL(body.MissingItemRange()) - ret.subject = &subject - return &ret + return body } body = filtered[idx].Body case cty.String: @@ -215,58 +270,56 @@ func (d *attributeDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic if block == nil { // No block with this key, so we'll just indicate a // missing item in the containing block. 
- subject := SourceRangeFromHCL(body.MissingItemRange()) - ret.subject = &subject - return &ret + return body } body = block.Body default: // Should never happen, because only string and numeric indices // are supported by cty collections. - subject := SourceRangeFromHCL(body.MissingItemRange()) - ret.subject = &subject - return &ret + return body } default: // For any other kind of step, we'll just return our current body // as the subject and accept that this is a little inaccurate. - subject := SourceRangeFromHCL(body.MissingItemRange()) - ret.subject = &subject - return &ret + return body } } + return body +} - // Default is to indicate a missing item in the deepest body we reached - // while traversing. - subject := SourceRangeFromHCL(body.MissingItemRange()) - ret.subject = &subject - - // Once we get here, "final" should be a GetAttr step that maps to an - // attribute in our current body. - finalStep, isAttr := final.(cty.GetAttrStep) - if !isAttr { - return &ret - } - - content, _, contentDiags := body.PartialContent(&hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: finalStep.Name, - Required: true, - }, - }, - }) - if contentDiags.HasErrors() { - return &ret - } - - if attr, ok := content.Attributes[finalStep.Name]; ok { - subject = SourceRangeFromHCL(attr.Expr.Range()) - ret.subject = &subject +func hclRangeFromIndexStepAndAttribute(idxStep cty.IndexStep, attr *hcl.Attribute) hcl.Range { + switch idxStep.Key.Type() { + case cty.Number: + var idx int + err := gocty.FromCtyValue(idxStep.Key, &idx) + items, diags := hcl.ExprList(attr.Expr) + if diags.HasErrors() { + return attr.Expr.Range() + } + if err != nil || idx >= len(items) { + return attr.NameRange + } + return items[idx].Range() + case cty.String: + pairs, diags := hcl.ExprMap(attr.Expr) + if diags.HasErrors() { + return attr.Expr.Range() + } + stepKey := idxStep.Key.AsString() + for _, kvPair := range pairs { + key, err := kvPair.Key.Value(nil) + if err != nil { + return 
attr.Expr.Range() + } + if key.AsString() == stepKey { + startRng := kvPair.Value.StartRange() + return startRng + } + } + return attr.NameRange } - - return &ret + return attr.Expr.Range() } func (d *attributeDiagnostic) Source() Source { From aca52ce2b91d719625766bed3bdf429170e9bcfe Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Thu, 15 Nov 2018 14:55:30 +0000 Subject: [PATCH 089/149] tfdiags: Document missing tests --- tfdiags/contextual_test.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tfdiags/contextual_test.go b/tfdiags/contextual_test.go index d98dd1d5b23d..937dfaf22fda 100644 --- a/tfdiags/contextual_test.go +++ b/tfdiags/contextual_test.go @@ -45,6 +45,13 @@ root_map = { } simple_attr = "val" ` + // TODO: Test ConditionalExpr + // TODO: Test ForExpr + // TODO: Test FunctionCallExpr + // TODO: Test IndexExpr + // TODO: Test interpolation + // TODO: Test SplatExpr + f, parseDiags := hclsyntax.ParseConfig([]byte(testConfig), "test.tf", hcl.Pos{Line: 1, Column: 1}) if len(parseDiags) != 0 { t.Fatal(parseDiags) From 04439595abab9ac849d7d4c781e78df00336afe2 Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Thu, 15 Nov 2018 14:09:38 +0100 Subject: [PATCH 090/149] Make the Atlas backend work after updating depencies Newer versions of the retryablehttp package use a context, so we need to add that in our custom `CheckRetry` function. In addition I removed the `return true, nil` to continue retrying in case of an error, and instead directly call the `DefaultRetryPolicy`. This is because the `DefaultRetryPolicy` will now also take the context into consideration. 
--- backend/atlas/state_client.go | 7 ++- backend/atlas/state_client_test.go | 5 +- go.mod | 4 +- go.sum | 10 ++-- .../hashicorp/go-retryablehttp/client.go | 8 ++-- .../hashicorp/go-retryablehttp/go.mod | 3 ++ .../hashicorp/go-retryablehttp/go.sum | 2 + vendor/github.com/hashicorp/go-tfe/go.mod | 2 +- vendor/github.com/hashicorp/go-tfe/go.sum | 4 +- .../github.com/hashicorp/go-tfe/policy_set.go | 48 ++++++++++--------- vendor/modules.txt | 4 +- 11 files changed, 52 insertions(+), 45 deletions(-) create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/go.mod create mode 100644 vendor/github.com/hashicorp/go-retryablehttp/go.sum diff --git a/backend/atlas/state_client.go b/backend/atlas/state_client.go index e49cb719241b..0e15ff17d082 100644 --- a/backend/atlas/state_client.go +++ b/backend/atlas/state_client.go @@ -2,6 +2,7 @@ package atlas import ( "bytes" + "context" "crypto/md5" "crypto/tls" "crypto/x509" @@ -231,7 +232,7 @@ func (c *stateClient) http() (*retryablehttp.Client, error) { } rc := retryablehttp.NewClient() - rc.CheckRetry = func(resp *http.Response, err error) (bool, error) { + rc.CheckRetry = func(ctx context.Context, resp *http.Response, err error) (bool, error) { if err != nil { // don't bother retrying if the certs don't match if err, ok := err.(*url.Error); ok { @@ -239,10 +240,8 @@ func (c *stateClient) http() (*retryablehttp.Client, error) { return false, nil } } - // continue retrying - return true, nil } - return retryablehttp.DefaultRetryPolicy(resp, err) + return retryablehttp.DefaultRetryPolicy(ctx, resp, err) } t := cleanhttp.DefaultTransport() diff --git a/backend/atlas/state_client_test.go b/backend/atlas/state_client_test.go index 28a2c701c1a8..6c370941affb 100644 --- a/backend/atlas/state_client_test.go +++ b/backend/atlas/state_client_test.go @@ -2,6 +2,7 @@ package atlas import ( "bytes" + "context" "crypto/md5" "crypto/tls" "crypto/x509" @@ -83,12 +84,12 @@ func TestStateClient_noRetryOnBadCerts(t *testing.T) { // 
Instrument CheckRetry to make sure we didn't retry retries := 0 oldCheck := httpClient.CheckRetry - httpClient.CheckRetry = func(resp *http.Response, err error) (bool, error) { + httpClient.CheckRetry = func(ctx context.Context, resp *http.Response, err error) (bool, error) { if retries > 0 { t.Fatal("retried after certificate error") } retries++ - return oldCheck(resp, err) + return oldCheck(ctx, resp, err) } _, err = client.Get() diff --git a/go.mod b/go.mod index cb722b2f5000..6332228c422f 100644 --- a/go.mod +++ b/go.mod @@ -62,11 +62,11 @@ require ( github.com/hashicorp/go-msgpack v0.0.0-20150518234257-fa3f63826f7c // indirect github.com/hashicorp/go-multierror v1.0.0 github.com/hashicorp/go-plugin v0.0.0-20181002195811-1faddcf740b6 - github.com/hashicorp/go-retryablehttp v0.0.0-20180718195005-e651d75abec6 + github.com/hashicorp/go-retryablehttp v0.5.0 github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 github.com/hashicorp/go-safetemp v0.0.0-20180326211150-b1a1dbde6fdc // indirect github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86 // indirect - github.com/hashicorp/go-tfe v0.2.9 + github.com/hashicorp/go-tfe v0.3.0 github.com/hashicorp/go-uuid v1.0.0 github.com/hashicorp/go-version v0.0.0-20180322230233-23480c066577 github.com/hashicorp/golang-lru v0.5.0 // indirect diff --git a/go.sum b/go.sum index e7924ef9199e..31194a3f78c7 100644 --- a/go.sum +++ b/go.sum @@ -137,8 +137,8 @@ github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uP github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-plugin v0.0.0-20181002195811-1faddcf740b6 h1:czAJ5CXRPr+6vd6RGdJelApnxNbK3dAkakgBwLEWfrc= github.com/hashicorp/go-plugin v0.0.0-20181002195811-1faddcf740b6/go.mod h1:JSqWYsict+jzcj0+xElxyrBQRPNoiWQuddnxArJ7XHQ= -github.com/hashicorp/go-retryablehttp v0.0.0-20180718195005-e651d75abec6 h1:qCv4319q2q7XKn0MQbi8p37hsJ+9Xo8e6yojA73JVxk= 
-github.com/hashicorp/go-retryablehttp v0.0.0-20180718195005-e651d75abec6/go.mod h1:fXcdFsQoipQa7mwORhKad5jmDCeSy/RCGzWA08PO0lM= +github.com/hashicorp/go-retryablehttp v0.5.0 h1:aVN0FYnPwAgZI/hVzqwfMiM86ttcHTlQKbBVeVmXPIs= +github.com/hashicorp/go-retryablehttp v0.5.0/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:9HVkPxOpo+yO93Ah4yrO67d/qh0fbLLWbKqhYjyHq9A= github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg= github.com/hashicorp/go-safetemp v0.0.0-20180326211150-b1a1dbde6fdc h1:wAa9fGALVHfjYxZuXRnmuJG2CnwRpJYOTvY6YdErAh0= @@ -147,10 +147,8 @@ github.com/hashicorp/go-slug v0.1.0 h1:MJGEiOwRGrQCBmMMZABHqIESySFJ4ajrsjgDI4/aF github.com/hashicorp/go-slug v0.1.0/go.mod h1:+zDycQOzGqOqMW7Kn2fp9vz/NtqpMLQlgb9JUF+0km4= github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86 h1:7YOlAIO2YWnJZkQp7B5eFykaIY7C9JndqAFQyVV5BhM= github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-tfe v0.2.7 h1:Cy0irO9Qfgdn7FmvxSoXIQrRa3iM/kFmp/c0oCboCow= -github.com/hashicorp/go-tfe v0.2.7/go.mod h1:WJgjAJVdnXYPOWF6j66VI20djUGfeFjeayIgUDhohsU= -github.com/hashicorp/go-tfe v0.2.9 h1:CmxjF5zBKh5XBf2fMseJPaSKxKIauIIS4r+6+hNX8JM= -github.com/hashicorp/go-tfe v0.2.9/go.mod h1:WJgjAJVdnXYPOWF6j66VI20djUGfeFjeayIgUDhohsU= +github.com/hashicorp/go-tfe v0.3.0 h1:X0oM8RNKgMlmaMOEzLkx8/RTIC3d2K30R8+G4cSXJPc= +github.com/hashicorp/go-tfe v0.3.0/go.mod h1:SRMjgjY06SfEKstIPRUVMtQfhSYR2H3GHVop0lfedkY= github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v0.0.0-20180322230233-23480c066577 h1:at4+18LrM8myamuV7/vT6x2s1JNXp2k4PsSbt4I02X4= diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go 
b/vendor/github.com/hashicorp/go-retryablehttp/client.go index 21f45e5ed647..04d3216b809d 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/client.go +++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go @@ -49,6 +49,9 @@ var ( // a new client. It is purposely private to avoid modifications. defaultClient = NewClient() + // random is used to generate pseudo-random numbers. + random = rand.New(rand.NewSource(time.Now().UnixNano())) + // We need to consume response bodies to maintain http connections, but // limit the size we consume to respReadLimit. respReadLimit = int64(4096) @@ -319,14 +322,11 @@ func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Resp return min * time.Duration(attemptNum) } - // Seed rand; doing this every time is fine - rand := rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) - // Pick a random number that lies somewhere between the min and max and // multiply by the attemptNum. attemptNum starts at zero so we always // increment here. We first get a random percentage, then apply that to the // difference between min and max, and add to min. 
- jitter := rand.Float64() * float64(max-min) + jitter := random.Float64() * float64(max-min) jitterMin := int64(jitter) + int64(min) return time.Duration(jitterMin * int64(attemptNum)) } diff --git a/vendor/github.com/hashicorp/go-retryablehttp/go.mod b/vendor/github.com/hashicorp/go-retryablehttp/go.mod new file mode 100644 index 000000000000..d28c8c8eb6c4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/go.mod @@ -0,0 +1,3 @@ +module github.com/hashicorp/go-retryablehttp + +require github.com/hashicorp/go-cleanhttp v0.5.0 diff --git a/vendor/github.com/hashicorp/go-retryablehttp/go.sum b/vendor/github.com/hashicorp/go-retryablehttp/go.sum new file mode 100644 index 000000000000..3ed0fd98e921 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/go.sum @@ -0,0 +1,2 @@ +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= diff --git a/vendor/github.com/hashicorp/go-tfe/go.mod b/vendor/github.com/hashicorp/go-tfe/go.mod index 8cad701d8150..1d2053c5a898 100644 --- a/vendor/github.com/hashicorp/go-tfe/go.mod +++ b/vendor/github.com/hashicorp/go-tfe/go.mod @@ -4,7 +4,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/google/go-querystring v1.0.0 github.com/hashicorp/go-cleanhttp v0.5.0 - github.com/hashicorp/go-retryablehttp v0.0.0-20180718195005-e651d75abec6 + github.com/hashicorp/go-retryablehttp v0.5.0 github.com/hashicorp/go-slug v0.1.0 github.com/hashicorp/go-uuid v1.0.0 github.com/pmezard/go-difflib v1.0.0 // indirect diff --git a/vendor/github.com/hashicorp/go-tfe/go.sum b/vendor/github.com/hashicorp/go-tfe/go.sum index dcc948753dd7..ac10c9d075bd 100644 --- a/vendor/github.com/hashicorp/go-tfe/go.sum +++ b/vendor/github.com/hashicorp/go-tfe/go.sum @@ -4,8 +4,8 @@ github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASu github.com/google/go-querystring 
v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-retryablehttp v0.0.0-20180718195005-e651d75abec6 h1:qCv4319q2q7XKn0MQbi8p37hsJ+9Xo8e6yojA73JVxk= -github.com/hashicorp/go-retryablehttp v0.0.0-20180718195005-e651d75abec6/go.mod h1:fXcdFsQoipQa7mwORhKad5jmDCeSy/RCGzWA08PO0lM= +github.com/hashicorp/go-retryablehttp v0.5.0 h1:aVN0FYnPwAgZI/hVzqwfMiM86ttcHTlQKbBVeVmXPIs= +github.com/hashicorp/go-retryablehttp v0.5.0/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-slug v0.1.0 h1:MJGEiOwRGrQCBmMMZABHqIESySFJ4ajrsjgDI4/aFI0= github.com/hashicorp/go-slug v0.1.0/go.mod h1:+zDycQOzGqOqMW7Kn2fp9vz/NtqpMLQlgb9JUF+0km4= github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= diff --git a/vendor/github.com/hashicorp/go-tfe/policy_set.go b/vendor/github.com/hashicorp/go-tfe/policy_set.go index 15400da14927..d70ad0835085 100644 --- a/vendor/github.com/hashicorp/go-tfe/policy_set.go +++ b/vendor/github.com/hashicorp/go-tfe/policy_set.go @@ -16,7 +16,7 @@ var _ PolicySets = (*policySets)(nil) // // TFE API docs: https://www.terraform.io/docs/enterprise/api/policies.html type PolicySets interface { - // List all the policy sets for a given organization + // List all the policy sets for a given organization. List(ctx context.Context, organization string, options PolicySetListOptions) (*PolicySetList, error) // Create a policy set and associate it with an organization. @@ -34,11 +34,11 @@ type PolicySets interface { // Remove policies from a policy set. RemovePolicies(ctx context.Context, policySetID string, options PolicySetRemovePoliciesOptions) error - // Attach a policy set to workspaces. 
- AttachToWorkspaces(ctx context.Context, policySetID string, options PolicySetAttachToWorkspacesOptions) error + // Add workspaces to a policy set. + AddWorkspaces(ctx context.Context, policySetID string, options PolicySetAddWorkspacesOptions) error - // Detach a policy set from workspaces. - DetachFromWorkspaces(ctx context.Context, policySetID string, options PolicySetDetachFromWorkspacesOptions) error + // Remove workspaces from a policy set. + RemoveWorkspaces(ctx context.Context, policySetID string, options PolicySetRemoveWorkspacesOptions) error // Delete a policy set by its ID. Delete(ctx context.Context, policyID string) error @@ -49,7 +49,7 @@ type policySets struct { client *Client } -// PolicySetList represents a list of policy sets.. +// PolicySetList represents a list of policy sets. type PolicySetList struct { *Pagination Items []*PolicySet @@ -80,7 +80,7 @@ type PolicySetListOptions struct { Search *string `url:"search[name],omitempty"` } -// List all the policies for a given organization +// List all the policies for a given organization. func (s *policySets) List(ctx context.Context, organization string, options PolicySetListOptions) (*PolicySetList, error) { if !validStringID(&organization) { return nil, errors.New("invalid value for organization") @@ -118,7 +118,7 @@ type PolicySetCreateOptions struct { // The initial members of the policy set. Policies []*Policy `jsonapi:"relation,policies,omitempty"` - // The initial list of workspaces the policy set should be attached to. + // The initial list of workspaces for which the policy set should be enforced. Workspaces []*Workspace `jsonapi:"relation,workspaces,omitempty"` } @@ -229,7 +229,8 @@ func (s *policySets) Update(ctx context.Context, policySetID string, options Pol return ps, err } -// PolicySetAddPoliciesOptions represents the options for adding policies to a policy set. +// PolicySetAddPoliciesOptions represents the options for adding policies +// to a policy set. 
type PolicySetAddPoliciesOptions struct { /// The policies to add to the policy set. Policies []*Policy @@ -263,7 +264,8 @@ func (s *policySets) AddPolicies(ctx context.Context, policySetID string, option return s.client.do(ctx, req, nil) } -// PolicySetRemovePoliciesOptions represents the options for removing policies from a policy set. +// PolicySetRemovePoliciesOptions represents the options for removing +// policies from a policy set. type PolicySetRemovePoliciesOptions struct { /// The policies to remove from the policy set. Policies []*Policy @@ -297,13 +299,14 @@ func (s *policySets) RemovePolicies(ctx context.Context, policySetID string, opt return s.client.do(ctx, req, nil) } -// PolicySetAttachToWorkspacesOptions represents the options for attaching a policy set to workspaces. -type PolicySetAttachToWorkspacesOptions struct { - /// The workspaces on which to attach the policy set. +// PolicySetAddWorkspacesOptions represents the options for adding workspaces +// to a policy set. +type PolicySetAddWorkspacesOptions struct { + /// The workspaces to add to the policy set. Workspaces []*Workspace } -func (o PolicySetAttachToWorkspacesOptions) valid() error { +func (o PolicySetAddWorkspacesOptions) valid() error { if o.Workspaces == nil { return errors.New("workspaces is required") } @@ -313,8 +316,8 @@ func (o PolicySetAttachToWorkspacesOptions) valid() error { return nil } -// Attach a policy set to workspaces -func (s *policySets) AttachToWorkspaces(ctx context.Context, policySetID string, options PolicySetAttachToWorkspacesOptions) error { +// Add workspaces to a policy set. 
+func (s *policySets) AddWorkspaces(ctx context.Context, policySetID string, options PolicySetAddWorkspacesOptions) error { if !validStringID(&policySetID) { return errors.New("invalid value for policy set ID") } @@ -331,13 +334,14 @@ func (s *policySets) AttachToWorkspaces(ctx context.Context, policySetID string, return s.client.do(ctx, req, nil) } -// PolicySetDetachFromWorkspacesOptions represents the options for detaching a policy set from workspaces. -type PolicySetDetachFromWorkspacesOptions struct { - /// The workspaces from which to detach the policy set. +// PolicySetRemoveWorkspacesOptions represents the options for removing +// workspaces from a policy set. +type PolicySetRemoveWorkspacesOptions struct { + /// The workspaces to remove from the policy set. Workspaces []*Workspace } -func (o PolicySetDetachFromWorkspacesOptions) valid() error { +func (o PolicySetRemoveWorkspacesOptions) valid() error { if o.Workspaces == nil { return errors.New("workspaces is required") } @@ -347,8 +351,8 @@ func (o PolicySetDetachFromWorkspacesOptions) valid() error { return nil } -// Detach a policy set from workspaces -func (s *policySets) DetachFromWorkspaces(ctx context.Context, policySetID string, options PolicySetDetachFromWorkspacesOptions) error { +// Remove workspaces from a policy set. 
+func (s *policySets) RemoveWorkspaces(ctx context.Context, policySetID string, options PolicySetRemoveWorkspacesOptions) error { if !validStringID(&policySetID) { return errors.New("invalid value for policy set ID") } diff --git a/vendor/modules.txt b/vendor/modules.txt index ee153009144f..8073c8a4b100 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -307,7 +307,7 @@ github.com/hashicorp/go-hclog github.com/hashicorp/go-multierror # github.com/hashicorp/go-plugin v0.0.0-20181002195811-1faddcf740b6 github.com/hashicorp/go-plugin -# github.com/hashicorp/go-retryablehttp v0.0.0-20180718195005-e651d75abec6 +# github.com/hashicorp/go-retryablehttp v0.5.0 github.com/hashicorp/go-retryablehttp # github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 github.com/hashicorp/go-rootcerts @@ -315,7 +315,7 @@ github.com/hashicorp/go-rootcerts github.com/hashicorp/go-safetemp # github.com/hashicorp/go-slug v0.1.0 github.com/hashicorp/go-slug -# github.com/hashicorp/go-tfe v0.2.9 +# github.com/hashicorp/go-tfe v0.3.0 github.com/hashicorp/go-tfe # github.com/hashicorp/go-uuid v1.0.0 github.com/hashicorp/go-uuid From b872491baaf525b436d9698465f46c89ad43dcaa Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 13 Nov 2018 21:04:56 -0500 Subject: [PATCH 091/149] incremental progress towards applying diffs --- helper/plugin/grpc_provider.go | 26 +++++++++++++++++++++- terraform/diff.go | 40 ++++++++++++++++++++++++---------- 2 files changed, 54 insertions(+), 12 deletions(-) diff --git a/helper/plugin/grpc_provider.go b/helper/plugin/grpc_provider.go index a25aa67459a9..a4cb1779829b 100644 --- a/helper/plugin/grpc_provider.go +++ b/helper/plugin/grpc_provider.go @@ -466,6 +466,15 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl priorState.Meta = priorPrivate + // We now rebuild the state through the ResourceData, so that the set indexes + // match what helper/schema expects. 
+ data, err := schema.InternalMap(res.Schema).Data(priorState, nil) + if err != nil { + // FIXME + panic(err) + } + priorState = data.State() + // turn the proposed state into a legacy configuration config := terraform.NewResourceConfigShimmed(proposedNewStateVal, block) @@ -489,8 +498,23 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl return resp, nil } + if priorState == nil { + priorState = &terraform.InstanceState{} + } + // now we need to apply the diff to the prior state, so get the planned state - plannedStateVal, err := schema.ApplyDiff(priorStateVal, diff, block) + plannedAttrs, err := diff.Apply(priorState.Attributes, block) + plannedStateVal, err := hcl2shim.HCL2ValueFromFlatmap(plannedAttrs, block.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal, err = block.CoerceValue(plannedStateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } if err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) return resp, nil diff --git a/terraform/diff.go b/terraform/diff.go index 92b575086ed1..abf79e2012ce 100644 --- a/terraform/diff.go +++ b/terraform/diff.go @@ -415,18 +415,40 @@ func (d *InstanceDiff) Unlock() { d.mu.Unlock() } // This method is intended for shimming old subsystems that still use this // legacy diff type to work with the new-style types. func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) (cty.Value, error) { - // We always build a new value here, even if the given diff is "empty", - // because we might be planning to create a new instance that happens - // to have no attributes set, and so we want to produce an empty object - // rather than just echoing back the null old value. // Create an InstanceState attributes from our existing state. // We can use this to more easily apply the diff changes. 
attrs := hcl2shim.FlatmapValueFromHCL2(base) + applied, err := d.Apply(attrs, schema) + if err != nil { + return base, err + } + + val, err := hcl2shim.HCL2ValueFromFlatmap(applied, schema.ImpliedType()) + if err != nil { + return base, err + } + + return schema.CoerceValue(val) +} + +// Apply applies the diff to the provided flatmapped attributes, +// returning the new instance attributes. +// +// This method is intended for shimming old subsystems that still use this +// legacy diff type to work with the new-style types. +func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block) (map[string]string, error) { + // We always build a new value here, even if the given diff is "empty", + // because we might be planning to create a new instance that happens + // to have no attributes set, and so we want to produce an empty object + // rather than just echoing back the null old value. if attrs == nil { attrs = map[string]string{} } + fmt.Printf("\nBASE ATTRS: %#v\n", attrs) + fmt.Printf("\nDIFF: %#v\n", d) + if d.Destroy || d.DestroyDeposed || d.DestroyTainted { // to mark a destroy, we remove all attributes attrs = map[string]string{} @@ -444,7 +466,7 @@ func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) // if new or old is unknown, then there's no mismatch old != config.UnknownVariableValue && diff.Old != config.UnknownVariableValue { - return base, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old) + return nil, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old) } if diff.NewComputed { @@ -465,12 +487,8 @@ func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) attrs[attr] = diff.New } - val, err := hcl2shim.HCL2ValueFromFlatmap(attrs, schema.ImpliedType()) - if err != nil { - return val, err - } - - return schema.CoerceValue(val) + fmt.Printf("\nPLANNED ATTRS: %#v\n", attrs) + return 
attrs, nil } // ResourceAttrDiff is the diff of a single attribute of a resource. From df04e2e7a6d98dd6f1dbbf1cffd12e64d9d33d48 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 13 Nov 2018 22:57:55 -0500 Subject: [PATCH 092/149] move InstanceState shim into schema.Resource This was the resource can rebuild the flatmapped state using the schema and ResourceData, providing us the the correct set key values. --- helper/schema/resource.go | 24 ++++++++++++++++++++++-- helper/schema/shims.go | 10 +--------- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/helper/schema/resource.go b/helper/schema/resource.go index a26dfc9f88da..d96bbcfde2a0 100644 --- a/helper/schema/resource.go +++ b/helper/schema/resource.go @@ -155,6 +155,27 @@ type Resource struct { Timeouts *ResourceTimeout } +// ShimInstanceStateFromValue converts a cty.Value to a +// terraform.InstanceState. +func (r *Resource) ShimInstanceStateFromValue(state cty.Value) (*terraform.InstanceState, error) { + // Get the raw shimmed value. While this is correct, the set hashes don't + // match those from the Schema. + s := terraform.NewInstanceStateShimmedFromValue(state, r.SchemaVersion) + + // We now rebuild the state through the ResourceData, so that the set indexes + // match what helper/schema expects. + data, err := schemaMap(r.Schema).Data(s, nil) + if err != nil { + return nil, err + } + + s = data.State() + if s == nil { + s = &terraform.InstanceState{} + } + return s, nil +} + // See Resource documentation. 
type CreateFunc func(*ResourceData, interface{}) error @@ -550,8 +571,7 @@ func (r *Resource) upgradeState(s *terraform.InstanceState, meta interface{}) (* return nil, err } - s = InstanceStateFromStateValue(stateVal, r.SchemaVersion) - return s, nil + return r.ShimInstanceStateFromValue(stateVal) } // InternalValidate should be called to validate the structure diff --git a/helper/schema/shims.go b/helper/schema/shims.go index 52ae7d744640..5b978ee8e2ed 100644 --- a/helper/schema/shims.go +++ b/helper/schema/shims.go @@ -23,7 +23,7 @@ func DiffFromValues(prior, planned cty.Value, res *Resource) (*terraform.Instanc // only needs to be created for the apply operation, and any customizations // have already been done. func diffFromValues(prior, planned cty.Value, res *Resource, cust CustomizeDiffFunc) (*terraform.InstanceDiff, error) { - instanceState := InstanceStateFromStateValue(prior, res.SchemaVersion) + instanceState := terraform.NewInstanceStateShimmedFromValue(prior, res.SchemaVersion) configSchema := res.CoreConfigSchema() @@ -85,11 +85,3 @@ func JSONMapToStateValue(m map[string]interface{}, block *configschema.Block) (c func StateValueFromInstanceState(is *terraform.InstanceState, ty cty.Type) (cty.Value, error) { return is.AttrsAsObjectValue(ty) } - -// InstanceStateFromStateValue converts a cty.Value to a -// terraform.InstanceState. This function requires the schema version used by -// the provider, because the legacy providers used the private Meta data in the -// InstanceState to store the schema version. 
-func InstanceStateFromStateValue(state cty.Value, schemaVersion int) *terraform.InstanceState { - return terraform.NewInstanceStateShimmedFromValue(state, schemaVersion) -} From 34766ca6661701e7aba27ea211b973f5a0683c27 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 13 Nov 2018 23:00:02 -0500 Subject: [PATCH 093/149] use the new InstanceState shim --- helper/plugin/grpc_provider.go | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/helper/plugin/grpc_provider.go b/helper/plugin/grpc_provider.go index a4cb1779829b..4d6aa7307ed4 100644 --- a/helper/plugin/grpc_provider.go +++ b/helper/plugin/grpc_provider.go @@ -387,7 +387,11 @@ func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadReso return resp, nil } - instanceState := schema.InstanceStateFromStateValue(stateVal, res.SchemaVersion) + instanceState, err := res.ShimInstanceStateFromValue(stateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } newInstanceState, err := res.RefreshWithoutUpgrade(instanceState, s.provider.Meta()) if err != nil { @@ -455,7 +459,12 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl Type: req.TypeName, } - priorState := schema.InstanceStateFromStateValue(priorStateVal, res.SchemaVersion) + priorState, err := res.ShimInstanceStateFromValue(priorStateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + priorPrivate := make(map[string]interface{}) if len(req.PriorPrivate) > 0 { if err := json.Unmarshal(req.PriorPrivate, &priorPrivate); err != nil { @@ -466,15 +475,6 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl priorState.Meta = priorPrivate - // We now rebuild the state through the ResourceData, so that the set indexes - // match what helper/schema expects. 
- data, err := schema.InternalMap(res.Schema).Data(priorState, nil) - if err != nil { - // FIXME - panic(err) - } - priorState = data.State() - // turn the proposed state into a legacy configuration config := terraform.NewResourceConfigShimmed(proposedNewStateVal, block) @@ -595,7 +595,12 @@ func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.A Type: req.TypeName, } - priorState := schema.InstanceStateFromStateValue(priorStateVal, res.SchemaVersion) + //priorState := terraform.NewInstanceStateShimmedFromValue(priorStateVal, res.SchemaVersion) + priorState, err := res.ShimInstanceStateFromValue(priorStateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } private := make(map[string]interface{}) if len(req.PlannedPrivate) > 0 { From 16f28f73484a42b56584027889be71c72d30d480 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 13 Nov 2018 23:00:21 -0500 Subject: [PATCH 094/149] new mechanism for applying a diff to a value This attempts to apply the diff in order to get consistent output from the shimmed values. 
--- terraform/diff.go | 250 +++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 227 insertions(+), 23 deletions(-) diff --git a/terraform/diff.go b/terraform/diff.go index abf79e2012ce..fad3e531c687 100644 --- a/terraform/diff.go +++ b/terraform/diff.go @@ -446,49 +446,253 @@ func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block attrs = map[string]string{} } - fmt.Printf("\nBASE ATTRS: %#v\n", attrs) - fmt.Printf("\nDIFF: %#v\n", d) + return d.applyDiff(attrs, schema) +} + +func (d *InstanceDiff) applyDiff(attrs map[string]string, schema *configschema.Block) (map[string]string, error) { + // We always build a new value here, even if the given diff is "empty", + // because we might be planning to create a new instance that happens + // to have no attributes set, and so we want to produce an empty object + // rather than just echoing back the null old value. + + // Rather applying the diff to mutate the attrs, we'll copy new values into + // here to avoid the possibility of leaving stale values. + result := map[string]string{} if d.Destroy || d.DestroyDeposed || d.DestroyTainted { - // to mark a destroy, we remove all attributes - attrs = map[string]string{} - } else if attrs["id"] == "" || d.RequiresNew() { - // Since "id" is always computed, make sure it always has a value. 
Set - // it as unknown to generate the correct cty.Value - attrs["id"] = config.UnknownVariableValue + return result, nil + } + + // iterate over the schema rather than the attributes, so we can handle + // blocks separately from plain attributes + for name, attrSchema := range schema.Attributes { + var err error + var newAttrs map[string]string + + // handle non-block collections + switch ty := attrSchema.Type; { + case ty.IsListType() || ty.IsTupleType() || ty.IsMapType(): + newAttrs, err = d.applyCollectionDiff(name, attrs, attrSchema) + case ty.IsSetType(): + newAttrs, err = d.applySetDiff(name, attrs, schema) + default: + newAttrs, err = d.applyAttrDiff(name, attrs, attrSchema) + } + + if err != nil { + return result, err + } + + for k, v := range newAttrs { + result[k] = v + } + } + + for name, block := range schema.BlockTypes { + newAttrs, err := d.applySetDiff(name, attrs, &block.Block) + if err != nil { + return result, err + } + + for k, v := range newAttrs { + result[k] = v + } + } + + return result, nil +} + +func (d *InstanceDiff) applyAttrDiff(attrName string, oldAttrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + result := map[string]string{} + + diff := d.Attributes[attrName] + old, exists := oldAttrs[attrName] + + if diff != nil && diff.NewComputed { + result[attrName] = config.UnknownVariableValue + return result, nil + } + + // skip "id", as we already handled it + if attrName == "id" { + if old == "" { + result["id"] = config.UnknownVariableValue + } else { + result["id"] = old + } + return result, nil + } + + // attribute diffs are sometimes missed, so assume no diff means keep the + // old value + if diff == nil { + if exists { + result[attrName] = old + + } else { + // We need required values, so set those with an empty value. It + // must be set in the config, since if it were missing it would have + // failed validation. 
+ if attrSchema.Required { + result[attrName] = "" + } + } + return result, nil + } + + // check for missmatched diff values + if exists && + old != diff.Old && + old != config.UnknownVariableValue && + diff.Old != config.UnknownVariableValue { + return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attrName, diff.Old, old) + } + + if attrSchema.Computed && diff.NewComputed { + result[attrName] = config.UnknownVariableValue + return result, nil + } + + if diff.NewRemoved { + // don't set anything in the new value + return result, nil + } + + result[attrName] = diff.New + return result, nil +} + +func (d *InstanceDiff) applyCollectionDiff(attrName string, oldAttrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + result := map[string]string{} + + // check the index first for special handling + for k, diff := range d.Attributes { + // check the index value, which can be set, and 0 + if k == attrName+".#" || k == attrName+".%" { + if diff.NewRemoved { + return result, nil + } + + if diff.NewComputed { + result[k] = config.UnknownVariableValue + return result, nil + } + + // do what the diff tells us to here, so that it's consistent with applies + if diff.New == "0" { + result[k] = "0" + return result, nil + } + } + } + + // collect all the keys from the diff and the old state + keys := map[string]bool{} + for k := range d.Attributes { + keys[k] = true + } + for k := range oldAttrs { + keys[k] = true + } + + idx := attrName + ".#" + if attrSchema.Type.IsMapType() { + idx = attrName + ".%" + } + + // record if we got the index from the diff + setIndex := false + + for k := range keys { + if !strings.HasPrefix(k, attrName+".") { + continue + } + + // we need to verify if we saw the index later + if k == idx { + setIndex = true + } + + res, err := d.applyAttrDiff(k, oldAttrs, attrSchema) + if err != nil { + return result, err + } + + for k, v := range res { + result[k] = v + } } - for attr, 
diff := range d.Attributes { - old, exists := attrs[attr] + // Verify we have the index count. + // If it wasn't added from a diff, check it from the previous value. + // Make sure we keep the count if it existed before, so we can tell if it + // existed, or was null. + if !setIndex { + old := oldAttrs[idx] + if old != "" { + result[idx] = old + } + } - if exists && - old != diff.Old && - // if new or old is unknown, then there's no mismatch - old != config.UnknownVariableValue && - diff.Old != config.UnknownVariableValue { - return nil, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old) + return result, nil +} + +func (d *InstanceDiff) applySetDiff(attrName string, oldAttrs map[string]string, block *configschema.Block) (map[string]string, error) { + result := map[string]string{} + + idx := attrName + ".#" + // first find the index diff + for k, diff := range d.Attributes { + if k != idx { + continue } if diff.NewComputed { - attrs[attr] = config.UnknownVariableValue + result[k] = config.UnknownVariableValue + return result, nil + } + } + + // Flag if there was a diff used in the set at all. + // If not, take the pre-existing set values + setDiff := false + + // here we're trusting the diff to supply all the known items + for k, diff := range d.Attributes { + if !strings.HasPrefix(k, attrName+".") { continue } + setDiff = true if diff.NewRemoved { - delete(attrs, attr) + // no longer in the set + continue + } + + if diff.NewComputed { + result[k] = config.UnknownVariableValue continue } - // sometimes helper/schema gives us values that aren't really a diff - if diff.Old == diff.New { + // helper/schema doesn't list old removed values, but since the set + // exists NewRemoved may not be true. 
+ if diff.New == "" && diff.Old == "" { continue } - attrs[attr] = diff.New + result[k] = diff.New + } + + // use the existing values if there was no set diff at all + if !setDiff { + for k, v := range oldAttrs { + if strings.HasPrefix(k, attrName+".") { + result[k] = v + } + } } - fmt.Printf("\nPLANNED ATTRS: %#v\n", attrs) - return attrs, nil + return result, nil } // ResourceAttrDiff is the diff of a single attribute of a resource. From 71b55601ce505fec3b5f9aff490a28bea8079050 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 13 Nov 2018 23:01:51 -0500 Subject: [PATCH 095/149] new failing tests for nested sets --- builtin/providers/test/resource_nested_set.go | 13 +++++++++++-- .../providers/test/resource_nested_set_test.go | 18 ++++++++++++++++++ 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/builtin/providers/test/resource_nested_set.go b/builtin/providers/test/resource_nested_set.go index c555265c3d0c..318e020e1b63 100644 --- a/builtin/providers/test/resource_nested_set.go +++ b/builtin/providers/test/resource_nested_set.go @@ -63,13 +63,22 @@ func testResourceNestedSet() *schema.Resource { Type: schema.TypeInt, Optional: true, }, + "bool": { + Type: schema.TypeBool, + Optional: true, + }, }, }, }, "optional": { - Type: schema.TypeString, - ForceNew: true, + Type: schema.TypeString, + // commenting this causes it to get missed during apply + //ForceNew: true, + Optional: true, + }, + "bool": { + Type: schema.TypeBool, Optional: true, }, }, diff --git a/builtin/providers/test/resource_nested_set_test.go b/builtin/providers/test/resource_nested_set_test.go index 7a27b75a2457..fb5694fa70bb 100644 --- a/builtin/providers/test/resource_nested_set_test.go +++ b/builtin/providers/test/resource_nested_set_test.go @@ -200,6 +200,24 @@ resource "test_resource_nested_set" "foo" { `), Check: checkFunc, }, + + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { + single { + value = "bar" + optional = "baz" + } 
+ multi { + set { + required = "new" + optional_int = 3 + } + } +} + `), + Check: checkFunc, + }, }, }) } From 21dfa5676618dcccad1c3336c333db2856b89935 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 14 Nov 2018 14:19:29 -0500 Subject: [PATCH 096/149] use ShimInstanceStateFromValue in DiffFromValues This makes sure the diff is generated with the matching set ids from helper/schema. Update the tests to add ID fields to the state, which will exists in practice, since any state traversing through the shims will have the ID inserted. --- helper/schema/shims.go | 5 +- helper/schema/shims_test.go | 180 +++++++++++++++++------------------- 2 files changed, 87 insertions(+), 98 deletions(-) diff --git a/helper/schema/shims.go b/helper/schema/shims.go index 5b978ee8e2ed..d99fb39651ab 100644 --- a/helper/schema/shims.go +++ b/helper/schema/shims.go @@ -23,7 +23,10 @@ func DiffFromValues(prior, planned cty.Value, res *Resource) (*terraform.Instanc // only needs to be created for the apply operation, and any customizations // have already been done. 
func diffFromValues(prior, planned cty.Value, res *Resource, cust CustomizeDiffFunc) (*terraform.InstanceDiff, error) { - instanceState := terraform.NewInstanceStateShimmedFromValue(prior, res.SchemaVersion) + instanceState, err := res.ShimInstanceStateFromValue(prior) + if err != nil { + return nil, err + } configSchema := res.CoreConfigSchema() diff --git a/helper/schema/shims_test.go b/helper/schema/shims_test.go index 68186497bc12..072c8ca7cb05 100644 --- a/helper/schema/shims_test.go +++ b/helper/schema/shims_test.go @@ -599,6 +599,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "availability_zone": "foo", }, @@ -901,6 +902,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "delete": "false", }, @@ -953,43 +955,6 @@ func TestShimSchemaMap_Diff(t *testing.T) { Err: false, }, - /* - // disabled for shims - // there is no longer any "list promotion" - { - Name: "List decode with promotion", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeList, - Required: true, - Elem: &Schema{Type: TypeInt}, - PromoteSingle: true, - }, - }, - - State: nil, - - Config: map[string]interface{}{ - "ports": "5", - }, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "0", - New: "1", - }, - "ports.0": &terraform.ResourceAttrDiff{ - Old: "", - New: "5", - }, - }, - }, - - Err: false, - }, - */ - { Name: "List decode with promotion with list", Schema: map[string]*Schema{ @@ -1109,6 +1074,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "ports.#": "3", "ports.0": "1", @@ -1137,6 +1103,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "ports.#": "2", "ports.0": "1", @@ -1353,6 
+1320,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "ports.#": "0", }, @@ -1493,6 +1461,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "ports.#": "2", "ports.1": "1", @@ -1528,55 +1497,6 @@ func TestShimSchemaMap_Diff(t *testing.T) { Err: false, }, - /* - // disabled for shims - // you can't remove a required attribute - { - Name: "Set-7", - Schema: map[string]*Schema{ - "ports": &Schema{ - Type: TypeSet, - Required: true, - Elem: &Schema{Type: TypeInt}, - Set: func(a interface{}) int { - return a.(int) - }, - }, - }, - - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "ports.#": "2", - "ports.1": "1", - "ports.2": "2", - }, - }, - - Config: map[string]interface{}{}, - - Diff: &terraform.InstanceDiff{ - Attributes: map[string]*terraform.ResourceAttrDiff{ - "ports.#": &terraform.ResourceAttrDiff{ - Old: "2", - New: "0", - }, - "ports.1": &terraform.ResourceAttrDiff{ - Old: "1", - New: "0", - NewRemoved: true, - }, - "ports.2": &terraform.ResourceAttrDiff{ - Old: "2", - New: "0", - NewRemoved: true, - }, - }, - }, - - Err: false, - }, - */ - { Name: "Set-8", Schema: map[string]*Schema{ @@ -1592,6 +1512,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "availability_zone": "bar", "ports.#": "1", @@ -1634,6 +1555,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "ingress.#": "2", "ingress.80.ports.#": "1", @@ -1718,6 +1640,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "availability_zone": "foo", "port": "80", @@ -1734,7 +1657,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, { - Name: "", + Name: "computed", Schema: map[string]*Schema{ 
"availability_zone": &Schema{ Type: TypeString, @@ -1748,11 +1671,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, }, - State: &terraform.InstanceState{ - Attributes: map[string]string{ - "port": "80", - }, - }, + State: nil, Config: map[string]interface{}{ "port": 80, @@ -1763,12 +1682,47 @@ func TestShimSchemaMap_Diff(t *testing.T) { "availability_zone": &terraform.ResourceAttrDiff{ NewComputed: true, }, + "port": &terraform.ResourceAttrDiff{ + New: "80", + }, }, }, Err: false, }, + { + Name: "computed, exists", + Schema: map[string]*Schema{ + "availability_zone": &Schema{ + Type: TypeString, + Computed: true, + ComputedWhen: []string{"port"}, + }, + + "port": &Schema{ + Type: TypeInt, + Optional: true, + }, + }, + + State: &terraform.InstanceState{ + ID: "id", + Attributes: map[string]string{ + "port": "80", + }, + }, + + Config: map[string]interface{}{ + "port": 80, + }, + + // there is no computed diff when the instance exists already + Diff: nil, + + Err: false, + }, + { Name: "Maps-1", Schema: map[string]*Schema{ @@ -1811,6 +1765,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "config_vars.%": "1", "config_vars.foo": "bar", @@ -1850,6 +1805,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "vars.%": "1", "vars.foo": "bar", @@ -1889,6 +1845,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "vars.%": "1", "vars.foo": "bar", @@ -1912,6 +1869,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "config_vars.#": "1", "config_vars.0.%": "1", @@ -1954,6 +1912,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "config_vars.#": "1", "config_vars.0.%": "2", @@ -2005,6 +1964,7 @@ func 
TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "availability_zone": "bar", "address": "foo", @@ -2049,6 +2009,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "availability_zone": "bar", "ports.#": "1", @@ -2088,6 +2049,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "instances.#": "0", }, @@ -2275,6 +2237,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "vars.%": "0", }, @@ -2303,7 +2266,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, { - Name: " - Empty", + Name: "Empty", Schema: map[string]*Schema{}, State: &terraform.InstanceState{}, @@ -2324,6 +2287,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "some_threshold": "567.8", }, @@ -2376,6 +2340,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "block_device.#": "2", "block_device.616397234.delete_on_termination": "true", @@ -2410,6 +2375,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "port": "false", }, @@ -2453,6 +2419,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "route.#": "0", }, @@ -2476,6 +2443,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "active": "true", }, @@ -2503,6 +2471,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "instances.#": "1", "instances.3": "foo", @@ -2615,6 +2584,7 @@ func TestShimSchemaMap_Diff(t 
*testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "metadata_keys.#": "0", }, @@ -2675,6 +2645,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { Config: nil, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "tags.%": "0", }, @@ -2743,7 +2714,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, { - Name: ": StateFunc in nested set (#1759)", + Name: "StateFunc in nested set (#1759)", Schema: map[string]*Schema{ "service_account": &Schema{ Type: TypeList, @@ -2823,6 +2794,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "instances.#": "2", "instances.3": "333", @@ -2875,6 +2847,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "one": "false", "two": "true", @@ -2913,6 +2886,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { Schema: map[string]*Schema{}, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "id": "someid", }, @@ -2942,6 +2916,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "ports.#": "3", "ports.1": "1", @@ -2990,6 +2965,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "description": "foo", }, @@ -3023,7 +2999,9 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, }, - State: &terraform.InstanceState{}, + State: &terraform.InstanceState{ + ID: "id", + }, Config: map[string]interface{}{ "foo": "${var.foo}", @@ -3063,6 +3041,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "ports.#": "3", "ports.1": "1", @@ -3103,6 +3082,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "config.#": "2", "config.0": "a", @@ 
-3310,6 +3290,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "ports.#": "3", "ports.1": "1", @@ -3362,6 +3343,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { Schema: map[string]*Schema{}, State: &terraform.InstanceState{ + ID: "someid", Attributes: map[string]string{ "id": "someid", }, @@ -3397,6 +3379,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "etag": "foo", "version_id": "1", @@ -3442,6 +3425,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "foo": "bar", }, @@ -3471,6 +3455,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "attr": "bar", }, @@ -3508,6 +3493,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { }, State: &terraform.InstanceState{ + ID: "id", Attributes: map[string]string{ "unrelated_set.#": "0", "stream_enabled": "true", @@ -3549,11 +3535,10 @@ func TestShimSchemaMap_Diff(t *testing.T) { } { - d, err := InternalMap(tc.Schema).Diff(tc.State, terraform.NewResourceConfig(c), tc.CustomizeDiff, nil, false) + d, err := schemaMap(tc.Schema).Diff(tc.State, terraform.NewResourceConfig(c), tc.CustomizeDiff, nil, false) if err != nil != tc.Err { t.Fatalf("err: %s", err) } - if !cmp.Equal(d, tc.Diff, equateEmpty) { t.Fatal(cmp.Diff(d, tc.Diff, equateEmpty)) } @@ -3597,6 +3582,7 @@ func TestShimSchemaMap_Diff(t *testing.T) { } res := &Resource{Schema: tc.Schema} + d, err := diffFromValues(stateVal, configVal, res, tc.CustomizeDiff) if err != nil { if !tc.Err { From 83317975fede70f726c1cfcecd601778073882c3 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Fri, 16 Nov 2018 09:54:27 -0500 Subject: [PATCH 097/149] add more tests with carious set combinations --- builtin/providers/test/resource_nested_set.go | 5 ++ .../test/resource_nested_set_test.go 
| 86 ++++++++++++++++++- 2 files changed, 90 insertions(+), 1 deletion(-) diff --git a/builtin/providers/test/resource_nested_set.go b/builtin/providers/test/resource_nested_set.go index 318e020e1b63..c1e6520fb994 100644 --- a/builtin/providers/test/resource_nested_set.go +++ b/builtin/providers/test/resource_nested_set.go @@ -23,6 +23,11 @@ func testResourceNestedSet() *schema.Resource { Type: schema.TypeBool, Optional: true, }, + "force_new": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, "single": { Type: schema.TypeSet, Optional: true, diff --git a/builtin/providers/test/resource_nested_set_test.go b/builtin/providers/test/resource_nested_set_test.go index fb5694fa70bb..fe281870110d 100644 --- a/builtin/providers/test/resource_nested_set_test.go +++ b/builtin/providers/test/resource_nested_set_test.go @@ -127,7 +127,7 @@ resource "test_resource_nested_set" "foo" { }, }) } -func TestResourceNestedSet_multi(t *testing.T) { +func TestResourceNestedSet_multiAddRemove(t *testing.T) { checkFunc := func(s *terraform.State) error { return nil } @@ -218,6 +218,90 @@ resource "test_resource_nested_set" "foo" { `), Check: checkFunc, }, + + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { + optional = true + single { + value = "bar" + optional = "baz" + } + multi { + set { + required = "new" + optional_int = 3 + } + } +} + `), + Check: checkFunc, + }, + }, + }) +} + +func TestResourceNestedSet_forceNewEmptyString(t *testing.T) { + var id string + step := 0 + checkFunc := func(s *terraform.State) error { + root := s.ModuleByPath(addrs.RootModuleInstance) + res := root.Resources["test_resource_nested_set.foo"] + defer func() { + step++ + id = res.Primary.ID + }() + + if step == 2 && res.Primary.ID == id { + // setting an empty string currently does not trigger ForceNew, but + // it should in the future. 
+ return nil + } + + if res.Primary.ID == id { + return errors.New("expected new resource") + } + + return nil + } + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { + multi { + set { + required = "val" + } + } +} + `), + Check: checkFunc, + }, + + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { + multi { + set { + required = "" + } + } +} + `), + Check: checkFunc, + }, + + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { + force_new = "" +} + `), + Check: checkFunc, + }, }, }) } From 17ecda53b5f552626a4a1eb844df5bf7c35019aa Mon Sep 17 00:00:00 2001 From: James Bardin Date: Fri, 16 Nov 2018 09:55:34 -0500 Subject: [PATCH 098/149] strip empty containers from flatmap attributes In order to prevent mismatched states between read/plan/apply, we need to ensure that the attributes are generated consistently each time. Because of the various ways in which helper/schema and the hcl2 shims interpret empty values, the only way to ensure consistency is to always remove them altogether. 
--- helper/plugin/grpc_provider.go | 82 +++++++++++++++++++++++++++++ helper/plugin/grpc_provider_test.go | 33 ++++++++++++ 2 files changed, 115 insertions(+) diff --git a/helper/plugin/grpc_provider.go b/helper/plugin/grpc_provider.go index 4d6aa7307ed4..7cb51035a4c9 100644 --- a/helper/plugin/grpc_provider.go +++ b/helper/plugin/grpc_provider.go @@ -3,7 +3,10 @@ package plugin import ( "encoding/json" "errors" + "regexp" + "sort" "strconv" + "strings" "github.com/zclconf/go-cty/cty" ctyconvert "github.com/zclconf/go-cty/cty/convert" @@ -416,6 +419,8 @@ func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadReso // helper/schema should always copy the ID over, but do it again just to be safe newInstanceState.Attributes["id"] = newInstanceState.ID + newInstanceState.Attributes = normalizeFlatmapContainers(newInstanceState.Attributes) + newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(newInstanceState.Attributes, block.ImpliedType()) if err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) @@ -498,12 +503,22 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl return resp, nil } + // strip out non-diffs + for k, v := range diff.Attributes { + if v.New == v.Old && !v.NewComputed && !v.NewRemoved { + delete(diff.Attributes, k) + } + } + if priorState == nil { priorState = &terraform.InstanceState{} } // now we need to apply the diff to the prior state, so get the planned state plannedAttrs, err := diff.Apply(priorState.Attributes, block) + + plannedAttrs = normalizeFlatmapContainers(plannedAttrs) + plannedStateVal, err := hcl2shim.HCL2ValueFromFlatmap(plannedAttrs, block.ImpliedType()) if err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) @@ -636,6 +651,13 @@ func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.A } } + // strip out non-diffs + for k, v := range diff.Attributes { + if v.New == v.Old && !v.NewComputed && !v.NewRemoved { + 
delete(diff.Attributes, k) + } + } + if private != nil { diff.Meta = private } @@ -646,6 +668,10 @@ func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.A return resp, nil } + if newInstanceState != nil { + newInstanceState.Attributes = normalizeFlatmapContainers(newInstanceState.Attributes) + } + newStateVal := cty.NullVal(block.ImpliedType()) // We keep the null val if we destroyed the resource, otherwise build the @@ -819,6 +845,62 @@ func pathToAttributePath(path cty.Path) *proto.AttributePath { return &proto.AttributePath{Steps: steps} } +// normalizeFlatmapContainers removes empty containers, and fixes counts in a +// set of flatmapped attributes. +func normalizeFlatmapContainers(attrs map[string]string) map[string]string { + keyRx := regexp.MustCompile(`.*\.[%#]$`) + + // find container keys + var keys []string + for k := range attrs { + if keyRx.MatchString(k) { + keys = append(keys, k) + } + } + + // sort the keys in reverse, so that we check the longest subkeys first + sort.Slice(keys, func(i, j int) bool { + a, b := keys[i], keys[j] + + if strings.HasPrefix(a, b) { + return true + } + + if strings.HasPrefix(b, a) { + return false + } + + return a > b + }) + + for _, k := range keys { + prefix := k[:len(k)-1] + indexes := map[string]int{} + for cand := range attrs { + if cand == k { + continue + } + + if strings.HasPrefix(cand, prefix) { + idx := cand[len(prefix):] + dot := strings.Index(idx, ".") + if dot > 0 { + idx = idx[:dot] + } + indexes[idx]++ + } + } + + if len(indexes) > 0 { + attrs[k] = strconv.Itoa(len(indexes)) + } else { + delete(attrs, k) + } + } + + return attrs +} + // helper/schema throws away timeout values from the config and stores them in // the Private/Meta fields. we need to copy those values into the planned state // so that core doesn't see a perpetual diff with the timeout block. 
diff --git a/helper/plugin/grpc_provider_test.go b/helper/plugin/grpc_provider_test.go index 03f059a0b405..a06185fc4c27 100644 --- a/helper/plugin/grpc_provider_test.go +++ b/helper/plugin/grpc_provider_test.go @@ -3,6 +3,8 @@ package plugin import ( "context" "fmt" + "reflect" + "strconv" "strings" "testing" "time" @@ -637,3 +639,34 @@ func TestGetSchemaTimeouts(t *testing.T) { t.Fatal("missing default timeout in schema") } } + +func TestNormalizeFlatmapContainers(t *testing.T) { + for i, tc := range []struct { + attrs map[string]string + expect map[string]string + }{ + { + attrs: map[string]string{"id": "1", "multi.2.set.#": "1", "multi.1.set.#": "0", "single.#": "0"}, + expect: map[string]string{"id": "1"}, + }, + { + attrs: map[string]string{"id": "1", "multi.2.set.#": "2", "multi.2.set.1.foo": "bar", "multi.1.set.#": "0", "single.#": "0"}, + expect: map[string]string{"id": "1", "multi.2.set.#": "1", "multi.2.set.1.foo": "bar"}, + }, + { + attrs: map[string]string{"id": "78629a0f5f3f164f", "multi.#": "1"}, + expect: map[string]string{"id": "78629a0f5f3f164f"}, + }, + { + attrs: map[string]string{"multi.529860700.set.#": "1", "multi.#": "1", "id": "78629a0f5f3f164f"}, + expect: map[string]string{"id": "78629a0f5f3f164f"}, + }, + } { + t.Run(strconv.Itoa(i), func(t *testing.T) { + got := normalizeFlatmapContainers(tc.attrs) + if !reflect.DeepEqual(tc.expect, got) { + t.Fatalf("expected:\n%#v\ngot:\n%#v\n", tc.expect, got) + } + }) + } +} From a681124301ccc635e014c60347dfaead33ff326b Mon Sep 17 00:00:00 2001 From: James Bardin Date: Fri, 16 Nov 2018 10:33:41 -0500 Subject: [PATCH 099/149] verify DiffSuppresFunc behavior Terraform used to provide empty diffs to the provider when calculating `ignore_changes`, which would cause some DiffSuppressFunc to fail, as can be seen in #18209. 
Verify that this is no longer the case in 0.12 --- .../providers/test/resource_diff_suppress.go | 53 +++++++++++-- .../test/resource_diff_suppress_test.go | 79 +++++++++++++++++++ 2 files changed, 127 insertions(+), 5 deletions(-) diff --git a/builtin/providers/test/resource_diff_suppress.go b/builtin/providers/test/resource_diff_suppress.go index 5c01a1d09d5e..cb5f7358f5c5 100644 --- a/builtin/providers/test/resource_diff_suppress.go +++ b/builtin/providers/test/resource_diff_suppress.go @@ -1,23 +1,36 @@ package test import ( + "fmt" + "math/rand" "strings" "github.com/hashicorp/terraform/helper/schema" ) func testResourceDiffSuppress() *schema.Resource { + diffSuppress := func(k, old, new string, d *schema.ResourceData) bool { + if old == "" || strings.Contains(new, "replace") { + return false + } + return true + } + return &schema.Resource{ Create: testResourceDiffSuppressCreate, Read: testResourceDiffSuppressRead, - Update: testResourceDiffSuppressUpdate, Delete: testResourceDiffSuppressDelete, + Update: testResourceDiffSuppressUpdate, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, }, Schema: map[string]*schema.Schema{ + "optional": { + Type: schema.TypeString, + Optional: true, + }, "val_to_upper": { Type: schema.TypeString, Required: true, @@ -29,18 +42,48 @@ func testResourceDiffSuppress() *schema.Resource { return strings.ToUpper(old) == strings.ToUpper(new) }, }, - "optional": { - Type: schema.TypeString, + "network": { + Type: schema.TypeString, + Optional: true, + Default: "default", + ForceNew: true, + DiffSuppressFunc: diffSuppress, + }, + "subnetwork": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: diffSuppress, + }, + + "node_pool": { + Type: schema.TypeList, Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + 
ForceNew: true, + }, + }, + }, }, }, } } func testResourceDiffSuppressCreate(d *schema.ResourceData, meta interface{}) error { - d.SetId("testId") + d.Set("network", "modified") + d.Set("subnetwork", "modified") - return testResourceRead(d, meta) + id := fmt.Sprintf("%x", rand.Int63()) + d.SetId(id) + return nil } func testResourceDiffSuppressRead(d *schema.ResourceData, meta interface{}) error { diff --git a/builtin/providers/test/resource_diff_suppress_test.go b/builtin/providers/test/resource_diff_suppress_test.go index 59490e3584f8..89416f32a154 100644 --- a/builtin/providers/test/resource_diff_suppress_test.go +++ b/builtin/providers/test/resource_diff_suppress_test.go @@ -1,10 +1,14 @@ package test import ( + "errors" "strings" "testing" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" ) func TestResourceDiffSuppress_create(t *testing.T) { @@ -45,3 +49,78 @@ resource "test_resource_diff_suppress" "foo" { }, }) } + +func TestResourceDiffSuppress_updateIgnoreChanges(t *testing.T) { + // None of these steps should replace the instance + id := "" + checkFunc := func(s *terraform.State) error { + root := s.ModuleByPath(addrs.RootModuleInstance) + res := root.Resources["test_resource_diff_suppress.foo"] + if id != "" && res.Primary.ID != id { + return errors.New("expected no resource replacement") + } + id = res.Primary.ID + return nil + } + + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_diff_suppress" "foo" { + val_to_upper = "foo" + + network = "foo" + subnetwork = "foo" + + node_pool { + name = "default-pool" + } + lifecycle { + ignore_changes = ["node_pool"] + } +} + `), + Check: checkFunc, + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_diff_suppress" "foo" { + 
val_to_upper = "foo" + + network = "ignored" + subnetwork = "ignored" + + node_pool { + name = "default-pool" + } + lifecycle { + ignore_changes = ["node_pool"] + } +} + `), + Check: checkFunc, + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_diff_suppress" "foo" { + val_to_upper = "foo" + + network = "ignored" + subnetwork = "ignored" + + node_pool { + name = "ignored" + } + lifecycle { + ignore_changes = ["node_pool"] + } +} + `), + Check: checkFunc, + }, + }, + }) +} From 89b2c6f21e39c56dfef4bc20960735cd5ff5b2ea Mon Sep 17 00:00:00 2001 From: James Bardin Date: Fri, 16 Nov 2018 11:24:14 -0500 Subject: [PATCH 100/149] comment fixes --- helper/plugin/grpc_provider.go | 1 - terraform/diff.go | 6 ------ 2 files changed, 7 deletions(-) diff --git a/helper/plugin/grpc_provider.go b/helper/plugin/grpc_provider.go index 7cb51035a4c9..c668e9733b90 100644 --- a/helper/plugin/grpc_provider.go +++ b/helper/plugin/grpc_provider.go @@ -610,7 +610,6 @@ func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.A Type: req.TypeName, } - //priorState := terraform.NewInstanceStateShimmedFromValue(priorStateVal, res.SchemaVersion) priorState, err := res.ShimInstanceStateFromValue(priorStateVal) if err != nil { resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) diff --git a/terraform/diff.go b/terraform/diff.go index fad3e531c687..a28d7e4822d3 100644 --- a/terraform/diff.go +++ b/terraform/diff.go @@ -450,11 +450,6 @@ func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block } func (d *InstanceDiff) applyDiff(attrs map[string]string, schema *configschema.Block) (map[string]string, error) { - // We always build a new value here, even if the given diff is "empty", - // because we might be planning to create a new instance that happens - // to have no attributes set, and so we want to produce an empty object - // rather than just echoing back the null old value. 
- // Rather applying the diff to mutate the attrs, we'll copy new values into // here to avoid the possibility of leaving stale values. result := map[string]string{} @@ -513,7 +508,6 @@ func (d *InstanceDiff) applyAttrDiff(attrName string, oldAttrs map[string]string return result, nil } - // skip "id", as we already handled it if attrName == "id" { if old == "" { result["id"] = config.UnknownVariableValue From eddf676c1fbed270b54fae34ef201a9e0a054019 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Fri, 16 Nov 2018 15:11:16 -0500 Subject: [PATCH 101/149] add provider test with a nested list in a set in some cases helper/schema misses the list counts. --- builtin/providers/test/resource_nested_set.go | 21 ++++++++++++ .../test/resource_nested_set_test.go | 34 +++++++++++++++++++ 2 files changed, 55 insertions(+) diff --git a/builtin/providers/test/resource_nested_set.go b/builtin/providers/test/resource_nested_set.go index c1e6520fb994..d50c16da1928 100644 --- a/builtin/providers/test/resource_nested_set.go +++ b/builtin/providers/test/resource_nested_set.go @@ -89,6 +89,27 @@ func testResourceNestedSet() *schema.Resource { }, }, }, + "with_list": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "required": { + Type: schema.TypeString, + Required: true, + }, + + "list": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, }, } } diff --git a/builtin/providers/test/resource_nested_set_test.go b/builtin/providers/test/resource_nested_set_test.go index fe281870110d..f85e2a190967 100644 --- a/builtin/providers/test/resource_nested_set_test.go +++ b/builtin/providers/test/resource_nested_set_test.go @@ -305,3 +305,37 @@ resource "test_resource_nested_set" "foo" { }, }) } + +func TestResourceNestedSet_setWithList(t *testing.T) { + checkFunc := func(s *terraform.State) error { + return nil + } + resource.UnitTest(t, 
resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { + with_list { + required = "bar" + list = ["initial value"] + } +} + `), + Check: checkFunc, + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource_nested_set" "foo" { + with_list { + required = "bar" + list = ["second value"] + } +} + `), + Check: checkFunc, + }, + }, + }) +} From e95f2b586e744dfb0cf92352de491be8a09552c2 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Fri, 16 Nov 2018 15:12:16 -0500 Subject: [PATCH 102/149] another test case in helper/plugin --- helper/plugin/grpc_provider.go | 2 +- helper/plugin/grpc_provider_test.go | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/helper/plugin/grpc_provider.go b/helper/plugin/grpc_provider.go index c668e9733b90..53809e273c08 100644 --- a/helper/plugin/grpc_provider.go +++ b/helper/plugin/grpc_provider.go @@ -847,7 +847,7 @@ func pathToAttributePath(path cty.Path) *proto.AttributePath { // normalizeFlatmapContainers removes empty containers, and fixes counts in a // set of flatmapped attributes. 
func normalizeFlatmapContainers(attrs map[string]string) map[string]string { - keyRx := regexp.MustCompile(`.*\.[%#]$`) + keyRx := regexp.MustCompile(`.\.[%#]$`) // find container keys var keys []string diff --git a/helper/plugin/grpc_provider_test.go b/helper/plugin/grpc_provider_test.go index a06185fc4c27..5dd33ac2b0d0 100644 --- a/helper/plugin/grpc_provider_test.go +++ b/helper/plugin/grpc_provider_test.go @@ -661,6 +661,10 @@ func TestNormalizeFlatmapContainers(t *testing.T) { attrs: map[string]string{"multi.529860700.set.#": "1", "multi.#": "1", "id": "78629a0f5f3f164f"}, expect: map[string]string{"id": "78629a0f5f3f164f"}, }, + { + attrs: map[string]string{"set.2.required": "bar", "set.2.list.#": "1", "set.2.list.0": "x", "set.1.list.#": "0"}, + expect: map[string]string{"set.2.list.#": "1", "set.2.list.0": "x", "set.2.required": "bar", "set.#": "1"}, + }, } { t.Run(strconv.Itoa(i), func(t *testing.T) { got := normalizeFlatmapContainers(tc.attrs) From db968733da419cf905116d6a16c55dde22d5b794 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Fri, 16 Nov 2018 15:26:16 -0500 Subject: [PATCH 103/149] re-count the flatmapped containers When applying a legacy diff, recount the flatmapped containers. We can't trust helper/schema to return the correct value, if it even exists. --- terraform/diff.go | 47 ++++++++++++++++++++++++++++++++---------- terraform/diff_test.go | 37 +++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 11 deletions(-) diff --git a/terraform/diff.go b/terraform/diff.go index a28d7e4822d3..ae26183160d2 100644 --- a/terraform/diff.go +++ b/terraform/diff.go @@ -7,6 +7,7 @@ import ( "reflect" "regexp" "sort" + "strconv" "strings" "sync" @@ -617,17 +618,9 @@ func (d *InstanceDiff) applyCollectionDiff(attrName string, oldAttrs map[string] } } - // Verify we have the index count. - // If it wasn't added from a diff, check it from the previous value. 
- // Make sure we keep the count if it existed before, so we can tell if it - // existed, or was null. - if !setIndex { - old := oldAttrs[idx] - if old != "" { - result[idx] = old - } - } - + // Don't trust helper/schema to return a valid count, or even have one at + // all. + result[idx] = countFlatmapContainerValues(idx, result) return result, nil } @@ -686,9 +679,41 @@ func (d *InstanceDiff) applySetDiff(attrName string, oldAttrs map[string]string, } } + result[idx] = countFlatmapContainerValues(idx, result) + return result, nil } +// countFlatmapContainerValues returns the number of values in the flatmapped container +// (set, map, list) indexed by key. The key argument is expected to include the +// trailing ".#", or ".%". +func countFlatmapContainerValues(key string, attrs map[string]string) string { + if len(key) < 3 || !(strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { + panic(fmt.Sprintf("invalid index value %q", key)) + } + + prefix := key[:len(key)-1] + items := map[string]int{} + + for k := range attrs { + if k == key { + continue + } + if !strings.HasPrefix(k, prefix) { + continue + } + + suffix := k[len(prefix):] + dot := strings.Index(suffix, ".") + if dot > 0 { + suffix = suffix[:dot] + } + + items[suffix]++ + } + return strconv.Itoa(len(items)) +} + // ResourceAttrDiff is the diff of a single attribute of a resource. 
type ResourceAttrDiff struct { Old string // Old Value diff --git a/terraform/diff_test.go b/terraform/diff_test.go index b2ace77f4cb2..e7ee0d818542 100644 --- a/terraform/diff_test.go +++ b/terraform/diff_test.go @@ -3,6 +3,7 @@ package terraform import ( "fmt" "reflect" + "strconv" "strings" "testing" @@ -1213,3 +1214,39 @@ CREATE: nodeA longfoo: "foo" => "bar" (forces new resource) secretfoo: "" => "" (attribute changed) ` + +func TestCountFlatmapContainerValues(t *testing.T) { + for i, tc := range []struct { + attrs map[string]string + key string + count string + }{ + { + attrs: map[string]string{"set.2.list.#": "9999", "set.2.list.0": "x", "set.2.list.0.z": "y", "set.2.attr": "bar", "set.#": "9999"}, + key: "set.2.list.#", + count: "1", + }, + { + attrs: map[string]string{"set.2.list.#": "9999", "set.2.list.0": "x", "set.2.list.0.z": "y", "set.2.attr": "bar", "set.#": "9999"}, + key: "set.#", + count: "1", + }, + { + attrs: map[string]string{"set.2.list.0": "x", "set.2.list.0.z": "y", "set.2.attr": "bar", "set.#": "9999"}, + key: "set.#", + count: "1", + }, + { + attrs: map[string]string{"map.#": "3", "map.a": "b", "map.a.#": "0", "map.b": "4"}, + key: "map.#", + count: "2", + }, + } { + t.Run(strconv.Itoa(i), func(t *testing.T) { + count := countFlatmapContainerValues(tc.key, tc.attrs) + if count != tc.count { + t.Fatalf("expected %q, got %q", tc.count, count) + } + }) + } +} From e4270993be013de410ffd7ca76aa2f00a27df688 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Fri, 16 Nov 2018 15:28:30 -0500 Subject: [PATCH 104/149] remove unused value --- terraform/diff.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/terraform/diff.go b/terraform/diff.go index ae26183160d2..c8e660ad5e66 100644 --- a/terraform/diff.go +++ b/terraform/diff.go @@ -595,19 +595,11 @@ func (d *InstanceDiff) applyCollectionDiff(attrName string, oldAttrs map[string] idx = attrName + ".%" } - // record if we got the index from the diff - setIndex := false - for k := range keys { 
if !strings.HasPrefix(k, attrName+".") { continue } - // we need to verify if we saw the index later - if k == idx { - setIndex = true - } - res, err := d.applyAttrDiff(k, oldAttrs, attrSchema) if err != nil { return result, err From 2293391241aad65475efd3326363e91351906120 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Mon, 12 Nov 2018 16:08:33 -0800 Subject: [PATCH 105/149] command: Fix TestMetaBackend_emptyWithExplicitState This test was incorrectly updated in a previous iteration, with it creating a modified state to write but then not actually writing it, writing an empty test state instead. This made the test fail because a backup state file is created only if the new state snapshot is different to the old when written. --- command/meta_backend_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command/meta_backend_test.go b/command/meta_backend_test.go index e3a59cdd58da..fc6ba51fd68e 100644 --- a/command/meta_backend_test.go +++ b/command/meta_backend_test.go @@ -207,7 +207,7 @@ func TestMetaBackend_emptyWithExplicitState(t *testing.T) { // Write some state next := testState() markStateForMatching(next, "bar") // just any change so it shows as different than before - s.WriteState(testState()) + s.WriteState(next) if err := s.PersistState(); err != nil { t.Fatalf("unexpected error: %s", err) } From 94510bc1b9b56f9e85ea8534bc207b06e9f251f2 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Mon, 12 Nov 2018 18:26:49 -0800 Subject: [PATCH 106/149] states/statemgr: Migrate, Import, and Export functions In our recent refactoring of the state manager interfaces we made serial and lineage management the responsibility of the state managers themselves, not exposing them at all to most callers, and allowing for simple state managers that don't implement them at all. 
However, we do have some specific cases where we need to preserve these properly when available, such as migration between backends, and the "terraform state push" and "terraform state pull" commands. These new functions and their associated optional interface allow the logic here to be captured in one place and access via some simple calls. Separating this from the main interface leaves things simple for the normal uses of state managers. Since these functions are mostly just thin wrappers around other functionality, they are not yet well-tested directly, but will be indirectly tested through the tests of their callers. A subsequent commit will add more unit tests here. --- state/remote/state.go | 31 ++++ states/statemgr/filesystem.go | 63 ++++++- states/statemgr/migrate.go | 212 ++++++++++++++++++++++ states/statemgr/migrate_test.go | 102 +++++++++++ states/statemgr/snapshotmetarel_string.go | 26 +++ 5 files changed, 429 insertions(+), 5 deletions(-) create mode 100644 states/statemgr/migrate.go create mode 100644 states/statemgr/migrate_test.go create mode 100644 states/statemgr/snapshotmetarel_string.go diff --git a/state/remote/state.go b/state/remote/state.go index 5ead38e82b98..e73fbe8f5812 100644 --- a/state/remote/state.go +++ b/state/remote/state.go @@ -28,6 +28,7 @@ type State struct { } var _ statemgr.Full = (*State)(nil) +var _ statemgr.Migrator = (*State)(nil) // statemgr.Reader impl. func (s *State) State() *states.State { @@ -37,6 +38,14 @@ func (s *State) State() *states.State { return s.state.DeepCopy() } +// StateForMigration is part of our implementation of statemgr.Migrator. +func (s *State) StateForMigration() *statefile.File { + s.mu.Lock() + defer s.mu.Unlock() + + return statefile.New(s.state.DeepCopy(), s.lineage, s.serial) +} + // statemgr.Writer impl. 
func (s *State) WriteState(state *states.State) error { s.mu.Lock() @@ -50,6 +59,28 @@ func (s *State) WriteState(state *states.State) error { return nil } +// WriteStateForMigration is part of our implementation of statemgr.Migrator. +func (s *State) WriteStateForMigration(f *statefile.File, force bool) error { + s.mu.Lock() + defer s.mu.Unlock() + + checkFile := statefile.New(s.state, s.lineage, s.serial) + if !force { + if err := statemgr.CheckValidImport(f, checkFile); err != nil { + return err + } + } + + // We create a deep copy of the state here, because the caller also has + // a reference to the given object and can potentially go on to mutate + // it after we return, but we want the snapshot at this point in time. + s.state = f.State.DeepCopy() + s.lineage = f.Lineage + s.serial = f.Serial + + return nil +} + // statemgr.Refresher impl. func (s *State) RefreshState() error { s.mu.Lock() diff --git a/states/statemgr/filesystem.go b/states/statemgr/filesystem.go index c9011162e6b4..740c23e75076 100644 --- a/states/statemgr/filesystem.go +++ b/states/statemgr/filesystem.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "io/ioutil" + "log" "os" "path/filepath" "sync" @@ -62,6 +63,7 @@ type Filesystem struct { var ( _ Full = (*Filesystem)(nil) _ PersistentMeta = (*Filesystem)(nil) + _ Migrator = (*Filesystem)(nil) ) // NewFilesystem creates a filesystem-based state manager that reads and writes @@ -121,9 +123,6 @@ func (s *Filesystem) State() *states.State { // WriteState is an incorrect implementation of Writer that actually also // persists. -// WriteState for LocalState always persists the state as well. -// -// StateWriter impl. 
func (s *Filesystem) WriteState(state *states.State) error { // TODO: this should use a more robust method of writing state, by first // writing to a temp file on the same filesystem, and renaming the file over @@ -137,7 +136,10 @@ func (s *Filesystem) WriteState(state *states.State) error { } defer s.mutex()() + return s.writeState(state, nil) +} +func (s *Filesystem) writeState(state *states.State, meta *SnapshotMeta) error { // We'll try to write our backup first, so we can be sure we've created // it successfully before clobbering the original file it came from. if !s.writtenBackup && s.backupFile != nil && s.backupPath != "" && !statefile.StatesMarshalEqual(state, s.backupFile.State) { @@ -180,8 +182,14 @@ func (s *Filesystem) WriteState(state *states.State) error { return nil } - if s.readFile == nil || !statefile.StatesMarshalEqual(s.file.State, s.readFile.State) { - s.file.Serial++ + if meta == nil { + if s.readFile == nil || !statefile.StatesMarshalEqual(s.file.State, s.readFile.State) { + s.file.Serial++ + } + } else { + // Force new metadata + s.file.Lineage = meta.Lineage + s.file.Serial = meta.Serial } if err := statefile.Write(s.file, s.stateFileOut); err != nil { @@ -345,6 +353,51 @@ func (s *Filesystem) StateSnapshotMeta() SnapshotMeta { } } +// StateForMigration is part of our implementation of Migrator. +func (s *Filesystem) StateForMigration() *statefile.File { + return s.file.DeepCopy() +} + +// WriteStateForMigration is part of our implementation of Migrator. 
+func (s *Filesystem) WriteStateForMigration(f *statefile.File, force bool) error { + if s.readFile == nil { + err := s.RefreshState() + if err != nil { + return err + } + } + defer s.mutex()() + + if !force { + err := CheckValidImport(f, s.readFile) + if err != nil { + return err + } + } + + if s.readFile != nil { + log.Printf( + "[TRACE] statemgr.Filesystem: Importing snapshot with lineage %q serial %d over snapshot with lineage %q serial %d at %s", + f.Lineage, f.Serial, + s.readFile.Lineage, s.readFile.Serial, + s.path, + ) + } else { + log.Printf( + "[TRACE] statemgr.Filesystem: Importing snapshot with lineage %q serial %d as the initial state snapshot at %s", + f.Lineage, f.Serial, + s.path, + ) + } + + err := s.writeState(f.State, &SnapshotMeta{Lineage: f.Lineage, Serial: f.Serial}) + if err != nil { + return err + } + + return nil +} + // Open the state file, creating the directories and file as needed. func (s *Filesystem) createStateFiles() error { diff --git a/states/statemgr/migrate.go b/states/statemgr/migrate.go new file mode 100644 index 000000000000..8e263e07b97e --- /dev/null +++ b/states/statemgr/migrate.go @@ -0,0 +1,212 @@ +package statemgr + +import ( + "fmt" + + "github.com/hashicorp/terraform/states/statefile" +) + +// Migrator is an optional interface implemented by state managers that +// are capable of direct migration of state snapshots with their associated +// metadata unchanged. +// +// This interface is used when available by function Migrate. See that +// function for more information on how it is used. +type Migrator interface { + PersistentMeta + + // StateForMigration returns a full statefile representing the latest + // snapshot (as would be returned by Reader.State) and the associated + // snapshot metadata (as would be returned by + // PersistentMeta.StateSnapshotMeta). + // + // Just as with Reader.State, this must not fail. 
+ StateForMigration() *statefile.File + + // WriteStateForMigration accepts a full statefile including associated + // snapshot metadata, and atomically updates the stored file (as with + // Writer.WriteState) and the metadata. + // + // If "force" is not set, the manager must call CheckValidImport with + // the given file and the current file and complete the update only if + // that function returns nil. If force is set this may override such + // checks, but some backends do not support forcing and so will act + // as if force is always true. + WriteStateForMigration(f *statefile.File, force bool) error +} + +// Migrate writes the latest transient state snapshot from src into dest, +// preserving snapshot metadata (serial and lineage) where possible. +// +// If both managers implement the optional interface Migrator then it will +// be used to copy the snapshot and its associated metadata. Otherwise, +// the normal Reader and Writer interfaces will be used instead. +// +// If the destination manager refuses the new state or fails to write it then +// its error is returned directly. +// +// For state managers that also implement Persistent, it is the caller's +// responsibility to persist the newly-written state after a successful result, +// just as with calls to Writer.WriteState. +// +// This function doesn't do any locking of its own, so if the state managers +// also implement Locker the caller should hold a lock on both managers +// for the duration of this call. +func Migrate(dst, src Transient) error { + if dstM, ok := dst.(Migrator); ok { + if srcM, ok := src.(Migrator); ok { + // Full-fidelity migration, them. + s := srcM.StateForMigration() + return dstM.WriteStateForMigration(s, true) + } + } + + // Managers to not support full-fidelity migration, so migration will not + // preserve serial/lineage. 
+ s := src.State() + return dst.WriteState(s) +} + +// Import loads the given state snapshot into the given manager, preserving +// its metadata (serial and lineage) if the target manager supports metadata. +// +// A state manager must implement the optional interface Migrator to get +// access to the full metadata. +// +// Unless "force" is true, Import will check first that the metadata given +// in the file matches the current snapshot metadata for the manager, if the +// manager supports metadata. Some managers do not support forcing, so a +// write with an unsuitable lineage or serial may still be rejected even if +// "force" is set. "force" has no effect for managers that do not support +// snapshot metadata. +// +// For state managers that also implement Persistent, it is the caller's +// responsibility to persist the newly-written state after a successful result, +// just as with calls to Writer.WriteState. +// +// This function doesn't do any locking of its own, so if the state manager +// also implements Locker the caller should hold a lock on it for the +// duration of this call. +func Import(f *statefile.File, mgr Transient, force bool) error { + if mgrM, ok := mgr.(Migrator); ok { + return mgrM.WriteStateForMigration(f, force) + } + + // For managers that don't implement Migrator, this is just a normal write + // of the state contained in the given file. + return mgr.WriteState(f.State) +} + +// Export retrieves the latest state snapshot from the given manager, including +// its metadata (serial and lineage) where possible. +// +// A state manager must also implement either Migrator or PersistentMeta +// for the metadata to be included. Otherwise, the relevant fields will have +// zero value in the returned object. +// +// For state managers that also implement Persistent, it is the caller's +// responsibility to refresh from persistent storage first if needed. 
+// +// This function doesn't do any locking of its own, so if the state manager +// also implements Locker the caller should hold a lock on it for the +// duration of this call. +func Export(mgr Reader) *statefile.File { + switch mgrT := mgr.(type) { + case Migrator: + return mgrT.StateForMigration() + case PersistentMeta: + s := mgr.State() + meta := mgrT.StateSnapshotMeta() + return statefile.New(s, meta.Lineage, meta.Serial) + default: + s := mgr.State() + return statefile.New(s, "", 0) + } +} + +// SnapshotMetaRel describes a relationship between two SnapshotMeta values, +// returned from the SnapshotMeta.Compare method where the "first" value +// is the receiver of that method and the "second" is the given argument. +type SnapshotMetaRel rune + +//go:generate stringer -type=SnapshotMetaRel + +const ( + // SnapshotOlder indicates that two snapshots have a common lineage and + // that the first has a lower serial value. + SnapshotOlder SnapshotMetaRel = '<' + + // SnapshotNewer indicates that two snapshots have a common lineage and + // that the first has a higher serial value. + SnapshotNewer SnapshotMetaRel = '>' + + // SnapshotEqual indicates that two snapshots have a common lineage and + // the same serial value. + SnapshotEqual SnapshotMetaRel = '=' + + // SnapshotUnrelated indicates that two snapshots have different lineage + // and thus cannot be meaningfully compared. + SnapshotUnrelated SnapshotMetaRel = '!' + + // SnapshotLegacy indicates that one or both of the snapshots + // does not have a lineage at all, and thus no comparison is possible. + SnapshotLegacy SnapshotMetaRel = '?' +) + +// Compare determines the relationship, if any, between the given existing +// SnapshotMeta and the potential "new" SnapshotMeta that is the receiver. 
+func (m SnapshotMeta) Compare(existing SnapshotMeta) SnapshotMetaRel { + switch { + case m.Lineage == "" || existing.Lineage == "": + return SnapshotLegacy + case m.Lineage != existing.Lineage: + return SnapshotUnrelated + case m.Serial > existing.Serial: + return SnapshotNewer + case m.Serial < existing.Serial: + return SnapshotOlder + default: + // both serials are equal, by elimination + return SnapshotEqual + } +} + +// CheckValidImport returns nil if the "new" snapshot can be imported as a +// successor of the "existing" snapshot without forcing. +// +// If not, an error is returned describing why. +func CheckValidImport(newFile, existingFile *statefile.File) error { + if existingFile == nil || existingFile.State.Empty() { + // It's always okay to overwrite an empty state, regardless of + // its lineage/serial. + return nil + } + new := SnapshotMeta{ + Lineage: newFile.Lineage, + Serial: newFile.Serial, + } + existing := SnapshotMeta{ + Lineage: existingFile.Lineage, + Serial: existingFile.Serial, + } + rel := new.Compare(existing) + switch rel { + case SnapshotNewer: + return nil // a newer snapshot is fine + case SnapshotLegacy: + return nil // anything goes for a legacy state + case SnapshotUnrelated: + return fmt.Errorf("cannot import state with lineage %q over unrelated state with lineage %q", new.Lineage, existing.Lineage) + case SnapshotEqual: + if statefile.StatesMarshalEqual(newFile.State, existingFile.State) { + // If lineage, serial, and state all match then this is fine. 
+ return nil + } + return fmt.Errorf("cannot overwrite existing state with serial %d with a different state that has the same serial", new.Serial) + case SnapshotOlder: + return fmt.Errorf("cannot import state with serial %d over newer state with serial %d", new.Serial, existing.Serial) + default: + // Should never happen, but we'll check to make sure for safety + return fmt.Errorf("unsupported state snapshot relationship %s", rel) + } +} diff --git a/states/statemgr/migrate_test.go b/states/statemgr/migrate_test.go new file mode 100644 index 000000000000..0cf2113a252e --- /dev/null +++ b/states/statemgr/migrate_test.go @@ -0,0 +1,102 @@ +package statemgr + +import ( + "testing" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" +) + +func TestCheckValidImport(t *testing.T) { + barState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("bar"), false, + ) + }) + notBarState := states.BuildState(func(s *states.SyncState) { + s.SetOutputValue( + addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance), + cty.StringVal("not bar"), false, + ) + }) + emptyState := states.NewState() + + tests := map[string]struct { + New *statefile.File + Existing *statefile.File + WantErr string + }{ + "exact match": { + New: statefile.New(barState, "lineage", 1), + Existing: statefile.New(barState, "lineage", 1), + WantErr: ``, + }, + "overwrite unrelated empty state": { + New: statefile.New(barState, "lineage1", 1), + Existing: statefile.New(emptyState, "lineage2", 1), + WantErr: ``, + }, + "different state with same serial": { + New: statefile.New(barState, "lineage", 1), + Existing: statefile.New(notBarState, "lineage", 1), + WantErr: `cannot overwrite existing state with serial 1 with a different state that has the same serial`, + }, + "different state 
with newer serial": { + New: statefile.New(barState, "lineage", 2), + Existing: statefile.New(notBarState, "lineage", 1), + WantErr: ``, + }, + "different state with older serial": { + New: statefile.New(barState, "lineage", 1), + Existing: statefile.New(notBarState, "lineage", 2), + WantErr: `cannot import state with serial 1 over newer state with serial 2`, + }, + "different lineage with same serial": { + New: statefile.New(barState, "lineage1", 2), + Existing: statefile.New(notBarState, "lineage2", 2), + WantErr: `cannot import state with lineage "lineage1" over unrelated state with lineage "lineage2"`, + }, + "different lineage with different serial": { + New: statefile.New(barState, "lineage1", 3), + Existing: statefile.New(notBarState, "lineage2", 2), + WantErr: `cannot import state with lineage "lineage1" over unrelated state with lineage "lineage2"`, + }, + "new state is legacy": { + New: statefile.New(barState, "", 2), + Existing: statefile.New(notBarState, "lineage", 2), + WantErr: ``, + }, + "old state is legacy": { + New: statefile.New(barState, "lineage", 2), + Existing: statefile.New(notBarState, "", 2), + WantErr: ``, + }, + "both states are legacy": { + New: statefile.New(barState, "", 2), + Existing: statefile.New(notBarState, "", 2), + WantErr: ``, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + gotErr := CheckValidImport(test.New, test.Existing) + + if test.WantErr == "" { + if gotErr != nil { + t.Errorf("unexpected error: %s", gotErr) + } + } else { + if gotErr == nil { + t.Errorf("succeeded, but want error: %s", test.WantErr) + } else if got, want := gotErr.Error(), test.WantErr; got != want { + t.Errorf("wrong error\ngot: %s\nwant: %s", got, want) + } + } + }) + } +} diff --git a/states/statemgr/snapshotmetarel_string.go b/states/statemgr/snapshotmetarel_string.go new file mode 100644 index 000000000000..28e1b966f94f --- /dev/null +++ b/states/statemgr/snapshotmetarel_string.go @@ -0,0 +1,26 @@ +// Code 
generated by "stringer -type=SnapshotMetaRel"; DO NOT EDIT. + +package statemgr + +import "strconv" + +const ( + _SnapshotMetaRel_name_0 = "SnapshotUnrelated" + _SnapshotMetaRel_name_1 = "SnapshotOlderSnapshotEqualSnapshotNewerSnapshotLegacy" +) + +var ( + _SnapshotMetaRel_index_1 = [...]uint8{0, 13, 26, 39, 53} +) + +func (i SnapshotMetaRel) String() string { + switch { + case i == 33: + return _SnapshotMetaRel_name_0 + case 60 <= i && i <= 63: + i -= 60 + return _SnapshotMetaRel_name_1[_SnapshotMetaRel_index_1[i]:_SnapshotMetaRel_index_1[i+1]] + default: + return "SnapshotMetaRel(" + strconv.FormatInt(int64(i), 10) + ")" + } +} From 24046ab83381eef3848353c55cd6c111afc8daab Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Mon, 12 Nov 2018 18:27:39 -0800 Subject: [PATCH 107/149] command: More TRACE logging for the Backend instantiation codepaths --- command/meta_backend.go | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/command/meta_backend.go b/command/meta_backend.go index e1655c215595..5ce527dbc52f 100644 --- a/command/meta_backend.go +++ b/command/meta_backend.go @@ -89,7 +89,7 @@ func (m *Meta) Backend(opts *BackendOpts) (backend.Enhanced, tfdiags.Diagnostics return nil, diags } - log.Printf("[INFO] command: backend initialized: %T", b) + log.Printf("[TRACE] Meta.Backend: instantiated backend of type %T", b) } // Setup the CLI opts we pass into backends that support it. @@ -111,6 +111,7 @@ func (m *Meta) Backend(opts *BackendOpts) (backend.Enhanced, tfdiags.Diagnostics // If the result of loading the backend is an enhanced backend, // then return that as-is. This works even if b == nil (it will be !ok). if enhanced, ok := b.(backend.Enhanced); ok { + log.Printf("[TRACE] Meta.Backend: backend %T supports operations", b) return enhanced, nil } @@ -119,7 +120,7 @@ func (m *Meta) Backend(opts *BackendOpts) (backend.Enhanced, tfdiags.Diagnostics // non-enhanced (if any) as the state backend. 
if !opts.ForceLocal { - log.Printf("[INFO] command: backend %T is not enhanced, wrapping in local", b) + log.Printf("[TRACE] Meta.Backend: backend %T does not support operations, so wrapping it in a local backend", b) } // Build the local backend @@ -290,18 +291,18 @@ func (m *Meta) backendConfig(opts *BackendOpts) (*configs.Backend, int, tfdiags. } if conf == nil { - log.Println("[INFO] command: no config, returning nil") + log.Println("[TRACE] Meta.Backend: no config given or present on disk, so returning nil config") return nil, 0, nil } - log.Println("[WARN] BackendOpts.Config not set, but config found") + log.Printf("[TRACE] Meta.Backend: BackendOpts.Config not set, so using settings loaded from %s", conf.DeclRange) opts.Config = conf } c := opts.Config if c == nil { - log.Println("[INFO] command: no explicit backend config") + log.Println("[TRACE] Meta.Backend: no explicit backend config, so returning nil config") return nil, 0, nil } @@ -323,9 +324,12 @@ func (m *Meta) backendConfig(opts *BackendOpts) (*configs.Backend, int, tfdiags. // If we have an override configuration body then we must apply it now. if opts.ConfigOverride != nil { + log.Println("[TRACE] Meta.Backend: merging -backend-config=... CLI overrides into backend configuration") configBody = configs.MergeBodies(configBody, opts.ConfigOverride) } + log.Printf("[TRACE] Meta.Backend: built configuration for %q backend with hash value %d", c.Type, configHash) + // We'll shallow-copy configs.Backend here so that we can replace the // body without affecting others that hold this reference. 
configCopy := *c @@ -382,8 +386,12 @@ func (m *Meta) backendFromConfig(opts *BackendOpts) (backend.Backend, tfdiags.Di // Load the state, it must be non-nil for the tests below but can be empty s := sMgr.State() if s == nil { - log.Printf("[DEBUG] command: no data state file found for backend config") + log.Printf("[TRACE] Meta.Backend: backend has not previously been initialized in this working directory") s = terraform.NewState() + } else if s.Backend != nil { + log.Printf("[TRACE] Meta.Backend: working directory was previously initialized for %q backend", s.Backend.Type) + } else { + log.Printf("[TRACE] Meta.Backend: working directory was previously initialized but has no backend (is using legacy remote state?)") } // if we want to force reconfiguration of the backend, we set the backend @@ -418,10 +426,12 @@ func (m *Meta) backendFromConfig(opts *BackendOpts) (backend.Backend, tfdiags.Di switch { // No configuration set at all. Pure local state. case c == nil && s.Backend.Empty(): + log.Printf("[TRACE] Meta.Backend: using default local state only (no backend configuration, and no existing initialized backend)") return nil, nil // We're unsetting a backend (moving from backend => local) case c == nil && !s.Backend.Empty(): + log.Printf("[TRACE] Meta.Backend: previously-initialized %q backend is no longer present in config", s.Backend.Type) if !opts.Init { initReason := fmt.Sprintf( "Unsetting the previously set backend %q", @@ -435,6 +445,7 @@ func (m *Meta) backendFromConfig(opts *BackendOpts) (backend.Backend, tfdiags.Di // Configuring a backend for the first time. 
case c != nil && s.Backend.Empty(): + log.Printf("[TRACE] Meta.Backend: moving from default local state only to %q backend", c.Type) if !opts.Init { initReason := fmt.Sprintf( "Initial configuration of the requested backend %q", @@ -451,8 +462,10 @@ func (m *Meta) backendFromConfig(opts *BackendOpts) (backend.Backend, tfdiags.Di // If our configuration is the same, then we're just initializing // a previously configured remote backend. if !m.backendConfigNeedsMigration(c, s.Backend) { + log.Printf("[TRACE] Meta.Backend: using already-initialized %q backend configuration", c.Type) return m.backend_C_r_S_unchanged(c, cHash, sMgr) } + log.Printf("[TRACE] Meta.Backend: backend configuration has changed (from type %q to type %q)", s.Backend.Type, c.Type) if !opts.Init { initReason := fmt.Sprintf( @@ -615,7 +628,10 @@ func (m *Meta) backend_C_r_s(c *configs.Backend, cHash int, sMgr *state.LocalSta // We only care about non-empty states. if localS := localState.State(); !localS.Empty() { + log.Printf("[TRACE] Meta.Backend: will need to migrate workspace states because of existing %q workspace", workspace) localStates = append(localStates, localState) + } else { + log.Printf("[TRACE] Meta.Backend: ignoring local %q workspace because its state is empty", workspace) } } From aacbe1d14be9d9422941192e840c84bec640b985 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Mon, 12 Nov 2018 18:30:01 -0800 Subject: [PATCH 108/149] command: Fix TestMetaBackend_configureNewWithState This test was initially failing because its fixture had a state which our new state models consider to be "empty", and thus it was not migrated. After fixing that (by adding an output to the fixture), this revealed a bug that the lineage was not being persisted through the migration. This is fixed by using the statemgr.Migrate method instead of writing via the normal Writer interface, which allows two cooperating state managers to properly transfer the lineage and serial along with the state snapshot. 
--- command/meta_backend_migrate.go | 6 ++++-- command/meta_backend_test.go | 11 +++++------ .../backend-new-migrate/terraform.tfstate | 7 ++++++- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/command/meta_backend_migrate.go b/command/meta_backend_migrate.go index 91c24a9c64c6..eceb3032d6af 100644 --- a/command/meta_backend_migrate.go +++ b/command/meta_backend_migrate.go @@ -382,8 +382,10 @@ func (m *Meta) backendMigrateState_s_s(opts *backendMigrateOpts) error { } } - // Confirmed! Write. - if err := stateTwo.WriteState(one); err != nil { + // Confirmed! We'll have the statemgr package handle the migration, which + // includes preserving any lineage/serial information where possible, if + // both managers support such metadata. + if err := statemgr.Migrate(stateTwo, stateOne); err != nil { return fmt.Errorf(strings.TrimSpace(errBackendStateCopy), opts.OneType, opts.TwoType, err) } diff --git a/command/meta_backend_test.go b/command/meta_backend_test.go index fc6ba51fd68e..f0c9510948e1 100644 --- a/command/meta_backend_test.go +++ b/command/meta_backend_test.go @@ -326,24 +326,23 @@ func TestMetaBackend_configureNewWithState(t *testing.T) { if err != nil { t.Fatalf("unexpected error: %s", err) } - if err := s.RefreshState(); err != nil { + state, err := statemgr.RefreshAndRead(s) + if err != nil { t.Fatalf("unexpected error: %s", err) } - state := s.State() if state == nil { t.Fatal("state is nil") } - if testStateMgrCurrentLineage(s) != "backend-new-migrate" { - t.Fatalf("bad: %#v", state) + if got, want := testStateMgrCurrentLineage(s), "backend-new-migrate"; got != want { + t.Fatalf("lineage changed during migration\nnow: %s\nwas: %s", got, want) } // Write some state state = states.NewState() mark := markStateForMatching(state, "changing") - s.WriteState(state) - if err := s.PersistState(); err != nil { + if err := statemgr.WriteAndPersist(s, state); err != nil { t.Fatalf("unexpected error: %s", err) } diff --git 
a/command/test-fixtures/backend-new-migrate/terraform.tfstate b/command/test-fixtures/backend-new-migrate/terraform.tfstate index b1b1415d0181..f1d8b968b36d 100644 --- a/command/test-fixtures/backend-new-migrate/terraform.tfstate +++ b/command/test-fixtures/backend-new-migrate/terraform.tfstate @@ -8,7 +8,12 @@ "path": [ "root" ], - "outputs": {}, + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + }, "resources": {}, "depends_on": [] } From 985b414dca721a5e2f35812856c74227a6bfc0ee Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Tue, 13 Nov 2018 15:51:01 -0800 Subject: [PATCH 109/149] states/statemgr: Fix the Filesystem state manager tests Now that we're verifying the Terraform version during state loading, we need to force a particular Terraform version to use during these tests. --- states/statemgr/filesystem_test.go | 40 +++++++++++++++++++++++++++++- version.go | 2 +- version/version.go | 8 ++++-- 3 files changed, 46 insertions(+), 4 deletions(-) diff --git a/states/statemgr/filesystem_test.go b/states/statemgr/filesystem_test.go index 4e50b4a4398b..7977a6f8d9a1 100644 --- a/states/statemgr/filesystem_test.go +++ b/states/statemgr/filesystem_test.go @@ -4,23 +4,26 @@ import ( "io/ioutil" "os" "os/exec" + "strings" "sync" "testing" "github.com/go-test/deep" - version "github.com/hashicorp/go-version" "github.com/hashicorp/terraform/states/statefile" + tfversion "github.com/hashicorp/terraform/version" ) func TestFilesystem(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() ls := testFilesystem(t) defer os.Remove(ls.readPath) TestFull(t, ls) } func TestFilesystemRace(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() ls := testFilesystem(t) defer os.Remove(ls.readPath) @@ -37,6 +40,7 @@ func TestFilesystemRace(t *testing.T) { } func TestFilesystemLocks(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() s := testFilesystem(t) defer os.Remove(s.readPath) @@ -97,6 +101,7 @@ func TestFilesystemLocks(t *testing.T) { // Verify 
that we can write to the state file, as Windows' mandatory locking // will prevent writing to a handle different than the one that hold the lock. func TestFilesystem_writeWhileLocked(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() s := testFilesystem(t) defer os.Remove(s.readPath) @@ -119,6 +124,7 @@ func TestFilesystem_writeWhileLocked(t *testing.T) { } func TestFilesystem_pathOut(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() f, err := ioutil.TempFile("", "tf") if err != nil { t.Fatalf("err: %s", err) @@ -134,6 +140,7 @@ func TestFilesystem_pathOut(t *testing.T) { } func TestFilesystem_backup(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() f, err := ioutil.TempFile("", "tf") if err != nil { t.Fatalf("err: %s", err) @@ -166,6 +173,7 @@ func TestFilesystem_backup(t *testing.T) { } func TestFilesystem_nonExist(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() ls := NewFilesystem("ishouldntexist") if err := ls.RefreshState(); err != nil { t.Fatalf("err: %s", err) @@ -177,6 +185,7 @@ func TestFilesystem_nonExist(t *testing.T) { } func TestFilesystem_impl(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() var _ Reader = new(Filesystem) var _ Writer = new(Filesystem) var _ Persister = new(Filesystem) @@ -212,6 +221,7 @@ func testFilesystem(t *testing.T) *Filesystem { // Make sure we can refresh while the state is locked func TestFilesystem_refreshWhileLocked(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() f, err := ioutil.TempFile("", "tf") if err != nil { t.Fatalf("err: %s", err) @@ -253,3 +263,31 @@ func TestFilesystem_refreshWhileLocked(t *testing.T) { t.Fatal("missing state") } } + +func testOverrideVersion(t *testing.T, v string) func() { + oldVersionStr := tfversion.Version + oldPrereleaseStr := tfversion.Prerelease + oldSemVer := tfversion.SemVer + + var newPrereleaseStr string + if dash := strings.Index(v, "-"); dash != -1 { + newPrereleaseStr = v[dash+1:] + v = v[:dash] + } + + newSemVer, err := 
version.NewVersion(v) + if err != nil { + t.Errorf("invalid override version %q: %s", v, err) + } + newVersionStr := newSemVer.String() + + tfversion.Version = newVersionStr + tfversion.Prerelease = newPrereleaseStr + tfversion.SemVer = newSemVer + + return func() { // reset function + tfversion.Version = oldVersionStr + tfversion.Prerelease = oldPrereleaseStr + tfversion.SemVer = oldSemVer + } +} diff --git a/version.go b/version.go index baefdc2d9700..36d16cb28e24 100644 --- a/version.go +++ b/version.go @@ -7,6 +7,6 @@ import ( // The git commit that was compiled. This will be filled in by the compiler. var GitCommit string -const Version = version.Version +var Version = version.Version var VersionPrerelease = version.Prerelease diff --git a/version/version.go b/version/version.go index 0cfac684e944..e6cc16506d51 100644 --- a/version/version.go +++ b/version/version.go @@ -11,7 +11,7 @@ import ( ) // The main version number that is being run at the moment. -const Version = "0.12.0" +var Version = "0.12.0" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release @@ -21,7 +21,11 @@ var Prerelease = "dev" // SemVer is an instance of version.Version. This has the secondary // benefit of verifying during tests and init time that our version is a // proper semantic version, which should always be the case. -var SemVer = version.Must(version.NewVersion(Version)) +var SemVer *version.Version + +func init() { + SemVer = version.Must(version.NewVersion(Version)) +} // Header is the header name used to send the current terraform version // in http requests. 
From 22c84c71a434f6ec697e522396304138c1768f73 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Tue, 13 Nov 2018 16:48:59 -0800 Subject: [PATCH 110/149] command: Use statemgr.Import and statemgr.Export for state push and pull We previously hacked around the import/export functionality being missing in the statemgr layer after refactoring, but now it's been reintroduced to fix functionality elsewhere we should use the centralized Import and Export functions to ensure consistent behavior. In particular, this pushes the logic for checking lineage and serial during push down into the state manager itself, which is better because all other details about lineage and serial are managed within the state managers. --- command/state_pull.go | 26 ++++++++----------- command/state_pull_test.go | 2 +- command/state_push.go | 24 +++++++---------- .../local-state.tfstate | 14 +++++++++- .../state-push-bad-lineage/replace.tfstate | 14 +++++++++- states/statemgr/helper.go | 20 +------------- 6 files changed, 48 insertions(+), 52 deletions(-) diff --git a/command/state_pull.go b/command/state_pull.go index 6724f6628165..0ac531aa11ed 100644 --- a/command/state_pull.go +++ b/command/state_pull.go @@ -35,7 +35,7 @@ func (c *StatePullCommand) Run(args []string) int { return 1 } - // Get the state + // Get the state manager for the current workspace env := c.Workspace() stateMgr, err := b.StateMgr(env) if err != nil { @@ -47,24 +47,20 @@ func (c *StatePullCommand) Run(args []string) int { return 1 } - state := stateMgr.State() - if state == nil { - // Output on "error" so it shows up on stderr - c.Ui.Error("Empty state (no state)") - return 0 - } + // Get a statefile object representing the latest snapshot + stateFile := statemgr.Export(stateMgr) - // Get the state file. 
- stateFile := statemgr.StateFile(stateMgr, state) + if stateFile != nil { // we produce no output if the statefile is nil + var buf bytes.Buffer + err = statefile.Write(stateFile, &buf) + if err != nil { + c.Ui.Error(fmt.Sprintf("Failed to write state: %s", err)) + return 1 + } - var buf bytes.Buffer - err = statefile.Write(stateFile, &buf) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to write state: %s", err)) - return 1 + c.Ui.Output(buf.String()) } - c.Ui.Output(buf.String()) return 0 } diff --git a/command/state_pull_test.go b/command/state_pull_test.go index 7338f8493721..8c7680601785 100644 --- a/command/state_pull_test.go +++ b/command/state_pull_test.go @@ -47,7 +47,7 @@ func TestStatePull_noState(t *testing.T) { defer testFixCwd(t, tmp, cwd) p := testProvider() - ui := new(cli.MockUi) + ui := cli.NewMockUi() c := &StatePullCommand{ Meta: Meta{ testingOverrides: metaOverridesForProvider(p), diff --git a/command/state_push.go b/command/state_push.go index 9de232abca42..2aa39bd8f07c 100644 --- a/command/state_push.go +++ b/command/state_push.go @@ -70,7 +70,7 @@ func (c *StatePushCommand) Run(args []string) int { return 1 } - // Get the state + // Get the state manager for the currently-selected workspace env := c.Workspace() stateMgr, err := b.StateMgr(env) if err != nil { @@ -81,23 +81,17 @@ func (c *StatePushCommand) Run(args []string) int { c.Ui.Error(fmt.Sprintf("Failed to refresh destination state: %s", err)) return 1 } - dstState := stateMgr.State() - // If we're not forcing, then perform safety checks - if !flagForce && !dstState.Empty() { - dstStateFile := statemgr.StateFile(stateMgr, dstState) - - if dstStateFile.Lineage != srcStateFile.Lineage { - c.Ui.Error(strings.TrimSpace(errStatePushLineage)) - return 1 - } - if dstStateFile.Serial > srcStateFile.Serial { - c.Ui.Error(strings.TrimSpace(errStatePushSerialNewer)) - return 1 - } + if srcStateFile == nil { + // We'll push a new empty state instead + srcStateFile = statemgr.NewStateFile() } 
- // Overwrite it + // Import it, forcing through the lineage/serial if requested and possible. + if err := statemgr.Import(srcStateFile, stateMgr, flagForce); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to write state: %s", err)) + return 1 + } if err := stateMgr.WriteState(srcStateFile.State); err != nil { c.Ui.Error(fmt.Sprintf("Failed to write state: %s", err)) return 1 diff --git a/command/test-fixtures/state-push-bad-lineage/local-state.tfstate b/command/test-fixtures/state-push-bad-lineage/local-state.tfstate index 4023b53e02b9..fe06c362c5de 100644 --- a/command/test-fixtures/state-push-bad-lineage/local-state.tfstate +++ b/command/test-fixtures/state-push-bad-lineage/local-state.tfstate @@ -1,5 +1,17 @@ { "version": 3, "serial": 1, - "lineage": "mismatch" + "lineage": "mismatch", + "modules": [ + { + "path": ["root"], + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + }, + "resources": {} + } + ] } diff --git a/command/test-fixtures/state-push-bad-lineage/replace.tfstate b/command/test-fixtures/state-push-bad-lineage/replace.tfstate index 0e3b7013acfc..dad6859ea272 100644 --- a/command/test-fixtures/state-push-bad-lineage/replace.tfstate +++ b/command/test-fixtures/state-push-bad-lineage/replace.tfstate @@ -1,5 +1,17 @@ { "version": 3, "serial": 2, - "lineage": "hello" + "lineage": "hello", + "modules": [ + { + "path": ["root"], + "outputs": { + "foo": { + "type": "string", + "value": "baz" + } + }, + "resources": {} + } + ] } diff --git a/states/statemgr/helper.go b/states/statemgr/helper.go index 5feb09e9431e..7eef70efcd6d 100644 --- a/states/statemgr/helper.go +++ b/states/statemgr/helper.go @@ -15,28 +15,10 @@ func NewStateFile() *statefile.File { return &statefile.File{ Lineage: NewLineage(), TerraformVersion: version.SemVer, + State: states.NewState(), } } -// StateFile is a special helper to obtain a statefile representation -// of a state snapshot that can be written later by a call -func StateFile(mgr Storage, state 
*states.State) *statefile.File { - ret := &statefile.File{ - State: state.DeepCopy(), - TerraformVersion: version.SemVer, - } - - // If the given manager uses snapshot metadata then we'll save that - // in our file so we can check it again during WritePlannedStateUpdate. - if mr, ok := mgr.(PersistentMeta); ok { - m := mr.StateSnapshotMeta() - ret.Lineage = m.Lineage - ret.Serial = m.Serial - } - - return ret -} - // RefreshAndRead refreshes the persistent snapshot in the given state manager // and then returns it. // From f39a5d096235f4c21c81ae6b9037904080e9a4d8 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Tue, 13 Nov 2018 17:39:06 -0800 Subject: [PATCH 111/149] command: Fix various TestMetaBackend-prefix tests Our new state model has a different implementation of "empty" that doesn't consider lineage/serial, so we need to have some actual content in these state fixtures to avoid them being skipped during state migrations. --- command/meta_backend_test.go | 8 ++++---- .../local-state.tfstate | 14 +++++++++++++- .../local-state.tfstate | 14 +++++++++++++- .../terraform.tfstate.d/env2/terraform.tfstate | 14 +++++++++++++- .../local-state.tfstate | 14 +++++++++++++- .../terraform.tfstate.d/env2/terraform.tfstate | 14 +++++++++++++- .../terraform.tfstate.d/env2/terraform.tfstate | 14 +++++++++++++- .../terraform.tfstate.d/env1/terraform.tfstate | 14 +++++++++++++- .../terraform.tfstate.d/env2/terraform.tfstate | 14 +++++++++++++- .../local-state.tfstate | 14 +++++++++++++- .../backend-change/local-state.tfstate | 14 +++++++++++++- .../backend-new-migrate-existing/terraform.tfstate | 7 ++++++- .../backend-unset/local-state.tfstate | 14 +++++++++++++- 13 files changed, 153 insertions(+), 16 deletions(-) diff --git a/command/meta_backend_test.go b/command/meta_backend_test.go index f0c9510948e1..09ffccb76406 100644 --- a/command/meta_backend_test.go +++ b/command/meta_backend_test.go @@ -502,8 +502,8 @@ func TestMetaBackend_configureNewWithStateExisting(t 
*testing.T) { if state == nil { t.Fatal("state is nil") } - if testStateMgrCurrentLineage(s) != "local" { - t.Fatalf("bad: %#v", state) + if got, want := testStateMgrCurrentLineage(s), "local"; got != want { + t.Fatalf("wrong lineage %q; want %q", got, want) } // Write some state @@ -1414,8 +1414,8 @@ func TestMetaBackend_configuredUnsetCopy(t *testing.T) { if state == nil { t.Fatal("state is nil") } - if testStateMgrCurrentLineage(s) != "configuredUnset" { - t.Fatalf("bad: %#v", state) + if got, want := testStateMgrCurrentLineage(s), "configuredUnset"; got != want { + t.Fatalf("wrong state lineage %q; want %q", got, want) } // Verify a backup doesn't exist diff --git a/command/test-fixtures/backend-change-multi-default-to-single/local-state.tfstate b/command/test-fixtures/backend-change-multi-default-to-single/local-state.tfstate index 88c1d86ec4b7..c30143790cd5 100644 --- a/command/test-fixtures/backend-change-multi-default-to-single/local-state.tfstate +++ b/command/test-fixtures/backend-change-multi-default-to-single/local-state.tfstate @@ -2,5 +2,17 @@ "version": 3, "terraform_version": "0.8.2", "serial": 7, - "lineage": "backend-change" + "lineage": "backend-change", + "modules": [ + { + "path": ["root"], + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + }, + "resources": {} + } + ] } diff --git a/command/test-fixtures/backend-change-multi-to-multi/local-state.tfstate b/command/test-fixtures/backend-change-multi-to-multi/local-state.tfstate index 88c1d86ec4b7..c30143790cd5 100644 --- a/command/test-fixtures/backend-change-multi-to-multi/local-state.tfstate +++ b/command/test-fixtures/backend-change-multi-to-multi/local-state.tfstate @@ -2,5 +2,17 @@ "version": 3, "terraform_version": "0.8.2", "serial": 7, - "lineage": "backend-change" + "lineage": "backend-change", + "modules": [ + { + "path": ["root"], + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + }, + "resources": {} + } + ] } diff --git 
a/command/test-fixtures/backend-change-multi-to-multi/terraform.tfstate.d/env2/terraform.tfstate b/command/test-fixtures/backend-change-multi-to-multi/terraform.tfstate.d/env2/terraform.tfstate index 855a27f4cf54..83854e0f6979 100644 --- a/command/test-fixtures/backend-change-multi-to-multi/terraform.tfstate.d/env2/terraform.tfstate +++ b/command/test-fixtures/backend-change-multi-to-multi/terraform.tfstate.d/env2/terraform.tfstate @@ -2,5 +2,17 @@ "version": 3, "terraform_version": "0.8.2", "serial": 7, - "lineage": "backend-change-env2" + "lineage": "backend-change-env2", + "modules": [ + { + "path": ["root"], + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + }, + "resources": {} + } + ] } diff --git a/command/test-fixtures/backend-change-multi-to-no-default-with-default/local-state.tfstate b/command/test-fixtures/backend-change-multi-to-no-default-with-default/local-state.tfstate index 980f732f6a7b..18ae42c9aa43 100644 --- a/command/test-fixtures/backend-change-multi-to-no-default-with-default/local-state.tfstate +++ b/command/test-fixtures/backend-change-multi-to-no-default-with-default/local-state.tfstate @@ -2,5 +2,17 @@ "version": 3, "terraform_version": "0.8.2", "serial": 7, - "lineage": "backend-change-env1" + "lineage": "backend-change-env1", + "modules": [ + { + "path": ["root"], + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + }, + "resources": {} + } + ] } diff --git a/command/test-fixtures/backend-change-multi-to-no-default-with-default/terraform.tfstate.d/env2/terraform.tfstate b/command/test-fixtures/backend-change-multi-to-no-default-with-default/terraform.tfstate.d/env2/terraform.tfstate index 855a27f4cf54..83854e0f6979 100644 --- a/command/test-fixtures/backend-change-multi-to-no-default-with-default/terraform.tfstate.d/env2/terraform.tfstate +++ b/command/test-fixtures/backend-change-multi-to-no-default-with-default/terraform.tfstate.d/env2/terraform.tfstate @@ -2,5 +2,17 @@ "version": 3, 
"terraform_version": "0.8.2", "serial": 7, - "lineage": "backend-change-env2" + "lineage": "backend-change-env2", + "modules": [ + { + "path": ["root"], + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + }, + "resources": {} + } + ] } diff --git a/command/test-fixtures/backend-change-multi-to-no-default-without-default/terraform.tfstate.d/env2/terraform.tfstate b/command/test-fixtures/backend-change-multi-to-no-default-without-default/terraform.tfstate.d/env2/terraform.tfstate index 855a27f4cf54..83854e0f6979 100644 --- a/command/test-fixtures/backend-change-multi-to-no-default-without-default/terraform.tfstate.d/env2/terraform.tfstate +++ b/command/test-fixtures/backend-change-multi-to-no-default-without-default/terraform.tfstate.d/env2/terraform.tfstate @@ -2,5 +2,17 @@ "version": 3, "terraform_version": "0.8.2", "serial": 7, - "lineage": "backend-change-env2" + "lineage": "backend-change-env2", + "modules": [ + { + "path": ["root"], + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + }, + "resources": {} + } + ] } diff --git a/command/test-fixtures/backend-change-multi-to-single/terraform.tfstate.d/env1/terraform.tfstate b/command/test-fixtures/backend-change-multi-to-single/terraform.tfstate.d/env1/terraform.tfstate index 88c1d86ec4b7..c30143790cd5 100644 --- a/command/test-fixtures/backend-change-multi-to-single/terraform.tfstate.d/env1/terraform.tfstate +++ b/command/test-fixtures/backend-change-multi-to-single/terraform.tfstate.d/env1/terraform.tfstate @@ -2,5 +2,17 @@ "version": 3, "terraform_version": "0.8.2", "serial": 7, - "lineage": "backend-change" + "lineage": "backend-change", + "modules": [ + { + "path": ["root"], + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + }, + "resources": {} + } + ] } diff --git a/command/test-fixtures/backend-change-multi-to-single/terraform.tfstate.d/env2/terraform.tfstate b/command/test-fixtures/backend-change-multi-to-single/terraform.tfstate.d/env2/terraform.tfstate 
index 855a27f4cf54..83854e0f6979 100644 --- a/command/test-fixtures/backend-change-multi-to-single/terraform.tfstate.d/env2/terraform.tfstate +++ b/command/test-fixtures/backend-change-multi-to-single/terraform.tfstate.d/env2/terraform.tfstate @@ -2,5 +2,17 @@ "version": 3, "terraform_version": "0.8.2", "serial": 7, - "lineage": "backend-change-env2" + "lineage": "backend-change-env2", + "modules": [ + { + "path": ["root"], + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + }, + "resources": {} + } + ] } diff --git a/command/test-fixtures/backend-change-single-to-single/local-state.tfstate b/command/test-fixtures/backend-change-single-to-single/local-state.tfstate index 88c1d86ec4b7..c30143790cd5 100644 --- a/command/test-fixtures/backend-change-single-to-single/local-state.tfstate +++ b/command/test-fixtures/backend-change-single-to-single/local-state.tfstate @@ -2,5 +2,17 @@ "version": 3, "terraform_version": "0.8.2", "serial": 7, - "lineage": "backend-change" + "lineage": "backend-change", + "modules": [ + { + "path": ["root"], + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + }, + "resources": {} + } + ] } diff --git a/command/test-fixtures/backend-change/local-state.tfstate b/command/test-fixtures/backend-change/local-state.tfstate index 88c1d86ec4b7..c30143790cd5 100644 --- a/command/test-fixtures/backend-change/local-state.tfstate +++ b/command/test-fixtures/backend-change/local-state.tfstate @@ -2,5 +2,17 @@ "version": 3, "terraform_version": "0.8.2", "serial": 7, - "lineage": "backend-change" + "lineage": "backend-change", + "modules": [ + { + "path": ["root"], + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + }, + "resources": {} + } + ] } diff --git a/command/test-fixtures/backend-new-migrate-existing/terraform.tfstate b/command/test-fixtures/backend-new-migrate-existing/terraform.tfstate index 7fc619980475..ce8d954f4975 100644 --- 
a/command/test-fixtures/backend-new-migrate-existing/terraform.tfstate +++ b/command/test-fixtures/backend-new-migrate-existing/terraform.tfstate @@ -8,7 +8,12 @@ "path": [ "root" ], - "outputs": {}, + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + }, "resources": {}, "depends_on": [] } diff --git a/command/test-fixtures/backend-unset/local-state.tfstate b/command/test-fixtures/backend-unset/local-state.tfstate index 51d5880305ef..35caa2dc6ac0 100644 --- a/command/test-fixtures/backend-unset/local-state.tfstate +++ b/command/test-fixtures/backend-unset/local-state.tfstate @@ -2,5 +2,17 @@ "version": 3, "terraform_version": "0.8.2", "serial": 7, - "lineage": "configuredUnset" + "lineage": "configuredUnset", + "modules": [ + { + "path": ["root"], + "outputs": { + "foo": { + "type": "string", + "value": "bar" + } + }, + "resources": {} + } + ] } From 6c7cecfbd87ce30ad39cb8a15bbef7922da800ad Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 14 Nov 2018 15:08:36 -0800 Subject: [PATCH 112/149] command: More logging during migration This just finishes off the logging added in earlier commits to get all the way through to the actual migration call. 
--- command/meta_backend_migrate.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/command/meta_backend_migrate.go b/command/meta_backend_migrate.go index eceb3032d6af..79ba0c8fe404 100644 --- a/command/meta_backend_migrate.go +++ b/command/meta_backend_migrate.go @@ -375,9 +375,11 @@ func (m *Meta) backendMigrateState_s_s(opts *backendMigrateOpts) error { // Confirm with the user whether we want to copy state over confirm, err := confirmFunc(stateOne, stateTwo, opts) if err != nil { + log.Print("[TRACE] backendMigrateState: error reading input, so aborting migration") return err } if !confirm { + log.Print("[TRACE] backendMigrateState: user cancelled at confirmation prompt, so aborting migration") return nil } } @@ -385,6 +387,7 @@ func (m *Meta) backendMigrateState_s_s(opts *backendMigrateOpts) error { // Confirmed! We'll have the statemgr package handle the migration, which // includes preserving any lineage/serial information where possible, if // both managers support such metadata. + log.Print("[TRACE] backendMigrateState: migration confirmed, so migrating") if err := statemgr.Migrate(stateTwo, stateOne); err != nil { return fmt.Errorf(strings.TrimSpace(errBackendStateCopy), opts.OneType, opts.TwoType, err) From ec27526cc3f3fc0116a2756ba1ec459c38adf331 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 14 Nov 2018 16:03:14 -0800 Subject: [PATCH 113/149] command: Fix TestMetaBackend_configuredChangeCopy_multiToMulti This was failing because we now handle the settings for the local backend a little differently as a result of decoding it with the HCL2 machinery. Specifically, the backend.State* fields are now assumed to be what is given in configuration, and any CLI overrides are maintained separately in OverrideState* fields so that they can be imposed "just in time" in StatePaths. 
This is particularly important because OverrideStatePath (when set) is used regardless of workspace name, while StatePath is a suitable value only for the "default" workspace, with others needing to be constructed from StateWorkspaceDir instead. --- backend/local/backend.go | 36 ++++++++++++++++++++++++----------- backend/local/cli.go | 11 ++++++++--- command/meta_backend_test.go | 4 ++-- states/statemgr/filesystem.go | 19 ++++++++++++++++++ 4 files changed, 54 insertions(+), 16 deletions(-) diff --git a/backend/local/backend.go b/backend/local/backend.go index ce4edc0c3564..07397e9f412b 100644 --- a/backend/local/backend.go +++ b/backend/local/backend.go @@ -62,6 +62,14 @@ type Local struct { StateBackupPath string StateWorkspaceDir string + // The OverrideState* paths are set based on per-operation CLI arguments + // and will override what'd be built from the State* fields if non-empty. + // While the interpretation of the State* fields depends on the active + // workspace, the OverrideState* fields are always used literally. + OverrideStatePath string + OverrideStateOutPath string + OverrideStateBackupPath string + // We only want to create a single instance of a local state, so store them // here as they're loaded. states map[string]statemgr.Full @@ -251,6 +259,7 @@ func (b *Local) DeleteWorkspace(name string) error { func (b *Local) StateMgr(name string) (statemgr.Full, error) { statePath, stateOutPath, backupPath := b.StatePaths(name) + log.Printf("[TRACE] backend/local: state manager for workspace %q will:\n - read initial snapshot from %s\n - write new snapshots to %s\n - create any backup at %s", name, statePath, stateOutPath, backupPath) // If we have a backend handling state, delegate to that. if b.Backend != nil { @@ -484,26 +493,31 @@ func (b *Local) schemaConfigure(ctx context.Context) error { // StatePaths returns the StatePath, StateOutPath, and StateBackupPath as // configured from the CLI. 
func (b *Local) StatePaths(name string) (stateIn, stateOut, backupOut string) { - statePath := b.StatePath - stateOutPath := b.StateOutPath - backupPath := b.StateBackupPath + statePath := b.OverrideStatePath + stateOutPath := b.OverrideStateOutPath + backupPath := b.OverrideStateBackupPath - if name == "" { - name = backend.DefaultStateName + isDefault := name == backend.DefaultStateName || name == "" + + baseDir := "" + if !isDefault { + baseDir = filepath.Join(b.stateWorkspaceDir(), name) } - if name == backend.DefaultStateName { + if statePath == "" { + if isDefault { + statePath = b.StatePath // s.StatePath applies only to the default workspace, since StateWorkspaceDir is used otherwise + } if statePath == "" { - statePath = DefaultStateFilename + statePath = filepath.Join(baseDir, DefaultStateFilename) } - } else { - statePath = filepath.Join(b.stateWorkspaceDir(), name, DefaultStateFilename) } - if stateOutPath == "" { stateOutPath = statePath } - + if backupPath == "" { + backupPath = b.StateBackupPath + } switch backupPath { case "-": backupPath = "" diff --git a/backend/local/cli.go b/backend/local/cli.go index 3385e663934c..c3d7a65ac488 100644 --- a/backend/local/cli.go +++ b/backend/local/cli.go @@ -1,6 +1,8 @@ package local import ( + "log" + "github.com/hashicorp/terraform/backend" ) @@ -16,15 +18,18 @@ func (b *Local) CLIInit(opts *backend.CLIOpts) error { // configure any new cli options if opts.StatePath != "" { - b.StatePath = opts.StatePath + log.Printf("[TRACE] backend/local: CLI option -state is overriding state path to %s", opts.StatePath) + b.OverrideStatePath = opts.StatePath } if opts.StateOutPath != "" { - b.StateOutPath = opts.StateOutPath + log.Printf("[TRACE] backend/local: CLI option -state-out is overriding state output path to %s", opts.StateOutPath) + b.OverrideStateOutPath = opts.StateOutPath } if opts.StateBackupPath != "" { - b.StateBackupPath = opts.StateBackupPath + log.Printf("[TRACE] backend/local: CLI option -backup is 
overriding state backup path to %s", opts.StateBackupPath) + b.OverrideStateBackupPath = opts.StateBackupPath } return nil diff --git a/command/meta_backend_test.go b/command/meta_backend_test.go index 09ffccb76406..6ec20aa87065 100644 --- a/command/meta_backend_test.go +++ b/command/meta_backend_test.go @@ -1154,7 +1154,7 @@ func TestMetaBackend_configuredChangeCopy_multiToMulti(t *testing.T) { // Verify existing workspaces exist envPath := filepath.Join(backendLocal.DefaultWorkspaceDir, "env2", backendLocal.DefaultStateFilename) if _, err := os.Stat(envPath); err != nil { - t.Fatal("env should exist") + t.Fatalf("%s should exist, but does not", envPath) } } @@ -1162,7 +1162,7 @@ func TestMetaBackend_configuredChangeCopy_multiToMulti(t *testing.T) { // Verify new workspaces exist envPath := filepath.Join("envdir-new", "env2", backendLocal.DefaultStateFilename) if _, err := os.Stat(envPath); err != nil { - t.Fatal("env should exist") + t.Fatalf("%s should exist, but does not", envPath) } } } diff --git a/states/statemgr/filesystem.go b/states/statemgr/filesystem.go index 740c23e75076..47ea17b25c56 100644 --- a/states/statemgr/filesystem.go +++ b/states/statemgr/filesystem.go @@ -143,6 +143,7 @@ func (s *Filesystem) writeState(state *states.State, meta *SnapshotMeta) error { // We'll try to write our backup first, so we can be sure we've created // it successfully before clobbering the original file it came from. 
if !s.writtenBackup && s.backupFile != nil && s.backupPath != "" && !statefile.StatesMarshalEqual(state, s.backupFile.State) { + log.Printf("[TRACE] statemgr.Filesystem: creating backup snapshot at %s", s.backupPath) bfh, err := os.Create(s.backupPath) if err != nil { return fmt.Errorf("failed to create local state backup file: %s", err) @@ -170,6 +171,7 @@ func (s *Filesystem) writeState(state *states.State, meta *SnapshotMeta) error { } s.file.State = state.DeepCopy() + log.Print("[TRACE] statemgr.Filesystem: truncating the state file") if _, err := s.stateFileOut.Seek(0, os.SEEK_SET); err != nil { return err } @@ -179,19 +181,25 @@ func (s *Filesystem) writeState(state *states.State, meta *SnapshotMeta) error { if state == nil { // if we have no state, don't write anything else. + log.Print("[TRACE] statemgr.Filesystem: state is nil, so leaving the file empty") return nil } if meta == nil { if s.readFile == nil || !statefile.StatesMarshalEqual(s.file.State, s.readFile.State) { s.file.Serial++ + log.Printf("[TRACE] statemgr.Filesystem: state has changed since last snapshot, so incrementing serial to %d", s.file.Serial) + } else { + log.Print("[TRACE] statemgr.Filesystem: no state changes since last snapshot") } } else { // Force new metadata s.file.Lineage = meta.Lineage s.file.Serial = meta.Serial + log.Printf("[TRACE] statemgr.Filesystem: forcing lineage %q serial %d for migration/import", s.file.Lineage, s.file.Serial) } + log.Printf("[TRACE] statemgr.Filesystem: writing snapshot at %s", s.path) if err := statefile.Write(s.file, s.stateFileOut); err != nil { return err } @@ -220,6 +228,8 @@ func (s *Filesystem) RefreshState() error { // output file, and the output file has been locked already, we can't open // the file again. 
if !s.written && (s.stateFileOut == nil || s.readPath != s.path) { + log.Printf("[TRACE] statemgr.Filesystem: reading initial snapshot from %s", s.readPath) + // we haven't written a state file yet, so load from readPath f, err := os.Open(s.readPath) if err != nil { @@ -237,8 +247,11 @@ func (s *Filesystem) RefreshState() error { reader = f } } else { + log.Printf("[TRACE] statemgr.Filesystem: reading snapshot from %s", s.path) + // no state to refresh if s.stateFileOut == nil { + log.Printf("[TRACE] statemgr.Filesystem: no state snapshot has been written yet") return nil } @@ -251,16 +264,21 @@ func (s *Filesystem) RefreshState() error { // nothing to backup if there's no initial state if f == nil { + log.Printf("[TRACE] statemgr.Filesystem: no initial state, so will skip writing a backup") s.writtenBackup = true } // if there's no state we just assign the nil return value if err != nil && err != statefile.ErrNoState { + log.Printf("[TRACE] statemgr.Filesystem: state snapshot is nil") return err } s.file = f s.readFile = s.file.DeepCopy() + if s.file != nil { + log.Printf("[TRACE] statemgr.Filesystem: read snapshot with lineage %q serial %d", s.file.Lineage, s.file.Serial) + } return nil } @@ -400,6 +418,7 @@ func (s *Filesystem) WriteStateForMigration(f *statefile.File, force bool) error // Open the state file, creating the directories and file as needed. 
func (s *Filesystem) createStateFiles() error { + log.Printf("[TRACE] statemgr.Filesystem: preparing to manage state snapshots at %s", s.path) // This could race, but we only use it to clean up empty files if _, err := os.Stat(s.path); os.IsNotExist(err) { From aecb66d3dbdc04a05e13500fa0bbbb00cafc3e6e Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 14 Nov 2018 16:06:48 -0800 Subject: [PATCH 114/149] command: Fix TestMetaBackend_configuredChangeCopy_multiToNoDefaultWithoutDefault As part of integrating the new "remote" backend we relaxed the requirement that a "default" workspace must exist in all backends and now skip migrating empty workspace states to avoid creating unnecessary "default" workspaces when switching between backends that require it and backends that don't, such as when switching from the local backend (which always has a "default" workspace) to Terraform Enterprise. --- command/meta_backend_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/command/meta_backend_test.go b/command/meta_backend_test.go index 6ec20aa87065..d837eaff94b3 100644 --- a/command/meta_backend_test.go +++ b/command/meta_backend_test.go @@ -1279,9 +1279,9 @@ func TestMetaBackend_configuredChangeCopy_multiToNoDefaultWithoutDefault(t *test } sort.Strings(workspaces) - expected := []string{"default", "env2"} + expected := []string{"env2"} // default is skipped because it is absent in the source backend if !reflect.DeepEqual(workspaces, expected) { - t.Fatalf("bad: %#v", workspaces) + t.Fatalf("wrong workspaces\ngot: %#v\nwant: %#v", workspaces, expected) } { @@ -1306,7 +1306,7 @@ func TestMetaBackend_configuredChangeCopy_multiToNoDefaultWithoutDefault(t *test // Verify existing workspaces exist envPath := filepath.Join(backendLocal.DefaultWorkspaceDir, "env2", backendLocal.DefaultStateFilename) if _, err := os.Stat(envPath); err != nil { - t.Fatal("env should exist") + t.Fatalf("%s should exist, but does not", envPath) } } @@ -1314,7 +1314,7 
@@ func TestMetaBackend_configuredChangeCopy_multiToNoDefaultWithoutDefault(t *test // Verify new workspaces exist envPath := filepath.Join("envdir-new", "env2", backendLocal.DefaultStateFilename) if _, err := os.Stat(envPath); err != nil { - t.Fatal("env should exist") + t.Fatalf("%s should exist, but does not", envPath) } } } From 2b9f92be31ec0ec71bfb29fcc972c383dfe178f6 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Wed, 14 Nov 2018 16:31:56 -0800 Subject: [PATCH 115/149] command: Partially fix TestMetaBackend_planLocalStatePath This test is testing some strange implementation details of the old local backend which do not hold with the new filesystem state manager. Specifically, it was expecting state to be read from the stateOutPath rather than the statePath, which makes no sense here because the backend is configured to read from the default terraform.tfstate file (which does not exist.) There is another problem with this test which will be addressed in a subsequent commit. --- command/meta_backend.go | 3 +++ command/meta_backend_test.go | 9 ++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/command/meta_backend.go b/command/meta_backend.go index 5ce527dbc52f..f519ebcec213 100644 --- a/command/meta_backend.go +++ b/command/meta_backend.go @@ -171,6 +171,7 @@ func (m *Meta) BackendForPlan(settings plans.Backend) (backend.Enhanced, tfdiags return nil, diags } b := f() + log.Printf("[TRACE] Meta.BackendForPlan: instantiated backend of type %T", b) schema := b.ConfigSchema() configVal, err := settings.Config.Decode(schema.ImpliedType()) @@ -204,11 +205,13 @@ func (m *Meta) BackendForPlan(settings plans.Backend) (backend.Enhanced, tfdiags // If the result of loading the backend is an enhanced backend, // then return that as-is. This works even if b == nil (it will be !ok). 
if enhanced, ok := b.(backend.Enhanced); ok { + log.Printf("[TRACE] Meta.BackendForPlan: backend %T supports operations", b) return enhanced, nil } // Otherwise, we'll wrap our state-only remote backend in the local backend // to cause any operations to be run locally. + log.Printf("[TRACE] Meta.Backend: backend %T does not support operations, so wrapping it in a local backend", b) cliOpts := m.backendCLIOpts() cliOpts.Validation = false // don't validate here in case config contains file(...) calls where the file doesn't exist local := backendLocal.NewWithBackend(b) diff --git a/command/meta_backend_test.go b/command/meta_backend_test.go index d837eaff94b3..2adfbabb625d 100644 --- a/command/meta_backend_test.go +++ b/command/meta_backend_test.go @@ -1549,7 +1549,7 @@ func TestMetaBackend_planLocalStatePath(t *testing.T) { if err != nil { t.Fatal(err) } - backendConfig := plans.Backend{ + plannedBackend := plans.Backend{ Type: "local", Config: backendConfigRaw, Workspace: "default", @@ -1569,7 +1569,7 @@ func TestMetaBackend_planLocalStatePath(t *testing.T) { m.stateOutPath = statePath // Get the backend - b, diags := m.BackendForPlan(backendConfig) + b, diags := m.BackendForPlan(plannedBackend) if diags.HasErrors() { t.Fatal(diags.Err()) } @@ -1583,10 +1583,9 @@ func TestMetaBackend_planLocalStatePath(t *testing.T) { t.Fatalf("unexpected error: %s", err) } state := s.State() - if state == nil { - t.Fatal("state is nil") + if state != nil { + t.Fatal("default workspace state is not nil, but should be because we've not put anything there") } - assertStateHasMarker(t, state, mark) // Verify the default path doesn't exist if _, err := os.Stat(DefaultStateFilename); err == nil { From b316e4ab568cbcf1824473fb08112032ca75cace Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Thu, 15 Nov 2018 13:31:30 -0800 Subject: [PATCH 116/149] command: Fix TestImport_remoteState The import command was imposing the default state path at the CLI level, rather than leaving that to be 
handled by the backend. As a result, the output state was always forced to be terraform.tfstate, regardless of the backend settings. --- command/import.go | 2 +- command/import_test.go | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/command/import.go b/command/import.go index 50b260b2d1fa..4126503b9de9 100644 --- a/command/import.go +++ b/command/import.go @@ -39,7 +39,7 @@ func (c *ImportCommand) Run(args []string) int { cmdFlags := c.Meta.flagSet("import") cmdFlags.IntVar(&c.Meta.parallelism, "parallelism", 0, "parallelism") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") + cmdFlags.StringVar(&c.Meta.statePath, "state", "", "path") cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", "", "path") cmdFlags.StringVar(&c.Meta.backupPath, "backup", "", "path") cmdFlags.StringVar(&configPath, "config", pwd, "path") diff --git a/command/import_test.go b/command/import_test.go index 1a6605a5e98b..7eed616ce3e4 100644 --- a/command/import_test.go +++ b/command/import_test.go @@ -1,6 +1,7 @@ package command import ( + "log" "fmt" "io/ioutil" "os" @@ -161,7 +162,7 @@ func TestImport_remoteState(t *testing.T) { statePath := "imported.tfstate" // init our backend - ui := new(cli.MockUi) + ui := cli.NewMockUi() m := Meta{ testingOverrides: metaOverridesForProvider(testProvider()), Ui: ui, @@ -178,8 +179,10 @@ func TestImport_remoteState(t *testing.T) { }, } + // (Using log here rather than t.Log so that these messages interleave with other trace logs) + log.Print("[TRACE] TestImport_remoteState running: terraform init") if code := ic.Run([]string{}); code != 0 { - t.Fatalf("bad: \n%s", ui.ErrorWriter) + t.Fatalf("init failed\n%s", ui.ErrorWriter) } p := testProvider() @@ -233,7 +236,7 @@ func TestImport_remoteState(t *testing.T) { "test_instance.foo", "bar", } - + log.Printf("[TRACE] TestImport_remoteState running: terraform import %s %s", args[0], args[1]) if code := c.Run(args); code != 0 { 
fmt.Println(ui.OutputWriter) t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) From be79bf0412dce80dbcc391de8684e042fb9e8cec Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Thu, 15 Nov 2018 14:45:10 -0800 Subject: [PATCH 117/149] command: Fix TestPlan_outBackend In an earlier change we fixed the "backendFromConfig" codepath to be able to properly detect changes to the -backend-config arguments during "terraform init", but this detection is too strict for the normal case of running an operation in a previously-initialized directory. Before any of the recent changes, the logic here was to selectively update the hash to include -backend-config settings in the init case. Since that late hash recalculation was confusing, here we take the alternative path of using the hash only in the normal case and full value comparison in the init case. Treating both of these cases separately makes things marginally easier to follow here. --- command/meta_backend.go | 10 ++++++++++ command/plan_test.go | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/command/meta_backend.go b/command/meta_backend.go index f519ebcec213..a6a9da4c06f2 100644 --- a/command/meta_backend.go +++ b/command/meta_backend.go @@ -462,6 +462,16 @@ func (m *Meta) backendFromConfig(opts *BackendOpts) (backend.Backend, tfdiags.Di // Potentially changing a backend configuration case c != nil && !s.Backend.Empty(): + // If we're not initializing, then it's sufficient for the configuration + // hashes to match, since that suggests that the static backend + // settings in the configuration files are unchanged. (The only + // record we have of CLI overrides is in the settings cache in this + // case, so we have no other source to compare with. 
+ if !opts.Init && cHash == s.Backend.Hash { + log.Printf("[TRACE] Meta.Backend: using already-initialized, unchanged %q backend configuration", c.Type) + return m.backend_C_r_S_unchanged(c, cHash, sMgr) + } + // If our configuration is the same, then we're just initializing // a previously configured remote backend. if !m.backendConfigNeedsMigration(c, s.Backend) { diff --git a/command/plan_test.go b/command/plan_test.go index 1abd6c9f375a..3220968f9916 100644 --- a/command/plan_test.go +++ b/command/plan_test.go @@ -342,7 +342,7 @@ func TestPlan_outBackend(t *testing.T) { } if code := c.Run(args); code != 0 { t.Logf("stdout: %s", ui.OutputWriter.String()) - t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + t.Fatalf("plan command failed with exit code %d\n\n%s", code, ui.ErrorWriter.String()) } plan := testReadPlan(t, outPath) From 27abd9c6b88dbc448793090bf743dd2a2748ab6b Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Thu, 15 Nov 2018 16:01:56 -0800 Subject: [PATCH 118/149] command: Fix TestMetaBackend_localDoesNotDeleteLocal The changes to how we handle setting the state path on the local backend broke the heuristic we were using here for detecting migration from one local backend to another with the same state path, which would by default end up deleting the state altogether after migration. We now use the StatePaths method to do this, which takes into account both the default values and any settings that have been set. Additionally this addresses a flaw in the old method which could potentially have deleted all non-default workspace state files if the "path" setting were changed without also changing the "workspace_dir" setting. This new approach is conservative because it will preserve all of the files if any one overlaps. 
--- backend/local/backend.go | 36 ++++++++++++++++++++++++++++++++++++ command/meta_backend.go | 4 +++- 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/backend/local/backend.go b/backend/local/backend.go index 07397e9f412b..5de7e1818c63 100644 --- a/backend/local/backend.go +++ b/backend/local/backend.go @@ -528,6 +528,42 @@ func (b *Local) StatePaths(name string) (stateIn, stateOut, backupOut string) { return statePath, stateOutPath, backupPath } +// PathsConflictWith returns true if any state path used by a workspace in +// the receiver is the same as any state path used by the other given +// local backend instance. +// +// This should be used when "migrating" from one local backend configuration to +// another in order to avoid deleting the "old" state snapshots if they are +// in the same files as the "new" state snapshots. +func (b *Local) PathsConflictWith(other *Local) bool { + otherPaths := map[string]struct{}{} + otherWorkspaces, err := other.Workspaces() + if err != nil { + // If we can't enumerate the workspaces then we'll conservatively + // assume that paths _do_ overlap, since we can't be certain. + return true + } + for _, name := range otherWorkspaces { + p, _, _ := other.StatePaths(name) + otherPaths[p] = struct{}{} + } + + ourWorkspaces, err := b.Workspaces() + if err != nil { + // If we can't enumerate the workspaces then we'll conservatively + // assume that paths _do_ overlap, since we can't be certain.
+ return true + } + + for _, name := range ourWorkspaces { + p, _, _ := b.StatePaths(name) + if _, exists := otherPaths[p]; exists { + return true + } + } + return false +} + // this only ensures that the named directory exists func (b *Local) createState(name string) error { if name == backend.DefaultStateName { diff --git a/command/meta_backend.go b/command/meta_backend.go index a6a9da4c06f2..4b69aa66ab0c 100644 --- a/command/meta_backend.go +++ b/command/meta_backend.go @@ -668,13 +668,15 @@ func (m *Meta) backend_C_r_s(c *configs.Backend, cHash int, sMgr *state.LocalSta erase := true if newLocalB, ok := b.(*backendLocal.Local); ok { if localB, ok := localB.(*backendLocal.Local); ok { - if newLocalB.StatePath == localB.StatePath { + if newLocalB.PathsConflictWith(localB) { erase = false + log.Printf("[TRACE] Meta.Backend: both old and new backends share the same local state paths, so not erasing old state") } } } if erase { + log.Printf("[TRACE] Meta.Backend: removing old state snapshots from old backend") for _, localState := range localStates { // We always delete the local state, unless that was our new state too. if err := localState.WriteState(nil); err != nil { From 48601d261da65bca60e6e317c5f7909350a81d9b Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Thu, 15 Nov 2018 17:01:19 -0800 Subject: [PATCH 119/149] states/statemgr: In Filesystem, back up output file, not input file The filesystem backend has the option of using a different file for its initial read. Previously we were incorrectly writing the contents of that file out into the backup file, rather than the prior contents of the output file. Now we will always read the output file in RefreshState in order to decide what we will back up but then we will optionally additionally read the input file and prefer its content as the "current" state snapshot. This is verified by command.TestMetaBackend_planLocalStatePath and TestMetaBackend_configureNew, which are both now passing. 
--- command/meta_backend_test.go | 2 - states/statemgr/filesystem.go | 115 +++++++++++++++++----------- states/statemgr/filesystem_test.go | 116 ++++++++++++++++++++++++++++- 3 files changed, 185 insertions(+), 48 deletions(-) diff --git a/command/meta_backend_test.go b/command/meta_backend_test.go index 2adfbabb625d..15995de8f7be 100644 --- a/command/meta_backend_test.go +++ b/command/meta_backend_test.go @@ -238,7 +238,6 @@ func TestMetaBackend_configureInterpolation(t *testing.T) { // Newly configured backend func TestMetaBackend_configureNew(t *testing.T) { - // Create a temporary working directory that is empty td := tempDir(t) copy.CopyDir(testFixturePath("backend-new"), td) defer os.RemoveAll(td) @@ -1532,7 +1531,6 @@ func TestMetaBackend_planLocal(t *testing.T) { // A plan with a custom state save path func TestMetaBackend_planLocalStatePath(t *testing.T) { - // Create a temporary working directory that is empty td := tempDir(t) copy.CopyDir(testFixturePath("backend-plan-local"), td) defer os.RemoveAll(td) diff --git a/states/statemgr/filesystem.go b/states/statemgr/filesystem.go index 47ea17b25c56..3a8e791100ff 100644 --- a/states/statemgr/filesystem.go +++ b/states/statemgr/filesystem.go @@ -56,7 +56,6 @@ type Filesystem struct { file *statefile.File readFile *statefile.File backupFile *statefile.File - written bool writtenBackup bool } @@ -115,9 +114,6 @@ func (s *Filesystem) State() *states.State { if s.file == nil { return nil } - if s.backupPath != "" && s.backupFile == nil { - s.backupFile = s.file.DeepCopy() - } return s.file.DeepCopy().State } @@ -128,36 +124,19 @@ func (s *Filesystem) WriteState(state *states.State) error { // writing to a temp file on the same filesystem, and renaming the file over // the original. 
+ defer s.mutex()() + if s.readFile == nil { - err := s.RefreshState() + err := s.refreshState() if err != nil { return err } } - defer s.mutex()() return s.writeState(state, nil) } func (s *Filesystem) writeState(state *states.State, meta *SnapshotMeta) error { - // We'll try to write our backup first, so we can be sure we've created - // it successfully before clobbering the original file it came from. - if !s.writtenBackup && s.backupFile != nil && s.backupPath != "" && !statefile.StatesMarshalEqual(state, s.backupFile.State) { - log.Printf("[TRACE] statemgr.Filesystem: creating backup snapshot at %s", s.backupPath) - bfh, err := os.Create(s.backupPath) - if err != nil { - return fmt.Errorf("failed to create local state backup file: %s", err) - } - defer bfh.Close() - - err = statefile.Write(s.backupFile, bfh) - if err != nil { - return fmt.Errorf("failed to write to local state backup file: %s", err) - } - - s.writtenBackup = true - } - if s.stateFileOut == nil { if err := s.createStateFiles(); err != nil { return nil @@ -165,13 +144,46 @@ func (s *Filesystem) writeState(state *states.State, meta *SnapshotMeta) error { } defer s.stateFileOut.Sync() + // We'll try to write our backup first, so we can be sure we've created + // it successfully before clobbering the original file it came from. 
+ if !s.writtenBackup && s.backupFile != nil && s.backupPath != "" { + if !statefile.StatesMarshalEqual(state, s.backupFile.State) { + log.Printf("[TRACE] statemgr.Filesystem: creating backup snapshot at %s", s.backupPath) + bfh, err := os.Create(s.backupPath) + if err != nil { + return fmt.Errorf("failed to create local state backup file: %s", err) + } + defer bfh.Close() + + err = statefile.Write(s.backupFile, bfh) + if err != nil { + return fmt.Errorf("failed to write to local state backup file: %s", err) + } + + s.writtenBackup = true + } else { + log.Print("[TRACE] statemgr.Filesystem: not making a backup, because the new snapshot is identical to the old") + } + } else { + // This branch is all just logging, to help understand why we didn't make a backup. + switch { + case s.backupPath == "": + log.Print("[TRACE] statemgr.Filesystem: state file backups are disabled") + case s.writtenBackup: + log.Printf("[TRACE] statemgr.Filesystem: have already backed up original %s to %s on a previous write", s.path, s.backupPath) + case s.backupFile == nil: + log.Printf("[TRACE] statemgr.Filesystem: no original state snapshot to back up") + default: + log.Printf("[TRACE] statemgr.Filesystem: not creating a backup for an unknown reason") + } + } + s.file = s.file.DeepCopy() if s.file == nil { s.file = NewStateFile() } s.file.State = state.DeepCopy() - log.Print("[TRACE] statemgr.Filesystem: truncating the state file") if _, err := s.stateFileOut.Seek(0, os.SEEK_SET); err != nil { return err } @@ -204,7 +216,8 @@ func (s *Filesystem) writeState(state *states.State, meta *SnapshotMeta) error { return err } - s.written = true + // Any future reads must come from the file we've now updated + s.readPath = s.path return nil } @@ -217,7 +230,10 @@ func (s *Filesystem) PersistState() error { // RefreshState is an implementation of Refresher. 
func (s *Filesystem) RefreshState() error { defer s.mutex()() + return s.refreshState() +} +func (s *Filesystem) refreshState() error { var reader io.Reader // The s.readPath file is only OK to read if we have not written any state out @@ -227,10 +243,9 @@ func (s *Filesystem) RefreshState() error { // This is important for Windows, as if the input file is the same as the // output file, and the output file has been locked already, we can't open // the file again. - if !s.written && (s.stateFileOut == nil || s.readPath != s.path) { - log.Printf("[TRACE] statemgr.Filesystem: reading initial snapshot from %s", s.readPath) - + if s.stateFileOut == nil || s.readPath != s.path { // we haven't written a state file yet, so load from readPath + log.Printf("[TRACE] statemgr.Filesystem: reading initial snapshot from %s", s.readPath) f, err := os.Open(s.readPath) if err != nil { // It is okay if the file doesn't exist; we'll treat that as a nil state. @@ -247,11 +262,9 @@ func (s *Filesystem) RefreshState() error { reader = f } } else { - log.Printf("[TRACE] statemgr.Filesystem: reading snapshot from %s", s.path) - + log.Printf("[TRACE] statemgr.Filesystem: reading latest snapshot from %s", s.path) // no state to refresh if s.stateFileOut == nil { - log.Printf("[TRACE] statemgr.Filesystem: no state snapshot has been written yet") return nil } @@ -261,23 +274,20 @@ func (s *Filesystem) RefreshState() error { } f, err := statefile.Read(reader) - - // nothing to backup if there's no initial state - if f == nil { - log.Printf("[TRACE] statemgr.Filesystem: no initial state, so will skip writing a backup") - s.writtenBackup = true - } - - // if there's no state we just assign the nil return value - if err != nil && err != statefile.ErrNoState { - log.Printf("[TRACE] statemgr.Filesystem: state snapshot is nil") - return err + // if there's no state then a nil file is fine + if err != nil { + if err != statefile.ErrNoState { + return err + } + log.Printf("[TRACE] statemgr.Filesystem: 
snapshot file has nil snapshot, but that's okay") } s.file = f s.readFile = s.file.DeepCopy() if s.file != nil { log.Printf("[TRACE] statemgr.Filesystem: read snapshot with lineage %q serial %d", s.file.Lineage, s.file.Serial) + } else { + log.Print("[TRACE] statemgr.Filesystem: read nil snapshot") } return nil } @@ -378,13 +388,14 @@ func (s *Filesystem) StateForMigration() *statefile.File { // WriteStateForMigration is part of our implementation of Migrator. func (s *Filesystem) WriteStateForMigration(f *statefile.File, force bool) error { + defer s.mutex()() + if s.readFile == nil { - err := s.RefreshState() + err := s.refreshState() if err != nil { return err } } - defer s.mutex()() if !force { err := CheckValidImport(f, s.readFile) @@ -436,6 +447,20 @@ func (s *Filesystem) createStateFiles() error { } s.stateFileOut = f + + // If the file already existed with content then that'll be the content + // of our backup file if we write a change later. + s.backupFile, err = statefile.Read(s.stateFileOut) + if err != nil { + if err != statefile.ErrNoState { + return err + } + log.Printf("[TRACE] statemgr.Filesystem: no previously-stored snapshot exists") + } else { + log.Printf("[TRACE] statemgr.Filesystem: existing snapshot has lineage %q serial %d", s.backupFile.Lineage, s.backupFile.Serial) + } + + // Refresh now, to load in the snapshot if the file already existed return nil } diff --git a/states/statemgr/filesystem_test.go b/states/statemgr/filesystem_test.go index 7977a6f8d9a1..c21acd34e39a 100644 --- a/states/statemgr/filesystem_test.go +++ b/states/statemgr/filesystem_test.go @@ -4,13 +4,17 @@ import ( "io/ioutil" "os" "os/exec" + "path/filepath" "strings" "sync" "testing" "github.com/go-test/deep" version "github.com/hashicorp/go-version" + "github.com/zclconf/go-cty/cty" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/states/statefile" tfversion "github.com/hashicorp/terraform/version" ) @@ 
-57,7 +61,7 @@ func TestFilesystemLocks(t *testing.T) { t.Fatal("unexpected lock failure", err, string(out)) } - if string(out) != "lock failed" { + if !strings.Contains(string(out), "lock failed") { t.Fatal("expected 'locked failed', got", string(out)) } @@ -172,6 +176,116 @@ func TestFilesystem_backup(t *testing.T) { } } +// This test verifies a particularly tricky behavior where the input file +// is overridden and backups are enabled at the same time. This combination +// requires special care because we must ensure that when we create a backup +// it is of the original contents of the output file (which we're overwriting), +// not the contents of the input file (which is left unchanged). +func TestFilesystem_backupAndReadPath(t *testing.T) { + defer testOverrideVersion(t, "1.2.3")() + + workDir, err := ioutil.TempDir("", "tf") + if err != nil { + t.Fatalf("failed to create temporary directory: %s", err) + } + defer os.RemoveAll(workDir) + + markerOutput := addrs.OutputValue{Name: "foo"}.Absolute(addrs.RootModuleInstance) + + outState := states.BuildState(func(ss *states.SyncState) { + ss.SetOutputValue( + markerOutput, + cty.StringVal("from-output-state"), + false, // not sensitive + ) + }) + outFile, err := os.Create(filepath.Join(workDir, "output.tfstate")) + if err != nil { + t.Fatalf("failed to create temporary outFile %s", err) + } + defer outFile.Close() + err = statefile.Write(&statefile.File{ + Lineage: "-", + Serial: 0, + TerraformVersion: version.Must(version.NewVersion("1.2.3")), + State: outState, + }, outFile) + if err != nil { + t.Fatalf("failed to write initial outfile state to %s: %s", outFile.Name(), err) + } + + inState := states.BuildState(func(ss *states.SyncState) { + ss.SetOutputValue( + markerOutput, + cty.StringVal("from-input-state"), + false, // not sensitive + ) + }) + inFile, err := os.Create(filepath.Join(workDir, "input.tfstate")) + if err != nil { + t.Fatalf("failed to create temporary inFile %s", err) + } + defer inFile.Close() 
+ err = statefile.Write(&statefile.File{ + Lineage: "-", + Serial: 0, + TerraformVersion: version.Must(version.NewVersion("1.2.3")), + State: inState, + }, inFile) + if err != nil { + t.Fatalf("failed to write initial infile state to %s: %s", inFile.Name(), err) + } + + backupPath := outFile.Name() + ".backup" + + ls := NewFilesystemBetweenPaths(inFile.Name(), outFile.Name()) + ls.SetBackupPath(backupPath) + + newState := states.BuildState(func(ss *states.SyncState) { + ss.SetOutputValue( + markerOutput, + cty.StringVal("from-new-state"), + false, // not sensitive + ) + }) + err = ls.WriteState(newState) + if err != nil { + t.Fatalf("failed to write new state: %s", err) + } + + // The backup functionality should've saved a copy of the original contents + // of the _output_ file, even though the first snapshot was read from + // the _input_ file. + t.Run("backup file", func (t *testing.T) { + bfh, err := os.Open(backupPath) + if err != nil { + t.Fatal(err) + } + bf, err := statefile.Read(bfh) + if err != nil { + t.Fatal(err) + } + os := bf.State.OutputValue(markerOutput) + if got, want := os.Value, cty.StringVal("from-output-state"); !want.RawEquals(got) { + t.Errorf("wrong marker value in backup state file\ngot: %#v\nwant: %#v", got, want) + } + }) + t.Run("output file", func (t *testing.T) { + ofh, err := os.Open(outFile.Name()) + if err != nil { + t.Fatal(err) + } + of, err := statefile.Read(ofh) + if err != nil { + t.Fatal(err) + } + os := of.State.OutputValue(markerOutput) + if got, want := os.Value, cty.StringVal("from-new-state"); !want.RawEquals(got) { + t.Errorf("wrong marker value in backup state file\ngot: %#v\nwant: %#v", got, want) + } + }) +} + func TestFilesystem_nonExist(t *testing.T) { defer testOverrideVersion(t, "1.2.3")() ls := NewFilesystem("ishouldntexist") From e2ba90fdfa9356ad1955b81473c3ad1a5d38b667 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 16 Nov 2018 15:54:49 -0800 Subject: [PATCH 120/149] command: Fix 
TestMetaBackend_planLocalMatch We now don't create a local state backup until the first snapshot write, so we don't expect there to be a backup file until the end of the test. (There is already a check at the end there, unmodified by this change.) --- command/meta_backend_test.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/command/meta_backend_test.go b/command/meta_backend_test.go index 15995de8f7be..b6e23a8f04a6 100644 --- a/command/meta_backend_test.go +++ b/command/meta_backend_test.go @@ -1683,11 +1683,6 @@ func TestMetaBackend_planLocalMatch(t *testing.T) { t.Fatal("state is empty") } - // Verify a backup exists - if _, err := os.Stat(DefaultStateFilename + DefaultBackupExtension); err != nil { - t.Fatalf("err: %s", err) - } - // Verify we have no configured backend/legacy path := filepath.Join(m.DataDir(), DefaultStateFilename) if _, err := os.Stat(path); err == nil { From 762a173c7f7d2e6adda10fbe1eaa50a1ba503d29 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 16 Nov 2018 16:09:25 -0800 Subject: [PATCH 121/149] command: Fix TestRefresh_outPath We now only create a backup state file if the given output file already exists, which it does not in this test. (The behavior of creating the backup files is already covered by other tests, so no need for this one go out of its way to do it.) 
--- command/refresh_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/command/refresh_test.go b/command/refresh_test.go index 84808f1743d7..843cd213dc3b 100644 --- a/command/refresh_test.go +++ b/command/refresh_test.go @@ -343,11 +343,11 @@ func TestRefresh_outPath(t *testing.T) { t.Fatalf("wrong new object\ngot: %swant: %s", spew.Sdump(actual), spew.Sdump(expected)) } - backupState := testStateRead(t, outPath+DefaultBackupExtension) - actualStr := strings.TrimSpace(backupState.String()) - expectedStr := strings.TrimSpace(state.String()) - if actualStr != expectedStr { - t.Fatalf("bad:\n\n%s\n\n%s", actualStr, expectedStr) + if _, err := os.Stat(outPath+DefaultBackupExtension); !os.IsNotExist(err) { + if err != nil { + t.Fatalf("failed to test for backup file: %s", err) + } + t.Fatalf("backup file exists, but it should not because output file did not initially exist") } } From 53b5b95ef553947605cee81d8e023e49e144946a Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 16 Nov 2018 16:37:02 -0800 Subject: [PATCH 122/149] command: Fix TestRefresh_backup The local filesystem state manager no longer creates backup files eagerly, instead creating them only if on first write there is already a snapshot present in the target file. Therefore for this test to exercise the codepaths it intends to we must create an initial state snapshot for it to overwrite, creating the backup in the process. There are several other tests for this behavior elsewhere, so this test is primarily to verify that the refresh command is configuring the backend appropriately to get the backups written in the desired location. 
--- command/refresh_test.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/command/refresh_test.go b/command/refresh_test.go index 843cd213dc3b..928384c37a22 100644 --- a/command/refresh_test.go +++ b/command/refresh_test.go @@ -511,8 +511,14 @@ func TestRefresh_backup(t *testing.T) { t.Fatalf("err: %s", err) } outPath := outf.Name() - outf.Close() - os.Remove(outPath) + defer outf.Close() + + // Need to put some state content in the output file so that there's + // something to back up. + err = statefile.Write(statefile.New(state, "baz", 0), outf) + if err != nil { + t.Fatalf("error writing initial output state file %s", err) + } // Backup path backupf, err := ioutil.TempFile(testingDir, "tf") From 37bc187f951f9d055ae581cdf252e511898b0652 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 16 Nov 2018 17:24:06 -0800 Subject: [PATCH 123/149] command: "terraform output" mustn't panic when no state is present This is verified by TestOutput_noArgs. --- command/output.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/command/output.go b/command/output.go index b8fed2e2c125..23484ddeb923 100644 --- a/command/output.go +++ b/command/output.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/config/hcl2shim" "github.com/hashicorp/terraform/repl" + "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/tfdiags" ) @@ -95,6 +96,10 @@ func (c *OutputCommand) Run(args []string) int { } state := stateStore.State() + if state == nil { + state = states.NewState() + } + mod := state.Module(moduleAddr) if mod == nil { c.Ui.Error(fmt.Sprintf( From 6cb3b0f4cf1b536de543d7c85a473b1ecef5246d Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 16 Nov 2018 18:01:29 -0800 Subject: [PATCH 124/149] states/statemgr: Local locks meta is near output path, not input path This was a mistake while adapting this code from the old state.LocalState. 
Since the lock is held on the output file (s.path) the metadata should live adjacent to that rather than being built from the read path (s.readPath) that is used only as the initial snapshot on first instantiation. This also includes more logging, continuing the trend of other recent commits in these files. The local state behavior is sufficiently complex that these trace logs are a great help in debugging issues such as this one with the wrong files being used or actions being taken in the wrong order. --- states/statemgr/filesystem.go | 7 +++++-- states/statemgr/filesystem_lock_unix.go | 3 +++ states/statemgr/filesystem_lock_windows.go | 5 +++++ 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/states/statemgr/filesystem.go b/states/statemgr/filesystem.go index 3a8e791100ff..ed14a11bf662 100644 --- a/states/statemgr/filesystem.go +++ b/states/statemgr/filesystem.go @@ -345,7 +345,9 @@ func (s *Filesystem) Unlock(id string) error { } } - os.Remove(s.lockInfoPath()) + lockInfoPath := s.lockInfoPath() + log.Printf("[TRACE] statemgr.Filesystem: removing lock metadata file %s", lockInfoPath) + os.Remove(lockInfoPath) fileName := s.stateFileOut.Name() @@ -466,7 +468,7 @@ func (s *Filesystem) createStateFiles() error { // return the path for the lockInfo metadata. 
func (s *Filesystem) lockInfoPath() string { - stateDir, stateName := filepath.Split(s.readPath) + stateDir, stateName := filepath.Split(s.path) if stateName == "" { panic("empty state file path") } @@ -500,6 +502,7 @@ func (s *Filesystem) writeLockInfo(info *LockInfo) error { info.Path = s.readPath info.Created = time.Now().UTC() + log.Printf("[TRACE] statemgr.Filesystem: writing lock metadata to %s", path) err := ioutil.WriteFile(path, info.Marshal(), 0600) if err != nil { return fmt.Errorf("could not write lock info for %q: %s", s.readPath, err) diff --git a/states/statemgr/filesystem_lock_unix.go b/states/statemgr/filesystem_lock_unix.go index 6b2672e7f227..4c4f571ed2d3 100644 --- a/states/statemgr/filesystem_lock_unix.go +++ b/states/statemgr/filesystem_lock_unix.go @@ -3,6 +3,7 @@ package statemgr import ( + "log" "os" "syscall" ) @@ -10,6 +11,7 @@ import ( // use fcntl POSIX locks for the most consistent behavior across platforms, and // hopefully some campatibility over NFS and CIFS. 
func (s *Filesystem) lock() error { + log.Printf("[TRACE] statemgr.Filesystem: locking %s using fcntl flock", s.path) flock := &syscall.Flock_t{ Type: syscall.F_RDLCK | syscall.F_WRLCK, Whence: int16(os.SEEK_SET), @@ -22,6 +24,7 @@ func (s *Filesystem) lock() error { } func (s *Filesystem) unlock() error { + log.Printf("[TRACE] statemgr.Filesystem: unlocking %s using fcntl flock", s.path) flock := &syscall.Flock_t{ Type: syscall.F_UNLCK, Whence: int16(os.SEEK_SET), diff --git a/states/statemgr/filesystem_lock_windows.go b/states/statemgr/filesystem_lock_windows.go index 1e2f49fab5a3..91b4a2a66efb 100644 --- a/states/statemgr/filesystem_lock_windows.go +++ b/states/statemgr/filesystem_lock_windows.go @@ -3,6 +3,7 @@ package statemgr import ( + "log" "math" "syscall" "unsafe" @@ -22,6 +23,8 @@ const ( ) func (s *Filesystem) lock() error { + log.Printf("[TRACE] statemgr.Filesystem: locking %s using LockFileEx", s.path) + // even though we're failing immediately, an overlapped event structure is // required ol, err := newOverlapped() @@ -41,6 +44,8 @@ func (s *Filesystem) lock() error { } func (s *Filesystem) unlock() error { + log.Printf("[TRACE] statemgr.Filesystem: unlocked by closing %s", s.path) + // the file is closed in Unlock return nil } From 300eceeb259256a927a06bb5f422a7848b4d92f1 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 16 Nov 2018 18:16:06 -0800 Subject: [PATCH 125/149] plans/planfile: fix TestRoundtrip This was broken by an earlier change to verify the Terraform version number when reading a state file. To fix it, we'll use our current version in our constructed file which should then match when it's read back in. 
--- plans/planfile/planfile_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plans/planfile/planfile_test.go b/plans/planfile/planfile_test.go index 5b44e05b2549..5b5abb5767dd 100644 --- a/plans/planfile/planfile_test.go +++ b/plans/planfile/planfile_test.go @@ -7,12 +7,12 @@ import ( "testing" "github.com/davecgh/go-spew/spew" - version "github.com/hashicorp/go-version" "github.com/hashicorp/terraform/configs/configload" "github.com/hashicorp/terraform/plans" "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/states/statefile" + tfversion "github.com/hashicorp/terraform/version" ) func TestRoundtrip(t *testing.T) { @@ -33,7 +33,7 @@ func TestRoundtrip(t *testing.T) { // We don't need to test the entire thing because the state file // serialization is already tested in its own package. stateFileIn := &statefile.File{ - TerraformVersion: version.Must(version.NewVersion("1.0.0")), + TerraformVersion: tfversion.SemVer, Serial: 1, Lineage: "abc123", State: states.NewState(), From 73c9521a04a7c59eb6d91391acab5524d66f3652 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 16 Nov 2018 18:36:11 -0800 Subject: [PATCH 126/149] command/e2etest: Temporarily disable tests that access network Several of these tests rely on external services (e.g. Terraform Registry) that have not yet been updated to support the needs of Terraform v0.12.0, so for now we'll skip all of these tests and wait until those systems have been updated. This should be removed before Terraform v0.12.0 final to enable these tests to be used as part of pre-release smoke testing. 
--- command/e2etest/main_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/command/e2etest/main_test.go b/command/e2etest/main_test.go index 64dc8f148e8e..a053e501e793 100644 --- a/command/e2etest/main_test.go +++ b/command/e2etest/main_test.go @@ -56,4 +56,10 @@ func skipIfCannotAccessNetwork(t *testing.T) { if !canAccessNetwork() { t.Skip("network access not allowed; use TF_ACC=1 to enable") } + + // During the early part of the Terraform v0.12 release process, certain + // upstream resources are not yet ready to support it and so these + // tests cannot be run. These will be re-enabled prior to Terraform v0.12.0 + // final. + t.Skip("all tests with external network access are temporarily disabled until upstream services are updated") } From 5255e852389c2eee7862e04848a82cdba7493ffa Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 16 Nov 2018 18:37:26 -0800 Subject: [PATCH 127/149] "go fmt" fixups Apparently my editor is still not reliably formatting on save, so I missed a few formatting quirks in these files. 
--- command/import_test.go | 2 +- command/refresh_test.go | 2 +- states/statemgr/filesystem_test.go | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/command/import_test.go b/command/import_test.go index 7eed616ce3e4..0c47571222e6 100644 --- a/command/import_test.go +++ b/command/import_test.go @@ -1,9 +1,9 @@ package command import ( - "log" "fmt" "io/ioutil" + "log" "os" "path/filepath" "strings" diff --git a/command/refresh_test.go b/command/refresh_test.go index 928384c37a22..5791ec6ec5a4 100644 --- a/command/refresh_test.go +++ b/command/refresh_test.go @@ -343,7 +343,7 @@ func TestRefresh_outPath(t *testing.T) { t.Fatalf("wrong new object\ngot: %swant: %s", spew.Sdump(actual), spew.Sdump(expected)) } - if _, err := os.Stat(outPath+DefaultBackupExtension); !os.IsNotExist(err) { + if _, err := os.Stat(outPath + DefaultBackupExtension); !os.IsNotExist(err) { if err != nil { t.Fatalf("failed to test for backup file: %s", err) } diff --git a/states/statemgr/filesystem_test.go b/states/statemgr/filesystem_test.go index c21acd34e39a..1d393417e8dd 100644 --- a/states/statemgr/filesystem_test.go +++ b/states/statemgr/filesystem_test.go @@ -256,7 +256,7 @@ func TestFilesystem_backupAndReadPath(t *testing.T) { // The backup functionality should've saved a copy of the original contents // of the _output_ file, even though the first snapshot was read from // the _input_ file. 
- t.Run("backup file", func (t *testing.T) { + t.Run("backup file", func(t *testing.T) { bfh, err := os.Open(backupPath) if err != nil { t.Fatal(err) @@ -270,7 +270,7 @@ func TestFilesystem_backupAndReadPath(t *testing.T) { t.Errorf("wrong marker value in backup state file\ngot: %#v\nwant: %#v", got, want) } }) - t.Run("output file", func (t *testing.T) { + t.Run("output file", func(t *testing.T) { ofh, err := os.Open(outFile.Name()) if err != nil { t.Fatal(err) From c133de863b9f6cce231819c8787d66d3112fc31c Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 16 Nov 2018 18:54:33 -0800 Subject: [PATCH 128/149] build: Update most things for Go 1.11 modules We're still using vendoring for now until we get _all_ of our tooling updated, so the main idea here is to force use of the vendor directory when running tests and building for development so we can quickly find situations where we forget to run "go mod vendor". We also setting GO111MODULE=off for installation of tools. Right now this is the best way to install a tool in GOBIN without also interfering with go.mod and go.sum, until a better pattern for managing tool dependencies is devised by the Go team. Finally, we run "go mod download" before launching "gox" in the main build process, to prime the local module cache once so that the concurrent "go build" processes won't race to populate it redundantly. This means that we'll be producing final builds from the module cache rather than from vendor as with everything else -- there's currently no way to tell gox to use -mod=vendor -- but that should be fine in practice since our go.sum file will ensure that we get the exact sources we expect in the module cache before building. 
--- .travis.yml | 2 +- Makefile | 33 ++++++++++++++++++--------------- scripts/build.sh | 4 ++++ 3 files changed, 23 insertions(+), 16 deletions(-) diff --git a/.travis.yml b/.travis.yml index 665a74e8715d..787a70a0855f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,7 +11,7 @@ go: # add TF_ETCDV3_TEST=1 to run etcdv3 tests # if added, TF_ETCDV3_ENDPOINTS must be set to a comma-separated list of (insecure) etcd endpoints against which to test env: - - CONSUL_VERSION=0.7.5 GOMAXPROCS=4 + - CONSUL_VERSION=0.7.5 GOMAXPROCS=4 GO111MODULE=on # Fetch consul for the backend and provider tests before_install: diff --git a/Makefile b/Makefile index 6c08f8e18fe8..305435d6741c 100644 --- a/Makefile +++ b/Makefile @@ -6,10 +6,10 @@ WEBSITE_REPO=github.com/hashicorp/terraform-website default: test tools: - go get -u github.com/kardianos/govendor - go get -u golang.org/x/tools/cmd/stringer - go get -u golang.org/x/tools/cmd/cover - go get -u github.com/golang/mock/mockgen + GO111MODULE=off go get -u github.com/kardianos/govendor + GO111MODULE=off go get -u golang.org/x/tools/cmd/stringer + GO111MODULE=off go get -u golang.org/x/tools/cmd/cover + GO111MODULE=off go get -u github.com/golang/mock/mockgen # bin generates the releaseable binaries for Terraform bin: fmtcheck generate @@ -18,10 +18,10 @@ bin: fmtcheck generate # dev creates binaries for testing Terraform locally. These are put # into ./bin/ as well as $GOPATH/bin dev: fmtcheck generate - @TF_DEV=1 sh -c "'$(CURDIR)/scripts/build.sh'" + go install -mod=vendor . quickdev: generate - @TF_DEV=1 sh -c "'$(CURDIR)/scripts/build.sh'" + go install -mod=vendor . # Shorthand for building and installing just one plugin for local testing. # Run as (for example): make plugin-dev PLUGIN=provider-aws @@ -33,34 +33,34 @@ plugin-dev: generate # we run this one package at a time here because running the entire suite in # one command creates memory usage issues when running in Travis-CI. 
test: fmtcheck generate - go list $(TEST) | xargs -t -n4 go test $(TESTARGS) -timeout=2m -parallel=4 + go list -mod=vendor $(TEST) | xargs -t -n4 go test $(TESTARGS) -mod=vendor -timeout=2m -parallel=4 # testacc runs acceptance tests testacc: fmtcheck generate @if [ "$(TEST)" = "./..." ]; then \ echo "ERROR: Set TEST to a specific package. For example,"; \ - echo " make testacc TEST=./builtin/providers/aws"; \ + echo " make testacc TEST=./builtin/providers/test"; \ exit 1; \ fi - TF_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 120m + TF_ACC=1 go test $(TEST) -v $(TESTARGS) -mod=vendor -timeout 120m # e2etest runs the end-to-end tests against a generated Terraform binary # The TF_ACC here allows network access, but does not require any special # credentials since the e2etests use local-only providers such as "null". e2etest: generate - TF_ACC=1 go test -v ./command/e2etest + TF_ACC=1 go test -mod=vendor -v ./command/e2etest test-compile: fmtcheck generate @if [ "$(TEST)" = "./..." ]; then \ echo "ERROR: Set TEST to a specific package. For example,"; \ - echo " make test-compile TEST=./builtin/providers/aws"; \ + echo " make test-compile TEST=./builtin/providers/test"; \ exit 1; \ fi go test -c $(TEST) $(TESTARGS) # testrace runs the race checker testrace: fmtcheck generate - TF_ACC= go test -race $(TEST) $(TESTARGS) + TF_ACC= go test -mod=vendor -race $(TEST) $(TESTARGS) cover: @go tool cover 2>/dev/null; if [ $$? -eq 3 ]; then \ @@ -75,10 +75,13 @@ cover: # "make protobuf". generate: @which stringer > /dev/null; if [ $$? -ne 0 ]; then \ - go get -u golang.org/x/tools/cmd/stringer; \ + GO111MODULE=off go get -u golang.org/x/tools/cmd/stringer; \ fi - go generate ./... - @go fmt command/internal_plugin_list.go > /dev/null + # We turn off modules for "go generate" because our downstream generate + # commands are not all ready to deal with Go modules yet, and this + # avoids downloading all of the deps that are in the vendor dir anyway. 
+ GO111MODULE=off go generate ./... + GO111MODULE=off go fmt command/internal_plugin_list.go > /dev/null # We separate the protobuf generation because most development tasks on # Terraform do not involve changing protobuf files and protoc is not a diff --git a/scripts/build.sh b/scripts/build.sh index 45e297d1d614..bdef1111f7fc 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -47,6 +47,10 @@ if [[ -n "${TF_RELEASE}" ]]; then LD_FLAGS="-X main.GitCommit=${GIT_COMMIT}${GIT_DIRTY} -X github.com/hashicorp/terraform/version.Prerelease= -s -w" fi +# Ensure all remote modules are downloaded and cached before build so that +# the concurrent builds launched by gox won't race to redundantly download them. +go mod download + # Build! echo "==> Building..." gox \ From 1ff9a540202b8c36e33db950374bbb4495737d8f Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Fri, 16 Nov 2018 18:58:50 -0800 Subject: [PATCH 129/149] build: Temporarily disable the website tests for Travis-CI There are some known broken links right now because this repository has been updated ahead of some necessary changes in the terraform-website repository. We'll need to wait until the end of the v0.12 release process to re-enable this because the terraform-website repository is currently set up for the v0.11 content and will continue to be until v0.12.0 final is ready for release. 
--- .travis.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 787a70a0855f..ba4a2e45af82 100644 --- a/.travis.yml +++ b/.travis.yml @@ -37,7 +37,8 @@ script: - make test - make e2etest - GOOS=windows go build -- make website-test +# website-test is temporarily disabled while we get the website build back in shape after the v0.12 reorganization +#- make website-test branches: only: From 4fe9632f09d88bb44336e787f108b0330a5935a2 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Mon, 19 Nov 2018 09:39:16 -0800 Subject: [PATCH 130/149] plugin: Establish our current plugin protocol as version 5 The main significant change here is that the package name for the proto definition is "tfplugin5", which is important because this name is part of the wire protocol for references to types defined in our package. Along with that, we also move the generated package into "internal" to make it explicit that importing the generated Go package from elsewhere is not the right approach for externally-implemented SDKs, which should instead vendor the proto definition they are using and generate their own stubs to ensure that the wire protocol is the only hard dependency between Terraform Core and plugins. After this is merged, any provider binaries built against our helper/schema package will need to be rebuilt so that they use the new "tfplugin5" package name instead of "proto". In a future commit we will include more elaborate and organized documentation on how an external codebase might make use of our RPC interface definition to implement an SDK, but the primary concern here is to ensure we have the right wire package name before release. 
--- Makefile | 2 +- helper/plugin/grpc_provider.go | 2 +- helper/plugin/grpc_provider_test.go | 2 +- helper/plugin/grpc_provisioner.go | 2 +- helper/plugin/grpc_provisioner_test.go | 2 +- helper/resource/grpc_test_provider.go | 2 +- .../proto => internal/tfplugin5}/generate.sh | 2 +- .../tfplugin5/tfplugin5.pb.go | 867 ++++++++++-------- .../tfplugin5/tfplugin5.proto | 20 +- plugin/convert/diagnostics.go | 2 +- plugin/convert/diagnostics_test.go | 2 +- plugin/convert/schema.go | 2 +- plugin/convert/schema_test.go | 2 +- plugin/grpc_provider.go | 2 +- plugin/grpc_provider_test.go | 2 +- plugin/grpc_provisioner.go | 2 +- plugin/grpc_provisioner_test.go | 2 +- plugin/mock_proto/generate.go | 4 +- plugin/mock_proto/mock.go | 78 +- plugin/serve.go | 2 +- 20 files changed, 541 insertions(+), 460 deletions(-) rename {plugin/proto => internal/tfplugin5}/generate.sh (88%) rename plugin/proto/plugin.pb.go => internal/tfplugin5/tfplugin5.pb.go (79%) rename plugin/proto/plugin.proto => internal/tfplugin5/tfplugin5.proto (89%) diff --git a/Makefile b/Makefile index 305435d6741c..765aae11e347 100644 --- a/Makefile +++ b/Makefile @@ -90,7 +90,7 @@ generate: # If you are working on changes to protobuf interfaces you may either use # this target or run the individual scripts below directly. 
protobuf: - bash plugin/proto/generate.sh + bash internal/tfplugin5/generate.sh bash plans/internal/planproto/generate.sh fmt: diff --git a/helper/plugin/grpc_provider.go b/helper/plugin/grpc_provider.go index 53809e273c08..c7fbc8ebe29e 100644 --- a/helper/plugin/grpc_provider.go +++ b/helper/plugin/grpc_provider.go @@ -16,8 +16,8 @@ import ( "github.com/hashicorp/terraform/config/hcl2shim" "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/helper/schema" + proto "github.com/hashicorp/terraform/internal/tfplugin5" "github.com/hashicorp/terraform/plugin/convert" - "github.com/hashicorp/terraform/plugin/proto" "github.com/hashicorp/terraform/terraform" ) diff --git a/helper/plugin/grpc_provider_test.go b/helper/plugin/grpc_provider_test.go index 5dd33ac2b0d0..f27edf3993ef 100644 --- a/helper/plugin/grpc_provider_test.go +++ b/helper/plugin/grpc_provider_test.go @@ -12,7 +12,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/terraform/helper/schema" - "github.com/hashicorp/terraform/plugin/proto" + proto "github.com/hashicorp/terraform/internal/tfplugin5" "github.com/hashicorp/terraform/terraform" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/msgpack" diff --git a/helper/plugin/grpc_provisioner.go b/helper/plugin/grpc_provisioner.go index e06fb6fd63de..14494e462d4b 100644 --- a/helper/plugin/grpc_provisioner.go +++ b/helper/plugin/grpc_provisioner.go @@ -4,8 +4,8 @@ import ( "log" "github.com/hashicorp/terraform/helper/schema" + proto "github.com/hashicorp/terraform/internal/tfplugin5" "github.com/hashicorp/terraform/plugin/convert" - "github.com/hashicorp/terraform/plugin/proto" "github.com/hashicorp/terraform/terraform" "github.com/zclconf/go-cty/cty" ctyconvert "github.com/zclconf/go-cty/cty/convert" diff --git a/helper/plugin/grpc_provisioner_test.go b/helper/plugin/grpc_provisioner_test.go index ebd712d60dda..c64045ab429e 100644 --- 
a/helper/plugin/grpc_provisioner_test.go +++ b/helper/plugin/grpc_provisioner_test.go @@ -1,5 +1,5 @@ package plugin -import "github.com/hashicorp/terraform/plugin/proto" +import proto "github.com/hashicorp/terraform/internal/tfplugin5" var _ proto.ProvisionerServer = (*GRPCProvisionerServer)(nil) diff --git a/helper/resource/grpc_test_provider.go b/helper/resource/grpc_test_provider.go index d2b12b4edd7f..8cfa8e7f5946 100644 --- a/helper/resource/grpc_test_provider.go +++ b/helper/resource/grpc_test_provider.go @@ -6,8 +6,8 @@ import ( "time" "github.com/hashicorp/terraform/helper/plugin" + proto "github.com/hashicorp/terraform/internal/tfplugin5" tfplugin "github.com/hashicorp/terraform/plugin" - "github.com/hashicorp/terraform/plugin/proto" "github.com/hashicorp/terraform/providers" "github.com/hashicorp/terraform/terraform" "google.golang.org/grpc" diff --git a/plugin/proto/generate.sh b/internal/tfplugin5/generate.sh similarity index 88% rename from plugin/proto/generate.sh rename to internal/tfplugin5/generate.sh index eedb093a94e3..de1d693ca40c 100644 --- a/plugin/proto/generate.sh +++ b/internal/tfplugin5/generate.sh @@ -13,4 +13,4 @@ DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" cd "$DIR" -protoc -I ./ plugin.proto --go_out=plugins=grpc:./ +protoc -I ./ tfplugin5.proto --go_out=plugins=grpc:./ diff --git a/plugin/proto/plugin.pb.go b/internal/tfplugin5/tfplugin5.pb.go similarity index 79% rename from plugin/proto/plugin.pb.go rename to internal/tfplugin5/tfplugin5.pb.go index af7f1fd71286..483f4eb42fc6 100644 --- a/plugin/proto/plugin.pb.go +++ b/internal/tfplugin5/tfplugin5.pb.go @@ -1,15 +1,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: plugin.proto +// source: tfplugin5.proto -package proto - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +package tfplugin5 import ( - context "golang.org/x/net/context" + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. @@ -36,6 +35,7 @@ var Diagnostic_Severity_name = map[int32]string{ 1: "ERROR", 2: "WARNING", } + var Diagnostic_Severity_value = map[string]int32{ "INVALID": 0, "ERROR": 1, @@ -45,8 +45,9 @@ var Diagnostic_Severity_value = map[string]int32{ func (x Diagnostic_Severity) String() string { return proto.EnumName(Diagnostic_Severity_name, int32(x)) } + func (Diagnostic_Severity) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{1, 0} + return fileDescriptor_17ae6090ff270234, []int{1, 0} } type Schema_NestedBlock_NestingMode int32 @@ -66,6 +67,7 @@ var Schema_NestedBlock_NestingMode_name = map[int32]string{ 3: "SET", 4: "MAP", } + var Schema_NestedBlock_NestingMode_value = map[string]int32{ "INVALID": 0, "SINGLE": 1, @@ -77,8 +79,9 @@ var Schema_NestedBlock_NestingMode_value = map[string]int32{ func (x Schema_NestedBlock_NestingMode) String() string { return proto.EnumName(Schema_NestedBlock_NestingMode_name, int32(x)) } + func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{5, 2, 0} + return fileDescriptor_17ae6090ff270234, []int{5, 2, 0} } // DynamicValue is an opaque encoding of terraform data, with the field name @@ -95,16 +98,17 @@ func (m *DynamicValue) Reset() { *m = DynamicValue{} } func (m *DynamicValue) String() string { return proto.CompactTextString(m) } func (*DynamicValue) ProtoMessage() {} func (*DynamicValue) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{0} + return 
fileDescriptor_17ae6090ff270234, []int{0} } + func (m *DynamicValue) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DynamicValue.Unmarshal(m, b) } func (m *DynamicValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_DynamicValue.Marshal(b, m, deterministic) } -func (dst *DynamicValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_DynamicValue.Merge(dst, src) +func (m *DynamicValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DynamicValue.Merge(m, src) } func (m *DynamicValue) XXX_Size() int { return xxx_messageInfo_DynamicValue.Size(m) @@ -130,7 +134,7 @@ func (m *DynamicValue) GetJson() []byte { } type Diagnostic struct { - Severity Diagnostic_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=proto.Diagnostic_Severity" json:"severity,omitempty"` + Severity Diagnostic_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=tfplugin5.Diagnostic_Severity" json:"severity,omitempty"` Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` Detail string `protobuf:"bytes,3,opt,name=detail,proto3" json:"detail,omitempty"` Attribute *AttributePath `protobuf:"bytes,4,opt,name=attribute,proto3" json:"attribute,omitempty"` @@ -143,16 +147,17 @@ func (m *Diagnostic) Reset() { *m = Diagnostic{} } func (m *Diagnostic) String() string { return proto.CompactTextString(m) } func (*Diagnostic) ProtoMessage() {} func (*Diagnostic) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{1} + return fileDescriptor_17ae6090ff270234, []int{1} } + func (m *Diagnostic) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Diagnostic.Unmarshal(m, b) } func (m *Diagnostic) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Diagnostic.Marshal(b, m, deterministic) } -func (dst *Diagnostic) XXX_Merge(src proto.Message) { - xxx_messageInfo_Diagnostic.Merge(dst, src) +func (m *Diagnostic) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Diagnostic.Merge(m, src) } func (m *Diagnostic) XXX_Size() int { return xxx_messageInfo_Diagnostic.Size(m) @@ -202,16 +207,17 @@ func (m *AttributePath) Reset() { *m = AttributePath{} } func (m *AttributePath) String() string { return proto.CompactTextString(m) } func (*AttributePath) ProtoMessage() {} func (*AttributePath) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{2} + return fileDescriptor_17ae6090ff270234, []int{2} } + func (m *AttributePath) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_AttributePath.Unmarshal(m, b) } func (m *AttributePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_AttributePath.Marshal(b, m, deterministic) } -func (dst *AttributePath) XXX_Merge(src proto.Message) { - xxx_messageInfo_AttributePath.Merge(dst, src) +func (m *AttributePath) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributePath.Merge(m, src) } func (m *AttributePath) XXX_Size() int { return xxx_messageInfo_AttributePath.Size(m) @@ -244,16 +250,17 @@ func (m *AttributePath_Step) Reset() { *m = AttributePath_Step{} } func (m *AttributePath_Step) String() string { return proto.CompactTextString(m) } func (*AttributePath_Step) ProtoMessage() {} func (*AttributePath_Step) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{2, 0} + return fileDescriptor_17ae6090ff270234, []int{2, 0} } + func (m *AttributePath_Step) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_AttributePath_Step.Unmarshal(m, b) } func (m *AttributePath_Step) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_AttributePath_Step.Marshal(b, m, deterministic) } -func (dst *AttributePath_Step) XXX_Merge(src proto.Message) { - xxx_messageInfo_AttributePath_Step.Merge(dst, src) +func (m *AttributePath_Step) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributePath_Step.Merge(m, src) } func (m *AttributePath_Step) XXX_Size() int { 
return xxx_messageInfo_AttributePath_Step.Size(m) @@ -271,16 +278,20 @@ type isAttributePath_Step_Selector interface { type AttributePath_Step_AttributeName struct { AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"` } + type AttributePath_Step_ElementKeyString struct { ElementKeyString string `protobuf:"bytes,2,opt,name=element_key_string,json=elementKeyString,proto3,oneof"` } + type AttributePath_Step_ElementKeyInt struct { ElementKeyInt int64 `protobuf:"varint,3,opt,name=element_key_int,json=elementKeyInt,proto3,oneof"` } -func (*AttributePath_Step_AttributeName) isAttributePath_Step_Selector() {} +func (*AttributePath_Step_AttributeName) isAttributePath_Step_Selector() {} + func (*AttributePath_Step_ElementKeyString) isAttributePath_Step_Selector() {} -func (*AttributePath_Step_ElementKeyInt) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyInt) isAttributePath_Step_Selector() {} func (m *AttributePath_Step) GetSelector() isAttributePath_Step_Selector { if m != nil { @@ -400,16 +411,17 @@ func (m *Stop) Reset() { *m = Stop{} } func (m *Stop) String() string { return proto.CompactTextString(m) } func (*Stop) ProtoMessage() {} func (*Stop) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{3} + return fileDescriptor_17ae6090ff270234, []int{3} } + func (m *Stop) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Stop.Unmarshal(m, b) } func (m *Stop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Stop.Marshal(b, m, deterministic) } -func (dst *Stop) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stop.Merge(dst, src) +func (m *Stop) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stop.Merge(m, src) } func (m *Stop) XXX_Size() int { return xxx_messageInfo_Stop.Size(m) @@ -430,16 +442,17 @@ func (m *Stop_Request) Reset() { *m = Stop_Request{} } func (m *Stop_Request) String() string { return proto.CompactTextString(m) } 
func (*Stop_Request) ProtoMessage() {} func (*Stop_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{3, 0} + return fileDescriptor_17ae6090ff270234, []int{3, 0} } + func (m *Stop_Request) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Stop_Request.Unmarshal(m, b) } func (m *Stop_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Stop_Request.Marshal(b, m, deterministic) } -func (dst *Stop_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stop_Request.Merge(dst, src) +func (m *Stop_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stop_Request.Merge(m, src) } func (m *Stop_Request) XXX_Size() int { return xxx_messageInfo_Stop_Request.Size(m) @@ -451,7 +464,7 @@ func (m *Stop_Request) XXX_DiscardUnknown() { var xxx_messageInfo_Stop_Request proto.InternalMessageInfo type Stop_Response struct { - Error string `protobuf:"bytes,1,opt,name=Error,json=error,proto3" json:"Error,omitempty"` + Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -461,16 +474,17 @@ func (m *Stop_Response) Reset() { *m = Stop_Response{} } func (m *Stop_Response) String() string { return proto.CompactTextString(m) } func (*Stop_Response) ProtoMessage() {} func (*Stop_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{3, 1} + return fileDescriptor_17ae6090ff270234, []int{3, 1} } + func (m *Stop_Response) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Stop_Response.Unmarshal(m, b) } func (m *Stop_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Stop_Response.Marshal(b, m, deterministic) } -func (dst *Stop_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stop_Response.Merge(dst, src) +func (m *Stop_Response) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Stop_Response.Merge(m, src) } func (m *Stop_Response) XXX_Size() int { return xxx_messageInfo_Stop_Response.Size(m) @@ -503,16 +517,17 @@ func (m *RawState) Reset() { *m = RawState{} } func (m *RawState) String() string { return proto.CompactTextString(m) } func (*RawState) ProtoMessage() {} func (*RawState) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{4} + return fileDescriptor_17ae6090ff270234, []int{4} } + func (m *RawState) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RawState.Unmarshal(m, b) } func (m *RawState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RawState.Marshal(b, m, deterministic) } -func (dst *RawState) XXX_Merge(src proto.Message) { - xxx_messageInfo_RawState.Merge(dst, src) +func (m *RawState) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawState.Merge(m, src) } func (m *RawState) XXX_Size() int { return xxx_messageInfo_RawState.Size(m) @@ -554,16 +569,17 @@ func (m *Schema) Reset() { *m = Schema{} } func (m *Schema) String() string { return proto.CompactTextString(m) } func (*Schema) ProtoMessage() {} func (*Schema) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{5} + return fileDescriptor_17ae6090ff270234, []int{5} } + func (m *Schema) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Schema.Unmarshal(m, b) } func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Schema.Marshal(b, m, deterministic) } -func (dst *Schema) XXX_Merge(src proto.Message) { - xxx_messageInfo_Schema.Merge(dst, src) +func (m *Schema) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema.Merge(m, src) } func (m *Schema) XXX_Size() int { return xxx_messageInfo_Schema.Size(m) @@ -601,16 +617,17 @@ func (m *Schema_Block) Reset() { *m = Schema_Block{} } func (m *Schema_Block) String() string { return proto.CompactTextString(m) } func (*Schema_Block) ProtoMessage() {} func 
(*Schema_Block) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{5, 0} + return fileDescriptor_17ae6090ff270234, []int{5, 0} } + func (m *Schema_Block) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Schema_Block.Unmarshal(m, b) } func (m *Schema_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Schema_Block.Marshal(b, m, deterministic) } -func (dst *Schema_Block) XXX_Merge(src proto.Message) { - xxx_messageInfo_Schema_Block.Merge(dst, src) +func (m *Schema_Block) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_Block.Merge(m, src) } func (m *Schema_Block) XXX_Size() int { return xxx_messageInfo_Schema_Block.Size(m) @@ -659,16 +676,17 @@ func (m *Schema_Attribute) Reset() { *m = Schema_Attribute{} } func (m *Schema_Attribute) String() string { return proto.CompactTextString(m) } func (*Schema_Attribute) ProtoMessage() {} func (*Schema_Attribute) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{5, 1} + return fileDescriptor_17ae6090ff270234, []int{5, 1} } + func (m *Schema_Attribute) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Schema_Attribute.Unmarshal(m, b) } func (m *Schema_Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Schema_Attribute.Marshal(b, m, deterministic) } -func (dst *Schema_Attribute) XXX_Merge(src proto.Message) { - xxx_messageInfo_Schema_Attribute.Merge(dst, src) +func (m *Schema_Attribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_Attribute.Merge(m, src) } func (m *Schema_Attribute) XXX_Size() int { return xxx_messageInfo_Schema_Attribute.Size(m) @@ -731,7 +749,7 @@ func (m *Schema_Attribute) GetSensitive() bool { type Schema_NestedBlock struct { TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` - Nesting 
Schema_NestedBlock_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=proto.Schema_NestedBlock_NestingMode" json:"nesting,omitempty"` + Nesting Schema_NestedBlock_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin5.Schema_NestedBlock_NestingMode" json:"nesting,omitempty"` MinItems int64 `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -743,16 +761,17 @@ func (m *Schema_NestedBlock) Reset() { *m = Schema_NestedBlock{} } func (m *Schema_NestedBlock) String() string { return proto.CompactTextString(m) } func (*Schema_NestedBlock) ProtoMessage() {} func (*Schema_NestedBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{5, 2} + return fileDescriptor_17ae6090ff270234, []int{5, 2} } + func (m *Schema_NestedBlock) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Schema_NestedBlock.Unmarshal(m, b) } func (m *Schema_NestedBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Schema_NestedBlock.Marshal(b, m, deterministic) } -func (dst *Schema_NestedBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_Schema_NestedBlock.Merge(dst, src) +func (m *Schema_NestedBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_NestedBlock.Merge(m, src) } func (m *Schema_NestedBlock) XXX_Size() int { return xxx_messageInfo_Schema_NestedBlock.Size(m) @@ -808,16 +827,17 @@ func (m *GetProviderSchema) Reset() { *m = GetProviderSchema{} } func (m *GetProviderSchema) String() string { return proto.CompactTextString(m) } func (*GetProviderSchema) ProtoMessage() {} func (*GetProviderSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{6} + return fileDescriptor_17ae6090ff270234, []int{6} } + func (m *GetProviderSchema) XXX_Unmarshal(b []byte) error { 
return xxx_messageInfo_GetProviderSchema.Unmarshal(m, b) } func (m *GetProviderSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetProviderSchema.Marshal(b, m, deterministic) } -func (dst *GetProviderSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetProviderSchema.Merge(dst, src) +func (m *GetProviderSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProviderSchema.Merge(m, src) } func (m *GetProviderSchema) XXX_Size() int { return xxx_messageInfo_GetProviderSchema.Size(m) @@ -838,16 +858,17 @@ func (m *GetProviderSchema_Request) Reset() { *m = GetProviderSchema_Req func (m *GetProviderSchema_Request) String() string { return proto.CompactTextString(m) } func (*GetProviderSchema_Request) ProtoMessage() {} func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{6, 0} + return fileDescriptor_17ae6090ff270234, []int{6, 0} } + func (m *GetProviderSchema_Request) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetProviderSchema_Request.Unmarshal(m, b) } func (m *GetProviderSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetProviderSchema_Request.Marshal(b, m, deterministic) } -func (dst *GetProviderSchema_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetProviderSchema_Request.Merge(dst, src) +func (m *GetProviderSchema_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProviderSchema_Request.Merge(m, src) } func (m *GetProviderSchema_Request) XXX_Size() int { return xxx_messageInfo_GetProviderSchema_Request.Size(m) @@ -872,16 +893,17 @@ func (m *GetProviderSchema_Response) Reset() { *m = GetProviderSchema_Re func (m *GetProviderSchema_Response) String() string { return proto.CompactTextString(m) } func (*GetProviderSchema_Response) ProtoMessage() {} func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, 
[]int{6, 1} + return fileDescriptor_17ae6090ff270234, []int{6, 1} } + func (m *GetProviderSchema_Response) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetProviderSchema_Response.Unmarshal(m, b) } func (m *GetProviderSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetProviderSchema_Response.Marshal(b, m, deterministic) } -func (dst *GetProviderSchema_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetProviderSchema_Response.Merge(dst, src) +func (m *GetProviderSchema_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProviderSchema_Response.Merge(m, src) } func (m *GetProviderSchema_Response) XXX_Size() int { return xxx_messageInfo_GetProviderSchema_Response.Size(m) @@ -930,16 +952,17 @@ func (m *PrepareProviderConfig) Reset() { *m = PrepareProviderConfig{} } func (m *PrepareProviderConfig) String() string { return proto.CompactTextString(m) } func (*PrepareProviderConfig) ProtoMessage() {} func (*PrepareProviderConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{7} + return fileDescriptor_17ae6090ff270234, []int{7} } + func (m *PrepareProviderConfig) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PrepareProviderConfig.Unmarshal(m, b) } func (m *PrepareProviderConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PrepareProviderConfig.Marshal(b, m, deterministic) } -func (dst *PrepareProviderConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrepareProviderConfig.Merge(dst, src) +func (m *PrepareProviderConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProviderConfig.Merge(m, src) } func (m *PrepareProviderConfig) XXX_Size() int { return xxx_messageInfo_PrepareProviderConfig.Size(m) @@ -961,16 +984,17 @@ func (m *PrepareProviderConfig_Request) Reset() { *m = PrepareProviderCo func (m *PrepareProviderConfig_Request) String() string { return proto.CompactTextString(m) } func 
(*PrepareProviderConfig_Request) ProtoMessage() {} func (*PrepareProviderConfig_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{7, 0} + return fileDescriptor_17ae6090ff270234, []int{7, 0} } + func (m *PrepareProviderConfig_Request) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PrepareProviderConfig_Request.Unmarshal(m, b) } func (m *PrepareProviderConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PrepareProviderConfig_Request.Marshal(b, m, deterministic) } -func (dst *PrepareProviderConfig_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrepareProviderConfig_Request.Merge(dst, src) +func (m *PrepareProviderConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProviderConfig_Request.Merge(m, src) } func (m *PrepareProviderConfig_Request) XXX_Size() int { return xxx_messageInfo_PrepareProviderConfig_Request.Size(m) @@ -1000,16 +1024,17 @@ func (m *PrepareProviderConfig_Response) Reset() { *m = PrepareProviderC func (m *PrepareProviderConfig_Response) String() string { return proto.CompactTextString(m) } func (*PrepareProviderConfig_Response) ProtoMessage() {} func (*PrepareProviderConfig_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{7, 1} + return fileDescriptor_17ae6090ff270234, []int{7, 1} } + func (m *PrepareProviderConfig_Response) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PrepareProviderConfig_Response.Unmarshal(m, b) } func (m *PrepareProviderConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PrepareProviderConfig_Response.Marshal(b, m, deterministic) } -func (dst *PrepareProviderConfig_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrepareProviderConfig_Response.Merge(dst, src) +func (m *PrepareProviderConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProviderConfig_Response.Merge(m, 
src) } func (m *PrepareProviderConfig_Response) XXX_Size() int { return xxx_messageInfo_PrepareProviderConfig_Response.Size(m) @@ -1044,16 +1069,17 @@ func (m *UpgradeResourceState) Reset() { *m = UpgradeResourceState{} } func (m *UpgradeResourceState) String() string { return proto.CompactTextString(m) } func (*UpgradeResourceState) ProtoMessage() {} func (*UpgradeResourceState) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{8} + return fileDescriptor_17ae6090ff270234, []int{8} } + func (m *UpgradeResourceState) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UpgradeResourceState.Unmarshal(m, b) } func (m *UpgradeResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_UpgradeResourceState.Marshal(b, m, deterministic) } -func (dst *UpgradeResourceState) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpgradeResourceState.Merge(dst, src) +func (m *UpgradeResourceState) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradeResourceState.Merge(m, src) } func (m *UpgradeResourceState) XXX_Size() int { return xxx_messageInfo_UpgradeResourceState.Size(m) @@ -1083,16 +1109,17 @@ func (m *UpgradeResourceState_Request) Reset() { *m = UpgradeResourceSta func (m *UpgradeResourceState_Request) String() string { return proto.CompactTextString(m) } func (*UpgradeResourceState_Request) ProtoMessage() {} func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{8, 0} + return fileDescriptor_17ae6090ff270234, []int{8, 0} } + func (m *UpgradeResourceState_Request) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UpgradeResourceState_Request.Unmarshal(m, b) } func (m *UpgradeResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_UpgradeResourceState_Request.Marshal(b, m, deterministic) } -func (dst *UpgradeResourceState_Request) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_UpgradeResourceState_Request.Merge(dst, src) +func (m *UpgradeResourceState_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradeResourceState_Request.Merge(m, src) } func (m *UpgradeResourceState_Request) XXX_Size() int { return xxx_messageInfo_UpgradeResourceState_Request.Size(m) @@ -1142,16 +1169,17 @@ func (m *UpgradeResourceState_Response) Reset() { *m = UpgradeResourceSt func (m *UpgradeResourceState_Response) String() string { return proto.CompactTextString(m) } func (*UpgradeResourceState_Response) ProtoMessage() {} func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{8, 1} + return fileDescriptor_17ae6090ff270234, []int{8, 1} } + func (m *UpgradeResourceState_Response) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UpgradeResourceState_Response.Unmarshal(m, b) } func (m *UpgradeResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_UpgradeResourceState_Response.Marshal(b, m, deterministic) } -func (dst *UpgradeResourceState_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpgradeResourceState_Response.Merge(dst, src) +func (m *UpgradeResourceState_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradeResourceState_Response.Merge(m, src) } func (m *UpgradeResourceState_Response) XXX_Size() int { return xxx_messageInfo_UpgradeResourceState_Response.Size(m) @@ -1186,16 +1214,17 @@ func (m *ValidateResourceTypeConfig) Reset() { *m = ValidateResourceType func (m *ValidateResourceTypeConfig) String() string { return proto.CompactTextString(m) } func (*ValidateResourceTypeConfig) ProtoMessage() {} func (*ValidateResourceTypeConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{9} + return fileDescriptor_17ae6090ff270234, []int{9} } + func (m *ValidateResourceTypeConfig) XXX_Unmarshal(b []byte) error { return 
xxx_messageInfo_ValidateResourceTypeConfig.Unmarshal(m, b) } func (m *ValidateResourceTypeConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ValidateResourceTypeConfig.Marshal(b, m, deterministic) } -func (dst *ValidateResourceTypeConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateResourceTypeConfig.Merge(dst, src) +func (m *ValidateResourceTypeConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateResourceTypeConfig.Merge(m, src) } func (m *ValidateResourceTypeConfig) XXX_Size() int { return xxx_messageInfo_ValidateResourceTypeConfig.Size(m) @@ -1218,16 +1247,17 @@ func (m *ValidateResourceTypeConfig_Request) Reset() { *m = ValidateReso func (m *ValidateResourceTypeConfig_Request) String() string { return proto.CompactTextString(m) } func (*ValidateResourceTypeConfig_Request) ProtoMessage() {} func (*ValidateResourceTypeConfig_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{9, 0} + return fileDescriptor_17ae6090ff270234, []int{9, 0} } + func (m *ValidateResourceTypeConfig_Request) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ValidateResourceTypeConfig_Request.Unmarshal(m, b) } func (m *ValidateResourceTypeConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ValidateResourceTypeConfig_Request.Marshal(b, m, deterministic) } -func (dst *ValidateResourceTypeConfig_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateResourceTypeConfig_Request.Merge(dst, src) +func (m *ValidateResourceTypeConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateResourceTypeConfig_Request.Merge(m, src) } func (m *ValidateResourceTypeConfig_Request) XXX_Size() int { return xxx_messageInfo_ValidateResourceTypeConfig_Request.Size(m) @@ -1263,16 +1293,17 @@ func (m *ValidateResourceTypeConfig_Response) Reset() { *m = ValidateRes func (m *ValidateResourceTypeConfig_Response) String() string { return 
proto.CompactTextString(m) } func (*ValidateResourceTypeConfig_Response) ProtoMessage() {} func (*ValidateResourceTypeConfig_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{9, 1} + return fileDescriptor_17ae6090ff270234, []int{9, 1} } + func (m *ValidateResourceTypeConfig_Response) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ValidateResourceTypeConfig_Response.Unmarshal(m, b) } func (m *ValidateResourceTypeConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ValidateResourceTypeConfig_Response.Marshal(b, m, deterministic) } -func (dst *ValidateResourceTypeConfig_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateResourceTypeConfig_Response.Merge(dst, src) +func (m *ValidateResourceTypeConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateResourceTypeConfig_Response.Merge(m, src) } func (m *ValidateResourceTypeConfig_Response) XXX_Size() int { return xxx_messageInfo_ValidateResourceTypeConfig_Response.Size(m) @@ -1300,16 +1331,17 @@ func (m *ValidateDataSourceConfig) Reset() { *m = ValidateDataSourceConf func (m *ValidateDataSourceConfig) String() string { return proto.CompactTextString(m) } func (*ValidateDataSourceConfig) ProtoMessage() {} func (*ValidateDataSourceConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{10} + return fileDescriptor_17ae6090ff270234, []int{10} } + func (m *ValidateDataSourceConfig) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ValidateDataSourceConfig.Unmarshal(m, b) } func (m *ValidateDataSourceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ValidateDataSourceConfig.Marshal(b, m, deterministic) } -func (dst *ValidateDataSourceConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateDataSourceConfig.Merge(dst, src) +func (m *ValidateDataSourceConfig) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ValidateDataSourceConfig.Merge(m, src) } func (m *ValidateDataSourceConfig) XXX_Size() int { return xxx_messageInfo_ValidateDataSourceConfig.Size(m) @@ -1332,16 +1364,17 @@ func (m *ValidateDataSourceConfig_Request) Reset() { *m = ValidateDataSo func (m *ValidateDataSourceConfig_Request) String() string { return proto.CompactTextString(m) } func (*ValidateDataSourceConfig_Request) ProtoMessage() {} func (*ValidateDataSourceConfig_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{10, 0} + return fileDescriptor_17ae6090ff270234, []int{10, 0} } + func (m *ValidateDataSourceConfig_Request) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ValidateDataSourceConfig_Request.Unmarshal(m, b) } func (m *ValidateDataSourceConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ValidateDataSourceConfig_Request.Marshal(b, m, deterministic) } -func (dst *ValidateDataSourceConfig_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateDataSourceConfig_Request.Merge(dst, src) +func (m *ValidateDataSourceConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateDataSourceConfig_Request.Merge(m, src) } func (m *ValidateDataSourceConfig_Request) XXX_Size() int { return xxx_messageInfo_ValidateDataSourceConfig_Request.Size(m) @@ -1377,16 +1410,17 @@ func (m *ValidateDataSourceConfig_Response) Reset() { *m = ValidateDataS func (m *ValidateDataSourceConfig_Response) String() string { return proto.CompactTextString(m) } func (*ValidateDataSourceConfig_Response) ProtoMessage() {} func (*ValidateDataSourceConfig_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{10, 1} + return fileDescriptor_17ae6090ff270234, []int{10, 1} } + func (m *ValidateDataSourceConfig_Response) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ValidateDataSourceConfig_Response.Unmarshal(m, b) } func (m 
*ValidateDataSourceConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ValidateDataSourceConfig_Response.Marshal(b, m, deterministic) } -func (dst *ValidateDataSourceConfig_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateDataSourceConfig_Response.Merge(dst, src) +func (m *ValidateDataSourceConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateDataSourceConfig_Response.Merge(m, src) } func (m *ValidateDataSourceConfig_Response) XXX_Size() int { return xxx_messageInfo_ValidateDataSourceConfig_Response.Size(m) @@ -1414,16 +1448,17 @@ func (m *Configure) Reset() { *m = Configure{} } func (m *Configure) String() string { return proto.CompactTextString(m) } func (*Configure) ProtoMessage() {} func (*Configure) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{11} + return fileDescriptor_17ae6090ff270234, []int{11} } + func (m *Configure) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Configure.Unmarshal(m, b) } func (m *Configure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Configure.Marshal(b, m, deterministic) } -func (dst *Configure) XXX_Merge(src proto.Message) { - xxx_messageInfo_Configure.Merge(dst, src) +func (m *Configure) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configure.Merge(m, src) } func (m *Configure) XXX_Size() int { return xxx_messageInfo_Configure.Size(m) @@ -1446,16 +1481,17 @@ func (m *Configure_Request) Reset() { *m = Configure_Request{} } func (m *Configure_Request) String() string { return proto.CompactTextString(m) } func (*Configure_Request) ProtoMessage() {} func (*Configure_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{11, 0} + return fileDescriptor_17ae6090ff270234, []int{11, 0} } + func (m *Configure_Request) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Configure_Request.Unmarshal(m, b) } func (m 
*Configure_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Configure_Request.Marshal(b, m, deterministic) } -func (dst *Configure_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_Configure_Request.Merge(dst, src) +func (m *Configure_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configure_Request.Merge(m, src) } func (m *Configure_Request) XXX_Size() int { return xxx_messageInfo_Configure_Request.Size(m) @@ -1491,16 +1527,17 @@ func (m *Configure_Response) Reset() { *m = Configure_Response{} } func (m *Configure_Response) String() string { return proto.CompactTextString(m) } func (*Configure_Response) ProtoMessage() {} func (*Configure_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{11, 1} + return fileDescriptor_17ae6090ff270234, []int{11, 1} } + func (m *Configure_Response) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Configure_Response.Unmarshal(m, b) } func (m *Configure_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Configure_Response.Marshal(b, m, deterministic) } -func (dst *Configure_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_Configure_Response.Merge(dst, src) +func (m *Configure_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configure_Response.Merge(m, src) } func (m *Configure_Response) XXX_Size() int { return xxx_messageInfo_Configure_Response.Size(m) @@ -1528,16 +1565,17 @@ func (m *ReadResource) Reset() { *m = ReadResource{} } func (m *ReadResource) String() string { return proto.CompactTextString(m) } func (*ReadResource) ProtoMessage() {} func (*ReadResource) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{12} + return fileDescriptor_17ae6090ff270234, []int{12} } + func (m *ReadResource) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ReadResource.Unmarshal(m, b) } func (m *ReadResource) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { return xxx_messageInfo_ReadResource.Marshal(b, m, deterministic) } -func (dst *ReadResource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadResource.Merge(dst, src) +func (m *ReadResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResource.Merge(m, src) } func (m *ReadResource) XXX_Size() int { return xxx_messageInfo_ReadResource.Size(m) @@ -1560,16 +1598,17 @@ func (m *ReadResource_Request) Reset() { *m = ReadResource_Request{} } func (m *ReadResource_Request) String() string { return proto.CompactTextString(m) } func (*ReadResource_Request) ProtoMessage() {} func (*ReadResource_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{12, 0} + return fileDescriptor_17ae6090ff270234, []int{12, 0} } + func (m *ReadResource_Request) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ReadResource_Request.Unmarshal(m, b) } func (m *ReadResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ReadResource_Request.Marshal(b, m, deterministic) } -func (dst *ReadResource_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadResource_Request.Merge(dst, src) +func (m *ReadResource_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResource_Request.Merge(m, src) } func (m *ReadResource_Request) XXX_Size() int { return xxx_messageInfo_ReadResource_Request.Size(m) @@ -1606,16 +1645,17 @@ func (m *ReadResource_Response) Reset() { *m = ReadResource_Response{} } func (m *ReadResource_Response) String() string { return proto.CompactTextString(m) } func (*ReadResource_Response) ProtoMessage() {} func (*ReadResource_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{12, 1} + return fileDescriptor_17ae6090ff270234, []int{12, 1} } + func (m *ReadResource_Response) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ReadResource_Response.Unmarshal(m, b) } func (m 
*ReadResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ReadResource_Response.Marshal(b, m, deterministic) } -func (dst *ReadResource_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadResource_Response.Merge(dst, src) +func (m *ReadResource_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResource_Response.Merge(m, src) } func (m *ReadResource_Response) XXX_Size() int { return xxx_messageInfo_ReadResource_Response.Size(m) @@ -1650,16 +1690,17 @@ func (m *PlanResourceChange) Reset() { *m = PlanResourceChange{} } func (m *PlanResourceChange) String() string { return proto.CompactTextString(m) } func (*PlanResourceChange) ProtoMessage() {} func (*PlanResourceChange) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{13} + return fileDescriptor_17ae6090ff270234, []int{13} } + func (m *PlanResourceChange) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PlanResourceChange.Unmarshal(m, b) } func (m *PlanResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PlanResourceChange.Marshal(b, m, deterministic) } -func (dst *PlanResourceChange) XXX_Merge(src proto.Message) { - xxx_messageInfo_PlanResourceChange.Merge(dst, src) +func (m *PlanResourceChange) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlanResourceChange.Merge(m, src) } func (m *PlanResourceChange) XXX_Size() int { return xxx_messageInfo_PlanResourceChange.Size(m) @@ -1685,16 +1726,17 @@ func (m *PlanResourceChange_Request) Reset() { *m = PlanResourceChange_R func (m *PlanResourceChange_Request) String() string { return proto.CompactTextString(m) } func (*PlanResourceChange_Request) ProtoMessage() {} func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{13, 0} + return fileDescriptor_17ae6090ff270234, []int{13, 0} } + func (m *PlanResourceChange_Request) XXX_Unmarshal(b []byte) error 
{ return xxx_messageInfo_PlanResourceChange_Request.Unmarshal(m, b) } func (m *PlanResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PlanResourceChange_Request.Marshal(b, m, deterministic) } -func (dst *PlanResourceChange_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_PlanResourceChange_Request.Merge(dst, src) +func (m *PlanResourceChange_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlanResourceChange_Request.Merge(m, src) } func (m *PlanResourceChange_Request) XXX_Size() int { return xxx_messageInfo_PlanResourceChange_Request.Size(m) @@ -1754,16 +1796,17 @@ func (m *PlanResourceChange_Response) Reset() { *m = PlanResourceChange_ func (m *PlanResourceChange_Response) String() string { return proto.CompactTextString(m) } func (*PlanResourceChange_Response) ProtoMessage() {} func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{13, 1} + return fileDescriptor_17ae6090ff270234, []int{13, 1} } + func (m *PlanResourceChange_Response) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PlanResourceChange_Response.Unmarshal(m, b) } func (m *PlanResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PlanResourceChange_Response.Marshal(b, m, deterministic) } -func (dst *PlanResourceChange_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_PlanResourceChange_Response.Merge(dst, src) +func (m *PlanResourceChange_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlanResourceChange_Response.Merge(m, src) } func (m *PlanResourceChange_Response) XXX_Size() int { return xxx_messageInfo_PlanResourceChange_Response.Size(m) @@ -1812,16 +1855,17 @@ func (m *ApplyResourceChange) Reset() { *m = ApplyResourceChange{} } func (m *ApplyResourceChange) String() string { return proto.CompactTextString(m) } func (*ApplyResourceChange) ProtoMessage() {} func 
(*ApplyResourceChange) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{14} + return fileDescriptor_17ae6090ff270234, []int{14} } + func (m *ApplyResourceChange) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ApplyResourceChange.Unmarshal(m, b) } func (m *ApplyResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ApplyResourceChange.Marshal(b, m, deterministic) } -func (dst *ApplyResourceChange) XXX_Merge(src proto.Message) { - xxx_messageInfo_ApplyResourceChange.Merge(dst, src) +func (m *ApplyResourceChange) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyResourceChange.Merge(m, src) } func (m *ApplyResourceChange) XXX_Size() int { return xxx_messageInfo_ApplyResourceChange.Size(m) @@ -1847,16 +1891,17 @@ func (m *ApplyResourceChange_Request) Reset() { *m = ApplyResourceChange func (m *ApplyResourceChange_Request) String() string { return proto.CompactTextString(m) } func (*ApplyResourceChange_Request) ProtoMessage() {} func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{14, 0} + return fileDescriptor_17ae6090ff270234, []int{14, 0} } + func (m *ApplyResourceChange_Request) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ApplyResourceChange_Request.Unmarshal(m, b) } func (m *ApplyResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ApplyResourceChange_Request.Marshal(b, m, deterministic) } -func (dst *ApplyResourceChange_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_ApplyResourceChange_Request.Merge(dst, src) +func (m *ApplyResourceChange_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyResourceChange_Request.Merge(m, src) } func (m *ApplyResourceChange_Request) XXX_Size() int { return xxx_messageInfo_ApplyResourceChange_Request.Size(m) @@ -1915,16 +1960,17 @@ func (m *ApplyResourceChange_Response) Reset() { *m = 
ApplyResourceChang func (m *ApplyResourceChange_Response) String() string { return proto.CompactTextString(m) } func (*ApplyResourceChange_Response) ProtoMessage() {} func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{14, 1} + return fileDescriptor_17ae6090ff270234, []int{14, 1} } + func (m *ApplyResourceChange_Response) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ApplyResourceChange_Response.Unmarshal(m, b) } func (m *ApplyResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ApplyResourceChange_Response.Marshal(b, m, deterministic) } -func (dst *ApplyResourceChange_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_ApplyResourceChange_Response.Merge(dst, src) +func (m *ApplyResourceChange_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyResourceChange_Response.Merge(m, src) } func (m *ApplyResourceChange_Response) XXX_Size() int { return xxx_messageInfo_ApplyResourceChange_Response.Size(m) @@ -1966,16 +2012,17 @@ func (m *ImportResourceState) Reset() { *m = ImportResourceState{} } func (m *ImportResourceState) String() string { return proto.CompactTextString(m) } func (*ImportResourceState) ProtoMessage() {} func (*ImportResourceState) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{15} + return fileDescriptor_17ae6090ff270234, []int{15} } + func (m *ImportResourceState) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ImportResourceState.Unmarshal(m, b) } func (m *ImportResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ImportResourceState.Marshal(b, m, deterministic) } -func (dst *ImportResourceState) XXX_Merge(src proto.Message) { - xxx_messageInfo_ImportResourceState.Merge(dst, src) +func (m *ImportResourceState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState.Merge(m, src) } func (m 
*ImportResourceState) XXX_Size() int { return xxx_messageInfo_ImportResourceState.Size(m) @@ -1998,16 +2045,17 @@ func (m *ImportResourceState_Request) Reset() { *m = ImportResourceState func (m *ImportResourceState_Request) String() string { return proto.CompactTextString(m) } func (*ImportResourceState_Request) ProtoMessage() {} func (*ImportResourceState_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{15, 0} + return fileDescriptor_17ae6090ff270234, []int{15, 0} } + func (m *ImportResourceState_Request) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ImportResourceState_Request.Unmarshal(m, b) } func (m *ImportResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ImportResourceState_Request.Marshal(b, m, deterministic) } -func (dst *ImportResourceState_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_ImportResourceState_Request.Merge(dst, src) +func (m *ImportResourceState_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState_Request.Merge(m, src) } func (m *ImportResourceState_Request) XXX_Size() int { return xxx_messageInfo_ImportResourceState_Request.Size(m) @@ -2045,16 +2093,17 @@ func (m *ImportResourceState_ImportedResource) Reset() { *m = ImportReso func (m *ImportResourceState_ImportedResource) String() string { return proto.CompactTextString(m) } func (*ImportResourceState_ImportedResource) ProtoMessage() {} func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{15, 1} + return fileDescriptor_17ae6090ff270234, []int{15, 1} } + func (m *ImportResourceState_ImportedResource) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ImportResourceState_ImportedResource.Unmarshal(m, b) } func (m *ImportResourceState_ImportedResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return 
xxx_messageInfo_ImportResourceState_ImportedResource.Marshal(b, m, deterministic) } -func (dst *ImportResourceState_ImportedResource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ImportResourceState_ImportedResource.Merge(dst, src) +func (m *ImportResourceState_ImportedResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState_ImportedResource.Merge(m, src) } func (m *ImportResourceState_ImportedResource) XXX_Size() int { return xxx_messageInfo_ImportResourceState_ImportedResource.Size(m) @@ -2098,16 +2147,17 @@ func (m *ImportResourceState_Response) Reset() { *m = ImportResourceStat func (m *ImportResourceState_Response) String() string { return proto.CompactTextString(m) } func (*ImportResourceState_Response) ProtoMessage() {} func (*ImportResourceState_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{15, 2} + return fileDescriptor_17ae6090ff270234, []int{15, 2} } + func (m *ImportResourceState_Response) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ImportResourceState_Response.Unmarshal(m, b) } func (m *ImportResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ImportResourceState_Response.Marshal(b, m, deterministic) } -func (dst *ImportResourceState_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_ImportResourceState_Response.Merge(dst, src) +func (m *ImportResourceState_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState_Response.Merge(m, src) } func (m *ImportResourceState_Response) XXX_Size() int { return xxx_messageInfo_ImportResourceState_Response.Size(m) @@ -2142,16 +2192,17 @@ func (m *ReadDataSource) Reset() { *m = ReadDataSource{} } func (m *ReadDataSource) String() string { return proto.CompactTextString(m) } func (*ReadDataSource) ProtoMessage() {} func (*ReadDataSource) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{16} + return 
fileDescriptor_17ae6090ff270234, []int{16} } + func (m *ReadDataSource) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ReadDataSource.Unmarshal(m, b) } func (m *ReadDataSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ReadDataSource.Marshal(b, m, deterministic) } -func (dst *ReadDataSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadDataSource.Merge(dst, src) +func (m *ReadDataSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDataSource.Merge(m, src) } func (m *ReadDataSource) XXX_Size() int { return xxx_messageInfo_ReadDataSource.Size(m) @@ -2174,16 +2225,17 @@ func (m *ReadDataSource_Request) Reset() { *m = ReadDataSource_Request{} func (m *ReadDataSource_Request) String() string { return proto.CompactTextString(m) } func (*ReadDataSource_Request) ProtoMessage() {} func (*ReadDataSource_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{16, 0} + return fileDescriptor_17ae6090ff270234, []int{16, 0} } + func (m *ReadDataSource_Request) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ReadDataSource_Request.Unmarshal(m, b) } func (m *ReadDataSource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ReadDataSource_Request.Marshal(b, m, deterministic) } -func (dst *ReadDataSource_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadDataSource_Request.Merge(dst, src) +func (m *ReadDataSource_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDataSource_Request.Merge(m, src) } func (m *ReadDataSource_Request) XXX_Size() int { return xxx_messageInfo_ReadDataSource_Request.Size(m) @@ -2220,16 +2272,17 @@ func (m *ReadDataSource_Response) Reset() { *m = ReadDataSource_Response func (m *ReadDataSource_Response) String() string { return proto.CompactTextString(m) } func (*ReadDataSource_Response) ProtoMessage() {} func (*ReadDataSource_Response) Descriptor() ([]byte, []int) { - return 
fileDescriptor_plugin_1291da820e90ac50, []int{16, 1} + return fileDescriptor_17ae6090ff270234, []int{16, 1} } + func (m *ReadDataSource_Response) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ReadDataSource_Response.Unmarshal(m, b) } func (m *ReadDataSource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ReadDataSource_Response.Marshal(b, m, deterministic) } -func (dst *ReadDataSource_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadDataSource_Response.Merge(dst, src) +func (m *ReadDataSource_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDataSource_Response.Merge(m, src) } func (m *ReadDataSource_Response) XXX_Size() int { return xxx_messageInfo_ReadDataSource_Response.Size(m) @@ -2264,16 +2317,17 @@ func (m *GetProvisionerSchema) Reset() { *m = GetProvisionerSchema{} } func (m *GetProvisionerSchema) String() string { return proto.CompactTextString(m) } func (*GetProvisionerSchema) ProtoMessage() {} func (*GetProvisionerSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{17} + return fileDescriptor_17ae6090ff270234, []int{17} } + func (m *GetProvisionerSchema) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetProvisionerSchema.Unmarshal(m, b) } func (m *GetProvisionerSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetProvisionerSchema.Marshal(b, m, deterministic) } -func (dst *GetProvisionerSchema) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetProvisionerSchema.Merge(dst, src) +func (m *GetProvisionerSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProvisionerSchema.Merge(m, src) } func (m *GetProvisionerSchema) XXX_Size() int { return xxx_messageInfo_GetProvisionerSchema.Size(m) @@ -2294,16 +2348,17 @@ func (m *GetProvisionerSchema_Request) Reset() { *m = GetProvisionerSche func (m *GetProvisionerSchema_Request) String() string { return proto.CompactTextString(m) } func 
(*GetProvisionerSchema_Request) ProtoMessage() {} func (*GetProvisionerSchema_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{17, 0} + return fileDescriptor_17ae6090ff270234, []int{17, 0} } + func (m *GetProvisionerSchema_Request) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetProvisionerSchema_Request.Unmarshal(m, b) } func (m *GetProvisionerSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetProvisionerSchema_Request.Marshal(b, m, deterministic) } -func (dst *GetProvisionerSchema_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetProvisionerSchema_Request.Merge(dst, src) +func (m *GetProvisionerSchema_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProvisionerSchema_Request.Merge(m, src) } func (m *GetProvisionerSchema_Request) XXX_Size() int { return xxx_messageInfo_GetProvisionerSchema_Request.Size(m) @@ -2326,16 +2381,17 @@ func (m *GetProvisionerSchema_Response) Reset() { *m = GetProvisionerSch func (m *GetProvisionerSchema_Response) String() string { return proto.CompactTextString(m) } func (*GetProvisionerSchema_Response) ProtoMessage() {} func (*GetProvisionerSchema_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{17, 1} + return fileDescriptor_17ae6090ff270234, []int{17, 1} } + func (m *GetProvisionerSchema_Response) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetProvisionerSchema_Response.Unmarshal(m, b) } func (m *GetProvisionerSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetProvisionerSchema_Response.Marshal(b, m, deterministic) } -func (dst *GetProvisionerSchema_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetProvisionerSchema_Response.Merge(dst, src) +func (m *GetProvisionerSchema_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProvisionerSchema_Response.Merge(m, src) } func (m 
*GetProvisionerSchema_Response) XXX_Size() int { return xxx_messageInfo_GetProvisionerSchema_Response.Size(m) @@ -2370,16 +2426,17 @@ func (m *ValidateProvisionerConfig) Reset() { *m = ValidateProvisionerCo func (m *ValidateProvisionerConfig) String() string { return proto.CompactTextString(m) } func (*ValidateProvisionerConfig) ProtoMessage() {} func (*ValidateProvisionerConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{18} + return fileDescriptor_17ae6090ff270234, []int{18} } + func (m *ValidateProvisionerConfig) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ValidateProvisionerConfig.Unmarshal(m, b) } func (m *ValidateProvisionerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ValidateProvisionerConfig.Marshal(b, m, deterministic) } -func (dst *ValidateProvisionerConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateProvisionerConfig.Merge(dst, src) +func (m *ValidateProvisionerConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateProvisionerConfig.Merge(m, src) } func (m *ValidateProvisionerConfig) XXX_Size() int { return xxx_messageInfo_ValidateProvisionerConfig.Size(m) @@ -2401,16 +2458,17 @@ func (m *ValidateProvisionerConfig_Request) Reset() { *m = ValidateProvi func (m *ValidateProvisionerConfig_Request) String() string { return proto.CompactTextString(m) } func (*ValidateProvisionerConfig_Request) ProtoMessage() {} func (*ValidateProvisionerConfig_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{18, 0} + return fileDescriptor_17ae6090ff270234, []int{18, 0} } + func (m *ValidateProvisionerConfig_Request) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ValidateProvisionerConfig_Request.Unmarshal(m, b) } func (m *ValidateProvisionerConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ValidateProvisionerConfig_Request.Marshal(b, m, deterministic) } 
-func (dst *ValidateProvisionerConfig_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateProvisionerConfig_Request.Merge(dst, src) +func (m *ValidateProvisionerConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateProvisionerConfig_Request.Merge(m, src) } func (m *ValidateProvisionerConfig_Request) XXX_Size() int { return xxx_messageInfo_ValidateProvisionerConfig_Request.Size(m) @@ -2439,16 +2497,17 @@ func (m *ValidateProvisionerConfig_Response) Reset() { *m = ValidateProv func (m *ValidateProvisionerConfig_Response) String() string { return proto.CompactTextString(m) } func (*ValidateProvisionerConfig_Response) ProtoMessage() {} func (*ValidateProvisionerConfig_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{18, 1} + return fileDescriptor_17ae6090ff270234, []int{18, 1} } + func (m *ValidateProvisionerConfig_Response) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ValidateProvisionerConfig_Response.Unmarshal(m, b) } func (m *ValidateProvisionerConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ValidateProvisionerConfig_Response.Marshal(b, m, deterministic) } -func (dst *ValidateProvisionerConfig_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateProvisionerConfig_Response.Merge(dst, src) +func (m *ValidateProvisionerConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateProvisionerConfig_Response.Merge(m, src) } func (m *ValidateProvisionerConfig_Response) XXX_Size() int { return xxx_messageInfo_ValidateProvisionerConfig_Response.Size(m) @@ -2476,16 +2535,17 @@ func (m *ProvisionResource) Reset() { *m = ProvisionResource{} } func (m *ProvisionResource) String() string { return proto.CompactTextString(m) } func (*ProvisionResource) ProtoMessage() {} func (*ProvisionResource) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{19} + return 
fileDescriptor_17ae6090ff270234, []int{19} } + func (m *ProvisionResource) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ProvisionResource.Unmarshal(m, b) } func (m *ProvisionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ProvisionResource.Marshal(b, m, deterministic) } -func (dst *ProvisionResource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProvisionResource.Merge(dst, src) +func (m *ProvisionResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProvisionResource.Merge(m, src) } func (m *ProvisionResource) XXX_Size() int { return xxx_messageInfo_ProvisionResource.Size(m) @@ -2508,16 +2568,17 @@ func (m *ProvisionResource_Request) Reset() { *m = ProvisionResource_Req func (m *ProvisionResource_Request) String() string { return proto.CompactTextString(m) } func (*ProvisionResource_Request) ProtoMessage() {} func (*ProvisionResource_Request) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{19, 0} + return fileDescriptor_17ae6090ff270234, []int{19, 0} } + func (m *ProvisionResource_Request) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ProvisionResource_Request.Unmarshal(m, b) } func (m *ProvisionResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ProvisionResource_Request.Marshal(b, m, deterministic) } -func (dst *ProvisionResource_Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProvisionResource_Request.Merge(dst, src) +func (m *ProvisionResource_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProvisionResource_Request.Merge(m, src) } func (m *ProvisionResource_Request) XXX_Size() int { return xxx_messageInfo_ProvisionResource_Request.Size(m) @@ -2554,16 +2615,17 @@ func (m *ProvisionResource_Response) Reset() { *m = ProvisionResource_Re func (m *ProvisionResource_Response) String() string { return proto.CompactTextString(m) } func (*ProvisionResource_Response) ProtoMessage() {} func 
(*ProvisionResource_Response) Descriptor() ([]byte, []int) { - return fileDescriptor_plugin_1291da820e90ac50, []int{19, 1} + return fileDescriptor_17ae6090ff270234, []int{19, 1} } + func (m *ProvisionResource_Response) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ProvisionResource_Response.Unmarshal(m, b) } func (m *ProvisionResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ProvisionResource_Response.Marshal(b, m, deterministic) } -func (dst *ProvisionResource_Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProvisionResource_Response.Merge(dst, src) +func (m *ProvisionResource_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProvisionResource_Response.Merge(m, src) } func (m *ProvisionResource_Response) XXX_Size() int { return xxx_messageInfo_ProvisionResource_Response.Size(m) @@ -2589,66 +2651,187 @@ func (m *ProvisionResource_Response) GetDiagnostics() []*Diagnostic { } func init() { - proto.RegisterType((*DynamicValue)(nil), "proto.DynamicValue") - proto.RegisterType((*Diagnostic)(nil), "proto.Diagnostic") - proto.RegisterType((*AttributePath)(nil), "proto.AttributePath") - proto.RegisterType((*AttributePath_Step)(nil), "proto.AttributePath.Step") - proto.RegisterType((*Stop)(nil), "proto.Stop") - proto.RegisterType((*Stop_Request)(nil), "proto.Stop.Request") - proto.RegisterType((*Stop_Response)(nil), "proto.Stop.Response") - proto.RegisterType((*RawState)(nil), "proto.RawState") - proto.RegisterMapType((map[string]string)(nil), "proto.RawState.FlatmapEntry") - proto.RegisterType((*Schema)(nil), "proto.Schema") - proto.RegisterType((*Schema_Block)(nil), "proto.Schema.Block") - proto.RegisterType((*Schema_Attribute)(nil), "proto.Schema.Attribute") - proto.RegisterType((*Schema_NestedBlock)(nil), "proto.Schema.NestedBlock") - proto.RegisterType((*GetProviderSchema)(nil), "proto.GetProviderSchema") - proto.RegisterType((*GetProviderSchema_Request)(nil), 
"proto.GetProviderSchema.Request") - proto.RegisterType((*GetProviderSchema_Response)(nil), "proto.GetProviderSchema.Response") - proto.RegisterMapType((map[string]*Schema)(nil), "proto.GetProviderSchema.Response.DataSourceSchemasEntry") - proto.RegisterMapType((map[string]*Schema)(nil), "proto.GetProviderSchema.Response.ResourceSchemasEntry") - proto.RegisterType((*PrepareProviderConfig)(nil), "proto.PrepareProviderConfig") - proto.RegisterType((*PrepareProviderConfig_Request)(nil), "proto.PrepareProviderConfig.Request") - proto.RegisterType((*PrepareProviderConfig_Response)(nil), "proto.PrepareProviderConfig.Response") - proto.RegisterType((*UpgradeResourceState)(nil), "proto.UpgradeResourceState") - proto.RegisterType((*UpgradeResourceState_Request)(nil), "proto.UpgradeResourceState.Request") - proto.RegisterType((*UpgradeResourceState_Response)(nil), "proto.UpgradeResourceState.Response") - proto.RegisterType((*ValidateResourceTypeConfig)(nil), "proto.ValidateResourceTypeConfig") - proto.RegisterType((*ValidateResourceTypeConfig_Request)(nil), "proto.ValidateResourceTypeConfig.Request") - proto.RegisterType((*ValidateResourceTypeConfig_Response)(nil), "proto.ValidateResourceTypeConfig.Response") - proto.RegisterType((*ValidateDataSourceConfig)(nil), "proto.ValidateDataSourceConfig") - proto.RegisterType((*ValidateDataSourceConfig_Request)(nil), "proto.ValidateDataSourceConfig.Request") - proto.RegisterType((*ValidateDataSourceConfig_Response)(nil), "proto.ValidateDataSourceConfig.Response") - proto.RegisterType((*Configure)(nil), "proto.Configure") - proto.RegisterType((*Configure_Request)(nil), "proto.Configure.Request") - proto.RegisterType((*Configure_Response)(nil), "proto.Configure.Response") - proto.RegisterType((*ReadResource)(nil), "proto.ReadResource") - proto.RegisterType((*ReadResource_Request)(nil), "proto.ReadResource.Request") - proto.RegisterType((*ReadResource_Response)(nil), "proto.ReadResource.Response") - 
proto.RegisterType((*PlanResourceChange)(nil), "proto.PlanResourceChange") - proto.RegisterType((*PlanResourceChange_Request)(nil), "proto.PlanResourceChange.Request") - proto.RegisterType((*PlanResourceChange_Response)(nil), "proto.PlanResourceChange.Response") - proto.RegisterType((*ApplyResourceChange)(nil), "proto.ApplyResourceChange") - proto.RegisterType((*ApplyResourceChange_Request)(nil), "proto.ApplyResourceChange.Request") - proto.RegisterType((*ApplyResourceChange_Response)(nil), "proto.ApplyResourceChange.Response") - proto.RegisterType((*ImportResourceState)(nil), "proto.ImportResourceState") - proto.RegisterType((*ImportResourceState_Request)(nil), "proto.ImportResourceState.Request") - proto.RegisterType((*ImportResourceState_ImportedResource)(nil), "proto.ImportResourceState.ImportedResource") - proto.RegisterType((*ImportResourceState_Response)(nil), "proto.ImportResourceState.Response") - proto.RegisterType((*ReadDataSource)(nil), "proto.ReadDataSource") - proto.RegisterType((*ReadDataSource_Request)(nil), "proto.ReadDataSource.Request") - proto.RegisterType((*ReadDataSource_Response)(nil), "proto.ReadDataSource.Response") - proto.RegisterType((*GetProvisionerSchema)(nil), "proto.GetProvisionerSchema") - proto.RegisterType((*GetProvisionerSchema_Request)(nil), "proto.GetProvisionerSchema.Request") - proto.RegisterType((*GetProvisionerSchema_Response)(nil), "proto.GetProvisionerSchema.Response") - proto.RegisterType((*ValidateProvisionerConfig)(nil), "proto.ValidateProvisionerConfig") - proto.RegisterType((*ValidateProvisionerConfig_Request)(nil), "proto.ValidateProvisionerConfig.Request") - proto.RegisterType((*ValidateProvisionerConfig_Response)(nil), "proto.ValidateProvisionerConfig.Response") - proto.RegisterType((*ProvisionResource)(nil), "proto.ProvisionResource") - proto.RegisterType((*ProvisionResource_Request)(nil), "proto.ProvisionResource.Request") - proto.RegisterType((*ProvisionResource_Response)(nil), 
"proto.ProvisionResource.Response") - proto.RegisterEnum("proto.Diagnostic_Severity", Diagnostic_Severity_name, Diagnostic_Severity_value) - proto.RegisterEnum("proto.Schema_NestedBlock_NestingMode", Schema_NestedBlock_NestingMode_name, Schema_NestedBlock_NestingMode_value) + proto.RegisterEnum("tfplugin5.Diagnostic_Severity", Diagnostic_Severity_name, Diagnostic_Severity_value) + proto.RegisterEnum("tfplugin5.Schema_NestedBlock_NestingMode", Schema_NestedBlock_NestingMode_name, Schema_NestedBlock_NestingMode_value) + proto.RegisterType((*DynamicValue)(nil), "tfplugin5.DynamicValue") + proto.RegisterType((*Diagnostic)(nil), "tfplugin5.Diagnostic") + proto.RegisterType((*AttributePath)(nil), "tfplugin5.AttributePath") + proto.RegisterType((*AttributePath_Step)(nil), "tfplugin5.AttributePath.Step") + proto.RegisterType((*Stop)(nil), "tfplugin5.Stop") + proto.RegisterType((*Stop_Request)(nil), "tfplugin5.Stop.Request") + proto.RegisterType((*Stop_Response)(nil), "tfplugin5.Stop.Response") + proto.RegisterType((*RawState)(nil), "tfplugin5.RawState") + proto.RegisterMapType((map[string]string)(nil), "tfplugin5.RawState.FlatmapEntry") + proto.RegisterType((*Schema)(nil), "tfplugin5.Schema") + proto.RegisterType((*Schema_Block)(nil), "tfplugin5.Schema.Block") + proto.RegisterType((*Schema_Attribute)(nil), "tfplugin5.Schema.Attribute") + proto.RegisterType((*Schema_NestedBlock)(nil), "tfplugin5.Schema.NestedBlock") + proto.RegisterType((*GetProviderSchema)(nil), "tfplugin5.GetProviderSchema") + proto.RegisterType((*GetProviderSchema_Request)(nil), "tfplugin5.GetProviderSchema.Request") + proto.RegisterType((*GetProviderSchema_Response)(nil), "tfplugin5.GetProviderSchema.Response") + proto.RegisterMapType((map[string]*Schema)(nil), "tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry") + proto.RegisterMapType((map[string]*Schema)(nil), "tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry") + proto.RegisterType((*PrepareProviderConfig)(nil), 
"tfplugin5.PrepareProviderConfig") + proto.RegisterType((*PrepareProviderConfig_Request)(nil), "tfplugin5.PrepareProviderConfig.Request") + proto.RegisterType((*PrepareProviderConfig_Response)(nil), "tfplugin5.PrepareProviderConfig.Response") + proto.RegisterType((*UpgradeResourceState)(nil), "tfplugin5.UpgradeResourceState") + proto.RegisterType((*UpgradeResourceState_Request)(nil), "tfplugin5.UpgradeResourceState.Request") + proto.RegisterType((*UpgradeResourceState_Response)(nil), "tfplugin5.UpgradeResourceState.Response") + proto.RegisterType((*ValidateResourceTypeConfig)(nil), "tfplugin5.ValidateResourceTypeConfig") + proto.RegisterType((*ValidateResourceTypeConfig_Request)(nil), "tfplugin5.ValidateResourceTypeConfig.Request") + proto.RegisterType((*ValidateResourceTypeConfig_Response)(nil), "tfplugin5.ValidateResourceTypeConfig.Response") + proto.RegisterType((*ValidateDataSourceConfig)(nil), "tfplugin5.ValidateDataSourceConfig") + proto.RegisterType((*ValidateDataSourceConfig_Request)(nil), "tfplugin5.ValidateDataSourceConfig.Request") + proto.RegisterType((*ValidateDataSourceConfig_Response)(nil), "tfplugin5.ValidateDataSourceConfig.Response") + proto.RegisterType((*Configure)(nil), "tfplugin5.Configure") + proto.RegisterType((*Configure_Request)(nil), "tfplugin5.Configure.Request") + proto.RegisterType((*Configure_Response)(nil), "tfplugin5.Configure.Response") + proto.RegisterType((*ReadResource)(nil), "tfplugin5.ReadResource") + proto.RegisterType((*ReadResource_Request)(nil), "tfplugin5.ReadResource.Request") + proto.RegisterType((*ReadResource_Response)(nil), "tfplugin5.ReadResource.Response") + proto.RegisterType((*PlanResourceChange)(nil), "tfplugin5.PlanResourceChange") + proto.RegisterType((*PlanResourceChange_Request)(nil), "tfplugin5.PlanResourceChange.Request") + proto.RegisterType((*PlanResourceChange_Response)(nil), "tfplugin5.PlanResourceChange.Response") + proto.RegisterType((*ApplyResourceChange)(nil), "tfplugin5.ApplyResourceChange") + 
proto.RegisterType((*ApplyResourceChange_Request)(nil), "tfplugin5.ApplyResourceChange.Request") + proto.RegisterType((*ApplyResourceChange_Response)(nil), "tfplugin5.ApplyResourceChange.Response") + proto.RegisterType((*ImportResourceState)(nil), "tfplugin5.ImportResourceState") + proto.RegisterType((*ImportResourceState_Request)(nil), "tfplugin5.ImportResourceState.Request") + proto.RegisterType((*ImportResourceState_ImportedResource)(nil), "tfplugin5.ImportResourceState.ImportedResource") + proto.RegisterType((*ImportResourceState_Response)(nil), "tfplugin5.ImportResourceState.Response") + proto.RegisterType((*ReadDataSource)(nil), "tfplugin5.ReadDataSource") + proto.RegisterType((*ReadDataSource_Request)(nil), "tfplugin5.ReadDataSource.Request") + proto.RegisterType((*ReadDataSource_Response)(nil), "tfplugin5.ReadDataSource.Response") + proto.RegisterType((*GetProvisionerSchema)(nil), "tfplugin5.GetProvisionerSchema") + proto.RegisterType((*GetProvisionerSchema_Request)(nil), "tfplugin5.GetProvisionerSchema.Request") + proto.RegisterType((*GetProvisionerSchema_Response)(nil), "tfplugin5.GetProvisionerSchema.Response") + proto.RegisterType((*ValidateProvisionerConfig)(nil), "tfplugin5.ValidateProvisionerConfig") + proto.RegisterType((*ValidateProvisionerConfig_Request)(nil), "tfplugin5.ValidateProvisionerConfig.Request") + proto.RegisterType((*ValidateProvisionerConfig_Response)(nil), "tfplugin5.ValidateProvisionerConfig.Response") + proto.RegisterType((*ProvisionResource)(nil), "tfplugin5.ProvisionResource") + proto.RegisterType((*ProvisionResource_Request)(nil), "tfplugin5.ProvisionResource.Request") + proto.RegisterType((*ProvisionResource_Response)(nil), "tfplugin5.ProvisionResource.Response") +} + +func init() { proto.RegisterFile("tfplugin5.proto", fileDescriptor_17ae6090ff270234) } + +var fileDescriptor_17ae6090ff270234 = []byte{ + // 1834 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 
0xdd, 0x8f, 0x23, 0x47, + 0x11, 0xbf, 0xf1, 0xac, 0x77, 0xed, 0xf2, 0x7e, 0x78, 0xfb, 0x2e, 0x87, 0x99, 0x24, 0xb0, 0x98, + 0x8f, 0xdd, 0x28, 0x9c, 0x2f, 0xda, 0x83, 0x24, 0x2c, 0xa7, 0x88, 0xbd, 0xbd, 0xe5, 0xce, 0xe2, + 0xb2, 0x2c, 0xed, 0xcb, 0x1d, 0x12, 0x52, 0xac, 0x3e, 0x4f, 0xaf, 0x6f, 0x38, 0x7b, 0x66, 0xd2, + 0xd3, 0xde, 0x5b, 0x8b, 0x47, 0x44, 0x9e, 0x91, 0x10, 0x1f, 0x12, 0x11, 0x2f, 0x48, 0xfc, 0x0d, + 0xc0, 0x3f, 0xc0, 0x1f, 0x11, 0x78, 0x42, 0x3c, 0xa2, 0x3c, 0xc2, 0x0b, 0x12, 0xea, 0xaf, 0x99, + 0xb6, 0x3d, 0xf6, 0xce, 0xed, 0x26, 0x42, 0xbc, 0x4d, 0x77, 0xfd, 0xaa, 0xea, 0xd7, 0xd5, 0xd5, + 0x55, 0xdd, 0x36, 0x6c, 0xf0, 0x93, 0x78, 0x30, 0xea, 0x07, 0xe1, 0x37, 0x5b, 0x31, 0x8b, 0x78, + 0x84, 0xaa, 0xe9, 0x44, 0xf3, 0x36, 0xac, 0xde, 0x1d, 0x87, 0x64, 0x18, 0xf4, 0x1e, 0x91, 0xc1, + 0x88, 0xa2, 0x06, 0xac, 0x0c, 0x93, 0x7e, 0x4c, 0x7a, 0xcf, 0x1a, 0xce, 0x96, 0xb3, 0xb3, 0x8a, + 0xcd, 0x10, 0x21, 0x58, 0xfa, 0x71, 0x12, 0x85, 0x8d, 0x92, 0x9c, 0x96, 0xdf, 0xcd, 0xbf, 0x3b, + 0x00, 0x77, 0x03, 0xd2, 0x0f, 0xa3, 0x84, 0x07, 0x3d, 0xb4, 0x07, 0x95, 0x84, 0x9e, 0x52, 0x16, + 0xf0, 0xb1, 0xd4, 0x5e, 0xdf, 0xfd, 0x42, 0x2b, 0xf3, 0x9d, 0x01, 0x5b, 0x1d, 0x8d, 0xc2, 0x29, + 0x5e, 0x38, 0x4e, 0x46, 0xc3, 0x21, 0x61, 0x63, 0xe9, 0xa1, 0x8a, 0xcd, 0x10, 0x5d, 0x87, 0x65, + 0x9f, 0x72, 0x12, 0x0c, 0x1a, 0xae, 0x14, 0xe8, 0x11, 0x7a, 0x13, 0xaa, 0x84, 0x73, 0x16, 0x3c, + 0x19, 0x71, 0xda, 0x58, 0xda, 0x72, 0x76, 0x6a, 0xbb, 0x0d, 0xcb, 0xdd, 0xbe, 0x91, 0x1d, 0x13, + 0xfe, 0x14, 0x67, 0xd0, 0xe6, 0x4d, 0xa8, 0x18, 0xff, 0xa8, 0x06, 0x2b, 0xed, 0xa3, 0x47, 0xfb, + 0x0f, 0xda, 0x77, 0xeb, 0x57, 0x50, 0x15, 0xca, 0x87, 0x18, 0x7f, 0x1f, 0xd7, 0x1d, 0x31, 0xff, + 0x78, 0x1f, 0x1f, 0xb5, 0x8f, 0xee, 0xd5, 0x4b, 0xcd, 0xbf, 0x3a, 0xb0, 0x36, 0x61, 0x0d, 0xdd, + 0x82, 0x72, 0xc2, 0x69, 0x9c, 0x34, 0x9c, 0x2d, 0x77, 0xa7, 0xb6, 0xfb, 0xea, 0x3c, 0xb7, 0xad, + 0x0e, 0xa7, 0x31, 0x56, 0x58, 0xef, 0x97, 0x0e, 0x2c, 0x89, 0x31, 0xda, 0x86, 0xf5, 0x94, 0x4d, + 0x37, 0x24, 
0x43, 0x2a, 0x83, 0x55, 0xbd, 0x7f, 0x05, 0xaf, 0xa5, 0xf3, 0x47, 0x64, 0x48, 0x51, + 0x0b, 0x10, 0x1d, 0xd0, 0x21, 0x0d, 0x79, 0xf7, 0x19, 0x1d, 0x77, 0x13, 0xce, 0x82, 0xb0, 0xaf, + 0xc2, 0x73, 0xff, 0x0a, 0xae, 0x6b, 0xd9, 0xf7, 0xe8, 0xb8, 0x23, 0x25, 0x68, 0x07, 0x36, 0x6c, + 0x7c, 0x10, 0x72, 0x19, 0x32, 0x57, 0x58, 0xce, 0xc0, 0xed, 0x90, 0xdf, 0x01, 0xb1, 0x53, 0x03, + 0xda, 0xe3, 0x11, 0x6b, 0xde, 0x12, 0xb4, 0xa2, 0xd8, 0xab, 0xc2, 0x0a, 0xa6, 0x1f, 0x8c, 0x68, + 0xc2, 0xbd, 0x2d, 0xa8, 0x60, 0x9a, 0xc4, 0x51, 0x98, 0x50, 0x74, 0x0d, 0xca, 0x87, 0x8c, 0x45, + 0x4c, 0x91, 0xc4, 0x6a, 0xd0, 0xfc, 0x95, 0x03, 0x15, 0x4c, 0x9e, 0x77, 0x38, 0xe1, 0x34, 0x4d, + 0x0d, 0x27, 0x4b, 0x0d, 0xb4, 0x07, 0x2b, 0x27, 0x03, 0xc2, 0x87, 0x24, 0x6e, 0x94, 0x64, 0x90, + 0xb6, 0xac, 0x20, 0x19, 0xcd, 0xd6, 0x77, 0x15, 0xe4, 0x30, 0xe4, 0x6c, 0x8c, 0x8d, 0x82, 0xb7, + 0x07, 0xab, 0xb6, 0x00, 0xd5, 0xc1, 0x7d, 0x46, 0xc7, 0x9a, 0x80, 0xf8, 0x14, 0xa4, 0x4e, 0x45, + 0xbe, 0xea, 0x5c, 0x51, 0x83, 0xbd, 0xd2, 0xdb, 0x4e, 0xf3, 0xe3, 0x32, 0x2c, 0x77, 0x7a, 0x4f, + 0xe9, 0x90, 0x88, 0x94, 0x3a, 0xa5, 0x2c, 0x09, 0x34, 0x33, 0x17, 0x9b, 0x21, 0xba, 0x01, 0xe5, + 0x27, 0x83, 0xa8, 0xf7, 0x4c, 0xaa, 0xd7, 0x76, 0x3f, 0x67, 0x51, 0x53, 0xba, 0xad, 0x3b, 0x42, + 0x8c, 0x15, 0xca, 0xfb, 0x9d, 0x03, 0x65, 0x39, 0xb1, 0xc0, 0xe4, 0xb7, 0x01, 0xd2, 0xcd, 0x4b, + 0xf4, 0x92, 0x5f, 0x9e, 0xb5, 0x9b, 0xa6, 0x07, 0xb6, 0xe0, 0xe8, 0x1d, 0xa8, 0x49, 0x4f, 0x5d, + 0x3e, 0x8e, 0x69, 0xd2, 0x70, 0x67, 0xb2, 0x4a, 0x6b, 0x1f, 0xd1, 0x84, 0x53, 0x5f, 0x71, 0x03, + 0xa9, 0xf1, 0x50, 0x28, 0x78, 0x7f, 0x71, 0xa0, 0x9a, 0x5a, 0x16, 0xdb, 0x91, 0x65, 0x15, 0x96, + 0xdf, 0x62, 0x4e, 0xd8, 0x36, 0xa7, 0x57, 0x7c, 0xa3, 0x2d, 0xa8, 0xf9, 0x34, 0xe9, 0xb1, 0x20, + 0xe6, 0x62, 0x41, 0xea, 0x74, 0xd9, 0x53, 0xc8, 0x83, 0x0a, 0xa3, 0x1f, 0x8c, 0x02, 0x46, 0x7d, + 0x79, 0xc2, 0x2a, 0x38, 0x1d, 0x0b, 0x59, 0x24, 0x51, 0x64, 0xd0, 0x28, 0x2b, 0x99, 0x19, 0x0b, + 0x59, 0x2f, 0x1a, 0xc6, 0x23, 0x4e, 0xfd, 0xc6, 
0xb2, 0x92, 0x99, 0x31, 0x7a, 0x05, 0xaa, 0x09, + 0x0d, 0x93, 0x80, 0x07, 0xa7, 0xb4, 0xb1, 0x22, 0x85, 0xd9, 0x84, 0xf7, 0x51, 0x09, 0x6a, 0xd6, + 0x2a, 0xd1, 0xcb, 0x50, 0x15, 0x5c, 0xad, 0x63, 0x82, 0x2b, 0x62, 0x42, 0x9e, 0x8f, 0x17, 0xdb, + 0x46, 0x74, 0x00, 0x2b, 0x21, 0x4d, 0xb8, 0x38, 0x43, 0xae, 0xac, 0x4e, 0xaf, 0x2d, 0x8c, 0xb0, + 0xfc, 0x0e, 0xc2, 0xfe, 0xbb, 0x91, 0x4f, 0xb1, 0xd1, 0x14, 0x84, 0x86, 0x41, 0xd8, 0x0d, 0x38, + 0x1d, 0x26, 0x32, 0x26, 0x2e, 0xae, 0x0c, 0x83, 0xb0, 0x2d, 0xc6, 0x52, 0x48, 0xce, 0xb4, 0xb0, + 0xac, 0x85, 0xe4, 0x4c, 0x0a, 0x9b, 0x77, 0xd4, 0xca, 0xb4, 0xc5, 0xc9, 0xd2, 0x03, 0xb0, 0xdc, + 0x69, 0x1f, 0xdd, 0x7b, 0x70, 0x58, 0x77, 0x50, 0x05, 0x96, 0x1e, 0xb4, 0x3b, 0x0f, 0xeb, 0x25, + 0xb4, 0x02, 0x6e, 0xe7, 0xf0, 0x61, 0xdd, 0x15, 0x1f, 0xef, 0xee, 0x1f, 0xd7, 0x97, 0x9a, 0xbf, + 0x59, 0x82, 0xcd, 0x7b, 0x94, 0x1f, 0xb3, 0xe8, 0x34, 0xf0, 0x29, 0x53, 0xa4, 0xed, 0x93, 0xfb, + 0x2f, 0xd7, 0x3a, 0xba, 0x37, 0xa0, 0x12, 0x6b, 0xa4, 0x8c, 0x5d, 0x6d, 0x77, 0x73, 0x66, 0xc5, + 0x38, 0x85, 0x20, 0x0a, 0x75, 0x46, 0x93, 0x68, 0xc4, 0x7a, 0xb4, 0x9b, 0x48, 0xa1, 0x49, 0xe4, + 0x3d, 0x4b, 0x6d, 0xc6, 0x7d, 0xcb, 0xf8, 0x13, 0x1f, 0x52, 0x5b, 0xcd, 0x27, 0xea, 0x54, 0x6f, + 0xb0, 0xc9, 0x59, 0x34, 0x80, 0xab, 0x3e, 0xe1, 0xa4, 0x3b, 0xe5, 0x49, 0x25, 0xfd, 0xed, 0x62, + 0x9e, 0xee, 0x12, 0x4e, 0x3a, 0xb3, 0xbe, 0x36, 0xfd, 0xe9, 0x79, 0xf4, 0x16, 0xd4, 0xfc, 0xb4, + 0xf1, 0x88, 0x1d, 0x13, 0x5e, 0x5e, 0xca, 0x6d, 0x4b, 0xd8, 0x46, 0x7a, 0xef, 0xc1, 0xb5, 0xbc, + 0xf5, 0xe4, 0x14, 0xa3, 0x6d, 0xbb, 0x18, 0xe5, 0xc6, 0x38, 0xab, 0x4f, 0xde, 0x63, 0xb8, 0x9e, + 0x4f, 0xfe, 0x92, 0x86, 0x9b, 0x1f, 0x3b, 0xf0, 0xd2, 0x31, 0xa3, 0x31, 0x61, 0xd4, 0x44, 0xed, + 0x20, 0x0a, 0x4f, 0x82, 0xbe, 0xb7, 0x97, 0xa6, 0x07, 0xba, 0x09, 0xcb, 0x3d, 0x39, 0xa9, 0xf3, + 0xc1, 0x3e, 0x32, 0xf6, 0x3d, 0x00, 0x6b, 0x98, 0xf7, 0x33, 0xc7, 0xca, 0xa7, 0xef, 0xc0, 0x46, + 0xac, 0x3c, 0xf8, 0xdd, 0x62, 0x66, 0xd6, 0x0d, 0x5e, 0x51, 0x99, 0xde, 0x8d, 0x52, 
0xd1, 0xdd, + 0x68, 0xfe, 0xbc, 0x04, 0xd7, 0xde, 0x8b, 0xfb, 0x8c, 0xf8, 0x34, 0xdd, 0x15, 0xd1, 0x41, 0x3c, + 0x96, 0x2d, 0x6e, 0x61, 0xad, 0xb0, 0x2a, 0x77, 0x69, 0xb2, 0x72, 0xbf, 0x01, 0x55, 0x46, 0x9e, + 0x77, 0x13, 0x61, 0x4e, 0x16, 0x86, 0xda, 0xee, 0xd5, 0x9c, 0x5e, 0x85, 0x2b, 0x4c, 0x7f, 0x79, + 0x3f, 0xb5, 0x83, 0xf2, 0x0e, 0xac, 0x8f, 0x14, 0x31, 0x5f, 0xdb, 0x38, 0x27, 0x26, 0x6b, 0x06, + 0xae, 0x9a, 0xe7, 0x85, 0x43, 0xf2, 0x67, 0x07, 0xbc, 0x47, 0x64, 0x10, 0xf8, 0x82, 0x9c, 0x8e, + 0x89, 0x68, 0x07, 0x7a, 0xd7, 0x1f, 0x17, 0x0c, 0x4c, 0x96, 0x12, 0xa5, 0x62, 0x29, 0x71, 0x60, + 0x2d, 0x7e, 0x8a, 0xbc, 0x53, 0x98, 0xfc, 0x1f, 0x1d, 0x68, 0x18, 0xf2, 0xd9, 0x79, 0xf8, 0xbf, + 0xa0, 0xfe, 0x27, 0x07, 0xaa, 0x8a, 0xe8, 0x88, 0x51, 0xaf, 0x9f, 0x71, 0x7d, 0x1d, 0x36, 0x39, + 0x65, 0x8c, 0x9c, 0x44, 0x6c, 0xd8, 0xb5, 0xaf, 0x09, 0x55, 0x5c, 0x4f, 0x05, 0x8f, 0x74, 0xd6, + 0xfd, 0x6f, 0xb8, 0x7f, 0xe2, 0xc0, 0x2a, 0xa6, 0xc4, 0x37, 0xf9, 0xe2, 0xf9, 0x05, 0x43, 0x7d, + 0x1b, 0xd6, 0x7a, 0x23, 0xc6, 0xc4, 0xd5, 0x52, 0x25, 0xf9, 0x39, 0xac, 0x57, 0x35, 0x5a, 0x1d, + 0x98, 0xb1, 0xc5, 0xfd, 0x1b, 0x50, 0x0d, 0xe9, 0xf3, 0x62, 0x47, 0xa5, 0x12, 0xd2, 0xe7, 0x97, + 0x3c, 0x25, 0x1f, 0x2e, 0x01, 0x3a, 0x1e, 0x90, 0xd0, 0xac, 0xf8, 0xe0, 0x29, 0x09, 0xfb, 0xd4, + 0xfb, 0x8f, 0x53, 0x70, 0xe1, 0x6f, 0x43, 0x2d, 0x66, 0x41, 0xc4, 0x8a, 0x2d, 0x1b, 0x24, 0x56, + 0x51, 0x3e, 0x04, 0x14, 0xb3, 0x28, 0x8e, 0x12, 0xea, 0x77, 0xb3, 0x15, 0xbb, 0x8b, 0x0d, 0xd4, + 0x8d, 0xca, 0x91, 0x59, 0x79, 0x96, 0x28, 0x4b, 0x85, 0x12, 0x05, 0x7d, 0x19, 0xd6, 0x14, 0xe3, + 0x98, 0x05, 0xa7, 0xc2, 0x65, 0x59, 0xde, 0xf9, 0x56, 0xe5, 0xe4, 0xb1, 0x9a, 0xf3, 0x3e, 0xb1, + 0x4b, 0xd8, 0x6d, 0x58, 0x8b, 0x07, 0x24, 0x0c, 0x8b, 0x56, 0xb0, 0x55, 0x8d, 0x56, 0x04, 0x0f, + 0xc4, 0xb5, 0x41, 0x5e, 0x0a, 0x93, 0x2e, 0xa3, 0xf1, 0x80, 0xf4, 0xa8, 0xde, 0x9f, 0xf9, 0xcf, + 0xb1, 0x0d, 0xa3, 0x81, 0x95, 0x02, 0xda, 0x86, 0x0d, 0x43, 0xc1, 0xd0, 0x76, 0x25, 0xed, 0x75, + 0x3d, 0xad, 0x89, 0x5f, 
0xb8, 0x9f, 0x37, 0xff, 0xe0, 0xc2, 0xd5, 0xfd, 0x38, 0x1e, 0x8c, 0xa7, + 0x32, 0xe1, 0xdf, 0x9f, 0x7d, 0x26, 0xcc, 0xc4, 0xd7, 0x7d, 0x91, 0xf8, 0xbe, 0x70, 0x02, 0xe4, + 0xc4, 0xb2, 0x9c, 0x17, 0x4b, 0xef, 0x17, 0xce, 0xa5, 0xcf, 0x65, 0x03, 0x56, 0x8c, 0x0f, 0xf5, + 0xb4, 0x30, 0xc3, 0xe9, 0x8d, 0x72, 0x0b, 0x6f, 0xd4, 0x3f, 0x4b, 0x70, 0xb5, 0x3d, 0x8c, 0x23, + 0xc6, 0x27, 0x3b, 0xfd, 0x9b, 0x05, 0xf7, 0x69, 0x1d, 0x4a, 0x81, 0xaf, 0x1f, 0x86, 0xa5, 0xc0, + 0xf7, 0xce, 0xa0, 0xae, 0xcc, 0xd1, 0xb4, 0xec, 0x9d, 0xfb, 0xac, 0x28, 0xb4, 0xc5, 0x0a, 0x65, + 0x87, 0xc0, 0x9d, 0x08, 0x81, 0xf7, 0x7b, 0x3b, 0xbe, 0xef, 0x03, 0x0a, 0x34, 0x8d, 0xae, 0xb9, + 0x12, 0x9b, 0xd2, 0x7d, 0xd3, 0x72, 0x91, 0xb3, 0xf4, 0xd6, 0x34, 0x7f, 0xbc, 0x19, 0x4c, 0xcd, + 0x24, 0x17, 0xaf, 0x90, 0x7f, 0x73, 0x60, 0x5d, 0xf4, 0x84, 0xac, 0x0d, 0x7f, 0x76, 0x0d, 0x98, + 0x4d, 0xbc, 0x4e, 0xca, 0x85, 0x92, 0x4d, 0x87, 0xf9, 0xc2, 0xeb, 0xfb, 0xad, 0x03, 0xd7, 0xcc, + 0x53, 0x42, 0xb4, 0xde, 0xbc, 0x67, 0xd3, 0x99, 0xc5, 0xeb, 0x96, 0x38, 0xe7, 0x29, 0x76, 0xfe, + 0xc3, 0xc9, 0x46, 0x5d, 0x9c, 0xdd, 0x47, 0x0e, 0x7c, 0xde, 0x5c, 0x84, 0x2c, 0x8a, 0x9f, 0xc2, + 0xd5, 0xfd, 0x53, 0xb9, 0x30, 0xfc, 0xc3, 0x81, 0xcd, 0x94, 0x56, 0x7a, 0x6b, 0x48, 0x2e, 0x4e, + 0x0b, 0xbd, 0x05, 0xd0, 0x8b, 0xc2, 0x90, 0xf6, 0xb8, 0xb9, 0x8b, 0x2f, 0xaa, 0xa2, 0x19, 0xd4, + 0xfb, 0x91, 0xb5, 0x9e, 0xeb, 0xb0, 0x1c, 0x8d, 0x78, 0x3c, 0xe2, 0x3a, 0x25, 0xf5, 0xe8, 0xc2, + 0xdb, 0xb0, 0xfb, 0xeb, 0x2a, 0x54, 0xcc, 0xb3, 0x09, 0xfd, 0x10, 0xaa, 0xf7, 0x28, 0xd7, 0xbf, + 0x22, 0x7d, 0xe5, 0x9c, 0x17, 0xa9, 0x4a, 0xa0, 0xaf, 0x16, 0x7a, 0xb7, 0xa2, 0xc1, 0x9c, 0x37, + 0x1a, 0xda, 0xb1, 0xf4, 0x73, 0x11, 0xa9, 0xa7, 0xd7, 0x0a, 0x20, 0xb5, 0xb7, 0x9f, 0x2c, 0x7a, + 0x20, 0xa0, 0x1b, 0x96, 0xa1, 0xf9, 0xb0, 0xd4, 0x6f, 0xab, 0x28, 0x5c, 0x3b, 0x1f, 0xcd, 0xbf, + 0xe0, 0xa3, 0xd7, 0x73, 0x6c, 0x4d, 0x83, 0x52, 0xc7, 0x5f, 0x2f, 0x06, 0xd6, 0x6e, 0x83, 0xfc, + 0x77, 0x22, 0xda, 0xb6, 0xac, 0xe4, 0x01, 0x52, 0x77, 0x3b, 
0xe7, 0x03, 0xb5, 0xab, 0xfb, 0xd6, + 0x3b, 0x00, 0xbd, 0x62, 0xa9, 0xa5, 0xb3, 0xa9, 0xd1, 0x57, 0xe7, 0x48, 0xb5, 0xa5, 0x1f, 0x4c, + 0xde, 0xca, 0xd1, 0x17, 0xed, 0xf7, 0xa7, 0x25, 0x48, 0xed, 0x6d, 0xcd, 0x07, 0x68, 0x93, 0xbd, + 0xbc, 0x6b, 0x2f, 0xb2, 0xd3, 0x74, 0x56, 0x9c, 0x9a, 0xff, 0xda, 0x79, 0x30, 0xed, 0xe4, 0x24, + 0xf7, 0x4a, 0x85, 0x6c, 0xf5, 0x1c, 0x79, 0xea, 0x66, 0xfb, 0x5c, 0x5c, 0xe6, 0x27, 0xa7, 0x2d, + 0x4e, 0xf8, 0xc9, 0x6b, 0x9b, 0x79, 0x7e, 0xf2, 0x71, 0xda, 0xcf, 0xe3, 0xe9, 0x4e, 0x88, 0xbe, + 0x34, 0x15, 0xe8, 0x4c, 0x94, 0x5a, 0x6f, 0x2e, 0x82, 0x68, 0xc3, 0xdf, 0x52, 0xbf, 0xb1, 0xa3, + 0x89, 0x9f, 0x28, 0x79, 0x14, 0xa7, 0x46, 0x1a, 0xb3, 0x02, 0xa5, 0xba, 0xfb, 0xa1, 0x0b, 0x35, + 0xab, 0x31, 0xa0, 0xf7, 0xed, 0xe2, 0xb4, 0x9d, 0x53, 0x76, 0xec, 0x1e, 0x97, 0x9b, 0xd5, 0x73, + 0x80, 0x9a, 0xea, 0xd9, 0x82, 0x7e, 0x84, 0xf2, 0xce, 0xe2, 0x0c, 0x2a, 0x75, 0x7a, 0xa3, 0x20, + 0x5a, 0x7b, 0x7e, 0x92, 0xd3, 0x6a, 0x26, 0xca, 0xef, 0x8c, 0x34, 0xb7, 0xfc, 0xe6, 0xa1, 0x94, + 0x87, 0x37, 0x9c, 0x4b, 0x6c, 0xc4, 0x93, 0x65, 0xf9, 0xe7, 0xd9, 0xad, 0xff, 0x06, 0x00, 0x00, + 0xff, 0xff, 0xdc, 0x6b, 0x80, 0xf2, 0x4f, 0x1b, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -2663,21 +2846,21 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type ProviderClient interface { - // ////// Information about what a provider supports/expects + //////// Information about what a provider supports/expects GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) - // ////// One-time initialization, called before other functions below + //////// One-time initialization, called before other functions below Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) - // ////// Managed Resource Lifecycle + //////// Managed Resource Lifecycle ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) - // ////// Graceful Shutdown + //////// Graceful Shutdown Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) 
(*Stop_Response, error) } @@ -2691,7 +2874,7 @@ func NewProviderClient(cc *grpc.ClientConn) ProviderClient { func (c *providerClient) GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) { out := new(GetProviderSchema_Response) - err := c.cc.Invoke(ctx, "/proto.Provider/GetSchema", in, out, opts...) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/GetSchema", in, out, opts...) if err != nil { return nil, err } @@ -2700,7 +2883,7 @@ func (c *providerClient) GetSchema(ctx context.Context, in *GetProviderSchema_Re func (c *providerClient) PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) { out := new(PrepareProviderConfig_Response) - err := c.cc.Invoke(ctx, "/proto.Provider/PrepareProviderConfig", in, out, opts...) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PrepareProviderConfig", in, out, opts...) if err != nil { return nil, err } @@ -2709,7 +2892,7 @@ func (c *providerClient) PrepareProviderConfig(ctx context.Context, in *PrepareP func (c *providerClient) ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) { out := new(ValidateResourceTypeConfig_Response) - err := c.cc.Invoke(ctx, "/proto.Provider/ValidateResourceTypeConfig", in, out, opts...) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateResourceTypeConfig", in, out, opts...) if err != nil { return nil, err } @@ -2718,7 +2901,7 @@ func (c *providerClient) ValidateResourceTypeConfig(ctx context.Context, in *Val func (c *providerClient) ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) { out := new(ValidateDataSourceConfig_Response) - err := c.cc.Invoke(ctx, "/proto.Provider/ValidateDataSourceConfig", in, out, opts...) 
+ err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateDataSourceConfig", in, out, opts...) if err != nil { return nil, err } @@ -2727,7 +2910,7 @@ func (c *providerClient) ValidateDataSourceConfig(ctx context.Context, in *Valid func (c *providerClient) UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) { out := new(UpgradeResourceState_Response) - err := c.cc.Invoke(ctx, "/proto.Provider/UpgradeResourceState", in, out, opts...) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/UpgradeResourceState", in, out, opts...) if err != nil { return nil, err } @@ -2736,7 +2919,7 @@ func (c *providerClient) UpgradeResourceState(ctx context.Context, in *UpgradeRe func (c *providerClient) Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) { out := new(Configure_Response) - err := c.cc.Invoke(ctx, "/proto.Provider/Configure", in, out, opts...) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Configure", in, out, opts...) if err != nil { return nil, err } @@ -2745,7 +2928,7 @@ func (c *providerClient) Configure(ctx context.Context, in *Configure_Request, o func (c *providerClient) ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) { out := new(ReadResource_Response) - err := c.cc.Invoke(ctx, "/proto.Provider/ReadResource", in, out, opts...) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadResource", in, out, opts...) if err != nil { return nil, err } @@ -2754,7 +2937,7 @@ func (c *providerClient) ReadResource(ctx context.Context, in *ReadResource_Requ func (c *providerClient) PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) { out := new(PlanResourceChange_Response) - err := c.cc.Invoke(ctx, "/proto.Provider/PlanResourceChange", in, out, opts...) 
+ err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PlanResourceChange", in, out, opts...) if err != nil { return nil, err } @@ -2763,7 +2946,7 @@ func (c *providerClient) PlanResourceChange(ctx context.Context, in *PlanResourc func (c *providerClient) ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) { out := new(ApplyResourceChange_Response) - err := c.cc.Invoke(ctx, "/proto.Provider/ApplyResourceChange", in, out, opts...) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ApplyResourceChange", in, out, opts...) if err != nil { return nil, err } @@ -2772,7 +2955,7 @@ func (c *providerClient) ApplyResourceChange(ctx context.Context, in *ApplyResou func (c *providerClient) ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) { out := new(ImportResourceState_Response) - err := c.cc.Invoke(ctx, "/proto.Provider/ImportResourceState", in, out, opts...) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ImportResourceState", in, out, opts...) if err != nil { return nil, err } @@ -2781,7 +2964,7 @@ func (c *providerClient) ImportResourceState(ctx context.Context, in *ImportReso func (c *providerClient) ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) { out := new(ReadDataSource_Response) - err := c.cc.Invoke(ctx, "/proto.Provider/ReadDataSource", in, out, opts...) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadDataSource", in, out, opts...) if err != nil { return nil, err } @@ -2790,7 +2973,7 @@ func (c *providerClient) ReadDataSource(ctx context.Context, in *ReadDataSource_ func (c *providerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { out := new(Stop_Response) - err := c.cc.Invoke(ctx, "/proto.Provider/Stop", in, out, opts...) 
+ err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Stop", in, out, opts...) if err != nil { return nil, err } @@ -2799,21 +2982,21 @@ func (c *providerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grp // ProviderServer is the server API for Provider service. type ProviderServer interface { - // ////// Information about what a provider supports/expects + //////// Information about what a provider supports/expects GetSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error) PrepareProviderConfig(context.Context, *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) ValidateResourceTypeConfig(context.Context, *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) - // ////// One-time initialization, called before other functions below + //////// One-time initialization, called before other functions below Configure(context.Context, *Configure_Request) (*Configure_Response, error) - // ////// Managed Resource Lifecycle + //////// Managed Resource Lifecycle ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error) PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error) ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) - // ////// Graceful Shutdown + //////// Graceful Shutdown Stop(context.Context, *Stop_Request) (*Stop_Response, error) } @@ -2831,7 +3014,7 @@ func _Provider_GetSchema_Handler(srv interface{}, ctx context.Context, dec 
func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.Provider/GetSchema", + FullMethod: "/tfplugin5.Provider/GetSchema", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProviderServer).GetSchema(ctx, req.(*GetProviderSchema_Request)) @@ -2849,7 +3032,7 @@ func _Provider_PrepareProviderConfig_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.Provider/PrepareProviderConfig", + FullMethod: "/tfplugin5.Provider/PrepareProviderConfig", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProviderServer).PrepareProviderConfig(ctx, req.(*PrepareProviderConfig_Request)) @@ -2867,7 +3050,7 @@ func _Provider_ValidateResourceTypeConfig_Handler(srv interface{}, ctx context.C } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.Provider/ValidateResourceTypeConfig", + FullMethod: "/tfplugin5.Provider/ValidateResourceTypeConfig", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, req.(*ValidateResourceTypeConfig_Request)) @@ -2885,7 +3068,7 @@ func _Provider_ValidateDataSourceConfig_Handler(srv interface{}, ctx context.Con } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.Provider/ValidateDataSourceConfig", + FullMethod: "/tfplugin5.Provider/ValidateDataSourceConfig", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProviderServer).ValidateDataSourceConfig(ctx, req.(*ValidateDataSourceConfig_Request)) @@ -2903,7 +3086,7 @@ func _Provider_UpgradeResourceState_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.Provider/UpgradeResourceState", + FullMethod: "/tfplugin5.Provider/UpgradeResourceState", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return 
srv.(ProviderServer).UpgradeResourceState(ctx, req.(*UpgradeResourceState_Request)) @@ -2921,7 +3104,7 @@ func _Provider_Configure_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.Provider/Configure", + FullMethod: "/tfplugin5.Provider/Configure", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProviderServer).Configure(ctx, req.(*Configure_Request)) @@ -2939,7 +3122,7 @@ func _Provider_ReadResource_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.Provider/ReadResource", + FullMethod: "/tfplugin5.Provider/ReadResource", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProviderServer).ReadResource(ctx, req.(*ReadResource_Request)) @@ -2957,7 +3140,7 @@ func _Provider_PlanResourceChange_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.Provider/PlanResourceChange", + FullMethod: "/tfplugin5.Provider/PlanResourceChange", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProviderServer).PlanResourceChange(ctx, req.(*PlanResourceChange_Request)) @@ -2975,7 +3158,7 @@ func _Provider_ApplyResourceChange_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.Provider/ApplyResourceChange", + FullMethod: "/tfplugin5.Provider/ApplyResourceChange", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProviderServer).ApplyResourceChange(ctx, req.(*ApplyResourceChange_Request)) @@ -2993,7 +3176,7 @@ func _Provider_ImportResourceState_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.Provider/ImportResourceState", + FullMethod: "/tfplugin5.Provider/ImportResourceState", } handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { return srv.(ProviderServer).ImportResourceState(ctx, req.(*ImportResourceState_Request)) @@ -3011,7 +3194,7 @@ func _Provider_ReadDataSource_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.Provider/ReadDataSource", + FullMethod: "/tfplugin5.Provider/ReadDataSource", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProviderServer).ReadDataSource(ctx, req.(*ReadDataSource_Request)) @@ -3029,7 +3212,7 @@ func _Provider_Stop_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.Provider/Stop", + FullMethod: "/tfplugin5.Provider/Stop", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProviderServer).Stop(ctx, req.(*Stop_Request)) @@ -3038,7 +3221,7 @@ func _Provider_Stop_Handler(srv interface{}, ctx context.Context, dec func(inter } var _Provider_serviceDesc = grpc.ServiceDesc{ - ServiceName: "proto.Provider", + ServiceName: "tfplugin5.Provider", HandlerType: (*ProviderServer)(nil), Methods: []grpc.MethodDesc{ { @@ -3091,7 +3274,7 @@ var _Provider_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "plugin.proto", + Metadata: "tfplugin5.proto", } // ProvisionerClient is the client API for Provisioner service. @@ -3114,7 +3297,7 @@ func NewProvisionerClient(cc *grpc.ClientConn) ProvisionerClient { func (c *provisionerClient) GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) { out := new(GetProvisionerSchema_Response) - err := c.cc.Invoke(ctx, "/proto.Provisioner/GetSchema", in, out, opts...) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/GetSchema", in, out, opts...) 
if err != nil { return nil, err } @@ -3123,7 +3306,7 @@ func (c *provisionerClient) GetSchema(ctx context.Context, in *GetProvisionerSch func (c *provisionerClient) ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) { out := new(ValidateProvisionerConfig_Response) - err := c.cc.Invoke(ctx, "/proto.Provisioner/ValidateProvisionerConfig", in, out, opts...) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/ValidateProvisionerConfig", in, out, opts...) if err != nil { return nil, err } @@ -3131,7 +3314,7 @@ func (c *provisionerClient) ValidateProvisionerConfig(ctx context.Context, in *V } func (c *provisionerClient) ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) { - stream, err := c.cc.NewStream(ctx, &_Provisioner_serviceDesc.Streams[0], "/proto.Provisioner/ProvisionResource", opts...) + stream, err := c.cc.NewStream(ctx, &_Provisioner_serviceDesc.Streams[0], "/tfplugin5.Provisioner/ProvisionResource", opts...) if err != nil { return nil, err } @@ -3164,7 +3347,7 @@ func (x *provisionerProvisionResourceClient) Recv() (*ProvisionResource_Response func (c *provisionerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { out := new(Stop_Response) - err := c.cc.Invoke(ctx, "/proto.Provisioner/Stop", in, out, opts...) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/Stop", in, out, opts...) 
if err != nil { return nil, err } @@ -3193,7 +3376,7 @@ func _Provisioner_GetSchema_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.Provisioner/GetSchema", + FullMethod: "/tfplugin5.Provisioner/GetSchema", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProvisionerServer).GetSchema(ctx, req.(*GetProvisionerSchema_Request)) @@ -3211,7 +3394,7 @@ func _Provisioner_ValidateProvisionerConfig_Handler(srv interface{}, ctx context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.Provisioner/ValidateProvisionerConfig", + FullMethod: "/tfplugin5.Provisioner/ValidateProvisionerConfig", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, req.(*ValidateProvisionerConfig_Request)) @@ -3250,7 +3433,7 @@ func _Provisioner_Stop_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/proto.Provisioner/Stop", + FullMethod: "/tfplugin5.Provisioner/Stop", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProvisionerServer).Stop(ctx, req.(*Stop_Request)) @@ -3259,7 +3442,7 @@ func _Provisioner_Stop_Handler(srv interface{}, ctx context.Context, dec func(in } var _Provisioner_serviceDesc = grpc.ServiceDesc{ - ServiceName: "proto.Provisioner", + ServiceName: "tfplugin5.Provisioner", HandlerType: (*ProvisionerServer)(nil), Methods: []grpc.MethodDesc{ { @@ -3282,125 +3465,5 @@ var _Provisioner_serviceDesc = grpc.ServiceDesc{ ServerStreams: true, }, }, - Metadata: "plugin.proto", -} - -func init() { proto.RegisterFile("plugin.proto", fileDescriptor_plugin_1291da820e90ac50) } - -var fileDescriptor_plugin_1291da820e90ac50 = []byte{ - // 1815 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xdb, 0x6f, 0x5b, 
0x49, - 0x19, 0xef, 0xf1, 0x89, 0x13, 0xfb, 0x73, 0x2e, 0xce, 0x34, 0x14, 0xf7, 0x6c, 0x41, 0xc1, 0xd9, - 0x55, 0x53, 0x16, 0xb9, 0x8b, 0x8b, 0x42, 0x55, 0xad, 0x54, 0xd2, 0x6d, 0xe8, 0x5a, 0x74, 0xb3, - 0x61, 0x5c, 0xb2, 0x12, 0x48, 0x98, 0xa9, 0xcf, 0x34, 0x3d, 0x1b, 0x9f, 0x4b, 0xe7, 0x8c, 0x93, - 0xb5, 0x10, 0x12, 0x12, 0x8f, 0xbc, 0x2d, 0x0b, 0x2f, 0xfc, 0x13, 0xc0, 0x0b, 0x7f, 0xc2, 0xbe, - 0x21, 0xf1, 0x06, 0xe2, 0x01, 0xa1, 0x95, 0x78, 0x42, 0x48, 0xfc, 0x07, 0x68, 0xae, 0xe7, 0xd8, - 0x3e, 0xb1, 0x4d, 0xa2, 0x45, 0xfb, 0xe4, 0x33, 0xdf, 0xf7, 0x9b, 0xef, 0x36, 0xdf, 0x65, 0xc6, - 0xb0, 0x9a, 0x0c, 0x86, 0x27, 0x41, 0xd4, 0x4a, 0x58, 0xcc, 0x63, 0x54, 0x96, 0x3f, 0xcd, 0xb7, - 0x61, 0xf5, 0xf1, 0x28, 0x22, 0x61, 0xd0, 0x3f, 0x26, 0x83, 0x21, 0x45, 0x0d, 0x58, 0x09, 0xd3, - 0x93, 0x84, 0xf4, 0x4f, 0x1b, 0xce, 0xb6, 0xb3, 0xbb, 0x8a, 0xcd, 0x12, 0x21, 0x58, 0xfa, 0x30, - 0x8d, 0xa3, 0x46, 0x49, 0x92, 0xe5, 0x77, 0xf3, 0x6f, 0x0e, 0xc0, 0xe3, 0x80, 0x9c, 0x44, 0x71, - 0xca, 0x83, 0x3e, 0xda, 0x83, 0x4a, 0x4a, 0xcf, 0x28, 0x0b, 0xf8, 0x48, 0xee, 0x5e, 0x6f, 0x7b, - 0x4a, 0x5b, 0x2b, 0x03, 0xb5, 0xba, 0x1a, 0x81, 0x2d, 0x56, 0x28, 0x4d, 0x87, 0x61, 0x48, 0xd8, - 0x48, 0x4a, 0xaf, 0x62, 0xb3, 0x44, 0x37, 0x60, 0xd9, 0xa7, 0x9c, 0x04, 0x83, 0x86, 0x2b, 0x19, - 0x7a, 0x85, 0xda, 0x50, 0x25, 0x9c, 0xb3, 0xe0, 0xf9, 0x90, 0xd3, 0xc6, 0xd2, 0xb6, 0xb3, 0x5b, - 0x6b, 0x6f, 0x69, 0x55, 0xfb, 0x86, 0x7e, 0x44, 0xf8, 0x4b, 0x9c, 0xc1, 0x9a, 0x77, 0xa1, 0x62, - 0x74, 0xa3, 0x1a, 0xac, 0x74, 0x0e, 0x8f, 0xf7, 0x9f, 0x76, 0x1e, 0xd7, 0xaf, 0xa1, 0x2a, 0x94, - 0x0f, 0x30, 0x7e, 0x1f, 0xd7, 0x1d, 0x41, 0xff, 0x60, 0x1f, 0x1f, 0x76, 0x0e, 0x9f, 0xd4, 0x4b, - 0xcd, 0xbf, 0x38, 0xb0, 0x36, 0x26, 0x0d, 0xdd, 0x85, 0x72, 0xca, 0x69, 0x92, 0x36, 0x9c, 0x6d, - 0x77, 0xb7, 0xd6, 0xbe, 0x59, 0xa4, 0xb2, 0xd5, 0xe5, 0x34, 0xc1, 0x0a, 0xe7, 0xfd, 0xda, 0x81, - 0x25, 0xb1, 0x46, 0xb7, 0x61, 0xdd, 0x5a, 0xd2, 0x8b, 0x48, 0x48, 0x65, 0x80, 0xaa, 0xef, 0x5e, - 0xc3, 0x6b, 0x96, 0x7e, 0x48, 
0x42, 0x8a, 0x5a, 0x80, 0xe8, 0x80, 0x86, 0x34, 0xe2, 0xbd, 0x53, - 0x3a, 0xea, 0xa5, 0x9c, 0x05, 0xd1, 0x89, 0x0a, 0xcb, 0xbb, 0xd7, 0x70, 0x5d, 0xf3, 0xbe, 0x47, - 0x47, 0x5d, 0xc9, 0x41, 0xbb, 0xb0, 0x91, 0xc7, 0x07, 0x11, 0x97, 0xa1, 0x72, 0x85, 0xe4, 0x0c, - 0xdc, 0x89, 0xf8, 0x23, 0x10, 0xa7, 0x33, 0xa0, 0x7d, 0x1e, 0xb3, 0xe6, 0x3d, 0x61, 0x56, 0x9c, - 0x78, 0x55, 0x58, 0xc1, 0xf4, 0xd5, 0x90, 0xa6, 0xdc, 0xdb, 0x86, 0x0a, 0xa6, 0x69, 0x12, 0x47, - 0x29, 0x45, 0x5b, 0x50, 0x3e, 0x60, 0x2c, 0x66, 0xca, 0x48, 0x5c, 0xa6, 0x62, 0xd1, 0xfc, 0xd8, - 0x81, 0x0a, 0x26, 0xe7, 0x5d, 0x4e, 0x38, 0xb5, 0xe9, 0xe0, 0x64, 0xe9, 0x80, 0xf6, 0x60, 0xe5, - 0xc5, 0x80, 0xf0, 0x90, 0x24, 0x8d, 0x92, 0x0c, 0xd0, 0x2d, 0x1d, 0x20, 0xb3, 0xab, 0xf5, 0x5d, - 0xc5, 0x3e, 0x88, 0x38, 0x1b, 0x61, 0x03, 0xf6, 0x1e, 0xc0, 0x6a, 0x9e, 0x81, 0xea, 0xe0, 0x9e, - 0xd2, 0x91, 0x56, 0x2e, 0x3e, 0x85, 0x41, 0x67, 0x22, 0x3f, 0x75, 0x7e, 0xa8, 0xc5, 0x83, 0xd2, - 0x7d, 0xa7, 0xf9, 0x69, 0x19, 0x96, 0xbb, 0xfd, 0x97, 0x34, 0x24, 0x22, 0x8d, 0xce, 0x28, 0x4b, - 0x03, 0x6d, 0x95, 0x8b, 0xcd, 0x12, 0xdd, 0x81, 0xf2, 0xf3, 0x41, 0xdc, 0x3f, 0x95, 0xdb, 0x6b, - 0xed, 0xeb, 0xda, 0x2c, 0xb5, 0xaf, 0xf5, 0x48, 0xb0, 0xb0, 0x42, 0x78, 0xbf, 0x71, 0xa0, 0x2c, - 0x09, 0x33, 0xc4, 0x7d, 0x1b, 0xc0, 0x1e, 0x5a, 0xaa, 0x5d, 0xfd, 0xf2, 0xb8, 0x4c, 0x9b, 0x12, - 0x38, 0x07, 0x45, 0x0f, 0xa0, 0x26, 0xb5, 0xf4, 0xf8, 0x28, 0xa1, 0x69, 0xc3, 0x1d, 0xcb, 0x22, - 0xbd, 0xf3, 0x90, 0xa6, 0x9c, 0xfa, 0xca, 0x26, 0x90, 0xe8, 0x67, 0x02, 0xec, 0x7d, 0xea, 0x40, - 0xd5, 0x4a, 0x15, 0xe1, 0xcf, 0xb2, 0x08, 0xcb, 0x6f, 0x41, 0x13, 0x72, 0x4d, 0x85, 0x8a, 0x6f, - 0xb4, 0x0d, 0x35, 0x9f, 0xa6, 0x7d, 0x16, 0x24, 0x5c, 0x38, 0xa2, 0xaa, 0x28, 0x4f, 0x42, 0x1e, - 0x54, 0x18, 0x7d, 0x35, 0x0c, 0x18, 0xf5, 0x65, 0x25, 0x55, 0xb0, 0x5d, 0x0b, 0x5e, 0x2c, 0x51, - 0x64, 0xd0, 0x28, 0x2b, 0x9e, 0x59, 0x0b, 0x5e, 0x3f, 0x0e, 0x93, 0x21, 0xa7, 0x7e, 0x63, 0x59, - 0xf1, 0xcc, 0x1a, 0xdd, 0x82, 0x6a, 0x4a, 0xa3, 0x34, 0xe0, 0xc1, 
0x19, 0x6d, 0xac, 0x48, 0x66, - 0x46, 0xf0, 0x3e, 0x29, 0x41, 0x2d, 0xe7, 0x25, 0x7a, 0x0d, 0xaa, 0xc2, 0xd6, 0x5c, 0x59, 0xe0, - 0x8a, 0x20, 0xc8, 0x7a, 0x58, 0xfc, 0xe8, 0xd0, 0x43, 0x58, 0x89, 0x68, 0xca, 0x45, 0xbd, 0xb8, - 0xb2, 0xfb, 0xbc, 0x71, 0x61, 0x64, 0xe5, 0x77, 0x10, 0x9d, 0xbc, 0x17, 0xfb, 0x14, 0x9b, 0x5d, - 0xc2, 0x90, 0x30, 0x88, 0x7a, 0x01, 0xa7, 0x61, 0x2a, 0x63, 0xe1, 0xe2, 0x4a, 0x18, 0x44, 0x1d, - 0xb1, 0x96, 0x4c, 0xf2, 0x91, 0x66, 0x96, 0x35, 0x93, 0x7c, 0x24, 0x99, 0xcd, 0x47, 0xca, 0x23, - 0x2d, 0x71, 0xbc, 0xbd, 0x00, 0x2c, 0x77, 0x3b, 0x87, 0x4f, 0x9e, 0x1e, 0xd4, 0x1d, 0x54, 0x81, - 0xa5, 0xa7, 0x9d, 0xee, 0xb3, 0x7a, 0x09, 0xad, 0x80, 0xdb, 0x3d, 0x78, 0x56, 0x77, 0xc5, 0xc7, - 0x7b, 0xfb, 0x47, 0xf5, 0xa5, 0xe6, 0xcf, 0x97, 0x60, 0xf3, 0x09, 0xe5, 0x47, 0x2c, 0x3e, 0x0b, - 0x7c, 0xca, 0x94, 0xd1, 0xf9, 0x0a, 0xfd, 0xbb, 0x9b, 0x2b, 0xd1, 0x3b, 0x50, 0x49, 0x34, 0x52, - 0xc6, 0xac, 0xd6, 0x5e, 0x1b, 0xf3, 0x16, 0x5b, 0x36, 0x22, 0x50, 0x67, 0x34, 0x8d, 0x87, 0xac, - 0x4f, 0x7b, 0xa9, 0x64, 0x9a, 0xa4, 0xdd, 0xd3, 0x5b, 0xa6, 0xd4, 0xb6, 0x8c, 0x1e, 0xf1, 0x21, - 0x77, 0x2a, 0x7a, 0xaa, 0x2a, 0x77, 0x83, 0x8d, 0x53, 0xd1, 0x4b, 0xb8, 0xee, 0x13, 0x4e, 0x7a, - 0x13, 0x5a, 0x54, 0x82, 0xdf, 0x9f, 0xaf, 0xe5, 0x31, 0xe1, 0xa4, 0x3b, 0xad, 0x67, 0xd3, 0x9f, - 0xa4, 0xa3, 0x7b, 0x50, 0xf3, 0xed, 0x30, 0x11, 0xa7, 0x24, 0x34, 0x6c, 0x4e, 0x8d, 0x19, 0x9c, - 0x47, 0x79, 0xdf, 0x87, 0xad, 0x22, 0x3f, 0x0a, 0x1a, 0xcd, 0x4e, 0xbe, 0xd1, 0x4c, 0xc5, 0x34, - 0xeb, 0x3b, 0x5e, 0x17, 0x6e, 0x14, 0x1b, 0x7d, 0x05, 0xa1, 0xcd, 0x3f, 0x39, 0xf0, 0xa5, 0x23, - 0x46, 0x13, 0xc2, 0xa8, 0x89, 0xd4, 0x3b, 0x71, 0xf4, 0x22, 0x38, 0xf1, 0xf6, 0x6c, 0x1a, 0xa0, - 0x37, 0x61, 0xb9, 0x2f, 0x89, 0xfa, 0xdc, 0x4d, 0x49, 0xe4, 0xe7, 0x38, 0xd6, 0x10, 0xef, 0x67, - 0xb9, 0x94, 0x79, 0x1b, 0x36, 0x12, 0x25, 0xdc, 0xef, 0xcd, 0x97, 0xb0, 0x6e, 0xb0, 0xca, 0x82, - 0xc9, 0xc0, 0x97, 0x16, 0x09, 0x7c, 0xf3, 0x17, 0x25, 0xd8, 0xfa, 0x41, 0x72, 0xc2, 0x88, 0x4f, - 0xed, 
0x01, 0x88, 0x41, 0xe0, 0x45, 0x99, 0x3f, 0x33, 0xcb, 0x3f, 0xd7, 0x84, 0x4b, 0xe3, 0x4d, - 0xf8, 0x1b, 0x50, 0x65, 0xe4, 0xbc, 0x97, 0x0a, 0x71, 0xb2, 0xde, 0x6b, 0xed, 0x8d, 0x89, 0x71, - 0x83, 0x2b, 0x4c, 0x7f, 0x79, 0x3f, 0xcd, 0xc5, 0xe1, 0x01, 0xac, 0x0f, 0x95, 0x4d, 0xbe, 0xde, - 0x3e, 0x23, 0x0c, 0x6b, 0x06, 0xaa, 0xc6, 0xde, 0xa5, 0xa2, 0xf0, 0x07, 0x07, 0xbc, 0x63, 0x32, - 0x08, 0x7c, 0x61, 0x93, 0x0e, 0x83, 0x68, 0xea, 0xfa, 0x6c, 0xbb, 0x0b, 0xc6, 0x22, 0x3b, 0xf8, - 0xd2, 0xfc, 0x83, 0x7f, 0x98, 0x73, 0x78, 0xc2, 0x68, 0x67, 0x21, 0xa3, 0x7f, 0xe7, 0x40, 0xc3, - 0x18, 0x9d, 0x65, 0xfa, 0x17, 0xda, 0xe4, 0xdf, 0x3b, 0x50, 0x55, 0x06, 0x0e, 0x19, 0xf5, 0xfa, - 0xf9, 0x92, 0xd9, 0xe4, 0x94, 0x31, 0xf2, 0x22, 0x66, 0x61, 0x2f, 0x3f, 0xd4, 0xab, 0xb8, 0x6e, - 0x19, 0xc7, 0x3a, 0xb1, 0xfe, 0xbf, 0x36, 0x7f, 0xe6, 0xc0, 0x2a, 0xa6, 0xc4, 0x37, 0x79, 0xe1, - 0xfd, 0x64, 0xc1, 0xd0, 0xde, 0x87, 0xb5, 0xfe, 0x90, 0x31, 0x71, 0xf1, 0x53, 0x49, 0x3c, 0xc3, - 0xda, 0x55, 0x8d, 0x54, 0xb5, 0xf0, 0x2a, 0x67, 0xf3, 0x5b, 0x50, 0x8d, 0xe8, 0xf9, 0xfc, 0x32, - 0xa8, 0x44, 0xf4, 0xfc, 0x0a, 0x15, 0xf0, 0x6f, 0x17, 0xd0, 0xd1, 0x80, 0x44, 0xc6, 0xcb, 0x77, - 0x5e, 0x92, 0xe8, 0x84, 0x7a, 0xff, 0x71, 0x16, 0x74, 0xf6, 0x5b, 0x50, 0x4b, 0x58, 0x10, 0xb3, - 0xf9, 0xae, 0x82, 0xc4, 0x29, 0x53, 0xf7, 0x01, 0x25, 0x2c, 0x4e, 0xe2, 0x94, 0xfa, 0xbd, 0xcc, - 0x4b, 0xf7, 0xe2, 0xcd, 0x75, 0x03, 0x3f, 0x34, 0xde, 0x66, 0xc9, 0xb0, 0x34, 0x37, 0x19, 0xd0, - 0x0e, 0xac, 0x29, 0x2b, 0x13, 0x16, 0x9c, 0x09, 0x55, 0x65, 0x79, 0x13, 0x5b, 0x95, 0xc4, 0x23, - 0x45, 0xf3, 0x3e, 0x73, 0x72, 0xe1, 0xbf, 0x0f, 0x6b, 0xc9, 0x80, 0x44, 0xd1, 0x22, 0x9d, 0x68, - 0x55, 0x23, 0x95, 0x61, 0x0f, 0xc5, 0x50, 0x97, 0xd7, 0xb4, 0xb4, 0xc7, 0x68, 0x32, 0x20, 0x7d, - 0xaa, 0xcf, 0xa2, 0xf8, 0x21, 0xb4, 0x61, 0xd0, 0x58, 0x81, 0xd1, 0x6d, 0xd8, 0x30, 0xaa, 0x8d, - 0xb9, 0xae, 0x34, 0x77, 0x5d, 0x93, 0xb5, 0xc1, 0x97, 0x9a, 0xb8, 0xcd, 0x8f, 0x5d, 0xb8, 0xbe, - 0x9f, 0x24, 0x83, 0xd1, 0xc4, 0x89, 0xff, 
0xeb, 0xf3, 0x3d, 0xf1, 0xa9, 0x78, 0xba, 0x8b, 0xc6, - 0xf3, 0x7f, 0x3a, 0xe8, 0x82, 0xd8, 0x95, 0x8b, 0x62, 0xe7, 0xfd, 0xd2, 0xb9, 0x52, 0xad, 0x35, - 0x60, 0xc5, 0xc8, 0x57, 0x97, 0x7a, 0xb3, 0x9c, 0x3c, 0x14, 0x77, 0xa1, 0x43, 0xf9, 0x47, 0x09, - 0xae, 0x77, 0xc2, 0x24, 0x66, 0x7c, 0x7c, 0x18, 0xef, 0x2d, 0x78, 0x26, 0xeb, 0x50, 0x0a, 0x7c, - 0xfd, 0x04, 0x2b, 0x05, 0xbe, 0xc7, 0xa1, 0xae, 0xc4, 0x51, 0xdb, 0xbe, 0xe6, 0x5e, 0xe6, 0xe7, - 0x1e, 0xa7, 0x42, 0xe4, 0x5d, 0x77, 0xc7, 0x5c, 0xf7, 0x7e, 0x9b, 0x8f, 0xe9, 0x0f, 0x01, 0x05, - 0xda, 0x84, 0x9e, 0xb9, 0x94, 0x9a, 0xd6, 0xfb, 0xa6, 0x16, 0x5f, 0xe0, 0x72, 0x6b, 0xd2, 0x6e, - 0xbc, 0x19, 0x4c, 0x50, 0xd2, 0xcb, 0x75, 0xba, 0x3f, 0x3b, 0xb0, 0x2e, 0xfa, 0x79, 0x36, 0x32, - 0x3f, 0x9f, 0x61, 0xf9, 0xe1, 0xd8, 0x5b, 0xa0, 0x3c, 0x37, 0xa9, 0x74, 0x58, 0x2f, 0xe5, 0xd3, - 0xaf, 0x1c, 0xd8, 0x32, 0x97, 0x77, 0x31, 0x22, 0x8b, 0x1e, 0x27, 0x49, 0xce, 0x9e, 0xbb, 0xa2, - 0x76, 0x2d, 0xb6, 0xf8, 0x79, 0x92, 0x47, 0x5c, 0xce, 0xaa, 0x4f, 0x1c, 0xb8, 0x69, 0x2e, 0x28, - 0x39, 0xd3, 0xae, 0x78, 0x61, 0xbe, 0xf2, 0x40, 0xff, 0xab, 0x03, 0x9b, 0xd6, 0x1c, 0x3b, 0xd5, - 0x4f, 0x2f, 0x67, 0x0e, 0xba, 0x07, 0xd0, 0x8f, 0xa3, 0x88, 0xf6, 0xb9, 0xb9, 0x02, 0x5f, 0xd4, - 0x05, 0x33, 0x98, 0xf7, 0x41, 0xce, 0x87, 0x1b, 0xb0, 0x1c, 0x0f, 0x79, 0x32, 0xe4, 0x3a, 0xdd, - 0xf4, 0xea, 0x52, 0x21, 0x6f, 0xff, 0xb1, 0x02, 0x15, 0xf3, 0x30, 0x41, 0x47, 0x50, 0x7d, 0x42, - 0xb9, 0xfe, 0xef, 0x65, 0x7b, 0xc6, 0x1b, 0x4f, 0x25, 0xc8, 0xd7, 0xe6, 0xbe, 0x02, 0x91, 0x7f, - 0xc1, 0xeb, 0x07, 0xbd, 0xae, 0xf7, 0x16, 0x72, 0xad, 0x86, 0x37, 0xe6, 0xa0, 0xb4, 0x96, 0x74, - 0xd6, 0x65, 0x1c, 0xdd, 0xd1, 0x42, 0x2e, 0x86, 0x58, 0x7d, 0x5f, 0x5f, 0x04, 0xaa, 0x95, 0x86, - 0x17, 0x5f, 0xa6, 0xd1, 0xed, 0x09, 0x39, 0x93, 0x00, 0xab, 0x70, 0x77, 0x3e, 0x50, 0xab, 0x23, - 0xc5, 0xcf, 0x2e, 0xb4, 0xa3, 0x25, 0x14, 0x31, 0xad, 0x9a, 0xd7, 0x67, 0x83, 0xb4, 0x8a, 0xef, - 0xe4, 0xee, 0xda, 0xa8, 0xa1, 0xb7, 0x58, 0x8a, 0x15, 0x76, 0xb3, 0x80, 0xa3, 
0x25, 0x74, 0xc6, - 0x6f, 0xbe, 0xe8, 0x35, 0xf3, 0x7c, 0xcb, 0x11, 0xad, 0x9c, 0x5b, 0xc5, 0x4c, 0x2d, 0xea, 0x47, - 0x45, 0xd7, 0x4b, 0x64, 0x52, 0x6e, 0x9a, 0x65, 0xc5, 0x36, 0x67, 0x41, 0xb4, 0xf0, 0x1f, 0x17, - 0x5e, 0x65, 0x90, 0xd9, 0x5a, 0xc0, 0xb3, 0xe2, 0x77, 0x66, 0x62, 0x32, 0xf9, 0x05, 0x23, 0xca, - 0xca, 0x2f, 0x1a, 0x5f, 0x93, 0xf2, 0x8b, 0x31, 0x5a, 0xfe, 0xfb, 0x93, 0x13, 0x09, 0x7d, 0x25, - 0x17, 0xcc, 0x8c, 0x6c, 0xa5, 0x7e, 0xf5, 0x22, 0xb6, 0x16, 0xf8, 0x4d, 0xf5, 0xef, 0x31, 0xb2, - 0x7f, 0xc6, 0xf1, 0x38, 0xb1, 0x9b, 0xb7, 0xc6, 0x89, 0x6a, 0x4b, 0xfb, 0x9f, 0x25, 0xa8, 0xe5, - 0x9a, 0x34, 0x3a, 0xce, 0x37, 0x8f, 0x9d, 0x89, 0xd6, 0x90, 0x9f, 0x31, 0x53, 0x59, 0x79, 0x01, - 0x48, 0x9b, 0x96, 0xcc, 0x98, 0x09, 0x68, 0xb2, 0x7e, 0xa6, 0x10, 0x56, 0xd9, 0x9d, 0x05, 0x90, - 0xf6, 0x06, 0x32, 0xdd, 0xee, 0x6d, 0x3b, 0x9c, 0xe2, 0x4c, 0xb5, 0xc3, 0x22, 0x84, 0x92, 0xfc, - 0x96, 0x73, 0x89, 0x40, 0x3f, 0x5f, 0x96, 0xc4, 0x7b, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xa0, - 0xbf, 0x92, 0x5f, 0xee, 0x19, 0x00, 0x00, + Metadata: "tfplugin5.proto", } diff --git a/plugin/proto/plugin.proto b/internal/tfplugin5/tfplugin5.proto similarity index 89% rename from plugin/proto/plugin.proto rename to internal/tfplugin5/tfplugin5.proto index 1bbb4c422cf8..d0b0ab42819d 100644 --- a/plugin/proto/plugin.proto +++ b/internal/tfplugin5/tfplugin5.proto @@ -1,6 +1,24 @@ +// Terraform Plugin RPC protocol version 5.0 +// +// This file defines version 5.0 of the RPC protocol. To implement a plugin +// against this protocol, copy this definition into your own codebase and +// use protoc to generate stubs for your target language. +// +// This file will be updated in-place in the source Terraform repository for +// any minor versions of protocol 5, but later minor versions will always be +// backwards compatible. Breaking changes, if any are required, will come +// in a subsequent major version with its own separate proto definition. 
+// +// Note that only the proto files included in a release tag of Terraform are +// official protocol releases. Proto files taken from other commits may include +// incomplete changes or features that did not make it into a final release. +// In all reasonable cases, plugin developers should take the proto file from +// the tag of the most recent release of Terraform, and not from the master +// branch or any other development branch. +// syntax = "proto3"; -package proto; +package tfplugin5; // DynamicValue is an opaque encoding of terraform data, with the field name // indicating the encoding scheme used. diff --git a/plugin/convert/diagnostics.go b/plugin/convert/diagnostics.go index 50f7e2ec0490..51cb2fe2fe5f 100644 --- a/plugin/convert/diagnostics.go +++ b/plugin/convert/diagnostics.go @@ -1,7 +1,7 @@ package convert import ( - "github.com/hashicorp/terraform/plugin/proto" + proto "github.com/hashicorp/terraform/internal/tfplugin5" "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" ) diff --git a/plugin/convert/diagnostics_test.go b/plugin/convert/diagnostics_test.go index fe5c59d8f19a..5825269a586b 100644 --- a/plugin/convert/diagnostics_test.go +++ b/plugin/convert/diagnostics_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/hashicorp/terraform/plugin/proto" + proto "github.com/hashicorp/terraform/internal/tfplugin5" "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" ) diff --git a/plugin/convert/schema.go b/plugin/convert/schema.go index b29e475774ee..a9f0d9f7dddc 100644 --- a/plugin/convert/schema.go +++ b/plugin/convert/schema.go @@ -6,7 +6,7 @@ import ( "sort" "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/plugin/proto" + proto "github.com/hashicorp/terraform/internal/tfplugin5" "github.com/hashicorp/terraform/providers" ) diff --git a/plugin/convert/schema_test.go b/plugin/convert/schema_test.go index ba40e0a1bb90..8ebf0fdd001b 
100644 --- a/plugin/convert/schema_test.go +++ b/plugin/convert/schema_test.go @@ -6,7 +6,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/plugin/proto" + proto "github.com/hashicorp/terraform/internal/tfplugin5" "github.com/zclconf/go-cty/cty" ) diff --git a/plugin/grpc_provider.go b/plugin/grpc_provider.go index 9e087d4567b1..c34abc3a0f41 100644 --- a/plugin/grpc_provider.go +++ b/plugin/grpc_provider.go @@ -10,8 +10,8 @@ import ( "github.com/zclconf/go-cty/cty" plugin "github.com/hashicorp/go-plugin" + proto "github.com/hashicorp/terraform/internal/tfplugin5" "github.com/hashicorp/terraform/plugin/convert" - "github.com/hashicorp/terraform/plugin/proto" "github.com/hashicorp/terraform/providers" "github.com/hashicorp/terraform/version" "github.com/zclconf/go-cty/cty/msgpack" diff --git a/plugin/grpc_provider_test.go b/plugin/grpc_provider_test.go index 4b6bb5aee6eb..a2fe37a3abb5 100644 --- a/plugin/grpc_provider_test.go +++ b/plugin/grpc_provider_test.go @@ -12,8 +12,8 @@ import ( "github.com/hashicorp/terraform/tfdiags" "github.com/zclconf/go-cty/cty" + proto "github.com/hashicorp/terraform/internal/tfplugin5" mockproto "github.com/hashicorp/terraform/plugin/mock_proto" - "github.com/hashicorp/terraform/plugin/proto" ) var _ providers.Interface = (*GRPCProvider)(nil) diff --git a/plugin/grpc_provisioner.go b/plugin/grpc_provisioner.go index 7b85fbb439bb..136c88d68162 100644 --- a/plugin/grpc_provisioner.go +++ b/plugin/grpc_provisioner.go @@ -9,8 +9,8 @@ import ( plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/terraform/configs/configschema" + proto "github.com/hashicorp/terraform/internal/tfplugin5" "github.com/hashicorp/terraform/plugin/convert" - "github.com/hashicorp/terraform/plugin/proto" "github.com/hashicorp/terraform/provisioners" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/msgpack" diff --git 
a/plugin/grpc_provisioner_test.go b/plugin/grpc_provisioner_test.go index 80329e8ee34e..2c281333bcfe 100644 --- a/plugin/grpc_provisioner_test.go +++ b/plugin/grpc_provisioner_test.go @@ -8,7 +8,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/terraform/config/hcl2shim" - "github.com/hashicorp/terraform/plugin/proto" + proto "github.com/hashicorp/terraform/internal/tfplugin5" "github.com/hashicorp/terraform/provisioners" "github.com/zclconf/go-cty/cty" diff --git a/plugin/mock_proto/generate.go b/plugin/mock_proto/generate.go index 3cccb8301085..7418ee7b3695 100644 --- a/plugin/mock_proto/generate.go +++ b/plugin/mock_proto/generate.go @@ -1,3 +1,3 @@ -//go:generate mockgen -destination mock.go github.com/hashicorp/terraform/plugin/proto ProviderClient,ProvisionerClient,Provisioner_ProvisionResourceClient,Provisioner_ProvisionResourceServer +//go:generate mockgen -destination mock.go github.com/hashicorp/terraform/internal/tfplugin5 ProviderClient,ProvisionerClient,Provisioner_ProvisionResourceClient,Provisioner_ProvisionResourceServer -package mock_proto +package mock_tfplugin5 diff --git a/plugin/mock_proto/mock.go b/plugin/mock_proto/mock.go index 50c049585303..2be9d1b1bb55 100644 --- a/plugin/mock_proto/mock.go +++ b/plugin/mock_proto/mock.go @@ -1,13 +1,13 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/hashicorp/terraform/plugin/proto (interfaces: ProviderClient,ProvisionerClient,Provisioner_ProvisionResourceClient,Provisioner_ProvisionResourceServer) +// Source: github.com/hashicorp/terraform/internal/tfplugin5 (interfaces: ProviderClient,ProvisionerClient,Provisioner_ProvisionResourceClient,Provisioner_ProvisionResourceServer) -// Package mock_proto is a generated GoMock package. -package mock_proto +// Package mock_tfplugin5 is a generated GoMock package. 
+package mock_tfplugin5 import ( context "context" gomock "github.com/golang/mock/gomock" - proto "github.com/hashicorp/terraform/plugin/proto" + tfplugin5 "github.com/hashicorp/terraform/internal/tfplugin5" grpc "google.golang.org/grpc" metadata "google.golang.org/grpc/metadata" reflect "reflect" @@ -37,13 +37,13 @@ func (m *MockProviderClient) EXPECT() *MockProviderClientMockRecorder { } // ApplyResourceChange mocks base method -func (m *MockProviderClient) ApplyResourceChange(arg0 context.Context, arg1 *proto.ApplyResourceChange_Request, arg2 ...grpc.CallOption) (*proto.ApplyResourceChange_Response, error) { +func (m *MockProviderClient) ApplyResourceChange(arg0 context.Context, arg1 *tfplugin5.ApplyResourceChange_Request, arg2 ...grpc.CallOption) (*tfplugin5.ApplyResourceChange_Response, error) { varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "ApplyResourceChange", varargs...) - ret0, _ := ret[0].(*proto.ApplyResourceChange_Response) + ret0, _ := ret[0].(*tfplugin5.ApplyResourceChange_Response) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -55,13 +55,13 @@ func (mr *MockProviderClientMockRecorder) ApplyResourceChange(arg0, arg1 interfa } // Configure mocks base method -func (m *MockProviderClient) Configure(arg0 context.Context, arg1 *proto.Configure_Request, arg2 ...grpc.CallOption) (*proto.Configure_Response, error) { +func (m *MockProviderClient) Configure(arg0 context.Context, arg1 *tfplugin5.Configure_Request, arg2 ...grpc.CallOption) (*tfplugin5.Configure_Response, error) { varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Configure", varargs...) 
- ret0, _ := ret[0].(*proto.Configure_Response) + ret0, _ := ret[0].(*tfplugin5.Configure_Response) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -73,13 +73,13 @@ func (mr *MockProviderClientMockRecorder) Configure(arg0, arg1 interface{}, arg2 } // GetSchema mocks base method -func (m *MockProviderClient) GetSchema(arg0 context.Context, arg1 *proto.GetProviderSchema_Request, arg2 ...grpc.CallOption) (*proto.GetProviderSchema_Response, error) { +func (m *MockProviderClient) GetSchema(arg0 context.Context, arg1 *tfplugin5.GetProviderSchema_Request, arg2 ...grpc.CallOption) (*tfplugin5.GetProviderSchema_Response, error) { varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "GetSchema", varargs...) - ret0, _ := ret[0].(*proto.GetProviderSchema_Response) + ret0, _ := ret[0].(*tfplugin5.GetProviderSchema_Response) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -91,13 +91,13 @@ func (mr *MockProviderClientMockRecorder) GetSchema(arg0, arg1 interface{}, arg2 } // ImportResourceState mocks base method -func (m *MockProviderClient) ImportResourceState(arg0 context.Context, arg1 *proto.ImportResourceState_Request, arg2 ...grpc.CallOption) (*proto.ImportResourceState_Response, error) { +func (m *MockProviderClient) ImportResourceState(arg0 context.Context, arg1 *tfplugin5.ImportResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin5.ImportResourceState_Response, error) { varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "ImportResourceState", varargs...) 
- ret0, _ := ret[0].(*proto.ImportResourceState_Response) + ret0, _ := ret[0].(*tfplugin5.ImportResourceState_Response) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -109,13 +109,13 @@ func (mr *MockProviderClientMockRecorder) ImportResourceState(arg0, arg1 interfa } // PlanResourceChange mocks base method -func (m *MockProviderClient) PlanResourceChange(arg0 context.Context, arg1 *proto.PlanResourceChange_Request, arg2 ...grpc.CallOption) (*proto.PlanResourceChange_Response, error) { +func (m *MockProviderClient) PlanResourceChange(arg0 context.Context, arg1 *tfplugin5.PlanResourceChange_Request, arg2 ...grpc.CallOption) (*tfplugin5.PlanResourceChange_Response, error) { varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "PlanResourceChange", varargs...) - ret0, _ := ret[0].(*proto.PlanResourceChange_Response) + ret0, _ := ret[0].(*tfplugin5.PlanResourceChange_Response) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -127,13 +127,13 @@ func (mr *MockProviderClientMockRecorder) PlanResourceChange(arg0, arg1 interfac } // PrepareProviderConfig mocks base method -func (m *MockProviderClient) PrepareProviderConfig(arg0 context.Context, arg1 *proto.PrepareProviderConfig_Request, arg2 ...grpc.CallOption) (*proto.PrepareProviderConfig_Response, error) { +func (m *MockProviderClient) PrepareProviderConfig(arg0 context.Context, arg1 *tfplugin5.PrepareProviderConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.PrepareProviderConfig_Response, error) { varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "PrepareProviderConfig", varargs...) 
- ret0, _ := ret[0].(*proto.PrepareProviderConfig_Response) + ret0, _ := ret[0].(*tfplugin5.PrepareProviderConfig_Response) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -145,13 +145,13 @@ func (mr *MockProviderClientMockRecorder) PrepareProviderConfig(arg0, arg1 inter } // ReadDataSource mocks base method -func (m *MockProviderClient) ReadDataSource(arg0 context.Context, arg1 *proto.ReadDataSource_Request, arg2 ...grpc.CallOption) (*proto.ReadDataSource_Response, error) { +func (m *MockProviderClient) ReadDataSource(arg0 context.Context, arg1 *tfplugin5.ReadDataSource_Request, arg2 ...grpc.CallOption) (*tfplugin5.ReadDataSource_Response, error) { varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "ReadDataSource", varargs...) - ret0, _ := ret[0].(*proto.ReadDataSource_Response) + ret0, _ := ret[0].(*tfplugin5.ReadDataSource_Response) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -163,13 +163,13 @@ func (mr *MockProviderClientMockRecorder) ReadDataSource(arg0, arg1 interface{}, } // ReadResource mocks base method -func (m *MockProviderClient) ReadResource(arg0 context.Context, arg1 *proto.ReadResource_Request, arg2 ...grpc.CallOption) (*proto.ReadResource_Response, error) { +func (m *MockProviderClient) ReadResource(arg0 context.Context, arg1 *tfplugin5.ReadResource_Request, arg2 ...grpc.CallOption) (*tfplugin5.ReadResource_Response, error) { varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "ReadResource", varargs...) 
- ret0, _ := ret[0].(*proto.ReadResource_Response) + ret0, _ := ret[0].(*tfplugin5.ReadResource_Response) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -181,13 +181,13 @@ func (mr *MockProviderClientMockRecorder) ReadResource(arg0, arg1 interface{}, a } // Stop mocks base method -func (m *MockProviderClient) Stop(arg0 context.Context, arg1 *proto.Stop_Request, arg2 ...grpc.CallOption) (*proto.Stop_Response, error) { +func (m *MockProviderClient) Stop(arg0 context.Context, arg1 *tfplugin5.Stop_Request, arg2 ...grpc.CallOption) (*tfplugin5.Stop_Response, error) { varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Stop", varargs...) - ret0, _ := ret[0].(*proto.Stop_Response) + ret0, _ := ret[0].(*tfplugin5.Stop_Response) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -199,13 +199,13 @@ func (mr *MockProviderClientMockRecorder) Stop(arg0, arg1 interface{}, arg2 ...i } // UpgradeResourceState mocks base method -func (m *MockProviderClient) UpgradeResourceState(arg0 context.Context, arg1 *proto.UpgradeResourceState_Request, arg2 ...grpc.CallOption) (*proto.UpgradeResourceState_Response, error) { +func (m *MockProviderClient) UpgradeResourceState(arg0 context.Context, arg1 *tfplugin5.UpgradeResourceState_Request, arg2 ...grpc.CallOption) (*tfplugin5.UpgradeResourceState_Response, error) { varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "UpgradeResourceState", varargs...) 
- ret0, _ := ret[0].(*proto.UpgradeResourceState_Response) + ret0, _ := ret[0].(*tfplugin5.UpgradeResourceState_Response) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -217,13 +217,13 @@ func (mr *MockProviderClientMockRecorder) UpgradeResourceState(arg0, arg1 interf } // ValidateDataSourceConfig mocks base method -func (m *MockProviderClient) ValidateDataSourceConfig(arg0 context.Context, arg1 *proto.ValidateDataSourceConfig_Request, arg2 ...grpc.CallOption) (*proto.ValidateDataSourceConfig_Response, error) { +func (m *MockProviderClient) ValidateDataSourceConfig(arg0 context.Context, arg1 *tfplugin5.ValidateDataSourceConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.ValidateDataSourceConfig_Response, error) { varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "ValidateDataSourceConfig", varargs...) - ret0, _ := ret[0].(*proto.ValidateDataSourceConfig_Response) + ret0, _ := ret[0].(*tfplugin5.ValidateDataSourceConfig_Response) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -235,13 +235,13 @@ func (mr *MockProviderClientMockRecorder) ValidateDataSourceConfig(arg0, arg1 in } // ValidateResourceTypeConfig mocks base method -func (m *MockProviderClient) ValidateResourceTypeConfig(arg0 context.Context, arg1 *proto.ValidateResourceTypeConfig_Request, arg2 ...grpc.CallOption) (*proto.ValidateResourceTypeConfig_Response, error) { +func (m *MockProviderClient) ValidateResourceTypeConfig(arg0 context.Context, arg1 *tfplugin5.ValidateResourceTypeConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.ValidateResourceTypeConfig_Response, error) { varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "ValidateResourceTypeConfig", varargs...) 
- ret0, _ := ret[0].(*proto.ValidateResourceTypeConfig_Response) + ret0, _ := ret[0].(*tfplugin5.ValidateResourceTypeConfig_Response) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -276,13 +276,13 @@ func (m *MockProvisionerClient) EXPECT() *MockProvisionerClientMockRecorder { } // GetSchema mocks base method -func (m *MockProvisionerClient) GetSchema(arg0 context.Context, arg1 *proto.GetProvisionerSchema_Request, arg2 ...grpc.CallOption) (*proto.GetProvisionerSchema_Response, error) { +func (m *MockProvisionerClient) GetSchema(arg0 context.Context, arg1 *tfplugin5.GetProvisionerSchema_Request, arg2 ...grpc.CallOption) (*tfplugin5.GetProvisionerSchema_Response, error) { varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "GetSchema", varargs...) - ret0, _ := ret[0].(*proto.GetProvisionerSchema_Response) + ret0, _ := ret[0].(*tfplugin5.GetProvisionerSchema_Response) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -294,13 +294,13 @@ func (mr *MockProvisionerClientMockRecorder) GetSchema(arg0, arg1 interface{}, a } // ProvisionResource mocks base method -func (m *MockProvisionerClient) ProvisionResource(arg0 context.Context, arg1 *proto.ProvisionResource_Request, arg2 ...grpc.CallOption) (proto.Provisioner_ProvisionResourceClient, error) { +func (m *MockProvisionerClient) ProvisionResource(arg0 context.Context, arg1 *tfplugin5.ProvisionResource_Request, arg2 ...grpc.CallOption) (tfplugin5.Provisioner_ProvisionResourceClient, error) { varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "ProvisionResource", varargs...) 
- ret0, _ := ret[0].(proto.Provisioner_ProvisionResourceClient) + ret0, _ := ret[0].(tfplugin5.Provisioner_ProvisionResourceClient) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -312,13 +312,13 @@ func (mr *MockProvisionerClientMockRecorder) ProvisionResource(arg0, arg1 interf } // Stop mocks base method -func (m *MockProvisionerClient) Stop(arg0 context.Context, arg1 *proto.Stop_Request, arg2 ...grpc.CallOption) (*proto.Stop_Response, error) { +func (m *MockProvisionerClient) Stop(arg0 context.Context, arg1 *tfplugin5.Stop_Request, arg2 ...grpc.CallOption) (*tfplugin5.Stop_Response, error) { varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "Stop", varargs...) - ret0, _ := ret[0].(*proto.Stop_Response) + ret0, _ := ret[0].(*tfplugin5.Stop_Response) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -330,13 +330,13 @@ func (mr *MockProvisionerClientMockRecorder) Stop(arg0, arg1 interface{}, arg2 . } // ValidateProvisionerConfig mocks base method -func (m *MockProvisionerClient) ValidateProvisionerConfig(arg0 context.Context, arg1 *proto.ValidateProvisionerConfig_Request, arg2 ...grpc.CallOption) (*proto.ValidateProvisionerConfig_Response, error) { +func (m *MockProvisionerClient) ValidateProvisionerConfig(arg0 context.Context, arg1 *tfplugin5.ValidateProvisionerConfig_Request, arg2 ...grpc.CallOption) (*tfplugin5.ValidateProvisionerConfig_Response, error) { varargs := []interface{}{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "ValidateProvisionerConfig", varargs...) - ret0, _ := ret[0].(*proto.ValidateProvisionerConfig_Response) + ret0, _ := ret[0].(*tfplugin5.ValidateProvisionerConfig_Response) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -408,9 +408,9 @@ func (mr *MockProvisioner_ProvisionResourceClientMockRecorder) Header() *gomock. 
} // Recv mocks base method -func (m *MockProvisioner_ProvisionResourceClient) Recv() (*proto.ProvisionResource_Response, error) { +func (m *MockProvisioner_ProvisionResourceClient) Recv() (*tfplugin5.ProvisionResource_Response, error) { ret := m.ctrl.Call(m, "Recv") - ret0, _ := ret[0].(*proto.ProvisionResource_Response) + ret0, _ := ret[0].(*tfplugin5.ProvisionResource_Response) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -504,7 +504,7 @@ func (mr *MockProvisioner_ProvisionResourceServerMockRecorder) RecvMsg(arg0 inte } // Send mocks base method -func (m *MockProvisioner_ProvisionResourceServer) Send(arg0 *proto.ProvisionResource_Response) error { +func (m *MockProvisioner_ProvisionResourceServer) Send(arg0 *tfplugin5.ProvisionResource_Response) error { ret := m.ctrl.Call(m, "Send", arg0) ret0, _ := ret[0].(error) return ret0 diff --git a/plugin/serve.go b/plugin/serve.go index b668bbfef7cf..4e43cf00024d 100644 --- a/plugin/serve.go +++ b/plugin/serve.go @@ -3,7 +3,7 @@ package plugin import ( "github.com/hashicorp/go-plugin" grpcplugin "github.com/hashicorp/terraform/helper/plugin" - "github.com/hashicorp/terraform/plugin/proto" + proto "github.com/hashicorp/terraform/internal/tfplugin5" "github.com/hashicorp/terraform/terraform" ) From 6606b525ec53bb4eb42a166e1dfcdaa7a830a2bf Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Mon, 19 Nov 2018 10:03:17 -0800 Subject: [PATCH 131/149] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index cf69fd0b37bd..cdd1edc6020b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ BACKWARDS INCOMPATIBILITIES / NOTES: * command: Remove `-module-depth` flag from plan, apply, show, and graph. This flag was not widely used and the various updates and improvements to cli output should remove the need for this flag. 
[GH-19267] +* plugins: The protobuf/grpc package name for the provider protocol was changed from `proto` to `tfplugin5` in preparation for future protocol versioning. This means that plugin binaries built for alpha1 and alpha2 are no longer compatible and will need to be rebuilt. IMPROVEMENTS: From 5b676059a4cfe7217b6d7ea7da873c2270ee6a24 Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Mon, 19 Nov 2018 10:04:01 -0800 Subject: [PATCH 132/149] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cdd1edc6020b..3f968caf2f20 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ BACKWARDS INCOMPATIBILITIES / NOTES: * command: Remove `-module-depth` flag from plan, apply, show, and graph. This flag was not widely used and the various updates and improvements to cli output should remove the need for this flag. [GH-19267] -* plugins: The protobuf/grpc package name for the provider protocol was changed from `proto` to `tfplugin5` in preparation for future protocol versioning. This means that plugin binaries built for alpha1 and alpha2 are no longer compatible and will need to be rebuilt. +* plugins: The protobuf/grpc package name for the provider protocol was changed from `proto` to `tfplugin5` in preparation for future protocol versioning. This means that plugin binaries built for alpha1 and alpha2 are no longer compatible and will need to be rebuilt. [GH-19393] IMPROVEMENTS: From 884aa387b8f1ffbc66e2fde6bb018df542631b4b Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Mon, 19 Nov 2018 11:15:58 -0800 Subject: [PATCH 133/149] command: Use vendoring when building helper programs in tests In a couple places in tests we execute a child "go build" to make a helper program. Now that we're running in module mode, "go build" will normally default to downloading and caching dependencies, which we don't want because we're still using vendoring for the moment. 
Therefore we need to instruct these child builds to use vendoring too, avoiding the need to download all of the dependencies and ensuring that we'll be building with the same dependencies that we'd use for a normal build. --- command/command_test.go | 2 +- e2e/e2e.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/command/command_test.go b/command/command_test.go index 7e786ffcc7fa..94c1e0c17ee9 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -798,7 +798,7 @@ func testLockState(sourceDir, path string) (func(), error) { source := filepath.Join(sourceDir, "statelocker.go") lockBin := filepath.Join(buildDir, "statelocker") - out, err := exec.Command("go", "build", "-o", lockBin, source).CombinedOutput() + out, err := exec.Command("go", "build", "-mod=vendor", "-o", lockBin, source).CombinedOutput() if err != nil { cleanFunc() return nil, fmt.Errorf("%s %s", err, out) diff --git a/e2e/e2e.go b/e2e/e2e.go index b8c1d69eafd2..23ebc121cb6a 100644 --- a/e2e/e2e.go +++ b/e2e/e2e.go @@ -236,6 +236,7 @@ func GoBuild(pkgPath, tmpPrefix string) string { cmd := exec.Command( "go", "build", + "-mod=vendor", "-o", tmpFilename, pkgPath, ) From 0b7be2d0e3f0feeb7b3ef48c14f6a09abc30e101 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Mon, 19 Nov 2018 16:59:27 -0500 Subject: [PATCH 134/149] fixes for the remaining tests It's possible that a computed collection could be handled by the attribute name, rather than the index count value. Use a new testDiffFn for some tests, which don't work with the old function that can't determine `computed` without the schema. 
--- terraform/context_plan_test.go | 100 ++++++++++++++++++++++++++++++++- terraform/diff.go | 2 +- terraform/provider_mock.go | 2 + 3 files changed, 100 insertions(+), 4 deletions(-) diff --git a/terraform/context_plan_test.go b/terraform/context_plan_test.go index e7f0e7ab2b76..623c93d3243f 100644 --- a/terraform/context_plan_test.go +++ b/terraform/context_plan_test.go @@ -2075,7 +2075,56 @@ func TestContext2Plan_computedList(t *testing.T) { }, }, } - p.DiffFn = testDiffFn + p.DiffFn = func(info *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) { + diff := &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{}, + } + + computedKeys := map[string]bool{} + for _, k := range c.ComputedKeys { + computedKeys[k] = true + } + + compute, _ := c.Raw["compute"].(string) + if compute != "" { + diff.Attributes[compute] = &ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + } + } + + fooOld := s.Attributes["foo"] + fooNew, _ := c.Raw["foo"].(string) + if fooOld != fooNew { + diff.Attributes["foo"] = &ResourceAttrDiff{ + Old: fooOld, + New: fooNew, + NewComputed: computedKeys["foo"], + } + } + + numOld := s.Attributes["num"] + numNew, _ := c.Raw["num"].(string) + if numOld != numNew { + diff.Attributes["num"] = &ResourceAttrDiff{ + Old: numOld, + New: numNew, + NewComputed: computedKeys["num"], + } + } + + listOld := s.Attributes["list.#"] + if listOld == "" { + diff.Attributes["list.#"] = &ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + } + } + + return diff, nil + } ctx := testContext2(t, &ContextOpts{ Config: m, @@ -2129,6 +2178,7 @@ func TestContext2Plan_computedMultiIndex(t *testing.T) { m := testModule(t, "plan-computed-multi-index") p := testProvider("aws") p.DiffFn = testDiffFn + p.GetSchemaReturn = &ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "aws_instance": { @@ -2141,6 +2191,47 @@ func TestContext2Plan_computedMultiIndex(t *testing.T) { }, } + p.DiffFn = func(info *InstanceInfo, s 
*InstanceState, c *ResourceConfig) (*InstanceDiff, error) { + diff := &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{}, + } + + compute, _ := c.Raw["compute"].(string) + if compute != "" { + diff.Attributes[compute] = &ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + } + } + + fooOld := s.Attributes["foo"] + fooNew, _ := c.Raw["foo"].(string) + fooComputed := false + for _, k := range c.ComputedKeys { + if k == "foo" { + fooComputed = true + } + } + if fooNew != "" { + diff.Attributes["foo"] = &ResourceAttrDiff{ + Old: fooOld, + New: fooNew, + NewComputed: fooComputed, + } + } + + ipOld := s.Attributes["ip"] + ipComputed := ipOld == "" + diff.Attributes["ip"] = &ResourceAttrDiff{ + Old: ipOld, + New: "", + NewComputed: ipComputed, + } + + return diff, nil + } + ctx := testContext2(t, &ContextOpts{ Config: m, ProviderResolver: providers.ResolverFixed( @@ -2174,14 +2265,17 @@ func TestContext2Plan_computedMultiIndex(t *testing.T) { switch i := ric.Addr.String(); i { case "aws_instance.foo[0]": checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "ip": cty.UnknownVal(cty.List(cty.String)), + "ip": cty.UnknownVal(cty.List(cty.String)), + "foo": cty.ListValEmpty(cty.String), }), ric.After) case "aws_instance.foo[1]": checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "ip": cty.UnknownVal(cty.List(cty.String)), + "ip": cty.UnknownVal(cty.List(cty.String)), + "foo": cty.ListValEmpty(cty.String), }), ric.After) case "aws_instance.bar[0]": checkVals(t, objectVal(t, schema, map[string]cty.Value{ + "ip": cty.UnknownVal(cty.List(cty.String)), "foo": cty.UnknownVal(cty.List(cty.String)), }), ric.After) default: diff --git a/terraform/diff.go b/terraform/diff.go index c8e660ad5e66..8e7a8d9c5bfb 100644 --- a/terraform/diff.go +++ b/terraform/diff.go @@ -563,7 +563,7 @@ func (d *InstanceDiff) applyCollectionDiff(attrName string, oldAttrs map[string] // check the index first for special handling for k, diff := range d.Attributes { // check the 
index value, which can be set, and 0 - if k == attrName+".#" || k == attrName+".%" { + if k == attrName+".#" || k == attrName+".%" || k == attrName { if diff.NewRemoved { return result, nil } diff --git a/terraform/provider_mock.go b/terraform/provider_mock.go index 73d26f6dc7c7..a2c5de33770c 100644 --- a/terraform/provider_mock.go +++ b/terraform/provider_mock.go @@ -282,6 +282,7 @@ func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) } priorState := NewInstanceStateShimmedFromValue(r.PriorState, 0) cfg := NewResourceConfigShimmed(r.Config, schema) + legacyDiff, err := p.DiffFn(info, priorState, cfg) var res providers.PlanResourceChangeResponse @@ -294,6 +295,7 @@ func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) if err != nil { res.Diagnostics = res.Diagnostics.Append(err) } + res.PlannedState = newVal var requiresNew []string From f375691819547c3309c9819e2452f10cd4e14eb3 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Mon, 19 Nov 2018 17:59:38 -0500 Subject: [PATCH 135/149] add missing key-value from test --- helper/plugin/grpc_provider_test.go | 2 +- states/state_string.go | 7 ++++++- terraform/context_plan_test.go | 3 ++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/helper/plugin/grpc_provider_test.go b/helper/plugin/grpc_provider_test.go index f27edf3993ef..1dcc04df5062 100644 --- a/helper/plugin/grpc_provider_test.go +++ b/helper/plugin/grpc_provider_test.go @@ -662,7 +662,7 @@ func TestNormalizeFlatmapContainers(t *testing.T) { expect: map[string]string{"id": "78629a0f5f3f164f"}, }, { - attrs: map[string]string{"set.2.required": "bar", "set.2.list.#": "1", "set.2.list.0": "x", "set.1.list.#": "0"}, + attrs: map[string]string{"set.2.required": "bar", "set.2.list.#": "1", "set.2.list.0": "x", "set.1.list.#": "0", "set.#": "2"}, expect: map[string]string{"set.2.list.#": "1", "set.2.list.0": "x", "set.2.required": "bar", "set.#": "1"}, }, } { diff --git a/states/state_string.go 
b/states/state_string.go index 8a81a78f16a0..bca4581c9547 100644 --- a/states/state_string.go +++ b/states/state_string.go @@ -164,11 +164,16 @@ func (m *Module) testString() string { } } attrKeys := make([]string, 0, len(attributes)) - for ak, _ := range attributes { + for ak, val := range attributes { if ak == "id" { continue } + // don't show empty containers in the output + if val == "0" && (strings.HasSuffix(ak, ".#") || strings.HasSuffix(ak, ".%")) { + continue + } + attrKeys = append(attrKeys, ak) } diff --git a/terraform/context_plan_test.go b/terraform/context_plan_test.go index 623c93d3243f..53bd8136c070 100644 --- a/terraform/context_plan_test.go +++ b/terraform/context_plan_test.go @@ -2159,7 +2159,8 @@ func TestContext2Plan_computedList(t *testing.T) { switch i := ric.Addr.String(); i { case "aws_instance.bar": checkVals(t, objectVal(t, schema, map[string]cty.Value{ - "foo": cty.UnknownVal(cty.String), + "list": cty.UnknownVal(cty.List(cty.String)), + "foo": cty.UnknownVal(cty.String), }), ric.After) case "aws_instance.foo": checkVals(t, objectVal(t, schema, map[string]cty.Value{ From 79a9a1587963cd77b8ecbff866f95c264bcba663 Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Tue, 20 Nov 2018 09:58:59 +0100 Subject: [PATCH 136/149] command/state: lock when pushing state Next to adding the locking for the `state push` command, this commit also fixes a small bug where the lock would not be properly released when running the `state show` command. And finally it renames some variables in the `[un]taint` code in order to try to standardize the var names of a few frequently used variables (e.g. statemgr.Full, states.State, states.SyncState). 
--- command/apply_destroy_test.go | 2 +- command/apply_test.go | 4 ++-- command/command_test.go | 23 +++++++++++++++++------ command/import.go | 1 + command/plan_test.go | 2 +- command/refresh_test.go | 2 +- command/state_push.go | 12 ++++++++++++ command/state_push_test.go | 32 ++++++++++++++++++++++++++++++++ command/state_show.go | 8 ++++++++ command/taint.go | 22 +++++++++++----------- command/taint_test.go | 2 +- command/untaint.go | 22 +++++++++++----------- command/untaint_test.go | 2 +- 13 files changed, 99 insertions(+), 35 deletions(-) diff --git a/command/apply_destroy_test.go b/command/apply_destroy_test.go index ea34db70db04..3cdf998f713c 100644 --- a/command/apply_destroy_test.go +++ b/command/apply_destroy_test.go @@ -127,7 +127,7 @@ func TestApply_destroyLockedState(t *testing.T) { }) statePath := testStateFile(t, originalState) - unlock, err := testLockState("./testdata", statePath) + unlock, err := testLockState(testDataDir, statePath) if err != nil { t.Fatal(err) } diff --git a/command/apply_test.go b/command/apply_test.go index e511cfea474e..da755d3296ba 100644 --- a/command/apply_test.go +++ b/command/apply_test.go @@ -64,7 +64,7 @@ func TestApply(t *testing.T) { func TestApply_lockedState(t *testing.T) { statePath := testTempFile(t) - unlock, err := testLockState("./testdata", statePath) + unlock, err := testLockState(testDataDir, statePath) if err != nil { t.Fatal(err) } @@ -98,7 +98,7 @@ func TestApply_lockedState(t *testing.T) { func TestApply_lockedStateWait(t *testing.T) { statePath := testTempFile(t) - unlock, err := testLockState("./testdata", statePath) + unlock, err := testLockState(testDataDir, statePath) if err != nil { t.Fatal(err) } diff --git a/command/command_test.go b/command/command_test.go index 94c1e0c17ee9..ce6958715cfc 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -38,8 +38,11 @@ import ( backendInit "github.com/hashicorp/terraform/backend/init" ) -// This is the directory where our test fixtures 
are. -var fixtureDir = "./test-fixtures" +// These are the directories for our test data and fixtures. +var ( + fixtureDir = "./test-fixtures" + testDataDir = "./testdata" +) // a top level temp directory which will be cleaned after all tests var testingDir string @@ -50,14 +53,19 @@ func init() { // Initialize the backends backendInit.Init(nil) - // Expand the fixture dir on init because we change the working - // directory in some tests. + // Expand the data and fixture dirs on init because + // we change the working directory in some tests. var err error fixtureDir, err = filepath.Abs(fixtureDir) if err != nil { panic(err) } + testDataDir, err = filepath.Abs(testDataDir) + if err != nil { + panic(err) + } + testingDir, err = ioutil.TempDir(testingDir, "tf") if err != nil { panic(err) @@ -783,7 +791,7 @@ func testRemoteState(t *testing.T, s *states.State, c int) (*terraform.State, *h // testlockState calls a separate process to the lock the state file at path. // deferFunc should be called in the caller to properly unlock the file. -// Since many tests change the working durectory, the sourcedir argument must be +// Since many tests change the working directory, the sourcedir argument must be // supplied to locate the statelocker.go source. 
func testLockState(sourceDir, path string) (func(), error) { // build and run the binary ourselves so we can quickly terminate it for cleanup @@ -798,7 +806,10 @@ func testLockState(sourceDir, path string) (func(), error) { source := filepath.Join(sourceDir, "statelocker.go") lockBin := filepath.Join(buildDir, "statelocker") - out, err := exec.Command("go", "build", "-mod=vendor", "-o", lockBin, source).CombinedOutput() + cmd := exec.Command("go", "build", "-mod=vendor", "-o", lockBin, source) + cmd.Dir = filepath.Dir(sourceDir) + + out, err := cmd.CombinedOutput() if err != nil { cleanFunc() return nil, fmt.Errorf("%s %s", err, out) diff --git a/command/import.go b/command/import.go index 4126503b9de9..eadc201b7867 100644 --- a/command/import.go +++ b/command/import.go @@ -235,6 +235,7 @@ func (c *ImportCommand) Run(args []string) int { return 1 } + // Make sure to unlock the state defer func() { err := opReq.StateLocker.Unlock(nil) if err != nil { diff --git a/command/plan_test.go b/command/plan_test.go index 3220968f9916..9e3791d84b5c 100644 --- a/command/plan_test.go +++ b/command/plan_test.go @@ -56,7 +56,7 @@ func TestPlan_lockedState(t *testing.T) { } testPath := testFixturePath("plan") - unlock, err := testLockState("./testdata", filepath.Join(testPath, DefaultStateFilename)) + unlock, err := testLockState(testDataDir, filepath.Join(testPath, DefaultStateFilename)) if err != nil { t.Fatal(err) } diff --git a/command/refresh_test.go b/command/refresh_test.go index 5791ec6ec5a4..603781466c4d 100644 --- a/command/refresh_test.go +++ b/command/refresh_test.go @@ -115,7 +115,7 @@ func TestRefresh_lockedState(t *testing.T) { state := testState() statePath := testStateFile(t, state) - unlock, err := testLockState("./testdata", statePath) + unlock, err := testLockState(testDataDir, statePath) if err != nil { t.Fatal(err) } diff --git a/command/state_push.go b/command/state_push.go index 2aa39bd8f07c..5df0af528b31 100644 --- a/command/state_push.go +++ 
b/command/state_push.go @@ -1,11 +1,13 @@ package command import ( + "context" "fmt" "io" "os" "strings" + "github.com/hashicorp/terraform/command/clistate" "github.com/hashicorp/terraform/states/statefile" "github.com/hashicorp/terraform/states/statemgr" "github.com/mitchellh/cli" @@ -77,6 +79,16 @@ func (c *StatePushCommand) Run(args []string) int { c.Ui.Error(fmt.Sprintf("Failed to load destination state: %s", err)) return 1 } + + if c.stateLock { + stateLocker := clistate.NewLocker(context.Background(), c.stateLockTimeout, c.Ui, c.Colorize()) + if err := stateLocker.Lock(stateMgr, "taint"); err != nil { + c.Ui.Error(fmt.Sprintf("Error locking state: %s", err)) + return 1 + } + defer stateLocker.Unlock(nil) + } + if err := stateMgr.RefreshState(); err != nil { c.Ui.Error(fmt.Sprintf("Failed to refresh destination state: %s", err)) return 1 diff --git a/command/state_push_test.go b/command/state_push_test.go index 4240dffff15d..990a320478e2 100644 --- a/command/state_push_test.go +++ b/command/state_push_test.go @@ -3,6 +3,7 @@ package command import ( "bytes" "os" + "strings" "testing" "github.com/hashicorp/terraform/backend" @@ -41,6 +42,37 @@ func TestStatePush_empty(t *testing.T) { } } +func TestStatePush_lockedState(t *testing.T) { + // Create a temporary working directory that is empty + td := tempDir(t) + copy.CopyDir(testFixturePath("state-push-good"), td) + defer os.RemoveAll(td) + defer testChdir(t, td)() + + p := testProvider() + ui := new(cli.MockUi) + c := &StatePushCommand{ + Meta: Meta{ + testingOverrides: metaOverridesForProvider(p), + Ui: ui, + }, + } + + unlock, err := testLockState(testDataDir, "local-state.tfstate") + if err != nil { + t.Fatal(err) + } + defer unlock() + + args := []string{"replace.tfstate"} + if code := c.Run(args); code != 1 { + t.Fatalf("bad: %d", code) + } + if !strings.Contains(ui.ErrorWriter.String(), "Error acquiring the state lock") { + t.Fatalf("expected a lock error, got: %s", ui.ErrorWriter.String()) + } +} + func 
TestStatePush_replaceMatch(t *testing.T) { // Create a temporary working directory that is empty td := tempDir(t) diff --git a/command/state_show.go b/command/state_show.go index 8f501dccee73..79a3afc490b6 100644 --- a/command/state_show.go +++ b/command/state_show.go @@ -79,6 +79,14 @@ func (c *StateShowCommand) Run(args []string) int { return 1 } + // Make sure to unlock the state + defer func() { + err := opReq.StateLocker.Unlock(nil) + if err != nil { + c.Ui.Error(err.Error()) + } + }() + // Get the schemas from the context schemas := ctx.Schemas() diff --git a/command/taint.go b/command/taint.go index b0cff3dc0e44..ec9188d3a81a 100644 --- a/command/taint.go +++ b/command/taint.go @@ -76,7 +76,7 @@ func (c *TaintCommand) Run(args []string) int { // Get the state env := c.Workspace() - st, err := b.StateMgr(env) + stateMgr, err := b.StateMgr(env) if err != nil { c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) return 1 @@ -84,21 +84,21 @@ func (c *TaintCommand) Run(args []string) int { if c.stateLock { stateLocker := clistate.NewLocker(context.Background(), c.stateLockTimeout, c.Ui, c.Colorize()) - if err := stateLocker.Lock(st, "taint"); err != nil { + if err := stateLocker.Lock(stateMgr, "taint"); err != nil { c.Ui.Error(fmt.Sprintf("Error locking state: %s", err)) return 1 } defer stateLocker.Unlock(nil) } - if err := st.RefreshState(); err != nil { + if err := stateMgr.RefreshState(); err != nil { c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) return 1 } // Get the actual state structure - s := st.State() - if s.Empty() { + state := stateMgr.State() + if state.Empty() { if allowMissing { return c.allowMissingExit(addr) } @@ -112,11 +112,11 @@ func (c *TaintCommand) Run(args []string) int { return 1 } - state := s.SyncWrapper() + ss := state.SyncWrapper() // Get the resource and instance we're going to taint - rs := state.Resource(addr.ContainingResource()) - is := state.ResourceInstance(addr) + rs := ss.Resource(addr.ContainingResource()) + 
is := ss.ResourceInstance(addr) if is == nil { if allowMissing { return c.allowMissingExit(addr) @@ -152,13 +152,13 @@ func (c *TaintCommand) Run(args []string) int { } obj.Status = states.ObjectTainted - state.SetResourceInstanceCurrent(addr, obj, rs.ProviderConfig) + ss.SetResourceInstanceCurrent(addr, obj, rs.ProviderConfig) - if err := st.WriteState(s); err != nil { + if err := stateMgr.WriteState(state); err != nil { c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err)) return 1 } - if err := st.PersistState(); err != nil { + if err := stateMgr.PersistState(); err != nil { c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err)) return 1 } diff --git a/command/taint_test.go b/command/taint_test.go index 8193f0982d60..bcdb76a9c3a5 100644 --- a/command/taint_test.go +++ b/command/taint_test.go @@ -64,7 +64,7 @@ func TestTaint_lockedState(t *testing.T) { }) statePath := testStateFile(t, state) - unlock, err := testLockState("./testdata", statePath) + unlock, err := testLockState(testDataDir, statePath) if err != nil { t.Fatal(err) } diff --git a/command/untaint.go b/command/untaint.go index 082f9ce11c39..ce173364200e 100644 --- a/command/untaint.go +++ b/command/untaint.go @@ -72,7 +72,7 @@ func (c *UntaintCommand) Run(args []string) int { // Get the state workspace := c.Workspace() - st, err := b.StateMgr(workspace) + stateMgr, err := b.StateMgr(workspace) if err != nil { c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) return 1 @@ -80,21 +80,21 @@ func (c *UntaintCommand) Run(args []string) int { if c.stateLock { stateLocker := clistate.NewLocker(context.Background(), c.stateLockTimeout, c.Ui, c.Colorize()) - if err := stateLocker.Lock(st, "untaint"); err != nil { + if err := stateLocker.Lock(stateMgr, "untaint"); err != nil { c.Ui.Error(fmt.Sprintf("Error locking state: %s", err)) return 1 } defer stateLocker.Unlock(nil) } - if err := st.RefreshState(); err != nil { + if err := stateMgr.RefreshState(); err != nil { 
c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) return 1 } // Get the actual state structure - s := st.State() - if s.Empty() { + state := stateMgr.State() + if state.Empty() { if allowMissing { return c.allowMissingExit(addr) } @@ -108,11 +108,11 @@ func (c *UntaintCommand) Run(args []string) int { return 1 } - state := s.SyncWrapper() + ss := state.SyncWrapper() // Get the resource and instance we're going to taint - rs := state.Resource(addr.ContainingResource()) - is := state.ResourceInstance(addr) + rs := ss.Resource(addr.ContainingResource()) + is := ss.ResourceInstance(addr) if is == nil { if allowMissing { return c.allowMissingExit(addr) @@ -157,13 +157,13 @@ func (c *UntaintCommand) Run(args []string) int { return 1 } obj.Status = states.ObjectReady - state.SetResourceInstanceCurrent(addr, obj, rs.ProviderConfig) + ss.SetResourceInstanceCurrent(addr, obj, rs.ProviderConfig) - if err := st.WriteState(s); err != nil { + if err := stateMgr.WriteState(state); err != nil { c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err)) return 1 } - if err := st.PersistState(); err != nil { + if err := stateMgr.PersistState(); err != nil { c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err)) return 1 } diff --git a/command/untaint_test.go b/command/untaint_test.go index 789f40af5ad2..c5f7275f1b6c 100644 --- a/command/untaint_test.go +++ b/command/untaint_test.go @@ -67,7 +67,7 @@ func TestUntaint_lockedState(t *testing.T) { ) }) statePath := testStateFile(t, state) - unlock, err := testLockState("./testdata", statePath) + unlock, err := testLockState(testDataDir, statePath) if err != nil { t.Fatal(err) } From 77844852e62c423bc80bbde4e79d0446fa466522 Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Tue, 20 Nov 2018 17:03:51 +0100 Subject: [PATCH 137/149] go-mod: update go-tfe --- go.mod | 2 +- go.sum | 4 +- vendor/github.com/hashicorp/go-tfe/tfe.go | 38 +++++++++---------- .../github.com/hashicorp/go-tfe/workspace.go | 1 + vendor/modules.txt 
| 2 +- 5 files changed, 24 insertions(+), 23 deletions(-) diff --git a/go.mod b/go.mod index 6332228c422f..6a471e1524a1 100644 --- a/go.mod +++ b/go.mod @@ -66,7 +66,7 @@ require ( github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 github.com/hashicorp/go-safetemp v0.0.0-20180326211150-b1a1dbde6fdc // indirect github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86 // indirect - github.com/hashicorp/go-tfe v0.3.0 + github.com/hashicorp/go-tfe v0.3.1 github.com/hashicorp/go-uuid v1.0.0 github.com/hashicorp/go-version v0.0.0-20180322230233-23480c066577 github.com/hashicorp/golang-lru v0.5.0 // indirect diff --git a/go.sum b/go.sum index 31194a3f78c7..43301678dc33 100644 --- a/go.sum +++ b/go.sum @@ -147,8 +147,8 @@ github.com/hashicorp/go-slug v0.1.0 h1:MJGEiOwRGrQCBmMMZABHqIESySFJ4ajrsjgDI4/aF github.com/hashicorp/go-slug v0.1.0/go.mod h1:+zDycQOzGqOqMW7Kn2fp9vz/NtqpMLQlgb9JUF+0km4= github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86 h1:7YOlAIO2YWnJZkQp7B5eFykaIY7C9JndqAFQyVV5BhM= github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-tfe v0.3.0 h1:X0oM8RNKgMlmaMOEzLkx8/RTIC3d2K30R8+G4cSXJPc= -github.com/hashicorp/go-tfe v0.3.0/go.mod h1:SRMjgjY06SfEKstIPRUVMtQfhSYR2H3GHVop0lfedkY= +github.com/hashicorp/go-tfe v0.3.1 h1:178hBlqjBsXohfcJ2/t2RM8c29IviQrEkj+mqdbkQzM= +github.com/hashicorp/go-tfe v0.3.1/go.mod h1:SRMjgjY06SfEKstIPRUVMtQfhSYR2H3GHVop0lfedkY= github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v0.0.0-20180322230233-23480c066577 h1:at4+18LrM8myamuV7/vT6x2s1JNXp2k4PsSbt4I02X4= diff --git a/vendor/github.com/hashicorp/go-tfe/tfe.go b/vendor/github.com/hashicorp/go-tfe/tfe.go index c38938564774..f2e83169d1dd 100644 --- a/vendor/github.com/hashicorp/go-tfe/tfe.go +++ 
b/vendor/github.com/hashicorp/go-tfe/tfe.go @@ -289,25 +289,6 @@ func (c *Client) configureLimiter() error { return nil } -// ListOptions is used to specify pagination options when making API requests. -// Pagination allows breaking up large result sets into chunks, or "pages". -type ListOptions struct { - // The page number to request. The results vary based on the PageSize. - PageNumber int `url:"page[number],omitempty"` - - // The number of elements returned in a single page. - PageSize int `url:"page[size],omitempty"` -} - -// Pagination is used to return the pagination details of an API request. -type Pagination struct { - CurrentPage int `json:"current-page"` - PreviousPage int `json:"prev-page"` - NextPage int `json:"next-page"` - TotalPages int `json:"total-pages"` - TotalCount int `json:"total-count"` -} - // newRequest creates an API request. A relative URL path can be provided in // path, in which case it is resolved relative to the apiVersionPath of the // Client. Relative URL paths should always be specified without a preceding @@ -479,6 +460,25 @@ func (c *Client) do(ctx context.Context, req *retryablehttp.Request, v interface return nil } +// ListOptions is used to specify pagination options when making API requests. +// Pagination allows breaking up large result sets into chunks, or "pages". +type ListOptions struct { + // The page number to request. The results vary based on the PageSize. + PageNumber int `url:"page[number],omitempty"` + + // The number of elements returned in a single page. + PageSize int `url:"page[size],omitempty"` +} + +// Pagination is used to return the pagination details of an API request. 
+type Pagination struct { + CurrentPage int `json:"current-page"` + PreviousPage int `json:"prev-page"` + NextPage int `json:"next-page"` + TotalPages int `json:"total-pages"` + TotalCount int `json:"total-count"` +} + func parsePagination(body io.Reader) (*Pagination, error) { var raw struct { Meta struct { diff --git a/vendor/github.com/hashicorp/go-tfe/workspace.go b/vendor/github.com/hashicorp/go-tfe/workspace.go index ca3b21d06545..968af91b76bc 100644 --- a/vendor/github.com/hashicorp/go-tfe/workspace.go +++ b/vendor/github.com/hashicorp/go-tfe/workspace.go @@ -66,6 +66,7 @@ type Workspace struct { Locked bool `jsonapi:"attr,locked"` MigrationEnvironment string `jsonapi:"attr,migration-environment"` Name string `jsonapi:"attr,name"` + Operations bool `jsonapi:"attr,operations"` Permissions *WorkspacePermissions `jsonapi:"attr,permissions"` TerraformVersion string `jsonapi:"attr,terraform-version"` VCSRepo *VCSRepo `jsonapi:"attr,vcs-repo"` diff --git a/vendor/modules.txt b/vendor/modules.txt index 8073c8a4b100..9f29433ce3cd 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -315,7 +315,7 @@ github.com/hashicorp/go-rootcerts github.com/hashicorp/go-safetemp # github.com/hashicorp/go-slug v0.1.0 github.com/hashicorp/go-slug -# github.com/hashicorp/go-tfe v0.3.0 +# github.com/hashicorp/go-tfe v0.3.1 github.com/hashicorp/go-tfe # github.com/hashicorp/go-uuid v1.0.0 github.com/hashicorp/go-uuid From 17787c943a2efdd99bf94a6e4c2d28adb6f5cf2f Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Tue, 20 Nov 2018 17:57:07 +0100 Subject: [PATCH 138/149] Update CHANGELOG.md --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f968caf2f20..e803c8921812 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,7 @@ ## 0.12.0-beta1 (Unreleased) BACKWARDS INCOMPATIBILITIES / NOTES: -* command: Remove `-module-depth` flag from plan, apply, show, and graph. 
This flag was not widely used and the various updates and improvements to cli output should remove the need for this flag. [GH-19267] +* command: Remove `-module-depth` flag from plan, apply, and show. This flag was not widely used and the various updates and improvements to cli output should remove the need for this flag. [GH-19267] * plugins: The protobuf/grpc package name for the provider protocol was changed from `proto` to `tfplugin5` in preparation for future protocol versioning. This means that plugin binaries built for alpha1 and alpha2 are no longer compatible and will need to be rebuilt. [GH-19393] IMPROVEMENTS: @@ -15,6 +15,7 @@ BUG FIXES: * helper/schema: Fix timeout parsing during Provider.Diff (plan) [GH-19286] * core: Fix inconsistent plans when replacing instances. [GH-19233] * core: Correct handling of unknown values in module outputs during planning and final resolution of them during apply. [GH-19237] +* core: Correct handling of wildcard dependencies when upgrading states [GH-19374] ## 0.12.0-alpha2 (October 30, 2018) From 495826444bde4efe24456f6f96e15d0b751e6486 Mon Sep 17 00:00:00 2001 From: Justin Campbell Date: Wed, 14 Nov 2018 14:52:46 -0500 Subject: [PATCH 139/149] plugin/discovery: Use GPG keys from Registry When verifying the signature of the SHA256SUMS file, we have been hardcoding HashiCorp's public GPG key and using it as the keyring. Going forward, Terraform will get a list of valid public keys for a provider from the Terraform Registry (registry.terraform.io), and use them as the keyring for the openpgp verification func. 
--- plugin/discovery/get.go | 48 +++++++++--------- plugin/discovery/get_test.go | 36 +++++++++++++- plugin/discovery/signature.go | 39 ++------------- plugin/discovery/testdata/hashicorp.asc | 30 +++++++++++ registry/response/terraform_provider.go | 28 +++++++++++ registry/response/terraform_provider_test.go | 52 ++++++++++++++++++++ 6 files changed, 174 insertions(+), 59 deletions(-) create mode 100644 plugin/discovery/testdata/hashicorp.asc create mode 100644 registry/response/terraform_provider_test.go diff --git a/plugin/discovery/get.go b/plugin/discovery/get.go index ed96bfabd48c..d26e8e3461fd 100644 --- a/plugin/discovery/get.go +++ b/plugin/discovery/get.go @@ -15,7 +15,6 @@ import ( getter "github.com/hashicorp/go-getter" multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/httpclient" "github.com/hashicorp/terraform/registry" "github.com/hashicorp/terraform/registry/regsrc" @@ -353,12 +352,36 @@ func (i *ProviderInstaller) PurgeUnused(used map[string]PluginMeta) (PluginMetaS } func (i *ProviderInstaller) getProviderChecksum(urls *response.TerraformProviderPlatformLocation) (string, error) { - checksums, err := getPluginSHA256SUMs(urls.ShasumsURL, urls.ShasumsSignatureURL) + // Get SHA256SUMS file. + shasums, err := getFile(urls.ShasumsURL) + if err != nil { + return "", fmt.Errorf("error fetching checksums: %s", err) + } + + // Get SHA256SUMS.sig file. + signature, err := getFile(urls.ShasumsSignatureURL) + if err != nil { + return "", fmt.Errorf("error fetching checksums signature: %s", err) + } + + // Verify GPG signature. + asciiArmor := urls.SigningKeys.GPGASCIIArmor() + signer, err := verifySig(shasums, signature, asciiArmor) if err != nil { return "", err } - return checksumForFile(checksums, urls.Filename), nil + // Display identity for GPG key which succeeded verifying the signature. + // This could also be used to display to the user with i.Ui.Info(). 
+ identities := []string{} + for k, _ := range signer.Identities { + identities = append(identities, k) + } + identity := strings.Join(identities, ", ") + log.Printf("[DEBUG] verified GPG signature with key from %s", identity) + + // Extract checksum for this os/arch platform binary. + return checksumForFile(shasums, urls.Filename), nil } // list all versions available for the named provider @@ -487,25 +510,6 @@ func checksumForFile(sums []byte, name string) string { return "" } -// fetch the SHA256SUMS file provided, and verify its signature. -func getPluginSHA256SUMs(sumsURL, sigURL string) ([]byte, error) { - sums, err := getFile(sumsURL) - if err != nil { - return nil, fmt.Errorf("error fetching checksums: %s", err) - } - - sig, err := getFile(sigURL) - if err != nil { - return nil, fmt.Errorf("error fetching checksums signature: %s", err) - } - - if err := verifySig(sums, sig); err != nil { - return nil, err - } - - return sums, nil -} - func getFile(url string) ([]byte, error) { resp, err := httpClient.Get(url) if err != nil { diff --git a/plugin/discovery/get_test.go b/plugin/discovery/get_test.go index ce833ee36aa0..534a01fa5ea1 100644 --- a/plugin/discovery/get_test.go +++ b/plugin/discovery/get_test.go @@ -369,23 +369,57 @@ func TestProviderInstallerPurgeUnused(t *testing.T) { // Test fetching a provider's checksum file while verifying its signature. 
func TestProviderChecksum(t *testing.T) { + hashicorpKey, err := ioutil.ReadFile("testdata/hashicorp.asc") + if err != nil { + t.Fatal(err) + } + tests := []struct { + Name string URLs *response.TerraformProviderPlatformLocation Err bool }{ { + "good", &response.TerraformProviderPlatformLocation{ + Filename: "terraform-provider-template_0.1.0_darwin_amd64.zip", ShasumsURL: "http://127.0.0.1:8080/terraform-provider-template/0.1.0/terraform-provider-template_0.1.0_SHA256SUMS", ShasumsSignatureURL: "http://127.0.0.1:8080/terraform-provider-template/0.1.0/terraform-provider-template_0.1.0_SHA256SUMS.sig", - Filename: "terraform-provider-template_0.1.0_darwin_amd64.zip", + SigningKeys: response.SigningKeyList{ + GPGKeys: []*response.GPGKey{ + &response.GPGKey{ + ASCIIArmor: string(hashicorpKey), + }, + }, + }, }, false, }, { + "bad", &response.TerraformProviderPlatformLocation{ + Filename: "terraform-provider-template_0.1.0_darwin_amd64.zip", ShasumsURL: "http://127.0.0.1:8080/terraform-provider-badsig/0.1.0/terraform-provider-badsig_0.1.0_SHA256SUMS", ShasumsSignatureURL: "http://127.0.0.1:8080/terraform-provider-badsig/0.1.0/terraform-provider-badsig_0.1.0_SHA256SUMS.sig", + SigningKeys: response.SigningKeyList{ + GPGKeys: []*response.GPGKey{ + &response.GPGKey{ + ASCIIArmor: string(hashicorpKey), + }, + }, + }, + }, + true, + }, + { + "no keys", + &response.TerraformProviderPlatformLocation{ Filename: "terraform-provider-template_0.1.0_darwin_amd64.zip", + ShasumsURL: "http://127.0.0.1:8080/terraform-provider-template/0.1.0/terraform-provider-template_0.1.0_SHA256SUMS", + ShasumsSignatureURL: "http://127.0.0.1:8080/terraform-provider-template/0.1.0/terraform-provider-template_0.1.0_SHA256SUMS.sig", + SigningKeys: response.SigningKeyList{ + GPGKeys: []*response.GPGKey{}, + }, }, true, }, diff --git a/plugin/discovery/signature.go b/plugin/discovery/signature.go index b6686a5d5c93..4e941aec659d 100644 --- a/plugin/discovery/signature.go +++ 
b/plugin/discovery/signature.go @@ -10,44 +10,11 @@ import ( // Verify the data using the provided openpgp detached signature and the // embedded hashicorp public key. -func verifySig(data, sig []byte) error { - el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(hashiPublicKey)) +func verifySig(data, sig []byte, armor string) (*openpgp.Entity, error) { + el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(armor)) if err != nil { log.Fatal(err) } - _, err = openpgp.CheckDetachedSignature(el, bytes.NewReader(data), bytes.NewReader(sig)) - return err + return openpgp.CheckDetachedSignature(el, bytes.NewReader(data), bytes.NewReader(sig)) } - -// this is the public key that signs the checksums file for releases. -const hashiPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 - -mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f -W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq -fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA -3drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca -KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k -SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1 -cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JATgEEwECACIFAlMORM0CGwMG -CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFGFLYc0j/xMyWIIAIPhcVqiQ59n -Jc07gjUX0SWBJAxEG1lKxfzS4Xp+57h2xxTpdotGQ1fZwsihaIqow337YHQI3q0i -SqV534Ms+j/tU7X8sq11xFJIeEVG8PASRCwmryUwghFKPlHETQ8jJ+Y8+1asRydi -psP3B/5Mjhqv/uOK+Vy3zAyIpyDOMtIpOVfjSpCplVRdtSTFWBu9Em7j5I2HMn1w -sJZnJgXKpybpibGiiTtmnFLOwibmprSu04rsnP4ncdC2XRD4wIjoyA+4PKgX3sCO -klEzKryWYBmLkJOMDdo52LttP3279s7XrkLEE7ia0fXa2c12EQ0f0DQ1tGUvyVEW -WmJVccm5bq25AQ0EUw5EzQEIANaPUY04/g7AmYkOMjaCZ6iTp9hB5Rsj/4ee/ln9 -wArzRO9+3eejLWh53FoN1rO+su7tiXJA5YAzVy6tuolrqjM8DBztPxdLBbEi4V+j -2tK0dATdBQBHEh3OJApO2UBtcjaZBT31zrG9K55D+CrcgIVEHAKY8Cb4kLBkb5wM -skn+DrASKU0BNIV1qRsxfiUdQHZfSqtp004nrql1lbFMLFEuiY8FZrkkQ9qduixo -mTT6f34/oiY+Jam3zCK7RDN/OjuWheIPGj/Qbx9JuNiwgX6yRj7OE1tjUx6d8g9y 
-0H1fmLJbb3WZZbuuGFnK6qrE3bGeY8+AWaJAZ37wpWh1p0cAEQEAAYkBHwQYAQIA -CQUCUw5EzQIbDAAKCRBRhS2HNI/8TJntCAClU7TOO/X053eKF1jqNW4A1qpxctVc -z8eTcY8Om5O4f6a/rfxfNFKn9Qyja/OG1xWNobETy7MiMXYjaa8uUx5iFy6kMVaP -0BXJ59NLZjMARGw6lVTYDTIvzqqqwLxgliSDfSnqUhubGwvykANPO+93BBx89MRG -unNoYGXtPlhNFrAsB1VR8+EyKLv2HQtGCPSFBhrjuzH3gxGibNDDdFQLxxuJWepJ -EK1UbTS4ms0NgZ2Uknqn1WRU1Ki7rE4sTy68iZtWpKQXZEJa0IGnuI2sSINGcXCJ -oEIgXTMyCILo34Fa/C6VCm2WBgz9zZO8/rHIiQm1J5zqz0DrDwKBUM9C -=LYpS ------END PGP PUBLIC KEY BLOCK-----` diff --git a/plugin/discovery/testdata/hashicorp.asc b/plugin/discovery/testdata/hashicorp.asc new file mode 100644 index 000000000000..010c9271cb59 --- /dev/null +++ b/plugin/discovery/testdata/hashicorp.asc @@ -0,0 +1,30 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f +W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq +fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA +3drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca +KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k +SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1 +cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JATgEEwECACIFAlMORM0CGwMG +CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFGFLYc0j/xMyWIIAIPhcVqiQ59n +Jc07gjUX0SWBJAxEG1lKxfzS4Xp+57h2xxTpdotGQ1fZwsihaIqow337YHQI3q0i +SqV534Ms+j/tU7X8sq11xFJIeEVG8PASRCwmryUwghFKPlHETQ8jJ+Y8+1asRydi +psP3B/5Mjhqv/uOK+Vy3zAyIpyDOMtIpOVfjSpCplVRdtSTFWBu9Em7j5I2HMn1w +sJZnJgXKpybpibGiiTtmnFLOwibmprSu04rsnP4ncdC2XRD4wIjoyA+4PKgX3sCO +klEzKryWYBmLkJOMDdo52LttP3279s7XrkLEE7ia0fXa2c12EQ0f0DQ1tGUvyVEW +WmJVccm5bq25AQ0EUw5EzQEIANaPUY04/g7AmYkOMjaCZ6iTp9hB5Rsj/4ee/ln9 +wArzRO9+3eejLWh53FoN1rO+su7tiXJA5YAzVy6tuolrqjM8DBztPxdLBbEi4V+j +2tK0dATdBQBHEh3OJApO2UBtcjaZBT31zrG9K55D+CrcgIVEHAKY8Cb4kLBkb5wM +skn+DrASKU0BNIV1qRsxfiUdQHZfSqtp004nrql1lbFMLFEuiY8FZrkkQ9qduixo +mTT6f34/oiY+Jam3zCK7RDN/OjuWheIPGj/Qbx9JuNiwgX6yRj7OE1tjUx6d8g9y 
+0H1fmLJbb3WZZbuuGFnK6qrE3bGeY8+AWaJAZ37wpWh1p0cAEQEAAYkBHwQYAQIA +CQUCUw5EzQIbDAAKCRBRhS2HNI/8TJntCAClU7TOO/X053eKF1jqNW4A1qpxctVc +z8eTcY8Om5O4f6a/rfxfNFKn9Qyja/OG1xWNobETy7MiMXYjaa8uUx5iFy6kMVaP +0BXJ59NLZjMARGw6lVTYDTIvzqqqwLxgliSDfSnqUhubGwvykANPO+93BBx89MRG +unNoYGXtPlhNFrAsB1VR8+EyKLv2HQtGCPSFBhrjuzH3gxGibNDDdFQLxxuJWepJ +EK1UbTS4ms0NgZ2Uknqn1WRU1Ki7rE4sTy68iZtWpKQXZEJa0IGnuI2sSINGcXCJ +oEIgXTMyCILo34Fa/C6VCm2WBgz9zZO8/rHIiQm1J5zqz0DrDwKBUM9C +=LYpS +-----END PGP PUBLIC KEY BLOCK----- diff --git a/registry/response/terraform_provider.go b/registry/response/terraform_provider.go index b9d78e3295ed..0a4c3f9efb9c 100644 --- a/registry/response/terraform_provider.go +++ b/registry/response/terraform_provider.go @@ -2,6 +2,7 @@ package response import ( "sort" + "strings" version "github.com/hashicorp/go-version" ) @@ -50,11 +51,38 @@ type TerraformProviderPlatformLocation struct { DownloadURL string `json:"download_url"` ShasumsURL string `json:"shasums_url"` ShasumsSignatureURL string `json:"shasums_signature_url"` + Shasum string `json:"shasum"` + + SigningKeys SigningKeyList `json:"signing_keys"` +} + +// SigningKeyList is the response structure for a list of signing keys. +type SigningKeyList struct { + GPGKeys []*GPGKey `json:"gpg_public_keys"` +} + +// GPGKey is the response structure for a GPG key. +type GPGKey struct { + ASCIIArmor string `json:"ascii_armor"` + Source string `json:"source"` + SourceURL *string `json:"source_url"` } // Collection type for TerraformProviderVersion type ProviderVersionCollection []*TerraformProviderVersion +// GPGASCIIArmor returns an ASCII-armor-formatted string for all of the gpg +// keys in the response. +func (signingKeys *SigningKeyList) GPGASCIIArmor() string { + keys := []string{} + + for _, gpgKey := range signingKeys.GPGKeys { + keys = append(keys, gpgKey.ASCIIArmor) + } + + return strings.Join(keys, "\n") +} + // Sort sorts versions from newest to oldest. 
func (v ProviderVersionCollection) Sort() { sort.Slice(v, func(i, j int) bool { diff --git a/registry/response/terraform_provider_test.go b/registry/response/terraform_provider_test.go new file mode 100644 index 000000000000..09a976b951c6 --- /dev/null +++ b/registry/response/terraform_provider_test.go @@ -0,0 +1,52 @@ +package response + +import ( + "fmt" + "testing" +) + +var ( + testGPGKeyOne = &GPGKey{ + ASCIIArmor: "---\none\n---", + } + testGPGKeyTwo = &GPGKey{ + ASCIIArmor: "---\ntwo\n---", + } +) + +func TestSigningKeyList_GPGASCIIArmor(t *testing.T) { + var tests = []struct { + name string + gpgKeys []*GPGKey + expected string + }{ + { + name: "no keys", + gpgKeys: []*GPGKey{}, + expected: "", + }, + { + name: "one key", + gpgKeys: []*GPGKey{testGPGKeyOne}, + expected: testGPGKeyOne.ASCIIArmor, + }, + { + name: "two keys", + gpgKeys: []*GPGKey{testGPGKeyOne, testGPGKeyTwo}, + expected: fmt.Sprintf("%s\n%s", + testGPGKeyOne.ASCIIArmor, testGPGKeyTwo.ASCIIArmor), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + signingKeys := &SigningKeyList{ + GPGKeys: tt.gpgKeys, + } + actual := signingKeys.GPGASCIIArmor() + + if actual != tt.expected { + t.Errorf("expected %s, got %s", tt.expected, actual) + } + }) + } +} From 9a8a74b9bba63955db52e8a764627341e512e5b7 Mon Sep 17 00:00:00 2001 From: Justin Campbell Date: Fri, 16 Nov 2018 14:05:38 -0500 Subject: [PATCH 140/149] plugin/discovery: Print name before verification This is so that any errors output from the checksum/signature verification show up in the expected place in the output. 
--- plugin/discovery/get.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plugin/discovery/get.go b/plugin/discovery/get.go index d26e8e3461fd..558a277ee69c 100644 --- a/plugin/discovery/get.go +++ b/plugin/discovery/get.go @@ -172,6 +172,9 @@ func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, e downloadURLs, err := i.listProviderDownloadURLs(provider, versionMeta.Version) providerURL := downloadURLs.DownloadURL + i.Ui.Info(fmt.Sprintf("- Downloading plugin for provider %q (%s)...", provider, versionMeta.Version)) + log.Printf("[DEBUG] getting provider %q version %q", provider, versionMeta.Version) + if !i.SkipVerify { sha256, err := i.getProviderChecksum(downloadURLs) if err != nil { @@ -184,8 +187,6 @@ func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, e } } - i.Ui.Info(fmt.Sprintf("- Downloading plugin for provider %q (%s)...", provider, versionMeta.Version)) - log.Printf("[DEBUG] getting provider %q version %q", provider, versionMeta.Version) err = i.install(provider, v, providerURL) if err != nil { return PluginMeta{}, err From 06825bf46db55b8e5c8771742b07b084fe55e88f Mon Sep 17 00:00:00 2001 From: Justin Campbell Date: Fri, 16 Nov 2018 16:32:31 -0500 Subject: [PATCH 141/149] plugin/discovery: Add friendly gpg err msg When GPG verification fails, display a helpful message to the user instead of the generic openpgp error. --- plugin/discovery/get.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/plugin/discovery/get.go b/plugin/discovery/get.go index 558a277ee69c..2f6ac1a9144e 100644 --- a/plugin/discovery/get.go +++ b/plugin/discovery/get.go @@ -27,6 +27,12 @@ import ( const protocolVersionHeader = "x-terraform-protocol-version" +const gpgVerificationError = `GPG signature verification error: +Terraform was unable to verify the GPG signature of the downloaded provider +files using the keys downloaded from the Terraform Registry. 
This may mean that +the publisher of the provider removed the key it was signed with, or that the +distributed files were changed after this version was released.` + var httpClient *http.Client var errVersionNotFound = errors.New("version not found") @@ -369,13 +375,14 @@ func (i *ProviderInstaller) getProviderChecksum(urls *response.TerraformProvider asciiArmor := urls.SigningKeys.GPGASCIIArmor() signer, err := verifySig(shasums, signature, asciiArmor) if err != nil { - return "", err + log.Printf("[ERROR] error verifying signature: %s", err) + return "", fmt.Errorf(gpgVerificationError) } // Display identity for GPG key which succeeded verifying the signature. // This could also be used to display to the user with i.Ui.Info(). identities := []string{} - for k, _ := range signer.Identities { + for k := range signer.Identities { identities = append(identities, k) } identity := strings.Join(identities, ", ") From c993e9bed690873b65809b40b0843b430051336c Mon Sep 17 00:00:00 2001 From: Justin Campbell Date: Mon, 19 Nov 2018 16:33:09 -0500 Subject: [PATCH 142/149] registry/response: Add protocols to DL resp --- registry/response/terraform_provider.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/registry/response/terraform_provider.go b/registry/response/terraform_provider.go index 0a4c3f9efb9c..08d382a482e3 100644 --- a/registry/response/terraform_provider.go +++ b/registry/response/terraform_provider.go @@ -45,13 +45,14 @@ type TerraformProviderPlatform struct { // structure for a provider platform with all details required to perform a // download. 
type TerraformProviderPlatformLocation struct { - OS string `json:"os"` - Arch string `json:"arch"` - Filename string `json:"filename"` - DownloadURL string `json:"download_url"` - ShasumsURL string `json:"shasums_url"` - ShasumsSignatureURL string `json:"shasums_signature_url"` - Shasum string `json:"shasum"` + Protocols []string `json:"protocols"` + OS string `json:"os"` + Arch string `json:"arch"` + Filename string `json:"filename"` + DownloadURL string `json:"download_url"` + ShasumsURL string `json:"shasums_url"` + ShasumsSignatureURL string `json:"shasums_signature_url"` + Shasum string `json:"shasum"` SigningKeys SigningKeyList `json:"signing_keys"` } From 97bdeddabe529b30f04dbebdc77d88d75179c66b Mon Sep 17 00:00:00 2001 From: Justin Campbell Date: Tue, 20 Nov 2018 14:32:59 -0500 Subject: [PATCH 143/149] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e803c8921812..42cf4d25d2ea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ IMPROVEMENTS: * command/state: Update and enable the `state show` command [GH-19200] * backend/remote: Implement the remote enhanced backend [GH-19299] +* plugin/discovery: Use signing keys from the Terraform Registry when downloading providers. 
[GH-19389] BUG FIXES: From 67041486c516cf1217c808d4a2365181afe692cc Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Tue, 20 Nov 2018 11:59:48 -0800 Subject: [PATCH 144/149] build: Travis-CI build should use vendor directory when building for Windows --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index ba4a2e45af82..6a554d726226 100644 --- a/.travis.yml +++ b/.travis.yml @@ -36,7 +36,7 @@ script: - make vendor-status - make test - make e2etest -- GOOS=windows go build +- GOOS=windows go build -mod=vendor # website-test is temporarily disabled while we get the website build back in shape after the v0.12 reorganization #- make website-test From e7fd446f740b62b05e3c69ba018bfbee7f1c0ffa Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Tue, 20 Nov 2018 21:17:48 +0100 Subject: [PATCH 145/149] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 42cf4d25d2ea..11208ce2652d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ BACKWARDS INCOMPATIBILITIES / NOTES: IMPROVEMENTS: * command/state: Update and enable the `state show` command [GH-19200] +* command/state: Lock the state when pushing a new state using `state push` [GH-19411] * backend/remote: Implement the remote enhanced backend [GH-19299] * plugin/discovery: Use signing keys from the Terraform Registry when downloading providers. [GH-19389] From a17f3170251d75b60257e23c95c980f1e68aa27d Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Thu, 15 Nov 2018 20:26:46 +0100 Subject: [PATCH 146/149] Change how to fall back from remote to local backend In order to support free organizations, we need a way to load the `remote` backend and then, depending on the used offering/plan, enable or disable remote operations. In other words, we should be able to dynamically fall back to the `local` backend if needed, after first configuring the `remote` backend. 
To make this work, we need to change the way this was done previously when the env var `TF_FORCE_LOCAL_BACKEND` was set. The clear difference of course being that the env var would be available on startup, while the used offering/plan is only known after being able to connect to TFE. --- backend/init/init.go | 11 +- backend/init/init_test.go | 42 --- backend/local/backend.go | 6 +- backend/local/backend_apply_test.go | 5 +- backend/local/backend_plan.go | 1 - backend/remote/backend.go | 245 ++++++++++-------- backend/remote/backend_apply.go | 8 +- backend/remote/backend_apply_test.go | 182 +++++++++++-- backend/remote/backend_mock.go | 5 +- backend/remote/backend_plan.go | 8 +- backend/remote/backend_plan_test.go | 121 ++++++++- backend/remote/backend_test.go | 61 +++-- backend/remote/cli.go | 7 + backend/remote/remote_test.go | 28 ++ .../test-fixtures/apply-destroy/apply.log | 3 + .../apply-policy-passed/apply.log | 3 + .../apply-policy-soft-failed/apply.log | 3 + .../test-fixtures/apply-variables/apply.log | 3 + backend/remote/test-fixtures/apply/apply.log | 3 + backend/remote/testing.go | 73 ++++++ 20 files changed, 580 insertions(+), 238 deletions(-) create mode 100644 backend/remote/remote_test.go diff --git a/backend/init/init.go b/backend/init/init.go index 0e4f7188b1ae..1ee473401dc9 100644 --- a/backend/init/init.go +++ b/backend/init/init.go @@ -3,7 +3,6 @@ package init import ( - "os" "sync" "github.com/hashicorp/terraform/backend" @@ -48,14 +47,8 @@ func Init(services *disco.Disco) { backends = map[string]backend.InitFn{ // Enhanced backends. - "local": func() backend.Backend { return backendLocal.New() }, - "remote": func() backend.Backend { - b := backendRemote.New(services) - if os.Getenv("TF_FORCE_LOCAL_BACKEND") != "" { - return backendLocal.NewWithBackend(b) - } - return b - }, + "local": func() backend.Backend { return backendLocal.New() }, + "remote": func() backend.Backend { return backendRemote.New(services) }, // Remote State backends. 
"artifactory": func() backend.Backend { return backendArtifactory.New() }, diff --git a/backend/init/init_test.go b/backend/init/init_test.go index 02eacb63831a..59653125ac8e 100644 --- a/backend/init/init_test.go +++ b/backend/init/init_test.go @@ -1,11 +1,8 @@ package init import ( - "os" "reflect" "testing" - - backendLocal "github.com/hashicorp/terraform/backend/local" ) func TestInit_backend(t *testing.T) { @@ -44,42 +41,3 @@ func TestInit_backend(t *testing.T) { }) } } - -func TestInit_forceLocalBackend(t *testing.T) { - // Initialize the backends map - Init(nil) - - enhancedBackends := []struct { - Name string - Type string - }{ - {"local", "nil"}, - {"remote", "*remote.Remote"}, - } - - // Set the TF_FORCE_LOCAL_BACKEND flag so all enhanced backends will - // return a local.Local backend with themselves as embedded backend. - if err := os.Setenv("TF_FORCE_LOCAL_BACKEND", "1"); err != nil { - t.Fatalf("error setting environment variable TF_FORCE_LOCAL_BACKEND: %v", err) - } - defer os.Unsetenv("TF_FORCE_LOCAL_BACKEND") - - // Make sure we always get the local backend. 
- for _, b := range enhancedBackends { - f := Backend(b.Name) - - local, ok := f().(*backendLocal.Local) - if !ok { - t.Fatalf("expected backend %q to be \"*local.Local\", got: %T", b.Name, f()) - } - - bType := "nil" - if local.Backend != nil { - bType = reflect.TypeOf(local.Backend).String() - } - - if bType != b.Type { - t.Fatalf("expected local.Backend to be %s, got: %s", b.Type, bType) - } - } -} diff --git a/backend/local/backend.go b/backend/local/backend.go index 5de7e1818c63..eb2053b23d57 100644 --- a/backend/local/backend.go +++ b/backend/local/backend.go @@ -258,9 +258,6 @@ func (b *Local) DeleteWorkspace(name string) error { } func (b *Local) StateMgr(name string) (statemgr.Full, error) { - statePath, stateOutPath, backupPath := b.StatePaths(name) - log.Printf("[TRACE] backend/local: state manager for workspace %q will:\n - read initial snapshot from %s\n - write new snapshots to %s\n - create any backup at %s", name, statePath, stateOutPath, backupPath) - // If we have a backend handling state, delegate to that. 
if b.Backend != nil { return b.Backend.StateMgr(name) @@ -274,6 +271,9 @@ func (b *Local) StateMgr(name string) (statemgr.Full, error) { return nil, err } + statePath, stateOutPath, backupPath := b.StatePaths(name) + log.Printf("[TRACE] backend/local: state manager for workspace %q will:\n - read initial snapshot from %s\n - write new snapshots to %s\n - create any backup at %s", name, statePath, stateOutPath, backupPath) + s := statemgr.NewFilesystemBetweenPaths(statePath, stateOutPath) if backupPath != "" { s.SetBackupPath(backupPath) diff --git a/backend/local/backend_apply_test.go b/backend/local/backend_apply_test.go index f9664b8ea757..940d08bb0333 100644 --- a/backend/local/backend_apply_test.go +++ b/backend/local/backend_apply_test.go @@ -25,8 +25,8 @@ import ( func TestLocal_applyBasic(t *testing.T) { b, cleanup := TestLocal(t) defer cleanup() - p := TestLocalProvider(t, b, "test", applyFixtureSchema()) + p := TestLocalProvider(t, b, "test", applyFixtureSchema()) p.ApplyResourceChangeResponse = providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{ "id": cty.StringVal("yes"), "ami": cty.StringVal("bar"), @@ -95,8 +95,8 @@ func TestLocal_applyEmptyDir(t *testing.T) { func TestLocal_applyEmptyDirDestroy(t *testing.T) { b, cleanup := TestLocal(t) defer cleanup() - p := TestLocalProvider(t, b, "test", &terraform.ProviderSchema{}) + p := TestLocalProvider(t, b, "test", &terraform.ProviderSchema{}) p.ApplyResourceChangeResponse = providers.ApplyResourceChangeResponse{} op, configCleanup := testOperationApply(t, "./test-fixtures/empty") @@ -122,6 +122,7 @@ func TestLocal_applyEmptyDirDestroy(t *testing.T) { func TestLocal_applyError(t *testing.T) { b, cleanup := TestLocal(t) defer cleanup() + p := TestLocalProvider(t, b, "test", nil) p.GetSchemaReturn = &terraform.ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ diff --git a/backend/local/backend_plan.go b/backend/local/backend_plan.go index 950d83b77621..ac1a7701b73e 
100644 --- a/backend/local/backend_plan.go +++ b/backend/local/backend_plan.go @@ -188,7 +188,6 @@ func (b *Local) opPlan( } func (b *Local) renderPlan(plan *plans.Plan, schemas *terraform.Schemas) { - counts := map[plans.Action]int{} for _, change := range plan.Changes.Resources { counts[change.Action]++ diff --git a/backend/remote/backend.go b/backend/remote/backend.go index 47a2f1a2e7f2..1a48b39c3d69 100644 --- a/backend/remote/backend.go +++ b/backend/remote/backend.go @@ -25,6 +25,8 @@ import ( "github.com/mitchellh/cli" "github.com/mitchellh/colorstring" "github.com/zclconf/go-cty/cty" + + backendLocal "github.com/hashicorp/terraform/backend/local" ) const ( @@ -49,28 +51,36 @@ type Remote struct { // Operation. See Operation for more details. ContextOpts *terraform.ContextOpts - // client is the remote backend API client + // client is the remote backend API client. client *tfe.Client - // hostname of the remote backend server + // hostname of the remote backend server. hostname string - // organization is the organization that contains the target workspaces + // organization is the organization that contains the target workspaces. organization string - // workspace is used to map the default workspace to a remote workspace + // workspace is used to map the default workspace to a remote workspace. workspace string - // prefix is used to filter down a set of workspaces that use a single + // prefix is used to filter down a set of workspaces that use a single. // configuration prefix string - // schema defines the configuration for the backend + // schema defines the configuration for the backend. schema *schema.Backend // services is used for service discovery services *disco.Disco + // local, if non-nil, will be used for all enhanced behavior. This + // allows local behavior with the remote backend functioning as remote + // state storage backend. + local backend.Enhanced + + // forceLocal, if true, will force the use of the local backend. 
+ forceLocal bool + // opLock locks operations opLock sync.Mutex } @@ -84,6 +94,7 @@ func New(services *disco.Disco) *Remote { } } +// ConfigSchema implements backend.Enhanced. func (b *Remote) ConfigSchema() *configschema.Block { return &configschema.Block{ Attributes: map[string]*configschema.Attribute{ @@ -126,6 +137,7 @@ func (b *Remote) ConfigSchema() *configschema.Block { } } +// ValidateConfig implements backend.Enhanced. func (b *Remote) ValidateConfig(obj cty.Value) tfdiags.Diagnostics { var diags tfdiags.Diagnostics @@ -173,6 +185,7 @@ func (b *Remote) ValidateConfig(obj cty.Value) tfdiags.Diagnostics { return diags } +// Configure implements backend.Enhanced. func (b *Remote) Configure(obj cty.Value) tfdiags.Diagnostics { var diags tfdiags.Diagnostics @@ -255,8 +268,31 @@ func (b *Remote) Configure(obj cty.Value) tfdiags.Diagnostics { `Terraform Enterprise client: %s.`, err, ), )) + return diags } + // Check if the organization exists. + _, err = b.client.Organizations.Read(context.Background(), b.organization) + if err != nil { + if err == tfe.ErrResourceNotFound { + err = fmt.Errorf("organization %s does not exist", b.organization) + } + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Failed to read organization settings", + fmt.Sprintf( + `The "remote" backend encountered an unexpected error while reading the `+ + `organization settings: %s.`, err, + ), + cty.Path{cty.GetAttrStep{Name: "organization"}}, + )) + return diags + } + + // Configure a local backend for when we need to run operations locally. + b.local = backendLocal.NewWithBackend(b) + b.forceLocal = os.Getenv("TF_FORCE_LOCAL_BACKEND") != "" + return diags } @@ -292,67 +328,66 @@ func (b *Remote) token(hostname string) (string, error) { return "", nil } -// Workspaces returns a filtered list of remote workspace names. -func (b *Remote) Workspaces() ([]string, error) { - if b.prefix == "" { +// StateMgr implements backend.Enhanced. 
+func (b *Remote) StateMgr(name string) (state.State, error) { + if b.workspace == "" && name == backend.DefaultStateName { + return nil, backend.ErrDefaultWorkspaceNotSupported + } + if b.prefix == "" && name != backend.DefaultStateName { return nil, backend.ErrWorkspacesNotSupported } - return b.workspaces() -} -func (b *Remote) workspaces() ([]string, error) { - // Check if the configured organization exists. - _, err := b.client.Organizations.Read(context.Background(), b.organization) + workspaces, err := b.workspaces() if err != nil { - if err == tfe.ErrResourceNotFound { - return nil, fmt.Errorf("organization %s does not exist", b.organization) + return nil, fmt.Errorf("Error retrieving workspaces: %v", err) + } + + exists := false + for _, workspace := range workspaces { + if name == workspace { + exists = true + break } - return nil, err } - options := tfe.WorkspaceListOptions{} + // Configure the remote workspace name. switch { - case b.workspace != "": - options.Search = tfe.String(b.workspace) - case b.prefix != "": - options.Search = tfe.String(b.prefix) + case name == backend.DefaultStateName: + name = b.workspace + case b.prefix != "" && !strings.HasPrefix(name, b.prefix): + name = b.prefix + name } - // Create a slice to contain all the names. - var names []string - - for { - wl, err := b.client.Workspaces.List(context.Background(), b.organization, options) - if err != nil { - return nil, err + if !exists { + options := tfe.WorkspaceCreateOptions{ + Name: tfe.String(name), } - for _, w := range wl.Items { - if b.workspace != "" && w.Name == b.workspace { - names = append(names, backend.DefaultStateName) - continue - } - if b.prefix != "" && strings.HasPrefix(w.Name, b.prefix) { - names = append(names, strings.TrimPrefix(w.Name, b.prefix)) - } + // We only set the Terraform Version for the new workspace if this is + // a release candidate or a final release. 
+ if version.Prerelease == "" || strings.HasPrefix(version.Prerelease, "rc") { + options.TerraformVersion = tfe.String(version.String()) } - // Exit the loop when we've seen all pages. - if wl.CurrentPage >= wl.TotalPages { - break + _, err = b.client.Workspaces.Create(context.Background(), b.organization, options) + if err != nil { + return nil, fmt.Errorf("Error creating workspace %s: %v", name, err) } - - // Update the page number to get the next page. - options.PageNumber = wl.NextPage } - // Sort the result so we have consistent output. - sort.StringSlice(names).Sort() + client := &remoteClient{ + client: b.client, + organization: b.organization, + workspace: name, - return names, nil + // This is optionally set during Terraform Enterprise runs. + runID: os.Getenv("TFE_RUN_ID"), + } + + return &remote.State{Client: client}, nil } -// DeleteWorkspace removes the remote workspace if it exists. +// DeleteWorkspace implements backend.Enhanced. func (b *Remote) DeleteWorkspace(name string) error { if b.workspace == "" && name == backend.DefaultStateName { return backend.ErrDefaultWorkspaceNotSupported @@ -369,15 +404,6 @@ func (b *Remote) DeleteWorkspace(name string) error { name = b.prefix + name } - // Check if the configured organization exists. - _, err := b.client.Organizations.Read(context.Background(), b.organization) - if err != nil { - if err == tfe.ErrResourceNotFound { - return fmt.Errorf("organization %s does not exist", b.organization) - } - return err - } - client := &remoteClient{ client: b.client, organization: b.organization, @@ -387,78 +413,85 @@ func (b *Remote) DeleteWorkspace(name string) error { return client.Delete() } -// StateMgr returns the latest state of the given remote workspace. The -// workspace will be created if it doesn't exist. 
-func (b *Remote) StateMgr(name string) (state.State, error) { - if b.workspace == "" && name == backend.DefaultStateName { - return nil, backend.ErrDefaultWorkspaceNotSupported - } - if b.prefix == "" && name != backend.DefaultStateName { +// Workspaces implements backend.Enhanced. +func (b *Remote) Workspaces() ([]string, error) { + if b.prefix == "" { return nil, backend.ErrWorkspacesNotSupported } + return b.workspaces() +} - workspaces, err := b.workspaces() - if err != nil { - return nil, fmt.Errorf("Error retrieving workspaces: %v", err) - } - - exists := false - for _, workspace := range workspaces { - if name == workspace { - exists = true - break - } - } - - // Configure the remote workspace name. +// workspaces returns a filtered list of remote workspace names. +func (b *Remote) workspaces() ([]string, error) { + options := tfe.WorkspaceListOptions{} switch { - case name == backend.DefaultStateName: - name = b.workspace - case b.prefix != "" && !strings.HasPrefix(name, b.prefix): - name = b.prefix + name + case b.workspace != "": + options.Search = tfe.String(b.workspace) + case b.prefix != "": + options.Search = tfe.String(b.prefix) } - if !exists { - options := tfe.WorkspaceCreateOptions{ - Name: tfe.String(name), - } + // Create a slice to contain all the names. + var names []string - // We only set the Terraform Version for the new workspace if this is - // a release candidate or a final release. 
- if version.Prerelease == "" || strings.HasPrefix(version.Prerelease, "rc") { - options.TerraformVersion = tfe.String(version.String()) + for { + wl, err := b.client.Workspaces.List(context.Background(), b.organization, options) + if err != nil { + return nil, err } - _, err = b.client.Workspaces.Create(context.Background(), b.organization, options) - if err != nil { - return nil, fmt.Errorf("Error creating workspace %s: %v", name, err) + for _, w := range wl.Items { + if b.workspace != "" && w.Name == b.workspace { + names = append(names, backend.DefaultStateName) + continue + } + if b.prefix != "" && strings.HasPrefix(w.Name, b.prefix) { + names = append(names, strings.TrimPrefix(w.Name, b.prefix)) + } } - } - client := &remoteClient{ - client: b.client, - organization: b.organization, - workspace: name, + // Exit the loop when we've seen all pages. + if wl.CurrentPage >= wl.TotalPages { + break + } - // This is optionally set during Terraform Enterprise runs. - runID: os.Getenv("TFE_RUN_ID"), + // Update the page number to get the next page. + options.PageNumber = wl.NextPage } - return &remote.State{Client: client}, nil + // Sort the result so we have consistent output. + sort.StringSlice(names).Sort() + + return names, nil } -// Operation implements backend.Enhanced +// Operation implements backend.Enhanced. func (b *Remote) Operation(ctx context.Context, op *backend.Operation) (*backend.RunningOperation, error) { - // Configure the remote workspace name. + // Get the remote workspace name. + workspace := op.Workspace switch { case op.Workspace == backend.DefaultStateName: - op.Workspace = b.workspace + workspace = b.workspace case b.prefix != "" && !strings.HasPrefix(op.Workspace, b.prefix): - op.Workspace = b.prefix + op.Workspace + workspace = b.prefix + op.Workspace } + // Retrieve the workspace for this operation. 
+ w, err := b.client.Workspaces.Read(ctx, b.organization, workspace) + if err != nil { + return nil, generalError("Failed to retrieve workspace", err) + } + + // Check if we need to use the local backend to run the operation. + if b.forceLocal || !w.Operations { + return b.local.Operation(ctx, op) + } + + // Set the remote workspace name. + op.Workspace = w.Name + // Determine the function to call for our operation - var f func(context.Context, context.Context, *backend.Operation) (*tfe.Run, error) + var f func(context.Context, context.Context, *backend.Operation, *tfe.Workspace) (*tfe.Run, error) switch op.Type { case backend.OperationTypePlan: f = b.opPlan @@ -499,7 +532,7 @@ func (b *Remote) Operation(ctx context.Context, op *backend.Operation) (*backend defer b.opLock.Unlock() - r, opErr := f(stopCtx, cancelCtx, op) + r, opErr := f(stopCtx, cancelCtx, op, w) if opErr != nil && opErr != context.Canceled { b.ReportResult(runningOp, opErr) return diff --git a/backend/remote/backend_apply.go b/backend/remote/backend_apply.go index ae6d1eeacf4e..4d78a0437168 100644 --- a/backend/remote/backend_apply.go +++ b/backend/remote/backend_apply.go @@ -12,15 +12,9 @@ import ( "github.com/hashicorp/terraform/tfdiags" ) -func (b *Remote) opApply(stopCtx, cancelCtx context.Context, op *backend.Operation) (*tfe.Run, error) { +func (b *Remote) opApply(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { log.Printf("[INFO] backend/remote: starting Apply operation") - // Retrieve the workspace used to run this operation in. 
- w, err := b.client.Workspaces.Read(stopCtx, b.organization, op.Workspace) - if err != nil { - return nil, generalError("Failed to retrieve workspace", err) - } - var diags tfdiags.Diagnostics if !w.Permissions.CanUpdate { diff --git a/backend/remote/backend_apply_test.go b/backend/remote/backend_apply_test.go index be7345e60701..f68dea0ccc54 100644 --- a/backend/remote/backend_apply_test.go +++ b/backend/remote/backend_apply_test.go @@ -64,11 +64,14 @@ func TestRemote_applyBasic(t *testing.T) { } output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("missing plan summery in output: %s", output) + t.Fatalf("expected plan summery in output: %s", output) } if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("missing apply summery in output: %s", output) + t.Fatalf("expected apply summery in output: %s", output) } } @@ -407,11 +410,14 @@ func TestRemote_applyAutoApprove(t *testing.T) { } output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("missing plan summery in output: %s", output) + t.Fatalf("expected plan summery in output: %s", output) } if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("missing apply summery in output: %s", output) + t.Fatalf("expected apply summery in output: %s", output) } } @@ -460,11 +466,120 @@ func TestRemote_applyWithAutoApply(t *testing.T) { } output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } + if 
!strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestRemote_applyForceLocal(t *testing.T) { + // Set TF_FORCE_LOCAL_BACKEND so the remote backend will use + // the local backend with itself as embedded backend. + if err := os.Setenv("TF_FORCE_LOCAL_BACKEND", "1"); err != nil { + t.Fatalf("error setting environment variable TF_FORCE_LOCAL_BACKEND: %v", err) + } + defer os.Unsetenv("TF_FORCE_LOCAL_BACKEND") + + b := testBackendDefault(t) + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply") + defer configCleanup() + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("unexpected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } + if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { + t.Fatalf("expected apply summery in output: %s", output) + } +} + +func TestRemote_applyWorkspaceWithoutOperations(t *testing.T) { + b := testBackendNoDefault(t) + ctx := context.Background() + + // Create a named workspace that doesn't allow 
operations. + _, err := b.client.Workspaces.Create( + ctx, + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.prefix + "no-operations"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup := testOperationApply(t, "./test-fixtures/apply") + defer configCleanup() + + input := testInput(t, map[string]string{ + "approve": "yes", + }) + + op.UIIn = input + op.UIOut = b.CLI + op.Workspace = "no-operations" + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + if len(input.answers) > 0 { + t.Fatalf("expected no unused answers, got: %v", input.answers) + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("unexpected remote backend header in output: %s", output) + } if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("missing plan summery in output: %s", output) + t.Fatalf("expected plan summery in output: %s", output) } if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("missing apply summery in output: %s", output) + t.Fatalf("expected apply summery in output: %s", output) } } @@ -526,8 +641,11 @@ func TestRemote_applyLockTimeout(t *testing.T) { } output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } if !strings.Contains(output, "Lock timeout exceeded") { - t.Fatalf("missing lock timout error in output: %s", output) + t.Fatalf("expected lock timout error in output: %s", output) } if strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { 
t.Fatalf("unexpected plan summery in output: %s", output) @@ -570,11 +688,14 @@ func TestRemote_applyDestroy(t *testing.T) { } output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } if !strings.Contains(output, "0 to add, 0 to change, 1 to destroy") { - t.Fatalf("missing plan summery in output: %s", output) + t.Fatalf("expected plan summery in output: %s", output) } if !strings.Contains(output, "0 added, 0 changed, 1 destroyed") { - t.Fatalf("missing apply summery in output: %s", output) + t.Fatalf("expected apply summery in output: %s", output) } } @@ -643,14 +764,17 @@ func TestRemote_applyPolicyPass(t *testing.T) { } output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("missing plan summery in output: %s", output) + t.Fatalf("expected plan summery in output: %s", output) } if !strings.Contains(output, "Sentinel Result: true") { - t.Fatalf("missing polic check result in output: %s", output) + t.Fatalf("expected polic check result in output: %s", output) } if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("missing apply summery in output: %s", output) + t.Fatalf("expected apply summery in output: %s", output) } } @@ -691,11 +815,14 @@ func TestRemote_applyPolicyHardFail(t *testing.T) { } output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("missing plan summery in output: %s", output) + t.Fatalf("expected plan summery in output: %s", output) } if 
!strings.Contains(output, "Sentinel Result: false") { - t.Fatalf("missing policy check result in output: %s", output) + t.Fatalf("expected policy check result in output: %s", output) } if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { t.Fatalf("unexpected apply summery in output: %s", output) @@ -735,14 +862,17 @@ func TestRemote_applyPolicySoftFail(t *testing.T) { } output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("missing plan summery in output: %s", output) + t.Fatalf("expected plan summery in output: %s", output) } if !strings.Contains(output, "Sentinel Result: false") { - t.Fatalf("missing policy check result in output: %s", output) + t.Fatalf("expected policy check result in output: %s", output) } if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("missing apply summery in output: %s", output) + t.Fatalf("expected apply summery in output: %s", output) } } @@ -784,11 +914,14 @@ func TestRemote_applyPolicySoftFailAutoApprove(t *testing.T) { } output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("missing plan summery in output: %s", output) + t.Fatalf("expected plan summery in output: %s", output) } if !strings.Contains(output, "Sentinel Result: false") { - t.Fatalf("missing policy check result in output: %s", output) + t.Fatalf("expected policy check result in output: %s", output) } if strings.Contains(output, "1 added, 0 changed, 0 destroyed") { t.Fatalf("unexpected apply summery in output: %s", output) @@ -841,14 +974,17 @@ func TestRemote_applyPolicySoftFailAutoApply(t 
*testing.T) { } output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running apply in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("missing plan summery in output: %s", output) + t.Fatalf("expected plan summery in output: %s", output) } if !strings.Contains(output, "Sentinel Result: false") { - t.Fatalf("missing policy check result in output: %s", output) + t.Fatalf("expected policy check result in output: %s", output) } if !strings.Contains(output, "1 added, 0 changed, 0 destroyed") { - t.Fatalf("missing apply summery in output: %s", output) + t.Fatalf("expected apply summery in output: %s", output) } } @@ -875,6 +1011,6 @@ func TestRemote_applyWithRemoteError(t *testing.T) { output := b.CLI.(*cli.MockUi).OutputWriter.String() if !strings.Contains(output, "null_resource.foo: 1 error") { - t.Fatalf("missing apply error in output: %s", output) + t.Fatalf("expected apply error in output: %s", output) } } diff --git a/backend/remote/backend_mock.go b/backend/remote/backend_mock.go index eac6b68392f0..c65de02585b3 100644 --- a/backend/remote/backend_mock.go +++ b/backend/remote/backend_mock.go @@ -905,8 +905,9 @@ func (m *mockWorkspaces) List(ctx context.Context, organization string, options func (m *mockWorkspaces) Create(ctx context.Context, organization string, options tfe.WorkspaceCreateOptions) (*tfe.Workspace, error) { w := &tfe.Workspace{ - ID: generateID("ws-"), - Name: *options.Name, + ID: generateID("ws-"), + Name: *options.Name, + Operations: !strings.HasSuffix(*options.Name, "no-operations"), Permissions: &tfe.WorkspacePermissions{ CanQueueRun: true, CanUpdate: true, diff --git a/backend/remote/backend_plan.go b/backend/remote/backend_plan.go index 2fdea7781950..8c48d83c678d 100644 --- a/backend/remote/backend_plan.go +++ b/backend/remote/backend_plan.go @@ -18,15 +18,9 @@ import ( 
"github.com/hashicorp/terraform/tfdiags" ) -func (b *Remote) opPlan(stopCtx, cancelCtx context.Context, op *backend.Operation) (*tfe.Run, error) { +func (b *Remote) opPlan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { log.Printf("[INFO] backend/remote: starting Plan operation") - // Retrieve the workspace used to run this operation in. - w, err := b.client.Workspaces.Read(stopCtx, b.organization, op.Workspace) - if err != nil { - return nil, generalError("Failed to retrieve workspace", err) - } - var diags tfdiags.Diagnostics if !w.Permissions.CanQueueRun { diff --git a/backend/remote/backend_plan_test.go b/backend/remote/backend_plan_test.go index 6ce609d4318d..60c052d50584 100644 --- a/backend/remote/backend_plan_test.go +++ b/backend/remote/backend_plan_test.go @@ -54,8 +54,11 @@ func TestRemote_planBasic(t *testing.T) { } output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("missing plan summery in output: %s", output) + t.Fatalf("expected plan summery in output: %s", output) } } @@ -284,6 +287,86 @@ func TestRemote_planNoConfig(t *testing.T) { } } +func TestRemote_planForceLocal(t *testing.T) { + // Set TF_FORCE_LOCAL_BACKEND so the remote backend will use + // the local backend with itself as embedded backend. 
+ if err := os.Setenv("TF_FORCE_LOCAL_BACKEND", "1"); err != nil { + t.Fatalf("error setting environment variable TF_FORCE_LOCAL_BACKEND: %v", err) + } + defer os.Unsetenv("TF_FORCE_LOCAL_BACKEND") + + b := testBackendDefault(t) + + op, configCleanup := testOperationPlan(t, "./test-fixtures/plan") + defer configCleanup() + + op.Workspace = backend.DefaultStateName + + run, err := b.Operation(context.Background(), op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("unexpected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } +} + +func TestRemote_planWorkspaceWithoutOperations(t *testing.T) { + b := testBackendNoDefault(t) + ctx := context.Background() + + // Create a named workspace that doesn't allow operations. 
+ _, err := b.client.Workspaces.Create( + ctx, + b.organization, + tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.prefix + "no-operations"), + }, + ) + if err != nil { + t.Fatalf("error creating named workspace: %v", err) + } + + op, configCleanup := testOperationPlan(t, "./test-fixtures/plan") + defer configCleanup() + + op.Workspace = "no-operations" + + run, err := b.Operation(ctx, op) + if err != nil { + t.Fatalf("error starting operation: %v", err) + } + + <-run.Done() + if run.Result != backend.OperationSuccess { + t.Fatalf("operation failed: %s", b.CLI.(*cli.MockUi).ErrorWriter.String()) + } + if run.PlanEmpty { + t.Fatalf("expected a non-empty plan") + } + + output := b.CLI.(*cli.MockUi).OutputWriter.String() + if strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("unexpected remote backend header in output: %s", output) + } + if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { + t.Fatalf("expected plan summery in output: %s", output) + } +} + func TestRemote_planLockTimeout(t *testing.T) { b := testBackendDefault(t) ctx := context.Background() @@ -342,8 +425,11 @@ func TestRemote_planLockTimeout(t *testing.T) { } output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } if !strings.Contains(output, "Lock timeout exceeded") { - t.Fatalf("missing lock timout error in output: %s", output) + t.Fatalf("expected lock timout error in output: %s", output) } if strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { t.Fatalf("unexpected plan summery in output: %s", output) @@ -428,8 +514,11 @@ func TestRemote_planWithWorkingDirectory(t *testing.T) { } output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } if !strings.Contains(output, "1 to 
add, 0 to change, 0 to destroy") { - t.Fatalf("missing plan summery in output: %s", output) + t.Fatalf("expected plan summery in output: %s", output) } } @@ -455,11 +544,14 @@ func TestRemote_planPolicyPass(t *testing.T) { } output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("missing plan summery in output: %s", output) + t.Fatalf("expected plan summery in output: %s", output) } if !strings.Contains(output, "Sentinel Result: true") { - t.Fatalf("missing polic check result in output: %s", output) + t.Fatalf("expected polic check result in output: %s", output) } } @@ -490,11 +582,14 @@ func TestRemote_planPolicyHardFail(t *testing.T) { } output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("missing plan summery in output: %s", output) + t.Fatalf("expected plan summery in output: %s", output) } if !strings.Contains(output, "Sentinel Result: false") { - t.Fatalf("missing policy check result in output: %s", output) + t.Fatalf("expected policy check result in output: %s", output) } } @@ -525,11 +620,14 @@ func TestRemote_planPolicySoftFail(t *testing.T) { } output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") { - t.Fatalf("missing plan summery in output: %s", output) + t.Fatalf("expected plan summery in output: %s", output) } if !strings.Contains(output, "Sentinel Result: false") { - t.Fatalf("missing policy check result in 
output: %s", output) + t.Fatalf("expected policy check result in output: %s", output) } } @@ -555,7 +653,10 @@ func TestRemote_planWithRemoteError(t *testing.T) { } output := b.CLI.(*cli.MockUi).OutputWriter.String() + if !strings.Contains(output, "Running plan in the remote backend") { + t.Fatalf("expected remote backend header in output: %s", output) + } if !strings.Contains(output, "null_resource.foo: 1 error") { - t.Fatalf("missing plan error in output: %s", output) + t.Fatalf("expected plan error in output: %s", output) } } diff --git a/backend/remote/backend_test.go b/backend/remote/backend_test.go index 6e348d397c9a..db476a231836 100644 --- a/backend/remote/backend_test.go +++ b/backend/remote/backend_test.go @@ -7,6 +7,8 @@ import ( "github.com/hashicorp/terraform/backend" "github.com/zclconf/go-cty/cty" + + backendLocal "github.com/hashicorp/terraform/backend/local" ) func TestRemote(t *testing.T) { @@ -32,6 +34,30 @@ func TestRemote_config(t *testing.T) { confErr string valErr string }{ + "with_a_nonexisting_organization": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("nonexisting"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }), + confErr: "organization nonexisting does not exist", + }, + "with_an_unknown_host": { + config: cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.StringVal("nonexisting.local"), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }), + confErr: "Host nonexisting.local does not provide a remote backend API", + }, "with_a_name": { config: cty.ObjectVal(map[string]cty.Value{ "hostname": cty.NullVal(cty.String), @@ -78,18 +104,6 @@ func TestRemote_config(t *testing.T) { }), 
valErr: `Only one of workspace "name" or "prefix" is allowed`, }, - "with_an_unknown_host": { - config: cty.ObjectVal(map[string]cty.Value{ - "hostname": cty.StringVal("nonexisting.local"), - "organization": cty.StringVal("hashicorp"), - "token": cty.NullVal(cty.String), - "workspaces": cty.ObjectVal(map[string]cty.Value{ - "name": cty.StringVal("prod"), - "prefix": cty.NullVal(cty.String), - }), - }), - confErr: "Host nonexisting.local does not provide a remote backend API", - }, } for name, tc := range cases { @@ -107,27 +121,22 @@ func TestRemote_config(t *testing.T) { confDiags := b.Configure(tc.config) if (confDiags.Err() == nil && tc.confErr != "") || (confDiags.Err() != nil && !strings.Contains(confDiags.Err().Error(), tc.confErr)) { - t.Fatalf("%s: unexpected configure result: %v", name, valDiags.Err()) + t.Fatalf("%s: unexpected configure result: %v", name, confDiags.Err()) } } } -func TestRemote_nonexistingOrganization(t *testing.T) { - msg := "does not exist" - - b := testBackendNoDefault(t) - b.organization = "nonexisting" - - if _, err := b.StateMgr("prod"); err == nil || !strings.Contains(err.Error(), msg) { - t.Fatalf("expected %q error, got: %v", msg, err) - } +func TestRemote_localBackend(t *testing.T) { + b := testBackendDefault(t) - if err := b.DeleteWorkspace("prod"); err == nil || !strings.Contains(err.Error(), msg) { - t.Fatalf("expected %q error, got: %v", msg, err) + local, ok := b.local.(*backendLocal.Local) + if !ok { + t.Fatalf("expected b.local to be \"*local.Local\", got: %T", b.local) } - if _, err := b.Workspaces(); err == nil || !strings.Contains(err.Error(), msg) { - t.Fatalf("expected %q error, got: %v", msg, err) + remote, ok := local.Backend.(*Remote) + if !ok { + t.Fatalf("expected local.Backend to be *remote.Remote, got: %T", remote) } } diff --git a/backend/remote/cli.go b/backend/remote/cli.go index a6aa1103fba2..5a6afa7ec43b 100644 --- a/backend/remote/cli.go +++ b/backend/remote/cli.go @@ -6,9 +6,16 @@ import ( // CLIInit 
implements backend.CLI func (b *Remote) CLIInit(opts *backend.CLIOpts) error { + if cli, ok := b.local.(backend.CLI); ok { + if err := cli.CLIInit(opts); err != nil { + return err + } + } + b.CLI = opts.CLI b.CLIColor = opts.CLIColor b.ShowDiagnostics = opts.ShowDiagnostics b.ContextOpts = opts.ContextOpts + return nil } diff --git a/backend/remote/remote_test.go b/backend/remote/remote_test.go new file mode 100644 index 000000000000..7fc332e490ce --- /dev/null +++ b/backend/remote/remote_test.go @@ -0,0 +1,28 @@ +package remote + +import ( + "flag" + "io/ioutil" + "log" + "os" + "testing" + + "github.com/hashicorp/terraform/helper/logging" +) + +func TestMain(m *testing.M) { + flag.Parse() + + if testing.Verbose() { + // if we're verbose, use the logging requested by TF_LOG + logging.SetOutput() + } else { + // otherwise silence all logs + log.SetOutput(ioutil.Discard) + } + + // Make sure TF_FORCE_LOCAL_BACKEND is unset + os.Unsetenv("TF_FORCE_LOCAL_BACKEND") + + os.Exit(m.Run()) +} diff --git a/backend/remote/test-fixtures/apply-destroy/apply.log b/backend/remote/test-fixtures/apply-destroy/apply.log index 34adfcd6bd5a..d126547d95cf 100644 --- a/backend/remote/test-fixtures/apply-destroy/apply.log +++ b/backend/remote/test-fixtures/apply-destroy/apply.log @@ -1,3 +1,6 @@ +Terraform v0.11.10 + +Initializing plugins and modules... null_resource.hello: Destroying... (ID: 8657651096157629581) null_resource.hello: Destruction complete after 0s diff --git a/backend/remote/test-fixtures/apply-policy-passed/apply.log b/backend/remote/test-fixtures/apply-policy-passed/apply.log index 89c0dbc42d1e..901994838f22 100644 --- a/backend/remote/test-fixtures/apply-policy-passed/apply.log +++ b/backend/remote/test-fixtures/apply-policy-passed/apply.log @@ -1,3 +1,6 @@ +Terraform v0.11.10 + +Initializing plugins and modules... null_resource.hello: Creating... 
null_resource.hello: Creation complete after 0s (ID: 8657651096157629581) diff --git a/backend/remote/test-fixtures/apply-policy-soft-failed/apply.log b/backend/remote/test-fixtures/apply-policy-soft-failed/apply.log index 89c0dbc42d1e..901994838f22 100644 --- a/backend/remote/test-fixtures/apply-policy-soft-failed/apply.log +++ b/backend/remote/test-fixtures/apply-policy-soft-failed/apply.log @@ -1,3 +1,6 @@ +Terraform v0.11.10 + +Initializing plugins and modules... null_resource.hello: Creating... null_resource.hello: Creation complete after 0s (ID: 8657651096157629581) diff --git a/backend/remote/test-fixtures/apply-variables/apply.log b/backend/remote/test-fixtures/apply-variables/apply.log index 89c0dbc42d1e..901994838f22 100644 --- a/backend/remote/test-fixtures/apply-variables/apply.log +++ b/backend/remote/test-fixtures/apply-variables/apply.log @@ -1,3 +1,6 @@ +Terraform v0.11.10 + +Initializing plugins and modules... null_resource.hello: Creating... null_resource.hello: Creation complete after 0s (ID: 8657651096157629581) diff --git a/backend/remote/test-fixtures/apply/apply.log b/backend/remote/test-fixtures/apply/apply.log index 89c0dbc42d1e..901994838f22 100644 --- a/backend/remote/test-fixtures/apply/apply.log +++ b/backend/remote/test-fixtures/apply/apply.log @@ -1,3 +1,6 @@ +Terraform v0.11.10 + +Initializing plugins and modules... null_resource.hello: Creating... 
null_resource.hello: Creation complete after 0s (ID: 8657651096157629581) diff --git a/backend/remote/testing.go b/backend/remote/testing.go index 0bb8d66c963e..54083dc231c3 100644 --- a/backend/remote/testing.go +++ b/backend/remote/testing.go @@ -11,6 +11,8 @@ import ( tfe "github.com/hashicorp/go-tfe" "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" "github.com/hashicorp/terraform/state/remote" "github.com/hashicorp/terraform/svchost" "github.com/hashicorp/terraform/svchost/auth" @@ -19,6 +21,8 @@ import ( "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" "github.com/zclconf/go-cty/cty" + + backendLocal "github.com/hashicorp/terraform/backend/local" ) const ( @@ -108,6 +112,9 @@ func testBackend(t *testing.T, obj cty.Value) *Remote { } } + // Set local to a local test backend. + b.local = testLocalBackend(t, b) + ctx := context.Background() // Create the organization. @@ -131,6 +138,29 @@ func testBackend(t *testing.T, obj cty.Value) *Remote { return b } +func testLocalBackend(t *testing.T, remote *Remote) backend.Enhanced { + b := backendLocal.NewWithBackend(remote) + + b.CLI = remote.CLI + b.ShowDiagnostics = remote.ShowDiagnostics + + // Add a test provider to the local backend. + p := backendLocal.TestLocalProvider(t, b, "null", &terraform.ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "null_resource": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + p.ApplyResourceChangeResponse = providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + })} + + return b +} + // testServer returns a *httptest.Server used for local testing. 
func testServer(t *testing.T) *httptest.Server { mux := http.NewServeMux() @@ -141,6 +171,49 @@ func testServer(t *testing.T) *httptest.Server { io.WriteString(w, `{"tfe.v2":"/api/v2/"}`) }) + // Respond to the initial query to read the organization settings. + mux.HandleFunc("/api/v2/organizations/hashicorp", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.api+json") + io.WriteString(w, `{ + "data": { + "id": "hashicorp", + "type": "organizations", + "attributes": { + "name": "hashicorp", + "created-at": "2017-09-07T14:34:40.492Z", + "email": "user@example.com", + "collaborator-auth-policy": "password", + "enterprise-plan": "premium", + "permissions": { + "can-update": true, + "can-destroy": true, + "can-create-team": true, + "can-create-workspace": true, + "can-update-oauth": true, + "can-update-api-token": true, + "can-update-sentinel": true, + "can-traverse": true, + "can-create-workspace-migration": true + } + } + } +}`) + }) + + // All tests that are assumed to pass will use the hashicorp organization, + // so for all other organization requests we will return a 404. 
+ mux.HandleFunc("/api/v2/organizations/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + io.WriteString(w, `{ + "errors": [ + { + "status": "404", + "title": "not found" + } + ] +}`) + }) + return httptest.NewServer(mux) } From 5e2af5ddff8021fb79a051cdd4e74633b3471dec Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Tue, 20 Nov 2018 22:51:57 +0100 Subject: [PATCH 147/149] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 11208ce2652d..50870e890dcd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ IMPROVEMENTS: * command/state: Update and enable the `state show` command [GH-19200] * command/state: Lock the state when pushing a new state using `state push` [GH-19411] * backend/remote: Implement the remote enhanced backend [GH-19299] +* backend/remote: Support remote state only usage by dynamically falling back the local backend [GH-19378] * plugin/discovery: Use signing keys from the Terraform Registry when downloading providers. 
[GH-19389] BUG FIXES: From 5b0b7d0a7847fe47a2d38fc84c740294f34e37ed Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Tue, 20 Nov 2018 22:52:22 +0100 Subject: [PATCH 148/149] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 50870e890dcd..960b39a457fa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,7 @@ IMPROVEMENTS: * command/state: Update and enable the `state show` command [GH-19200] * command/state: Lock the state when pushing a new state using `state push` [GH-19411] * backend/remote: Implement the remote enhanced backend [GH-19299] -* backend/remote: Support remote state only usage by dynamically falling back the local backend [GH-19378] +* backend/remote: Support remote state only usage by dynamically falling back to the local backend [GH-19378] * plugin/discovery: Use signing keys from the Terraform Registry when downloading providers. [GH-19389] BUG FIXES: From 7d5db9522f4e67af435e44d236fe45e3bcc05ce3 Mon Sep 17 00:00:00 2001 From: Nick Fagerlund Date: Tue, 20 Nov 2018 16:54:18 -0800 Subject: [PATCH 149/149] website: Fix plugin path on Windows (#19423) ...and one other reference to the application data directory. Context: https://docs.microsoft.com/en-us/windows/desktop/shell/knownfolderid#folderid_roamingappdata In newer Windows versions, the folder accessible as `%APPDATA%` (and via various APIs) is actually at something like "documents and settings\user\application data\roaming", while earlier versions omit the "\roaming" part of the path. This means you can confuse people by referring to the "application data" directory by its human name, because "roaming" is the real application data directory, but it looks like a subdirectory of "application data". Thus, it's less confusing to just use the `%APPDATA%` variable, with the added benefit that you can copy and paste the path and it'll just work in most places. 
--- website/docs/commands/cli-config.html.markdown | 2 +- website/docs/commands/init.html.markdown | 2 +- website/docs/configuration/providers.html.md | 4 ++-- website/docs/plugins/basics.html.md | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/website/docs/commands/cli-config.html.markdown b/website/docs/commands/cli-config.html.markdown index eb420fd82ae5..72e114df479e 100644 --- a/website/docs/commands/cli-config.html.markdown +++ b/website/docs/commands/cli-config.html.markdown @@ -19,7 +19,7 @@ The configuration is placed in a single file whose location depends on the host operating system: * On Windows, the file must be named named `terraform.rc` and placed - in the relevant user's "Application Data" directory. The physical location + in the relevant user's `%APPDATA%` directory. The physical location of this directory depends on your Windows version and system configuration; use `$env:APPDATA` in PowerShell to find its location on your system. * On all other systems, the file must be named `.terraformrc` (note diff --git a/website/docs/commands/init.html.markdown b/website/docs/commands/init.html.markdown index 1c78ce7a08bf..8e4a09dd0c03 100644 --- a/website/docs/commands/init.html.markdown +++ b/website/docs/commands/init.html.markdown @@ -123,7 +123,7 @@ For [providers distributed by HashiCorp](/docs/providers/index.html), init will automatically download and install plugins if necessary. Plugins can also be manually installed in the user plugins directory, located at `~/.terraform.d/plugins` on most operating systems and -`\plugins` on Windows. +`%APPDATA%\terraform.d\plugins` on Windows. For more information about configuring and installing providers, see [Configuration: Providers](/docs/configuration/providers.html). 
diff --git a/website/docs/configuration/providers.html.md b/website/docs/configuration/providers.html.md index 28e4dcfdd5b5..2304f1c52410 100644 --- a/website/docs/configuration/providers.html.md +++ b/website/docs/configuration/providers.html.md @@ -211,8 +211,8 @@ locations, depending on the host operating system: Operating system | User plugins directory ------------------|----------------------- -Windows | `terraform.d\plugins` in your user's "Application Data" directory -All other systems | `.terraform.d/plugins` in your user's home directory +Windows | `%APPDATA%\terraform.d\plugins` +All other systems | `~/.terraform.d/plugins` Once a plugin is installed, `terraform init` can initialize it normally. diff --git a/website/docs/plugins/basics.html.md b/website/docs/plugins/basics.html.md index dafcec5bbfa1..bba437822db6 100644 --- a/website/docs/plugins/basics.html.md +++ b/website/docs/plugins/basics.html.md @@ -39,7 +39,7 @@ in its respective documentation section. The [provider plugins distributed by HashiCorp](/docs/providers/index.html) are automatically installed by `terraform init`. Third-party plugins (both providers and provisioners) can be manually installed into the user plugins -directory, located at `\terraform.d\plugins` on Windows and +directory, located at `%APPDATA%\terraform.d\plugins` on Windows and `~/.terraform.d/plugins` on other systems. For more information, see: