diff --git a/.changelog/39702.txt b/.changelog/39702.txt new file mode 100644 index 000000000000..a083b1ea9e31 --- /dev/null +++ b/.changelog/39702.txt @@ -0,0 +1,2 @@ +```release-note:bug +resource/aws_workspaces_directory : Fix `InvalidParameterValuesException: Provided set of IP Group IDs are not valid` errors on Update \ No newline at end of file diff --git a/internal/service/workspaces/bundle_data_source.go b/internal/service/workspaces/bundle_data_source.go index 122a629aa0f9..7f159d4ae380 100644 --- a/internal/service/workspaces/bundle_data_source.go +++ b/internal/service/workspaces/bundle_data_source.go @@ -5,7 +5,6 @@ package workspaces import ( "context" - "fmt" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/workspaces" @@ -14,11 +13,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_workspaces_bundle") -func DataSourceBundle() *schema.Resource { +// @SDKDataSource("aws_workspaces_bundle", name="Bundle") +func dataSourceBundle() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceWorkspaceBundleRead, @@ -28,20 +29,6 @@ func DataSourceBundle() *schema.Resource { Optional: true, ConflictsWith: []string{names.AttrOwner, names.AttrName}, }, - names.AttrName: { - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"bundle_id"}, - }, - names.AttrOwner: { - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"bundle_id"}, - }, - names.AttrDescription: { - Type: schema.TypeString, - Computed: true, - }, "compute_type": { Type: schema.TypeList, Computed: true, @@ -54,7 +41,21 @@ func DataSourceBundle() *schema.Resource { }, }, }, - "user_storage": { + names.AttrDescription: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"bundle_id"}, + }, + names.AttrOwner: { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"bundle_id"}, + }, + "root_storage": { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ @@ -66,7 +67,7 @@ func DataSourceBundle() *schema.Resource { }, }, }, - "root_storage": { + "user_storage": { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ @@ -86,96 +87,95 @@ func dataSourceWorkspaceBundleRead(ctx context.Context, d *schema.ResourceData, var diags diag.Diagnostics conn := meta.(*conns.AWSClient).WorkSpacesClient(ctx) - var bundle types.WorkspaceBundle - - if bundleID, ok := d.GetOk("bundle_id"); ok { - resp, err := conn.DescribeWorkspaceBundles(ctx, &workspaces.DescribeWorkspaceBundlesInput{ - BundleIds: []string{bundleID.(string)}, - }) - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading WorkSpaces Workspace Bundle (%s): %s", bundleID, err) - } - - if len(resp.Bundles) != 1 { - return sdkdiag.AppendErrorf(diags, "expected 1 result for WorkSpaces Workspace Bundle %q, found %d", bundleID, len(resp.Bundles)) - } + var bundle *types.WorkspaceBundle + var err error - if len(resp.Bundles) == 0 { - return sdkdiag.AppendErrorf(diags, "no WorkSpaces Workspace Bundle with ID %q found", bundleID) + if v, ok := d.GetOk("bundle_id"); ok { + input := 
&workspaces.DescribeWorkspaceBundlesInput{ + BundleIds: []string{v.(string)}, } - - bundle = resp.Bundles[0] + bundle, err = findBundle(ctx, conn, input, tfslices.PredicateTrue[*types.WorkspaceBundle]()) } - if name, ok := d.GetOk(names.AttrName); ok { - id := name + if v, ok := d.GetOk(names.AttrName); ok { + name := v.(string) input := &workspaces.DescribeWorkspaceBundlesInput{} - - if owner, ok := d.GetOk(names.AttrOwner); ok { - id = fmt.Sprintf("%s:%s", owner, id) - input.Owner = aws.String(owner.(string)) + if v, ok := d.GetOk(names.AttrOwner); ok { + input.Owner = aws.String(v.(string)) } - name := name.(string) - - paginator := workspaces.NewDescribeWorkspaceBundlesPaginator(conn, input, func(out *workspaces.DescribeWorkspaceBundlesPaginatorOptions) {}) - - entryNotFound := true - for paginator.HasMorePages() && entryNotFound { - out, err := paginator.NextPage(ctx) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading WorkSpaces Workspace Bundle (%s): %s", id, err) - } - - for _, b := range out.Bundles { - if aws.ToString(b.Name) == name { - bundle = b - entryNotFound = false - } - } - } + bundle, err = findBundle(ctx, conn, input, func(v *types.WorkspaceBundle) bool { + return aws.ToString(v.Name) == name + }) + } - if entryNotFound { - return sdkdiag.AppendErrorf(diags, "no WorkSpaces Workspace Bundle with name %q found", name) - } + if err != nil { + return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("WorkSpaces Bundle", err)) } d.SetId(aws.ToString(bundle.BundleId)) d.Set("bundle_id", bundle.BundleId) - d.Set(names.AttrDescription, bundle.Description) - d.Set(names.AttrName, bundle.Name) - d.Set(names.AttrOwner, bundle.Owner) - - computeType := make([]map[string]interface{}, 1) + tfMap := make([]map[string]interface{}, 1) if bundle.ComputeType != nil { - computeType[0] = map[string]interface{}{ + tfMap[0] = map[string]interface{}{ names.AttrName: string(bundle.ComputeType.Name), } } - if err := d.Set("compute_type", computeType); err != nil { + if err := d.Set("compute_type", tfMap); err != nil { return sdkdiag.AppendErrorf(diags, "setting compute_type: %s", err) } - - rootStorage := make([]map[string]interface{}, 1) + d.Set(names.AttrDescription, bundle.Description) + d.Set(names.AttrName, bundle.Name) + d.Set(names.AttrOwner, bundle.Owner) + tfMap = make([]map[string]interface{}, 1) if bundle.RootStorage != nil { - rootStorage[0] = map[string]interface{}{ + tfMap[0] = map[string]interface{}{ "capacity": aws.ToString(bundle.RootStorage.Capacity), } } - if err := d.Set("root_storage", rootStorage); err != nil { + if err := d.Set("root_storage", tfMap); err != nil { return sdkdiag.AppendErrorf(diags, "setting root_storage: %s", err) } - - userStorage := make([]map[string]interface{}, 1) + tfMap = make([]map[string]interface{}, 1) if bundle.UserStorage != nil { - userStorage[0] = map[string]interface{}{ + tfMap[0] = map[string]interface{}{ "capacity": aws.ToString(bundle.UserStorage.Capacity), } } - if err := d.Set("user_storage", userStorage); err != nil { + if err := d.Set("user_storage", tfMap); err != nil { return sdkdiag.AppendErrorf(diags, "setting user_storage: %s", err) } return diags } + +func findBundle(ctx context.Context, conn *workspaces.Client, input *workspaces.DescribeWorkspaceBundlesInput, filter tfslices.Predicate[*types.WorkspaceBundle]) (*types.WorkspaceBundle, error) { + output, err := findBundles(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + 
+func findBundles(ctx context.Context, conn *workspaces.Client, input *workspaces.DescribeWorkspaceBundlesInput, filter tfslices.Predicate[*types.WorkspaceBundle]) ([]types.WorkspaceBundle, error) { + var output []types.WorkspaceBundle + + pages := workspaces.NewDescribeWorkspaceBundlesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + for _, v := range page.Bundles { + if filter(&v) { + output = append(output, v) + } + } + } + + return output, nil +} diff --git a/internal/service/workspaces/bundle_data_source_test.go b/internal/service/workspaces/bundle_data_source_test.go index f076bc7eb3c6..efc41ef0393c 100644 --- a/internal/service/workspaces/bundle_data_source_test.go +++ b/internal/service/workspaces/bundle_data_source_test.go @@ -27,7 +27,7 @@ func testAccWorkspaceBundleDataSource_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccBundleDataSourceConfig_basic("wsb-b0s22j3d7"), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(dataSourceName, "bundle_id", "wsb-b0s22j3d7"), resource.TestCheckResourceAttr(dataSourceName, "compute_type.#", acctest.Ct1), resource.TestCheckResourceAttr(dataSourceName, "compute_type.0.name", "PERFORMANCE"), @@ -54,13 +54,13 @@ func testAccWorkspaceBundleDataSource_byOwnerName(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { - Config: testAccBundleDataSourceConfig_byOwnerName("AMAZON", "Value with Windows 10 and Office 2016"), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr(dataSourceName, "bundle_id", "wsb-df76rqys9"), + Config: testAccBundleDataSourceConfig_byOwnerName("Amazon", "Value with Ubuntu 22.04"), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceName, "bundle_id"), resource.TestCheckResourceAttr(dataSourceName, "compute_type.#", acctest.Ct1), resource.TestCheckResourceAttr(dataSourceName, "compute_type.0.name", "VALUE"), resource.TestCheckResourceAttrSet(dataSourceName, names.AttrDescription), - resource.TestCheckResourceAttr(dataSourceName, names.AttrName, "Value with Windows 10 and Office 2016"), + resource.TestCheckResourceAttr(dataSourceName, names.AttrName, "Value with Ubuntu 22.04"), resource.TestCheckResourceAttr(dataSourceName, names.AttrOwner, "Amazon"), resource.TestCheckResourceAttr(dataSourceName, "root_storage.#", acctest.Ct1), resource.TestCheckResourceAttr(dataSourceName, "root_storage.0.capacity", "80"), @@ -119,7 +119,7 @@ func testAccBundlePreCheck(t *testing.T) { func testAccBundleDataSourceConfig_basic(bundleID string) string { return fmt.Sprintf(` data "aws_workspaces_bundle" "test" { - bundle_id = %q + bundle_id = %[1]q } `, bundleID) } @@ -127,8 +127,8 @@ data "aws_workspaces_bundle" "test" { func testAccBundleDataSourceConfig_byOwnerName(owner, name string) string { return fmt.Sprintf(` data "aws_workspaces_bundle" "test" { - owner = %q - name = %q + owner = %[1]q + name = %[2]q } `, owner, name) } @@ -136,9 +136,9 @@ data "aws_workspaces_bundle" "test" { func testAccBundleDataSourceConfig_idAndOwnerNameConflict(bundleID, owner, name string) string { return fmt.Sprintf(` data "aws_workspaces_bundle" "test" { - bundle_id = %q - owner = %q - name = %q + bundle_id = %[1]q + owner = %[2]q + name = %[3]q } `, bundleID, owner, name) } @@ -146,7 +146,7 @@ data "aws_workspaces_bundle" "test" { func 
testAccBundleDataSourceConfig_privateOwner(name string) string { return fmt.Sprintf(` data "aws_workspaces_bundle" "test" { - name = %q + name = %[1]q } `, name) } diff --git a/internal/service/workspaces/connection_alias.go b/internal/service/workspaces/connection_alias.go index caff637895ec..789e37488509 100644 --- a/internal/service/workspaces/connection_alias.go +++ b/internal/service/workspaces/connection_alias.go @@ -5,60 +5,53 @@ package workspaces import ( "context" - "errors" + "fmt" "time" "github.com/aws/aws-sdk-go-v2/service/workspaces" awstypes "github.com/aws/aws-sdk-go-v2/service/workspaces/types" "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" - "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" - "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -var ResourceConnectionAlias = newResourceConnectionAlias - // @FrameworkResource(name="Connection Alias") // @Tags(identifierAttribute="id") -func newResourceConnectionAlias(_ context.Context) (resource.ResourceWithConfigure, error) { - r := &resourceConnectionAlias{} +func newConnectionAliasResource(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &connectionAliasResource{} r.SetDefaultCreateTimeout(30 * time.Minute) - r.SetDefaultUpdateTimeout(30 * time.Minute) r.SetDefaultDeleteTimeout(30 * time.Minute) return r, nil } -const ( - ResNameConnectionAlias = "Connection Alias" -) - -type resourceConnectionAlias struct { +type connectionAliasResource struct { framework.ResourceWithConfigure + framework.WithNoOpUpdate[connectionAliasResourceModel] + framework.WithImportByID framework.WithTimeouts } -func (r *resourceConnectionAlias) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = "aws_workspaces_connection_alias" +func (*connectionAliasResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + response.TypeName = "aws_workspaces_connection_alias" } -func (r *resourceConnectionAlias) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { - resp.Schema = schema.Schema{ +func (r *connectionAliasResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ - names.AttrID: framework.IDAttribute(), "connection_string": schema.StringAttribute{ Required: true, PlanModifiers: []planmodifier.String{ @@ -66,19 +59,20 @@ func (r 
*resourceConnectionAlias) Schema(ctx context.Context, req resource.Schem }, Description: "The connection string specified for the connection alias. The connection string must be in the form of a fully qualified domain name (FQDN), such as www.example.com.", }, + names.AttrID: framework.IDAttribute(), names.AttrOwnerAccountID: schema.StringAttribute{ - Computed: true, - Description: "The identifier of the Amazon Web Services account that owns the connection alias.", + Computed: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.UseStateForUnknown(), }, + Description: "The identifier of the Amazon Web Services account that owns the connection alias.", }, names.AttrState: schema.StringAttribute{ - Computed: true, - Description: "The current state of the connection alias.", + Computed: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.UseStateForUnknown(), }, + Description: "The current state of the connection alias.", }, names.AttrTags: tftags.TagsAttribute(), names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), @@ -86,184 +80,159 @@ func (r *resourceConnectionAlias) Schema(ctx context.Context, req resource.Schem Blocks: map[string]schema.Block{ names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ Create: true, - Update: true, Delete: true, }), }, } } -func (r *resourceConnectionAlias) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - conn := r.Meta().WorkSpacesClient(ctx) - - var plan resourceConnectionAliasData - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - if resp.Diagnostics.HasError() { +func (r *connectionAliasResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data connectionAliasResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } - in := &workspaces.CreateConnectionAliasInput{ - ConnectionString: plan.ConnectionString.ValueStringPointer(), + conn := r.Meta().WorkSpacesClient(ctx) + + input := &workspaces.CreateConnectionAliasInput{ + ConnectionString: fwflex.StringFromFramework(ctx, data.ConnectionString), Tags: getTagsIn(ctx), } - out, err := conn.CreateConnectionAlias(ctx, in) + output, err := conn.CreateConnectionAlias(ctx, input) + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.WorkSpaces, create.ErrActionCreating, ResNameConnectionAlias, plan.ConnectionString.String(), err), - err.Error(), - ) - return - } - if out == nil || out.AliasId == nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.WorkSpaces, create.ErrActionCreating, ResNameConnectionAlias, plan.ConnectionString.String(), nil), - errors.New("empty output").Error(), - ) + response.Diagnostics.AddError("creating WorkSpaces Connection Alias", err.Error()) + return } - plan.ID = flex.StringToFramework(ctx, out.AliasId) + // Set values for unknowns. 
+ data.ID = fwflex.StringToFramework(ctx, output.AliasId) + + alias, err := waitConnectionAliasCreated(ctx, conn, data.ID.ValueString(), r.CreateTimeout(ctx, data.Timeouts)) - createTimeout := r.CreateTimeout(ctx, plan.Timeouts) - alias, err := waitConnectionAliasCreated(ctx, conn, plan.ID.ValueString(), createTimeout) if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.WorkSpaces, create.ErrActionWaitingForCreation, ResNameConnectionAlias, plan.ID.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("waiting for WorkSpaces Connection Alias (%s) create", data.ID.ValueString()), err.Error()) + return } - plan.update(ctx, alias) - resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) -} + // Set values for unknowns. + data.OwnerAccountId = fwflex.StringToFramework(ctx, alias.OwnerAccountId) + data.State = fwflex.StringValueToFramework(ctx, alias.State) -func (r *resourceConnectionAlias) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { - conn := r.Meta().WorkSpacesClient(ctx) + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} - var state resourceConnectionAliasData - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *connectionAliasResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data connectionAliasResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } - out, err := FindConnectionAliasByID(ctx, conn, state.ID.ValueString()) + conn := r.Meta().WorkSpacesClient(ctx) + + alias, err := findConnectionAliasByID(ctx, conn, data.ID.ValueString()) + if tfresource.NotFound(err) { - resp.State.RemoveResource(ctx) + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) return } + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.WorkSpaces, create.ErrActionSetting, ResNameConnectionAlias, state.ID.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading WorkSpaces Connection Alias (%s)", data.ID.ValueString()), err.Error()) + return } - state.update(ctx, out) + // Set attributes for import. + data.ConnectionString = fwflex.StringToFramework(ctx, alias.ConnectionString) + data.OwnerAccountId = fwflex.StringToFramework(ctx, alias.OwnerAccountId) + data.State = fwflex.StringValueToFramework(ctx, alias.State) - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + response.Diagnostics.Append(response.State.Set(ctx, &data)...) } -func (r *resourceConnectionAlias) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - var plan, state resourceConnectionAliasData - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *connectionAliasResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data connectionAliasResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } - resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) 
-} - -func (r *resourceConnectionAlias) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { conn := r.Meta().WorkSpacesClient(ctx) - var state resourceConnectionAliasData - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { - return - } + _, err := conn.DeleteConnectionAlias(ctx, &workspaces.DeleteConnectionAliasInput{ + AliasId: data.ID.ValueStringPointer(), + }) - in := &workspaces.DeleteConnectionAliasInput{ - AliasId: state.ID.ValueStringPointer(), + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return } - _, err := conn.DeleteConnectionAlias(ctx, in) if err != nil { - if errs.IsA[*awstypes.ResourceNotFoundException](err) { - return - } - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.WorkSpaces, create.ErrActionDeleting, ResNameConnectionAlias, state.ID.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("deleting WorkSpaces Connection Alias (%s)", data.ID.ValueString()), err.Error()) + return } - deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts) - _, err = waitConnectionAliasDeleted(ctx, conn, state.ID.ValueString(), deleteTimeout) - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.WorkSpaces, create.ErrActionWaitingForDeletion, ResNameConnectionAlias, state.ID.String(), err), - err.Error(), - ) + if _, err := waitConnectionAliasDeleted(ctx, conn, data.ID.ValueString(), r.DeleteTimeout(ctx, data.Timeouts)); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for WorkSpaces Connection Alias (%s) delete", data.ID.ValueString()), err.Error()) + return } } -func (r *resourceConnectionAlias) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { - resource.ImportStatePassthroughID(ctx, path.Root(names.AttrID), req, resp) -} - -func (r *resourceConnectionAlias) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) { +func (r *connectionAliasResource) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) { r.SetTagsAll(ctx, request, response) } -func (data *resourceConnectionAliasData) update(ctx context.Context, in *awstypes.ConnectionAlias) { - data.ConnectionString = flex.StringToFramework(ctx, in.ConnectionString) - data.OwnerAccountId = flex.StringToFramework(ctx, in.OwnerAccountId) - data.State = flex.StringValueToFramework(ctx, in.State) +func findConnectionAliasByID(ctx context.Context, conn *workspaces.Client, id string) (*awstypes.ConnectionAlias, error) { + input := &workspaces.DescribeConnectionAliasesInput{ + AliasIds: []string{id}, + } + + return findConnectionAlias(ctx, conn, input) } -func waitConnectionAliasCreated(ctx context.Context, conn *workspaces.Client, id string, timeout time.Duration) (*awstypes.ConnectionAlias, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.ConnectionAliasStateCreating), - Target: enum.Slice(awstypes.ConnectionAliasStateCreated), - Refresh: statusConnectionAlias(ctx, conn, id), - Timeout: timeout, - NotFoundChecks: 20, - ContinuousTargetOccurence: 2, - } +func findConnectionAlias(ctx context.Context, conn *workspaces.Client, input *workspaces.DescribeConnectionAliasesInput) (*awstypes.ConnectionAlias, error) { + output, err := findConnectionAliases(ctx, conn, input) - outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*awstypes.ConnectionAlias); ok { - 
return out, err + if err != nil { + return nil, err } - return nil, err + return tfresource.AssertSingleValueResult(output) } -func waitConnectionAliasDeleted(ctx context.Context, conn *workspaces.Client, id string, timeout time.Duration) (*awstypes.ConnectionAlias, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.ConnectionAliasStateDeleting), - Target: []string{}, - Refresh: statusConnectionAlias(ctx, conn, id), - Timeout: timeout, - } +func findConnectionAliases(ctx context.Context, conn *workspaces.Client, input *workspaces.DescribeConnectionAliasesInput) ([]awstypes.ConnectionAlias, error) { + var output []awstypes.ConnectionAlias - outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*awstypes.ConnectionAlias); ok { - return out, err + err := describeConnectionAliasesPages(ctx, conn, input, func(page *workspaces.DescribeConnectionAliasesOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + output = append(output, page.ConnectionAliases...) + + return !lastPage + }) + + if err != nil { + return nil, err } - return nil, err + return output, nil } func statusConnectionAlias(ctx context.Context, conn *workspaces.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - out, err := FindConnectionAliasByID(ctx, conn, id) + output, err := findConnectionAliasByID(ctx, conn, id) + if tfresource.NotFound(err) { return nil, "", nil } @@ -272,38 +241,47 @@ func statusConnectionAlias(ctx context.Context, conn *workspaces.Client, id stri return nil, "", err } - return out, string(out.State), nil + return output, string(output.State), nil } } -func FindConnectionAliasByID(ctx context.Context, conn *workspaces.Client, id string) (*awstypes.ConnectionAlias, error) { - in := &workspaces.DescribeConnectionAliasesInput{ - AliasIds: []string{id}, +func waitConnectionAliasCreated(ctx context.Context, conn *workspaces.Client, id string, timeout time.Duration) (*awstypes.ConnectionAlias, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.ConnectionAliasStateCreating), + Target: enum.Slice(awstypes.ConnectionAliasStateCreated), + Refresh: statusConnectionAlias(ctx, conn, id), + Timeout: timeout, } - out, err := conn.DescribeConnectionAliases(ctx, in) + outputRaw, err := stateConf.WaitForStateContext(ctx) - if errs.IsA[*awstypes.ResourceNotFoundException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: in, - } + if output, ok := outputRaw.(*awstypes.ConnectionAlias); ok { + return output, err } - if err != nil { - return nil, err + return nil, err +} + +func waitConnectionAliasDeleted(ctx context.Context, conn *workspaces.Client, id string, timeout time.Duration) (*awstypes.ConnectionAlias, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.ConnectionAliasStateDeleting), + Target: []string{}, + Refresh: statusConnectionAlias(ctx, conn, id), + Timeout: timeout, } - if out == nil || len(out.ConnectionAliases) == 0 { - return nil, tfresource.NewEmptyResultError(in) + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.ConnectionAlias); ok { + return output, err } - return &out.ConnectionAliases[0], nil + return nil, err } -type resourceConnectionAliasData struct { - ID types.String `tfsdk:"id"` +type connectionAliasResourceModel struct { ConnectionString types.String `tfsdk:"connection_string"` + ID types.String `tfsdk:"id"` OwnerAccountId types.String 
`tfsdk:"owner_account_id"` State types.String `tfsdk:"state"` Tags tftags.Map `tfsdk:"tags"` diff --git a/internal/service/workspaces/connection_alias_test.go b/internal/service/workspaces/connection_alias_test.go index 9abea69ab252..8ac9ec71cf93 100644 --- a/internal/service/workspaces/connection_alias_test.go +++ b/internal/service/workspaces/connection_alias_test.go @@ -5,7 +5,6 @@ package workspaces_test import ( "context" - "errors" "fmt" "strings" "testing" @@ -16,7 +15,6 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" tfworkspaces "github.com/hashicorp/terraform-provider-aws/internal/service/workspaces" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -156,32 +154,29 @@ func testAccCheckConnectionAliasDestroy(ctx context.Context) resource.TestCheckF return err } - return create.Error(names.WorkSpaces, create.ErrActionCheckingDestroyed, tfworkspaces.ResNameConnectionAlias, rs.Primary.ID, errors.New("not destroyed")) + return fmt.Errorf("WorkSpaces Connection Alias %s still exists", rs.Primary.ID) } return nil } } -func testAccCheckConnectionAliasExists(ctx context.Context, name string, connectionalias *awstypes.ConnectionAlias) resource.TestCheckFunc { +func testAccCheckConnectionAliasExists(ctx context.Context, n string, v *awstypes.ConnectionAlias) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return create.Error(names.WorkSpaces, create.ErrActionCheckingExistence, tfworkspaces.ResNameConnectionAlias, name, errors.New("not found")) - } - - if rs.Primary.ID == "" { - return create.Error(names.WorkSpaces, create.ErrActionCheckingExistence, tfworkspaces.ResNameConnectionAlias, name, errors.New("not set")) + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesClient(ctx) - out, err := tfworkspaces.FindConnectionAliasByID(ctx, conn, rs.Primary.ID) + + output, err := tfworkspaces.FindConnectionAliasByID(ctx, conn, rs.Primary.ID) if err != nil { - return create.Error(names.WorkSpaces, create.ErrActionCheckingExistence, tfworkspaces.ResNameConnectionAlias, rs.Primary.ID, err) + return err } - *connectionalias = *out + *v = *output return nil } diff --git a/internal/service/workspaces/directory.go b/internal/service/workspaces/directory.go index 5823b35ddc8e..f76aee5b6abc 100644 --- a/internal/service/workspaces/directory.go +++ b/internal/service/workspaces/directory.go @@ -5,27 +5,31 @@ package workspaces import ( "context" + "errors" "log" + "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/workspaces" "github.com/aws/aws-sdk-go-v2/service/workspaces/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags 
"github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + itypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_workspaces_directory", name="Directory") // @Tags(identifierAttribute="id") -func ResourceDirectory() *schema.Resource { +func resourceDirectory() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceDirectoryCreate, ReadWithoutTimeout: resourceDirectoryRead, @@ -129,44 +133,44 @@ func ResourceDirectory() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "device_type_android": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(flattenAccessPropertyEnumValues(types.AccessPropertyValue("").Values()), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.AccessPropertyValue](), }, "device_type_chromeos": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(flattenAccessPropertyEnumValues(types.AccessPropertyValue("").Values()), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.AccessPropertyValue](), }, "device_type_ios": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(flattenAccessPropertyEnumValues(types.AccessPropertyValue("").Values()), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.AccessPropertyValue](), }, "device_type_linux": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(flattenAccessPropertyEnumValues(types.AccessPropertyValue("").Values()), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.AccessPropertyValue](), }, "device_type_osx": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(flattenAccessPropertyEnumValues(types.AccessPropertyValue("").Values()), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.AccessPropertyValue](), }, "device_type_web": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(flattenAccessPropertyEnumValues(types.AccessPropertyValue("").Values()), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.AccessPropertyValue](), }, "device_type_windows": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(flattenAccessPropertyEnumValues(types.AccessPropertyValue("").Values()), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.AccessPropertyValue](), }, "device_type_zeroclient": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(flattenAccessPropertyEnumValues(types.AccessPropertyValue("").Values()), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.AccessPropertyValue](), }, }, }, @@ -231,7 +235,10 @@ func resourceDirectoryCreate(ctx context.Context, d *schema.ResourceData, meta i input.SubnetIds = flex.ExpandStringValueSet(v.(*schema.Set)) } - _, err := tfresource.RetryWhenIsA[*types.InvalidResourceStateException](ctx, DirectoryRegisterInvalidResourceStateTimeout, + const ( + timeout = 2 * time.Minute + ) + _, err := 
tfresource.RetryWhenIsA[*types.InvalidResourceStateException](ctx, timeout, func() (interface{}, error) { return conn.RegisterWorkspaceDirectory(ctx, input) }) @@ -242,59 +249,60 @@ func resourceDirectoryCreate(ctx context.Context, d *schema.ResourceData, meta i d.SetId(directoryID) - _, err = WaitDirectoryRegistered(ctx, conn, d.Id()) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for WorkSpaces Directory (%s) to register: %s", d.Id(), err) + if _, err := waitDirectoryRegistered(ctx, conn, d.Id()); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for WorkSpaces Directory (%s) create: %s", d.Id(), err) } if v, ok := d.GetOk("self_service_permissions"); ok { - log.Printf("[DEBUG] Modifying WorkSpaces Directory (%s) self-service permissions", directoryID) - _, err := conn.ModifySelfservicePermissions(ctx, &workspaces.ModifySelfservicePermissionsInput{ - ResourceId: aws.String(directoryID), - SelfservicePermissions: ExpandSelfServicePermissions(v.([]interface{})), - }) + input := &workspaces.ModifySelfservicePermissionsInput{ + ResourceId: aws.String(d.Id()), + SelfservicePermissions: expandSelfservicePermissions(v.([]interface{})), + } + + _, err := conn.ModifySelfservicePermissions(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "setting WorkSpaces Directory (%s) self-service permissions: %s", directoryID, err) + return sdkdiag.AppendErrorf(diags, "setting WorkSpaces Directory (%s) self-service permissions: %s", d.Id(), err) } - log.Printf("[INFO] Modified WorkSpaces Directory (%s) self-service permissions", directoryID) } if v, ok := d.GetOk("workspace_access_properties"); ok { - log.Printf("[DEBUG] Modifying WorkSpaces Directory (%s) access properties", directoryID) - _, err := conn.ModifyWorkspaceAccessProperties(ctx, &workspaces.ModifyWorkspaceAccessPropertiesInput{ - ResourceId: aws.String(directoryID), - WorkspaceAccessProperties: ExpandWorkspaceAccessProperties(v.([]interface{})), - }) + input := &workspaces.ModifyWorkspaceAccessPropertiesInput{ + ResourceId: aws.String(d.Id()), + WorkspaceAccessProperties: expandWorkspaceAccessProperties(v.([]interface{})), + } + + _, err := conn.ModifyWorkspaceAccessProperties(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "setting WorkSpaces Directory (%s) access properties: %s", directoryID, err) + return sdkdiag.AppendErrorf(diags, "setting WorkSpaces Directory (%s) access properties: %s", d.Id(), err) } - log.Printf("[INFO] Modified WorkSpaces Directory (%s) access properties", directoryID) } if v, ok := d.GetOk("workspace_creation_properties"); ok { - log.Printf("[DEBUG] Modifying WorkSpaces Directory (%s) creation properties", directoryID) - _, err := conn.ModifyWorkspaceCreationProperties(ctx, &workspaces.ModifyWorkspaceCreationPropertiesInput{ - ResourceId: aws.String(directoryID), - WorkspaceCreationProperties: ExpandWorkspaceCreationProperties(v.([]interface{})), - }) + input := &workspaces.ModifyWorkspaceCreationPropertiesInput{ + ResourceId: aws.String(d.Id()), + WorkspaceCreationProperties: expandWorkspaceCreationProperties(v.([]interface{})), + } + + _, err := conn.ModifyWorkspaceCreationProperties(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "setting WorkSpaces Directory (%s) creation properties: %s", directoryID, err) + return sdkdiag.AppendErrorf(diags, "setting WorkSpaces Directory (%s) creation properties: %s", d.Id(), err) } - log.Printf("[INFO] Modified WorkSpaces Directory (%s) creation properties", directoryID) } if v, ok := 
d.GetOk("ip_group_ids"); ok && v.(*schema.Set).Len() > 0 { - ipGroupIds := v.(*schema.Set) - log.Printf("[DEBUG] Associating WorkSpaces Directory (%s) with IP Groups %s", directoryID, ipGroupIds.List()) - _, err := conn.AssociateIpGroups(ctx, &workspaces.AssociateIpGroupsInput{ - DirectoryId: aws.String(directoryID), - GroupIds: flex.ExpandStringValueSet(ipGroupIds), - }) + input := &workspaces.AssociateIpGroupsInput{ + DirectoryId: aws.String(d.Id()), + GroupIds: flex.ExpandStringValueSet(v.(*schema.Set)), + } + + _, err := conn.AssociateIpGroups(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "asassociating WorkSpaces Directory (%s) ip groups: %s", directoryID, err) + return sdkdiag.AppendErrorf(diags, "asassociating WorkSpaces Directory (%s) IP Groups: %s", d.Id(), err) } - log.Printf("[INFO] Associated WorkSpaces Directory (%s) IP Groups", directoryID) } return append(diags, resourceDirectoryRead(ctx, d, meta)...) @@ -304,7 +312,7 @@ func resourceDirectoryRead(ctx context.Context, d *schema.ResourceData, meta int var diags diag.Diagnostics conn := meta.(*conns.AWSClient).WorkSpacesClient(ctx) - directory, err := FindDirectoryByID(ctx, conn, d.Id()) + directory, err := findDirectoryByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] WorkSpaces Directory (%s) not found, removing from state", d.Id()) @@ -316,36 +324,25 @@ func resourceDirectoryRead(ctx context.Context, d *schema.ResourceData, meta int return sdkdiag.AppendErrorf(diags, "reading WorkSpaces Directory (%s): %s", d.Id(), err) } + d.Set(names.AttrAlias, directory.Alias) d.Set("directory_id", directory.DirectoryId) - if err := d.Set(names.AttrSubnetIDs, flex.FlattenStringValueSet(directory.SubnetIds)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting subnet_ids: %s", err) - } - d.Set("workspace_security_group_id", directory.WorkspaceSecurityGroupId) - d.Set("iam_role_id", directory.IamRoleId) - d.Set("registration_code", directory.RegistrationCode) d.Set("directory_name", directory.DirectoryName) d.Set("directory_type", directory.DirectoryType) - d.Set(names.AttrAlias, directory.Alias) - - if err := d.Set("self_service_permissions", FlattenSelfServicePermissions(directory.SelfservicePermissions)); err != nil { + d.Set("dns_ip_addresses", directory.DnsIpAddresses) + d.Set("iam_role_id", directory.IamRoleId) + d.Set("ip_group_ids", directory.IpGroupIds) + d.Set("registration_code", directory.RegistrationCode) + if err := d.Set("self_service_permissions", flattenSelfservicePermissions(directory.SelfservicePermissions)); err != nil { return sdkdiag.AppendErrorf(diags, "setting self_service_permissions: %s", err) } - - if err := d.Set("workspace_access_properties", FlattenWorkspaceAccessProperties(directory.WorkspaceAccessProperties)); err != nil { + d.Set(names.AttrSubnetIDs, directory.SubnetIds) + if err := d.Set("workspace_access_properties", flattenWorkspaceAccessProperties(directory.WorkspaceAccessProperties)); err != nil { return sdkdiag.AppendErrorf(diags, "setting workspace_access_properties: %s", err) } - - if err := d.Set("workspace_creation_properties", FlattenWorkspaceCreationProperties(directory.WorkspaceCreationProperties)); err != nil { + if err := d.Set("workspace_creation_properties", flattenDefaultWorkspaceCreationProperties(directory.WorkspaceCreationProperties)); err != nil { return sdkdiag.AppendErrorf(diags, "setting workspace_creation_properties: %s", err) } - - if err := d.Set("ip_group_ids", 
flex.FlattenStringValueSet(directory.IpGroupIds)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting ip_group_ids: %s", err) - } - - if err := d.Set("dns_ip_addresses", flex.FlattenStringValueSet(directory.DnsIpAddresses)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting dns_ip_addresses: %s", err) - } + d.Set("workspace_security_group_id", directory.WorkspaceSecurityGroupId) return diags } @@ -355,73 +352,74 @@ func resourceDirectoryUpdate(ctx context.Context, d *schema.ResourceData, meta i conn := meta.(*conns.AWSClient).WorkSpacesClient(ctx) if d.HasChange("self_service_permissions") { - log.Printf("[DEBUG] Modifying WorkSpaces Directory (%s) self-service permissions", d.Id()) - permissions := d.Get("self_service_permissions").([]interface{}) - - _, err := conn.ModifySelfservicePermissions(ctx, &workspaces.ModifySelfservicePermissionsInput{ + input := &workspaces.ModifySelfservicePermissionsInput{ ResourceId: aws.String(d.Id()), - SelfservicePermissions: ExpandSelfServicePermissions(permissions), - }) + SelfservicePermissions: expandSelfservicePermissions(d.Get("self_service_permissions").([]interface{})), + } + + _, err := conn.ModifySelfservicePermissions(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "updating WorkSpaces Directory (%s) self service permissions: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating WorkSpaces Directory (%s) self-service permissions: %s", d.Id(), err) } - log.Printf("[INFO] Modified WorkSpaces Directory (%s) self-service permissions", d.Id()) } if d.HasChange("workspace_access_properties") { - log.Printf("[DEBUG] Modifying WorkSpaces Directory (%s) access properties", d.Id()) - properties := d.Get("workspace_access_properties").([]interface{}) - - _, err := conn.ModifyWorkspaceAccessProperties(ctx, &workspaces.ModifyWorkspaceAccessPropertiesInput{ + input := &workspaces.ModifyWorkspaceAccessPropertiesInput{ ResourceId: aws.String(d.Id()), - WorkspaceAccessProperties: ExpandWorkspaceAccessProperties(properties), - }) + WorkspaceAccessProperties: expandWorkspaceAccessProperties(d.Get("workspace_access_properties").([]interface{})), + } + + _, err := conn.ModifyWorkspaceAccessProperties(ctx, input) + if err != nil { return sdkdiag.AppendErrorf(diags, "updating WorkSpaces Directory (%s) access properties: %s", d.Id(), err) } - log.Printf("[INFO] Modified WorkSpaces Directory (%s) access properties", d.Id()) } if d.HasChange("workspace_creation_properties") { - log.Printf("[DEBUG] Modifying WorkSpaces Directory (%s) creation properties", d.Id()) - properties := d.Get("workspace_creation_properties").([]interface{}) - - _, err := conn.ModifyWorkspaceCreationProperties(ctx, &workspaces.ModifyWorkspaceCreationPropertiesInput{ + input := &workspaces.ModifyWorkspaceCreationPropertiesInput{ ResourceId: aws.String(d.Id()), - WorkspaceCreationProperties: ExpandWorkspaceCreationProperties(properties), - }) + WorkspaceCreationProperties: expandWorkspaceCreationProperties(d.Get("workspace_creation_properties").([]interface{})), + } + + _, err := conn.ModifyWorkspaceCreationProperties(ctx, input) + if err != nil { return sdkdiag.AppendErrorf(diags, "updating WorkSpaces Directory (%s) creation properties: %s", d.Id(), err) } - log.Printf("[INFO] Modified WorkSpaces Directory (%s) creation properties", d.Id()) } if d.HasChange("ip_group_ids") { o, n := d.GetChange("ip_group_ids") - old := o.(*schema.Set) - new := n.(*schema.Set) - added := new.Difference(old) - removed := old.Difference(new) + os, ns := o.(*schema.Set), 
n.(*schema.Set) + add, del := ns.Difference(os), os.Difference(ns) - log.Printf("[DEBUG] Associating WorkSpaces Directory (%s) with IP Groups %s", d.Id(), added.GoString()) - _, err := conn.AssociateIpGroups(ctx, &workspaces.AssociateIpGroupsInput{ - DirectoryId: aws.String(d.Id()), - GroupIds: flex.ExpandStringValueSet(added), - }) - if err != nil { - return sdkdiag.AppendErrorf(diags, "asassociating WorkSpaces Directory (%s) IP Groups: %s", d.Id(), err) - } + if add.Len() > 0 { + input := &workspaces.AssociateIpGroupsInput{ + DirectoryId: aws.String(d.Id()), + GroupIds: flex.ExpandStringValueSet(add), + } - log.Printf("[DEBUG] Disassociating WorkSpaces Directory (%s) with IP Groups %s", d.Id(), removed.GoString()) - _, err = conn.DisassociateIpGroups(ctx, &workspaces.DisassociateIpGroupsInput{ - DirectoryId: aws.String(d.Id()), - GroupIds: flex.ExpandStringValueSet(removed), - }) - if err != nil { - return sdkdiag.AppendErrorf(diags, "disasassociating WorkSpaces Directory (%s) IP Groups: %s", d.Id(), err) + _, err := conn.AssociateIpGroups(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "associating WorkSpaces Directory (%s) IP Groups: %s", d.Id(), err) + } } - log.Printf("[INFO] Updated WorkSpaces Directory (%s) IP Groups", d.Id()) + if del.Len() > 0 { + input := &workspaces.DisassociateIpGroupsInput{ + DirectoryId: aws.String(d.Id()), + GroupIds: flex.ExpandStringValueSet(del), + } + + _, err := conn.DisassociateIpGroups(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "disassociating WorkSpaces Directory (%s) IP Groups: %s", d.Id(), err) + } + } } return append(diags, resourceDirectoryRead(ctx, d, meta)...) @@ -431,8 +429,11 @@ func resourceDirectoryDelete(ctx context.Context, d *schema.ResourceData, meta i var diags diag.Diagnostics conn := meta.(*conns.AWSClient).WorkSpacesClient(ctx) - log.Printf("[DEBUG] Deregistering WorkSpaces Directory: %s", d.Id()) - _, err := tfresource.RetryWhenIsA[*types.InvalidResourceStateException](ctx, DirectoryRegisterInvalidResourceStateTimeout, + log.Printf("[DEBUG] Deleting WorkSpaces Directory: %s", d.Id()) + const ( + timeout = 2 * time.Minute + ) + _, err := tfresource.RetryWhenIsA[*types.InvalidResourceStateException](ctx, timeout, func() (interface{}, error) { return conn.DeregisterWorkspaceDirectory(ctx, &workspaces.DeregisterWorkspaceDirectoryInput{ DirectoryId: aws.String(d.Id()), @@ -447,221 +448,322 @@ func resourceDirectoryDelete(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "deregistering WorkSpaces Directory (%s): %s", d.Id(), err) } - _, err = WaitDirectoryDeregistered(ctx, conn, d.Id()) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for WorkSpaces Directory (%s) to deregister: %s", d.Id(), err) + if _, err := waitDirectoryDeregistered(ctx, conn, d.Id()); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for WorkSpaces Directory (%s) delete: %s", d.Id(), err) } return diags } -func ExpandWorkspaceAccessProperties(properties []interface{}) *types.WorkspaceAccessProperties { - if len(properties) == 0 || properties[0] == nil { - return nil +func findDirectoryByID(ctx context.Context, conn *workspaces.Client, id string) (*types.WorkspaceDirectory, error) { + input := &workspaces.DescribeWorkspaceDirectoriesInput{ + DirectoryIds: []string{id}, } - result := &types.WorkspaceAccessProperties{} + output, err := findDirectory(ctx, conn, input) + + if err != nil { + return nil, err + } - p := properties[0].(map[string]interface{}) + if 
itypes.IsZero(output) { + return nil, tfresource.NewEmptyResultError(input) + } - if p["device_type_android"].(string) != "" { - result.DeviceTypeAndroid = types.AccessPropertyValue(p["device_type_android"].(string)) + if state := output.State; state == types.WorkspaceDirectoryStateDeregistered { + return nil, &retry.NotFoundError{ + Message: string(state), + LastRequest: input, + } } - if p["device_type_chromeos"].(string) != "" { - result.DeviceTypeChromeOs = types.AccessPropertyValue(p["device_type_chromeos"].(string)) + return output, nil +} + +func findDirectory(ctx context.Context, conn *workspaces.Client, input *workspaces.DescribeWorkspaceDirectoriesInput) (*types.WorkspaceDirectory, error) { + output, err := findDirectories(ctx, conn, input) + + if err != nil { + return nil, err } - if p["device_type_ios"].(string) != "" { - result.DeviceTypeIos = types.AccessPropertyValue(p["device_type_ios"].(string)) + return tfresource.AssertSingleValueResult(output) +} + +func findDirectories(ctx context.Context, conn *workspaces.Client, input *workspaces.DescribeWorkspaceDirectoriesInput) ([]types.WorkspaceDirectory, error) { + var output []types.WorkspaceDirectory + + pages := workspaces.NewDescribeWorkspaceDirectoriesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + output = append(output, page.Directories...) } - if p["device_type_linux"].(string) != "" { - result.DeviceTypeLinux = types.AccessPropertyValue(p["device_type_linux"].(string)) + return output, nil +} + +func statusDirectory(ctx context.Context, conn *workspaces.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findDirectoryByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.State), nil } +} - if p["device_type_osx"].(string) != "" { - result.DeviceTypeOsx = types.AccessPropertyValue(p["device_type_osx"].(string)) +func waitDirectoryRegistered(ctx context.Context, conn *workspaces.Client, directoryID string) (*types.WorkspaceDirectory, error) { + const ( + timeout = 10 * time.Minute + ) + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.WorkspaceDirectoryStateRegistering), + Target: enum.Slice(types.WorkspaceDirectoryStateRegistered), + Refresh: statusDirectory(ctx, conn, directoryID), + Timeout: timeout, } - if p["device_type_web"].(string) != "" { - result.DeviceTypeWeb = types.AccessPropertyValue(p["device_type_web"].(string)) + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.WorkspaceDirectory); ok { + tfresource.SetLastError(err, errors.New(aws.ToString(output.ErrorMessage))) + + return output, err } - if p["device_type_windows"].(string) != "" { - result.DeviceTypeWindows = types.AccessPropertyValue(p["device_type_windows"].(string)) + return nil, err +} + +func waitDirectoryDeregistered(ctx context.Context, conn *workspaces.Client, directoryID string) (*types.WorkspaceDirectory, error) { + const ( + timeout = 10 * time.Minute + ) + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice( + types.WorkspaceDirectoryStateRegistering, + types.WorkspaceDirectoryStateRegistered, + types.WorkspaceDirectoryStateDeregistering, + ), + Target: []string{}, + Refresh: statusDirectory(ctx, conn, directoryID), + Timeout: timeout, } - if p["device_type_zeroclient"].(string) != "" { - result.DeviceTypeZeroClient = 
types.AccessPropertyValue(p["device_type_zeroclient"].(string)) + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.WorkspaceDirectory); ok { + tfresource.SetLastError(err, errors.New(aws.ToString(output.ErrorMessage))) + + return output, err } - return result + return nil, err } -func ExpandSelfServicePermissions(permissions []interface{}) *types.SelfservicePermissions { - if len(permissions) == 0 || permissions[0] == nil { +func expandWorkspaceAccessProperties(tfList []interface{}) *types.WorkspaceAccessProperties { + if len(tfList) == 0 || tfList[0] == nil { return nil } - result := &types.SelfservicePermissions{} + apiObject := &types.WorkspaceAccessProperties{} + tfMap := tfList[0].(map[string]interface{}) + + if tfMap["device_type_android"].(string) != "" { + apiObject.DeviceTypeAndroid = types.AccessPropertyValue(tfMap["device_type_android"].(string)) + } + + if tfMap["device_type_chromeos"].(string) != "" { + apiObject.DeviceTypeChromeOs = types.AccessPropertyValue(tfMap["device_type_chromeos"].(string)) + } + + if tfMap["device_type_ios"].(string) != "" { + apiObject.DeviceTypeIos = types.AccessPropertyValue(tfMap["device_type_ios"].(string)) + } + + if tfMap["device_type_linux"].(string) != "" { + apiObject.DeviceTypeLinux = types.AccessPropertyValue(tfMap["device_type_linux"].(string)) + } + + if tfMap["device_type_osx"].(string) != "" { + apiObject.DeviceTypeOsx = types.AccessPropertyValue(tfMap["device_type_osx"].(string)) + } + + if tfMap["device_type_web"].(string) != "" { + apiObject.DeviceTypeWeb = types.AccessPropertyValue(tfMap["device_type_web"].(string)) + } + + if tfMap["device_type_windows"].(string) != "" { + apiObject.DeviceTypeWindows = types.AccessPropertyValue(tfMap["device_type_windows"].(string)) + } + + if tfMap["device_type_zeroclient"].(string) != "" { + apiObject.DeviceTypeZeroClient = types.AccessPropertyValue(tfMap["device_type_zeroclient"].(string)) + } + + return apiObject +} + +func expandSelfservicePermissions(tfList []interface{}) *types.SelfservicePermissions { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } - p := permissions[0].(map[string]interface{}) + apiObject := &types.SelfservicePermissions{} + tfMap := tfList[0].(map[string]interface{}) - if p["change_compute_type"].(bool) { - result.ChangeComputeType = types.ReconnectEnumEnabled + if tfMap["change_compute_type"].(bool) { + apiObject.ChangeComputeType = types.ReconnectEnumEnabled } else { - result.ChangeComputeType = types.ReconnectEnumDisabled + apiObject.ChangeComputeType = types.ReconnectEnumDisabled } - if p["increase_volume_size"].(bool) { - result.IncreaseVolumeSize = types.ReconnectEnumEnabled + if tfMap["increase_volume_size"].(bool) { + apiObject.IncreaseVolumeSize = types.ReconnectEnumEnabled } else { - result.IncreaseVolumeSize = types.ReconnectEnumDisabled + apiObject.IncreaseVolumeSize = types.ReconnectEnumDisabled } - if p["rebuild_workspace"].(bool) { - result.RebuildWorkspace = types.ReconnectEnumEnabled + if tfMap["rebuild_workspace"].(bool) { + apiObject.RebuildWorkspace = types.ReconnectEnumEnabled } else { - result.RebuildWorkspace = types.ReconnectEnumDisabled + apiObject.RebuildWorkspace = types.ReconnectEnumDisabled } - if p["restart_workspace"].(bool) { - result.RestartWorkspace = types.ReconnectEnumEnabled + if tfMap["restart_workspace"].(bool) { + apiObject.RestartWorkspace = types.ReconnectEnumEnabled } else { - result.RestartWorkspace = types.ReconnectEnumDisabled + apiObject.RestartWorkspace = 
types.ReconnectEnumDisabled } - if p["switch_running_mode"].(bool) { - result.SwitchRunningMode = types.ReconnectEnumEnabled + if tfMap["switch_running_mode"].(bool) { + apiObject.SwitchRunningMode = types.ReconnectEnumEnabled } else { - result.SwitchRunningMode = types.ReconnectEnumDisabled + apiObject.SwitchRunningMode = types.ReconnectEnumDisabled } - return result + return apiObject } -func ExpandWorkspaceCreationProperties(properties []interface{}) *types.WorkspaceCreationProperties { - if len(properties) == 0 || properties[0] == nil { +func expandWorkspaceCreationProperties(tfList []interface{}) *types.WorkspaceCreationProperties { + if len(tfList) == 0 || tfList[0] == nil { return nil } - p := properties[0].(map[string]interface{}) - - result := &types.WorkspaceCreationProperties{ - EnableInternetAccess: aws.Bool(p["enable_internet_access"].(bool)), - EnableMaintenanceMode: aws.Bool(p["enable_maintenance_mode"].(bool)), - UserEnabledAsLocalAdministrator: aws.Bool(p["user_enabled_as_local_administrator"].(bool)), + tfMap := tfList[0].(map[string]interface{}) + apiObject := &types.WorkspaceCreationProperties{ + EnableInternetAccess: aws.Bool(tfMap["enable_internet_access"].(bool)), + EnableMaintenanceMode: aws.Bool(tfMap["enable_maintenance_mode"].(bool)), + UserEnabledAsLocalAdministrator: aws.Bool(tfMap["user_enabled_as_local_administrator"].(bool)), } - if p["custom_security_group_id"].(string) != "" { - result.CustomSecurityGroupId = aws.String(p["custom_security_group_id"].(string)) + if tfMap["custom_security_group_id"].(string) != "" { + apiObject.CustomSecurityGroupId = aws.String(tfMap["custom_security_group_id"].(string)) } - if p["default_ou"].(string) != "" { - result.DefaultOu = aws.String(p["default_ou"].(string)) + if tfMap["default_ou"].(string) != "" { + apiObject.DefaultOu = aws.String(tfMap["default_ou"].(string)) } - return result + return apiObject } -func FlattenWorkspaceAccessProperties(properties *types.WorkspaceAccessProperties) []interface{} { - if properties == nil { +func flattenWorkspaceAccessProperties(apiObject *types.WorkspaceAccessProperties) []interface{} { + if apiObject == nil { return []interface{}{} } return []interface{}{ map[string]interface{}{ - "device_type_android": string(properties.DeviceTypeAndroid), - "device_type_chromeos": string(properties.DeviceTypeChromeOs), - "device_type_ios": string(properties.DeviceTypeIos), - "device_type_linux": string(properties.DeviceTypeLinux), - "device_type_osx": string(properties.DeviceTypeOsx), - "device_type_web": string(properties.DeviceTypeWeb), - "device_type_windows": string(properties.DeviceTypeWindows), - "device_type_zeroclient": string(properties.DeviceTypeZeroClient), + "device_type_android": apiObject.DeviceTypeAndroid, + "device_type_chromeos": apiObject.DeviceTypeChromeOs, + "device_type_ios": apiObject.DeviceTypeIos, + "device_type_linux": apiObject.DeviceTypeLinux, + "device_type_osx": apiObject.DeviceTypeOsx, + "device_type_web": apiObject.DeviceTypeWeb, + "device_type_windows": apiObject.DeviceTypeWindows, + "device_type_zeroclient": apiObject.DeviceTypeZeroClient, }, } } -func FlattenSelfServicePermissions(permissions *types.SelfservicePermissions) []interface{} { - if permissions == nil { +func flattenSelfservicePermissions(apiObject *types.SelfservicePermissions) []interface{} { + if apiObject == nil { return []interface{}{} } - result := map[string]interface{}{} + tfMap := map[string]interface{}{} - switch permissions.ChangeComputeType { + switch apiObject.ChangeComputeType { case 
types.ReconnectEnumEnabled: - result["change_compute_type"] = true + tfMap["change_compute_type"] = true case types.ReconnectEnumDisabled: - result["change_compute_type"] = false + tfMap["change_compute_type"] = false default: - result["change_compute_type"] = nil + tfMap["change_compute_type"] = nil } - switch permissions.IncreaseVolumeSize { + switch apiObject.IncreaseVolumeSize { case types.ReconnectEnumEnabled: - result["increase_volume_size"] = true + tfMap["increase_volume_size"] = true case types.ReconnectEnumDisabled: - result["increase_volume_size"] = false + tfMap["increase_volume_size"] = false default: - result["increase_volume_size"] = nil + tfMap["increase_volume_size"] = nil } - switch permissions.RebuildWorkspace { + switch apiObject.RebuildWorkspace { case types.ReconnectEnumEnabled: - result["rebuild_workspace"] = true + tfMap["rebuild_workspace"] = true case types.ReconnectEnumDisabled: - result["rebuild_workspace"] = false + tfMap["rebuild_workspace"] = false default: - result["rebuild_workspace"] = nil + tfMap["rebuild_workspace"] = nil } - switch permissions.RestartWorkspace { + switch apiObject.RestartWorkspace { case types.ReconnectEnumEnabled: - result["restart_workspace"] = true + tfMap["restart_workspace"] = true case types.ReconnectEnumDisabled: - result["restart_workspace"] = false + tfMap["restart_workspace"] = false default: - result["restart_workspace"] = nil + tfMap["restart_workspace"] = nil } - switch permissions.SwitchRunningMode { + switch apiObject.SwitchRunningMode { case types.ReconnectEnumEnabled: - result["switch_running_mode"] = true + tfMap["switch_running_mode"] = true case types.ReconnectEnumDisabled: - result["switch_running_mode"] = false + tfMap["switch_running_mode"] = false default: - result["switch_running_mode"] = nil + tfMap["switch_running_mode"] = nil } - return []interface{}{result} + return []interface{}{tfMap} } -func FlattenWorkspaceCreationProperties(properties *types.DefaultWorkspaceCreationProperties) []interface{} { - if properties == nil { +func flattenDefaultWorkspaceCreationProperties(apiObject *types.DefaultWorkspaceCreationProperties) []interface{} { + if apiObject == nil { return []interface{}{} } return []interface{}{ map[string]interface{}{ - "custom_security_group_id": aws.ToString(properties.CustomSecurityGroupId), - "default_ou": aws.ToString(properties.DefaultOu), - "enable_internet_access": aws.ToBool(properties.EnableInternetAccess), - "enable_maintenance_mode": aws.ToBool(properties.EnableMaintenanceMode), - "user_enabled_as_local_administrator": aws.ToBool(properties.UserEnabledAsLocalAdministrator), + "custom_security_group_id": aws.ToString(apiObject.CustomSecurityGroupId), + "default_ou": aws.ToString(apiObject.DefaultOu), + "enable_internet_access": aws.ToBool(apiObject.EnableInternetAccess), + "enable_maintenance_mode": aws.ToBool(apiObject.EnableMaintenanceMode), + "user_enabled_as_local_administrator": aws.ToBool(apiObject.UserEnabledAsLocalAdministrator), }, } } - -func flattenAccessPropertyEnumValues(t []types.AccessPropertyValue) []string { - var out []string - - for _, v := range t { - out = append(out, string(v)) - } - - return out -} diff --git a/internal/service/workspaces/directory_data_source.go b/internal/service/workspaces/directory_data_source.go index 3184835fe680..cf30c95fc16f 100644 --- a/internal/service/workspaces/directory_data_source.go +++ b/internal/service/workspaces/directory_data_source.go @@ -6,18 +6,17 @@ package workspaces import ( "context" - 
"github.com/aws/aws-sdk-go-v2/service/workspaces/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" - "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_workspaces_directory") -func DataSourceDirectory() *schema.Resource { +// @SDKDataSource("aws_workspaces_directory", name="Directory") +// @Tags(identifierAttribute="id") +func dataSourceDirectory() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceDirectoryRead, @@ -93,7 +92,7 @@ func DataSourceDirectory() *schema.Resource { Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - names.AttrTags: tftags.TagsSchema(), + names.AttrTags: tftags.TagsSchemaComputed(), "workspace_access_properties": { Type: schema.TypeList, Computed: true, @@ -173,60 +172,34 @@ func DataSourceDirectory() *schema.Resource { func dataSourceDirectoryRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).WorkSpacesClient(ctx) - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig directoryID := d.Get("directory_id").(string) + directory, err := findDirectoryByID(ctx, conn, directoryID) - rawOutput, state, err := StatusDirectoryState(ctx, conn, directoryID)() if err != nil { - return sdkdiag.AppendErrorf(diags, "getting WorkSpaces Directory (%s): %s", directoryID, err) - } - if state == string(types.WorkspaceDirectoryStateDeregistered) { - return sdkdiag.AppendErrorf(diags, "WorkSpaces directory %s was not found", directoryID) + return sdkdiag.AppendErrorf(diags, "reading WorkSpaces Directory (%s): %s", directoryID, err) } d.SetId(directoryID) - - directory := rawOutput.(*types.WorkspaceDirectory) + d.Set(names.AttrAlias, directory.Alias) d.Set("directory_id", directory.DirectoryId) - d.Set("workspace_security_group_id", directory.WorkspaceSecurityGroupId) - d.Set("iam_role_id", directory.IamRoleId) - d.Set("registration_code", directory.RegistrationCode) d.Set("directory_name", directory.DirectoryName) d.Set("directory_type", directory.DirectoryType) - d.Set(names.AttrAlias, directory.Alias) - - if err := d.Set(names.AttrSubnetIDs, flex.FlattenStringValueSet(directory.SubnetIds)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting subnet_ids: %s", err) - } - - if err := d.Set("self_service_permissions", FlattenSelfServicePermissions(directory.SelfservicePermissions)); err != nil { + d.Set("dns_ip_addresses", directory.DnsIpAddresses) + d.Set("iam_role_id", directory.IamRoleId) + d.Set("ip_group_ids", directory.IpGroupIds) + d.Set("registration_code", directory.RegistrationCode) + if err := d.Set("self_service_permissions", flattenSelfservicePermissions(directory.SelfservicePermissions)); err != nil { return sdkdiag.AppendErrorf(diags, "setting self_service_permissions: %s", err) } - - if err := d.Set("workspace_access_properties", FlattenWorkspaceAccessProperties(directory.WorkspaceAccessProperties)); err != nil { + d.Set(names.AttrSubnetIDs, directory.SubnetIds) + if err := d.Set("workspace_access_properties", flattenWorkspaceAccessProperties(directory.WorkspaceAccessProperties)); err != nil { return sdkdiag.AppendErrorf(diags, "setting workspace_access_properties: %s", err) } - 
- if err := d.Set("workspace_creation_properties", FlattenWorkspaceCreationProperties(directory.WorkspaceCreationProperties)); err != nil { + if err := d.Set("workspace_creation_properties", flattenDefaultWorkspaceCreationProperties(directory.WorkspaceCreationProperties)); err != nil { return sdkdiag.AppendErrorf(diags, "setting workspace_creation_properties: %s", err) } - - if err := d.Set("ip_group_ids", flex.FlattenStringValueSet(directory.IpGroupIds)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting ip_group_ids: %s", err) - } - - if err := d.Set("dns_ip_addresses", flex.FlattenStringValueSet(directory.DnsIpAddresses)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting dns_ip_addresses: %s", err) - } - - tags, err := listTags(ctx, conn, d.Id()) - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing tags: %s", err) - } - if err := d.Set(names.AttrTags, tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } + d.Set("workspace_security_group_id", directory.WorkspaceSecurityGroupId) return diags } diff --git a/internal/service/workspaces/directory_test.go b/internal/service/workspaces/directory_test.go index 31035c099314..1721f9f174b9 100644 --- a/internal/service/workspaces/directory_test.go +++ b/internal/service/workspaces/directory_test.go @@ -6,11 +6,9 @@ package workspaces_test import ( "context" "fmt" - "reflect" "strings" "testing" - "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/workspaces" "github.com/aws/aws-sdk-go-v2/service/workspaces/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" @@ -426,283 +424,6 @@ func testAccDirectory_ipGroupIDs(t *testing.T) { }) } -func TestExpandSelfServicePermissions(t *testing.T) { - t.Parallel() - - cases := []struct { - input []interface{} - expected *types.SelfservicePermissions - }{ - // Empty - { - input: []interface{}{}, - expected: nil, - }, - // Full - { - input: []interface{}{ - map[string]interface{}{ - "change_compute_type": false, - "increase_volume_size": false, - "rebuild_workspace": true, - "restart_workspace": true, - "switch_running_mode": true, - }, - }, - expected: &types.SelfservicePermissions{ - ChangeComputeType: types.ReconnectEnumDisabled, - IncreaseVolumeSize: types.ReconnectEnumDisabled, - RebuildWorkspace: types.ReconnectEnumEnabled, - RestartWorkspace: types.ReconnectEnumEnabled, - SwitchRunningMode: types.ReconnectEnumEnabled, - }, - }, - } - - for _, c := range cases { - actual := tfworkspaces.ExpandSelfServicePermissions(c.input) - if !reflect.DeepEqual(actual, c.expected) { - t.Fatalf("expected\n\n%#+v\n\ngot\n\n%#+v", c.expected, actual) - } - } -} - -func TestFlattenSelfServicePermissions(t *testing.T) { - t.Parallel() - - cases := []struct { - input *types.SelfservicePermissions - expected []interface{} - }{ - // Empty - { - input: nil, - expected: []interface{}{}, - }, - // Full - { - input: &types.SelfservicePermissions{ - ChangeComputeType: types.ReconnectEnumDisabled, - IncreaseVolumeSize: types.ReconnectEnumDisabled, - RebuildWorkspace: types.ReconnectEnumEnabled, - RestartWorkspace: types.ReconnectEnumEnabled, - SwitchRunningMode: types.ReconnectEnumEnabled, - }, - expected: []interface{}{ - map[string]interface{}{ - "change_compute_type": false, - "increase_volume_size": false, - "rebuild_workspace": true, - "restart_workspace": true, - "switch_running_mode": true, - }, - }, - }, - } - - for _, c := range cases { - actual := 
tfworkspaces.FlattenSelfServicePermissions(c.input) - if !reflect.DeepEqual(actual, c.expected) { - t.Fatalf("expected\n\n%#+v\n\ngot\n\n%#+v", c.expected, actual) - } - } -} - -func TestExpandWorkspaceAccessProperties(t *testing.T) { - t.Parallel() - - cases := []struct { - input []interface{} - expected *types.WorkspaceAccessProperties - }{ - // Empty - { - input: []interface{}{}, - expected: nil, - }, - // Full - { - input: []interface{}{ - map[string]interface{}{ - "device_type_android": "ALLOW", - "device_type_chromeos": "ALLOW", - "device_type_ios": "ALLOW", - "device_type_linux": "DENY", - "device_type_osx": "ALLOW", - "device_type_web": "DENY", - "device_type_windows": "DENY", - "device_type_zeroclient": "DENY", - }, - }, - expected: &types.WorkspaceAccessProperties{ - DeviceTypeAndroid: types.AccessPropertyValue("ALLOW"), - DeviceTypeChromeOs: types.AccessPropertyValue("ALLOW"), - DeviceTypeIos: types.AccessPropertyValue("ALLOW"), - DeviceTypeLinux: types.AccessPropertyValue("DENY"), - DeviceTypeOsx: types.AccessPropertyValue("ALLOW"), - DeviceTypeWeb: types.AccessPropertyValue("DENY"), - DeviceTypeWindows: types.AccessPropertyValue("DENY"), - DeviceTypeZeroClient: types.AccessPropertyValue("DENY"), - }, - }, - } - - for _, c := range cases { - actual := tfworkspaces.ExpandWorkspaceAccessProperties(c.input) - if !reflect.DeepEqual(actual, c.expected) { - t.Fatalf("expected\n\n%#+v\n\ngot\n\n%#+v", c.expected, actual) - } - } -} - -func TestFlattenWorkspaceAccessProperties(t *testing.T) { - t.Parallel() - - cases := []struct { - input *types.WorkspaceAccessProperties - expected []interface{} - }{ - // Empty - { - input: nil, - expected: []interface{}{}, - }, - // Full - { - input: &types.WorkspaceAccessProperties{ - DeviceTypeAndroid: types.AccessPropertyValue("ALLOW"), - DeviceTypeChromeOs: types.AccessPropertyValue("ALLOW"), - DeviceTypeIos: types.AccessPropertyValue("ALLOW"), - DeviceTypeLinux: types.AccessPropertyValue("DENY"), - DeviceTypeOsx: types.AccessPropertyValue("ALLOW"), - DeviceTypeWeb: types.AccessPropertyValue("DENY"), - DeviceTypeWindows: types.AccessPropertyValue("DENY"), - DeviceTypeZeroClient: types.AccessPropertyValue("DENY"), - }, - expected: []interface{}{ - map[string]interface{}{ - "device_type_android": "ALLOW", - "device_type_chromeos": "ALLOW", - "device_type_ios": "ALLOW", - "device_type_linux": "DENY", - "device_type_osx": "ALLOW", - "device_type_web": "DENY", - "device_type_windows": "DENY", - "device_type_zeroclient": "DENY", - }, - }, - }, - } - - for _, c := range cases { - actual := tfworkspaces.FlattenWorkspaceAccessProperties(c.input) - if !reflect.DeepEqual(actual, c.expected) { - t.Fatalf("expected\n\n%#+v\n\ngot\n\n%#+v", c.expected, actual) - } - } -} - -func TestExpandWorkspaceCreationProperties(t *testing.T) { - t.Parallel() - - cases := []struct { - input []interface{} - expected *types.WorkspaceCreationProperties - }{ - // Empty - { - input: []interface{}{}, - expected: nil, - }, - // Full - { - input: []interface{}{ - map[string]interface{}{ - "custom_security_group_id": "sg-123456789012", - "default_ou": "OU=AWS,DC=Workgroup,DC=Example,DC=com", - "enable_internet_access": true, - "enable_maintenance_mode": true, - "user_enabled_as_local_administrator": true, - }, - }, - expected: &types.WorkspaceCreationProperties{ - CustomSecurityGroupId: aws.String("sg-123456789012"), - DefaultOu: aws.String("OU=AWS,DC=Workgroup,DC=Example,DC=com"), - EnableInternetAccess: aws.Bool(true), - EnableMaintenanceMode: aws.Bool(true), - 
UserEnabledAsLocalAdministrator: aws.Bool(true), - }, - }, - // Without Custom Security Group ID & Default OU - { - input: []interface{}{ - map[string]interface{}{ - "custom_security_group_id": "", - "default_ou": "", - "enable_internet_access": true, - "enable_maintenance_mode": true, - "user_enabled_as_local_administrator": true, - }, - }, - expected: &types.WorkspaceCreationProperties{ - CustomSecurityGroupId: nil, - DefaultOu: nil, - EnableInternetAccess: aws.Bool(true), - EnableMaintenanceMode: aws.Bool(true), - UserEnabledAsLocalAdministrator: aws.Bool(true), - }, - }, - } - - for _, c := range cases { - actual := tfworkspaces.ExpandWorkspaceCreationProperties(c.input) - if !reflect.DeepEqual(actual, c.expected) { - t.Fatalf("expected\n\n%#+v\n\ngot\n\n%#+v", c.expected, actual) - } - } -} - -func TestFlattenWorkspaceCreationProperties(t *testing.T) { - t.Parallel() - - cases := []struct { - input *types.DefaultWorkspaceCreationProperties - expected []interface{} - }{ - // Empty - { - input: nil, - expected: []interface{}{}, - }, - // Full - { - input: &types.DefaultWorkspaceCreationProperties{ - CustomSecurityGroupId: aws.String("sg-123456789012"), - DefaultOu: aws.String("OU=AWS,DC=Workgroup,DC=Example,DC=com"), - EnableInternetAccess: aws.Bool(true), - EnableMaintenanceMode: aws.Bool(true), - UserEnabledAsLocalAdministrator: aws.Bool(true), - }, - expected: []interface{}{ - map[string]interface{}{ - "custom_security_group_id": "sg-123456789012", - "default_ou": "OU=AWS,DC=Workgroup,DC=Example,DC=com", - "enable_internet_access": true, - "enable_maintenance_mode": true, - "user_enabled_as_local_administrator": true, - }, - }, - }, - } - - for _, c := range cases { - actual := tfworkspaces.FlattenWorkspaceCreationProperties(c.input) - if !reflect.DeepEqual(actual, c.expected) { - t.Fatalf("expected\n\n%#+v\n\ngot\n\n%#+v", c.expected, actual) - } - } -} - func testAccCheckDirectoryDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesClient(ctx) @@ -736,10 +457,6 @@ func testAccCheckDirectoryExists(ctx context.Context, n string, v *types.Workspa return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No WorkSpaces Directory ID is set") - } - conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesClient(ctx) output, err := tfworkspaces.FindDirectoryByID(ctx, conn, rs.Primary.ID) diff --git a/internal/service/workspaces/exports_test.go b/internal/service/workspaces/exports_test.go new file mode 100644 index 000000000000..d770166e197e --- /dev/null +++ b/internal/service/workspaces/exports_test.go @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package workspaces + +// Exports for use in tests only. +var ( + ResourceConnectionAlias = newConnectionAliasResource + ResourceDirectory = resourceDirectory + ResourceIPGroup = resourceIPGroup + ResourceWorkspace = resourceWorkspace + + FindConnectionAliasByID = findConnectionAliasByID + FindDirectoryByID = findDirectoryByID + FindIPGroupByID = findIPGroupByID + FindWorkspaceByID = findWorkspaceByID +) diff --git a/internal/service/workspaces/find.go b/internal/service/workspaces/find.go deleted file mode 100644 index 2e2e891fc10f..000000000000 --- a/internal/service/workspaces/find.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package workspaces - -import ( - "context" - "reflect" - - "github.com/aws/aws-sdk-go-v2/service/workspaces" - "github.com/aws/aws-sdk-go-v2/service/workspaces/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" -) - -func FindDirectoryByID(ctx context.Context, conn *workspaces.Client, id string) (*types.WorkspaceDirectory, error) { - input := &workspaces.DescribeWorkspaceDirectoriesInput{ - DirectoryIds: []string{id}, - } - - output, err := conn.DescribeWorkspaceDirectories(ctx, input) - - if err != nil { - return nil, err - } - - if output == nil || len(output.Directories) == 0 || reflect.DeepEqual(output.Directories[0], (types.WorkspaceDirectory{})) { - return nil, &retry.NotFoundError{ - Message: "Empty result", - LastRequest: input, - } - } - - // TODO Check for multiple results. - // TODO https://github.com/hashicorp/terraform-provider-aws/pull/17613. - - directory := output.Directories[0] - - if state := string(directory.State); state == string(types.WorkspaceDirectoryStateDeregistered) { - return nil, &retry.NotFoundError{ - Message: state, - LastRequest: input, - } - } - - return &directory, nil -} diff --git a/internal/service/workspaces/generate.go b/internal/service/workspaces/generate.go index 7f346afc8c8c..99210ea6eca2 100644 --- a/internal/service/workspaces/generate.go +++ b/internal/service/workspaces/generate.go @@ -1,6 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 +//go:generate go run ../../generate/listpages/main.go -ListOps=DescribeConnectionAliases,DescribeIpGroups,DescribeWorkspaceImages //go:generate go run ../../generate/tags/main.go -ListTags -ListTagsOp=DescribeTags -ListTagsInIDElem=ResourceId -ListTagsOutTagsElem=TagList -ServiceTagsSlice -TagOp=CreateTags -TagInIDElem=ResourceId -UntagOp=DeleteTags -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
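
The `//go:generate ... listpages` directive added above underpins the broader refactor in this diff: hand-rolled `Describe*` calls, `status.go`, and `wait.go` give way to generated pagination helpers plus package-private `find*` functions that surface `tfresource` not-found errors. Below is a minimal sketch of the resulting read-path shape; it is illustrative only and not part of the patch. The function name `exampleIPGroupRead` is hypothetical, while `findIPGroupByID`, `tfresource.NotFound`, and `sdkdiag.AppendErrorf` are assumed to behave exactly as they appear elsewhere in this diff.

```go
// Illustrative sketch, not applied code: how a Read function consumes a finder
// under the pattern this change adopts (package internal/service/workspaces).
package workspaces

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/service/workspaces"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag"
	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
	"github.com/hashicorp/terraform-provider-aws/names"
)

// exampleIPGroupRead is a hypothetical name; resourceIPGroupRead in this diff
// follows the same shape.
func exampleIPGroupRead(ctx context.Context, d *schema.ResourceData, conn *workspaces.Client) diag.Diagnostics {
	var diags diag.Diagnostics

	// findIPGroupByID pages through DescribeIpGroups via the generated
	// describeIPGroupsPages helper and asserts a single result.
	ipGroup, err := findIPGroupByID(ctx, conn, d.Id())

	// Outside of create, not-found means the remote object is gone: drop it
	// from state so Terraform plans a re-create instead of returning an error.
	if !d.IsNewResource() && tfresource.NotFound(err) {
		log.Printf("[WARN] WorkSpaces IP Group (%s) not found, removing from state", d.Id())
		d.SetId("")
		return diags
	}

	if err != nil {
		return sdkdiag.AppendErrorf(diags, "reading WorkSpaces IP Group (%s): %s", d.Id(), err)
	}

	d.Set(names.AttrDescription, ipGroup.GroupDesc)
	d.Set(names.AttrName, ipGroup.GroupName)

	return diags
}
```

Data sources in this diff take the companion route instead of the state-removal branch, wrapping finder errors with `tfresource.SingularDataSourceFindError`, since a missing object there is a hard error rather than drift.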
diff --git a/internal/service/workspaces/image_data_source.go b/internal/service/workspaces/image_data_source.go index 8952a091bcb1..d6325730026d 100644 --- a/internal/service/workspaces/image_data_source.go +++ b/internal/service/workspaces/image_data_source.go @@ -7,19 +7,25 @@ import ( "context" "github.com/aws/aws-sdk-go-v2/service/workspaces" + "github.com/aws/aws-sdk-go-v2/service/workspaces/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_workspaces_image") -func DataSourceImage() *schema.Resource { +// @SDKDataSource("aws_workspaces_image", name="Image") +func dataSourceImage() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceImageRead, Schema: map[string]*schema.Schema{ + names.AttrDescription: { + Type: schema.TypeString, + Computed: true, + }, "image_id": { Type: schema.TypeString, Required: true, @@ -28,10 +34,6 @@ func DataSourceImage() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrDescription: { - Type: schema.TypeString, - Computed: true, - }, "operating_system_type": { Type: schema.TypeString, Computed: true, @@ -53,25 +55,56 @@ func dataSourceImageRead(ctx context.Context, d *schema.ResourceData, meta inter conn := meta.(*conns.AWSClient).WorkSpacesClient(ctx) imageID := d.Get("image_id").(string) - input := &workspaces.DescribeWorkspaceImagesInput{ - ImageIds: []string{imageID}, - } + image, err := findImageByID(ctx, conn, imageID) - resp, err := conn.DescribeWorkspaceImages(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "describe workspaces images: %s", err) - } - if len(resp.Images) == 0 { - return sdkdiag.AppendErrorf(diags, "Workspace image %s was not found", imageID) + return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("WorkSpaces Image", err)) } - image := resp.Images[0] d.SetId(imageID) - d.Set(names.AttrName, image.Name) d.Set(names.AttrDescription, image.Description) + d.Set(names.AttrName, image.Name) d.Set("operating_system_type", image.OperatingSystem.Type) d.Set("required_tenancy", image.RequiredTenancy) d.Set(names.AttrState, image.State) return diags } + +func findImageByID(ctx context.Context, conn *workspaces.Client, id string) (*types.WorkspaceImage, error) { + input := &workspaces.DescribeWorkspaceImagesInput{ + ImageIds: []string{id}, + } + + return findImage(ctx, conn, input) +} + +func findImage(ctx context.Context, conn *workspaces.Client, input *workspaces.DescribeWorkspaceImagesInput) (*types.WorkspaceImage, error) { + output, err := findImages(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findImages(ctx context.Context, conn *workspaces.Client, input *workspaces.DescribeWorkspaceImagesInput) ([]types.WorkspaceImage, error) { + var output []types.WorkspaceImage + + err := describeWorkspaceImagesPages(ctx, conn, input, func(page *workspaces.DescribeWorkspaceImagesOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + output = append(output, page.Images...) 
+ + return !lastPage + }) + + if err != nil { + return nil, err + } + + return output, nil +} diff --git a/internal/service/workspaces/ip_group.go b/internal/service/workspaces/ip_group.go index b4c5dd88e02e..8c717a1c7dd7 100644 --- a/internal/service/workspaces/ip_group.go +++ b/internal/service/workspaces/ip_group.go @@ -14,33 +14,36 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_workspaces_ip_group", name="IP Group") // @Tags(identifierAttribute="id") -func ResourceIPGroup() *schema.Resource { +func resourceIPGroup() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceIPGroupCreate, ReadWithoutTimeout: resourceIPGroupRead, UpdateWithoutTimeout: resourceIPGroupUpdate, DeleteWithoutTimeout: resourceIPGroupDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ - names.AttrName: { + names.AttrDescription: { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, }, - names.AttrDescription: { + names.AttrName: { Type: schema.TypeString, - Optional: true, + Required: true, ForceNew: true, }, "rules": { @@ -48,15 +51,15 @@ func ResourceIPGroup() *schema.Resource { Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + names.AttrDescription: { + Type: schema.TypeString, + Optional: true, + }, names.AttrSource: { Type: schema.TypeString, Required: true, ValidateFunc: validation.IsCIDR, }, - names.AttrDescription: { - Type: schema.TypeString, - Optional: true, - }, }, }, }, @@ -72,19 +75,21 @@ func resourceIPGroupCreate(ctx context.Context, d *schema.ResourceData, meta int var diags diag.Diagnostics conn := meta.(*conns.AWSClient).WorkSpacesClient(ctx) - rules := d.Get("rules").(*schema.Set).List() - resp, err := conn.CreateIpGroup(ctx, &workspaces.CreateIpGroupInput{ - GroupName: aws.String(d.Get(names.AttrName).(string)), + name := d.Get(names.AttrName).(string) + input := &workspaces.CreateIpGroupInput{ GroupDesc: aws.String(d.Get(names.AttrDescription).(string)), - UserRules: expandIPGroupRules(rules), + GroupName: aws.String(name), Tags: getTagsIn(ctx), - }) + UserRules: expandIPRuleItems(d.Get("rules").(*schema.Set).List()), + } + + output, err := conn.CreateIpGroup(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating WorkSpaces IP Group: %s", err) + return sdkdiag.AppendErrorf(diags, "creating WorkSpaces IP Group (%s): %s", name, err) } - d.SetId(aws.ToString(resp.GroupId)) + d.SetId(aws.ToString(output.GroupId)) return append(diags, resourceIPGroupRead(ctx, d, meta)...) 
} @@ -93,32 +98,23 @@ func resourceIPGroupRead(ctx context.Context, d *schema.ResourceData, meta inter var diags diag.Diagnostics conn := meta.(*conns.AWSClient).WorkSpacesClient(ctx) - resp, err := conn.DescribeIpGroups(ctx, &workspaces.DescribeIpGroupsInput{ - GroupIds: []string{d.Id()}, - }) - if err != nil { - if len(resp.Result) == 0 { - log.Printf("[WARN] WorkSpaces IP Group (%s) not found, removing from state", d.Id()) - d.SetId("") - return diags - } - - return sdkdiag.AppendErrorf(diags, "reading WorkSpaces IP Group (%s): %s", d.Id(), err) - } - - ipGroups := resp.Result + ipGroup, err := findIPGroupByID(ctx, conn, d.Id()) - if len(ipGroups) == 0 { - log.Printf("[WARN] WorkSpaces Ip Group (%s) not found, removing from state", d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] WorkSpaces IP Group (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - ipGroup := ipGroups[0] + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading WorkSpaces IP Group (%s): %s", d.Id(), err) + } - d.Set(names.AttrName, ipGroup.GroupName) d.Set(names.AttrDescription, ipGroup.GroupDesc) - d.Set("rules", flattenIPGroupRules(ipGroup.UserRules)) + d.Set(names.AttrName, ipGroup.GroupName) + if err := d.Set("rules", flattenIPRuleItems(ipGroup.UserRules)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting rules: %s", err) + } return diags } @@ -128,12 +124,13 @@ func resourceIPGroupUpdate(ctx context.Context, d *schema.ResourceData, meta int conn := meta.(*conns.AWSClient).WorkSpacesClient(ctx) if d.HasChange("rules") { - rules := d.Get("rules").(*schema.Set).List() - - _, err := conn.UpdateRulesOfIpGroup(ctx, &workspaces.UpdateRulesOfIpGroupInput{ + input := &workspaces.UpdateRulesOfIpGroupInput{ GroupId: aws.String(d.Id()), - UserRules: expandIPGroupRules(rules), - }) + UserRules: expandIPRuleItems(d.Get("rules").(*schema.Set).List()), + } + + _, err := conn.UpdateRulesOfIpGroup(ctx, input) + if err != nil { return sdkdiag.AppendErrorf(diags, "updating WorkSpaces IP Group (%s): %s", d.Id(), err) } @@ -146,82 +143,116 @@ func resourceIPGroupDelete(ctx context.Context, d *schema.ResourceData, meta int var diags diag.Diagnostics conn := meta.(*conns.AWSClient).WorkSpacesClient(ctx) - var found bool - log.Printf("[DEBUG] Finding directories associated with WorkSpaces IP Group (%s)", d.Id()) - paginator := workspaces.NewDescribeWorkspaceDirectoriesPaginator(conn, &workspaces.DescribeWorkspaceDirectoriesInput{}, func(out *workspaces.DescribeWorkspaceDirectoriesPaginatorOptions) {}) + input := &workspaces.DescribeWorkspaceDirectoriesInput{} + directories, err := findDirectories(ctx, conn, input) - for paginator.HasMorePages() { - out, err := paginator.NextPage(ctx) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading WorkSpaces Directories: %s", err) + } - if err != nil { - diags = sdkdiag.AppendErrorf(diags, "describing WorkSpaces Directories: %s", err) - } - for _, dir := range out.Directories { - for _, groupID := range dir.IpGroupIds { - if groupID == d.Id() { - found = true - log.Printf("[DEBUG] WorkSpaces IP Group (%s) associated with WorkSpaces Directory (%s), disassociating", groupID, aws.ToString(dir.DirectoryId)) - _, err := conn.DisassociateIpGroups(ctx, &workspaces.DisassociateIpGroupsInput{ - DirectoryId: dir.DirectoryId, - GroupIds: []string{d.Id()}, - }) - if err != nil { - diags = sdkdiag.AppendErrorf(diags, "disassociating WorkSpaces IP Group (%s) from WorkSpaces Directory (%s): %s", d.Id(), 
aws.ToString(dir.DirectoryId), err) - continue - } - log.Printf("[INFO] WorkSpaces IP Group (%s) disassociated from WorkSpaces Directory (%s)", d.Id(), aws.ToString(dir.DirectoryId)) + for _, v := range directories { + directoryID := aws.ToString(v.DirectoryId) + for _, v := range v.IpGroupIds { + if v == d.Id() { + input := &workspaces.DisassociateIpGroupsInput{ + DirectoryId: aws.String(directoryID), + GroupIds: []string{d.Id()}, + } + + _, err := conn.DisassociateIpGroups(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "disassociating WorkSpaces Directory (%s) IP Group (%s): %s", directoryID, d.Id(), err) } } } } - if diags.HasError() { + log.Printf("[DEBUG] Deleting WorkSpaces IP Group (%s)", d.Id()) + _, err = conn.DeleteIpGroup(ctx, &workspaces.DeleteIpGroupInput{ + GroupId: aws.String(d.Id()), + }) + + if errs.IsA[*types.ResourceNotFoundException](err) { return diags } - if !found { - log.Printf("[DEBUG] WorkSpaces IP Group (%s) not associated with any WorkSpaces Directories", d.Id()) + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting WorkSpaces IP Group (%s): %s", d.Id(), err) } - log.Printf("[DEBUG] Deleting WorkSpaces IP Group (%s)", d.Id()) - _, err := conn.DeleteIpGroup(ctx, &workspaces.DeleteIpGroupInput{ - GroupId: aws.String(d.Id()), + return diags +} + +func findIPGroupByID(ctx context.Context, conn *workspaces.Client, id string) (*types.WorkspacesIpGroup, error) { + input := &workspaces.DescribeIpGroupsInput{ + GroupIds: []string{id}, + } + + return findIPGroup(ctx, conn, input) +} + +func findIPGroup(ctx context.Context, conn *workspaces.Client, input *workspaces.DescribeIpGroupsInput) (*types.WorkspacesIpGroup, error) { + output, err := findIPGroups(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findIPGroups(ctx context.Context, conn *workspaces.Client, input *workspaces.DescribeIpGroupsInput) ([]types.WorkspacesIpGroup, error) { + var output []types.WorkspacesIpGroup + + err := describeIPGroupsPages(ctx, conn, input, func(page *workspaces.DescribeIpGroupsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + output = append(output, page.Result...) 
+ + return !lastPage }) + if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting WorkSpaces IP Group (%s): %s", d.Id(), err) + return nil, err } - log.Printf("[INFO] WorkSpaces IP Group (%s) deleted", d.Id()) - return diags + return output, nil } -func expandIPGroupRules(rules []interface{}) []types.IpRuleItem { - var result []types.IpRuleItem - for _, rule := range rules { - r := rule.(map[string]interface{}) +func expandIPRuleItems(tfList []interface{}) []types.IpRuleItem { + var apiObjects []types.IpRuleItem - result = append(result, types.IpRuleItem{ - IpRule: aws.String(r[names.AttrSource].(string)), - RuleDesc: aws.String(r[names.AttrDescription].(string)), + for _, tfMapRaw := range tfList { + tfMap := tfMapRaw.(map[string]interface{}) + + apiObjects = append(apiObjects, types.IpRuleItem{ + IpRule: aws.String(tfMap[names.AttrSource].(string)), + RuleDesc: aws.String(tfMap[names.AttrDescription].(string)), }) } - return result + + return apiObjects } -func flattenIPGroupRules(rules []types.IpRuleItem) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(rules)) - for _, rule := range rules { - r := map[string]interface{}{} +func flattenIPRuleItems(apiObjects []types.IpRuleItem) []interface{} { + tfList := make([]interface{}, 0, len(apiObjects)) - if v := rule.IpRule; v != nil { - r[names.AttrSource] = aws.ToString(v) + for _, apiObject := range apiObjects { + tfMap := map[string]interface{}{} + + if v := apiObject.IpRule; v != nil { + tfMap[names.AttrSource] = aws.ToString(v) } - if v := rule.RuleDesc; v != nil { - r[names.AttrDescription] = aws.ToString(rule.RuleDesc) + if v := apiObject.RuleDesc; v != nil { + tfMap[names.AttrDescription] = aws.ToString(apiObject.RuleDesc) } - result = append(result, r) + tfList = append(tfList, tfMap) } - return result + + return tfList } diff --git a/internal/service/workspaces/ip_group_test.go b/internal/service/workspaces/ip_group_test.go index 5a13efd04d71..5c9ddeddc0c8 100644 --- a/internal/service/workspaces/ip_group_test.go +++ b/internal/service/workspaces/ip_group_test.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfworkspaces "github.com/hashicorp/terraform-provider-aws/internal/service/workspaces" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -181,22 +182,18 @@ func testAccCheckIPGroupDestroy(ctx context.Context) resource.TestCheckFunc { } conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesClient(ctx) - resp, err := conn.DescribeIpGroups(ctx, &workspaces.DescribeIpGroupsInput{ - GroupIds: []string{rs.Primary.ID}, - }) - if err != nil { - return fmt.Errorf("error Describing WorkSpaces IP Group: %w", err) - } + _, err := tfworkspaces.FindIPGroupByID(ctx, conn, rs.Primary.ID) - // Return nil if the IP Group is already destroyed (does not exist) - if len(resp.Result) == 0 { - return nil + if tfresource.NotFound(err) { + continue } - if *resp.Result[0].GroupId == rs.Primary.ID { - return fmt.Errorf("WorkSpaces IP Group %s still exists", rs.Primary.ID) + if err != nil { + return err } + + return fmt.Errorf("WorkSpaces IP Group %s still exists", rs.Primary.ID) } return nil @@ -210,24 +207,17 @@ func testAccCheckIPGroupExists(ctx context.Context, n string, v *types.Workspace return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Workpsaces IP Group ID is set") - } - conn := 
acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesClient(ctx) - resp, err := conn.DescribeIpGroups(ctx, &workspaces.DescribeIpGroupsInput{ - GroupIds: []string{rs.Primary.ID}, - }) + + output, err := tfworkspaces.FindIPGroupByID(ctx, conn, rs.Primary.ID) + if err != nil { return err } - if *resp.Result[0].GroupId == rs.Primary.ID { - *v = resp.Result[0] - return nil - } + *v = *output - return fmt.Errorf("WorkSpaces IP Group (%s) not found", rs.Primary.ID) + return nil } } diff --git a/internal/service/workspaces/list_pages_gen.go b/internal/service/workspaces/list_pages_gen.go new file mode 100644 index 000000000000..69291240dcc3 --- /dev/null +++ b/internal/service/workspaces/list_pages_gen.go @@ -0,0 +1,59 @@ +// Code generated by "internal/generate/listpages/main.go -ListOps=DescribeConnectionAliases,DescribeIpGroups,DescribeWorkspaceImages"; DO NOT EDIT. + +package workspaces + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/workspaces" +) + +func describeConnectionAliasesPages(ctx context.Context, conn *workspaces.Client, input *workspaces.DescribeConnectionAliasesInput, fn func(*workspaces.DescribeConnectionAliasesOutput, bool) bool) error { + for { + output, err := conn.DescribeConnectionAliases(ctx, input) + if err != nil { + return err + } + + lastPage := aws.ToString(output.NextToken) == "" + if !fn(output, lastPage) || lastPage { + break + } + + input.NextToken = output.NextToken + } + return nil +} +func describeIPGroupsPages(ctx context.Context, conn *workspaces.Client, input *workspaces.DescribeIpGroupsInput, fn func(*workspaces.DescribeIpGroupsOutput, bool) bool) error { + for { + output, err := conn.DescribeIpGroups(ctx, input) + if err != nil { + return err + } + + lastPage := aws.ToString(output.NextToken) == "" + if !fn(output, lastPage) || lastPage { + break + } + + input.NextToken = output.NextToken + } + return nil +} +func describeWorkspaceImagesPages(ctx context.Context, conn *workspaces.Client, input *workspaces.DescribeWorkspaceImagesInput, fn func(*workspaces.DescribeWorkspaceImagesOutput, bool) bool) error { + for { + output, err := conn.DescribeWorkspaceImages(ctx, input) + if err != nil { + return err + } + + lastPage := aws.ToString(output.NextToken) == "" + if !fn(output, lastPage) || lastPage { + break + } + + input.NextToken = output.NextToken + } + return nil +} diff --git a/internal/service/workspaces/service_package_gen.go b/internal/service/workspaces/service_package_gen.go index 7a47eafcc0c1..701e6fd2eac8 100644 --- a/internal/service/workspaces/service_package_gen.go +++ b/internal/service/workspaces/service_package_gen.go @@ -21,7 +21,7 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.Serv func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { return []*types.ServicePackageFrameworkResource{ { - Factory: newResourceConnectionAlias, + Factory: newConnectionAliasResource, Name: "Connection Alias", Tags: &types.ServicePackageResourceTags{ IdentifierAttribute: names.AttrID, @@ -33,20 +33,30 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { return []*types.ServicePackageSDKDataSource{ { - Factory: DataSourceBundle, + Factory: dataSourceBundle, TypeName: "aws_workspaces_bundle", + Name: "Bundle", }, { - Factory: DataSourceDirectory, + Factory: dataSourceDirectory, TypeName: 
"aws_workspaces_directory", + Name: "Directory", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrID, + }, }, { - Factory: DataSourceImage, + Factory: dataSourceImage, TypeName: "aws_workspaces_image", + Name: "Image", }, { - Factory: DataSourceWorkspace, + Factory: dataSourceWorkspace, TypeName: "aws_workspaces_workspace", + Name: "Workspace", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrID, + }, }, } } @@ -54,7 +64,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceDirectory, + Factory: resourceDirectory, TypeName: "aws_workspaces_directory", Name: "Directory", Tags: &types.ServicePackageResourceTags{ @@ -62,7 +72,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceIPGroup, + Factory: resourceIPGroup, TypeName: "aws_workspaces_ip_group", Name: "IP Group", Tags: &types.ServicePackageResourceTags{ @@ -70,7 +80,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceWorkspace, + Factory: resourceWorkspace, TypeName: "aws_workspaces_workspace", Name: "Workspace", Tags: &types.ServicePackageResourceTags{ diff --git a/internal/service/workspaces/status.go b/internal/service/workspaces/status.go deleted file mode 100644 index 5601ba964d25..000000000000 --- a/internal/service/workspaces/status.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package workspaces - -import ( - "context" - - "github.com/aws/aws-sdk-go-v2/service/workspaces" - "github.com/aws/aws-sdk-go-v2/service/workspaces/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func StatusDirectoryState(ctx context.Context, conn *workspaces.Client, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindDirectoryByID(ctx, conn, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, string(output.State), nil - } -} - -// nosemgrep:ci.workspaces-in-func-name -func StatusWorkspaceState(ctx context.Context, conn *workspaces.Client, workspaceID string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := conn.DescribeWorkspaces(ctx, &workspaces.DescribeWorkspacesInput{ - WorkspaceIds: []string{workspaceID}, - }) - if err != nil { - return nil, string(types.WorkspaceStateError), err - } - - if len(output.Workspaces) == 0 { - return output, string(types.WorkspaceStateTerminated), nil - } - - workspace := output.Workspaces[0] - - // https://docs.aws.amazon.com/workspaces/latest/api/API_TerminateWorkspaces.html - // State TERMINATED is overridden with TERMINATING to catch up directory metadata clean up. 
- if workspace.State == types.WorkspaceStateTerminated { - return workspace, string(types.WorkspaceStateTerminating), nil - } - - return workspace, string(workspace.State), nil - } -} diff --git a/internal/service/workspaces/sweep.go b/internal/service/workspaces/sweep.go index 8c96169bdb1a..ac07adc543c5 100644 --- a/internal/service/workspaces/sweep.go +++ b/internal/service/workspaces/sweep.go @@ -4,7 +4,6 @@ package workspaces import ( - "context" "fmt" "log" @@ -60,7 +59,7 @@ func sweepDirectories(region string) error { } for _, v := range page.Directories { - r := ResourceDirectory() + r := resourceDirectory() d := r.Data(nil) d.SetId(aws.ToString(v.DirectoryId)) @@ -93,7 +92,7 @@ func sweepIPGroups(region string) error { } for _, v := range page.Result { - r := ResourceIPGroup() + r := resourceIPGroup() d := r.Data(nil) d.SetId(aws.ToString(v.GroupId)) @@ -121,23 +120,6 @@ func sweepIPGroups(region string) error { return nil } -func describeIPGroupsPages(ctx context.Context, conn *workspaces.Client, input *workspaces.DescribeIpGroupsInput, fn func(*workspaces.DescribeIpGroupsOutput, bool) bool) error { - for { - output, err := conn.DescribeIpGroups(ctx, input) - if err != nil { - return err - } - - lastPage := aws.ToString(output.NextToken) == "" - if !fn(output, lastPage) || lastPage { - break - } - - input.NextToken = output.NextToken - } - return nil -} - func sweepWorkspace(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) @@ -162,7 +144,7 @@ func sweepWorkspace(region string) error { } for _, v := range page.Workspaces { - r := ResourceWorkspace() + r := resourceWorkspace() d := r.Data(nil) d.SetId(aws.ToString(v.WorkspaceId)) diff --git a/internal/service/workspaces/wait.go b/internal/service/workspaces/wait.go deleted file mode 100644 index 820305d6cd9b..000000000000 --- a/internal/service/workspaces/wait.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package workspaces - -import ( - "context" - "time" - - "github.com/aws/aws-sdk-go-v2/service/workspaces" - "github.com/aws/aws-sdk-go-v2/service/workspaces/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/enum" -) - -const ( - DirectoryDeregisterInvalidResourceStateTimeout = 2 * time.Minute - DirectoryRegisterInvalidResourceStateTimeout = 2 * time.Minute - - // Maximum amount of time to wait for a Directory to return Registered - DirectoryRegisteredTimeout = 10 * time.Minute - - // Maximum amount of time to wait for a Directory to return Deregistered - DirectoryDeregisteredTimeout = 10 * time.Minute - - // Maximum amount of time to wait for a WorkSpace to return Available - WorkspaceAvailableTimeout = 30 * time.Minute - - // Maximum amount of time to wait for a WorkSpace while returning Updating - WorkspaceUpdatingTimeout = 10 * time.Minute - - // Amount of time to delay before checking WorkSpace when updating - WorkspaceUpdatingDelay = 1 * time.Minute - - // Maximum amount of time to wait for a WorkSpace to return Terminated - WorkspaceTerminatedTimeout = 10 * time.Minute -) - -func WaitDirectoryRegistered(ctx context.Context, conn *workspaces.Client, directoryID string) (*types.WorkspaceDirectory, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(types.WorkspaceDirectoryStateRegistering), - Target: enum.Slice(types.WorkspaceDirectoryStateRegistered), - Refresh: StatusDirectoryState(ctx, conn, directoryID), - Timeout: DirectoryRegisteredTimeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if v, ok := outputRaw.(*types.WorkspaceDirectory); ok { - return v, err - } - - return nil, err -} - -func WaitDirectoryDeregistered(ctx context.Context, conn *workspaces.Client, directoryID string) (*types.WorkspaceDirectory, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice( - types.WorkspaceDirectoryStateRegistering, - types.WorkspaceDirectoryStateRegistered, - types.WorkspaceDirectoryStateDeregistering, - ), - Target: []string{}, - Refresh: StatusDirectoryState(ctx, conn, directoryID), - Timeout: DirectoryDeregisteredTimeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if v, ok := outputRaw.(*types.WorkspaceDirectory); ok { - return v, err - } - - return nil, err -} - -func WaitWorkspaceAvailable(ctx context.Context, conn *workspaces.Client, workspaceID string, timeout time.Duration) (*types.Workspace, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice( - types.WorkspaceStatePending, - types.WorkspaceStateStarting, - ), - Target: enum.Slice(types.WorkspaceStateAvailable), - Refresh: StatusWorkspaceState(ctx, conn, workspaceID), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if v, ok := outputRaw.(*types.Workspace); ok { - return v, err - } - - return nil, err -} - -func WaitWorkspaceTerminated(ctx context.Context, conn *workspaces.Client, workspaceID string, timeout time.Duration) (*types.Workspace, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice( - types.WorkspaceStatePending, - types.WorkspaceStateAvailable, - types.WorkspaceStateImpaired, - types.WorkspaceStateUnhealthy, - types.WorkspaceStateRebooting, - types.WorkspaceStateStarting, - types.WorkspaceStateRebuilding, - types.WorkspaceStateRestoring, - types.WorkspaceStateMaintenance, - types.WorkspaceStateAdminMaintenance, - types.WorkspaceStateSuspended, - 
types.WorkspaceStateUpdating, - types.WorkspaceStateStopping, - types.WorkspaceStateStopped, - types.WorkspaceStateTerminating, - types.WorkspaceStateError, - ), - Target: enum.Slice(types.WorkspaceStateTerminated), - Refresh: StatusWorkspaceState(ctx, conn, workspaceID), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if v, ok := outputRaw.(*types.Workspace); ok { - return v, err - } - - return nil, err -} - -func WaitWorkspaceUpdated(ctx context.Context, conn *workspaces.Client, workspaceID string, timeout time.Duration) (*types.Workspace, error) { - // OperationInProgressException: The properties of this WorkSpace are currently under modification. Please try again in a moment. - // AWS Workspaces service doesn't change instance status to "Updating" during property modification. Respective AWS Support feature request has been created. Meanwhile, artificial delay is placed here as a workaround. - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice( - types.WorkspaceStateUpdating, - ), - Target: enum.Slice( - types.WorkspaceStateAvailable, - types.WorkspaceStateStopped, - ), - Refresh: StatusWorkspaceState(ctx, conn, workspaceID), - Delay: WorkspaceUpdatingDelay, - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if v, ok := outputRaw.(*types.Workspace); ok { - return v, err - } - - return nil, err -} diff --git a/internal/service/workspaces/workspace.go b/internal/service/workspaces/workspace.go index 4a4667b5939c..bfec6562c35f 100644 --- a/internal/service/workspaces/workspace.go +++ b/internal/service/workspaces/workspace.go @@ -13,33 +13,43 @@ import ( "github.com/aws/aws-sdk-go-v2/service/workspaces" "github.com/aws/aws-sdk-go-v2/service/workspaces/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + itypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_workspaces_workspace", name="Workspace") // @Tags(identifierAttribute="id") -func ResourceWorkspace() *schema.Resource { +func resourceWorkspace() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceWorkspaceCreate, ReadWithoutTimeout: resourceWorkspaceRead, UpdateWithoutTimeout: resourceWorkspaceUpdate, DeleteWithoutTimeout: resourceWorkspaceDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, + Schema: map[string]*schema.Schema{ "bundle_id": { Type: schema.TypeString, Required: true, ForceNew: true, }, + "computer_name": { + Type: schema.TypeString, + Computed: true, + }, "directory_id": { Type: schema.TypeString, Required: true, @@ -49,20 +59,18 @@ func ResourceWorkspace() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "computer_name": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrState: { - Type: schema.TypeString, - 
Computed: true, - }, "root_volume_encryption_enabled": { Type: schema.TypeBool, Optional: true, ForceNew: true, Default: false, }, + names.AttrState: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), names.AttrUserName: { Type: schema.TypeString, Required: true, @@ -87,10 +95,10 @@ func ResourceWorkspace() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "compute_type_name": { - Type: schema.TypeString, - Optional: true, - Default: string(types.ComputeValue), - ValidateFunc: validation.StringInSlice(flattenComputeEnumValues(types.Compute("").Values()), false), + Type: schema.TypeString, + Optional: true, + Default: types.ComputeValue, + ValidateDiagFunc: enum.Validate[types.Compute](), }, "root_volume_size_gib": { Type: schema.TypeInt, @@ -104,7 +112,7 @@ func ResourceWorkspace() *schema.Resource { "running_mode": { Type: schema.TypeString, Optional: true, - Default: string(types.RunningModeAlwaysOn), + Default: types.RunningModeAlwaysOn, ValidateFunc: validation.StringInSlice(enum.Slice( types.RunningModeAlwaysOn, types.RunningModeAutoStop, @@ -135,13 +143,12 @@ func ResourceWorkspace() *schema.Resource { }, }, }, - names.AttrTags: tftags.TagsSchema(), - names.AttrTagsAll: tftags.TagsSchemaComputed(), }, + Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(WorkspaceAvailableTimeout), - Update: schema.DefaultTimeout(WorkspaceUpdatingTimeout), - Delete: schema.DefaultTimeout(WorkspaceTerminatedTimeout), + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), }, CustomizeDiff: verify.SetTagsDiff, @@ -155,36 +162,34 @@ func resourceWorkspaceCreate(ctx context.Context, d *schema.ResourceData, meta i input := types.WorkspaceRequest{ BundleId: aws.String(d.Get("bundle_id").(string)), DirectoryId: aws.String(d.Get("directory_id").(string)), - UserName: aws.String(d.Get(names.AttrUserName).(string)), RootVolumeEncryptionEnabled: aws.Bool(d.Get("root_volume_encryption_enabled").(bool)), - UserVolumeEncryptionEnabled: aws.Bool(d.Get("user_volume_encryption_enabled").(bool)), Tags: getTagsIn(ctx), + UserName: aws.String(d.Get(names.AttrUserName).(string)), + UserVolumeEncryptionEnabled: aws.Bool(d.Get("user_volume_encryption_enabled").(bool)), + WorkspaceProperties: expandWorkspaceProperties(d.Get("workspace_properties").([]interface{})), } if v, ok := d.GetOk("volume_encryption_key"); ok { input.VolumeEncryptionKey = aws.String(v.(string)) } - input.WorkspaceProperties = ExpandWorkspaceProperties(d.Get("workspace_properties").([]interface{})) - - resp, err := conn.CreateWorkspaces(ctx, &workspaces.CreateWorkspacesInput{ + output, err := conn.CreateWorkspaces(ctx, &workspaces.CreateWorkspacesInput{ Workspaces: []types.WorkspaceRequest{input}, }) - if err != nil { - return sdkdiag.AppendErrorf(diags, "creating WorkSpaces Workspace: %s", err) + + if err == nil && len(output.FailedRequests) > 0 { + v := output.FailedRequests[0] + err = fmt.Errorf("%s: %s", aws.ToString(v.ErrorCode), aws.ToString(v.ErrorMessage)) } - wsFail := resp.FailedRequests - if len(wsFail) > 0 { - return sdkdiag.AppendErrorf(diags, "creating WorkSpaces Workspace: %s: %s", aws.ToString(wsFail[0].ErrorCode), aws.ToString(wsFail[0].ErrorMessage)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "creating WorkSpaces Workspace: %s", err) } - workspaceID := 
aws.ToString(resp.PendingRequests[0].WorkspaceId) - d.SetId(workspaceID) + d.SetId(aws.ToString(output.PendingRequests[0].WorkspaceId)) - _, err = WaitWorkspaceAvailable(ctx, conn, workspaceID, d.Timeout(schema.TimeoutCreate)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "creating WorkSpaces Workspace: waiting for completion: %s", err) + if _, err := waitWorkspaceAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for WorkSpaces Workspace (%s) create: %s", d.Id(), err) } return append(diags, resourceWorkspaceRead(ctx, d, meta)...) @@ -194,28 +199,29 @@ func resourceWorkspaceRead(ctx context.Context, d *schema.ResourceData, meta int var diags diag.Diagnostics conn := meta.(*conns.AWSClient).WorkSpacesClient(ctx) - rawOutput, state, err := StatusWorkspaceState(ctx, conn, d.Id())() - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading WorkSpaces Workspace (%s): %s", d.Id(), err) - } - if state == string(types.WorkspaceStateTerminated) { + workspace, err := findWorkspaceByID(ctx, conn, d.Id()) + + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] WorkSpaces Workspace (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - workspace := rawOutput.(types.Workspace) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading WorkSpaces Workspace (%s): %s", d.Id(), err) + } + d.Set("bundle_id", workspace.BundleId) + d.Set("computer_name", workspace.ComputerName) d.Set("directory_id", workspace.DirectoryId) d.Set(names.AttrIPAddress, workspace.IpAddress) - d.Set("computer_name", workspace.ComputerName) - d.Set(names.AttrState, workspace.State) d.Set("root_volume_encryption_enabled", workspace.RootVolumeEncryptionEnabled) + d.Set(names.AttrState, workspace.State) d.Set(names.AttrUserName, workspace.UserName) d.Set("user_volume_encryption_enabled", workspace.UserVolumeEncryptionEnabled) d.Set("volume_encryption_key", workspace.VolumeEncryptionKey) - if err := d.Set("workspace_properties", FlattenWorkspaceProperties(workspace.WorkspaceProperties)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting workspace properties: %s", err) + if err := d.Set("workspace_properties", flattenWorkspaceProperties(workspace.WorkspaceProperties)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting workspace_properties: %s", err) } return diags @@ -229,33 +235,33 @@ func resourceWorkspaceUpdate(ctx context.Context, d *schema.ResourceData, meta i // I've create AWS Support feature request to allow multiple properties modification in a time. 
// https://docs.aws.amazon.com/workspaces/latest/adminguide/modify-workspaces.html - if d.HasChange("workspace_properties.0.compute_type_name") { - if err := workspacePropertyUpdate(ctx, "compute_type_name", conn, d); err != nil { - return sdkdiag.AppendErrorf(diags, "updating WorkSpaces Workspace (%s): %s", d.Id(), err) + if key := "workspace_properties.0.compute_type_name"; d.HasChange(key) { + if err := workspacePropertyUpdate(ctx, conn, d, key); err != nil { + return sdkdiag.AppendFromErr(diags, err) } } - if d.HasChange("workspace_properties.0.root_volume_size_gib") { - if err := workspacePropertyUpdate(ctx, "root_volume_size_gib", conn, d); err != nil { - return sdkdiag.AppendErrorf(diags, "updating WorkSpaces Workspace (%s): %s", d.Id(), err) + if key := "workspace_properties.0.root_volume_size_gib"; d.HasChange(key) { + if err := workspacePropertyUpdate(ctx, conn, d, key); err != nil { + return sdkdiag.AppendFromErr(diags, err) } } - if d.HasChange("workspace_properties.0.running_mode") { - if err := workspacePropertyUpdate(ctx, "running_mode", conn, d); err != nil { - return sdkdiag.AppendErrorf(diags, "updating WorkSpaces Workspace (%s): %s", d.Id(), err) + if key := "workspace_properties.0.running_mode"; d.HasChange(key) { + if err := workspacePropertyUpdate(ctx, conn, d, key); err != nil { + return sdkdiag.AppendFromErr(diags, err) } } - if d.HasChange("workspace_properties.0.running_mode_auto_stop_timeout_in_minutes") { - if err := workspacePropertyUpdate(ctx, "running_mode_auto_stop_timeout_in_minutes", conn, d); err != nil { - return sdkdiag.AppendErrorf(diags, "updating WorkSpaces Workspace (%s): %s", d.Id(), err) + if key := "workspace_properties.0.running_mode_auto_stop_timeout_in_minutes"; d.HasChange(key) { + if err := workspacePropertyUpdate(ctx, conn, d, key); err != nil { + return sdkdiag.AppendFromErr(diags, err) } } - if d.HasChange("workspace_properties.0.user_volume_size_gib") { - if err := workspacePropertyUpdate(ctx, "user_volume_size_gib", conn, d); err != nil { - return sdkdiag.AppendErrorf(diags, "updating WorkSpaces Workspace (%s): %s", d.Id(), err) + if key := "workspace_properties.0.user_volume_size_gib"; d.HasChange(key) { + if err := workspacePropertyUpdate(ctx, conn, d, key); err != nil { + return sdkdiag.AppendFromErr(diags, err) } } @@ -266,133 +272,225 @@ func resourceWorkspaceDelete(ctx context.Context, d *schema.ResourceData, meta i var diags diag.Diagnostics conn := meta.(*conns.AWSClient).WorkSpacesClient(ctx) - if err := WorkspaceDelete(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return sdkdiag.AppendFromErr(diags, err) - } - return diags -} - -func WorkspaceDelete(ctx context.Context, conn *workspaces.Client, id string, timeout time.Duration) error { - resp, err := conn.TerminateWorkspaces(ctx, &workspaces.TerminateWorkspacesInput{ - TerminateWorkspaceRequests: []types.TerminateRequest{ - { - WorkspaceId: aws.String(id), - }, - }, + log.Printf("[DEBUG] Deleting WorkSpaces Workspace: %s", d.Id()) + output, err := conn.TerminateWorkspaces(ctx, &workspaces.TerminateWorkspacesInput{ + TerminateWorkspaceRequests: []types.TerminateRequest{{ + WorkspaceId: aws.String(d.Id()), + }}, }) - if err != nil { - return fmt.Errorf("deleting WorkSpaces Workspace (%s): %w", id, err) - } - wsFail := resp.FailedRequests - if len(wsFail) > 0 { - return fmt.Errorf("deleting WorkSpaces Workspace (%s): %s: %s", id, aws.ToString(wsFail[0].ErrorCode), aws.ToString(wsFail[0].ErrorMessage)) + if err == nil && len(output.FailedRequests) > 0 { + v 
:= output.FailedRequests[0] + err = fmt.Errorf("%s: %s", aws.ToString(v.ErrorCode), aws.ToString(v.ErrorMessage)) } - _, err = WaitWorkspaceTerminated(ctx, conn, id, timeout) if err != nil { - return fmt.Errorf("deleting WorkSpaces Workspace (%s): waiting for completion: %w", id, err) + return sdkdiag.AppendErrorf(diags, "deleting WorkSpaces Workspace (%s): %s", d.Id(), err) } - return nil -} + if _, err := waitWorkspaceTerminated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for WorkSpaces Workspace (%s) delete: %s", d.Id(), err) + } -func workspacePropertyUpdate(ctx context.Context, p string, conn *workspaces.Client, d *schema.ResourceData) error { - id := d.Id() + return diags +} - var wsp *types.WorkspaceProperties +func workspacePropertyUpdate(ctx context.Context, conn *workspaces.Client, d *schema.ResourceData, key string) error { + input := &workspaces.ModifyWorkspacePropertiesInput{ + WorkspaceId: aws.String(d.Id()), + } - switch p { - case "compute_type_name": - wsp = &types.WorkspaceProperties{ - ComputeTypeName: types.Compute(d.Get("workspace_properties.0.compute_type_name").(string)), + switch key { + case "workspace_properties.0.compute_type_name": + input.WorkspaceProperties = &types.WorkspaceProperties{ + ComputeTypeName: types.Compute(d.Get(key).(string)), } - case "root_volume_size_gib": - wsp = &types.WorkspaceProperties{ - RootVolumeSizeGib: aws.Int32(int32(d.Get("workspace_properties.0.root_volume_size_gib").(int))), + case "workspace_properties.0.root_volume_size_gib": + input.WorkspaceProperties = &types.WorkspaceProperties{ + RootVolumeSizeGib: aws.Int32(int32(d.Get(key).(int))), } - case "running_mode": - wsp = &types.WorkspaceProperties{ - RunningMode: types.RunningMode(d.Get("workspace_properties.0.running_mode").(string)), + case "workspace_properties.0.running_mode": + input.WorkspaceProperties = &types.WorkspaceProperties{ + RunningMode: types.RunningMode(d.Get(key).(string)), } - case "running_mode_auto_stop_timeout_in_minutes": + case "workspace_properties.0.running_mode_auto_stop_timeout_in_minutes": if d.Get("workspace_properties.0.running_mode") != types.RunningModeAutoStop { log.Printf("[DEBUG] Property running_mode_auto_stop_timeout_in_minutes makes sense only for AUTO_STOP running mode") return nil } - wsp = &types.WorkspaceProperties{ - RunningModeAutoStopTimeoutInMinutes: aws.Int32(int32(d.Get("workspace_properties.0.running_mode_auto_stop_timeout_in_minutes").(int))), + input.WorkspaceProperties = &types.WorkspaceProperties{ + RunningModeAutoStopTimeoutInMinutes: aws.Int32(int32(d.Get(key).(int))), } - case "user_volume_size_gib": - wsp = &types.WorkspaceProperties{ - UserVolumeSizeGib: aws.Int32(int32(d.Get("workspace_properties.0.user_volume_size_gib").(int))), + case "workspace_properties.0.user_volume_size_gib": + input.WorkspaceProperties = &types.WorkspaceProperties{ + UserVolumeSizeGib: aws.Int32(int32(d.Get(key).(int))), } } - _, err := conn.ModifyWorkspaceProperties(ctx, &workspaces.ModifyWorkspacePropertiesInput{ - WorkspaceId: aws.String(id), - WorkspaceProperties: wsp, - }) + _, err := conn.ModifyWorkspaceProperties(ctx, input) + if err != nil { - return fmt.Errorf("modifying property %q: %w", p, err) + return fmt.Errorf("updating WorkSpaces Workspace (%s,%s): %w", d.Id(), key, err) } - _, err = WaitWorkspaceUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("modifying property %q: waiting for completion: %w", p, err) + if _, 
err := waitWorkspaceUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return fmt.Errorf("waiting for WorkSpaces Workspace (%s,%s) update: %w", d.Id(), key, err) } return nil } -func ExpandWorkspaceProperties(properties []interface{}) *types.WorkspaceProperties { - log.Printf("[DEBUG] Expand Workspace properties: %+v ", properties) +func findWorkspaceByID(ctx context.Context, conn *workspaces.Client, id string) (*types.Workspace, error) { + input := &workspaces.DescribeWorkspacesInput{ + WorkspaceIds: []string{id}, + } + + output, err := findWorkspace(ctx, conn, input) - if len(properties) == 0 || properties[0] == nil { - return nil + if err != nil { + return nil, err } - p := properties[0].(map[string]interface{}) + if itypes.IsZero(output) { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func findWorkspace(ctx context.Context, conn *workspaces.Client, input *workspaces.DescribeWorkspacesInput) (*types.Workspace, error) { + output, err := findWorkspaces(ctx, conn, input) - workspaceProperties := &types.WorkspaceProperties{ - ComputeTypeName: types.Compute(p["compute_type_name"].(string)), - RootVolumeSizeGib: aws.Int32(int32(p["root_volume_size_gib"].(int))), - RunningMode: types.RunningMode(p["running_mode"].(string)), - UserVolumeSizeGib: aws.Int32(int32(p["user_volume_size_gib"].(int))), + if err != nil { + return nil, err } - if p["running_mode"].(string) == string(types.RunningModeAutoStop) { - workspaceProperties.RunningModeAutoStopTimeoutInMinutes = aws.Int32(int32(p["running_mode_auto_stop_timeout_in_minutes"].(int))) + return tfresource.AssertSingleValueResult(output) +} + +func findWorkspaces(ctx context.Context, conn *workspaces.Client, input *workspaces.DescribeWorkspacesInput) ([]types.Workspace, error) { // nosemgrep:ci.caps0-in-func-name,ci.workspaces-in-func-name + var output []types.Workspace + + pages := workspaces.NewDescribeWorkspacesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + output = append(output, page.Workspaces...) 
} - return workspaceProperties + return output, nil } -func FlattenWorkspaceProperties(properties *types.WorkspaceProperties) []map[string]interface{} { - log.Printf("[DEBUG] Flatten workspace properties: %+v ", properties) +func statusWorkspace(ctx context.Context, conn *workspaces.Client, workspaceID string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findWorkspaceByID(ctx, conn, workspaceID) - if properties == nil { - return []map[string]interface{}{} + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.State), nil } +} - return []map[string]interface{}{ - { - "compute_type_name": string(properties.ComputeTypeName), - "root_volume_size_gib": int(aws.ToInt32(properties.RootVolumeSizeGib)), - "running_mode": string(properties.RunningMode), - "running_mode_auto_stop_timeout_in_minutes": int(aws.ToInt32(properties.RunningModeAutoStopTimeoutInMinutes)), - "user_volume_size_gib": int(aws.ToInt32(properties.UserVolumeSizeGib)), - }, +func waitWorkspaceAvailable(ctx context.Context, conn *workspaces.Client, workspaceID string, timeout time.Duration) (*types.Workspace, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.WorkspaceStatePending, types.WorkspaceStateStarting), + Target: enum.Slice(types.WorkspaceStateAvailable), + Refresh: statusWorkspace(ctx, conn, workspaceID), + Timeout: timeout, } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.Workspace); ok { + return output, err + } + + return nil, err +} + +func waitWorkspaceUpdated(ctx context.Context, conn *workspaces.Client, workspaceID string, timeout time.Duration) (*types.Workspace, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.WorkspaceStateUpdating), + Target: enum.Slice(types.WorkspaceStateAvailable, types.WorkspaceStateStopped), + Refresh: statusWorkspace(ctx, conn, workspaceID), + // "OperationInProgressException: The properties of this WorkSpace are currently under modification. Please try again in a moment". + // AWS Workspaces service doesn't change instance status to "Updating" during property modification. + // Respective AWS Support feature request has been created. Meanwhile, artificial delay is placed here as a workaround. + Delay: 1 * time.Minute, + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if v, ok := outputRaw.(*types.Workspace); ok { + return v, err + } + + return nil, err +} + +func waitWorkspaceTerminated(ctx context.Context, conn *workspaces.Client, workspaceID string, timeout time.Duration) (*types.Workspace, error) { + // https://docs.aws.amazon.com/workspaces/latest/api/API_TerminateWorkspaces.html + stateConf := &retry.StateChangeConf{ + // You can terminate a WorkSpace that is in any state except SUSPENDED. + // After a WorkSpace is terminated, the TERMINATED state is returned only briefly before the WorkSpace directory metadata is cleaned up. 
+ Pending: enum.Slice(tfslices.RemoveAll(enum.EnumValues[types.WorkspaceState](), types.WorkspaceStateSuspended)...), + Target: []string{}, + Refresh: statusWorkspace(ctx, conn, workspaceID), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.Workspace); ok { + return output, err + } + + return nil, err } -func flattenComputeEnumValues(t []types.Compute) []string { - var out []string +func expandWorkspaceProperties(tfList []interface{}) *types.WorkspaceProperties { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]interface{}) - for _, v := range t { - out = append(out, string(v)) + apiObject := &types.WorkspaceProperties{ + ComputeTypeName: types.Compute(tfMap["compute_type_name"].(string)), + RootVolumeSizeGib: aws.Int32(int32(tfMap["root_volume_size_gib"].(int))), + RunningMode: types.RunningMode(tfMap["running_mode"].(string)), + UserVolumeSizeGib: aws.Int32(int32(tfMap["user_volume_size_gib"].(int))), + } + + if tfMap["running_mode"].(string) == string(types.RunningModeAutoStop) { + apiObject.RunningModeAutoStopTimeoutInMinutes = aws.Int32(int32(tfMap["running_mode_auto_stop_timeout_in_minutes"].(int))) + } + + return apiObject +} + +func flattenWorkspaceProperties(apiObject *types.WorkspaceProperties) []map[string]interface{} { + if apiObject == nil { + return []map[string]interface{}{} } - return out + return []map[string]interface{}{{ + "compute_type_name": apiObject.ComputeTypeName, + "root_volume_size_gib": aws.ToInt32(apiObject.RootVolumeSizeGib), + "running_mode": apiObject.RunningMode, + "running_mode_auto_stop_timeout_in_minutes": aws.ToInt32(apiObject.RunningModeAutoStopTimeoutInMinutes), + "user_volume_size_gib": aws.ToInt32(apiObject.UserVolumeSizeGib), + }} } diff --git a/internal/service/workspaces/workspace_data_source.go b/internal/service/workspaces/workspace_data_source.go index e67d3cc02e63..3de3c8747ed6 100644 --- a/internal/service/workspaces/workspace_data_source.go +++ b/internal/service/workspaces/workspace_data_source.go @@ -5,7 +5,6 @@ package workspaces import ( "context" - "reflect" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/workspaces" @@ -15,11 +14,13 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_workspaces_workspace") -func DataSourceWorkspace() *schema.Resource { +// @SDKDataSource("aws_workspaces_workspace", name="Workspace") +// @Tags(identifierAttribute="id") +func dataSourceWorkspace() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceWorkspaceRead, @@ -28,6 +29,10 @@ func DataSourceWorkspace() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "computer_name": { + Type: schema.TypeString, + Computed: true, + }, "directory_id": { Type: schema.TypeString, Computed: true, @@ -39,18 +44,15 @@ func DataSourceWorkspace() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "computer_name": { - Type: schema.TypeString, + "root_volume_encryption_enabled": { + Type: schema.TypeBool, Computed: true, }, names.AttrState: { Type: schema.TypeString, Computed: true, }, - "root_volume_encryption_enabled": { - Type: schema.TypeBool, - Computed: true, - }, + 
names.AttrTags: tftags.TagsSchemaComputed(), names.AttrUserName: { Type: schema.TypeString, Computed: true, @@ -100,7 +102,6 @@ func DataSourceWorkspace() *schema.Resource { }, }, }, - names.AttrTags: tftags.TagsSchemaComputed(), }, } } @@ -108,71 +109,39 @@ func DataSourceWorkspace() *schema.Resource { func dataSourceWorkspaceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).WorkSpacesClient(ctx) - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig - - var workspace types.Workspace - - if workspaceID, ok := d.GetOk("workspace_id"); ok { - resp, err := conn.DescribeWorkspaces(ctx, &workspaces.DescribeWorkspacesInput{ - WorkspaceIds: []string{workspaceID.(string)}, - }) - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading WorkSpaces Workspace (%s): %s", workspaceID, err) - } - if len(resp.Workspaces) != 1 { - return sdkdiag.AppendErrorf(diags, "expected 1 result for WorkSpaces Workspace (%s), found %d", workspaceID, len(resp.Workspaces)) - } - - workspace = resp.Workspaces[0] + var workspace *types.Workspace + var err error - if reflect.DeepEqual(workspace, (types.Workspace{})) { - return sdkdiag.AppendErrorf(diags, "no WorkSpaces Workspace with ID %q found", workspaceID) - } + if v, ok := d.GetOk("workspace_id"); ok { + workspace, err = findWorkspaceByID(ctx, conn, v.(string)) } - if directoryID, ok := d.GetOk("directory_id"); ok { - userName := d.Get(names.AttrUserName).(string) - resp, err := conn.DescribeWorkspaces(ctx, &workspaces.DescribeWorkspacesInput{ - DirectoryId: aws.String(directoryID.(string)), - UserName: aws.String(userName), - }) - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading WorkSpaces Workspace (%s:%s): %s", directoryID, userName, err) - } - - if len(resp.Workspaces) != 1 { - return sdkdiag.AppendErrorf(diags, "expected 1 result for %q Workspace in %q directory, found %d", userName, directoryID, len(resp.Workspaces)) + if v, ok := d.GetOk("directory_id"); ok { + input := &workspaces.DescribeWorkspacesInput{ + DirectoryId: aws.String(v.(string)), + UserName: aws.String(d.Get(names.AttrUserName).(string)), } - workspace = resp.Workspaces[0] + workspace, err = findWorkspace(ctx, conn, input) + } - if reflect.DeepEqual(workspace, (types.Workspace{})) { - return sdkdiag.AppendErrorf(diags, "no %q Workspace in %q directory found", userName, directoryID) - } + if err != nil { + return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("WorkSpaces Workspace", err)) } d.SetId(aws.ToString(workspace.WorkspaceId)) d.Set("bundle_id", workspace.BundleId) + d.Set("computer_name", workspace.ComputerName) d.Set("directory_id", workspace.DirectoryId) d.Set(names.AttrIPAddress, workspace.IpAddress) - d.Set("computer_name", workspace.ComputerName) - d.Set(names.AttrState, workspace.State) d.Set("root_volume_encryption_enabled", workspace.RootVolumeEncryptionEnabled) + d.Set(names.AttrState, workspace.State) d.Set(names.AttrUserName, workspace.UserName) d.Set("user_volume_encryption_enabled", workspace.UserVolumeEncryptionEnabled) d.Set("volume_encryption_key", workspace.VolumeEncryptionKey) - if err := d.Set("workspace_properties", FlattenWorkspaceProperties(workspace.WorkspaceProperties)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting workspace properties: %s", err) - } - - tags, err := listTags(ctx, conn, d.Id()) - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing tags: %s", err) - } - - if err := 
d.Set(names.AttrTags, tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) + if err := d.Set("workspace_properties", flattenWorkspaceProperties(workspace.WorkspaceProperties)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting workspace_properties: %s", err) } return diags diff --git a/internal/service/workspaces/workspace_test.go b/internal/service/workspaces/workspace_test.go index 9ded263b9be9..5c981446cea6 100644 --- a/internal/service/workspaces/workspace_test.go +++ b/internal/service/workspaces/workspace_test.go @@ -6,13 +6,11 @@ package workspaces_test import ( "context" "fmt" - "reflect" "regexp" "strings" "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/workspaces" "github.com/aws/aws-sdk-go-v2/service/workspaces/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" @@ -21,6 +19,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfworkspaces "github.com/hashicorp/terraform-provider-aws/internal/service/workspaces" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -350,21 +349,17 @@ func testAccCheckWorkspaceDestroy(ctx context.Context) resource.TestCheckFunc { continue } - resp, err := conn.DescribeWorkspaces(ctx, &workspaces.DescribeWorkspacesInput{ - WorkspaceIds: []string{rs.Primary.ID}, - }) - if err != nil { - return err - } + _, err := tfworkspaces.FindWorkspaceByID(ctx, conn, rs.Primary.ID) - if len(resp.Workspaces) == 0 { - return nil + if tfresource.NotFound(err) { + continue } - ws := resp.Workspaces[0] - if ws.State != types.WorkspaceStateTerminating && ws.State != types.WorkspaceStateTerminated { - return fmt.Errorf("workspace %q was not terminated", rs.Primary.ID) + if err != nil { + return err } + + return fmt.Errorf("WorkSpaces Workspace %s still exists", rs.Primary.ID) } return nil @@ -380,19 +375,15 @@ func testAccCheckWorkspaceExists(ctx context.Context, n string, v *types.Workspa conn := acctest.Provider.Meta().(*conns.AWSClient).WorkSpacesClient(ctx) - output, err := conn.DescribeWorkspaces(ctx, &workspaces.DescribeWorkspacesInput{ - WorkspaceIds: []string{rs.Primary.ID}, - }) + output, err := tfworkspaces.FindWorkspaceByID(ctx, conn, rs.Primary.ID) + if err != nil { return err } - if *output.Workspaces[0].WorkspaceId == rs.Primary.ID { - *v = output.Workspaces[0] - return nil - } + *v = *output - return fmt.Errorf("workspace %q not found", rs.Primary.ID) + return nil } } @@ -635,85 +626,3 @@ resource "aws_workspaces_workspace" "test" { } `, rName)) } - -func TestExpandWorkspaceProperties(t *testing.T) { - t.Parallel() - - cases := []struct { - input []interface{} - expected *types.WorkspaceProperties - }{ - // Empty - { - input: []interface{}{}, - expected: nil, - }, - // Full - { - input: []interface{}{ - map[string]interface{}{ - "compute_type_name": string(types.ComputeValue), - "root_volume_size_gib": 80, - "running_mode": string(types.RunningModeAutoStop), - "running_mode_auto_stop_timeout_in_minutes": 60, - "user_volume_size_gib": 10, - }, - }, - expected: &types.WorkspaceProperties{ - ComputeTypeName: types.ComputeValue, - RootVolumeSizeGib: aws.Int32(80), - RunningMode: types.RunningModeAutoStop, - RunningModeAutoStopTimeoutInMinutes: aws.Int32(60), - UserVolumeSizeGib: aws.Int32(10), - }, - }, - } - - for _, c 
:= range cases { - actual := tfworkspaces.ExpandWorkspaceProperties(c.input) - if !reflect.DeepEqual(actual, c.expected) { - t.Fatalf("expected\n\n%#+v\n\ngot\n\n%#+v", c.expected, actual) - } - } -} - -func TestFlattenWorkspaceProperties(t *testing.T) { - t.Parallel() - - cases := []struct { - input *types.WorkspaceProperties - expected []map[string]interface{} - }{ - // Empty - { - input: nil, - expected: []map[string]interface{}{}, - }, - // Full - { - input: &types.WorkspaceProperties{ - ComputeTypeName: types.ComputeValue, - RootVolumeSizeGib: aws.Int32(80), - RunningMode: types.RunningModeAutoStop, - RunningModeAutoStopTimeoutInMinutes: aws.Int32(60), - UserVolumeSizeGib: aws.Int32(10), - }, - expected: []map[string]interface{}{ - { - "compute_type_name": string(types.ComputeValue), - "root_volume_size_gib": 80, - "running_mode": string(types.RunningModeAutoStop), - "running_mode_auto_stop_timeout_in_minutes": 60, - "user_volume_size_gib": 10, - }, - }, - }, - } - - for _, c := range cases { - actual := tfworkspaces.FlattenWorkspaceProperties(c.input) - if !reflect.DeepEqual(actual, c.expected) { - t.Fatalf("expected\n\n%#+v\n\ngot\n\n%#+v", c.expected, actual) - } - } -}
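
The patch above replaces the old inline `DescribeWorkspaces` calls and exported helpers with unexported finders (`findWorkspaceByID`) and `retry.StateChangeConf`-based waiters. The sketch below is illustrative only and is not part of the change set: it shows the same find-then-wait pattern against the public AWS SDK for Go v2 WorkSpaces API, without the provider-internal `tfresource`, `enum`, and `retry` helpers. The names `findByID`, `waitAvailable`, and the workspace ID `ws-0123456789abcdef0` are hypothetical stand-ins, and the fixed 30-second poll is a simplification of the SDK waiter machinery used in the resource.

```go
// Minimal sketch of the finder/waiter pattern, assuming only the public
// AWS SDK for Go v2 WorkSpaces client. Not the provider's implementation.
package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/workspaces"
	"github.com/aws/aws-sdk-go-v2/service/workspaces/types"
)

var errNotFound = errors.New("workspace not found")

// findByID pages through DescribeWorkspaces and enforces a single-result
// contract, mirroring what the new findWorkspaceByID finder does via
// tfresource.AssertSingleValueResult.
func findByID(ctx context.Context, conn *workspaces.Client, id string) (*types.Workspace, error) {
	input := &workspaces.DescribeWorkspacesInput{WorkspaceIds: []string{id}}

	var found []types.Workspace
	pages := workspaces.NewDescribeWorkspacesPaginator(conn, input)
	for pages.HasMorePages() {
		page, err := pages.NextPage(ctx)
		if err != nil {
			return nil, err
		}
		found = append(found, page.Workspaces...)
	}

	switch len(found) {
	case 0:
		return nil, errNotFound
	case 1:
		return &found[0], nil
	default:
		return nil, fmt.Errorf("expected 1 workspace, got %d", len(found))
	}
}

// waitAvailable polls the finder until the workspace reaches AVAILABLE,
// a simplified equivalent of the StateChangeConf-based waitWorkspaceAvailable.
func waitAvailable(ctx context.Context, conn *workspaces.Client, id string, timeout time.Duration) (*types.Workspace, error) {
	deadline := time.Now().Add(timeout)
	for {
		ws, err := findByID(ctx, conn, id)
		if err != nil && !errors.Is(err, errNotFound) {
			return nil, err
		}
		if ws != nil && ws.State == types.WorkspaceStateAvailable {
			return ws, nil
		}
		if time.Now().After(deadline) {
			return nil, fmt.Errorf("timed out waiting for workspace %s", id)
		}
		time.Sleep(30 * time.Second) // crude poll interval for illustration
	}
}

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	conn := workspaces.NewFromConfig(cfg)

	// Hypothetical workspace ID used only for illustration.
	ws, err := waitAvailable(ctx, conn, "ws-0123456789abcdef0", 30*time.Minute)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.ToString(ws.WorkspaceId), ws.State)
}
```

In the resource itself the pattern differs in two ways worth noting from the diff: the finder returns a NotFound-style error that `resourceWorkspaceRead` checks with `tfresource.NotFound` so a terminated workspace is removed from state rather than treated as a failure, and `waitWorkspaceUpdated` adds a one-minute `Delay` because the service does not immediately report an UPDATING state after `ModifyWorkspaceProperties`.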