From 1f35ce25a07236e9243682948d6c3f48355ea022 Mon Sep 17 00:00:00 2001 From: Evan Rademacher <44209290+erademacher@users.noreply.github.com> Date: Thu, 5 Jan 2023 15:34:43 -0800 Subject: [PATCH] [CC-8720]CMEK resource (#67) --- docs/data-sources/cluster.md | 5 +- docs/resources/cluster.md | 1 + docs/resources/cmek.md | 74 +++ examples/workflows/cockroach_cmek/main.tf | 170 +++++++ go.mod | 2 +- go.sum | 2 + internal/provider/cluster_resource.go | 231 ++++++--- internal/provider/cluster_resource_test.go | 50 +- internal/provider/cmek_resource.go | 443 ++++++++++++++++++ internal/provider/cmek_resource_test.go | 336 +++++++++++++ .../provider/cockroach_cluster_data_source.go | 19 +- internal/provider/models.go | 34 +- .../private_endpoint_connection_resource.go | 4 +- .../private_endpoint_services_resource.go | 4 +- internal/provider/provider.go | 1 + mock/build.go | 2 + mock/service.go | 143 ++++++ 17 files changed, 1409 insertions(+), 112 deletions(-) create mode 100644 docs/resources/cmek.md create mode 100644 examples/workflows/cockroach_cmek/main.tf create mode 100644 internal/provider/cmek_resource.go create mode 100644 internal/provider/cmek_resource_test.go create mode 100644 mock/build.go diff --git a/docs/data-sources/cluster.md b/docs/data-sources/cluster.md index e9599395..a22fbabc 100644 --- a/docs/data-sources/cluster.md +++ b/docs/data-sources/cluster.md @@ -3,12 +3,12 @@ page_title: "cockroach_cluster Data Source - terraform-provider-cockroach" subcategory: "" description: |- - clusterSourceType Data Source + Cluster Data Source --- # cockroach_cluster (Data Source) -clusterSourceType Data Source +Cluster Data Source @@ -39,6 +39,7 @@ Read-Only: - `machine_type` (String) - `memory_gib` (Number) - `num_virtual_cpus` (Number) +- `private_network_visibility` (Boolean) - `storage_gib` (Number) diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md index d3c7a817..4f107b80 100644 --- a/docs/resources/cluster.md +++ b/docs/resources/cluster.md @@ -61,6 +61,7 @@ Optional: - `disk_iops` (Number) - `machine_type` (String) - `num_virtual_cpus` (Number) +- `private_network_visibility` (Boolean) Set to true to assign private IP addresses to nodes. Required for CMEK, PrivateLink, and other advanced features. - `storage_gib` (Number) Read-Only: diff --git a/docs/resources/cmek.md b/docs/resources/cmek.md new file mode 100644 index 00000000..f5ea1d75 --- /dev/null +++ b/docs/resources/cmek.md @@ -0,0 +1,74 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "cockroach_cmek Resource - terraform-provider-cockroach" +subcategory: "" +description: |- + Customer-managed encryption keys (CMEK) resource for a single cluster +--- + +# cockroach_cmek (Resource) + +Customer-managed encryption keys (CMEK) resource for a single cluster + + + + +## Schema + +### Required + +- `id` (String) Cluster ID +- `regions` (Attributes List) (see [below for nested schema](#nestedatt--regions)) + +### Optional + +- `additional_regions` (Attributes List) Once CMEK is enabled for a cluster, no new regions can be added to the cluster resource, since they need encryption key info stored in the CMEK resource. New regions can be added and maintained here instead. 
(see [below for nested schema](#nestedatt--additional_regions)) +- `status` (String) Aggregated status of the cluster's encryption key(s) + + +### Nested Schema for `regions` + +Required: + +- `key` (Attributes) (see [below for nested schema](#nestedatt--regions--key)) +- `region` (String) + +Read-Only: + +- `status` (String) + + +### Nested Schema for `regions.key` + +Required: + +- `auth_principal` (String) +- `type` (String) Current allowed values are 'AWS_KMS' and 'GCP_CLOUD_KMS' +- `uri` (String) + +Read-Only: + +- `created_at` (String) +- `status` (String) +- `updated_at` (String) +- `user_message` (String) + + + + +### Nested Schema for `additional_regions` + +Required: + +- `name` (String) + +Optional: + +- `node_count` (Number) + +Read-Only: + +- `sql_dns` (String) +- `ui_dns` (String) + + diff --git a/examples/workflows/cockroach_cmek/main.tf b/examples/workflows/cockroach_cmek/main.tf new file mode 100644 index 00000000..f04be422 --- /dev/null +++ b/examples/workflows/cockroach_cmek/main.tf @@ -0,0 +1,170 @@ +# Your Organization ID can be found at https://cockroachlabs.cloud/information +variable "org_id" { + type = string + nullable = false +} + +# Required to assign yourself permission to update the key. +variable "iam_user" { + type = string + nullable = false +} + +variable "cluster_name" { + type = string + nullable = false +} + +variable "aws_region" { + type = string + nullable = false + default = "us-west-2" +} + +variable "additional_regions" { + type = list(string) + nullable = false +} + +variable "cluster_node_count" { + type = number + nullable = false + default = 3 +} + +variable "storage_gib" { + type = number + nullable = false + default = 15 +} + +variable "machine_type" { + type = string + nullable = false + default = "m5.large" +} + +terraform { + required_providers { + cockroach = { + source = "cockroachdb/cockroach" + } + aws = { + source = "hashicorp/aws" + version = "~> 4.0" + } + } +} +provider "cockroach" { + # export COCKROACH_API_KEY with the cockroach cloud API Key +} + +provider "aws" { + # See https://registry.terraform.io/providers/hashicorp/aws/latest/docs + # for configuration steps. + + # Please don't use a variable for region in production! The AWS provider won't + # be able to find any resources if this value changes and you'll get + # into a weird state. Be sure to run `terraform destroy` before changing + # this value. 
+ region = var.aws_region +} + +resource "cockroach_cluster" "example" { + name = var.cluster_name + cloud_provider = "AWS" + dedicated = { + storage_gib = var.storage_gib + machine_type = var.machine_type + private_network_visibility = true + } + regions = [{ + name = var.aws_region, + node_count = var.cluster_node_count + } + ] +} + +resource "aws_iam_role" "example" { + name = "cmek_test_role" + + assume_role_policy = jsonencode({ + "Version" : "2012-10-17", + "Statement" : [ + { + "Effect" : "Allow", + "Action" : "sts:AssumeRole", + "Principal" : { + "AWS" : cockroach_cluster.example.account_id + }, + "Condition" : { + "StringEquals" : { + "sts:ExternalId" : var.org_id + } + } + } + ] + }) +} + +data "aws_iam_user" "example" { + user_name = var.iam_user +} + +resource "aws_kms_key" "example" { + policy = jsonencode({ + "Version" : "2012-10-17", + "Statement" : [ + { + "Effect" : "Allow", + "Action" : "kms:*", + "Principal" : { + "AWS" : [ + aws_iam_role.example.arn, + data.aws_iam_user.example.arn + ] + }, + "Resource" : "*" + } + ] + }) + multi_region = true +} + +resource "cockroach_cmek" "example" { + id = cockroach_cluster.example.id + regions = /*concat(*/ [ + { + region : var.aws_region + key : { + auth_principal : aws_iam_role.example.arn + type : "AWS_KMS" + uri : aws_kms_key.example.arn + } + } + ] #, + # + # Additional regions can be added after CMEK is enabled by updating + # the `region` attribute and adding their name and node count to + # `additional_regions`. These regions will be managed separately from + # the parent cluster, but will otherwise behave the same. Cluster data + # sources will always show the entire list of regions, regardless of + # whether they're managed by the cluster or CMEK resource. + # + # These should be concatenated with the current region(s). 
+ #[for r in var.additional_regions : { + # region: r, + # key: { + # auth_principal: aws_iam_role.example.arn + # type: "AWS_KMS" + # uri: aws_kms_key.example.arn + # } + #}]) + + #additional_regions = [for r in var.additional_regions : + # { + # name = r + # node_count = var.cluster_node_count + # } + #] +} diff --git a/go.mod b/go.mod index 7a5c083d..174575db 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/cockroachdb/terraform-provider-cockroach go 1.18 require ( - github.com/cockroachdb/cockroach-cloud-sdk-go v0.3.1 + github.com/cockroachdb/cockroach-cloud-sdk-go v0.3.3 github.com/golang/mock v1.6.0 github.com/hashicorp/terraform-plugin-docs v0.13.0 github.com/hashicorp/terraform-plugin-framework v0.17.0 diff --git a/go.sum b/go.sum index f5fa7052..1daa6223 100644 --- a/go.sum +++ b/go.sum @@ -34,6 +34,8 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cockroachdb/cockroach-cloud-sdk-go v0.3.1 h1:PaqggGcqDV8/T9AQMXzSC+xjhtLo92eDpqLi76yIeNg= github.com/cockroachdb/cockroach-cloud-sdk-go v0.3.1/go.mod h1:zVVtMKMcPkwrYgrZ/hv73HiGSsWId3BorWlSpRWc7tM= +github.com/cockroachdb/cockroach-cloud-sdk-go v0.3.3 h1:Dk6ACbo0UgxIzwqoXiMS7zbM/L2f5z8INU3pHUOinXs= +github.com/cockroachdb/cockroach-cloud-sdk-go v0.3.3/go.mod h1:zVVtMKMcPkwrYgrZ/hv73HiGSsWId3BorWlSpRWc7tM= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= diff --git a/internal/provider/cluster_resource.go b/internal/provider/cluster_resource.go index f3267ff2..49c4413a 100644 --- a/internal/provider/cluster_resource.go +++ b/internal/provider/cluster_resource.go @@ -25,9 +25,11 @@ import ( "time" "github.com/cockroachdb/cockroach-cloud-sdk-go/pkg/client" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier" @@ -46,6 +48,27 @@ type clusterResource struct { provider *provider } +var regionSchema = schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Required: true, + }, + "sql_dns": schema.StringAttribute{ + Computed: true, + }, + "ui_dns": schema.StringAttribute{ + Computed: true, + }, + "node_count": schema.Int64Attribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, + }, + }, +} + func (r *clusterResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ MarkdownDescription: "Cluster Resource", @@ -131,6 +154,14 @@ func (r *clusterResource) Schema(_ context.Context, _ resource.SchemaRequest, re Optional: true, Computed: true, }, + "private_network_visibility": schema.BoolAttribute{ + Optional: true, + Computed: true, + Description: "Set to true to assign private IP addresses to 
nodes. Required for CMEK, PrivateLink, and other advanced features.", + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.UseStateForUnknown(), + }, + }, }, }, "regions": schema.ListNestedAttribute{ @@ -138,26 +169,7 @@ func (r *clusterResource) Schema(_ context.Context, _ resource.SchemaRequest, re PlanModifiers: []planmodifier.List{ listplanmodifier.UseStateForUnknown(), }, - NestedObject: schema.NestedAttributeObject{ - Attributes: map[string]schema.Attribute{ - "name": schema.StringAttribute{ - Required: true, - }, - "sql_dns": schema.StringAttribute{ - Computed: true, - }, - "ui_dns": schema.StringAttribute{ - Computed: true, - }, - "node_count": schema.Int64Attribute{ - Optional: true, - Computed: true, - PlanModifiers: []planmodifier.Int64{ - int64planmodifier.UseStateForUnknown(), - }, - }, - }, - }, + NestedObject: regionSchema, }, "state": schema.StringAttribute{ Computed: true, @@ -235,14 +247,14 @@ func (r *clusterResource) Create(ctx context.Context, req resource.CreateRequest } dedicated.RegionNodes = regionNodes } - if plan.DedicatedConfig != nil { + if cfg := plan.DedicatedConfig; cfg != nil { hardware := client.DedicatedHardwareCreateSpecification{} machineSpec := client.DedicatedMachineTypeSpecification{} - if !plan.DedicatedConfig.NumVirtualCpus.IsNull() { - cpus := int32(plan.DedicatedConfig.NumVirtualCpus.ValueInt64()) + if !cfg.NumVirtualCpus.IsNull() { + cpus := int32(cfg.NumVirtualCpus.ValueInt64()) machineSpec.NumVirtualCpus = &cpus - } else if !plan.DedicatedConfig.MachineType.IsNull() { - machineType := plan.DedicatedConfig.MachineType.ValueString() + } else if !cfg.MachineType.IsNull() { + machineType := cfg.MachineType.ValueString() machineSpec.MachineType = &machineType } else { resp.Diagnostics.AddError( @@ -251,14 +263,18 @@ func (r *clusterResource) Create(ctx context.Context, req resource.CreateRequest ) } hardware.MachineSpec = machineSpec - if !plan.DedicatedConfig.StorageGib.IsNull() { - hardware.StorageGib = int32(plan.DedicatedConfig.StorageGib.ValueInt64()) + if !cfg.StorageGib.IsNull() { + hardware.StorageGib = int32(cfg.StorageGib.ValueInt64()) } - if !plan.DedicatedConfig.DiskIops.IsNull() { - diskiops := int32(plan.DedicatedConfig.DiskIops.ValueInt64()) + if !cfg.DiskIops.IsNull() { + diskiops := int32(cfg.DiskIops.ValueInt64()) hardware.DiskIops = &diskiops } dedicated.Hardware = hardware + if cfg.PrivateNetworkVisibility.ValueBool() { + visibilityPrivate := client.NETWORKVISIBLITY_PRIVATE + dedicated.NetworkVisibility = &visibilityPrivate + } } clusterSpec.SetDedicated(dedicated) } @@ -278,7 +294,7 @@ func (r *clusterResource) Create(ctx context.Context, req resource.CreateRequest } err = sdk_resource.RetryContext(ctx, clusterCreateTimeout, - waitForClusterCreatedFunc(ctx, clusterObj.Id, r.provider.service, clusterObj)) + waitForClusterReadyFunc(ctx, clusterObj.Id, r.provider.service, clusterObj)) if err != nil { resp.Diagnostics.AddError( "Cluster creation failed", @@ -317,7 +333,7 @@ func (r *clusterResource) Read(ctx context.Context, req resource.ReadRequest, re clusterObj, httpResp, err := r.provider.service.GetCluster(ctx, clusterID) if err != nil { - if httpResp.StatusCode == http.StatusNotFound { + if httpResp != nil && httpResp.StatusCode == http.StatusNotFound { resp.Diagnostics.AddWarning( "Cluster not found", fmt.Sprintf("Cluster with clusterID %s is not found. 
Removing from state.", clusterID)) @@ -333,6 +349,7 @@ func (r *clusterResource) Read(ctx context.Context, req resource.ReadRequest, re // We actually want to use the current state as the plan here, // since we're trying to see if it changed. loadClusterToTerraformState(clusterObj, &cluster, &cluster) + diags = resp.State.Set(ctx, cluster) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { @@ -375,14 +392,14 @@ func (r *clusterResource) Update(ctx context.Context, req resource.UpdateRequest if plan.ServerlessConfig != nil { serverless := client.NewServerlessClusterUpdateSpecification(int32(plan.ServerlessConfig.SpendLimit.ValueInt64())) clusterReq.SetServerless(*serverless) - } else if plan.DedicatedConfig != nil { + } else if cfg := plan.DedicatedConfig; cfg != nil { dedicated := client.NewDedicatedClusterUpdateSpecification() if plan.Regions != nil { - regionNodes := make(map[string]int32, len(plan.Regions)) - for _, region := range plan.Regions { - regionNodes[region.Name.ValueString()] = int32(region.NodeCount.ValueInt64()) + dedicated.RegionNodes, diags = reconcileRegionUpdate(ctx, state.Regions, plan.Regions, state.ID.ValueString(), r.provider.service) //®ionNodes + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return } - dedicated.RegionNodes = ®ionNodes } dedicated.Hardware = client.NewDedicatedHardwareUpdateSpecification() if !plan.DedicatedConfig.StorageGib.IsNull() { @@ -416,11 +433,11 @@ func (r *clusterResource) Update(ctx context.Context, req resource.UpdateRequest } err = sdk_resource.RetryContext(ctx, clusterUpdateTimeout, - waitForClusterCreatedFunc(ctx, clusterObj.Id, r.provider.service, clusterObj)) + waitForClusterReadyFunc(ctx, clusterObj.Id, r.provider.service, clusterObj)) if err != nil { resp.Diagnostics.AddError( "Cluster update failed", - fmt.Sprintf("Cluster is not ready: %v %v "+err.Error()), + fmt.Sprintf("Cluster is not ready: %v", formatAPIErrorMessage(err)), ) return } @@ -450,7 +467,7 @@ func (r *clusterResource) Delete(ctx context.Context, req resource.DeleteRequest _, httpResp, err := r.provider.service.DeleteCluster(ctx, clusterID) if err != nil { - if httpResp.StatusCode == http.StatusNotFound { + if httpResp != nil && httpResp.StatusCode == http.StatusNotFound { // Cluster is already gone. Swallow the error. } else { resp.Diagnostics.AddError( @@ -492,32 +509,20 @@ func simplifyClusterVersion(version string) string { // resort the list, so it matches up with the plan. If the response and // plan regions don't match up, the sort won't work right, but we can // ignore it. Terraform will handle it. 
-func sortRegionsByPlan(clusterObj *client.Cluster, plan *CockroachCluster) { - if clusterObj == nil || plan == nil { +func sortRegionsByPlan(regions *[]client.Region, plan []Region) { + if regions == nil || plan == nil { return } - regionOrdinals := make(map[string]int, len(clusterObj.Regions)) - for i, region := range plan.Regions { + regionOrdinals := make(map[string]int, len(*regions)) + for i, region := range plan { regionOrdinals[region.Name.ValueString()] = i } - sort.Slice(clusterObj.Regions, func(i, j int) bool { - return regionOrdinals[clusterObj.Regions[i].Name] < regionOrdinals[clusterObj.Regions[j].Name] + sort.Slice(*regions, func(i, j int) bool { + return regionOrdinals[(*regions)[i].Name] < regionOrdinals[(*regions)[j].Name] }) } func loadClusterToTerraformState(clusterObj *client.Cluster, state *CockroachCluster, plan *CockroachCluster) { - sortRegionsByPlan(clusterObj, plan) - var rgs []Region - for _, x := range clusterObj.Regions { - rg := Region{ - Name: types.StringValue(x.Name), - SqlDns: types.StringValue(x.SqlDns), - UiDns: types.StringValue(x.UiDns), - NodeCount: types.Int64Value(int64(x.NodeCount)), - } - rgs = append(rgs, rg) - } - state.ID = types.StringValue(clusterObj.Id) state.Name = types.StringValue(clusterObj.Name) state.CloudProvider = types.StringValue(string(clusterObj.CloudProvider)) @@ -531,7 +536,7 @@ func loadClusterToTerraformState(clusterObj *client.Cluster, state *CockroachClu state.State = types.StringValue(string(clusterObj.State)) state.CreatorId = types.StringValue(clusterObj.CreatorId) state.OperationStatus = types.StringValue(string(clusterObj.OperationStatus)) - state.Regions = rgs + state.Regions = getManagedRegions(&clusterObj.Regions, plan.Regions) if clusterObj.Config.Serverless != nil { state.ServerlessConfig = &ServerlessClusterConfig{ @@ -540,20 +545,49 @@ func loadClusterToTerraformState(clusterObj *client.Cluster, state *CockroachClu } } else if clusterObj.Config.Dedicated != nil { state.DedicatedConfig = &DedicatedClusterConfig{ - MachineType: types.StringValue(clusterObj.Config.Dedicated.MachineType), - NumVirtualCpus: types.Int64Value(int64(clusterObj.Config.Dedicated.NumVirtualCpus)), - StorageGib: types.Int64Value(int64(clusterObj.Config.Dedicated.StorageGib)), - MemoryGib: types.Float64Value(float64(clusterObj.Config.Dedicated.MemoryGib)), - DiskIops: types.Int64Value(int64(clusterObj.Config.Dedicated.DiskIops)), + MachineType: types.StringValue(clusterObj.Config.Dedicated.MachineType), + NumVirtualCpus: types.Int64Value(int64(clusterObj.Config.Dedicated.NumVirtualCpus)), + StorageGib: types.Int64Value(int64(clusterObj.Config.Dedicated.StorageGib)), + MemoryGib: types.Float64Value(float64(clusterObj.Config.Dedicated.MemoryGib)), + DiskIops: types.Int64Value(int64(clusterObj.Config.Dedicated.DiskIops)), + PrivateNetworkVisibility: types.BoolValue(clusterObj.GetNetworkVisibility() == client.NETWORKVISIBLITY_PRIVATE), } } } -func waitForClusterCreatedFunc(ctx context.Context, id string, cl client.Service, cluster *client.Cluster) sdk_resource.RetryFunc { +// Due to the cyclic dependency issues of CMEK, there may be additional +// regions that are managed by another resource (i.e. cockroach_cmek) that +// we can safely omit from the state. 
+func getManagedRegions(apiRegions *[]client.Region, plan []Region) []Region { + if apiRegions == nil { + return nil + } + regions := make([]Region, 0, len(*apiRegions)) + sortRegionsByPlan(apiRegions, plan) + planRegions := make(map[string]bool, len(plan)) + for _, region := range plan { + planRegions[region.Name.ValueString()] = true + } + isImport := len(plan) == 0 + for _, x := range *apiRegions { + if isImport || planRegions[x.Name] { + rg := Region{ + Name: types.StringValue(x.Name), + SqlDns: types.StringValue(x.SqlDns), + UiDns: types.StringValue(x.UiDns), + NodeCount: types.Int64Value(int64(x.NodeCount)), + } + regions = append(regions, rg) + } + } + return regions +} + +func waitForClusterReadyFunc(ctx context.Context, id string, cl client.Service, cluster *client.Cluster) sdk_resource.RetryFunc { return func() *sdk_resource.RetryError { apiCluster, httpResp, err := cl.GetCluster(ctx, id) if err != nil { - if httpResp.StatusCode < http.StatusInternalServerError { + if httpResp != nil && httpResp.StatusCode < http.StatusInternalServerError { return sdk_resource.NonRetryableError(fmt.Errorf("error getting cluster: %s", formatAPIErrorMessage(err))) } else { return sdk_resource.RetryableError(fmt.Errorf("encountered a server error while reading cluster status - trying again")) @@ -570,6 +604,73 @@ func waitForClusterCreatedFunc(ctx context.Context, id string, cl client.Service } } +// To build an update request, we need to reconcile three region lists: +// the current regions managed by this resource (state), the planned regions +// managed by this resource (plan), and the full list of regions in the +// cluster. We need to update the current resource's regions without impacting +// the regions managed by another resource. +// +// A nil return value means no region update is required. +func reconcileRegionUpdate(ctx context.Context, state, plan []Region, clusterID string, service client.Service) (*map[string]int32, diag.Diagnostics) { + type regionInfo struct { + inState bool + inPlan bool + nodeCount int64 + } + var regionUpdateRequired bool + regions := make(map[string]*regionInfo, len(state)) + for _, region := range state { + regions[region.Name.ValueString()] = ®ionInfo{true, false, region.NodeCount.ValueInt64()} + } + for _, planRegion := range plan { + region, ok := regions[planRegion.Name.ValueString()] + if !ok { + regions[planRegion.Name.ValueString()] = ®ionInfo{false, true, planRegion.NodeCount.ValueInt64()} + regionUpdateRequired = true + } else { + region.inPlan = true + if region.nodeCount != planRegion.NodeCount.ValueInt64() { + region.nodeCount = planRegion.NodeCount.ValueInt64() + regionUpdateRequired = true + } + } + } + for _, region := range regions { + if !region.inPlan { + regionUpdateRequired = true + } + } + if regionUpdateRequired { + cluster, _, err := service.GetCluster(ctx, clusterID) + if err != nil { + diags := diag.Diagnostics{} + diags.AddError("Error retrieving cluster info", formatAPIErrorMessage(err)) + return nil, diags + } + for _, region := range cluster.Regions { + _, ok := regions[region.Name] + if !ok { + regions[region.Name] = ®ionInfo{ + inState: false, + inPlan: false, + nodeCount: int64(region.NodeCount), + } + } + } + regionNodes := make(map[string]int32, len(regions)) + for name, info := range regions { + // Omit regions that are in the state (meaning we had been managing them) + // but not the plan. Everything else stays. 
+ if info.inState && !info.inPlan { + continue + } + regionNodes[name] = int32(info.nodeCount) + } + return ®ionNodes, nil + } + return nil, nil +} + func NewClusterResource() resource.Resource { return &clusterResource{} } diff --git a/internal/provider/cluster_resource_test.go b/internal/provider/cluster_resource_test.go index ace28844..b49015e9 100644 --- a/internal/provider/cluster_resource_test.go +++ b/internal/provider/cluster_resource_test.go @@ -251,53 +251,47 @@ resource "cockroach_cluster" "dedicated" { func TestSortRegionsByPlan(t *testing.T) { t.Run("Plan matches cluster", func(t *testing.T) { - clusterObj := &client.Cluster{Regions: []client.Region{ + regions := []client.Region{ {Name: "us-central1"}, {Name: "us-east1"}, {Name: "us-west2"}, - }} - plan := &CockroachCluster{ - Regions: []Region{ - {Name: types.StringValue("us-west2")}, - {Name: types.StringValue("us-central1")}, - {Name: types.StringValue("us-east1")}, - }, } - sortRegionsByPlan(clusterObj, plan) - for i, region := range clusterObj.Regions { - require.Equal(t, plan.Regions[i].Name.ValueString(), region.Name) + plan := []Region{ + {Name: types.StringValue("us-west2")}, + {Name: types.StringValue("us-central1")}, + {Name: types.StringValue("us-east1")}, + } + sortRegionsByPlan(®ions, plan) + for i, region := range regions { + require.Equal(t, plan[i].Name.ValueString(), region.Name) } }) t.Run("More regions in cluster than plan", func(t *testing.T) { - clusterObj := &client.Cluster{Regions: []client.Region{ + regions := []client.Region{ {Name: "us-central1"}, {Name: "us-east1"}, {Name: "us-west2"}, - }} - plan := &CockroachCluster{ - Regions: []Region{ - {Name: types.StringValue("us-west2")}, - {Name: types.StringValue("us-central1")}, - }, + } + plan := []Region{ + {Name: types.StringValue("us-west2")}, + {Name: types.StringValue("us-central1")}, } // We really just want to make sure it doesn't panic here. - sortRegionsByPlan(clusterObj, plan) + sortRegionsByPlan(®ions, plan) }) t.Run("More regions in plan than cluster", func(t *testing.T) { - clusterObj := &client.Cluster{Regions: []client.Region{ + regions := []client.Region{ {Name: "us-central1"}, {Name: "us-east1"}, - }} - plan := &CockroachCluster{ - Regions: []Region{ - {Name: types.StringValue("us-west2")}, - {Name: types.StringValue("us-central1")}, - {Name: types.StringValue("us-east1")}, - }, + } + plan := []Region{ + {Name: types.StringValue("us-west2")}, + {Name: types.StringValue("us-central1")}, + {Name: types.StringValue("us-east1")}, } // We really just want to make sure it doesn't panic here. - sortRegionsByPlan(clusterObj, plan) + sortRegionsByPlan(®ions, plan) }) } diff --git a/internal/provider/cmek_resource.go b/internal/provider/cmek_resource.go new file mode 100644 index 00000000..e163a80e --- /dev/null +++ b/internal/provider/cmek_resource.go @@ -0,0 +1,443 @@ +/* +Copyright 2022 The Cockroach Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package provider + +import ( + "context" + "fmt" + "net/http" + "reflect" + "sort" + + "github.com/cockroachdb/cockroach-cloud-sdk-go/pkg/client" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + sdk_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +var cmekAttributes = map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Required: true, + MarkdownDescription: "Cluster ID", + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "status": schema.StringAttribute{ + MarkdownDescription: "Aggregated status of the cluster's encryption key(s)", + Computed: true, + Optional: true, + }, + "regions": schema.ListNestedAttribute{ + Required: true, + PlanModifiers: []planmodifier.List{ + listplanmodifier.UseStateForUnknown(), + }, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "region": schema.StringAttribute{ + Required: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "key": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "auth_principal": schema.StringAttribute{ + Required: true, + }, + "type": schema.StringAttribute{ + MarkdownDescription: "Current allowed values are 'AWS_KMS' and 'GCP_CLOUD_KMS'", + Required: true, + }, + "uri": schema.StringAttribute{ + Required: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "user_message": schema.StringAttribute{ + Computed: true, + }, + "created_at": schema.StringAttribute{ + Computed: true, + }, + "updated_at": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + "additional_regions": schema.ListNestedAttribute{ + NestedObject: regionSchema, + Optional: true, + MarkdownDescription: "Once CMEK is enabled for a cluster, no new regions can be added to the cluster resource, since they need encryption key info stored in the CMEK resource. 
New regions can be added and maintained here instead.", + }, +} + +type cmekResource struct { + provider *provider +} + +func (r *cmekResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + MarkdownDescription: "Customer-managed encryption keys (CMEK) resource for a single cluster", + Attributes: cmekAttributes, + } +} + +func (r *cmekResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cmek" +} + +func (r *cmekResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + var ok bool + if r.provider, ok = req.ProviderData.(*provider); !ok { + resp.Diagnostics.AddError("Internal provider error", + fmt.Sprintf("Error in Configure: expected %T but got %T", provider{}, req.ProviderData)) + } +} + +func (r *cmekResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + if r.provider == nil || !r.provider.configured { + addConfigureProviderErr(&resp.Diagnostics) + return + } + + var plan ClusterCMEK + + diags := req.Config.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + if len(plan.AdditionalRegions) != 0 { + resp.Diagnostics.AddError( + "Invalid initial CMEK plan", + "`additional_regions` must be empty initially. Add new regions to the parent cluster before creating a CMEK resource") + return + } + + cmekSpec := client.NewCMEKClusterSpecificationWithDefaults() + regionSpecs := make([]client.CMEKRegionSpecification, 0, len(plan.Regions)) + for _, region := range plan.Regions { + regionSpecs = append(regionSpecs, cmekRegionToClientSpec(region)) + } + cmekSpec.SetRegionSpecs(regionSpecs) + + cmekObj, _, err := r.provider.service.EnableCMEKSpec(ctx, plan.ID.ValueString(), cmekSpec) + if err != nil { + resp.Diagnostics.AddError( + "Error enabling CMEK", + fmt.Sprintf("Could not enable CMEK: %v", formatAPIErrorMessage(err)), + ) + return + } + err = sdk_resource.RetryContext(ctx, clusterUpdateTimeout, + waitForCMEKReadyFunc(ctx, plan.ID.ValueString(), r.provider.service, cmekObj)) + if err != nil { + resp.Diagnostics.AddError( + "CMEK enable failed", + fmt.Sprintf("CMEK is not ready: %s", formatAPIErrorMessage(err)), + ) + return + } + + var state ClusterCMEK + loadCMEKToTerraformState(cmekObj, &state, &plan) + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) +} + +func (r *cmekResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + if r.provider == nil || !r.provider.configured { + addConfigureProviderErr(&resp.Diagnostics) + return + } + + var cmek ClusterCMEK + diags := req.State.Get(ctx, &cmek) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() || cmek.ID.IsNull() { + return + } + + clusterID := cmek.ID.ValueString() + cmekObj, httpResp, err := r.provider.service.GetCMEKClusterInfo(ctx, clusterID) + if err != nil { + if httpResp != nil && httpResp.StatusCode == http.StatusNotFound { + resp.Diagnostics.AddWarning( + "CMEK spec not found", + fmt.Sprintf("CMEK specification with cluster ID %s is not found. 
Removing from state.", clusterID)) + resp.State.RemoveResource(ctx) + } else { + resp.Diagnostics.AddError( + "Error getting CMEK info", + fmt.Sprintf("Unexpected error retrieving CMEK info: %s", formatAPIErrorMessage(err))) + } + return + } + + // We actually want to use the current state as the plan here, + // since we're trying to see if it changed. + loadCMEKToTerraformState(cmekObj, &cmek, &cmek) + diags = resp.State.Set(ctx, cmek) + resp.Diagnostics.Append(diags...) +} + +func (r *cmekResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Get plan values + var plan ClusterCMEK + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Get current state + var state ClusterCMEK + diags = req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + existingRegions := make(map[string]client.CMEKRegionSpecification, len(state.Regions)) + for _, region := range state.Regions { + existingRegions[region.Region.ValueString()] = cmekRegionToClientSpec(region) + } + + updateRegions := make([]client.CMEKRegionSpecification, 0, len(state.Regions)) + newRegions := make([]client.CMEKRegionSpecification, 0, len(plan.Regions)) + for _, plannedRegion := range plan.Regions { + if existingRegion, ok := existingRegions[plannedRegion.Region.ValueString()]; !ok { + newRegions = append(newRegions, cmekRegionToClientSpec(plannedRegion)) + } else if plannedRegionSpec := cmekRegionToClientSpec(plannedRegion); !reflect.DeepEqual(existingRegion, plannedRegionSpec) { + updateRegions = append(updateRegions, plannedRegionSpec) + } + } + + regionNodes, diags := reconcileRegionUpdate(ctx, state.AdditionalRegions, plan.AdditionalRegions, state.ID.ValueString(), r.provider.service) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + if plan.Status.ValueString() == string(client.CMEKSTATUS_REVOKED) { + if regionNodes != nil || len(updateRegions) != 0 || len(newRegions) != 0 { + resp.Diagnostics.AddError("Invalid CMEK update", + "Can't revoke access and modify regions in the same operation") + return + } + if _, _, err := r.provider.service.UpdateCMEKStatus(ctx, plan.ID.ValueString(), &client.UpdateCMEKStatusRequest{ + Action: client.CMEKCUSTOMERACTION_REVOKE, + }); err != nil { + resp.Diagnostics.AddError("Error revoking CMEK", + fmt.Sprintf("Error while attempting to revoke CMEK: %s", formatAPIErrorMessage(err))) + return + } + } + + var cluster *client.Cluster + if regionNodes != nil { + // UpdateCluster expects only new regions in CmekRegionSpecs. Theoretically, + // this should be the regions that are in the plan, but not the spec. 
+ if len(*regionNodes) != len(newRegions)+len(existingRegions) { + resp.Diagnostics.AddError("Mismatch between CMEK and cluster regions", "Each new addition to `additional_regions` must be accompanied by exactly one corresponding entry in `regions`.") + return + } + var err error + cluster, _, err = r.provider.service.UpdateCluster(ctx, state.ID.ValueString(), &client.UpdateClusterSpecification{ + Dedicated: &client.DedicatedClusterUpdateSpecification{ + RegionNodes: regionNodes, + CmekRegionSpecs: &newRegions, + }, + }, &client.UpdateClusterOptions{}) + if err != nil { + resp.Diagnostics.AddError( + "Error updating cluster", + fmt.Sprintf("Error updating cluster: %s", formatAPIErrorMessage(err))) + return + } + + err = sdk_resource.RetryContext(ctx, clusterUpdateTimeout, + waitForClusterReadyFunc(ctx, plan.ID.ValueString(), r.provider.service, cluster)) + if err != nil { + resp.Diagnostics.AddError( + "Adding new regions failed", + fmt.Sprintf("Error adding new regions: %s", formatAPIErrorMessage(err)), + ) + return + } + state.AdditionalRegions = getManagedRegions(&cluster.Regions, plan.AdditionalRegions) + } + + if len(updateRegions) != 0 { + _, _, err := r.provider.service.UpdateCMEKSpec( + ctx, + plan.ID.ValueString(), + &client.CMEKClusterSpecification{RegionSpecs: updateRegions}, + ) + if err != nil { + resp.Diagnostics.AddError("Error updating CMEK specification", formatAPIErrorMessage(err)) + return + } + } + + var clusterInfo client.CMEKClusterInfo + err := sdk_resource.RetryContext(ctx, clusterUpdateTimeout, + waitForCMEKReadyFunc(ctx, plan.ID.ValueString(), r.provider.service, &clusterInfo)) + if err != nil { + resp.Diagnostics.AddError( + "Updating CMEK regions failed", + fmt.Sprintf("Error updating CMEK regions: %s", formatAPIErrorMessage(err)), + ) + return + } + + loadCMEKToTerraformState(&clusterInfo, &state, &plan) + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) +} + +// Delete is a no-op, since you can't disable CMEK once it's set up. +func (r *cmekResource) Delete(ctx context.Context, _ resource.DeleteRequest, resp *resource.DeleteResponse) { + resp.State.RemoveResource(ctx) +} + +func (r *cmekResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} + +// Since the API response will always sort regions by name, we need to +// resort the list, so it matches up with the plan. If the response and +// plan regions don't match up, the sort won't work right, but we can +// ignore it. Terraform will handle it. +func sortCMEKRegionsByPlan(cmekObj *client.CMEKClusterInfo, plan *ClusterCMEK) { + if cmekObj == nil || plan == nil { + return + } + regionOrdinals := make(map[string]int, len(cmekObj.GetRegionInfos())) + for i, region := range plan.Regions { + regionOrdinals[region.Region.ValueString()] = i + } + sort.Slice(*cmekObj.RegionInfos, func(i, j int) bool { + return regionOrdinals[cmekObj.GetRegionInfos()[i].GetRegion()] < regionOrdinals[cmekObj.GetRegionInfos()[j].GetRegion()] + }) +} + +func loadCMEKToTerraformState(cmekObj *client.CMEKClusterInfo, state *ClusterCMEK, plan *ClusterCMEK) { + sortCMEKRegionsByPlan(cmekObj, plan) + var rgs []CMEKRegion + for i, region := range cmekObj.GetRegionInfos() { + var keyInfo client.CMEKKeyInfo + // If we have a plan, find the key that matches the plan URI. + // If there's no plan (i.e. import), use the first key that's enabled. 
+ for _, key := range region.GetKeyInfos() { + if plan != nil && len(plan.Regions) > 0 { + if *key.GetSpec().Uri == plan.Regions[i].Key.URI.ValueString() { + keyInfo = key + break + } + } else { + if key.GetStatus() == client.CMEKSTATUS_ENABLED { + keyInfo = key + break + } + } + } + rg := CMEKRegion{ + Region: types.StringValue(region.GetRegion()), + Status: types.StringValue(string(region.GetStatus())), + Key: CMEKKey{ + Status: types.StringValue(string(keyInfo.GetStatus())), + UserMessage: types.StringValue(keyInfo.GetUserMessage()), + Type: types.StringValue(string(keyInfo.Spec.GetType())), + URI: types.StringValue(keyInfo.Spec.GetUri()), + AuthPrincipal: types.StringValue(keyInfo.Spec.GetAuthPrincipal()), + CreatedAt: types.StringValue(keyInfo.GetCreatedAt().String()), + UpdatedAt: types.StringValue(keyInfo.GetUpdatedAt().String()), + }, + } + rgs = append(rgs, rg) + } + + state.Regions = rgs + state.ID = plan.ID + state.Status = types.StringValue(string(cmekObj.GetStatus())) +} + +func cmekRegionToClientSpec(region CMEKRegion) client.CMEKRegionSpecification { + name := region.Region.ValueString() + keyType := client.CMEKKeyType(region.Key.Type.ValueString()) + uri := region.Key.URI.ValueString() + authPrincipal := region.Key.AuthPrincipal.ValueString() + return client.CMEKRegionSpecification{ + Region: &name, + KeySpec: &client.CMEKKeySpecification{ + Type: &keyType, + Uri: &uri, + AuthPrincipal: &authPrincipal, + }, + } +} + +func waitForCMEKReadyFunc(ctx context.Context, clusterID string, cl client.Service, cmek *client.CMEKClusterInfo) sdk_resource.RetryFunc { + return func() *sdk_resource.RetryError { + apiCMEK, httpResp, err := cl.GetCMEKClusterInfo(ctx, clusterID) + if err != nil { + if httpResp != nil && httpResp.StatusCode < http.StatusInternalServerError { + return sdk_resource.NonRetryableError(fmt.Errorf("error getting cmek: %s", formatAPIErrorMessage(err))) + } else { + return sdk_resource.RetryableError(fmt.Errorf("encountered a server error while reading cmek status - trying again")) + } + } + *cmek = *apiCMEK + for _, region := range cmek.GetRegionInfos() { + switch region.GetStatus() { + case client.CMEKSTATUS_ENABLED, + client.CMEKSTATUS_DISABLED, + client.CMEKSTATUS_REVOKED: + continue + case client.CMEKSTATUS_DISABLE_FAILED, + client.CMEKSTATUS_ENABLE_FAILED, + client.CMEKSTATUS_REVOKE_FAILED, + client.CMEKSTATUS_ROTATE_FAILED: + return sdk_resource.NonRetryableError(fmt.Errorf("cmek update failed")) + default: + return sdk_resource.RetryableError(fmt.Errorf("cmek is not ready yet")) + } + } + return nil + } +} + +func NewCMEKResource() resource.Resource { + return &cmekResource{} +} diff --git a/internal/provider/cmek_resource_test.go b/internal/provider/cmek_resource_test.go new file mode 100644 index 00000000..e447766d --- /dev/null +++ b/internal/provider/cmek_resource_test.go @@ -0,0 +1,336 @@ +/* + Copyright 2022 The Cockroach Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package provider + +import ( + "fmt" + "net/http" + "os" + "testing" + + "github.com/cockroachdb/cockroach-cloud-sdk-go/pkg/client" + mock_client "github.com/cockroachdb/terraform-provider-cockroach/mock" + "github.com/golang/mock/gomock" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +// TestAccCMEKResource attempts to create, check, and destroy +// a real cluster and allowlist entry. It will be skipped if TF_ACC isn't set. +func TestAccCMEKResource(t *testing.T) { + t.Skip("Skipping until we can either integrate the AWS provider " + + "or import a permanent test fixture.") + t.Parallel() + clusterName := fmt.Sprintf("tftest-cmek-%s", GenerateRandomString(4)) + testCMEKResource(t, clusterName, false) +} + +// TestIntegrationCMEKResource attempts to create, check, and destroy +// a cluster, but uses a mocked API service. +func TestIntegrationCMEKResource(t *testing.T) { + clusterName := fmt.Sprintf("tftest-cmek-%s", GenerateRandomString(4)) + clusterID := "cluster-id" + if os.Getenv(CockroachAPIKey) == "" { + os.Setenv(CockroachAPIKey, "fake") + } + + ctrl := gomock.NewController(t) + s := mock_client.NewMockService(ctrl) + defer HookGlobal(&NewService, func(c *client.Client) client.Service { + return s + })() + + initialCluster := &client.Cluster{ + Id: clusterID, + Name: clusterName, + CockroachVersion: "v22.2.0", + Plan: "DEDICATED", + CloudProvider: "AWS", + State: "CREATED", + Config: client.ClusterConfig{ + Dedicated: &client.DedicatedHardwareConfig{ + MachineType: "m5.xlarge", + NumVirtualCpus: 4, + StorageGib: 35, + MemoryGib: 8, + }, + }, + Regions: []client.Region{ + { + Name: "us-central-1", + NodeCount: 3, + }, + }, + } + updatedCluster := &client.Cluster{} + *updatedCluster = *initialCluster + updatedCluster.Regions = append( + updatedCluster.Regions, + []client.Region{ + { + Name: "us-east-1", + NodeCount: 3, + }, + { + Name: "us-east-2", + NodeCount: 3, + }, + }...) 
+ + keyType := client.CMEKKeyType("AWS_KMS") + keyURI := "aws-kms-key-arn" + keyPrincipal := "aws-auth-principal-arn" + keySpec := &client.CMEKKeySpecification{ + Type: &keyType, + Uri: &keyURI, + AuthPrincipal: &keyPrincipal, + } + + usCentral1 := "us-central-1" + cmekCreateSpec := &client.CMEKClusterSpecification{ + RegionSpecs: []client.CMEKRegionSpecification{ + { + Region: &usCentral1, + KeySpec: keySpec, + }, + }, + } + + usEast1 := "us-east-1" + usEast2 := "us-east-2" + cmekUpdateRegionSpecs := []client.CMEKRegionSpecification{ + { + Region: &usEast1, + KeySpec: keySpec, + }, + { + Region: &usEast2, + KeySpec: keySpec, + }, + } + clusterUpdateSpec := &client.UpdateClusterSpecification{ + Dedicated: &client.DedicatedClusterUpdateSpecification{ + RegionNodes: &map[string]int32{ + usCentral1: 3, + usEast1: 3, + usEast2: 3, + }, + CmekRegionSpecs: &cmekUpdateRegionSpecs, + }, + } + + cmekStatus := client.CMEKSTATUS_ENABLED + initialCMEKInfo := &client.CMEKClusterInfo{ + Status: &cmekStatus, + RegionInfos: &[]client.CMEKRegionInfo{ + { + Region: &usCentral1, + Status: &cmekStatus, + KeyInfos: &[]client.CMEKKeyInfo{ + { + Status: &cmekStatus, + Spec: keySpec, + }, + }, + }, + }, + } + updatedCMEKInfo := &client.CMEKClusterInfo{ + Status: &cmekStatus, + RegionInfos: &[]client.CMEKRegionInfo{ + { + Region: &usCentral1, + Status: &cmekStatus, + KeyInfos: &[]client.CMEKKeyInfo{ + { + Status: &cmekStatus, + Spec: keySpec, + }, + }, + }, + { + Region: &usEast1, + Status: &cmekStatus, + KeyInfos: &[]client.CMEKKeyInfo{ + { + Status: &cmekStatus, + Spec: keySpec, + }, + }, + }, + { + Region: &usEast2, + Status: &cmekStatus, + KeyInfos: &[]client.CMEKKeyInfo{ + { + Status: &cmekStatus, + Spec: keySpec, + }, + }, + }, + }, + } + + // Create + s.EXPECT().CreateCluster(gomock.Any(), gomock.Any()). + Return(initialCluster, nil, nil) + s.EXPECT().GetCluster(gomock.Any(), clusterID). + Return(initialCluster, &http.Response{Status: http.StatusText(http.StatusOK)}, nil). + Times(3) + s.EXPECT().EnableCMEKSpec(gomock.Any(), clusterID, cmekCreateSpec). + Return(initialCMEKInfo, nil, nil) + s.EXPECT().GetCMEKClusterInfo(gomock.Any(), clusterID). + Return(initialCMEKInfo, nil, nil). + Times(2) + + // Update + s.EXPECT().GetCluster(gomock.Any(), clusterID). + Return(initialCluster, nil, nil). + Times(2) + s.EXPECT().GetCMEKClusterInfo(gomock.Any(), clusterID). + Return(initialCMEKInfo, nil, nil) + s.EXPECT().UpdateCluster(gomock.Any(), clusterID, clusterUpdateSpec, gomock.Any()). + Return(updatedCluster, nil, nil) + s.EXPECT().GetCluster(gomock.Any(), clusterID). + Return(updatedCluster, &http.Response{Status: http.StatusText(http.StatusOK)}, nil). + Times(2) + s.EXPECT().GetCMEKClusterInfo(gomock.Any(), clusterID). + Return(updatedCMEKInfo, nil, nil). 
+ Times(2) + + // Delete + s.EXPECT().DeleteCluster(gomock.Any(), clusterID) + + testCMEKResource(t, clusterName, true) +} + +func testCMEKResource(t *testing.T, clusterName string, useMock bool) { + var ( + clusterResourceName = "cockroach_cluster.test" + cmekResourceName = "cockroach_cmek.test" + cluster client.Cluster + ) + + resource.Test(t, resource.TestCase{ + IsUnitTest: useMock, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: getTestCMEKResourceCreateConfig(clusterName), + Check: resource.ComposeTestCheckFunc( + testCheckCockroachClusterExists(clusterResourceName, &cluster), + ), + }, + { + Config: getTestCMEKResourceUpdateConfig(clusterName), + Check: resource.ComposeTestCheckFunc( + // The original region should only show up under the cluster resource, + // and the two additional regions should only show up under the CMEK resource. + resource.TestCheckResourceAttr(clusterResourceName, "regions.#", "1"), + resource.TestCheckResourceAttr(cmekResourceName, "additional_regions.#", "2"), + resource.TestCheckResourceAttr(cmekResourceName, "regions.#", "3"), + ), + }, + }, + }) +} + +func getTestCMEKResourceCreateConfig(name string) string { + return fmt.Sprintf(` +resource "cockroach_cluster" "test" { + name = "%s" + cloud_provider = "AWS" + dedicated = { + storage_gib = 35 + machine_type = "m5.xlarge" + } + regions = [{ + name = "us-central-1" + node_count: 3 + }] +} + +resource "cockroach_cmek" "test" { + id = cockroach_cluster.test.id + regions = [{ + region: "us-central-1" + key: { + auth_principal: "aws-auth-principal-arn" + type: "AWS_KMS" + uri: "aws-kms-key-arn" + } + }] +} +`, name) +} + +func getTestCMEKResourceUpdateConfig(name string) string { + return fmt.Sprintf(` +resource "cockroach_cluster" "test" { + name = "%s" + cloud_provider = "AWS" + dedicated = { + storage_gib = 35 + machine_type = "m5.xlarge" + } + regions = [{ + name = "us-central-1" + node_count: 3 + }] +} + +resource "cockroach_cmek" "test" { + id = cockroach_cluster.test.id + regions = [ + { + region: "us-central-1" + key: { + auth_principal: "aws-auth-principal-arn" + type: "AWS_KMS" + uri: "aws-kms-key-arn" + } + }, + { + region: "us-east-1" + key: { + auth_principal: "aws-auth-principal-arn" + type: "AWS_KMS" + uri: "aws-kms-key-arn" + } + }, + { + region: "us-east-2" + key: { + auth_principal: "aws-auth-principal-arn" + type: "AWS_KMS" + uri: "aws-kms-key-arn" + } + } + ] + additional_regions = [ + { + name = "us-east-1" + node_count: 3 + }, + { + name = "us-east-2" + node_count: 3 + } + ] +} +`, name) +} diff --git a/internal/provider/cockroach_cluster_data_source.go b/internal/provider/cockroach_cluster_data_source.go index c49e6497..889b1eb8 100644 --- a/internal/provider/cockroach_cluster_data_source.go +++ b/internal/provider/cockroach_cluster_data_source.go @@ -21,6 +21,7 @@ import ( "fmt" "net/http" + "github.com/cockroachdb/cockroach-cloud-sdk-go/pkg/client" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/types" @@ -81,6 +82,9 @@ func (d *clusterDataSource) Schema(_ context.Context, _ datasource.SchemaRequest "disk_iops": schema.Int64Attribute{ Computed: true, }, + "private_network_visibility": schema.BoolAttribute{ + Computed: true, + }, }, }, "regions": schema.ListNestedAttribute{ @@ -136,7 +140,7 @@ func (d *clusterDataSource) Read(ctx context.Context, req 
datasource.ReadRequest addConfigureProviderErr(&resp.Diagnostics) return } - + var cluster CockroachCluster diags := req.Config.Get(ctx, &cluster) @@ -155,7 +159,7 @@ func (d *clusterDataSource) Read(ctx context.Context, req datasource.ReadRequest } cockroachCluster, httpResp, err := d.provider.service.GetCluster(ctx, cluster.ID.ValueString()) - if httpResp.StatusCode == http.StatusNotFound { + if httpResp != nil && httpResp.StatusCode == http.StatusNotFound { resp.Diagnostics.AddError( "Cluster not found", fmt.Sprintf("Couldn't find a cluster with ID %s", cluster.ID.ValueString())) @@ -181,11 +185,12 @@ func (d *clusterDataSource) Read(ctx context.Context, req datasource.ReadRequest } if cockroachCluster.Config.Dedicated != nil { cluster.DedicatedConfig = &DedicatedClusterConfig{ - MachineType: types.StringValue(cockroachCluster.Config.Dedicated.MachineType), - NumVirtualCpus: types.Int64Value(int64(cockroachCluster.Config.Dedicated.NumVirtualCpus)), - StorageGib: types.Int64Value(int64(cockroachCluster.Config.Dedicated.StorageGib)), - MemoryGib: types.Float64Value(float64(cockroachCluster.Config.Dedicated.MemoryGib)), - DiskIops: types.Int64Value(int64(cockroachCluster.Config.Dedicated.DiskIops)), + MachineType: types.StringValue(cockroachCluster.Config.Dedicated.MachineType), + NumVirtualCpus: types.Int64Value(int64(cockroachCluster.Config.Dedicated.NumVirtualCpus)), + StorageGib: types.Int64Value(int64(cockroachCluster.Config.Dedicated.StorageGib)), + MemoryGib: types.Float64Value(float64(cockroachCluster.Config.Dedicated.MemoryGib)), + DiskIops: types.Int64Value(int64(cockroachCluster.Config.Dedicated.DiskIops)), + PrivateNetworkVisibility: types.BoolValue(cockroachCluster.GetNetworkVisibility() == client.NETWORKVISIBLITY_PRIVATE), } } diff --git a/internal/provider/models.go b/internal/provider/models.go index 1616d945..d3dbd5a3 100644 --- a/internal/provider/models.go +++ b/internal/provider/models.go @@ -36,11 +36,12 @@ type Region struct { } type DedicatedClusterConfig struct { - MachineType types.String `tfsdk:"machine_type"` - NumVirtualCpus types.Int64 `tfsdk:"num_virtual_cpus"` - StorageGib types.Int64 `tfsdk:"storage_gib"` - MemoryGib types.Float64 `tfsdk:"memory_gib"` - DiskIops types.Int64 `tfsdk:"disk_iops"` + MachineType types.String `tfsdk:"machine_type"` + NumVirtualCpus types.Int64 `tfsdk:"num_virtual_cpus"` + StorageGib types.Int64 `tfsdk:"storage_gib"` + MemoryGib types.Float64 `tfsdk:"memory_gib"` + DiskIops types.Int64 `tfsdk:"disk_iops"` + PrivateNetworkVisibility types.Bool `tfsdk:"private_network_visibility"` } type ServerlessClusterConfig struct { @@ -114,6 +115,29 @@ type PrivateEndpointConnection struct { ClusterID types.String `tfsdk:"cluster_id"` } +type CMEKKey struct { + Status types.String `tfsdk:"status"` + UserMessage types.String `tfsdk:"user_message"` + Type types.String `tfsdk:"type"` + URI types.String `tfsdk:"uri"` + AuthPrincipal types.String `tfsdk:"auth_principal"` + CreatedAt types.String `tfsdk:"created_at"` + UpdatedAt types.String `tfsdk:"updated_at"` +} + +type CMEKRegion struct { + Region types.String `tfsdk:"region"` + Status types.String `tfsdk:"status"` + Key CMEKKey `tfsdk:"key"` +} + +type ClusterCMEK struct { + ID types.String `tfsdk:"id"` + Status types.String `tfsdk:"status"` + Regions []CMEKRegion `tfsdk:"regions"` + AdditionalRegions []Region `tfsdk:"additional_regions"` +} + func (e *APIErrorMessage) String() string { return fmt.Sprintf("%v-%v", e.Code, e.Message) } diff --git 
a/internal/provider/private_endpoint_connection_resource.go b/internal/provider/private_endpoint_connection_resource.go index a0742b96..943a8bd0 100644 --- a/internal/provider/private_endpoint_connection_resource.go +++ b/internal/provider/private_endpoint_connection_resource.go @@ -241,7 +241,7 @@ func (r *privateEndpointConnectionResource) Delete(ctx context.Context, req reso &client.CockroachCloudSetAwsEndpointConnectionStateRequest{ Status: &status, }) - if err != nil && httpResp.StatusCode != http.StatusNotFound { + if err != nil && httpResp != nil && httpResp.StatusCode != http.StatusNotFound { diags.AddError("Couldn't delete connection", fmt.Sprintf("Unexpected error occurred while setting connection status: %s", formatAPIErrorMessage(err))) return @@ -276,7 +276,7 @@ func waitForEndpointConnectionCreatedFunc(ctx context.Context, clusterID, endpoi return func() *sdk_resource.RetryError { connections, httpResp, err := cl.ListAwsEndpointConnections(ctx, clusterID) if err != nil { - if httpResp.StatusCode < http.StatusInternalServerError { + if httpResp != nil && httpResp.StatusCode < http.StatusInternalServerError { return sdk_resource.NonRetryableError(fmt.Errorf("error getting endpoint connections: %s", formatAPIErrorMessage(err))) } else { return sdk_resource.RetryableError(fmt.Errorf("encountered a server error while reading connection status - trying again")) diff --git a/internal/provider/private_endpoint_services_resource.go b/internal/provider/private_endpoint_services_resource.go index 6df04a5c..9580cce8 100644 --- a/internal/provider/private_endpoint_services_resource.go +++ b/internal/provider/private_endpoint_services_resource.go @@ -194,7 +194,7 @@ func (r *privateEndpointServicesResource) Read(ctx context.Context, req resource } apiResp, httpResp, err := r.provider.service.ListPrivateEndpointServices(ctx, state.ClusterID.ValueString()) if err != nil { - if httpResp.StatusCode == http.StatusNotFound { + if httpResp != nil && httpResp.StatusCode == http.StatusNotFound { resp.Diagnostics.AddWarning("Couldn't find endpoint services", "Couldn't find endpoint services, which usually means the cluster has been deleted. 
Removing from state.") resp.State.RemoveResource(ctx) @@ -255,7 +255,7 @@ func waitForEndpointServicesCreatedFunc(ctx context.Context, clusterID string, c return func() *sdk_resource.RetryError { apiServices, httpResp, err := cl.ListPrivateEndpointServices(ctx, clusterID) if err != nil { - if httpResp.StatusCode < http.StatusInternalServerError { + if httpResp != nil && httpResp.StatusCode < http.StatusInternalServerError { return sdk_resource.NonRetryableError(fmt.Errorf("error getting endpoint services: %s", formatAPIErrorMessage(err))) } else { return sdk_resource.RetryableError(fmt.Errorf("encountered a server error while reading endpoint status - trying again")) diff --git a/internal/provider/provider.go b/internal/provider/provider.go index aabc17a2..94791f45 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -114,6 +114,7 @@ func (p *provider) Resources(_ context.Context) []func() resource.Resource { NewAllowlistResource, NewPrivateEndpointServicesResource, NewPrivateEndpointConnectionResource, + NewCMEKResource, } } diff --git a/mock/build.go b/mock/build.go new file mode 100644 index 00000000..d88df222 --- /dev/null +++ b/mock/build.go @@ -0,0 +1,2 @@ +//go:generate mockgen -package mock_client -destination ./service.go github.com/cockroachdb/cockroach-cloud-sdk-go/pkg/client Service +package mock_client diff --git a/mock/service.go b/mock/service.go index 5a90157d..5d2019bb 100644 --- a/mock/service.go +++ b/mock/service.go @@ -68,6 +68,22 @@ func (mr *MockServiceMockRecorder) AddAllowlistEntry2(arg0, arg1, arg2, arg3, ar return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddAllowlistEntry2", reflect.TypeOf((*MockService)(nil).AddAllowlistEntry2), arg0, arg1, arg2, arg3, arg4) } +// AddEgressRule mocks base method. +func (m *MockService) AddEgressRule(arg0 context.Context, arg1 string, arg2 *client.AddEgressRuleRequest) (*client.AddEgressRuleResponse, *http.Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddEgressRule", arg0, arg1, arg2) + ret0, _ := ret[0].(*client.AddEgressRuleResponse) + ret1, _ := ret[1].(*http.Response) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// AddEgressRule indicates an expected call of AddEgressRule. +func (mr *MockServiceMockRecorder) AddEgressRule(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddEgressRule", reflect.TypeOf((*MockService)(nil).AddEgressRule), arg0, arg1, arg2) +} + // CreateCluster mocks base method. func (m *MockService) CreateCluster(arg0 context.Context, arg1 *client.CreateClusterRequest) (*client.Cluster, *http.Response, error) { m.ctrl.T.Helper() @@ -180,6 +196,22 @@ func (mr *MockServiceMockRecorder) DeleteDatabase(arg0, arg1, arg2 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteDatabase", reflect.TypeOf((*MockService)(nil).DeleteDatabase), arg0, arg1, arg2) } +// DeleteEgressRule mocks base method. +func (m *MockService) DeleteEgressRule(arg0 context.Context, arg1, arg2 string, arg3 *client.DeleteEgressRuleOptions) (*client.DeleteEgressRuleResponse, *http.Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteEgressRule", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*client.DeleteEgressRuleResponse) + ret1, _ := ret[1].(*http.Response) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// DeleteEgressRule indicates an expected call of DeleteEgressRule. 
+func (mr *MockServiceMockRecorder) DeleteEgressRule(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteEgressRule", reflect.TypeOf((*MockService)(nil).DeleteEgressRule), arg0, arg1, arg2, arg3) +} + // DeleteLogExport mocks base method. func (m *MockService) DeleteLogExport(arg0 context.Context, arg1 string) (*client.LogExportClusterInfo, *http.Response, error) { m.ctrl.T.Helper() @@ -196,6 +228,22 @@ func (mr *MockServiceMockRecorder) DeleteLogExport(arg0, arg1 interface{}) *gomo return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLogExport", reflect.TypeOf((*MockService)(nil).DeleteLogExport), arg0, arg1) } +// DeleteMetricExport mocks base method. +func (m *MockService) DeleteMetricExport(arg0 context.Context, arg1 string, arg2 *client.DeleteMetricExportOptions) (*client.DeleteMetricExportResponse, *http.Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteMetricExport", arg0, arg1, arg2) + ret0, _ := ret[0].(*client.DeleteMetricExportResponse) + ret1, _ := ret[1].(*http.Response) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// DeleteMetricExport indicates an expected call of DeleteMetricExport. +func (mr *MockServiceMockRecorder) DeleteMetricExport(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteMetricExport", reflect.TypeOf((*MockService)(nil).DeleteMetricExport), arg0, arg1, arg2) +} + // DeleteSQLUser mocks base method. func (m *MockService) DeleteSQLUser(arg0 context.Context, arg1, arg2 string) (*client.SQLUser, *http.Response, error) { m.ctrl.T.Helper() @@ -228,6 +276,22 @@ func (mr *MockServiceMockRecorder) EditDatabase(arg0, arg1, arg2 interface{}) *g return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EditDatabase", reflect.TypeOf((*MockService)(nil).EditDatabase), arg0, arg1, arg2) } +// EditEgressRule mocks base method. +func (m *MockService) EditEgressRule(arg0 context.Context, arg1, arg2 string, arg3 *client.EditEgressRuleRequest) (*client.EditEgressRuleResponse, *http.Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EditEgressRule", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*client.EditEgressRuleResponse) + ret1, _ := ret[1].(*http.Response) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// EditEgressRule indicates an expected call of EditEgressRule. +func (mr *MockServiceMockRecorder) EditEgressRule(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EditEgressRule", reflect.TypeOf((*MockService)(nil).EditEgressRule), arg0, arg1, arg2, arg3) +} + // EnableCMEKSpec mocks base method. func (m *MockService) EnableCMEKSpec(arg0 context.Context, arg1 string, arg2 *client.CMEKClusterSpecification) (*client.CMEKClusterInfo, *http.Response, error) { m.ctrl.T.Helper() @@ -260,6 +324,22 @@ func (mr *MockServiceMockRecorder) EnableLogExport(arg0, arg1, arg2 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableLogExport", reflect.TypeOf((*MockService)(nil).EnableLogExport), arg0, arg1, arg2) } +// EnableMetricExport mocks base method. 
+func (m *MockService) EnableMetricExport(arg0 context.Context, arg1 string, arg2 *client.EnableMetricExportRequest) (*client.MetricExportInfo, *http.Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EnableMetricExport", arg0, arg1, arg2) + ret0, _ := ret[0].(*client.MetricExportInfo) + ret1, _ := ret[1].(*http.Response) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// EnableMetricExport indicates an expected call of EnableMetricExport. +func (mr *MockServiceMockRecorder) EnableMetricExport(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnableMetricExport", reflect.TypeOf((*MockService)(nil).EnableMetricExport), arg0, arg1, arg2) +} + // GetCMEKClusterInfo mocks base method. func (m *MockService) GetCMEKClusterInfo(arg0 context.Context, arg1 string) (*client.CMEKClusterInfo, *http.Response, error) { m.ctrl.T.Helper() @@ -292,6 +372,22 @@ func (mr *MockServiceMockRecorder) GetCluster(arg0, arg1 interface{}) *gomock.Ca return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCluster", reflect.TypeOf((*MockService)(nil).GetCluster), arg0, arg1) } +// GetEgressRule mocks base method. +func (m *MockService) GetEgressRule(arg0 context.Context, arg1, arg2 string) (*client.GetEgressRuleResponse, *http.Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEgressRule", arg0, arg1, arg2) + ret0, _ := ret[0].(*client.GetEgressRuleResponse) + ret1, _ := ret[1].(*http.Response) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetEgressRule indicates an expected call of GetEgressRule. +func (mr *MockServiceMockRecorder) GetEgressRule(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEgressRule", reflect.TypeOf((*MockService)(nil).GetEgressRule), arg0, arg1, arg2) +} + // GetInvoice mocks base method. func (m *MockService) GetInvoice(arg0 context.Context, arg1 string) (*client.Invoice, *http.Response, error) { m.ctrl.T.Helper() @@ -324,6 +420,22 @@ func (mr *MockServiceMockRecorder) GetLogExportInfo(arg0, arg1 interface{}) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogExportInfo", reflect.TypeOf((*MockService)(nil).GetLogExportInfo), arg0, arg1) } +// GetMetricExportInfo mocks base method. +func (m *MockService) GetMetricExportInfo(arg0 context.Context, arg1 string) (*client.MetricExportInfo, *http.Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMetricExportInfo", arg0, arg1) + ret0, _ := ret[0].(*client.MetricExportInfo) + ret1, _ := ret[1].(*http.Response) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetMetricExportInfo indicates an expected call of GetMetricExportInfo. +func (mr *MockServiceMockRecorder) GetMetricExportInfo(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetricExportInfo", reflect.TypeOf((*MockService)(nil).GetMetricExportInfo), arg0, arg1) +} + // ListAllowlistEntries mocks base method. 
func (m *MockService) ListAllowlistEntries(arg0 context.Context, arg1 string, arg2 *client.ListAllowlistEntriesOptions) (*client.ListAllowlistEntriesResponse, *http.Response, error) { m.ctrl.T.Helper() @@ -420,6 +532,22 @@ func (mr *MockServiceMockRecorder) ListDatabases(arg0, arg1, arg2 interface{}) * return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListDatabases", reflect.TypeOf((*MockService)(nil).ListDatabases), arg0, arg1, arg2) } +// ListEgressRules mocks base method. +func (m *MockService) ListEgressRules(arg0 context.Context, arg1 string, arg2 *client.ListEgressRulesOptions) (*client.ListEgressRulesResponse, *http.Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListEgressRules", arg0, arg1, arg2) + ret0, _ := ret[0].(*client.ListEgressRulesResponse) + ret1, _ := ret[1].(*http.Response) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// ListEgressRules indicates an expected call of ListEgressRules. +func (mr *MockServiceMockRecorder) ListEgressRules(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEgressRules", reflect.TypeOf((*MockService)(nil).ListEgressRules), arg0, arg1, arg2) +} + // ListInvoices mocks base method. func (m *MockService) ListInvoices(arg0 context.Context) (*client.ListInvoicesResponse, *http.Response, error) { m.ctrl.T.Helper() @@ -484,6 +612,21 @@ func (mr *MockServiceMockRecorder) SetAwsEndpointConnectionState(arg0, arg1, arg return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetAwsEndpointConnectionState", reflect.TypeOf((*MockService)(nil).SetAwsEndpointConnectionState), arg0, arg1, arg2, arg3) } +// SetEgressTrafficPolicy mocks base method. +func (m *MockService) SetEgressTrafficPolicy(arg0 context.Context, arg1 string, arg2 *client.SetEgressTrafficPolicyRequest) (*http.Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetEgressTrafficPolicy", arg0, arg1, arg2) + ret0, _ := ret[0].(*http.Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SetEgressTrafficPolicy indicates an expected call of SetEgressTrafficPolicy. +func (mr *MockServiceMockRecorder) SetEgressTrafficPolicy(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEgressTrafficPolicy", reflect.TypeOf((*MockService)(nil).SetEgressTrafficPolicy), arg0, arg1, arg2) +} + // UpdateAllowlistEntry mocks base method. func (m *MockService) UpdateAllowlistEntry(arg0 context.Context, arg1, arg2 string, arg3 int32, arg4 *client.AllowlistEntry1, arg5 *client.UpdateAllowlistEntryOptions) (*client.AllowlistEntry, *http.Response, error) { m.ctrl.T.Helper()
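
The MockService methods above are mockgen output, regenerated from the //go:generate directive added in mock/build.go rather than edited by hand. In a test they are typically wired up through gomock; the sketch below shows the general shape, assuming the provider's module path for the mock import and using an illustrative test name and cluster ID that are not taken from this patch:

package provider

import (
	"context"
	"testing"

	"github.com/cockroachdb/cockroach-cloud-sdk-go/pkg/client"
	mock_client "github.com/cockroachdb/terraform-provider-cockroach/mock"
	"github.com/golang/mock/gomock"
)

func TestCMEKInfoLookup(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// NewMockService is the generated constructor; EXPECT() returns the
	// MockServiceMockRecorder shown in mock/service.go above.
	svc := mock_client.NewMockService(ctrl)
	svc.EXPECT().
		GetCMEKClusterInfo(gomock.Any(), "cluster-id").
		Return(&client.CMEKClusterInfo{}, nil, nil)

	info, _, err := svc.GetCMEKClusterInfo(context.Background(), "cluster-id")
	if err != nil || info == nil {
		t.Fatalf("unexpected result: %v", err)
	}
}
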