From bbbc6f00ed1ebf2c546eebe322d9c76fd660c307 Mon Sep 17 00:00:00 2001 From: Jennifer Georgevich Date: Fri, 17 Mar 2023 10:26:00 -0700 Subject: [PATCH] Add LogExportConfig resource (#81) Add a new LogExportConfig resource, which manages the log export configuration for a cluster. --- docs/resources/log_export_config.md | 51 ++ .../cockroach_log_export_config.tf | 27 + .../aws_cockroach_log_export_config/main.tf | 174 ++++++ .../gcp_cockroach_log_export_config/main.tf | 125 +++++ .../provider/log_export_config_resource.go | 506 ++++++++++++++++++ .../log_export_config_resource_test.go | 304 +++++++++++ internal/provider/models.go | 21 + internal/provider/provider.go | 1 + 8 files changed, 1209 insertions(+) create mode 100644 docs/resources/log_export_config.md create mode 100644 examples/resources/cockroach_log_export_config/cockroach_log_export_config.tf create mode 100644 examples/workflows/cockroach_log_export_config/aws_cockroach_log_export_config/main.tf create mode 100644 examples/workflows/cockroach_log_export_config/gcp_cockroach_log_export_config/main.tf create mode 100644 internal/provider/log_export_config_resource.go create mode 100644 internal/provider/log_export_config_resource_test.go diff --git a/docs/resources/log_export_config.md b/docs/resources/log_export_config.md new file mode 100644 index 00000000..a52c110f --- /dev/null +++ b/docs/resources/log_export_config.md @@ -0,0 +1,51 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "cockroach_log_export_config Resource - terraform-provider-cockroach" +subcategory: "" +description: |- + Log Export Config Resource +--- + +# cockroach_log_export_config (Resource) + +Log Export Config Resource + + + + +## Schema + +### Required + +- `auth_principal` (String) Either the AWS Role ARN that identifies a role that the cluster account can assume to write to CloudWatch or the GCP Project ID that the cluster service account has permissions to write to for cloud logging +- 
`id` (String) Cluster ID +- `log_name` (String) An identifier for the logs in the customer's log sink +- `type` (String) The cloud selection that we're exporting to along with the cloud logging platform. Possible values are `GCP_CLOUD_LOGGING` or `AWS_CLOUDWATCH` + +### Optional + +- `groups` (Attributes List) (see [below for nested schema](#nestedatt--groups)) +- `redact` (Boolean) Controls whether logs are redacted before forwarding to customer sinks +- `region` (String) Controls whether all logs are sent to a specific region in the customer sink + +### Read-Only + +- `created_at` (String) +- `status` (String) +- `updated_at` (String) +- `user_message` (String) + + +### Nested Schema for `groups` + +Required: + +- `channels` (List of String) A list of CRDB log channels to include in this group +- `log_name` (String) The name of the group, reflected in the log sink + +Optional: + +- `min_level` (String) The minimum log level to filter to this log group +- `redact` (Boolean) Governs whether this log group should aggregate redacted logs if unset + + diff --git a/examples/resources/cockroach_log_export_config/cockroach_log_export_config.tf b/examples/resources/cockroach_log_export_config/cockroach_log_export_config.tf new file mode 100644 index 00000000..22922f3e --- /dev/null +++ b/examples/resources/cockroach_log_export_config/cockroach_log_export_config.tf @@ -0,0 +1,27 @@ +variable "cluster_id" { + type = string +} + +variable "auth_principal" { + type = string +} + +resource "cockroach_log_export_config" "example" { + id = var.cluster_id + auth_principal = var.auth_principal + log_name = "example" + type = "GCP_CLOUD_LOGGING" + redact = true + groups = [ + { + log_name : "sql", + channels : ["SQL_SCHEMA", "SQL_EXEC"], + redact : false + }, + { + log_name : "devops", + channels : ["OPS", "HEALTH", "STORAGE"] + min_level : "WARNING" + } + ] +} diff --git a/examples/workflows/cockroach_log_export_config/aws_cockroach_log_export_config/main.tf 
b/examples/workflows/cockroach_log_export_config/aws_cockroach_log_export_config/main.tf new file mode 100644 index 00000000..35d46288 --- /dev/null +++ b/examples/workflows/cockroach_log_export_config/aws_cockroach_log_export_config/main.tf @@ -0,0 +1,174 @@ +# Your CockroachDB Organization ID can be found at +# https://cockroachlabs.cloud/information +variable "org_id" { + type = string + nullable = false +} + +# Your AWS Account ID (not the AWS Account ID +# of your CockroachDB Dedicated cluster). +variable "aws_account_id" { + type = string + nullable = false +} + +variable "aws_region" { + type = string + default = "us-east-1" + nullable = false +} + +variable "cluster_name" { + type = string + nullable = false +} + +variable "cluster_node_count" { + type = number + nullable = false + default = 3 +} + +variable "storage_gib" { + type = number + nullable = false + default = 15 +} + +variable "machine_type" { + type = string + nullable = false + default = "m5.large" +} + +variable "iam_role_name" { + type = string + nullable = false + default = "CockroachCloudLogExportRole" +} + +variable "iam_policy_name" { + type = string + nullable = false + default = "ExampleCockroachCloudLogExportPolicy" +} + +variable "log_group_name" { + type = string + nullable = false + default = "example" +} + +terraform { + required_providers { + cockroach = { + source = "cockroachdb/cockroach" + } + aws = { + source = "hashicorp/aws" + version = "~> 4.0" + } + } +} + +provider "cockroach" { + # export COCKROACH_API_KEY with the cockroach cloud API Key +} + +provider "aws" { + # See https://registry.terraform.io/providers/hashicorp/aws/latest/docs + # for configuration steps. + + # Please don't use a variable for region in production! The AWS provider won't + # be able to find any resources if this value changes and you'll get + # into a weird state. Be sure to run `terraform destroy` before changing + # this value. 
+ region = var.aws_region +} + +resource "cockroach_cluster" "example" { + name = var.cluster_name + cloud_provider = "AWS" + dedicated = { + storage_gib = var.storage_gib + machine_type = var.machine_type + } + regions = [{ + name = var.aws_region, + node_count = var.cluster_node_count + } + ] +} + +resource "aws_cloudwatch_log_group" "example" { + name = var.log_group_name + retention_in_days = 0 +} + +# Cross-account AWS IAM role in your AWS account. +resource "aws_iam_role" "example-role" { + name = var.iam_role_name + + assume_role_policy = jsonencode({ + "Version" : "2012-10-17", + "Statement" : [ + { + "Effect" : "Allow", + "Action" : "sts:AssumeRole", + "Principal" : { + "AWS" : cockroach_cluster.example.account_id + } + } + ] + }) +} + +resource "aws_iam_policy" "example-policy" { + name = var.iam_policy_name + description = "An example log export policy" + policy = jsonencode({ + "Version" : "2012-10-17", + "Statement" : [ + { + "Action" : [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutRetentionPolicy", + "logs:PutLogEvents" + ], + "Effect" : "Allow", + "Resource" : [ + "arn:aws:logs:*:${var.aws_account_id}:log-group:${var.log_group_name}:*" + ] + } + ] + }) +} + +resource "aws_iam_role_policy_attachment" "example-attach" { + role = aws_iam_role.example-role.name + policy_arn = aws_iam_policy.example-policy.arn +} + +resource "cockroach_log_export_config" "example" { + id = cockroach_cluster.example.id + auth_principal = aws_iam_role.example-role.arn + log_name = var.log_group_name + type = "AWS_CLOUDWATCH" + redact = true + region = var.aws_region + groups = [ + { + log_name = "sql", + channels = ["SQL_SCHEMA", "SQL_EXEC"], + min_level = "WARNING" + }, + { + log_name = "devops", + channels = ["OPS", "HEALTH", "STORAGE"], + redact = false + } + ] +} diff --git a/examples/workflows/cockroach_log_export_config/gcp_cockroach_log_export_config/main.tf 
b/examples/workflows/cockroach_log_export_config/gcp_cockroach_log_export_config/main.tf new file mode 100644 index 00000000..bae68b70 --- /dev/null +++ b/examples/workflows/cockroach_log_export_config/gcp_cockroach_log_export_config/main.tf @@ -0,0 +1,125 @@ +variable "gcp_project_id" { + type = string + nullable = false +} + +variable "gcp_region" { + type = string + default = "us-west2" + nullable = false +} + +variable "cluster_name" { + type = string + nullable = false +} + +variable "cluster_node_count" { + type = number + nullable = false + default = 3 +} + +variable "storage_gib" { + type = number + nullable = false + default = 15 +} + +variable "machine_type" { + type = string + nullable = false + default = "n1-standard-2" +} + +variable "iam_role_id" { + type = string + nullable = false + default = "ExampleLogExportRole" +} + +variable "iam_role_title" { + type = string + nullable = false + default = "Example LogExport Role" +} + +# For GCP, auth_principal should be the gcp_project_id. +variable "auth_principal" { + type = string + nullable = false +} + +terraform { + required_providers { + cockroach = { + source = "cockroachdb/cockroach" + } + google = { + source = "hashicorp/google" + version = "~> 4.0.0" + } + } +} + +provider "cockroach" { + # export COCKROACH_API_KEY with the cockroach cloud API Key +} + +provider "google" { + # For configuration help, see + # https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/getting_started + project = var.gcp_project_id + region = var.gcp_region +} + +resource "cockroach_cluster" "example" { + name = var.cluster_name + cloud_provider = "GCP" + dedicated = { + storage_gib = var.storage_gib + machine_type = var.machine_type + } + regions = [{ + name = var.gcp_region, + node_count = var.cluster_node_count + } + ] +} + +# New role in your GCP project. 
+resource "google_project_iam_custom_role" "example-logexport-role" { + project = var.gcp_project_id + role_id = var.iam_role_id + title = var.iam_role_title + permissions = ["logging.logEntries.create"] +} + +# Grants example-logexport-role to the CockroachDB Cloud service account. +resource "google_project_iam_member" "role-sa-binding" { + project = var.gcp_project_id + role = "projects/${var.gcp_project_id}/roles/${google_project_iam_custom_role.example-logexport-role.role_id}" + # member is the CockroachDB Cloud log export service account for the cluster. + # Example: crl-logging-user-a1c42be2e53b@crl-prod-abc.iam.gserviceaccount.com + member = "serviceAccount:crl-logging-user-${element(split("-", cockroach_cluster.example.id), 4)}@${cockroach_cluster.example.account_id}.iam.gserviceaccount.com" +} + +resource "cockroach_log_export_config" "example" { + id = cockroach_cluster.example.id + auth_principal = var.auth_principal + log_name = "example" + type = "GCP_CLOUD_LOGGING" + redact = true + groups = [ + { + log_name = "sql", + channels = ["SQL_SCHEMA", "SQL_EXEC"], + redact = false + }, + { + log_name = "devops", + channels = ["OPS", "HEALTH", "STORAGE"], + min_level = "WARNING" + } + ] +} diff --git a/internal/provider/log_export_config_resource.go b/internal/provider/log_export_config_resource.go new file mode 100644 index 00000000..33095090 --- /dev/null +++ b/internal/provider/log_export_config_resource.go @@ -0,0 +1,506 @@ +/* +Copyright 2022 The Cockroach Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package provider + +import ( + "context" + "fmt" + "net/http" + + "github.com/cockroachdb/cockroach-cloud-sdk-go/pkg/client" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + sdk_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +var logExportAttributes = map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Required: true, + MarkdownDescription: "Cluster ID", + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "auth_principal": schema.StringAttribute{ + Required: true, + MarkdownDescription: "Either the AWS Role ARN that identifies a role that the cluster account can assume to write to CloudWatch or the GCP Project ID that the cluster service account has permissions to write to for cloud logging", + }, + "log_name": schema.StringAttribute{ + Required: true, + MarkdownDescription: "An identifier for the logs in the customer's log sink", + }, + "type": schema.StringAttribute{ + Required: true, + MarkdownDescription: "The cloud selection that we're exporting to along with the cloud logging platform. 
Possible values are `GCP_CLOUD_LOGGING` or `AWS_CLOUDWATCH`", + }, + "redact": schema.BoolAttribute{ + Optional: true, + Description: "Controls whether logs are redacted before forwarding to customer sinks", + }, + "region": schema.StringAttribute{ + Optional: true, + MarkdownDescription: "Controls whether all logs are sent to a specific region in the customer sink", + Computed: true, + }, + "groups": schema.ListNestedAttribute{ + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "channels": schema.ListAttribute{ + Required: true, + ElementType: types.StringType, + MarkdownDescription: "A list of CRDB log channels to include in this group", + }, + "log_name": schema.StringAttribute{ + Required: true, + MarkdownDescription: "The name of the group, reflected in the log sink", + }, + "min_level": schema.StringAttribute{ + Optional: true, + MarkdownDescription: "The minimum log level to filter to this log group", + }, + "redact": schema.BoolAttribute{ + Optional: true, + MarkdownDescription: "Governs whether this log group should aggregate redacted logs if unset", + Computed: true, + }, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "user_message": schema.StringAttribute{ + Computed: true, + }, + "created_at": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "updated_at": schema.StringAttribute{ + Computed: true, + }, +} + +type logExportConfigResource struct { + provider *provider +} + +func (r *logExportConfigResource) Schema( + _ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse, +) { + resp.Schema = schema.Schema{ + MarkdownDescription: "Log Export Config Resource", + Attributes: logExportAttributes, + } +} + +func (r *logExportConfigResource) Metadata( + _ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse, +) { + resp.TypeName = 
req.ProviderTypeName + "_log_export_config" +} + +func (r *logExportConfigResource) Configure( + _ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse, +) { + if req.ProviderData == nil { + return + } + var ok bool + if r.provider, ok = req.ProviderData.(*provider); !ok { + resp.Diagnostics.AddError("Internal provider error", + fmt.Sprintf("Error in Configure: expected %T but got %T", provider{}, req.ProviderData)) + } +} + +func (r *logExportConfigResource) Create( + ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse, +) { + if r.provider == nil || !r.provider.configured { + addConfigureProviderErr(&resp.Diagnostics) + return + } + + var plan ClusterLogExport + + diags := req.Config.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Check cluster + cluster, _, err := r.provider.service.GetCluster(ctx, plan.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error getting cluster", + fmt.Sprintf("Could not retrieve cluster info: %s", formatAPIErrorMessage(err)), + ) + return + } + + if cluster.Config.Serverless != nil { + resp.Diagnostics.AddError( + "Incompatible cluster type", + "Log export services are only available for dedicated clusters", + ) + return + } + + configType, err := client.NewLogExportTypeFromValue(plan.Type.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + "Error preparing log export config", + fmt.Sprintf("Invalid log export type: %s", err), + ) + return + } + + if cluster.GetCloudProvider() == client.APICLOUDPROVIDER_AWS && + *configType != client.LOGEXPORTTYPE_AWS_CLOUDWATCH { + resp.Diagnostics.AddError( + "Incompatible log export type", + fmt.Sprintf("For an AWS cluster, expected %s but got: %s", + client.LOGEXPORTTYPE_AWS_CLOUDWATCH, plan.Type.ValueString()), + ) + return + } + if cluster.GetCloudProvider() == client.APICLOUDPROVIDER_GCP && + *configType != client.LOGEXPORTTYPE_GCP_CLOUD_LOGGING { + 
resp.Diagnostics.AddError( + "Incompatible log export type", + fmt.Sprintf("For a GCP cluster, expected %s but got: %s", + client.LOGEXPORTTYPE_GCP_CLOUD_LOGGING, plan.Type.ValueString()), + ) + return + } + + logExportRequest := client.NewEnableLogExportRequestWithDefaults() + if err = loadPlanIntoEnableLogExportRequest(plan, logExportRequest); err != nil { + resp.Diagnostics.AddError( + "Error preparing log export config", + fmt.Sprintf("Invalid log export config: %s", err), + ) + return + } + + apiLogExportObj, _, err := r.provider.service.EnableLogExport(ctx, plan.ID.ValueString(), logExportRequest) + if err != nil { + resp.Diagnostics.AddError( + "Error enabling log export", + fmt.Sprintf("Could not enable log export: %v", formatAPIErrorMessage(err)), + ) + return + } + + err = sdk_resource.RetryContext(ctx, clusterUpdateTimeout, + waitForLogExportReadyFunc(ctx, plan.ID.ValueString(), r.provider.service, apiLogExportObj)) + if err != nil { + resp.Diagnostics.AddError( + "Error enabling log export", + fmt.Sprintf("Could not enable log export: %s", formatAPIErrorMessage(err)), + ) + return + } + + var state ClusterLogExport + loadLogExportIntoTerraformState(apiLogExportObj, &state) + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) 
+} + +func loadLogExportIntoTerraformState( + apiLogExportObj *client.LogExportClusterInfo, state *ClusterLogExport, +) { + spec := apiLogExportObj.GetSpec() + + var groups []LogExportGroup + if len(spec.GetGroups()) != 0 { + groups = make([]LogExportGroup, len(spec.GetGroups())) + for group_idx, apiGroup := range spec.GetGroups() { + channels := make([]types.String, len(apiGroup.GetChannels())) + for ch_idx, channel := range apiGroup.GetChannels() { + channels[ch_idx] = types.StringValue(channel) + } + var groupRedact types.Bool + if apiGroup.Redact == nil { + groupRedact = types.BoolNull() + } else { + groupRedact = types.BoolValue(apiGroup.GetRedact()) + } + var groupMinLevel types.String + if apiGroup.GetMinLevel() == client.LOGLEVEL_LOG_LEVEL_UNSPECIFIED { + groupMinLevel = types.StringNull() + } else { + groupMinLevel = types.StringValue(string(apiGroup.GetMinLevel())) + } + groups[group_idx] = LogExportGroup{ + LogName: types.StringValue(apiGroup.GetLogName()), + Channels: channels, + MinLevel: groupMinLevel, + Redact: groupRedact, + } + } + } + + var apiRedact types.Bool + if spec.Redact == nil { + apiRedact = types.BoolNull() + } else { + apiRedact = types.BoolValue(spec.GetRedact()) + } + + var apiRegion types.String + if spec.Region == nil { + apiRegion = types.StringNull() + } else { + apiRegion = types.StringValue(spec.GetRegion()) + } + + state.ID = types.StringValue(apiLogExportObj.GetClusterId()) + state.AuthPrincipal = types.StringValue(spec.GetAuthPrincipal()) + state.LogName = types.StringValue(spec.GetLogName()) + state.Type = types.StringValue(string(spec.GetType())) + state.Redact = apiRedact + state.Region = apiRegion + state.Groups = &groups + state.Status = types.StringValue(string(apiLogExportObj.GetStatus())) + state.UserMessage = types.StringValue(apiLogExportObj.GetUserMessage()) + state.CreatedAt = types.StringValue(apiLogExportObj.GetCreatedAt().String()) + state.UpdatedAt = types.StringValue(apiLogExportObj.GetUpdatedAt().String()) 
+} + +func (r *logExportConfigResource) Read( + ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse, +) { + if r.provider == nil || !r.provider.configured { + addConfigureProviderErr(&resp.Diagnostics) + return + } + + var state ClusterLogExport + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() || state.ID.IsNull() { + return + } + clusterID := state.ID.ValueString() + apiLogExportObj, httpResp, err := r.provider.service.GetLogExportInfo(ctx, clusterID) + if err != nil { + if httpResp != nil && httpResp.StatusCode == http.StatusNotFound { + resp.Diagnostics.AddWarning( + "log export config not found", + fmt.Sprintf("log export config with cluster ID %s is not found. Removing from state.", clusterID)) + resp.State.RemoveResource(ctx) + } else { + resp.Diagnostics.AddError( + "Error getting log export info", + fmt.Sprintf("Unexpected error retrieving log export info: %s", formatAPIErrorMessage(err))) + } + return + } + + loadLogExportIntoTerraformState(apiLogExportObj, &state) + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) +} + +func (r *logExportConfigResource) Update( + ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse, +) { + // Get plan values + var plan ClusterLogExport + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Get current state + var state ClusterLogExport + diags = req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + logExportRequest := client.NewEnableLogExportRequestWithDefaults() + if err := loadPlanIntoEnableLogExportRequest(plan, logExportRequest); err != nil { + resp.Diagnostics.AddError( + "Error preparing log export config update", + fmt.Sprintf("Invalid log export config: %s", err), + ) + return + } + + apiLogExportObj, _, err := r.provider.service.EnableLogExport(ctx, plan.ID.ValueString(), logExportRequest) + if err != nil { + resp.Diagnostics.AddError( + "Error updating log export config", + fmt.Sprintf("Could not update log export config: %v", formatAPIErrorMessage(err)), + ) + return + } + + err = sdk_resource.RetryContext(ctx, clusterUpdateTimeout, + waitForLogExportReadyFunc(ctx, plan.ID.ValueString(), r.provider.service, apiLogExportObj)) + if err != nil { + resp.Diagnostics.AddError( + "Error updating log export config", + fmt.Sprintf("Could not update log export config: %s", formatAPIErrorMessage(err)), + ) + return + } + + loadLogExportIntoTerraformState(apiLogExportObj, &state) + diags = resp.State.Set(ctx, state) + resp.Diagnostics.Append(diags...) 
+} + +func logExportGroupToClientGroup(group LogExportGroup) (*client.LogExportGroup, error) { + channels := make([]string, len(group.Channels)) + for i, channel := range group.Channels { + channels[i] = channel.ValueString() + } + + clientGroup := client.LogExportGroup{ + LogName: group.LogName.ValueString(), + Channels: channels, + } + + if !group.Redact.IsNull() && !group.Redact.IsUnknown() { + clientGroup.SetRedact(group.Redact.ValueBool()) + } + + if group.MinLevel.IsNull() { + clientGroup.SetMinLevel(client.LOGLEVEL_LOG_LEVEL_UNSPECIFIED) + } else { + minLevel, err := client.NewLogLevelFromValue(group.MinLevel.ValueString()) + if err != nil { + return nil, err + } + clientGroup.SetMinLevel(*minLevel) + } + + return &clientGroup, nil +} + +func loadPlanIntoEnableLogExportRequest( + plan ClusterLogExport, req *client.EnableLogExportRequest, +) error { + if plan.Groups != nil { + logExportGroups := make([]client.LogExportGroup, len(*plan.Groups)) + for i, group := range *plan.Groups { + clientGroup, err := logExportGroupToClientGroup(group) + if err != nil { + return err + } + logExportGroups[i] = *clientGroup + } + req.SetGroups(logExportGroups) + } + + req.SetAuthPrincipal(plan.AuthPrincipal.ValueString()) + req.SetLogName(plan.LogName.ValueString()) + if !plan.Redact.IsNull() && !plan.Redact.IsUnknown() { + req.SetRedact(plan.Redact.ValueBool()) + } + if !plan.Region.IsNull() && !plan.Redact.IsUnknown() { + req.SetRegion(plan.Region.ValueString()) + } + + configType, err := client.NewLogExportTypeFromValue(plan.Type.ValueString()) + if err != nil { + return err + } + req.SetType(*configType) + return nil +} + +// Delete +func (r *logExportConfigResource) Delete( + ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse, +) { + var state ClusterLogExport + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + clusterID := state.ID.ValueString() + _, httpResp, err := r.provider.service.DeleteLogExport(ctx, clusterID) + if err != nil { + if httpResp != nil && httpResp.StatusCode == http.StatusNotFound { + // Log export config or cluster is already gone. Swallow the error. + } else { + resp.Diagnostics.AddError( + "Error deleting log export config", + fmt.Sprintf("Could not delete log export config: %s", formatAPIErrorMessage(err)), + ) + return + } + } + + // Remove resource from state + resp.State.RemoveResource(ctx) +} + +func (r *logExportConfigResource) ImportState( + ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse, +) { + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} + +func waitForLogExportReadyFunc( + ctx context.Context, + clusterID string, + cl client.Service, + logExportClusterInfo *client.LogExportClusterInfo, +) sdk_resource.RetryFunc { + return func() *sdk_resource.RetryError { + apiLogExport, httpResp, err := cl.GetLogExportInfo(ctx, clusterID) + if err != nil { + if httpResp != nil && httpResp.StatusCode < http.StatusInternalServerError { + return sdk_resource.NonRetryableError(fmt.Errorf("error getting log export info: %s", formatAPIErrorMessage(err))) + } else { + return sdk_resource.RetryableError(fmt.Errorf("encountered a server error while reading log export status - trying again")) + } + } + + *logExportClusterInfo = *apiLogExport + switch logExportClusterInfo.GetStatus() { + case client.LOGEXPORTSTATUS_DISABLE_FAILED, client.LOGEXPORTSTATUS_ENABLE_FAILED: + return sdk_resource.NonRetryableError(fmt.Errorf("log export update failed")) + case client.LOGEXPORTSTATUS_ENABLING, client.LOGEXPORTSTATUS_DISABLING: + return sdk_resource.RetryableError(fmt.Errorf("log export is not ready yet")) + default: + return nil + } + } +} + +func NewLogExportConfigResource() resource.Resource { + return &logExportConfigResource{} +} diff --git 
a/internal/provider/log_export_config_resource_test.go b/internal/provider/log_export_config_resource_test.go
new file mode 100644
index 00000000..0c594b82
--- /dev/null
+++ b/internal/provider/log_export_config_resource_test.go
@@ -0,0 +1,304 @@
+/*
+ Copyright 2022 The Cockroach Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package provider
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"net/http"
+	"os"
+	"testing"
+
+	"github.com/cockroachdb/cockroach-cloud-sdk-go/pkg/client"
+	mock_client "github.com/cockroachdb/terraform-provider-cockroach/mock"
+	"github.com/golang/mock/gomock"
+	"github.com/google/uuid"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
+)
+
+// TestAccLogExportConfigResource attempts to create, check, and destroy
+// a real cluster. It will be skipped if TF_ACC isn't set.
+func TestAccLogExportConfigResource(t *testing.T) {
+	t.Skip("Skipping until we can either integrate the AWS provider " +
+		"or import a permanent test fixture.")
+	t.Parallel()
+	clusterName := fmt.Sprintf("tftest-logexport-%s", GenerateRandomString(4))
+	testLogExportConfigResource(t, clusterName, false) // fixed: was testCMEKResource, a copy-paste from the CMEK test
+}
+
+// TestIntegrationLogExportConfigResource attempts to create, check, and destroy
+// a cluster, but uses a mocked API service.
+func TestIntegrationLogExportConfigResource(t *testing.T) { // Exercises create/update/delete of the log export config resource against a mocked API service.
+	clusterName := fmt.Sprintf("tftest-logexport-%s", GenerateRandomString(4))
+	clusterID := uuid.Nil.String()
+	if os.Getenv(CockroachAPIKey) == "" {
+		os.Setenv(CockroachAPIKey, "fake") // provider requires a key to be set, even when fully mocked
+	}
+
+	ctrl := gomock.NewController(t)
+	s := mock_client.NewMockService(ctrl)
+	defer HookGlobal(&NewService, func(c *client.Client) client.Service { // swap the service constructor for the mock; restored on return
+		return s
+	})()
+
+	cluster := &client.Cluster{ // minimal dedicated AWS cluster the mocked API will report
+		Id:               clusterID,
+		Name:             clusterName,
+		CockroachVersion: "v22.2.0",
+		Plan:             "DEDICATED",
+		CloudProvider:    "AWS",
+		State:            "CREATED",
+		Config: client.ClusterConfig{
+			Dedicated: &client.DedicatedHardwareConfig{
+				MachineType:    "m5.xlarge",
+				NumVirtualCpus: 4,
+				StorageGib:     35,
+				MemoryGib:      8,
+			},
+		},
+		Regions: []client.Region{
+			{
+				Name:      "us-east-1",
+				NodeCount: 3,
+			},
+		},
+	}
+
+	authPrincipal := "iam-role-arn"
+	configType, _ := client.NewLogExportTypeFromValue("AWS_CLOUDWATCH")
+	logName := "test"
+	region := "us-east-1"
+	trueBool := true
+	minLevel, _ := client.NewLogLevelFromValue("WARNING")
+	createdGroups := []client.LogExportGroup{ // mirrors the groups in the create-step HCL fixture below
+		{LogName: "sql", Channels: []string{"SQL_SCHEMA", "SQL_EXEC"}, MinLevel: minLevel, Redact: &trueBool},
+	}
+	enabledStatus, _ := client.NewLogExportStatusFromValue("ENABLED")
+	createdLogExportClusterInfo := &client.LogExportClusterInfo{ // API response after the initial EnableLogExport
+		ClusterId: &clusterID,
+		Spec: &client.LogExportClusterSpecification{
+			AuthPrincipal: &authPrincipal,
+			LogName:       &logName,
+			Groups:        &createdGroups,
+			Redact:        &trueBool,
+			Type:          configType,
+			Region:        &region,
+		},
+		Status: enabledStatus,
+	}
+
+	falseBool := false
+	updatedGroups := []client.LogExportGroup{ // mirrors the groups in the update-step HCL fixture below
+		{LogName: "sql", Channels: []string{"SQL_EXEC"}, MinLevel: minLevel, Redact: &trueBool},
+		{LogName: "devops", Channels: []string{"OPS", "HEALTH", "STORAGE"}, MinLevel: minLevel, Redact: &falseBool},
+	}
+	updatedLogExportClusterInfo := &client.LogExportClusterInfo{ // API response after the second EnableLogExport (update)
+		ClusterId: &clusterID,
+		Spec: &client.LogExportClusterSpecification{
+			AuthPrincipal: &authPrincipal,
+			LogName:       &logName,
+			Groups:        &updatedGroups,
+			Redact:        &falseBool,
+			Type:          configType,
+			Region:        &region,
+		},
+		Status: enabledStatus,
+	}
+
+	// Create
+	s.EXPECT().CreateCluster(gomock.Any(), gomock.Any()).
+		Return(cluster, nil, nil)
+	s.EXPECT().GetCluster(gomock.Any(), clusterID).
+		Return(cluster, &http.Response{StatusCode: http.StatusOK, Status: http.StatusText(http.StatusOK)}, nil). // set StatusCode so callers checking resp.StatusCode see 200, not the zero value
+		Times(3) // call counts mirror the provider's read sequence — NOTE(review): confirm against the resource implementation
+	s.EXPECT().EnableLogExport(gomock.Any(), clusterID,
+		&client.EnableLogExportRequest{
+			Groups:        &createdGroups,
+			AuthPrincipal: authPrincipal,
+			LogName:       logName,
+			Redact:        &trueBool,
+			Type:          *configType,
+			Region:        &region,
+		}).
+		Return(createdLogExportClusterInfo, nil, nil)
+	s.EXPECT().GetLogExportInfo(gomock.Any(), clusterID).
+		Return(createdLogExportClusterInfo, nil, nil).
+		Times(3)
+
+	// Update
+	s.EXPECT().GetCluster(gomock.Any(), clusterID).
+		Return(cluster, nil, nil).
+		Times(2)
+	s.EXPECT().GetLogExportInfo(gomock.Any(), clusterID).
+		Return(createdLogExportClusterInfo, nil, nil)
+	s.EXPECT().EnableLogExport(gomock.Any(), clusterID, // update reuses EnableLogExport with the new spec
+		&client.EnableLogExportRequest{
+			AuthPrincipal: authPrincipal,
+			Type:          *configType,
+			LogName:       logName,
+			Redact:        &falseBool,
+			Groups:        &updatedGroups,
+			Region:        &region,
+		}).
+		Return(updatedLogExportClusterInfo, nil, nil)
+	s.EXPECT().GetLogExportInfo(gomock.Any(), clusterID).
+		Return(updatedLogExportClusterInfo, nil, nil).
+		Times(3)
+
+	// Delete
+	s.EXPECT().DeleteCluster(gomock.Any(), clusterID)
+	s.EXPECT().DeleteLogExport(gomock.Any(), clusterID)
+
+	testLogExportConfigResource(t, clusterName, true)
+}
+
+func testLogExportConfigResource(t *testing.T, clusterName string, useMock bool) { // runs the two-step (create, then update) acceptance test; useMock=true marks it a unit test
+	var (
+		clusterResourceName         = "cockroach_cluster.test"
+		logExportConfigResourceName = "cockroach_log_export_config.test"
+	)
+
+	resource.Test(t, resource.TestCase{
+		IsUnitTest:               useMock,
+		PreCheck:                 func() { testAccPreCheck(t) },
+		ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
+		Steps: []resource.TestStep{
+			{
+				Config: getTestLogExportConfigResourceCreateConfig(clusterName),
+				Check: resource.ComposeTestCheckFunc(
+					testLogExportConfigExists(logExportConfigResourceName, clusterResourceName),
+					resource.TestCheckResourceAttr(logExportConfigResourceName, "redact", "true"),
+					resource.TestCheckResourceAttr(logExportConfigResourceName, "groups.#", "1"),
+					resource.TestCheckResourceAttr(logExportConfigResourceName, "groups.0.channels.#", "2"),
+				),
+			},
+			{
+				Config: getTestLogExportConfigResourceUpdateConfig(clusterName),
+				Check: resource.ComposeTestCheckFunc(
+					testLogExportConfigExists(logExportConfigResourceName, clusterResourceName),
+					resource.TestCheckResourceAttr(logExportConfigResourceName, "redact", "false"),
+					resource.TestCheckResourceAttr(logExportConfigResourceName, "groups.#", "2"),
+					resource.TestCheckResourceAttr(logExportConfigResourceName, "groups.0.channels.#", "1"),
+				),
+			},
+		},
+	})
+}
+
+func testLogExportConfigExists(resourceName, clusterResourceName string) resource.TestCheckFunc { // checks that the API reports a log export config whose log_name matches TF state
+	return func(s *terraform.State) error {
+		p := testAccProvider.(*provider)
+		p.service = NewService(cl) // cl is the package-level client set up by the test harness — NOTE(review): confirm in provider test setup
+
+		rs, ok := s.RootModule().Resources[resourceName]
+		if !ok {
+			return fmt.Errorf("not found: %s", resourceName)
+		}
+		clusterRs, ok := s.RootModule().Resources[clusterResourceName]
+		if !ok {
+			return fmt.Errorf("not found: %s", clusterResourceName)
+		}
+
+		clusterID := clusterRs.Primary.Attributes["id"]
+		log.Printf("[DEBUG] clusterID: %s, name %s", clusterRs.Primary.Attributes["id"], clusterRs.Primary.Attributes["name"])
+
+		logExportConfigResponse, _, err := p.service.GetLogExportInfo(context.TODO(), clusterID)
+		if err != nil { // surface the API error instead of silently swallowing it
+			return fmt.Errorf("log export config with log name %s does not exist: %w", rs.Primary.Attributes["log_name"], err)
+		}
+		if logExportConfigResponse.Spec.GetLogName() == rs.Primary.Attributes["log_name"] {
+			return nil
+		}
+		return fmt.Errorf("log export config with log name %s does not exist", rs.Primary.Attributes["log_name"])
+	}
+}
+
+func getTestLogExportConfigResourceCreateConfig(name string) string { // HCL fixture for the create step (redact=true, one group)
+	return fmt.Sprintf(`
+resource "cockroach_cluster" "test" {
+    name           = "%s"
+    cloud_provider = "AWS"
+    dedicated = {
+        storage_gib  = 35
+        machine_type = "m5.xlarge"
+    }
+    regions = [{
+        name       = "us-east-1"
+        node_count = 3
+    }]
+}
+
+resource "cockroach_log_export_config" "test" {
+    id             = cockroach_cluster.test.id
+    auth_principal = "iam-role-arn"
+    log_name       = "test"
+    type           = "AWS_CLOUDWATCH"
+    redact         = true
+    region         = "us-east-1"
+    groups = [
+        {
+            log_name  = "sql",
+            channels  = ["SQL_SCHEMA", "SQL_EXEC"],
+            min_level = "WARNING",
+            redact    = true
+        }
+    ]
+}
+`, name)
+}
+
+func getTestLogExportConfigResourceUpdateConfig(name string) string { // HCL fixture for the update step (redact=false, two groups)
+	return fmt.Sprintf(`
+resource "cockroach_cluster" "test" {
+    name           = "%s"
+    cloud_provider = "AWS"
+    dedicated = {
+        storage_gib  = 35
+        machine_type = "m5.xlarge"
+    }
+    regions = [{
+        name       = "us-east-1"
+        node_count = 3
+    }]
+}
+
+resource "cockroach_log_export_config" "test" {
+    id             = cockroach_cluster.test.id
+    auth_principal = "iam-role-arn"
+    log_name       = "test"
+    type           = "AWS_CLOUDWATCH"
+    redact         = false
+    region         = "us-east-1"
+    groups = [
+        {
+            log_name  = "sql",
+            channels  = ["SQL_EXEC"],
+            min_level = "WARNING",
+            redact    = true
+        },
+        {
+            log_name  = "devops",
+            channels  = ["OPS", "HEALTH", "STORAGE"],
+            min_level = "WARNING",
+            redact    = false
+        }
+    ]
+}
+`, name)
+}
diff --git a/internal/provider/models.go b/internal/provider/models.go
index 259feecd..90c279f0 100644
--- 
a/internal/provider/models.go
+++ b/internal/provider/models.go
@@ -173,6 +173,27 @@ type Organization struct {
 	CreatedAt types.String `tfsdk:"created_at"`
 }
 
+type LogExportGroup struct { // LogExportGroup is the TF model for one log-export group; presumably maps to client.LogExportGroup — TODO confirm
+	LogName  types.String   `tfsdk:"log_name"`  // name of the group, reflected in the log sink
+	Channels []types.String `tfsdk:"channels"`  // CRDB log channels included in this group
+	MinLevel types.String   `tfsdk:"min_level"` // minimum log level filtered to this group (optional)
+	Redact   types.Bool     `tfsdk:"redact"`    // whether this group's logs are redacted (optional)
+}
+
+type ClusterLogExport struct { // ClusterLogExport is the TF model backing the cockroach_log_export_config resource
+	ID            types.String      `tfsdk:"id"`             // cluster ID this config belongs to
+	AuthPrincipal types.String      `tfsdk:"auth_principal"` // AWS role ARN or GCP project ID with sink write access
+	LogName       types.String      `tfsdk:"log_name"`       // identifier for the logs in the customer's sink
+	Type          types.String      `tfsdk:"type"`           // GCP_CLOUD_LOGGING or AWS_CLOUDWATCH
+	Redact        types.Bool        `tfsdk:"redact"`         // redact logs before forwarding (optional)
+	Region        types.String      `tfsdk:"region"`         // target region in the customer sink (optional)
+	Groups        *[]LogExportGroup `tfsdk:"groups"`         // optional per-group routing config
+	Status        types.String      `tfsdk:"status"`         // read-only, set from API responses
+	UserMessage   types.String      `tfsdk:"user_message"`   // read-only
+	CreatedAt     types.String      `tfsdk:"created_at"`     // read-only
+	UpdatedAt     types.String      `tfsdk:"updated_at"`     // read-only
+}
+
 func (e *APIErrorMessage) String() string {
 	return fmt.Sprintf("%v-%v", e.Code, e.Message)
 }
diff --git a/internal/provider/provider.go b/internal/provider/provider.go
index b5508e59..fa1df037 100644
--- a/internal/provider/provider.go
+++ b/internal/provider/provider.go
@@ -121,6 +121,7 @@ func (p *provider) Resources(_ context.Context) []func() resource.Resource {
 		NewCMEKResource,
 		NewDatabaseResource,
 		NewFinalizeVersionUpgradeResource,
+		NewLogExportConfigResource, // registers the new log export config resource
 	}
 }