diff --git a/.changelog/7175.txt b/.changelog/7175.txt new file mode 100644 index 00000000000..dc35def7919 --- /dev/null +++ b/.changelog/7175.txt @@ -0,0 +1,9 @@ +```release-note:enhancement +datastream: Added `postgresql_source_config` & `oracle_source_config` in `google_datastream_stream` +``` +```release-note:enhancement +datastream: Added support for `desired_state=RUNNING` in `google_datastream_stream` +``` +```release-note:enhancement +datastream: Exposed validation errors in `google_datastream_stream` +``` diff --git a/google/datastream_operation.go b/google/datastream_operation.go index 0078bd8fb19..7f18cb3c3fd 100644 --- a/google/datastream_operation.go +++ b/google/datastream_operation.go @@ -4,14 +4,16 @@ import ( "bytes" "encoding/json" "fmt" - datastream "google.golang.org/api/datastream/v1" "time" + + datastream "google.golang.org/api/datastream/v1" ) type DatastreamOperationWaiter struct { Config *Config UserAgent string Project string + Op datastream.Operation CommonOperationWaiter } @@ -20,14 +22,22 @@ func (w *DatastreamOperationWaiter) QueryOp() (interface{}, error) { return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") } // Returns the proper get. 
- url := fmt.Sprintf("%s%s", w.Config.DatastreamBasePath, w.CommonOperationWaiter.Op.Name) + url := fmt.Sprintf("%s%s", w.Config.DatastreamBasePath, w.Op.Name) return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) } func (w *DatastreamOperationWaiter) Error() error { if w != nil && w.Op.Error != nil { - return DatastreamError(*w.Op.Error) + return &DatastreamOperationError{Op: w.Op} + } + return nil +} + +func (w *DatastreamOperationWaiter) SetOp(op interface{}) error { + w.CommonOperationWaiter.SetOp(op) + if err := Convert(op, &w.Op); err != nil { + return err } return nil } @@ -38,7 +48,7 @@ func createDatastreamWaiter(config *Config, op map[string]interface{}, project, UserAgent: userAgent, Project: project, } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { + if err := w.SetOp(op); err != nil { return nil, err } return w, nil @@ -53,7 +63,7 @@ func datastreamOperationWaitTimeWithResponse(config *Config, op map[string]inter if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { return err } - return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) + return json.Unmarshal([]byte(w.Op.Response), response) } func datastreamOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { @@ -69,17 +79,52 @@ func datastreamOperationWaitTime(config *Config, op map[string]interface{}, proj return OperationWait(w, activity, timeout, config.PollInterval) } -// DatastreamError wraps datastream.Status and implements the +// DatastreamOperationError wraps datastream.Status and implements the // error interface so it can be returned. 
-type DatastreamError datastream.Status +type DatastreamOperationError struct { + Op datastream.Operation +} -func (e DatastreamError) Error() string { +func (e DatastreamOperationError) Error() string { var buf bytes.Buffer - for _, err := range e.Details { + for _, err := range e.Op.Error.Details { buf.Write(err) buf.WriteString("\n") } + if validations := e.extractFailedValidationResult(); validations != nil { + buf.Write(validations) + buf.WriteString("\n") + } return buf.String() } + +// extractFailedValidationResult extracts the internal failed validations +// if there are any. +func (e DatastreamOperationError) extractFailedValidationResult() []byte { + var metadata datastream.OperationMetadata + data, err := e.Op.Metadata.MarshalJSON() + if err != nil { + return nil + } + err = json.Unmarshal(data, &metadata) + if err != nil { + return nil + } + if metadata.ValidationResult == nil { + return nil + } + var res []byte + for _, v := range metadata.ValidationResult.Validations { + if v.State == "FAILED" { + data, err := v.MarshalJSON() + if err != nil { + return nil + } + res = append(res, data...) + res = append(res, []byte("\n")...) + } + } + return res +} diff --git a/google/resource_datastream_stream.go b/google/resource_datastream_stream.go index 64682d8a878..3c9271414d8 100644 --- a/google/resource_datastream_stream.go +++ b/google/resource_datastream_stream.go @@ -272,9 +272,16 @@ A duration in seconds with up to nine fractional digits, terminated by 's'. Exam MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "source_connection_profile": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: projectNumberDiffSuppress, + Description: `Source connection profile resource. 
Format: projects/{project}/locations/{location}/connectionProfiles/{name}`, + }, "mysql_source_config": { Type: schema.TypeList, - Required: true, + Optional: true, Description: `MySQL data source configuration.`, MaxItems: 1, Elem: &schema.Resource{ @@ -461,103 +468,206 @@ If not set (or set to 0), the system's default value will be used.`, }, }, }, + ExactlyOneOf: []string{"source_config.0.mysql_source_config", "source_config.0.oracle_source_config", "source_config.0.postgresql_source_config"}, }, - "source_connection_profile": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: projectNumberDiffSuppress, - Description: `Source connection profile resource. Format: projects/{project}/locations/{location}/connectionProfiles/{name}`, - }, - }, - }, - }, - "stream_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: `The stream identifier.`, - }, - "backfill_all": { - Type: schema.TypeList, - Optional: true, - Description: `Backfill strategy to automatically backfill the Stream's objects. 
Specific objects can be excluded.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "mysql_excluded_objects": { + "oracle_source_config": { Type: schema.TypeList, Optional: true, - Description: `MySQL data source objects to avoid backfilling.`, + Description: `Oracle data source configuration.`, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "mysql_databases": { + "drop_large_objects": { Type: schema.TypeList, - Required: true, - Description: `MySQL databases on the server`, - MinItems: 1, + Optional: true, + Description: `Configuration to drop large object values.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, + "exclude_objects": { + Type: schema.TypeList, + Optional: true, + Description: `Oracle objects to exclude from the stream.`, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "database": { - Type: schema.TypeString, - Required: true, - Description: `Database name.`, - }, - "mysql_tables": { + "oracle_schemas": { Type: schema.TypeList, - Optional: true, - Description: `Tables in the database.`, + Required: true, + Description: `Oracle schemas/databases in the database server`, MinItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "table": { + "schema": { Type: schema.TypeString, Required: true, - Description: `Table name.`, + Description: `Schema name.`, }, - "mysql_columns": { + "oracle_tables": { Type: schema.TypeList, Optional: true, - Description: `MySQL columns in the schema. 
When unspecified as part of include/exclude objects, includes/excludes everything.`, + Description: `Tables in the database.`, MinItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "collation": { - Type: schema.TypeString, - Optional: true, - Description: `Column collation.`, - }, - "column": { + "table": { Type: schema.TypeString, - Optional: true, - Description: `Column name.`, - }, - "data_type": { - Type: schema.TypeString, - Optional: true, - Description: `The MySQL data type. Full data types list can be found here: -https://dev.mysql.com/doc/refman/8.0/en/data-types.html`, + Required: true, + Description: `Table name.`, }, - "nullable": { - Type: schema.TypeBool, + "oracle_columns": { + Type: schema.TypeList, Optional: true, - Description: `Whether or not the column can accept a null value.`, + Description: `Oracle columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "column": { + Type: schema.TypeString, + Optional: true, + Description: `Column name.`, + }, + "data_type": { + Type: schema.TypeString, + Optional: true, + Description: `The Oracle data type. 
Full data types list can be found here: +https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Data-Types.html`, + }, + "encoding": { + Type: schema.TypeString, + Computed: true, + Description: `Column encoding.`, + }, + "length": { + Type: schema.TypeInt, + Computed: true, + Description: `Column length.`, + }, + "nullable": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether or not the column can accept a null value.`, + }, + "ordinal_position": { + Type: schema.TypeInt, + Computed: true, + Description: `The ordinal position of the column in the table.`, + }, + "precision": { + Type: schema.TypeInt, + Computed: true, + Description: `Column precision.`, + }, + "primary_key": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether or not the column represents a primary key.`, + }, + "scale": { + Type: schema.TypeInt, + Computed: true, + Description: `Column scale.`, + }, + }, + }, }, - "ordinal_position": { - Type: schema.TypeInt, - Optional: true, - Description: `The ordinal position of the column in the table.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "include_objects": { + Type: schema.TypeList, + Optional: true, + Description: `Oracle objects to retrieve from the source.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "oracle_schemas": { + Type: schema.TypeList, + Required: true, + Description: `Oracle schemas/databases in the database server`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "schema": { + Type: schema.TypeString, + Required: true, + Description: `Schema name.`, + }, + "oracle_tables": { + Type: schema.TypeList, + Optional: true, + Description: `Tables in the database.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table": { + Type: schema.TypeString, + Required: true, + Description: `Table name.`, }, - "primary_key": { - Type: schema.TypeBool, + "oracle_columns": { + Type: schema.TypeList, 
Optional: true, - Description: `Whether or not the column represents a primary key.`, - }, - "length": { - Type: schema.TypeInt, - Computed: true, - Description: `Column length.`, + Description: `Oracle columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "column": { + Type: schema.TypeString, + Optional: true, + Description: `Column name.`, + }, + "data_type": { + Type: schema.TypeString, + Optional: true, + Description: `The Oracle data type. Full data types list can be found here: +https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Data-Types.html`, + }, + "encoding": { + Type: schema.TypeString, + Computed: true, + Description: `Column encoding.`, + }, + "length": { + Type: schema.TypeInt, + Computed: true, + Description: `Column length.`, + }, + "nullable": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether or not the column can accept a null value.`, + }, + "ordinal_position": { + Type: schema.TypeInt, + Computed: true, + Description: `The ordinal position of the column in the table.`, + }, + "precision": { + Type: schema.TypeInt, + Computed: true, + Description: `Column precision.`, + }, + "primary_key": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether or not the column represents a primary key.`, + }, + "scale": { + Type: schema.TypeInt, + Computed: true, + Description: `Column scale.`, + }, + }, + }, }, }, }, @@ -568,1159 +678,4324 @@ https://dev.mysql.com/doc/refman/8.0/en/data-types.html`, }, }, }, + "max_concurrent_backfill_tasks": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `Maximum number of concurrent backfill tasks. The number should be non negative. 
+If not set (or set to 0), the system's default value will be used.`, + }, + "max_concurrent_cdc_tasks": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `Maximum number of concurrent CDC tasks. The number should be non negative. +If not set (or set to 0), the system's default value will be used.`, + }, + "stream_large_objects": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration to stream large object values.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + }, }, }, + ExactlyOneOf: []string{"source_config.0.mysql_source_config", "source_config.0.oracle_source_config", "source_config.0.postgresql_source_config"}, }, - }, - }, - ExactlyOneOf: []string{"backfill_all", "backfill_none"}, - }, - "backfill_none": { - Type: schema.TypeList, - Optional: true, - Description: `Backfill strategy to disable automatic backfill for the Stream's objects.`, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{}, - }, - ExactlyOneOf: []string{"backfill_all", "backfill_none"}, - }, - "customer_managed_encryption_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A reference to a KMS encryption key. If provided, it will be used to encrypt the data. 
If left blank, data -will be encrypted using an internal Stream-specific encryption key provisioned through KMS.`, - }, - "labels": { - Type: schema.TypeMap, - Optional: true, - Description: `Labels.`, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: `The stream's name.`, + "postgresql_source_config": { + Type: schema.TypeList, + Optional: true, + Description: `PostgreSQL data source configuration.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "publication": { + Type: schema.TypeString, + Required: true, + Description: `The name of the publication that includes the set of all tables +that are defined in the stream's include_objects.`, + }, + "replication_slot": { + Type: schema.TypeString, + Required: true, + Description: `The name of the logical replication slot that's configured with +the pgoutput plugin.`, + }, + "exclude_objects": { + Type: schema.TypeList, + Optional: true, + Description: `PostgreSQL objects to exclude from the stream.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "postgresql_schemas": { + Type: schema.TypeList, + Required: true, + Description: `PostgreSQL schemas on the server`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "schema": { + Type: schema.TypeString, + Required: true, + Description: `Database name.`, + }, + "postgresql_tables": { + Type: schema.TypeList, + Optional: true, + Description: `Tables in the schema.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table": { + Type: schema.TypeString, + Required: true, + Description: `Table name.`, + }, + "postgresql_columns": { + Type: schema.TypeList, + Optional: true, + Description: `PostgreSQL columns in the schema. 
When unspecified as part of include/exclude objects, includes/excludes everything.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "column": { + Type: schema.TypeString, + Optional: true, + Description: `Column name.`, + }, + "data_type": { + Type: schema.TypeString, + Optional: true, + Description: `The PostgreSQL data type. Full data types list can be found here: +https://www.postgresql.org/docs/current/datatype.html`, + }, + "nullable": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether or not the column can accept a null value.`, + }, + "ordinal_position": { + Type: schema.TypeInt, + Optional: true, + Description: `The ordinal position of the column in the table.`, + }, + "primary_key": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether or not the column represents a primary key.`, + }, + "length": { + Type: schema.TypeInt, + Computed: true, + Description: `Column length.`, + }, + "precision": { + Type: schema.TypeInt, + Computed: true, + Description: `Column precision.`, + }, + "scale": { + Type: schema.TypeInt, + Computed: true, + Description: `Column scale.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "include_objects": { + Type: schema.TypeList, + Optional: true, + Description: `PostgreSQL objects to retrieve from the source.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "postgresql_schemas": { + Type: schema.TypeList, + Required: true, + Description: `PostgreSQL schemas on the server`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "schema": { + Type: schema.TypeString, + Required: true, + Description: `Database name.`, + }, + "postgresql_tables": { + Type: schema.TypeList, + Optional: true, + Description: `Tables in the schema.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table": { + Type: schema.TypeString, + Required: true, + Description: `Table name.`, + 
}, + "postgresql_columns": { + Type: schema.TypeList, + Optional: true, + Description: `PostgreSQL columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "column": { + Type: schema.TypeString, + Optional: true, + Description: `Column name.`, + }, + "data_type": { + Type: schema.TypeString, + Optional: true, + Description: `The PostgreSQL data type. Full data types list can be found here: +https://www.postgresql.org/docs/current/datatype.html`, + }, + "nullable": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether or not the column can accept a null value.`, + }, + "ordinal_position": { + Type: schema.TypeInt, + Optional: true, + Description: `The ordinal position of the column in the table.`, + }, + "primary_key": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether or not the column represents a primary key.`, + }, + "length": { + Type: schema.TypeInt, + Computed: true, + Description: `Column length.`, + }, + "precision": { + Type: schema.TypeInt, + Computed: true, + Description: `Column precision.`, + }, + "scale": { + Type: schema.TypeInt, + Computed: true, + Description: `Column scale.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "max_concurrent_backfill_tasks": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `Maximum number of concurrent backfill tasks. The number should be non +negative. 
If not set (or set to 0), the system's default value will be used.`, + }, + }, + }, + ExactlyOneOf: []string{"source_config.0.mysql_source_config", "source_config.0.oracle_source_config", "source_config.0.postgresql_source_config"}, + }, + }, + }, }, - "state": { + "stream_id": { Type: schema.TypeString, - Computed: true, - Description: `The state of the stream.`, + Required: true, + ForceNew: true, + Description: `The stream identifier.`, }, - "desired_state": { - Type: schema.TypeString, + "backfill_all": { + Type: schema.TypeList, Optional: true, - Default: "NOT_STARTED", - Description: `Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream.`, - }, - "project": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, + Description: `Backfill strategy to automatically backfill the Stream's objects. Specific objects can be excluded.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mysql_excluded_objects": { + Type: schema.TypeList, + Optional: true, + Description: `MySQL data source objects to avoid backfilling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mysql_databases": { + Type: schema.TypeList, + Required: true, + Description: `MySQL databases on the server`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "database": { + Type: schema.TypeString, + Required: true, + Description: `Database name.`, + }, + "mysql_tables": { + Type: schema.TypeList, + Optional: true, + Description: `Tables in the database.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table": { + Type: schema.TypeString, + Required: true, + Description: `Table name.`, + }, + "mysql_columns": { + Type: schema.TypeList, + Optional: true, + Description: `MySQL columns in the schema. 
When unspecified as part of include/exclude objects, includes/excludes everything.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "collation": { + Type: schema.TypeString, + Optional: true, + Description: `Column collation.`, + }, + "column": { + Type: schema.TypeString, + Optional: true, + Description: `Column name.`, + }, + "data_type": { + Type: schema.TypeString, + Optional: true, + Description: `The MySQL data type. Full data types list can be found here: +https://dev.mysql.com/doc/refman/8.0/en/data-types.html`, + }, + "nullable": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether or not the column can accept a null value.`, + }, + "ordinal_position": { + Type: schema.TypeInt, + Optional: true, + Description: `The ordinal position of the column in the table.`, + }, + "primary_key": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether or not the column represents a primary key.`, + }, + "length": { + Type: schema.TypeInt, + Computed: true, + Description: `Column length.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "oracle_excluded_objects": { + Type: schema.TypeList, + Optional: true, + Description: `Oracle data source objects to avoid backfilling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "oracle_schemas": { + Type: schema.TypeList, + Required: true, + Description: `Oracle schemas/databases in the database server`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "schema": { + Type: schema.TypeString, + Required: true, + Description: `Schema name.`, + }, + "oracle_tables": { + Type: schema.TypeList, + Optional: true, + Description: `Tables in the database.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table": { + Type: schema.TypeString, + Required: true, + Description: `Table name.`, + }, + "oracle_columns": { + Type: schema.TypeList, + Optional: true, + 
Description: `Oracle columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "column": { + Type: schema.TypeString, + Optional: true, + Description: `Column name.`, + }, + "data_type": { + Type: schema.TypeString, + Optional: true, + Description: `The Oracle data type. Full data types list can be found here: +https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Data-Types.html`, + }, + "encoding": { + Type: schema.TypeString, + Computed: true, + Description: `Column encoding.`, + }, + "length": { + Type: schema.TypeInt, + Computed: true, + Description: `Column length.`, + }, + "nullable": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether or not the column can accept a null value.`, + }, + "ordinal_position": { + Type: schema.TypeInt, + Computed: true, + Description: `The ordinal position of the column in the table.`, + }, + "precision": { + Type: schema.TypeInt, + Computed: true, + Description: `Column precision.`, + }, + "primary_key": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether or not the column represents a primary key.`, + }, + "scale": { + Type: schema.TypeInt, + Computed: true, + Description: `Column scale.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "postgresql_excluded_objects": { + Type: schema.TypeList, + Optional: true, + Description: `PostgreSQL data source objects to avoid backfilling.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "postgresql_schemas": { + Type: schema.TypeList, + Required: true, + Description: `PostgreSQL schemas on the server`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "schema": { + Type: schema.TypeString, + Required: true, + Description: `Database name.`, + }, + "postgresql_tables": { + Type: schema.TypeList, + Optional: true, + Description: 
`Tables in the schema.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "table": { + Type: schema.TypeString, + Required: true, + Description: `Table name.`, + }, + "postgresql_columns": { + Type: schema.TypeList, + Optional: true, + Description: `PostgreSQL columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything.`, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "column": { + Type: schema.TypeString, + Optional: true, + Description: `Column name.`, + }, + "data_type": { + Type: schema.TypeString, + Optional: true, + Description: `The PostgreSQL data type. Full data types list can be found here: +https://www.postgresql.org/docs/current/datatype.html`, + }, + "nullable": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether or not the column can accept a null value.`, + }, + "ordinal_position": { + Type: schema.TypeInt, + Optional: true, + Description: `The ordinal position of the column in the table.`, + }, + "primary_key": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether or not the column represents a primary key.`, + }, + "length": { + Type: schema.TypeInt, + Computed: true, + Description: `Column length.`, + }, + "precision": { + Type: schema.TypeInt, + Computed: true, + Description: `Column precision.`, + }, + "scale": { + Type: schema.TypeInt, + Computed: true, + Description: `Column scale.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + ExactlyOneOf: []string{"backfill_all", "backfill_none"}, + }, + "backfill_none": { + Type: schema.TypeList, + Optional: true, + Description: `Backfill strategy to disable automatic backfill for the Stream's objects.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{}, + }, + ExactlyOneOf: []string{"backfill_all", "backfill_none"}, + }, + "customer_managed_encryption_key": { + Type: schema.TypeString, + Optional: true, + 
ForceNew: true, + Description: `A reference to a KMS encryption key. If provided, it will be used to encrypt the data. If left blank, data +will be encrypted using an internal Stream-specific encryption key provisioned through KMS.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `Labels.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The stream's name.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The state of the stream.`, + }, + "desired_state": { + Type: schema.TypeString, + Optional: true, + Default: "NOT_STARTED", + Description: `Desired state of the Stream. Set this field to 'RUNNING' to start the stream, and 'PAUSED' to pause the stream.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDatastreamStreamCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandDatastreamStreamLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + displayNameProp, err := expandDatastreamStreamDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + sourceConfigProp, err := expandDatastreamStreamSourceConfig(d.Get("source_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_config"); 
!isEmptyValue(reflect.ValueOf(sourceConfigProp)) && (ok || !reflect.DeepEqual(v, sourceConfigProp)) { + obj["sourceConfig"] = sourceConfigProp + } + destinationConfigProp, err := expandDatastreamStreamDestinationConfig(d.Get("destination_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("destination_config"); !isEmptyValue(reflect.ValueOf(destinationConfigProp)) && (ok || !reflect.DeepEqual(v, destinationConfigProp)) { + obj["destinationConfig"] = destinationConfigProp + } + backfillAllProp, err := expandDatastreamStreamBackfillAll(d.Get("backfill_all"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backfill_all"); ok || !reflect.DeepEqual(v, backfillAllProp) { + obj["backfillAll"] = backfillAllProp + } + backfillNoneProp, err := expandDatastreamStreamBackfillNone(d.Get("backfill_none"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backfill_none"); ok || !reflect.DeepEqual(v, backfillNoneProp) { + obj["backfillNone"] = backfillNoneProp + } + customerManagedEncryptionKeyProp, err := expandDatastreamStreamCustomerManagedEncryptionKey(d.Get("customer_managed_encryption_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("customer_managed_encryption_key"); !isEmptyValue(reflect.ValueOf(customerManagedEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, customerManagedEncryptionKeyProp)) { + obj["customerManagedEncryptionKey"] = customerManagedEncryptionKeyProp + } + + obj, err = resourceDatastreamStreamEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/streams?streamId={{stream_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Stream: %#v", obj) + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Stream: %s", err) + } + 
billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error creating Stream: %s", err) + } + + // Store the ID now + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = datastreamOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Stream", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Stream: %s", err) + } + + if err := d.Set("name", flattenDatastreamStreamName(opRes["name"], d, config)); err != nil { + return err + } + + // This may have caused the ID to update - update it if so. 
+ id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + if err := waitForDatastreamStreamReady(d, config, d.Timeout(schema.TimeoutCreate)-time.Minute); err != nil { + return fmt.Errorf("Error waiting for Stream %q to be NOT_STARTED or RUNNING during creation: %q", d.Get("name").(string), err) + } + + if d.Get("state") != d.Get("desired_state") { + log.Printf("[DEBUG] Desired state %s not equal to state = %s, updating stream %q", d.Get("desired_state"), d.Get("state"), d.Id()) + if err = resourceDatastreamStreamUpdate(d, meta); err != nil { + return fmt.Errorf("Error updating Stream %q during creation: %q", d.Get("name").(string), err) + } + } + + log.Printf("[DEBUG] Finished creating Stream %q: %#v", d.Id(), res) + + return resourceDatastreamStreamRead(d, meta) +} + +func resourceDatastreamStreamRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Stream: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("DatastreamStream %q", d.Id())) + } + + // Explicitly set virtual fields to default values if unset + if _, ok := d.GetOkExists("desired_state"); !ok { + if err := d.Set("desired_state", "NOT_STARTED"); err != nil { + 
return fmt.Errorf("Error setting desired_state: %s", err) + } + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Stream: %s", err) + } + + if err := d.Set("name", flattenDatastreamStreamName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading Stream: %s", err) + } + if err := d.Set("labels", flattenDatastreamStreamLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Stream: %s", err) + } + if err := d.Set("display_name", flattenDatastreamStreamDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Stream: %s", err) + } + if err := d.Set("source_config", flattenDatastreamStreamSourceConfig(res["sourceConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Stream: %s", err) + } + if err := d.Set("destination_config", flattenDatastreamStreamDestinationConfig(res["destinationConfig"], d, config)); err != nil { + return fmt.Errorf("Error reading Stream: %s", err) + } + if err := d.Set("state", flattenDatastreamStreamState(res["state"], d, config)); err != nil { + return fmt.Errorf("Error reading Stream: %s", err) + } + if err := d.Set("backfill_all", flattenDatastreamStreamBackfillAll(res["backfillAll"], d, config)); err != nil { + return fmt.Errorf("Error reading Stream: %s", err) + } + if err := d.Set("backfill_none", flattenDatastreamStreamBackfillNone(res["backfillNone"], d, config)); err != nil { + return fmt.Errorf("Error reading Stream: %s", err) + } + if err := d.Set("customer_managed_encryption_key", flattenDatastreamStreamCustomerManagedEncryptionKey(res["customerManagedEncryptionKey"], d, config)); err != nil { + return fmt.Errorf("Error reading Stream: %s", err) + } + + return nil +} + +func resourceDatastreamStreamUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject 
:= "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Stream: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + labelsProp, err := expandDatastreamStreamLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + displayNameProp, err := expandDatastreamStreamDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + sourceConfigProp, err := expandDatastreamStreamSourceConfig(d.Get("source_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceConfigProp)) { + obj["sourceConfig"] = sourceConfigProp + } + destinationConfigProp, err := expandDatastreamStreamDestinationConfig(d.Get("destination_config"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("destination_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, destinationConfigProp)) { + obj["destinationConfig"] = destinationConfigProp + } + backfillAllProp, err := expandDatastreamStreamBackfillAll(d.Get("backfill_all"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backfill_all"); ok || !reflect.DeepEqual(v, backfillAllProp) { + obj["backfillAll"] = backfillAllProp + } + backfillNoneProp, err := expandDatastreamStreamBackfillNone(d.Get("backfill_none"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("backfill_none"); ok || !reflect.DeepEqual(v, backfillNoneProp) { + obj["backfillNone"] = backfillNoneProp + } + + obj, err = 
resourceDatastreamStreamEncoder(d, meta, obj) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Stream %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("source_config") { + updateMask = append(updateMask, "sourceConfig") + } + + if d.HasChange("destination_config") { + updateMask = append(updateMask, "destinationConfig") + } + + if d.HasChange("backfill_all") { + updateMask = append(updateMask, "backfillAll") + } + + if d.HasChange("backfill_none") { + updateMask = append(updateMask, "backfillNone") + } + // updateMask is a URL parameter but not present in the schema, so replaceVars + // won't set it + url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + if d.HasChange("desired_state") { + updateMask = append(updateMask, "state") + } + + // Override the previous setting of updateMask to include state. 
+ // updateMask is a URL parameter but not present in the schema, so replaceVars + // won't set it + url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + if err := waitForDatastreamStreamReady(d, config, d.Timeout(schema.TimeoutCreate)-time.Minute); err != nil { + return fmt.Errorf("Error waiting for Stream %q to be NOT_STARTED, RUNNING, or PAUSED before updating: %q", d.Get("name").(string), err) + } + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return fmt.Errorf("Error updating Stream %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Stream %q: %#v", d.Id(), res) + } + + err = datastreamOperationWaitTime( + config, res, project, "Updating Stream", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + if err := waitForDatastreamStreamReady(d, config, d.Timeout(schema.TimeoutCreate)-time.Minute); err != nil { + return fmt.Errorf("Error waiting for Stream %q to be NOT_STARTED, RUNNING, or PAUSED during update: %q", d.Get("name").(string), err) + } + return resourceDatastreamStreamRead(d, meta) +} + +func resourceDatastreamStreamDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Stream: %s", err) + } + billingProject = project + + url, err := replaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") + if err != nil { + return err + } + + var obj 
map[string]interface{} + log.Printf("[DEBUG] Deleting Stream %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return handleNotFoundError(err, d, "Stream") + } + + err = datastreamOperationWaitTime( + config, res, project, "Deleting Stream", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Stream %q: %#v", d.Id(), res) + return nil +} + +func resourceDatastreamStreamImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/streams/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Explicitly set virtual fields to default values on import + if err := d.Set("desired_state", "NOT_STARTED"); err != nil { + return nil, fmt.Errorf("Error setting desired_state: %s", err) + } + if err := waitForDatastreamStreamReady(d, config, d.Timeout(schema.TimeoutCreate)-time.Minute); err != nil { + return nil, fmt.Errorf("Error waiting for Stream %q to be NOT_STARTED, RUNNING, or PAUSED during import: %q", d.Get("name").(string), err) + } + + return []*schema.ResourceData{d}, nil +} + +func flattenDatastreamStreamName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { 
+ return v +} + +func flattenDatastreamStreamDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["source_connection_profile"] = + flattenDatastreamStreamSourceConfigSourceConnectionProfile(original["sourceConnectionProfile"], d, config) + transformed["mysql_source_config"] = + flattenDatastreamStreamSourceConfigMysqlSourceConfig(original["mysqlSourceConfig"], d, config) + transformed["oracle_source_config"] = + flattenDatastreamStreamSourceConfigOracleSourceConfig(original["oracleSourceConfig"], d, config) + transformed["postgresql_source_config"] = + flattenDatastreamStreamSourceConfigPostgresqlSourceConfig(original["postgresqlSourceConfig"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamSourceConfigSourceConnectionProfile(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigMysqlSourceConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["include_objects"] = + flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(original["includeObjects"], d, config) + transformed["exclude_objects"] = + flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(original["excludeObjects"], d, config) + transformed["max_concurrent_cdc_tasks"] = + flattenDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentCdcTasks(original["maxConcurrentCdcTasks"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["mysql_databases"] = + flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabases(original["mysqlDatabases"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabases(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "database": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesDatabase(original["database"], d, config), + "mysql_tables": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTables(original["mysqlTables"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesDatabase(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "table": 
flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config), + "mysql_columns": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysqlColumns"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "column": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config), + "data_type": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["dataType"], d, config), + "length": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config), + "collation": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config), + "primary_key": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primaryKey"], d, config), + "nullable": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config), + 
"ordinal_position": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinalPosition"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + 
if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["mysql_databases"] = + flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabases(original["mysqlDatabases"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabases(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "database": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesDatabase(original["database"], d, config), + "mysql_tables": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTables(original["mysqlTables"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesDatabase(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + 
l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "table": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config), + "mysql_columns": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysqlColumns"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "column": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config), + "data_type": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["dataType"], d, config), + "length": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config), + "collation": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config), + "primary_key": 
flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primaryKey"], d, config), + "nullable": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config), + "ordinal_position": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinalPosition"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v 
interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentCdcTasks(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["include_objects"] = + flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjects(original["includeObjects"], d, config) + transformed["exclude_objects"] = + flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjects(original["excludeObjects"], d, config) + transformed["max_concurrent_cdc_tasks"] = + flattenDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentCdcTasks(original["maxConcurrentCdcTasks"], d, config) + transformed["max_concurrent_backfill_tasks"] = + 
flattenDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentBackfillTasks(original["maxConcurrentBackfillTasks"], d, config) + transformed["drop_large_objects"] = + flattenDatastreamStreamSourceConfigOracleSourceConfigDropLargeObjects(original["dropLargeObjects"], d, config) + transformed["stream_large_objects"] = + flattenDatastreamStreamSourceConfigOracleSourceConfigStreamLargeObjects(original["streamLargeObjects"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["oracle_schemas"] = + flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemas(original["oracleSchemas"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemas(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "schema": flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasSchema(original["schema"], d, config), + "oracle_tables": flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTables(original["oracleTables"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasSchema(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func 
flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "table": flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesTable(original["table"], d, config), + "oracle_columns": flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumns(original["oracleColumns"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "column": flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(original["column"], d, config), + "data_type": flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(original["dataType"], d, config), + "length": 
flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsLength(original["length"], d, config), + "precision": flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(original["precision"], d, config), + "scale": flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsScale(original["scale"], d, config), + "encoding": flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(original["encoding"], d, config), + "primary_key": flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(original["primaryKey"], d, config), + "nullable": flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(original["nullable"], d, config), + "ordinal_position": flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(original["ordinalPosition"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := 
v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["oracle_schemas"] = + flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemas(original["oracleSchemas"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemas(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "schema": flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasSchema(original["schema"], d, config), + "oracle_tables": flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTables(original["oracleTables"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasSchema(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTables(v interface{}, d *schema.ResourceData, config 
*Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "table": flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesTable(original["table"], d, config), + "oracle_columns": flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumns(original["oracleColumns"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "column": flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(original["column"], d, config), + "data_type": flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(original["dataType"], d, config), + "length": flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsLength(original["length"], d, config), + "precision": 
flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(original["precision"], d, config), + "scale": flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsScale(original["scale"], d, config), + "encoding": flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(original["encoding"], d, config), + "primary_key": flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(original["primaryKey"], d, config), + "nullable": flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(original["nullable"], d, config), + "ordinal_position": flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(original["ordinalPosition"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := 
stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentCdcTasks(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentBackfillTasks(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigDropLargeObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDatastreamStreamSourceConfigOracleSourceConfigStreamLargeObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) 
+ transformed := make(map[string]interface{}) + transformed["include_objects"] = + flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjects(original["includeObjects"], d, config) + transformed["exclude_objects"] = + flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjects(original["excludeObjects"], d, config) + transformed["replication_slot"] = + flattenDatastreamStreamSourceConfigPostgresqlSourceConfigReplicationSlot(original["replicationSlot"], d, config) + transformed["publication"] = + flattenDatastreamStreamSourceConfigPostgresqlSourceConfigPublication(original["publication"], d, config) + transformed["max_concurrent_backfill_tasks"] = + flattenDatastreamStreamSourceConfigPostgresqlSourceConfigMaxConcurrentBackfillTasks(original["maxConcurrentBackfillTasks"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["postgresql_schemas"] = + flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemas(original["postgresqlSchemas"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemas(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "schema": 
flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasSchema(original["schema"], d, config), + "postgresql_tables": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTables(original["postgresqlTables"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasSchema(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "table": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesTable(original["table"], d, config), + "postgresql_columns": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(original["postgresqlColumns"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 
1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "column": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(original["column"], d, config), + "data_type": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(original["dataType"], d, config), + "length": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(original["length"], d, config), + "precision": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(original["precision"], d, config), + "scale": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(original["scale"], d, config), + "primary_key": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(original["primaryKey"], d, config), + "nullable": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(original["nullable"], d, config), + "ordinal_position": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(original["ordinalPosition"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d 
*schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func 
flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["postgresql_schemas"] = + flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemas(original["postgresqlSchemas"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemas(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "schema": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasSchema(original["schema"], d, config), + "postgresql_tables": 
flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTables(original["postgresqlTables"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasSchema(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "table": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesTable(original["table"], d, config), + "postgresql_columns": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(original["postgresqlColumns"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + 
"column": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(original["column"], d, config), + "data_type": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(original["dataType"], d, config), + "length": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(original["length"], d, config), + "precision": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(original["precision"], d, config), + "scale": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(original["scale"], d, config), + "primary_key": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(original["primaryKey"], d, config), + "nullable": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(original["nullable"], d, config), + "ordinal_position": flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(original["ordinalPosition"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func 
flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func 
flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigReplicationSlot(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigPublication(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamSourceConfigPostgresqlSourceConfigMaxConcurrentBackfillTasks(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamDestinationConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["destination_connection_profile"] = + 
flattenDatastreamStreamDestinationConfigDestinationConnectionProfile(original["destinationConnectionProfile"], d, config) + transformed["gcs_destination_config"] = + flattenDatastreamStreamDestinationConfigGcsDestinationConfig(original["gcsDestinationConfig"], d, config) + transformed["bigquery_destination_config"] = + flattenDatastreamStreamDestinationConfigBigqueryDestinationConfig(original["bigqueryDestinationConfig"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamDestinationConfigDestinationConnectionProfile(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamDestinationConfigGcsDestinationConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["path"] = + flattenDatastreamStreamDestinationConfigGcsDestinationConfigPath(original["path"], d, config) + transformed["file_rotation_mb"] = + flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationMb(original["fileRotationMb"], d, config) + transformed["file_rotation_interval"] = + flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationInterval(original["fileRotationInterval"], d, config) + transformed["avro_file_format"] = + flattenDatastreamStreamDestinationConfigGcsDestinationConfigAvroFileFormat(original["avroFileFormat"], d, config) + transformed["json_file_format"] = + flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat(original["jsonFileFormat"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamDestinationConfigGcsDestinationConfigPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationMb(v interface{}, d *schema.ResourceData, 
config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationInterval(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamDestinationConfigGcsDestinationConfigAvroFileFormat(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["schema_file_format"] = + flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatSchemaFileFormat(original["schemaFileFormat"], d, config) + transformed["compression"] = + flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatCompression(original["compression"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatSchemaFileFormat(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatCompression(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} 
{ + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["data_freshness"] = + flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigDataFreshness(original["dataFreshness"], d, config) + transformed["single_target_dataset"] = + flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDataset(original["singleTargetDataset"], d, config) + transformed["source_hierarchy_datasets"] = + flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets(original["sourceHierarchyDatasets"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigDataFreshness(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDataset(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["dataset_id"] = + flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetDatasetId(original["datasetId"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetDatasetId(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["dataset_template"] = + 
flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate(original["datasetTemplate"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["location"] = + flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateLocation(original["location"], d, config) + transformed["dataset_id_prefix"] = + flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateDatasetIdPrefix(original["datasetIdPrefix"], d, config) + transformed["kms_key_name"] = + flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateKmsKeyName(original["kmsKeyName"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateDatasetIdPrefix(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateKmsKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamState(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamBackfillAll(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + 
} + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["mysql_excluded_objects"] = + flattenDatastreamStreamBackfillAllMysqlExcludedObjects(original["mysqlExcludedObjects"], d, config) + transformed["postgresql_excluded_objects"] = + flattenDatastreamStreamBackfillAllPostgresqlExcludedObjects(original["postgresqlExcludedObjects"], d, config) + transformed["oracle_excluded_objects"] = + flattenDatastreamStreamBackfillAllOracleExcludedObjects(original["oracleExcludedObjects"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamBackfillAllMysqlExcludedObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["mysql_databases"] = + flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(original["mysqlDatabases"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "database": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesDatabase(original["database"], d, config), + "mysql_tables": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTables(original["mysqlTables"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesDatabase(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + 
+func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "table": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config), + "mysql_columns": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysqlColumns"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "column": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config), + "data_type": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["dataType"], d, config), + "length": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config), + "collation": 
flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config), + "primary_key": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primaryKey"], d, config), + "nullable": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config), + "ordinal_position": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinalPosition"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func 
flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["postgresql_schemas"] = + flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas(original["postgresqlSchemas"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "schema": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasSchema(original["schema"], d, config), + "postgresql_tables": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTables(original["postgresqlTables"], d, config), + }) + } + 
return transformed +} +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasSchema(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "table": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesTable(original["table"], d, config), + "postgresql_columns": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(original["postgresqlColumns"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "column": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(original["column"], d, config), + "data_type": 
flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(original["dataType"], d, config), + "length": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(original["length"], d, config), + "precision": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(original["precision"], d, config), + "scale": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(original["scale"], d, config), + "primary_key": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(original["primaryKey"], d, config), + "nullable": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(original["nullable"], d, config), + "ordinal_position": flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(original["ordinalPosition"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented 
as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + 
return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamBackfillAllOracleExcludedObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["oracle_schemas"] = + flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemas(original["oracleSchemas"], d, config) + return []interface{}{transformed} +} +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemas(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "schema": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasSchema(original["schema"], d, config), + "oracle_tables": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTables(original["oracleTables"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasSchema(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not 
include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "table": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesTable(original["table"], d, config), + "oracle_columns": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumns(original["oracleColumns"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "column": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsColumn(original["column"], d, config), + "data_type": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsDataType(original["dataType"], d, config), + "length": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsLength(original["length"], d, config), + "precision": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrecision(original["precision"], d, config), + "scale": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsScale(original["scale"], d, config), + "encoding": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsEncoding(original["encoding"], d, 
config), + "primary_key": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(original["primaryKey"], d, config), + "nullable": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsNullable(original["nullable"], d, config), + "ordinal_position": flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(original["ordinalPosition"], d, config), + }) + } + return transformed +} +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func 
flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal } + + return v // let terraform core handle it otherwise } -func resourceDatastreamStreamCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } } - obj := make(map[string]interface{}) - labelsProp, err := expandDatastreamStreamLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp + // number values are represented as 
float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal } - displayNameProp, err := expandDatastreamStreamDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp + + return v // let terraform core handle it otherwise +} + +func flattenDatastreamStreamBackfillNone(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil } - sourceConfigProp, err := expandDatastreamStreamSourceConfig(d.Get("source_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_config"); !isEmptyValue(reflect.ValueOf(sourceConfigProp)) && (ok || !reflect.DeepEqual(v, sourceConfigProp)) { - obj["sourceConfig"] = sourceConfigProp + transformed := make(map[string]interface{}) + return []interface{}{transformed} +} + +func flattenDatastreamStreamCustomerManagedEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func expandDatastreamStreamLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil } - destinationConfigProp, err := expandDatastreamStreamDestinationConfig(d.Get("destination_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("destination_config"); !isEmptyValue(reflect.ValueOf(destinationConfigProp)) && (ok || !reflect.DeepEqual(v, destinationConfigProp)) { - obj["destinationConfig"] = destinationConfigProp + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) } - backfillAllProp, err := expandDatastreamStreamBackfillAll(d.Get("backfill_all"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backfill_all"); ok || 
!reflect.DeepEqual(v, backfillAllProp) { - obj["backfillAll"] = backfillAllProp + return m, nil +} + +func expandDatastreamStreamDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil } - backfillNoneProp, err := expandDatastreamStreamBackfillNone(d.Get("backfill_none"), d, config) + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSourceConnectionProfile, err := expandDatastreamStreamSourceConfigSourceConnectionProfile(original["source_connection_profile"], d, config) if err != nil { - return err - } else if v, ok := d.GetOkExists("backfill_none"); ok || !reflect.DeepEqual(v, backfillNoneProp) { - obj["backfillNone"] = backfillNoneProp + return nil, err + } else if val := reflect.ValueOf(transformedSourceConnectionProfile); val.IsValid() && !isEmptyValue(val) { + transformed["sourceConnectionProfile"] = transformedSourceConnectionProfile } - customerManagedEncryptionKeyProp, err := expandDatastreamStreamCustomerManagedEncryptionKey(d.Get("customer_managed_encryption_key"), d, config) + + transformedMysqlSourceConfig, err := expandDatastreamStreamSourceConfigMysqlSourceConfig(original["mysql_source_config"], d, config) if err != nil { - return err - } else if v, ok := d.GetOkExists("customer_managed_encryption_key"); !isEmptyValue(reflect.ValueOf(customerManagedEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, customerManagedEncryptionKeyProp)) { - obj["customerManagedEncryptionKey"] = customerManagedEncryptionKeyProp + return nil, err + } else { + transformed["mysqlSourceConfig"] = transformedMysqlSourceConfig } - obj, err = resourceDatastreamStreamEncoder(d, meta, obj) + transformedOracleSourceConfig, err := 
expandDatastreamStreamSourceConfigOracleSourceConfig(original["oracle_source_config"], d, config) if err != nil { - return err + return nil, err + } else { + transformed["oracleSourceConfig"] = transformedOracleSourceConfig } - url, err := replaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/streams?streamId={{stream_id}}") + transformedPostgresqlSourceConfig, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfig(original["postgresql_source_config"], d, config) if err != nil { - return err + return nil, err + } else { + transformed["postgresqlSourceConfig"] = transformedPostgresqlSourceConfig } - log.Printf("[DEBUG] Creating new Stream: %#v", obj) - billingProject := "" + return transformed, nil +} - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Stream: %s", err) +func expandDatastreamStreamSourceConfigSourceConnectionProfile(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil } - billingProject = project - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + transformedIncludeObjects, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(original["include_objects"], d, config) if err != nil { - return fmt.Errorf("Error creating Stream: %s", err) + return nil, err + } else if val := 
reflect.ValueOf(transformedIncludeObjects); val.IsValid() && !isEmptyValue(val) { + transformed["includeObjects"] = transformedIncludeObjects } - // Store the ID now - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") + transformedExcludeObjects, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(original["exclude_objects"], d, config) if err != nil { - return fmt.Errorf("Error constructing id: %s", err) + return nil, err + } else if val := reflect.ValueOf(transformedExcludeObjects); val.IsValid() && !isEmptyValue(val) { + transformed["excludeObjects"] = transformedExcludeObjects } - d.SetId(id) - // Use the resource in the operation response to populate - // identity fields and d.Id() before read - var opRes map[string]interface{} - err = datastreamOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Stream", userAgent, - d.Timeout(schema.TimeoutCreate)) + transformedMaxConcurrentCdcTasks, err := expandDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentCdcTasks(original["max_concurrent_cdc_tasks"], d, config) if err != nil { - // The resource didn't actually create - d.SetId("") - - return fmt.Errorf("Error waiting to create Stream: %s", err) + return nil, err + } else { + transformed["maxConcurrentCdcTasks"] = transformedMaxConcurrentCdcTasks } - if err := d.Set("name", flattenDatastreamStreamName(opRes["name"], d, config)); err != nil { - return err + return transformed, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) - // This may have caused the ID to update - update it if so. 
- id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") + transformedMysqlDatabases, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabases(original["mysql_databases"], d, config) if err != nil { - return fmt.Errorf("Error constructing id: %s", err) + return nil, err + } else if val := reflect.ValueOf(transformedMysqlDatabases); val.IsValid() && !isEmptyValue(val) { + transformed["mysqlDatabases"] = transformedMysqlDatabases } - d.SetId(id) - if err := waitForDatastreamStreamReady(d, config, d.Timeout(schema.TimeoutCreate)-time.Minute); err != nil { - return fmt.Errorf("Error waiting for Stream %q to be NOT_STARTED or RUNNING during creation: %q", d.Get("name").(string), err) - } + return transformed, nil +} - log.Printf("[DEBUG] Finished creating Stream %q: %#v", d.Id(), res) +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabases(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) - return resourceDatastreamStreamRead(d, meta) -} + transformedDatabase, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesDatabase(original["database"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !isEmptyValue(val) { + transformed["database"] = transformedDatabase + } -func resourceDatastreamStreamRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } + transformedMysqlTables, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTables(original["mysql_tables"], d, 
config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMysqlTables); val.IsValid() && !isEmptyValue(val) { + transformed["mysqlTables"] = transformedMysqlTables + } - url, err := replaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") - if err != nil { - return err + req = append(req, transformed) } + return req, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesDatabase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { + transformed["table"] = transformedTable + } - billingProject := "" + transformedMysqlColumns, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysql_columns"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMysqlColumns); val.IsValid() && !isEmptyValue(val) { + transformed["mysqlColumns"] = transformedMysqlColumns + } - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Stream: %s", err) + req = append(req, transformed) } - billingProject = project + return req, nil +} - // err == nil indicates that the billing_project value was found - 
if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("DatastreamStream %q", d.Id())) - } +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) - // Explicitly set virtual fields to default values if unset - if _, ok := d.GetOkExists("desired_state"); !ok { - if err := d.Set("desired_state", "NOT_STARTED"); err != nil { - return fmt.Errorf("Error setting desired_state: %s", err) + transformedColumn, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !isEmptyValue(val) { + transformed["column"] = transformedColumn } - } - if err := d.Set("project", project); err != nil { - return fmt.Errorf("Error reading Stream: %s", err) - } - if err := d.Set("name", flattenDatastreamStreamName(res["name"], d, config)); err != nil { - return fmt.Errorf("Error reading Stream: %s", err) - } - if err := d.Set("labels", flattenDatastreamStreamLabels(res["labels"], d, config)); err != nil { - return fmt.Errorf("Error reading Stream: %s", err) - } - if err := d.Set("display_name", flattenDatastreamStreamDisplayName(res["displayName"], d, config)); err != nil { - return 
fmt.Errorf("Error reading Stream: %s", err) - } - if err := d.Set("source_config", flattenDatastreamStreamSourceConfig(res["sourceConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Stream: %s", err) - } - if err := d.Set("destination_config", flattenDatastreamStreamDestinationConfig(res["destinationConfig"], d, config)); err != nil { - return fmt.Errorf("Error reading Stream: %s", err) - } - if err := d.Set("state", flattenDatastreamStreamState(res["state"], d, config)); err != nil { - return fmt.Errorf("Error reading Stream: %s", err) - } - if err := d.Set("backfill_all", flattenDatastreamStreamBackfillAll(res["backfillAll"], d, config)); err != nil { - return fmt.Errorf("Error reading Stream: %s", err) - } - if err := d.Set("backfill_none", flattenDatastreamStreamBackfillNone(res["backfillNone"], d, config)); err != nil { - return fmt.Errorf("Error reading Stream: %s", err) - } - if err := d.Set("customer_managed_encryption_key", flattenDatastreamStreamCustomerManagedEncryptionKey(res["customerManagedEncryptionKey"], d, config)); err != nil { - return fmt.Errorf("Error reading Stream: %s", err) - } + transformedDataType, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["data_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { + transformed["dataType"] = transformedDataType + } - return nil -} + transformedLength, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !isEmptyValue(val) { + transformed["length"] = transformedLength + } -func resourceDatastreamStreamUpdate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := 
generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } + transformedCollation, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCollation); val.IsValid() && !isEmptyValue(val) { + transformed["collation"] = transformedCollation + } - billingProject := "" + transformedPrimaryKey, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primary_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !isEmptyValue(val) { + transformed["primaryKey"] = transformedPrimaryKey + } - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Stream: %s", err) - } - billingProject = project + transformedNullable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !isEmptyValue(val) { + transformed["nullable"] = transformedNullable + } - obj := make(map[string]interface{}) - labelsProp, err := expandDatastreamStreamLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - displayNameProp, err := expandDatastreamStreamDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - sourceConfigProp, err := 
expandDatastreamStreamSourceConfig(d.Get("source_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sourceConfigProp)) { - obj["sourceConfig"] = sourceConfigProp - } - destinationConfigProp, err := expandDatastreamStreamDestinationConfig(d.Get("destination_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("destination_config"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, destinationConfigProp)) { - obj["destinationConfig"] = destinationConfigProp - } - backfillAllProp, err := expandDatastreamStreamBackfillAll(d.Get("backfill_all"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backfill_all"); ok || !reflect.DeepEqual(v, backfillAllProp) { - obj["backfillAll"] = backfillAllProp - } - backfillNoneProp, err := expandDatastreamStreamBackfillNone(d.Get("backfill_none"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backfill_none"); ok || !reflect.DeepEqual(v, backfillNoneProp) { - obj["backfillNone"] = backfillNoneProp - } + transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinal_position"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !isEmptyValue(val) { + transformed["ordinalPosition"] = transformedOrdinalPosition + } - obj, err = resourceDatastreamStreamEncoder(d, meta, obj) - if err != nil { - return err + req = append(req, transformed) } + return req, nil +} - url, err := replaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") - if err != nil { - return err - } +func 
expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - log.Printf("[DEBUG] Updating Stream %q: %#v", d.Id(), obj) - updateMask := []string{} +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - if d.HasChange("source_config") { - updateMask = append(updateMask, "sourceConfig") - } +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - if d.HasChange("destination_config") { - updateMask = append(updateMask, "destinationConfig") - } +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - if d.HasChange("backfill_all") { - updateMask = append(updateMask, "backfillAll") - } +func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { + return v, nil +} - if d.HasChange("backfill_none") { - updateMask = append(updateMask, "backfillNone") +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil } - // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMysqlDatabases, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabases(original["mysql_databases"], d, config) if err != nil { - return err + return nil, err + } else if val := reflect.ValueOf(transformedMysqlDatabases); val.IsValid() && !isEmptyValue(val) { + transformed["mysqlDatabases"] = transformedMysqlDatabases } - if d.HasChange("desired_state") { - updateMask = append(updateMask, "state") - } + return transformed, nil +} - // Override the previous setting of updateMask to include state. 
- // updateMask is a URL parameter but not present in the schema, so replaceVars - // won't set it - url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabases(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDatabase, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesDatabase(original["database"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !isEmptyValue(val) { + transformed["database"] = transformedDatabase + } + + transformedMysqlTables, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTables(original["mysql_tables"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMysqlTables); val.IsValid() && !isEmptyValue(val) { + transformed["mysqlTables"] = transformedMysqlTables + } - if err := waitForDatastreamStreamReady(d, config, d.Timeout(schema.TimeoutCreate)-time.Minute); err != nil { - return fmt.Errorf("Error waiting for Stream %q to be NOT_STARTED, RUNNING, or PAUSED before updating: %q", d.Get("name").(string), err) + req = append(req, transformed) } + return req, nil +} - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesDatabase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - res, err := sendRequestWithTimeout(config, 
"PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) - if err != nil { - return fmt.Errorf("Error updating Stream %q: %s", d.Id(), err) - } else { - log.Printf("[DEBUG] Finished updating Stream %q: %#v", d.Id(), res) - } + transformedTable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { + transformed["table"] = transformedTable + } - err = datastreamOperationWaitTime( - config, res, project, "Updating Stream", userAgent, - d.Timeout(schema.TimeoutUpdate)) + transformedMysqlColumns, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysql_columns"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMysqlColumns); val.IsValid() && !isEmptyValue(val) { + transformed["mysqlColumns"] = transformedMysqlColumns + } - if err != nil { - return err + req = append(req, transformed) } + return req, nil +} - if err := waitForDatastreamStreamReady(d, config, d.Timeout(schema.TimeoutCreate)-time.Minute); err != nil { - return fmt.Errorf("Error waiting for Stream %q to be NOT_STARTED, RUNNING, or PAUSED during update: %q", d.Get("name").(string), err) - } - return resourceDatastreamStreamRead(d, meta) +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { + return v, nil } -func resourceDatastreamStreamDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) - billingProject := "" + transformedColumn, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !isEmptyValue(val) { + transformed["column"] = transformedColumn + } - project, err := getProject(d, config) - if err != nil { - return fmt.Errorf("Error fetching project for Stream: %s", err) - } - billingProject = project + transformedDataType, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["data_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { + transformed["dataType"] = transformedDataType + } - url, err := replaceVars(d, config, "{{DatastreamBasePath}}projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") - if err != nil { - return err - } + transformedLength, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLength); val.IsValid() && 
!isEmptyValue(val) { + transformed["length"] = transformedLength + } - var obj map[string]interface{} - log.Printf("[DEBUG] Deleting Stream %q", d.Id()) + transformedCollation, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCollation); val.IsValid() && !isEmptyValue(val) { + transformed["collation"] = transformedCollation + } - // err == nil indicates that the billing_project value was found - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } + transformedPrimaryKey, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primary_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !isEmptyValue(val) { + transformed["primaryKey"] = transformedPrimaryKey + } - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Stream") - } + transformedNullable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !isEmptyValue(val) { + transformed["nullable"] = transformedNullable + } - err = datastreamOperationWaitTime( - config, res, project, "Deleting Stream", userAgent, - d.Timeout(schema.TimeoutDelete)) + transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinal_position"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !isEmptyValue(val) { + transformed["ordinalPosition"] = transformedOrdinalPosition + } - if err != nil { - return err + req = append(req, transformed) } + return req, nil +} - log.Printf("[DEBUG] Finished deleting Stream %q: %#v", d.Id(), res) - return nil +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func resourceDatastreamStreamImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/streams/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - // Replace import id for the resource id - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/streams/{{stream_id}}") - if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - // Explicitly set virtual fields to default values on import - if err := d.Set("desired_state", "NOT_STARTED"); err != nil { - return nil, fmt.Errorf("Error setting desired_state: %s", err) - } - if err := waitForDatastreamStreamReady(d, config, d.Timeout(schema.TimeoutCreate)-time.Minute); err != nil { - return nil, fmt.Errorf("Error waiting for Stream %q to be NOT_STARTED, RUNNING, or PAUSED during import: %q", d.Get("name").(string), err) 
- } +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - return []*schema.ResourceData{d}, nil +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func expandDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentCdcTasks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamSourceConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil +func expandDatastreamStreamSourceConfigOracleSourceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil } + raw := l[0] + original := 
raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformed["source_connection_profile"] = - flattenDatastreamStreamSourceConfigSourceConnectionProfile(original["sourceConnectionProfile"], d, config) - transformed["mysql_source_config"] = - flattenDatastreamStreamSourceConfigMysqlSourceConfig(original["mysqlSourceConfig"], d, config) - return []interface{}{transformed} -} -func flattenDatastreamStreamSourceConfigSourceConnectionProfile(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} -func flattenDatastreamStreamSourceConfigMysqlSourceConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil + transformedIncludeObjects, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjects(original["include_objects"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIncludeObjects); val.IsValid() && !isEmptyValue(val) { + transformed["includeObjects"] = transformedIncludeObjects } - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["include_objects"] = - flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(original["includeObjects"], d, config) - transformed["exclude_objects"] = - flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(original["excludeObjects"], d, config) - transformed["max_concurrent_cdc_tasks"] = - flattenDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentCdcTasks(original["maxConcurrentCdcTasks"], d, config) - return []interface{}{transformed} -} -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil + + transformedExcludeObjects, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjects(original["exclude_objects"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedExcludeObjects); val.IsValid() && !isEmptyValue(val) { + transformed["excludeObjects"] = transformedExcludeObjects + } + + transformedMaxConcurrentCdcTasks, err := expandDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentCdcTasks(original["max_concurrent_cdc_tasks"], d, config) + if err != nil { + return nil, err + } else { + transformed["maxConcurrentCdcTasks"] = transformedMaxConcurrentCdcTasks + } + + transformedMaxConcurrentBackfillTasks, err := expandDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentBackfillTasks(original["max_concurrent_backfill_tasks"], d, config) + if err != nil { + return nil, err + } else { + transformed["maxConcurrentBackfillTasks"] = transformedMaxConcurrentBackfillTasks + } + + transformedDropLargeObjects, err := expandDatastreamStreamSourceConfigOracleSourceConfigDropLargeObjects(original["drop_large_objects"], d, config) + if err != nil { + return nil, err + } else { + transformed["dropLargeObjects"] = transformedDropLargeObjects } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil + + transformedStreamLargeObjects, err := expandDatastreamStreamSourceConfigOracleSourceConfigStreamLargeObjects(original["stream_large_objects"], d, config) + if err != nil { + return nil, err + } else { + transformed["streamLargeObjects"] = transformedStreamLargeObjects } - transformed := make(map[string]interface{}) - transformed["mysql_databases"] = - flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabases(original["mysqlDatabases"], d, config) - return []interface{}{transformed} + + return transformed, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabases(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v + +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := 
v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedOracleSchemas, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemas(original["oracle_schemas"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOracleSchemas); val.IsValid() && !isEmptyValue(val) { + transformed["oracleSchemas"] = transformedOracleSchemas } + + return transformed, nil +} + +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) + req := make([]interface{}, 0, len(l)) for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api + if raw == nil { continue } - transformed = append(transformed, map[string]interface{}{ - "database": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesDatabase(original["database"], d, config), - "mysql_tables": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTables(original["mysqlTables"], d, config), - }) + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSchema, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasSchema(original["schema"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !isEmptyValue(val) { + transformed["schema"] = transformedSchema + } + + transformedOracleTables, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTables(original["oracle_tables"], d, config) + if err != nil { + return nil, err + } else if 
val := reflect.ValueOf(transformedOracleTables); val.IsValid() && !isEmptyValue(val) { + transformed["oracleTables"] = transformedOracleTables + } + + req = append(req, transformed) } - return transformed + return req, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesDatabase(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v + +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) + req := make([]interface{}, 0, len(l)) for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api + if raw == nil { continue } - transformed = append(transformed, map[string]interface{}{ - "table": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config), - "mysql_columns": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysqlColumns"], d, config), - }) + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTable, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesTable(original["table"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { 
+ transformed["table"] = transformedTable + } + + transformedOracleColumns, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumns(original["oracle_columns"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOracleColumns); val.IsValid() && !isEmptyValue(val) { + transformed["oracleColumns"] = transformedOracleColumns + } + + req = append(req, transformed) } - return transformed + return req, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v + +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) + req := make([]interface{}, 0, len(l)) for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api + if raw == nil { continue } - transformed = append(transformed, map[string]interface{}{ - "column": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config), - "data_type": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["dataType"], d, config), - "length": 
flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config), - "collation": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config), - "primary_key": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primaryKey"], d, config), - "nullable": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config), - "ordinal_position": flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinalPosition"], d, config), - }) + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedColumn, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(original["column"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !isEmptyValue(val) { + transformed["column"] = transformedColumn + } + + transformedDataType, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(original["data_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { + transformed["dataType"] = transformedDataType + } + + transformedLength, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsLength(original["length"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !isEmptyValue(val) { + transformed["length"] = transformedLength + } + + transformedPrecision, err 
:= expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(original["precision"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !isEmptyValue(val) { + transformed["precision"] = transformedPrecision + } + + transformedScale, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsScale(original["scale"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !isEmptyValue(val) { + transformed["scale"] = transformedScale + } + + transformedEncoding, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(original["encoding"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEncoding); val.IsValid() && !isEmptyValue(val) { + transformed["encoding"] = transformedEncoding + } + + transformedPrimaryKey, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(original["primary_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !isEmptyValue(val) { + transformed["primaryKey"] = transformedPrimaryKey + } + + transformedNullable, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(original["nullable"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !isEmptyValue(val) { + transformed["nullable"] = transformedNullable + } + + transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(original["ordinal_position"], d, config) + if err != nil { + 
return nil, err + } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !isEmptyValue(val) { + transformed["ordinalPosition"] = transformedOrdinalPosition + } + + req = append(req, transformed) } - return transformed + return req, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v + +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := stringToFixed64(strVal); err == nil { - return intVal - } - } +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - 
return v // let terraform core handle it otherwise +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := stringToFixed64(strVal); err == nil { - return intVal - } +func expandDatastreamStreamSourceConfigOracleSourceConfigIncludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, 
d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal + transformedOracleSchemas, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemas(original["oracle_schemas"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOracleSchemas); val.IsValid() && !isEmptyValue(val) { + transformed["oracleSchemas"] = transformedOracleSchemas } - return v // let terraform core handle it otherwise + return transformed, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["mysql_databases"] = - flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabases(original["mysqlDatabases"], d, config) - return []interface{}{transformed} -} -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabases(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) + req := make([]interface{}, 0, len(l)) for _, raw := range l { - original := 
raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api + if raw == nil { continue } - transformed = append(transformed, map[string]interface{}{ - "database": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesDatabase(original["database"], d, config), - "mysql_tables": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTables(original["mysqlTables"], d, config), - }) + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSchema, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasSchema(original["schema"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !isEmptyValue(val) { + transformed["schema"] = transformedSchema + } + + transformedOracleTables, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTables(original["oracle_tables"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOracleTables); val.IsValid() && !isEmptyValue(val) { + transformed["oracleTables"] = transformedOracleTables + } + + req = append(req, transformed) } - return transformed + return req, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesDatabase(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v + +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } +func 
expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) + req := make([]interface{}, 0, len(l)) for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api + if raw == nil { continue } - transformed = append(transformed, map[string]interface{}{ - "table": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config), - "mysql_columns": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysqlColumns"], d, config), - }) + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTable, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesTable(original["table"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { + transformed["table"] = transformedTable + } + + transformedOracleColumns, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumns(original["oracle_columns"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOracleColumns); val.IsValid() && !isEmptyValue(val) { + transformed["oracleColumns"] = transformedOracleColumns + } + + req = append(req, transformed) } - return transformed + return req, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v + +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesTable(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) + req := make([]interface{}, 0, len(l)) for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api + if raw == nil { continue } - transformed = append(transformed, map[string]interface{}{ - "column": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config), - "data_type": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["dataType"], d, config), - "length": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config), - "collation": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config), - "primary_key": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primaryKey"], d, config), - "nullable": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config), - "ordinal_position": flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinalPosition"], d, config), - }) + 
original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedColumn, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(original["column"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !isEmptyValue(val) { + transformed["column"] = transformedColumn + } + + transformedDataType, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(original["data_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { + transformed["dataType"] = transformedDataType + } + + transformedLength, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsLength(original["length"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !isEmptyValue(val) { + transformed["length"] = transformedLength + } + + transformedPrecision, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(original["precision"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !isEmptyValue(val) { + transformed["precision"] = transformedPrecision + } + + transformedScale, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsScale(original["scale"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !isEmptyValue(val) { + transformed["scale"] = transformedScale + } + + transformedEncoding, err := 
expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(original["encoding"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEncoding); val.IsValid() && !isEmptyValue(val) { + transformed["encoding"] = transformedEncoding + } + + transformedPrimaryKey, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(original["primary_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !isEmptyValue(val) { + transformed["primaryKey"] = transformedPrimaryKey + } + + transformedNullable, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(original["nullable"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !isEmptyValue(val) { + transformed["nullable"] = transformedNullable + } + + transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(original["ordinal_position"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !isEmptyValue(val) { + transformed["ordinalPosition"] = transformedOrdinalPosition + } + + req = append(req, transformed) } - return transformed + return req, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v + +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func 
flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := stringToFixed64(strVal); err == nil { - return intVal - } - } +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - return v // let terraform core handle it otherwise +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } 
-func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := stringToFixed64(strVal); err == nil { - return intVal - } - } +func expandDatastreamStreamSourceConfigOracleSourceConfigExcludeObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } +func expandDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentCdcTasks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - return v // let terraform core handle it otherwise +func expandDatastreamStreamSourceConfigOracleSourceConfigMaxConcurrentBackfillTasks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func 
flattenDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentCdcTasks(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := stringToFixed64(strVal); err == nil { - return intVal - } +func expandDatastreamStreamSourceConfigOracleSourceConfigDropLargeObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil } - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil } + transformed := make(map[string]interface{}) - return v // let terraform core handle it otherwise + return transformed, nil } -func flattenDatastreamStreamDestinationConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil +func expandDatastreamStreamSourceConfigOracleSourceConfigStreamLargeObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil } transformed := make(map[string]interface{}) - transformed["destination_connection_profile"] = - flattenDatastreamStreamDestinationConfigDestinationConnectionProfile(original["destinationConnectionProfile"], d, config) - transformed["gcs_destination_config"] = - flattenDatastreamStreamDestinationConfigGcsDestinationConfig(original["gcsDestinationConfig"], d, config) - transformed["bigquery_destination_config"] = - flattenDatastreamStreamDestinationConfigBigqueryDestinationConfig(original["bigqueryDestinationConfig"], d, config) - return []interface{}{transformed} -} -func 
flattenDatastreamStreamDestinationConfigDestinationConnectionProfile(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v + + return transformed, nil } -func flattenDatastreamStreamDestinationConfigGcsDestinationConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil +func expandDatastreamStreamSourceConfigPostgresqlSourceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil } + raw := l[0] + original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformed["path"] = - flattenDatastreamStreamDestinationConfigGcsDestinationConfigPath(original["path"], d, config) - transformed["file_rotation_mb"] = - flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationMb(original["fileRotationMb"], d, config) - transformed["file_rotation_interval"] = - flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationInterval(original["fileRotationInterval"], d, config) - transformed["avro_file_format"] = - flattenDatastreamStreamDestinationConfigGcsDestinationConfigAvroFileFormat(original["avroFileFormat"], d, config) - transformed["json_file_format"] = - flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat(original["jsonFileFormat"], d, config) - return []interface{}{transformed} -} -func flattenDatastreamStreamDestinationConfigGcsDestinationConfigPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v -} -func flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationMb(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if 
intVal, err := stringToFixed64(strVal); err == nil { - return intVal - } + transformedIncludeObjects, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjects(original["include_objects"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIncludeObjects); val.IsValid() && !isEmptyValue(val) { + transformed["includeObjects"] = transformedIncludeObjects + } + + transformedExcludeObjects, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjects(original["exclude_objects"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExcludeObjects); val.IsValid() && !isEmptyValue(val) { + transformed["excludeObjects"] = transformedExcludeObjects + } + + transformedReplicationSlot, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigReplicationSlot(original["replication_slot"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReplicationSlot); val.IsValid() && !isEmptyValue(val) { + transformed["replicationSlot"] = transformedReplicationSlot } - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal + transformedPublication, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigPublication(original["publication"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPublication); val.IsValid() && !isEmptyValue(val) { + transformed["publication"] = transformedPublication } - return v // let terraform core handle it otherwise -} + transformedMaxConcurrentBackfillTasks, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigMaxConcurrentBackfillTasks(original["max_concurrent_backfill_tasks"], d, config) + if err != nil { + return nil, err + } else { + transformed["maxConcurrentBackfillTasks"] = transformedMaxConcurrentBackfillTasks + } -func 
flattenDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationInterval(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v + return transformed, nil } -func flattenDatastreamStreamDestinationConfigGcsDestinationConfigAvroFileFormat(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil } + raw := l[0] + original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - return []interface{}{transformed} -} -func flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil + transformedPostgresqlSchemas, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemas(original["postgresql_schemas"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPostgresqlSchemas); val.IsValid() && !isEmptyValue(val) { + transformed["postgresqlSchemas"] = transformedPostgresqlSchemas } - transformed := make(map[string]interface{}) - transformed["schema_file_format"] = - flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatSchemaFileFormat(original["schemaFileFormat"], d, config) - transformed["compression"] = - flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatCompression(original["compression"], d, config) - return []interface{}{transformed} + + return transformed, nil } -func flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatSchemaFileFormat(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v 
+ +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSchema, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasSchema(original["schema"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !isEmptyValue(val) { + transformed["schema"] = transformedSchema + } + + transformedPostgresqlTables, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTables(original["postgresql_tables"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPostgresqlTables); val.IsValid() && !isEmptyValue(val) { + transformed["postgresqlTables"] = transformedPostgresqlTables + } + + req = append(req, transformed) + } + return req, nil } -func flattenDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatCompression(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfig(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := 
v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTable, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesTable(original["table"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { + transformed["table"] = transformedTable + } + + transformedPostgresqlColumns, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(original["postgresql_columns"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPostgresqlColumns); val.IsValid() && !isEmptyValue(val) { + transformed["postgresqlColumns"] = transformedPostgresqlColumns + } + + req = append(req, transformed) } - transformed := make(map[string]interface{}) - transformed["data_freshness"] = - flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigDataFreshness(original["dataFreshness"], d, config) - transformed["single_target_dataset"] = - flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDataset(original["singleTargetDataset"], d, config) - transformed["source_hierarchy_datasets"] = - flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets(original["sourceHierarchyDatasets"], d, config) - return []interface{}{transformed} + return req, nil } -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigDataFreshness(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v + +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func 
flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDataset(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedColumn, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(original["column"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !isEmptyValue(val) { + transformed["column"] = transformedColumn + } + + transformedDataType, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(original["data_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { + transformed["dataType"] = transformedDataType + } + + transformedLength, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(original["length"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !isEmptyValue(val) { + transformed["length"] = transformedLength + } + + transformedPrecision, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(original["precision"], d, config) + if err 
!= nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !isEmptyValue(val) { + transformed["precision"] = transformedPrecision + } + + transformedScale, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(original["scale"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !isEmptyValue(val) { + transformed["scale"] = transformedScale + } + + transformedPrimaryKey, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(original["primary_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !isEmptyValue(val) { + transformed["primaryKey"] = transformedPrimaryKey + } + + transformedNullable, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(original["nullable"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !isEmptyValue(val) { + transformed["nullable"] = transformedNullable + } + + transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(original["ordinal_position"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !isEmptyValue(val) { + transformed["ordinalPosition"] = transformedOrdinalPosition + } + + req = append(req, transformed) } - transformed := make(map[string]interface{}) - transformed["dataset_id"] = - flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetDatasetId(original["datasetId"], d, config) - return 
[]interface{}{transformed} + return req, nil } -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetDatasetId(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v + +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dataset_template"] = - flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate(original["datasetTemplate"], d, config) - return []interface{}{transformed} +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["location"] = - flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateLocation(original["location"], d, config) - transformed["dataset_id_prefix"] = - flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateDatasetIdPrefix(original["datasetIdPrefix"], d, config) - transformed["kms_key_name"] = - 
flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateKmsKeyName(original["kmsKeyName"], d, config) - return []interface{}{transformed} + +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateLocation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v + +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateDatasetIdPrefix(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateKmsKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamState(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamBackfillAll(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["mysql_excluded_objects"] = - flattenDatastreamStreamBackfillAllMysqlExcludedObjects(original["mysqlExcludedObjects"], d, config) - return []interface{}{transformed} +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigIncludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjects(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil + +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil } + raw := l[0] + original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformed["mysql_databases"] = - flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(original["mysqlDatabases"], d, config) - return []interface{}{transformed} -} -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v + + transformedPostgresqlSchemas, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemas(original["postgresql_schemas"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPostgresqlSchemas); val.IsValid() && !isEmptyValue(val) { + 
transformed["postgresqlSchemas"] = transformedPostgresqlSchemas } + + return transformed, nil +} + +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) + req := make([]interface{}, 0, len(l)) for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api + if raw == nil { continue } - transformed = append(transformed, map[string]interface{}{ - "database": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesDatabase(original["database"], d, config), - "mysql_tables": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTables(original["mysqlTables"], d, config), - }) + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSchema, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasSchema(original["schema"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !isEmptyValue(val) { + transformed["schema"] = transformedSchema + } + + transformedPostgresqlTables, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTables(original["postgresql_tables"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPostgresqlTables); val.IsValid() && !isEmptyValue(val) { + transformed["postgresqlTables"] = transformedPostgresqlTables + } + + req = append(req, transformed) } - return transformed + return req, nil } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesDatabase(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v + +func 
expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTables(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) + req := make([]interface{}, 0, len(l)) for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api + if raw == nil { continue } - transformed = append(transformed, map[string]interface{}{ - "table": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config), - "mysql_columns": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysqlColumns"], d, config), - }) + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTable, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesTable(original["table"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { + transformed["table"] = transformedTable + } + + transformedPostgresqlColumns, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(original["postgresql_columns"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPostgresqlColumns); val.IsValid() && !isEmptyValue(val) { + 
transformed["postgresqlColumns"] = transformedPostgresqlColumns + } + + req = append(req, transformed) } - return transformed + return req, nil } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v + +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d *schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) + req := make([]interface{}, 0, len(l)) for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - // Do not include empty json objects coming back from the api + if raw == nil { continue } - transformed = append(transformed, map[string]interface{}{ - "column": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config), - "data_type": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["dataType"], d, config), - "length": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config), - "collation": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config), - "primary_key": 
flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primaryKey"], d, config), - "nullable": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config), - "ordinal_position": flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinalPosition"], d, config), - }) + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedColumn, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(original["column"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !isEmptyValue(val) { + transformed["column"] = transformedColumn + } + + transformedDataType, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(original["data_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { + transformed["dataType"] = transformedDataType + } + + transformedLength, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(original["length"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !isEmptyValue(val) { + transformed["length"] = transformedLength + } + + transformedPrecision, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(original["precision"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !isEmptyValue(val) { + 
transformed["precision"] = transformedPrecision + } + + transformedScale, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(original["scale"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !isEmptyValue(val) { + transformed["scale"] = transformedScale + } + + transformedPrimaryKey, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(original["primary_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !isEmptyValue(val) { + transformed["primaryKey"] = transformedPrimaryKey + } + + transformedNullable, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(original["nullable"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !isEmptyValue(val) { + transformed["nullable"] = transformedNullable + } + + transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(original["ordinal_position"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !isEmptyValue(val) { + transformed["ordinalPosition"] = transformedOrdinalPosition + } + + req = append(req, transformed) } - return transformed + return req, nil } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v + +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := stringToFixed64(strVal); err == nil { - return intVal - } - } +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - return v // let terraform core handle it otherwise +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func 
expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigExcludeObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigReplicationSlot(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigPublication(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamSourceConfigPostgresqlSourceConfigMaxConcurrentBackfillTasks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamDestinationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedDestinationConnectionProfile, err := 
expandDatastreamStreamDestinationConfigDestinationConnectionProfile(original["destination_connection_profile"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDestinationConnectionProfile); val.IsValid() && !isEmptyValue(val) { + transformed["destinationConnectionProfile"] = transformedDestinationConnectionProfile + } + + transformedGcsDestinationConfig, err := expandDatastreamStreamDestinationConfigGcsDestinationConfig(original["gcs_destination_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedGcsDestinationConfig); val.IsValid() && !isEmptyValue(val) { + transformed["gcsDestinationConfig"] = transformedGcsDestinationConfig + } + + transformedBigqueryDestinationConfig, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfig(original["bigquery_destination_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBigqueryDestinationConfig); val.IsValid() && !isEmptyValue(val) { + transformed["bigqueryDestinationConfig"] = transformedBigqueryDestinationConfig + } + + return transformed, nil +} + +func expandDatastreamStreamDestinationConfigDestinationConnectionProfile(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d *schema.ResourceData, config *Config) interface{} { - // Handles the string fixed64 format - if strVal, ok := v.(string); ok { - if intVal, err := stringToFixed64(strVal); err == nil { - return intVal - } +func expandDatastreamStreamDestinationConfigGcsDestinationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := 
make(map[string]interface{}) + + transformedPath, err := expandDatastreamStreamDestinationConfigGcsDestinationConfigPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { + transformed["path"] = transformedPath + } + + transformedFileRotationMb, err := expandDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationMb(original["file_rotation_mb"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFileRotationMb); val.IsValid() && !isEmptyValue(val) { + transformed["fileRotationMb"] = transformedFileRotationMb + } + + transformedFileRotationInterval, err := expandDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationInterval(original["file_rotation_interval"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFileRotationInterval); val.IsValid() && !isEmptyValue(val) { + transformed["fileRotationInterval"] = transformedFileRotationInterval + } + + transformedAvroFileFormat, err := expandDatastreamStreamDestinationConfigGcsDestinationConfigAvroFileFormat(original["avro_file_format"], d, config) + if err != nil { + return nil, err + } else { + transformed["avroFileFormat"] = transformedAvroFileFormat } - // number values are represented as float64 - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal + transformedJsonFileFormat, err := expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat(original["json_file_format"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedJsonFileFormat); val.IsValid() && !isEmptyValue(val) { + transformed["jsonFileFormat"] = transformedJsonFileFormat } - return v // let terraform core handle it otherwise + return transformed, nil } -func flattenDatastreamStreamBackfillNone(v interface{}, d *schema.ResourceData, config *Config) interface{} { - 
if v == nil { - return nil - } - transformed := make(map[string]interface{}) - return []interface{}{transformed} +func expandDatastreamStreamDestinationConfigGcsDestinationConfigPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func flattenDatastreamStreamCustomerManagedEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { - return v +func expandDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationMb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func expandDatastreamStreamLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil +func expandDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationInterval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamDestinationConfigGcsDestinationConfigAvroFileFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil } - return m, nil -} + transformed := make(map[string]interface{}) -func expandDatastreamStreamDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil + return transformed, nil } -func expandDatastreamStreamSourceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1729,66 +5004,69 @@ func 
expandDatastreamStreamSourceConfig(v interface{}, d TerraformResourceData, original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedSourceConnectionProfile, err := expandDatastreamStreamSourceConfigSourceConnectionProfile(original["source_connection_profile"], d, config) + transformedSchemaFileFormat, err := expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatSchemaFileFormat(original["schema_file_format"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedSourceConnectionProfile); val.IsValid() && !isEmptyValue(val) { - transformed["sourceConnectionProfile"] = transformedSourceConnectionProfile + } else if val := reflect.ValueOf(transformedSchemaFileFormat); val.IsValid() && !isEmptyValue(val) { + transformed["schemaFileFormat"] = transformedSchemaFileFormat } - transformedMysqlSourceConfig, err := expandDatastreamStreamSourceConfigMysqlSourceConfig(original["mysql_source_config"], d, config) + transformedCompression, err := expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatCompression(original["compression"], d, config) if err != nil { return nil, err - } else { - transformed["mysqlSourceConfig"] = transformedMysqlSourceConfig + } else if val := reflect.ValueOf(transformedCompression); val.IsValid() && !isEmptyValue(val) { + transformed["compression"] = transformedCompression } return transformed, nil } -func expandDatastreamStreamSourceConfigSourceConnectionProfile(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatSchemaFileFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatCompression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) - if len(l) == 0 { + if len(l) == 0 || l[0] == nil { return nil, nil } - - if l[0] == nil { - transformed := make(map[string]interface{}) - return transformed, nil - } raw := l[0] original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedIncludeObjects, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(original["include_objects"], d, config) + transformedDataFreshness, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigDataFreshness(original["data_freshness"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedIncludeObjects); val.IsValid() && !isEmptyValue(val) { - transformed["includeObjects"] = transformedIncludeObjects + } else if val := reflect.ValueOf(transformedDataFreshness); val.IsValid() && !isEmptyValue(val) { + transformed["dataFreshness"] = transformedDataFreshness } - transformedExcludeObjects, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(original["exclude_objects"], d, config) + transformedSingleTargetDataset, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDataset(original["single_target_dataset"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedExcludeObjects); val.IsValid() && !isEmptyValue(val) { - transformed["excludeObjects"] = transformedExcludeObjects + } else if val := reflect.ValueOf(transformedSingleTargetDataset); val.IsValid() && !isEmptyValue(val) { + transformed["singleTargetDataset"] = transformedSingleTargetDataset } - transformedMaxConcurrentCdcTasks, err := 
expandDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentCdcTasks(original["max_concurrent_cdc_tasks"], d, config) + transformedSourceHierarchyDatasets, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets(original["source_hierarchy_datasets"], d, config) if err != nil { return nil, err - } else { - transformed["maxConcurrentCdcTasks"] = transformedMaxConcurrentCdcTasks + } else if val := reflect.ValueOf(transformedSourceHierarchyDatasets); val.IsValid() && !isEmptyValue(val) { + transformed["sourceHierarchyDatasets"] = transformedSourceHierarchyDatasets } return transformed, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigDataFreshness(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDataset(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1797,175 +5075,123 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjects(v interfa original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedMysqlDatabases, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabases(original["mysql_databases"], d, config) + transformedDatasetId, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetDatasetId(original["dataset_id"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMysqlDatabases); val.IsValid() && !isEmptyValue(val) { - transformed["mysqlDatabases"] = transformedMysqlDatabases + } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && 
!isEmptyValue(val) { + transformed["datasetId"] = transformedDatasetId } return transformed, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabases(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDatabase, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesDatabase(original["database"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !isEmptyValue(val) { - transformed["database"] = transformedDatabase - } - - transformedMysqlTables, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTables(original["mysql_tables"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMysqlTables); val.IsValid() && !isEmptyValue(val) { - transformed["mysqlTables"] = transformedMysqlTables - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesDatabase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) - req := make([]interface{}, 
0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { - transformed["table"] = transformedTable - } - - transformedMysqlColumns, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysql_columns"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMysqlColumns); val.IsValid() && !isEmptyValue(val) { - transformed["mysqlColumns"] = transformedMysqlColumns - } + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) - req = append(req, transformed) + transformedDatasetTemplate, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate(original["dataset_template"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatasetTemplate); val.IsValid() && !isEmptyValue(val) { + transformed["datasetTemplate"] = transformedDatasetTemplate } - return req, nil -} -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil + return transformed, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedColumn, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !isEmptyValue(val) { - transformed["column"] = transformedColumn - } - - transformedDataType, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["data_type"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { - transformed["dataType"] = transformedDataType - } - - transformedLength, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !isEmptyValue(val) { - transformed["length"] = transformedLength - } - - transformedCollation, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCollation); val.IsValid() && !isEmptyValue(val) { - transformed["collation"] = transformedCollation - } - - transformedPrimaryKey, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primary_key"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && 
!isEmptyValue(val) { - transformed["primaryKey"] = transformedPrimaryKey - } + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) - transformedNullable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !isEmptyValue(val) { - transformed["nullable"] = transformedNullable - } + transformedLocation, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateLocation(original["location"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { + transformed["location"] = transformedLocation + } - transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinal_position"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !isEmptyValue(val) { - transformed["ordinalPosition"] = transformedOrdinalPosition - } + transformedDatasetIdPrefix, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateDatasetIdPrefix(original["dataset_id_prefix"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatasetIdPrefix); val.IsValid() && !isEmptyValue(val) { + transformed["datasetIdPrefix"] = transformedDatasetIdPrefix + } - req = append(req, transformed) + transformedKmsKeyName, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateKmsKeyName(original["kms_key_name"], d, config) + if err != nil { + return nil, 
err + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeyName } - return req, nil -} -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil + return transformed, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateDatasetIdPrefix(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} +func expandDatastreamStreamBackfillAll(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := 
v.([]interface{}) + if len(l) == 0 { + return nil, nil + } -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) -func expandDatastreamStreamSourceConfigMysqlSourceConfigIncludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil + transformedMysqlExcludedObjects, err := expandDatastreamStreamBackfillAllMysqlExcludedObjects(original["mysql_excluded_objects"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMysqlExcludedObjects); val.IsValid() && !isEmptyValue(val) { + transformed["mysqlExcludedObjects"] = transformedMysqlExcludedObjects + } + + transformedPostgresqlExcludedObjects, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjects(original["postgresql_excluded_objects"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPostgresqlExcludedObjects); val.IsValid() && !isEmptyValue(val) { + transformed["postgresqlExcludedObjects"] = transformedPostgresqlExcludedObjects + } + + transformedOracleExcludedObjects, err := expandDatastreamStreamBackfillAllOracleExcludedObjects(original["oracle_excluded_objects"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOracleExcludedObjects); val.IsValid() && !isEmptyValue(val) { + transformed["oracleExcludedObjects"] = transformedOracleExcludedObjects + } + + return transformed, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { 
+func expandDatastreamStreamBackfillAllMysqlExcludedObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -1974,7 +5200,7 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(v interfa original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedMysqlDatabases, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabases(original["mysql_databases"], d, config) + transformedMysqlDatabases, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(original["mysql_databases"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedMysqlDatabases); val.IsValid() && !isEmptyValue(val) { @@ -1984,7 +5210,7 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjects(v interfa return transformed, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabases(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -1994,14 +5220,14 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatab original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedDatabase, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesDatabase(original["database"], d, config) + transformedDatabase, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesDatabase(original["database"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !isEmptyValue(val) { transformed["database"] = 
transformedDatabase } - transformedMysqlTables, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTables(original["mysql_tables"], d, config) + transformedMysqlTables, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTables(original["mysql_tables"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedMysqlTables); val.IsValid() && !isEmptyValue(val) { @@ -2013,11 +5239,11 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatab return req, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesDatabase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesDatabase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2027,14 +5253,14 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatab original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedTable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config) + transformedTable, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { transformed["table"] = 
transformedTable } - transformedMysqlColumns, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysql_columns"], d, config) + transformedMysqlColumns, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysql_columns"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedMysqlColumns); val.IsValid() && !isEmptyValue(val) { @@ -2046,11 +5272,11 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatab return req, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2060,49 +5286,49 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatab original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedColumn, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config) + transformedColumn, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config) if err != nil { return nil, err } else if val := 
reflect.ValueOf(transformedColumn); val.IsValid() && !isEmptyValue(val) { transformed["column"] = transformedColumn } - transformedDataType, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["data_type"], d, config) + transformedDataType, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["data_type"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { transformed["dataType"] = transformedDataType } - transformedLength, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config) + transformedLength, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !isEmptyValue(val) { transformed["length"] = transformedLength } - transformedCollation, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config) + transformedCollation, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedCollation); val.IsValid() && !isEmptyValue(val) { transformed["collation"] = transformedCollation } - transformedPrimaryKey, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primary_key"], d, config) + transformedPrimaryKey, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primary_key"], d, config) if err != 
nil { return nil, err } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !isEmptyValue(val) { transformed["primaryKey"] = transformedPrimaryKey } - transformedNullable, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config) + transformedNullable, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !isEmptyValue(val) { transformed["nullable"] = transformedNullable } - transformedOrdinalPosition, err := expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinal_position"], d, config) + transformedOrdinalPosition, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinal_position"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !isEmptyValue(val) { @@ -2114,184 +5340,35 @@ func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatab return req, nil } -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDatastreamStreamSourceConfigMysqlSourceConfigExcludeObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDatastreamStreamSourceConfigMysqlSourceConfigMaxConcurrentCdcTasks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDatastreamStreamDestinationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDestinationConnectionProfile, err := expandDatastreamStreamDestinationConfigDestinationConnectionProfile(original["destination_connection_profile"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDestinationConnectionProfile); val.IsValid() && !isEmptyValue(val) { - transformed["destinationConnectionProfile"] = transformedDestinationConnectionProfile - } - - transformedGcsDestinationConfig, err := expandDatastreamStreamDestinationConfigGcsDestinationConfig(original["gcs_destination_config"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedGcsDestinationConfig); val.IsValid() && !isEmptyValue(val) { - transformed["gcsDestinationConfig"] = transformedGcsDestinationConfig - } - - transformedBigqueryDestinationConfig, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfig(original["bigquery_destination_config"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedBigqueryDestinationConfig); val.IsValid() && !isEmptyValue(val) { - transformed["bigqueryDestinationConfig"] = transformedBigqueryDestinationConfig - } - - return transformed, nil -} - -func expandDatastreamStreamDestinationConfigDestinationConnectionProfile(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDatastreamStreamDestinationConfigGcsDestinationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPath, err := expandDatastreamStreamDestinationConfigGcsDestinationConfigPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - transformedFileRotationMb, err := expandDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationMb(original["file_rotation_mb"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedFileRotationMb); val.IsValid() && !isEmptyValue(val) { - transformed["fileRotationMb"] = transformedFileRotationMb - } - - transformedFileRotationInterval, err := expandDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationInterval(original["file_rotation_interval"], d, config) - if err != nil { - return nil, err - } else if val := 
reflect.ValueOf(transformedFileRotationInterval); val.IsValid() && !isEmptyValue(val) { - transformed["fileRotationInterval"] = transformedFileRotationInterval - } - - transformedAvroFileFormat, err := expandDatastreamStreamDestinationConfigGcsDestinationConfigAvroFileFormat(original["avro_file_format"], d, config) - if err != nil { - return nil, err - } else { - transformed["avroFileFormat"] = transformedAvroFileFormat - } - - transformedJsonFileFormat, err := expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat(original["json_file_format"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedJsonFileFormat); val.IsValid() && !isEmptyValue(val) { - transformed["jsonFileFormat"] = transformedJsonFileFormat - } - - return transformed, nil -} - -func expandDatastreamStreamDestinationConfigGcsDestinationConfigPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationMb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamDestinationConfigGcsDestinationConfigFileRotationInterval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamDestinationConfigGcsDestinationConfigAvroFileFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - - if l[0] == nil { - transformed := make(map[string]interface{}) 
- return transformed, nil - } - transformed := make(map[string]interface{}) - - return transformed, nil -} - -func expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSchemaFileFormat, err := expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatSchemaFileFormat(original["schema_file_format"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSchemaFileFormat); val.IsValid() && !isEmptyValue(val) { - transformed["schemaFileFormat"] = transformedSchemaFileFormat - } - - transformedCompression, err := expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatCompression(original["compression"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedCompression); val.IsValid() && !isEmptyValue(val) { - transformed["compression"] = transformedCompression - } +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - return transformed, nil +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatSchemaFileFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func 
expandDatastreamStreamDestinationConfigGcsDestinationConfigJsonFileFormatCompression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamDestinationConfigBigqueryDestinationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2300,146 +5377,186 @@ func expandDatastreamStreamDestinationConfigBigqueryDestinationConfig(v interfac original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedDataFreshness, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigDataFreshness(original["data_freshness"], d, config) + transformedPostgresqlSchemas, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas(original["postgresql_schemas"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDataFreshness); val.IsValid() && !isEmptyValue(val) { - transformed["dataFreshness"] = transformedDataFreshness + } else if val := reflect.ValueOf(transformedPostgresqlSchemas); val.IsValid() && !isEmptyValue(val) { + transformed["postgresqlSchemas"] = transformedPostgresqlSchemas } - transformedSingleTargetDataset, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDataset(original["single_target_dataset"], d, config) - if err != nil { - 
return nil, err - } else if val := reflect.ValueOf(transformedSingleTargetDataset); val.IsValid() && !isEmptyValue(val) { - transformed["singleTargetDataset"] = transformedSingleTargetDataset - } + return transformed, nil +} - transformedSourceHierarchyDatasets, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets(original["source_hierarchy_datasets"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedSourceHierarchyDatasets); val.IsValid() && !isEmptyValue(val) { - transformed["sourceHierarchyDatasets"] = transformedSourceHierarchyDatasets - } +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) - return transformed, nil + transformedSchema, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasSchema(original["schema"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !isEmptyValue(val) { + transformed["schema"] = transformedSchema + } + + transformedPostgresqlTables, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTables(original["postgresql_tables"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPostgresqlTables); val.IsValid() && !isEmptyValue(val) { + transformed["postgresqlTables"] = transformedPostgresqlTables + } + + req = append(req, transformed) + } + return req, nil } -func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigDataFreshness(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func 
expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDataset(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) - transformedDatasetId, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetDatasetId(original["dataset_id"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } + transformedTable, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesTable(original["table"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { + transformed["table"] = transformedTable + } - return transformed, nil + transformedPostgresqlColumns, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(original["postgresql_columns"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPostgresqlColumns); val.IsValid() && !isEmptyValue(val) { + transformed["postgresqlColumns"] = transformedPostgresqlColumns + } + + 
req = append(req, transformed) + } + return req, nil } -func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSingleTargetDatasetDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) - transformedDatasetTemplate, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate(original["dataset_template"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedDatasetTemplate); val.IsValid() && !isEmptyValue(val) { - transformed["datasetTemplate"] = transformedDatasetTemplate - } + transformedColumn, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(original["column"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !isEmptyValue(val) { + transformed["column"] = transformedColumn + } - return transformed, nil -} + transformedDataType, err := 
expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(original["data_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { + transformed["dataType"] = transformedDataType + } -func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) + transformedLength, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(original["length"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !isEmptyValue(val) { + transformed["length"] = transformedLength + } - transformedLocation, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateLocation(original["location"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { - transformed["location"] = transformedLocation - } + transformedPrecision, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(original["precision"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !isEmptyValue(val) { + transformed["precision"] = transformedPrecision + } - transformedDatasetIdPrefix, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateDatasetIdPrefix(original["dataset_id_prefix"], d, config) - if err != nil { - 
return nil, err - } else if val := reflect.ValueOf(transformedDatasetIdPrefix); val.IsValid() && !isEmptyValue(val) { - transformed["datasetIdPrefix"] = transformedDatasetIdPrefix - } + transformedScale, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(original["scale"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !isEmptyValue(val) { + transformed["scale"] = transformedScale + } - transformedKmsKeyName, err := expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateKmsKeyName(original["kms_key_name"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeyName + transformedPrimaryKey, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(original["primary_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !isEmptyValue(val) { + transformed["primaryKey"] = transformedPrimaryKey + } + + transformedNullable, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(original["nullable"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !isEmptyValue(val) { + transformed["nullable"] = transformedNullable + } + + transformedOrdinalPosition, err := expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(original["ordinal_position"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && !isEmptyValue(val) { + 
transformed["ordinalPosition"] = transformedOrdinalPosition + } + + req = append(req, transformed) } + return req, nil +} - return transformed, nil +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateDatasetIdPrefix(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamDestinationConfigBigqueryDestinationConfigSourceHierarchyDatasetsDatasetTemplateKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrecision(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAll(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsScale(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { 
+ return v, nil +} - if l[0] == nil { - transformed := make(map[string]interface{}) - return transformed, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - transformedMysqlExcludedObjects, err := expandDatastreamStreamBackfillAllMysqlExcludedObjects(original["mysql_excluded_objects"], d, config) - if err != nil { - return nil, err - } else if val := reflect.ValueOf(transformedMysqlExcludedObjects); val.IsValid() && !isEmptyValue(val) { - transformed["mysqlExcludedObjects"] = transformedMysqlExcludedObjects - } +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} - return transformed, nil +func expandDatastreamStreamBackfillAllPostgresqlExcludedObjectsPostgresqlSchemasPostgresqlTablesPostgresqlColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { return nil, nil @@ -2448,17 +5565,17 @@ func expandDatastreamStreamBackfillAllMysqlExcludedObjects(v interface{}, d Terr original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedMysqlDatabases, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(original["mysql_databases"], d, config) + transformedOracleSchemas, err := 
expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemas(original["oracle_schemas"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMysqlDatabases); val.IsValid() && !isEmptyValue(val) { - transformed["mysqlDatabases"] = transformedMysqlDatabases + } else if val := reflect.ValueOf(transformedOracleSchemas); val.IsValid() && !isEmptyValue(val) { + transformed["oracleSchemas"] = transformedOracleSchemas } return transformed, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2468,18 +5585,18 @@ func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(v inter original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedDatabase, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesDatabase(original["database"], d, config) + transformedSchema, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasSchema(original["schema"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedDatabase); val.IsValid() && !isEmptyValue(val) { - transformed["database"] = transformedDatabase + } else if val := reflect.ValueOf(transformedSchema); val.IsValid() && !isEmptyValue(val) { + transformed["schema"] = transformedSchema } - transformedMysqlTables, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTables(original["mysql_tables"], d, config) + transformedOracleTables, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTables(original["oracle_tables"], d, config) if err != nil { return nil, err - } else if 
val := reflect.ValueOf(transformedMysqlTables); val.IsValid() && !isEmptyValue(val) { - transformed["mysqlTables"] = transformedMysqlTables + } else if val := reflect.ValueOf(transformedOracleTables); val.IsValid() && !isEmptyValue(val) { + transformed["oracleTables"] = transformedOracleTables } req = append(req, transformed) @@ -2487,11 +5604,11 @@ func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabases(v inter return req, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesDatabase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2501,18 +5618,18 @@ func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTab original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedTable, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesTable(original["table"], d, config) + transformedTable, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesTable(original["table"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { transformed["table"] = transformedTable } - transformedMysqlColumns, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumns(original["mysql_columns"], d, 
config) + transformedOracleColumns, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumns(original["oracle_columns"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedMysqlColumns); val.IsValid() && !isEmptyValue(val) { - transformed["mysqlColumns"] = transformedMysqlColumns + } else if val := reflect.ValueOf(transformedOracleColumns); val.IsValid() && !isEmptyValue(val) { + transformed["oracleColumns"] = transformedOracleColumns } req = append(req, transformed) @@ -2520,11 +5637,11 @@ func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTab return req, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) for _, raw := range l { @@ -2534,49 +5651,63 @@ func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTab original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - transformedColumn, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(original["column"], d, config) + transformedColumn, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsColumn(original["column"], d, config) if err != nil { return 
nil, err } else if val := reflect.ValueOf(transformedColumn); val.IsValid() && !isEmptyValue(val) { transformed["column"] = transformedColumn } - transformedDataType, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(original["data_type"], d, config) + transformedDataType, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsDataType(original["data_type"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { transformed["dataType"] = transformedDataType } - transformedLength, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(original["length"], d, config) + transformedLength, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsLength(original["length"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedLength); val.IsValid() && !isEmptyValue(val) { transformed["length"] = transformedLength } - transformedCollation, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(original["collation"], d, config) + transformedPrecision, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrecision(original["precision"], d, config) if err != nil { return nil, err - } else if val := reflect.ValueOf(transformedCollation); val.IsValid() && !isEmptyValue(val) { - transformed["collation"] = transformedCollation + } else if val := reflect.ValueOf(transformedPrecision); val.IsValid() && !isEmptyValue(val) { + transformed["precision"] = transformedPrecision } - transformedPrimaryKey, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(original["primary_key"], d, config) + transformedScale, err := 
expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsScale(original["scale"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScale); val.IsValid() && !isEmptyValue(val) { + transformed["scale"] = transformedScale + } + + transformedEncoding, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsEncoding(original["encoding"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEncoding); val.IsValid() && !isEmptyValue(val) { + transformed["encoding"] = transformedEncoding + } + + transformedPrimaryKey, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(original["primary_key"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedPrimaryKey); val.IsValid() && !isEmptyValue(val) { transformed["primaryKey"] = transformedPrimaryKey } - transformedNullable, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(original["nullable"], d, config) + transformedNullable, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsNullable(original["nullable"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedNullable); val.IsValid() && !isEmptyValue(val) { transformed["nullable"] = transformedNullable } - transformedOrdinalPosition, err := expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(original["ordinal_position"], d, config) + transformedOrdinalPosition, err := expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(original["ordinal_position"], d, config) if err != nil { return nil, err } else if val := reflect.ValueOf(transformedOrdinalPosition); val.IsValid() && 
!isEmptyValue(val) { @@ -2588,31 +5719,39 @@ func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTab return req, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsCollation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrecision(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsScale(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsEncoding(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } -func expandDatastreamStreamBackfillAllMysqlExcludedObjectsMysqlDatabasesMysqlTablesMysqlColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsPrimaryKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsNullable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandDatastreamStreamBackfillAllOracleExcludedObjectsOracleSchemasOracleTablesOracleColumnsOrdinalPosition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { return v, nil } diff --git a/google/resource_datastream_stream_generated_test.go b/google/resource_datastream_stream_generated_test.go index ed13b15d6dc..e1d3251ed97 100644 --- a/google/resource_datastream_stream_generated_test.go +++ b/google/resource_datastream_stream_generated_test.go @@ -189,7 +189,6 @@ func TestAccDatastreamStream_datastreamStreamFullExample(t *testing.T) { context := map[string]interface{}{ "deletion_protection": false, - "stream_cmek": BootstrapKMSKeyInLocation(t, "us-central1").CryptoKey.Name, "random_suffix": randString(t, 10), } @@ -313,7 +312,7 @@ resource "google_storage_bucket_iam_member" "reader" { } resource "google_kms_crypto_key_iam_member" "key_user" { - crypto_key_id = "%{stream_cmek}" + 
crypto_key_id = "tf-test-kms-name%{random_suffix}" role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-datastream.iam.gserviceaccount.com" } @@ -410,7 +409,7 @@ resource "google_datastream_stream" "default" { } } - customer_managed_encryption_key = "%{stream_cmek}" + customer_managed_encryption_key = "tf-test-kms-name%{random_suffix}" } `, context) } diff --git a/website/docs/r/datastream_stream.html.markdown b/website/docs/r/datastream_stream.html.markdown index 448c1d098a0..90b04f05dde 100644 --- a/website/docs/r/datastream_stream.html.markdown +++ b/website/docs/r/datastream_stream.html.markdown @@ -233,6 +233,186 @@ resource "google_datastream_stream" "default" { customer_managed_encryption_key = "kms-name" } ``` +## Example Usage - Datastream Stream Postgresql + + +```hcl +resource "google_datastream_connection_profile" "source" { + display_name = "Postgresql Source" + location = "us-central1" + connection_profile_id = "source-profile" + + postgresql_profile { + hostname = "hostname" + port = 3306 + username = "user" + password = "pass" + database = "postgres" + } +} + +resource "google_datastream_connection_profile" "destination" { + display_name = "BigQuery Destination" + location = "us-central1" + connection_profile_id = "destination-profile" + + bigquery_profile {} +} + +resource "google_datastream_stream" "default" { + display_name = "Postgres to BigQuery" + location = "us-central1" + stream_id = "my-stream" + desired_state = "RUNNING" + + source_config { + source_connection_profile = google_datastream_connection_profile.source.id + postgresql_source_config { + max_concurrent_backfill_tasks = 12 + publication = "publication" + replication_slot = "replication_slot" + include_objects { + postgresql_schemas { + schema = "schema" + postgresql_tables { + table = "table" + postgresql_columns { + column = "column" + } + } + } + } + exclude_objects { + postgresql_schemas { + schema = 
"schema" + postgresql_tables { + table = "table" + postgresql_columns { + column = "column" + } + } + } + } + } + } + + destination_config { + destination_connection_profile = google_datastream_connection_profile.destination.id + bigquery_destination_config { + data_freshness = "900s" + source_hierarchy_datasets { + dataset_template { + location = "us-central1" + } + } + } + } + + backfill_all { + postgresql_excluded_objects { + postgresql_schemas { + schema = "schema" + postgresql_tables { + table = "table" + postgresql_columns { + column = "column" + } + } + } + } + } +} +``` +## Example Usage - Datastream Stream Oracle + + +```hcl +resource "google_datastream_connection_profile" "source" { + display_name = "Oracle Source" + location = "us-central1" + connection_profile_id = "source-profile" + + oracle_profile { + hostname = "hostname" + port = 1521 + username = "user" + password = "pass" + database_service = "ORCL" + } +} + +resource "google_datastream_connection_profile" "destination" { + display_name = "BigQuery Destination" + location = "us-central1" + connection_profile_id = "destination-profile" + + bigquery_profile {} +} + +resource "google_datastream_stream" "stream5" { + display_name = "Oracle to BigQuery" + location = "us-central1" + stream_id = "my-stream" + desired_state = "RUNNING" + + source_config { + source_connection_profile = google_datastream_connection_profile.source.id + oracle_source_config { + max_concurrent_cdc_tasks = 8 + max_concurrent_backfill_tasks = 12 + include_objects { + oracle_schemas { + schema = "schema" + oracle_tables { + table = "table" + oracle_columns { + column = "column" + } + } + } + } + exclude_objects { + oracle_schemas { + schema = "schema" + oracle_tables { + table = "table" + oracle_columns { + column = "column" + } + } + } + } + drop_large_objects {} + } + } + + destination_config { + destination_connection_profile = google_datastream_connection_profile.destination.id + bigquery_destination_config { + 
data_freshness = "900s" + source_hierarchy_datasets { + dataset_template { + location = "us-central1" + } + } + } + } + + backfill_all { + oracle_excluded_objects { + oracle_schemas { + schema = "schema" + oracle_tables { + table = "table" + oracle_columns { + column = "column" + } + } + } + } + } +} +```
Open in Cloud Shell @@ -393,10 +573,20 @@ The following arguments are supported: Source connection profile resource. Format: projects/{project}/locations/{location}/connectionProfiles/{name} * `mysql_source_config` - - (Required) + (Optional) MySQL data source configuration. Structure is [documented below](#nested_mysql_source_config). +* `oracle_source_config` - + (Optional) + Oracle data source configuration. + Structure is [documented below](#nested_oracle_source_config). + +* `postgresql_source_config` - + (Optional) + PostgreSQL data source configuration. + Structure is [documented below](#nested_postgresql_source_config). + The `mysql_source_config` block supports: @@ -540,6 +730,321 @@ The following arguments are supported: (Optional) The ordinal position of the column in the table. +The `oracle_source_config` block supports: + +* `include_objects` - + (Optional) + Oracle objects to retrieve from the source. + Structure is [documented below](#nested_include_objects). + +* `exclude_objects` - + (Optional) + Oracle objects to exclude from the stream. + Structure is [documented below](#nested_exclude_objects). + +* `max_concurrent_cdc_tasks` - + (Optional) + Maximum number of concurrent CDC tasks. The number should be non negative. + If not set (or set to 0), the system's default value will be used. + +* `max_concurrent_backfill_tasks` - + (Optional) + Maximum number of concurrent backfill tasks. The number should be non negative. + If not set (or set to 0), the system's default value will be used. + +* `drop_large_objects` - + (Optional) + Configuration to drop large object values. + +* `stream_large_objects` - + (Optional) + Configuration to stream large object values. + + +The `include_objects` block supports: + +* `oracle_schemas` - + (Required) + Oracle schemas/databases in the database server + Structure is [documented below](#nested_oracle_schemas). + + +The `oracle_schemas` block supports: + +* `schema` - + (Required) + Schema name. 
+ +* `oracle_tables` - + (Optional) + Tables in the database. + Structure is [documented below](#nested_oracle_tables). + + +The `oracle_tables` block supports: + +* `table` - + (Required) + Table name. + +* `oracle_columns` - + (Optional) + Oracle columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything. + Structure is [documented below](#nested_oracle_columns). + + +The `oracle_columns` block supports: + +* `column` - + (Optional) + Column name. + +* `data_type` - + (Optional) + The Oracle data type. Full data types list can be found here: + https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Data-Types.html + +* `length` - + Column length. + +* `precision` - + Column precision. + +* `scale` - + Column scale. + +* `encoding` - + Column encoding. + +* `primary_key` - + Whether or not the column represents a primary key. + +* `nullable` - + Whether or not the column can accept a null value. + +* `ordinal_position` - + The ordinal position of the column in the table. + +The `exclude_objects` block supports: + +* `oracle_schemas` - + (Required) + Oracle schemas/databases in the database server + Structure is [documented below](#nested_oracle_schemas). + + +The `oracle_schemas` block supports: + +* `schema` - + (Required) + Schema name. + +* `oracle_tables` - + (Optional) + Tables in the database. + Structure is [documented below](#nested_oracle_tables). + + +The `oracle_tables` block supports: + +* `table` - + (Required) + Table name. + +* `oracle_columns` - + (Optional) + Oracle columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything. + Structure is [documented below](#nested_oracle_columns). + + +The `oracle_columns` block supports: + +* `column` - + (Optional) + Column name. + +* `data_type` - + (Optional) + The Oracle data type. 
Full data types list can be found here: + https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Data-Types.html + +* `length` - + Column length. + +* `precision` - + Column precision. + +* `scale` - + Column scale. + +* `encoding` - + Column encoding. + +* `primary_key` - + Whether or not the column represents a primary key. + +* `nullable` - + Whether or not the column can accept a null value. + +* `ordinal_position` - + The ordinal position of the column in the table. + +The `postgresql_source_config` block supports: + +* `include_objects` - + (Optional) + PostgreSQL objects to retrieve from the source. + Structure is [documented below](#nested_include_objects). + +* `exclude_objects` - + (Optional) + PostgreSQL objects to exclude from the stream. + Structure is [documented below](#nested_exclude_objects). + +* `replication_slot` - + (Required) + The name of the logical replication slot that's configured with + the pgoutput plugin. + +* `publication` - + (Required) + The name of the publication that includes the set of all tables + that are defined in the stream's include_objects. + +* `max_concurrent_backfill_tasks` - + (Optional) + Maximum number of concurrent backfill tasks. The number should be non + negative. If not set (or set to 0), the system's default value will be used. + + +The `include_objects` block supports: + +* `postgresql_schemas` - + (Required) + PostgreSQL schemas on the server + Structure is [documented below](#nested_postgresql_schemas). + + +The `postgresql_schemas` block supports: + +* `schema` - + (Required) + Database name. + +* `postgresql_tables` - + (Optional) + Tables in the schema. + Structure is [documented below](#nested_postgresql_tables). + + +The `postgresql_tables` block supports: + +* `table` - + (Required) + Table name. + +* `postgresql_columns` - + (Optional) + PostgreSQL columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything. 
+ Structure is [documented below](#nested_postgresql_columns). + + +The `postgresql_columns` block supports: + +* `column` - + (Optional) + Column name. + +* `data_type` - + (Optional) + The PostgreSQL data type. Full data types list can be found here: + https://www.postgresql.org/docs/current/datatype.html + +* `length` - + Column length. + +* `precision` - + Column precision. + +* `scale` - + Column scale. + +* `primary_key` - + (Optional) + Whether or not the column represents a primary key. + +* `nullable` - + (Optional) + Whether or not the column can accept a null value. + +* `ordinal_position` - + (Optional) + The ordinal position of the column in the table. + +The `exclude_objects` block supports: + +* `postgresql_schemas` - + (Required) + PostgreSQL schemas on the server + Structure is [documented below](#nested_postgresql_schemas). + + +The `postgresql_schemas` block supports: + +* `schema` - + (Required) + Database name. + +* `postgresql_tables` - + (Optional) + Tables in the schema. + Structure is [documented below](#nested_postgresql_tables). + + +The `postgresql_tables` block supports: + +* `table` - + (Required) + Table name. + +* `postgresql_columns` - + (Optional) + PostgreSQL columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything. + Structure is [documented below](#nested_postgresql_columns). + + +The `postgresql_columns` block supports: + +* `column` - + (Optional) + Column name. + +* `data_type` - + (Optional) + The PostgreSQL data type. Full data types list can be found here: + https://www.postgresql.org/docs/current/datatype.html + +* `length` - + Column length. + +* `precision` - + Column precision. + +* `scale` - + Column scale. + +* `primary_key` - + (Optional) + Whether or not the column represents a primary key. + +* `nullable` - + (Optional) + Whether or not the column can accept a null value. + +* `ordinal_position` - + (Optional) + The ordinal position of the column in the table. 
+ The `destination_config` block supports: * `destination_connection_profile` - @@ -681,6 +1186,16 @@ The following arguments are supported: MySQL data source objects to avoid backfilling. Structure is [documented below](#nested_mysql_excluded_objects). +* `postgresql_excluded_objects` - + (Optional) + PostgreSQL data source objects to avoid backfilling. + Structure is [documented below](#nested_postgresql_excluded_objects). + +* `oracle_excluded_objects` - + (Optional) + Oracle data source objects to avoid backfilling. + Structure is [documented below](#nested_oracle_excluded_objects). + The `mysql_excluded_objects` block supports: @@ -744,6 +1259,134 @@ The following arguments are supported: (Optional) The ordinal position of the column in the table. +The `postgresql_excluded_objects` block supports: + +* `postgresql_schemas` - + (Required) + PostgreSQL schemas on the server + Structure is [documented below](#nested_postgresql_schemas). + + +The `postgresql_schemas` block supports: + +* `schema` - + (Required) + Database name. + +* `postgresql_tables` - + (Optional) + Tables in the schema. + Structure is [documented below](#nested_postgresql_tables). + + +The `postgresql_tables` block supports: + +* `table` - + (Required) + Table name. + +* `postgresql_columns` - + (Optional) + PostgreSQL columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything. + Structure is [documented below](#nested_postgresql_columns). + + +The `postgresql_columns` block supports: + +* `column` - + (Optional) + Column name. + +* `data_type` - + (Optional) + The PostgreSQL data type. Full data types list can be found here: + https://www.postgresql.org/docs/current/datatype.html + +* `length` - + Column length. + +* `precision` - + Column precision. + +* `scale` - + Column scale. + +* `primary_key` - + (Optional) + Whether or not the column represents a primary key. 
+ +* `nullable` - + (Optional) + Whether or not the column can accept a null value. + +* `ordinal_position` - + (Optional) + The ordinal position of the column in the table. + +The `oracle_excluded_objects` block supports: + +* `oracle_schemas` - + (Required) + Oracle schemas/databases in the database server + Structure is [documented below](#nested_oracle_schemas). + + +The `oracle_schemas` block supports: + +* `schema` - + (Required) + Schema name. + +* `oracle_tables` - + (Optional) + Tables in the database. + Structure is [documented below](#nested_oracle_tables). + + +The `oracle_tables` block supports: + +* `table` - + (Required) + Table name. + +* `oracle_columns` - + (Optional) + Oracle columns in the schema. When unspecified as part of include/exclude objects, includes/excludes everything. + Structure is [documented below](#nested_oracle_columns). + + +The `oracle_columns` block supports: + +* `column` - + (Optional) + Column name. + +* `data_type` - + (Optional) + The Oracle data type. Full data types list can be found here: + https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/Data-Types.html + +* `length` - + Column length. + +* `precision` - + Column precision. + +* `scale` - + Column scale. + +* `encoding` - + Column encoding. + +* `primary_key` - + Whether or not the column represents a primary key. + +* `nullable` - + Whether or not the column can accept a null value. + +* `ordinal_position` - + The ordinal position of the column in the table. + ## Attributes Reference In addition to the arguments listed above, the following computed attributes are exported: