diff --git a/.changelog/6576.txt b/.changelog/6576.txt new file mode 100644 index 00000000000..10e5c142dd0 --- /dev/null +++ b/.changelog/6576.txt @@ -0,0 +1,6 @@ +```release-note:enhancement +eventarc: added `channel` and `conditions` fields to `google_eventarc_trigger` +``` +```release-note:enhancement +clouddeploy: added `serial_pipeline.stages.strategy` field to `google_clouddeploy_delivery_pipeline` +``` diff --git a/go.mod b/go.mod index 929e409186e..22ee126bcb7 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ go 1.18 require ( cloud.google.com/go/bigtable v1.16.0 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.19.0 + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.21.1 github.com/apparentlymart/go-cidr v1.1.0 github.com/client9/misspell v0.3.4 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 02b86cb57ec..3ee3264875d 100644 --- a/go.sum +++ b/go.sum @@ -76,8 +76,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.19.0 h1:4YAtk4xuOCxUSkGdwlDhkX7DTP4VwLZCoebGGEsU+U4= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.19.0/go.mod h1:i6Pmzp7aolLmJY86RaJ9wjqm/HFleMeN7Vl5uIWLwE8= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.21.1 h1:WPrdiImW8A7kmMZF95dVAybR3H/ItAfJXPdRJnwf/yg= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.21.1/go.mod h1:i6Pmzp7aolLmJY86RaJ9wjqm/HFleMeN7Vl5uIWLwE8= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= 
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= diff --git a/google/resource_clouddeploy_delivery_pipeline.go b/google/resource_clouddeploy_delivery_pipeline.go index e86f00483d7..1e8a0541a74 100644 --- a/google/resource_clouddeploy_delivery_pipeline.go +++ b/google/resource_clouddeploy_delivery_pipeline.go @@ -159,6 +159,14 @@ func ClouddeployDeliveryPipelineSerialPipelineStagesSchema() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, }, + "strategy": { + Type: schema.TypeList, + Optional: true, + Description: "Optional. The strategy to use for a `Rollout` to this stage.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategySchema(), + }, + "target_id": { Type: schema.TypeString, Optional: true, @@ -168,6 +176,32 @@ func ClouddeployDeliveryPipelineSerialPipelineStagesSchema() *schema.Resource { } } +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategySchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "standard": { + Type: schema.TypeList, + Optional: true, + Description: "Standard deployment strategy executes a single deploy and allows verifying the deployment.", + MaxItems: 1, + Elem: ClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardSchema(), + }, + }, + } +} + +func ClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandardSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "verify": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to verify a deployment.", + }, + }, + } +} + func ClouddeployDeliveryPipelineConditionSchema() *schema.Resource { return &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -534,6 +568,7 @@ func expandClouddeployDeliveryPipelineSerialPipelineStages(o interface{}) *cloud obj := o.(map[string]interface{}) return 
&clouddeploy.DeliveryPipelineSerialPipelineStages{ Profiles: expandStringArray(obj["profiles"]), + Strategy: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategy(obj["strategy"]), TargetId: dcl.String(obj["target_id"].(string)), } } @@ -558,6 +593,7 @@ func flattenClouddeployDeliveryPipelineSerialPipelineStages(obj *clouddeploy.Del } transformed := map[string]interface{}{ "profiles": obj.Profiles, + "strategy": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategy(obj.Strategy), "target_id": obj.TargetId, } @@ -565,6 +601,58 @@ func flattenClouddeployDeliveryPipelineSerialPipelineStages(obj *clouddeploy.Del } +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategy(o interface{}) *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategy { + if o == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategy + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategy + } + obj := objArr[0].(map[string]interface{}) + return &clouddeploy.DeliveryPipelineSerialPipelineStagesStrategy{ + Standard: expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandard(obj["standard"]), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategy(obj *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategy) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "standard": flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandard(obj.Standard), + } + + return []interface{}{transformed} + +} + +func expandClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandard(o interface{}) *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyStandard { + if o == nil { + return clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return 
clouddeploy.EmptyDeliveryPipelineSerialPipelineStagesStrategyStandard + } + obj := objArr[0].(map[string]interface{}) + return &clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyStandard{ + Verify: dcl.Bool(obj["verify"].(bool)), + } +} + +func flattenClouddeployDeliveryPipelineSerialPipelineStagesStrategyStandard(obj *clouddeploy.DeliveryPipelineSerialPipelineStagesStrategyStandard) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "verify": obj.Verify, + } + + return []interface{}{transformed} + +} + func flattenClouddeployDeliveryPipelineCondition(obj *clouddeploy.DeliveryPipelineCondition) interface{} { if obj == nil || obj.Empty() { return nil diff --git a/google/resource_clouddeploy_target.go b/google/resource_clouddeploy_target.go index f0a243e29b3..c2c00486799 100644 --- a/google/resource_clouddeploy_target.go +++ b/google/resource_clouddeploy_target.go @@ -72,7 +72,7 @@ func resourceClouddeployTarget() *schema.Resource { Description: "Information specifying an Anthos Cluster.", MaxItems: 1, Elem: ClouddeployTargetAnthosClusterSchema(), - ConflictsWith: []string{"gke"}, + ConflictsWith: []string{"gke", "run"}, }, "description": { @@ -95,7 +95,7 @@ func resourceClouddeployTarget() *schema.Resource { Description: "Information specifying a GKE Cluster.", MaxItems: 1, Elem: ClouddeployTargetGkeSchema(), - ConflictsWith: []string{"anthos_cluster"}, + ConflictsWith: []string{"anthos_cluster", "run"}, }, "labels": { @@ -120,6 +120,15 @@ func resourceClouddeployTarget() *schema.Resource { Description: "Optional. 
Whether or not the `Target` requires approval.", }, + "run": { + Type: schema.TypeList, + Optional: true, + Description: "Information specifying a Cloud Run deployment target.", + MaxItems: 1, + Elem: ClouddeployTargetRunSchema(), + ConflictsWith: []string{"gke", "anthos_cluster"}, + }, + "create_time": { Type: schema.TypeString, Computed: true, @@ -183,6 +192,12 @@ func ClouddeployTargetExecutionConfigsSchema() *schema.Resource { Description: "Optional. Cloud Storage location in which to store execution outputs. This can either be a bucket (\"gs://my-bucket\") or a path within a bucket (\"gs://my-bucket/my-dir\"). If unspecified, a default bucket located in the same region will be used.", }, + "execution_timeout": { + Type: schema.TypeString, + Optional: true, + Description: "Optional. Execution timeout for a Cloud Build Execution. This must be between 10m and 24h in seconds format. If unspecified, a default timeout of 1h is used.", + }, + "service_account": { Type: schema.TypeString, Computed: true, @@ -219,6 +234,18 @@ func ClouddeployTargetGkeSchema() *schema.Resource { } } +func ClouddeployTargetRunSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "location": { + Type: schema.TypeString, + Required: true, + Description: "Required. The location where the Cloud Run Service should be located. 
Format is `projects/{project}/locations/{location}`.", + }, + }, + } +} + func resourceClouddeployTargetCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) project, err := getProject(d, config) @@ -237,6 +264,7 @@ func resourceClouddeployTargetCreate(d *schema.ResourceData, meta interface{}) e Labels: checkStringMap(d.Get("labels")), Project: dcl.String(project), RequireApproval: dcl.Bool(d.Get("require_approval").(bool)), + Run: expandClouddeployTargetRun(d.Get("run")), } id, err := obj.ID() @@ -294,6 +322,7 @@ func resourceClouddeployTargetRead(d *schema.ResourceData, meta interface{}) err Labels: checkStringMap(d.Get("labels")), Project: dcl.String(project), RequireApproval: dcl.Bool(d.Get("require_approval").(bool)), + Run: expandClouddeployTargetRun(d.Get("run")), } userAgent, err := generateUserAgentString(d, config.userAgent) @@ -348,6 +377,9 @@ func resourceClouddeployTargetRead(d *schema.ResourceData, meta interface{}) err if err = d.Set("require_approval", res.RequireApproval); err != nil { return fmt.Errorf("error setting require_approval in state: %s", err) } + if err = d.Set("run", flattenClouddeployTargetRun(res.Run)); err != nil { + return fmt.Errorf("error setting run in state: %s", err) + } if err = d.Set("create_time", res.CreateTime); err != nil { return fmt.Errorf("error setting create_time in state: %s", err) } @@ -384,6 +416,7 @@ func resourceClouddeployTargetUpdate(d *schema.ResourceData, meta interface{}) e Labels: checkStringMap(d.Get("labels")), Project: dcl.String(project), RequireApproval: dcl.Bool(d.Get("require_approval").(bool)), + Run: expandClouddeployTargetRun(d.Get("run")), } directive := UpdateDirective userAgent, err := generateUserAgentString(d, config.userAgent) @@ -436,6 +469,7 @@ func resourceClouddeployTargetDelete(d *schema.ResourceData, meta interface{}) e Labels: checkStringMap(d.Get("labels")), Project: dcl.String(project), RequireApproval: dcl.Bool(d.Get("require_approval").(bool)), + Run: 
expandClouddeployTargetRun(d.Get("run")), } log.Printf("[DEBUG] Deleting Target %q", d.Id()) @@ -535,10 +569,11 @@ func expandClouddeployTargetExecutionConfigs(o interface{}) *clouddeploy.TargetE obj := o.(map[string]interface{}) return &clouddeploy.TargetExecutionConfigs{ - Usages: expandClouddeployTargetExecutionConfigsUsagesArray(obj["usages"]), - ArtifactStorage: dcl.StringOrNil(obj["artifact_storage"].(string)), - ServiceAccount: dcl.StringOrNil(obj["service_account"].(string)), - WorkerPool: dcl.String(obj["worker_pool"].(string)), + Usages: expandClouddeployTargetExecutionConfigsUsagesArray(obj["usages"]), + ArtifactStorage: dcl.StringOrNil(obj["artifact_storage"].(string)), + ExecutionTimeout: dcl.String(obj["execution_timeout"].(string)), + ServiceAccount: dcl.StringOrNil(obj["service_account"].(string)), + WorkerPool: dcl.String(obj["worker_pool"].(string)), } } @@ -561,10 +596,11 @@ func flattenClouddeployTargetExecutionConfigs(obj *clouddeploy.TargetExecutionCo return nil } transformed := map[string]interface{}{ - "usages": flattenClouddeployTargetExecutionConfigsUsagesArray(obj.Usages), - "artifact_storage": obj.ArtifactStorage, - "service_account": obj.ServiceAccount, - "worker_pool": obj.WorkerPool, + "usages": flattenClouddeployTargetExecutionConfigsUsagesArray(obj.Usages), + "artifact_storage": obj.ArtifactStorage, + "execution_timeout": obj.ExecutionTimeout, + "service_account": obj.ServiceAccount, + "worker_pool": obj.WorkerPool, } return transformed @@ -597,6 +633,32 @@ func flattenClouddeployTargetGke(obj *clouddeploy.TargetGke) interface{} { return []interface{}{transformed} +} + +func expandClouddeployTargetRun(o interface{}) *clouddeploy.TargetRun { + if o == nil { + return clouddeploy.EmptyTargetRun + } + objArr := o.([]interface{}) + if len(objArr) == 0 || objArr[0] == nil { + return clouddeploy.EmptyTargetRun + } + obj := objArr[0].(map[string]interface{}) + return &clouddeploy.TargetRun{ + Location: dcl.String(obj["location"].(string)), 
+ } +} + +func flattenClouddeployTargetRun(obj *clouddeploy.TargetRun) interface{} { + if obj == nil || obj.Empty() { + return nil + } + transformed := map[string]interface{}{ + "location": obj.Location, + } + + return []interface{}{transformed} + } func flattenClouddeployTargetExecutionConfigsUsagesArray(obj []clouddeploy.TargetExecutionConfigsUsagesEnum) interface{} { if obj == nil { diff --git a/google/resource_eventarc_trigger.go b/google/resource_eventarc_trigger.go index d33028cedc4..76e6bb6a99c 100644 --- a/google/resource_eventarc_trigger.go +++ b/google/resource_eventarc_trigger.go @@ -75,6 +75,14 @@ func resourceEventarcTrigger() *schema.Resource { Description: "Required. The resource name of the trigger. Must be unique within the location on the project.", }, + "channel": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: "Optional. The name of the channel associated with the trigger in `projects/{project}/locations/{location}/channels/{channel}` format. You must provide a channel to receive events from Eventarc SaaS partners.", + }, + "labels": { Type: schema.TypeMap, Optional: true, @@ -108,6 +116,13 @@ func resourceEventarcTrigger() *schema.Resource { Elem: EventarcTriggerTransportSchema(), }, + "conditions": { + Type: schema.TypeMap, + Computed: true, + Description: "Output only. 
The reason(s) why a trigger is in FAILED state.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "create_time": { Type: schema.TypeString, Computed: true, @@ -305,6 +320,7 @@ func resourceEventarcTriggerCreate(d *schema.ResourceData, meta interface{}) err Location: dcl.String(d.Get("location").(string)), MatchingCriteria: expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), Name: dcl.String(d.Get("name").(string)), + Channel: dcl.String(d.Get("channel").(string)), Labels: checkStringMap(d.Get("labels")), Project: dcl.String(project), ServiceAccount: dcl.String(d.Get("service_account").(string)), @@ -360,6 +376,7 @@ func resourceEventarcTriggerRead(d *schema.ResourceData, meta interface{}) error Location: dcl.String(d.Get("location").(string)), MatchingCriteria: expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), Name: dcl.String(d.Get("name").(string)), + Channel: dcl.String(d.Get("channel").(string)), Labels: checkStringMap(d.Get("labels")), Project: dcl.String(project), ServiceAccount: dcl.String(d.Get("service_account").(string)), @@ -400,6 +417,9 @@ func resourceEventarcTriggerRead(d *schema.ResourceData, meta interface{}) error if err = d.Set("name", res.Name); err != nil { return fmt.Errorf("error setting name in state: %s", err) } + if err = d.Set("channel", res.Channel); err != nil { + return fmt.Errorf("error setting channel in state: %s", err) + } if err = d.Set("labels", res.Labels); err != nil { return fmt.Errorf("error setting labels in state: %s", err) } @@ -412,6 +432,9 @@ func resourceEventarcTriggerRead(d *schema.ResourceData, meta interface{}) error if err = d.Set("transport", flattenEventarcTriggerTransport(res.Transport)); err != nil { return fmt.Errorf("error setting transport in state: %s", err) } + if err = d.Set("conditions", res.Conditions); err != nil { + return fmt.Errorf("error setting conditions in state: %s", err) + } if err = d.Set("create_time", res.CreateTime); err != nil { return 
fmt.Errorf("error setting create_time in state: %s", err) } @@ -439,6 +462,7 @@ func resourceEventarcTriggerUpdate(d *schema.ResourceData, meta interface{}) err Location: dcl.String(d.Get("location").(string)), MatchingCriteria: expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), Name: dcl.String(d.Get("name").(string)), + Channel: dcl.String(d.Get("channel").(string)), Labels: checkStringMap(d.Get("labels")), Project: dcl.String(project), ServiceAccount: dcl.String(d.Get("service_account").(string)), @@ -489,6 +513,7 @@ func resourceEventarcTriggerDelete(d *schema.ResourceData, meta interface{}) err Location: dcl.String(d.Get("location").(string)), MatchingCriteria: expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), Name: dcl.String(d.Get("name").(string)), + Channel: dcl.String(d.Get("channel").(string)), Labels: checkStringMap(d.Get("labels")), Project: dcl.String(project), ServiceAccount: dcl.String(d.Get("service_account").(string)), diff --git a/google/resource_eventarc_trigger_generated_test.go b/google/resource_eventarc_trigger_generated_test.go index ce1cdcd2237..3db3670ef6b 100644 --- a/google/resource_eventarc_trigger_generated_test.go +++ b/google/resource_eventarc_trigger_generated_test.go @@ -317,6 +317,7 @@ func testAccCheckEventarcTriggerDestroyProducer(t *testing.T) func(s *terraform. 
obj := &eventarc.Trigger{ Location: dcl.String(rs.Primary.Attributes["location"]), Name: dcl.String(rs.Primary.Attributes["name"]), + Channel: dcl.String(rs.Primary.Attributes["channel"]), Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), ServiceAccount: dcl.String(rs.Primary.Attributes["service_account"]), CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), diff --git a/website/docs/r/clouddeploy_delivery_pipeline.html.markdown b/website/docs/r/clouddeploy_delivery_pipeline.html.markdown index 34b3500e806..20e651fd727 100644 --- a/website/docs/r/clouddeploy_delivery_pipeline.html.markdown +++ b/website/docs/r/clouddeploy_delivery_pipeline.html.markdown @@ -116,10 +116,26 @@ The `stages` block supports: (Optional) Skaffold profiles to use when rendering the manifest for this stage's `Target`. +* `strategy` - + (Optional) + Optional. The strategy to use for a `Rollout` to this stage. + * `target_id` - (Optional) The target_id to which this stage points. This field refers exclusively to the last segment of a target name. For example, this field would just be `my-target` (rather than `projects/project/locations/location/targets/my-target`). The location of the `Target` is inferred to be the same as the location of the `DeliveryPipeline` that contains this `Stage`. +The `strategy` block supports: + +* `standard` - + (Optional) + Standard deployment strategy executes a single deploy and allows verifying the deployment. + +The `standard` block supports: + +* `verify` - + (Optional) + Whether to verify a deployment. 
+ ## Attributes Reference In addition to the arguments listed above, the following computed attributes are exported: diff --git a/website/docs/r/clouddeploy_target.html.markdown b/website/docs/r/clouddeploy_target.html.markdown index 91978bbb16f..43006d6d2a0 100644 --- a/website/docs/r/clouddeploy_target.html.markdown +++ b/website/docs/r/clouddeploy_target.html.markdown @@ -103,6 +103,10 @@ The following arguments are supported: (Optional) Optional. Whether or not the `Target` requires approval. +* `run` - + (Optional) + Information specifying a Cloud Run deployment target. + The `anthos_cluster` block supports: @@ -117,6 +121,10 @@ The `execution_configs` block supports: (Optional) Optional. Cloud Storage location in which to store execution outputs. This can either be a bucket ("gs://my-bucket") or a path within a bucket ("gs://my-bucket/my-dir"). If unspecified, a default bucket located in the same region will be used. +* `execution_timeout` - + (Optional) + Optional. Execution timeout for a Cloud Build Execution. This must be between 10m and 24h in seconds format. If unspecified, a default timeout of 1h is used. + * `service_account` - (Optional) Optional. Google service account to use for execution. If unspecified, the project execution service account (-compute@developer.gserviceaccount.com) is used. @@ -139,6 +147,12 @@ The `gke` block supports: (Optional) Optional. If true, `cluster` is accessed using the private IP address of the control plane endpoint. Otherwise, the default IP address of the control plane endpoint is used. The default IP address is the private IP address for clusters with private control-plane endpoints and the public IP address otherwise. Only specify this option when `cluster` is a [private GKE cluster](https://cloud.google.com/kubernetes-engine/docs/concepts/private-cluster-concept). +The `run` block supports: + +* `location` - + (Required) + Required. The location where the Cloud Run Service should be located. 
Format is `projects/{project}/locations/{location}`. + ## Attributes Reference In addition to the arguments listed above, the following computed attributes are exported: diff --git a/website/docs/r/eventarc_trigger.html.markdown b/website/docs/r/eventarc_trigger.html.markdown index 28226ebcce3..ca686e4f16e 100644 --- a/website/docs/r/eventarc_trigger.html.markdown +++ b/website/docs/r/eventarc_trigger.html.markdown @@ -132,6 +132,10 @@ The `matching_criteria` block supports: - - - +* `channel` - + (Optional) + Optional. The name of the channel associated with the trigger in `projects/{project}/locations/{location}/channels/{channel}` format. You must provide a channel to receive events from Eventarc SaaS partners. + * `labels` - (Optional) Optional. User labels attached to the triggers that can be used to group resources. @@ -207,6 +211,9 @@ In addition to the arguments listed above, the following computed attributes are * `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/triggers/{{name}}` +* `conditions` - + Output only. The reason(s) why a trigger is in FAILED state. + * `create_time` - Output only. The creation time.