diff --git a/aws/resource_aws_emr_cluster.go b/aws/resource_aws_emr_cluster.go
index 54899b016cd3..852ec3e80445 100644
--- a/aws/resource_aws_emr_cluster.go
+++ b/aws/resource_aws_emr_cluster.go
@@ -879,8 +879,11 @@ func resourceAwsEMRClusterCreate(d *schema.ResourceData, meta interface{}) error
 		}
 		return nil
 	})
+	if isResourceTimeoutError(err) {
+		resp, err = conn.RunJobFlow(params)
+	}
 	if err != nil {
-		return err
+		return fmt.Errorf("error running EMR Job Flow: %s", err)
 	}
 
 	d.SetId(*resp.JobFlowId)
@@ -1318,48 +1321,72 @@ func resourceAwsEMRClusterDelete(d *schema.ResourceData, meta interface{}) error
 		return err
 	}
 
+	input := &emr.ListInstancesInput{
+		ClusterId: aws.String(d.Id()),
+	}
+	var resp *emr.ListInstancesOutput
+	var count int
 	err = resource.Retry(20*time.Minute, func() *resource.RetryError {
-		resp, err := conn.ListInstances(&emr.ListInstancesInput{
-			ClusterId: aws.String(d.Id()),
-		})
+		var err error
+		resp, err = conn.ListInstances(input)
 		if err != nil {
 			return resource.NonRetryableError(err)
 		}
 
-		instanceCount := len(resp.Instances)
-
-		if resp == nil || instanceCount == 0 {
-			log.Printf("[DEBUG] No instances found for EMR Cluster (%s)", d.Id())
-			return nil
+		count = countEMRRemainingInstances(resp, d.Id())
+		if count != 0 {
+			return resource.RetryableError(fmt.Errorf("EMR Cluster (%s) has (%d) Instances remaining", d.Id(), count))
 		}
+		return nil
+	})
 
-		// Collect instance status states, wait for all instances to be terminated
-		// before moving on
-		var terminated []string
-		for j, i := range resp.Instances {
-			if i.Status != nil {
-				if aws.StringValue(i.Status.State) == emr.InstanceStateTerminated {
-					terminated = append(terminated, *i.Ec2InstanceId)
-				}
-			} else {
-				log.Printf("[DEBUG] Cluster instance (%d : %s) has no status", j, *i.Ec2InstanceId)
-			}
-		}
-		if len(terminated) == instanceCount {
-			log.Printf("[DEBUG] All (%d) EMR Cluster (%s) Instances terminated", instanceCount, d.Id())
-			return nil
+	if isResourceTimeoutError(err) {
+		resp, err = conn.ListInstances(input)
+
+		if err == nil {
+			count = countEMRRemainingInstances(resp, d.Id())
 		}
-		return resource.RetryableError(fmt.Errorf("EMR Cluster (%s) has (%d) Instances remaining, retrying", d.Id(), len(resp.Instances)))
-	})
+	}
+
+	if count != 0 {
+		return fmt.Errorf("EMR Cluster (%s) has (%d) Instances remaining", d.Id(), count)
+	}
 
 	if err != nil {
-		return fmt.Errorf("error waiting for EMR Cluster (%s) Instances to drain", d.Id())
+		return fmt.Errorf("error waiting for EMR Cluster (%s) Instances to drain: %s", d.Id(), err)
 	}
 
 	return nil
 }
 
+func countEMRRemainingInstances(resp *emr.ListInstancesOutput, emrClusterId string) int {
+	instanceCount := len(resp.Instances)
+
+	if resp == nil || instanceCount == 0 {
+		log.Printf("[DEBUG] No instances found for EMR Cluster (%s)", emrClusterId)
+		return 0
+	}
+
+	// Collect instance status states, wait for all instances to be terminated
+	// before moving on
+	var terminated []string
+	for j, i := range resp.Instances {
+		if i.Status != nil {
+			if aws.StringValue(i.Status.State) == emr.InstanceStateTerminated {
+				terminated = append(terminated, *i.Ec2InstanceId)
+			}
+		} else {
+			log.Printf("[DEBUG] Cluster instance (%d : %s) has no status", j, *i.Ec2InstanceId)
+		}
+	}
+	if len(terminated) == instanceCount {
+		log.Printf("[DEBUG] All (%d) EMR Cluster (%s) Instances terminated", instanceCount, emrClusterId)
+		return 0
+	}
+	return len(resp.Instances)
+}
+
 func expandApplications(apps []interface{}) []*emr.Application {
 	appOut := make([]*emr.Application, 0, len(apps))
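
For context on the pattern this diff applies in both hunks: `resource.Retry` gives up with a timeout error once its window closes, and `isResourceTimeoutError` (a helper defined elsewhere in this provider codebase) detects that case so the operation can be attempted one final time before failing; extracting `countEMRRemainingInstances` is what lets the delete path reuse the same instance-counting logic in that final attempt. Below is a minimal, self-contained sketch of the retry-then-final-attempt idea. It is not the provider's implementation: `retry`, `errTimeout`, and `op` are hypothetical names, and the real `resource.Retry` is time-based rather than attempt-counting (simplified here so the example runs deterministically).

```go
package main

import (
	"errors"
	"fmt"
)

// errTimeout stands in for the timeout error that the provider's
// isResourceTimeoutError helper checks for (assumption: name simplified).
var errTimeout = errors.New("timeout while waiting for operation")

// retry is a deliberately simplified, attempt-counting stand-in for the
// time-based resource.Retry used in the diff above.
func retry(maxAttempts int, fn func() error) error {
	for i := 0; i < maxAttempts; i++ {
		if err := fn(); err == nil {
			return nil
		}
	}
	return errTimeout
}

func main() {
	attempts := 0
	op := func() error {
		attempts++
		if attempts < 4 { // succeeds only on the 4th call
			return fmt.Errorf("still pending (attempt %d)", attempts)
		}
		return nil
	}

	// The retry window closes after 3 attempts, so this returns errTimeout
	// even though the operation was about to succeed.
	err := retry(3, op)

	// The pattern applied in the diff: on timeout, make one last attempt
	// before reporting failure, so a slow-but-successful operation is not
	// surfaced as an error.
	if errors.Is(err, errTimeout) {
		err = op()
	}
	if err != nil {
		fmt.Println("error after final attempt:", err)
		return
	}
	fmt.Printf("succeeded after %d attempts\n", attempts)
}
```

The sketch prints "succeeded after 4 attempts": the loop alone would have reported a timeout, and the one-shot fallback after `isResourceTimeoutError` is what turns that near-miss into a success, which is exactly why the diff re-runs `RunJobFlow` on create and `ListInstances` on delete.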