[SPARK-3332] Revert spark-ec2 patch that identifies clusters using tags
This reverts apache#1899 and apache#2163, two patches that modified `spark-ec2` so that clusters are identified using tags instead of security groups. The original motivation for these patches was to allow multiple clusters to run in the same security group.

Unfortunately, tagging is not atomic with launching instances on EC2, so under this approach `spark-ec2` may launch instances and crash before they can be tagged, effectively orphaning them. The orphaned instances won't belong to any cluster, so the `spark-ec2` script will be unable to clean them up.
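Concretely, the race window sits between the two boto calls in the minimal sketch below. This is illustrative only, not code from the patch; the region, AMI id, key name, and tag value are placeholder assumptions:

```python
# Minimal sketch of the non-atomic launch-then-tag sequence, using the same
# boto calls as spark_ec2.py. Region, AMI id, and key name are placeholders.
import boto.ec2

conn = boto.ec2.connect_to_region("us-east-1")
reservation = conn.run_instances("ami-12345678", key_name="my-keypair",
                                 instance_type="m1.large")

# The instance exists (and accrues charges) from this point on. If the
# script dies here -- an exception, a network error, Ctrl-C -- the tag below
# is never written, so a tag-based cluster lookup can never find the
# instance and `spark-ec2 destroy` cannot clean it up.
for instance in reservation.instances:
    instance.add_tag(key='Name', value='test-slave-%s' % instance.id)
```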

Since this feature may still be worth supporting, there are several alternative approaches we might consider, such as detecting orphaned instances and logging warnings, or using another mechanism to group instances into clusters. For the 1.1.0 release, though, I propose that we just revert this patch.
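The "detect orphans and log warnings" alternative could look roughly like the sketch below, which reuses the spot-request scan pattern that this commit removes. None of this code is part of the patch, and the cluster name and region are assumptions:

```python
# Rough sketch of orphan detection: any active spot request whose Name tag
# matches the cluster, but whose instance never received a Name tag, is a
# likely orphan of a crashed spark-ec2 run. Not part of the patch.
import boto.ec2

cluster_name = "test"  # assumed cluster name
conn = boto.ec2.connect_to_region("us-east-1")  # assumed region
for req in conn.get_all_spot_instance_requests():
    if req.state != u'active':
        continue
    if req.tags.get(u'Name', "").startswith(cluster_name):
        for res in conn.get_all_instances(instance_ids=[req.instance_id]):
            for inst in res.instances:
                if inst.tags.get(u'Name') is None:
                    print "WARNING: instance %s may be orphaned" % inst.id
```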

Author: Josh Rosen <joshrosen@apache.org>

Closes apache#2225 from JoshRosen/revert-ec2-cluster-naming and squashes the following commits:

0c18e86 [Josh Rosen] Revert "SPARK-2333 - spark_ec2 script should allow option for existing security group"
c2ca2d4 [Josh Rosen] Revert "Spark-3213 Fixes issue with spark-ec2 not detecting slaves created with "Launch More like this""
JoshRosen committed Sep 2, 2014
1 parent e6972ea commit ccf3520
Showing 2 changed files with 30 additions and 64 deletions.
14 changes: 6 additions & 8 deletions docs/ec2-scripts.md
@@ -12,16 +12,14 @@
 on the [Amazon Web Services site](http://aws.amazon.com/).
 
 `spark-ec2` is designed to manage multiple named clusters. You can
 launch a new cluster (telling the script its size and giving it a name),
-shutdown an existing cluster, or log into a cluster. Each cluster
-launches a set of instances, which are tagged with the cluster name,
-and placed into EC2 security groups. If you don't specify a security
-group, the `spark-ec2` script will create security groups based on the
-cluster name you request. For example, a cluster named
+shutdown an existing cluster, or log into a cluster. Each cluster is
+identified by placing its machines into EC2 security groups whose names
+are derived from the name of the cluster. For example, a cluster named
 `test` will contain a master node in a security group called
 `test-master`, and a number of slave nodes in a security group called
-`test-slaves`. You can also specify a security group prefix to be used
-in place of the cluster name. Machines in a cluster can be identified
-by looking for the "Name" tag of the instance in the Amazon EC2 Console.
+`test-slaves`. The `spark-ec2` script will create these security groups
+for you based on the cluster name you request. You can also use them to
+identify machines belonging to each cluster in the Amazon EC2 Console.
 
 
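In practice, the restored scheme means that launching a cluster with a command of the form `./spark-ec2 -k <keypair> -i <key-file> -s <num-slaves> launch test` (flags abbreviated here) creates the `test-master` and `test-slaves` security groups automatically, and membership in those groups is what marks a machine as part of the `test` cluster in the EC2 Console.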
80 changes: 24 additions & 56 deletions ec2/spark_ec2.py
@@ -41,6 +41,7 @@
 # A URL prefix from which to fetch AMI information
 AMI_PREFIX = "https://raw.github.com/mesos/spark-ec2/v2/ami-list"
 
+
 class UsageError(Exception):
     pass
 
@@ -124,7 +125,7 @@ def parse_args():
         help="The SSH user you want to connect as (default: root)")
     parser.add_option(
         "--delete-groups", action="store_true", default=False,
-        help="When destroying a cluster, delete the security groups that were created.")
+        help="When destroying a cluster, delete the security groups that were created")
     parser.add_option(
         "--use-existing-master", action="store_true", default=False,
         help="Launch fresh slaves, but use an existing stopped master if possible")
@@ -138,9 +139,7 @@
     parser.add_option(
         "--user-data", type="string", default="",
         help="Path to a user-data file (most AMI's interpret this as an initialization script)")
-    parser.add_option(
-        "--security-group-prefix", type="string", default=None,
-        help="Use this prefix for the security group rather than the cluster name.")
+
 
     (opts, args) = parser.parse_args()
     if len(args) != 2:
@@ -287,12 +286,8 @@ def launch_cluster(conn, opts, cluster_name):
         user_data_content = user_data_file.read()
 
     print "Setting up security groups..."
-    if opts.security_group_prefix is None:
-        master_group = get_or_make_group(conn, cluster_name + "-master")
-        slave_group = get_or_make_group(conn, cluster_name + "-slaves")
-    else:
-        master_group = get_or_make_group(conn, opts.security_group_prefix + "-master")
-        slave_group = get_or_make_group(conn, opts.security_group_prefix + "-slaves")
+    master_group = get_or_make_group(conn, cluster_name + "-master")
+    slave_group = get_or_make_group(conn, cluster_name + "-slaves")
     if master_group.rules == []: # Group was just now created
         master_group.authorize(src_group=master_group)
         master_group.authorize(src_group=slave_group)
@@ -316,11 +311,12 @@
         slave_group.authorize('tcp', 60060, 60060, '0.0.0.0/0')
         slave_group.authorize('tcp', 60075, 60075, '0.0.0.0/0')
 
-    # Check if instances are already running with the cluster name
+    # Check if instances are already running in our groups
     existing_masters, existing_slaves = get_existing_cluster(conn, opts, cluster_name,
                                                              die_on_error=False)
     if existing_slaves or (existing_masters and not opts.use_existing_master):
-        print >> stderr, ("ERROR: There are already instances for name: %s " % cluster_name)
+        print >> stderr, ("ERROR: There are already instances running in " +
+                          "group %s or %s" % (master_group.name, slave_group.name))
         sys.exit(1)
 
     # Figure out Spark AMI
@@ -385,13 +381,9 @@
             for r in reqs:
                 id_to_req[r.id] = r
             active_instance_ids = []
-            outstanding_request_ids = []
             for i in my_req_ids:
-                if i in id_to_req:
-                    if id_to_req[i].state == "active":
-                        active_instance_ids.append(id_to_req[i].instance_id)
-                    else:
-                        outstanding_request_ids.append(i)
+                if i in id_to_req and id_to_req[i].state == "active":
+                    active_instance_ids.append(id_to_req[i].instance_id)
             if len(active_instance_ids) == opts.slaves:
                 print "All %d slaves granted" % opts.slaves
                 reservations = conn.get_all_instances(active_instance_ids)
@@ -400,8 +392,8 @@
                     slave_nodes += r.instances
                 break
             else:
-                print "%d of %d slaves granted, waiting longer for request ids including %s" % (
-                    len(active_instance_ids), opts.slaves, outstanding_request_ids[0:10])
+                print "%d of %d slaves granted, waiting longer" % (
+                    len(active_instance_ids), opts.slaves)
     except:
         print "Canceling spot instance requests"
         conn.cancel_spot_instance_requests(my_req_ids)
@@ -460,66 +452,45 @@
 
     # Give the instances descriptive names
     for master in master_nodes:
-        name = '{cn}-master-{iid}'.format(cn=cluster_name, iid=master.id)
-        tag_instance(master, name)
-
+        master.add_tag(
+            key='Name',
+            value='{cn}-master-{iid}'.format(cn=cluster_name, iid=master.id))
     for slave in slave_nodes:
-        name = '{cn}-slave-{iid}'.format(cn=cluster_name, iid=slave.id)
-        tag_instance(slave, name)
+        slave.add_tag(
+            key='Name',
+            value='{cn}-slave-{iid}'.format(cn=cluster_name, iid=slave.id))
 
     # Return all the instances
     return (master_nodes, slave_nodes)
 
-def tag_instance(instance, name):
-    for i in range(0, 5):
-        try:
-            instance.add_tag(key='Name', value=name)
-        except:
-            print "Failed attempt %i of 5 to tag %s" % ((i + 1), name)
-            if (i == 5):
-                raise "Error - failed max attempts to add name tag"
-            time.sleep(5)
+
 # Get the EC2 instances in an existing cluster if available.
 # Returns a tuple of lists of EC2 instance objects for the masters and slaves
 def get_existing_cluster(conn, opts, cluster_name, die_on_error=True):
     print "Searching for existing cluster " + cluster_name + "..."
-    # Search all the spot instance requests, and copy any tags from the spot instance request to the cluster.
-    spot_instance_requests = conn.get_all_spot_instance_requests()
-    for req in spot_instance_requests:
-        if req.state != u'active':
-            continue
-        name = req.tags.get(u'Name', "")
-        if name.startswith(cluster_name):
-            reservations = conn.get_all_instances(instance_ids=[req.instance_id])
-            for res in reservations:
-                active = [i for i in res.instances if is_active(i)]
-                for instance in active:
-                    if (instance.tags.get(u'Name') == None):
-                        tag_instance(instance, name)
-    # Now proceed to detect master and slaves instances.
     reservations = conn.get_all_instances()
     master_nodes = []
     slave_nodes = []
     for res in reservations:
         active = [i for i in res.instances if is_active(i)]
         for inst in active:
-            name = inst.tags.get(u'Name', "")
-            if name.startswith(cluster_name + "-master"):
+            group_names = [g.name for g in inst.groups]
+            if group_names == [cluster_name + "-master"]:
                 master_nodes.append(inst)
-            elif name.startswith(cluster_name + "-slave"):
+            elif group_names == [cluster_name + "-slaves"]:
                 slave_nodes.append(inst)
     if any((master_nodes, slave_nodes)):
         print ("Found %d master(s), %d slaves" % (len(master_nodes), len(slave_nodes)))
     if master_nodes != [] or not die_on_error:
         return (master_nodes, slave_nodes)
     else:
         if master_nodes == [] and slave_nodes != []:
-            print >> sys.stderr, "ERROR: Could not find master in with name " + cluster_name + "-master"
+            print >> sys.stderr, "ERROR: Could not find master in group " + cluster_name + "-master"
         else:
             print >> sys.stderr, "ERROR: Could not find any existing cluster"
         sys.exit(1)
 
 
 # Deploy configuration files and run setup scripts on a newly launched
 # or started EC2 cluster.
 def setup_cluster(conn, master_nodes, slave_nodes, opts, deploy_ssh_key):
@@ -856,10 +827,7 @@ def real_main():
         # Delete security groups as well
         if opts.delete_groups:
             print "Deleting security groups (this will take some time)..."
-            if opts.security_group_prefix is None:
-                group_names = [cluster_name + "-master", cluster_name + "-slaves"]
-            else:
-                group_names = [opts.security_group_prefix + "-master", opts.security_group_prefix + "-slaves"]
+            group_names = [cluster_name + "-master", cluster_name + "-slaves"]
 
             attempt = 1
             while attempt <= 3:
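Once cluster membership is security-group membership again, a plain group-name filter is enough to enumerate a cluster's machines. A sketch under an assumed region and cluster name, not part of the commit:

```python
# Sketch: list a cluster's machines by security-group membership (boto).
# "us-east-1" and the "test" cluster name are assumptions for illustration.
import boto.ec2

conn = boto.ec2.connect_to_region("us-east-1")
for group in ("test-master", "test-slaves"):
    for res in conn.get_all_instances(filters={"group-name": group}):
        for inst in res.instances:
            print "%s: %s (%s)" % (group, inst.id, inst.state)
```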
