diff options
author | Shivaram Venkataraman <shivaram@eecs.berkeley.edu> | 2013-01-27 00:26:00 -0800 |
---|---|---|
committer | Shivaram Venkataraman <shivaram@eecs.berkeley.edu> | 2013-01-27 00:26:00 -0800 |
commit | dc9d3ab6ed7ec2122ec9fdd248e236393601725c (patch) | |
tree | aa79083d73f791d000401901ffd8cfe6c153e7df | |
parent | 0243b081ce4348c3d2955f2c16c0d3a61620be34 (diff) | |
download | spark-dc9d3ab6ed7ec2122ec9fdd248e236393601725c.tar.gz spark-dc9d3ab6ed7ec2122ec9fdd248e236393601725c.tar.bz2 spark-dc9d3ab6ed7ec2122ec9fdd248e236393601725c.zip |
Add option to start ganglia. Also enable Hadoop ports even if cluster type is
not mesos
-rwxr-xr-x | ec2/spark_ec2.py | 23 |
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/ec2/spark_ec2.py b/ec2/spark_ec2.py
index f2385b5b56..dfccb6c238 100755
--- a/ec2/spark_ec2.py
+++ b/ec2/spark_ec2.py
@@ -84,6 +84,9 @@ def parse_args():
       "maximum price (in dollars)")
   parser.add_option("-c", "--cluster-type", default="mesos",
       help="'mesos' for a mesos cluster, 'standalone' for a standalone spark cluster (default: mesos)")
+  parser.add_option("-g", "--ganglia", action="store_true", default=False,
+      help="Setup ganglia monitoring for the cluster. NOTE: The ganglia " +
+        "monitoring page will be publicly accessible")
   parser.add_option("-u", "--user", default="root",
       help="The ssh user you want to connect as (default: root)")
   parser.add_option("--delete-groups", action="store_true", default=False,
@@ -164,22 +167,23 @@ def launch_cluster(conn, opts, cluster_name):
     master_group.authorize(src_group=zoo_group)
     master_group.authorize('tcp', 22, 22, '0.0.0.0/0')
     master_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
+    master_group.authorize('tcp', 50030, 50030, '0.0.0.0/0')
+    master_group.authorize('tcp', 50070, 50070, '0.0.0.0/0')
+    master_group.authorize('tcp', 60070, 60070, '0.0.0.0/0')
     if opts.cluster_type == "mesos":
-      master_group.authorize('tcp', 50030, 50030, '0.0.0.0/0')
-      master_group.authorize('tcp', 50070, 50070, '0.0.0.0/0')
-      master_group.authorize('tcp', 60070, 60070, '0.0.0.0/0')
       master_group.authorize('tcp', 38090, 38090, '0.0.0.0/0')
+    if opts.ganglia:
+      master_group.authorize('tcp', 80, 80, '0.0.0.0/0')
   if slave_group.rules == []: # Group was just now created
     slave_group.authorize(src_group=master_group)
     slave_group.authorize(src_group=slave_group)
     slave_group.authorize(src_group=zoo_group)
     slave_group.authorize('tcp', 22, 22, '0.0.0.0/0')
     slave_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
-    if opts.cluster_type == "mesos":
-      slave_group.authorize('tcp', 50060, 50060, '0.0.0.0/0')
-      slave_group.authorize('tcp', 50075, 50075, '0.0.0.0/0')
-      slave_group.authorize('tcp', 60060, 60060, '0.0.0.0/0')
-      slave_group.authorize('tcp', 60075, 60075, '0.0.0.0/0')
+    slave_group.authorize('tcp', 50060, 50060, '0.0.0.0/0')
+    slave_group.authorize('tcp', 50075, 50075, '0.0.0.0/0')
+    slave_group.authorize('tcp', 60060, 60060, '0.0.0.0/0')
+    slave_group.authorize('tcp', 60075, 60075, '0.0.0.0/0')
   if zoo_group.rules == []: # Group was just now created
     zoo_group.authorize(src_group=master_group)
     zoo_group.authorize(src_group=slave_group)
@@ -363,6 +367,9 @@ def setup_cluster(conn, master_nodes, slave_nodes, zoo_nodes, opts, deploy_ssh_k
   elif opts.cluster_type == "standalone":
     modules = ['ephemeral-hdfs', 'persistent-hdfs', 'spark-standalone']
 
+  if opts.ganglia:
+    modules.append('ganglia')
+
   master = master_nodes[0].public_dns_name
   if deploy_ssh_key:
     print "Copying SSH key %s to master..." % opts.identity_file