author    | alyaxey <oleksii.sliusarenko@grammarly.com>      | 2015-05-19 16:45:52 -0700
committer | Shivaram Venkataraman <shivaram@cs.berkeley.edu> | 2015-05-19 16:45:52 -0700
commit    | 2bc5e0616d878b09daa8e31a7a1fdb7127bca079 (patch)
tree      | 7d160284a890c219e4ee4d1ca2ea1dde14bfaf5a /ec2/spark_ec2.py
parent    | bcb1ff81468eb4afc7c03b2bca18e99cc1ccf6b8 (diff)
[SPARK-6246] [EC2] fixed support for more than 100 nodes
This is a small fix, but it is important for Amazon EC2 users because, as the ticket states, spark-ec2 currently "can't handle clusters with > 100 nodes".
Author: alyaxey <oleksii.sliusarenko@grammarly.com>
Closes #6267 from alyaxey/ec2_100_nodes_fix and squashes the following commits:
1e0d747 [alyaxey] [SPARK-6246] fixed support for more than 100 nodes
Diffstat (limited to 'ec2/spark_ec2.py')
-rwxr-xr-x | ec2/spark_ec2.py | 6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/ec2/spark_ec2.py b/ec2/spark_ec2.py
index be92d5f45a..c6d5a1f0d0 100755
--- a/ec2/spark_ec2.py
+++ b/ec2/spark_ec2.py
@@ -864,7 +864,11 @@ def wait_for_cluster_state(conn, opts, cluster_instances, cluster_state):
         for i in cluster_instances:
             i.update()

-        statuses = conn.get_all_instance_status(instance_ids=[i.id for i in cluster_instances])
+        max_batch = 100
+        statuses = []
+        for j in xrange(0, len(cluster_instances), max_batch):
+            batch = [i.id for i in cluster_instances[j:j + max_batch]]
+            statuses.extend(conn.get_all_instance_status(instance_ids=batch))

         if cluster_state == 'ssh-ready':
             if all(i.state == 'running' for i in cluster_instances) and \
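For context: the patch works around EC2's per-request cap of 100 explicitly listed instance IDs for DescribeInstanceStatus by querying the statuses in chunks and concatenating the results. Below is a minimal, self-contained sketch of the same chunking pattern, not the actual spark_ec2.py code; the `fetch_statuses` helper and `instance_ids` list are hypothetical stand-ins, while `conn.get_all_instance_status` is the boto 2 EC2 call used in the patch.

```python
MAX_BATCH = 100  # EC2 accepts at most 100 explicit instance IDs per DescribeInstanceStatus call


def fetch_statuses(conn, instance_ids):
    """Query instance statuses in chunks of at most MAX_BATCH IDs.

    Sketch of the batching pattern from the patch: any API with a
    per-request ID limit can be wrapped the same way.
    """
    statuses = []
    for start in range(0, len(instance_ids), MAX_BATCH):
        batch = instance_ids[start:start + MAX_BATCH]
        statuses.extend(conn.get_all_instance_status(instance_ids=batch))
    return statuses
```

Because the chunked calls are flattened back into a single list of statuses, the return shape is unchanged and callers such as wait_for_cluster_state need no further modification.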