author    meawoppl <meawoppl@gmail.com>         2015-05-26 09:02:25 -0700
committer Davies Liu <davies@databricks.com>    2015-05-26 09:02:25 -0700
commit    8dbe777703e0aaf47cbdfe98f66d22f723352fb5 (patch)
tree      e88fad77001bc067c9c194e48746cebb9c01e038 /ec2
parent    8948ad3fb5d5d095d3942855960d735f27d97dd5 (diff)
[SPARK-7806][EC2] Fixes that allow the spark_ec2.py tool to run with Python3
I have used this script to launch, destroy, start, and stop clusters successfully.

Author: meawoppl <meawoppl@gmail.com>

Closes #6336 from meawoppl/py3ec2spark and squashes the following commits:

2e87046 [meawoppl] Py3 compat fixes.
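The first hunk below relies on a common single-source Python 2/3 pattern: keep the Python 2 names (raw_input, xrange, the urllib2-style imports) and alias or re-import them when running under Python 3. The following is a minimal sketch of that pattern, not part of this diff; the Python 2 import branch is an assumption about the surrounding code in spark_ec2.py.

import sys

if sys.version < "3":
    # Python 2: urlopen/Request/HTTPError live in urllib2, and
    # raw_input()/xrange() are builtins, so nothing needs aliasing.
    from urllib2 import urlopen, Request, HTTPError   # assumed Python 2 branch
else:
    # Python 3: the same names moved under urllib.request / urllib.error,
    # and aliasing the removed builtins lets the rest of the script keep
    # calling raw_input() and xrange() unchanged.
    from urllib.request import urlopen, Request
    from urllib.error import HTTPError
    raw_input = input
    xrange = range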
Diffstat (limited to 'ec2')
-rwxr-xr-x  ec2/spark_ec2.py  14
1 file changed, 9 insertions, 5 deletions
diff --git a/ec2/spark_ec2.py b/ec2/spark_ec2.py
index c6d5a1f0d0..724811eaa1 100755
--- a/ec2/spark_ec2.py
+++ b/ec2/spark_ec2.py
@@ -19,8 +19,9 @@
# limitations under the License.
#
-from __future__ import with_statement, print_function
+from __future__ import division, print_function, with_statement
+import codecs
import hashlib
import itertools
import logging
@@ -47,6 +48,8 @@ if sys.version < "3":
else:
from urllib.request import urlopen, Request
from urllib.error import HTTPError
+ raw_input = input
+ xrange = range
SPARK_EC2_VERSION = "1.3.1"
SPARK_EC2_DIR = os.path.dirname(os.path.realpath(__file__))
@@ -423,13 +426,14 @@ def get_spark_ami(opts):
b=opts.spark_ec2_git_branch)
ami_path = "%s/%s/%s" % (ami_prefix, opts.region, instance_type)
+ reader = codecs.getreader("ascii")
try:
- ami = urlopen(ami_path).read().strip()
- print("Spark AMI: " + ami)
+ ami = reader(urlopen(ami_path)).read().strip()
except:
print("Could not resolve AMI at: " + ami_path, file=stderr)
sys.exit(1)
+ print("Spark AMI: " + ami)
return ami
@@ -750,7 +754,7 @@ def setup_cluster(conn, master_nodes, slave_nodes, opts, deploy_ssh_key):
'mapreduce', 'spark-standalone', 'tachyon']
if opts.hadoop_major_version == "1":
- modules = filter(lambda x: x != "mapreduce", modules)
+ modules = list(filter(lambda x: x != "mapreduce", modules))
if opts.ganglia:
modules.append('ganglia')
@@ -1160,7 +1164,7 @@ def get_zones(conn, opts):
# Gets the number of items in a partition
def get_partition(total, num_partitions, current_partitions):
- num_slaves_this_zone = total / num_partitions
+ num_slaves_this_zone = total // num_partitions
if (total % num_partitions) - current_partitions > 0:
num_slaves_this_zone += 1
return num_slaves_this_zone
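
Taken together, the remaining hunks work around three behaviour changes in Python 3: urlopen() returns a bytes stream, filter() returns a lazy iterator, and / performs true division. The sketch below is not taken from the patch (the AMI string and module list are made up, and io.BytesIO stands in for a real HTTP response); it shows each issue in isolation.

import codecs
import io

# 1. urlopen() yields bytes under Python 3; wrapping the response with
#    codecs.getreader("ascii") makes .read() return str, as it did on Python 2.
reader = codecs.getreader("ascii")
fake_response = io.BytesIO(b"ami-1234abcd\n")   # stand-in for urlopen(ami_path)
ami = reader(fake_response).read().strip()      # 'ami-1234abcd' as str, not bytes

# 2. filter() is lazy in Python 3, so a later modules.append('ganglia') would
#    fail on a filter object; list() restores a plain, mutable list.
modules = ['spark', 'ephemeral-hdfs', 'mapreduce']
modules = list(filter(lambda x: x != "mapreduce", modules))
modules.append('ganglia')

# 3. With "from __future__ import division" (and on Python 3), "/" is true
#    division; "//" keeps the integer arithmetic get_partition() expects.
num_slaves_this_zone = 10 // 3    # 3, not 3.333...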