author     haitao.yao <yao.erix@gmail.com>  2013-02-20 10:23:58 +0800
committer  haitao.yao <yao.erix@gmail.com>  2013-02-20 10:23:58 +0800
commit     6a3d44c673008bb12d332d00c9b6f7a3c9e55d0a (patch)
tree       6126520d2486a6a30a29bd16d93bf760f78959ef
parent     7c129388fbdc90cb6abb99470545dba8a2e90adf (diff)
parent     8a992226bd6289cfc11c417e0a17edff7d4a4a87 (diff)
Merge branch 'mesos'
-rw-r--r--  core/src/main/scala/spark/rdd/PartitionPruningRDD.scala | 12
-rw-r--r--  docs/_config.yml                                        |  1
-rw-r--r--  docs/contributing-to-spark.md                           |  2
-rwxr-xr-x  ec2/spark_ec2.py                                        |  9
4 files changed, 22 insertions(+), 2 deletions(-)
diff --git a/core/src/main/scala/spark/rdd/PartitionPruningRDD.scala b/core/src/main/scala/spark/rdd/PartitionPruningRDD.scala
index f2f4fd56d1..41ff62dd22 100644
--- a/core/src/main/scala/spark/rdd/PartitionPruningRDD.scala
+++ b/core/src/main/scala/spark/rdd/PartitionPruningRDD.scala
@@ -40,3 +40,15 @@ class PartitionPruningRDD[T: ClassManifest](
   override protected def getPartitions: Array[Partition] =
     getDependencies.head.asInstanceOf[PruneDependency[T]].partitions
 }
+
+
+object PartitionPruningRDD {
+
+  /**
+   * Create a PartitionPruningRDD. This function can be used to create the PartitionPruningRDD
+   * when its type T is not known at compile time.
+   */
+  def create[T](rdd: RDD[T], partitionFilterFunc: Int => Boolean) = {
+    new PartitionPruningRDD[T](rdd, partitionFilterFunc)(rdd.elementClassManifest)
+  }
+}
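
For context, a minimal usage sketch of the new factory (not part of this commit; the object name, the `local[2]` master, and the sample data are illustrative, assuming the Spark 0.7-era API shown above):

```scala
import spark.SparkContext
import spark.rdd.PartitionPruningRDD

object PruneExample {
  def main(args: Array[String]) {
    val sc = new SparkContext("local[2]", "prune-example")
    val data = sc.parallelize(1 to 100, 10) // an RDD with 10 partitions

    // create() threads rdd.elementClassManifest through for the caller, so
    // this compiles even where T is not statically known (e.g. rdd: RDD[_]).
    val evenPartitions = PartitionPruningRDD.create(data, index => index % 2 == 0)

    println(evenPartitions.partitions.length) // expected: 5
    sc.stop()
  }
}
```

Because `create` supplies the parent RDD's ClassManifest itself, a caller holding only an `RDD[_]` can prune partitions without providing manifest evidence at the call site, which is exactly the gap the doc comment above describes.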
diff --git a/docs/_config.yml b/docs/_config.yml
index 2bd2eecc86..09617e4a1e 100644
--- a/docs/_config.yml
+++ b/docs/_config.yml
@@ -7,3 +7,4 @@ SPARK_VERSION: 0.7.0-SNAPSHOT
 SPARK_VERSION_SHORT: 0.7.0
 SCALA_VERSION: 2.9.2
 MESOS_VERSION: 0.9.0-incubating
+SPARK_ISSUE_TRACKER_URL: https://spark-project.atlassian.net
diff --git a/docs/contributing-to-spark.md b/docs/contributing-to-spark.md
index 14d0dc856b..50feeb2d6c 100644
--- a/docs/contributing-to-spark.md
+++ b/docs/contributing-to-spark.md
@@ -15,7 +15,7 @@ The Spark team welcomes contributions in the form of GitHub pull requests. Here
 But first, make sure that you have [configured a spark-env.sh](configuration.html) with at least
 `SCALA_HOME`, as some of the tests try to spawn subprocesses using this.
 - Add new unit tests for your code. We use [ScalaTest](http://www.scalatest.org/) for testing. Just add a new Suite in `core/src/test`, or methods to an existing Suite.
-- If you'd like to report a bug but don't have time to fix it, you can still post it to our [issue tracker](https://spark-project.atlassian.net), or email the [mailing list](http://www.spark-project.org/mailing-lists.html).
+- If you'd like to report a bug but don't have time to fix it, you can still post it to our [issue tracker]({{site.SPARK_ISSUE_TRACKER_URL}}), or email the [mailing list](http://www.spark-project.org/mailing-lists.html).
 # Licensing of Contributions
diff --git a/ec2/spark_ec2.py b/ec2/spark_ec2.py
index 7967bcac50..66b1faf2cd 100755
--- a/ec2/spark_ec2.py
+++ b/ec2/spark_ec2.py
@@ -179,7 +179,7 @@ def launch_cluster(conn, opts, cluster_name):
     if opts.cluster_type == "mesos":
       master_group.authorize('tcp', 38090, 38090, '0.0.0.0/0')
     if opts.ganglia:
-      master_group.authorize('tcp', 80, 80, '0.0.0.0/0')
+      master_group.authorize('tcp', 5080, 5080, '0.0.0.0/0')
   if slave_group.rules == []: # Group was just now created
     slave_group.authorize(src_group=master_group)
     slave_group.authorize(src_group=slave_group)
@@ -415,6 +415,13 @@ def setup_standalone_cluster(master, slave_nodes, opts):
 def setup_spark_cluster(master, opts):
   ssh(master, opts, "chmod u+x spark-ec2/setup.sh")
   ssh(master, opts, "spark-ec2/setup.sh")
+  if opts.cluster_type == "mesos":
+    print "Mesos cluster started at http://%s:8080" % master
+  elif opts.cluster_type == "standalone":
+    print "Spark standalone cluster started at http://%s:8080" % master
+
+  if opts.ganglia:
+    print "Ganglia started at http://%s:5080/ganglia" % master
 # Wait for a whole cluster (masters, slaves and ZooKeeper) to start up