author    Evan Chan <ev@ooyala.com>    2013-06-24 15:39:52 -0700
committer Evan Chan <ev@ooyala.com>    2013-06-24 15:39:52 -0700
commit    0bcaf036050c3d2b4389339927239e0e35bf02ff (patch)
tree      61a54eb812190b2e5d4d2117e091a41534a42f79 /run
parent    4cda8f865a003ab354890c4915ea1b5a7674f5b0 (diff)
Split out source distro CLASSPATH logic to a separate script
Diffstat (limited to 'run')
-rwxr-xr-x  run | 123
1 file changed, 19 insertions(+), 104 deletions(-)
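
For orientation before reading the hunks: the change makes run dispatch on a RELEASE marker file at the top of the distribution. A condensed sketch of the resulting control flow, paraphrased from the diff below (not the verbatim script):

#!/bin/bash
# Condensed sketch of run's post-change flow (paraphrased from this diff).
FWDIR="$(cd `dirname $0`; pwd)"

if [ -f "$FWDIR/RELEASE" ]; then
  # Binary distribution: jars are pre-packaged, so run with plain java
  # and a classpath of SPARK_CLASSPATH plus the bundled jars.
  RUNNER="java"
  CLASSPATH="$SPARK_CLASSPATH:$FWDIR/jars/*"
  EXTRA_ARGS="$JAVA_OPTS"   # a bare JVM does not read JAVA_OPTS on its own
else
  # Source distribution: the module-by-module classpath logic that used to
  # live inline here is now sourced from a separate script.
  . "$FWDIR/set-dev-classpath.sh"
fi

export CLASSPATH          # needed for spark-shell
exec "$RUNNER" -cp "$CLASSPATH" $EXTRA_ARGS "$@"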
diff --git a/run b/run
index c0065c53f1..30a2885a4d 100755
--- a/run
+++ b/run
@@ -1,7 +1,5 @@
#!/bin/bash
-SCALA_VERSION=2.9.3
-
# Figure out where the Scala framework is installed
FWDIR="$(cd `dirname $0`; pwd)"
@@ -46,36 +44,6 @@ case "$1" in
;;
esac
-if [ "$SPARK_LAUNCH_WITH_SCALA" == "1" ]; then
- if [ "$SCALA_HOME" ]; then
- RUNNER="${SCALA_HOME}/bin/scala"
- else
- if [ `command -v scala` ]; then
- RUNNER="scala"
- else
- echo "SCALA_HOME is not set and scala is not in PATH" >&2
- exit 1
- fi
- fi
-else
- if [ `command -v java` ]; then
- RUNNER="java"
- else
- if [ -z "$JAVA_HOME" ]; then
- echo "JAVA_HOME is not set" >&2
- exit 1
- fi
- RUNNER="${JAVA_HOME}/bin/java"
- fi
- if [ -z "$SCALA_LIBRARY_PATH" ]; then
- if [ -z "$SCALA_HOME" ]; then
- echo "SCALA_HOME is not set" >&2
- exit 1
- fi
- SCALA_LIBRARY_PATH="$SCALA_HOME/lib"
- fi
-fi
-
# Figure out how much memory to use per executor and set it as an environment
# variable so that our process sees it and can report it to Mesos
if [ -z "$SPARK_MEM" ] ; then
@@ -93,64 +61,28 @@ if [ -e $FWDIR/conf/java-opts ] ; then
fi
export JAVA_OPTS
-CORE_DIR="$FWDIR/core"
-REPL_DIR="$FWDIR/repl"
-REPL_BIN_DIR="$FWDIR/repl-bin"
-EXAMPLES_DIR="$FWDIR/examples"
-BAGEL_DIR="$FWDIR/bagel"
-STREAMING_DIR="$FWDIR/streaming"
-PYSPARK_DIR="$FWDIR/python"
-
-# Exit if the user hasn't compiled Spark
-if [ ! -e "$CORE_DIR/target" ]; then
- echo "Failed to find Spark classes in $CORE_DIR/target" >&2
- echo "You need to compile Spark before running this program" >&2
- exit 1
-fi
+# Check if this is a binary distribution or source distribution
+# and build up the classpath appropriately
+if [ -f "$FWDIR/RELEASE" ]; then
+ echo "This is a binary distribution"
-if [[ "$@" = *repl* && ! -e "$REPL_DIR/target" ]]; then
- echo "Failed to find Spark classes in $REPL_DIR/target" >&2
- echo "You need to compile Spark repl module before running this program" >&2
- exit 1
-fi
+ if [ `command -v java` ]; then
+ RUNNER="java"
+ else
+ if [ -z "$JAVA_HOME" ]; then
+ echo "JAVA_HOME is not set" >&2
+ exit 1
+ fi
+ RUNNER="${JAVA_HOME}/bin/java"
+ fi
-# Build up classpath
-CLASSPATH="$SPARK_CLASSPATH"
-CLASSPATH="$CLASSPATH:$FWDIR/conf"
-CLASSPATH="$CLASSPATH:$CORE_DIR/target/scala-$SCALA_VERSION/classes"
-if [ -n "$SPARK_TESTING" ] ; then
- CLASSPATH="$CLASSPATH:$CORE_DIR/target/scala-$SCALA_VERSION/test-classes"
- CLASSPATH="$CLASSPATH:$STREAMING_DIR/target/scala-$SCALA_VERSION/test-classes"
-fi
-CLASSPATH="$CLASSPATH:$CORE_DIR/src/main/resources"
-CLASSPATH="$CLASSPATH:$REPL_DIR/target/scala-$SCALA_VERSION/classes"
-CLASSPATH="$CLASSPATH:$EXAMPLES_DIR/target/scala-$SCALA_VERSION/classes"
-CLASSPATH="$CLASSPATH:$STREAMING_DIR/target/scala-$SCALA_VERSION/classes"
-CLASSPATH="$CLASSPATH:$STREAMING_DIR/lib/org/apache/kafka/kafka/0.7.2-spark/*" # <-- our in-project Kafka Jar
-if [ -e "$FWDIR/lib_managed" ]; then
- CLASSPATH="$CLASSPATH:$FWDIR/lib_managed/jars/*"
- CLASSPATH="$CLASSPATH:$FWDIR/lib_managed/bundles/*"
-fi
-CLASSPATH="$CLASSPATH:$REPL_DIR/lib/*"
-if [ -e $REPL_BIN_DIR/target ]; then
- for jar in `find "$REPL_BIN_DIR/target" -name 'spark-repl-*-shaded-hadoop*.jar'`; do
- CLASSPATH="$CLASSPATH:$jar"
- done
-fi
-CLASSPATH="$CLASSPATH:$BAGEL_DIR/target/scala-$SCALA_VERSION/classes"
-for jar in `find $PYSPARK_DIR/lib -name '*jar'`; do
- CLASSPATH="$CLASSPATH:$jar"
-done
+ CLASSPATH="$SPARK_CLASSPATH:$FWDIR/jars/*"
-# Figure out the JAR file that our examples were packaged into. This includes a bit of a hack
-# to avoid the -sources and -doc packages that are built by publish-local.
-if [ -e "$EXAMPLES_DIR/target/scala-$SCALA_VERSION/spark-examples"*[0-9T].jar ]; then
- # Use the JAR from the SBT build
- export SPARK_EXAMPLES_JAR=`ls "$EXAMPLES_DIR/target/scala-$SCALA_VERSION/spark-examples"*[0-9T].jar`
-fi
-if [ -e "$EXAMPLES_DIR/target/spark-examples-"*hadoop[12].jar ]; then
- # Use the JAR from the Maven build
- export SPARK_EXAMPLES_JAR=`ls "$EXAMPLES_DIR/target/spark-examples-"*hadoop[12].jar`
+ # The JVM doesn't read JAVA_OPTS by default so we need to pass it in
+ EXTRA_ARGS="$JAVA_OPTS"
+else
+ echo "This is a source distribution"
+ . "$FWDIR/set-dev-classpath.sh"
fi
# Add hadoop conf dir - else FileSystem.*, etc fail !
@@ -163,22 +95,5 @@ if [ "x" != "x$YARN_CONF_DIR" ]; then
CLASSPATH="$CLASSPATH:$YARN_CONF_DIR"
fi
-
-# Figure out whether to run our class with java or with the scala launcher.
-# In most cases, we'd prefer to execute our process with java because scala
-# creates a shell script as the parent of its Java process, which makes it
-# hard to kill the child with stuff like Process.destroy(). However, for
-# the Spark shell, the wrapper is necessary to properly reset the terminal
-# when we exit, so we allow it to set a variable to launch with scala.
-if [ "$SPARK_LAUNCH_WITH_SCALA" == "1" ]; then
- EXTRA_ARGS="" # Java options will be passed to scala as JAVA_OPTS
-else
- CLASSPATH="$CLASSPATH:$SCALA_LIBRARY_PATH/scala-library.jar"
- CLASSPATH="$CLASSPATH:$SCALA_LIBRARY_PATH/scala-compiler.jar"
- CLASSPATH="$CLASSPATH:$SCALA_LIBRARY_PATH/jline.jar"
- # The JVM doesn't read JAVA_OPTS by default so we need to pass it in
- EXTRA_ARGS="$JAVA_OPTS"
-fi
-
export CLASSPATH # Needed for spark-shell
exec "$RUNNER" -cp "$CLASSPATH" $EXTRA_ARGS "$@"