path: root/bin/compute-classpath.sh
author     guliangliang <guliangliang@qiyi.com>  2015-03-26 13:28:56 +0000
committer  Sean Owen <sowen@cloudera.com>        2015-03-26 13:28:56 +0000
commit     5b5f0e2b08941bde2655b1aec9b2ae28c377be78 (patch)
tree       7ccda59b932fb0e9e4a92ad9f733b146d5ab92ff /bin/compute-classpath.sh
parent     836c9216599b676ae8f421384f4f20fd35e8c53b (diff)
[SPARK-6491] Spark will put the current working dir to the CLASSPATH
When running "bin/compute-classpath.sh", the output will be:

:/spark/conf:/spark/assembly/target/scala-2.10/spark-assembly-1.3.0-hadoop2.5.0-cdh5.2.0.jar:/spark/lib_managed/jars/datanucleus-rdbms-3.2.9.jar:/spark/lib_managed/jars/datanucleus-api-jdo-3.2.6.jar:/spark/lib_managed/jars/datanucleus-core-3.2.10.jar

Java will add the current working dir to the CLASSPATH if the first ":" exists, which is not what Spark users expect. For example, if I call spark-shell in the folder /root and a "core-site.xml" exists under /root/, Spark will use this file as the Hadoop conf file, even if I have already set HADOOP_CONF_DIR=/etc/hadoop/conf.

Author: guliangliang <guliangliang@qiyi.com>

Closes #5156 from marsishandsome/Spark6491 and squashes the following commits:

5ae214f [guliangliang] use appendToClasspath to change CLASSPATH
b21f3b2 [guliangliang] keep the classpath order
5d1f870 [guliangliang] [SPARK-6491] Spark will put the current working dir to the CLASSPATH
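For illustration only (not part of the commit): the JVM treats an empty classpath entry as ".", so a leading ":" silently puts the working directory first on the classpath. The class name and paths below are hypothetical.

    cd /root                       # suppose a stray core-site.xml lives here
    java -cp ":/spark/conf" Foo    # empty first entry resolves to "." == /root
    java -cp "/spark/conf" Foo     # no empty entry, so /root is never searched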
Diffstat (limited to 'bin/compute-classpath.sh')
-rwxr-xr-x  bin/compute-classpath.sh  81
1 file changed, 41 insertions(+), 40 deletions(-)
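The fix routes every classpath addition through a small helper. As a standalone sketch (hypothetical paths; same logic as the appendToClasspath added in the diff below), it shows why no leading ":" can appear: empty arguments are skipped entirely.

    #!/bin/sh
    # Append an entry to CLASSPATH, but only if it is non-empty.
    appendToClasspath() {
      if [ -n "$1" ]; then
        if [ -n "$CLASSPATH" ]; then
          CLASSPATH="$CLASSPATH:$1"
        else
          CLASSPATH="$1"
        fi
      fi
    }

    CLASSPATH=""
    appendToClasspath ""                  # unset/empty vars contribute nothing
    appendToClasspath "/spark/conf"
    appendToClasspath "/spark/lib/a.jar"
    echo "$CLASSPATH"                     # prints /spark/conf:/spark/lib/a.jar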
diff --git a/bin/compute-classpath.sh b/bin/compute-classpath.sh
index fffff2846e..679dfaf3a3 100755
--- a/bin/compute-classpath.sh
+++ b/bin/compute-classpath.sh
@@ -25,17 +25,24 @@ FWDIR="$(cd "`dirname "$0"`"/..; pwd)"
. "$FWDIR"/bin/load-spark-env.sh
-if [ -n "$SPARK_CLASSPATH" ]; then
- CLASSPATH="$SPARK_CLASSPATH:$SPARK_SUBMIT_CLASSPATH"
-else
- CLASSPATH="$SPARK_SUBMIT_CLASSPATH"
-fi
+function appendToClasspath(){
+ if [ -n "$1" ]; then
+ if [ -n "$CLASSPATH" ]; then
+ CLASSPATH="$CLASSPATH:$1"
+ else
+ CLASSPATH="$1"
+ fi
+ fi
+}
+
+appendToClasspath "$SPARK_CLASSPATH"
+appendToClasspath "$SPARK_SUBMIT_CLASSPATH"
# Build up classpath
if [ -n "$SPARK_CONF_DIR" ]; then
- CLASSPATH="$CLASSPATH:$SPARK_CONF_DIR"
+ appendToClasspath "$SPARK_CONF_DIR"
else
- CLASSPATH="$CLASSPATH:$FWDIR/conf"
+ appendToClasspath "$FWDIR/conf"
fi
ASSEMBLY_DIR="$FWDIR/assembly/target/scala-$SPARK_SCALA_VERSION"
@@ -51,20 +58,20 @@ if [ -n "$SPARK_PREPEND_CLASSES" ]; then
echo "NOTE: SPARK_PREPEND_CLASSES is set, placing locally compiled Spark"\
"classes ahead of assembly." >&2
# Spark classes
- CLASSPATH="$CLASSPATH:$FWDIR/core/target/scala-$SPARK_SCALA_VERSION/classes"
- CLASSPATH="$CLASSPATH:$FWDIR/repl/target/scala-$SPARK_SCALA_VERSION/classes"
- CLASSPATH="$CLASSPATH:$FWDIR/mllib/target/scala-$SPARK_SCALA_VERSION/classes"
- CLASSPATH="$CLASSPATH:$FWDIR/bagel/target/scala-$SPARK_SCALA_VERSION/classes"
- CLASSPATH="$CLASSPATH:$FWDIR/graphx/target/scala-$SPARK_SCALA_VERSION/classes"
- CLASSPATH="$CLASSPATH:$FWDIR/streaming/target/scala-$SPARK_SCALA_VERSION/classes"
- CLASSPATH="$CLASSPATH:$FWDIR/tools/target/scala-$SPARK_SCALA_VERSION/classes"
- CLASSPATH="$CLASSPATH:$FWDIR/sql/catalyst/target/scala-$SPARK_SCALA_VERSION/classes"
- CLASSPATH="$CLASSPATH:$FWDIR/sql/core/target/scala-$SPARK_SCALA_VERSION/classes"
- CLASSPATH="$CLASSPATH:$FWDIR/sql/hive/target/scala-$SPARK_SCALA_VERSION/classes"
- CLASSPATH="$CLASSPATH:$FWDIR/sql/hive-thriftserver/target/scala-$SPARK_SCALA_VERSION/classes"
- CLASSPATH="$CLASSPATH:$FWDIR/yarn/stable/target/scala-$SPARK_SCALA_VERSION/classes"
+ appendToClasspath "$FWDIR/core/target/scala-$SPARK_SCALA_VERSION/classes"
+ appendToClasspath "$FWDIR/repl/target/scala-$SPARK_SCALA_VERSION/classes"
+ appendToClasspath "$FWDIR/mllib/target/scala-$SPARK_SCALA_VERSION/classes"
+ appendToClasspath "$FWDIR/bagel/target/scala-$SPARK_SCALA_VERSION/classes"
+ appendToClasspath "$FWDIR/graphx/target/scala-$SPARK_SCALA_VERSION/classes"
+ appendToClasspath "$FWDIR/streaming/target/scala-$SPARK_SCALA_VERSION/classes"
+ appendToClasspath "$FWDIR/tools/target/scala-$SPARK_SCALA_VERSION/classes"
+ appendToClasspath "$FWDIR/sql/catalyst/target/scala-$SPARK_SCALA_VERSION/classes"
+ appendToClasspath "$FWDIR/sql/core/target/scala-$SPARK_SCALA_VERSION/classes"
+ appendToClasspath "$FWDIR/sql/hive/target/scala-$SPARK_SCALA_VERSION/classes"
+ appendToClasspath "$FWDIR/sql/hive-thriftserver/target/scala-$SPARK_SCALA_VERSION/classes"
+ appendToClasspath "$FWDIR/yarn/stable/target/scala-$SPARK_SCALA_VERSION/classes"
# Jars for shaded deps in their original form (copied here during build)
- CLASSPATH="$CLASSPATH:$FWDIR/core/target/jars/*"
+ appendToClasspath "$FWDIR/core/target/jars/*"
fi
# Use spark-assembly jar from either RELEASE or assembly directory
@@ -106,7 +113,7 @@ if [ $(command -v "$JAR_CMD") ] ; then
fi
fi
-CLASSPATH="$CLASSPATH:$ASSEMBLY_JAR"
+appendToClasspath "$ASSEMBLY_JAR"
# When Hive support is needed, Datanucleus jars must be included on the classpath.
# Datanucleus jars do not work if only included in the uber jar as plugin.xml metadata is lost.
@@ -124,37 +131,31 @@ datanucleus_jars="$(find "$datanucleus_dir" 2>/dev/null | grep "datanucleus-.*\\
datanucleus_jars="$(echo "$datanucleus_jars" | tr "\n" : | sed s/:$//g)"
if [ -n "$datanucleus_jars" ]; then
- CLASSPATH="$CLASSPATH:$datanucleus_jars"
+ appendToClasspath "$datanucleus_jars"
fi
# Add test classes if we're running from SBT or Maven with SPARK_TESTING set to 1
if [[ $SPARK_TESTING == 1 ]]; then
- CLASSPATH="$CLASSPATH:$FWDIR/core/target/scala-$SPARK_SCALA_VERSION/test-classes"
- CLASSPATH="$CLASSPATH:$FWDIR/repl/target/scala-$SPARK_SCALA_VERSION/test-classes"
- CLASSPATH="$CLASSPATH:$FWDIR/mllib/target/scala-$SPARK_SCALA_VERSION/test-classes"
- CLASSPATH="$CLASSPATH:$FWDIR/bagel/target/scala-$SPARK_SCALA_VERSION/test-classes"
- CLASSPATH="$CLASSPATH:$FWDIR/graphx/target/scala-$SPARK_SCALA_VERSION/test-classes"
- CLASSPATH="$CLASSPATH:$FWDIR/streaming/target/scala-$SPARK_SCALA_VERSION/test-classes"
- CLASSPATH="$CLASSPATH:$FWDIR/sql/catalyst/target/scala-$SPARK_SCALA_VERSION/test-classes"
- CLASSPATH="$CLASSPATH:$FWDIR/sql/core/target/scala-$SPARK_SCALA_VERSION/test-classes"
- CLASSPATH="$CLASSPATH:$FWDIR/sql/hive/target/scala-$SPARK_SCALA_VERSION/test-classes"
+ appendToClasspath "$FWDIR/core/target/scala-$SPARK_SCALA_VERSION/test-classes"
+ appendToClasspath "$FWDIR/repl/target/scala-$SPARK_SCALA_VERSION/test-classes"
+ appendToClasspath "$FWDIR/mllib/target/scala-$SPARK_SCALA_VERSION/test-classes"
+ appendToClasspath "$FWDIR/bagel/target/scala-$SPARK_SCALA_VERSION/test-classes"
+ appendToClasspath "$FWDIR/graphx/target/scala-$SPARK_SCALA_VERSION/test-classes"
+ appendToClasspath "$FWDIR/streaming/target/scala-$SPARK_SCALA_VERSION/test-classes"
+ appendToClasspath "$FWDIR/sql/catalyst/target/scala-$SPARK_SCALA_VERSION/test-classes"
+ appendToClasspath "$FWDIR/sql/core/target/scala-$SPARK_SCALA_VERSION/test-classes"
+ appendToClasspath "$FWDIR/sql/hive/target/scala-$SPARK_SCALA_VERSION/test-classes"
fi
# Add hadoop conf dir if given -- otherwise FileSystem.*, etc fail !
# Note, this assumes that there is either a HADOOP_CONF_DIR or YARN_CONF_DIR which hosts
# the configuration files.
-if [ -n "$HADOOP_CONF_DIR" ]; then
- CLASSPATH="$CLASSPATH:$HADOOP_CONF_DIR"
-fi
-if [ -n "$YARN_CONF_DIR" ]; then
- CLASSPATH="$CLASSPATH:$YARN_CONF_DIR"
-fi
+appendToClasspath "$HADOOP_CONF_DIR"
+appendToClasspath "$YARN_CONF_DIR"
# To allow for distributions to append needed libraries to the classpath (e.g. when
# using the "hadoop-provided" profile to build Spark), check SPARK_DIST_CLASSPATH and
# append it to the final classpath.
-if [ -n "$SPARK_DIST_CLASSPATH" ]; then
- CLASSPATH="$CLASSPATH:$SPARK_DIST_CLASSPATH"
-fi
+appendToClasspath "$SPARK_DIST_CLASSPATH"
echo "$CLASSPATH"
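A quick check of the fix (a sketch, not part of the commit): verify the script's output no longer starts with ":", the empty entry that let the JVM pick up the current directory.

    CP="$(./bin/compute-classpath.sh)"
    case "$CP" in
      :*) echo "BUG: leading ':' puts the current dir on the classpath" >&2 ;;
      *)  echo "OK: no leading ':'" ;;
    esac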