author     Prashant Sharma <prashant.s@imaginea.com>    2014-09-08 10:24:15 -0700
committer  Andrew Or <andrewor14@gmail.com>             2014-09-08 10:24:15 -0700
commit     e16a8e7db5a3b1065b14baf89cb723a59b99226b (patch)
tree       09d5b9bd510325047aa20f62f215184e46367bdb /bin
parent     711356b422c66e2a80377a9f43fce97282460520 (diff)
download   spark-e16a8e7db5a3b1065b14baf89cb723a59b99226b.tar.gz
           spark-e16a8e7db5a3b1065b14baf89cb723a59b99226b.tar.bz2
           spark-e16a8e7db5a3b1065b14baf89cb723a59b99226b.zip
SPARK-3337 Paranoid quoting in shell to allow install dirs with spaces within.
... Tested! TBH, it isn't a great idea to have a directory with spaces in its name: Emacs doesn't like it, then Hadoop doesn't like it, and so on...

Author: Prashant Sharma <prashant.s@imaginea.com>

Closes #2229 from ScrapCodes/SPARK-3337/quoting-shell-scripts and squashes the following commits:

d4ad660 [Prashant Sharma] SPARK-3337 Paranoid quoting in shell to allow install dirs with spaces within.
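For context, a minimal sketch of the failure mode this commit addresses; the install path is hypothetical and this demo script is not part of the patch:

#!/usr/bin/env bash
# Demo: suppose this script lives at "/tmp/my spark/bin/demo.sh".
#
# Broken form: $0 expands unquoted and word-splits at the space, so
# dirname receives two operands and the computed path is wrong.
#   FWDIR="$(cd `dirname $0`/..; pwd)"
#
# Patched form: every expansion is quoted, including "$0" inside the
# backticks, so dirname and cd each see exactly one argument.
FWDIR="$(cd "`dirname "$0"`"/..; pwd)"
echo "Spark home resolved to: $FWDIR"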
Diffstat (limited to 'bin')
-rwxr-xr-x  bin/beeline               |  2
-rwxr-xr-x  bin/compute-classpath.sh  | 12
-rw-r--r--  bin/load-spark-env.sh     |  4
-rwxr-xr-x  bin/pyspark               | 20
-rwxr-xr-x  bin/run-example           |  8
-rwxr-xr-x  bin/spark-class           | 20
-rwxr-xr-x  bin/spark-shell           | 10
-rwxr-xr-x  bin/spark-sql             |  8
-rwxr-xr-x  bin/spark-submit          |  4
9 files changed, 44 insertions, 44 deletions
diff --git a/bin/beeline b/bin/beeline
index 1bda4dba50..3fcb6df343 100755
--- a/bin/beeline
+++ b/bin/beeline
@@ -24,7 +24,7 @@
set -o posix
# Figure out where Spark is installed
-FWDIR="$(cd `dirname $0`/..; pwd)"
+FWDIR="$(cd "`dirname "$0"`"/..; pwd)"
CLASS="org.apache.hive.beeline.BeeLine"
exec "$FWDIR/bin/spark-class" $CLASS "$@"
diff --git a/bin/compute-classpath.sh b/bin/compute-classpath.sh
index 16b794a159..15c6779402 100755
--- a/bin/compute-classpath.sh
+++ b/bin/compute-classpath.sh
@@ -23,9 +23,9 @@
SCALA_VERSION=2.10
# Figure out where Spark is installed
-FWDIR="$(cd `dirname $0`/..; pwd)"
+FWDIR="$(cd "`dirname "$0"`"/..; pwd)"
-. $FWDIR/bin/load-spark-env.sh
+. "$FWDIR"/bin/load-spark-env.sh
# Build up classpath
CLASSPATH="$SPARK_CLASSPATH:$SPARK_SUBMIT_CLASSPATH:$FWDIR/conf"
@@ -63,7 +63,7 @@ else
assembly_folder="$ASSEMBLY_DIR"
fi
-num_jars=$(ls "$assembly_folder" | grep "spark-assembly.*hadoop.*\.jar" | wc -l)
+num_jars="$(ls "$assembly_folder" | grep "spark-assembly.*hadoop.*\.jar" | wc -l)"
if [ "$num_jars" -eq "0" ]; then
echo "Failed to find Spark assembly in $assembly_folder"
echo "You need to build Spark before running this program."
@@ -77,7 +77,7 @@ if [ "$num_jars" -gt "1" ]; then
exit 1
fi
-ASSEMBLY_JAR=$(ls "$assembly_folder"/spark-assembly*hadoop*.jar 2>/dev/null)
+ASSEMBLY_JAR="$(ls "$assembly_folder"/spark-assembly*hadoop*.jar 2>/dev/null)"
# Verify that versions of java used to build the jars and run Spark are compatible
jar_error_check=$("$JAR_CMD" -tf "$ASSEMBLY_JAR" nonexistent/class/path 2>&1)
@@ -103,8 +103,8 @@ else
datanucleus_dir="$FWDIR"/lib_managed/jars
fi
-datanucleus_jars=$(find "$datanucleus_dir" 2>/dev/null | grep "datanucleus-.*\\.jar")
-datanucleus_jars=$(echo "$datanucleus_jars" | tr "\n" : | sed s/:$//g)
+datanucleus_jars="$(find "$datanucleus_dir" 2>/dev/null | grep "datanucleus-.*\\.jar")"
+datanucleus_jars="$(echo "$datanucleus_jars" | tr "\n" : | sed s/:$//g)"
if [ -n "$datanucleus_jars" ]; then
hive_files=$("$JAR_CMD" -tf "$ASSEMBLY_JAR" org/apache/hadoop/hive/ql/exec 2>/dev/null)
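A side note on the num_jars and ASSEMBLY_JAR changes above: bash does not word-split the right-hand side of a plain assignment, so the added outer quotes are defensive rather than strictly required, in keeping with the commit's "paranoid" theme. An illustrative sketch (paths hypothetical):

# Both assignments are safe even if the output contains spaces:
jars=$(ls /tmp 2>/dev/null)       # no word splitting in an assignment
jars="$(ls /tmp 2>/dev/null)"     # equivalent; paranoid but harmless
# Quoting becomes mandatory once the value is used as an argument:
printf '%s\n' "$jars"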
diff --git a/bin/load-spark-env.sh b/bin/load-spark-env.sh
index 493d3785a0..6d4231b204 100644
--- a/bin/load-spark-env.sh
+++ b/bin/load-spark-env.sh
@@ -25,9 +25,9 @@ if [ -z "$SPARK_ENV_LOADED" ]; then
export SPARK_ENV_LOADED=1
# Returns the parent of the directory this script lives in.
- parent_dir="$(cd `dirname $0`/..; pwd)"
+ parent_dir="$(cd "`dirname "$0"`"/..; pwd)"
- user_conf_dir=${SPARK_CONF_DIR:-"$parent_dir/conf"}
+ user_conf_dir="${SPARK_CONF_DIR:-"$parent_dir"/conf}"
if [ -f "${user_conf_dir}/spark-env.sh" ]; then
# Promote all variable declarations to environment (exported) variables
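The user_conf_dir change above combines a default-value expansion with inner quoting; bash permits nested double quotes inside ${...}. A small sketch with hypothetical names:

# Falls back to "$HOME"/conf when MY_CONF_DIR is unset or empty; the
# inner quotes keep a space in $HOME from splitting the default value.
conf_dir="${MY_CONF_DIR:-"$HOME"/conf}"
echo "Using config from: $conf_dir"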
diff --git a/bin/pyspark b/bin/pyspark
index 26a16dd600..5142411e36 100755
--- a/bin/pyspark
+++ b/bin/pyspark
@@ -18,18 +18,18 @@
#
# Figure out where Spark is installed
-FWDIR="$(cd `dirname $0`/..; pwd)"
+FWDIR="$(cd "`dirname "$0"`"/..; pwd)"
# Export this as SPARK_HOME
export SPARK_HOME="$FWDIR"
-source $FWDIR/bin/utils.sh
+source "$FWDIR/bin/utils.sh"
SCALA_VERSION=2.10
function usage() {
echo "Usage: ./bin/pyspark [options]" 1>&2
- $FWDIR/bin/spark-submit --help 2>&1 | grep -v Usage 1>&2
+ "$FWDIR"/bin/spark-submit --help 2>&1 | grep -v Usage 1>&2
exit 0
}
@@ -48,7 +48,7 @@ if [ ! -f "$FWDIR/RELEASE" ]; then
fi
fi
-. $FWDIR/bin/load-spark-env.sh
+. "$FWDIR"/bin/load-spark-env.sh
# Figure out which Python executable to use
if [[ -z "$PYSPARK_PYTHON" ]]; then
@@ -57,12 +57,12 @@ fi
export PYSPARK_PYTHON
# Add the PySpark classes to the Python path:
-export PYTHONPATH=$SPARK_HOME/python/:$PYTHONPATH
-export PYTHONPATH=$SPARK_HOME/python/lib/py4j-0.8.2.1-src.zip:$PYTHONPATH
+export PYTHONPATH="$SPARK_HOME/python/:$PYTHONPATH"
+export PYTHONPATH="$SPARK_HOME/python/lib/py4j-0.8.2.1-src.zip:$PYTHONPATH"
# Load the PySpark shell.py script when ./pyspark is used interactively:
-export OLD_PYTHONSTARTUP=$PYTHONSTARTUP
-export PYTHONSTARTUP=$FWDIR/python/pyspark/shell.py
+export OLD_PYTHONSTARTUP="$PYTHONSTARTUP"
+export PYTHONSTARTUP="$FWDIR/python/pyspark/shell.py"
# If IPython options are specified, assume user wants to run IPython
if [[ -n "$IPYTHON_OPTS" ]]; then
@@ -99,10 +99,10 @@ fi
if [[ "$1" =~ \.py$ ]]; then
echo -e "\nWARNING: Running python applications through ./bin/pyspark is deprecated as of Spark 1.0." 1>&2
echo -e "Use ./bin/spark-submit <python file>\n" 1>&2
- primary=$1
+ primary="$1"
shift
gatherSparkSubmitOpts "$@"
- exec $FWDIR/bin/spark-submit "${SUBMISSION_OPTS[@]}" $primary "${APPLICATION_OPTS[@]}"
+ exec "$FWDIR"/bin/spark-submit "${SUBMISSION_OPTS[@]}" "$primary" "${APPLICATION_OPTS[@]}"
else
# PySpark shell requires special handling downstream
export PYSPARK_SHELL=1
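The pyspark change quotes "$1" and "$primary" so that a script name containing spaces survives the hand-off to spark-submit. A hypothetical sketch of the same pattern (demo stands in for the pyspark/spark-submit hand-off):

demo() {
  local primary="$1"; shift          # e.g. "my app.py" stays one word
  printf 'primary=[%s]\n' "$primary"
  printf 'arg=[%s]\n' "$@"           # each remaining arg, one per line
}
demo "my app.py" --master "local[2]"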
diff --git a/bin/run-example b/bin/run-example
index 68a35702ed..34dd71c718 100755
--- a/bin/run-example
+++ b/bin/run-example
@@ -19,7 +19,7 @@
SCALA_VERSION=2.10
-FWDIR="$(cd `dirname $0`/..; pwd)"
+FWDIR="$(cd "`dirname "$0"`"/..; pwd)"
export SPARK_HOME="$FWDIR"
EXAMPLES_DIR="$FWDIR"/examples
@@ -35,12 +35,12 @@ else
fi
if [ -f "$FWDIR/RELEASE" ]; then
- export SPARK_EXAMPLES_JAR=`ls "$FWDIR"/lib/spark-examples-*hadoop*.jar`
+ export SPARK_EXAMPLES_JAR="`ls "$FWDIR"/lib/spark-examples-*hadoop*.jar`"
elif [ -e "$EXAMPLES_DIR"/target/scala-$SCALA_VERSION/spark-examples-*hadoop*.jar ]; then
- export SPARK_EXAMPLES_JAR=`ls "$EXAMPLES_DIR"/target/scala-$SCALA_VERSION/spark-examples-*hadoop*.jar`
+ export SPARK_EXAMPLES_JAR="`ls "$EXAMPLES_DIR"/target/scala-$SCALA_VERSION/spark-examples-*hadoop*.jar`"
fi
-if [[ -z $SPARK_EXAMPLES_JAR ]]; then
+if [[ -z "$SPARK_EXAMPLES_JAR" ]]; then
echo "Failed to find Spark examples assembly in $FWDIR/lib or $FWDIR/examples/target" 1>&2
echo "You need to build Spark before running this program" 1>&2
exit 1
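One nuance in the run-example change: inside [[ ]] bash suppresses word splitting, so the unquoted test was already safe; the quotes are added for consistency. Illustration:

unset JAR
[[ -z $JAR ]]   && echo "empty (unquoted, still safe inside [[ ]])"
[[ -z "$JAR" ]] && echo "empty (quoted, matches the commit's style)"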
diff --git a/bin/spark-class b/bin/spark-class
index c6543545a5..5f5f9ea748 100755
--- a/bin/spark-class
+++ b/bin/spark-class
@@ -27,12 +27,12 @@ esac
SCALA_VERSION=2.10
# Figure out where Spark is installed
-FWDIR="$(cd `dirname $0`/..; pwd)"
+FWDIR="$(cd "`dirname "$0"`"/..; pwd)"
# Export this as SPARK_HOME
export SPARK_HOME="$FWDIR"
-. $FWDIR/bin/load-spark-env.sh
+. "$FWDIR"/bin/load-spark-env.sh
if [ -z "$1" ]; then
echo "Usage: spark-class <class> [<args>]" 1>&2
@@ -105,7 +105,7 @@ else
exit 1
fi
fi
-JAVA_VERSION=$($RUNNER -version 2>&1 | sed 's/java version "\(.*\)\.\(.*\)\..*"/\1\2/; 1q')
+JAVA_VERSION=$("$RUNNER" -version 2>&1 | sed 's/java version "\(.*\)\.\(.*\)\..*"/\1\2/; 1q')
# Set JAVA_OPTS to be able to load native libraries and to set heap size
if [ "$JAVA_VERSION" -ge 18 ]; then
@@ -117,7 +117,7 @@ JAVA_OPTS="$JAVA_OPTS -Xms$OUR_JAVA_MEM -Xmx$OUR_JAVA_MEM"
# Load extra JAVA_OPTS from conf/java-opts, if it exists
if [ -e "$FWDIR/conf/java-opts" ] ; then
- JAVA_OPTS="$JAVA_OPTS `cat $FWDIR/conf/java-opts`"
+ JAVA_OPTS="$JAVA_OPTS `cat "$FWDIR"/conf/java-opts`"
fi
# Attention: when changing the way the JAVA_OPTS are assembled, the change must be reflected in CommandUtils.scala!
@@ -126,21 +126,21 @@ TOOLS_DIR="$FWDIR"/tools
SPARK_TOOLS_JAR=""
if [ -e "$TOOLS_DIR"/target/scala-$SCALA_VERSION/spark-tools*[0-9Tg].jar ]; then
# Use the JAR from the SBT build
- export SPARK_TOOLS_JAR=`ls "$TOOLS_DIR"/target/scala-$SCALA_VERSION/spark-tools*[0-9Tg].jar`
+ export SPARK_TOOLS_JAR="`ls "$TOOLS_DIR"/target/scala-$SCALA_VERSION/spark-tools*[0-9Tg].jar`"
fi
if [ -e "$TOOLS_DIR"/target/spark-tools*[0-9Tg].jar ]; then
# Use the JAR from the Maven build
# TODO: this also needs to become an assembly!
- export SPARK_TOOLS_JAR=`ls "$TOOLS_DIR"/target/spark-tools*[0-9Tg].jar`
+ export SPARK_TOOLS_JAR="`ls "$TOOLS_DIR"/target/spark-tools*[0-9Tg].jar`"
fi
# Compute classpath using external script
-classpath_output=$($FWDIR/bin/compute-classpath.sh)
+classpath_output=$("$FWDIR"/bin/compute-classpath.sh)
if [[ "$?" != "0" ]]; then
echo "$classpath_output"
exit 1
else
- CLASSPATH=$classpath_output
+ CLASSPATH="$classpath_output"
fi
if [[ "$1" =~ org.apache.spark.tools.* ]]; then
@@ -153,9 +153,9 @@ if [[ "$1" =~ org.apache.spark.tools.* ]]; then
fi
if $cygwin; then
- CLASSPATH=`cygpath -wp $CLASSPATH`
+ CLASSPATH="`cygpath -wp "$CLASSPATH"`"
if [ "$1" == "org.apache.spark.tools.JavaAPICompletenessChecker" ]; then
- export SPARK_TOOLS_JAR=`cygpath -w $SPARK_TOOLS_JAR`
+ export SPARK_TOOLS_JAR="`cygpath -w "$SPARK_TOOLS_JAR"`"
fi
fi
export CLASSPATH
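The spark-class hunk also preserves a capture-then-check idiom: an assignment's exit status is that of the command substitution, so $? can be tested immediately afterwards. A generic sketch (the failing command is illustrative):

output=$(ls /nonexistent/path 2>&1)   # assignment reports ls's status
if [[ "$?" != "0" ]]; then
  echo "command failed: $output"
fi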
diff --git a/bin/spark-shell b/bin/spark-shell
index 0ab4e14f5b..4a0670fc6c 100755
--- a/bin/spark-shell
+++ b/bin/spark-shell
@@ -29,11 +29,11 @@ esac
set -o posix
## Global script variables
-FWDIR="$(cd `dirname $0`/..; pwd)"
+FWDIR="$(cd "`dirname "$0"`"/..; pwd)"
function usage() {
echo "Usage: ./bin/spark-shell [options]"
- $FWDIR/bin/spark-submit --help 2>&1 | grep -v Usage 1>&2
+ "$FWDIR"/bin/spark-submit --help 2>&1 | grep -v Usage 1>&2
exit 0
}
@@ -41,7 +41,7 @@ if [[ "$@" = *--help ]] || [[ "$@" = *-h ]]; then
usage
fi
-source $FWDIR/bin/utils.sh
+source "$FWDIR"/bin/utils.sh
SUBMIT_USAGE_FUNCTION=usage
gatherSparkSubmitOpts "$@"
@@ -54,11 +54,11 @@ function main() {
# (see https://github.com/sbt/sbt/issues/562).
stty -icanon min 1 -echo > /dev/null 2>&1
export SPARK_SUBMIT_OPTS="$SPARK_SUBMIT_OPTS -Djline.terminal=unix"
- $FWDIR/bin/spark-submit --class org.apache.spark.repl.Main "${SUBMISSION_OPTS[@]}" spark-shell "${APPLICATION_OPTS[@]}"
+ "$FWDIR"/bin/spark-submit --class org.apache.spark.repl.Main "${SUBMISSION_OPTS[@]}" spark-shell "${APPLICATION_OPTS[@]}"
stty icanon echo > /dev/null 2>&1
else
export SPARK_SUBMIT_OPTS
- $FWDIR/bin/spark-submit --class org.apache.spark.repl.Main "${SUBMISSION_OPTS[@]}" spark-shell "${APPLICATION_OPTS[@]}"
+ "$FWDIR"/bin/spark-submit --class org.apache.spark.repl.Main "${SUBMISSION_OPTS[@]}" spark-shell "${APPLICATION_OPTS[@]}"
fi
}
diff --git a/bin/spark-sql b/bin/spark-sql
index 2a3cb31f58..ae096530ca 100755
--- a/bin/spark-sql
+++ b/bin/spark-sql
@@ -27,7 +27,7 @@ CLASS="org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver"
CLASS_NOT_FOUND_EXIT_STATUS=1
# Figure out where Spark is installed
-FWDIR="$(cd `dirname $0`/..; pwd)"
+FWDIR="$(cd "`dirname "$0"`"/..; pwd)"
function usage {
echo "Usage: ./bin/spark-sql [options] [cli option]"
@@ -38,10 +38,10 @@ function usage {
pattern+="\|--help"
pattern+="\|======="
- $FWDIR/bin/spark-submit --help 2>&1 | grep -v Usage 1>&2
+ "$FWDIR"/bin/spark-submit --help 2>&1 | grep -v Usage 1>&2
echo
echo "CLI options:"
- $FWDIR/bin/spark-class $CLASS --help 2>&1 | grep -v "$pattern" 1>&2
+ "$FWDIR"/bin/spark-class $CLASS --help 2>&1 | grep -v "$pattern" 1>&2
}
if [[ "$@" = *--help ]] || [[ "$@" = *-h ]]; then
@@ -49,7 +49,7 @@ if [[ "$@" = *--help ]] || [[ "$@" = *-h ]]; then
exit 0
fi
-source $FWDIR/bin/utils.sh
+source "$FWDIR"/bin/utils.sh
SUBMIT_USAGE_FUNCTION=usage
gatherSparkSubmitOpts "$@"
diff --git a/bin/spark-submit b/bin/spark-submit
index 277c4ce571..c557311b4b 100755
--- a/bin/spark-submit
+++ b/bin/spark-submit
@@ -19,7 +19,7 @@
# NOTE: Any changes in this file must be reflected in SparkSubmitDriverBootstrapper.scala!
-export SPARK_HOME="$(cd `dirname $0`/..; pwd)"
+export SPARK_HOME="$(cd "`dirname "$0"`"/..; pwd)"
ORIG_ARGS=("$@")
while (($#)); do
@@ -59,5 +59,5 @@ if [[ "$SPARK_SUBMIT_DEPLOY_MODE" == "client" && -f "$SPARK_SUBMIT_PROPERTIES_FI
fi
fi
-exec $SPARK_HOME/bin/spark-class org.apache.spark.deploy.SparkSubmit "${ORIG_ARGS[@]}"
+exec "$SPARK_HOME"/bin/spark-class org.apache.spark.deploy.SparkSubmit "${ORIG_ARGS[@]}"