author    Marcelo Vanzin <vanzin@cloudera.com>  2015-06-05 14:32:00 +0200
committer Sean Owen <sowen@cloudera.com>        2015-06-05 14:32:00 +0200
commit    700312e12f9588f01a592d6eac7bff7eb366ac8f (patch)
tree      d0429a8bb9e78dd2970c12e332a2610f6898d313 /bin
parent    019dc9f558cf7c0b708d3b1f0882b0c19134ffb6 (diff)
[SPARK-6324] [CORE] Centralize handling of script usage messages.
Reorganize code so that the launcher library handles most of the work of
printing usage messages, instead of having an awkward protocol between the
library and the scripts for that.

This mostly applies to SparkSubmit, since the launcher lib does not do command
line parsing for classes invoked in other ways, and thus cannot handle failures
for those. Most scripts end up going through SparkSubmit, though, so it all
works.

The change adds a new, internal command line switch, "--usage-error", which
prints the usage message and exits with a non-zero status. Scripts can
override the command printed in the usage message by setting an environment
variable - this avoids having to grep the output of SparkSubmit to remove
references to the "spark-submit" script.

The only sub-optimal part of the change is the special handling for the
spark-sql usage, which is now done in SparkSubmitArguments.

Author: Marcelo Vanzin <vanzin@cloudera.com>

Closes #5841 from vanzin/SPARK-6324 and squashes the following commits:

2821481 [Marcelo Vanzin] Merge branch 'master' into SPARK-6324
bf139b5 [Marcelo Vanzin] Filter output of Spark SQL CLI help.
c6609bf [Marcelo Vanzin] Fix exit code never being used when printing usage messages.
6bc1b41 [Marcelo Vanzin] [SPARK-6324] [core] Centralize handling of script usage messages.
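The script-side half of the new protocol is visible in every POSIX diff below:
resolve SPARK_HOME, export _SPARK_CMD_USAGE, and exec spark-submit. A minimal
sketch of that pattern ("my-tool" is a hypothetical script name standing in
for pyspark, spark-shell, or sparkR):

    #!/usr/bin/env bash
    # Hypothetical wrapper following the pattern introduced by this change.
    export SPARK_HOME="$(cd "`dirname "$0"`"/..; pwd)"

    # Read by the launcher library; shown instead of the generic spark-submit
    # usage text when parsing fails or --help is given, so the script no
    # longer has to grep spark-submit's output.
    export _SPARK_CMD_USAGE="Usage: ./bin/my-tool [options]"

    exec "$SPARK_HOME"/bin/spark-submit "$@"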
Diffstat (limited to 'bin')
-rwxr-xr-x  bin/pyspark           | 16
-rw-r--r--  bin/pyspark2.cmd      |  1
-rwxr-xr-x  bin/spark-class       | 13
-rwxr-xr-x  bin/spark-shell       | 15
-rw-r--r--  bin/spark-shell2.cmd  | 21
-rwxr-xr-x  bin/spark-sql         | 39
-rwxr-xr-x  bin/spark-submit      | 12
-rw-r--r--  bin/spark-submit2.cmd | 13
-rwxr-xr-x  bin/sparkR            | 18
9 files changed, 10 insertions, 138 deletions
diff --git a/bin/pyspark b/bin/pyspark
index 7cb19c51b4..f9dbddfa53 100755
--- a/bin/pyspark
+++ b/bin/pyspark
@@ -17,24 +17,10 @@
# limitations under the License.
#
-# Figure out where Spark is installed
export SPARK_HOME="$(cd "`dirname "$0"`"/..; pwd)"
source "$SPARK_HOME"/bin/load-spark-env.sh
-
-function usage() {
- if [ -n "$1" ]; then
- echo $1
- fi
- echo "Usage: ./bin/pyspark [options]" 1>&2
- "$SPARK_HOME"/bin/spark-submit --help 2>&1 | grep -v Usage 1>&2
- exit $2
-}
-export -f usage
-
-if [[ "$@" = *--help ]] || [[ "$@" = *-h ]]; then
- usage
-fi
+export _SPARK_CMD_USAGE="Usage: ./bin/pyspark [options]"
# In Spark <= 1.1, setting IPYTHON=1 would cause the driver to be launched using the `ipython`
# executable, while the worker would still be launched using PYSPARK_PYTHON.
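A latent bug the deleted blocks share is worth noting: inside [[ ]], "$@" is
joined into one string, so [[ "$@" = *--help ]] only fires when the final
argument ends in --help. A self-contained illustration of the pitfall that
the centralized parsing removes (bash):

    #!/usr/bin/env bash
    # "$@" inside [[ ]] behaves like "$*": the arguments are joined with
    # spaces, and *--help can only match at the end of the joined string.
    check() {
      if [[ "$@" = *--help ]]; then echo "detected: $*"; else echo "missed: $*"; fi
    }
    check --help             # detected: --help
    check --help --verbose   # missed: --help --verbose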
diff --git a/bin/pyspark2.cmd b/bin/pyspark2.cmd
index 09b4149c2a..45e9e3def5 100644
--- a/bin/pyspark2.cmd
+++ b/bin/pyspark2.cmd
@@ -21,6 +21,7 @@ rem Figure out where the Spark framework is installed
set SPARK_HOME=%~dp0..
call %SPARK_HOME%\bin\load-spark-env.cmd
+set _SPARK_CMD_USAGE=Usage: bin\pyspark.cmd [options]
rem Figure out which Python to use.
if "x%PYSPARK_DRIVER_PYTHON%"=="x" (
diff --git a/bin/spark-class b/bin/spark-class
index c49d97ce5c..7bb1afe4b4 100755
--- a/bin/spark-class
+++ b/bin/spark-class
@@ -16,18 +16,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-set -e
# Figure out where Spark is installed
export SPARK_HOME="$(cd "`dirname "$0"`"/..; pwd)"
. "$SPARK_HOME"/bin/load-spark-env.sh
-if [ -z "$1" ]; then
- echo "Usage: spark-class <class> [<args>]" 1>&2
- exit 1
-fi
-
# Find the java binary
if [ -n "${JAVA_HOME}" ]; then
RUNNER="${JAVA_HOME}/bin/java"
@@ -98,9 +92,4 @@ CMD=()
while IFS= read -d '' -r ARG; do
CMD+=("$ARG")
done < <("$RUNNER" -cp "$LAUNCH_CLASSPATH" org.apache.spark.launcher.Main "$@")
-
-if [ "${CMD[0]}" = "usage" ]; then
- "${CMD[@]}"
-else
- exec "${CMD[@]}"
-fi
+exec "${CMD[@]}"
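The loop kept above is the standard bash idiom for consuming a NUL-delimited
stream: the launcher's Main class emits each argument terminated by '\0', so
arguments containing spaces or newlines survive intact. A standalone sketch of
the same idiom, with printf standing in for the launcher output:

    #!/usr/bin/env bash
    # Read NUL-delimited words into an array, as spark-class does with the
    # output of org.apache.spark.launcher.Main.
    CMD=()
    while IFS= read -d '' -r ARG; do
      CMD+=("$ARG")
    done < <(printf '%s\0' java -cp "a jar with spaces.jar" org.example.Main)
    echo "${#CMD[@]} arguments"    # prints: 4 arguments
    printf '[%s]\n' "${CMD[@]}"    # the jar path stays a single element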
diff --git a/bin/spark-shell b/bin/spark-shell
index b3761b5e13..a6dc863d83 100755
--- a/bin/spark-shell
+++ b/bin/spark-shell
@@ -29,20 +29,7 @@ esac
set -o posix
export FWDIR="$(cd "`dirname "$0"`"/..; pwd)"
-
-usage() {
- if [ -n "$1" ]; then
- echo "$1"
- fi
- echo "Usage: ./bin/spark-shell [options]"
- "$FWDIR"/bin/spark-submit --help 2>&1 | grep -v Usage 1>&2
- exit "$2"
-}
-export -f usage
-
-if [[ "$@" = *--help ]] || [[ "$@" = *-h ]]; then
- usage "" 0
-fi
+export _SPARK_CMD_USAGE="Usage: ./bin/spark-shell [options]"
# SPARK-4161: scala does not assume use of the java classpath,
# so we need to add the "-Dscala.usejavacp=true" flag manually. We
diff --git a/bin/spark-shell2.cmd b/bin/spark-shell2.cmd
index 00fd30fa38..251309d67f 100644
--- a/bin/spark-shell2.cmd
+++ b/bin/spark-shell2.cmd
@@ -18,12 +18,7 @@ rem limitations under the License.
rem
set SPARK_HOME=%~dp0..
-
-echo "%*" | findstr " \<--help\> \<-h\>" >nul
-if %ERRORLEVEL% equ 0 (
- call :usage
- exit /b 0
-)
+set _SPARK_CMD_USAGE=Usage: .\bin\spark-shell.cmd [options]
rem SPARK-4161: scala does not assume use of the java classpath,
rem so we need to add the "-Dscala.usejavacp=true" flag manually. We
@@ -37,16 +32,4 @@ if "x%SPARK_SUBMIT_OPTS%"=="x" (
set SPARK_SUBMIT_OPTS="%SPARK_SUBMIT_OPTS% -Dscala.usejavacp=true"
:run_shell
-call %SPARK_HOME%\bin\spark-submit2.cmd --class org.apache.spark.repl.Main %*
-set SPARK_ERROR_LEVEL=%ERRORLEVEL%
-if not "x%SPARK_LAUNCHER_USAGE_ERROR%"=="x" (
- call :usage
- exit /b 1
-)
-exit /b %SPARK_ERROR_LEVEL%
-
-:usage
-echo %SPARK_LAUNCHER_USAGE_ERROR%
-echo "Usage: .\bin\spark-shell.cmd [options]" >&2
-call %SPARK_HOME%\bin\spark-submit2.cmd --help 2>&1 | findstr /V "Usage" 1>&2
-goto :eof
+%SPARK_HOME%\bin\spark-submit2.cmd --class org.apache.spark.repl.Main %*
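The deleted ERRORLEVEL bookkeeping has no POSIX counterpart because the shell
scripts end in exec: the child replaces the shell process, so the child's exit
status is what the caller sees, with no forwarding needed. A quick bash
demonstration of that property:

    #!/usr/bin/env bash
    # exec replaces this shell; whatever status the child exits with becomes
    # the script's own exit status. The final echo can never run.
    exec sh -c 'exit 42'
    echo "never reached"

Running the script and checking $? afterwards prints 42.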
diff --git a/bin/spark-sql b/bin/spark-sql
index ca1729f4cf..4ea7bc6e39 100755
--- a/bin/spark-sql
+++ b/bin/spark-sql
@@ -17,41 +17,6 @@
# limitations under the License.
#
-#
-# Shell script for starting the Spark SQL CLI
-
-# Enter posix mode for bash
-set -o posix
-
-# NOTE: This exact class name is matched downstream by SparkSubmit.
-# Any changes need to be reflected there.
-export CLASS="org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver"
-
-# Figure out where Spark is installed
export FWDIR="$(cd "`dirname "$0"`"/..; pwd)"
-
-function usage {
- if [ -n "$1" ]; then
- echo "$1"
- fi
- echo "Usage: ./bin/spark-sql [options] [cli option]"
- pattern="usage"
- pattern+="\|Spark assembly has been built with Hive"
- pattern+="\|NOTE: SPARK_PREPEND_CLASSES is set"
- pattern+="\|Spark Command: "
- pattern+="\|--help"
- pattern+="\|======="
-
- "$FWDIR"/bin/spark-submit --help 2>&1 | grep -v Usage 1>&2
- echo
- echo "CLI options:"
- "$FWDIR"/bin/spark-class "$CLASS" --help 2>&1 | grep -v "$pattern" 1>&2
- exit "$2"
-}
-export -f usage
-
-if [[ "$@" = *--help ]] || [[ "$@" = *-h ]]; then
- usage "" 0
-fi
-
-exec "$FWDIR"/bin/spark-submit --class "$CLASS" "$@"
+export _SPARK_CMD_USAGE="Usage: ./bin/spark-sql [options] [cli option]"
+exec "$FWDIR"/bin/spark-submit --class org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver "$@"
diff --git a/bin/spark-submit b/bin/spark-submit
index 0e0afe71a0..255378b0f0 100755
--- a/bin/spark-submit
+++ b/bin/spark-submit
@@ -22,16 +22,4 @@ SPARK_HOME="$(cd "`dirname "$0"`"/..; pwd)"
# disable randomized hash for string in Python 3.3+
export PYTHONHASHSEED=0
-# Only define a usage function if an upstream script hasn't done so.
-if ! type -t usage >/dev/null 2>&1; then
- usage() {
- if [ -n "$1" ]; then
- echo "$1"
- fi
- "$SPARK_HOME"/bin/spark-class org.apache.spark.deploy.SparkSubmit --help
- exit "$2"
- }
- export -f usage
-fi
-
exec "$SPARK_HOME"/bin/spark-class org.apache.spark.deploy.SparkSubmit "$@"
diff --git a/bin/spark-submit2.cmd b/bin/spark-submit2.cmd
index d3fc4a5cc3..651376e526 100644
--- a/bin/spark-submit2.cmd
+++ b/bin/spark-submit2.cmd
@@ -24,15 +24,4 @@ rem disable randomized hash for string in Python 3.3+
set PYTHONHASHSEED=0
set CLASS=org.apache.spark.deploy.SparkSubmit
-call %~dp0spark-class2.cmd %CLASS% %*
-set SPARK_ERROR_LEVEL=%ERRORLEVEL%
-if not "x%SPARK_LAUNCHER_USAGE_ERROR%"=="x" (
- call :usage
- exit /b 1
-)
-exit /b %SPARK_ERROR_LEVEL%
-
-:usage
-echo %SPARK_LAUNCHER_USAGE_ERROR%
-call %SPARK_HOME%\bin\spark-class2.cmd %CLASS% --help
-goto :eof
+%~dp0spark-class2.cmd %CLASS% %*
diff --git a/bin/sparkR b/bin/sparkR
index 8c918e2b09..464c29f369 100755
--- a/bin/sparkR
+++ b/bin/sparkR
@@ -17,23 +17,7 @@
# limitations under the License.
#
-# Figure out where Spark is installed
export SPARK_HOME="$(cd "`dirname "$0"`"/..; pwd)"
-
source "$SPARK_HOME"/bin/load-spark-env.sh
-
-function usage() {
- if [ -n "$1" ]; then
- echo $1
- fi
- echo "Usage: ./bin/sparkR [options]" 1>&2
- "$SPARK_HOME"/bin/spark-submit --help 2>&1 | grep -v Usage 1>&2
- exit $2
-}
-export -f usage
-
-if [[ "$@" = *--help ]] || [[ "$@" = *-h ]]; then
- usage
-fi
-
+export _SPARK_CMD_USAGE="Usage: ./bin/sparkR [options]"
exec "$SPARK_HOME"/bin/spark-submit sparkr-shell-main "$@"