author    Michael Armbrust <michael@databricks.com>  2014-07-25 15:36:57 -0700
committer Michael Armbrust <michael@databricks.com>  2014-07-25 15:36:57 -0700
commit    afd757a241f41d7f8c458ef8f1f9ce8ed12986e5 (patch)
tree      136e49b3392763acd5f952b70a50137cb96e9c75 /core
parent    37ad3b724590dcf42bcdbfaf91b7a11914501945 (diff)
Revert "[SPARK-2410][SQL] Merging Hive Thrift/JDBC server"
This reverts commit 06dc0d2c6b69c5d59b4d194ced2ac85bfe2e05e2.

#1399 is making Jenkins fail. We should investigate and put this back after it's passing tests.

Author: Michael Armbrust <michael@databricks.com>

Closes #1594 from marmbrus/revertJDBC and squashes the following commits:

59748da [Michael Armbrust] Revert "[SPARK-2410][SQL] Merging Hive Thrift/JDBC server"
Diffstat (limited to 'core')
-rw-r--r--  core/pom.xml                                                              2
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala            14
-rw-r--r--  core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala    5
3 files changed, 5 insertions(+), 16 deletions(-)
diff --git a/core/pom.xml b/core/pom.xml
index a24743495b..1054cec4d7 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -28,7 +28,7 @@
  <groupId>org.apache.spark</groupId>
  <artifactId>spark-core_2.10</artifactId>
  <properties>
-    <sbt.project.name>core</sbt.project.name>
+    <sbt.project.name>core</sbt.project.name>
  </properties>
  <packaging>jar</packaging>
  <name>Spark Project Core</name>
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
index c9cec33eba..3b5642b6ca 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala
@@ -46,10 +46,6 @@ object SparkSubmit {
  private val CLUSTER = 2
  private val ALL_DEPLOY_MODES = CLIENT | CLUSTER

-  // A special jar name that indicates the class being run is inside of Spark itself, and therefore
-  // no user jar is needed.
-  private val SPARK_INTERNAL = "spark-internal"
-
  // Special primary resource names that represent shells rather than application jars.
  private val SPARK_SHELL = "spark-shell"
  private val PYSPARK_SHELL = "pyspark-shell"
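With SPARK_INTERNAL removed, shell markers are the only sentinel resource names left, and classifying a primary resource reduces to string checks. A minimal standalone sketch in Scala (a hypothetical port mirroring the constants above and the isUserJar hunk below, not the Spark source itself):

object ResourceChecks {
  // Sentinel names mirroring the constants kept above.
  val SPARK_SHELL = "spark-shell"
  val PYSPARK_SHELL = "pyspark-shell"

  // A resource is a shell marker when it matches one of the sentinels.
  def isShell(primaryResource: String): Boolean =
    primaryResource == SPARK_SHELL || primaryResource == PYSPARK_SHELL

  def isPython(primaryResource: String): Boolean =
    primaryResource.endsWith(".py") || primaryResource == PYSPARK_SHELL

  // With the internal sentinel gone, a user jar is anything that is
  // neither a shell marker nor a Python file.
  def isUserJar(primaryResource: String): Boolean =
    !isShell(primaryResource) && !isPython(primaryResource)
}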
@@ -261,9 +257,7 @@ object SparkSubmit {
    // In yarn-cluster mode, use yarn.Client as a wrapper around the user class
    if (clusterManager == YARN && deployMode == CLUSTER) {
      childMainClass = "org.apache.spark.deploy.yarn.Client"
-      if (args.primaryResource != SPARK_INTERNAL) {
-        childArgs += ("--jar", args.primaryResource)
-      }
+      childArgs += ("--jar", args.primaryResource)
      childArgs += ("--class", args.mainClass)
      if (args.childArgs != null) {
        args.childArgs.foreach { arg => childArgs += ("--arg", arg) }
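The net effect of this hunk is that yarn-cluster submissions always pass --jar; the internal-jar escape hatch is gone. For reference, the += argument-assembly idiom used here, sketched with only the standard library (hypothetical values):

import scala.collection.mutable.ArrayBuffer

val childArgs = ArrayBuffer[String]()
// Growable.+= accepts multiple elements, so flag/value pairs append together.
childArgs += ("--jar", "app.jar")
childArgs += ("--class", "com.example.Main")  // hypothetical main class
Seq("10", "20").foreach { arg => childArgs += ("--arg", arg) }
// childArgs: --jar app.jar --class com.example.Main --arg 10 --arg 20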
@@ -338,7 +332,7 @@ object SparkSubmit {
   * Return whether the given primary resource represents a user jar.
   */
  private def isUserJar(primaryResource: String): Boolean = {
-    !isShell(primaryResource) && !isPython(primaryResource) && !isInternal(primaryResource)
+    !isShell(primaryResource) && !isPython(primaryResource)
  }

  /**
@@ -355,10 +349,6 @@ object SparkSubmit {
    primaryResource.endsWith(".py") || primaryResource == PYSPARK_SHELL
  }

-  private[spark] def isInternal(primaryResource: String): Boolean = {
-    primaryResource == SPARK_INTERNAL
-  }
-
  /**
   * Merge a sequence of comma-separated file lists, some of which may be null to indicate
   * no files, into a single comma-separated string.
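The Scaladoc above states the merge contract, though the body lies outside the hunk. One plausible implementation satisfying it, assuming null means "no files" (a sketch, not Spark's actual code):

def mergeFileLists(lists: String*): String = {
  // Drop null/empty lists, then rejoin the survivors with commas.
  lists.filter(_ != null).filter(_.nonEmpty).mkString(",")
}

// mergeFileLists("a.jar,b.jar", null, "c.jar") == "a.jar,b.jar,c.jar"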
diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala
index 01d0ae541a..3ab67a43a3 100644
--- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala
@@ -204,9 +204,8 @@ private[spark] class SparkSubmitArguments(args: Seq[String]) {
  /** Fill in values by parsing user options. */
  private def parseOpts(opts: Seq[String]): Unit = {
-    var inSparkOpts = true
-
    // Delineates parsing of Spark options from parsing of user options.
+    var inSparkOpts = true
    parse(opts)

    def parse(opts: Seq[String]): Unit = opts match {
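The parse method referenced here consumes options by pattern matching on the head of the sequence and recursing on the tail. A self-contained sketch of that style, with a hypothetical grammar and List for a clean :: match:

def parse(opts: List[String]): Unit = opts match {
  case "--class" :: value :: tail =>
    println(s"main class: $value")  // record the value, then recurse
    parse(tail)
  case head :: tail =>
    println(s"positional: $head")
    parse(tail)
  case Nil =>                       // all options consumed
}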
@@ -319,7 +318,7 @@ private[spark] class SparkSubmitArguments(args: Seq[String]) {
          SparkSubmit.printErrorAndExit(errMessage)
        case v =>
          primaryResource =
-            if (!SparkSubmit.isShell(v) && !SparkSubmit.isInternal(v)) {
+            if (!SparkSubmit.isShell(v)) {
              Utils.resolveURI(v).toString
            } else {
              v
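The restored branch normalizes anything that is not a shell marker into a URI string. Roughly what Utils.resolveURI is assumed to do here, approximated with java.net.URI (not the actual implementation):

import java.io.File
import java.net.URI

def resolveURI(path: String): URI = {
  val uri = new URI(path)
  // Keep explicit schemes (hdfs://, http://, file:/) untouched;
  // treat bare paths as local files.
  if (uri.getScheme != null) uri
  else new File(path).getAbsoluteFile.toURI
}

// resolveURI("hdfs://nn:8020/app.jar").toString keeps the hdfs scheme;
// resolveURI("app.jar").toString becomes a file: URI under the cwd.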