aboutsummaryrefslogtreecommitdiff
path: root/core
diff options
context:
space:
mode:
authorCheng Lian <lian@databricks.com>2014-11-07 11:45:25 -0800
committerMichael Armbrust <michael@databricks.com>2014-11-07 11:45:25 -0800
commit86e9eaa3f0ec23cb38bce67585adb2d5f484f4ee (patch)
tree09ff2bc6b3be9e243a8612650e67cd04abd5e611 /core
parent636d7bcc96b912f5b5caa91110cd55b55fa38ad8 (diff)
downloadspark-86e9eaa3f0ec23cb38bce67585adb2d5f484f4ee.tar.gz
spark-86e9eaa3f0ec23cb38bce67585adb2d5f484f4ee.tar.bz2
spark-86e9eaa3f0ec23cb38bce67585adb2d5f484f4ee.zip
[SPARK-4225][SQL] Resorts to SparkContext.version to inspect Spark version
This PR resorts to `SparkContext.version` rather than META-INF/MANIFEST.MF in the assembly jar to inspect Spark version. Currently, when built with Maven, the MANIFEST.MF file in the assembly jar is incorrectly replaced by Guava 15.0 MANIFEST.MF, probably because of the assembly/shading tricks. Another related PR is #3103, which tries to fix the MANIFEST issue. Author: Cheng Lian <lian@databricks.com> Closes #3105 from liancheng/spark-4225 and squashes the following commits: d9585e1 [Cheng Lian] Resorts to SparkContext.version to inspect Spark version
Diffstat (limited to 'core')
-rw-r--r--core/src/main/scala/org/apache/spark/util/Utils.scala24
1 file changed, 7 insertions, 17 deletions
diff --git a/core/src/main/scala/org/apache/spark/util/Utils.scala b/core/src/main/scala/org/apache/spark/util/Utils.scala
index a14d612548..6b85c03da5 100644
--- a/core/src/main/scala/org/apache/spark/util/Utils.scala
+++ b/core/src/main/scala/org/apache/spark/util/Utils.scala
@@ -21,10 +21,8 @@ import java.io._
import java.lang.management.ManagementFactory
import java.net._
import java.nio.ByteBuffer
-import java.util.jar.Attributes.Name
-import java.util.{Properties, Locale, Random, UUID}
-import java.util.concurrent.{ThreadFactory, ConcurrentHashMap, Executors, ThreadPoolExecutor}
-import java.util.jar.{Manifest => JarManifest}
+import java.util.concurrent.{ConcurrentHashMap, Executors, ThreadFactory, ThreadPoolExecutor}
+import java.util.{Locale, Properties, Random, UUID}
import scala.collection.JavaConversions._
import scala.collection.Map
@@ -38,11 +36,11 @@ import com.google.common.io.{ByteStreams, Files}
import com.google.common.util.concurrent.ThreadFactoryBuilder
import org.apache.commons.lang3.SystemUtils
import org.apache.hadoop.conf.Configuration
-import org.apache.log4j.PropertyConfigurator
import org.apache.hadoop.fs.{FileSystem, FileUtil, Path}
+import org.apache.log4j.PropertyConfigurator
import org.eclipse.jetty.util.MultiException
import org.json4s._
-import tachyon.client.{TachyonFile,TachyonFS}
+import tachyon.client.{TachyonFS, TachyonFile}
import org.apache.spark._
import org.apache.spark.deploy.SparkHadoopUtil
@@ -352,8 +350,8 @@ private[spark] object Utils extends Logging {
* Download a file to target directory. Supports fetching the file in a variety of ways,
* including HTTP, HDFS and files on a standard filesystem, based on the URL parameter.
*
- * If `useCache` is true, first attempts to fetch the file to a local cache that's shared
- * across executors running the same application. `useCache` is used mainly for
+ * If `useCache` is true, first attempts to fetch the file to a local cache that's shared
+ * across executors running the same application. `useCache` is used mainly for
* the executors, and not in local mode.
*
* Throws SparkException if the target file already exists and has different contents than
@@ -400,7 +398,7 @@ private[spark] object Utils extends Logging {
} else {
doFetchFile(url, targetDir, fileName, conf, securityMgr, hadoopConf)
}
-
+
// Decompress the file if it's a .tar or .tar.gz
if (fileName.endsWith(".tar.gz") || fileName.endsWith(".tgz")) {
logInfo("Untarring " + fileName)
@@ -1776,13 +1774,6 @@ private[spark] object Utils extends Logging {
s"$libraryPathEnvName=$libraryPath$ampersand"
}
- lazy val sparkVersion =
- SparkContext.jarOfObject(this).map { path =>
- val manifestUrl = new URL(s"jar:file:$path!/META-INF/MANIFEST.MF")
- val manifest = new JarManifest(manifestUrl.openStream())
- manifest.getMainAttributes.getValue(Name.IMPLEMENTATION_VERSION)
- }.getOrElse("Unknown")
-
/**
* Return the value of a config either through the SparkConf or the Hadoop configuration
* if this is Yarn mode. In the latter case, this defaults to the value set through SparkConf
@@ -1796,7 +1787,6 @@ private[spark] object Utils extends Logging {
sparkValue
}
}
-
}
/**