author    Nishkam Ravi <nravi@cloudera.com>  2015-03-29 12:40:37 +0100
committer Sean Owen <sowen@cloudera.com>  2015-03-29 12:40:37 +0100
commit    e3eb393961051a48ed1cac756ac1928156aa161f
tree      60f5f5e1a0223c8a65b067e2a8e0b0aefe0c2a65 /launcher
parent    55153f5c14fad10607b44fbb8eebd9636a6bc2e1
[SPARK-6406] Launch Spark using assembly jar instead of a separate launcher jar
Author: Nishkam Ravi <nravi@cloudera.com>
Author: nishkamravi2 <nishkamravi@gmail.com>
Author: nravi <nravi@c1704.halxg.cloudera.com>

Closes #5085 from nishkamravi2/master_nravi and squashes the following commits:

bad4349 [nishkamravi2] Update Main.java
36a6f87 [Nishkam Ravi] Minor changes and bug fixes
b7f4ae7 [Nishkam Ravi] Merge branch 'master' of https://github.com/apache/spark into master_nravi
4a45d6a [Nishkam Ravi] Merge branch 'master' of https://github.com/apache/spark into master_nravi
458af39 [Nishkam Ravi] Locate the jar using getLocation, obviates the need to pass assembly path as an argument
d9658d6 [Nishkam Ravi] Changes for SPARK-6406
ccdc334 [Nishkam Ravi] Merge branch 'master' of https://github.com/apache/spark into master_nravi
3faa7a4 [Nishkam Ravi] Launcher library changes (SPARK-6406)
345206a [Nishkam Ravi] spark-class merge Merge branch 'master_nravi' of https://github.com/nishkamravi2/spark into master_nravi
ac58975 [Nishkam Ravi] spark-class changes
06bfeb0 [nishkamravi2] Update spark-class
35af990 [Nishkam Ravi] Merge branch 'master' of https://github.com/apache/spark into master_nravi
32c3ab3 [nishkamravi2] Update AbstractCommandBuilder.java
4bd4489 [nishkamravi2] Update AbstractCommandBuilder.java
746f35b [Nishkam Ravi] "hadoop" string in the assembly name should not be mandatory (everywhere else in spark we mandate spark-assembly*hadoop*.jar)
bfe96e0 [Nishkam Ravi] Merge branch 'master' of https://github.com/apache/spark into master_nravi
ee902fa [Nishkam Ravi] Merge branch 'master' of https://github.com/apache/spark into master_nravi
d453197 [nishkamravi2] Update NewHadoopRDD.scala
6f41a1d [nishkamravi2] Update NewHadoopRDD.scala
0ce2c32 [nishkamravi2] Update HadoopRDD.scala
f7e33c2 [Nishkam Ravi] Merge branch 'master_nravi' of https://github.com/nishkamravi2/spark into master_nravi
ba1eb8b [Nishkam Ravi] Try-catch block around the two occurrences of removeShutDownHook. Deletion of semi-redundant occurrences of expensive operation inShutDown.
71d0e17 [Nishkam Ravi] Merge branch 'master' of https://github.com/apache/spark into master_nravi
494d8c0 [nishkamravi2] Update DiskBlockManager.scala
3c5ddba [nishkamravi2] Update DiskBlockManager.scala
f0d12de [Nishkam Ravi] Workaround for IllegalStateException caused by recent changes to BlockManager.stop
79ea8b4 [Nishkam Ravi] Merge branch 'master' of https://github.com/apache/spark into master_nravi
b446edc [Nishkam Ravi] Merge branch 'master' of https://github.com/apache/spark into master_nravi
5c9a4cb [nishkamravi2] Update TaskSetManagerSuite.scala
535295a [nishkamravi2] Update TaskSetManager.scala
3e1b616 [Nishkam Ravi] Modify test for maxResultSize
9f6583e [Nishkam Ravi] Changes to maxResultSize code (improve error message and add condition to check if maxResultSize > 0)
5f8f9ed [Nishkam Ravi] Merge branch 'master' of https://github.com/apache/spark into master_nravi
636a9ff [nishkamravi2] Update YarnAllocator.scala
8f76c8b [Nishkam Ravi] Doc change for yarn memory overhead
35daa64 [Nishkam Ravi] Slight change in the doc for yarn memory overhead
5ac2ec1 [Nishkam Ravi] Remove out
dac1047 [Nishkam Ravi] Additional documentation for yarn memory overhead issue
42c2c3d [Nishkam Ravi] Additional changes for yarn memory overhead issue
362da5e [Nishkam Ravi] Additional changes for yarn memory overhead
c726bd9 [Nishkam Ravi] Merge branch 'master' of https://github.com/apache/spark into master_nravi
f00fa31 [Nishkam Ravi] Improving logging for AM memoryOverhead
1cf2d1e [nishkamravi2] Update YarnAllocator.scala
ebcde10 [Nishkam Ravi] Modify default YARN memory_overhead-- from an additive constant to a multiplier (redone to resolve merge conflicts)
2e69f11 [Nishkam Ravi] Merge branch 'master' of https://github.com/apache/spark into master_nravi
efd688a [Nishkam Ravi] Merge branch 'master' of https://github.com/apache/spark
2b630f9 [nravi] Accept memory input as "30g", "512M" instead of an int value, to be consistent with rest of Spark
3bf8fad [nravi] Merge branch 'master' of https://github.com/apache/spark
5423a03 [nravi] Merge branch 'master' of https://github.com/apache/spark
eb663ca [nravi] Merge branch 'master' of https://github.com/apache/spark
df2aeb1 [nravi] Improved fix for ConcurrentModificationIssue (Spark-1097, Hadoop-10456)
6b840f0 [nravi] Undo the fix for SPARK-1758 (the problem is fixed)
5108700 [nravi] Fix in Spark for the Concurrent thread modification issue (SPARK-1097, HADOOP-10456)
681b36f [nravi] Fix for SPARK-1758: failing test org.apache.spark.JavaAPISuite.wholeTextFiles
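The central technique in this commit is worth spelling out: instead of scanning the filesystem for a spark-assembly jar, the launcher asks the JVM where its own class was loaded from. A minimal standalone sketch of that lookup (JarLocator is an illustrative name, not part of the patch):

    import java.security.CodeSource;

    public class JarLocator {
        public static void main(String[] args) {
            // The ProtectionDomain records where a class was loaded from,
            // so no jar path needs to be passed in from the outside.
            CodeSource src = JarLocator.class.getProtectionDomain().getCodeSource();
            if (src != null) {
                System.out.println("Loaded from: " + src.getLocation().getPath());
            } else {
                // Classes on the bootstrap classpath report no code source.
                System.out.println("No code source available");
            }
        }
    }

One caveat with this approach: getLocation() returns a URL, so getPath() hands back a percent-encoded path (a space becomes %20), and callers that feed the result to the filesystem may need to decode it first.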
Diffstat (limited to 'launcher')
-rw-r--r--  launcher/src/main/java/org/apache/spark/launcher/AbstractCommandBuilder.java  99
1 file changed, 22 insertions(+), 77 deletions(-)
diff --git a/launcher/src/main/java/org/apache/spark/launcher/AbstractCommandBuilder.java b/launcher/src/main/java/org/apache/spark/launcher/AbstractCommandBuilder.java
index 2da5f72787..d8279145d8 100644
--- a/launcher/src/main/java/org/apache/spark/launcher/AbstractCommandBuilder.java
+++ b/launcher/src/main/java/org/apache/spark/launcher/AbstractCommandBuilder.java
@@ -86,10 +86,14 @@ abstract class AbstractCommandBuilder {
*/
List<String> buildJavaCommand(String extraClassPath) throws IOException {
List<String> cmd = new ArrayList<String>();
- if (javaHome == null) {
- cmd.add(join(File.separator, System.getProperty("java.home"), "bin", "java"));
- } else {
+ String envJavaHome;
+
+ if (javaHome != null) {
cmd.add(join(File.separator, javaHome, "bin", "java"));
+ } else if ((envJavaHome = System.getenv("JAVA_HOME")) != null) {
+ cmd.add(join(File.separator, envJavaHome, "bin", "java"));
+ } else {
+ cmd.add(join(File.separator, System.getProperty("java.home"), "bin", "java"));
}
// Load extra JAVA_OPTS from conf/java-opts, if it exists.
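For context on the hunk above: the new branch order prefers an explicitly configured javaHome, falls back to the JAVA_HOME environment variable, and only then uses the JVM's own java.home property (which points at the running JVM, not necessarily the one the user wants). A minimal sketch of that precedence, with JavaExeResolver and resolveJavaExecutable as hypothetical names for illustration:

    import java.io.File;

    public class JavaExeResolver {
        // Resolution order from the patch: explicit setting, then the
        // JAVA_HOME environment variable, then the java.home property.
        static String resolveJavaExecutable(String javaHome) {
            String home = javaHome;
            if (home == null) {
                home = System.getenv("JAVA_HOME");
            }
            if (home == null) {
                home = System.getProperty("java.home");
            }
            return home + File.separator + "bin" + File.separator + "java";
        }

        public static void main(String[] args) {
            // With no explicit setting, prints whichever fallback wins.
            System.out.println(resolveJavaExecutable(null));
        }
    }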
@@ -182,59 +186,25 @@ abstract class AbstractCommandBuilder {
addToClassPath(cp, String.format("%s/core/target/jars/*", sparkHome));
}
- String assembly = findAssembly();
+ final String assembly = AbstractCommandBuilder.class.getProtectionDomain().getCodeSource().
+ getLocation().getPath();
addToClassPath(cp, assembly);
- // When Hive support is needed, Datanucleus jars must be included on the classpath. Datanucleus
- // jars do not work if only included in the uber jar as plugin.xml metadata is lost. Both sbt
- // and maven will populate "lib_managed/jars/" with the datanucleus jars when Spark is built
- // with Hive, so first check if the datanucleus jars exist, and then ensure the current Spark
- // assembly is built for Hive, before actually populating the CLASSPATH with the jars.
- //
- // This block also serves as a check for SPARK-1703, when the assembly jar is built with
- // Java 7 and ends up with too many files, causing issues with other JDK versions.
- boolean needsDataNucleus = false;
- JarFile assemblyJar = null;
- try {
- assemblyJar = new JarFile(assembly);
- needsDataNucleus = assemblyJar.getEntry("org/apache/hadoop/hive/ql/exec/") != null;
- } catch (IOException ioe) {
- if (ioe.getMessage().indexOf("invalid CEN header") >= 0) {
- System.err.println(
- "Loading Spark jar failed.\n" +
- "This is likely because Spark was compiled with Java 7 and run\n" +
- "with Java 6 (see SPARK-1703). Please use Java 7 to run Spark\n" +
- "or build Spark with Java 6.");
- System.exit(1);
- } else {
- throw ioe;
- }
- } finally {
- if (assemblyJar != null) {
- try {
- assemblyJar.close();
- } catch (IOException e) {
- // Ignore.
- }
- }
+ // Datanucleus jars must be included on the classpath. Datanucleus jars do not work if only
+ // included in the uber jar as plugin.xml metadata is lost. Both sbt and maven will populate
+ // "lib_managed/jars/" with the datanucleus jars when Spark is built with Hive
+ File libdir;
+ if (new File(sparkHome, "RELEASE").isFile()) {
+ libdir = new File(sparkHome, "lib");
+ } else {
+ libdir = new File(sparkHome, "lib_managed/jars");
}
- if (needsDataNucleus) {
- System.err.println("Spark assembly has been built with Hive, including Datanucleus jars " +
- "in classpath.");
- File libdir;
- if (new File(sparkHome, "RELEASE").isFile()) {
- libdir = new File(sparkHome, "lib");
- } else {
- libdir = new File(sparkHome, "lib_managed/jars");
- }
-
- checkState(libdir.isDirectory(), "Library directory '%s' does not exist.",
- libdir.getAbsolutePath());
- for (File jar : libdir.listFiles()) {
- if (jar.getName().startsWith("datanucleus-")) {
- addToClassPath(cp, jar.getAbsolutePath());
- }
+ checkState(libdir.isDirectory(), "Library directory '%s' does not exist.",
+ libdir.getAbsolutePath());
+ for (File jar : libdir.listFiles()) {
+ if (jar.getName().startsWith("datanucleus-")) {
+ addToClassPath(cp, jar.getAbsolutePath());
}
}
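To make the simplified control flow above concrete: the launcher now always scans a single library directory for Datanucleus jars, choosing lib/ for a packaged release (marked by a RELEASE file) and lib_managed/jars/ for a source build. A runnable sketch of that scan under those assumptions (DatanucleusScan is an illustrative name, and sparkHome is taken from the command line here):

    import java.io.File;

    public class DatanucleusScan {
        public static void main(String[] args) {
            String sparkHome = args.length > 0 ? args[0] : ".";
            // A RELEASE marker file distinguishes a packaged distribution
            // from a checkout built with sbt or maven.
            File libdir = new File(sparkHome, "RELEASE").isFile()
                ? new File(sparkHome, "lib")
                : new File(sparkHome, "lib_managed/jars");
            File[] jars = libdir.listFiles();
            if (jars == null) {
                System.err.println("Library directory '" + libdir + "' does not exist.");
                return;
            }
            for (File jar : jars) {
                if (jar.getName().startsWith("datanucleus-")) {
                    System.out.println("Would add to classpath: " + jar.getAbsolutePath());
                }
            }
        }
    }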
@@ -270,7 +240,6 @@ abstract class AbstractCommandBuilder {
if (scala != null) {
return scala;
}
-
String sparkHome = getSparkHome();
File scala210 = new File(sparkHome, "assembly/target/scala-2.10");
File scala211 = new File(sparkHome, "assembly/target/scala-2.11");
@@ -330,30 +299,6 @@ abstract class AbstractCommandBuilder {
return firstNonEmpty(childEnv.get(key), System.getenv(key));
}
- private String findAssembly() {
- String sparkHome = getSparkHome();
- File libdir;
- if (new File(sparkHome, "RELEASE").isFile()) {
- libdir = new File(sparkHome, "lib");
- checkState(libdir.isDirectory(), "Library directory '%s' does not exist.",
- libdir.getAbsolutePath());
- } else {
- libdir = new File(sparkHome, String.format("assembly/target/scala-%s", getScalaVersion()));
- }
-
- final Pattern re = Pattern.compile("spark-assembly.*hadoop.*\\.jar");
- FileFilter filter = new FileFilter() {
- @Override
- public boolean accept(File file) {
- return file.isFile() && re.matcher(file.getName()).matches();
- }
- };
- File[] assemblies = libdir.listFiles(filter);
- checkState(assemblies != null && assemblies.length > 0, "No assemblies found in '%s'.", libdir);
- checkState(assemblies.length == 1, "Multiple assemblies found in '%s'.", libdir);
- return assemblies[0].getAbsolutePath();
- }
-
private String getConfDir() {
String confDir = getenv("SPARK_CONF_DIR");
return confDir != null ? confDir : join(File.separator, getSparkHome(), "conf");