author    | Matei Zaharia <matei@eecs.berkeley.edu> | 2013-08-31 19:27:07 -0700
committer | Matei Zaharia <matei@eecs.berkeley.edu> | 2013-09-01 14:13:13 -0700
commit    | 46eecd110a4017ea0c86cbb1010d0ccd6a5eb2ef (patch)
tree      | 4a46971b36680bc5ef51be81ada8eb47670f6b22 /tools/src/main/scala
parent    | a30fac16ca0525f2001b127e5f9518c9680844c9 (diff)
Initial work to rename package to org.apache.spark
Diffstat (limited to 'tools/src/main/scala')
-rw-r--r-- | tools/src/main/scala/org/apache/spark/tools/JavaAPICompletenessChecker.scala (renamed from tools/src/main/scala/spark/tools/JavaAPICompletenessChecker.scala) | 184
1 file changed, 92 insertions, 92 deletions
diff --git a/tools/src/main/scala/spark/tools/JavaAPICompletenessChecker.scala b/tools/src/main/scala/org/apache/spark/tools/JavaAPICompletenessChecker.scala
index f45d0b281c..50335e5736 100644
--- a/tools/src/main/scala/spark/tools/JavaAPICompletenessChecker.scala
+++ b/tools/src/main/scala/org/apache/spark/tools/JavaAPICompletenessChecker.scala
@@ -15,17 +15,17 @@
  * limitations under the License.
  */
 
-package spark.tools
+package org.apache.spark.tools
 
 import java.lang.reflect.Method
 
 import scala.collection.mutable.ArrayBuffer
 
-import spark._
-import spark.api.java._
-import spark.rdd.OrderedRDDFunctions
-import spark.streaming.{PairDStreamFunctions, DStream, StreamingContext}
-import spark.streaming.api.java.{JavaPairDStream, JavaDStream, JavaStreamingContext}
+import org.apache.spark._
+import org.apache.spark.api.java._
+import org.apache.spark.rdd.OrderedRDDFunctions
+import org.apache.spark.streaming.{PairDStreamFunctions, DStream, StreamingContext}
+import org.apache.spark.streaming.api.java.{JavaPairDStream, JavaDStream, JavaStreamingContext}
 
 private[spark] abstract class SparkType(val name: String)
 
@@ -129,7 +129,7 @@ object JavaAPICompletenessChecker {
     // TODO: the JavaStreamingContext API accepts Array arguments
     // instead of Lists, so this isn't a trivial translation / sub:
     "scala.collection.Seq" -> "java.util.List",
-    "scala.Function2" -> "spark.api.java.function.Function2",
+    "scala.Function2" -> "org.apache.spark.api.java.function.Function2",
     "scala.collection.Iterator" -> "java.util.Iterator",
     "scala.collection.mutable.Queue" -> "java.util.Queue",
     "double" -> "java.lang.Double"
@@ -139,7 +139,7 @@ object JavaAPICompletenessChecker {
     scalaType match {
       case ParameterizedType(name, parameters, typebounds) =>
         name match {
-          case "spark.RDD" =>
+          case "org.apache.spark.RDD" =>
             if (parameters(0).name == classOf[Tuple2[_, _]].getName) {
               val tupleParams =
                 parameters(0).asInstanceOf[ParameterizedType].parameters.map(applySubs)
@@ -147,13 +147,13 @@ object JavaAPICompletenessChecker {
             } else {
               ParameterizedType(classOf[JavaRDD[_]].getName, parameters.map(applySubs))
             }
-          case "spark.streaming.DStream" =>
+          case "org.apache.spark.streaming.DStream" =>
             if (parameters(0).name == classOf[Tuple2[_, _]].getName) {
               val tupleParams =
                 parameters(0).asInstanceOf[ParameterizedType].parameters.map(applySubs)
-              ParameterizedType("spark.streaming.api.java.JavaPairDStream", tupleParams)
+              ParameterizedType("org.apache.spark.streaming.api.java.JavaPairDStream", tupleParams)
             } else {
-              ParameterizedType("spark.streaming.api.java.JavaDStream",
+              ParameterizedType("org.apache.spark.streaming.api.java.JavaDStream",
                 parameters.map(applySubs))
             }
           case "scala.Option" => {
@@ -167,14 +167,14 @@ object JavaAPICompletenessChecker {
             val firstParamName = parameters.last.name
             if (firstParamName.startsWith("scala.collection.Traversable") ||
               firstParamName.startsWith("scala.collection.Iterator")) {
-              ParameterizedType("spark.api.java.function.FlatMapFunction",
+              ParameterizedType("org.apache.spark.api.java.function.FlatMapFunction",
                 Seq(parameters(0),
                   parameters.last.asInstanceOf[ParameterizedType].parameters(0)).map(applySubs))
             } else if (firstParamName == "scala.runtime.BoxedUnit") {
-              ParameterizedType("spark.api.java.function.VoidFunction",
+              ParameterizedType("org.apache.spark.api.java.function.VoidFunction",
                 parameters.dropRight(1).map(applySubs))
             } else {
-              ParameterizedType("spark.api.java.function.Function", parameters.map(applySubs))
+              ParameterizedType("org.apache.spark.api.java.function.Function",
+                parameters.map(applySubs))
             }
           case _ =>
             ParameterizedType(renameSubstitutions.getOrElse(name, name),
@@ -211,85 +211,85 @@ object JavaAPICompletenessChecker {
     // This list also includes a few methods that are only used by the web UI or other
     // internal Spark components.
     val excludedNames = Seq(
-      "spark.RDD.origin",
-      "spark.RDD.elementClassManifest",
-      "spark.RDD.checkpointData",
-      "spark.RDD.partitioner",
-      "spark.RDD.partitions",
-      "spark.RDD.firstParent",
-      "spark.RDD.doCheckpoint",
-      "spark.RDD.markCheckpointed",
-      "spark.RDD.clearDependencies",
-      "spark.RDD.getDependencies",
-      "spark.RDD.getPartitions",
-      "spark.RDD.dependencies",
-      "spark.RDD.getPreferredLocations",
-      "spark.RDD.collectPartitions",
-      "spark.RDD.computeOrReadCheckpoint",
-      "spark.PairRDDFunctions.getKeyClass",
-      "spark.PairRDDFunctions.getValueClass",
-      "spark.SparkContext.stringToText",
-      "spark.SparkContext.makeRDD",
-      "spark.SparkContext.runJob",
-      "spark.SparkContext.runApproximateJob",
-      "spark.SparkContext.clean",
-      "spark.SparkContext.metadataCleaner",
-      "spark.SparkContext.ui",
-      "spark.SparkContext.newShuffleId",
-      "spark.SparkContext.newRddId",
-      "spark.SparkContext.cleanup",
-      "spark.SparkContext.receiverJobThread",
-      "spark.SparkContext.getRDDStorageInfo",
-      "spark.SparkContext.addedFiles",
-      "spark.SparkContext.addedJars",
-      "spark.SparkContext.persistentRdds",
-      "spark.SparkContext.executorEnvs",
-      "spark.SparkContext.checkpointDir",
-      "spark.SparkContext.getSparkHome",
-      "spark.SparkContext.executorMemoryRequested",
-      "spark.SparkContext.getExecutorStorageStatus",
-      "spark.streaming.DStream.generatedRDDs",
-      "spark.streaming.DStream.zeroTime",
-      "spark.streaming.DStream.rememberDuration",
-      "spark.streaming.DStream.storageLevel",
-      "spark.streaming.DStream.mustCheckpoint",
-      "spark.streaming.DStream.checkpointDuration",
-      "spark.streaming.DStream.checkpointData",
-      "spark.streaming.DStream.graph",
-      "spark.streaming.DStream.isInitialized",
-      "spark.streaming.DStream.parentRememberDuration",
-      "spark.streaming.DStream.initialize",
-      "spark.streaming.DStream.validate",
-      "spark.streaming.DStream.setContext",
-      "spark.streaming.DStream.setGraph",
-      "spark.streaming.DStream.remember",
-      "spark.streaming.DStream.getOrCompute",
-      "spark.streaming.DStream.generateJob",
-      "spark.streaming.DStream.clearOldMetadata",
-      "spark.streaming.DStream.addMetadata",
-      "spark.streaming.DStream.updateCheckpointData",
-      "spark.streaming.DStream.restoreCheckpointData",
-      "spark.streaming.DStream.isTimeValid",
-      "spark.streaming.StreamingContext.nextNetworkInputStreamId",
-      "spark.streaming.StreamingContext.networkInputTracker",
-      "spark.streaming.StreamingContext.checkpointDir",
-      "spark.streaming.StreamingContext.checkpointDuration",
-      "spark.streaming.StreamingContext.receiverJobThread",
-      "spark.streaming.StreamingContext.scheduler",
-      "spark.streaming.StreamingContext.initialCheckpoint",
-      "spark.streaming.StreamingContext.getNewNetworkStreamId",
-      "spark.streaming.StreamingContext.validate",
-      "spark.streaming.StreamingContext.createNewSparkContext",
-      "spark.streaming.StreamingContext.rddToFileName",
-      "spark.streaming.StreamingContext.getSparkCheckpointDir",
-      "spark.streaming.StreamingContext.env",
-      "spark.streaming.StreamingContext.graph",
-      "spark.streaming.StreamingContext.isCheckpointPresent"
+      "org.apache.spark.RDD.origin",
+      "org.apache.spark.RDD.elementClassManifest",
+      "org.apache.spark.RDD.checkpointData",
+      "org.apache.spark.RDD.partitioner",
+      "org.apache.spark.RDD.partitions",
+      "org.apache.spark.RDD.firstParent",
+      "org.apache.spark.RDD.doCheckpoint",
+      "org.apache.spark.RDD.markCheckpointed",
+      "org.apache.spark.RDD.clearDependencies",
+      "org.apache.spark.RDD.getDependencies",
+      "org.apache.spark.RDD.getPartitions",
+      "org.apache.spark.RDD.dependencies",
+      "org.apache.spark.RDD.getPreferredLocations",
+      "org.apache.spark.RDD.collectPartitions",
+      "org.apache.spark.RDD.computeOrReadCheckpoint",
+      "org.apache.spark.PairRDDFunctions.getKeyClass",
+      "org.apache.spark.PairRDDFunctions.getValueClass",
+      "org.apache.spark.SparkContext.stringToText",
+      "org.apache.spark.SparkContext.makeRDD",
+      "org.apache.spark.SparkContext.runJob",
+      "org.apache.spark.SparkContext.runApproximateJob",
+      "org.apache.spark.SparkContext.clean",
+      "org.apache.spark.SparkContext.metadataCleaner",
+      "org.apache.spark.SparkContext.ui",
+      "org.apache.spark.SparkContext.newShuffleId",
+      "org.apache.spark.SparkContext.newRddId",
+      "org.apache.spark.SparkContext.cleanup",
+      "org.apache.spark.SparkContext.receiverJobThread",
+      "org.apache.spark.SparkContext.getRDDStorageInfo",
+      "org.apache.spark.SparkContext.addedFiles",
+      "org.apache.spark.SparkContext.addedJars",
+      "org.apache.spark.SparkContext.persistentRdds",
+      "org.apache.spark.SparkContext.executorEnvs",
+      "org.apache.spark.SparkContext.checkpointDir",
+      "org.apache.spark.SparkContext.getSparkHome",
+      "org.apache.spark.SparkContext.executorMemoryRequested",
+      "org.apache.spark.SparkContext.getExecutorStorageStatus",
+      "org.apache.spark.streaming.DStream.generatedRDDs",
+      "org.apache.spark.streaming.DStream.zeroTime",
+      "org.apache.spark.streaming.DStream.rememberDuration",
+      "org.apache.spark.streaming.DStream.storageLevel",
+      "org.apache.spark.streaming.DStream.mustCheckpoint",
+      "org.apache.spark.streaming.DStream.checkpointDuration",
+      "org.apache.spark.streaming.DStream.checkpointData",
+      "org.apache.spark.streaming.DStream.graph",
+      "org.apache.spark.streaming.DStream.isInitialized",
+      "org.apache.spark.streaming.DStream.parentRememberDuration",
+      "org.apache.spark.streaming.DStream.initialize",
+      "org.apache.spark.streaming.DStream.validate",
+      "org.apache.spark.streaming.DStream.setContext",
+      "org.apache.spark.streaming.DStream.setGraph",
+      "org.apache.spark.streaming.DStream.remember",
+      "org.apache.spark.streaming.DStream.getOrCompute",
+      "org.apache.spark.streaming.DStream.generateJob",
+      "org.apache.spark.streaming.DStream.clearOldMetadata",
+      "org.apache.spark.streaming.DStream.addMetadata",
+      "org.apache.spark.streaming.DStream.updateCheckpointData",
+      "org.apache.spark.streaming.DStream.restoreCheckpointData",
+      "org.apache.spark.streaming.DStream.isTimeValid",
+      "org.apache.spark.streaming.StreamingContext.nextNetworkInputStreamId",
+      "org.apache.spark.streaming.StreamingContext.networkInputTracker",
+      "org.apache.spark.streaming.StreamingContext.checkpointDir",
+      "org.apache.spark.streaming.StreamingContext.checkpointDuration",
+      "org.apache.spark.streaming.StreamingContext.receiverJobThread",
+      "org.apache.spark.streaming.StreamingContext.scheduler",
+      "org.apache.spark.streaming.StreamingContext.initialCheckpoint",
+      "org.apache.spark.streaming.StreamingContext.getNewNetworkStreamId",
+      "org.apache.spark.streaming.StreamingContext.validate",
+      "org.apache.spark.streaming.StreamingContext.createNewSparkContext",
+      "org.apache.spark.streaming.StreamingContext.rddToFileName",
+      "org.apache.spark.streaming.StreamingContext.getSparkCheckpointDir",
+      "org.apache.spark.streaming.StreamingContext.env",
+      "org.apache.spark.streaming.StreamingContext.graph",
+      "org.apache.spark.streaming.StreamingContext.isCheckpointPresent"
     )
     val excludedPatterns = Seq(
-      """^spark\.SparkContext\..*To.*Functions""",
-      """^spark\.SparkContext\..*WritableConverter""",
-      """^spark\.SparkContext\..*To.*Writable"""
+      """^org\.apache\.spark\.SparkContext\..*To.*Functions""",
+      """^org\.apache\.spark\.SparkContext\..*WritableConverter""",
+      """^org\.apache\.spark\.SparkContext\..*To.*Writable"""
     ).map(_.r)
     lazy val excludedByPattern =
       !excludedPatterns.map(_.findFirstIn(name)).filter(_.isDefined).isEmpty
@@ -298,7 +298,7 @@ object JavaAPICompletenessChecker {
   private def isExcludedByInterface(method: Method): Boolean = {
     val excludedInterfaces =
-      Set("spark.Logging", "org.apache.hadoop.mapreduce.HadoopMapReduceUtil")
+      Set("org.apache.spark.Logging", "org.apache.hadoop.mapreduce.HadoopMapReduceUtil")
 
     def toComparisionKey(method: Method) =
       (method.getReturnType, method.getName, method.getGenericReturnType)
     val interfaces = method.getDeclaringClass.getInterfaces.filter { i =>