about summary refs log tree commit diff
path: root/examples
diff options
context:
space:
mode:
authorJoseph K. Bradley <joseph@databricks.com>2014-12-03 18:50:03 +0800
committerXiangrui Meng <meng@databricks.com>2014-12-03 18:50:39 +0800
commitfb14bfdd9e0668bc02dc48b2106710db9a0e3cce (patch)
treef3d4b19378cad2d094fbe301e2f87170922440c8 /examples
parent667f7ff440dea9b83dbf3910f26d8dbf82d343a5 (diff)
downloadspark-fb14bfdd9e0668bc02dc48b2106710db9a0e3cce.tar.gz
spark-fb14bfdd9e0668bc02dc48b2106710db9a0e3cce.tar.bz2
spark-fb14bfdd9e0668bc02dc48b2106710db9a0e3cce.zip
[SPARK-4710] [mllib] Eliminate MLlib compilation warnings
Renamed StreamingKMeans to StreamingKMeansExample to avoid warning about name conflict with StreamingKMeans class. Added import to DecisionTreeRunner to eliminate warning.

CC: mengxr

Author: Joseph K. Bradley <joseph@databricks.com>

Closes #3568 from jkbradley/ml-compilation-warnings and squashes the following commits:

64d6bc4 [Joseph K. Bradley] Updated DecisionTreeRunner.scala and StreamingKMeans.scala to eliminate compilation warnings, including renaming StreamingKMeans to StreamingKMeansExample.

(cherry picked from commit 4ac21511547dc6227d05bf61821cd2d9ab5ede74)
Signed-off-by: Xiangrui Meng <meng@databricks.com>
Diffstat (limited to 'examples')
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala | 2
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeansExample.scala (renamed from examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeans.scala) | 16
2 files changed, 10 insertions, 8 deletions
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
index 98f9d1689c..54953adb5f 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
@@ -17,6 +17,8 @@
package org.apache.spark.examples.mllib
+import scala.language.reflectiveCalls
+
import scopt.OptionParser
import org.apache.spark.{SparkConf, SparkContext}
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeans.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeansExample.scala
index 33e5760aed..8bb12d2ee9 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeans.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeansExample.scala
@@ -17,10 +17,10 @@
package org.apache.spark.examples.mllib
+import org.apache.spark.SparkConf
+import org.apache.spark.mllib.clustering.StreamingKMeans
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
-import org.apache.spark.mllib.clustering.StreamingKMeans
-import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
/**
@@ -36,28 +36,28 @@ import org.apache.spark.streaming.{Seconds, StreamingContext}
* `(y,[x1,x2,x3,...,xn])`
* Where y is some identifier. n must be the same for train and test.
*
- * Usage: StreamingKmeans <trainingDir> <testDir> <batchDuration> <numClusters> <numDimensions>
+ * Usage:
+ * StreamingKMeansExample <trainingDir> <testDir> <batchDuration> <numClusters> <numDimensions>
*
* To run on your local machine using the two directories `trainingDir` and `testDir`,
* with updates every 5 seconds, 2 dimensions per data point, and 3 clusters, call:
- * $ bin/run-example \
- * org.apache.spark.examples.mllib.StreamingKMeans trainingDir testDir 5 3 2
+ * $ bin/run-example mllib.StreamingKMeansExample trainingDir testDir 5 3 2
*
* As you add text files to `trainingDir` the clusters will continuously update.
* Anytime you add text files to `testDir`, you'll see predicted labels using the current model.
*
*/
-object StreamingKMeans {
+object StreamingKMeansExample {
def main(args: Array[String]) {
if (args.length != 5) {
System.err.println(
- "Usage: StreamingKMeans " +
+ "Usage: StreamingKMeansExample " +
"<trainingDir> <testDir> <batchDuration> <numClusters> <numDimensions>")
System.exit(1)
}
- val conf = new SparkConf().setMaster("local").setAppName("StreamingLinearRegression")
+ val conf = new SparkConf().setMaster("local").setAppName("StreamingKMeansExample")
val ssc = new StreamingContext(conf, Seconds(args(2).toLong))
val trainingData = ssc.textFileStream(args(0)).map(Vectors.parse)