Diffstat (limited to 'examples')
 examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala     |  6 +++---
 examples/src/main/scala/org/apache/spark/examples/HdfsTest.scala      | 10 ++++++++--
 examples/src/main/scala/org/apache/spark/examples/SparkPageRank.scala |  6 +++++-
 3 files changed, 16 insertions(+), 6 deletions(-)
diff --git a/examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala b/examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala
index 4893b017ed..822673347b 100644
--- a/examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/HBaseTest.scala
@@ -31,12 +31,12 @@ object HBaseTest {
val conf = HBaseConfiguration.create()
// Other options for configuring scan behavior are available. More information available at
// http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableInputFormat.html
- conf.set(TableInputFormat.INPUT_TABLE, args(1))
+ conf.set(TableInputFormat.INPUT_TABLE, args(0))
// Initialize HBase table if necessary
val admin = new HBaseAdmin(conf)
- if(!admin.isTableAvailable(args(1))) {
- val tableDesc = new HTableDescriptor(args(1))
+ if (!admin.isTableAvailable(args(0))) {
+ val tableDesc = new HTableDescriptor(args(0))
admin.createTable(tableDesc)
}
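HBaseTest goes on (below this hunk) to read the configured table back as an RDD, which is why INPUT_TABLE must carry the table name from args(0). A minimal standalone sketch of that read path, assuming the HBase 0.9x-era client classes this example compiles against and the standard SparkContext.newAPIHadoopRDD API:

    import org.apache.hadoop.hbase.HBaseConfiguration
    import org.apache.hadoop.hbase.client.Result
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable
    import org.apache.hadoop.hbase.mapreduce.TableInputFormat
    import org.apache.spark.{SparkConf, SparkContext}

    object HBaseReadSketch {
      def main(args: Array[String]) {
        val sc = new SparkContext(new SparkConf().setAppName("HBaseReadSketch"))
        val conf = HBaseConfiguration.create()
        // The same key the patch now sets from args(0): the table to scan.
        conf.set(TableInputFormat.INPUT_TABLE, args(0))
        // Read the table as an RDD of (row key, row) pairs via the Hadoop input format.
        val hBaseRDD = sc.newAPIHadoopRDD(conf, classOf[TableInputFormat],
          classOf[ImmutableBytesWritable], classOf[Result])
        println(hBaseRDD.count())
        sc.stop()
      }
    }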
diff --git a/examples/src/main/scala/org/apache/spark/examples/HdfsTest.scala b/examples/src/main/scala/org/apache/spark/examples/HdfsTest.scala
index 331de3ad1e..ed2b38e2ca 100644
--- a/examples/src/main/scala/org/apache/spark/examples/HdfsTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/HdfsTest.scala
@@ -19,16 +19,22 @@ package org.apache.spark.examples
import org.apache.spark._
+
object HdfsTest {
+
+ /** Usage: HdfsTest [file] */
def main(args: Array[String]) {
+ if (args.length < 1) {
+ System.err.println("Usage: HdfsTest <file>")
+ System.exit(1)
+ }
val sparkConf = new SparkConf().setAppName("HdfsTest")
val sc = new SparkContext(sparkConf)
- val file = sc.textFile(args(1))
+ val file = sc.textFile(args(0))
val mapped = file.map(s => s.length).cache()
for (iter <- 1 to 10) {
val start = System.currentTimeMillis()
for (x <- mapped) { x + 2 }
- // println("Processing: " + x)
val end = System.currentTimeMillis()
println("Iteration " + iter + " took " + (end-start) + " ms")
}
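The guard-then-exit idiom added here recurs in each file of this patch. A minimal sketch of the same pattern factored into a helper; requireArgs is a hypothetical name, not part of the patch:

    object UsageSketch {
      /** Print a usage string to stderr and exit unless at least `min` args were given. */
      def requireArgs(args: Array[String], min: Int, usage: String) {
        if (args.length < min) {
          System.err.println(usage)
          System.exit(1)
        }
      }

      def main(args: Array[String]) {
        requireArgs(args, 1, "Usage: UsageSketch <file>")
        // args is zero-based, hence args(0) for the first argument (the off-by-one this patch fixes).
        println("file = " + args(0))
      }
    }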
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkPageRank.scala b/examples/src/main/scala/org/apache/spark/examples/SparkPageRank.scala
index 40b36c779a..4c7e006da0 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkPageRank.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkPageRank.scala
@@ -31,8 +31,12 @@ import org.apache.spark.{SparkConf, SparkContext}
*/
object SparkPageRank {
def main(args: Array[String]) {
+ if (args.length < 1) {
+ System.err.println("Usage: SparkPageRank <file> <iter>")
+ System.exit(1)
+ }
val sparkConf = new SparkConf().setAppName("PageRank")
- var iters = args(1).toInt
+ val iters = if (args.length > 1) args(1).toInt else 10
val ctx = new SparkContext(sparkConf)
val lines = ctx.textFile(args(0), 1)
val links = lines.map{ s =>
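The iteration count becomes an optional second argument with a default of 10. A minimal standalone sketch of this required-plus-optional argument pattern; ArgsSketch and its names are illustrative, not part of the patch:

    object ArgsSketch {
      def main(args: Array[String]) {
        if (args.length < 1) {
          System.err.println("Usage: ArgsSketch <file> [<iter>]")
          System.exit(1)
        }
        val file = args(0)                                      // required input path
        val iters = if (args.length > 1) args(1).toInt else 10  // optional, defaults to 10
        println("file=" + file + " iters=" + iters)
      }
    }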