author     Reynold Xin <rxin@databricks.com>      2016-01-05 11:10:14 -0800
committer  Josh Rosen <joshrosen@databricks.com>  2016-01-05 11:10:14 -0800
commit     8ce645d4eeda203cf5e100c4bdba2d71edd44e6a (patch)
tree       a4bb76e60b52ce5b4c12c6794f24920bd958385d /examples/src/main/scala
parent     76768337beec6842660db7522ad15c25ee66d346 (diff)
download   spark-8ce645d4eeda203cf5e100c4bdba2d71edd44e6a.tar.gz
           spark-8ce645d4eeda203cf5e100c4bdba2d71edd44e6a.tar.bz2
           spark-8ce645d4eeda203cf5e100c4bdba2d71edd44e6a.zip
[SPARK-12615] Remove some deprecated APIs in RDD/SparkContext
I looked at each case individually, and it looks like they can all be removed. The only one I had to think twice about was toArray (I even considered un-deprecating it, until I realized it was a problem in Java to have toArray returning java.util.List).

Author: Reynold Xin <rxin@databricks.com>

Closes #10569 from rxin/SPARK-12615.
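For reference, the toArray migration discussed above looks like this (a minimal standalone sketch, not part of this patch; RDD.collect is the replacement for the removed RDD.toArray):

import org.apache.spark.{SparkConf, SparkContext}

object ToArrayMigration {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("ToArrayMigration").setMaster("local[*]"))
    val rdd = sc.parallelize(Seq(1, 2, 3))
    // Before this patch: val materialized = rdd.toArray()  // deprecated
    val materialized: Array[Int] = rdd.collect()  // drop-in replacement
    println(materialized.mkString(", "))
    sc.stop()
  }
}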
Diffstat (limited to 'examples/src/main/scala')
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala         7
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/SparkTachyonHdfsLR.scala  6
2 files changed, 2 insertions(+), 11 deletions(-)
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala b/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
index 6c90dbec3d..04dec57b71 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkHdfsLR.scala
@@ -26,8 +26,6 @@ import breeze.linalg.{DenseVector, Vector}
 import org.apache.hadoop.conf.Configuration
 
 import org.apache.spark._
-import org.apache.spark.scheduler.InputFormatInfo
-
 
 /**
  * Logistic regression based classification.
@@ -74,10 +72,7 @@ object SparkHdfsLR {
     val sparkConf = new SparkConf().setAppName("SparkHdfsLR")
     val inputPath = args(0)
     val conf = new Configuration()
-    val sc = new SparkContext(sparkConf,
-      InputFormatInfo.computePreferredLocations(
-        Seq(new InputFormatInfo(conf, classOf[org.apache.hadoop.mapred.TextInputFormat], inputPath))
-      ))
+    val sc = new SparkContext(sparkConf)
     val lines = sc.textFile(inputPath)
     val points = lines.map(parsePoint _).cache()
     val ITERATIONS = args(1).toInt
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkTachyonHdfsLR.scala b/examples/src/main/scala/org/apache/spark/examples/SparkTachyonHdfsLR.scala
index e492582710..ddc99d3f90 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkTachyonHdfsLR.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkTachyonHdfsLR.scala
@@ -26,7 +26,6 @@ import breeze.linalg.{DenseVector, Vector}
 import org.apache.hadoop.conf.Configuration
 
 import org.apache.spark._
-import org.apache.spark.scheduler.InputFormatInfo
 import org.apache.spark.storage.StorageLevel
 
 /**
@@ -70,10 +69,7 @@ object SparkTachyonHdfsLR {
     val inputPath = args(0)
     val sparkConf = new SparkConf().setAppName("SparkTachyonHdfsLR")
     val conf = new Configuration()
-    val sc = new SparkContext(sparkConf,
-      InputFormatInfo.computePreferredLocations(
-        Seq(new InputFormatInfo(conf, classOf[org.apache.hadoop.mapred.TextInputFormat], inputPath))
-      ))
+    val sc = new SparkContext(sparkConf)
     val lines = sc.textFile(inputPath)
     val points = lines.map(parsePoint _).persist(StorageLevel.OFF_HEAP)
     val ITERATIONS = args(1).toInt
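Both files follow the same pattern: the removed SparkContext constructor took a precomputed map of preferred node locations, which is unnecessary because locality for Hadoop input is reported per partition by the RDD itself (HadoopRDD.getPreferredLocations). A minimal sketch of the post-patch usage, mirroring the two diffs above (hypothetical standalone example):

import org.apache.spark.{SparkConf, SparkContext}

object PlainContextExample {
  def main(args: Array[String]): Unit = {
    // No preferredNodeLocationData argument: sc.textFile builds a HadoopRDD
    // whose partitions expose HDFS block locations to the scheduler.
    val sc = new SparkContext(new SparkConf().setAppName("PlainContextExample"))
    val lines = sc.textFile(args(0))
    println(lines.count())
    sc.stop()
  }
}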