author    Dongjoon Hyun <dongjoon@apache.org>    2016-05-27 11:09:15 -0700
committer Andrew Or <andrew@databricks.com>      2016-05-27 11:09:15 -0700
commit    d24e251572d39a453293cabfe14e4aed25a55208 (patch)
tree      448a5e9a9c4ecd553b180c34f82529cacb09592b /mllib/src/test
parent    c17272902c95290beca274ee6316a8a98fd7a725 (diff)
[SPARK-15603][MLLIB] Replace SQLContext with SparkSession in ML/MLLib
## What changes were proposed in this pull request?

This PR replaces all deprecated `SQLContext` occurrences with `SparkSession` in the `ML/MLLib` module, except for the following two classes, which use `SQLContext` in their function signatures:

- ReadWrite.scala
- TreeModels.scala

## How was this patch tested?

Pass the existing Jenkins tests.

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #13352 from dongjoon-hyun/SPARK-15603.
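For context, here is a minimal sketch of the migration pattern this commit applies throughout the test suites, assuming Spark 2.x on the classpath. The `MigrationSketch` object and the local-mode context are illustrative; only the before/after lines mirror the diff below.

```scala
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession

object MigrationSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext("local[2]", "migration-sketch")

    // Before (deprecated since Spark 2.0):
    //   val sqlCtx = SQLContext.getOrCreate(sc)
    //   import sqlCtx.implicits._

    // After: build (or reuse) a SparkSession from the existing SparkContext's conf.
    val spark = SparkSession.builder().config(sc.getConf).getOrCreate()
    import spark.implicits._

    // The implicits behave the same, e.g. RDD -> DataFrame conversion:
    val df = sc.parallelize(1 to 100).map(Tuple1.apply).toDF("input")
    df.show(5)

    spark.stop()
  }
}
```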
Diffstat (limited to 'mllib/src/test')
-rw-r--r--  mllib/src/test/scala/org/apache/spark/ml/feature/QuantileDiscretizerSuite.scala | 14
-rw-r--r--  mllib/src/test/scala/org/apache/spark/mllib/util/MLlibTestSparkContext.scala    |  6
2 files changed, 10 insertions, 10 deletions
diff --git a/mllib/src/test/scala/org/apache/spark/ml/feature/QuantileDiscretizerSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/feature/QuantileDiscretizerSuite.scala
index 8895d630a0..621c13a8e5 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/feature/QuantileDiscretizerSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/feature/QuantileDiscretizerSuite.scala
@@ -20,15 +20,15 @@ package org.apache.spark.ml.feature
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.util.DefaultReadWriteTest
import org.apache.spark.mllib.util.MLlibTestSparkContext
-import org.apache.spark.sql.SQLContext
+import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.udf
class QuantileDiscretizerSuite
extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {
test("Test observed number of buckets and their sizes match expected values") {
- val sqlCtx = SQLContext.getOrCreate(sc)
- import sqlCtx.implicits._
+ val spark = SparkSession.builder().config(sc.getConf).getOrCreate()
+ import spark.implicits._
val datasetSize = 100000
val numBuckets = 5
@@ -53,8 +53,8 @@ class QuantileDiscretizerSuite
}
test("Test transform method on unseen data") {
- val sqlCtx = SQLContext.getOrCreate(sc)
- import sqlCtx.implicits._
+ val spark = SparkSession.builder().config(sc.getConf).getOrCreate()
+ import spark.implicits._
val trainDF = sc.parallelize(1.0 to 100.0 by 1.0).map(Tuple1.apply).toDF("input")
val testDF = sc.parallelize(-10.0 to 110.0 by 1.0).map(Tuple1.apply).toDF("input")
@@ -82,8 +82,8 @@ class QuantileDiscretizerSuite
}
test("Verify resulting model has parent") {
- val sqlCtx = SQLContext.getOrCreate(sc)
- import sqlCtx.implicits._
+ val spark = SparkSession.builder().config(sc.getConf).getOrCreate()
+ import spark.implicits._
val df = sc.parallelize(1 to 100).map(Tuple1.apply).toDF("input")
val discretizer = new QuantileDiscretizer()
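A side note on the pattern used in each test above: `getOrCreate()` reuses any session already built on the running context, so calling it per test does not leak sessions. A small sketch, assuming a local test context (the `local[2]` master and app name are illustrative):

```scala
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession

object GetOrCreateDemo extends App {
  val sc = new SparkContext("local[2]", "get-or-create-demo")
  val s1 = SparkSession.builder().config(sc.getConf).getOrCreate()
  val s2 = SparkSession.builder().config(sc.getConf).getOrCreate()
  assert(s1 eq s2)  // the second call reuses the session created by the first
  s1.stop()
}
```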
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/util/MLlibTestSparkContext.scala b/mllib/src/test/scala/org/apache/spark/mllib/util/MLlibTestSparkContext.scala
index ba8d36f45f..db56aff631 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/util/MLlibTestSparkContext.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/util/MLlibTestSparkContext.scala
@@ -21,9 +21,9 @@ import java.io.File
import org.scalatest.Suite
-import org.apache.spark.{SparkConf, SparkContext}
+import org.apache.spark.SparkContext
import org.apache.spark.ml.util.TempDirectory
-import org.apache.spark.sql.{SparkSession, SQLContext}
+import org.apache.spark.sql.SparkSession
import org.apache.spark.util.Utils
trait MLlibTestSparkContext extends TempDirectory { self: Suite =>
@@ -46,7 +46,7 @@ trait MLlibTestSparkContext extends TempDirectory { self: Suite =>
override def afterAll() {
try {
Utils.deleteRecursively(new File(checkpointDir))
- SQLContext.clearActive()
+ SparkSession.clearActiveSession()
if (spark != null) {
spark.stop()
}
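The teardown ordering above is worth calling out: the thread-local active session is cleared first, then the session is stopped, which also stops its underlying `SparkContext`. A sketch of that pattern in isolation; the `TeardownSketch` object and `tearDown` helper are illustrative, not part of the trait:

```scala
import org.apache.spark.sql.SparkSession

object TeardownSketch {
  // Illustrative helper mirroring the afterAll() body above.
  def tearDown(spark: SparkSession): Unit = {
    SparkSession.clearActiveSession() // replaces the deprecated SQLContext.clearActive()
    if (spark != null) {
      spark.stop() // also stops the session's underlying SparkContext
    }
  }
}
```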