 python/pyspark/ml/pipeline.py                                        | 4 ++--
 python/pyspark/ml/wrapper.py                                         | 2 +-
 repl/scala-2.11/src/test/scala/org/apache/spark/repl/ReplSuite.scala | 6 +++---
 3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/python/pyspark/ml/pipeline.py b/python/pyspark/ml/pipeline.py
index 5233c5801e..83880a5afc 100644
--- a/python/pyspark/ml/pipeline.py
+++ b/python/pyspark/ml/pipeline.py
@@ -39,7 +39,7 @@ class Estimator(Params):
         Fits a model to the input dataset with optional parameters.
 
         :param dataset: input dataset, which is an instance of
-                        :py:class:`pyspark.sql.SchemaRDD`
+                        :py:class:`pyspark.sql.DataFrame`
         :param params: an optional param map that overwrites embedded
                        params
         :returns: fitted model
@@ -62,7 +62,7 @@ class Transformer(Params):
         Transforms the input dataset with optional parameters.
 
         :param dataset: input dataset, which is an instance of
-                        :py:class:`pyspark.sql.SchemaRDD`
+                        :py:class:`pyspark.sql.DataFrame`
         :param params: an optional param map that overwrites embedded
                        params
         :returns: transformed dataset
diff --git a/python/pyspark/ml/wrapper.py b/python/pyspark/ml/wrapper.py
index 4bae96f678..31a66b3d2f 100644
--- a/python/pyspark/ml/wrapper.py
+++ b/python/pyspark/ml/wrapper.py
@@ -102,7 +102,7 @@ class JavaEstimator(Estimator, JavaWrapper):
         """
         Fits a Java model to the input dataset.
         :param dataset: input dataset, which is an instance of
-                        :py:class:`pyspark.sql.SchemaRDD`
+                        :py:class:`pyspark.sql.DataFrame`
         :param params: additional params (overwriting embedded values)
         :return: fitted Java model
         """
diff --git a/repl/scala-2.11/src/test/scala/org/apache/spark/repl/ReplSuite.scala b/repl/scala-2.11/src/test/scala/org/apache/spark/repl/ReplSuite.scala
index f966f25c5a..ed9b207a86 100644
--- a/repl/scala-2.11/src/test/scala/org/apache/spark/repl/ReplSuite.scala
+++ b/repl/scala-2.11/src/test/scala/org/apache/spark/repl/ReplSuite.scala
@@ -263,14 +263,14 @@ class ReplSuite extends FunSuite {
     assertDoesNotContain("Exception", output)
   }
 
-  test("SPARK-2576 importing SQLContext.createSchemaRDD.") {
+  test("SPARK-2576 importing SQLContext.createDataFrame.") {
     // We need to use local-cluster to test this case.
     val output = runInterpreter("local-cluster[1,1,512]",
       """
         |val sqlContext = new org.apache.spark.sql.SQLContext(sc)
-        |import sqlContext.createSchemaRDD
+        |import sqlContext.implicits._
         |case class TestCaseClass(value: Int)
-        |sc.parallelize(1 to 10).map(x => TestCaseClass(x)).toSchemaRDD.collect
+        |sc.parallelize(1 to 10).map(x => TestCaseClass(x)).toDF.collect
       """.stripMargin)
     assertDoesNotContain("error:", output)
     assertDoesNotContain("Exception", output)