about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--  project/MimaExcludes.scala                                        | 3
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrameHolder.scala | 7
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DatasetHolder.scala   | 11
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala    | 4
4 files changed, 21 insertions, 4 deletions
diff --git a/project/MimaExcludes.scala b/project/MimaExcludes.scala
index eeef96c378..90dc947d4e 100644
--- a/project/MimaExcludes.scala
+++ b/project/MimaExcludes.scala
@@ -161,6 +161,9 @@ object MimaExcludes {
"org.apache.spark.sql.UDFRegistration.org$apache$spark$sql$UDFRegistration$$builder$23"),
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.sql.UDFRegistration.org$apache$spark$sql$UDFRegistration$$builder$24")
+ ) ++ Seq(
+ // SPARK-11485
+ ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.sql.DataFrameHolder.df")
)
case v if v.startsWith("1.5") =>
Seq(
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameHolder.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameHolder.scala
index 2f19ec0403..3b30337f1f 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameHolder.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameHolder.scala
@@ -20,9 +20,14 @@ package org.apache.spark.sql
/**
* A container for a [[DataFrame]], used for implicit conversions.
*
+ * To use this, import implicit conversions in SQL:
+ * {{{
+ * import sqlContext.implicits._
+ * }}}
+ *
* @since 1.3.0
*/
-private[sql] case class DataFrameHolder(df: DataFrame) {
+case class DataFrameHolder private[sql](private val df: DataFrame) {
// This is declared with parentheses to prevent the Scala compiler from treating
// `rdd.toDF("1")` as invoking this toDF and then apply on the returned DataFrame.
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DatasetHolder.scala b/sql/core/src/main/scala/org/apache/spark/sql/DatasetHolder.scala
index 17817cbcc5..45f0098b92 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DatasetHolder.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DatasetHolder.scala
@@ -18,11 +18,16 @@
package org.apache.spark.sql
/**
- * A container for a [[DataFrame]], used for implicit conversions.
+ * A container for a [[Dataset]], used for implicit conversions.
*
- * @since 1.3.0
+ * To use this, import implicit conversions in SQL:
+ * {{{
+ * import sqlContext.implicits._
+ * }}}
+ *
+ * @since 1.6.0
*/
-private[sql] case class DatasetHolder[T](df: Dataset[T]) {
+case class DatasetHolder[T] private[sql](private val df: Dataset[T]) {
// This is declared with parentheses to prevent the Scala compiler from treating
// `rdd.toDF("1")` as invoking this toDF and then apply on the returned DataFrame.
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala
index f2904e2708..6da46a5f7e 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLImplicits.scala
@@ -52,6 +52,10 @@ abstract class SQLImplicits {
DatasetHolder(_sqlContext.createDataset(rdd))
}
+ /**
+ * Creates a [[Dataset]] from a local Seq.
+ * @since 1.6.0
+ */
implicit def localSeqToDatasetHolder[T : Encoder](s: Seq[T]): DatasetHolder[T] = {
DatasetHolder(_sqlContext.createDataset(s))
}