aboutsummaryrefslogtreecommitdiff
path: root/sql
diff options
context:
space:
mode:
authorMichael Armbrust <michael@databricks.com>2015-12-08 15:58:35 -0800
committerMichael Armbrust <michael@databricks.com>2015-12-08 15:58:35 -0800
commit39594894232e0b70c5ca8b0df137da0d61223fd5 (patch)
treed123d9be5c6e58f41b02e5c966a39edbc54e9275 /sql
parent9494521695a1f1526aae76c0aea34a3bead96251 (diff)
downloadspark-39594894232e0b70c5ca8b0df137da0d61223fd5.tar.gz
spark-39594894232e0b70c5ca8b0df137da0d61223fd5.tar.bz2
spark-39594894232e0b70c5ca8b0df137da0d61223fd5.zip
[SPARK-12069][SQL] Update documentation with Datasets
Author: Michael Armbrust <michael@databricks.com> Closes #10060 from marmbrus/docs.
Diffstat (limited to 'sql')
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/Encoder.scala48
-rw-r--r--sql/core/src/main/scala/org/apache/spark/sql/Column.scala21
2 files changed, 65 insertions, 4 deletions
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/Encoder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/Encoder.scala
index 3ca5ade7f3..bb0fdc4c3d 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/Encoder.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/Encoder.scala
@@ -19,20 +19,60 @@ package org.apache.spark.sql
import java.lang.reflect.Modifier
+import scala.annotation.implicitNotFound
import scala.reflect.{ClassTag, classTag}
+import org.apache.spark.annotation.Experimental
import org.apache.spark.sql.catalyst.encoders.{ExpressionEncoder, encoderFor}
import org.apache.spark.sql.catalyst.expressions.{DecodeUsingSerializer, BoundReference, EncodeUsingSerializer}
import org.apache.spark.sql.types._
/**
+ * :: Experimental ::
* Used to convert a JVM object of type `T` to and from the internal Spark SQL representation.
*
- * Encoders are not intended to be thread-safe and thus they are allow to avoid internal locking
- * and reuse internal buffers to improve performance.
+ * == Scala ==
+ * Encoders are generally created automatically through implicits from a `SQLContext`.
+ *
+ * {{{
+ * import sqlContext.implicits._
+ *
+ * val ds = Seq(1, 2, 3).toDS() // implicitly provided (sqlContext.implicits.newIntEncoder)
+ * }}}
+ *
+ * == Java ==
+ * Encoders are specified by calling static methods on [[Encoders]].
+ *
+ * {{{
+ * List<String> data = Arrays.asList("abc", "abc", "xyz");
+ * Dataset<String> ds = context.createDataset(data, Encoders.STRING());
+ * }}}
+ *
+ * Encoders can be composed into tuples:
+ *
+ * {{{
+ * Encoder<Tuple2<Integer, String>> encoder2 = Encoders.tuple(Encoders.INT(), Encoders.STRING());
+ * List<Tuple2<Integer, String>> data2 = Arrays.asList(new scala.Tuple2(1, "a"));
+ * Dataset<Tuple2<Integer, String>> ds2 = context.createDataset(data2, encoder2);
+ * }}}
+ *
+ * Or constructed from Java Beans:
+ *
+ * {{{
+ * Encoders.bean(MyClass.class);
+ * }}}
+ *
+ * == Implementation ==
+ * - Encoders are not required to be thread-safe and thus they do not need to use locks to guard
+ * against concurrent access if they reuse internal buffers to improve performance.
*
* @since 1.6.0
*/
+@Experimental
+@implicitNotFound("Unable to find encoder for type stored in a Dataset. Primitive types " +
+ "(Int, String, etc) and Product types (case classes) are supported by importing " +
+ "sqlContext.implicits._ Support for serializing other types will be added in future " +
+ "releases.")
trait Encoder[T] extends Serializable {
/** Returns the schema of encoding this type of object as a Row. */
@@ -43,10 +83,12 @@ trait Encoder[T] extends Serializable {
}
/**
- * Methods for creating encoders.
+ * :: Experimental ::
+ * Methods for creating an [[Encoder]].
*
* @since 1.6.0
*/
+@Experimental
object Encoders {
/**
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Column.scala b/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
index ad6af481fa..d641fcac1c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Column.scala
@@ -73,7 +73,26 @@ class TypedColumn[-T, U](
/**
* :: Experimental ::
- * A column in a [[DataFrame]].
+ * A column that will be computed based on the data in a [[DataFrame]].
+ *
+ * A new column is constructed based on the input columns present in a dataframe:
+ *
+ * {{{
+ * df("columnName") // On a specific DataFrame.
+ * col("columnName") // A generic column not yet associated with a DataFrame.
+ * col("columnName.field") // Extracting a struct field
+ * col("`a.column.with.dots`") // Escape `.` in column names.
+ * $"columnName" // Scala shorthand for a named column.
+ * expr("a + 1") // A column that is constructed from a parsed SQL Expression.
+ * lit("1") // A column that produces a literal (constant) value.
+ * }}}
+ *
+ * [[Column]] objects can be composed to form complex expressions:
+ *
+ * {{{
+ * $"a" + 1
+ * $"a" === $"b"
+ * }}}
*
* @groupname java_expr_ops Java-specific expression operators
* @groupname expr_ops Expression operators