path: root/sql/core/src
author     Reynold Xin <rxin@databricks.com>     2016-03-15 10:39:07 +0800
committer  Wenchen Fan <wenchen@databricks.com>  2016-03-15 10:39:07 +0800
commit     e76679a814f5a0903c5f93d9a482f5ddc56fe0d2 (patch)
tree       c66eedb29637bb7925ff90fcee2588b0fc688fc0 /sql/core/src
parent     b5e3bd87f5cfa3dc59e5b68d032756feee6b4e25 (diff)
[SPARK-13880][SPARK-13881][SQL] Rename DataFrame.scala to Dataset.scala, and remove LegacyFunctions
## What changes were proposed in this pull request?

1. Rename DataFrame.scala to Dataset.scala, since the class is now named Dataset.
2. Remove LegacyFunctions. It was introduced in Spark 1.6 for backward compatibility and can be removed in Spark 2.0.

## How was this patch tested?

Should be covered by existing unit/integration tests.

Author: Reynold Xin <rxin@databricks.com>

Closes #11704 from rxin/SPARK-13880.
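For context, the removal is source-compatible for typical callers: `functions.count(columnName)` returns a `TypedColumn[Any, Long]`, which is a subtype of `Column` and therefore still works in untyped aggregations. A minimal usage sketch (assuming a Spark 2.x `SparkSession`; nothing here is part of this diff):

```scala
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.count

// Assumes a local Spark 2.x session; illustrative only.
val spark = SparkSession.builder().master("local[*]").appName("demo").getOrCreate()
val df = spark.range(10).toDF("id")

// count("id") returns TypedColumn[Any, Long], usable wherever a Column is
// expected, so untyped DataFrame code needs no separate bridge class.
df.agg(count("id")).show()
```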
Diffstat (limited to 'sql/core/src')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala (renamed from sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala)   0
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/functions.scala  23
2 files changed, 2 insertions, 21 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
index 1ea7db0388..1ea7db0388 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
index 737e125f6c..326c1e5a7c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
@@ -33,25 +33,6 @@ import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
-/**
- * Ensures that java functions signatures for methods that now return a [[TypedColumn]] still have
- * legacy equivalents in bytecode. This compatibility is done by forcing the compiler to generate
- * "bridge" methods due to the use of covariant return types.
- *
- * {{{
- * // In LegacyFunctions:
- * public abstract org.apache.spark.sql.Column avg(java.lang.String);
- *
- * // In functions:
- * public static org.apache.spark.sql.TypedColumn<java.lang.Object, java.lang.Object> avg(...);
- * }}}
- *
- * This allows us to use the same functions both in typed [[Dataset]] operations and untyped
- * [[DataFrame]] operations when the return type for a given function is statically known.
- */
-private[sql] abstract class LegacyFunctions {
- def count(columnName: String): Column
-}
/**
* :: Experimental ::
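The scaladoc removed above describes the covariant-return bridge trick. Here is a minimal, self-contained sketch of the mechanism, using toy `Col`/`TypedCol` stand-ins rather than Spark's real `Column`/`TypedColumn` classes:

```scala
// Toy stand-ins: TypedCol is a narrower (covariant) subtype of Col.
class Col(val name: String)
class TypedCol(name: String) extends Col(name)

abstract class LegacyApi {
  def count(columnName: String): Col // old signature, kept alive in bytecode
}

object Functions extends LegacyApi {
  // Overriding with a covariant return type makes scalac emit a synthetic
  // bridge method `Col count(String)` alongside this one, so code compiled
  // against the old signature still links.
  override def count(columnName: String): TypedCol = new TypedCol(columnName)
}
```

Running `javap Functions$` on the compiled output shows both `TypedCol count(String)` and the synthetic `Col count(String)` bridge. That bridge is what preserved Java bytecode compatibility in Spark 1.6; once Spark 2.0 dropped that guarantee, the abstract class served no purpose.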
@@ -72,7 +53,7 @@ private[sql] abstract class LegacyFunctions {
*/
@Experimental
// scalastyle:off
-object functions extends LegacyFunctions {
+object functions {
// scalastyle:on
private def withExpr(expr: Expression): Column = Column(expr)
@@ -287,7 +268,7 @@ object functions extends LegacyFunctions {
* @since 1.3.0
*/
def count(columnName: String): TypedColumn[Any, Long] =
- count(Column(columnName)).as(ExpressionEncoder[Long])
+ count(Column(columnName)).as(ExpressionEncoder[Long]())
/**
* Aggregate function: returns the number of distinct items in a group.
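The only non-mechanical change in this hunk is the added parentheses on `ExpressionEncoder[Long]()`. Both forms compile on Scala 2.11 because the compiler auto-applies nullary methods, but the explicit call is the more robust style: auto-application was later deprecated in Scala 2.13 and is rejected by Scala 3. A toy sketch of the distinction (hypothetical `Enc`, not Spark's encoder):

```scala
// Hypothetical stand-in for a companion object with a nullary apply().
class Enc[T]
object Enc { def apply[T](): Enc[T] = new Enc[T] }

val explicit: Enc[Long] = Enc[Long]() // unambiguous: calls apply() directly
val legacy: Enc[Long]   = Enc[Long]   // auto-application: fine in Scala 2.11/2.12,
                                      // deprecated in 2.13, an error in Scala 3
```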