author    Reynold Xin <rxin@databricks.com>  2016-07-17 22:48:00 -0700
committer Reynold Xin <rxin@databricks.com>  2016-07-17 22:48:00 -0700
commit    480c870644595a71102be6597146d80b1c0816e4 (patch)
tree      2f09403104877e48f313cfc9c1450e258a634099 /sql
parent    d27fe9ba6763aae6a5e48f16d7cbd85658df7cf7 (diff)
[SPARK-16588][SQL] Deprecate monotonicallyIncreasingId in Scala/Java
This patch deprecates monotonicallyIncreasingId in Scala/Java, matching the deprecation already in place in Python. Originally written by HyukjinKwon. Closes #14236.
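For reference, the migration asked of callers is a one-word rename. A minimal sketch, assuming a local SparkSession; the DataFrame df below is illustrative, not from the patch:

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.functions.monotonically_increasing_id

    val spark = SparkSession.builder().master("local[2]").appName("mid-migration").getOrCreate()
    import spark.implicits._

    // Illustrative DataFrame; any DataFrame works the same way.
    val df = Seq("a", "b", "c").toDF("value")

    // Before (deprecated by this patch): df.select(monotonicallyIncreasingId())
    // After:
    df.select(monotonically_increasing_id().as("id"), $"value").show()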
Diffstat (limited to 'sql')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/functions.scala            |  1 +
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala | 13 ++++++-------
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
index 45d5d05d9f..93af8456c4 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
@@ -978,6 +978,7 @@ object functions {
* @group normal_funcs
* @since 1.4.0
*/
+ @deprecated("Use monotonically_increasing_id()", "2.0.0")
def monotonicallyIncreasingId(): Column = monotonically_increasing_id()
/**
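For readers unfamiliar with the annotation: Scala's @deprecated takes a message and the version in which the deprecation started, and call sites then compile with a warning. A minimal, self-contained sketch of the same pattern (the object and method names are illustrative, not from this patch):

    object LegacyApi {
      // Old name kept as a thin alias so existing sources still compile.
      @deprecated("Use newName()", "2.0.0")
      def oldName(): Int = newName()

      def newName(): Int = 42
    }

    // LegacyApi.oldName() still returns 42, but the compiler now emits a
    // deprecation warning pointing callers at newName().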
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
index a170fae577..26e1a9f75d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala
@@ -508,18 +508,17 @@ class ColumnExpressionSuite extends QueryTest with SharedSQLContext {
Row("ab", "cde"))
}
- test("monotonicallyIncreasingId") {
+ test("monotonically_increasing_id") {
// Make sure we have 2 partitions, each with 2 records.
val df = sparkContext.parallelize(Seq[Int](), 2).mapPartitions { _ =>
Iterator(Tuple1(1), Tuple1(2))
}.toDF("a")
checkAnswer(
- df.select(monotonicallyIncreasingId()),
- Row(0L) :: Row(1L) :: Row((1L << 33) + 0L) :: Row((1L << 33) + 1L) :: Nil
- )
- checkAnswer(
- df.select(expr("monotonically_increasing_id()")),
- Row(0L) :: Row(1L) :: Row((1L << 33) + 0L) :: Row((1L << 33) + 1L) :: Nil
+ df.select(monotonically_increasing_id(), expr("monotonically_increasing_id()")),
+ Row(0L, 0L) ::
+ Row(1L, 1L) ::
+ Row((1L << 33) + 0L, (1L << 33) + 0L) ::
+ Row((1L << 33) + 1L, (1L << 33) + 1L) :: Nil
)
}
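The expected rows follow from how monotonically_increasing_id packs its output: the partition index sits above a 33-bit per-partition record counter, so partition 0 produces 0 and 1 while partition 1 produces (1L << 33) and (1L << 33) + 1. A quick sketch of the arithmetic (the helper name expectedId is illustrative):

    // id = (partition index shifted above the 33-bit counter) + record number
    def expectedId(partition: Int, record: Long): Long = (partition.toLong << 33) + record

    assert(expectedId(0, 0) == 0L)
    assert(expectedId(0, 1) == 1L)
    assert(expectedId(1, 0) == (1L << 33))      // 8589934592
    assert(expectedId(1, 1) == (1L << 33) + 1L) // 8589934593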