diff options
author | Reynold Xin <rxin@databricks.com> | 2015-06-03 14:19:10 -0700 |
---|---|---|
committer | Reynold Xin <rxin@databricks.com> | 2015-06-03 14:20:27 -0700 |
commit | 2c5a06cafd2885ff5431fa96485db2564ae1cce3 (patch) | |
tree | b334f05556532111f15cf3175e29d539a24c8668 /sql | |
parent | 939e4f3d8def16dfe03f0196be8e1c218a9daa32 (diff) | |
download | spark-2c5a06cafd2885ff5431fa96485db2564ae1cce3.tar.gz spark-2c5a06cafd2885ff5431fa96485db2564ae1cce3.tar.bz2 spark-2c5a06cafd2885ff5431fa96485db2564ae1cce3.zip |
Update documentation for [SPARK-7980] [SQL] Support SQLContext.range(end)
Diffstat (limited to 'sql')
-rw-r--r-- | sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala | 20 |
1 file changed, 10 insertions, 10 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala index f08fb4fafe..0aab7fa870 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala @@ -705,33 +705,33 @@ class SQLContext(@transient val sparkContext: SparkContext) /** * :: Experimental :: * Creates a [[DataFrame]] with a single [[LongType]] column named `id`, containing elements - * in an range from `start` to `end`(exclusive) with step value 1. + * in an range from 0 to `end` (exclusive) with step value 1. * - * @since 1.4.0 + * @since 1.4.1 * @group dataframe */ @Experimental - def range(start: Long, end: Long): DataFrame = { - createDataFrame( - sparkContext.range(start, end).map(Row(_)), - StructType(StructField("id", LongType, nullable = false) :: Nil)) - } + def range(end: Long): DataFrame = range(0, end) /** * :: Experimental :: * Creates a [[DataFrame]] with a single [[LongType]] column named `id`, containing elements - * in an range from 0 to `end`(exclusive) with step value 1. + * in an range from `start` to `end` (exclusive) with step value 1. * * @since 1.4.0 * @group dataframe */ @Experimental - def range(end: Long): DataFrame = range(0, end) + def range(start: Long, end: Long): DataFrame = { + createDataFrame( + sparkContext.range(start, end).map(Row(_)), + StructType(StructField("id", LongType, nullable = false) :: Nil)) + } /** * :: Experimental :: * Creates a [[DataFrame]] with a single [[LongType]] column named `id`, containing elements - * in an range from `start` to `end`(exclusive) with an step value, with partition number + * in an range from `start` to `end` (exclusive) with an step value, with partition number * specified. * * @since 1.4.0 |