aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorReynold Xin <rxin@databricks.com>2015-06-03 14:19:10 -0700
committerReynold Xin <rxin@databricks.com>2015-06-03 14:20:27 -0700
commit2c5a06cafd2885ff5431fa96485db2564ae1cce3 (patch)
treeb334f05556532111f15cf3175e29d539a24c8668
parent939e4f3d8def16dfe03f0196be8e1c218a9daa32 (diff)
downloadspark-2c5a06cafd2885ff5431fa96485db2564ae1cce3.tar.gz
spark-2c5a06cafd2885ff5431fa96485db2564ae1cce3.tar.bz2
spark-2c5a06cafd2885ff5431fa96485db2564ae1cce3.zip
Update documentation for [SPARK-7980] [SQL] Support SQLContext.range(end)
-rw-r--r--python/pyspark/sql/context.py2
-rw-r--r--sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala20
2 files changed, 12 insertions, 10 deletions
diff --git a/python/pyspark/sql/context.py b/python/pyspark/sql/context.py
index 1bebfc4837..599c9ac579 100644
--- a/python/pyspark/sql/context.py
+++ b/python/pyspark/sql/context.py
@@ -146,6 +146,8 @@ class SQLContext(object):
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
+ If only one argument is specified, it will be used as the end value.
+
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index f08fb4fafe..0aab7fa870 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -705,33 +705,33 @@ class SQLContext(@transient val sparkContext: SparkContext)
/**
* :: Experimental ::
* Creates a [[DataFrame]] with a single [[LongType]] column named `id`, containing elements
- * in an range from `start` to `end`(exclusive) with step value 1.
+ * in a range from 0 to `end` (exclusive) with step value 1.
*
- * @since 1.4.0
+ * @since 1.4.1
* @group dataframe
*/
@Experimental
- def range(start: Long, end: Long): DataFrame = {
- createDataFrame(
- sparkContext.range(start, end).map(Row(_)),
- StructType(StructField("id", LongType, nullable = false) :: Nil))
- }
+ def range(end: Long): DataFrame = range(0, end)
/**
* :: Experimental ::
* Creates a [[DataFrame]] with a single [[LongType]] column named `id`, containing elements
- * in an range from 0 to `end`(exclusive) with step value 1.
+ * in a range from `start` to `end` (exclusive) with step value 1.
*
* @since 1.4.0
* @group dataframe
*/
@Experimental
- def range(end: Long): DataFrame = range(0, end)
+ def range(start: Long, end: Long): DataFrame = {
+ createDataFrame(
+ sparkContext.range(start, end).map(Row(_)),
+ StructType(StructField("id", LongType, nullable = false) :: Nil))
+ }
/**
* :: Experimental ::
* Creates a [[DataFrame]] with a single [[LongType]] column named `id`, containing elements
- * in an range from `start` to `end`(exclusive) with an step value, with partition number
+ * in a range from `start` to `end` (exclusive) with a step value, with partition number
* specified.
*
* @since 1.4.0