 sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala            | 10 +++++-----
 sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java |  4 ++--
 sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala          |  9 +++++++++
 3 files changed, 16 insertions(+), 7 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index d562f55e9f..efaccec262 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -725,7 +725,7 @@ class SQLContext private[sql](
    * @group dataset
    */
   @Experimental
-  def range(end: Long): Dataset[Long] = range(0, end)
+  def range(end: Long): Dataset[java.lang.Long] = range(0, end)
 
   /**
    * :: Experimental ::
@@ -736,7 +736,7 @@ class SQLContext private[sql](
    * @group dataset
    */
   @Experimental
-  def range(start: Long, end: Long): Dataset[Long] = {
+  def range(start: Long, end: Long): Dataset[java.lang.Long] = {
     range(start, end, step = 1, numPartitions = sparkContext.defaultParallelism)
   }
 
@@ -749,7 +749,7 @@ class SQLContext private[sql](
    * @group dataset
    */
   @Experimental
-  def range(start: Long, end: Long, step: Long): Dataset[Long] = {
+  def range(start: Long, end: Long, step: Long): Dataset[java.lang.Long] = {
     range(start, end, step, numPartitions = sparkContext.defaultParallelism)
   }
 
@@ -763,8 +763,8 @@ class SQLContext private[sql](
    * @group dataset
    */
   @Experimental
-  def range(start: Long, end: Long, step: Long, numPartitions: Int): Dataset[Long] = {
-    new Dataset(this, Range(start, end, step, numPartitions), implicits.newLongEncoder)
+  def range(start: Long, end: Long, step: Long, numPartitions: Int): Dataset[java.lang.Long] = {
+    new Dataset(this, Range(start, end, step, numPartitions), Encoders.LONG)
   }
 
   /**
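
Aside, not part of the commit: a minimal Scala sketch of the changed API, assuming a live SQLContext bound to a val named sqlContext. Encoders.LONG (the java.lang.Long encoder) is what range(...) now passes to the Dataset, so one signature serves both the Scala and Java APIs.

  import org.apache.spark.sql.Dataset
  import sqlContext.implicits._

  // The new return type on both the Scala and Java sides.
  val ds: Dataset[java.lang.Long] = sqlContext.range(0, 10)

  // Scala callers can still do arithmetic directly: java.lang.Long values are
  // unboxed by the standard Predef.Long2long implicit conversion.
  val total: Long = ds.map(_ + 1).reduce(_ + _)  // 1 + 2 + ... + 10 = 55
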
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java
index cf764c645f..10ee7d57c7 100644
--- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameSuite.java
@@ -329,7 +329,7 @@ public class JavaDataFrameSuite {
 
   @Test
   public void testCountMinSketch() {
-    Dataset df = context.range(1000);
+    Dataset<Long> df = context.range(1000);
 
     CountMinSketch sketch1 = df.stat().countMinSketch("id", 10, 20, 42);
     Assert.assertEquals(sketch1.totalCount(), 1000);
@@ -354,7 +354,7 @@ public class JavaDataFrameSuite {
 
   @Test
   public void testBloomFilter() {
-    Dataset df = context.range(1000);
+    Dataset<Long> df = context.range(1000);
 
     BloomFilter filter1 = df.stat().bloomFilter("id", 1000, 0.03);
     Assert.assertTrue(filter1.expectedFpp() - 0.03 < 1e-3);
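
Aside, not part of the commit: the point of the two Java test edits above is that range(...) now gives Java callers a statically typed Dataset<java.lang.Long> instead of a raw Dataset. A hedged Scala equivalent of the same checks, assuming the same sqlContext as above:

  val df = sqlContext.range(1000)  // Dataset[java.lang.Long]

  // Mirrors testCountMinSketch: depth 10, width 20, seed 42.
  val sketch = df.stat.countMinSketch("id", 10, 20, 42)
  assert(sketch.totalCount() == 1000)

  // Mirrors testBloomFilter: 1000 expected items, 3% target false-positive rate.
  val filter = df.stat.bloomFilter("id", 1000, 0.03)
  assert(filter.expectedFpp() - 0.03 < 1e-3)
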
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
index 04d3a25fcb..677f84eb60 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala
@@ -44,6 +44,15 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
       1, 1, 1)
   }
 
+  test("range") {
+    assert(sqlContext.range(10).map(_ + 1).reduce(_ + _) == 55)
+    assert(sqlContext.range(10).map{ case i: java.lang.Long => i + 1 }.reduce(_ + _) == 55)
+    assert(sqlContext.range(0, 10).map(_ + 1).reduce(_ + _) == 55)
+    assert(sqlContext.range(0, 10).map{ case i: java.lang.Long => i + 1 }.reduce(_ + _) == 55)
+    assert(sqlContext.range(0, 10, 1, 2).map(_ + 1).reduce(_ + _) == 55)
+    assert(sqlContext.range(0, 10, 1, 2).map{ case i: java.lang.Long => i + 1 }.reduce(_ + _) == 55)
+  }
+
   test("SPARK-12404: Datatype Helper Serializability") {
     val ds = sparkContext.parallelize((
       new Timestamp(0),