about summary refs log tree commit diff
path: root/sql/core
diff options
context:
space:
mode:
author	Reynold Xin <rxin@databricks.com>	2016-03-19 00:27:23 -0700
committer	Reynold Xin <rxin@databricks.com>	2016-03-19 00:27:23 -0700
commit1970d911d91d7b53389629169e45bf33127ed2e8 (patch)
tree5ba180a3c01dbb3f2b0e2f5024ef0d35e8ede855 /sql/core
parentb39594472b69b4eafc11f1a74eb47872215bdfdd (diff)
downloadspark-1970d911d91d7b53389629169e45bf33127ed2e8.tar.gz
spark-1970d911d91d7b53389629169e45bf33127ed2e8.tar.bz2
spark-1970d911d91d7b53389629169e45bf33127ed2e8.zip
[SPARK-14018][SQL] Use 64-bit num records in BenchmarkWholeStageCodegen
## What changes were proposed in this pull request?

500L << 20 is actually pretty close to the 32-bit int limit. I was trying to increase this to 500L << 23 and got negative numbers instead.

## How was this patch tested?

I'm only modifying test code.

Author: Reynold Xin <rxin@databricks.com>

Closes #11839 from rxin/SPARK-14018.
Diffstat (limited to 'sql/core')
-rw-r--r--sql/core/src/test/scala/org/apache/spark/sql/execution/BenchmarkWholeStageCodegen.scala8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/BenchmarkWholeStageCodegen.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/BenchmarkWholeStageCodegen.scala
index a16bd77bfe..0b1cb90186 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/BenchmarkWholeStageCodegen.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/BenchmarkWholeStageCodegen.scala
@@ -42,7 +42,7 @@ class BenchmarkWholeStageCodegen extends SparkFunSuite {
lazy val sc = SparkContext.getOrCreate(conf)
lazy val sqlContext = SQLContext.getOrCreate(sc)
- def runBenchmark(name: String, values: Int)(f: => Unit): Unit = {
+ def runBenchmark(name: String, values: Long)(f: => Unit): Unit = {
val benchmark = new Benchmark(name, values)
Seq(false, true).foreach { enabled =>
@@ -57,7 +57,7 @@ class BenchmarkWholeStageCodegen extends SparkFunSuite {
// These benchmark are skipped in normal build
ignore("range/filter/sum") {
- val N = 500 << 20
+ val N = 500L << 20
runBenchmark("rang/filter/sum", N) {
sqlContext.range(N).filter("(id & 1) = 1").groupBy().sum().collect()
}
@@ -71,7 +71,7 @@ class BenchmarkWholeStageCodegen extends SparkFunSuite {
}
ignore("range/limit/sum") {
- val N = 500 << 20
+ val N = 500L << 20
runBenchmark("range/limit/sum", N) {
sqlContext.range(N).limit(1000000).groupBy().sum().collect()
}
@@ -85,7 +85,7 @@ class BenchmarkWholeStageCodegen extends SparkFunSuite {
}
ignore("stat functions") {
- val N = 100 << 20
+ val N = 100L << 20
runBenchmark("stddev", N) {
sqlContext.range(N).groupBy().agg("id" -> "stddev").collect()