about summary refs log tree commit diff
path: root/sql/hive/src/test/scala/org
diff options
context:
space:
mode:
authorAla Luszczak <ala@databricks.com>2017-02-07 14:21:30 +0100
committerReynold Xin <rxin@databricks.com>2017-02-07 14:21:30 +0100
commit6ed285c68fee451c45db7b01ca8ec1dea2efd479 (patch)
tree341cff8e7b21f695ccb90c825440bd82b15210ea /sql/hive/src/test/scala/org
parente99e34d0f370211a7c7b96d144cc932b2fc71d10 (diff)
downloadspark-6ed285c68fee451c45db7b01ca8ec1dea2efd479.tar.gz
spark-6ed285c68fee451c45db7b01ca8ec1dea2efd479.tar.bz2
spark-6ed285c68fee451c45db7b01ca8ec1dea2efd479.zip
[SPARK-19447] Fixing input metrics for range operator.
## What changes were proposed in this pull request?

This change introduces a new metric "number of generated rows". It is used exclusively for Range, which is a leaf in the query tree, yet doesn't read any input data, and therefore cannot report "recordsRead".

Additionally the way in which the metrics are reported by the JIT-compiled version of Range was changed. Previously, it was immediately reported that all the records were produced. This could be confusing for a user monitoring execution progress in the UI. Now, the metric is updated gradually.

In order to avoid negative impact on Range performance, the code generation was reworked. The values are now produced in batches in the tighter inner loop, while the metrics are updated in the outer loop.

The change also contains a number of unit tests, which should help ensure the correctness of metrics for various input sources.

## How was this patch tested?

Unit tests.

Author: Ala Luszczak <ala@databricks.com>

Closes #16829 from ala/SPARK-19447.
Diffstat (limited to 'sql/hive/src/test/scala/org')
-rw-r--r-- sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeSuite.scala 19
1 files changed, 19 insertions, 0 deletions
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeSuite.scala
index ec620c2403..35c41b531c 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeSuite.scala
@@ -19,6 +19,7 @@ package org.apache.spark.sql.hive.execution
import org.scalatest.BeforeAndAfterAll
+import org.apache.spark.sql.execution.MetricsTestHelper
import org.apache.spark.sql.hive.test.TestHive
/**
@@ -47,4 +48,22 @@ class HiveSerDeSuite extends HiveComparisonTest with BeforeAndAfterAll {
createQueryTest("Read with AvroSerDe", "SELECT * FROM episodes")
createQueryTest("Read Partitioned with AvroSerDe", "SELECT * FROM episodes_part")
+
+ test("Test input/generated/output metrics") {
+ import TestHive._
+
+ val episodesCnt = sql("select * from episodes").count()
+ val episodesRes = MetricsTestHelper.runAndGetMetrics(sql("select * from episodes").toDF())
+ assert(episodesRes.recordsRead === episodesCnt :: Nil)
+ assert(episodesRes.shuffleRecordsRead.sum === 0)
+ assert(episodesRes.generatedRows.isEmpty)
+ assert(episodesRes.outputRows === episodesCnt :: Nil)
+
+ val serdeinsCnt = sql("select * from serdeins").count()
+ val serdeinsRes = MetricsTestHelper.runAndGetMetrics(sql("select * from serdeins").toDF())
+ assert(serdeinsRes.recordsRead === serdeinsCnt :: Nil)
+ assert(serdeinsRes.shuffleRecordsRead.sum === 0)
+ assert(serdeinsRes.generatedRows.isEmpty)
+ assert(serdeinsRes.outputRows === serdeinsCnt :: Nil)
+ }
}