path: root/sql/core
author     wangzhenhua <wangzhenhua@huawei.com>    2017-02-24 10:24:59 -0800
committer  Wenchen Fan <wenchen@databricks.com>    2017-02-24 10:24:59 -0800
commit     69d0da6373979ce5b2bcd52933b5a7660d893e88 (patch)
tree       0604364d8facb22837c95564d6df4d05736477c7 /sql/core
parent     05954f32e9bde56dc1f9a72028900d705185f6d7 (diff)
[SPARK-17078][SQL] Show stats when explain
## What changes were proposed in this pull request?

Currently the estimated stats in logical plans can only be checked by debugging. We need to provide an easier and more efficient way for developers/users. In this PR, we add an EXPLAIN COST command to show stats in the optimized logical plan, e.g.

```
spark-sql> EXPLAIN COST select count(1) from store_returns;
...
== Optimized Logical Plan ==
Aggregate [count(1) AS count(1)#24L], Statistics(sizeInBytes=16.0 B, rowCount=1, isBroadcastable=false)
+- Project, Statistics(sizeInBytes=4.3 GB, rowCount=5.76E+8, isBroadcastable=false)
   +- Relation[sr_returned_date_sk#3,sr_return_time_sk#4,sr_item_sk#5,sr_customer_sk#6,sr_cdemo_sk#7,sr_hdemo_sk#8,sr_addr_sk#9,sr_store_sk#10,sr_reason_sk#11,sr_ticket_number#12,sr_return_quantity#13,sr_return_amt#14,sr_return_tax#15,sr_return_amt_inc_tax#16,sr_fee#17,sr_return_ship_cost#18,sr_refunded_cash#19,sr_reversed_charge#20,sr_store_credit#21,sr_net_loss#22] parquet, Statistics(sizeInBytes=28.6 GB, rowCount=5.76E+8, isBroadcastable=false)
...
```

## How was this patch tested?

Added test cases.

Author: wangzhenhua <wangzhenhua@huawei.com>
Author: Zhenhua Wang <wzh_zju@163.com>

Closes #16594 from wzhfy/showStats.
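For readers who want to try the new command from the Scala API rather than the spark-sql shell, here is a minimal sketch; the local session and temp-view name are illustrative assumptions, not part of this patch:

```
import org.apache.spark.sql.SparkSession

object ExplainCostDemo {
  def main(args: Array[String]): Unit = {
    // Local master and the view name "t" are for illustration only.
    val spark = SparkSession.builder()
      .appName("explain-cost-demo")
      .master("local[*]")
      .getOrCreate()

    spark.range(1000).createOrReplaceTempView("t")

    // EXPLAIN COST returns a single string column whose optimized logical
    // plan section carries the Statistics(...) annotations shown above.
    spark.sql("EXPLAIN COST SELECT count(1) FROM t").show(truncate = false)

    spark.stop()
  }
}
```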
Diffstat (limited to 'sql/core')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/QueryExecution.scala        | 16
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala        |  6
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegenExec.scala |  6
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/command/commands.scala      |  6
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala       | 21
5 files changed, 49 insertions, 6 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/QueryExecution.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/QueryExecution.scala
index 9d046c0766..137f7ba04d 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/QueryExecution.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/QueryExecution.scala
@@ -197,7 +197,11 @@ class QueryExecution(val sparkSession: SparkSession, val logical: LogicalPlan) {
     """.stripMargin.trim
   }

-  override def toString: String = {
+  override def toString: String = completeString(appendStats = false)
+
+  def toStringWithStats: String = completeString(appendStats = true)
+
+  private def completeString(appendStats: Boolean): String = {
     def output = Utils.truncatedString(
       analyzed.output.map(o => s"${o.name}: ${o.dataType.simpleString}"), ", ")
     val analyzedPlan = Seq(
@@ -205,12 +209,20 @@ class QueryExecution(val sparkSession: SparkSession, val logical: LogicalPlan) {
       stringOrError(analyzed.treeString(verbose = true))
     ).filter(_.nonEmpty).mkString("\n")

+    val optimizedPlanString = if (appendStats) {
+      // trigger to compute stats for logical plans
+      optimizedPlan.stats(sparkSession.sessionState.conf)
+      optimizedPlan.treeString(verbose = true, addSuffix = true)
+    } else {
+      optimizedPlan.treeString(verbose = true)
+    }
+
     s"""== Parsed Logical Plan ==
        |${stringOrError(logical.treeString(verbose = true))}
        |== Analyzed Logical Plan ==
        |$analyzedPlan
        |== Optimized Logical Plan ==
-       |${stringOrError(optimizedPlan.treeString(verbose = true))}
+       |${stringOrError(optimizedPlanString)}
        |== Physical Plan ==
        |${stringOrError(executedPlan.treeString(verbose = true))}
     """.stripMargin.trim
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
index 1340aebc1d..65df688689 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
@@ -283,7 +283,11 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
     if (statement == null) {
       null  // This is enough since ParseException will raise later.
     } else if (isExplainableStatement(statement)) {
-      ExplainCommand(statement, extended = ctx.EXTENDED != null, codegen = ctx.CODEGEN != null)
+      ExplainCommand(
+        logicalPlan = statement,
+        extended = ctx.EXTENDED != null,
+        codegen = ctx.CODEGEN != null,
+        cost = ctx.COST != null)
     } else {
       ExplainCommand(OneRowRelation)
     }
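With this change the builder sets the new cost flag from the COST token. Purely for illustration, EXPLAIN COST over an explainable statement now yields a command equivalent to the following hand-built one (OneRowRelation stands in for the parsed statement):

```
import org.apache.spark.sql.catalyst.plans.logical.OneRowRelation
import org.apache.spark.sql.execution.command.ExplainCommand

// Only the flag whose keyword appears in the EXPLAIN statement is set;
// the others keep their defaults from the case class definition.
val cmd = ExplainCommand(logicalPlan = OneRowRelation, cost = true)
```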
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegenExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegenExec.scala
index 2ead8f6baa..c58474eba0 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegenExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegenExec.scala
@@ -254,7 +254,8 @@ case class InputAdapter(child: SparkPlan) extends UnaryExecNode with CodegenSupport {
       lastChildren: Seq[Boolean],
       builder: StringBuilder,
       verbose: Boolean,
-      prefix: String = ""): StringBuilder = {
+      prefix: String = "",
+      addSuffix: Boolean = false): StringBuilder = {
     child.generateTreeString(depth, lastChildren, builder, verbose, "")
   }
 }
@@ -428,7 +429,8 @@ case class WholeStageCodegenExec(child: SparkPlan) extends UnaryExecNode with CodegenSupport {
       lastChildren: Seq[Boolean],
       builder: StringBuilder,
       verbose: Boolean,
-      prefix: String = ""): StringBuilder = {
+      prefix: String = "",
+      addSuffix: Boolean = false): StringBuilder = {
     child.generateTreeString(depth, lastChildren, builder, verbose, "*")
   }
 }
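Both overrides gain the new addSuffix parameter with a default of false, so every existing caller of generateTreeString keeps compiling unchanged. A small self-contained sketch of that compatibility pattern, with illustrative types that are not Spark's:

```
// Appending a defaulted parameter to an overridable method keeps old
// call sites and subclasses source-compatible while letting new callers
// opt in to the extra behavior.
abstract class TreeLike {
  def generateTreeString(
      depth: Int,
      builder: StringBuilder,
      addSuffix: Boolean = false): StringBuilder
}

class Leaf extends TreeLike {
  override def generateTreeString(
      depth: Int,
      builder: StringBuilder,
      addSuffix: Boolean = false): StringBuilder = {
    builder.append("  " * depth).append("Leaf")
    if (addSuffix) builder.append(", Statistics(...)")
    builder.append("\n")
  }
}

// Pre-existing two-argument calls still compile:
//   new Leaf().generateTreeString(0, new StringBuilder)
```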
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/commands.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/commands.scala
index 58f5071193..5de45b1596 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/commands.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/commands.scala
@@ -88,11 +88,13 @@ case class ExecutedCommandExec(cmd: RunnableCommand) extends SparkPlan {
  * @param logicalPlan plan to explain
  * @param extended whether to do extended explain or not
  * @param codegen whether to output generated code from whole-stage codegen or not
+ * @param cost whether to show cost information for operators.
  */
 case class ExplainCommand(
     logicalPlan: LogicalPlan,
     extended: Boolean = false,
-    codegen: Boolean = false)
+    codegen: Boolean = false,
+    cost: Boolean = false)
   extends RunnableCommand {

   override val output: Seq[Attribute] =
@@ -113,6 +115,8 @@ case class ExplainCommand(
       codegenString(queryExecution.executedPlan)
     } else if (extended) {
       queryExecution.toString
+    } else if (cost) {
+      queryExecution.toStringWithStats
     } else {
       queryExecution.simpleString
     }
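The run method now dispatches in the order codegen, extended, cost, and finally the simple plan. A minimal sketch of that dispatch, with QE standing in for QueryExecution (only the methods touched by this diff are modeled):

```
object ExplainDispatchSketch {
  // Stand-in for QueryExecution; toString comes from Any.
  trait QE {
    def toStringWithStats: String
    def simpleString: String
  }

  // Mirrors the if/else chain in ExplainCommand.run.
  def explainText(qe: QE, extended: Boolean, codegen: Boolean, cost: Boolean): String =
    if (codegen) "<generated code>"      // codegenString(queryExecution.executedPlan)
    else if (extended) qe.toString       // all four plan sections
    else if (cost) qe.toStringWithStats  // optimized plan with Statistics(...) suffixes
    else qe.simpleString                 // physical plan only
}
```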
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala
index bd1ce8aa3e..b38bbd8e7e 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala
@@ -170,6 +170,27 @@ class StatisticsCollectionSuite extends StatisticsCollectionTestBase with SharedSQLContext {
     }
     checkColStats(df, mutable.LinkedHashMap(expectedColStats: _*))
   }
+
+  test("number format in statistics") {
+    val numbers = Seq(
+      BigInt(0) -> ("0.0 B", "0"),
+      BigInt(100) -> ("100.0 B", "100"),
+      BigInt(2047) -> ("2047.0 B", "2.05E+3"),
+      BigInt(2048) -> ("2.0 KB", "2.05E+3"),
+      BigInt(3333333) -> ("3.2 MB", "3.33E+6"),
+      BigInt(4444444444L) -> ("4.1 GB", "4.44E+9"),
+      BigInt(5555555555555L) -> ("5.1 TB", "5.56E+12"),
+      BigInt(6666666666666666L) -> ("5.9 PB", "6.67E+15"),
+      BigInt(1L << 10) * (1L << 60) -> ("1024.0 EB", "1.18E+21"),
+      BigInt(1L << 11) * (1L << 60) -> ("2.36E+21 B", "2.36E+21")
+    )
+    numbers.foreach { case (input, (expectedSize, expectedRows)) =>
+      val stats = Statistics(sizeInBytes = input, rowCount = Some(input))
+      val expectedString = s"sizeInBytes=$expectedSize, rowCount=$expectedRows," +
+        s" isBroadcastable=${stats.isBroadcastable}"
+      assert(stats.simpleString == expectedString)
+    }
+  }
 }
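To help decode the expectations above: sizes switch to the next unit (dividing by 1024) once the value reaches 2048, so 2047 bytes stays "2047.0 B" while 2048 becomes "2.0 KB"; EB is the largest unit, beyond which the size falls back to scientific notation in bytes; and row counts are rendered with three significant digits. The following is a rough, unofficial approximation of that behavior, not Spark's actual Utils.bytesToString or Statistics.simpleString code:

```
import java.math.{MathContext, RoundingMode}

object StatsFormatSketch {
  private val units = Seq("B", "KB", "MB", "GB", "TB", "PB", "EB")

  // Three significant digits; small integers print plainly ("0", "100"),
  // larger ones in scientific notation ("2.05E+3").
  def toSci(n: BigInt): String =
    BigDecimal(n).round(new MathContext(3, RoundingMode.HALF_UP)).toString

  def bytesToString(size: BigInt): String = {
    val limit = BigInt(1L << 11) * BigInt(1L << 60)  // 2^71 bytes = 2048 EB
    if (size >= limit) {
      s"${toSci(size)} B"  // too large even for the EB unit
    } else {
      var value = BigDecimal(size)
      var unit = 0
      while (value >= 2048 && unit < units.size - 1) { value /= 1024; unit += 1 }
      f"${value.toDouble}%.1f ${units(unit)}"
    }
  }

  // bytesToString(BigInt(2047)) == "2047.0 B"
  // bytesToString(BigInt(2048)) == "2.0 KB"
  // toSci(BigInt(3333333))      == "3.33E+6"
}
```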