aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKaiXinXiaoLei <huleilei1@huawei.com>2016-06-15 16:11:46 -0700
committerShixiong Zhu <shixiong@databricks.com>2016-06-15 16:11:46 -0700
commit3e6d567a4688f064f2a2259c8e436b7c628a431c (patch)
tree9b22ef32c6305e77d8bc965d69af3725a5386754
parent6e0b3d795c572a9df589e148c0e55e4eeba7946c (diff)
downloadspark-3e6d567a4688f064f2a2259c8e436b7c628a431c.tar.gz
spark-3e6d567a4688f064f2a2259c8e436b7c628a431c.tar.bz2
spark-3e6d567a4688f064f2a2259c8e436b7c628a431c.zip
[SPARK-12492][SQL] Add missing SQLExecution.withNewExecutionId for hiveResultString
## What changes were proposed in this pull request? Add missing SQLExecution.withNewExecutionId for hiveResultString so that queries running in `spark-sql` will be shown in Web UI. Closes #13115 ## How was this patch tested? Existing unit tests. Author: KaiXinXiaoLei <huleilei1@huawei.com> Closes #13689 from zsxwing/pr13115.
-rw-r--r--sql/core/src/main/scala/org/apache/spark/sql/execution/QueryExecution.scala31
1 file changed, 17 insertions, 14 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/QueryExecution.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/QueryExecution.scala
index e6dc50a40e..5b9af26dfc 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/QueryExecution.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/QueryExecution.scala
@@ -113,24 +113,27 @@ class QueryExecution(val sparkSession: SparkSession, val logical: LogicalPlan) {
*/
def hiveResultString(): Seq[String] = executedPlan match {
case ExecutedCommandExec(desc: DescribeTableCommand) =>
- // If it is a describe command for a Hive table, we want to have the output format
- // be similar with Hive.
- desc.run(sparkSession).map {
- case Row(name: String, dataType: String, comment) =>
- Seq(name, dataType,
- Option(comment.asInstanceOf[String]).getOrElse(""))
- .map(s => String.format(s"%-20s", s))
- .mkString("\t")
+ SQLExecution.withNewExecutionId(sparkSession, this) {
+ // If it is a describe command for a Hive table, we want to have the output format
+ // be similar with Hive.
+ desc.run(sparkSession).map {
+ case Row(name: String, dataType: String, comment) =>
+ Seq(name, dataType,
+ Option(comment.asInstanceOf[String]).getOrElse(""))
+ .map(s => String.format(s"%-20s", s))
+ .mkString("\t")
+ }
}
case command: ExecutedCommandExec =>
command.executeCollect().map(_.getString(0))
-
case other =>
- val result: Seq[Seq[Any]] = other.executeCollectPublic().map(_.toSeq).toSeq
- // We need the types so we can output struct field names
- val types = analyzed.output.map(_.dataType)
- // Reformat to match hive tab delimited output.
- result.map(_.zip(types).map(toHiveString)).map(_.mkString("\t")).toSeq
+ SQLExecution.withNewExecutionId(sparkSession, this) {
+ val result: Seq[Seq[Any]] = other.executeCollectPublic().map(_.toSeq).toSeq
+ // We need the types so we can output struct field names
+ val types = analyzed.output.map(_.dataType)
+ // Reformat to match hive tab delimited output.
+ result.map(_.zip(types).map(toHiveString)).map(_.mkString("\t")).toSeq
+ }
}
/** Formats a datum (based on the given data type) and returns the string representation. */