diff options
author | Eric Liang <ekl@databricks.com> | 2016-08-22 15:48:35 -0700 |
---|---|---|
committer | Reynold Xin <rxin@databricks.com> | 2016-08-22 15:48:35 -0700 |
commit | 84770b59f773f132073cd2af4204957fc2d7bf35 (patch) | |
tree | f1f4c739df710ebcc7bfe7a459234102c1cb698b /sql/core | |
parent | 929cb8beed9b7014231580cc002853236a5337d6 (diff) | |
download | spark-84770b59f773f132073cd2af4204957fc2d7bf35.tar.gz spark-84770b59f773f132073cd2af4204957fc2d7bf35.tar.bz2 spark-84770b59f773f132073cd2af4204957fc2d7bf35.zip |
[SPARK-17162] Range does not support SQL generation
## What changes were proposed in this pull request?
The range operator previously didn't support SQL generation, which made it not possible to use in views.
## How was this patch tested?
Unit tests.
cc hvanhovell
Author: Eric Liang <ekl@databricks.com>
Closes #14724 from ericl/spark-17162.
Diffstat (limited to 'sql/core')
3 files changed, 5 insertions, 3 deletions
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala index af1de511da..dde91b0a86 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/SQLBuilder.scala @@ -208,6 +208,9 @@ class SQLBuilder private ( case p: LocalRelation => p.toSQL(newSubqueryName()) + case p: Range => + p.toSQL() + case OneRowRelation => "" diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicPhysicalOperators.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicPhysicalOperators.scala index ad8a716898..3562083b06 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/basicPhysicalOperators.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/basicPhysicalOperators.scala @@ -318,7 +318,7 @@ case class RangeExec(range: org.apache.spark.sql.catalyst.plans.logical.Range) def start: Long = range.start def step: Long = range.step - def numSlices: Int = range.numSlices + def numSlices: Int = range.numSlices.getOrElse(sparkContext.defaultParallelism) def numElements: BigInt = range.numElements override val output: Seq[Attribute] = range.output diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala index e397cfa058..f0d7b64c3c 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala @@ -179,8 +179,7 @@ case class CreateViewCommand( sparkSession.sql(viewSQL).queryExecution.assertAnalyzed() } catch { case NonFatal(e) => - throw new RuntimeException( - "Failed to analyze the canonicalized SQL. It is possible there is a bug in Spark.", e) + throw new RuntimeException(s"Failed to analyze the canonicalized SQL: ${viewSQL}", e) } val viewSchema = if (userSpecifiedColumns.isEmpty) { |