aboutsummaryrefslogtreecommitdiff
path: root/sql/core/src/test/resources/sql-tests
diff options
context:
space:
mode:
authorHerman van Hovell <hvanhovell@databricks.com>2017-02-24 23:05:36 -0800
committerWenchen Fan <wenchen@databricks.com>2017-02-24 23:05:59 -0800
commit8f0511ed49a353fb0745f320a84063ced5cc1857 (patch)
treecb770f0843aa77f2afaa81a98e12a2a1231ce60c /sql/core/src/test/resources/sql-tests
parent4cb025afafe63d5871356d9dc38d58c1df0da996 (diff)
downloadspark-8f0511ed49a353fb0745f320a84063ced5cc1857.tar.gz
spark-8f0511ed49a353fb0745f320a84063ced5cc1857.tar.bz2
spark-8f0511ed49a353fb0745f320a84063ced5cc1857.zip
[SPARK-19650] Commands should not trigger a Spark job
Spark executes SQL commands eagerly. It does this by creating an RDD which contains the command's results. The downside to this is that any action on this RDD triggers a Spark job which is expensive and is unnecessary. This PR fixes this by avoiding the materialization of an `RDD` for `Command`s; it just materializes the results and puts them in a `LocalRelation`. Added a regression test to `SQLQuerySuite`. Author: Herman van Hovell <hvanhovell@databricks.com> Closes #17027 from hvanhovell/no-job-command.
Diffstat (limited to 'sql/core/src/test/resources/sql-tests')
-rw-r--r--sql/core/src/test/resources/sql-tests/results/change-column.sql.out4
-rw-r--r--sql/core/src/test/resources/sql-tests/results/group-by-ordinal.sql.out2
-rw-r--r--sql/core/src/test/resources/sql-tests/results/order-by-ordinal.sql.out2
-rw-r--r--sql/core/src/test/resources/sql-tests/results/outer-join.sql.out4
4 files changed, 6 insertions, 6 deletions
diff --git a/sql/core/src/test/resources/sql-tests/results/change-column.sql.out b/sql/core/src/test/resources/sql-tests/results/change-column.sql.out
index 59eb56920c..ba8bc936f0 100644
--- a/sql/core/src/test/resources/sql-tests/results/change-column.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/change-column.sql.out
@@ -196,7 +196,7 @@ SET spark.sql.caseSensitive=false
-- !query 19 schema
struct<key:string,value:string>
-- !query 19 output
-spark.sql.caseSensitive
+spark.sql.caseSensitive false
-- !query 20
@@ -212,7 +212,7 @@ SET spark.sql.caseSensitive=true
-- !query 21 schema
struct<key:string,value:string>
-- !query 21 output
-spark.sql.caseSensitive
+spark.sql.caseSensitive true
-- !query 22
diff --git a/sql/core/src/test/resources/sql-tests/results/group-by-ordinal.sql.out b/sql/core/src/test/resources/sql-tests/results/group-by-ordinal.sql.out
index c64520ff93..c0930bbde6 100644
--- a/sql/core/src/test/resources/sql-tests/results/group-by-ordinal.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/group-by-ordinal.sql.out
@@ -177,7 +177,7 @@ set spark.sql.groupByOrdinal=false
-- !query 17 schema
struct<key:string,value:string>
-- !query 17 output
-spark.sql.groupByOrdinal
+spark.sql.groupByOrdinal false
-- !query 18
diff --git a/sql/core/src/test/resources/sql-tests/results/order-by-ordinal.sql.out b/sql/core/src/test/resources/sql-tests/results/order-by-ordinal.sql.out
index 03a4e72d0f..cc47cc67c8 100644
--- a/sql/core/src/test/resources/sql-tests/results/order-by-ordinal.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/order-by-ordinal.sql.out
@@ -114,7 +114,7 @@ set spark.sql.orderByOrdinal=false
-- !query 9 schema
struct<key:string,value:string>
-- !query 9 output
-spark.sql.orderByOrdinal
+spark.sql.orderByOrdinal false
-- !query 10
diff --git a/sql/core/src/test/resources/sql-tests/results/outer-join.sql.out b/sql/core/src/test/resources/sql-tests/results/outer-join.sql.out
index cc50b9444b..5db3bae5d0 100644
--- a/sql/core/src/test/resources/sql-tests/results/outer-join.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/outer-join.sql.out
@@ -63,7 +63,7 @@ set spark.sql.crossJoin.enabled = true
-- !query 5 schema
struct<key:string,value:string>
-- !query 5 output
-spark.sql.crossJoin.enabled
+spark.sql.crossJoin.enabled true
-- !query 6
@@ -85,4 +85,4 @@ set spark.sql.crossJoin.enabled = false
-- !query 7 schema
struct<key:string,value:string>
-- !query 7 output
-spark.sql.crossJoin.enabled
+spark.sql.crossJoin.enabled false