author    Eric Liang <ekl@databricks.com>    2016-09-28 16:20:49 -0700
committer Reynold Xin <rxin@databricks.com>  2016-09-28 16:20:49 -0700
commit    557d6e32272dee4eaa0f426cc3e2f82ea361c3da (patch)
tree      7b894d4d6c4b24b5dd7656ead74d461eefd5dfd9
parent    a6cfa3f38bcf6ba154d5ed2a53748fbc90c8872a (diff)
[SPARK-17713][SQL] Move row-datasource related tests out of JDBCSuite
## What changes were proposed in this pull request?

As a follow-up to https://github.com/apache/spark/pull/15273, move the tests that are not JDBC-specific out of JDBCSuite and into a new RowDataSourceStrategySuite.

## How was this patch tested?

Ran the moved test.

Author: Eric Liang <ekl@databricks.com>

Closes #15287 from ericl/spark-17713.
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/RowDataSourceStrategySuite.scala  72
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala  8
2 files changed, 72 insertions(+), 8 deletions(-)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/RowDataSourceStrategySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/RowDataSourceStrategySuite.scala
new file mode 100644
index 0000000000..d9afa46353
--- /dev/null
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/RowDataSourceStrategySuite.scala
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.execution.datasources
+
+import java.sql.DriverManager
+import java.util.Properties
+
+import org.scalatest.BeforeAndAfter
+
+import org.apache.spark.SparkFunSuite
+import org.apache.spark.sql.{DataFrame, Row}
+import org.apache.spark.sql.sources._
+import org.apache.spark.sql.test.SharedSQLContext
+import org.apache.spark.sql.types._
+import org.apache.spark.util.Utils
+
+class RowDataSourceStrategySuite extends SparkFunSuite with BeforeAndAfter with SharedSQLContext {
+ import testImplicits._
+
+ val url = "jdbc:h2:mem:testdb0"
+ val urlWithUserAndPass = "jdbc:h2:mem:testdb0;user=testUser;password=testPass"
+ var conn: java.sql.Connection = null
+
+ before {
+ Utils.classForName("org.h2.Driver")
+ // Extra properties that will be specified for our database. We need these to test
+ // usage of parameters from the OPTIONS clause in queries.
+ val properties = new Properties()
+ properties.setProperty("user", "testUser")
+ properties.setProperty("password", "testPass")
+ properties.setProperty("rowId", "false")
+
+ conn = DriverManager.getConnection(url, properties)
+ conn.prepareStatement("create schema test").executeUpdate()
+ conn.prepareStatement("create table test.inttypes (a INT, b INT, c INT)").executeUpdate()
+ conn.prepareStatement("insert into test.inttypes values (1, 2, 3)").executeUpdate()
+ conn.commit()
+ sql(
+ s"""
+ |CREATE TEMPORARY TABLE inttypes
+ |USING org.apache.spark.sql.jdbc
+ |OPTIONS (url '$url', dbtable 'TEST.INTTYPES', user 'testUser', password 'testPass')
+ """.stripMargin.replaceAll("\n", " "))
+ }
+
+ after {
+ conn.close()
+ }
+
+ test("SPARK-17673: Exchange reuse respects differences in output schema") {
+ val df = sql("SELECT * FROM inttypes")
+ val df1 = df.groupBy("a").agg("b" -> "min")
+ val df2 = df.groupBy("a").agg("c" -> "min")
+ val res = df1.union(df2)
+ assert(res.distinct().count() == 2) // would be 1 if the exchange was incorrectly reused
+ }
+}
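The assertion above is worth unpacking. With the single row (1, 2, 3), df1 produces (a=1, min(b)=2) and df2 produces (a=1, min(c)=3), so the union holds two distinct rows. Under the pre-SPARK-17673 planner, df2's aggregation could incorrectly reuse df1's shuffle exchange even though the two subplans project different columns; both branches would then emit (1, 2) and the distinct count would collapse to 1. As a minimal sketch (not part of this patch; it assumes the Spark 2.x internal class ReusedExchangeExec and the res Dataset from the test above), one could also inspect the physical plan directly:

import org.apache.spark.sql.execution.exchange.ReusedExchangeExec

// Collect any reused-exchange nodes from the executed physical plan. With
// the fix in place, the differing output schemas of the two aggregations
// should prevent reuse here, so the collection should be empty.
val reused = res.queryExecution.executedPlan.collect {
  case r: ReusedExchangeExec => r
}
assert(reused.isEmpty, s"unexpected exchange reuse: $reused")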
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
index c94cb3b69d..10f15ca280 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
@@ -791,12 +791,4 @@ class JDBCSuite extends SparkFunSuite
val schema = JdbcUtils.schemaString(df, "jdbc:mysql://localhost:3306/temp")
assert(schema.contains("`order` TEXT"))
}
-
- test("SPARK-17673: Exchange reuse respects differences in output schema") {
- val df = sql("SELECT * FROM inttypes WHERE a IS NOT NULL")
- val df1 = df.groupBy("a").agg("c" -> "min")
- val df2 = df.groupBy("a").agg("d" -> "min")
- val res = df1.union(df2)
- assert(res.distinct().count() == 2) // would be 1 if the exchange was incorrectly reused
- }
}
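For context, the underlying bug (SPARK-17673, fixed in https://github.com/apache/spark/pull/15273) was that two row-data-source scans projecting different columns could compare as equal, letting the exchange-reuse rule share one shuffle between them. A defensive reuse rule keys candidates by output schema before comparing them for result equality. The following is a simplified, self-contained sketch of that bucketing idea, illustrative only and not Spark's actual ReuseExchange rule; Plan, schema, and canonical are hypothetical stand-ins for the internal types:

import scala.collection.mutable

// Hypothetical stand-in for a physical plan: its output schema plus a
// canonical form used for result-equality comparison.
case class Plan(schema: Seq[String], canonical: String)

// Reuse an earlier plan only when both the canonical form and the output
// schema match. Bucketing by schema first rules out the SPARK-17673
// failure mode (reuse across differing outputs) by construction.
def reuseExchanges(plans: Seq[Plan]): Seq[Plan] = {
  val seen = mutable.HashMap[Seq[String], mutable.ArrayBuffer[Plan]]()
  plans.map { p =>
    val bucket = seen.getOrElseUpdate(p.schema, mutable.ArrayBuffer.empty[Plan])
    bucket.find(_.canonical == p.canonical).getOrElse { bucket += p; p }
  }
}

Under such a scheme, the min(b) and min(c) aggregations in the test above land in different buckets because their schemas differ, so the second can never be rewired to read the first one's shuffle output.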