author     Sean Owen <sowen@cloudera.com>  2016-10-24 10:44:45 +0100
committer  Sean Owen <sowen@cloudera.com>  2016-10-24 10:44:45 +0100
commit     4ecbe1b92f4c4c5b2d734895c09d8ded0ed48d4d (patch)
tree       c996509d584d6d5da25d647b7ac43d3eab9b5853 /sql/hive
parent     c64a8ff39794d60c596c0d34130019c09c9c8012 (diff)
[SPARK-17810][SQL] Default spark.sql.warehouse.dir is relative to local FS but can resolve as HDFS path
## What changes were proposed in this pull request?

Always resolve spark.sql.warehouse.dir as a local path, and as relative to the working dir, not the home dir.

## How was this patch tested?

Existing tests.

Author: Sean Owen <sowen@cloudera.com>

Closes #15382 from srowen/SPARK-17810.
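The test changes below follow from the problem named in the title: a warehouse directory given only as a bare relative value (such as `spark-warehouse`) can end up resolved against the Hadoop default filesystem, i.e. HDFS, instead of the local filesystem. The following Scala sketch is illustrative only and not the actual Spark implementation; `resolveWarehousePath` is a hypothetical helper showing the intended resolution: qualify the configured directory against the current working directory on the local filesystem so the result carries an explicit `file:` scheme.

```scala
import java.io.File
import java.net.URI

// Hypothetical helper (not Spark's own code) sketching the intended behavior:
// a relative spark.sql.warehouse.dir such as "spark-warehouse" is qualified
// against the current working directory on the local filesystem, producing an
// explicit file: URI rather than a bare path that a Hadoop FileSystem with an
// HDFS fs.defaultFS could claim.
def resolveWarehousePath(configured: String): URI = {
  val uri = new URI(configured)
  if (uri.getScheme != null) uri                   // already qualified (file:, hdfs:, ...)
  else new File(configured).getAbsoluteFile.toURI  // relative to working dir, local FS
}

// resolveWarehousePath("spark-warehouse")
//   => file:/current/working/dir/spark-warehouse
// resolveWarehousePath("hdfs://nn:8020/user/hive/warehouse")
//   => unchanged, the explicit scheme wins
```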
Diffstat (limited to 'sql/hive')
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala | 4
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala | 5
2 files changed, 6 insertions, 3 deletions
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
index 2b945dbbe0..6fbbed1d47 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
@@ -18,6 +18,7 @@
package org.apache.spark.sql.hive.execution
import java.io.File
+import java.net.URI
import java.sql.Timestamp
import java.util.{Locale, TimeZone}
@@ -954,7 +955,8 @@ class HiveQuerySuite extends HiveComparisonTest with SQLTestUtils with BeforeAnd
.mkString("/")
// Loads partition data to a temporary table to verify contents
- val path = s"${sparkSession.getWarehousePath}/dynamic_part_table/$partFolder/part-00000"
+ val warehousePathFile = new URI(sparkSession.getWarehousePath()).getPath
+ val path = s"$warehousePathFile/dynamic_part_table/$partFolder/part-00000"
sql("DROP TABLE IF EXISTS dp_verify")
sql("CREATE TABLE dp_verify(intcol INT)")
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala
index 9ed454e578..d9ddcbd57c 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala
@@ -18,6 +18,7 @@
package org.apache.spark.sql.sources
import java.io.File
+import java.net.URI
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.catalog.BucketSpec
@@ -489,8 +490,8 @@ class BucketedReadSuite extends QueryTest with SQLTestUtils with TestHiveSinglet
test("error if there exists any malformed bucket files") {
withTable("bucketed_table") {
df1.write.format("parquet").bucketBy(8, "i").saveAsTable("bucketed_table")
- val tableDir = new File(hiveContext
- .sparkSession.getWarehousePath, "bucketed_table")
+ val warehouseFilePath = new URI(hiveContext.sparkSession.getWarehousePath).getPath
+ val tableDir = new File(warehouseFilePath, "bucketed_table")
Utils.deleteRecursively(tableDir)
df1.write.parquet(tableDir.getAbsolutePath)
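Both test changes apply the same pattern: the value returned by `getWarehousePath` can now carry a scheme (e.g. a `file:` URI string), which `java.io.File` would treat as a relative path whose first component is the literal text `file:`, so the tests extract the path component with `java.net.URI` before building file paths. A minimal sketch of that pattern, with an illustrative literal standing in for the real warehouse path:

```scala
import java.io.File
import java.net.URI

// Illustrative value only; in the tests it comes from sparkSession.getWarehousePath.
val warehousePath = "file:/tmp/spark-warehouse"

// new File("file:/tmp/spark-warehouse") would be a relative path starting with the
// literal component "file:", so strip the scheme and keep only the path portion.
val localDir = new URI(warehousePath).getPath        // "/tmp/spark-warehouse"
val tableDir = new File(localDir, "bucketed_table")  // /tmp/spark-warehouse/bucketed_table
```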