author     Cheng Lian <lian@databricks.com>   2015-07-21 15:08:44 +0800
committer  Cheng Lian <lian@databricks.com>   2015-07-21 15:08:44 +0800
commit     d38c5029a2ca845e2782096044a6412b653c9f95 (patch)
tree       80c71a2f152e5bdd348edf66599f0a3892236ad6 /sql
parent     1ddd0f2f1688560f88470e312b72af04364e2d49 (diff)
[SPARK-9100] [SQL] Adds DataFrame reader/writer shortcut methods for ORC
This PR adds DataFrame reader/writer shortcut methods for ORC in both Scala and Python.

Author: Cheng Lian <lian@databricks.com>

Closes #7444 from liancheng/spark-9100 and squashes the following commits:

284d043 [Cheng Lian] Fixes PySpark test cases and addresses PR comments
e0b09fb [Cheng Lian] Adds DataFrame reader/writer shortcut methods for ORC
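(For illustration only, not part of the commit: a minimal round-trip sketch of the new shortcuts in Scala. The SparkContext setup, sample data, and /tmp path are assumptions; per the @note tags in the diff below, the methods currently require a HiveContext.)

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.hive.HiveContext

    val sc = new SparkContext(new SparkConf().setAppName("orc-shortcut-sketch"))
    val sqlContext = new HiveContext(sc)
    import sqlContext.implicits._

    val df = sc.parallelize(1 to 3).map(i => (i, s"val_$i")).toDF("key", "value")

    // New writer shortcut; previously: df.write.format("orc").save(path)
    df.write.orc("/tmp/people.orc")

    // New reader shortcut; previously: sqlContext.read.format("orc").load(path)
    val loaded = sqlContext.read.orc("/tmp/people.orc")
    loaded.show()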
Diffstat (limited to 'sql')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala                     |  9
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala                     | 12
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcHadoopFsRelationSuite.scala   |  3
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcPartitionDiscoverySuite.scala | 14
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala              | 12
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcTest.scala                    |  8
6 files changed, 38 insertions(+), 20 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index 0e37ad3e12..f1c1ddf898 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -265,6 +265,15 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
}
/**
+ * Loads an ORC file and returns the result as a [[DataFrame]].
+ *
+ * @param path input path
+ * @since 1.5.0
+ * @note Currently, this method can only be used together with `HiveContext`.
+ */
+ def orc(path: String): DataFrame = format("orc").load(path)
+
+ /**
* Returns the specified table as a [[DataFrame]].
*
* @since 1.4.0
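(Usage sketch, assuming a HiveContext-backed sqlContext and a hypothetical path: the new reader method is plain sugar for format("orc").load, so the two calls below are interchangeable.)

    val viaFormat   = sqlContext.read.format("orc").load("/tmp/people.orc")
    val viaShortcut = sqlContext.read.orc("/tmp/people.orc")
    assert(viaFormat.schema == viaShortcut.schema)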
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
index 5548b26cb8..3e7b9cd797 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
@@ -280,6 +280,18 @@ final class DataFrameWriter private[sql](df: DataFrame) {
*/
def parquet(path: String): Unit = format("parquet").save(path)
+ /**
+ * Saves the content of the [[DataFrame]] in ORC format at the specified path.
+ * This is equivalent to:
+ * {{{
+ * format("orc").save(path)
+ * }}}
+ *
+ * @since 1.5.0
+ * @note Currently, this method can only be used together with `HiveContext`.
+ */
+ def orc(path: String): Unit = format("orc").save(path)
+
///////////////////////////////////////////////////////////////////////////////////////
// Builder pattern config options
///////////////////////////////////////////////////////////////////////////////////////
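(Usage sketch with a save mode, mirroring the test updates below; the DataFrame and path are assumed. Builder calls such as mode(...) still chain before the terminal orc(...) call, exactly as they did before save(...).)

    import org.apache.spark.sql.SaveMode

    // Equivalent to: df.write.format("orc").mode(SaveMode.Overwrite).save("/tmp/people.orc")
    df.write.mode(SaveMode.Overwrite).orc("/tmp/people.orc")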
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcHadoopFsRelationSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcHadoopFsRelationSuite.scala
index 080af5bb23..af3f468aaa 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcHadoopFsRelationSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcHadoopFsRelationSuite.scala
@@ -41,8 +41,7 @@ class OrcHadoopFsRelationSuite extends HadoopFsRelationTest {
.parallelize(for (i <- 1 to 3) yield (i, s"val_$i", p1))
.toDF("a", "b", "p1")
.write
- .format("orc")
- .save(partitionDir.toString)
+ .orc(partitionDir.toString)
}
val dataSchemaWithPartition =
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcPartitionDiscoverySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcPartitionDiscoverySuite.scala
index 3c2efe329b..d463e8fd62 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcPartitionDiscoverySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcPartitionDiscoverySuite.scala
@@ -49,13 +49,13 @@ class OrcPartitionDiscoverySuite extends QueryTest with BeforeAndAfterAll {
def makeOrcFile[T <: Product: ClassTag: TypeTag](
data: Seq[T], path: File): Unit = {
- data.toDF().write.format("orc").mode("overwrite").save(path.getCanonicalPath)
+ data.toDF().write.mode("overwrite").orc(path.getCanonicalPath)
}
def makeOrcFile[T <: Product: ClassTag: TypeTag](
df: DataFrame, path: File): Unit = {
- df.write.format("orc").mode("overwrite").save(path.getCanonicalPath)
+ df.write.mode("overwrite").orc(path.getCanonicalPath)
}
protected def withTempTable(tableName: String)(f: => Unit): Unit = {
@@ -90,7 +90,7 @@ class OrcPartitionDiscoverySuite extends QueryTest with BeforeAndAfterAll {
makePartitionDir(base, defaultPartitionName, "pi" -> pi, "ps" -> ps))
}
- read.format("orc").load(base.getCanonicalPath).registerTempTable("t")
+ read.orc(base.getCanonicalPath).registerTempTable("t")
withTempTable("t") {
checkAnswer(
@@ -137,7 +137,7 @@ class OrcPartitionDiscoverySuite extends QueryTest with BeforeAndAfterAll {
makePartitionDir(base, defaultPartitionName, "pi" -> pi, "ps" -> ps))
}
- read.format("orc").load(base.getCanonicalPath).registerTempTable("t")
+ read.orc(base.getCanonicalPath).registerTempTable("t")
withTempTable("t") {
checkAnswer(
@@ -187,9 +187,8 @@ class OrcPartitionDiscoverySuite extends QueryTest with BeforeAndAfterAll {
}
read
- .format("orc")
.option(ConfVars.DEFAULTPARTITIONNAME.varname, defaultPartitionName)
- .load(base.getCanonicalPath)
+ .orc(base.getCanonicalPath)
.registerTempTable("t")
withTempTable("t") {
@@ -230,9 +229,8 @@ class OrcPartitionDiscoverySuite extends QueryTest with BeforeAndAfterAll {
}
read
- .format("orc")
.option(ConfVars.DEFAULTPARTITIONNAME.varname, defaultPartitionName)
- .load(base.getCanonicalPath)
+ .orc(base.getCanonicalPath)
.registerTempTable("t")
withTempTable("t") {
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala
index ca131faaee..744d462938 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcQuerySuite.scala
@@ -63,14 +63,14 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {
withOrcFile(data) { file =>
checkAnswer(
- sqlContext.read.format("orc").load(file),
+ sqlContext.read.orc(file),
data.toDF().collect())
}
}
test("Read/write binary data") {
withOrcFile(BinaryData("test".getBytes("utf8")) :: Nil) { file =>
- val bytes = read.format("orc").load(file).head().getAs[Array[Byte]](0)
+ val bytes = read.orc(file).head().getAs[Array[Byte]](0)
assert(new String(bytes, "utf8") === "test")
}
}
@@ -88,7 +88,7 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {
withOrcFile(data) { file =>
checkAnswer(
- read.format("orc").load(file),
+ read.orc(file),
data.toDF().collect())
}
}
@@ -158,7 +158,7 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {
withOrcFile(data) { file =>
checkAnswer(
- read.format("orc").load(file),
+ read.orc(file),
Row(Seq.fill(5)(null): _*))
}
}
@@ -310,7 +310,7 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {
""".stripMargin)
val errorMessage = intercept[AnalysisException] {
- sqlContext.read.format("orc").load(path)
+ sqlContext.read.orc(path)
}.getMessage
assert(errorMessage.contains("Failed to discover schema from ORC files"))
@@ -323,7 +323,7 @@ class OrcQuerySuite extends QueryTest with BeforeAndAfterAll with OrcTest {
|SELECT key, value FROM single
""".stripMargin)
- val df = sqlContext.read.format("orc").load(path)
+ val df = sqlContext.read.orc(path)
assert(df.schema === singleRowDF.schema.asNullable)
checkAnswer(df, singleRowDF)
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcTest.scala
index 5daf691aa8..9d76d6503a 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcTest.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcTest.scala
@@ -39,7 +39,7 @@ private[sql] trait OrcTest extends SQLTestUtils {
(data: Seq[T])
(f: String => Unit): Unit = {
withTempPath { file =>
- sparkContext.parallelize(data).toDF().write.format("orc").save(file.getCanonicalPath)
+ sparkContext.parallelize(data).toDF().write.orc(file.getCanonicalPath)
f(file.getCanonicalPath)
}
}
@@ -51,7 +51,7 @@ private[sql] trait OrcTest extends SQLTestUtils {
protected def withOrcDataFrame[T <: Product: ClassTag: TypeTag]
(data: Seq[T])
(f: DataFrame => Unit): Unit = {
- withOrcFile(data)(path => f(sqlContext.read.format("orc").load(path)))
+ withOrcFile(data)(path => f(sqlContext.read.orc(path)))
}
/**
@@ -70,11 +70,11 @@ private[sql] trait OrcTest extends SQLTestUtils {
protected def makeOrcFile[T <: Product: ClassTag: TypeTag](
data: Seq[T], path: File): Unit = {
- data.toDF().write.format("orc").mode(SaveMode.Overwrite).save(path.getCanonicalPath)
+ data.toDF().write.mode(SaveMode.Overwrite).orc(path.getCanonicalPath)
}
protected def makeOrcFile[T <: Product: ClassTag: TypeTag](
df: DataFrame, path: File): Unit = {
- df.write.format("orc").mode(SaveMode.Overwrite).save(path.getCanonicalPath)
+ df.write.mode(SaveMode.Overwrite).orc(path.getCanonicalPath)
}
}