about · summary · refs · log · tree · commit · diff
diff options
context:
space:
mode:
author: wangfei <wangfei1@huawei.com> 2014-12-01 14:02:02 -0800
committer: Michael Armbrust <michael@databricks.com> 2014-12-01 14:02:28 -0800
commit 31cf51bfaa0e332b903cb5d7f511dfa76d36bdc5 (patch)
tree e28efd0bb03d3b7e891edbc5744418e802fc9fdd
parent b39cfee0620ccd9c4e966a7d9bbd6017e35023cd (diff)
download spark-31cf51bfaa0e332b903cb5d7f511dfa76d36bdc5.tar.gz
spark-31cf51bfaa0e332b903cb5d7f511dfa76d36bdc5.tar.bz2
spark-31cf51bfaa0e332b903cb5d7f511dfa76d36bdc5.zip
[SQL] Minor fix for doc and comment
Author: wangfei <wangfei1@huawei.com>

Closes #3533 from scwf/sql-doc1 and squashes the following commits:

962910b [wangfei] doc and comment fix

(cherry picked from commit 7b79957879db4dfcc7c3601cb40ac4fd576259a5)
Signed-off-by: Michael Armbrust <michael@databricks.com>
-rw-r--r-- docs/sql-programming-guide.md | 3
-rw-r--r-- examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala | 7
-rw-r--r-- sql/core/src/main/scala/org/apache/spark/sql/parquet/newParquet.scala | 2
3 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md
index 96a3209c52..c38ca55653 100644
--- a/docs/sql-programming-guide.md
+++ b/docs/sql-programming-guide.md
@@ -1002,7 +1002,7 @@ Several caching related features are not supported yet:
## Compatibility with Apache Hive
Spark SQL is designed to be compatible with the Hive Metastore, SerDes and UDFs. Currently Spark
-SQL is based on Hive 0.12.0.
+SQL is based on Hive 0.12.0 and 0.13.1.
#### Deploying in Existing Hive Warehouses
@@ -1041,6 +1041,7 @@ Spark SQL supports the vast majority of Hive features, such as:
* Sampling
* Explain
* Partitioned tables
+* View
* All Hive DDL Functions, including:
* `CREATE TABLE`
* `CREATE TABLE AS SELECT`
diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala b/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
index 227acc1175..138923c4d7 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
@@ -29,9 +29,10 @@ object HiveFromSpark {
val sc = new SparkContext(sparkConf)
val path = s"${System.getenv("SPARK_HOME")}/examples/src/main/resources/kv1.txt"
- // A local hive context creates an instance of the Hive Metastore in process, storing
- // the warehouse data in the current directory. This location can be overridden by
- // specifying a second parameter to the constructor.
+ // A hive context adds support for finding tables in the MetaStore and writing queries
+ // using HiveQL. Users who do not have an existing Hive deployment can still create a
+ // HiveContext. When not configured by the hive-site.xml, the context automatically
+ // creates metastore_db and warehouse in the current directory.
val hiveContext = new HiveContext(sc)
import hiveContext._
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/parquet/newParquet.scala b/sql/core/src/main/scala/org/apache/spark/sql/parquet/newParquet.scala
index 6404fec435..9b89c3bfb3 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/parquet/newParquet.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/parquet/newParquet.scala
@@ -49,7 +49,7 @@ class DefaultSource extends RelationProvider {
sqlContext: SQLContext,
parameters: Map[String, String]): BaseRelation = {
val path =
- parameters.getOrElse("path", sys.error("'path' must be specifed for parquet tables."))
+ parameters.getOrElse("path", sys.error("'path' must be specified for parquet tables."))
ParquetRelation2(path)(sqlContext)
}