about summary refs log tree commit diff
path: root/examples/src
diff options
context:
space:
mode:
author	Sandeep Singh <sandeep@techaddict.me>	2016-05-05 14:35:15 -0700
committer	Andrew Or <andrew@databricks.com>	2016-05-05 14:35:15 -0700
commit	ed6f3f8a5f3a6bf7c53e13c2798de398c9a526a6 (patch)
tree	cac99af0f11f39aae44ab2dc10ee5d08a4cb25a6 /examples/src
parent	8cba57a75cf9e29b54d97366a039a97a2f305d5d (diff)
download	spark-ed6f3f8a5f3a6bf7c53e13c2798de398c9a526a6.tar.gz
	spark-ed6f3f8a5f3a6bf7c53e13c2798de398c9a526a6.tar.bz2
	spark-ed6f3f8a5f3a6bf7c53e13c2798de398c9a526a6.zip
[SPARK-15072][SQL][REPL][EXAMPLES] Remove SparkSession.withHiveSupport
## What changes were proposed in this pull request?

Removing the `withHiveSupport` method of `SparkSession`; instead use `enableHiveSupport`.

## How was this patch tested?

Ran tests locally.

Author: Sandeep Singh <sandeep@techaddict.me>

Closes #12851 from techaddict/SPARK-15072.
Diffstat (limited to 'examples/src')
-rw-r--r--	examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala	| 14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala b/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
index ff33091621..a15cf5ded0 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
@@ -36,15 +36,19 @@ object HiveFromSpark {
def main(args: Array[String]) {
val sparkConf = new SparkConf().setAppName("HiveFromSpark")
- val sc = new SparkContext(sparkConf)
// A hive context adds support for finding tables in the MetaStore and writing queries
// using HiveQL. Users who do not have an existing Hive deployment can still create a
// HiveContext. When not configured by the hive-site.xml, the context automatically
// creates metastore_db and warehouse in the current directory.
- val sparkSession = SparkSession.withHiveSupport(sc)
- import sparkSession.implicits._
- import sparkSession.sql
+ val spark = SparkSession.builder
+ .config(sparkConf)
+ .enableHiveSupport()
+ .getOrCreate()
+ val sc = spark.sparkContext
+
+ import spark.implicits._
+ import spark.sql
sql("CREATE TABLE IF NOT EXISTS src (key INT, value STRING)")
sql(s"LOAD DATA LOCAL INPATH '${kv1File.getAbsolutePath}' INTO TABLE src")
@@ -74,7 +78,7 @@ object HiveFromSpark {
println("Result of SELECT *:")
sql("SELECT * FROM records r JOIN src s ON r.key = s.key").collect().foreach(println)
- sc.stop()
+ spark.stop()
}
}
// scalastyle:on println