about summary refs log tree commit diff
path: root/examples
diff options
context:
space:
mode:
authorDongjoon Hyun <dongjoon@apache.org>2016-05-31 17:40:44 -0700
committerAndrew Or <andrew@databricks.com>2016-05-31 17:40:44 -0700
commit85d6b0db9f5bd425c36482ffcb1c3b9fd0fcdb31 (patch)
tree2e09a7e6c626ec965d86b31fd3b64207be766349 /examples
parent93e97147eb499dde1e54e07ba113eebcbe25508a (diff)
downloadspark-85d6b0db9f5bd425c36482ffcb1c3b9fd0fcdb31.tar.gz
spark-85d6b0db9f5bd425c36482ffcb1c3b9fd0fcdb31.tar.bz2
spark-85d6b0db9f5bd425c36482ffcb1c3b9fd0fcdb31.zip
[SPARK-15618][SQL][MLLIB] Use SparkSession.builder.sparkContext if applicable.
## What changes were proposed in this pull request?

This PR changes function `SparkSession.builder.sparkContext(..)` from **private[sql]** into **private[spark]**, and uses it if applicable like the followings.

```
- val spark = SparkSession.builder().config(sc.getConf).getOrCreate()
+ val spark = SparkSession.builder().sparkContext(sc).getOrCreate()
```

## How was this patch tested?

Pass the existing Jenkins tests.

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #13365 from dongjoon-hyun/SPARK-15618.
Diffstat (limited to 'examples')
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/BroadcastTest.scala7
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala1
-rw-r--r--examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala5
3 files changed, 4 insertions, 9 deletions
diff --git a/examples/src/main/scala/org/apache/spark/examples/BroadcastTest.scala b/examples/src/main/scala/org/apache/spark/examples/BroadcastTest.scala
index c50f25d951..a68fd0285f 100644
--- a/examples/src/main/scala/org/apache/spark/examples/BroadcastTest.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/BroadcastTest.scala
@@ -29,13 +29,10 @@ object BroadcastTest {
val blockSize = if (args.length > 2) args(2) else "4096"
- val sparkConf = new SparkConf()
- .set("spark.broadcast.blockSize", blockSize)
-
val spark = SparkSession
- .builder
- .config(sparkConf)
+ .builder()
.appName("Broadcast Test")
+ .config("spark.broadcast.blockSize", blockSize)
.getOrCreate()
val sc = spark.sparkContext
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
index 7651aade49..3fbf8e0333 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
@@ -191,6 +191,7 @@ object LDAExample {
val spark = SparkSession
.builder
+ .sparkContext(sc)
.getOrCreate()
import spark.implicits._
diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala b/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
index d3bb7e4398..2d7a01a95d 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/hive/HiveFromSpark.scala
@@ -22,7 +22,6 @@ import java.io.File
import com.google.common.io.{ByteStreams, Files}
-import org.apache.spark.SparkConf
import org.apache.spark.sql._
object HiveFromSpark {
@@ -35,8 +34,6 @@ object HiveFromSpark {
ByteStreams.copy(kv1Stream, Files.newOutputStreamSupplier(kv1File))
def main(args: Array[String]) {
- val sparkConf = new SparkConf().setAppName("HiveFromSpark")
-
// When working with Hive, one must instantiate `SparkSession` with Hive support, including
// connectivity to a persistent Hive metastore, support for Hive serdes, and Hive user-defined
// functions. Users who do not have an existing Hive deployment can still enable Hive support.
@@ -45,7 +42,7 @@ object HiveFromSpark {
// which defaults to the directory `spark-warehouse` in the current directory that the spark
// application is started.
val spark = SparkSession.builder
- .config(sparkConf)
+ .appName("HiveFromSpark")
.enableHiveSupport()
.getOrCreate()
val sc = spark.sparkContext