author     Andrew Or <andrew@databricks.com>        2016-05-03 13:47:58 -0700
committer  Reynold Xin <rxin@databricks.com>        2016-05-03 13:47:58 -0700
commit     588cac414a9cf1e0f40a82cc6a78f77e26825f29 (patch)
tree       aeaa2ed8067942678341aaa599d63b173f4cfa42 /examples/src
parent     83ee92f60345f016a390d61a82f1d924f64ddf90 (diff)
[SPARK-15073][SQL] Hide SparkSession constructor from the public
## What changes were proposed in this pull request?

Users should use the builder pattern instead.

## How was this patch tested?

Jenkins.

Author: Andrew Or <andrew@databricks.com>

Closes #12873 from andrewor14/spark-session-constructor.
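For context, a minimal sketch of the builder-style construction this patch standardizes on; the app name and master shown are illustrative rather than part of the patch, and in practice the master is usually supplied by `spark-submit`:

```scala
import org.apache.spark.sql.SparkSession

// The public constructor `new SparkSession(sc)` is hidden by this patch;
// sessions are obtained through the builder instead.
val spark = SparkSession.builder
  .appName("MyApp")      // illustrative name
  .master("local[*]")    // illustrative; often set by spark-submit
  .getOrCreate()         // reuses an existing active session if one is present
```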
Diffstat (limited to 'examples/src')
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
index 8ce4427c53..b4118b16e2 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
@@ -18,7 +18,6 @@
 // scalastyle:off println
 package org.apache.spark.examples.sql
 
-import org.apache.spark.{SparkConf, SparkContext}
 import org.apache.spark.sql.{SaveMode, SparkSession}
 
 // One method for defining the schema of an RDD is to make a case class with the desired column
@@ -27,14 +26,12 @@ case class Record(key: Int, value: String)
 
 object RDDRelation {
   def main(args: Array[String]) {
-    val sparkConf = new SparkConf().setAppName("RDDRelation")
-    val sc = new SparkContext(sparkConf)
-    val spark = new SparkSession(sc)
+    val spark = SparkSession.builder.appName("RDDRelation").getOrCreate()
 
     // Importing the SparkSession gives access to all the SQL functions and implicit conversions.
     import spark.implicits._
 
-    val df = sc.parallelize((1 to 100).map(i => Record(i, s"val_$i"))).toDF()
+    val df = spark.createDataFrame((1 to 100).map(i => Record(i, s"val_$i")))
     // Any RDD containing case classes can be registered as a table. The schema of the table is
     // automatically inferred using scala reflection.
     df.registerTempTable("records")
@@ -70,7 +67,7 @@ object RDDRelation {
     parquetFile.registerTempTable("parquetFile")
     spark.sql("SELECT * FROM parquetFile").collect().foreach(println)
 
-    sc.stop()
+    spark.stop()
   }
 }
 // scalastyle:on println
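Code that previously tuned a `SparkConf` before constructing the context can pass the same settings through the builder; a minimal sketch, where the config key is illustrative and `spark.sparkContext` exposes the underlying context when RDD code still needs it:

```scala
import org.apache.spark.sql.SparkSession

// Before: val sc = new SparkContext(new SparkConf().setAppName("app").set(...))
val spark = SparkSession.builder
  .appName("app")
  .config("spark.ui.enabled", "false")  // replaces SparkConf.set(...); key is illustrative
  .getOrCreate()

// The underlying SparkContext remains reachable for RDD-based code:
val sc = spark.sparkContext
```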