| author | Dongjoon Hyun <dongjoon@apache.org> | 2016-04-30 00:15:04 -0700 |
| --- | --- | --- |
| committer | Reynold Xin <rxin@databricks.com> | 2016-04-30 00:15:04 -0700 |
| commit | f86f71763c014aa23940510e1e4af5a9244271e6 (patch) | |
| tree | 66b3c284c8ffb77e9aaebcb54e016f427d8817be /examples/src/main/scala | |
| parent | 3d09ceeef9212d4f3a8cd286ce369ace47242358 (diff) | |
[MINOR][EXAMPLE] Use SparkSession instead of SQLContext in RDDRelation.scala
## What changes were proposed in this pull request?
Now that `SQLContext` is kept only for backward compatibility, the Spark 2.0 examples should use `SparkSession` instead.
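The substitution is essentially one-to-one. Below is a minimal sketch of the changed setup (the object name `SessionFromContextSketch` is illustrative only; the public `new SparkSession(sc)` constructor shown here matches the code at the time of this commit, but was later made private in favor of the builder API):

```scala
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession

object SessionFromContextSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("RDDRelation"))

    // Before this patch: val sqlContext = new SQLContext(sc)
    // After this patch: wrap the existing SparkContext in a SparkSession.
    val spark = new SparkSession(sc)

    // The implicits (e.g. rdd.toDF()) now come from the session, not the context.
    import spark.implicits._
  }
}
```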
## How was this patch tested?
This is just an example change. After building, verify it by running `bin/run-example org.apache.spark.examples.sql.RDDRelation`.
Author: Dongjoon Hyun <dongjoon@apache.org>
Closes #12808 from dongjoon-hyun/rddrelation.
Diffstat (limited to 'examples/src/main/scala')
| -rw-r--r-- | examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala | 20 |

1 file changed, 10 insertions(+), 10 deletions(-)
```diff
diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
index 94b67cb29b..8ce4427c53 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/RDDRelation.scala
@@ -19,7 +19,7 @@ package org.apache.spark.examples.sql
 
 import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.sql.{SaveMode, SQLContext}
+import org.apache.spark.sql.{SaveMode, SparkSession}
 
 // One method for defining the schema of an RDD is to make a case class with the desired column
 // names and types.
@@ -29,10 +29,10 @@ object RDDRelation {
   def main(args: Array[String]) {
     val sparkConf = new SparkConf().setAppName("RDDRelation")
     val sc = new SparkContext(sparkConf)
-    val sqlContext = new SQLContext(sc)
+    val spark = new SparkSession(sc)
 
-    // Importing the SQL context gives access to all the SQL functions and implicit conversions.
-    import sqlContext.implicits._
+    // Importing the SparkSession gives access to all the SQL functions and implicit conversions.
+    import spark.implicits._
 
     val df = sc.parallelize((1 to 100).map(i => Record(i, s"val_$i"))).toDF()
     // Any RDD containing case classes can be registered as a table. The schema of the table is
@@ -41,15 +41,15 @@ object RDDRelation {
 
     // Once tables have been registered, you can run SQL queries over them.
     println("Result of SELECT *:")
-    sqlContext.sql("SELECT * FROM records").collect().foreach(println)
+    spark.sql("SELECT * FROM records").collect().foreach(println)
 
     // Aggregation queries are also supported.
-    val count = sqlContext.sql("SELECT COUNT(*) FROM records").collect().head.getLong(0)
+    val count = spark.sql("SELECT COUNT(*) FROM records").collect().head.getLong(0)
     println(s"COUNT(*): $count")
 
-    // The results of SQL queries are themselves RDDs and support all normal RDD functions.  The
+    // The results of SQL queries are themselves RDDs and support all normal RDD functions. The
     // items in the RDD are of type Row, which allows you to access each column by ordinal.
-    val rddFromSql = sqlContext.sql("SELECT key, value FROM records WHERE key < 10")
+    val rddFromSql = spark.sql("SELECT key, value FROM records WHERE key < 10")
 
     println("Result of RDD.map:")
     rddFromSql.rdd.map(row => s"Key: ${row(0)}, Value: ${row(1)}").collect().foreach(println)
@@ -61,14 +61,14 @@ object RDDRelation {
     df.write.mode(SaveMode.Overwrite).parquet("pair.parquet")
 
     // Read in parquet file. Parquet files are self-describing so the schema is preserved.
-    val parquetFile = sqlContext.read.parquet("pair.parquet")
+    val parquetFile = spark.read.parquet("pair.parquet")
 
     // Queries can be run using the DSL on parquet files just like the original RDD.
     parquetFile.where($"key" === 1).select($"value".as("a")).collect().foreach(println)
 
     // These files can also be registered as tables.
     parquetFile.registerTempTable("parquetFile")
-    sqlContext.sql("SELECT * FROM parquetFile").collect().foreach(println)
+    spark.sql("SELECT * FROM parquetFile").collect().foreach(println)
 
     sc.stop()
   }
```
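For anyone running this example against a released Spark 2.0.x or later: the direct `new SparkSession(sc)` constructor used in this patch was later made private in favor of `SparkSession.builder()`. A minimal sketch of an equivalent setup under that assumption follows (the object name, the `local[*]` master, and `createOrReplaceTempView`, which superseded the deprecated `registerTempTable`, are adaptations rather than part of this commit):

```scala
import org.apache.spark.sql.SparkSession

// Schema is defined by a case class, as in the example itself.
case class Record(key: Int, value: String)

object RDDRelationWithBuilder {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("RDDRelation")
      .master("local[*]") // for local testing; run-example normally supplies the master
      .getOrCreate()
    import spark.implicits._

    val df = spark.sparkContext
      .parallelize((1 to 100).map(i => Record(i, s"val_$i")))
      .toDF()

    // createOrReplaceTempView superseded the deprecated registerTempTable in 2.0.
    df.createOrReplaceTempView("records")
    spark.sql("SELECT COUNT(*) FROM records").show()

    spark.stop()
  }
}
```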