path: root/examples/src/main/scala
author    Cheng Lian <lian@databricks.com>  2016-07-18 23:07:59 -0700
committer Yin Huai <yhuai@databricks.com>  2016-07-18 23:07:59 -0700
commit    1426a080528bdb470b5e81300d892af45dd188bf (patch)
tree      abf255120eb8c225179436b5614ab37a3b3283ab /examples/src/main/scala
parent    e5fbb182c04be8524045fc90541497f506b42f4a (diff)
download  spark-1426a080528bdb470b5e81300d892af45dd188bf.tar.gz
          spark-1426a080528bdb470b5e81300d892af45dd188bf.tar.bz2
          spark-1426a080528bdb470b5e81300d892af45dd188bf.zip
[SPARK-16303][DOCS][EXAMPLES] Minor Scala/Java example update
## What changes were proposed in this pull request?

This PR moves the last remaining hard-coded Scala example snippet from the SQL programming guide into `SparkSqlExample.scala`. It also renames the Scala/Java example files so that "Sql" in the file names becomes "SQL".

## How was this patch tested?

Manually verified the generated HTML page.

Author: Cheng Lian <lian@databricks.com>

Closes #14245 from liancheng/minor-scala-example-update.
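The moved snippet itself is not reproduced on this page. As a rough illustration only, the programmatic-schema pattern that the new `import org.apache.spark.sql.types._` line in the diff below supports looks something like this sketch (object name, column names, and data are assumptions, not taken from this commit):

```scala
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types._

object ProgrammaticSchemaSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("ProgrammaticSchemaSketch").master("local[*]").getOrCreate()

    // Build the schema at runtime instead of deriving it from a case class.
    val schema = StructType(Seq(
      StructField("name", StringType, nullable = true),
      StructField("age", LongType, nullable = true)
    ))

    // Pair plain Rows with the schema to obtain a DataFrame.
    val rowRDD = spark.sparkContext.parallelize(Seq(Row("Alice", 30L), Row("Bob", 25L)))
    val peopleDF = spark.createDataFrame(rowRDD, schema)

    peopleDF.createOrReplaceTempView("people")
    spark.sql("SELECT name FROM people WHERE age > 21").show()

    spark.stop()
  }
}
```

Importing the whole `org.apache.spark.sql.types._` package, as the diff does, keeps the snippet short when several type classes (`StructType`, `StructField`, `StringType`, ...) are needed at once.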
Diffstat (limited to 'examples/src/main/scala')
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala (renamed from examples/src/main/scala/org/apache/spark/examples/sql/SqlDataSourceExample.scala)  2
-rw-r--r--  examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala (renamed from examples/src/main/scala/org/apache/spark/examples/sql/SparkSqlExample.scala)  8

2 files changed, 5 insertions, 5 deletions
diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/SqlDataSourceExample.scala b/examples/src/main/scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala
index 61dea6ad2c..0caba12af0 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/SqlDataSourceExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala
@@ -18,7 +18,7 @@ package org.apache.spark.examples.sql
import org.apache.spark.sql.SparkSession
-object SqlDataSourceExample {
+object SQLDataSourceExample {
case class Person(name: String, age: Long)
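Both example objects keep a small `Person(name, age)` case class like the context line above. Purely as an assumed illustration (not part of this diff), such a case class is typically encoded into a typed Dataset as in the following sketch:

```scala
import org.apache.spark.sql.SparkSession

case class Person(name: String, age: Long)

object PersonDatasetSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("PersonDatasetSketch").master("local[*]").getOrCreate()
    import spark.implicits._ // provides the Encoder needed by toDS()

    // Encode a plain Scala collection of case-class instances as a typed Dataset.
    val people = Seq(Person("Andy", 32L), Person("Justin", 19L)).toDS()
    people.filter(_.age > 20).show()

    spark.stop()
  }
}
```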
diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/SparkSqlExample.scala b/examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala
index cf3f864267..952c074d03 100644
--- a/examples/src/main/scala/org/apache/spark/examples/sql/SparkSqlExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/sql/SparkSQLExample.scala
@@ -25,12 +25,12 @@ import org.apache.spark.sql.Row
import org.apache.spark.sql.SparkSession
// $example off:init_session$
// $example on:programmatic_schema$
-import org.apache.spark.sql.types.StringType
-import org.apache.spark.sql.types.StructField
-import org.apache.spark.sql.types.StructType
+// $example on:data_types$
+import org.apache.spark.sql.types._
+// $example off:data_types$
// $example off:programmatic_schema$
-object SparkSqlExample {
+object SparkSQLExample {
// $example on:create_ds$
// Note: Case classes in Scala 2.10 can support only up to 22 fields. To work around this limit,