author     Dongjoon Hyun <dongjoon@apache.org>   2016-05-17 20:50:22 +0200
committer  Nick Pentreath <nickp@za.ibm.com>     2016-05-17 20:50:22 +0200
commit     9f176dd3918129a72282a6b7a12e2899cbb6dac9 (patch)
tree       a7feca1f7b01ea38112e6ec7498f1d070ad415ff
parent     3308a862ba0983268c9d5acf9e2a7d2b62d3ec27 (diff)
[MINOR][DOCS] Replace remaining 'sqlContext' in ScalaDoc/JavaDoc.
## What changes were proposed in this pull request?

According to the recent change, this PR replaces all the remaining `sqlContext` usages with `spark` in ScalaDoc/JavaDoc (.scala/.java files), except `SQLContext.scala`, `SparkPlan.scala`, and `DatasetHolder.scala`.

## How was this patch tested?

Manual.

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #13125 from dongjoon-hyun/minor_doc_sparksession.
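For context, a minimal sketch of the Spark 2.0 entry point that the updated examples assume. The builder settings below are illustrative choices; in `spark-shell` an equivalent session named `spark` is already created.

```scala
import org.apache.spark.sql.SparkSession

// Illustrative standalone setup; spark-shell pre-creates an equivalent session named `spark`.
val spark = SparkSession.builder()
  .appName("sqlContext-to-spark-doc-examples")   // app name is an arbitrary choice for this sketch
  .master("local[*]")                            // local master only for local experimentation
  .getOrCreate()

// Legacy code that still needs the old entry point can reach it through the session.
val legacySqlContext = spark.sqlContext
```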
-rw-r--r--  mllib/src/main/scala/org/apache/spark/ml/feature/package.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala | 4
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrameStatFunctions.scala | 10
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/ExperimentalMethods.scala | 2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningAwareFileCatalog.scala | 8
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/functions.scala | 4
6 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/mllib/src/main/scala/org/apache/spark/ml/feature/package.scala b/mllib/src/main/scala/org/apache/spark/ml/feature/package.scala
index 4571ab2680..b94187ae78 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/feature/package.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/feature/package.scala
@@ -44,7 +44,7 @@ import org.apache.spark.sql.DataFrame
* import org.apache.spark.ml.Pipeline
*
* // a DataFrame with three columns: id (integer), text (string), and rating (double).
- * val df = sqlContext.createDataFrame(Seq(
+ * val df = spark.createDataFrame(Seq(
* (0, "Hi I heard about Spark", 3.0),
* (1, "I wish Java could use case classes", 4.0),
* (2, "Logistic regression models are neat", 4.0)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index e1a64dfc5e..011aff4ff6 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -446,10 +446,10 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
* Each line in the text file is a new row in the resulting Dataset. For example:
* {{{
* // Scala:
- * sqlContext.read.text("/path/to/spark/README.md")
+ * spark.read.text("/path/to/spark/README.md")
*
* // Java:
- * sqlContext.read().text("/path/to/spark/README.md")
+ * spark.read().text("/path/to/spark/README.md")
* }}}
*
* @param paths input path
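As a quick check of the updated Scala example, a short sketch that reads a text file line by line into a single-column DataFrame; the README path is the placeholder from the doc, and `spark` is assumed to be in scope (e.g. from spark-shell).

```scala
// Each line of the input file becomes one row in a DataFrame with a single "value" column.
val readme = spark.read.text("/path/to/spark/README.md")
readme.printSchema()
readme.show(5, truncate = false)
```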
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameStatFunctions.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameStatFunctions.scala
index 3eb1f0f0d5..1855eab96e 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameStatFunctions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameStatFunctions.scala
@@ -160,8 +160,8 @@ final class DataFrameStatFunctions private[sql](df: DataFrame) {
* @return A DataFrame containing the contingency table.
*
* {{{
- * val df = sqlContext.createDataFrame(Seq((1, 1), (1, 2), (2, 1), (2, 1), (2, 3), (3, 2),
- * (3, 3))).toDF("key", "value")
+ * val df = spark.createDataFrame(Seq((1, 1), (1, 2), (2, 1), (2, 1), (2, 3), (3, 2), (3, 3)))
+ * .toDF("key", "value")
* val ct = df.stat.crosstab("key", "value")
* ct.show()
* +---------+---+---+---+
@@ -197,7 +197,7 @@ final class DataFrameStatFunctions private[sql](df: DataFrame) {
* val rows = Seq.tabulate(100) { i =>
* if (i % 2 == 0) (1, -1.0) else (i, i * -1.0)
* }
- * val df = sqlContext.createDataFrame(rows).toDF("a", "b")
+ * val df = spark.createDataFrame(rows).toDF("a", "b")
* // find the items with a frequency greater than 0.4 (observed 40% of the time) for columns
* // "a" and "b"
* val freqSingles = df.stat.freqItems(Array("a", "b"), 0.4)
@@ -258,7 +258,7 @@ final class DataFrameStatFunctions private[sql](df: DataFrame) {
* val rows = Seq.tabulate(100) { i =>
* if (i % 2 == 0) (1, -1.0) else (i, i * -1.0)
* }
- * val df = sqlContext.createDataFrame(rows).toDF("a", "b")
+ * val df = spark.createDataFrame(rows).toDF("a", "b")
* // find the items with a frequency greater than 0.4 (observed 40% of the time) for columns
* // "a" and "b"
* val freqSingles = df.stat.freqItems(Seq("a", "b"), 0.4)
@@ -314,7 +314,7 @@ final class DataFrameStatFunctions private[sql](df: DataFrame) {
* @return a new [[DataFrame]] that represents the stratified sample
*
* {{{
- * val df = sqlContext.createDataFrame(Seq((1, 1), (1, 2), (2, 1), (2, 1), (2, 3), (3, 2),
+ * val df = spark.createDataFrame(Seq((1, 1), (1, 2), (2, 1), (2, 1), (2, 3), (3, 2),
* (3, 3))).toDF("key", "value")
* val fractions = Map(1 -> 1.0, 3 -> 0.5)
* df.stat.sampleBy("key", fractions, 36L).show()
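A runnable sketch combining the updated `crosstab` and `sampleBy` examples above; `spark` is assumed to be in scope (e.g. from spark-shell).

```scala
val df = spark.createDataFrame(Seq((1, 1), (1, 2), (2, 1), (2, 1), (2, 3), (3, 2), (3, 3)))
  .toDF("key", "value")

// Pairwise frequency table of the distinct values of "key" and "value".
df.stat.crosstab("key", "value").show()

// Stratified sample: keep every row with key 1 and roughly half with key 3;
// keys absent from the map (here key 2) are sampled with fraction zero.
val fractions = Map(1 -> 1.0, 3 -> 0.5)
df.stat.sampleBy("key", fractions, 36L).show()
```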
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/ExperimentalMethods.scala b/sql/core/src/main/scala/org/apache/spark/sql/ExperimentalMethods.scala
index a49da6dc2b..a435734b0c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/ExperimentalMethods.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/ExperimentalMethods.scala
@@ -27,7 +27,7 @@ import org.apache.spark.sql.catalyst.rules.Rule
* regarding binary compatibility and source compatibility of methods here.
*
* {{{
- * sqlContext.experimental.extraStrategies += ...
+ * spark.experimental.extraStrategies += ...
* }}}
*
* @since 1.3.0
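A hedged sketch of what plugging into `extraStrategies` can look like; `NoopStrategy` is a made-up placeholder that declines every plan, shown only to make the injection point above concrete.

```scala
import org.apache.spark.sql.Strategy
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.SparkPlan

// Placeholder strategy: returning Nil tells the planner it does not handle the plan.
object NoopStrategy extends Strategy {
  override def apply(plan: LogicalPlan): Seq[SparkPlan] = Nil
}

// Append to the experimental hook referenced in the ScalaDoc above.
spark.experimental.extraStrategies = spark.experimental.extraStrategies :+ NoopStrategy
```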
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningAwareFileCatalog.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningAwareFileCatalog.scala
index e0e4ddc30b..406d2e8e81 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningAwareFileCatalog.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningAwareFileCatalog.scala
@@ -168,17 +168,17 @@ abstract class PartitioningAwareFileCatalog(
*
* By default, the paths of the dataset provided by users will be base paths.
* Below are three typical examples,
- * Case 1) `sqlContext.read.parquet("/path/something=true/")`: the base path will be
+ * Case 1) `spark.read.parquet("/path/something=true/")`: the base path will be
* `/path/something=true/`, and the returned DataFrame will not contain a column of `something`.
- * Case 2) `sqlContext.read.parquet("/path/something=true/a.parquet")`: the base path will be
+ * Case 2) `spark.read.parquet("/path/something=true/a.parquet")`: the base path will be
* still `/path/something=true/`, and the returned DataFrame will also not contain a column of
* `something`.
- * Case 3) `sqlContext.read.parquet("/path/")`: the base path will be `/path/`, and the returned
+ * Case 3) `spark.read.parquet("/path/")`: the base path will be `/path/`, and the returned
* DataFrame will have the column of `something`.
*
* Users also can override the basePath by setting `basePath` in the options to pass the new base
* path to the data source.
- * For example, `sqlContext.read.option("basePath", "/path/").parquet("/path/something=true/")`,
+ * For example, `spark.read.option("basePath", "/path/").parquet("/path/something=true/")`,
* and the returned DataFrame will have the column of `something`.
*/
private def basePaths: Set[Path] = {
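The three cases above can be exercised with a short sketch; the paths are hypothetical and `spark` is assumed to be in scope.

```scala
// Cases 1 and 2: the base path is the partition directory itself, so "something" is not a column.
val noPartitionCol = spark.read.parquet("/path/something=true/")

// Overriding basePath keeps the partition column, as in the last example above.
val withPartitionCol = spark.read
  .option("basePath", "/path/")
  .parquet("/path/something=true/")

noPartitionCol.printSchema()
withPartitionCol.printSchema()   // includes the "something" partition column
```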
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
index 07f55042ee..65bc043076 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
@@ -2952,8 +2952,8 @@ object functions {
* import org.apache.spark.sql._
*
* val df = Seq(("id1", 1), ("id2", 4), ("id3", 5)).toDF("id", "value")
- * val sqlContext = df.sqlContext
- * sqlContext.udf.register("simpleUDF", (v: Int) => v * v)
+ * val spark = df.sparkSession
+ * spark.udf.register("simpleUDF", (v: Int) => v * v)
* df.select($"id", callUDF("simpleUDF", $"value"))
* }}}
*
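The updated UDF example runs as-is once a session is available; below is a sketch with the needed imports, assuming `spark` is in scope (e.g. spark-shell), where `spark.implicits._` supplies `toDF` on local Seqs and the `$"col"` syntax.

```scala
import org.apache.spark.sql.functions.callUDF
import spark.implicits._   // enables toDF on local Seqs and the $"col" syntax

val df = Seq(("id1", 1), ("id2", 4), ("id3", 5)).toDF("id", "value")
spark.udf.register("simpleUDF", (v: Int) => v * v)

// Call the registered UDF by name on the "value" column.
df.select($"id", callUDF("simpleUDF", $"value")).show()
```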