diff options
author | Dustin Koupal <dkoupal@blizzard.com> | 2017-04-06 16:56:36 -0700 |
---|---|---|
committer | Reynold Xin <rxin@databricks.com> | 2017-04-06 16:56:36 -0700 |
commit | 8129d59d0e389fa8074958f1b90f7539e3e79bb7 (patch) | |
tree | 8dbc1716a1bcc6f429c9e40e7b6b6adf558e5a49 /examples | |
parent | a4491626ed8169f0162a0dfb78736c9b9e7fb434 (diff) | |
download | spark-8129d59d0e389fa8074958f1b90f7539e3e79bb7.tar.gz spark-8129d59d0e389fa8074958f1b90f7539e3e79bb7.tar.bz2 spark-8129d59d0e389fa8074958f1b90f7539e3e79bb7.zip |
[MINOR][DOCS] Fix typo in Hive Examples
## What changes were proposed in this pull request?
Fix typo in Hive examples from "DaraFrames" to "DataFrames".
## How was this patch tested?
N/A
Please review http://spark.apache.org/contributing.html before opening a pull request.
Author: Dustin Koupal <dkoupal@blizzard.com>
Closes #17554 from cooper6581/typo-daraframes.
Diffstat (limited to 'examples')
3 files changed, 3 insertions, 3 deletions
diff --git a/examples/src/main/java/org/apache/spark/examples/sql/hive/JavaSparkHiveExample.java b/examples/src/main/java/org/apache/spark/examples/sql/hive/JavaSparkHiveExample.java index 47638565b1..575a463e87 100644 --- a/examples/src/main/java/org/apache/spark/examples/sql/hive/JavaSparkHiveExample.java +++ b/examples/src/main/java/org/apache/spark/examples/sql/hive/JavaSparkHiveExample.java @@ -89,7 +89,7 @@ public class JavaSparkHiveExample { // The results of SQL queries are themselves DataFrames and support all normal functions. Dataset<Row> sqlDF = spark.sql("SELECT key, value FROM src WHERE key < 10 ORDER BY key"); - // The items in DaraFrames are of type Row, which lets you to access each column by ordinal. + // The items in DataFrames are of type Row, which lets you to access each column by ordinal. Dataset<String> stringsDS = sqlDF.map( (MapFunction<Row, String>) row -> "Key: " + row.get(0) + ", Value: " + row.get(1), Encoders.STRING()); diff --git a/examples/src/main/python/sql/hive.py b/examples/src/main/python/sql/hive.py index 1f175d7258..1f83a6fb48 100644 --- a/examples/src/main/python/sql/hive.py +++ b/examples/src/main/python/sql/hive.py @@ -68,7 +68,7 @@ if __name__ == "__main__": # The results of SQL queries are themselves DataFrames and support all normal functions. sqlDF = spark.sql("SELECT key, value FROM src WHERE key < 10 ORDER BY key") - # The items in DaraFrames are of type Row, which allows you to access each column by ordinal. + # The items in DataFrames are of type Row, which allows you to access each column by ordinal. 
stringsDS = sqlDF.rdd.map(lambda row: "Key: %d, Value: %s" % (row.key, row.value)) for record in stringsDS.collect(): print(record) diff --git a/examples/src/main/scala/org/apache/spark/examples/sql/hive/SparkHiveExample.scala b/examples/src/main/scala/org/apache/spark/examples/sql/hive/SparkHiveExample.scala index 3de26364b5..e5f75d53ed 100644 --- a/examples/src/main/scala/org/apache/spark/examples/sql/hive/SparkHiveExample.scala +++ b/examples/src/main/scala/org/apache/spark/examples/sql/hive/SparkHiveExample.scala @@ -76,7 +76,7 @@ object SparkHiveExample { // The results of SQL queries are themselves DataFrames and support all normal functions. val sqlDF = sql("SELECT key, value FROM src WHERE key < 10 ORDER BY key") - // The items in DaraFrames are of type Row, which allows you to access each column by ordinal. + // The items in DataFrames are of type Row, which allows you to access each column by ordinal. val stringsDS = sqlDF.map { case Row(key: Int, value: String) => s"Key: $key, Value: $value" } |