 R/pkg/R/serialize.R                                                              | 2 +-
 conf/spark-env.sh.template                                                       | 2 +-
 docs/sql-programming-guide.md                                                    | 2 +-
 mllib/src/test/scala/org/apache/spark/ml/regression/LinearRegressionSuite.scala  | 2 +-
 sql/README.md                                                                    | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/R/pkg/R/serialize.R b/R/pkg/R/serialize.R
index 095ddb9aed..70e87a93e6 100644
--- a/R/pkg/R/serialize.R
+++ b/R/pkg/R/serialize.R
@@ -54,7 +54,7 @@ writeObject <- function(con, object, writeType = TRUE) {
# passing in vectors as arrays and instead require arrays to be passed
# as lists.
type <- class(object)[[1]] # class of POSIXlt is c("POSIXlt", "POSIXt")
- # Checking types is needed here, since ‘is.na’ only handles atomic vectors,
+ # Checking types is needed here, since 'is.na' only handles atomic vectors,
# lists and pairlists
if (type %in% c("integer", "character", "logical", "double", "numeric")) {
if (is.na(object)) {
diff --git a/conf/spark-env.sh.template b/conf/spark-env.sh.template
index 771251f90e..a031cd6a72 100755
--- a/conf/spark-env.sh.template
+++ b/conf/spark-env.sh.template
@@ -41,7 +41,7 @@
# - SPARK_EXECUTOR_MEMORY, Memory per Executor (e.g. 1000M, 2G) (Default: 1G)
# - SPARK_DRIVER_MEMORY, Memory for Driver (e.g. 1000M, 2G) (Default: 1G)
# - SPARK_YARN_APP_NAME, The name of your application (Default: Spark)
-# - SPARK_YARN_QUEUE, The hadoop queue to use for allocation requests (Default: ‘default’)
+# - SPARK_YARN_QUEUE, The hadoop queue to use for allocation requests (Default: 'default')
# - SPARK_YARN_DIST_FILES, Comma separated list of files to be distributed with the job.
# - SPARK_YARN_DIST_ARCHIVES, Comma separated list of archives to be distributed with the job.
diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md
index ce53a39f9f..d246100f3e 100644
--- a/docs/sql-programming-guide.md
+++ b/docs/sql-programming-guide.md
@@ -2389,7 +2389,7 @@ let user control table caching explicitly:
CACHE TABLE logs_last_month;
UNCACHE TABLE logs_last_month;
-**NOTE:** `CACHE TABLE tbl` is now __eager__ by default not __lazy__. Don’t need to trigger cache materialization manually anymore.
+**NOTE:** `CACHE TABLE tbl` is now __eager__ by default not __lazy__. Don't need to trigger cache materialization manually anymore.
Spark SQL newly introduced a statement to let user control table caching whether or not lazy since Spark 1.2.0:
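For context, a minimal sketch of the eager vs. lazy caching behavior described in the hunk above, using a Spark 1.2+-era Scala API; the application name and the existence of a `logs_last_month` table are assumptions for illustration, not part of this change:

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

// Sketch only (not part of this commit); assumes a table named
// logs_last_month is already registered.
val sc = new SparkContext(new SparkConf().setAppName("cache-table-sketch"))
val sqlContext = new SQLContext(sc)

// Eager (the default since Spark 1.2): the table is materialized immediately.
sqlContext.sql("CACHE TABLE logs_last_month")

// Lazy: materialization is deferred until the cached table is first scanned.
sqlContext.sql("CACHE LAZY TABLE logs_last_month")

// Drop the cached copy.
sqlContext.sql("UNCACHE TABLE logs_last_month")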
diff --git a/mllib/src/test/scala/org/apache/spark/ml/regression/LinearRegressionSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/regression/LinearRegressionSuite.scala
index 81fc6603cc..3ae108d822 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/regression/LinearRegressionSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/regression/LinearRegressionSuite.scala
@@ -956,7 +956,7 @@ class LinearRegressionSuite
V1 -3.7271 2.9032 -1.284 0.3279
V2 3.0100 0.6022 4.998 0.0378 *
---
- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
+ Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
(Dispersion parameter for gaussian family taken to be 17.4376)
diff --git a/sql/README.md b/sql/README.md
index a13bdab6d4..9ea271d33d 100644
--- a/sql/README.md
+++ b/sql/README.md
@@ -5,7 +5,7 @@ This module provides support for executing relational queries expressed in eithe
Spark SQL is broken up into four subprojects:
- Catalyst (sql/catalyst) - An implementation-agnostic framework for manipulating trees of relational operators and expressions.
- - Execution (sql/core) - A query planner / execution engine for translating Catalyst’s logical query plans into Spark RDDs. This component also includes a new public interface, SQLContext, that allows users to execute SQL or LINQ statements against existing RDDs and Parquet files.
+ - Execution (sql/core) - A query planner / execution engine for translating Catalyst's logical query plans into Spark RDDs. This component also includes a new public interface, SQLContext, that allows users to execute SQL or LINQ statements against existing RDDs and Parquet files.
- Hive Support (sql/hive) - Includes an extension of SQLContext called HiveContext that allows users to write queries using a subset of HiveQL and access data from a Hive Metastore using Hive SerDes. There are also wrappers that allows users to run queries that include Hive UDFs, UDAFs, and UDTFs.
- HiveServer and CLI support (sql/hive-thriftserver) - Includes support for the SQL CLI (bin/spark-sql) and a HiveServer2 (for JDBC/ODBC) compatible server.
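To make the SQLContext entry point described above concrete, here is a minimal sketch in Spark 1.4+-era Scala; the application name and Parquet path are hypothetical, not part of this change:

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

// Sketch only: running SQL against an existing Parquet file via SQLContext.
val sc = new SparkContext(new SparkConf().setAppName("sqlcontext-sketch"))
val sqlContext = new SQLContext(sc)

// Load a Parquet file (hypothetical path) and register it as a temp table.
val logs = sqlContext.read.parquet("hdfs:///data/logs.parquet")
logs.registerTempTable("logs")

// Query it with SQL.
sqlContext.sql("SELECT COUNT(*) FROM logs").show()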