author    zsxwing <zsxwing@gmail.com>        2015-09-19 18:22:43 -0700
committer Yin Huai <yhuai@databricks.com>    2015-09-19 18:22:43 -0700
commit    e789000b88a6bd840f821c53f42c08b97dc02496 (patch)
tree      454cf87addc9fe93e9eab98744ce5daa4dcfb6e3 /sql/core
parent    d83b6aae8b4357c56779cc98804eb350ab8af62d (diff)
[SPARK-10155] [SQL] Change SqlParser to object to avoid memory leak
Since `scala.util.parsing.combinator.Parsers` has been thread-safe since Scala 2.10 (see [SI-4929](https://issues.scala-lang.org/browse/SI-4929)), we can change SqlParser to an object to avoid the memory leak. I didn't change the other subclasses of `scala.util.parsing.combinator.Parsers` because there is only one instance per SQLContext, which should not be an issue.

Author: zsxwing <zsxwing@gmail.com>

Closes #8357 from zsxwing/sql-memory-leak.
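The pattern is the same in every hunk below: call sites switch from `new SqlParser().parseExpression(expr)` to `SqlParser.parseExpression(expr)`, i.e. the parser becomes a singleton `object`. Here is a minimal, self-contained sketch of that idea using `scala.util.parsing.combinator` directly; `TinyExprParser` and its toy grammar are illustrative stand-ins, not Spark's actual SqlParser, and the sketch assumes the scala-parser-combinators dependency is on the classpath:

```scala
import scala.util.parsing.combinator.RegexParsers

// A sketch of the change in this commit: the parser is an `object`
// (one shared instance, safe to share since Parsers became thread-safe
// in Scala 2.10) instead of a `class` allocated on every call.
// `TinyExprParser` and its grammar are hypothetical, not Spark's SqlParser.
object TinyExprParser extends RegexParsers {
  private val ident: Parser[String]  = """[a-zA-Z_][a-zA-Z0-9_]*""".r
  private val number: Parser[String] = """\d+""".r

  // expr := ident [ ("=" | ">" | "<") number ]
  private val expr: Parser[String] =
    ident ~ opt(("=" | ">" | "<") ~ number) ^^ {
      case id ~ None           => id
      case id ~ Some(op ~ num) => s"$id $op $num"
    }

  def parseExpression(input: String): String =
    parseAll(expr, input) match {
      case Success(result, _) => result
      case failure: NoSuccess => sys.error(failure.msg)
    }
}

object Demo extends App {
  // Callers invoke the singleton directly, mirroring the diff's
  // `SqlParser.parseExpression(expr)` -- no `new` per call.
  println(TinyExprParser.parseExpression("age > 21")) // prints: age > 21
}
```

Because the grammar lives in one shared object, nothing is allocated per call; the old code built a fresh `SqlParser` (and its whole grammar) for every expression or table name parsed.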
Diffstat (limited to 'sql/core')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala       | 6 +++---
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala | 4 ++--
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala      | 6 +++---
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/functions.scala       | 2 +-

4 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
index 3e61123c14..8f737c2023 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
@@ -720,7 +720,7 @@ class DataFrame private[sql](
@scala.annotation.varargs
def selectExpr(exprs: String*): DataFrame = {
select(exprs.map { expr =>
- Column(new SqlParser().parseExpression(expr))
+ Column(SqlParser.parseExpression(expr))
}: _*)
}
@@ -745,7 +745,7 @@ class DataFrame private[sql](
* @since 1.3.0
*/
def filter(conditionExpr: String): DataFrame = {
- filter(Column(new SqlParser().parseExpression(conditionExpr)))
+ filter(Column(SqlParser.parseExpression(conditionExpr)))
}
/**
@@ -769,7 +769,7 @@ class DataFrame private[sql](
* @since 1.5.0
*/
def where(conditionExpr: String): DataFrame = {
- filter(Column(new SqlParser().parseExpression(conditionExpr)))
+ filter(Column(SqlParser.parseExpression(conditionExpr)))
}
/**
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
index 745bb4ec9c..03e973666e 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
@@ -163,7 +163,7 @@ final class DataFrameWriter private[sql](df: DataFrame) {
* @since 1.4.0
*/
def insertInto(tableName: String): Unit = {
- insertInto(new SqlParser().parseTableIdentifier(tableName))
+ insertInto(SqlParser.parseTableIdentifier(tableName))
}
private def insertInto(tableIdent: TableIdentifier): Unit = {
@@ -197,7 +197,7 @@ final class DataFrameWriter private[sql](df: DataFrame) {
* @since 1.4.0
*/
def saveAsTable(tableName: String): Unit = {
- saveAsTable(new SqlParser().parseTableIdentifier(tableName))
+ saveAsTable(SqlParser.parseTableIdentifier(tableName))
}
private def saveAsTable(tableIdent: TableIdentifier): Unit = {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index e3fdd782e6..f099940800 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -590,7 +590,7 @@ class SQLContext(@transient val sparkContext: SparkContext)
tableName: String,
source: String,
options: Map[String, String]): DataFrame = {
- val tableIdent = new SqlParser().parseTableIdentifier(tableName)
+ val tableIdent = SqlParser.parseTableIdentifier(tableName)
val cmd =
CreateTableUsing(
tableIdent,
@@ -636,7 +636,7 @@ class SQLContext(@transient val sparkContext: SparkContext)
source: String,
schema: StructType,
options: Map[String, String]): DataFrame = {
- val tableIdent = new SqlParser().parseTableIdentifier(tableName)
+ val tableIdent = SqlParser.parseTableIdentifier(tableName)
val cmd =
CreateTableUsing(
tableIdent,
@@ -732,7 +732,7 @@ class SQLContext(@transient val sparkContext: SparkContext)
* @since 1.3.0
*/
def table(tableName: String): DataFrame = {
- table(new SqlParser().parseTableIdentifier(tableName))
+ table(SqlParser.parseTableIdentifier(tableName))
}
private def table(tableIdent: TableIdentifier): DataFrame = {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
index 60d9c50910..2467b4e484 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
@@ -823,7 +823,7 @@ object functions {
*
* @group normal_funcs
*/
- def expr(expr: String): Column = Column(new SqlParser().parseExpression(expr))
+ def expr(expr: String): Column = Column(SqlParser.parseExpression(expr))
//////////////////////////////////////////////////////////////////////////////////////////////
// Math Functions