about | summary | refs | log | tree | commit | diff
path: root/sql/catalyst
diff options
context:
space:
mode:
author: zsxwing <zsxwing@gmail.com> 2015-09-19 18:22:43 -0700
committer: Yin Huai <yhuai@databricks.com> 2015-09-19 18:22:43 -0700
commit: e789000b88a6bd840f821c53f42c08b97dc02496 (patch)
tree: 454cf87addc9fe93e9eab98744ce5daa4dcfb6e3 /sql/catalyst
parent: d83b6aae8b4357c56779cc98804eb350ab8af62d (diff)
download: spark-e789000b88a6bd840f821c53f42c08b97dc02496.tar.gz
spark-e789000b88a6bd840f821c53f42c08b97dc02496.tar.bz2
spark-e789000b88a6bd840f821c53f42c08b97dc02496.zip
[SPARK-10155] [SQL] Change SqlParser to object to avoid memory leak
Since `scala.util.parsing.combinator.Parsers` is thread-safe since Scala 2.10 (See [SI-4929](https://issues.scala-lang.org/browse/SI-4929)), we can change SqlParser to object to avoid memory leak. I didn't change other subclasses of `scala.util.parsing.combinator.Parsers` because there is only one instance in one SQLContext, which should not be an issue. Author: zsxwing <zsxwing@gmail.com> Closes #8357 from zsxwing/sql-memory-leak.
Diffstat (limited to 'sql/catalyst')
-rw-r--r-- sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala | 2
-rw-r--r-- sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ParserDialect.scala | 2
-rw-r--r-- sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala | 6
3 files changed, 5 insertions, 5 deletions
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala
index 5898a5f93f..2bac08eac4 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala
@@ -28,7 +28,7 @@ import org.apache.spark.sql.catalyst.plans.logical._
private[sql] abstract class AbstractSparkSQLParser
extends StandardTokenParsers with PackratParsers {
- def parse(input: String): LogicalPlan = {
+ def parse(input: String): LogicalPlan = synchronized {
// Initialize the Keywords.
initLexical
phrase(start)(new lexical.Scanner(input)) match {
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ParserDialect.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ParserDialect.scala
index 554fb4eb25..e21d3c0546 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ParserDialect.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ParserDialect.scala
@@ -61,7 +61,7 @@ abstract class ParserDialect {
*/
private[spark] class DefaultParserDialect extends ParserDialect {
@transient
- protected val sqlParser = new SqlParser
+ protected val sqlParser = SqlParser
override def parse(sqlText: String): LogicalPlan = {
sqlParser.parse(sqlText)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
index f2498861c9..dfab239885 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
@@ -37,9 +37,9 @@ import org.apache.spark.unsafe.types.CalendarInterval
* This is currently included mostly for illustrative purposes. Users wanting more complete support
* for a SQL like language should checkout the HiveQL support in the sql/hive sub-project.
*/
-class SqlParser extends AbstractSparkSQLParser with DataTypeParser {
+object SqlParser extends AbstractSparkSQLParser with DataTypeParser {
- def parseExpression(input: String): Expression = {
+ def parseExpression(input: String): Expression = synchronized {
// Initialize the Keywords.
initLexical
phrase(projection)(new lexical.Scanner(input)) match {
@@ -48,7 +48,7 @@ class SqlParser extends AbstractSparkSQLParser with DataTypeParser {
}
}
- def parseTableIdentifier(input: String): TableIdentifier = {
+ def parseTableIdentifier(input: String): TableIdentifier = synchronized {
// Initialize the Keywords.
initLexical
phrase(tableIdentifier)(new lexical.Scanner(input)) match {