diff options
author | Vinod K C <vinod.kc@huawei.com> | 2015-06-30 12:24:47 -0700 |
---|---|---|
committer | Michael Armbrust <michael@databricks.com> | 2015-06-30 12:24:47 -0700 |
commit | b8e5bb6fc1553256e950fdad9cb5acc6b296816e (patch) | |
tree | f8fc062948a7f6aebf22742323fcb8d30ba0189e /sql | |
parent | c1befd780c3defc843baa75097de7ec427d3f8ca (diff) | |
download | spark-b8e5bb6fc1553256e950fdad9cb5acc6b296816e.tar.gz spark-b8e5bb6fc1553256e950fdad9cb5acc6b296816e.tar.bz2 spark-b8e5bb6fc1553256e950fdad9cb5acc6b296816e.zip |
[SPARK-8628] [SQL] Race condition in AbstractSparkSQLParser.parse
Made lexical initialization a lazy val
Author: Vinod K C <vinod.kc@huawei.com>
Closes #7015 from vinodkc/handle_lexical_initialize_schronization and squashes the following commits:
b6d1c74 [Vinod K C] Avoided repeated lexical initialization
5863cf7 [Vinod K C] Removed space
e27c66c [Vinod K C] Avoid reinitialization of lexical in parse method
ef4f60f [Vinod K C] Reverted import order
e9fc49a [Vinod K C] handle synchronization in SqlLexical.initialize
Diffstat (limited to 'sql')
-rw-r--r-- | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala | 6 | ||||
-rw-r--r-- | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala | 2 |
2 files changed, 5 insertions, 3 deletions
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala index ef7b3ad943..d494ae7b71 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala @@ -20,7 +20,7 @@ package org.apache.spark.sql.catalyst import scala.language.implicitConversions import scala.util.parsing.combinator.lexical.StdLexical import scala.util.parsing.combinator.syntactical.StandardTokenParsers -import scala.util.parsing.combinator.{PackratParsers, RegexParsers} +import scala.util.parsing.combinator.PackratParsers import scala.util.parsing.input.CharArrayReader.EofCh import org.apache.spark.sql.catalyst.plans.logical._ @@ -30,12 +30,14 @@ private[sql] abstract class AbstractSparkSQLParser def parse(input: String): LogicalPlan = { // Initialize the Keywords. - lexical.initialize(reservedWords) + initLexical phrase(start)(new lexical.Scanner(input)) match { case Success(plan, _) => plan case failureOrError => sys.error(failureOrError.toString) } } + /* One time initialization of lexical.This avoid reinitialization of lexical in parse method */ + protected lazy val initLexical: Unit = lexical.initialize(reservedWords) protected case class Keyword(str: String) { def normalize: String = lexical.normalizeKeyword(str) diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala index 79f526e823..8d02fbf4f9 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala @@ -40,7 +40,7 @@ class SqlParser extends AbstractSparkSQLParser with DataTypeParser { def parseExpression(input: String): Expression = { // Initialize the Keywords. 
- lexical.initialize(reservedWords) + initLexical phrase(projection)(new lexical.Scanner(input)) match { case Success(plan, _) => plan case failureOrError => sys.error(failureOrError.toString) |