aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorVinod K C <vinod.kc@huawei.com>2015-06-30 12:24:47 -0700
committerMichael Armbrust <michael@databricks.com>2015-06-30 12:25:13 -0700
commit80b0fe2009dea98a6a09b7cf43590a555c638cad (patch)
treed94a60470af5243de9c93ee95cdcfc9a3b27a8b3
parentf9cd5cc1b1647ebeb8327e7b595aad1e592e8f14 (diff)
downloadspark-80b0fe2009dea98a6a09b7cf43590a555c638cad.tar.gz
spark-80b0fe2009dea98a6a09b7cf43590a555c638cad.tar.bz2
spark-80b0fe2009dea98a6a09b7cf43590a555c638cad.zip
[SPARK-8628] [SQL] Race condition in AbstractSparkSQLParser.parse
Made lexical initialization a lazy val Author: Vinod K C <vinod.kc@huawei.com> Closes #7015 from vinodkc/handle_lexical_initialize_schronization and squashes the following commits: b6d1c74 [Vinod K C] Avoided repeated lexical initialization 5863cf7 [Vinod K C] Removed space e27c66c [Vinod K C] Avoid reinitialization of lexical in parse method ef4f60f [Vinod K C] Reverted import order e9fc49a [Vinod K C] handle synchronization in SqlLexical.initialize (cherry picked from commit b8e5bb6fc1553256e950fdad9cb5acc6b296816e) Signed-off-by: Michael Armbrust <michael@databricks.com>
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala6
-rw-r--r--sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala2
2 files changed, 5 insertions, 3 deletions
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala
index ef7b3ad943..d494ae7b71 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/AbstractSparkSQLParser.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql.catalyst
import scala.language.implicitConversions
import scala.util.parsing.combinator.lexical.StdLexical
import scala.util.parsing.combinator.syntactical.StandardTokenParsers
-import scala.util.parsing.combinator.{PackratParsers, RegexParsers}
+import scala.util.parsing.combinator.PackratParsers
import scala.util.parsing.input.CharArrayReader.EofCh
import org.apache.spark.sql.catalyst.plans.logical._
@@ -30,12 +30,14 @@ private[sql] abstract class AbstractSparkSQLParser
def parse(input: String): LogicalPlan = {
// Initialize the Keywords.
- lexical.initialize(reservedWords)
+ initLexical
phrase(start)(new lexical.Scanner(input)) match {
case Success(plan, _) => plan
case failureOrError => sys.error(failureOrError.toString)
}
}
+ /* One-time initialization of lexical. This avoids reinitialization of lexical in the parse method */
+ protected lazy val initLexical: Unit = lexical.initialize(reservedWords)
protected case class Keyword(str: String) {
def normalize: String = lexical.normalizeKeyword(str)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
index e85312aee7..df23b7013f 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SqlParser.scala
@@ -39,7 +39,7 @@ class SqlParser extends AbstractSparkSQLParser with DataTypeParser {
def parseExpression(input: String): Expression = {
// Initialize the Keywords.
- lexical.initialize(reservedWords)
+ initLexical
phrase(projection)(new lexical.Scanner(input)) match {
case Success(plan, _) => plan
case failureOrError => sys.error(failureOrError.toString)