author    Yin Huai <yhuai@databricks.com>    2015-03-31 16:28:40 +0800
committer Cheng Lian <lian@databricks.com>  2015-03-31 16:28:40 +0800
commit    314afd0e2f08dd8d3333d3143712c2c79fa40d1e (patch)
tree      7f1f2c07c454260ae1059099c089f8088d9b1655 /sql
parent    b80a030e90d790e27e89b26f536565c582dbf3d5 (diff)
[SPARK-6618][SQL] HiveMetastoreCatalog.lookupRelation should use fine-grained lock
JIRA: https://issues.apache.org/jira/browse/SPARK-6618

Author: Yin Huai <yhuai@databricks.com>

Closes #5281 from yhuai/lookupRelationLock and squashes the following commits:

591b4be [Yin Huai] A test?
b3a9625 [Yin Huai] Just protect client.
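The shape of the change, sketched below in self-contained Scala with stand-in types (`MetastoreClient`, `Table`, and both `lookup*` methods are illustrative, not Spark's actual HiveMetastoreCatalog API): instead of holding the catalog's monitor for the entire lookup, only the thread-unsafe metastore client call is wrapped in `synchronized`, so the rest of the relation-building work can run concurrently.

// Sketch only -- stand-in types, not Spark's API.
case class Table(name: String)

class MetastoreClient {
  // Pretend this is the (thread-unsafe) Hive metastore client call.
  def getTable(db: String, tbl: String): Table = Table(s"$db.$tbl")
}

class Catalog(client: MetastoreClient) {
  // Before SPARK-6618: coarse-grained -- every lookup serializes here,
  // even the work that never touches the client.
  def lookupCoarse(db: String, tbl: String): Table = synchronized {
    val t = client.getTable(db, tbl) // the only call that needs the lock
    postProcess(t)                   // ...but this holds it too
  }

  // After SPARK-6618: fine-grained -- only the client call is guarded.
  def lookupFine(db: String, tbl: String): Table = {
    val t = synchronized { client.getTable(db, tbl) }
    postProcess(t)                   // now runs without the lock
  }

  private def postProcess(t: Table): Table = t // stands in for plan building
}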
Diffstat (limited to 'sql')
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala   | 12
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala | 11
2 files changed, 20 insertions(+), 3 deletions(-)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
index 203164ea84..6a01a23124 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
@@ -172,12 +172,16 @@ private[hive] class HiveMetastoreCatalog(hive: HiveContext) extends Catalog with
   def lookupRelation(
       tableIdentifier: Seq[String],
-      alias: Option[String]): LogicalPlan = synchronized {
+      alias: Option[String]): LogicalPlan = {
     val tableIdent = processTableIdentifier(tableIdentifier)
     val databaseName = tableIdent.lift(tableIdent.size - 2).getOrElse(
       hive.sessionState.getCurrentDatabase)
     val tblName = tableIdent.last
-    val table = try client.getTable(databaseName, tblName) catch {
+    val table = try {
+      synchronized {
+        client.getTable(databaseName, tblName)
+      }
+    } catch {
       case te: org.apache.hadoop.hive.ql.metadata.InvalidTableException =>
         throw new NoSuchTableException
     }
@@ -199,7 +203,9 @@ private[hive] class HiveMetastoreCatalog(hive: HiveContext) extends Catalog with
     } else {
       val partitions: Seq[Partition] =
         if (table.isPartitioned) {
-          HiveShim.getAllPartitionsOf(client, table).toSeq
+          synchronized {
+            HiveShim.getAllPartitionsOf(client, table).toSeq
+          }
         } else {
           Nil
         }
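Two details worth noting. First, the patch nests `synchronized` inside `try`, so the monitor is released as a thrown InvalidTableException unwinds out of the synchronized block, before the catch handler runs. Second, both guarded calls (`getTable` and `getAllPartitionsOf`) synchronize on the same monitor; a hypothetical follow-up, not part of this patch and using the same stand-in types as above, could funnel every client access through one helper so the choke point is explicit:

// Hypothetical refactoring sketch -- stand-in types, not the patch itself.
case class Table(name: String)
case class Partition(spec: String)

class MetastoreClient {
  def getTable(db: String, tbl: String): Table = Table(s"$db.$tbl")
  def getAllPartitionsOf(t: Table): Seq[Partition] = Seq.empty
}

class GuardedCatalog(client: MetastoreClient) {
  // Single choke point: every thread-unsafe client call funnels through
  // the same monitor, mirroring the two synchronized blocks in the patch.
  private def withClientLock[A](body: => A): A = synchronized(body)

  def getTable(db: String, tbl: String): Table =
    withClientLock(client.getTable(db, tbl))

  def partitionsOf(t: Table): Seq[Partition] =
    withClientLock(client.getAllPartitionsOf(t))
}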
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index 1187228f4c..2f50a33448 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -433,4 +433,15 @@ class SQLQuerySuite extends QueryTest {
     dropTempTable("data")
     setConf("spark.sql.hive.convertCTAS", originalConf)
   }
+
+  test("sanity test for SPARK-6618") {
+    (1 to 100).par.map { i =>
+      val tableName = s"SPARK_6618_table_$i"
+      sql(s"CREATE TABLE $tableName (col1 string)")
+      catalog.lookupRelation(Seq(tableName))
+      table(tableName)
+      tables()
+      sql(s"DROP TABLE $tableName")
+    }
+  }
 }
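The new test leans on Scala parallel collections: `.par` fans the create/lookup/drop cycle out across a fork-join pool, so a coarse race in the catalog tends to surface quickly. Below is a self-contained sketch of the same idiom against a toy, lock-guarded registry; all names are illustrative, not SQLQuerySuite's API. (On Scala 2.13+, `.par` requires the separate scala-parallel-collections module; it was built in when this patch landed. The sketch uses `foreach` rather than the test's `map`, since the results are discarded either way.)

// Toy version of the test's concurrency idiom -- illustrative names only.
object ParSmokeTest extends App {
  import scala.collection.mutable

  private val tables = mutable.Map.empty[String, Int]
  private def withLock[A](body: => A): A = tables.synchronized(body)

  // 100 parallel create/lookup/drop cycles, like the SPARK-6618 test.
  (1 to 100).par.foreach { i =>
    val name = s"table_$i"
    withLock { tables(name) = i }              // CREATE TABLE
    val looked = withLock { tables.get(name) } // lookupRelation
    assert(looked.contains(i), s"lost $name under concurrency")
    withLock { tables.remove(name) }           // DROP TABLE
  }
  println("100 parallel cycles completed without a race")
}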