about summary refs log tree commit diff
path: root/sql
diff options
context:
space:
mode:
author	Cheng Lian <lian@databricks.com>	2015-04-02 12:56:34 +0800
committer	Cheng Lian <lian@databricks.com>	2015-04-02 12:56:34 +0800
commit2bc7fe7f7eb31b8f0591611b1e66b601bba8a4b7 (patch)
tree52bc59ce8c52952381a39cfae40456a4d95f5c8f /sql
parent191524e7401fcdfae46dc7e6a64c28907b1b1c20 (diff)
downloadspark-2bc7fe7f7eb31b8f0591611b1e66b601bba8a4b7.tar.gz
spark-2bc7fe7f7eb31b8f0591611b1e66b601bba8a4b7.tar.bz2
spark-2bc7fe7f7eb31b8f0591611b1e66b601bba8a4b7.zip
Revert "[SPARK-6618][SQL] HiveMetastoreCatalog.lookupRelation should use fine-grained lock"
This reverts commit 314afd0e2f08dd8d3333d3143712c2c79fa40d1e.
Diffstat (limited to 'sql')
-rw-r--r--	sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala	| 12
-rw-r--r--	sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala	| 11
2 files changed, 3 insertions(+), 20 deletions(-)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
index 2b5d031741..f0076cef13 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
@@ -173,16 +173,12 @@ private[hive] class HiveMetastoreCatalog(hive: HiveContext) extends Catalog with
def lookupRelation(
tableIdentifier: Seq[String],
- alias: Option[String]): LogicalPlan = {
+ alias: Option[String]): LogicalPlan = synchronized {
val tableIdent = processTableIdentifier(tableIdentifier)
val databaseName = tableIdent.lift(tableIdent.size - 2).getOrElse(
hive.sessionState.getCurrentDatabase)
val tblName = tableIdent.last
- val table = try {
- synchronized {
- client.getTable(databaseName, tblName)
- }
- } catch {
+ val table = try client.getTable(databaseName, tblName) catch {
case te: org.apache.hadoop.hive.ql.metadata.InvalidTableException =>
throw new NoSuchTableException
}
@@ -204,9 +200,7 @@ private[hive] class HiveMetastoreCatalog(hive: HiveContext) extends Catalog with
} else {
val partitions: Seq[Partition] =
if (table.isPartitioned) {
- synchronized {
- HiveShim.getAllPartitionsOf(client, table).toSeq
- }
+ HiveShim.getAllPartitionsOf(client, table).toSeq
} else {
Nil
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index 2065f0d60d..310c2bfdf1 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -457,15 +457,4 @@ class SQLQuerySuite extends QueryTest {
dropTempTable("data")
setConf("spark.sql.hive.convertCTAS", originalConf)
}
-
- test("sanity test for SPARK-6618") {
- (1 to 100).par.map { i =>
- val tableName = s"SPARK_6618_table_$i"
- sql(s"CREATE TABLE $tableName (col1 string)")
- catalog.lookupRelation(Seq(tableName))
- table(tableName)
- tables()
- sql(s"DROP TABLE $tableName")
- }
- }
}