author     Xiao Li <gatorsmile@gmail.com>  2017-03-24 14:42:33 +0800
committer  Wenchen Fan <wenchen@databricks.com>  2017-03-24 14:42:33 +0800
commit     344f38b04b271b5f3ec2748b34db4e52d54da1bc (patch)
tree       ab3a33cb0e33370f9223cf2e4c2f3333590ce0d9 /sql/hive/src
parent     8e558041aa0c41ba9fb2ce242daaf6d6ed4d85b7 (diff)
[SPARK-19970][SQL][FOLLOW-UP] Table owner should be USER instead of PRINCIPAL in kerberized clusters

### What changes were proposed in this pull request?

This is a follow-up for the PR: https://github.com/apache/spark/pull/17311

- For safety, use `sessionState` to get the user name, instead of calling `SessionState.get()` in the function `toHiveTable`.
- Pass the user name instead of `conf` when calling `toHiveTable`.

### How was this patch tested?

N/A

Author: Xiao Li <gatorsmile@gmail.com>

Closes #17405 from gatorsmile/user.
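To make the change concrete, here is a minimal, self-contained sketch of the pattern (the `HiveClientSketch`, `CatalogTableSketch`, and `HiveTableSketch` names are hypothetical stand-ins; only `SessionState`, `getAuthenticator.getUserName`, and the shape of the `toHiveTable` signature come from the actual change):

```scala
// Sketch only: illustrates resolving the owner once from the client's own
// SessionState instead of reading the SessionState.get() thread-local inside
// toHiveTable. The *Sketch types below are hypothetical stand-ins.
import org.apache.hadoop.hive.ql.session.SessionState

// Hypothetical stand-ins for Spark's CatalogTable and Hive's Table:
case class CatalogTableSketch(database: String, name: String)
class HiveTableSketch(database: String, name: String) {
  private var owner: String = _
  def setOwner(user: String): Unit = { owner = user }
}

class HiveClientSketch(state: SessionState) {
  // Resolved once from this client's session state; safer than calling
  // SessionState.get() later, which reads a thread-local that may belong
  // to a different (or no) session on the calling thread.
  private val userName: String = state.getAuthenticator.getUserName

  def createTable(table: CatalogTableSketch): Unit = {
    val hiveTable = toHiveTable(table, Some(userName))
    // client.createTable(hiveTable, ignoreIfExists) would follow here.
  }

  // The owner is now an explicit Option[String] parameter instead of being
  // derived from a HiveConf inside the conversion.
  private def toHiveTable(
      table: CatalogTableSketch,
      userName: Option[String] = None): HiveTableSketch = {
    val hiveTable = new HiveTableSketch(table.database, table.name)
    userName.foreach(hiveTable.setOwner) // only set when a user name is known
    hiveTable
  }
}
```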
Diffstat (limited to 'sql/hive/src')
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala  |  26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
index 13edcd0517..56ccac32a8 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
@@ -207,6 +207,8 @@ private[hive] class HiveClientImpl(
   /** Returns the configuration for the current session. */
   def conf: HiveConf = state.getConf
 
+  private val userName = state.getAuthenticator.getUserName
+
   override def getConf(key: String, defaultValue: String): String = {
     conf.get(key, defaultValue)
   }
@@ -413,7 +415,7 @@
         createTime = h.getTTable.getCreateTime.toLong * 1000,
         lastAccessTime = h.getLastAccessTime.toLong * 1000,
         storage = CatalogStorageFormat(
-          locationUri = shim.getDataLocation(h).map(CatalogUtils.stringToURI(_)),
+          locationUri = shim.getDataLocation(h).map(CatalogUtils.stringToURI),
           // To avoid ClassNotFound exception, we try our best to not get the format class, but get
           // the class name directly. However, for non-native tables, there is no interface to get
           // the format class name, so we may still throw ClassNotFound in this case.
@@ -441,7 +443,7 @@
   }
 
   override def createTable(table: CatalogTable, ignoreIfExists: Boolean): Unit = withHiveState {
-    client.createTable(toHiveTable(table, Some(conf)), ignoreIfExists)
+    client.createTable(toHiveTable(table, Some(userName)), ignoreIfExists)
   }
 
   override def dropTable(
@@ -453,7 +455,7 @@
   }
 
   override def alterTable(tableName: String, table: CatalogTable): Unit = withHiveState {
-    val hiveTable = toHiveTable(table, Some(conf))
+    val hiveTable = toHiveTable(table, Some(userName))
     // Do not use `table.qualifiedName` here because this may be a rename
     val qualifiedTableName = s"${table.database}.$tableName"
     shim.alterTable(client, qualifiedTableName, hiveTable)
@@ -522,7 +524,7 @@
       newSpecs: Seq[TablePartitionSpec]): Unit = withHiveState {
     require(specs.size == newSpecs.size, "number of old and new partition specs differ")
     val catalogTable = getTable(db, table)
-    val hiveTable = toHiveTable(catalogTable, Some(conf))
+    val hiveTable = toHiveTable(catalogTable, Some(userName))
     specs.zip(newSpecs).foreach { case (oldSpec, newSpec) =>
       val hivePart = getPartitionOption(catalogTable, oldSpec)
         .map { p => toHivePartition(p.copy(spec = newSpec), hiveTable) }
@@ -535,7 +537,7 @@
       db: String,
       table: String,
       newParts: Seq[CatalogTablePartition]): Unit = withHiveState {
-    val hiveTable = toHiveTable(getTable(db, table), Some(conf))
+    val hiveTable = toHiveTable(getTable(db, table), Some(userName))
     shim.alterPartitions(client, table, newParts.map { p => toHivePartition(p, hiveTable) }.asJava)
   }
 
@@ -563,7 +565,7 @@
   override def getPartitionOption(
       table: CatalogTable,
       spec: TablePartitionSpec): Option[CatalogTablePartition] = withHiveState {
-    val hiveTable = toHiveTable(table, Some(conf))
+    val hiveTable = toHiveTable(table, Some(userName))
     val hivePartition = client.getPartition(hiveTable, spec.asJava, false)
     Option(hivePartition).map(fromHivePartition)
   }
@@ -575,7 +577,7 @@
   override def getPartitions(
       table: CatalogTable,
       spec: Option[TablePartitionSpec]): Seq[CatalogTablePartition] = withHiveState {
-    val hiveTable = toHiveTable(table, Some(conf))
+    val hiveTable = toHiveTable(table, Some(userName))
     val parts = spec match {
       case None => shim.getAllPartitions(client, hiveTable).map(fromHivePartition)
       case Some(s) =>
@@ -589,7 +591,7 @@
   override def getPartitionsByFilter(
       table: CatalogTable,
       predicates: Seq[Expression]): Seq[CatalogTablePartition] = withHiveState {
-    val hiveTable = toHiveTable(table, Some(conf))
+    val hiveTable = toHiveTable(table, Some(userName))
     val parts = shim.getPartitionsByFilter(client, hiveTable, predicates).map(fromHivePartition)
     HiveCatalogMetrics.incrementFetchedPartitions(parts.length)
     parts
@@ -817,9 +819,7 @@ private[hive] object HiveClientImpl {
   /**
    * Converts the native table metadata representation format CatalogTable to Hive's Table.
    */
-  def toHiveTable(
-      table: CatalogTable,
-      conf: Option[HiveConf] = None): HiveTable = {
+  def toHiveTable(table: CatalogTable, userName: Option[String] = None): HiveTable = {
     val hiveTable = new HiveTable(table.database, table.identifier.table)
     // For EXTERNAL_TABLE, we also need to set EXTERNAL field in the table properties.
     // Otherwise, Hive metastore will change the table to a MANAGED_TABLE.
@@ -851,10 +851,10 @@
       hiveTable.setFields(schema.asJava)
     }
     hiveTable.setPartCols(partCols.asJava)
-    conf.foreach { _ => hiveTable.setOwner(SessionState.get().getAuthenticator().getUserName()) }
+    userName.foreach(hiveTable.setOwner)
     hiveTable.setCreateTime((table.createTime / 1000).toInt)
     hiveTable.setLastAccessTime((table.lastAccessTime / 1000).toInt)
-    table.storage.locationUri.map(CatalogUtils.URIToString(_)).foreach { loc =>
+    table.storage.locationUri.map(CatalogUtils.URIToString).foreach { loc =>
       hiveTable.getTTable.getSd.setLocation(loc)}
     table.storage.inputFormat.map(toInputFormat).foreach(hiveTable.setInputFormatClass)
     table.storage.outputFormat.map(toOutputFormat).foreach(hiveTable.setOutputFormatClass)
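Design note: in the old code, `conf` served only as a presence flag (its value was discarded via `_ =>`), and the owner was still fetched from the `SessionState.get()` thread-local, which in a kerberized, multi-session setting may not be the session that created the client. The new code threads the already-resolved user name through explicitly, and `Option.foreach` with a method reference preserves the "set the owner only when known" behavior. A tiny self-contained illustration of that idiom (the `Table` class here is hypothetical):

```scala
// Hypothetical Table class, used only to demonstrate Option.foreach
// with a method reference (eta-expansion of setOwner).
class Table {
  private var owner: String = _
  def setOwner(u: String): Unit = { owner = u }
}

val t = new Table
val userName: Option[String] = Some("alice")
userName.foreach(t.setOwner) // sets owner to "alice"; a None would be a no-op
```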