author     Andrew Or <andrew@databricks.com>    2016-04-21 17:57:59 -0700
committer  Reynold Xin <rxin@databricks.com>    2016-04-21 17:57:59 -0700
commit     df1953f0df8b43136157a18bea05fd6750906f68 (patch)
tree       ca7fe0da49faa5d9e4b6e637f64196d9931100f6 /sql/hive/src/main/scala/org
parent     0bf8df250e0aeae306e2ef33e612ca27187447ed (diff)
[SPARK-14824][SQL] Rename HiveContext object to HiveUtils
## What changes were proposed in this pull request?

Just a rename so we can get rid of `HiveContext.scala`. Note that this will conflict with #12585.

## How was this patch tested?

No change in functionality.

Author: Andrew Or <andrew@databricks.com>

Closes #12586 from andrewor14/rename-hc-object.
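For illustration, a minimal sketch of what the rename means for a call site inside Spark (the object and helper method below are illustrative assumptions, not code from this patch, and the import path for `SQLConf` is assumed; `HiveUtils` is `private[spark]`, so such code only compiles inside the `org.apache.spark` package):

```scala
package org.apache.spark.sql.hive

import org.apache.spark.sql.internal.SQLConf

// Mirrors the updated lookups in HiveSessionState: the config entries that used to
// hang off the HiveContext companion object are now reached through HiveUtils.
object RenameSketch {
  // Before this patch: conf.getConf(HiveContext.CONVERT_METASTORE_PARQUET)
  def convertMetastoreParquet(conf: SQLConf): Boolean =
    conf.getConf(HiveUtils.CONVERT_METASTORE_PARQUET)
}
```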
Diffstat (limited to 'sql/hive/src/main/scala/org')
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQueryExecution.scala            |  2
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala              | 10
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSharedState.scala               |  4
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala (renamed from sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala) | 12
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala                   |  6
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/client/IsolatedClientLoader.scala   |  4
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala                 |  8
7 files changed, 23 insertions, 23 deletions
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQueryExecution.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQueryExecution.scala
index 0ee34f07fd..ed1340dccf 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQueryExecution.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQueryExecution.scala
@@ -53,7 +53,7 @@ protected[hive] class HiveQueryExecution(ctx: SQLContext, logicalPlan: LogicalPl
// We need the types so we can output struct field names
val types = analyzed.output.map(_.dataType)
// Reformat to match hive tab delimited output.
- result.map(_.zip(types).map(HiveContext.toHiveString)).map(_.mkString("\t")).toSeq
+ result.map(_.zip(types).map(HiveUtils.toHiveString)).map(_.mkString("\t")).toSeq
}
override def simpleString: String =
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
index 4db0d78cfc..d8cc057fe2 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
@@ -190,7 +190,7 @@ private[hive] class HiveSessionState(ctx: SQLContext) extends SessionState(ctx)
* SerDe.
*/
def convertMetastoreParquet: Boolean = {
- conf.getConf(HiveContext.CONVERT_METASTORE_PARQUET)
+ conf.getConf(HiveUtils.CONVERT_METASTORE_PARQUET)
}
/**
@@ -200,7 +200,7 @@ private[hive] class HiveSessionState(ctx: SQLContext) extends SessionState(ctx)
* This configuration is only effective when "spark.sql.hive.convertMetastoreParquet" is true.
*/
def convertMetastoreParquetWithSchemaMerging: Boolean = {
- conf.getConf(HiveContext.CONVERT_METASTORE_PARQUET_WITH_SCHEMA_MERGING)
+ conf.getConf(HiveUtils.CONVERT_METASTORE_PARQUET_WITH_SCHEMA_MERGING)
}
/**
@@ -209,7 +209,7 @@ private[hive] class HiveSessionState(ctx: SQLContext) extends SessionState(ctx)
* SerDe.
*/
def convertMetastoreOrc: Boolean = {
- conf.getConf(HiveContext.CONVERT_METASTORE_ORC)
+ conf.getConf(HiveUtils.CONVERT_METASTORE_ORC)
}
/**
@@ -225,14 +225,14 @@ private[hive] class HiveSessionState(ctx: SQLContext) extends SessionState(ctx)
* and no SerDe is specified (no ROW FORMAT SERDE clause).
*/
def convertCTAS: Boolean = {
- conf.getConf(HiveContext.CONVERT_CTAS)
+ conf.getConf(HiveUtils.CONVERT_CTAS)
}
/**
* When true, Hive Thrift server will execute SQL queries asynchronously using a thread pool."
*/
def hiveThriftServerAsync: Boolean = {
- conf.getConf(HiveContext.HIVE_THRIFT_SERVER_ASYNC)
+ conf.getConf(HiveUtils.HIVE_THRIFT_SERVER_ASYNC)
}
def hiveThriftServerSingleSession: Boolean = {
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSharedState.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSharedState.scala
index 11097c33df..1d8ce3099d 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSharedState.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSharedState.scala
@@ -34,7 +34,7 @@ private[hive] class HiveSharedState(override val sparkContext: SparkContext)
* A Hive client used for execution.
*/
val executionHive: HiveClientImpl = {
- HiveContext.newClientForExecution(sparkContext.conf, sparkContext.hadoopConfiguration)
+ HiveUtils.newClientForExecution(sparkContext.conf, sparkContext.hadoopConfiguration)
}
/**
@@ -42,7 +42,7 @@ private[hive] class HiveSharedState(override val sparkContext: SparkContext)
*/
// This needs to be a lazy val at here because TestHiveSharedState is overriding it.
lazy val metadataHive: HiveClient = {
- HiveContext.newClientForMetadata(sparkContext.conf, sparkContext.hadoopConfiguration)
+ HiveUtils.newClientForMetadata(sparkContext.conf, sparkContext.hadoopConfiguration)
}
/**
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala
index b2ce3e0df2..44d3cc257b 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala
@@ -59,7 +59,7 @@ class HiveContext private[hive](
self =>
def this(sc: SparkContext) = {
- this(new SparkSession(HiveContext.withHiveExternalCatalog(sc)), true)
+ this(new SparkSession(HiveUtils.withHiveExternalCatalog(sc)), true)
}
def this(sc: JavaSparkContext) = this(sc.sc)
@@ -84,7 +84,7 @@ class HiveContext private[hive](
}
-private[hive] object HiveContext extends Logging {
+private[spark] object HiveUtils extends Logging {
def withHiveExternalCatalog(sc: SparkContext): SparkContext = {
sc.conf.set(CATALOG_IMPLEMENTATION.key, "hive")
@@ -315,10 +315,10 @@ private[hive] object HiveContext extends Logging {
configurations: Map[String, String]): HiveClient = {
val sqlConf = new SQLConf
sqlConf.setConf(SQLContext.getSQLProperties(conf))
- val hiveMetastoreVersion = HiveContext.hiveMetastoreVersion(sqlConf)
- val hiveMetastoreJars = HiveContext.hiveMetastoreJars(sqlConf)
- val hiveMetastoreSharedPrefixes = HiveContext.hiveMetastoreSharedPrefixes(sqlConf)
- val hiveMetastoreBarrierPrefixes = HiveContext.hiveMetastoreBarrierPrefixes(sqlConf)
+ val hiveMetastoreVersion = HiveUtils.hiveMetastoreVersion(sqlConf)
+ val hiveMetastoreJars = HiveUtils.hiveMetastoreJars(sqlConf)
+ val hiveMetastoreSharedPrefixes = HiveUtils.hiveMetastoreSharedPrefixes(sqlConf)
+ val hiveMetastoreBarrierPrefixes = HiveUtils.hiveMetastoreBarrierPrefixes(sqlConf)
val metaVersion = IsolatedClientLoader.hiveVersion(hiveMetastoreVersion)
val defaultWarehouseLocation = hiveConf.get("hive.metastore.warehouse.dir")
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
index 6a20d7c25b..e95069e830 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
@@ -23,8 +23,7 @@ import org.apache.hadoop.fs.{Path, PathFilter}
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants._
import org.apache.hadoop.hive.ql.exec.Utilities
-import org.apache.hadoop.hive.ql.metadata.{HiveUtils, Partition => HivePartition,
-  Table => HiveTable}
+import org.apache.hadoop.hive.ql.metadata.{Partition => HivePartition, Table => HiveTable}
import org.apache.hadoop.hive.ql.plan.TableDesc
import org.apache.hadoop.hive.serde2.Deserializer
import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspectorConverters,
@@ -300,7 +299,8 @@ private[hive] object HiveTableUtil {
def configureJobPropertiesForStorageHandler(
tableDesc: TableDesc, jobConf: JobConf, input: Boolean) {
val property = tableDesc.getProperties.getProperty(META_TABLE_STORAGE)
- val storageHandler = HiveUtils.getStorageHandler(jobConf, property)
+ val storageHandler =
+ org.apache.hadoop.hive.ql.metadata.HiveUtils.getStorageHandler(jobConf, property)
if (storageHandler != null) {
val jobProperties = new util.LinkedHashMap[String, String]
if (input) {
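A note on the TableReader.scala hunk above: Hive itself ships a class with the same simple name, `org.apache.hadoop.hive.ql.metadata.HiveUtils`, so the patch drops that import and spells out the Hive class's fully qualified name at the one call site, avoiding a clash with Spark's renamed `org.apache.spark.sql.hive.HiveUtils`. A hedged sketch of the same disambiguation done via an import alias instead (an alternative pattern, not what this patch does):

```scala
// Alternative, illustrative only: alias the colliding Hive class at import time.
import org.apache.hadoop.hive.ql.metadata.{HiveUtils => HadoopHiveUtils}

// The storage-handler lookup would then read:
//   val storageHandler = HadoopHiveUtils.getStorageHandler(jobConf, property)
```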
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/IsolatedClientLoader.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/IsolatedClientLoader.scala
index 7e0d1b446f..0380d2342b 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/IsolatedClientLoader.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/IsolatedClientLoader.scala
@@ -32,7 +32,7 @@ import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkSubmitUtils
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.util.quietly
-import org.apache.spark.sql.hive.HiveContext
+import org.apache.spark.sql.hive.HiveUtils
import org.apache.spark.util.{MutableURLClassLoader, Utils}
/** Factory for `IsolatedClientLoader` with specific versions of hive. */
@@ -263,7 +263,7 @@ private[hive] class IsolatedClientLoader(
throw new ClassNotFoundException(
s"$cnf when creating Hive client using classpath: ${execJars.mkString(", ")}\n" +
"Please make sure that jars for your version of hive and hadoop are included in the " +
- s"paths passed to ${HiveContext.HIVE_METASTORE_JARS}.", e)
+ s"paths passed to ${HiveUtils.HIVE_METASTORE_JARS}.", e)
} else {
throw e
}
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
index 741e3bdd18..7f8f6292cb 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
@@ -74,7 +74,7 @@ class TestHiveContext(@transient val sparkSession: TestHiveSparkSession, isRootC
extends HiveContext(sparkSession, isRootContext) {
def this(sc: SparkContext) {
- this(new TestHiveSparkSession(HiveContext.withHiveExternalCatalog(sc)), true)
+ this(new TestHiveSparkSession(HiveUtils.withHiveExternalCatalog(sc)), true)
}
override def newSession(): TestHiveContext = {
@@ -117,7 +117,7 @@ private[hive] class TestHiveSparkSession(
sc,
Utils.createTempDir(namePrefix = "warehouse"),
TestHiveContext.makeScratchDir(),
- HiveContext.newTemporaryConfiguration(useInMemoryDerby = false),
+ HiveUtils.newTemporaryConfiguration(useInMemoryDerby = false),
None)
}
@@ -576,7 +576,7 @@ private[hive] object TestHiveContext {
scratchDirPath: File,
metastoreTemporaryConf: Map[String, String]): HiveClient = {
val hiveConf = new HiveConf(hadoopConf, classOf[HiveConf])
- HiveContext.newClientForMetadata(
+ HiveUtils.newClientForMetadata(
conf,
hiveConf,
hadoopConf,
@@ -591,7 +591,7 @@ private[hive] object TestHiveContext {
warehousePath: File,
scratchDirPath: File,
metastoreTemporaryConf: Map[String, String]): Map[String, String] = {
- HiveContext.hiveClientConfigurations(hiveconf) ++ metastoreTemporaryConf ++ Map(
+ HiveUtils.hiveClientConfigurations(hiveconf) ++ metastoreTemporaryConf ++ Map(
ConfVars.METASTOREWAREHOUSE.varname -> warehousePath.toURI.toString,
ConfVars.METASTORE_INTEGER_JDO_PUSHDOWN.varname -> "true",
ConfVars.SCRATCHDIR.varname -> scratchDirPath.toURI.toString,