path: root/sql/hive/src
author     GuoQiang Li <witgo@qq.com>            2014-08-01 23:55:11 -0700
committer  Patrick Wendell <pwendell@gmail.com>  2014-08-01 23:55:11 -0700
commit     adc8303294e26efb4ed15e5f5ba1062f7988625d (patch)
tree       09d3231b718713f384e51711b31d737c5edcf6c7 /sql/hive/src
parent     4bc3bb29a4b6ab24b6b7e1f8df26414c41c80ace (diff)
[SPARK-1470][SPARK-1842] Use the scala-logging wrapper instead of the slf4j API directly
Author: GuoQiang Li <witgo@qq.com>

Closes #1369 from witgo/SPARK-1470_new and squashes the following commits:

66a1641 [GuoQiang Li] IncompatibleResultTypeProblem
73a89ba [GuoQiang Li] Use the scala-logging wrapper instead of the slf4j API directly.
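The point of the wrapper is lazy message construction: scala-logging's Logger methods are macros that expand to an is-enabled check around the call, so interpolated log strings are only built when that level is active. Below is a minimal sketch of the difference, assuming the 2014-era scalalogging-slf4j artifact this change targets (later scala-logging releases moved the package to com.typesafe.scalalogging); the names LoggingStyles and expensiveDump are illustrative only.

    import org.slf4j.LoggerFactory
    import com.typesafe.scalalogging.slf4j.Logger

    object LoggingStyles {
      private val underlying = LoggerFactory.getLogger("demo")
      private val log = Logger(underlying)

      // Direct slf4j: the interpolated string is always built, so hot paths
      // need a hand-written isDebugEnabled guard at every call site.
      def direct(expensiveDump: () => String): Unit = {
        if (underlying.isDebugEnabled) {
          underlying.debug(s"state: ${expensiveDump()}")
        }
      }

      // scala-logging: debug(...) is a macro that expands to the same guarded
      // call, so the message is only evaluated when DEBUG is active.
      def wrapped(expensiveDump: () => String): Unit = {
        log.debug(s"state: ${expensiveDump()}")
      }
    }

With the wrapper, call sites keep the plain one-line form and still avoid paying for string construction at disabled levels.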
Diffstat (limited to 'sql/hive/src')
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala                   2
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala          3
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/TestHive.scala                     10
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala                      4
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala 22
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQueryFileTest.scala   2
6 files changed, 22 insertions(+), 21 deletions(-)
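Every hunk below renames logger to log because the shared org.apache.spark.Logging trait exposes the wrapped logger under that name, while the SQL module's own org.apache.spark.sql.Logging (whose member was called logger) is dropped from the imports. As a rough sketch of the shape the shared trait takes after this change, reconstructed from the call sites below and with Spark's log4j default-initialization details omitted:

    import com.typesafe.scalalogging.slf4j.Logger
    import org.slf4j.LoggerFactory

    trait Logging {
      // Initialized lazily so the slf4j logger is not created before the
      // logging backend has been configured.
      @transient private var log_ : Logger = null

      protected def log: Logger = {
        if (log_ == null) {
          // Drop the "$" suffix Scala appends to companion-object class names.
          val className = this.getClass.getName.stripSuffix("$")
          log_ = Logger(LoggerFactory.getLogger(className))
        }
        log_
      }
    }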
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
index 7e3b8727be..1f31d35eaa 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
@@ -207,7 +207,7 @@ class HiveContext(sc: SparkContext) extends SQLContext(sc) {
}
} catch {
case e: Exception =>
- logger.error(
+ log.error(
s"""
|======================
|HIVE FAILURE OUTPUT
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
index fa4e78439c..df3604439e 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
@@ -28,7 +28,8 @@ import org.apache.hadoop.hive.ql.plan.TableDesc
import org.apache.hadoop.hive.serde2.Deserializer
import org.apache.spark.annotation.DeveloperApi
-import org.apache.spark.sql.{SQLContext, Logging}
+import org.apache.spark.Logging
+import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.catalyst.analysis.{EliminateAnalysisOperators, Catalog}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.logical
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TestHive.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TestHive.scala
index c50e8c4b5c..7376fb5dc8 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TestHive.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TestHive.scala
@@ -148,7 +148,7 @@ class TestHiveContext(sc: SparkContext) extends HiveContext(sc) {
describedTables ++
logical.collect { case UnresolvedRelation(databaseName, name, _) => name }
val referencedTestTables = referencedTables.filter(testTables.contains)
- logger.debug(s"Query references test tables: ${referencedTestTables.mkString(", ")}")
+ log.debug(s"Query references test tables: ${referencedTestTables.mkString(", ")}")
referencedTestTables.foreach(loadTestTable)
// Proceed with analysis.
analyzer(logical)
@@ -273,7 +273,7 @@ class TestHiveContext(sc: SparkContext) extends HiveContext(sc) {
if (!(loadedTables contains name)) {
// Marks the table as loaded first to prevent infinite mutually recursive table loading.
loadedTables += name
- logger.info(s"Loading test table $name")
+ log.info(s"Loading test table $name")
val createCmds =
testTables.get(name).map(_.commands).getOrElse(sys.error(s"Unknown test table $name"))
createCmds.foreach(_())
@@ -312,7 +312,7 @@ class TestHiveContext(sc: SparkContext) extends HiveContext(sc) {
loadedTables.clear()
catalog.client.getAllTables("default").foreach { t =>
- logger.debug(s"Deleting table $t")
+ log.debug(s"Deleting table $t")
val table = catalog.client.getTable("default", t)
catalog.client.getIndexes("default", t, 255).foreach { index =>
@@ -325,7 +325,7 @@ class TestHiveContext(sc: SparkContext) extends HiveContext(sc) {
}
catalog.client.getAllDatabases.filterNot(_ == "default").foreach { db =>
- logger.debug(s"Dropping Database: $db")
+ log.debug(s"Dropping Database: $db")
catalog.client.dropDatabase(db, true, false, true)
}
@@ -347,7 +347,7 @@ class TestHiveContext(sc: SparkContext) extends HiveContext(sc) {
loadTestTable("srcpart")
} catch {
case e: Exception =>
- logger.error(s"FATAL ERROR: Failed to reset TestDB state. $e")
+ log.error(s"FATAL ERROR: Failed to reset TestDB state. $e")
// At this point there is really no reason to continue, but the test framework traps exits.
// So instead we just pause forever so that at least the developer can see where things
// started to go wrong.
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
index 7582b4743d..4d8eaa18d7 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
@@ -25,7 +25,7 @@ import org.apache.hadoop.hive.ql.exec.{FunctionInfo, FunctionRegistry}
import org.apache.hadoop.hive.ql.udf.{UDFType => HiveUDFType}
import org.apache.hadoop.hive.ql.udf.generic._
-import org.apache.spark.sql.Logging
+import org.apache.spark.Logging
import org.apache.spark.sql.catalyst.analysis
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.types._
@@ -119,7 +119,7 @@ private[hive] case class HiveSimpleUdf(functionClassName: String, children: Seq[
sys.error(s"No matching wrapper found, options: ${argClass.getConstructors.toSeq}."))
(a: Any) => {
- logger.debug(
+ log.debug(
s"Wrapping $a of type ${if (a == null) "null" else a.getClass.getName} using $constructor.")
// We must make sure that primitives get boxed java style.
if (a == null) {
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
index 6c8fe4b196..52cb1cf986 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
@@ -21,7 +21,7 @@ import java.io._
import org.scalatest.{BeforeAndAfterAll, FunSuite, GivenWhenThen}
-import org.apache.spark.sql.Logging
+import org.apache.spark.Logging
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.plans.logical.{NativeCommand => LogicalNativeCommand}
@@ -197,7 +197,7 @@ abstract class HiveComparisonTest
// If test sharding is enabled, skip tests that are not in the correct shard.
shardInfo.foreach {
case (shardId, numShards) if testCaseName.hashCode % numShards != shardId => return
- case (shardId, _) => logger.debug(s"Shard $shardId includes test '$testCaseName'")
+ case (shardId, _) => log.debug(s"Shard $shardId includes test '$testCaseName'")
}
// Skip tests found in directories specified by user.
@@ -213,13 +213,13 @@ abstract class HiveComparisonTest
.map(new File(_, testCaseName))
.filter(_.exists)
if (runOnlyDirectories.nonEmpty && runIndicators.isEmpty) {
- logger.debug(
+ log.debug(
s"Skipping test '$testCaseName' not found in ${runOnlyDirectories.map(_.getCanonicalPath)}")
return
}
test(testCaseName) {
- logger.debug(s"=== HIVE TEST: $testCaseName ===")
+ log.debug(s"=== HIVE TEST: $testCaseName ===")
// Clear old output for this testcase.
outputDirectories.map(new File(_, testCaseName)).filter(_.exists()).foreach(_.delete())
@@ -235,7 +235,7 @@ abstract class HiveComparisonTest
.filterNot(_ contains "hive.outerjoin.supports.filters")
if (allQueries != queryList)
- logger.warn(s"Simplifications made on unsupported operations for test $testCaseName")
+ log.warn(s"Simplifications made on unsupported operations for test $testCaseName")
lazy val consoleTestCase = {
val quotes = "\"\"\""
@@ -257,11 +257,11 @@ abstract class HiveComparisonTest
}
val hiveCachedResults = hiveCacheFiles.flatMap { cachedAnswerFile =>
- logger.debug(s"Looking for cached answer file $cachedAnswerFile.")
+ log.debug(s"Looking for cached answer file $cachedAnswerFile.")
if (cachedAnswerFile.exists) {
Some(fileToString(cachedAnswerFile))
} else {
- logger.debug(s"File $cachedAnswerFile not found")
+ log.debug(s"File $cachedAnswerFile not found")
None
}
}.map {
@@ -272,7 +272,7 @@ abstract class HiveComparisonTest
val hiveResults: Seq[Seq[String]] =
if (hiveCachedResults.size == queryList.size) {
- logger.info(s"Using answer cache for test: $testCaseName")
+ log.info(s"Using answer cache for test: $testCaseName")
hiveCachedResults
} else {
@@ -287,7 +287,7 @@ abstract class HiveComparisonTest
if (installHooksCommand.findAllMatchIn(queryString).nonEmpty)
sys.error("hive exec hooks not supported for tests.")
- logger.warn(s"Running query ${i+1}/${queryList.size} with hive.")
+ log.warn(s"Running query ${i+1}/${queryList.size} with hive.")
// Analyze the query with catalyst to ensure test tables are loaded.
val answer = hiveQuery.analyzed match {
case _: ExplainCommand => Nil // No need to execute EXPLAIN queries as we don't check the output.
@@ -351,7 +351,7 @@ abstract class HiveComparisonTest
val resultComparison = sideBySide(hivePrintOut, catalystPrintOut).mkString("\n")
if (recomputeCache) {
- logger.warn(s"Clearing cache files for failed test $testCaseName")
+ log.warn(s"Clearing cache files for failed test $testCaseName")
hiveCacheFiles.foreach(_.delete())
}
@@ -380,7 +380,7 @@ abstract class HiveComparisonTest
TestHive.runSqlHive("SELECT key FROM src")
} catch {
case e: Exception =>
- logger.error(s"FATAL ERROR: Canary query threw $e This implies that the testing environment has likely been corrupted.")
+ log.error(s"FATAL ERROR: Canary query threw $e This implies that the testing environment has likely been corrupted.")
// The testing setup traps exits so wait here for a long time so the developer can see when things started
// to go wrong.
Thread.sleep(1000000)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQueryFileTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQueryFileTest.scala
index 50ab71a900..9ca5575c1b 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQueryFileTest.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQueryFileTest.scala
@@ -53,7 +53,7 @@ abstract class HiveQueryFileTest extends HiveComparisonTest {
testCases.sorted.foreach {
case (testCaseName, testCaseFile) =>
if (blackList.map(_.r.pattern.matcher(testCaseName).matches()).reduceLeft(_||_)) {
- logger.debug(s"Blacklisted test skipped $testCaseName")
+ log.debug(s"Blacklisted test skipped $testCaseName")
} else if (realWhiteList.map(_.r.pattern.matcher(testCaseName).matches()).reduceLeft(_||_) || runAll) {
// Build a test case and submit it to scala test framework...
val queriesString = fileToString(testCaseFile)