author    Reynold Xin <rxin@databricks.com>  2015-05-28 18:08:56 -0700
committer Reynold Xin <rxin@databricks.com>  2015-05-28 18:08:56 -0700
commit    ee6a0e12fb76e4d5c24175900e5bf6a8cb35e2b0 (patch)
tree      d0aee386ce036ae5c8c53bafde7f22edd6ad38e6 /sql
parent    3af0b3136e4b7dea52c413d640653ccddc638574 (diff)
[SPARK-7927] whitespace fixes for Hive and ThriftServer.
So we can enable a whitespace enforcement rule in the style checker to save code review time.

Author: Reynold Xin <rxin@databricks.com>

Closes #6478 from rxin/whitespace-hive and squashes the following commits:

e01b0e0 [Reynold Xin] Fixed tests.
a3bba22 [Reynold Xin] [SPARK-7927] whitespace fixes for Hive and ThriftServer.
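For reference, the following is a minimal, standalone Scala sketch (not part of the patch; the object and method names are invented for illustration) of the spacing conventions this change enforces and that the style checker will flag: a space after ':' in type annotations and catch patterns, a space after 'if', a space after ',' in type arguments, and a space-separated ': _*' in varargs splices.

```scala
import java.io.IOException

object WhitespaceStyleExample {

  // Before: private var transport:TSocket = _   After: private var transport: TSocket = _
  private var start: Long = 0L

  def describe(values: Array[String], failed: Boolean): String = {
    // Before: if(failed) ...                     After: if (failed) ...
    val label = if (failed) "FAILED" else "OK"
    // Before: Map[_,_]                           After: Map[_, _]
    val tags: Map[String, String] = Map("state" -> label)
    // Before: asList(values :_*)                 After: asList(values : _*)
    val list = java.util.Arrays.asList(values : _*)
    s"$label tags=${tags.size} items=${list.size()}"
  }

  def main(args: Array[String]): Unit = {
    start = System.currentTimeMillis()
    try {
      println(describe(Array("a", "b"), failed = false))
      println(s"elapsed: ${(System.currentTimeMillis() - start) / 1000.0}s")
    } catch {
      // Before: case e:IOException =>            After: case e: IOException =>
      case e: IOException => println(e.getMessage)
    }
  }
}
```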
Diffstat (limited to 'sql')
-rw-r--r--  sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala | 8
-rw-r--r--  sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/ThriftServerPage.scala | 6
-rw-r--r--  sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/ThriftServerSessionPage.scala | 2
-rw-r--r--  sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/UISeleniumSuite.scala | 4
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/ExtendedHiveQlParser.scala | 6
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala | 4
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala | 10
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala | 10
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala | 9
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala | 7
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala | 2
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala | 6
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveWriterContainers.scala | 2
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala | 6
14 files changed, 43 insertions(+), 39 deletions(-)
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
index deb1008c46..14f6f658d9 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala
@@ -43,7 +43,7 @@ import org.apache.spark.util.Utils
private[hive] object SparkSQLCLIDriver {
private var prompt = "spark-sql"
private var continuedPrompt = "".padTo(prompt.length, ' ')
- private var transport:TSocket = _
+ private var transport: TSocket = _
installSignalHandler()
@@ -276,13 +276,13 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {
driver.init()
val out = sessionState.out
- val start:Long = System.currentTimeMillis()
+ val start: Long = System.currentTimeMillis()
if (sessionState.getIsVerbose) {
out.println(cmd)
}
val rc = driver.run(cmd)
val end = System.currentTimeMillis()
- val timeTaken:Double = (end - start) / 1000.0
+ val timeTaken: Double = (end - start) / 1000.0
ret = rc.getResponseCode
if (ret != 0) {
@@ -310,7 +310,7 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {
res.clear()
}
} catch {
- case e:IOException =>
+ case e: IOException =>
console.printError(
s"""Failed with exception ${e.getClass.getName}: ${e.getMessage}
|${org.apache.hadoop.util.StringUtils.stringifyException(e)}
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/ThriftServerPage.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/ThriftServerPage.scala
index 7c48ff4b35..10c83d8b27 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/ThriftServerPage.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/ThriftServerPage.scala
@@ -77,7 +77,7 @@ private[ui] class ThriftServerPage(parent: ThriftServerTab) extends WebUIPage(""
[{id}]
</a>
}
- val detail = if(info.state == ExecutionState.FAILED) info.detail else info.executePlan
+ val detail = if (info.state == ExecutionState.FAILED) info.detail else info.executePlan
<tr>
<td>{info.userName}</td>
<td>
@@ -85,7 +85,7 @@ private[ui] class ThriftServerPage(parent: ThriftServerTab) extends WebUIPage(""
</td>
<td>{info.groupId}</td>
<td>{formatDate(info.startTimestamp)}</td>
- <td>{if(info.finishTimestamp > 0) formatDate(info.finishTimestamp)}</td>
+ <td>{if (info.finishTimestamp > 0) formatDate(info.finishTimestamp)}</td>
<td>{formatDurationOption(Some(info.totalTime))}</td>
<td>{info.statement}</td>
<td>{info.state}</td>
@@ -150,7 +150,7 @@ private[ui] class ThriftServerPage(parent: ThriftServerTab) extends WebUIPage(""
<td> {session.ip} </td>
<td> <a href={sessionLink}> {session.sessionId} </a> </td>
<td> {formatDate(session.startTimestamp)} </td>
- <td> {if(session.finishTimestamp > 0) formatDate(session.finishTimestamp)} </td>
+ <td> {if (session.finishTimestamp > 0) formatDate(session.finishTimestamp)} </td>
<td> {formatDurationOption(Some(session.totalTime))} </td>
<td> {session.totalExecution.toString} </td>
</tr>
diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/ThriftServerSessionPage.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/ThriftServerSessionPage.scala
index d9d66dcd85..3b01afa603 100644
--- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/ThriftServerSessionPage.scala
+++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/ThriftServerSessionPage.scala
@@ -87,7 +87,7 @@ private[ui] class ThriftServerSessionPage(parent: ThriftServerTab)
[{id}]
</a>
}
- val detail = if(info.state == ExecutionState.FAILED) info.detail else info.executePlan
+ val detail = if (info.state == ExecutionState.FAILED) info.detail else info.executePlan
<tr>
<td>{info.userName}</td>
<td>
diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/UISeleniumSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/UISeleniumSuite.scala
index e1466e0423..4c9fab7ef6 100644
--- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/UISeleniumSuite.scala
+++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/UISeleniumSuite.scala
@@ -73,7 +73,7 @@ class UISeleniumSuite
}
ignore("thrift server ui test") {
- withJdbcStatement(statement =>{
+ withJdbcStatement { statement =>
val baseURL = s"http://localhost:$uiPort"
val queries = Seq(
@@ -97,6 +97,6 @@ class UISeleniumSuite
findAll(cssSelector("""ul table tbody tr td""")).map(_.text).toList should contain (line)
}
}
- })
+ }
}
}
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/ExtendedHiveQlParser.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/ExtendedHiveQlParser.scala
index 3f20c6142e..7f8449cdc2 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/ExtendedHiveQlParser.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/ExtendedHiveQlParser.scala
@@ -29,10 +29,10 @@ import org.apache.spark.sql.hive.execution.{AddJar, AddFile, HiveNativeCommand}
private[hive] class ExtendedHiveQlParser extends AbstractSparkSQLParser {
// Keyword is a convention with AbstractSparkSQLParser, which will scan all of the `Keyword`
// properties via reflection the class in runtime for constructing the SqlLexical object
- protected val ADD = Keyword("ADD")
- protected val DFS = Keyword("DFS")
+ protected val ADD = Keyword("ADD")
+ protected val DFS = Keyword("DFS")
protected val FILE = Keyword("FILE")
- protected val JAR = Keyword("JAR")
+ protected val JAR = Keyword("JAR")
protected lazy val start: Parser[LogicalPlan] = dfs | addJar | addFile | hiveQl
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
index 2ed71d3d52..fbf2c7d8cb 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
@@ -530,7 +530,7 @@ private[hive] object HiveContext {
val propMap: HashMap[String, String] = HashMap()
// We have to mask all properties in hive-site.xml that relates to metastore data source
// as we used a local metastore here.
- HiveConf.ConfVars.values().foreach { confvar =>
+ HiveConf.ConfVars.values().foreach { confvar =>
if (confvar.varname.contains("datanucleus") || confvar.varname.contains("jdo")) {
propMap.put(confvar.varname, confvar.defaultVal)
}
@@ -553,7 +553,7 @@ private[hive] object HiveContext {
}.mkString("{", ",", "}")
case (seq: Seq[_], ArrayType(typ, _)) =>
seq.map(v => (v, typ)).map(toHiveStructString).mkString("[", ",", "]")
- case (map: Map[_,_], MapType(kType, vType, _)) =>
+ case (map: Map[_, _], MapType(kType, vType, _)) =>
map.map {
case (key, value) =>
toHiveStructString((key, kType)) + ":" + toHiveStructString((value, vType))
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala
index 0a694c70e4..24cd335082 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala
@@ -335,7 +335,7 @@ private[hive] trait HiveInspectors {
val allRefs = si.getAllStructFieldRefs
new GenericRow(
allRefs.map(r =>
- unwrap(si.getStructFieldData(data,r), r.getFieldObjectInspector)).toArray)
+ unwrap(si.getStructFieldData(data, r), r.getFieldObjectInspector)).toArray)
}
@@ -561,8 +561,8 @@ private[hive] trait HiveInspectors {
case DecimalType() => PrimitiveObjectInspectorFactory.javaHiveDecimalObjectInspector
case StructType(fields) =>
ObjectInspectorFactory.getStandardStructObjectInspector(
- java.util.Arrays.asList(fields.map(f => f.name) :_*),
- java.util.Arrays.asList(fields.map(f => toInspector(f.dataType)) :_*))
+ java.util.Arrays.asList(fields.map(f => f.name) : _*),
+ java.util.Arrays.asList(fields.map(f => toInspector(f.dataType)) : _*))
}
/**
@@ -677,8 +677,8 @@ private[hive] trait HiveInspectors {
getListTypeInfo(elemType.toTypeInfo)
case StructType(fields) =>
getStructTypeInfo(
- java.util.Arrays.asList(fields.map(_.name) :_*),
- java.util.Arrays.asList(fields.map(_.dataType.toTypeInfo) :_*))
+ java.util.Arrays.asList(fields.map(_.name) : _*),
+ java.util.Arrays.asList(fields.map(_.dataType.toTypeInfo) : _*))
case MapType(keyType, valueType, _) =>
getMapTypeInfo(keyType.toTypeInfo, valueType.toTypeInfo)
case BinaryType => binaryTypeInfo
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
index 95117f7a68..47b8573158 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
@@ -546,13 +546,17 @@ private[hive] class HiveMetastoreCatalog(val client: ClientInterface, hive: Hive
* UNIMPLEMENTED: It needs to be decided how we will persist in-memory tables to the metastore.
* For now, if this functionality is desired mix in the in-memory [[OverrideCatalog]].
*/
- override def registerTable(tableIdentifier: Seq[String], plan: LogicalPlan): Unit = ???
+ override def registerTable(tableIdentifier: Seq[String], plan: LogicalPlan): Unit = {
+ throw new UnsupportedOperationException
+ }
/**
* UNIMPLEMENTED: It needs to be decided how we will persist in-memory tables to the metastore.
* For now, if this functionality is desired mix in the in-memory [[OverrideCatalog]].
*/
- override def unregisterTable(tableIdentifier: Seq[String]): Unit = ???
+ override def unregisterTable(tableIdentifier: Seq[String]): Unit = {
+ throw new UnsupportedOperationException
+ }
override def unregisterAllTables(): Unit = {}
}
@@ -725,7 +729,7 @@ private[hive] case class MetastoreRelation
val output = attributes ++ partitionKeys
/** An attribute map that can be used to lookup original attributes based on expression id. */
- val attributeMap = AttributeMap(output.map(o => (o,o)))
+ val attributeMap = AttributeMap(output.map(o => (o, o)))
/** An attribute map for determining the ordinal for non-partition columns. */
val columnOrdinals = AttributeMap(attributes.zipWithIndex)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
index 2cbb5ca4d2..3915ee8356 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
@@ -665,7 +665,7 @@ https://cwiki.apache.org/confluence/display/Hive/Enhanced+Aggregation%2C+Cube%2C
HiveColumn(field.getName, field.getType, field.getComment)
})
}
- case Token("TOK_TABLEROWFORMAT", Token("TOK_SERDEPROPS", child :: Nil) :: Nil)=>
+ case Token("TOK_TABLEROWFORMAT", Token("TOK_SERDEPROPS", child :: Nil) :: Nil) =>
val serdeParams = new java.util.HashMap[String, String]()
child match {
case Token("TOK_TABLEROWFORMATFIELD", rowChild1 :: rowChild2) =>
@@ -775,7 +775,7 @@ https://cwiki.apache.org/confluence/display/Hive/Enhanced+Aggregation%2C+Cube%2C
// Support "TRUNCATE TABLE table_name [PARTITION partition_spec]"
case Token("TOK_TRUNCATETABLE",
- Token("TOK_TABLE_PARTITION",table)::Nil) => NativePlaceholder
+ Token("TOK_TABLE_PARTITION", table) :: Nil) => NativePlaceholder
case Token("TOK_QUERY", queryArgs)
if Seq("TOK_FROM", "TOK_INSERT").contains(queryArgs.head.getText) =>
@@ -1151,7 +1151,7 @@ https://cwiki.apache.org/confluence/display/Hive/Enhanced+Aggregation%2C+Cube%2C
case Seq(false, false) => Inner
}.toBuffer
- val joinedTables = tables.reduceLeft(Join(_,_, Inner, None))
+ val joinedTables = tables.reduceLeft(Join(_, _, Inner, None))
// Must be transform down.
val joinedResult = joinedTables transform {
@@ -1171,7 +1171,8 @@ https://cwiki.apache.org/confluence/display/Hive/Enhanced+Aggregation%2C+Cube%2C
// worth the number of hacks that will be required to implement it. Namely, we need to add
// some sort of mapped star expansion that would expand all child output row to be similarly
// named output expressions where some aggregate expression has been applied (i.e. First).
- ??? // Aggregate(groups, Star(None, First(_)) :: Nil, joinedResult)
+ // Aggregate(groups, Star(None, First(_)) :: Nil, joinedResult)
+ throw new UnsupportedOperationException
case Token(allJoinTokens(joinToken),
relation1 ::
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
index 7a6ca48b54..8613332186 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
@@ -194,10 +194,9 @@ case class InsertIntoHiveTable(
if (partition.nonEmpty) {
// loadPartition call orders directories created on the iteration order of the this map
- val orderedPartitionSpec = new util.LinkedHashMap[String,String]()
- table.hiveQlTable.getPartCols().foreach{
- entry=>
- orderedPartitionSpec.put(entry.getName,partitionSpec.get(entry.getName).getOrElse(""))
+ val orderedPartitionSpec = new util.LinkedHashMap[String, String]()
+ table.hiveQlTable.getPartCols().foreach { entry =>
+ orderedPartitionSpec.put(entry.getName, partitionSpec.get(entry.getName).getOrElse(""))
}
val partVals = MetaStoreUtils.getPvals(table.hiveQlTable.getPartCols, partitionSpec)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
index bfd26e0170..6f27a8626f 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
@@ -216,7 +216,7 @@ case class HiveScriptIOSchema (
val columnTypes = attrs.map {
case aref: AttributeReference => aref.dataType
case e: NamedExpression => e.dataType
- case _ => null
+ case _ => null
}
(columns, columnTypes)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
index 7ec4f73325..bb116e3ab7 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
@@ -315,7 +315,7 @@ private[hive] case class HiveWindowFunction(
// The object inspector of values returned from the Hive window function.
@transient
- protected lazy val returnInspector = {
+ protected lazy val returnInspector = {
evaluator.init(GenericUDAFEvaluator.Mode.COMPLETE, inputInspectors)
}
@@ -410,7 +410,7 @@ private[hive] case class HiveGenericUdaf(
protected lazy val resolver: AbstractGenericUDAFResolver = funcWrapper.createFunction()
@transient
- protected lazy val objectInspector = {
+ protected lazy val objectInspector = {
val parameterInfo = new SimpleGenericUDAFParameterInfo(inspectors.toArray, false, false)
resolver.getEvaluator(parameterInfo)
.init(GenericUDAFEvaluator.Mode.COMPLETE, inspectors.toArray)
@@ -443,7 +443,7 @@ private[hive] case class HiveUdaf(
new GenericUDAFBridge(funcWrapper.createFunction())
@transient
- protected lazy val objectInspector = {
+ protected lazy val objectInspector = {
val parameterInfo = new SimpleGenericUDAFParameterInfo(inspectors.toArray, false, false)
resolver.getEvaluator(parameterInfo)
.init(GenericUDAFEvaluator.Mode.COMPLETE, inspectors.toArray)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveWriterContainers.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveWriterContainers.scala
index 50b209f7cc..2bb526b14b 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveWriterContainers.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveWriterContainers.scala
@@ -71,7 +71,7 @@ private[hive] class SparkHiveWriterContainer(
@transient protected lazy val jobContext = newJobContext(conf.value, jID.value)
@transient private lazy val taskContext = newTaskAttemptContext(conf.value, taID.value)
@transient private lazy val outputFormat =
- conf.value.getOutputFormat.asInstanceOf[HiveOutputFormat[AnyRef,Writable]]
+ conf.value.getOutputFormat.asInstanceOf[HiveOutputFormat[AnyRef, Writable]]
def driverSideSetup() {
setIDs(0, 0, 0)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
index 2e06cabfa8..7c7afc824d 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
@@ -189,7 +189,7 @@ class TestHiveContext(sc: SparkContext) extends HiveContext(sc) {
}
}
- case class TestTable(name: String, commands: (()=>Unit)*)
+ case class TestTable(name: String, commands: (() => Unit)*)
protected[hive] implicit class SqlCmd(sql: String) {
def cmd: () => Unit = {
@@ -253,8 +253,8 @@ class TestHiveContext(sc: SparkContext) extends HiveContext(sc) {
| 'serialization.format'='${classOf[TBinaryProtocol].getName}'
|)
|STORED AS
- |INPUTFORMAT '${classOf[SequenceFileInputFormat[_,_]].getName}'
- |OUTPUTFORMAT '${classOf[SequenceFileOutputFormat[_,_]].getName}'
+ |INPUTFORMAT '${classOf[SequenceFileInputFormat[_, _]].getName}'
+ |OUTPUTFORMAT '${classOf[SequenceFileOutputFormat[_, _]].getName}'
""".stripMargin)
runSqlHive(