author     Reynold Xin <rxin@databricks.com>    2015-05-31 00:48:49 -0700
committer  Reynold Xin <rxin@databricks.com>    2015-05-31 00:48:49 -0700
commit     63a50be13d32b9e5f3aad8d1a6ba5362f17a252f (patch)
tree       cd08e81b5effe718382d502306e076f0ccacf6f6 /sql/hive
parent     564bc11e9827915c8652bc06f4bd591809dea4b1 (diff)
[SPARK-3850] Trim trailing spaces for SQL.
Author: Reynold Xin <rxin@databricks.com>

Closes #6535 from rxin/whitespace-sql and squashes the following commits:

de50316 [Reynold Xin] [SPARK-3850] Trim trailing spaces for SQL.
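The change itself is purely mechanical: every hunk below removes spaces or tabs that sat immediately before a line break. The commit does not include the script that produced it; the following is a minimal, hypothetical Scala sketch of how such a cleanup could be reproduced (the object name TrimTrailingSpaces and the sql/hive default root are illustrative assumptions, not taken from the PR):

import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Paths}
import scala.collection.JavaConverters._

// Hypothetical helper, not part of this PR: walks a source tree and strips
// spaces/tabs that appear immediately before a line break in .scala files.
object TrimTrailingSpaces {
  def main(args: Array[String]): Unit = {
    val root = Paths.get(args.headOption.getOrElse("sql/hive"))  // assumed default root
    Files.walk(root).iterator().asScala
      .filter(_.toString.endsWith(".scala"))
      .foreach { path =>
        val text = new String(Files.readAllBytes(path), StandardCharsets.UTF_8)
        // The lookahead keeps the line break itself; only the trailing blanks go.
        // Note: a final line without a terminating newline is left alone.
        val trimmed = text.replaceAll("[ \\t]+(?=\\r?\\n)", "")
        if (trimmed != text) {
          Files.write(path, trimmed.getBytes(StandardCharsets.UTF_8))
          println(s"trimmed: $path")
        }
      }
  }
}

With GNU sed, something like sed -i 's/[ \t]*$//' applied per file achieves the same result. Either way the edit is symmetric, one deletion paired with one insertion per affected line, which is why the diffstat below reports exactly as many insertions as deletions.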
Diffstat (limited to 'sql/hive')
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/client/package.scala                 |  2
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScan.scala        |  4
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala | 18
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala                       | 10
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala               |  2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala       |  2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala           |  6
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala   |  8
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUdfSuite.scala         |  2
9 files changed, 27 insertions, 27 deletions
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/package.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/package.scala
index 7db9200d47..410d9881ac 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/package.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/package.scala
@@ -29,5 +29,5 @@ package object client {
case object v13 extends HiveVersion("0.13.1", false)
}
// scalastyle:on
-
+
 }
\ No newline at end of file
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScan.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScan.scala
index 62dc4167b7..11ee550314 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScan.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScan.scala
@@ -63,7 +63,7 @@ case class HiveTableScan(
BindReferences.bindReference(pred, relation.partitionKeys)
}
- // Create a local copy of hiveconf,so that scan specific modifications should not impact
+ // Create a local copy of hiveconf,so that scan specific modifications should not impact
// other queries
@transient
private[this] val hiveExtraConf = new HiveConf(context.hiveconf)
@@ -72,7 +72,7 @@ case class HiveTableScan(
addColumnMetadataToConf(hiveExtraConf)
@transient
- private[this] val hadoopReader =
+ private[this] val hadoopReader =
new HadoopTableReader(attributes, relation, context, hiveExtraConf)
private[this] def castFromString(value: String, dataType: DataType) = {
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
index 6f27a8626f..fd623370cc 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
@@ -62,7 +62,7 @@ case class ScriptTransformation(
val inputStream = proc.getInputStream
val outputStream = proc.getOutputStream
val reader = new BufferedReader(new InputStreamReader(inputStream))
-
+
val (outputSerde, outputSoi) = ioschema.initOutputSerDe(output)
val iterator: Iterator[Row] = new Iterator[Row] with HiveInspectors {
@@ -95,7 +95,7 @@ case class ScriptTransformation(
val raw = outputSerde.deserialize(writable)
val dataList = outputSoi.getStructFieldsDataAsList(raw)
val fieldList = outputSoi.getAllStructFieldRefs()
-
+
var i = 0
dataList.foreach( element => {
if (element == null) {
@@ -117,7 +117,7 @@ case class ScriptTransformation(
if (!hasNext) {
throw new NoSuchElementException
}
-
+
if (outputSerde == null) {
val prevLine = curLine
curLine = reader.readLine()
@@ -192,7 +192,7 @@ case class HiveScriptIOSchema (
val inputRowFormatMap = inputRowFormat.toMap.withDefault((k) => defaultFormat(k))
val outputRowFormatMap = outputRowFormat.toMap.withDefault((k) => defaultFormat(k))
-
+
def initInputSerDe(input: Seq[Expression]): (AbstractSerDe, ObjectInspector) = {
val (columns, columnTypes) = parseAttrs(input)
val serde = initSerDe(inputSerdeClass, columns, columnTypes, inputSerdeProps)
@@ -206,13 +206,13 @@ case class HiveScriptIOSchema (
}
def parseAttrs(attrs: Seq[Expression]): (Seq[String], Seq[DataType]) = {
-
+
val columns = attrs.map {
case aref: AttributeReference => aref.name
case e: NamedExpression => e.name
case _ => null
}
-
+
val columnTypes = attrs.map {
case aref: AttributeReference => aref.dataType
case e: NamedExpression => e.dataType
@@ -221,7 +221,7 @@ case class HiveScriptIOSchema (
(columns, columnTypes)
}
-
+
def initSerDe(serdeClassName: String, columns: Seq[String],
columnTypes: Seq[DataType], serdeProps: Seq[(String, String)]): AbstractSerDe = {
@@ -240,7 +240,7 @@ case class HiveScriptIOSchema (
(kv._1.split("'")(1), kv._2.split("'")(1))
}).toMap + (serdeConstants.LIST_COLUMNS -> columns.mkString(","))
propsMap = propsMap + (serdeConstants.LIST_COLUMN_TYPES -> columnTypesNames)
-
+
val properties = new Properties()
properties.putAll(propsMap)
serde.initialize(null, properties)
@@ -261,7 +261,7 @@ case class HiveScriptIOSchema (
null
}
}
-
+
def initOutputputSoi(outputSerde: AbstractSerDe): StructObjectInspector = {
if (outputSerde != null) {
outputSerde.getObjectInspector().asInstanceOf[StructObjectInspector]
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
index bb116e3ab7..64a49c83cb 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
@@ -555,12 +555,12 @@ private[hive] case class HiveUdafFunction(
} else {
funcWrapper.createFunction[AbstractGenericUDAFResolver]()
}
-
+
private val inspectors = exprs.map(toInspector).toArray
-
- private val function = {
+
+ private val function = {
val parameterInfo = new SimpleGenericUDAFParameterInfo(inspectors, false, false)
- resolver.getEvaluator(parameterInfo)
+ resolver.getEvaluator(parameterInfo)
}
private val returnInspector = function.init(GenericUDAFEvaluator.Mode.COMPLETE, inspectors)
@@ -575,7 +575,7 @@ private[hive] case class HiveUdafFunction(
@transient
protected lazy val cached = new Array[AnyRef](exprs.length)
-
+
def update(input: Row): Unit = {
val inputs = inputProjection(input)
function.iterate(buffer, wrap(inputs, inspectors, cached))
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
index 945596db80..39d315aaea 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala
@@ -57,7 +57,7 @@ class CachedTableSuite extends QueryTest {
checkAnswer(
sql("SELECT * FROM src s"),
preCacheResults)
-
+
uncacheTable("src")
assertCached(sql("SELECT * FROM src"), 0)
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
index 9cc4685499..aa5dbe2db6 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertIntoHiveTableSuite.scala
@@ -240,7 +240,7 @@ class InsertIntoHiveTableSuite extends QueryTest with BeforeAndAfter {
checkAnswer(sql("select key,value from table_with_partition where ds='1' "),
testData.collect().toSeq
)
-
+
// test difference type of field
sql("ALTER TABLE table_with_partition CHANGE COLUMN key key BIGINT")
checkAnswer(sql("select key,value from table_with_partition where ds='1' "),
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala
index 446a2f2d64..7eb4842726 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala
@@ -22,9 +22,9 @@ import org.apache.spark.sql.catalyst.util.quietly
import org.apache.spark.util.Utils
/**
- * A simple set of tests that call the methods of a hive ClientInterface, loading different version
- * of hive from maven central. These tests are simple in that they are mostly just testing to make
- * sure that reflective calls are not throwing NoSuchMethod error, but the actually functionallity
+ * A simple set of tests that call the methods of a hive ClientInterface, loading different version
+ * of hive from maven central. These tests are simple in that they are mostly just testing to make
+ * sure that reflective calls are not throwing NoSuchMethod error, but the actually functionality
* is not fully tested.
*/
class VersionsSuite extends SparkFunSuite with Logging {
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala
index 0ba4d11478..2209fc2f30 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala
@@ -61,7 +61,7 @@ class HiveTableScanSuite extends HiveComparisonTest {
TestHive.sql("select KEY from tb where VALUE='just_for_test' limit 5").collect()
TestHive.sql("drop table tb")
}
-
+
test("Spark-4077: timestamp query for null value") {
TestHive.sql("DROP TABLE IF EXISTS timestamp_query_null")
TestHive.sql(
@@ -71,11 +71,11 @@ class HiveTableScanSuite extends HiveComparisonTest {
FIELDS TERMINATED BY ','
LINES TERMINATED BY '\n'
""".stripMargin)
- val location =
+ val location =
Utils.getSparkClassLoader.getResource("data/files/issue-4077-data.txt").getFile()
-
+
TestHive.sql(s"LOAD DATA LOCAL INPATH '$location' INTO TABLE timestamp_query_null")
- assert(TestHive.sql("SELECT time from timestamp_query_null limit 2").collect()
+ assert(TestHive.sql("SELECT time from timestamp_query_null limit 2").collect()
=== Array(Row(java.sql.Timestamp.valueOf("2014-12-11 00:00:00")), Row(null)))
TestHive.sql("DROP TABLE timestamp_query_null")
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUdfSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUdfSuite.scala
index 7f49eac490..ce5985888f 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUdfSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUdfSuite.scala
@@ -101,7 +101,7 @@ class HiveUdfSuite extends QueryTest {
sql("DROP TEMPORARY FUNCTION IF EXISTS test_avg")
TestHive.reset()
}
-
+
test("SPARK-2693 udaf aggregates test") {
checkAnswer(sql("SELECT percentile(key, 1) FROM src LIMIT 1"),
sql("SELECT max(key) FROM src").collect().toSeq)