path: root/sql/hive
author     Dongjoon Hyun <dongjoon@apache.org>   2016-04-03 18:14:16 -0700
committer  Reynold Xin <rxin@databricks.com>     2016-04-03 18:14:16 -0700
commit  3f749f7ed443899d667c9e2b2a11bc595d6fc7f6 (patch)
tree    15738bedb4fe8db3a018e6a5c63e635ac0d4009e /sql/hive
parent  9023015f059327b3ce4a7eaf71e57ac77b84ad7b (diff)
[SPARK-14355][BUILD] Fix typos in Exception/Testcase/Comments and static analysis results
## What changes were proposed in this pull request?

This PR contains the following 5 types of maintenance fixes across 59 files (+94 lines, -93 lines):
- Fix typos (exception/log strings, testcase names, comments) in 44 lines.
- Fix lint-java errors (MaxLineLength) in 6 lines. (New code after SPARK-14011)
- Use diamond operators in 40 lines. (New code after SPARK-13702)
- Remove redundant semicolons in 5 lines.
- Rename class `InferSchemaSuite` to `CSVInferSchemaSuite` in CSVInferSchemaSuite.scala.

## How was this patch tested?

Manual tests, plus a pass of the Jenkins tests.

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #12139 from dongjoon-hyun/SPARK-14355.
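As a reader's aid, here is a minimal Java sketch of two of the cleanup categories named above: the diamond operator and redundant semicolons. It is illustrative only and is not taken from this commit; the class and variable names are hypothetical.

```java
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;

// Hypothetical example class; not part of the Spark codebase.
public class MaintenanceFixExample {
  public static void main(String[] args) {
    // Before (the style SPARK-13702 moved away from): type arguments
    // spelled out again on the right-hand side.
    Map<String, Queue<Integer>> verbose = new HashMap<String, Queue<Integer>>();

    // After: the diamond operator (Java 7+) lets the compiler infer the
    // right-hand-side type arguments from the declared type.
    Map<String, Queue<Integer>> concise = new HashMap<>();

    verbose.put("partitions", new LinkedList<Integer>());
    concise.put("partitions", new LinkedList<>());
    System.out.println(verbose.equals(concise)); // prints: true

    int count = 0;  // a redundant trailing semicolon (e.g. "int count = 0;;")
                    // compiles as an empty statement but trips static analysis
    System.out.println(count);
  }
}
```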
Diffstat (limited to 'sql/hive')
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala                | 4
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala | 2
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala                | 4
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
index 4afc8d18a6..9393302355 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
@@ -380,8 +380,8 @@ class TestHiveContext private[hive](
""".stripMargin.cmd,
s"LOAD DATA LOCAL INPATH '${getHiveFile("data/files/episodes.avro")}' INTO TABLE episodes".cmd
),
- // THIS TABLE IS NOT THE SAME AS THE HIVE TEST TABLE episodes_partitioned AS DYNAMIC PARITIONING
- // IS NOT YET SUPPORTED
+ // THIS TABLE IS NOT THE SAME AS THE HIVE TEST TABLE episodes_partitioned AS DYNAMIC
+ // PARTITIONING IS NOT YET SUPPORTED
TestTable("episodes_part",
s"""CREATE TABLE episodes_part (title STRING, air_date STRING, doctor INT)
|PARTITIONED BY (doctor_pt INT)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
index 4c1b425b16..e67fcbedc3 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
@@ -482,7 +482,7 @@ abstract class HiveComparisonTest
val tablesGenerated = queryList.zip(executions).flatMap {
// We should take executedPlan instead of sparkPlan, because in following codes we
// will run the collected plans. As we will do extra processing for sparkPlan such
- // as adding exchage, collapsing codegen stages, etc., collecing sparkPlan here
+ // as adding exchange, collapsing codegen stages, etc., collecting sparkPlan here
// will cause some errors when running these plans later.
case (q, e) => e.executedPlan.collect {
case i: InsertIntoHiveTable if tablesRead contains i.table.tableName =>
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
index b6fc61d453..eac65d5720 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/parquetSuites.scala
@@ -311,7 +311,7 @@ class ParquetMetastoreSuite extends ParquetPartitioningTest {
case ExecutedCommand(_: InsertIntoHadoopFsRelation) => // OK
case o => fail("test_insert_parquet should be converted to a " +
s"${classOf[HadoopFsRelation ].getCanonicalName} and " +
- s"${classOf[InsertIntoDataSource].getCanonicalName} is expcted as the SparkPlan. " +
+ s"${classOf[InsertIntoDataSource].getCanonicalName} is expected as the SparkPlan. " +
s"However, found a ${o.toString} ")
}
@@ -341,7 +341,7 @@ class ParquetMetastoreSuite extends ParquetPartitioningTest {
case ExecutedCommand(_: InsertIntoHadoopFsRelation) => // OK
case o => fail("test_insert_parquet should be converted to a " +
s"${classOf[HadoopFsRelation ].getCanonicalName} and " +
- s"${classOf[InsertIntoDataSource].getCanonicalName} is expcted as the SparkPlan." +
+ s"${classOf[InsertIntoDataSource].getCanonicalName} is expected as the SparkPlan." +
s"However, found a ${o.toString} ")
}