path: root/sql/hive/src
author     Cheng Lian <lian.cs.zju@gmail.com>      2014-03-26 15:36:18 -0700
committer  Patrick Wendell <pwendell@gmail.com>    2014-03-26 15:36:18 -0700
commit  345825d97987b9eeb2afcf002f815a05ff51fc2e (patch)
tree    677dd5c9514680831ba238f7d0f61a37f0e302fa /sql/hive/src
parent  a0853a39e1907700737ec924367cd2610da8395a (diff)
Unified package definition format in Spark SQL
According to discussions in comments of PR #208, this PR unifies package definition format in Spark SQL. Some broken links in ScalaDoc and typos detected along the way are also fixed.

Author: Cheng Lian <lian.cs.zju@gmail.com>

Closes #225 from liancheng/packageDefinition and squashes the following commits:

75c47b3 [Cheng Lian] Fixed file line length
4f87968 [Cheng Lian] Unified package definition format in Spark SQL
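For context, the refactoring replaces Scala's chained package clauses with a single dotted clause. The two forms are not equivalent: chained clauses bring the members of every enclosing package into scope, so after unification, names that previously resolved implicitly (such as Logging from org.apache.spark.sql) need explicit imports — which is why several hunks below add new import lines. A minimal sketch of the difference, using the hypothetical package org.example and hypothetical class names OldStyle/NewStyle (Logging stands in for the real org.apache.spark.sql.Logging); nested package blocks keep the sketch compilable in one file:

    package org.example {
      trait Logging {
        def logInfo(msg: String): Unit = println(msg)
      }
    }

    // Old style: chained/nested clauses open org.example AND
    // org.example.hive, so Logging resolves without an import.
    package org.example {
      package hive {
        class OldStyle extends Logging
      }
    }

    // New style: a single dotted clause opens only org.example.hive;
    // the parent-package member must be imported explicitly.
    package org.example.hive {
      import org.example.Logging

      class NewStyle extends Logging
    }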
Diffstat (limited to 'sql/hive/src')
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala | 13
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala | 38
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala | 8
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala | 3
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala | 4
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/ScriptTransformation.scala | 3
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala | 3
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/TestHive.scala | 3
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveOperators.scala | 3
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala | 4
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/BigDataBenchmarkSuite.scala | 10
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ConcurrentHiveSuite.scala | 9
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala | 9
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala | 6
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQueryFileTest.scala | 4
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala | 8
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala | 7
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeSuite.scala | 4
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTypeCoercionSuite.scala | 3
-rw-r--r--  sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PruningSuite.scala | 6
20 files changed, 64 insertions(+), 84 deletions(-)
diff --git a/sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala b/sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
index d96c2f70e0..7219c030cb 100644
--- a/sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
@@ -113,11 +113,10 @@ class SparkHiveHadoopWriter(
cmtr.commitTask(taCtxt)
logInfo (taID + ": Committed")
} catch {
- case e: IOException => {
+ case e: IOException =>
logError("Error committing the output of task: " + taID.value, e)
cmtr.abortTask(taCtxt)
throw e
- }
}
} else {
logWarning ("No need to commit output of task: " + taID.value)
@@ -161,12 +160,12 @@ class SparkHiveHadoopWriter(
taskContext
}
- private def setIDs(jobid: Int, splitid: Int, attemptid: Int) {
- jobID = jobid
- splitID = splitid
- attemptID = attemptid
+ private def setIDs(jobId: Int, splitId: Int, attemptId: Int) {
+ jobID = jobId
+ splitID = splitId
+ attemptID = attemptId
- jID = new SerializableWritable[JobID](SparkHadoopWriter.createJobID(now, jobid))
+ jID = new SerializableWritable[JobID](SparkHadoopWriter.createJobID(now, jobId))
taID = new SerializableWritable[TaskAttemptID](
new TaskAttemptID(new TaskID(jID.value, true, splitID), attemptID))
}
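The SparkHadoopWriter hunks above are purely stylistic: in Scala the body of a case clause is already a block, so the braces around the IOException handler were redundant, and the setIDs parameters move to conventional camelCase. A standalone sketch of the brace-free handler, where commit/abort are hypothetical stand-ins for the real OutputCommitter calls:

    import java.io.IOException

    object CatchCaseStyle {
      // Hypothetical stand-ins for the committer calls in the real code.
      private def commit(): Unit = throw new IOException("simulated failure")
      private def abort(): Unit = println("task aborted")

      def commitTask(taskId: String): Unit =
        try {
          commit()
          println(s"$taskId: Committed")
        } catch {
          // A case body is already a block: multiple statements need no braces.
          case e: IOException =>
            println(s"Error committing the output of task: $taskId")
            abort()
            throw e
        }

      def main(args: Array[String]): Unit =
        try commitTask("attempt_0001") catch { case _: IOException => () }
    }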
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
index 3bcf586662..fc5057b73f 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveContext.scala
@@ -32,7 +32,7 @@ import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.analysis.{Analyzer, OverrideCatalog}
import org.apache.spark.sql.catalyst.expressions.GenericRow
-import org.apache.spark.sql.catalyst.plans.logical.{BaseRelation, LogicalPlan, LowerCaseSchema}
+import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, LowerCaseSchema}
import org.apache.spark.sql.catalyst.plans.logical.{NativeCommand, ExplainCommand}
import org.apache.spark.sql.catalyst.types._
import org.apache.spark.sql.execution._
@@ -86,7 +86,7 @@ class HiveContext(sc: SparkContext) extends SQLContext(sc) {
val input = new java.io.InputStream {
val iterator = (start ++ end).iterator
- def read(): Int = if (iterator.hasNext) iterator.next else -1
+ def read(): Int = if (iterator.hasNext) iterator.next() else -1
}
val reader = new BufferedReader(new InputStreamReader(input))
val stringBuilder = new StringBuilder
@@ -148,24 +148,24 @@ class HiveContext(sc: SparkContext) extends SQLContext(sc) {
SessionState.start(sessionState)
- if (proc.isInstanceOf[Driver]) {
- val driver: Driver = proc.asInstanceOf[Driver]
- driver.init()
+ proc match {
+ case driver: Driver =>
+ driver.init()
- val results = new JArrayList[String]
- val response: CommandProcessorResponse = driver.run(cmd)
- // Throw an exception if there is an error in query processing.
- if (response.getResponseCode != 0) {
+ val results = new JArrayList[String]
+ val response: CommandProcessorResponse = driver.run(cmd)
+ // Throw an exception if there is an error in query processing.
+ if (response.getResponseCode != 0) {
+ driver.destroy()
+ throw new QueryExecutionException(response.getErrorMessage)
+ }
+ driver.setMaxRows(maxRows)
+ driver.getResults(results)
driver.destroy()
- throw new QueryExecutionException(response.getErrorMessage)
- }
- driver.setMaxRows(maxRows)
- driver.getResults(results)
- driver.destroy()
- results
- } else {
- sessionState.out.println(tokens(0) + " " + cmd_1)
- Seq(proc.run(cmd_1).getResponseCode.toString)
+ results
+ case _ =>
+ sessionState.out.println(tokens(0) + " " + cmd_1)
+ Seq(proc.run(cmd_1).getResponseCode.toString)
}
} catch {
case e: Exception =>
@@ -226,7 +226,7 @@ class HiveContext(sc: SparkContext) extends SQLContext(sc) {
sparkContext.parallelize(asRows, 1)
}
case _ =>
- executedPlan.execute.map(_.copy())
+ executedPlan.execute().map(_.copy())
}
protected val primitiveTypes =
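Two idioms recur in the HiveContext hunks above: replacing an isInstanceOf/asInstanceOf pair with a pattern match, and adding empty parentheses to calls like next() and execute() to follow the convention that side-effecting methods are invoked with parentheses. A simplified sketch of the first refactoring, using a hypothetical Processor hierarchy in place of Hive's CommandProcessor/Driver:

    sealed trait Processor
    final class Driver extends Processor {
      def init(): Unit = ()
      def run(cmd: String): Int = 0
    }
    final class SimpleProcessor extends Processor {
      def run(cmd: String): Int = 0
    }

    object ProcessorDemo {
      // Before: explicit type test plus cast, easy to get out of sync.
      def runCommandOld(proc: Processor, cmd: String): Int =
        if (proc.isInstanceOf[Driver]) {
          val driver = proc.asInstanceOf[Driver]
          driver.init()
          driver.run(cmd)
        } else {
          proc.asInstanceOf[SimpleProcessor].run(cmd)
        }

      // After: one pattern match binds the narrowed type directly,
      // and the compiler checks exhaustiveness on the sealed trait.
      def runCommand(proc: Processor, cmd: String): Int = proc match {
        case driver: Driver =>
          driver.init()
          driver.run(cmd)
        case simple: SimpleProcessor =>
          simple.run(cmd)
      }

      def main(args: Array[String]): Unit =
        println(runCommand(new Driver, "SELECT 1"))
    }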
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
index 1667a21729..4f8353666a 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
@@ -15,8 +15,7 @@
* limitations under the License.
*/
-package org.apache.spark.sql
-package hive
+package org.apache.spark.sql.hive
import scala.util.parsing.combinator.RegexParsers
@@ -27,14 +26,15 @@ import org.apache.hadoop.hive.ql.plan.TableDesc
import org.apache.hadoop.hive.ql.session.SessionState
import org.apache.hadoop.hive.serde2.Deserializer
-
-import org.apache.spark.sql.catalyst.analysis.{Catalog, EliminateAnalysisOperators}
+import org.apache.spark.sql.Logging
+import org.apache.spark.sql.catalyst.analysis.{EliminateAnalysisOperators, Catalog}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.logical
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.catalyst.types._
+/* Implicit conversions */
import scala.collection.JavaConversions._
class HiveMetastoreCatalog(hive: HiveContext) extends Catalog with Logging {
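The "/* Implicit conversions */" comment added above flags an import with non-local effects: scala.collection.JavaConversions._ silently converts between Java and Scala collections wherever one is used in place of the other, which matters in this file because the Hive metastore API returns java.util collections. A small standalone sketch of what the import enables (this API was standard in the Scala 2.10 era of this commit; later Scala versions deprecate it in favor of explicit converters):

    import java.util.{ArrayList => JArrayList}

    /* Implicit conversions */
    import scala.collection.JavaConversions._

    object JavaConversionsDemo {
      def main(args: Array[String]): Unit = {
        val javaList = new JArrayList[String]()
        javaList.add("key")
        javaList.add("value")

        // With the import in scope, a java.util.List is implicitly wrapped
        // as a Scala Buffer, so map, filter, etc. just work on it.
        val upper: Seq[String] = javaList.map(_.toUpperCase)
        println(upper.mkString(", "))
      }
    }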
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
index 8e76a7348e..f4b61381f9 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveQl.scala
@@ -15,8 +15,7 @@
* limitations under the License.
*/
-package org.apache.spark.sql
-package hive
+package org.apache.spark.sql.hive
import org.apache.hadoop.hive.ql.lib.Node
import org.apache.hadoop.hive.ql.parse._
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala
index 141067247d..3ca1d93c11 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala
@@ -15,9 +15,9 @@
* limitations under the License.
*/
-package org.apache.spark.sql
-package hive
+package org.apache.spark.sql.hive
+import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.planning._
import org.apache.spark.sql.catalyst.plans._
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/ScriptTransformation.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/ScriptTransformation.scala
index dc4181ec99..2610100043 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/ScriptTransformation.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/ScriptTransformation.scala
@@ -15,8 +15,7 @@
* limitations under the License.
*/
-package org.apache.spark.sql
-package hive
+package org.apache.spark.sql.hive
import java.io.{BufferedReader, InputStreamReader}
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
index 99dc85ec19..ca53113446 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
@@ -15,8 +15,7 @@
* limitations under the License.
*/
-package org.apache.spark.sql
-package hive
+package org.apache.spark.sql.hive
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{Path, PathFilter}
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TestHive.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TestHive.scala
index a26b0ff231..bc3447b9d8 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TestHive.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TestHive.scala
@@ -15,8 +15,7 @@
* limitations under the License.
*/
-package org.apache.spark.sql
-package hive
+package org.apache.spark.sql.hive
import java.io.File
import java.util.{Set => JavaSet}
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveOperators.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveOperators.scala
index 78f69e7ff5..e2d9d8de25 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveOperators.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveOperators.scala
@@ -15,8 +15,7 @@
* limitations under the License.
*/
-package org.apache.spark.sql
-package hive
+package org.apache.spark.sql.hive
import org.apache.hadoop.hive.common.`type`.{HiveDecimal, HiveVarchar}
import org.apache.hadoop.hive.metastore.MetaStoreUtils
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
index 72ccd4f4a4..44901db3f9 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUdfs.scala
@@ -15,8 +15,7 @@
* limitations under the License.
*/
-package org.apache.spark.sql
-package hive
+package org.apache.spark.sql.hive
import scala.collection.mutable.ArrayBuffer
@@ -29,6 +28,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive._
import org.apache.hadoop.hive.serde2.{io => hiveIo}
import org.apache.hadoop.{io => hadoopIo}
+import org.apache.spark.sql.Logging
import org.apache.spark.sql.catalyst.analysis
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.types
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/BigDataBenchmarkSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/BigDataBenchmarkSuite.scala
index 4b45e69860..8488f23abd 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/BigDataBenchmarkSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/BigDataBenchmarkSuite.scala
@@ -15,19 +15,17 @@
* limitations under the License.
*/
-package org.apache.spark.sql
-package hive
-package execution
+package org.apache.spark.sql.hive.execution
import java.io.File
+import org.apache.spark.sql.hive.TestHive._
+
/**
* A set of test cases based on the big-data-benchmark.
* https://amplab.cs.berkeley.edu/benchmark/
*/
class BigDataBenchmarkSuite extends HiveComparisonTest {
- import TestHive._
-
val testDataDirectory = new File("target/big-data-benchmark-testdata")
val testTables = Seq(
@@ -123,4 +121,4 @@ class BigDataBenchmarkSuite extends HiveComparisonTest {
|-- SELECT * FROM url_counts_total
""".stripMargin)
}
-} \ No newline at end of file
+}
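The test-suite hunks all follow the pattern seen here: import TestHive._ moves from the class body to the top of the file, fully qualified. Under the old chained packages, the enclosing hive package put the short name TestHive in scope automatically; with a single package clause it no longer is, so the wildcard import must spell out the full path. A minimal sketch with a hypothetical TestFixture object standing in for TestHive, again using nested package blocks to stay compilable in one file:

    package org.example.fixtures {
      // Hypothetical stand-in for TestHive: a fixture object whose
      // members test suites pull in with a wildcard import.
      object TestFixture {
        def runQuery(sql: String): Unit = println(s"running: $sql")
      }
    }

    package org.example.execution {
      // Fully qualified, file-scope import replaces the old in-class
      // `import TestHive._` that relied on package nesting to resolve.
      import org.example.fixtures.TestFixture._

      class ExampleSuite {
        // runQuery comes from TestFixture via the wildcard import.
        def simpleAverage(): Unit = runQuery("SELECT AVG(key) FROM src")
      }
    }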
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ConcurrentHiveSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ConcurrentHiveSuite.scala
index 02ee2a0ebc..ac87f2cb10 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ConcurrentHiveSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/ConcurrentHiveSuite.scala
@@ -15,12 +15,11 @@
* limitations under the License.
*/
-package org.apache.spark
-package sql
-package hive
-package execution
+package org.apache.spark.sql.hive.execution
-import org.scalatest.{FunSuite, BeforeAndAfterAll}
+import org.apache.spark.{SparkConf, SparkContext}
+import org.apache.spark.sql.hive.TestHiveContext
+import org.scalatest.{BeforeAndAfterAll, FunSuite}
class ConcurrentHiveSuite extends FunSuite with BeforeAndAfterAll {
ignore("multiple instances not supported") {
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
index e8fcc27235..c7a350ef94 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala
@@ -15,17 +15,16 @@
* limitations under the License.
*/
-package org.apache.spark.sql
-package hive
-package execution
+package org.apache.spark.sql.hive.execution
import java.io._
-import org.scalatest.{BeforeAndAfterAll, FunSuite, GivenWhenThen}
-
+import org.apache.spark.sql.Logging
import org.apache.spark.sql.catalyst.plans.logical.{ExplainCommand, NativeCommand}
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.execution.Sort
+import org.scalatest.{BeforeAndAfterAll, FunSuite, GivenWhenThen}
+import org.apache.spark.sql.hive.TestHive
/**
* Allows the creation of tests that execute the same query against both hive
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala
index d5b54fa90f..f74b0fbb97 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala
@@ -15,9 +15,9 @@
* limitations under the License.
*/
-package org.apache.spark.sql
-package hive
-package execution
+package org.apache.spark.sql.hive.execution
+
+import org.apache.spark.sql.hive.TestHive
/**
* Runs the test cases that are included in the hive distribution.
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQueryFileTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQueryFileTest.scala
index 2d2f13333a..50ab71a900 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQueryFileTest.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQueryFileTest.scala
@@ -15,9 +15,7 @@
* limitations under the License.
*/
-package org.apache.spark.sql
-package hive
-package execution
+package org.apache.spark.sql.hive.execution
import java.io.File
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
index b804634db1..4b92d167a1 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
@@ -15,16 +15,14 @@
* limitations under the License.
*/
-package org.apache.spark.sql
-package hive
-package execution
+package org.apache.spark.sql.hive.execution
+
+import org.apache.spark.sql.hive.TestHive._
/**
* A set of test cases expressed in Hive QL that are not covered by the tests included in the hive distribution.
*/
class HiveQuerySuite extends HiveComparisonTest {
- import TestHive._
-
createQueryTest("Simple Average",
"SELECT AVG(key) FROM src")
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala
index 4bdea21467..d77900ddc9 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveResolutionSuite.scala
@@ -15,11 +15,10 @@
* limitations under the License.
*/
-package org.apache.spark.sql
-package hive
-package execution
+package org.apache.spark.sql.hive.execution
-import TestHive._
+import org.apache.spark.sql.hive.TestHive
+import org.apache.spark.sql.hive.TestHive._
case class Data(a: Int, B: Int, n: Nested)
case class Nested(a: Int, B: Int)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeSuite.scala
index c2264926f4..df9bae9649 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeSuite.scala
@@ -15,9 +15,7 @@
* limitations under the License.
*/
-package org.apache.spark.sql
-package hive
-package execution
+package org.apache.spark.sql.hive.execution
/**
* A set of tests that validates support for Hive SerDe.
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTypeCoercionSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTypeCoercionSuite.scala
index bb33583e5f..e030c8ee3d 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTypeCoercionSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTypeCoercionSuite.scala
@@ -21,7 +21,6 @@ package org.apache.spark.sql.hive.execution
* A set of tests that validate type promotion rules.
*/
class HiveTypeCoercionSuite extends HiveComparisonTest {
-
val baseTypes = Seq("1", "1.0", "1L", "1S", "1Y", "'1'")
baseTypes.foreach { i =>
@@ -29,4 +28,4 @@ class HiveTypeCoercionSuite extends HiveComparisonTest {
createQueryTest(s"$i + $j", s"SELECT $i + $j FROM src LIMIT 1")
}
}
-} \ No newline at end of file
+}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PruningSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PruningSuite.scala
index d2f8e5df5b..1318ac1968 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PruningSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PruningSuite.scala
@@ -15,11 +15,9 @@
* limitations under the License.
*/
-package org.apache.spark.sql
-package hive
-package execution
+package org.apache.spark.sql.hive.execution
-import org.apache.spark.sql.hive.TestHive
+import org.apache.spark.sql.hive.{HiveTableScan, TestHive}
/* Implicit conversions */
import scala.collection.JavaConversions._