author     Josh Rosen <joshrosen@databricks.com>  2015-04-30 16:23:01 -0700
committer  Reynold Xin <rxin@databricks.com>      2015-04-30 16:23:01 -0700
commit     fa01bec484fc000e0a31645b722ffde48556c4df (patch)
tree       5dca71204451ebaeaac51bca9f7f254884983e28
parent     77cc25fb7473d8a06b727d2ba5ee62db1c651cf8 (diff)
[Build] Enable MiMa checks for SQL
Now that 1.3 has been released, we should enable MiMa checks for the `sql` subproject.

Author: Josh Rosen <joshrosen@databricks.com>

Closes #5727 from JoshRosen/enable-more-mima-checks and squashes the following commits:

3ad302b [Josh Rosen] Merge remote-tracking branch 'origin/master' into enable-more-mima-checks
0c48e4d [Josh Rosen] Merge remote-tracking branch 'origin/master' into enable-more-mima-checks
e276cee [Josh Rosen] Fix SQL MiMa checks via excludes and private[sql]
44d0d01 [Josh Rosen] Add back 'launcher' exclude
1aae027 [Josh Rosen] Enable MiMa checks for launcher and sql projects.
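For context: MiMa (the Migration Manager) diffs each module's compiled classes against the previously released artifact and reports binary-incompatible changes. As a minimal sketch of what opting a module in looks like with the standalone sbt-mima-plugin (key names here are from recent plugin versions; Spark's own build routes this through its `MimaBuild` helper instead):

```scala
// build.sbt -- a minimal sketch, assuming a recent sbt-mima-plugin;
// this is NOT Spark's actual wiring, which lives in MimaBuild/MimaExcludes.

// Compare the current compile output against the last released artifact.
mimaPreviousArtifacts := Set("org.apache.spark" %% "spark-sql" % "1.3.0")

// Then, from the sbt shell:  mimaReportBinaryIssues
```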
-rw-r--r--  project/MimaExcludes.scala                                                    16
-rw-r--r--  project/SparkBuild.scala                                                       5
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/ExistingRDD.scala       5
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/LocalTableScan.scala    2
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/commands.scala          2
5 files changed, 23 insertions(+), 7 deletions(-)
diff --git a/project/MimaExcludes.scala b/project/MimaExcludes.scala
index 3beafa158e..bf343d4b7e 100644
--- a/project/MimaExcludes.scala
+++ b/project/MimaExcludes.scala
@@ -88,6 +88,22 @@ object MimaExcludes {
"org.apache.spark.mllib.linalg.Vector.toSparse"),
ProblemFilters.exclude[MissingMethodProblem](
"org.apache.spark.mllib.linalg.Vector.numActives")
+ ) ++ Seq(
+ // This `protected[sql]` method was removed in 1.3.1
+ ProblemFilters.exclude[MissingMethodProblem](
+ "org.apache.spark.sql.SQLContext.checkAnalysis"),
+ // This `private[sql]` class was removed in 1.4.0:
+ ProblemFilters.exclude[MissingClassProblem](
+ "org.apache.spark.sql.execution.AddExchange"),
+ ProblemFilters.exclude[MissingClassProblem](
+ "org.apache.spark.sql.execution.AddExchange$"),
+ // These test support classes were moved out of src/main and into src/test:
+ ProblemFilters.exclude[MissingClassProblem](
+ "org.apache.spark.sql.parquet.ParquetTestData"),
+ ProblemFilters.exclude[MissingClassProblem](
+ "org.apache.spark.sql.parquet.ParquetTestData$"),
+ ProblemFilters.exclude[MissingClassProblem](
+ "org.apache.spark.sql.parquet.TestGroupWriteSupport")
)
case v if v.startsWith("1.3") =>
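The surrounding file follows a single pattern: one `Seq` of problem filters per set of intentional breaks, selected by the version being checked. A simplified, illustrative shape of that pattern (names and structure are a sketch, not the file's exact contents):

```scala
import com.typesafe.tools.mima.core._

// Illustrative sketch of the MimaExcludes pattern: each release line maps
// to the known, intentional binary breaks that MiMa should suppress.
object MimaExcludesSketch {
  def excludes(version: String): Seq[ProblemFilter] = version match {
    case v if v.startsWith("1.4") => Seq(
      // e.g. a private[sql] class that was deliberately removed:
      ProblemFilters.exclude[MissingClassProblem](
        "org.apache.spark.sql.execution.AddExchange"))
    case _ => Seq.empty
  }
}
```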
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index a93aa12ce1..b4431c7ee0 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -156,9 +156,8 @@ object SparkBuild extends PomBuild {
/* Enable tests settings for all projects except examples, assembly and tools */
(allProjects ++ optionallyEnabledProjects).foreach(enable(TestSettings.settings))
- // TODO: Add Sql to mima checks
- // TODO: remove launcher from this list after 1.3.
- allProjects.filterNot(x => Seq(spark, sql, hive, hiveThriftServer, catalyst, repl,
+ // TODO: remove launcher from this list after 1.4.0
+ allProjects.filterNot(x => Seq(spark, hive, hiveThriftServer, catalyst, repl,
networkCommon, networkShuffle, networkYarn, launcher, unsafe).contains(x)).foreach {
x => enable(MimaBuild.mimaSettings(sparkHome, x))(x)
}
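`MimaBuild.mimaSettings` is Spark's helper for wiring a project up. As a hypothetical sketch of what such a helper amounts to (using recent sbt-mima-plugin keys and the `MimaExcludesSketch` shape from above, not Spark's actual code):

```scala
import sbt._
import sbt.Keys._
import com.typesafe.tools.mima.plugin.MimaKeys._

// Hypothetical sketch of a mimaSettings-style helper: point MiMa at the
// previous release and subtract the known intentional breaks.
def mimaSettingsSketch(previousVersion: String): Seq[Setting[_]] = Seq(
  mimaPreviousArtifacts := Set(
    organization.value %% moduleName.value % previousVersion),
  mimaBinaryIssueFilters ++= MimaExcludesSketch.excludes(previousVersion)
)
```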
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/ExistingRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/ExistingRDD.scala
index 1fd387eec7..57effbf7ec 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/ExistingRDD.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/ExistingRDD.scala
@@ -84,7 +84,7 @@ object RDDConversions {
}
/** Logical plan node for scanning data from an RDD. */
-case class LogicalRDD(output: Seq[Attribute], rdd: RDD[Row])(sqlContext: SQLContext)
+private[sql] case class LogicalRDD(output: Seq[Attribute], rdd: RDD[Row])(sqlContext: SQLContext)
extends LogicalPlan with MultiInstanceRelation {
override def children: Seq[LogicalPlan] = Nil
@@ -105,11 +105,12 @@ case class LogicalRDD(output: Seq[Attribute], rdd: RDD[Row])(sqlContext: SQLCont
}
/** Physical plan node for scanning data from an RDD. */
-case class PhysicalRDD(output: Seq[Attribute], rdd: RDD[Row]) extends LeafNode {
+private[sql] case class PhysicalRDD(output: Seq[Attribute], rdd: RDD[Row]) extends LeafNode {
override def execute(): RDD[Row] = rdd
}
/** Logical plan node for scanning data from a local collection. */
+private[sql]
case class LogicalLocalTable(output: Seq[Attribute], rows: Seq[Row])(sqlContext: SQLContext)
extends LogicalPlan with MultiInstanceRelation {
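The other half of the fix is visible here: rather than excluding them one by one, planner nodes that were never meant to be public are marked `private[sql]`, which removes them from the API surface MiMa audits. A minimal sketch of how Scala's package-qualified visibility behaves:

```scala
// Minimal sketch: private[sql] means "visible anywhere under the sql
// package, invisible outside it" -- so the class drops out of the public API.
package org.apache.spark.sql.execution {
  private[sql] case class InternalNode(id: Int)
}

package org.apache.spark.sql {
  object InsideSql {
    val ok = execution.InternalNode(1) // compiles: sql encloses this scope
  }
}

// In any package outside org.apache.spark.sql, the reference below
// would fail to compile:
//   val no = org.apache.spark.sql.execution.InternalNode(1)
```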
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/LocalTableScan.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/LocalTableScan.scala
index 8a8c3a4043..ace9af5f38 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/LocalTableScan.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/LocalTableScan.scala
@@ -26,7 +26,7 @@ import org.apache.spark.sql.catalyst.expressions.Attribute
/**
* Physical plan node for scanning data from a local collection.
*/
-case class LocalTableScan(output: Seq[Attribute], rows: Seq[Row]) extends LeafNode {
+private[sql] case class LocalTableScan(output: Seq[Attribute], rows: Seq[Row]) extends LeafNode {
private lazy val rdd = sqlContext.sparkContext.parallelize(rows)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/commands.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/commands.scala
index 99f24910fd..98df5bef34 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/commands.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/commands.scala
@@ -42,7 +42,7 @@ trait RunnableCommand extends logical.Command {
* A physical operator that executes the run method of a `RunnableCommand` and
* saves the result to prevent multiple executions.
*/
-case class ExecutedCommand(cmd: RunnableCommand) extends SparkPlan {
+private[sql] case class ExecutedCommand(cmd: RunnableCommand) extends SparkPlan {
/**
* A concrete command should override this lazy field to wrap up any side effects caused by the
* command or any other computation that should be evaluated exactly once. The value of this field