author: Reynold Xin <rxin@databricks.com>  2016-08-09 18:22:14 +0800
committer: Wenchen Fan <wenchen@databricks.com>  2016-08-09 18:22:14 +0800
commit: 511f52f8423e151b0d0133baf040d34a0af3d422 (patch)
tree: 390a5f9f3bee846625994a447238d9579e17051f /sql/hive/src
parent: 62e62124419f3fa07b324f5e42feb2c5b4fde715 (diff)
[SPARK-16964][SQL] Remove private[sql] and private[spark] from sql.execution package
## What changes were proposed in this pull request?

This package is meant to be internal, and as a result it does not make sense to mark things as private[sql] or private[spark]. It simply makes debugging harder when Spark developers need to inspect the plans at runtime. This patch removes all private[sql] and private[spark] visibility modifiers in org.apache.spark.sql.execution.

## How was this patch tested?

N/A - just visibility changes.

Author: Reynold Xin <rxin@databricks.com>

Closes #14554 from rxin/remote-private.
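For readers less familiar with Scala's qualified access modifiers, here is a minimal sketch of what the removed modifiers mean. The class and member names are hypothetical illustrations, not Spark source:

```scala
package org.apache.spark.sql.execution

// private[sql] restricts access to the org.apache.spark.sql package and
// its subpackages; private[spark] widens that scope to org.apache.spark.
// Both compile to public JVM bytecode, but the Scala compiler rejects
// references from outside the named package.
private[sql] class QualifiedVisibility {
  private[spark] def internalPlan: String = "plan"
}

// After this patch the same members are plain public, so developers and
// tools can inspect execution plans at runtime without workarounds.
class PlainVisibility {
  def internalPlan: String = "plan"
}
```

Since everything here already lives under org.apache.spark.sql.execution, an internal package by convention, dropping the qualifiers gives up no meaningful encapsulation.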
Diffstat (limited to 'sql/hive/src')
-rw-r--r--  sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScanExec.scala | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScanExec.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScanExec.scala
index cc3e74b4e8..a716a3eab6 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScanExec.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScanExec.scala
@@ -54,7 +54,7 @@ case class HiveTableScanExec(
require(partitionPruningPred.isEmpty || relation.hiveQlTable.isPartitioned,
"Partition pruning predicates only supported for partitioned tables.")
- private[sql] override lazy val metrics = Map(
+ override lazy val metrics = Map(
"numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"))
override def producedAttributes: AttributeSet = outputSet ++