From 511f52f8423e151b0d0133baf040d34a0af3d422 Mon Sep 17 00:00:00 2001
From: Reynold Xin
Date: Tue, 9 Aug 2016 18:22:14 +0800
Subject: [SPARK-16964][SQL] Remove private[sql] and private[spark] from
 sql.execution package

## What changes were proposed in this pull request?

This package is meant to be internal, and as a result it does not make sense to mark things as private[sql] or private[spark]. It simply makes debugging harder when Spark developers need to inspect the plans at runtime. This patch removes all private[sql] and private[spark] visibility modifiers in org.apache.spark.sql.execution.

## How was this patch tested?

N/A - just visibility changes.

Author: Reynold Xin

Closes #14554 from rxin/remote-private.
---
 .../scala/org/apache/spark/sql/hive/execution/HiveTableScanExec.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'sql/hive/src')

diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScanExec.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScanExec.scala
index cc3e74b4e8..a716a3eab6 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScanExec.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScanExec.scala
@@ -54,7 +54,7 @@ case class HiveTableScanExec(
   require(partitionPruningPred.isEmpty || relation.hiveQlTable.isPartitioned,
     "Partition pruning predicates only supported for partitioned tables.")
 
-  private[sql] override lazy val metrics = Map(
+  override lazy val metrics = Map(
     "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"))
 
   override def producedAttributes: AttributeSet = outputSet ++
--
cgit v1.2.3
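For context on the modifier this patch removes: `private[sql]` is Scala's package-qualified visibility, which restricts a member to `org.apache.spark.sql` and its subpackages, so code outside that package tree (a REPL session, a debugger expression evaluator) cannot touch it at compile time. Below is a minimal sketch of that behavior under illustrative names; the `org.example` packages and `PlanNode` class are stand-ins, not Spark's actual classes.

```scala
// Minimal illustration of Scala package-qualified visibility.
// All names here are hypothetical stand-ins, not Spark's.
package org.example.sql.execution {
  class PlanNode {
    // Visible only within org.example.sql and its subpackages,
    // analogous to the private[sql] qualifier this patch removes.
    private[sql] lazy val metrics: Map[String, Long] =
      Map("numOutputRows" -> 0L)

    // After the change: no qualifier, so any caller can read the
    // metrics at runtime, e.g. when inspecting a plan in a shell.
    lazy val openMetrics: Map[String, Long] =
      Map("numOutputRows" -> 0L)
  }
}

package org.example.tools {
  object Inspect {
    def main(args: Array[String]): Unit = {
      val node = new org.example.sql.execution.PlanNode
      // node.metrics           // does not compile: caller is outside org.example.sql
      println(node.openMetrics) // fine: Map(numOutputRows -> 0)
    }
  }
}
```

Since JVM bytecode has no notion of Scala's qualified visibility, `private[sql]` offers no runtime protection anyway; in a package already documented as internal, the qualifier only got in the way of tooling and ad hoc inspection.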