author    Wenchen Fan <wenchen@databricks.com>  2016-04-18 15:17:29 -0700
committer Reynold Xin <rxin@databricks.com>  2016-04-18 15:17:29 -0700
commit 602734084c05a79c18446a3c2c051740dba143b3 (patch)
tree   5cddac1f7a8add17ec705205e9cc5001d3d63d69 /project
parent 6ff0435858eed8310c0298ef0394053dfe06df9e (diff)
[SPARK-14628][CORE][FOLLOW-UP] Always track read/write metrics
## What changes were proposed in this pull request?

This PR is a follow-up to https://github.com/apache/spark/pull/12417: we now always track input/output/shuffle metrics in the Spark JSON protocol and status API. Most of the changed lines come from re-generating the golden answer files for `HistoryServerSuite`, which adds many 0 values for the read/write metrics.

## How was this patch tested?

Existing tests.

Author: Wenchen Fan <wenchen@databricks.com>

Closes #12462 from cloud-fan/follow.
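For context, the binary-incompatible shape of the change in the `org.apache.spark.status.api.v1` classes is roughly the following. This is a minimal sketch, not Spark's actual source; field lists are abbreviated:

```scala
// Sketch of the kind of signature change implied by SPARK-14628 in the
// v1 status API (abbreviated; the real classes carry many more fields).
case class InputMetrics(bytesRead: Long, recordsRead: Long)

// Before: input metrics were only present when the task actually read input.
// class TaskMetrics(..., val inputMetrics: Option[InputMetrics], ...)

// After: metrics are always tracked, defaulting to zero values, so both the
// getter's result type and the constructor signature change. These are the
// IncompatibleResultTypeProblem and IncompatibleMethTypeProblem cases
// excluded in the diff below.
class TaskMetrics(val inputMetrics: InputMetrics)
```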
Diffstat (limited to 'project')
-rw-r--r--  project/MimaExcludes.scala | 14 ++++++++++++++
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/project/MimaExcludes.scala b/project/MimaExcludes.scala
index 7730823f94..ff35dc010d 100644
--- a/project/MimaExcludes.scala
+++ b/project/MimaExcludes.scala
@@ -634,6 +634,20 @@ object MimaExcludes {
// [SPARK-14628] Simplify task metrics by always tracking read/write metrics
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.InputMetrics.readMethod"),
ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.executor.OutputMetrics.writeMethod")
+ ) ++ Seq(
+ // SPARK-14628: Always track input/output/shuffle metrics
+ ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.status.api.v1.ShuffleReadMetrics.totalBlocksFetched"),
+ ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.status.api.v1.ShuffleReadMetrics.this"),
+ ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.status.api.v1.TaskMetrics.inputMetrics"),
+ ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.status.api.v1.TaskMetrics.outputMetrics"),
+ ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.status.api.v1.TaskMetrics.shuffleWriteMetrics"),
+ ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.status.api.v1.TaskMetrics.shuffleReadMetrics"),
+ ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.status.api.v1.TaskMetrics.this"),
+ ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.status.api.v1.TaskMetricDistributions.inputMetrics"),
+ ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.status.api.v1.TaskMetricDistributions.outputMetrics"),
+ ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.status.api.v1.TaskMetricDistributions.shuffleWriteMetrics"),
+ ProblemFilters.exclude[IncompatibleResultTypeProblem]("org.apache.spark.status.api.v1.TaskMetricDistributions.shuffleReadMetrics"),
+ ProblemFilters.exclude[IncompatibleMethTypeProblem]("org.apache.spark.status.api.v1.TaskMetricDistributions.this")
)
case v if v.startsWith("1.6") =>
Seq(
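For readers unfamiliar with MiMa (the Migration Manager): each `ProblemFilters.exclude[...]` entry suppresses one reported binary incompatibility for a fully qualified member. The sketch below shows how such filters are typically wired into an sbt build; the wiring is illustrative and setting names may vary by plugin version, while Spark's real build collects its filters in `project/MimaExcludes.scala` as diffed above:

```scala
// Illustrative sketch of MiMa exclusion filters, assuming the
// sbt-mima-plugin is on the build classpath.
import com.typesafe.tools.mima.core._

// Each filter pairs a problem kind with the fully qualified member whose
// reported incompatibility should be ignored.
val ignoredBinaryIssues = Seq(
  ProblemFilters.exclude[DirectMissingMethodProblem](
    "org.apache.spark.status.api.v1.ShuffleReadMetrics.totalBlocksFetched"),
  ProblemFilters.exclude[IncompatibleResultTypeProblem](
    "org.apache.spark.status.api.v1.TaskMetrics.inputMetrics")
)

// In an sbt project definition these would typically be appended to the
// plugin's issue-filter setting, e.g.:
//   mimaBinaryIssueFilters ++= ignoredBinaryIssues
```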