author | Holden Karau <holden@us.ibm.com> | 2016-02-03 17:43:14 -0800
---|---|---
committer | Andrew Or <andrew@databricks.com> | 2016-02-03 17:43:14 -0800
commit | a8e2ba776b20c8054918af646d8228bba1b87c9b (patch) |
tree | b218a63a110be6e738ec7808469b0f149a9b604e /sql |
parent | de0914522fc5b2658959f9e2272b4e3162b14978 (diff) |
[SPARK-13152][CORE] Fix task metrics deprecation warning
Make internal, non-deprecated versions of incBytesRead and incRecordsRead so we don't generate unnecessary deprecation warnings in our build.

Right now incBytesRead and incRecordsRead are marked as deprecated and intended for internal use only. We should add private[spark] versions that are not deprecated and switch to those internally, so as not to clutter the warning output when building.
cc andrewor14 who did the initial deprecation
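For illustration, here is a minimal sketch of the shim pattern this change relies on. The incRecordsReadInternal/incBytesReadInternal names match the diff below, but the class body, counter fields, and deprecation messages are simplified stand-ins, not Spark's actual InputMetrics implementation:

```scala
package org.apache.spark

// Simplified stand-in for Spark's InputMetrics; fields and
// deprecation messages here are illustrative only.
class InputMetrics {
  private var _bytesRead: Long = 0L
  private var _recordsRead: Long = 0L

  def bytesRead: Long = _bytesRead
  def recordsRead: Long = _recordsRead

  // The public setters stay deprecated for external callers...
  @deprecated("this method is for internal use only", "2.0.0")
  def incBytesRead(v: Long): Unit = incBytesReadInternal(v)

  @deprecated("this method is for internal use only", "2.0.0")
  def incRecordsRead(v: Long): Unit = incRecordsReadInternal(v)

  // ...while private[spark] twins carry the actual updates, so internal
  // call sites (like SqlNewHadoopRDD in the diff below) compile without
  // deprecation warnings.
  private[spark] def incBytesReadInternal(v: Long): Unit = _bytesRead += v
  private[spark] def incRecordsReadInternal(v: Long): Unit = _recordsRead += v
}
```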
Author: Holden Karau <holden@us.ibm.com>
Closes #11056 from holdenk/SPARK-13152-fix-task-metrics-deprecation-warnings.
Diffstat (limited to 'sql')
-rw-r--r-- | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala | 4 |
1 file changed, 2 insertions, 2 deletions
```diff
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala
index 9703b16c86..3605150b3b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala
@@ -214,7 +214,7 @@ private[spark] class SqlNewHadoopRDD[V: ClassTag](
         }
         havePair = false
         if (!finished) {
-          inputMetrics.incRecordsRead(1)
+          inputMetrics.incRecordsReadInternal(1)
         }
         if (inputMetrics.recordsRead % SparkHadoopUtil.UPDATE_INPUT_METRICS_INTERVAL_RECORDS == 0) {
           updateBytesRead()
@@ -246,7 +246,7 @@ private[spark] class SqlNewHadoopRDD[V: ClassTag](
           // If we can't get the bytes read from the FS stats, fall back to the split size,
           // which may be inaccurate.
           try {
-            inputMetrics.incBytesRead(split.serializableHadoopSplit.value.getLength)
+            inputMetrics.incBytesReadInternal(split.serializableHadoopSplit.value.getLength)
           } catch {
             case e: java.io.IOException =>
               logWarning("Unable to get input size to set InputMetrics for task", e)
```