author    Yuming Wang <wgyumg@gmail.com>    2017-02-28 10:13:42 +0000
committer Sean Owen <sowen@cloudera.com>    2017-02-28 10:13:42 +0000
commit    9b8eca65dcf68129470ead39362ce870ffb0bb1d (patch)
tree      282c7af7443b31416ff3f9821615f18635de916b /sql/core/src
parent    a350bc16d36c58b48ac01f0258678ffcdb77e793 (diff)
[SPARK-19660][CORE][SQL] Replace the configuration property names that are deprecated in the version of Hadoop 2.6
## What changes were proposed in this pull request?

Replace all the Hadoop configuration property names deprecated as of Hadoop 2.6, following [DeprecatedProperties](https://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-common/DeprecatedProperties.html), except for:

- https://github.com/apache/spark/blob/v2.1.0/python/pyspark/sql/tests.py#L1533
- https://github.com/apache/spark/blob/v2.1.0/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala#L987
- https://github.com/apache/spark/blob/v2.1.0/sql/core/src/main/scala/org/apache/spark/sql/execution/command/SetCommand.scala#L45
- https://github.com/apache/spark/blob/v2.1.0/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala#L614

## How was this patch tested?

Existing tests.

Author: Yuming Wang <wgyumg@gmail.com>

Closes #16990 from wangyum/HadoopDeprecatedProperties.
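For context, a minimal sketch (not part of this patch) of why the rename is behavior-neutral: Hadoop 2.x's `Configuration` transparently forwards deprecated keys to their replacements, so reads through either name return the same value; using the new names simply avoids the deprecation warning logged on first use. The object name and the namenode URL below are made up for illustration.

```scala
import org.apache.hadoop.conf.Configuration

object DeprecatedKeyDemo {
  def main(args: Array[String]): Unit = {
    val conf = new Configuration()
    // Writing through the deprecated key still works: Hadoop 2.x
    // maps "fs.default.name" to "fs.defaultFS" internally and logs
    // a deprecation warning the first time the old key is touched.
    conf.set("fs.default.name", "hdfs://namenode:8020")
    // After the mapping, both names resolve to the same value.
    assert(conf.get("fs.defaultFS") == "hdfs://namenode:8020")
    assert(conf.get("fs.default.name") == "hdfs://namenode:8020")
  }
}
```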
Diffstat (limited to 'sql/core/src')
 sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala               |  4 ++--
 sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormatWriter.scala | 10 +++++-----
 2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
index 49407b44d7..3e80916104 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
@@ -269,8 +269,8 @@ case class LoadDataCommand(
} else {
// Follow Hive's behavior:
// If no schema or authority is provided with non-local inpath,
- // we will use hadoop configuration "fs.default.name".
- val defaultFSConf = sparkSession.sessionState.newHadoopConf().get("fs.default.name")
+ // we will use hadoop configuration "fs.defaultFS".
+ val defaultFSConf = sparkSession.sessionState.newHadoopConf().get("fs.defaultFS")
val defaultFS = if (defaultFSConf == null) {
new URI("")
} else {
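To make the fallback in the hunk above concrete, here is a hypothetical standalone helper (the name `qualify` is mine, not Spark's) that reads "fs.defaultFS", falls back to an empty URI when it is unset, and uses its scheme and authority to qualify a path that carries neither, mirroring the Hive behavior the comment describes.

```scala
import java.net.URI
import org.apache.hadoop.conf.Configuration

// Sketch of the LoadDataCommand fallback: an inpath with no scheme
// or authority inherits both from the configured default filesystem.
def qualify(path: URI, hadoopConf: Configuration): URI = {
  val defaultFSConf = hadoopConf.get("fs.defaultFS")
  val defaultFS = if (defaultFSConf == null) new URI("") else new URI(defaultFSConf)
  if (path.getScheme == null && path.getAuthority == null) {
    new URI(defaultFS.getScheme, defaultFS.getAuthority, path.getPath, null, null)
  } else {
    path
  }
}
```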
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormatWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormatWriter.scala
index 644358493e..c17796811c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormatWriter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormatWriter.scala
@@ -210,11 +210,11 @@ object FileFormatWriter extends Logging {
val taskAttemptContext: TaskAttemptContext = {
// Set up the configuration object
val hadoopConf = description.serializableHadoopConf.value
- hadoopConf.set("mapred.job.id", jobId.toString)
- hadoopConf.set("mapred.tip.id", taskAttemptId.getTaskID.toString)
- hadoopConf.set("mapred.task.id", taskAttemptId.toString)
- hadoopConf.setBoolean("mapred.task.is.map", true)
- hadoopConf.setInt("mapred.task.partition", 0)
+ hadoopConf.set("mapreduce.job.id", jobId.toString)
+ hadoopConf.set("mapreduce.task.id", taskAttemptId.getTaskID.toString)
+ hadoopConf.set("mapreduce.task.attempt.id", taskAttemptId.toString)
+ hadoopConf.setBoolean("mapreduce.task.ismap", true)
+ hadoopConf.setInt("mapreduce.task.partition", 0)
new TaskAttemptContextImpl(hadoopConf, taskAttemptId)
}
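As a self-contained illustration of the setup the second hunk performs, the sketch below builds the job, task, and attempt IDs from assumed literal values (the "20170228" identifier and the zero indices are placeholders, not what Spark computes), publishes them under the Hadoop 2 property names, and constructs the `TaskAttemptContext` that output committers consult.

```scala
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapreduce.{JobID, TaskAttemptID, TaskID, TaskType}
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl

// Derive nested MapReduce IDs from placeholder values.
val jobId = new JobID("20170228", 0)
val taskId = new TaskID(jobId, TaskType.MAP, 0)
val taskAttemptId = new TaskAttemptID(taskId, 0)

// Publish the IDs under the non-deprecated Hadoop 2 names, exactly
// as the patched FileFormatWriter does.
val hadoopConf = new Configuration()
hadoopConf.set("mapreduce.job.id", jobId.toString)
hadoopConf.set("mapreduce.task.id", taskAttemptId.getTaskID.toString)
hadoopConf.set("mapreduce.task.attempt.id", taskAttemptId.toString)
hadoopConf.setBoolean("mapreduce.task.ismap", true)
hadoopConf.setInt("mapreduce.task.partition", 0)

// The context hands the configuration and attempt ID to committers.
val taskAttemptContext = new TaskAttemptContextImpl(hadoopConf, taskAttemptId)
```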