Diffstat (limited to 'sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala  23
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index 428032b1fb..f8b7a7f8ef 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -544,7 +544,28 @@ object SQLConf {
.internal()
.doc("How long that a file is guaranteed to be visible for all readers.")
.timeConf(TimeUnit.MILLISECONDS)
- .createWithDefault(60 * 1000L) // 10 minutes
+ .createWithDefault(TimeUnit.MINUTES.toMillis(10)) // 10 minutes
+
+ val FILE_SOURCE_LOG_DELETION = SQLConfigBuilder("spark.sql.streaming.fileSource.log.deletion")
+ .internal()
+ .doc("Whether to delete the expired log files in file stream source.")
+ .booleanConf
+ .createWithDefault(true)
+
+ val FILE_SOURCE_LOG_COMPACT_INTERVAL =
+ SQLConfigBuilder("spark.sql.streaming.fileSource.log.compactInterval")
+ .internal()
+ .doc("Number of log files after which all the previous files " +
+ "are compacted into the next log file.")
+ .intConf
+ .createWithDefault(10)
+
+ val FILE_SOURCE_LOG_CLEANUP_DELAY =
+ SQLConfigBuilder("spark.sql.streaming.fileSource.log.cleanupDelay")
+ .internal()
+ .doc("How long in milliseconds a file is guaranteed to be visible for all readers.")
+ .timeConf(TimeUnit.MILLISECONDS)
+ .createWithDefault(TimeUnit.MINUTES.toMillis(10)) // 10 minutes

val STREAMING_SCHEMA_INFERENCE =
SQLConfigBuilder("spark.sql.streaming.schemaInference")