From 25a020be99b6a540e4001e59e40d5d1c8aa53812 Mon Sep 17 00:00:00 2001
From: hyukjinkwon
Date: Wed, 21 Sep 2016 10:35:29 +0100
Subject: [SPARK-17583][SQL] Remove useless rowSeparator variable and set
 auto-expanding buffer as default for maxCharsPerColumn option in CSV

## What changes were proposed in this pull request?

This PR includes the changes below:

1. Upgrade the Univocity library from 2.1.1 to 2.2.1.

   This includes some performance improvements and also enables the auto-expanding buffer for the `maxCharsPerColumn` option in CSV. Please refer to the [release notes](https://github.com/uniVocity/univocity-parsers/releases).

2. Remove the useless `rowSeparator` variable in `CSVOptions`.

   We have this unused variable in [CSVOptions.scala#L127](https://github.com/apache/spark/blob/29952ed096fd2a0a19079933ff691671d6f00835/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVOptions.scala#L127), but it can cause confusion because it suggests the CSV parser handles the row separator itself, when it actually does not deal with `\r\n`. For example, the open issue [SPARK-17227](https://issues.apache.org/jira/browse/SPARK-17227) describes this variable. The variable is effectively unused because we rely on Hadoop's `LineRecordReader`, which already handles both `\n` and `\r\n`.

3. Set the default value of `maxCharsPerColumn` to auto-expanding.

   We currently fix the maximum length of each column at 1000000 characters. It is more sensible to let the buffer auto-expand by default rather than impose a fixed length. Using `-1` for this is described in the [2.2.0](https://github.com/uniVocity/univocity-parsers/releases/tag/v2.2.0) release notes. A brief usage sketch follows this list.
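As a rough usage sketch of point 3 (the input path and column contents below are hypothetical, not taken from this patch): with the new `-1` default, a CSV value longer than the old 1000000-character limit can be read without setting `maxCharsPerColumn` at all, whereas previously the limit had to be raised by hand.

```scala
import org.apache.spark.sql.SparkSession

object MaxCharsPerColumnSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("maxCharsPerColumn-sketch")
      .master("local[*]")
      .getOrCreate()

    // Previously, a column longer than the fixed 1000000-character default
    // required raising the limit explicitly:
    val before = spark.read
      .option("header", "true")
      .option("maxCharsPerColumn", "10000000") // hand-tuned fixed limit
      .csv("/tmp/wide-values.csv")             // hypothetical input path

    // With this change, the default is -1 (auto-expanding buffer), so the
    // option can simply be left out, or set to -1 explicitly:
    val after = spark.read
      .option("header", "true")
      .csv("/tmp/wide-values.csv")

    after.show(truncate = false)
    spark.stop()
  }
}
```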
## How was this patch tested?

N/A

Author: hyukjinkwon

Closes #15138 from HyukjinKwon/SPARK-17583.
---
 sql/core/pom.xml                                                      | 2 +-
 sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala    | 4 ++--
 .../org/apache/spark/sql/execution/datasources/csv/CSVOptions.scala   | 4 +---
 .../org/apache/spark/sql/execution/datasources/csv/CSVParser.scala    | 2 --
 .../main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala  | 4 ++--
 5 files changed, 6 insertions(+), 10 deletions(-)

diff --git a/sql/core/pom.xml b/sql/core/pom.xml
index b2752638be..84de1d4a6e 100644
--- a/sql/core/pom.xml
+++ b/sql/core/pom.xml
@@ -38,7 +38,7 @@
     <dependency>
       <groupId>com.univocity</groupId>
       <artifactId>univocity-parsers</artifactId>
-      <version>2.1.1</version>
+      <version>2.2.1</version>
       <type>jar</type>
     </dependency>
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index 30f39c70fe..b10d2c86ac 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -392,8 +392,8 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
    * `java.sql.Timestamp.valueOf()` and `java.sql.Date.valueOf()` or ISO 8601 format.</li>
    * <li>`maxColumns` (default `20480`): defines a hard limit of how many columns
    * a record can have.</li>
-   * <li>`maxCharsPerColumn` (default `1000000`): defines the maximum number of characters allowed
-   * for any given value being read.</li>
+   * <li>`maxCharsPerColumn` (default `-1`): defines the maximum number of characters allowed
+   * for any given value being read. By default, it is -1 meaning unlimited length</li>
    * <li>`maxMalformedLogPerPartition` (default `10`): sets the maximum number of malformed rows
    * Spark will log for each partition. Malformed records beyond this number will be ignored.</li>
    * <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
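For reference, a minimal sketch of how the options documented in the hunk above can be combined on `DataFrameReader` (the input path and option values are illustrative assumptions, not part of this change):

```scala
import org.apache.spark.sql.SparkSession

object CsvReadOptionsSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").getOrCreate()

    // Combines the CSV options described above; values are illustrative.
    val df = spark.read
      .options(Map(
        "header" -> "true",
        "maxColumns" -> "20480",               // hard limit on columns per record
        "maxCharsPerColumn" -> "-1",           // -1 = auto-expanding buffer (new default)
        "maxMalformedLogPerPartition" -> "10", // malformed rows logged per partition
        "mode" -> "DROPMALFORMED"              // drop records that fail to parse
      ))
      .csv("/tmp/input.csv")                   // hypothetical input path

    df.printSchema()
    spark.stop()
  }
}
```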
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVOptions.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVOptions.scala
index 364d7c831e..e7dcc22272 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVOptions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVOptions.scala
@@ -112,7 +112,7 @@ private[csv] class CSVOptions(@transient private val parameters: Map[String, Str
 
   val maxColumns = getInt("maxColumns", 20480)
 
-  val maxCharsPerColumn = getInt("maxCharsPerColumn", 1000000)
+  val maxCharsPerColumn = getInt("maxCharsPerColumn", -1)
 
   val escapeQuotes = getBool("escapeQuotes", true)
 
@@ -123,8 +123,6 @@ private[csv] class CSVOptions(@transient private val parameters: Map[String, Str
   val inputBufferSize = 128
 
   val isCommentSet = this.comment != '\u0000'
-
-  val rowSeparator = "\n"
 }
 
 object CSVOptions {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVParser.scala
index 64bdd6f464..332f5c8e9f 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVParser.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVParser.scala
@@ -34,7 +34,6 @@ private[csv] class CsvReader(params: CSVOptions) {
   val settings = new CsvParserSettings()
   val format = settings.getFormat
   format.setDelimiter(params.delimiter)
-  format.setLineSeparator(params.rowSeparator)
   format.setQuote(params.quote)
   format.setQuoteEscape(params.escape)
   format.setComment(params.comment)
@@ -70,7 +69,6 @@ private[csv] class LineCsvWriter(params: CSVOptions, headers: Seq[String]) exten
   private val format = writerSettings.getFormat
 
   format.setDelimiter(params.delimiter)
-  format.setLineSeparator(params.rowSeparator)
   format.setQuote(params.quote)
   format.setQuoteEscape(params.escape)
   format.setComment(params.comment)
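As a rough, standalone illustration of what the `-1` value means at the Univocity level (method names are from the univocity-parsers 2.2.x public API; the sample input line is made up): the per-column buffer expands as needed, and no line separator needs to be configured because Spark feeds the parser lines already split by Hadoop's `LineRecordReader`.

```scala
import com.univocity.parsers.csv.{CsvParser, CsvParserSettings}

object UnivocityAutoExpandSketch {
  def main(args: Array[String]): Unit = {
    // -1 asks univocity-parsers 2.2.x to auto-expand its per-column buffer
    // instead of failing once a fixed size is exceeded.
    val settings = new CsvParserSettings()
    settings.setMaxCharsPerColumn(-1)
    settings.getFormat.setDelimiter(',')
    // No setLineSeparator call here: in Spark, Hadoop's LineRecordReader has
    // already split the input into individual lines.

    val parser = new CsvParser(settings)
    val longField = "x" * 2000000            // longer than the old 1000000 limit
    val row: Array[String] = parser.parseLine(s"a,$longField,c")
    println(row(1).length)                   // parses without tuning any limit
  }
}
```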
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala
index 9d174051bc..d437c16a25 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala
@@ -247,8 +247,8 @@ final class DataStreamReader private[sql](sparkSession: SparkSession) extends Lo
    * `java.text.SimpleDateFormat`. This applies to timestamp type.</li>
    * <li>`maxColumns` (default `20480`): defines a hard limit of how many columns
    * a record can have.</li>
-   * <li>`maxCharsPerColumn` (default `1000000`): defines the maximum number of characters allowed
-   * for any given value being read.</li>
+   * <li>`maxCharsPerColumn` (default `-1`): defines the maximum number of characters allowed
+   * for any given value being read. By default, it is -1 meaning unlimited length</li>
    * <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
    *    during parsing.
    *   <ul>
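The same option applies to streaming CSV reads through `DataStreamReader`; a minimal sketch follows (the schema, input directory, and console sink are assumptions for illustration; file-based streaming sources require an explicit schema):

```scala
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{StringType, StructField, StructType}

object StreamingCsvSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").getOrCreate()

    // File-based streaming sources need a user-specified schema.
    val schema = StructType(Seq(
      StructField("id", StringType),
      StructField("payload", StringType)   // may hold very long values
    ))

    val stream = spark.readStream
      .schema(schema)
      .option("maxCharsPerColumn", "-1")   // auto-expanding buffer (default after this patch)
      .option("mode", "PERMISSIVE")
      .csv("/tmp/csv-input-dir/")          // hypothetical directory of CSV files

    val query = stream.writeStream
      .format("console")
      .start()

    query.awaitTermination()
  }
}
```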