Diffstat (limited to 'sql')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala  |  3 ++-
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala | 10 ++++++++++
2 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
index e90e72dc8c..e048ee1441 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
@@ -75,7 +75,8 @@ case class DataSource(
"org.apache.spark.sql.json" -> classOf[json.DefaultSource].getCanonicalName,
"org.apache.spark.sql.json.DefaultSource" -> classOf[json.DefaultSource].getCanonicalName,
"org.apache.spark.sql.parquet" -> classOf[parquet.DefaultSource].getCanonicalName,
- "org.apache.spark.sql.parquet.DefaultSource" -> classOf[parquet.DefaultSource].getCanonicalName
+ "org.apache.spark.sql.parquet.DefaultSource" -> classOf[parquet.DefaultSource].getCanonicalName,
+ "com.databricks.spark.csv" -> classOf[csv.DefaultSource].getCanonicalName
)
/** Given a provider name, look up the data source class definition. */
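
For context, the map extended above is consulted before class loading, so the legacy spark-csv package name resolves to the built-in CSV source. A minimal standalone sketch of that resolution step, assuming illustrative names rather than the exact Spark internals:

// Standalone sketch of provider-name resolution through a
// backwards-compatibility map; `ProviderResolution` and
// `resolveProvider` are illustrative, not the actual Spark API.
object ProviderResolution {
  private val backwardCompatibilityMap: Map[String, String] = Map(
    "com.databricks.spark.csv" ->
      "org.apache.spark.sql.execution.datasources.csv.DefaultSource"
  )

  // Fall back to the name itself when no legacy alias is registered.
  def resolveProvider(provider: String): String =
    backwardCompatibilityMap.getOrElse(provider, provider)
}

The test added below exercises exactly this path through the public reader API.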
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
index 53027bb698..076fe5e041 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
@@ -466,4 +466,14 @@ class CSVSuite extends QueryTest with SharedSQLContext with SQLTestUtils {
       df.schema.fields.map(field => field.dataType).deep ==
         Array(IntegerType, IntegerType, IntegerType, IntegerType).deep)
   }
+
+  test("old csv data source name works") {
+    val cars = sqlContext
+      .read
+      .format("com.databricks.spark.csv")
+      .option("header", "false")
+      .load(testFile(carsFile))
+
+    verifyCars(cars, withHeader = false, checkTypes = false)
+  }
 }
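
As a follow-up sketch, assuming the suite's `testFile` and `carsFile` helpers, the legacy package name and the built-in "csv" short name should resolve to the same source and return identical results:

// Hedged check (not part of the committed test): both format
// names should load the same data from the same file.
val legacy = sqlContext.read.format("com.databricks.spark.csv").load(testFile(carsFile))
val builtin = sqlContext.read.format("csv").load(testFile(carsFile))
assert(legacy.schema === builtin.schema)
assert(legacy.collect() === builtin.collect())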