Diffstat (limited to 'sql/core/src')
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala                    8
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala    4
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala                       6
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala                   12
-rw-r--r--  sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala    2
-rw-r--r--  sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala                 30
6 files changed, 31 insertions, 31 deletions
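
For orientation: this change renames CatalogStorageFormat.serdeProperties to properties, reflecting that for data source tables the map carries generic options rather than Hive SerDe properties. A minimal sketch of the case class after the rename, with the field list inferred from the copy() calls in the hunks below (an assumption, not the authoritative definition):

    // Sketch of the renamed case class; field order inferred from the diff.
    case class CatalogStorageFormat(
        locationUri: Option[String],
        inputFormat: Option[String],
        outputFormat: Option[String],
        serde: Option[String],
        compressed: Boolean,
        properties: Map[String, String]) // was: serdeProperties

    object CatalogStorageFormat {
      // `empty` is the starting point used throughout the parser hunks.
      val empty = CatalogStorageFormat(None, None, None, None, compressed = false, Map.empty)
    }
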
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
index 1316d90fa4..9b09801896 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
@@ -968,7 +968,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
// whether to convert a table created by CTAS to a datasource table.
serde = None,
compressed = false,
- serdeProperties = Map())
+ properties = Map())
}
validateRowFormatFileFormat(ctx.rowFormat, ctx.createFileFormat, ctx)
val fileStorage = Option(ctx.createFileFormat).map(visitCreateFileFormat)
@@ -986,7 +986,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
outputFormat = fileStorage.outputFormat.orElse(defaultStorage.outputFormat),
serde = rowStorage.serde.orElse(fileStorage.serde).orElse(defaultStorage.serde),
compressed = false,
- serdeProperties = rowStorage.serdeProperties ++ fileStorage.serdeProperties)
+ properties = rowStorage.properties ++ fileStorage.properties)
// If location is defined, we'll assume this is an external table.
// Otherwise, we may accidentally delete existing data.
val tableType = if (external || location.isDefined) {
@@ -1145,7 +1145,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
import ctx._
CatalogStorageFormat.empty.copy(
serde = Option(string(name)),
- serdeProperties = Option(tablePropertyList).map(visitPropertyKeyValues).getOrElse(Map.empty))
+ properties = Option(tablePropertyList).map(visitPropertyKeyValues).getOrElse(Map.empty))
}
/**
@@ -1173,7 +1173,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
ctx)
"line.delim" -> value
}
- CatalogStorageFormat.empty.copy(serdeProperties = entries.toMap)
+ CatalogStorageFormat.empty.copy(properties = entries.toMap)
}
/**
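
A side note on the CTAS hunk above: rowStorage.properties ++ fileStorage.properties merges the two maps with Scala's ++, so on a duplicate key the file-format side wins. A two-line illustration (the key name is hypothetical):

    // Map ++ keeps the right-hand value on duplicate keys:
    val rowStorage = Map("field.delim" -> ",")
    val fileStorage = Map("field.delim" -> "\t")
    rowStorage ++ fileStorage // Map("field.delim" -> "\t"): fileStorage wins
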
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala
index c38eca5156..5e3cd9f895 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/createDataSourceTables.scala
@@ -392,7 +392,7 @@ object CreateDataSourceTableUtils extends Logging {
outputFormat = None,
serde = None,
compressed = false,
- serdeProperties = options
+ properties = options
),
properties = tableProperties.toMap)
}
@@ -412,7 +412,7 @@ object CreateDataSourceTableUtils extends Logging {
outputFormat = serde.outputFormat,
serde = serde.serde,
compressed = false,
- serdeProperties = options
+ properties = options
),
schema = relation.schema.map { f =>
CatalogColumn(f.name, f.dataType.catalogString)
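
Both hunks in this file store the caller's options map directly as the storage properties of the catalog entry; the FindDataSourceTable hunk further down reads the same map back as DataSource options. A sketch of that round trip, with the option values chosen purely for illustration:

    // Write side (sketch): a data source table's options land in storage.properties.
    val storage = CatalogStorageFormat.empty.copy(
      properties = Map("path" -> "/tmp/t1", "mergeSchema" -> "true"))

    // Read side (sketch): planning later recovers the identical map as options.
    val options: Map[String, String] = storage.properties
    assert(options("path") == "/tmp/t1")
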
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala
index a3a057a562..2a62b864a1 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala
@@ -320,14 +320,14 @@ case class AlterTableSerDePropertiesCommand(
if (partSpec.isEmpty) {
val newTable = table.withNewStorage(
serde = serdeClassName.orElse(table.storage.serde),
- serdeProperties = table.storage.serdeProperties ++ serdeProperties.getOrElse(Map()))
+ serdeProperties = table.storage.properties ++ serdeProperties.getOrElse(Map()))
catalog.alterTable(newTable)
} else {
val spec = partSpec.get
val part = catalog.getPartition(tableName, spec)
val newPart = part.copy(storage = part.storage.copy(
serde = serdeClassName.orElse(part.storage.serde),
- serdeProperties = part.storage.serdeProperties ++ serdeProperties.getOrElse(Map())))
+ properties = part.storage.properties ++ serdeProperties.getOrElse(Map())))
catalog.alterPartitions(tableName, Seq(newPart))
}
Seq.empty[Row]
@@ -466,7 +466,7 @@ case class AlterTableSetLocationCommand(
if (DDLUtils.isDatasourceTable(table)) {
table.withNewStorage(
locationUri = Some(location),
- serdeProperties = table.storage.serdeProperties ++ Map("path" -> location))
+ serdeProperties = table.storage.properties ++ Map("path" -> location))
} else {
table.withNewStorage(locationUri = Some(location))
}
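
Note the asymmetry in this file: part.storage.copy(...) must use the renamed field (properties = ...), while table.withNewStorage(...) still passes serdeProperties = ... . That is consistent if only the case class field was renamed and the helper kept its old parameter name; a sketch of withNewStorage under that assumption (signature not taken from Spark):

    // Sketch: the old `serdeProperties` name survives as a parameter and is
    // forwarded to the renamed `properties` field.
    def withNewStorage(
        locationUri: Option[String] = storage.locationUri,
        inputFormat: Option[String] = storage.inputFormat,
        outputFormat: Option[String] = storage.outputFormat,
        compressed: Boolean = false,
        serde: Option[String] = storage.serde,
        serdeProperties: Map[String, String] = storage.properties): CatalogTable = {
      copy(storage = CatalogStorageFormat(
        locationUri, inputFormat, outputFormat, serde, compressed,
        properties = serdeProperties))
    }
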
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
index 6e52a4609a..a62853b05f 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
@@ -120,7 +120,7 @@ case class CreateTableCommand(table: CatalogTable, ifNotExists: Boolean) extends
override def run(sparkSession: SparkSession): Seq[Row] = {
DDLUtils.verifyTableProperties(table.properties.keys.toSeq, "CREATE TABLE")
- DDLUtils.verifyTableProperties(table.storage.serdeProperties.keys.toSeq, "CREATE TABLE")
+ DDLUtils.verifyTableProperties(table.storage.properties.keys.toSeq, "CREATE TABLE")
sparkSession.sessionState.catalog.createTable(table, ifNotExists)
Seq.empty[Row]
}
@@ -167,7 +167,7 @@ case class AlterTableRenameCommand(
if (DDLUtils.isDatasourceTable(table) && table.tableType == CatalogTableType.MANAGED) {
val newPath = catalog.defaultTablePath(newName)
val newTable = table.withNewStorage(
- serdeProperties = table.storage.serdeProperties ++ Map("path" -> newPath))
+ serdeProperties = table.storage.properties ++ Map("path" -> newPath))
catalog.alterTable(newTable)
}
// Invalidate the table last, otherwise uncaching the table would load the logical plan
@@ -349,7 +349,7 @@ case class TruncateTableCommand(
}
val locations =
if (isDatasourceTable) {
- Seq(table.storage.serdeProperties.get("path"))
+ Seq(table.storage.properties.get("path"))
} else if (table.partitionColumnNames.isEmpty) {
Seq(table.storage.locationUri)
} else {
@@ -492,7 +492,7 @@ case class DescribeTableCommand(table: TableIdentifier, isExtended: Boolean, isF
describeBucketingInfo(metadata, buffer)
append(buffer, "Storage Desc Parameters:", "", "")
- metadata.storage.serdeProperties.foreach { case (key, value) =>
+ metadata.storage.properties.foreach { case (key, value) =>
append(buffer, s" $key", value, "")
}
}
@@ -820,7 +820,7 @@ case class ShowCreateTableCommand(table: TableIdentifier) extends RunnableComman
storage.serde.foreach { serde =>
builder ++= s"ROW FORMAT SERDE '$serde'\n"
- val serdeProps = metadata.storage.serdeProperties.map {
+ val serdeProps = metadata.storage.properties.map {
case (key, value) =>
s"'${escapeSingleQuotedString(key)}' = '${escapeSingleQuotedString(value)}'"
}
@@ -890,7 +890,7 @@ case class ShowCreateTableCommand(table: TableIdentifier) extends RunnableComman
builder ++= s"USING ${props(CreateDataSourceTableUtils.DATASOURCE_PROVIDER)}\n"
- val dataSourceOptions = metadata.storage.serdeProperties.filterNot {
+ val dataSourceOptions = metadata.storage.properties.filterNot {
case (key, value) =>
// If it's a managed table, omit PATH option. Spark SQL always creates external table
// when the table creation DDL contains the PATH option.
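
For a concrete sense of what the ShowCreateTableCommand hunks emit: for a Hive table whose storage.properties is Map("field.delim" -> ","), the SERDE branch above would render roughly the following (output shape assumed from the builder lines, not copied from Spark):

    // Approximate output of the ROW FORMAT SERDE branch above:
    //
    //   ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
    //   WITH SERDEPROPERTIES (
    //     'field.delim' = ','
    //   )
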
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala
index 0841636d33..8ffdc507db 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala
@@ -214,7 +214,7 @@ private[sql] class FindDataSourceTable(sparkSession: SparkSession) extends Rule[
val bucketSpec = DDLUtils.getBucketSpecFromTableProperties(table)
- val options = table.storage.serdeProperties
+ val options = table.storage.properties
val dataSource =
DataSource(
sparkSession,
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
index 467a2287fc..34c980e321 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
@@ -85,7 +85,7 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach {
outputFormat = None,
serde = None,
compressed = false,
- serdeProperties = Map())
+ properties = Map())
CatalogTable(
identifier = name,
tableType = CatalogTableType.EXTERNAL,
@@ -892,9 +892,9 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach {
convertToDatasourceTable(catalog, tableIdent)
}
assert(catalog.getTableMetadata(tableIdent).storage.locationUri.isDefined)
- assert(catalog.getTableMetadata(tableIdent).storage.serdeProperties.isEmpty)
+ assert(catalog.getTableMetadata(tableIdent).storage.properties.isEmpty)
assert(catalog.getPartition(tableIdent, partSpec).storage.locationUri.isEmpty)
- assert(catalog.getPartition(tableIdent, partSpec).storage.serdeProperties.isEmpty)
+ assert(catalog.getPartition(tableIdent, partSpec).storage.properties.isEmpty)
// Verify that the location is set to the expected string
def verifyLocation(expected: String, spec: Option[TablePartitionSpec] = None): Unit = {
val storageFormat = spec
@@ -902,10 +902,10 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach {
.getOrElse { catalog.getTableMetadata(tableIdent).storage }
if (isDatasourceTable) {
if (spec.isDefined) {
- assert(storageFormat.serdeProperties.isEmpty)
+ assert(storageFormat.properties.isEmpty)
assert(storageFormat.locationUri.isEmpty)
} else {
- assert(storageFormat.serdeProperties.get("path") === Some(expected))
+ assert(storageFormat.properties.get("path") === Some(expected))
assert(storageFormat.locationUri === Some(expected))
}
} else {
@@ -948,7 +948,7 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach {
convertToDatasourceTable(catalog, tableIdent)
}
assert(catalog.getTableMetadata(tableIdent).storage.serde.isEmpty)
- assert(catalog.getTableMetadata(tableIdent).storage.serdeProperties.isEmpty)
+ assert(catalog.getTableMetadata(tableIdent).storage.properties.isEmpty)
// set table serde and/or properties (should fail on datasource tables)
if (isDatasourceTable) {
val e1 = intercept[AnalysisException] {
@@ -963,21 +963,21 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach {
} else {
sql("ALTER TABLE dbx.tab1 SET SERDE 'org.apache.jadoop'")
assert(catalog.getTableMetadata(tableIdent).storage.serde == Some("org.apache.jadoop"))
- assert(catalog.getTableMetadata(tableIdent).storage.serdeProperties.isEmpty)
+ assert(catalog.getTableMetadata(tableIdent).storage.properties.isEmpty)
sql("ALTER TABLE dbx.tab1 SET SERDE 'org.apache.madoop' " +
"WITH SERDEPROPERTIES ('k' = 'v', 'kay' = 'vee')")
assert(catalog.getTableMetadata(tableIdent).storage.serde == Some("org.apache.madoop"))
- assert(catalog.getTableMetadata(tableIdent).storage.serdeProperties ==
+ assert(catalog.getTableMetadata(tableIdent).storage.properties ==
Map("k" -> "v", "kay" -> "vee"))
}
// set serde properties only
sql("ALTER TABLE dbx.tab1 SET SERDEPROPERTIES ('k' = 'vvv', 'kay' = 'vee')")
- assert(catalog.getTableMetadata(tableIdent).storage.serdeProperties ==
+ assert(catalog.getTableMetadata(tableIdent).storage.properties ==
Map("k" -> "vvv", "kay" -> "vee"))
// set things without explicitly specifying database
catalog.setCurrentDatabase("dbx")
sql("ALTER TABLE tab1 SET SERDEPROPERTIES ('kay' = 'veee')")
- assert(catalog.getTableMetadata(tableIdent).storage.serdeProperties ==
+ assert(catalog.getTableMetadata(tableIdent).storage.properties ==
Map("k" -> "vvv", "kay" -> "veee"))
// table to alter does not exist
intercept[AnalysisException] {
@@ -1004,7 +1004,7 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach {
convertToDatasourceTable(catalog, tableIdent)
}
assert(catalog.getPartition(tableIdent, spec).storage.serde.isEmpty)
- assert(catalog.getPartition(tableIdent, spec).storage.serdeProperties.isEmpty)
+ assert(catalog.getPartition(tableIdent, spec).storage.properties.isEmpty)
// set table serde and/or properties (should fail on datasource tables)
if (isDatasourceTable) {
val e1 = intercept[AnalysisException] {
@@ -1019,25 +1019,25 @@ class DDLSuite extends QueryTest with SharedSQLContext with BeforeAndAfterEach {
} else {
sql("ALTER TABLE dbx.tab1 PARTITION (a=1, b=2) SET SERDE 'org.apache.jadoop'")
assert(catalog.getPartition(tableIdent, spec).storage.serde == Some("org.apache.jadoop"))
- assert(catalog.getPartition(tableIdent, spec).storage.serdeProperties.isEmpty)
+ assert(catalog.getPartition(tableIdent, spec).storage.properties.isEmpty)
sql("ALTER TABLE dbx.tab1 PARTITION (a=1, b=2) SET SERDE 'org.apache.madoop' " +
"WITH SERDEPROPERTIES ('k' = 'v', 'kay' = 'vee')")
assert(catalog.getPartition(tableIdent, spec).storage.serde == Some("org.apache.madoop"))
- assert(catalog.getPartition(tableIdent, spec).storage.serdeProperties ==
+ assert(catalog.getPartition(tableIdent, spec).storage.properties ==
Map("k" -> "v", "kay" -> "vee"))
}
// set serde properties only
maybeWrapException(isDatasourceTable) {
sql("ALTER TABLE dbx.tab1 PARTITION (a=1, b=2) " +
"SET SERDEPROPERTIES ('k' = 'vvv', 'kay' = 'vee')")
- assert(catalog.getPartition(tableIdent, spec).storage.serdeProperties ==
+ assert(catalog.getPartition(tableIdent, spec).storage.properties ==
Map("k" -> "vvv", "kay" -> "vee"))
}
// set things without explicitly specifying database
catalog.setCurrentDatabase("dbx")
maybeWrapException(isDatasourceTable) {
sql("ALTER TABLE tab1 PARTITION (a=1, b=2) SET SERDEPROPERTIES ('kay' = 'veee')")
- assert(catalog.getPartition(tableIdent, spec).storage.serdeProperties ==
+ assert(catalog.getPartition(tableIdent, spec).storage.properties ==
Map("k" -> "vvv", "kay" -> "veee"))
}
// table to alter does not exist
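
Finally, the suite pins down the merge semantics of the command: each SET SERDEPROPERTIES call is folded into the existing map with ++, so earlier keys survive unless overwritten. Condensed from the assertions above:

    // Sequence driven by the test (table name and values taken from the suite):
    sql("ALTER TABLE dbx.tab1 SET SERDEPROPERTIES ('k' = 'vvv', 'kay' = 'vee')")
    // storage.properties == Map("k" -> "vvv", "kay" -> "vee")
    sql("ALTER TABLE tab1 SET SERDEPROPERTIES ('kay' = 'veee')")
    // storage.properties == Map("k" -> "vvv", "kay" -> "veee") -- "k" survives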