author     Reynold Xin <rxin@databricks.com>    2016-04-21 00:24:24 -0700
committer  Reynold Xin <rxin@databricks.com>    2016-04-21 00:24:24 -0700
commit     77d847ddb22cc6c5f21f0794d10bdd73b6fac193 (patch)
tree       1ce982a430bee2277195629056afab8dbd6928b0 /sql/hive/src/main/scala/org
parent     cfe472a34ea8bbf2f7a04acbf0c6ab6c48d732ff (diff)
[SPARK-14792][SQL] Move as many parsing rules as possible into SQL parser
## What changes were proposed in this pull request?
This patch moves as many parsing rules as possible into the SQL parser. Only three remain after this patch: (1) run native command, (2) analyze, and (3) script IO. These three will be dealt with in a follow-up PR.
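To make the shape of the refactoring concrete, here is a minimal, self-contained sketch of the pattern applied by this patch (the `*Sketch` class names are hypothetical stand-ins, not Spark's real classes): `visit*` rules move out of the Hive-specific ANTLR AST builder into the shared base builder, which now also receives the conf (mirroring the real change from `extends SparkSqlAstBuilder` to `extends SparkSqlAstBuilder(conf)`), and the Hive builder keeps only the Hive-only leftovers.

```scala
// Hypothetical sketch of the builder hierarchy after the move.
class SparkSqlAstBuilderSketch(conf: Map[String, String]) {
  // CREATE TABLE handling now lives in the shared builder, reading the same
  // "hive.default.fileformat" default the Hive builder used to consult.
  def visitCreateTable(sql: String): String = {
    val format = conf.getOrElse("hive.default.fileformat", "textfile")
    s"CreateTable(sql = $sql, defaultFileFormat = $format)"
  }
}

class HiveSqlAstBuilderSketch(conf: Map[String, String])
    extends SparkSqlAstBuilderSketch(conf) {
  // Only the Hive-specific leftovers (native commands, ANALYZE, script IO)
  // remain in the Hive builder after this patch.
  def visitAnalyze(table: String): String = s"AnalyzeTable($table)"
}

object ParserMoveDemo extends App {
  val builder = new HiveSqlAstBuilderSketch(Map.empty)
  println(builder.visitCreateTable("CREATE TABLE t (a INT)")) // inherited shared rule
  println(builder.visitAnalyze("t"))                          // remaining Hive rule
}
```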
## How was this patch tested?
No test change. This simply moves code around.
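As one concrete example of the code movement, the `HiveSerDe` helper leaves `HiveMetastoreCatalog.scala` for `org.apache.spark.sql.internal`, so the non-Hive parser can resolve `STORED AS` formats. Below is a condensed, self-contained sketch of that lookup; the real version also takes a `SQLConf` (used for the rcfile serde default) and covers sequencefile, rcfile, textfile, and avro in addition to the two entries shown.

```scala
// Condensed sketch of the relocated HiveSerDe lookup.
case class HiveSerDe(
    inputFormat: Option[String] = None,
    outputFormat: Option[String] = None,
    serde: Option[String] = None)

object HiveSerDe {
  private val serdeMap = Map(
    "orc" -> HiveSerDe(
      inputFormat = Some("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"),
      outputFormat = Some("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"),
      serde = Some("org.apache.hadoop.hive.ql.io.orc.OrcSerde")),
    "parquet" -> HiveSerDe(
      inputFormat = Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"),
      outputFormat = Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"),
      serde = Some("org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe")))

  // Long datasource classnames normalize to their short format key first,
  // so e.g. "org.apache.spark.sql.parquet" resolves the same as "PARQUET".
  def sourceToSerDe(source: String): Option[HiveSerDe] = {
    val key = source.toLowerCase match {
      case s if s.startsWith("org.apache.spark.sql.parquet") => "parquet"
      case s if s.startsWith("org.apache.spark.sql.orc") => "orc"
      case s => s
    }
    serdeMap.get(key)
  }
}
```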
Author: Reynold Xin <rxin@databricks.com>
Closes #12556 from rxin/SPARK-14792.
Diffstat (limited to 'sql/hive/src/main/scala/org')
sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveSqlParser.scala
sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/commands.scala
4 files changed, 11 insertions(+), 454 deletions(-)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
index c20b022e84..3eea6c06ac 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
@@ -40,75 +40,15 @@ import org.apache.spark.sql.catalyst.plans.logical
 import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.catalyst.rules._
 import org.apache.spark.sql.execution.FileRelation
+import org.apache.spark.sql.execution.command.{CreateTableAsSelectLogicalPlan, CreateViewAsSelectLogicalCommand}
 import org.apache.spark.sql.execution.datasources.{Partition => _, _}
 import org.apache.spark.sql.execution.datasources.parquet.{DefaultSource => ParquetDefaultSource, ParquetRelation}
 import org.apache.spark.sql.hive.client._
 import org.apache.spark.sql.hive.execution.HiveNativeCommand
 import org.apache.spark.sql.hive.orc.{DefaultSource => OrcDefaultSource}
-import org.apache.spark.sql.internal.SQLConf
+import org.apache.spark.sql.internal.HiveSerDe
 import org.apache.spark.sql.types._
 
-private[hive] case class HiveSerDe(
-    inputFormat: Option[String] = None,
-    outputFormat: Option[String] = None,
-    serde: Option[String] = None)
-
-private[hive] object HiveSerDe {
-  /**
-   * Get the Hive SerDe information from the data source abbreviation string or classname.
-   *
-   * @param source Currently the source abbreviation can be one of the following:
-   *               SequenceFile, RCFile, ORC, PARQUET, and case insensitive.
-   * @param conf SQLConf
-   * @return HiveSerDe associated with the specified source
-   */
-  def sourceToSerDe(source: String, conf: SQLConf): Option[HiveSerDe] = {
-    val serdeMap = Map(
-      "sequencefile" ->
-        HiveSerDe(
-          inputFormat = Option("org.apache.hadoop.mapred.SequenceFileInputFormat"),
-          outputFormat = Option("org.apache.hadoop.mapred.SequenceFileOutputFormat")),
-
-      "rcfile" ->
-        HiveSerDe(
-          inputFormat = Option("org.apache.hadoop.hive.ql.io.RCFileInputFormat"),
-          outputFormat = Option("org.apache.hadoop.hive.ql.io.RCFileOutputFormat"),
-          serde = Option(conf.getConfString("hive.default.rcfile.serde",
-            "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe"))),
-
-      "orc" ->
-        HiveSerDe(
-          inputFormat = Option("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat"),
-          outputFormat = Option("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"),
-          serde = Option("org.apache.hadoop.hive.ql.io.orc.OrcSerde")),
-
-      "parquet" ->
-        HiveSerDe(
-          inputFormat = Option("org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"),
-          outputFormat = Option("org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"),
-          serde = Option("org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe")),
-
-      "textfile" ->
-        HiveSerDe(
-          inputFormat = Option("org.apache.hadoop.mapred.TextInputFormat"),
-          outputFormat = Option("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat")),
-
-      "avro" ->
-        HiveSerDe(
-          inputFormat = Option("org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat"),
-          outputFormat = Option("org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat"),
-          serde = Option("org.apache.hadoop.hive.serde2.avro.AvroSerDe")))
-
-    val key = source.toLowerCase match {
-      case s if s.startsWith("org.apache.spark.sql.parquet") => "parquet"
-      case s if s.startsWith("org.apache.spark.sql.orc") => "orc"
-      case s => s
-    }
-
-    serdeMap.get(key)
-  }
-}
-
 /**
  * Legacy catalog for interacting with the Hive metastore.
@@ -699,7 +639,8 @@ private[hive] class HiveMetastoreCatalog(hive: SQLContext) extends Logging {
       case p: LogicalPlan if !p.childrenResolved => p
       case p: LogicalPlan if p.resolved => p
 
-      case CreateViewAsSelect(table, child, allowExisting, replace, sql) if conf.nativeView =>
+      case CreateViewAsSelectLogicalCommand(table, child, allowExisting, replace, sql)
+          if conf.nativeView =>
         if (allowExisting && replace) {
           throw new AnalysisException(
             "It is not allowed to define a view with both IF NOT EXISTS and OR REPLACE.")
@@ -713,10 +654,10 @@ private[hive] class HiveMetastoreCatalog(hive: SQLContext) extends Logging {
           allowExisting,
           replace)
 
-      case CreateViewAsSelect(table, child, allowExisting, replace, sql) =>
+      case CreateViewAsSelectLogicalCommand(table, child, allowExisting, replace, sql) =>
         HiveNativeCommand(sql)
 
-      case p @ CreateTableAsSelect(table, child, allowExisting) =>
+      case p @ CreateTableAsSelectLogicalPlan(table, child, allowExisting) =>
        val schema = if (table.schema.nonEmpty) {
          table.schema
        } else {
@@ -1081,28 +1022,3 @@ private[hive] object HiveMetastoreTypes {
     case udt: UserDefinedType[_] => toMetastoreType(udt.sqlType)
   }
 }
-
-private[hive] case class CreateTableAsSelect(
-    tableDesc: CatalogTable,
-    child: LogicalPlan,
-    allowExisting: Boolean) extends UnaryNode with Command {
-
-  override def output: Seq[Attribute] = Seq.empty[Attribute]
-  override lazy val resolved: Boolean =
-    tableDesc.identifier.database.isDefined &&
-      tableDesc.schema.nonEmpty &&
-      tableDesc.storage.serde.isDefined &&
-      tableDesc.storage.inputFormat.isDefined &&
-      tableDesc.storage.outputFormat.isDefined &&
-      childrenResolved
-}
-
-private[hive] case class CreateViewAsSelect(
-    tableDesc: CatalogTable,
-    child: LogicalPlan,
-    allowExisting: Boolean,
-    replace: Boolean,
-    sql: String) extends UnaryNode with Command {
-  override def output: Seq[Attribute] = Seq.empty[Attribute]
-  override lazy val resolved: Boolean = false
-}
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
index 2c360cb7ca..171def43b5 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
@@ -82,7 +82,7 @@ private[hive] class HiveSessionState(ctx: SQLContext) extends SessionState(ctx)
       sharedState.externalCatalog,
       metadataHive,
       ctx,
-      ctx.functionResourceLoader,
+      ctx.sessionState.functionResourceLoader,
       functionRegistry,
       conf,
       hiveconf)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveSqlParser.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveSqlParser.scala
index 90f10d5ebd..00f829d850 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveSqlParser.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveSqlParser.scala
@@ -14,12 +14,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.spark.sql.hive.execution
 
-import scala.collection.JavaConverters._
 import scala.util.Try
 
-import org.antlr.v4.runtime.{ParserRuleContext, Token}
+import org.antlr.v4.runtime.Token
 import org.apache.hadoop.hive.conf.HiveConf
 import org.apache.hadoop.hive.ql.parse.VariableSubstitution
 import org.apache.hadoop.hive.serde.serdeConstants
@@ -29,8 +29,6 @@ import org.apache.spark.sql.catalyst.parser._
 import org.apache.spark.sql.catalyst.parser.SqlBaseParser._
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.execution.SparkSqlAstBuilder
-import org.apache.spark.sql.execution.command.{CreateTable, CreateTableLike}
-import org.apache.spark.sql.hive.{CreateTableAsSelect => CTAS, CreateViewAsSelect => CreateView, HiveSerDe}
 import org.apache.spark.sql.internal.SQLConf
 
 /**
@@ -54,7 +52,8 @@ class HiveSqlParser(conf: SQLConf, hiveconf: HiveConf) extends AbstractSqlParser
 /**
  * Builder that converts an ANTLR ParseTree into a LogicalPlan/Expression/TableIdentifier.
  */
-class HiveSqlAstBuilder(conf: SQLConf) extends SparkSqlAstBuilder {
+class HiveSqlAstBuilder(conf: SQLConf) extends SparkSqlAstBuilder(conf) {
+
   import ParserUtils._
 
   /**
@@ -66,31 +65,6 @@ class HiveSqlAstBuilder(conf: SQLConf) extends SparkSqlAstBuilder {
   }
 
   /**
-   * Fail an unsupported Hive native command.
-   */
-  override def visitFailNativeCommand(
-      ctx: FailNativeCommandContext): LogicalPlan = withOrigin(ctx) {
-    val keywords = if (ctx.kws != null) {
-      Seq(ctx.kws.kw1, ctx.kws.kw2, ctx.kws.kw3).filter(_ != null).map(_.getText).mkString(" ")
-    } else {
-      // SET ROLE is the exception to the rule, because we handle this before other SET commands.
-      "SET ROLE"
-    }
-    throw new ParseException(s"Unsupported operation: $keywords", ctx)
-  }
-
-  /**
-   * Create an [[AddJar]] or [[AddFile]] command depending on the requested resource.
-   */
-  override def visitAddResource(ctx: AddResourceContext): LogicalPlan = withOrigin(ctx) {
-    ctx.identifier.getText.toLowerCase match {
-      case "file" => AddFile(remainder(ctx.identifier).trim)
-      case "jar" => AddJar(remainder(ctx.identifier).trim)
-      case other => throw new ParseException(s"Unsupported resource type '$other'.", ctx)
-    }
-  }
-
-  /**
    * Create an [[AnalyzeTable]] command. This currently only implements the NOSCAN option (other
    * options are passed on to Hive) e.g.:
    * {{{
@@ -108,202 +82,6 @@ class HiveSqlAstBuilder(conf: SQLConf) extends SparkSqlAstBuilder {
   }
 
   /**
-   * Create a [[CatalogStorageFormat]] for creating tables.
-   */
-  override def visitCreateFileFormat(
-      ctx: CreateFileFormatContext): CatalogStorageFormat = withOrigin(ctx) {
-    (ctx.fileFormat, ctx.storageHandler) match {
-      // Expected format: INPUTFORMAT input_format OUTPUTFORMAT output_format
-      case (c: TableFileFormatContext, null) =>
-        visitTableFileFormat(c)
-      // Expected format: SEQUENCEFILE | TEXTFILE | RCFILE | ORC | PARQUET | AVRO
-      case (c: GenericFileFormatContext, null) =>
-        visitGenericFileFormat(c)
-      case (null, storageHandler) =>
-        throw new ParseException("Operation not allowed: ... STORED BY storage_handler ...", ctx)
-      case _ =>
-        throw new ParseException("expected either STORED AS or STORED BY, not both", ctx)
-    }
-  }
-
-  /**
-   * Create a table, returning either a [[CreateTable]] or a [[CreateTableAsSelect]].
-   *
-   * This is not used to create datasource tables, which is handled through
-   * "CREATE TABLE ... USING ...".
-   *
-   * Note: several features are currently not supported - temporary tables, bucketing,
-   * skewed columns and storage handlers (STORED BY).
-   *
-   * Expected format:
-   * {{{
-   *   CREATE [TEMPORARY] [EXTERNAL] TABLE [IF NOT EXISTS] [db_name.]table_name
-   *   [(col1 data_type [COMMENT col_comment], ...)]
-   *   [COMMENT table_comment]
-   *   [PARTITIONED BY (col3 data_type [COMMENT col_comment], ...)]
-   *   [CLUSTERED BY (col1, ...) [SORTED BY (col1 [ASC|DESC], ...)] INTO num_buckets BUCKETS]
-   *   [SKEWED BY (col1, col2, ...) ON ((col_value, col_value, ...), ...) [STORED AS DIRECTORIES]]
-   *   [ROW FORMAT row_format]
-   *   [STORED AS file_format | STORED BY storage_handler_class [WITH SERDEPROPERTIES (...)]]
-   *   [LOCATION path]
-   *   [TBLPROPERTIES (property_name=property_value, ...)]
-   *   [AS select_statement];
-   * }}}
-   */
-  override def visitCreateTable(ctx: CreateTableContext): LogicalPlan = withOrigin(ctx) {
-    val (name, temp, ifNotExists, external) = visitCreateTableHeader(ctx.createTableHeader)
-    // TODO: implement temporary tables
-    if (temp) {
-      throw new ParseException(
-        "CREATE TEMPORARY TABLE is not supported yet. " +
-          "Please use registerTempTable as an alternative.", ctx)
-    }
-    if (ctx.skewSpec != null) {
-      throw new ParseException("Operation not allowed: CREATE TABLE ... SKEWED BY ...", ctx)
-    }
-    if (ctx.bucketSpec != null) {
-      throw new ParseException("Operation not allowed: CREATE TABLE ... CLUSTERED BY ...", ctx)
-    }
-    val tableType = if (external) {
-      CatalogTableType.EXTERNAL_TABLE
-    } else {
-      CatalogTableType.MANAGED_TABLE
-    }
-    val comment = Option(ctx.STRING).map(string)
-    val partitionCols = Option(ctx.partitionColumns).toSeq.flatMap(visitCatalogColumns)
-    val cols = Option(ctx.columns).toSeq.flatMap(visitCatalogColumns)
-    val properties = Option(ctx.tablePropertyList).map(visitTablePropertyList).getOrElse(Map.empty)
-    val selectQuery = Option(ctx.query).map(plan)
-
-    // Note: Hive requires partition columns to be distinct from the schema, so we need
-    // to include the partition columns here explicitly
-    val schema = cols ++ partitionCols
-
-    // Storage format
-    val defaultStorage: CatalogStorageFormat = {
-      val defaultStorageType = conf.getConfString("hive.default.fileformat", "textfile")
-      val defaultHiveSerde = HiveSerDe.sourceToSerDe(defaultStorageType, conf)
-      CatalogStorageFormat(
-        locationUri = None,
-        inputFormat = defaultHiveSerde.flatMap(_.inputFormat)
-          .orElse(Some("org.apache.hadoop.mapred.TextInputFormat")),
-        outputFormat = defaultHiveSerde.flatMap(_.outputFormat)
-          .orElse(Some("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat")),
-        // Note: Keep this unspecified because we use the presence of the serde to decide
-        // whether to convert a table created by CTAS to a datasource table.
-        serde = None,
-        serdeProperties = Map())
-    }
-    val fileStorage = Option(ctx.createFileFormat).map(visitCreateFileFormat)
-      .getOrElse(EmptyStorageFormat)
-    val rowStorage = Option(ctx.rowFormat).map(visitRowFormat).getOrElse(EmptyStorageFormat)
-    val location = Option(ctx.locationSpec).map(visitLocationSpec)
-    val storage = CatalogStorageFormat(
-      locationUri = location,
-      inputFormat = fileStorage.inputFormat.orElse(defaultStorage.inputFormat),
-      outputFormat = fileStorage.outputFormat.orElse(defaultStorage.outputFormat),
-      serde = rowStorage.serde.orElse(fileStorage.serde).orElse(defaultStorage.serde),
-      serdeProperties = rowStorage.serdeProperties ++ fileStorage.serdeProperties)
-
-    // TODO support the sql text - have a proper location for this!
-    val tableDesc = CatalogTable(
-      identifier = name,
-      tableType = tableType,
-      storage = storage,
-      schema = schema,
-      partitionColumnNames = partitionCols.map(_.name),
-      properties = properties,
-      comment = comment)
-
-    selectQuery match {
-      case Some(q) => CTAS(tableDesc, q, ifNotExists)
-      case None => CreateTable(tableDesc, ifNotExists)
-    }
-  }
-
-  /**
-   * Create a [[CreateTableLike]] command.
-   */
-  override def visitCreateTableLike(ctx: CreateTableLikeContext): LogicalPlan = withOrigin(ctx) {
-    val targetTable = visitTableIdentifier(ctx.target)
-    val sourceTable = visitTableIdentifier(ctx.source)
-    CreateTableLike(targetTable, sourceTable, ctx.EXISTS != null)
-  }
-
-  /**
-   * Create or replace a view. This creates a [[CreateViewAsSelect]] command.
-   *
-   * For example:
-   * {{{
-   *   CREATE VIEW [IF NOT EXISTS] [db_name.]view_name
-   *   [(column_name [COMMENT column_comment], ...) ]
-   *   [COMMENT view_comment]
-   *   [TBLPROPERTIES (property_name = property_value, ...)]
-   *   AS SELECT ...;
-   * }}}
-   */
-  override def visitCreateView(ctx: CreateViewContext): LogicalPlan = withOrigin(ctx) {
-    if (ctx.identifierList != null) {
-      throw new ParseException(s"Operation not allowed: partitioned views", ctx)
-    } else {
-      val identifiers = Option(ctx.identifierCommentList).toSeq.flatMap(_.identifierComment.asScala)
-      val schema = identifiers.map { ic =>
-        CatalogColumn(ic.identifier.getText, null, nullable = true, Option(ic.STRING).map(string))
-      }
-      createView(
-        ctx,
-        ctx.tableIdentifier,
-        comment = Option(ctx.STRING).map(string),
-        schema,
-        ctx.query,
-        Option(ctx.tablePropertyList).map(visitTablePropertyList).getOrElse(Map.empty),
-        ctx.EXISTS != null,
-        ctx.REPLACE != null
-      )
-    }
-  }
-
-  /**
-   * Alter the query of a view. This creates a [[CreateViewAsSelect]] command.
-   */
-  override def visitAlterViewQuery(ctx: AlterViewQueryContext): LogicalPlan = withOrigin(ctx) {
-    createView(
-      ctx,
-      ctx.tableIdentifier,
-      comment = None,
-      Seq.empty,
-      ctx.query,
-      Map.empty,
-      allowExist = false,
-      replace = true)
-  }
-
-  /**
-   * Create a [[CreateViewAsSelect]] command.
-   */
-  private def createView(
-      ctx: ParserRuleContext,
-      name: TableIdentifierContext,
-      comment: Option[String],
-      schema: Seq[CatalogColumn],
-      query: QueryContext,
-      properties: Map[String, String],
-      allowExist: Boolean,
-      replace: Boolean): LogicalPlan = {
-    val sql = Option(source(query))
-    val tableDesc = CatalogTable(
-      identifier = visitTableIdentifier(name),
-      tableType = CatalogTableType.VIRTUAL_VIEW,
-      schema = schema,
-      storage = EmptyStorageFormat,
-      properties = properties,
-      viewOriginalText = sql,
-      viewText = sql,
-      comment = comment)
-    CreateView(tableDesc, plan(query), allowExist, replace, command(ctx))
-  }
-
-  /**
    * Create a [[HiveScriptIOSchema]].
    */
   override protected def withScriptIOSchema(
@@ -371,115 +149,4 @@ class HiveSqlAstBuilder(conf: SQLConf) extends SparkSqlAstBuilder {
       reader, writer, schemaLess)
   }
-
-  /** Empty storage format for default values and copies. */
-  private val EmptyStorageFormat = CatalogStorageFormat(None, None, None, None, Map.empty)
-
-  /**
-   * Create a [[CatalogStorageFormat]].
-   */
-  override def visitTableFileFormat(
-      ctx: TableFileFormatContext): CatalogStorageFormat = withOrigin(ctx) {
-    EmptyStorageFormat.copy(
-      inputFormat = Option(string(ctx.inFmt)),
-      outputFormat = Option(string(ctx.outFmt)),
-      serde = Option(ctx.serdeCls).map(string)
-    )
-  }
-
-  /**
-   * Resolve a [[HiveSerDe]] based on the name given and return it as a [[CatalogStorageFormat]].
-   */
-  override def visitGenericFileFormat(
-      ctx: GenericFileFormatContext): CatalogStorageFormat = withOrigin(ctx) {
-    val source = ctx.identifier.getText
-    HiveSerDe.sourceToSerDe(source, conf) match {
-      case Some(s) =>
-        EmptyStorageFormat.copy(
-          inputFormat = s.inputFormat,
-          outputFormat = s.outputFormat,
-          serde = s.serde)
-      case None =>
-        throw new ParseException(s"Unrecognized file format in STORED AS clause: $source", ctx)
-    }
-  }
-
-  /**
-   * Create a [[RowFormat]] used for creating tables.
-   *
-   * Example format:
-   * {{{
-   *   SERDE serde_name [WITH SERDEPROPERTIES (k1=v1, k2=v2, ...)]
-   * }}}
-   *
-   * OR
-   *
-   * {{{
-   *   DELIMITED [FIELDS TERMINATED BY char [ESCAPED BY char]]
-   *   [COLLECTION ITEMS TERMINATED BY char]
-   *   [MAP KEYS TERMINATED BY char]
-   *   [LINES TERMINATED BY char]
-   *   [NULL DEFINED AS char]
-   * }}}
-   */
-  private def visitRowFormat(ctx: RowFormatContext): CatalogStorageFormat = withOrigin(ctx) {
-    ctx match {
-      case serde: RowFormatSerdeContext => visitRowFormatSerde(serde)
-      case delimited: RowFormatDelimitedContext => visitRowFormatDelimited(delimited)
-    }
-  }
-
-  /**
-   * Create SERDE row format name and properties pair.
-   */
-  override def visitRowFormatSerde(
-      ctx: RowFormatSerdeContext): CatalogStorageFormat = withOrigin(ctx) {
-    import ctx._
-    EmptyStorageFormat.copy(
-      serde = Option(string(name)),
-      serdeProperties = Option(tablePropertyList).map(visitTablePropertyList).getOrElse(Map.empty))
-  }
-
-  /**
-   * Create a delimited row format properties object.
-   */
-  override def visitRowFormatDelimited(
-      ctx: RowFormatDelimitedContext): CatalogStorageFormat = withOrigin(ctx) {
-    // Collect the entries if any.
-    def entry(key: String, value: Token): Seq[(String, String)] = {
-      Option(value).toSeq.map(x => key -> string(x))
-    }
-    // TODO we need proper support for the NULL format.
-    val entries = entry(serdeConstants.FIELD_DELIM, ctx.fieldsTerminatedBy) ++
-      entry(serdeConstants.SERIALIZATION_FORMAT, ctx.fieldsTerminatedBy) ++
-      entry(serdeConstants.ESCAPE_CHAR, ctx.escapedBy) ++
-      entry(serdeConstants.COLLECTION_DELIM, ctx.collectionItemsTerminatedBy) ++
-      entry(serdeConstants.MAPKEY_DELIM, ctx.keysTerminatedBy) ++
-      Option(ctx.linesSeparatedBy).toSeq.map { token =>
-        val value = string(token)
-        assert(
-          value == "\n",
-          s"LINES TERMINATED BY only supports newline '\\n' right now: $value",
-          ctx)
-        serdeConstants.LINE_DELIM -> value
-      }
-    EmptyStorageFormat.copy(serdeProperties = entries.toMap)
-  }
-
-  /**
-   * Create a sequence of [[CatalogColumn]]s from a column list
-   */
-  private def visitCatalogColumns(ctx: ColTypeListContext): Seq[CatalogColumn] = withOrigin(ctx) {
-    ctx.colType.asScala.map { col =>
-      CatalogColumn(
-        col.identifier.getText.toLowerCase,
-        // Note: for types like "STRUCT<myFirstName: STRING, myLastName: STRING>" we can't
-        // just convert the whole type string to lower case, otherwise the struct field names
-        // will no longer be case sensitive. Instead, we rely on our parser to get the proper
-        // case before passing it to Hive.
-        CatalystSqlParser.parseDataType(col.dataType.getText).simpleString,
-        nullable = true,
-        Option(col.STRING).map(string))
-    }
-  }
 }
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/commands.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/commands.scala
index b5ee9a6295..78f8bfe59f 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/commands.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/commands.scala
@@ -124,32 +124,6 @@ case class AnalyzeTable(tableName: String) extends RunnableCommand {
 }
 
 private[hive]
-case class AddJar(path: String) extends RunnableCommand {
-
-  override val output: Seq[Attribute] = {
-    val schema = StructType(
-      StructField("result", IntegerType, false) :: Nil)
-    schema.toAttributes
-  }
-
-  override def run(sqlContext: SQLContext): Seq[Row] = {
-    sqlContext.addJar(path)
-
-    Seq(Row(0))
-  }
-}
-
-private[hive]
-case class AddFile(path: String) extends RunnableCommand {
-
-  override def run(sqlContext: SQLContext): Seq[Row] = {
-    sqlContext.sessionState.runNativeSql(s"ADD FILE $path")
-    sqlContext.sparkContext.addFile(path)
-    Seq.empty[Row]
-  }
-}
-
-private[hive]
 case class CreateMetastoreDataSource(
     tableIdent: TableIdentifier,
     userSpecifiedSchema: Option[StructType],