Diffstat (limited to 'sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation')
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java  86
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java  70
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetCatalogsOperation.java  81
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java  236
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetFunctionsOperation.java  148
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetSchemasOperation.java  104
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java  93
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTablesOperation.java  135
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTypeInfoOperation.java  142
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java  213
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveTableTypeMapping.java  51
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/LogDivertAppender.java  209
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/MetadataOperation.java  135
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/Operation.java  322
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/OperationManager.java  284
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java  473
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/TableTypeMapping.java  44
-rw-r--r--  sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/TableTypeMappingFactory.java  37
18 files changed, 2863 insertions, 0 deletions
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java
new file mode 100644
index 0000000000..87ac39b051
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.hive.metastore.TableType;
+
+/**
+ * ClassicTableTypeMapping.
+ * Classic table type mapping:
+ * Managed Table ==> Table
+ * External Table ==> Table
+ * Virtual View ==> View
+ */
+public class ClassicTableTypeMapping implements TableTypeMapping {
+
+ public enum ClassicTableTypes {
+ TABLE,
+ VIEW,
+ }
+
+ private final Map<String, String> hiveToClientMap = new HashMap<String, String>();
+ private final Map<String, String> clientToHiveMap = new HashMap<String, String>();
+
+ public ClassicTableTypeMapping() {
+ hiveToClientMap.put(TableType.MANAGED_TABLE.toString(),
+ ClassicTableTypes.TABLE.toString());
+ hiveToClientMap.put(TableType.EXTERNAL_TABLE.toString(),
+ ClassicTableTypes.TABLE.toString());
+ hiveToClientMap.put(TableType.VIRTUAL_VIEW.toString(),
+ ClassicTableTypes.VIEW.toString());
+
+ clientToHiveMap.put(ClassicTableTypes.TABLE.toString(),
+ TableType.MANAGED_TABLE.toString());
+ clientToHiveMap.put(ClassicTableTypes.VIEW.toString(),
+ TableType.VIRTUAL_VIEW.toString());
+ }
+
+ @Override
+ public String mapToHiveType(String clientTypeName) {
+ if (clientToHiveMap.containsKey(clientTypeName)) {
+ return clientToHiveMap.get(clientTypeName);
+ } else {
+ return clientTypeName;
+ }
+ }
+
+ @Override
+ public String mapToClientType(String hiveTypeName) {
+ if (hiveToClientMap.containsKey(hiveTypeName)) {
+ return hiveToClientMap.get(hiveTypeName);
+ } else {
+ return hiveTypeName;
+ }
+ }
+
+ @Override
+ public Set<String> getTableTypeNames() {
+ Set<String> typeNameSet = new HashSet<String>();
+ for (ClassicTableTypes typeNames : ClassicTableTypes.values()) {
+ typeNameSet.add(typeNames.toString());
+ }
+ return typeNameSet;
+ }
+
+}
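
A minimal usage sketch of the classic mapping above (a hypothetical ClassicMappingSketch class, assuming this file and hive-metastore are on the classpath). Note that the reverse mapping is lossy: both managed and external tables surface to clients as "TABLE", but "TABLE" maps back only to MANAGED_TABLE.

    import org.apache.hive.service.cli.operation.ClassicTableTypeMapping;
    import org.apache.hive.service.cli.operation.TableTypeMapping;

    public class ClassicMappingSketch {
      public static void main(String[] args) {
        TableTypeMapping mapping = new ClassicTableTypeMapping();
        // Both managed and external tables collapse to the client type "TABLE".
        System.out.println(mapping.mapToClientType("MANAGED_TABLE"));  // TABLE
        System.out.println(mapping.mapToClientType("EXTERNAL_TABLE")); // TABLE
        System.out.println(mapping.mapToClientType("VIRTUAL_VIEW"));   // VIEW
        // The reverse direction is lossy: "TABLE" maps back to MANAGED_TABLE only.
        System.out.println(mapping.mapToHiveType("TABLE"));            // MANAGED_TABLE
        // Unrecognized names pass through unchanged.
        System.out.println(mapping.mapToHiveType("INDEX_TABLE"));      // INDEX_TABLE
      }
    }
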
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java
new file mode 100644
index 0000000000..3f2de108f0
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.service.cli.operation;
+
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.hive.ql.processors.CommandProcessor;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationType;
+import org.apache.hive.service.cli.session.HiveSession;
+
+public abstract class ExecuteStatementOperation extends Operation {
+ protected String statement = null;
+ protected Map<String, String> confOverlay = new HashMap<String, String>();
+
+ public ExecuteStatementOperation(HiveSession parentSession, String statement,
+ Map<String, String> confOverlay, boolean runInBackground) {
+ super(parentSession, OperationType.EXECUTE_STATEMENT, runInBackground);
+ this.statement = statement;
+ setConfOverlay(confOverlay);
+ }
+
+ public String getStatement() {
+ return statement;
+ }
+
+ public static ExecuteStatementOperation newExecuteStatementOperation(
+ HiveSession parentSession, String statement, Map<String, String> confOverlay, boolean runAsync)
+ throws HiveSQLException {
+ String[] tokens = statement.trim().split("\\s+");
+ CommandProcessor processor = null;
+ try {
+ processor = CommandProcessorFactory.getForHiveCommand(tokens, parentSession.getHiveConf());
+ } catch (SQLException e) {
+ throw new HiveSQLException(e.getMessage(), e.getSQLState(), e);
+ }
+ if (processor == null) {
+ return new SQLOperation(parentSession, statement, confOverlay, runAsync);
+ }
+ return new HiveCommandOperation(parentSession, statement, processor, confOverlay);
+ }
+
+ protected Map<String, String> getConfOverlay() {
+ return confOverlay;
+ }
+
+ protected void setConfOverlay(Map<String, String> confOverlay) {
+ if (confOverlay != null) {
+ this.confOverlay = confOverlay;
+ }
+ }
+}
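
newExecuteStatementOperation keys dispatch off the first whitespace-delimited token: if CommandProcessorFactory recognizes it as a Hive command (e.g. "set", "dfs", "add"), the statement becomes a HiveCommandOperation; otherwise it falls through to SQLOperation. A standalone sketch of just the tokenization step (illustrative statements, not the real factory logic):

    public class DispatchSketch {
      public static void main(String[] args) {
        String[] statements = {"set hive.exec.parallel = true", "  SELECT * FROM src  "};
        for (String statement : statements) {
          // Same shape as newExecuteStatementOperation: trim, split on whitespace.
          String[] tokens = statement.trim().split("\\s+");
          // "set" resolves to a CommandProcessor; "SELECT" does not, so the
          // second statement would run as a SQLOperation.
          System.out.println("first token: " + tokens[0]);
        }
      }
    }
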
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetCatalogsOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetCatalogsOperation.java
new file mode 100644
index 0000000000..8868ec18e0
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetCatalogsOperation.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.OperationType;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.RowSetFactory;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.session.HiveSession;
+
+/**
+ * GetCatalogsOperation.
+ *
+ */
+public class GetCatalogsOperation extends MetadataOperation {
+ private static final TableSchema RESULT_SET_SCHEMA = new TableSchema()
+ .addStringColumn("TABLE_CAT", "Catalog name. NULL if not applicable.");
+
+ private final RowSet rowSet;
+
+ protected GetCatalogsOperation(HiveSession parentSession) {
+ super(parentSession, OperationType.GET_CATALOGS);
+ rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion());
+ }
+
+ @Override
+ public void runInternal() throws HiveSQLException {
+ setState(OperationState.RUNNING);
+ try {
+ if (isAuthV2Enabled()) {
+ authorizeMetaGets(HiveOperationType.GET_CATALOGS, null);
+ }
+ setState(OperationState.FINISHED);
+ } catch (HiveSQLException e) {
+ setState(OperationState.ERROR);
+ throw e;
+ }
+
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hive.service.cli.Operation#getResultSetSchema()
+ */
+ @Override
+ public TableSchema getResultSetSchema() throws HiveSQLException {
+ return RESULT_SET_SCHEMA;
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long)
+ */
+ @Override
+ public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
+ assertState(OperationState.FINISHED);
+ validateDefaultFetchOrientation(orientation);
+ if (orientation.equals(FetchOrientation.FETCH_FIRST)) {
+ rowSet.setStartOffset(0);
+ }
+ return rowSet.extractSubset((int)maxRows);
+ }
+}
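
The FETCH_FIRST branch in getNextRowSet above recurs in every metadata operation in this patch: rewinding the row set to offset 0 lets a client re-read results from the start, while extractSubset advances the offset. A self-contained sketch of those offset semantics (a list-backed stand-in, not Hive's RowSet implementation):

    import java.util.ArrayList;
    import java.util.List;

    public class RowSetPagingSketch {
      private final List<String> rows = new ArrayList<>();
      private int startOffset = 0;

      void addRow(String row) { rows.add(row); }
      void setStartOffset(int offset) { startOffset = offset; } // FETCH_FIRST -> 0

      List<String> extractSubset(int maxRows) {
        int end = Math.min(rows.size(), startOffset + maxRows);
        List<String> subset = new ArrayList<>(rows.subList(startOffset, end));
        startOffset = end; // the next fetch resumes from here
        return subset;
      }

      public static void main(String[] args) {
        RowSetPagingSketch rowSet = new RowSetPagingSketch();
        rowSet.addRow("r1"); rowSet.addRow("r2"); rowSet.addRow("r3");
        System.out.println(rowSet.extractSubset(2)); // [r1, r2]
        System.out.println(rowSet.extractSubset(2)); // [r3]
        rowSet.setStartOffset(0);                    // FETCH_FIRST
        System.out.println(rowSet.extractSubset(2)); // [r1, r2] again
      }
    }
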
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java
new file mode 100644
index 0000000000..309f10f640
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java
@@ -0,0 +1,236 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.sql.DatabaseMetaData;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.plan.HiveOperation;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType;
+import org.apache.hive.service.cli.ColumnDescriptor;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.OperationType;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.RowSetFactory;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.Type;
+import org.apache.hive.service.cli.session.HiveSession;
+
+/**
+ * GetColumnsOperation.
+ *
+ */
+public class GetColumnsOperation extends MetadataOperation {
+
+ private static final TableSchema RESULT_SET_SCHEMA = new TableSchema()
+ .addPrimitiveColumn("TABLE_CAT", Type.STRING_TYPE,
+ "Catalog name. NULL if not applicable")
+ .addPrimitiveColumn("TABLE_SCHEM", Type.STRING_TYPE,
+ "Schema name")
+ .addPrimitiveColumn("TABLE_NAME", Type.STRING_TYPE,
+ "Table name")
+ .addPrimitiveColumn("COLUMN_NAME", Type.STRING_TYPE,
+ "Column name")
+ .addPrimitiveColumn("DATA_TYPE", Type.INT_TYPE,
+ "SQL type from java.sql.Types")
+ .addPrimitiveColumn("TYPE_NAME", Type.STRING_TYPE,
+ "Data source dependent type name, for a UDT the type name is fully qualified")
+ .addPrimitiveColumn("COLUMN_SIZE", Type.INT_TYPE,
+ "Column size. For char or date types this is the maximum number of characters,"
+ + " for numeric or decimal types this is precision.")
+ .addPrimitiveColumn("BUFFER_LENGTH", Type.TINYINT_TYPE,
+ "Unused")
+ .addPrimitiveColumn("DECIMAL_DIGITS", Type.INT_TYPE,
+ "The number of fractional digits")
+ .addPrimitiveColumn("NUM_PREC_RADIX", Type.INT_TYPE,
+ "Radix (typically either 10 or 2)")
+ .addPrimitiveColumn("NULLABLE", Type.INT_TYPE,
+ "Is NULL allowed")
+ .addPrimitiveColumn("REMARKS", Type.STRING_TYPE,
+ "Comment describing column (may be null)")
+ .addPrimitiveColumn("COLUMN_DEF", Type.STRING_TYPE,
+ "Default value (may be null)")
+ .addPrimitiveColumn("SQL_DATA_TYPE", Type.INT_TYPE,
+ "Unused")
+ .addPrimitiveColumn("SQL_DATETIME_SUB", Type.INT_TYPE,
+ "Unused")
+ .addPrimitiveColumn("CHAR_OCTET_LENGTH", Type.INT_TYPE,
+ "For char types the maximum number of bytes in the column")
+ .addPrimitiveColumn("ORDINAL_POSITION", Type.INT_TYPE,
+ "Index of column in table (starting at 1)")
+ .addPrimitiveColumn("IS_NULLABLE", Type.STRING_TYPE,
+ "\"NO\" means column definitely does not allow NULL values; "
+ + "\"YES\" means the column might allow NULL values. An empty "
+ + "string means nobody knows.")
+ .addPrimitiveColumn("SCOPE_CATALOG", Type.STRING_TYPE,
+ "Catalog of table that is the scope of a reference attribute "
+ + "(null if DATA_TYPE isn't REF)")
+ .addPrimitiveColumn("SCOPE_SCHEMA", Type.STRING_TYPE,
+ "Schema of table that is the scope of a reference attribute "
+ + "(null if the DATA_TYPE isn't REF)")
+ .addPrimitiveColumn("SCOPE_TABLE", Type.STRING_TYPE,
+ "Table name that this the scope of a reference attribure "
+ + "(null if the DATA_TYPE isn't REF)")
+ .addPrimitiveColumn("SOURCE_DATA_TYPE", Type.SMALLINT_TYPE,
+ "Source type of a distinct type or user-generated Ref type, "
+ + "SQL type from java.sql.Types (null if DATA_TYPE isn't DISTINCT or user-generated REF)")
+ .addPrimitiveColumn("IS_AUTO_INCREMENT", Type.STRING_TYPE,
+ "Indicates whether this column is auto incremented.");
+
+ private final String catalogName;
+ private final String schemaName;
+ private final String tableName;
+ private final String columnName;
+
+ private final RowSet rowSet;
+
+ protected GetColumnsOperation(HiveSession parentSession, String catalogName, String schemaName,
+ String tableName, String columnName) {
+ super(parentSession, OperationType.GET_COLUMNS);
+ this.catalogName = catalogName;
+ this.schemaName = schemaName;
+ this.tableName = tableName;
+ this.columnName = columnName;
+ this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion());
+ }
+
+ @Override
+ public void runInternal() throws HiveSQLException {
+ setState(OperationState.RUNNING);
+ try {
+ IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient();
+ String schemaPattern = convertSchemaPattern(schemaName);
+ String tablePattern = convertIdentifierPattern(tableName, true);
+
+ Pattern columnPattern = null;
+ if (columnName != null) {
+ columnPattern = Pattern.compile(convertIdentifierPattern(columnName, false));
+ }
+
+ List<String> dbNames = metastoreClient.getDatabases(schemaPattern);
+ Collections.sort(dbNames);
+ Map<String, List<String>> db2Tabs = new HashMap<>();
+
+ for (String dbName : dbNames) {
+ List<String> tableNames = metastoreClient.getTables(dbName, tablePattern);
+ Collections.sort(tableNames);
+ db2Tabs.put(dbName, tableNames);
+ }
+
+ if (isAuthV2Enabled()) {
+ List<HivePrivilegeObject> privObjs = getPrivObjs(db2Tabs);
+ String cmdStr = "catalog : " + catalogName + ", schemaPattern : " + schemaName
+ + ", tablePattern : " + tableName;
+ authorizeMetaGets(HiveOperationType.GET_COLUMNS, privObjs, cmdStr);
+ }
+
+ for (Entry<String, List<String>> dbTabs : db2Tabs.entrySet()) {
+ String dbName = dbTabs.getKey();
+ List<String> tableNames = dbTabs.getValue();
+ for (Table table : metastoreClient.getTableObjectsByName(dbName, tableNames)) {
+ TableSchema schema = new TableSchema(metastoreClient.getSchema(dbName, table.getTableName()));
+ for (ColumnDescriptor column : schema.getColumnDescriptors()) {
+ if (columnPattern != null && !columnPattern.matcher(column.getName()).matches()) {
+ continue;
+ }
+ Object[] rowData = new Object[] {
+ null, // TABLE_CAT
+ table.getDbName(), // TABLE_SCHEM
+ table.getTableName(), // TABLE_NAME
+ column.getName(), // COLUMN_NAME
+ column.getType().toJavaSQLType(), // DATA_TYPE
+ column.getTypeName(), // TYPE_NAME
+ column.getTypeDescriptor().getColumnSize(), // COLUMN_SIZE
+ null, // BUFFER_LENGTH, unused
+ column.getTypeDescriptor().getDecimalDigits(), // DECIMAL_DIGITS
+ column.getType().getNumPrecRadix(), // NUM_PREC_RADIX
+ DatabaseMetaData.columnNullable, // NULLABLE
+ column.getComment(), // REMARKS
+ null, // COLUMN_DEF
+ null, // SQL_DATA_TYPE
+ null, // SQL_DATETIME_SUB
+ null, // CHAR_OCTET_LENGTH
+ column.getOrdinalPosition(), // ORDINAL_POSITION
+ "YES", // IS_NULLABLE
+ null, // SCOPE_CATALOG
+ null, // SCOPE_SCHEMA
+ null, // SCOPE_TABLE
+ null, // SOURCE_DATA_TYPE
+ "NO", // IS_AUTO_INCREMENT
+ };
+ rowSet.addRow(rowData);
+ }
+ }
+ }
+ setState(OperationState.FINISHED);
+ } catch (Exception e) {
+ setState(OperationState.ERROR);
+ throw new HiveSQLException(e);
+ }
+
+ }
+
+
+ private List<HivePrivilegeObject> getPrivObjs(Map<String, List<String>> db2Tabs) {
+ List<HivePrivilegeObject> privObjs = new ArrayList<>();
+ for (Entry<String, List<String>> dbTabs : db2Tabs.entrySet()) {
+ for (String tabName : dbTabs.getValue()) {
+ privObjs.add(new HivePrivilegeObject(HivePrivilegeObjectType.TABLE_OR_VIEW, dbTabs.getKey(),
+ tabName));
+ }
+ }
+ return privObjs;
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hive.service.cli.Operation#getResultSetSchema()
+ */
+ @Override
+ public TableSchema getResultSetSchema() throws HiveSQLException {
+ assertState(OperationState.FINISHED);
+ return RESULT_SET_SCHEMA;
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long)
+ */
+ @Override
+ public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
+ assertState(OperationState.FINISHED);
+ validateDefaultFetchOrientation(orientation);
+ if (orientation.equals(FetchOrientation.FETCH_FIRST)) {
+ rowSet.setStartOffset(0);
+ }
+ return rowSet.extractSubset((int)maxRows);
+ }
+
+}
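
runInternal above compiles the column filter from convertIdentifierPattern (defined in MetadataOperation, elsewhere in this patch), which translates JDBC search patterns into regular expressions. A sketch of the usual JDBC pattern semantics, with a hypothetical toRegex helper standing in for the real conversion ('%' matches any sequence, '_' exactly one character; escape handling omitted):

    import java.util.regex.Pattern;

    public class ColumnPatternSketch {
      // Hypothetical stand-in for convertIdentifierPattern.
      static String toRegex(String jdbcPattern) {
        return jdbcPattern.replace("%", ".*").replace("_", ".");
      }

      public static void main(String[] args) {
        Pattern columnPattern = Pattern.compile(toRegex("user%"));
        for (String col : new String[] {"user_id", "username", "id"}) {
          // Same matches() call as the filter in runInternal.
          System.out.println(col + " -> " + columnPattern.matcher(col).matches());
        }
      }
    }
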
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetFunctionsOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetFunctionsOperation.java
new file mode 100644
index 0000000000..6df1e8a227
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetFunctionsOperation.java
@@ -0,0 +1,148 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.sql.DatabaseMetaData;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.exec.FunctionInfo;
+import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObjectUtils;
+import org.apache.hive.service.cli.CLIServiceUtils;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.OperationType;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.RowSetFactory;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.Type;
+import org.apache.hive.service.cli.session.HiveSession;
+import org.apache.thrift.TException;
+
+/**
+ * GetFunctionsOperation.
+ *
+ */
+public class GetFunctionsOperation extends MetadataOperation {
+ private static final TableSchema RESULT_SET_SCHEMA = new TableSchema()
+ .addPrimitiveColumn("FUNCTION_CAT", Type.STRING_TYPE,
+ "Function catalog (may be null)")
+ .addPrimitiveColumn("FUNCTION_SCHEM", Type.STRING_TYPE,
+ "Function schema (may be null)")
+ .addPrimitiveColumn("FUNCTION_NAME", Type.STRING_TYPE,
+ "Function name. This is the name used to invoke the function")
+ .addPrimitiveColumn("REMARKS", Type.STRING_TYPE,
+ "Explanatory comment on the function")
+ .addPrimitiveColumn("FUNCTION_TYPE", Type.INT_TYPE,
+ "Kind of function.")
+ .addPrimitiveColumn("SPECIFIC_NAME", Type.STRING_TYPE,
+ "The name which uniquely identifies this function within its schema");
+
+ private final String catalogName;
+ private final String schemaName;
+ private final String functionName;
+
+ private final RowSet rowSet;
+
+ public GetFunctionsOperation(HiveSession parentSession,
+ String catalogName, String schemaName, String functionName) {
+ super(parentSession, OperationType.GET_FUNCTIONS);
+ this.catalogName = catalogName;
+ this.schemaName = schemaName;
+ this.functionName = functionName;
+ this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion());
+ }
+
+ @Override
+ public void runInternal() throws HiveSQLException {
+ setState(OperationState.RUNNING);
+ if (isAuthV2Enabled()) {
+ // get databases for schema pattern
+ IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient();
+ String schemaPattern = convertSchemaPattern(schemaName);
+ List<String> matchingDbs;
+ try {
+ matchingDbs = metastoreClient.getDatabases(schemaPattern);
+ } catch (TException e) {
+ setState(OperationState.ERROR);
+ throw new HiveSQLException(e);
+ }
+ // authorize this call on the schema objects
+ List<HivePrivilegeObject> privObjs = HivePrivilegeObjectUtils
+ .getHivePrivDbObjects(matchingDbs);
+ String cmdStr = "catalog : " + catalogName + ", schemaPattern : " + schemaName;
+ authorizeMetaGets(HiveOperationType.GET_FUNCTIONS, privObjs, cmdStr);
+ }
+
+ try {
+ if ((null == catalogName || "".equals(catalogName))
+ && (null == schemaName || "".equals(schemaName))) {
+ Set<String> functionNames = FunctionRegistry
+ .getFunctionNames(CLIServiceUtils.patternToRegex(functionName));
+ for (String functionName : functionNames) {
+ FunctionInfo functionInfo = FunctionRegistry.getFunctionInfo(functionName);
+ Object rowData[] = new Object[] {
+ null, // FUNCTION_CAT
+ null, // FUNCTION_SCHEM
+ functionInfo.getDisplayName(), // FUNCTION_NAME
+ "", // REMARKS
+ (functionInfo.isGenericUDTF() ?
+ DatabaseMetaData.functionReturnsTable
+ : DatabaseMetaData.functionNoTable), // FUNCTION_TYPE
+ functionInfo.getClass().getCanonicalName()
+ };
+ rowSet.addRow(rowData);
+ }
+ }
+ setState(OperationState.FINISHED);
+ } catch (Exception e) {
+ setState(OperationState.ERROR);
+ throw new HiveSQLException(e);
+ }
+ }
+
+
+ /* (non-Javadoc)
+ * @see org.apache.hive.service.cli.Operation#getResultSetSchema()
+ */
+ @Override
+ public TableSchema getResultSetSchema() throws HiveSQLException {
+ assertState(OperationState.FINISHED);
+ return RESULT_SET_SCHEMA;
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long)
+ */
+ @Override
+ public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
+ assertState(OperationState.FINISHED);
+ validateDefaultFetchOrientation(orientation);
+ if (orientation.equals(FetchOrientation.FETCH_FIRST)) {
+ rowSet.setStartOffset(0);
+ }
+ return rowSet.extractSubset((int)maxRows);
+ }
+}
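
The FUNCTION_TYPE column above carries the java.sql.DatabaseMetaData function-type constants: functionReturnsTable for generic UDTFs, functionNoTable for everything else. A quick look at the values involved:

    import java.sql.DatabaseMetaData;

    public class FunctionTypeSketch {
      public static void main(String[] args) {
        // Constants written into the FUNCTION_TYPE column by the operation above.
        System.out.println("functionNoTable = " + DatabaseMetaData.functionNoTable);           // 1
        System.out.println("functionReturnsTable = " + DatabaseMetaData.functionReturnsTable); // 2
      }
    }
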
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetSchemasOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetSchemasOperation.java
new file mode 100644
index 0000000000..e56686abb7
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetSchemasOperation.java
@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.OperationType;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.RowSetFactory;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.session.HiveSession;
+
+/**
+ * GetSchemasOperation.
+ *
+ */
+public class GetSchemasOperation extends MetadataOperation {
+ private final String catalogName;
+ private final String schemaName;
+
+ private static final TableSchema RESULT_SET_SCHEMA = new TableSchema()
+ .addStringColumn("TABLE_SCHEM", "Schema name.")
+ .addStringColumn("TABLE_CATALOG", "Catalog name.");
+
+ private RowSet rowSet;
+
+ protected GetSchemasOperation(HiveSession parentSession,
+ String catalogName, String schemaName) {
+ super(parentSession, OperationType.GET_SCHEMAS);
+ this.catalogName = catalogName;
+ this.schemaName = schemaName;
+ this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion());
+ }
+
+ @Override
+ public void runInternal() throws HiveSQLException {
+ setState(OperationState.RUNNING);
+ if (isAuthV2Enabled()) {
+ String cmdStr = "catalog : " + catalogName + ", schemaPattern : " + schemaName;
+ authorizeMetaGets(HiveOperationType.GET_SCHEMAS, null, cmdStr);
+ }
+ try {
+ IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient();
+ String schemaPattern = convertSchemaPattern(schemaName);
+ for (String dbName : metastoreClient.getDatabases(schemaPattern)) {
+ rowSet.addRow(new Object[] {dbName, DEFAULT_HIVE_CATALOG});
+ }
+ setState(OperationState.FINISHED);
+ } catch (Exception e) {
+ setState(OperationState.ERROR);
+ throw new HiveSQLException(e);
+ }
+ }
+
+
+ /* (non-Javadoc)
+ * @see org.apache.hive.service.cli.Operation#getResultSetSchema()
+ */
+ @Override
+ public TableSchema getResultSetSchema() throws HiveSQLException {
+ assertState(OperationState.FINISHED);
+ return RESULT_SET_SCHEMA;
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long)
+ */
+ @Override
+ public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
+ assertState(OperationState.FINISHED);
+ validateDefaultFetchOrientation(orientation);
+ if (orientation.equals(FetchOrientation.FETCH_FIRST)) {
+ rowSet.setStartOffset(0);
+ }
+ return rowSet.extractSubset((int)maxRows);
+ }
+}
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java
new file mode 100644
index 0000000000..a09b39a4e0
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.OperationType;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.RowSetFactory;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.session.HiveSession;
+
+/**
+ * GetTableTypesOperation.
+ *
+ */
+public class GetTableTypesOperation extends MetadataOperation {
+
+ protected static TableSchema RESULT_SET_SCHEMA = new TableSchema()
+ .addStringColumn("TABLE_TYPE", "Table type name.");
+
+ private final RowSet rowSet;
+ private final TableTypeMapping tableTypeMapping;
+
+ protected GetTableTypesOperation(HiveSession parentSession) {
+ super(parentSession, OperationType.GET_TABLE_TYPES);
+ String tableMappingStr = getParentSession().getHiveConf().
+ getVar(HiveConf.ConfVars.HIVE_SERVER2_TABLE_TYPE_MAPPING);
+ tableTypeMapping =
+ TableTypeMappingFactory.getTableTypeMapping(tableMappingStr);
+ rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion());
+ }
+
+ @Override
+ public void runInternal() throws HiveSQLException {
+ setState(OperationState.RUNNING);
+ if (isAuthV2Enabled()) {
+ authorizeMetaGets(HiveOperationType.GET_TABLETYPES, null);
+ }
+ try {
+ for (TableType type : TableType.values()) {
+ rowSet.addRow(new String[] {tableTypeMapping.mapToClientType(type.toString())});
+ }
+ setState(OperationState.FINISHED);
+ } catch (Exception e) {
+ setState(OperationState.ERROR);
+ throw new HiveSQLException(e);
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hive.service.cli.Operation#getResultSetSchema()
+ */
+ @Override
+ public TableSchema getResultSetSchema() throws HiveSQLException {
+ assertState(OperationState.FINISHED);
+ return RESULT_SET_SCHEMA;
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long)
+ */
+ @Override
+ public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
+ assertState(OperationState.FINISHED);
+ validateDefaultFetchOrientation(orientation);
+ if (orientation.equals(FetchOrientation.FETCH_FIRST)) {
+ rowSet.setStartOffset(0);
+ }
+ return rowSet.extractSubset((int)maxRows);
+ }
+
+}
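
runInternal above emits one row per hive TableType, each mapped through the configured TableTypeMapping. A sketch of the same loop under the classic mapping from earlier in this patch (note that MANAGED_TABLE and EXTERNAL_TABLE both surface as "TABLE", so the output can contain duplicates):

    import org.apache.hadoop.hive.metastore.TableType;
    import org.apache.hive.service.cli.operation.ClassicTableTypeMapping;
    import org.apache.hive.service.cli.operation.TableTypeMapping;

    public class TableTypesSketch {
      public static void main(String[] args) {
        TableTypeMapping mapping = new ClassicTableTypeMapping();
        // Same loop shape as runInternal: one row per hive TableType.
        for (TableType type : TableType.values()) {
          System.out.println(mapping.mapToClientType(type.toString()));
        }
      }
    }
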
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTablesOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTablesOperation.java
new file mode 100644
index 0000000000..0e2fdc657c
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTablesOperation.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObjectUtils;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.OperationType;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.RowSetFactory;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.session.HiveSession;
+
+/**
+ * GetTablesOperation.
+ *
+ */
+public class GetTablesOperation extends MetadataOperation {
+
+ private final String catalogName;
+ private final String schemaName;
+ private final String tableName;
+ private final List<String> tableTypes = new ArrayList<String>();
+ private final RowSet rowSet;
+ private final TableTypeMapping tableTypeMapping;
+
+
+ private static final TableSchema RESULT_SET_SCHEMA = new TableSchema()
+ .addStringColumn("TABLE_CAT", "Catalog name. NULL if not applicable.")
+ .addStringColumn("TABLE_SCHEM", "Schema name.")
+ .addStringColumn("TABLE_NAME", "Table name.")
+ .addStringColumn("TABLE_TYPE", "The table type, e.g. \"TABLE\", \"VIEW\", etc.")
+ .addStringColumn("REMARKS", "Comments about the table.");
+
+ protected GetTablesOperation(HiveSession parentSession,
+ String catalogName, String schemaName, String tableName,
+ List<String> tableTypes) {
+ super(parentSession, OperationType.GET_TABLES);
+ this.catalogName = catalogName;
+ this.schemaName = schemaName;
+ this.tableName = tableName;
+ String tableMappingStr = getParentSession().getHiveConf().
+ getVar(HiveConf.ConfVars.HIVE_SERVER2_TABLE_TYPE_MAPPING);
+ tableTypeMapping =
+ TableTypeMappingFactory.getTableTypeMapping(tableMappingStr);
+ if (tableTypes != null) {
+ this.tableTypes.addAll(tableTypes);
+ }
+ this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion());
+ }
+
+ @Override
+ public void runInternal() throws HiveSQLException {
+ setState(OperationState.RUNNING);
+ try {
+ IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient();
+ String schemaPattern = convertSchemaPattern(schemaName);
+ List<String> matchingDbs = metastoreClient.getDatabases(schemaPattern);
+ if (isAuthV2Enabled()) {
+ List<HivePrivilegeObject> privObjs = HivePrivilegeObjectUtils.getHivePrivDbObjects(matchingDbs);
+ String cmdStr = "catalog : " + catalogName + ", schemaPattern : " + schemaName;
+ authorizeMetaGets(HiveOperationType.GET_TABLES, privObjs, cmdStr);
+ }
+
+ String tablePattern = convertIdentifierPattern(tableName, true);
+ for (String dbName : metastoreClient.getDatabases(schemaPattern)) {
+ List<String> tableNames = metastoreClient.getTables(dbName, tablePattern);
+ for (Table table : metastoreClient.getTableObjectsByName(dbName, tableNames)) {
+ Object[] rowData = new Object[] {
+ DEFAULT_HIVE_CATALOG,
+ table.getDbName(),
+ table.getTableName(),
+ tableTypeMapping.mapToClientType(table.getTableType()),
+ table.getParameters().get("comment")
+ };
+ if (tableTypes.isEmpty() || tableTypes.contains(
+ tableTypeMapping.mapToClientType(table.getTableType()))) {
+ rowSet.addRow(rowData);
+ }
+ }
+ }
+ setState(OperationState.FINISHED);
+ } catch (Exception e) {
+ setState(OperationState.ERROR);
+ throw new HiveSQLException(e);
+ }
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hive.service.cli.Operation#getResultSetSchema()
+ */
+ @Override
+ public TableSchema getResultSetSchema() throws HiveSQLException {
+ assertState(OperationState.FINISHED);
+ return RESULT_SET_SCHEMA;
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long)
+ */
+ @Override
+ public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
+ assertState(OperationState.FINISHED);
+ validateDefaultFetchOrientation(orientation);
+ if (orientation.equals(FetchOrientation.FETCH_FIRST)) {
+ rowSet.setStartOffset(0);
+ }
+ return rowSet.extractSubset((int)maxRows);
+ }
+}
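
The filter at the end of runInternal compares the client-supplied tableTypes list against the mapped client type, so under ClassicTableTypeMapping a request for "TABLE" matches managed and external tables alike. A small sketch of that interaction (illustrative type names):

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hive.service.cli.operation.ClassicTableTypeMapping;
    import org.apache.hive.service.cli.operation.TableTypeMapping;

    public class TableFilterSketch {
      public static void main(String[] args) {
        TableTypeMapping mapping = new ClassicTableTypeMapping();
        List<String> requestedTypes = Arrays.asList("TABLE"); // as sent by a JDBC client
        for (String hiveType : new String[] {"MANAGED_TABLE", "EXTERNAL_TABLE", "VIRTUAL_VIEW"}) {
          String clientType = mapping.mapToClientType(hiveType);
          boolean kept = requestedTypes.isEmpty() || requestedTypes.contains(clientType);
          System.out.println(hiveType + " -> " + clientType + ", kept: " + kept);
        }
      }
    }
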
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTypeInfoOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTypeInfoOperation.java
new file mode 100644
index 0000000000..2a0fec2771
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTypeInfoOperation.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.OperationType;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.RowSetFactory;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.Type;
+import org.apache.hive.service.cli.session.HiveSession;
+
+/**
+ * GetTypeInfoOperation.
+ *
+ */
+public class GetTypeInfoOperation extends MetadataOperation {
+
+ private final static TableSchema RESULT_SET_SCHEMA = new TableSchema()
+ .addPrimitiveColumn("TYPE_NAME", Type.STRING_TYPE,
+ "Type name")
+ .addPrimitiveColumn("DATA_TYPE", Type.INT_TYPE,
+ "SQL data type from java.sql.Types")
+ .addPrimitiveColumn("PRECISION", Type.INT_TYPE,
+ "Maximum precision")
+ .addPrimitiveColumn("LITERAL_PREFIX", Type.STRING_TYPE,
+ "Prefix used to quote a literal (may be null)")
+ .addPrimitiveColumn("LITERAL_SUFFIX", Type.STRING_TYPE,
+ "Suffix used to quote a literal (may be null)")
+ .addPrimitiveColumn("CREATE_PARAMS", Type.STRING_TYPE,
+ "Parameters used in creating the type (may be null)")
+ .addPrimitiveColumn("NULLABLE", Type.SMALLINT_TYPE,
+ "Can you use NULL for this type")
+ .addPrimitiveColumn("CASE_SENSITIVE", Type.BOOLEAN_TYPE,
+ "Is it case sensitive")
+ .addPrimitiveColumn("SEARCHABLE", Type.SMALLINT_TYPE,
+ "Can you use \"WHERE\" based on this type")
+ .addPrimitiveColumn("UNSIGNED_ATTRIBUTE", Type.BOOLEAN_TYPE,
+ "Is it unsigned")
+ .addPrimitiveColumn("FIXED_PREC_SCALE", Type.BOOLEAN_TYPE,
+ "Can it be a money value")
+ .addPrimitiveColumn("AUTO_INCREMENT", Type.BOOLEAN_TYPE,
+ "Can it be used for an auto-increment value")
+ .addPrimitiveColumn("LOCAL_TYPE_NAME", Type.STRING_TYPE,
+ "Localized version of type name (may be null)")
+ .addPrimitiveColumn("MINIMUM_SCALE", Type.SMALLINT_TYPE,
+ "Minimum scale supported")
+ .addPrimitiveColumn("MAXIMUM_SCALE", Type.SMALLINT_TYPE,
+ "Maximum scale supported")
+ .addPrimitiveColumn("SQL_DATA_TYPE", Type.INT_TYPE,
+ "Unused")
+ .addPrimitiveColumn("SQL_DATETIME_SUB", Type.INT_TYPE,
+ "Unused")
+ .addPrimitiveColumn("NUM_PREC_RADIX", Type.INT_TYPE,
+ "Usually 2 or 10");
+
+ private final RowSet rowSet;
+
+ protected GetTypeInfoOperation(HiveSession parentSession) {
+ super(parentSession, OperationType.GET_TYPE_INFO);
+ rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion());
+ }
+
+ @Override
+ public void runInternal() throws HiveSQLException {
+ setState(OperationState.RUNNING);
+ if (isAuthV2Enabled()) {
+ authorizeMetaGets(HiveOperationType.GET_TYPEINFO, null);
+ }
+ try {
+ for (Type type : Type.values()) {
+ Object[] rowData = new Object[] {
+ type.getName(), // TYPE_NAME
+ type.toJavaSQLType(), // DATA_TYPE
+ type.getMaxPrecision(), // PRECISION
+ type.getLiteralPrefix(), // LITERAL_PREFIX
+ type.getLiteralSuffix(), // LITERAL_SUFFIX
+ type.getCreateParams(), // CREATE_PARAMS
+ type.getNullable(), // NULLABLE
+ type.isCaseSensitive(), // CASE_SENSITIVE
+ type.getSearchable(), // SEARCHABLE
+ type.isUnsignedAttribute(), // UNSIGNED_ATTRIBUTE
+ type.isFixedPrecScale(), // FIXED_PREC_SCALE
+ type.isAutoIncrement(), // AUTO_INCREMENT
+ type.getLocalizedName(), // LOCAL_TYPE_NAME
+ type.getMinimumScale(), // MINIMUM_SCALE
+ type.getMaximumScale(), // MAXIMUM_SCALE
+ null, // SQL_DATA_TYPE, unused
+ null, // SQL_DATETIME_SUB, unused
+ type.getNumPrecRadix() //NUM_PREC_RADIX
+ };
+ rowSet.addRow(rowData);
+ }
+ setState(OperationState.FINISHED);
+ } catch (Exception e) {
+ setState(OperationState.ERROR);
+ throw new HiveSQLException(e);
+ }
+ }
+
+
+ /* (non-Javadoc)
+ * @see org.apache.hive.service.cli.Operation#getResultSetSchema()
+ */
+ @Override
+ public TableSchema getResultSetSchema() throws HiveSQLException {
+ assertState(OperationState.FINISHED);
+ return RESULT_SET_SCHEMA;
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long)
+ */
+ @Override
+ public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
+ assertState(OperationState.FINISHED);
+ validateDefaultFetchOrientation(orientation);
+ if (orientation.equals(FetchOrientation.FETCH_FIRST)) {
+ rowSet.setStartOffset(0);
+ }
+ return rowSet.extractSubset((int)maxRows);
+ }
+}
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java
new file mode 100644
index 0000000000..bcc66cf811
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java
@@ -0,0 +1,213 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.io.UnsupportedEncodingException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.metastore.api.Schema;
+import org.apache.hadoop.hive.ql.processors.CommandProcessor;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.RowSetFactory;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.session.HiveSession;
+
+/**
+ * Executes a HiveCommand
+ */
+public class HiveCommandOperation extends ExecuteStatementOperation {
+ private CommandProcessor commandProcessor;
+ private TableSchema resultSchema = null;
+
+ /**
+ * Processors other than Hive queries (Driver) write their output to session.out (a temp
+ * file) first; the fetchOne/fetchN/fetchAll calls then read it back from that file.
+ */
+ private BufferedReader resultReader;
+
+
+ protected HiveCommandOperation(HiveSession parentSession, String statement,
+ CommandProcessor commandProcessor, Map<String, String> confOverlay) {
+ super(parentSession, statement, confOverlay, false);
+ this.commandProcessor = commandProcessor;
+ setupSessionIO(parentSession.getSessionState());
+ }
+
+ private void setupSessionIO(SessionState sessionState) {
+ try {
+ LOG.info("Putting temp output to file " + sessionState.getTmpOutputFile().toString());
+ sessionState.in = null; // hive server's session input stream is not used
+ // open a per-session file in auto-flush mode for writing temp results
+ sessionState.out = new PrintStream(new FileOutputStream(sessionState.getTmpOutputFile()), true, "UTF-8");
+ // TODO: for hadoop jobs, progress is printed out to session.err,
+ // we should find a way to feed back job progress to client
+ sessionState.err = new PrintStream(System.err, true, "UTF-8");
+ } catch (IOException e) {
+ LOG.error("Error in creating temp output file ", e);
+ try {
+ sessionState.in = null;
+ sessionState.out = new PrintStream(System.out, true, "UTF-8");
+ sessionState.err = new PrintStream(System.err, true, "UTF-8");
+ } catch (UnsupportedEncodingException ee) {
+ LOG.error("Error creating PrintStream", e);
+ ee.printStackTrace();
+ sessionState.out = null;
+ sessionState.err = null;
+ }
+ }
+ }
+
+
+ private void tearDownSessionIO() {
+ IOUtils.cleanup(LOG, parentSession.getSessionState().out);
+ IOUtils.cleanup(LOG, parentSession.getSessionState().err);
+ }
+
+ @Override
+ public void runInternal() throws HiveSQLException {
+ setState(OperationState.RUNNING);
+ try {
+ String command = getStatement().trim();
+ String[] tokens = command.split("\\s+");
+ String commandArgs = command.substring(tokens[0].length()).trim();
+
+ CommandProcessorResponse response = commandProcessor.run(commandArgs);
+ int returnCode = response.getResponseCode();
+ if (returnCode != 0) {
+ throw toSQLException("Error while processing statement", response);
+ }
+ Schema schema = response.getSchema();
+ if (schema != null) {
+ setHasResultSet(true);
+ resultSchema = new TableSchema(schema);
+ } else {
+ setHasResultSet(false);
+ resultSchema = new TableSchema();
+ }
+ } catch (HiveSQLException e) {
+ setState(OperationState.ERROR);
+ throw e;
+ } catch (Exception e) {
+ setState(OperationState.ERROR);
+ throw new HiveSQLException("Error running query: " + e.toString(), e);
+ }
+ setState(OperationState.FINISHED);
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hive.service.cli.operation.Operation#close()
+ */
+ @Override
+ public void close() throws HiveSQLException {
+ setState(OperationState.CLOSED);
+ tearDownSessionIO();
+ cleanTmpFile();
+ cleanupOperationLog();
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hive.service.cli.operation.Operation#getResultSetSchema()
+ */
+ @Override
+ public TableSchema getResultSetSchema() throws HiveSQLException {
+ return resultSchema;
+ }
+
+ /* (non-Javadoc)
+ * @see org.apache.hive.service.cli.operation.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long)
+ */
+ @Override
+ public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
+ validateDefaultFetchOrientation(orientation);
+ if (orientation.equals(FetchOrientation.FETCH_FIRST)) {
+ resetResultReader();
+ }
+ List<String> rows = readResults((int) maxRows);
+ RowSet rowSet = RowSetFactory.create(resultSchema, getProtocolVersion());
+
+ for (String row : rows) {
+ rowSet.addRow(new String[] {row});
+ }
+ return rowSet;
+ }
+
+ /**
+ * Reads the temporary results of non-Hive (non-Driver) commands into a list of strings.
+ * @param nLines number of lines to read at once. If it is <= 0, all remaining lines are read.
+ */
+ private List<String> readResults(int nLines) throws HiveSQLException {
+ if (resultReader == null) {
+ SessionState sessionState = getParentSession().getSessionState();
+ File tmp = sessionState.getTmpOutputFile();
+ try {
+ resultReader = new BufferedReader(new FileReader(tmp));
+ } catch (FileNotFoundException e) {
+ LOG.error("File " + tmp + " not found. ", e);
+ throw new HiveSQLException(e);
+ }
+ }
+ List<String> results = new ArrayList<String>();
+
+ for (int i = 0; i < nLines || nLines <= 0; ++i) {
+ try {
+ String line = resultReader.readLine();
+ if (line == null) {
+ // reached the end of the result file
+ break;
+ } else {
+ results.add(line);
+ }
+ } catch (IOException e) {
+ LOG.error("Reading temp results encountered an exception: ", e);
+ throw new HiveSQLException(e);
+ }
+ }
+ return results;
+ }
+
+ private void cleanTmpFile() {
+ resetResultReader();
+ SessionState sessionState = getParentSession().getSessionState();
+ File tmp = sessionState.getTmpOutputFile();
+ tmp.delete();
+ }
+
+ private void resetResultReader() {
+ if (resultReader != null) {
+ IOUtils.cleanup(LOG, resultReader);
+ resultReader = null;
+ }
+ }
+}
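
The loop guard in readResults, "i < nLines || nLines <= 0", makes any non-positive nLines mean "read everything remaining". A self-contained sketch of the same idiom over an in-memory reader:

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.StringReader;
    import java.util.ArrayList;
    import java.util.List;

    public class ReadResultsSketch {
      static List<String> readLines(BufferedReader reader, int nLines) throws IOException {
        List<String> results = new ArrayList<>();
        for (int i = 0; i < nLines || nLines <= 0; ++i) {
          String line = reader.readLine();
          if (line == null) break; // end of stream
          results.add(line);
        }
        return results;
      }

      public static void main(String[] args) throws IOException {
        BufferedReader reader = new BufferedReader(new StringReader("a\nb\nc"));
        System.out.println(readLines(reader, 2));  // [a, b]
        System.out.println(readLines(reader, -1)); // [c] -- the rest of the stream
      }
    }
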
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveTableTypeMapping.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveTableTypeMapping.java
new file mode 100644
index 0000000000..b530f21712
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveTableTypeMapping.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.hadoop.hive.metastore.TableType;
+
+/**
+ * HiveTableTypeMapping.
+ * Default table type mapping
+ *
+ */
+public class HiveTableTypeMapping implements TableTypeMapping {
+
+ @Override
+ public String mapToHiveType(String clientTypeName) {
+ return clientTypeName;
+ }
+
+ @Override
+ public String mapToClientType(String hiveTypeName) {
+ return hiveTypeName;
+ }
+
+ @Override
+ public Set<String> getTableTypeNames() {
+ Set<String> typeNameSet = new HashSet<String>();
+ for (TableType typeNames : TableType.values()) {
+ typeNameSet.add(typeNames.toString());
+ }
+ return typeNameSet;
+ }
+}
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/LogDivertAppender.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
new file mode 100644
index 0000000000..70340bd13c
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
@@ -0,0 +1,209 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+import java.io.CharArrayWriter;
+import java.util.Enumeration;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.log.PerfLogger;
+import org.apache.hadoop.hive.ql.session.OperationLog;
+import org.apache.hadoop.hive.ql.session.OperationLog.LoggingLevel;
+import org.apache.hive.service.cli.CLIServiceUtils;
+import org.apache.log4j.Appender;
+import org.apache.log4j.ConsoleAppender;
+import org.apache.log4j.Layout;
+import org.apache.log4j.Logger;
+import org.apache.log4j.WriterAppender;
+import org.apache.log4j.spi.Filter;
+import org.apache.log4j.spi.LoggingEvent;
+
+import com.google.common.base.Joiner;
+
+/**
+ * An Appender to divert logs from individual threads to the LogObject they belong to.
+ */
+public class LogDivertAppender extends WriterAppender {
+ private static final Logger LOG = Logger.getLogger(LogDivertAppender.class.getName());
+ private final OperationManager operationManager;
+ private boolean isVerbose;
+ private Layout verboseLayout;
+
+ /**
+   * A log filter that filters messages coming from loggers with the given names.
+   * It can be used as a whitelist filter or a blacklist filter.
+   * We apply the blacklist filter to the loggers used by the log diversion code itself, so that
+   * they don't generate more logs for themselves when they process logs.
+   * The whitelist filter is used for the less verbose logging levels.
+ */
+ private static class NameFilter extends Filter {
+ private Pattern namePattern;
+ private LoggingLevel loggingMode;
+ private OperationManager operationManager;
+
+ /* Patterns that are excluded in verbose logging level.
+     * Filter out messages coming from log processing classes, or we'll run into an infinite loop.
+ */
+ private static final Pattern verboseExcludeNamePattern = Pattern.compile(Joiner.on("|").
+ join(new String[] {LOG.getName(), OperationLog.class.getName(),
+ OperationManager.class.getName()}));
+
+ /* Patterns that are included in execution logging level.
+ * In execution mode, show only select logger messages.
+ */
+ private static final Pattern executionIncludeNamePattern = Pattern.compile(Joiner.on("|").
+ join(new String[] {"org.apache.hadoop.mapreduce.JobSubmitter",
+ "org.apache.hadoop.mapreduce.Job", "SessionState", Task.class.getName(),
+ "org.apache.hadoop.hive.ql.exec.spark.status.SparkJobMonitor"}));
+
+ /* Patterns that are included in performance logging level.
+ * In performance mode, show execution and performance logger messages.
+ */
+ private static final Pattern performanceIncludeNamePattern = Pattern.compile(
+ executionIncludeNamePattern.pattern() + "|" + PerfLogger.class.getName());
+
+ private void setCurrentNamePattern(OperationLog.LoggingLevel mode) {
+ if (mode == OperationLog.LoggingLevel.VERBOSE) {
+ this.namePattern = verboseExcludeNamePattern;
+ } else if (mode == OperationLog.LoggingLevel.EXECUTION) {
+ this.namePattern = executionIncludeNamePattern;
+ } else if (mode == OperationLog.LoggingLevel.PERFORMANCE) {
+ this.namePattern = performanceIncludeNamePattern;
+ }
+ }
+
+ public NameFilter(
+ OperationLog.LoggingLevel loggingMode, OperationManager op) {
+ this.operationManager = op;
+ this.loggingMode = loggingMode;
+ setCurrentNamePattern(loggingMode);
+ }
+
+ @Override
+ public int decide(LoggingEvent ev) {
+ OperationLog log = operationManager.getOperationLogByThread();
+ boolean excludeMatches = (loggingMode == OperationLog.LoggingLevel.VERBOSE);
+
+ if (log == null) {
+ return Filter.DENY;
+ }
+
+ OperationLog.LoggingLevel currentLoggingMode = log.getOpLoggingLevel();
+ // If logging is disabled, deny everything.
+ if (currentLoggingMode == OperationLog.LoggingLevel.NONE) {
+ return Filter.DENY;
+ }
+ // Look at the current session's setting
+ // and set the pattern and excludeMatches accordingly.
+ if (currentLoggingMode != loggingMode) {
+ loggingMode = currentLoggingMode;
+ setCurrentNamePattern(loggingMode);
+ }
+
+ boolean isMatch = namePattern.matcher(ev.getLoggerName()).matches();
+
+ if (excludeMatches == isMatch) {
+ // Deny if this is black-list filter (excludeMatches = true) and it
+ // matched
+ // or if this is whitelist filter and it didn't match
+ return Filter.DENY;
+ }
+ return Filter.NEUTRAL;
+ }
+ }
+
+  /** This is where the log message will go. */
+ private final CharArrayWriter writer = new CharArrayWriter();
+
+ private void setLayout (boolean isVerbose, Layout lo) {
+ if (isVerbose) {
+ if (lo == null) {
+ lo = CLIServiceUtils.verboseLayout;
+ LOG.info("Cannot find a Layout from a ConsoleAppender. Using default Layout pattern.");
+ }
+ } else {
+ lo = CLIServiceUtils.nonVerboseLayout;
+ }
+ setLayout(lo);
+ }
+
+ private void initLayout(boolean isVerbose) {
+ // There should be a ConsoleAppender. Copy its Layout.
+ Logger root = Logger.getRootLogger();
+ Layout layout = null;
+
+ Enumeration<?> appenders = root.getAllAppenders();
+ while (appenders.hasMoreElements()) {
+ Appender ap = (Appender) appenders.nextElement();
+ if (ap.getClass().equals(ConsoleAppender.class)) {
+ layout = ap.getLayout();
+ break;
+ }
+ }
+ setLayout(isVerbose, layout);
+ }
+
+ public LogDivertAppender(OperationManager operationManager,
+ OperationLog.LoggingLevel loggingMode) {
+ isVerbose = (loggingMode == OperationLog.LoggingLevel.VERBOSE);
+ initLayout(isVerbose);
+ setWriter(writer);
+ setName("LogDivertAppender");
+ this.operationManager = operationManager;
+ this.verboseLayout = isVerbose ? layout : CLIServiceUtils.verboseLayout;
+ addFilter(new NameFilter(loggingMode, operationManager));
+ }
+
+ @Override
+ public void doAppend(LoggingEvent event) {
+ OperationLog log = operationManager.getOperationLogByThread();
+
+ // Set current layout depending on the verbose/non-verbose mode.
+ if (log != null) {
+ boolean isCurrModeVerbose = (log.getOpLoggingLevel() == OperationLog.LoggingLevel.VERBOSE);
+
+ // If there is a logging level change from verbose->non-verbose or vice-versa since
+ // the last subAppend call, change the layout to preserve consistency.
+ if (isCurrModeVerbose != isVerbose) {
+ isVerbose = isCurrModeVerbose;
+ setLayout(isVerbose, verboseLayout);
+ }
+ }
+ super.doAppend(event);
+ }
+
+ /**
+ * Overrides WriterAppender.subAppend(), which does the real logging. No need
+ * to worry about concurrency since log4j calls this synchronously.
+ */
+ @Override
+ protected void subAppend(LoggingEvent event) {
+ super.subAppend(event);
+ // That should've gone into our writer. Notify the LogContext.
+ String logOutput = writer.toString();
+ writer.reset();
+
+ OperationLog log = operationManager.getOperationLogByThread();
+ if (log == null) {
+ LOG.debug(" ---+++=== Dropped log event from thread " + event.getThreadName());
+ return;
+ }
+ log.writeOperationLog(logOutput);
+ }
+}
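
The filter above hinges on a single comparison: in VERBOSE mode the name pattern acts as a blacklist (deny on match), while in EXECUTION/PERFORMANCE mode it acts as a whitelist (deny on non-match). Here is a self-contained sketch of just that decision, with hypothetical logger names and no log4j dependency:

    import java.util.regex.Pattern;

    public class NameFilterSketch {
      // Returns true when the event should be denied, mirroring NameFilter.decide().
      static boolean deny(Pattern namePattern, boolean excludeMatches, String loggerName) {
        boolean isMatch = namePattern.matcher(loggerName).matches();
        // Deny if this is a blacklist filter (excludeMatches) and the name matched,
        // or if this is a whitelist filter and it didn't match.
        return excludeMatches == isMatch;
      }

      public static void main(String[] args) {
        Pattern exclude = Pattern.compile("com.example.LogPlumbing");  // hypothetical
        Pattern include = Pattern.compile("com.example.JobSubmitter"); // hypothetical

        // VERBOSE (blacklist): only the log-plumbing logger is denied.
        System.out.println(deny(exclude, true, "com.example.LogPlumbing")); // true
        System.out.println(deny(exclude, true, "com.example.UserQuery"));   // false

        // EXECUTION (whitelist): everything but the listed logger is denied.
        System.out.println(deny(include, false, "com.example.JobSubmitter")); // false
        System.out.println(deny(include, false, "com.example.UserQuery"));    // true
      }
    }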
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/MetadataOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/MetadataOperation.java
new file mode 100644
index 0000000000..4595ef56fc
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/MetadataOperation.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.OperationType;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.session.HiveSession;
+
+/**
+ * MetadataOperation.
+ *
+ */
+public abstract class MetadataOperation extends Operation {
+
+ protected static final String DEFAULT_HIVE_CATALOG = "";
+ protected static TableSchema RESULT_SET_SCHEMA;
+ private static final char SEARCH_STRING_ESCAPE = '\\';
+
+ protected MetadataOperation(HiveSession parentSession, OperationType opType) {
+ super(parentSession, opType, false);
+ setHasResultSet(true);
+ }
+
+
+ /* (non-Javadoc)
+ * @see org.apache.hive.service.cli.Operation#close()
+ */
+ @Override
+ public void close() throws HiveSQLException {
+ setState(OperationState.CLOSED);
+ cleanupOperationLog();
+ }
+
+ /**
+   * Convert wildcards and escape sequences from JDBC format to datanucleus/regex format.
+ */
+ protected String convertIdentifierPattern(final String pattern, boolean datanucleusFormat) {
+ if (pattern == null) {
+ return convertPattern("%", true);
+ } else {
+ return convertPattern(pattern, datanucleusFormat);
+ }
+ }
+
+ /**
+   * Convert wildcards and escape sequences of a schema pattern from JDBC format to
+   * datanucleus/regex format. The schema pattern also treats the empty string as a wildcard.
+ */
+ protected String convertSchemaPattern(final String pattern) {
+ if ((pattern == null) || pattern.isEmpty()) {
+ return convertPattern("%", true);
+ } else {
+ return convertPattern(pattern, true);
+ }
+ }
+
+ /**
+ * Convert a pattern containing JDBC catalog search wildcards into
+ * Java regex patterns.
+ *
+ * @param pattern input which may contain '%' or '_' wildcard characters, or
+ * these characters escaped using {@link #getSearchStringEscape()}.
+   * @return the pattern with %/_ replaced by regex search characters and with escaped
+   *         characters handled.
+   *
+   * The datanucleus module expects the wildcard as '*'. The columns search, on the
+   * other hand, is done locally inside the Hive code and requires the regex wildcard
+   * format '.*'. This is driven by the datanucleusFormat flag.
+ */
+ private String convertPattern(final String pattern, boolean datanucleusFormat) {
+ String wStr;
+ if (datanucleusFormat) {
+ wStr = "*";
+ } else {
+ wStr = ".*";
+ }
+ return pattern
+ .replaceAll("([^\\\\])%", "$1" + wStr).replaceAll("\\\\%", "%").replaceAll("^%", wStr)
+ .replaceAll("([^\\\\])_", "$1.").replaceAll("\\\\_", "_").replaceAll("^_", ".");
+ }
+
+ protected boolean isAuthV2Enabled(){
+ SessionState ss = SessionState.get();
+ return (ss.isAuthorizationModeV2() &&
+ HiveConf.getBoolVar(ss.getConf(), HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED));
+ }
+
+ protected void authorizeMetaGets(HiveOperationType opType, List<HivePrivilegeObject> inpObjs)
+ throws HiveSQLException {
+ authorizeMetaGets(opType, inpObjs, null);
+ }
+
+ protected void authorizeMetaGets(HiveOperationType opType, List<HivePrivilegeObject> inpObjs,
+ String cmdString) throws HiveSQLException {
+ SessionState ss = SessionState.get();
+ HiveAuthzContext.Builder ctxBuilder = new HiveAuthzContext.Builder();
+ ctxBuilder.setUserIpAddress(ss.getUserIpAddress());
+ ctxBuilder.setCommandString(cmdString);
+ try {
+ ss.getAuthorizerV2().checkPrivileges(opType, inpObjs, null,
+ ctxBuilder.build());
+ } catch (HiveAuthzPluginException | HiveAccessControlException e) {
+ throw new HiveSQLException(e.getMessage(), e);
+ }
+ }
+
+}
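
The replaceAll chain in convertPattern is easiest to verify with concrete inputs. This standalone sketch runs the identical chain over a few sample JDBC patterns; the expected outputs in the comments follow directly from the replacements above.

    public class ConvertPatternSketch {
      // Same replacement chain as MetadataOperation.convertPattern.
      static String convert(String pattern, boolean datanucleusFormat) {
        String wStr = datanucleusFormat ? "*" : ".*";
        return pattern
            .replaceAll("([^\\\\])%", "$1" + wStr).replaceAll("\\\\%", "%").replaceAll("^%", wStr)
            .replaceAll("([^\\\\])_", "$1.").replaceAll("\\\\_", "_").replaceAll("^_", ".");
      }

      public static void main(String[] args) {
        System.out.println(convert("%", true));         // *       (datanucleus wildcard)
        System.out.println(convert("emp%", true));      // emp*
        System.out.println(convert("emp_id", false));   // emp.id  (JDBC '_' -> regex '.')
        System.out.println(convert("emp\\_id", false)); // emp_id  (escaped '_' stays literal)
      }
    }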
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/Operation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/Operation.java
new file mode 100644
index 0000000000..19153b654b
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/Operation.java
@@ -0,0 +1,322 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.service.cli.operation;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.util.EnumSet;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.session.OperationLog;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationHandle;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.OperationStatus;
+import org.apache.hive.service.cli.OperationType;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.session.HiveSession;
+import org.apache.hive.service.cli.thrift.TProtocolVersion;
+
+public abstract class Operation {
+ protected final HiveSession parentSession;
+ private OperationState state = OperationState.INITIALIZED;
+ private final OperationHandle opHandle;
+ private HiveConf configuration;
+ public static final Log LOG = LogFactory.getLog(Operation.class.getName());
+ public static final FetchOrientation DEFAULT_FETCH_ORIENTATION = FetchOrientation.FETCH_NEXT;
+ public static final long DEFAULT_FETCH_MAX_ROWS = 100;
+ protected boolean hasResultSet;
+ protected volatile HiveSQLException operationException;
+ protected final boolean runAsync;
+ protected volatile Future<?> backgroundHandle;
+ protected OperationLog operationLog;
+ protected boolean isOperationLogEnabled;
+
+ private long operationTimeout;
+ private long lastAccessTime;
+
+ protected static final EnumSet<FetchOrientation> DEFAULT_FETCH_ORIENTATION_SET =
+ EnumSet.of(FetchOrientation.FETCH_NEXT,FetchOrientation.FETCH_FIRST);
+
+ protected Operation(HiveSession parentSession, OperationType opType, boolean runInBackground) {
+ this.parentSession = parentSession;
+ this.runAsync = runInBackground;
+ this.opHandle = new OperationHandle(opType, parentSession.getProtocolVersion());
+ lastAccessTime = System.currentTimeMillis();
+ operationTimeout = HiveConf.getTimeVar(parentSession.getHiveConf(),
+ HiveConf.ConfVars.HIVE_SERVER2_IDLE_OPERATION_TIMEOUT, TimeUnit.MILLISECONDS);
+ }
+
+ public Future<?> getBackgroundHandle() {
+ return backgroundHandle;
+ }
+
+ protected void setBackgroundHandle(Future<?> backgroundHandle) {
+ this.backgroundHandle = backgroundHandle;
+ }
+
+ public boolean shouldRunAsync() {
+ return runAsync;
+ }
+
+ public void setConfiguration(HiveConf configuration) {
+ this.configuration = new HiveConf(configuration);
+ }
+
+ public HiveConf getConfiguration() {
+ return new HiveConf(configuration);
+ }
+
+ public HiveSession getParentSession() {
+ return parentSession;
+ }
+
+ public OperationHandle getHandle() {
+ return opHandle;
+ }
+
+ public TProtocolVersion getProtocolVersion() {
+ return opHandle.getProtocolVersion();
+ }
+
+ public OperationType getType() {
+ return opHandle.getOperationType();
+ }
+
+ public OperationStatus getStatus() {
+ return new OperationStatus(state, operationException);
+ }
+
+ public boolean hasResultSet() {
+ return hasResultSet;
+ }
+
+ protected void setHasResultSet(boolean hasResultSet) {
+ this.hasResultSet = hasResultSet;
+ opHandle.setHasResultSet(hasResultSet);
+ }
+
+ public OperationLog getOperationLog() {
+ return operationLog;
+ }
+
+ protected final OperationState setState(OperationState newState) throws HiveSQLException {
+ state.validateTransition(newState);
+ this.state = newState;
+ this.lastAccessTime = System.currentTimeMillis();
+ return this.state;
+ }
+
+ public boolean isTimedOut(long current) {
+ if (operationTimeout == 0) {
+ return false;
+ }
+ if (operationTimeout > 0) {
+ // check only when it's in terminal state
+ return state.isTerminal() && lastAccessTime + operationTimeout <= current;
+ }
+    return lastAccessTime - operationTimeout <= current;
+ }
+
+ public long getLastAccessTime() {
+ return lastAccessTime;
+ }
+
+ public long getOperationTimeout() {
+ return operationTimeout;
+ }
+
+ public void setOperationTimeout(long operationTimeout) {
+ this.operationTimeout = operationTimeout;
+ }
+
+ protected void setOperationException(HiveSQLException operationException) {
+ this.operationException = operationException;
+ }
+
+ protected final void assertState(OperationState state) throws HiveSQLException {
+ if (this.state != state) {
+ throw new HiveSQLException("Expected state " + state + ", but found " + this.state);
+ }
+ this.lastAccessTime = System.currentTimeMillis();
+ }
+
+ public boolean isRunning() {
+ return OperationState.RUNNING.equals(state);
+ }
+
+ public boolean isFinished() {
+ return OperationState.FINISHED.equals(state);
+ }
+
+ public boolean isCanceled() {
+ return OperationState.CANCELED.equals(state);
+ }
+
+ public boolean isFailed() {
+ return OperationState.ERROR.equals(state);
+ }
+
+ protected void createOperationLog() {
+ if (parentSession.isOperationLogEnabled()) {
+ File operationLogFile = new File(parentSession.getOperationLogSessionDir(),
+ opHandle.getHandleIdentifier().toString());
+ isOperationLogEnabled = true;
+
+ // create log file
+ try {
+ if (operationLogFile.exists()) {
+ LOG.warn("The operation log file should not exist, but it is already there: " +
+ operationLogFile.getAbsolutePath());
+ operationLogFile.delete();
+ }
+ if (!operationLogFile.createNewFile()) {
+ // the log file already exists and cannot be deleted.
+ // If it can be read/written, keep its contents and use it.
+ if (!operationLogFile.canRead() || !operationLogFile.canWrite()) {
+ LOG.warn("The already existed operation log file cannot be recreated, " +
+ "and it cannot be read or written: " + operationLogFile.getAbsolutePath());
+ isOperationLogEnabled = false;
+ return;
+ }
+ }
+ } catch (Exception e) {
+ LOG.warn("Unable to create operation log file: " + operationLogFile.getAbsolutePath(), e);
+ isOperationLogEnabled = false;
+ return;
+ }
+
+ // create OperationLog object with above log file
+ try {
+ operationLog = new OperationLog(opHandle.toString(), operationLogFile, parentSession.getHiveConf());
+ } catch (FileNotFoundException e) {
+ LOG.warn("Unable to instantiate OperationLog object for operation: " +
+ opHandle, e);
+ isOperationLogEnabled = false;
+ return;
+ }
+
+ // register this operationLog to current thread
+ OperationLog.setCurrentOperationLog(operationLog);
+ }
+ }
+
+ protected void unregisterOperationLog() {
+ if (isOperationLogEnabled) {
+ OperationLog.removeCurrentOperationLog();
+ }
+ }
+
+ /**
+ * Invoked before runInternal().
+ * Set up some preconditions, or configurations.
+ */
+ protected void beforeRun() {
+ createOperationLog();
+ }
+
+ /**
+ * Invoked after runInternal(), even if an exception is thrown in runInternal().
+ * Clean up resources, which was set up in beforeRun().
+ */
+ protected void afterRun() {
+ unregisterOperationLog();
+ }
+
+ /**
+ * Implemented by subclass of Operation class to execute specific behaviors.
+ * @throws HiveSQLException
+ */
+ protected abstract void runInternal() throws HiveSQLException;
+
+ public void run() throws HiveSQLException {
+ beforeRun();
+ try {
+ runInternal();
+ } finally {
+ afterRun();
+ }
+ }
+
+ protected void cleanupOperationLog() {
+ if (isOperationLogEnabled) {
+ if (operationLog == null) {
+ LOG.error("Operation [ " + opHandle.getHandleIdentifier() + " ] "
+ + "logging is enabled, but its OperationLog object cannot be found.");
+ } else {
+ operationLog.close();
+ }
+ }
+ }
+
+ // TODO: make this abstract and implement in subclasses.
+ public void cancel() throws HiveSQLException {
+ setState(OperationState.CANCELED);
+ throw new UnsupportedOperationException("SQLOperation.cancel()");
+ }
+
+ public abstract void close() throws HiveSQLException;
+
+ public abstract TableSchema getResultSetSchema() throws HiveSQLException;
+
+ public abstract RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException;
+
+ public RowSet getNextRowSet() throws HiveSQLException {
+ return getNextRowSet(FetchOrientation.FETCH_NEXT, DEFAULT_FETCH_MAX_ROWS);
+ }
+
+ /**
+ * Verify if the given fetch orientation is part of the default orientation types.
+ * @param orientation
+ * @throws HiveSQLException
+ */
+ protected void validateDefaultFetchOrientation(FetchOrientation orientation)
+ throws HiveSQLException {
+ validateFetchOrientation(orientation, DEFAULT_FETCH_ORIENTATION_SET);
+ }
+
+ /**
+ * Verify if the given fetch orientation is part of the supported orientation types.
+ * @param orientation
+ * @param supportedOrientations
+ * @throws HiveSQLException
+ */
+ protected void validateFetchOrientation(FetchOrientation orientation,
+ EnumSet<FetchOrientation> supportedOrientations) throws HiveSQLException {
+ if (!supportedOrientations.contains(orientation)) {
+ throw new HiveSQLException("The fetch type " + orientation.toString() +
+ " is not supported for this resultset", "HY106");
+ }
+ }
+
+ protected HiveSQLException toSQLException(String prefix, CommandProcessorResponse response) {
+ HiveSQLException ex = new HiveSQLException(prefix + ": " + response.getErrorMessage(),
+ response.getSQLState(), response.getResponseCode());
+ if (response.getException() != null) {
+ ex.initCause(response.getException());
+ }
+ return ex;
+ }
+}
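
isTimedOut above encodes three regimes: a timeout of 0 disables expiry, a positive timeout expires only operations already in a terminal state, and a negative timeout expires them regardless of state. A minimal sketch of that arithmetic, with a plain boolean standing in for state.isTerminal():

    public class TimeoutSketch {
      // Mirrors Operation.isTimedOut; 'terminal' stands in for state.isTerminal().
      static boolean isTimedOut(long lastAccessTime, long operationTimeout,
          boolean terminal, long current) {
        if (operationTimeout == 0) {
          return false; // timeout disabled
        }
        if (operationTimeout > 0) {
          // Positive timeout: only reap operations that have already finished.
          return terminal && lastAccessTime + operationTimeout <= current;
        }
        // Negative timeout: reap regardless of state.
        return lastAccessTime - operationTimeout <= current;
      }

      public static void main(String[] args) {
        long now = 100000L;
        System.out.println(isTimedOut(0L, 0L, true, now));       // false: disabled
        System.out.println(isTimedOut(0L, 50000L, true, now));   // true:  terminal and expired
        System.out.println(isTimedOut(0L, 50000L, false, now));  // false: still running
        System.out.println(isTimedOut(0L, -50000L, false, now)); // true:  negative applies anyway
      }
    }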
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/OperationManager.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/OperationManager.java
new file mode 100644
index 0000000000..92c340a29c
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/OperationManager.java
@@ -0,0 +1,284 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Schema;
+import org.apache.hadoop.hive.ql.session.OperationLog;
+import org.apache.hive.service.AbstractService;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationHandle;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.OperationStatus;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.RowSetFactory;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.session.HiveSession;
+import org.apache.log4j.Appender;
+import org.apache.log4j.Logger;
+
+/**
+ * OperationManager.
+ *
+ */
+public class OperationManager extends AbstractService {
+ private final Log LOG = LogFactory.getLog(OperationManager.class.getName());
+
+ private final Map<OperationHandle, Operation> handleToOperation =
+ new HashMap<OperationHandle, Operation>();
+
+ public OperationManager() {
+ super(OperationManager.class.getSimpleName());
+ }
+
+ @Override
+ public synchronized void init(HiveConf hiveConf) {
+ if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED)) {
+ initOperationLogCapture(hiveConf.getVar(
+ HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL));
+ } else {
+ LOG.debug("Operation level logging is turned off");
+ }
+ super.init(hiveConf);
+ }
+
+ @Override
+ public synchronized void start() {
+ super.start();
+ // TODO
+ }
+
+ @Override
+ public synchronized void stop() {
+ // TODO
+ super.stop();
+ }
+
+ private void initOperationLogCapture(String loggingMode) {
+ // Register another Appender (with the same layout) that talks to us.
+ Appender ap = new LogDivertAppender(this, OperationLog.getLoggingLevel(loggingMode));
+ Logger.getRootLogger().addAppender(ap);
+ }
+
+ public ExecuteStatementOperation newExecuteStatementOperation(HiveSession parentSession,
+ String statement, Map<String, String> confOverlay, boolean runAsync)
+ throws HiveSQLException {
+ ExecuteStatementOperation executeStatementOperation = ExecuteStatementOperation
+ .newExecuteStatementOperation(parentSession, statement, confOverlay, runAsync);
+ addOperation(executeStatementOperation);
+ return executeStatementOperation;
+ }
+
+ public GetTypeInfoOperation newGetTypeInfoOperation(HiveSession parentSession) {
+ GetTypeInfoOperation operation = new GetTypeInfoOperation(parentSession);
+ addOperation(operation);
+ return operation;
+ }
+
+ public GetCatalogsOperation newGetCatalogsOperation(HiveSession parentSession) {
+ GetCatalogsOperation operation = new GetCatalogsOperation(parentSession);
+ addOperation(operation);
+ return operation;
+ }
+
+ public GetSchemasOperation newGetSchemasOperation(HiveSession parentSession,
+ String catalogName, String schemaName) {
+ GetSchemasOperation operation = new GetSchemasOperation(parentSession, catalogName, schemaName);
+ addOperation(operation);
+ return operation;
+ }
+
+ public MetadataOperation newGetTablesOperation(HiveSession parentSession,
+ String catalogName, String schemaName, String tableName,
+ List<String> tableTypes) {
+ MetadataOperation operation =
+ new GetTablesOperation(parentSession, catalogName, schemaName, tableName, tableTypes);
+ addOperation(operation);
+ return operation;
+ }
+
+ public GetTableTypesOperation newGetTableTypesOperation(HiveSession parentSession) {
+ GetTableTypesOperation operation = new GetTableTypesOperation(parentSession);
+ addOperation(operation);
+ return operation;
+ }
+
+ public GetColumnsOperation newGetColumnsOperation(HiveSession parentSession,
+ String catalogName, String schemaName, String tableName, String columnName) {
+ GetColumnsOperation operation = new GetColumnsOperation(parentSession,
+ catalogName, schemaName, tableName, columnName);
+ addOperation(operation);
+ return operation;
+ }
+
+ public GetFunctionsOperation newGetFunctionsOperation(HiveSession parentSession,
+ String catalogName, String schemaName, String functionName) {
+ GetFunctionsOperation operation = new GetFunctionsOperation(parentSession,
+ catalogName, schemaName, functionName);
+ addOperation(operation);
+ return operation;
+ }
+
+ public Operation getOperation(OperationHandle operationHandle) throws HiveSQLException {
+ Operation operation = getOperationInternal(operationHandle);
+ if (operation == null) {
+ throw new HiveSQLException("Invalid OperationHandle: " + operationHandle);
+ }
+ return operation;
+ }
+
+ private synchronized Operation getOperationInternal(OperationHandle operationHandle) {
+ return handleToOperation.get(operationHandle);
+ }
+
+ private synchronized Operation removeTimedOutOperation(OperationHandle operationHandle) {
+ Operation operation = handleToOperation.get(operationHandle);
+ if (operation != null && operation.isTimedOut(System.currentTimeMillis())) {
+ handleToOperation.remove(operationHandle);
+ return operation;
+ }
+ return null;
+ }
+
+ private synchronized void addOperation(Operation operation) {
+ handleToOperation.put(operation.getHandle(), operation);
+ }
+
+ private synchronized Operation removeOperation(OperationHandle opHandle) {
+ return handleToOperation.remove(opHandle);
+ }
+
+ public OperationStatus getOperationStatus(OperationHandle opHandle)
+ throws HiveSQLException {
+ return getOperation(opHandle).getStatus();
+ }
+
+ public void cancelOperation(OperationHandle opHandle) throws HiveSQLException {
+ Operation operation = getOperation(opHandle);
+ OperationState opState = operation.getStatus().getState();
+ if (opState == OperationState.CANCELED ||
+ opState == OperationState.CLOSED ||
+ opState == OperationState.FINISHED ||
+ opState == OperationState.ERROR ||
+ opState == OperationState.UNKNOWN) {
+      // Cancel should be a no-op in these cases
+ LOG.debug(opHandle + ": Operation is already aborted in state - " + opState);
+ }
+ else {
+ LOG.debug(opHandle + ": Attempting to cancel from state - " + opState);
+ operation.cancel();
+ }
+ }
+
+ public void closeOperation(OperationHandle opHandle) throws HiveSQLException {
+ Operation operation = removeOperation(opHandle);
+ if (operation == null) {
+ throw new HiveSQLException("Operation does not exist!");
+ }
+ operation.close();
+ }
+
+ public TableSchema getOperationResultSetSchema(OperationHandle opHandle)
+ throws HiveSQLException {
+ return getOperation(opHandle).getResultSetSchema();
+ }
+
+ public RowSet getOperationNextRowSet(OperationHandle opHandle)
+ throws HiveSQLException {
+ return getOperation(opHandle).getNextRowSet();
+ }
+
+ public RowSet getOperationNextRowSet(OperationHandle opHandle,
+ FetchOrientation orientation, long maxRows)
+ throws HiveSQLException {
+ return getOperation(opHandle).getNextRowSet(orientation, maxRows);
+ }
+
+ public RowSet getOperationLogRowSet(OperationHandle opHandle,
+ FetchOrientation orientation, long maxRows)
+ throws HiveSQLException {
+ // get the OperationLog object from the operation
+ OperationLog operationLog = getOperation(opHandle).getOperationLog();
+ if (operationLog == null) {
+ throw new HiveSQLException("Couldn't find log associated with operation handle: " + opHandle);
+ }
+
+ // read logs
+ List<String> logs;
+ try {
+ logs = operationLog.readOperationLog(isFetchFirst(orientation), maxRows);
+ } catch (SQLException e) {
+ throw new HiveSQLException(e.getMessage(), e.getCause());
+ }
+
+
+ // convert logs to RowSet
+ TableSchema tableSchema = new TableSchema(getLogSchema());
+ RowSet rowSet = RowSetFactory.create(tableSchema, getOperation(opHandle).getProtocolVersion());
+ for (String log : logs) {
+ rowSet.addRow(new String[] {log});
+ }
+
+ return rowSet;
+ }
+
+ private boolean isFetchFirst(FetchOrientation fetchOrientation) {
+    // TODO: Since OperationLog is moved to package o.a.h.h.ql.session,
+    // we may add an enum there and map FetchOrientation to it.
+    return fetchOrientation.equals(FetchOrientation.FETCH_FIRST);
+ }
+
+ private Schema getLogSchema() {
+ Schema schema = new Schema();
+ FieldSchema fieldSchema = new FieldSchema();
+ fieldSchema.setName("operation_log");
+ fieldSchema.setType("string");
+ schema.addToFieldSchemas(fieldSchema);
+ return schema;
+ }
+
+ public OperationLog getOperationLogByThread() {
+ return OperationLog.getCurrentOperationLog();
+ }
+
+ public List<Operation> removeExpiredOperations(OperationHandle[] handles) {
+ List<Operation> removed = new ArrayList<Operation>();
+ for (OperationHandle handle : handles) {
+ Operation operation = removeTimedOutOperation(handle);
+ if (operation != null) {
+ LOG.warn("Operation " + handle + " is timed-out and will be closed");
+ removed.add(operation);
+ }
+ }
+ return removed;
+ }
+}
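
OperationManager is purely handle-keyed: every public entry point resolves an OperationHandle through the synchronized map and then delegates to the Operation. The following is a hedged sketch of the intended call sequence for a caller that already holds a live HiveSession; session and service setup are out of scope, so this compiles against the classes above but is not runnable on its own.

    import java.util.HashMap;

    import org.apache.hive.service.cli.HiveSQLException;
    import org.apache.hive.service.cli.OperationHandle;
    import org.apache.hive.service.cli.RowSet;
    import org.apache.hive.service.cli.operation.Operation;
    import org.apache.hive.service.cli.operation.OperationManager;
    import org.apache.hive.service.cli.session.HiveSession;

    public class OperationLifecycleSketch {
      static void runStatement(OperationManager mgr, HiveSession session)
          throws HiveSQLException {
        // 1. Create and register the operation (addOperation puts it in the map).
        Operation op = mgr.newExecuteStatementOperation(
            session, "SELECT 1", new HashMap<String, String>(), false);
        OperationHandle handle = op.getHandle();

        // 2. Execute; the state moves INITIALIZED -> PENDING -> RUNNING -> FINISHED.
        op.run();

        // 3. Fetch through the manager, which resolves the handle back to the op.
        RowSet rows = mgr.getOperationNextRowSet(handle);
        // rows now holds up to DEFAULT_FETCH_MAX_ROWS decoded rows.

        // 4. Close removes the handle from the map and releases resources.
        mgr.closeOperation(handle);
      }
    }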
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java
new file mode 100644
index 0000000000..33ee16b80b
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java
@@ -0,0 +1,473 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.io.UnsupportedEncodingException;
+import java.security.PrivilegedExceptionAction;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.Future;
+import java.util.concurrent.RejectedExecutionException;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Schema;
+import org.apache.hadoop.hive.ql.CommandNeedRetryException;
+import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.exec.ExplainTask;
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.parse.VariableSubstitution;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.session.OperationLog;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.SerDe;
+import org.apache.hadoop.hive.serde2.SerDeException;
+import org.apache.hadoop.hive.serde2.SerDeUtils;
+import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.StructField;
+import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
+import org.apache.hadoop.hive.shims.Utils;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.RowSetFactory;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.session.HiveSession;
+import org.apache.hive.service.server.ThreadWithGarbageCleanup;
+
+/**
+ * SQLOperation.
+ *
+ */
+public class SQLOperation extends ExecuteStatementOperation {
+
+ private Driver driver = null;
+ private CommandProcessorResponse response;
+ private TableSchema resultSchema = null;
+ private Schema mResultSchema = null;
+ private SerDe serde = null;
+ private boolean fetchStarted = false;
+
+ public SQLOperation(HiveSession parentSession, String statement, Map<String,
+ String> confOverlay, boolean runInBackground) {
+ // TODO: call setRemoteUser in ExecuteStatementOperation or higher.
+ super(parentSession, statement, confOverlay, runInBackground);
+ }
+
+ /***
+ * Compile the query and extract metadata
+ * @param sqlOperationConf
+ * @throws HiveSQLException
+ */
+ public void prepare(HiveConf sqlOperationConf) throws HiveSQLException {
+ setState(OperationState.RUNNING);
+
+ try {
+ driver = new Driver(sqlOperationConf, getParentSession().getUserName());
+
+ // set the operation handle information in Driver, so that thrift API users
+ // can use the operation handle they receive, to lookup query information in
+ // Yarn ATS
+ String guid64 = Base64.encodeBase64URLSafeString(getHandle().getHandleIdentifier()
+ .toTHandleIdentifier().getGuid()).trim();
+ driver.setOperationId(guid64);
+
+      // In Hive server mode, we are not able to retry in the FetchTask
+      // case when calling fetch queries, since execute() has already returned.
+      // For now, we disable the retry attempts.
+ driver.setTryCount(Integer.MAX_VALUE);
+
+ String subStatement = new VariableSubstitution().substitute(sqlOperationConf, statement);
+ response = driver.compileAndRespond(subStatement);
+ if (0 != response.getResponseCode()) {
+ throw toSQLException("Error while compiling statement", response);
+ }
+
+ mResultSchema = driver.getSchema();
+
+ // hasResultSet should be true only if the query has a FetchTask
+ // "explain" is an exception for now
+ if(driver.getPlan().getFetchTask() != null) {
+ //Schema has to be set
+ if (mResultSchema == null || !mResultSchema.isSetFieldSchemas()) {
+ throw new HiveSQLException("Error compiling query: Schema and FieldSchema " +
+ "should be set when query plan has a FetchTask");
+ }
+ resultSchema = new TableSchema(mResultSchema);
+ setHasResultSet(true);
+ } else {
+ setHasResultSet(false);
+ }
+ // Set hasResultSet true if the plan has ExplainTask
+ // TODO explain should use a FetchTask for reading
+ for (Task<? extends Serializable> task: driver.getPlan().getRootTasks()) {
+ if (task.getClass() == ExplainTask.class) {
+ resultSchema = new TableSchema(mResultSchema);
+ setHasResultSet(true);
+ break;
+ }
+ }
+ } catch (HiveSQLException e) {
+ setState(OperationState.ERROR);
+ throw e;
+ } catch (Exception e) {
+ setState(OperationState.ERROR);
+ throw new HiveSQLException("Error running query: " + e.toString(), e);
+ }
+ }
+
+ private void runQuery(HiveConf sqlOperationConf) throws HiveSQLException {
+ try {
+      // In Hive server mode, we are not able to retry in the FetchTask
+      // case when calling fetch queries, since execute() has already returned.
+      // For now, we disable the retry attempts.
+ driver.setTryCount(Integer.MAX_VALUE);
+ response = driver.run();
+ if (0 != response.getResponseCode()) {
+ throw toSQLException("Error while processing statement", response);
+ }
+ } catch (HiveSQLException e) {
+ // If the operation was cancelled by another thread,
+ // Driver#run will return a non-zero response code.
+ // We will simply return if the operation state is CANCELED,
+ // otherwise throw an exception
+ if (getStatus().getState() == OperationState.CANCELED) {
+ return;
+ }
+ else {
+ setState(OperationState.ERROR);
+ throw e;
+ }
+ } catch (Exception e) {
+ setState(OperationState.ERROR);
+ throw new HiveSQLException("Error running query: " + e.toString(), e);
+ }
+ setState(OperationState.FINISHED);
+ }
+
+ @Override
+ public void runInternal() throws HiveSQLException {
+ setState(OperationState.PENDING);
+ final HiveConf opConfig = getConfigForOperation();
+ prepare(opConfig);
+ if (!shouldRunAsync()) {
+ runQuery(opConfig);
+ } else {
+      // We'll pass ThreadLocals to the background thread from the foreground (handler) thread
+ final SessionState parentSessionState = SessionState.get();
+      // The ThreadLocal Hive object needs to be set in the background thread.
+      // The metastore client in Hive is associated with the right user.
+ final Hive parentHive = getSessionHive();
+      // The current UGI will get used by the metastore when the metastore is in embedded mode,
+      // so it needs to be passed to the new background thread
+ final UserGroupInformation currentUGI = getCurrentUGI(opConfig);
+ // Runnable impl to call runInternal asynchronously,
+ // from a different thread
+ Runnable backgroundOperation = new Runnable() {
+ @Override
+ public void run() {
+ PrivilegedExceptionAction<Object> doAsAction = new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws HiveSQLException {
+ Hive.set(parentHive);
+ SessionState.setCurrentSessionState(parentSessionState);
+ // Set current OperationLog in this async thread for keeping on saving query log.
+ registerCurrentOperationLog();
+ try {
+ runQuery(opConfig);
+ } catch (HiveSQLException e) {
+ setOperationException(e);
+ LOG.error("Error running hive query: ", e);
+ } finally {
+ unregisterOperationLog();
+ }
+ return null;
+ }
+ };
+
+ try {
+ currentUGI.doAs(doAsAction);
+ } catch (Exception e) {
+ setOperationException(new HiveSQLException(e));
+ LOG.error("Error running hive query as user : " + currentUGI.getShortUserName(), e);
+ }
+ finally {
+ /**
+ * We'll cache the ThreadLocal RawStore object for this background thread for an orderly cleanup
+ * when this thread is garbage collected later.
+ * @see org.apache.hive.service.server.ThreadWithGarbageCleanup#finalize()
+ */
+ if (ThreadWithGarbageCleanup.currentThread() instanceof ThreadWithGarbageCleanup) {
+ ThreadWithGarbageCleanup currentThread =
+ (ThreadWithGarbageCleanup) ThreadWithGarbageCleanup.currentThread();
+ currentThread.cacheThreadLocalRawStore();
+ }
+ }
+ }
+ };
+ try {
+ // This submit blocks if no background threads are available to run this operation
+ Future<?> backgroundHandle =
+ getParentSession().getSessionManager().submitBackgroundOperation(backgroundOperation);
+ setBackgroundHandle(backgroundHandle);
+ } catch (RejectedExecutionException rejected) {
+ setState(OperationState.ERROR);
+ throw new HiveSQLException("The background threadpool cannot accept" +
+ " new task for execution, please retry the operation", rejected);
+ }
+ }
+ }
+
+ /**
+ * Returns the current UGI on the stack
+ * @param opConfig
+ * @return UserGroupInformation
+ * @throws HiveSQLException
+ */
+ private UserGroupInformation getCurrentUGI(HiveConf opConfig) throws HiveSQLException {
+ try {
+ return Utils.getUGI();
+ } catch (Exception e) {
+ throw new HiveSQLException("Unable to get current user", e);
+ }
+ }
+
+ /**
+ * Returns the ThreadLocal Hive for the current thread
+ * @return Hive
+ * @throws HiveSQLException
+ */
+ private Hive getSessionHive() throws HiveSQLException {
+ try {
+ return Hive.get();
+ } catch (HiveException e) {
+ throw new HiveSQLException("Failed to get ThreadLocal Hive object", e);
+ }
+ }
+
+ private void registerCurrentOperationLog() {
+ if (isOperationLogEnabled) {
+ if (operationLog == null) {
+ LOG.warn("Failed to get current OperationLog object of Operation: " +
+ getHandle().getHandleIdentifier());
+ isOperationLogEnabled = false;
+ return;
+ }
+ OperationLog.setCurrentOperationLog(operationLog);
+ }
+ }
+
+ private void cleanup(OperationState state) throws HiveSQLException {
+ setState(state);
+ if (shouldRunAsync()) {
+ Future<?> backgroundHandle = getBackgroundHandle();
+ if (backgroundHandle != null) {
+ backgroundHandle.cancel(true);
+ }
+ }
+ if (driver != null) {
+ driver.close();
+ driver.destroy();
+ }
+ driver = null;
+
+ SessionState ss = SessionState.get();
+ if (ss.getTmpOutputFile() != null) {
+ ss.getTmpOutputFile().delete();
+ }
+ }
+
+ @Override
+ public void cancel() throws HiveSQLException {
+ cleanup(OperationState.CANCELED);
+ }
+
+ @Override
+ public void close() throws HiveSQLException {
+ cleanup(OperationState.CLOSED);
+ cleanupOperationLog();
+ }
+
+ @Override
+ public TableSchema getResultSetSchema() throws HiveSQLException {
+ assertState(OperationState.FINISHED);
+ if (resultSchema == null) {
+ resultSchema = new TableSchema(driver.getSchema());
+ }
+ return resultSchema;
+ }
+
+ private transient final List<Object> convey = new ArrayList<Object>();
+
+ @Override
+ public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
+ validateDefaultFetchOrientation(orientation);
+ assertState(OperationState.FINISHED);
+
+ RowSet rowSet = RowSetFactory.create(resultSchema, getProtocolVersion());
+
+ try {
+      /* If the client is requesting fetch-from-start and it's not the first time reading from
+       * this operation, then reset the fetch position to the beginning.
+ */
+ if (orientation.equals(FetchOrientation.FETCH_FIRST) && fetchStarted) {
+ driver.resetFetch();
+ }
+ fetchStarted = true;
+ driver.setMaxRows((int) maxRows);
+ if (driver.getResults(convey)) {
+ return decode(convey, rowSet);
+ }
+ return rowSet;
+ } catch (IOException e) {
+ throw new HiveSQLException(e);
+ } catch (CommandNeedRetryException e) {
+ throw new HiveSQLException(e);
+ } catch (Exception e) {
+ throw new HiveSQLException(e);
+ } finally {
+ convey.clear();
+ }
+ }
+
+ private RowSet decode(List<Object> rows, RowSet rowSet) throws Exception {
+ if (driver.isFetchingTable()) {
+ return prepareFromRow(rows, rowSet);
+ }
+ return decodeFromString(rows, rowSet);
+ }
+
+ // already encoded to thrift-able object in ThriftFormatter
+ private RowSet prepareFromRow(List<Object> rows, RowSet rowSet) throws Exception {
+ for (Object row : rows) {
+ rowSet.addRow((Object[]) row);
+ }
+ return rowSet;
+ }
+
+ private RowSet decodeFromString(List<Object> rows, RowSet rowSet)
+ throws SQLException, SerDeException {
+ getSerDe();
+ StructObjectInspector soi = (StructObjectInspector) serde.getObjectInspector();
+ List<? extends StructField> fieldRefs = soi.getAllStructFieldRefs();
+
+ Object[] deserializedFields = new Object[fieldRefs.size()];
+ Object rowObj;
+ ObjectInspector fieldOI;
+
+ int protocol = getProtocolVersion().getValue();
+ for (Object rowString : rows) {
+ try {
+ rowObj = serde.deserialize(new BytesWritable(((String)rowString).getBytes("UTF-8")));
+ } catch (UnsupportedEncodingException e) {
+ throw new SerDeException(e);
+ }
+ for (int i = 0; i < fieldRefs.size(); i++) {
+ StructField fieldRef = fieldRefs.get(i);
+ fieldOI = fieldRef.getFieldObjectInspector();
+ Object fieldData = soi.getStructFieldData(rowObj, fieldRef);
+ deserializedFields[i] = SerDeUtils.toThriftPayload(fieldData, fieldOI, protocol);
+ }
+ rowSet.addRow(deserializedFields);
+ }
+ return rowSet;
+ }
+
+ private SerDe getSerDe() throws SQLException {
+ if (serde != null) {
+ return serde;
+ }
+ try {
+ List<FieldSchema> fieldSchemas = mResultSchema.getFieldSchemas();
+ StringBuilder namesSb = new StringBuilder();
+ StringBuilder typesSb = new StringBuilder();
+
+ if (fieldSchemas != null && !fieldSchemas.isEmpty()) {
+ for (int pos = 0; pos < fieldSchemas.size(); pos++) {
+ if (pos != 0) {
+ namesSb.append(",");
+ typesSb.append(",");
+ }
+ namesSb.append(fieldSchemas.get(pos).getName());
+ typesSb.append(fieldSchemas.get(pos).getType());
+ }
+ }
+ String names = namesSb.toString();
+ String types = typesSb.toString();
+
+ serde = new LazySimpleSerDe();
+ Properties props = new Properties();
+ if (names.length() > 0) {
+ LOG.debug("Column names: " + names);
+ props.setProperty(serdeConstants.LIST_COLUMNS, names);
+ }
+ if (types.length() > 0) {
+ LOG.debug("Column types: " + types);
+ props.setProperty(serdeConstants.LIST_COLUMN_TYPES, types);
+ }
+ SerDeUtils.initializeSerDe(serde, new HiveConf(), props, null);
+
+ } catch (Exception ex) {
+      LOG.error("Could not create ResultSet: " + ex.getMessage(), ex);
+ throw new SQLException("Could not create ResultSet: " + ex.getMessage(), ex);
+ }
+ return serde;
+ }
+
+ /**
+   * If there are query-specific settings to overlay, then create a copy of the config.
+   * There are two cases where we need to clone the session config that's passed to the Hive driver:
+   * 1. Async query -
+   *    If the client changes a config setting, that shouldn't affect the execution already underway.
+   * 2. confOverlay -
+   *    The query-specific settings should only be applied to the query config, not the session.
+ * @return new configuration
+ * @throws HiveSQLException
+ */
+ private HiveConf getConfigForOperation() throws HiveSQLException {
+ HiveConf sqlOperationConf = getParentSession().getHiveConf();
+ if (!getConfOverlay().isEmpty() || shouldRunAsync()) {
+      // clone the parent session config for this query
+ sqlOperationConf = new HiveConf(sqlOperationConf);
+
+ // apply overlay query specific settings, if any
+ for (Map.Entry<String, String> confEntry : getConfOverlay().entrySet()) {
+ try {
+ sqlOperationConf.verifyAndSet(confEntry.getKey(), confEntry.getValue());
+ } catch (IllegalArgumentException e) {
+ throw new HiveSQLException("Error applying statement specific settings", e);
+ }
+ }
+ }
+ return sqlOperationConf;
+ }
+}
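
getConfigForOperation clones the session HiveConf whenever the query runs async or carries a confOverlay, so per-query settings never leak back into the session. This standalone sketch shows the same copy-then-overlay pattern with a plain java.util.Properties standing in for HiveConf; verifyAndSet's validation is elided.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Properties;

    public class ConfOverlaySketch {
      // Copy-then-overlay: the session config is never mutated by a single query.
      static Properties configForOperation(Properties sessionConf,
          Map<String, String> confOverlay, boolean runAsync) {
        if (confOverlay.isEmpty() && !runAsync) {
          return sessionConf; // no per-query isolation needed
        }
        Properties queryConf = new Properties();
        queryConf.putAll(sessionConf); // clone, like new HiveConf(sessionConf)
        for (Map.Entry<String, String> e : confOverlay.entrySet()) {
          queryConf.setProperty(e.getKey(), e.getValue()); // verifyAndSet() in the real code
        }
        return queryConf;
      }

      public static void main(String[] args) {
        Properties session = new Properties();
        session.setProperty("hive.fetch.task.conversion", "more");
        Map<String, String> overlay = new HashMap<String, String>();
        overlay.put("hive.fetch.task.conversion", "none");

        Properties query = configForOperation(session, overlay, true);
        System.out.println(query.getProperty("hive.fetch.task.conversion"));   // none
        System.out.println(session.getProperty("hive.fetch.task.conversion")); // more, untouched
      }
    }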
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/TableTypeMapping.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/TableTypeMapping.java
new file mode 100644
index 0000000000..3a8a07f44f
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/TableTypeMapping.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.util.Set;
+
+
+public interface TableTypeMapping {
+ /**
+ * Map client's table type name to hive's table type
+ * @param clientTypeName
+ * @return
+ */
+ public String mapToHiveType (String clientTypeName);
+
+ /**
+ * Map hive's table type name to client's table type
+   * @param hiveTypeName
+ * @return
+ */
+ public String mapToClientType (String hiveTypeName);
+
+ /**
+ * Get all the table types of this mapping
+ * @return
+ */
+ public Set<String> getTableTypeNames();
+}
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/TableTypeMappingFactory.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/TableTypeMappingFactory.java
new file mode 100644
index 0000000000..d8ac2696b3
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/TableTypeMappingFactory.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+public class TableTypeMappingFactory {
+
+ public enum TableTypeMappings {
+ HIVE,
+ CLASSIC
+ }
+ private static TableTypeMapping hiveTableTypeMapping = new HiveTableTypeMapping();
+ private static TableTypeMapping classicTableTypeMapping = new ClassicTableTypeMapping();
+
+ public static TableTypeMapping getTableTypeMapping(String mappingType) {
+ if (TableTypeMappings.CLASSIC.toString().equalsIgnoreCase(mappingType)) {
+ return classicTableTypeMapping;
+ } else {
+ return hiveTableTypeMapping;
+ }
+ }
+}
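
Putting the mapping pieces together: the factory returns the CLASSIC mapping only on a case-insensitive match and falls back to the identity HiveTableTypeMapping for anything else (a null mappingType also falls through to HIVE). A short usage sketch follows; the TABLE -> MANAGED_TABLE translation in the comment assumes ClassicTableTypeMapping's usual behavior.

    import org.apache.hive.service.cli.operation.TableTypeMapping;
    import org.apache.hive.service.cli.operation.TableTypeMappingFactory;

    public class TableTypeMappingSketch {
      public static void main(String[] args) {
        // "classic" is matched case-insensitively; anything else falls back to HIVE.
        TableTypeMapping classic = TableTypeMappingFactory.getTableTypeMapping("classic");
        TableTypeMapping hive = TableTypeMappingFactory.getTableTypeMapping("anything-else");

        System.out.println(classic.mapToHiveType("TABLE"));       // e.g. MANAGED_TABLE
        System.out.println(hive.mapToHiveType("MANAGED_TABLE"));  // identity: MANAGED_TABLE
        System.out.println(hive.getTableTypeNames());             // all hive TableType enum names
      }
    }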