author    Michael Armbrust <michael@databricks.com>  2014-10-24 18:36:35 -0700
committer Josh Rosen <joshrosen@databricks.com>  2014-10-24 18:36:35 -0700
commit    3a845d3c048eebb0bddb3937128746fde3e8e4d8 (patch)
tree      192dbba101a08ff02de979efba297587f30fd721 /sql/hive/src/test/resources/ql
parent    898b22ab1fe90e8a3935b19566465046f2256fa6 (diff)
download  spark-3a845d3c048eebb0bddb3937128746fde3e8e4d8.tar.gz
          spark-3a845d3c048eebb0bddb3937128746fde3e8e4d8.tar.bz2
          spark-3a845d3c048eebb0bddb3937128746fde3e8e4d8.zip
[SQL] Update Hive test harness for Hive 12 and 13
As part of the upgrade I also copy the newest version of the query tests, and whitelist a bunch of new ones that are now passing.

Author: Michael Armbrust <michael@databricks.com>

Closes #2936 from marmbrus/fix13tests and squashes the following commits:

d9cbdab [Michael Armbrust] Remove user specific tests
65801cd [Michael Armbrust] style and rat
8f6b09a [Michael Armbrust] Update test harness to work with both Hive 12 and 13.
f044843 [Michael Armbrust] Update Hive query tests and golden files to 0.13
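For context, the "whitelist" above is the list of Hive .q query tests the Spark harness is permitted to run and diff against golden files. A minimal sketch of how such a whitelist-driven query-file suite can be wired up follows; the names (HiveQueryFileTest, createQueryTest, registerAll) and shapes are illustrative assumptions, not the literal contents of this patch:

    // Minimal sketch, assuming a harness shaped like Spark's query-file tests.
    // All names here are illustrative, not the literal code in this commit.
    abstract class HiveQueryFileTest {
      // The .q files copied under sql/hive/src/test/resources/ql/... in the
      // diffstat below, keyed by test name.
      def testCases: Seq[(String, java.io.File)]

      // Regexes of tests known to pass (the whitelist grown by this patch)
      // and of tests that must be skipped.
      def whiteList: Seq[String]
      def blackList: Seq[String]

      // Registers one test that runs the query and diffs the golden file.
      def createQueryTest(name: String, sql: String): Unit

      // Only whitelisted, non-blacklisted .q files become test cases.
      def registerAll(): Unit =
        for ((name, file) <- testCases) {
          val allowed =
            whiteList.exists(name.matches) && !blackList.exists(name.matches)
          if (allowed) {
            createQueryTest(name, scala.io.Source.fromFile(file).mkString)
          }
        }
    }

Under this scheme, whitelisting a newly passing test amounts to appending its name to whiteList in the concrete suite.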
Diffstat (limited to 'sql/hive/src/test/resources/ql')
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientcompare/vectorized_math_funcs.q 43
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientcompare/vectorized_math_funcs_00.qv 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientcompare/vectorized_math_funcs_01.qv 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_concatenate_indexed_table.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_invalidspec.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_nodrop.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_nodrop_table.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_offline.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_rename_partition_failure.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_rename_partition_failure2.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_rename_partition_failure3.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ambiguous_col1.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ambiguous_col2.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/analyze_non_existent_tbl.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/archive_corrupt.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_addjar.q 7
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_addpartition.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_alter_db_owner.q 11
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_alter_db_owner_default.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_cannot_create_all_role.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_cannot_create_default_role.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_cannot_create_none_role.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_caseinsensitivity.q 17
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_create_func1.q 7
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_create_func2.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_create_macro1.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_create_role_no_admin.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_createview.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_ctas.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_desc_table_nosel.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_dfs.q 7
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_disallow_transform.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_drop_db_cascade.q 22
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_drop_db_empty.q 27
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_drop_role_no_admin.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_droppartition.q 11
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_fail_create_db.q 5
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_fail_drop_db.q 5
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_grant_table_allpriv.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_grant_table_dup.q 16
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_grant_table_fail1.q 11
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_grant_table_fail_nogrant.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_insert_noinspriv.q 11
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_insert_noselectpriv.q 11
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_invalid_priv_v1.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_invalid_priv_v2.q 5
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_not_owner_alter_tab_rename.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_not_owner_alter_tab_serdeprop.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_not_owner_drop_tab.q 11
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_not_owner_drop_view.q 11
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_priv_current_role_neg.q 29
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_public_create.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_public_drop.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_revoke_table_fail1.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_revoke_table_fail2.q 18
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_role_cycles1.q 12
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_role_cycles2.q 24
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_role_grant.q 22
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_rolehierarchy_privs.q 74
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_select.q 9
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_select_view.q 11
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_set_role_neg1.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_set_role_neg2.q 16
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_show_parts_nosel.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_show_role_principals_no_admin.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_show_role_principals_v1.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_show_roles_no_admin.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_truncate.q 9
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_add_partition.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_alterpart_loc.q 16
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_altertab_setloc.q 13
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_create_table1.q 11
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_create_table_ext.q 11
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_createdb.q 12
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_export.q 22
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_import.q 25
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_index.q 13
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_insert.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_insert_local.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_load_data.q 11
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorize_create_tbl.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorize_grant_public.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorize_revoke_public.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/bucket_mapjoin_mismatch1.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/bucket_mapjoin_wrong_table_metadata_1.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/bucket_mapjoin_wrong_table_metadata_2.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/clustern1.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_dp.q 12
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_incorrect_num_keys.q 12
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_invalid_values.q 12
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_multiple_part_clause.q 12
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_tbllvl.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_tbllvl_complex_type.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_tbllvl_incorrect_column.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/compile_processor.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/compute_stats_long.q 7
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/create_function_nonexistent_class.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/create_function_nonexistent_db.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/create_function_nonudf_class.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/cte_recursion.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/cte_with_in_subquery.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/date_literal1.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dbtxnmgr_nodblock.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dbtxnmgr_nodbunlock.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dbtxnmgr_notablelock.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dbtxnmgr_notableunlock.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/deletejar.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/drop_func_nonexistent.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/drop_partition_filter_failure2.q 11
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dynamic_partitions_with_whitelist.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_incomplete_partition.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists2.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists3.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_missing.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_00_unsupported_schema.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_01_nonpart_over_loaded.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_02_all_part_over_overlap.q 16
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_03_nonpart_noncompat_colschema.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_04_nonpart_noncompat_colnumber.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_05_nonpart_noncompat_coltype.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_06_nonpart_noncompat_storage.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_07_nonpart_noncompat_ifof.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_08_nonpart_noncompat_serde.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_09_nonpart_noncompat_serdeparam.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_10_nonpart_noncompat_bucketing.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_11_nonpart_noncompat_sorting.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_13_nonnative_import.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_14_nonpart_part.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_15_part_nonpart.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_16_part_noncompat_schema.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_17_part_spec_underspec.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_18_part_spec_missing.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_19_external_over_existing.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_20_managed_location_over_existing.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_21_part_managed_external.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_22_export_authfail.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_23_import_exist_authfail.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_24_import_part_authfail.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_25_import_nonexist_authfail.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/fetchtask_ioexception.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/file_with_header_footer_negative.q 13
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/illegal_partition_type.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/illegal_partition_type3.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/illegal_partition_type4.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/index_compact_entry_limit.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/index_compact_size_limit.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/insert_into5.q 9
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/insert_into6.q 17
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/insertover_dynapart_ifnotexists.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/invalid_char_length_1.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/invalid_char_length_2.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/invalid_char_length_3.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/invalid_columns.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/join_alt_syntax_comma_on.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/join_cond_unqual_ambiguous.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/join_cond_unqual_ambiguous_vc.q 5
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/limit_partition.q 7
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/limit_partition_stats.q 18
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_exist_part_authfail.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_non_native.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_nonpart_authfail.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_part_authfail.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_part_nospec.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_stored_as_dirs.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_view_failure.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_fileformat.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_fileformat_rc_seq.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_fileformat_txt_seq.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_noof_part.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/local_mapred_error_cache.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/lockneg_query_tbl_in_locked_db.q 17
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/lockneg_try_db_lock_conflict.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/lockneg_try_drop_locked_db.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/lockneg_try_lock_db_in_use.q 15
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/nested_complex_neg.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/nopart_insert.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/nopart_load.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/notable_alias3.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_char.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_date.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_decimal.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_timestamp.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_varchar.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/protectmode_part2.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ptf_negative_AggrFuncsWithNoGBYNoPartDef.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ptf_negative_AmbiguousWindowDefn.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/regex_col_1.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/regex_col_2.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/regex_col_groupby.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/script_broken_pipe1.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/script_error.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/serde_regex2.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/set_hiveconf_validation2.q 5
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_aggregator_error_1.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_aggregator_error_2.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_publisher_error_1.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_publisher_error_2.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_exists_implicit_gby.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_in_groupby.q 5
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_in_select.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_multiple_cols_in_select.q 7
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_nested_subquery.q 18
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_notexists_implicit_gby.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_shared_alias.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_subquery_chain.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_unqual_corr_expr.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_windowing_corr.q 26
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_with_or_cond.q 5
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_case_type_wrong.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_case_type_wrong2.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_case_type_wrong3.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_invalid.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_local_resource.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_nonexistent_resource.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_qualified_name.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_when_type_wrong2.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_when_type_wrong3.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udfnull.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/union.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/windowing_invalid_udaf.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientnegative/windowing_ll_no_neg.q 26
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter1.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter3.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter5.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_char1.q 32
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_char2.q 22
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_concatenate_indexed_table.q 12
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_db_owner.q 9
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_merge.q 12
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_merge_2.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_merge_stats.q 12
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q 85
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q 59
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_partition_coltype.q 42
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_partition_protect_mode.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_rename_partition.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_varchar2.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ambiguous_col.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_filter.q 76
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_groupby.q 69
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_join.q 81
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_limit.q 30
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_part.q 85
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_select.q 143
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_table.q 53
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_union.q 55
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ansi_sql_arithmetic.q 13
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/archive_corrupt.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_1_sql_std.q 36
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_2.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_9.q 17
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_admin_almighty1.q 17
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_create_func1.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_create_macro1.q 12
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_create_table_owner_privs.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_grant_public_role.q 18
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_grant_table_priv.q 43
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_index.q 12
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_owner_actions.q 16
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_owner_actions_db.q 21
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_parts.q 19
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_revoke_table_priv.q 61
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_role_grant1.q 38
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_role_grant2.q 34
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_set_show_current_role.q 21
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_view_sqlstd.q 66
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join25.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join32.q 16
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join_filters.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join_nulls.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join_reordering_values.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join_without_localtask.q 29
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_1.q 22
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_11.q 22
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_12.q 26
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q 92
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_2.q 16
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_3.q 16
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_4.q 20
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_5.q 12
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_7.q 24
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_8.q 26
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_compression_enabled.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_evolved_schemas.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_joins.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_nullable_fields.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_partitioned.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_sanity_test.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/binary_constant.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/binary_table_colserde.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/binarysortable_1.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_if_with_path_filter.q 15
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_map_join_1.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_map_join_2.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q 85
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q 50
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_1.q 20
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_2.q 16
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_3.q 16
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_4.q 20
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_5.q 12
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_6.q 16
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_7.q 24
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_8.q 24
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketizedhiveinputformat_auto.q 20
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin1.q 16
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin10.q 20
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin11.q 24
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin12.q 12
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin2.q 16
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin3.q 16
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin4.q 16
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin5.q 28
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin7.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin8.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin9.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin_negative.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin_negative2.q 12
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin_negative3.q 24
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cast_to_int.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_1.q 32
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_2.q 36
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_cast.q 92
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_comparison.q 40
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_join1.q 35
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_nested_types.q 53
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_serde.q 102
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_udf1.q 156
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_union1.q 47
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_varchar_udf.q 9
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/column_access_stats.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/columnstats_partlvl.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/columnstats_tbllvl.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compile_processor.q 12
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_binary.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_boolean.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_decimal.q 11
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_double.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_long.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_string.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/constant_prop.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/correlationoptimizer1.q 25
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/correlationoptimizer4.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/correlationoptimizer5.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/count.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_func1.q 30
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_like.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_merge_compressed.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_nested_type.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_struct_table.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_union_table.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_view_translate.q 11
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cross_product_check_1.q 26
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cross_product_check_2.q 27
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ctas.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ctas_char.q 22
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ctas_hadoop20.q 11
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cte_1.q 28
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cte_2.q 56
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/custom_input_output_format.q 5
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/database.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/database_drop.q 17
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_1.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_2.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_3.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_4.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_comparison.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_join1.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_serde.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_udf.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q 12
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q 15
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_ddl1.q 59
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query1.q 17
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query2.q 17
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query3.q 21
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query4.q 19
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query5.q 24
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q 11
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_1.q 28
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_2.q 60
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_3.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_4.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_5.q 18
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_6.q 27
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_join.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_precision.q 15
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_serde.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_udf.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/delimiter.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/desc_tbl_part_cols.q 7
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/disable_file_format_check.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/disallow_incompatible_type_change_off.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/distinct_stats.q 20
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/drop_partitions_filter2.q 5
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/drop_with_concurrency.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dynamic_partition_skip_default.q 16
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q 161
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q 155
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/escape1.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/escape2.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exchange_partition.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exchange_partition2.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exchange_partition3.q 5
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_00_nonpart_empty.q 9
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_01_nonpart.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_02_00_part_empty.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_02_part.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_03_nonpart_over_compat.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_04_all_part.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_04_evolved_parts.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_05_some_part.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_06_one_part.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_07_all_part_over_nonoverlap.q 16
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_08_nonpart_rename.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_09_part_spec_nonoverlap.q 18
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_10_external_managed.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_11_managed_external.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_12_external_location.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_13_managed_location.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_14_managed_location_over_existing.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_15_external_part.q 24
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_16_part_external.q 26
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_17_part_managed.q 20
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_18_part_external.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_19_00_part_external_location.q 16
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_19_part_external_location.q 20
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_20_part_managed_location.q 20
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_21_export_authsuccess.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_22_import_exist_authsuccess.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_23_import_part_authsuccess.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_24_import_nonexist_authsuccess.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_hidden_files.q 22
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/explain_rearrange.q 98
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/external_table_with_space_in_location_path.q 23
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/file_with_header_footer.q 39
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/filter_join_breaktask2.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/filter_numeric.q 21
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/global_limit.q 14
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby10.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby12.q 13
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_limit.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map_skew.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_noskew.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_limit.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map_multi_distinct.q 13
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew_multi_distinct.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_noskew.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_noskew.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map_skew.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_noskew.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_multi_single_reducer.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_skew.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew_multi_single_reducer.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map_skew.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_noskew.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_bigdata.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_cube1.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_id1.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_id2.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets1.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets2.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets3.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets4.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets5.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr_multi_distinct.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_resolution.q 61
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_rollup1.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_1.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_2.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_3.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_4.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_5.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_6.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_7.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_8.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_9.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_test_1.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/import_exported_table.q 13
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auth.q 13
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_empty.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_file_format.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_mult_tables.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_mult_tables_compact.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_multiple.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_partitioned.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_self_join.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_unused.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap1.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap2.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap3.q 11
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_auto.q 15
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_auto_partitioned.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_compression.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_rc.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_1.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_2.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_3.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_binary_search.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compression.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_creation.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_serde.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_stale.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_stale_partitioned.q 1
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_dyn_part.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_const_type.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input13.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input16.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input16_cc.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input19.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input20.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input21.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input22.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input33.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input37.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input3_limit.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input4.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input40.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input43.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input44.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input45.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input4_cb_delim.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input_dfs.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/inputddl5.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/inputddl6.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/inputddl7.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert1_overwrite_partitions.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert2_overwrite_partitions.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert_into3.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert_overwrite_local_directory_1.q 52
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_1to1.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_alt_syntax.q 41
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_array.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_casesensitive.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_1.q 30
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_2.q 24
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_3.q 34
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_4.q 26
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual1.q 52
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual2.q 47
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual3.q 56
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual4.q 49
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_filters.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_hive_626.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_merging.q 25
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_nulls.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_nullsafe.q 10
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder2.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder3.q 8
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder4.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_star.q 16
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/lateral_view_noalias.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/lateral_view_ppd.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/lb_fs_stats.q 19
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leadlag.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leadlag_queries.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leftsemijoin.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leftsemijoin_mr.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/limit_partition_metadataonly.q 7
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/limit_pushdown.q 13
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/limit_pushdown_negative.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/list_bucket_dml_2.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/list_bucket_dml_4.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_decimal.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_double.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_ints.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_string.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_binary_data.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part1.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part10.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part3.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part4.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part8.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part9.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_exist_part_authsuccess.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_file_with_space_in_the_name.q 3
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_fs.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_fs2.q 6
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_fs_overwrite.q 20
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_hdfs_file_with_space_in_the_name.q 9
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_nonpart_authsuccess.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_overwrite.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_part_authsuccess.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/loadpart1.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/loadpart2.q 9
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/loadpart_err.q 4
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/macro.q 2
-rw-r--r-- sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_addjar.q 14
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_decimal.q35
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_hook.q3
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q10
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_memcheck.q16
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_subquery2.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge3.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge4.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition.q9
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition2.q13
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition3.q28
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition4.q18
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition5.q10
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/metadata_export_drop.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/metadata_only_queries.q77
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q51
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mi.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mrr.q59
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/multiMapJoin1.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/multiMapJoin2.q26
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nested_complex.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/newline.q10
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nonmr_fetch_threshold.q9
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nonreserved_keywords_input37.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/notable_alias3.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/null_cast.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/null_column.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullformat.q24
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullformatCTAS.q24
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullformatdir.q21
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullgroup3.q16
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullgroup5.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullscript.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/num_op_type_conv.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ops_comparison.q1
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/optrstat_groupby.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_analyze.q179
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_create.q31
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_dictionary_threshold.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_diff_part_cols.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_diff_part_cols2.q11
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_empty_strings.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ends_with_nulls.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_min_max.q32
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_char.q76
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_date.q97
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_decimal.q151
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_varchar.q76
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_predicate_pushdown.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_split_elimination.q168
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_vectorization_ppd.q69
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/order_within_subquery.q19
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parallel_orderby.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parquet_create.q36
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parquet_ctas.q24
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parquet_partitioned.q34
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parquet_types.q38
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partcols1.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_date.q51
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_date2.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_decode_name.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_special_char.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_type_check.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_varchar1.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_varchar2.q10
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_wise_fileformat17.q10
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_wise_fileformat18.q19
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/pcr.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_join4.q22
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_multi_insert.q12
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_transform.q9
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_udtf.q12
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_union_view.q12
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppr_pushdown.q18
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppr_pushdown2.q16
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/progress_1.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_decimal.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_general_queries.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_matchpath.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_rcfile.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_register_tblfn.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_seqfile.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quote2.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_alter.q21
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_basic.q34
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_partition.q24
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_skew.q26
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_smb.q34
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_tblproperty.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_bigdata.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/regex_col.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/remote_script.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/repair.q10
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/root_dir_external_table.q11
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/schemeAuthority2.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/scriptfile1.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/scriptfile1_win.q16
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/select_dummy_source.q33
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/serde_regex.q10
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/set_processor_namespaces.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_indexes_edge_cases.q3
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_indexes_syntax.q1
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_partitions.q3
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_roles.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_tablestatus.q1
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoin.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoin_noskew.q9
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt1.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt10.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt11.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt12.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt13.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt14.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt15.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt16.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt17.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt18.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt19.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt2.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt20.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt3.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt4.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt5.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt6.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt7.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt8.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt9.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_1.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_10.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_2.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_25.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_3.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_4.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_5.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_7.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_8.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/source.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/split.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats1.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats11.q18
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats18.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats19.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats3.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats4.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_aggregator_error_1.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_counter.q16
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_counter_partitioned.q45
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_invalidation.q15
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_list_bucket.q45
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_noscan_2.q10
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_only_null.q41
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_publisher_error_1.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/statsfs.q63
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/str_to_map.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subq.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subq_where_serialization.q5
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_alias.q16
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_exists.q45
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_exists_having.q60
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_in.q163
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_in_having.q104
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_multiinsert.q82
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_notexists.q41
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_notexists_having.q46
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_notin.q143
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_notin_having.q74
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q83
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_views.q48
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/symlink_text_input_format.q9
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/table_access_keys_stats.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/test_boolean_whereclause.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_dml.q40
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_fsstat.q19
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_insert_overwrite_local_directory_1.q5
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_join_tests.q12
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_joins_explain.q5
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_schema_evolution.q14
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_union.q94
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_1.q16
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_2.q16
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_3.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_comparison.q3
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_lazy.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_null.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_udf.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/transform1.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/truncate_column.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/truncate_column_merge.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/truncate_table.q10
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/type_cast_1.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/type_conversions_1.q1
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/type_widening.q1
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_collect_set.q11
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_context_ngrams.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_corr.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_covar_pop.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_covar_samp.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_ngrams.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_percentile.q1
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_percentile_approx_20.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_sum_list.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_E.q12
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_PI.q12
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_abs.q10
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_acos.q10
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_array.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_array_contains.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_ascii.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_asin.q10
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_atan.q10
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_between.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bin.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bitmap_and.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bitmap_empty.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bitmap_or.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_case.q25
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_case_thrift.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_coalesce.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_compare_java_string.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_concat.q10
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_concat_ws.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_conv.q20
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_cos.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_current_database.q26
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_degrees.q12
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_div.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_divide.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_elt.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_equal.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_explode.q22
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_field.q12
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_find_in_set.q28
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_format_number.q16
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_get_json_object.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_greaterthan.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_greaterthanorequal.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_hash.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_hex.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_hour.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_if.q10
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_in.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_in_file.q12
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_index.q1
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_inline.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_instr.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_isnull_isnotnull.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_java_method.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_length.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_lessthan.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_lessthanorequal.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_like.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_locate.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_logic_java_boolean.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_lpad.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_map.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_map_keys.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_map_values.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_named_struct.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_negative.q14
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_notequal.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_notop.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_nvl.q5
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_pmod.q20
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_printf.q16
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_radians.q16
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_reflect.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_reflect2.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_regexp.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_repeat.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_reverse.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_round.q14
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_round_2.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_round_3.q12
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_rpad.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_second.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_sign.q20
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_sin.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_size.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_sort_array.q14
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_space.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_split.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_struct.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_substr.q18
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_tan.q10
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_testlength.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_testlength2.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_boolean.q60
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_byte.q22
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_double.q22
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_float.q22
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_long.q22
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_short.q22
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_string.q24
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_unix_timestamp.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_translate.q16
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_unhex.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_union.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_unix_timestamp.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_using.q15
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_weekofyear.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_when.q23
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath.q12
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_boolean.q14
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_double.q18
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_float.q18
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_int.q18
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_long.q18
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_short.q18
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_string.q18
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udtf_explode.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udtf_json_tuple.q14
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udtf_parse_url_tuple.q12
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udtf_posexplode.q15
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union34.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_date.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_null.q3
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_1.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_10.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_11.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_12.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_13.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_14.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_15.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_16.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_17.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_18.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_19.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_2.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_20.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_21.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_22.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_23.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_24.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_3.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_4.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_5.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_6.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_7.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_8.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_9.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_top_level.q106
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_view.q1
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/uniquejoin.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_1.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_cast.q1
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_comparison.q1
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_join1.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_serde.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_union1.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_between_in.q35
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_coalesce.q32
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_aggregate.q20
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_cast.q5
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_expressions.q5
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q19
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q77
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_left_outer_join.q21
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_non_string_partition.q17
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_0.q27
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_1.q21
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_10.q24
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_11.q15
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_12.q32
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_13.q31
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_14.q33
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_15.q31
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_16.q20
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_2.q23
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_3.q25
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_4.q23
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_5.q20
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_6.q21
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_7.q25
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_8.q23
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_9.q24
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_decimal_date.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_div0.q24
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_limit.q37
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_nested_udf.q3
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_not.q27
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_part.q7
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_part_project.q7
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_pushdown.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_short_regress.q852
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_bucketmapjoin1.q46
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_case.q37
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_casts.q149
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_context.q47
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_date_funcs.q122
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_distinct_gby.q12
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_mapjoin.q12
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_math_funcs.q107
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_nested_mapjoin.q8
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_rcfile_columnar.q18
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_shufflejoin.q10
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_string_funcs.q46
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q124
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/view.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/view_cast.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_adjust_rowcontainer_sz.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_columnPruning.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_expressions.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_multipartitioning.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_navfn.q6
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_ntile.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_rank.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_udaf.q2
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_udaf2.q4
-rw-r--r--sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_windowspec.q2
1022 files changed, 12890 insertions, 2088 deletions
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientcompare/vectorized_math_funcs.q b/sql/hive/src/test/resources/ql/src/test/queries/clientcompare/vectorized_math_funcs.q
new file mode 100644
index 0000000000..c640ca148b
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientcompare/vectorized_math_funcs.q
@@ -0,0 +1,43 @@
+
+select
+ cdouble
+ ,Round(cdouble, 2)
+ ,Floor(cdouble)
+ ,Ceil(cdouble)
+ ,Rand(98007) as rnd
+ ,Exp(ln(cdouble))
+ ,Ln(cdouble)
+ ,Ln(cfloat)
+ ,Log10(cdouble)
+ -- Use log2 as a representative function to test all input types.
+ ,Log2(cdouble)
+ ,Log2(cfloat)
+ ,Log2(cbigint)
+ ,Log2(cint)
+ ,Log2(csmallint)
+ ,Log2(ctinyint)
+ ,Log(2.0, cdouble)
+ ,Pow(log2(cdouble), 2.0)
+ ,Power(log2(cdouble), 2.0)
+ ,Sqrt(cdouble)
+ ,Sqrt(cbigint)
+ ,Bin(cbigint)
+ ,Hex(cdouble)
+ ,Conv(cbigint, 10, 16)
+ ,Abs(cdouble)
+ ,Abs(ctinyint)
+ ,Pmod(cint, 3)
+ ,Sin(cdouble)
+ ,Asin(cdouble)
+ ,Cos(cdouble)
+ ,ACos(cdouble)
+ ,Atan(cdouble)
+ ,Degrees(cdouble)
+ ,Radians(cdouble)
+ ,Positive(cdouble)
+ ,Positive(cbigint)
+ ,Negative(cdouble)
+ ,Sign(cdouble)
+ ,Sign(cbigint)
+from alltypesorc order by rnd limit 400;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientcompare/vectorized_math_funcs_00.qv b/sql/hive/src/test/resources/ql/src/test/queries/clientcompare/vectorized_math_funcs_00.qv
new file mode 100644
index 0000000000..51f231008f
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientcompare/vectorized_math_funcs_00.qv
@@ -0,0 +1 @@
+SET hive.vectorized.execution.enabled = false;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientcompare/vectorized_math_funcs_01.qv b/sql/hive/src/test/resources/ql/src/test/queries/clientcompare/vectorized_math_funcs_01.qv
new file mode 100644
index 0000000000..18e02dc854
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientcompare/vectorized_math_funcs_01.qv
@@ -0,0 +1 @@
+SET hive.vectorized.execution.enabled = true;
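The two .qv variants above drive the clientcompare harness: the shared vectorized_math_funcs.q query is run once under each variant's settings and the two outputs are diffed against each other. A minimal sketch of that pattern, assuming the standard alltypesorc test table (the exact runner mechanics are an assumption here, not something this patch shows):

-- Sketch of what a clientcompare run effectively executes per variant.
SET hive.vectorized.execution.enabled = false;  -- variant 00
SELECT Log2(cdouble), Sqrt(cdouble) FROM alltypesorc ORDER BY cdouble LIMIT 10;

SET hive.vectorized.execution.enabled = true;   -- variant 01
SELECT Log2(cdouble), Sqrt(cdouble) FROM alltypesorc ORDER BY cdouble LIMIT 10;
-- The harness then compares the two result sets; they must match row for row.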
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_concatenate_indexed_table.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_concatenate_indexed_table.q
index 4881757a46..4193315d30 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_concatenate_indexed_table.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_concatenate_indexed_table.q
@@ -1,9 +1,9 @@
set hive.exec.concatenate.check.index=true;
create table src_rc_concatenate_test(key int, value string) stored as rcfile;
-load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_concatenate_test;
-load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_concatenate_test;
-load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_concatenate_test;
+load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_concatenate_test;
+load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_concatenate_test;
+load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_concatenate_test;
show table extended like `src_rc_concatenate_test`;
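The only change in this file, and in most of the modified queries that follow, is the data path: '../data/files' becomes '../../data/files', apparently because the Hive 0.13 harness runs query files from a directory one level deeper, putting the shared data directory two levels up. A small sketch of the updated convention, using a hypothetical table name:

-- Hypothetical example of the new relative-path convention:
create table path_demo(key int, value string) stored as rcfile;
load data local inpath '../../data/files/smbbucket_1.rc' into table path_demo;
select count(*) from path_demo;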
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_invalidspec.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_invalidspec.q
index 5f9d5ef9ca..8cbb25cfa9 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_invalidspec.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_invalidspec.q
@@ -2,7 +2,7 @@
create table if not exists alter_part_invalidspec(key string, value string ) partitioned by (year string, month string) stored as textfile ;
-- Load data
-load data local inpath '../data/files/T1.txt' overwrite into table alter_part_invalidspec partition (year='1996', month='10');
-load data local inpath '../data/files/T1.txt' overwrite into table alter_part_invalidspec partition (year='1996', month='12');
+load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_invalidspec partition (year='1996', month='10');
+load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_invalidspec partition (year='1996', month='12');
alter table alter_part_invalidspec partition (year='1997') enable no_drop;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_nodrop.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_nodrop.q
index 92af30b6b5..3c0ff02b1a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_nodrop.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_nodrop.q
@@ -2,8 +2,8 @@
create table if not exists alter_part_nodrop_part(key string, value string ) partitioned by (year string, month string) stored as textfile ;
-- Load data
-load data local inpath '../data/files/T1.txt' overwrite into table alter_part_nodrop_part partition (year='1996', month='10');
-load data local inpath '../data/files/T1.txt' overwrite into table alter_part_nodrop_part partition (year='1996', month='12');
+load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_nodrop_part partition (year='1996', month='10');
+load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_nodrop_part partition (year='1996', month='12');
alter table alter_part_nodrop_part partition (year='1996') enable no_drop;
alter table alter_part_nodrop_part drop partition (year='1996');
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_nodrop_table.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_nodrop_table.q
index 135411fd32..f2135b1aa0 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_nodrop_table.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_nodrop_table.q
@@ -2,8 +2,8 @@
create table if not exists alter_part_nodrop_table(key string, value string ) partitioned by (year string, month string) stored as textfile ;
-- Load data
-load data local inpath '../data/files/T1.txt' overwrite into table alter_part_nodrop_table partition (year='1996', month='10');
-load data local inpath '../data/files/T1.txt' overwrite into table alter_part_nodrop_table partition (year='1996', month='12');
+load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_nodrop_table partition (year='1996', month='10');
+load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_nodrop_table partition (year='1996', month='12');
alter table alter_part_nodrop_table partition (year='1996') enable no_drop;
drop table alter_part_nodrop_table;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_offline.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_offline.q
index 899145deaf..7376d8bfe4 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_offline.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_partition_offline.q
@@ -2,8 +2,8 @@
create table if not exists alter_part_offline (key string, value string ) partitioned by (year string, month string) stored as textfile ;
-- Load data
-load data local inpath '../data/files/T1.txt' overwrite into table alter_part_offline partition (year='1996', month='10');
-load data local inpath '../data/files/T1.txt' overwrite into table alter_part_offline partition (year='1996', month='12');
+load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_offline partition (year='1996', month='10');
+load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_offline partition (year='1996', month='12');
alter table alter_part_offline partition (year='1996') disable offline;
select * from alter_part_offline where year = '1996';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_rename_partition_failure.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_rename_partition_failure.q
index 26ba287890..be971f1849 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_rename_partition_failure.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_rename_partition_failure.q
@@ -1,5 +1,5 @@
create table alter_rename_partition_src ( col1 string ) stored as textfile ;
-load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src ;
+load data local inpath '../../data/files/test.dat' overwrite into table alter_rename_partition_src ;
create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile;
insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src ;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_rename_partition_failure2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_rename_partition_failure2.q
index 6e51c2f762..4babdda2db 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_rename_partition_failure2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_rename_partition_failure2.q
@@ -1,5 +1,5 @@
create table alter_rename_partition_src ( col1 string ) stored as textfile ;
-load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src ;
+load data local inpath '../../data/files/test.dat' overwrite into table alter_rename_partition_src ;
create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile;
insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src ;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_rename_partition_failure3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_rename_partition_failure3.q
index 2d4ce0b9f6..3af807ef61 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_rename_partition_failure3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/alter_rename_partition_failure3.q
@@ -1,5 +1,5 @@
create table alter_rename_partition_src ( col1 string ) stored as textfile ;
-load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src ;
+load data local inpath '../../data/files/test.dat' overwrite into table alter_rename_partition_src ;
create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile;
insert overwrite table alter_rename_partition partition (pCol1='old_part1:', pcol2='old_part2:') select col1 from alter_rename_partition_src ;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ambiguous_col1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ambiguous_col1.q
index fdf20f850e..9e8bcbd1bb 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ambiguous_col1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ambiguous_col1.q
@@ -1,2 +1,3 @@
+set hive.support.quoted.identifiers=none;
-- TOK_TABLE_OR_COL
explain select * from (select `.*` from (select * from src) a join (select * from src1) b on (a.key = b.key)) t;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ambiguous_col2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ambiguous_col2.q
index de59bc579a..33d4aed3cd 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ambiguous_col2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ambiguous_col2.q
@@ -1,2 +1,3 @@
+set hive.support.quoted.identifiers=none;
-- DOT
explain select * from (select a.`[kv].*`, b.`[kv].*` from (select * from src) a join (select * from src1) b on (a.key = b.key)) t;
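Both ambiguous_col tests gain 'set hive.support.quoted.identifiers=none;' because Hive 0.13 introduced standard quoted identifiers (HIVE-6013); backquoted patterns such as `.*` and `[kv].*` are only parsed as column regexes when the setting is 'none'. For illustration, a query relying on the same regex-column behavior (assuming the standard src table with columns key and value):

set hive.support.quoted.identifiers=none;
-- With quoted identifiers disabled, a backquoted name is a column regex:
select `(key|value)` from src limit 5;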
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/analyze_non_existent_tbl.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/analyze_non_existent_tbl.q
new file mode 100644
index 0000000000..78a97019f1
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/analyze_non_existent_tbl.q
@@ -0,0 +1 @@
+analyze table nonexistent compute statistics;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/archive_corrupt.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/archive_corrupt.q
index bea2539162..130b37b5c9 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/archive_corrupt.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/archive_corrupt.q
@@ -14,5 +14,5 @@ create table tstsrcpart like srcpart;
-- to be thrown during the LOAD step. This former behavior is tested
-- in clientpositive/archive_corrupt.q
-load data local inpath '../data/files/archive_corrupt.rc' overwrite into table tstsrcpart partition (ds='2008-04-08', hr='11');
+load data local inpath '../../data/files/archive_corrupt.rc' overwrite into table tstsrcpart partition (ds='2008-04-08', hr='11');
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_addjar.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_addjar.q
new file mode 100644
index 0000000000..a1709dae5f
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_addjar.q
@@ -0,0 +1,7 @@
+set hive.security.authorization.enabled=true;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory;
+
+-- running a sql query to initialize the authorization - not needed in real HS2 mode
+show tables;
+
+add jar ${system:maven.local.repository}/org/apache/hive/hcatalog/hive-hcatalog-core/${system:hive.version}/hive-hcatalog-core-${system:hive.version}.jar;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_addpartition.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_addpartition.q
new file mode 100644
index 0000000000..8abdd2b3cd
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_addpartition.q
@@ -0,0 +1,10 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+set user.name=user1;
+-- check add partition without insert privilege
+create table tpart(i int, j int) partitioned by (k string);
+
+set user.name=user2;
+alter table tpart add partition (k = 'abc');
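The negative authorization tests in this patch share one shape: user1 owns an object, and user2 attempts an operation that needs a privilege it was never granted. The passing counterpart, sketched here with the grant syntax used later in this patch, would grant the missing privilege first:

set user.name=user1;
GRANT INSERT ON tpart TO USER user2;

set user.name=user2;
alter table tpart add partition (k = 'abc');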
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_alter_db_owner.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_alter_db_owner.q
new file mode 100644
index 0000000000..f716262e23
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_alter_db_owner.q
@@ -0,0 +1,11 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=user1;
+
+-- check if alter database owner fails
+-- for now, alter db owner is allowed only for admin
+
+create database dbao;
+alter database dbao set owner user user2;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_alter_db_owner_default.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_alter_db_owner_default.q
new file mode 100644
index 0000000000..f904935018
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_alter_db_owner_default.q
@@ -0,0 +1,8 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=user1;
+
+-- check if alter database owner fails
+alter database default set owner user user1;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_cannot_create_all_role.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_cannot_create_all_role.q
new file mode 100644
index 0000000000..de91e91923
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_cannot_create_all_role.q
@@ -0,0 +1,6 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set user.name=hive_admin_user;
+set role ADMIN;
+create role all;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_cannot_create_default_role.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_cannot_create_default_role.q
new file mode 100644
index 0000000000..42a42f65b2
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_cannot_create_default_role.q
@@ -0,0 +1,6 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set user.name=hive_admin_user;
+set role ADMIN;
+create role default;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_cannot_create_none_role.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_cannot_create_none_role.q
new file mode 100644
index 0000000000..0d14cde6d5
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_cannot_create_none_role.q
@@ -0,0 +1,6 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set user.name=hive_admin_user;
+set role ADMIN;
+create role None;
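Taken together, the three cannot_create_*_role tests pin down that ALL, DEFAULT, and NONE are reserved role names, rejected even for an admin and regardless of case; ordinary names are fine. A sketch (the role name "testers" is illustrative):

set role ADMIN;
create role testers;  -- ordinary, non-reserved name: expected to succeed
create role NONE;     -- reserved name: expected to fail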
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_caseinsensitivity.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_caseinsensitivity.q
new file mode 100644
index 0000000000..d5ea284f14
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_caseinsensitivity.q
@@ -0,0 +1,17 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set user.name=hive_admin_user;
+set role ADMIN;
+
+create role testrole;
+show roles;
+drop role TESTROLE;
+show roles;
+create role TESTROLE;
+show roles;
+grant role testROLE to user hive_admin_user;
+set role testrolE;
+set role adMin;
+show roles;
+create role TESTRoLE;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_create_func1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_create_func1.q
new file mode 100644
index 0000000000..02bbe090cf
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_create_func1.q
@@ -0,0 +1,7 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=hive_test_user;
+
+-- permanent function creation should fail for non-admin roles
+create function perm_fn as 'org.apache.hadoop.hive.ql.udf.UDFAscii';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_create_func2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_create_func2.q
new file mode 100644
index 0000000000..8760fa8d82
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_create_func2.q
@@ -0,0 +1,8 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=hive_test_user;
+
+-- temp function creation should fail for non-admin roles
+create temporary function temp_fn as 'org.apache.hadoop.hive.ql.udf.UDFAscii';
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_create_macro1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_create_macro1.q
new file mode 100644
index 0000000000..c904a100c5
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_create_macro1.q
@@ -0,0 +1,8 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=hive_test_user;
+
+-- temp macro creation should fail for non-admin roles
+create temporary macro mymacro1(x double) x * x;
+
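These three tests appear to exercise the same SQL-standard-authorization rule: creating permanent functions, temporary functions, and temporary macros is reserved for the admin role. Presumably the passing form differs only in who runs it (a sketch, not part of the patch):

set user.name=hive_admin_user;
set role ADMIN;
create temporary macro mymacro1(x double) x * x;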
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_create_role_no_admin.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_create_role_no_admin.q
new file mode 100644
index 0000000000..a84fe64bd6
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_create_role_no_admin.q
@@ -0,0 +1,3 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+-- this test will fail because hive_test_user is not in the admin role.
+create role r1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_createview.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_createview.q
new file mode 100644
index 0000000000..9b1f2ea6c6
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_createview.q
@@ -0,0 +1,10 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+-- check create view without select privileges
+create table t1(i int);
+set user.name=user1;
+create view v1 as select * from t1;
+
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_ctas.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_ctas.q
new file mode 100644
index 0000000000..1cf74a365d
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_ctas.q
@@ -0,0 +1,10 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+-- check query without select privilege fails
+create table t1(i int);
+
+set user.name=user1;
+create table t2 as select * from t1;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_desc_table_nosel.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_desc_table_nosel.q
new file mode 100644
index 0000000000..47663c9bb9
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_desc_table_nosel.q
@@ -0,0 +1,14 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=user1;
+
+-- check if desc table fails as different user
+create table t1(i int);
+desc t1;
+
+grant all on table t1 to user user2;
+revoke select on table t1 from user user2;
+
+set user.name=user2;
+desc t1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_dfs.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_dfs.q
new file mode 100644
index 0000000000..7d47a7b649
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_dfs.q
@@ -0,0 +1,7 @@
+set hive.security.authorization.enabled=true;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory;
+
+-- running a SQL query to initialize the authorization; not needed in real HS2 mode
+show tables;
+dfs -ls ${system:test.tmp.dir}/
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_disallow_transform.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_disallow_transform.q
new file mode 100644
index 0000000000..64b300c8d9
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_disallow_transform.q
@@ -0,0 +1,3 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set role ALL;
+SELECT TRANSFORM (*) USING 'cat' AS (key, value) FROM src;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_drop_db_cascade.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_drop_db_cascade.q
new file mode 100644
index 0000000000..edeae9b71d
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_drop_db_cascade.q
@@ -0,0 +1,22 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=user1;
+
+-- ensure that drop database cascade works
+create database dba1;
+create table dba1.tab1(i int);
+drop database dba1 cascade;
+
+-- check if drop database fails if the db has a table for which the user does not have permission
+create database dba2;
+create table dba2.tab2(i int);
+
+set user.name=hive_admin_user;
+set role ADMIN;
+alter database dba2 set owner user user2;
+
+set user.name=user2;
+show current roles;
+drop database dba2 cascade;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_drop_db_empty.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_drop_db_empty.q
new file mode 100644
index 0000000000..46d4d0f92c
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_drop_db_empty.q
@@ -0,0 +1,27 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=user1;
+
+-- check if changing the owner and dropping as another user works
+create database dba1;
+
+set user.name=hive_admin_user;
+set role ADMIN;
+alter database dba1 set owner user user2;
+
+set user.name=user2;
+show current roles;
+drop database dba1;
+
+
+set user.name=user1;
+-- check if dropping db as another user fails
+show current roles;
+create database dba2;
+
+set user.name=user2;
+show current roles;
+
+drop database dba2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_drop_role_no_admin.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_drop_role_no_admin.q
new file mode 100644
index 0000000000..a7aa17f5ab
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_drop_role_no_admin.q
@@ -0,0 +1,10 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set user.name=hive_admin_user;
+set role ADMIN;
+show current roles;
+create role r1;
+set role ALL;
+show current roles;
+drop role r1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_droppartition.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_droppartition.q
new file mode 100644
index 0000000000..f05e9458fa
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_droppartition.q
@@ -0,0 +1,11 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/authz_drop_part_1;
+
+-- check drop partition without delete privilege
+create table tpart(i int, j int) partitioned by (k string);
+alter table tpart add partition (k = 'abc') location 'file:${system:test.tmp.dir}/authz_drop_part_1';
+set user.name=user1;
+alter table tpart drop partition (k = 'abc');
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_fail_create_db.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_fail_create_db.q
new file mode 100644
index 0000000000..d969e39027
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_fail_create_db.q
@@ -0,0 +1,5 @@
+set hive.security.authorization.enabled=true;
+
+create database db_to_fail;
+
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_fail_drop_db.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_fail_drop_db.q
new file mode 100644
index 0000000000..87719b0043
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_fail_drop_db.q
@@ -0,0 +1,5 @@
+set hive.security.authorization.enabled=false;
+create database db_fail_to_drop;
+set hive.security.authorization.enabled=true;
+
+drop database db_fail_to_drop;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_grant_table_allpriv.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_grant_table_allpriv.q
new file mode 100644
index 0000000000..f3c86b97ce
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_grant_table_allpriv.q
@@ -0,0 +1,14 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+
+set user.name=user1;
+-- current user has been set (a comment line before the set command results in a parse error!)
+
+CREATE TABLE table_priv_allf(i int);
+
+-- grant insert to user2 WITH grant option
+GRANT INSERT ON table_priv_allf TO USER user2 with grant option;
+
+set user.name=user2;
+-- try grant all to user3, without having all privileges
+GRANT ALL ON table_priv_allf TO USER user3;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_grant_table_dup.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_grant_table_dup.q
new file mode 100644
index 0000000000..7808cb3ec7
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_grant_table_dup.q
@@ -0,0 +1,16 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+
+set user.name=user1;
+-- current user has been set (a comment line before the set command results in a parse error!)
+
+CREATE TABLE tauth_gdup(i int);
+
+-- It should be possible to revoke the owner's privileges
+revoke SELECT ON tauth_gdup from user user1;
+
+show grant user user1 on table tauth_gdup;
+
+-- Owner already has all privileges granted; another grant would be a duplicate
+-- and result in an error
+GRANT INSERT ON tauth_gdup TO USER user1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_grant_table_fail1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_grant_table_fail1.q
new file mode 100644
index 0000000000..8dc8e45a79
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_grant_table_fail1.q
@@ -0,0 +1,11 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+
+set user.name=user1;
+-- current user has been set (a comment line before the set command results in a parse error!)
+
+CREATE TABLE table_priv_gfail1(i int);
+
+set user.name=user2;
+-- try grant insert to user3 as user2
+GRANT INSERT ON table_priv_gfail1 TO USER user3;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_grant_table_fail_nogrant.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_grant_table_fail_nogrant.q
new file mode 100644
index 0000000000..d51c1c3507
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_grant_table_fail_nogrant.q
@@ -0,0 +1,14 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+
+set user.name=user1;
+-- current user has been set (a comment line before the set command results in a parse error!)
+
+CREATE TABLE table_priv_gfail1(i int);
+
+-- grant insert to user2 WITHOUT grant option
+GRANT INSERT ON table_priv_gfail1 TO USER user2;
+
+set user.name=user2;
+-- try grant insert to user3
+GRANT INSERT ON table_priv_gfail1 TO USER user3;
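The contrast with authorization_grant_table_allpriv above is the WITH GRANT OPTION clause: a plain grant conveys the privilege itself but not the right to pass it on. Roughly (the table name t is illustrative):

GRANT INSERT ON t TO USER user2;                     -- user2 may insert, may not re-grant
GRANT INSERT ON t TO USER user2 WITH GRANT OPTION;   -- user2 may also grant INSERT onward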
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_insert_noinspriv.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_insert_noinspriv.q
new file mode 100644
index 0000000000..2fa3cb260b
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_insert_noinspriv.q
@@ -0,0 +1,11 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+-- check insert without insert privilege
+create table t1(i int);
+
+set user.name=user1;
+create table user2tab(i int);
+insert into table t1 select * from user2tab;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_insert_noselectpriv.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_insert_noselectpriv.q
new file mode 100644
index 0000000000..b9bee4ea40
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_insert_noselectpriv.q
@@ -0,0 +1,11 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+-- check insert without select priv
+create table t1(i int);
+
+set user.name=user1;
+create table t2(i int);
+insert into table t2 select * from t1;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_invalid_priv_v1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_invalid_priv_v1.q
new file mode 100644
index 0000000000..2a1da23dae
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_invalid_priv_v1.q
@@ -0,0 +1,6 @@
+create table if not exists authorization_invalid_v1 (key int, value string);
+grant delete on table authorization_invalid_v1 to user hive_test_user;
+drop table authorization_invalid_v1;
+
+
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_invalid_priv_v2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_invalid_priv_v2.q
new file mode 100644
index 0000000000..9c724085d9
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_invalid_priv_v2.q
@@ -0,0 +1,5 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+
+create table if not exists authorization_invalid_v2 (key int, value string);
+grant index on table authorization_invalid_v2 to user hive_test_user;
+drop table authorization_invalid_v2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_not_owner_alter_tab_rename.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_not_owner_alter_tab_rename.q
new file mode 100644
index 0000000000..8a3300cb2e
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_not_owner_alter_tab_rename.q
@@ -0,0 +1,10 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=user1;
+
+-- check if alter table fails as different user
+create table t1(i int);
+
+set user.name=user2;
+alter table t1 rename to tnew1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_not_owner_alter_tab_serdeprop.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_not_owner_alter_tab_serdeprop.q
new file mode 100644
index 0000000000..0172c4c74c
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_not_owner_alter_tab_serdeprop.q
@@ -0,0 +1,10 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=user1;
+
+-- check if alter table fails as different user
+create table t1(i int);
+
+set user.name=user2;
+ALTER TABLE t1 SET SERDEPROPERTIES ('field.delim' = ',');
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_not_owner_drop_tab.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_not_owner_drop_tab.q
new file mode 100644
index 0000000000..2d0e52da00
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_not_owner_drop_tab.q
@@ -0,0 +1,11 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=user1;
+
+-- check if drop table fails as different user
+create table t1(i int);
+
+set user.name=user2;
+drop table t1;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_not_owner_drop_view.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_not_owner_drop_view.q
new file mode 100644
index 0000000000..76bbab42b3
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_not_owner_drop_view.q
@@ -0,0 +1,11 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=user1;
+
+-- check if drop view fails as different user
+create table t1(i int);
+create view vt1 as select * from t1;
+
+set user.name=user2;
+drop view vt1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_priv_current_role_neg.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_priv_current_role_neg.q
new file mode 100644
index 0000000000..bbf3b66970
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_priv_current_role_neg.q
@@ -0,0 +1,29 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set user.name=hive_admin_user;
+set role ADMIN;
+
+-- the test verifies that authorization is performed with the privileges of the current roles
+
+-- grant privileges with grant option for table to role2
+create role role2;
+grant role role2 to user user2;
+create table tpriv_current_role(i int);
+grant all on table tpriv_current_role to role role2 with grant option;
+
+set user.name=user2;
+-- switch to user2
+
+-- by default all roles should be in current roles, and grant to new user should work
+show current roles;
+grant all on table tpriv_current_role to user user3;
+
+set role role2;
+-- switch to role2, grant should work
+grant all on table tpriv_current_role to user user4;
+show grant user user4 on table tpriv_current_role;
+
+set role PUBLIC;
+-- set role to public, should fail as role2 is not one of the current roles
+grant all on table tpriv_current_role to user user5;
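The failure here is deliberate: privilege checks run against the current role set, not against every role the user could assume, so "set role PUBLIC" drops role2 and the grant option it carries. Switching the roles back should make the same grant succeed again (a sketch):

set role ALL;  -- restore all granted roles as current
grant all on table tpriv_current_role to user user5;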
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_public_create.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_public_create.q
new file mode 100644
index 0000000000..002389f203
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_public_create.q
@@ -0,0 +1 @@
+create role PUBLIC;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_public_drop.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_public_drop.q
new file mode 100644
index 0000000000..69c5a8de8b
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_public_drop.q
@@ -0,0 +1 @@
+drop role PUBLIC;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_revoke_table_fail1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_revoke_table_fail1.q
new file mode 100644
index 0000000000..e19bf370fa
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_revoke_table_fail1.q
@@ -0,0 +1,14 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+
+set user.name=user1;
+-- current user has been set (a comment line before the set command results in a parse error!)
+
+CREATE TABLE table_priv_rfail1(i int);
+
+-- grant insert to user2
+GRANT INSERT ON table_priv_rfail1 TO USER user2;
+
+set user.name=user3;
+-- try revoking the privilege as user3
+REVOKE INSERT ON TABLE table_priv_rfail1 FROM USER user2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_revoke_table_fail2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_revoke_table_fail2.q
new file mode 100644
index 0000000000..4b0cf3286a
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_revoke_table_fail2.q
@@ -0,0 +1,18 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+
+set user.name=user1;
+-- current user has been set (a comment line before the set command results in a parse error!)
+
+CREATE TABLE table_priv_rfai2(i int);
+
+-- grant insert to user2
+GRANT INSERT ON table_priv_rfai2 TO USER user2;
+GRANT SELECT ON table_priv_rfai2 TO USER user3 WITH GRANT OPTION;
+
+set user.name=user3;
+-- grant select as user3 to user2
+GRANT SELECT ON table_priv_rfai2 TO USER user2;
+
+-- try revoking the privilege as user3
+REVOKE INSERT ON TABLE table_priv_rfai2 FROM USER user2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_role_cycles1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_role_cycles1.q
new file mode 100644
index 0000000000..a819d204f5
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_role_cycles1.q
@@ -0,0 +1,12 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set user.name=hive_admin_user;
+set role ADMIN;
+-- this is applicable to any security mode as the check is in the metastore
+create role role1;
+create role role2;
+grant role role1 to role role2;
+
+-- this will create a cycle
+grant role role2 to role role1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_role_cycles2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_role_cycles2.q
new file mode 100644
index 0000000000..423f030630
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_role_cycles2.q
@@ -0,0 +1,24 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+
+set user.name=hive_admin_user;
+set role ADMIN;
+-- this is applicable to any security mode as the check is in the metastore
+
+create role role1;
+
+create role role2;
+grant role role2 to role role1;
+
+create role role3;
+grant role role3 to role role2;
+
+create role role4;
+grant role role4 to role role3;
+
+create role role5;
+grant role role5 to role role4;
+
+-- this will create a cycle in the middle of the hierarchy
+grant role role2 to role role4;
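Both role_cycles tests rely on the metastore rejecting any grant that would make the role-membership graph cyclic, whether the offending edge closes the loop directly or deep inside a hierarchy. The existing membership edges can be inspected before granting; the ROLE-principal form of the command is assumed here by analogy with the USER form used elsewhere in this patch:

show role grant role role4;  -- lists the roles granted to role4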
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_role_grant.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_role_grant.q
new file mode 100644
index 0000000000..c5c500a712
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_role_grant.q
@@ -0,0 +1,22 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set user.name=hive_admin_user;
+
+set role ADMIN;
+
+----------------------------------------
+-- role granting with admin option
+-- since user2 doesn't have the admin option for role_noadmin, the last grant should fail
+----------------------------------------
+
+create role role_noadmin;
+create role src_role_wadmin;
+grant src_role_wadmin to user user2 with admin option;
+grant role_noadmin to user user2;
+show role grant user user2;
+
+
+set user.name=user2;
+set role role_noadmin;
+grant src_role_wadmin to user user3;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_rolehierarchy_privs.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_rolehierarchy_privs.q
new file mode 100644
index 0000000000..d9f4c7cdb8
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_rolehierarchy_privs.q
@@ -0,0 +1,74 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+set user.name=hive_admin_user;
+show current roles;
+set role ADMIN;
+
+----------
+-- create the following user, role mapping
+-- user1 -> role1 -> role2 -> role3
+----------
+
+create role role1;
+grant role1 to user user1;
+
+create role role2;
+grant role2 to role role1;
+
+create role role3;
+grant role3 to role role2;
+
+
+create table t1(i int);
+grant select on t1 to role role3;
+
+set user.name=user1;
+show current roles;
+select * from t1;
+
+set user.name=hive_admin_user;
+show current roles;
+grant select on t1 to role role2;
+
+
+set user.name=user1;
+show current roles;
+select * from t1;
+
+set user.name=hive_admin_user;
+set role ADMIN;
+show current roles;
+revoke select on table t1 from role role2;
+
+
+create role role4;
+grant role4 to user user1;
+grant role3 to role role4;
+
+set user.name=user1;
+show current roles;
+select * from t1;
+
+set user.name=hive_admin_user;
+show current roles;
+set role ADMIN;
+
+-- Revoke role3 from the hierarchy one grant at a time and check permissions;
+-- after revoking from both, select should fail
+revoke role3 from role role2;
+
+set user.name=user1;
+show current roles;
+select * from t1;
+
+set user.name=hive_admin_user;
+show current roles;
+set role ADMIN;
+revoke role3 from role role4;
+
+set user.name=user1;
+show current roles;
+select * from t1;
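The hierarchy test is checking transitivity: user1 reaches the SELECT privilege granted to role3 through both user1 -> role1 -> role2 -> role3 and user1 -> role4 -> role3, and loses it only once both paths are severed. In outline:

grant select on t1 to role role3;
revoke role3 from role role2;  -- first path gone, select via role4 still works
revoke role3 from role role4;  -- second path gone, select now fails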
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_select.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_select.q
new file mode 100644
index 0000000000..39871793af
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_select.q
@@ -0,0 +1,9 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+-- check query without select privilege fails
+create table t1(i int);
+
+set user.name=user1;
+select * from t1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_select_view.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_select_view.q
new file mode 100644
index 0000000000..a4071cd0d4
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_select_view.q
@@ -0,0 +1,11 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+-- check create view without select privileges
+create table t1(i int);
+create view v1 as select * from t1;
+set user.name=user1;
+select * from v1;
+
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_set_role_neg1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_set_role_neg1.q
new file mode 100644
index 0000000000..9ba3a82a56
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_set_role_neg1.q
@@ -0,0 +1,6 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+
+-- an error should be thrown if 'set role' is used with a role that does not exist
+
+set role nosuchroleexists;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_set_role_neg2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_set_role_neg2.q
new file mode 100644
index 0000000000..03f748fcc9
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_set_role_neg2.q
@@ -0,0 +1,16 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set user.name=hive_admin_user;
+set role ADMIN;
+
+-- an error should be thrown if 'set role' is used with a role that does not exist
+
+create role rset_role_neg;
+grant role rset_role_neg to user user2;
+
+set user.name=user2;
+set role rset_role_neg;
+set role public;
+set role nosuchroleexists;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_show_parts_nosel.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_show_parts_nosel.q
new file mode 100644
index 0000000000..d8190de950
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_show_parts_nosel.q
@@ -0,0 +1,10 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=user1;
+
+-- check if show partitions fails as different user
+create table t_show_parts(i int) partitioned by (j string);
+
+set user.name=user2;
+show partitions t_show_parts;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_show_role_principals_no_admin.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_show_role_principals_no_admin.q
new file mode 100644
index 0000000000..2afe87fc30
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_show_role_principals_no_admin.q
@@ -0,0 +1,3 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+-- This test will fail because hive_test_user is not in the admin role
+show principals role1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_show_role_principals_v1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_show_role_principals_v1.q
new file mode 100644
index 0000000000..69cea2f267
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_show_role_principals_v1.q
@@ -0,0 +1,2 @@
+-- This test will fail because the command is not currently supported in auth mode v1
+show principals role1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_show_roles_no_admin.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_show_roles_no_admin.q
new file mode 100644
index 0000000000..0fc9fca940
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_show_roles_no_admin.q
@@ -0,0 +1,3 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+-- This test will fail because hive_test_user is not in the admin role
+show roles;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_truncate.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_truncate.q
new file mode 100644
index 0000000000..285600b23a
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_truncate.q
@@ -0,0 +1,9 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+-- check truncate table without required privilege
+create table t1(i int, j int);
+set user.name=user1;
+truncate table t1;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_add_partition.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_add_partition.q
new file mode 100644
index 0000000000..d82ac710cc
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_add_partition.q
@@ -0,0 +1,10 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/a_uri_add_part;
+dfs -touchz ${system:test.tmp.dir}/a_uri_add_part/1.txt;
+dfs -chmod 555 ${system:test.tmp.dir}/a_uri_add_part/1.txt;
+
+create table tpart(i int, j int) partitioned by (k string);
+alter table tpart add partition (k = 'abc') location '${system:test.tmp.dir}/a_uri_add_part/';
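This and the remaining authorization_uri_* tests follow one recipe: the SQL standard authorizer turns a URI appearing in a statement into a filesystem permission check, and the chmod to a non-writable mode (555 or 300) is what makes the statement fail. The passing setup would presumably differ only in the mode (the directory name a_uri_ok is illustrative):

dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/a_uri_ok;
dfs -chmod 777 ${system:test.tmp.dir}/a_uri_ok;
alter table tpart add partition (k = 'xyz') location '${system:test.tmp.dir}/a_uri_ok/';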
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_alterpart_loc.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_alterpart_loc.q
new file mode 100644
index 0000000000..d38ba74d90
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_alterpart_loc.q
@@ -0,0 +1,16 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/az_uri_alterpart_loc_perm;
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/az_uri_alterpart_loc;
+dfs -touchz ${system:test.tmp.dir}/az_uri_alterpart_loc/1.txt;
+dfs -chmod 555 ${system:test.tmp.dir}/az_uri_alterpart_loc/1.txt;
+
+create table tpart(i int, j int) partitioned by (k string);
+alter table tpart add partition (k = 'abc') location '${system:test.tmp.dir}/az_uri_alterpart_loc_perm/';
+
+alter table tpart partition (k = 'abc') set location '${system:test.tmp.dir}/az_uri_alterpart_loc/';
+
+
+-- Attempt to set partition to location without permissions should fail
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_altertab_setloc.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_altertab_setloc.q
new file mode 100644
index 0000000000..c446b8636f
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_altertab_setloc.q
@@ -0,0 +1,13 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/az_uri_altertab_setloc;
+dfs -touchz ${system:test.tmp.dir}/az_uri_altertab_setloc/1.txt;
+dfs -chmod 555 ${system:test.tmp.dir}/az_uri_altertab_setloc/1.txt;
+
+create table t1(i int);
+
+alter table t1 set location '${system:test.tmp.dir}/az_uri_altertab_setloc/1.txt';
+
+-- Attempt to set location of table to a location without permissions should fail
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_create_table1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_create_table1.q
new file mode 100644
index 0000000000..c8e1fb43ee
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_create_table1.q
@@ -0,0 +1,11 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/a_uri_crtab1;
+dfs -touchz ${system:test.tmp.dir}/a_uri_crtab1/1.txt;
+dfs -chmod 555 ${system:test.tmp.dir}/a_uri_crtab1/1.txt;
+
+create table t1(i int) location '${system:test.tmp.dir}/a_uri_crtab1';
+
+-- Attempt to create table with dir that does not have write permission should fail
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_create_table_ext.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_create_table_ext.q
new file mode 100644
index 0000000000..c8549b4563
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_create_table_ext.q
@@ -0,0 +1,11 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/a_uri_crtab_ext;
+dfs -touchz ${system:test.tmp.dir}/a_uri_crtab_ext/1.txt;
+dfs -chmod 555 ${system:test.tmp.dir}/a_uri_crtab_ext/1.txt;
+
+create external table t1(i int) location '${system:test.tmp.dir}/a_uri_crtab_ext';
+
+-- Attempt to create table with dir that does not have write permission should fail
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_createdb.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_createdb.q
new file mode 100644
index 0000000000..edfdf5a8fc
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_createdb.q
@@ -0,0 +1,12 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/az_uri_createdb;
+dfs -touchz ${system:test.tmp.dir}/az_uri_createdb/1.txt;
+dfs -chmod 300 ${system:test.tmp.dir}/az_uri_createdb/1.txt;
+
+create database az_test_db location '${system:test.tmp.dir}/az_uri_createdb/';
+
+-- Attempt to create db for dir without sufficient permissions should fail
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_export.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_export.q
new file mode 100644
index 0000000000..81763916a0
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_export.q
@@ -0,0 +1,22 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+set hive.test.mode=true;
+set hive.test.mode.prefix=;
+set hive.test.mode.nosamplelist=export_auth_uri;
+
+
+create table export_auth_uri ( dep_id int comment "department id")
+ stored as textfile;
+
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/export_auth_uri/temp;
+dfs -rmr target/tmp/ql/test/data/exports/export_auth_uri;
+
+
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/export_auth_uri/;
+dfs -chmod 555 target/tmp/ql/test/data/exports/export_auth_uri;
+
+export table export_auth_uri to 'ql/test/data/exports/export_auth_uri';
+
+-- Attempt to export to location without sufficient permissions should fail
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_import.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_import.q
new file mode 100644
index 0000000000..4ea4dc0a47
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_import.q
@@ -0,0 +1,25 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+set hive.test.mode=true;
+set hive.test.mode.prefix=;
+set hive.test.mode.nosamplelist=import_auth_uri;
+
+
+create table import_auth_uri ( dep_id int comment "department id")
+ stored as textfile;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/import_auth_uri/temp;
+dfs -rmr target/tmp/ql/test/data/exports/import_auth_uri;
+export table import_auth_uri to 'ql/test/data/exports/import_auth_uri';
+drop table import_auth_uri;
+
+dfs -touchz target/tmp/ql/test/data/exports/import_auth_uri/1.txt;
+dfs -chmod 555 target/tmp/ql/test/data/exports/import_auth_uri/1.txt;
+
+create database importer;
+use importer;
+
+import from 'ql/test/data/exports/import_auth_uri';
+
+-- Attempt to import from location without sufficient permissions should fail
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_index.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_index.q
new file mode 100644
index 0000000000..1a8f9cb2ad
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_index.q
@@ -0,0 +1,13 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/az_uri_index;
+dfs -touchz ${system:test.tmp.dir}/az_uri_index/1.txt;
+dfs -chmod 555 ${system:test.tmp.dir}/az_uri_index/1.txt;
+
+
+create table t1(i int);
+create index idt1 on table t1 (i) as 'COMPACT' WITH DEFERRED REBUILD LOCATION '${system:test.tmp.dir}/az_uri_index/';
+
+-- Attempt to use location for index that does not have permissions should fail
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_insert.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_insert.q
new file mode 100644
index 0000000000..81b6e522c1
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_insert.q
@@ -0,0 +1,14 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/az_uri_insert;
+dfs -touchz ${system:test.tmp.dir}/az_uri_insert/1.txt;
+dfs -chmod 555 ${system:test.tmp.dir}/az_uri_insert/1.txt;
+
+create table t1(i int, j int);
+
+insert overwrite directory '${system:test.tmp.dir}/az_uri_insert/' select * from t1;
+
+-- Attempt to insert into uri without permissions should fail
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_insert_local.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_insert_local.q
new file mode 100644
index 0000000000..0a2fd8919f
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_insert_local.q
@@ -0,0 +1,14 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/az_uri_insert_local;
+dfs -touchz ${system:test.tmp.dir}/az_uri_insert_local/1.txt;
+dfs -chmod 555 ${system:test.tmp.dir}/az_uri_insert_local/1.txt;
+
+create table t1(i int, j int);
+
+insert overwrite local directory '${system:test.tmp.dir}/az_uri_insert_local/' select * from t1;
+
+-- Attempt to insert into uri without permissions should fail
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_load_data.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_load_data.q
new file mode 100644
index 0000000000..6af41f0cda
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorization_uri_load_data.q
@@ -0,0 +1,11 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/authz_uri_load_data;
+dfs -touchz ${system:test.tmp.dir}/authz_uri_load_data/1.txt;
+dfs -chmod 555 ${system:test.tmp.dir}/authz_uri_load_data/1.txt;
+
+create table t1(i int);
+load data inpath 'pfile:${system:test.tmp.dir}/authz_uri_load_data/' overwrite into table t1;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorize_create_tbl.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorize_create_tbl.q
new file mode 100644
index 0000000000..d8beac370d
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorize_create_tbl.q
@@ -0,0 +1,10 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+
+set hive.security.authorization.enabled=true;
+set user.name=user33;
+create database db23221;
+use db23221;
+
+set user.name=user44;
+create table twew221(a string);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorize_grant_public.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorize_grant_public.q
new file mode 100644
index 0000000000..bfd3165237
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorize_grant_public.q
@@ -0,0 +1 @@
+grant role PUBLIC to user hive_test_user;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorize_revoke_public.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorize_revoke_public.q
new file mode 100644
index 0000000000..2b29822371
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/authorize_revoke_public.q
@@ -0,0 +1 @@
+revoke role PUBLIC from user hive_test_user;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/bucket_mapjoin_mismatch1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/bucket_mapjoin_mismatch1.q
index 048a8fd5cf..6bebb8942d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/bucket_mapjoin_mismatch1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/bucket_mapjoin_mismatch1.q
@@ -1,19 +1,19 @@
CREATE TABLE srcbucket_mapjoin_part (key int, value string)
partitioned by (ds string) CLUSTERED BY (key) INTO 3 BUCKETS
STORED AS TEXTFILE;
-load data local inpath '../data/files/srcbucket20.txt'
+load data local inpath '../../data/files/srcbucket20.txt'
INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket21.txt'
+load data local inpath '../../data/files/srcbucket21.txt'
INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket22.txt'
+load data local inpath '../../data/files/srcbucket22.txt'
INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string)
partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS
STORED AS TEXTFILE;
-load data local inpath '../data/files/srcbucket22.txt'
+load data local inpath '../../data/files/srcbucket22.txt'
INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket23.txt'
+load data local inpath '../../data/files/srcbucket23.txt'
INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-- The numbers of buckets in the 2 tables above (being joined later) don't match.
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/bucket_mapjoin_wrong_table_metadata_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/bucket_mapjoin_wrong_table_metadata_1.q
index 9478a2f1b9..802fcd903c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/bucket_mapjoin_wrong_table_metadata_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/bucket_mapjoin_wrong_table_metadata_1.q
@@ -8,10 +8,10 @@ into 2 BUCKETS stored as textfile;
create table table2(key string, value string) clustered by (value, key)
into 2 BUCKETS stored as textfile;
-load data local inpath '../data/files/T1.txt' overwrite into table table1;
+load data local inpath '../../data/files/T1.txt' overwrite into table table1;
-load data local inpath '../data/files/T1.txt' overwrite into table table2;
-load data local inpath '../data/files/T2.txt' overwrite into table table2;
+load data local inpath '../../data/files/T1.txt' overwrite into table table2;
+load data local inpath '../../data/files/T2.txt' overwrite into table table2;
set hive.optimize.bucketmapjoin = true;
set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/bucket_mapjoin_wrong_table_metadata_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/bucket_mapjoin_wrong_table_metadata_2.q
index 69afe0ae70..ac5abebb0b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/bucket_mapjoin_wrong_table_metadata_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/bucket_mapjoin_wrong_table_metadata_2.q
@@ -8,13 +8,13 @@ into 2 BUCKETS stored as textfile;
create table table2(key string, value string) clustered by (value, key)
into 2 BUCKETS stored as textfile;
-load data local inpath '../data/files/T1.txt' overwrite into table table1 partition (ds='1');
-load data local inpath '../data/files/T2.txt' overwrite into table table1 partition (ds='1');
+load data local inpath '../../data/files/T1.txt' overwrite into table table1 partition (ds='1');
+load data local inpath '../../data/files/T2.txt' overwrite into table table1 partition (ds='1');
-load data local inpath '../data/files/T1.txt' overwrite into table table1 partition (ds='2');
+load data local inpath '../../data/files/T1.txt' overwrite into table table1 partition (ds='2');
-load data local inpath '../data/files/T1.txt' overwrite into table table2;
-load data local inpath '../data/files/T2.txt' overwrite into table table2;
+load data local inpath '../../data/files/T1.txt' overwrite into table table2;
+load data local inpath '../../data/files/T2.txt' overwrite into table table2;
set hive.optimize.bucketmapjoin = true;
set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/clustern1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/clustern1.q
deleted file mode 100644
index 0ff4477965..0000000000
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/clustern1.q
+++ /dev/null
@@ -1,2 +0,0 @@
-EXPLAIN
-SELECT x.key, x.value as key FROM SRC x CLUSTER BY key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_dp.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_dp.q
index af923504c8..b4887c4115 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_dp.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_dp.q
@@ -3,12 +3,12 @@ DROP TABLE Employee_Part;
CREATE TABLE Employee_Part(employeeID int, employeeName String) partitioned by (employeeSalary double, country string)
row format delimited fields terminated by '|' stored as textfile;
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='USA');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='UK');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='USA');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='4000.0', country='USA');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3500.0', country='UK');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='UK');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='USA');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='UK');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='USA');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='4000.0', country='USA');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3500.0', country='UK');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='UK');
-- dynamic partitioning syntax
explain
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_incorrect_num_keys.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_incorrect_num_keys.q
index d9725ddc7f..2f8e9271dd 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_incorrect_num_keys.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_incorrect_num_keys.q
@@ -3,12 +3,12 @@ DROP TABLE Employee_Part;
CREATE TABLE Employee_Part(employeeID int, employeeName String) partitioned by (employeeSalary double, country string)
row format delimited fields terminated by '|' stored as textfile;
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='USA');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='UK');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='USA');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='4000.0', country='USA');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3500.0', country='UK');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='UK');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='USA');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='UK');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='USA');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='4000.0', country='USA');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3500.0', country='UK');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='UK');
-- don't specify all partitioning keys
explain
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_invalid_values.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_invalid_values.q
index eb73962ba1..34f91fc8d1 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_invalid_values.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_invalid_values.q
@@ -3,12 +3,12 @@ DROP TABLE Employee_Part;
CREATE TABLE Employee_Part(employeeID int, employeeName String) partitioned by (employeeSalary double, country string)
row format delimited fields terminated by '|' stored as textfile;
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='USA');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='UK');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='USA');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='4000.0', country='USA');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3500.0', country='UK');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='UK');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='USA');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='UK');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='USA');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='4000.0', country='USA');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3500.0', country='UK');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='UK');
-- specify invalid values for the partitioning keys
explain
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_multiple_part_clause.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_multiple_part_clause.q
index dbfaaecbdf..49d89dd121 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_multiple_part_clause.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_partlvl_multiple_part_clause.q
@@ -3,12 +3,12 @@ DROP TABLE Employee_Part;
CREATE TABLE Employee_Part(employeeID int, employeeName String) partitioned by (employeeSalary double, country string)
row format delimited fields terminated by '|' stored as textfile;
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='USA');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='UK');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='USA');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='4000.0', country='USA');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3500.0', country='UK');
-LOAD DATA LOCAL INPATH "../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='UK');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='USA');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='UK');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='USA');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='4000.0', country='USA');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3500.0', country='UK');
+LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='UK');
-- specify partitioning clause multiple times
explain
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_tbllvl.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_tbllvl.q
index ca8548958f..a4e0056bff 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_tbllvl.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_tbllvl.q
@@ -13,7 +13,7 @@ CREATE TABLE UserVisits_web_text_none (
avgTimeOnSite int)
row format delimited fields terminated by '|' stored as textfile;
-LOAD DATA LOCAL INPATH "../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none;
+LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none;
explain
analyze table UserVisits_web_text_none compute statistics for columns destIP;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_tbllvl_complex_type.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_tbllvl_complex_type.q
index 5bbd70d86b..85a5f0a021 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_tbllvl_complex_type.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_tbllvl_complex_type.q
@@ -8,7 +8,7 @@ CREATE TABLE table_complex_type (
d MAP<STRING,ARRAY<STRING>>
) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/create_nested_type.txt' OVERWRITE INTO TABLE table_complex_type;
+LOAD DATA LOCAL INPATH '../../data/files/create_nested_type.txt' OVERWRITE INTO TABLE table_complex_type;
explain
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_tbllvl_incorrect_column.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_tbllvl_incorrect_column.q
index ca8548958f..a4e0056bff 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_tbllvl_incorrect_column.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/columnstats_tbllvl_incorrect_column.q
@@ -13,7 +13,7 @@ CREATE TABLE UserVisits_web_text_none (
avgTimeOnSite int)
row format delimited fields terminated by '|' stored as textfile;
-LOAD DATA LOCAL INPATH "../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none;
+LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none;
explain
analyze table UserVisits_web_text_none compute statistics for columns destIP;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/compile_processor.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/compile_processor.q
new file mode 100644
index 0000000000..c314a940f9
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/compile_processor.q
@@ -0,0 +1,8 @@
+
+compile `import org.apache.hadoop.hive.ql.exec.UDF \;
+public class Pyth extsfgsfgfsends UDF {
+ public double evaluate(double a, double b){
+ return Math.sqrt((a*a) + (b*b)) \;
+ }
+} ` AS GROOVY NAMED Pyth.groovy;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/compute_stats_long.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/compute_stats_long.q
new file mode 100644
index 0000000000..5974811280
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/compute_stats_long.q
@@ -0,0 +1,7 @@
+create table tab_int(a int);
+
+-- insert some data
+LOAD DATA LOCAL INPATH "../../data/files/int.txt" INTO TABLE tab_int;
+
+-- compute stats should raise an error since the number of bit vectors > 1024
+select compute_stats(a, 10000) from tab_int;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/create_function_nonexistent_class.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/create_function_nonexistent_class.q
new file mode 100644
index 0000000000..3b71e00b2e
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/create_function_nonexistent_class.q
@@ -0,0 +1 @@
+create function default.badfunc as 'my.nonexistent.class';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/create_function_nonexistent_db.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/create_function_nonexistent_db.q
new file mode 100644
index 0000000000..ae95391edd
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/create_function_nonexistent_db.q
@@ -0,0 +1 @@
+create function nonexistentdb.badfunc as 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/create_function_nonudf_class.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/create_function_nonudf_class.q
new file mode 100644
index 0000000000..2083064593
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/create_function_nonudf_class.q
@@ -0,0 +1 @@
+create function default.badfunc as 'java.lang.String';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/cte_recursion.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/cte_recursion.q
new file mode 100644
index 0000000000..2160b47196
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/cte_recursion.q
@@ -0,0 +1,4 @@
+explain
+with q1 as ( select key from q2 where key = '5'),
+q2 as ( select key from q1 where key = '5')
+select * from (select key from q1) a;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/cte_with_in_subquery.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/cte_with_in_subquery.q
new file mode 100644
index 0000000000..e52a1d97db
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/cte_with_in_subquery.q
@@ -0,0 +1 @@
+select * from (with q1 as ( select key from q2 where key = '5') select * from q1) a;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/date_literal1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/date_literal1.q
deleted file mode 100644
index b7fac0d3df..0000000000
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/date_literal1.q
+++ /dev/null
@@ -1,2 +0,0 @@
--- Not in YYYY-MM-DD format
-SELECT DATE '2001-1-1' FROM src LIMIT 2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dbtxnmgr_nodblock.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dbtxnmgr_nodblock.q
new file mode 100644
index 0000000000..1c658c79b9
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dbtxnmgr_nodblock.q
@@ -0,0 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+drop database if exists drop_nodblock;
+create database drop_nodblock;
+lock database drop_nodblock shared;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dbtxnmgr_nodbunlock.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dbtxnmgr_nodbunlock.q
new file mode 100644
index 0000000000..ef4b323f06
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dbtxnmgr_nodbunlock.q
@@ -0,0 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+drop database if exists drop_nodbunlock;
+create database drop_nodbunlock;
+unlock database drop_nodbunlock;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dbtxnmgr_notablelock.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dbtxnmgr_notablelock.q
new file mode 100644
index 0000000000..4a0c6c25c6
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dbtxnmgr_notablelock.q
@@ -0,0 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+drop table if exists drop_notablelock;
+create table drop_notablelock (c int);
+lock table drop_notablelock shared;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dbtxnmgr_notableunlock.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dbtxnmgr_notableunlock.q
new file mode 100644
index 0000000000..0b00046579
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dbtxnmgr_notableunlock.q
@@ -0,0 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+drop table if exists drop_notableunlock;
+create table drop_notableunlock (c int);
+unlock table drop_notableunlock;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/deletejar.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/deletejar.q
index 7b0c92311a..0bd6985e03 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/deletejar.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/deletejar.q
@@ -1,4 +1,4 @@
-ADD JAR ../data/files/TestSerDe.jar;
-DELETE JAR ../data/files/TestSerDe.jar;
+ADD JAR ${system:maven.local.repository}/org/apache/hive/hive-it-test-serde/${system:hive.version}/hive-it-test-serde-${system:hive.version}.jar;
+DELETE JAR ${system:maven.local.repository}/org/apache/hive/hive-it-test-serde/${system:hive.version}/hive-it-test-serde-${system:hive.version}.jar;
CREATE TABLE DELETEJAR(KEY STRING, VALUE STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' STORED AS TEXTFILE;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/drop_func_nonexistent.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/drop_func_nonexistent.q
new file mode 100644
index 0000000000..892ef00e3f
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/drop_func_nonexistent.q
@@ -0,0 +1,3 @@
+set hive.exec.drop.ignorenonexistent=false;
+-- Can't use DROP FUNCTION if the function doesn't exist and IF EXISTS isn't specified
+drop function nonexistent_function;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/drop_partition_filter_failure2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/drop_partition_filter_failure2.q
deleted file mode 100644
index 4d238d73a9..0000000000
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/drop_partition_filter_failure2.q
+++ /dev/null
@@ -1,11 +0,0 @@
-create table ptestfilter (a string, b int) partitioned by (c string, d int);
-describe ptestfilter;
-
-alter table ptestfilter add partition (c='US', d=1);
-alter table ptestfilter add partition (c='US', d=2);
-show partitions ptestfilter;
-
-alter table ptestfilter drop partition (c='US', d<'2');
-
-
-
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dynamic_partitions_with_whitelist.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dynamic_partitions_with_whitelist.q
index 0be2e71c94..0ad99d100d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dynamic_partitions_with_whitelist.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/dynamic_partitions_with_whitelist.q
@@ -8,7 +8,7 @@ create table source_table like srcpart;
create table dest_table like srcpart;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE source_table partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE source_table partition(ds='2008-04-08', hr=11);
-- Tests creating dynamic partitions with characters not in the whitelist (i.e. 9)
-- If the directory is not empty the hook will throw an error; instead, the error should come from the metastore
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_incomplete_partition.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_incomplete_partition.q
index e4f0daca92..ca60d047ef 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_incomplete_partition.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_incomplete_partition.q
@@ -3,8 +3,8 @@ CREATE TABLE exchange_part_test2 (f1 string) PARTITIONED BY (ds STRING, hr STRIN
SHOW PARTITIONS exchange_part_test1;
SHOW PARTITIONS exchange_part_test2;
-ALTER TABLE exchange_part_test1 ADD PARTITION (ds='2013-04-05', hr='h1');
-ALTER TABLE exchange_part_test1 ADD PARTITION (ds='2013-04-05', hr='h2');
+ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05', hr='h1');
+ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05', hr='h2');
SHOW PARTITIONS exchange_part_test1;
SHOW PARTITIONS exchange_part_test2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists.q
index 4d1e0a62a4..7083edc32b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists.q
@@ -8,5 +8,5 @@ ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05');
SHOW PARTITIONS exchange_part_test1;
SHOW PARTITIONS exchange_part_test2;
--- exchange_part_test2 table partition (ds='2013-04-05') already exists thus this query will fail
+-- exchange_part_test1 table partition (ds='2013-04-05') already exists thus this query will fail
alter table exchange_part_test1 exchange partition (ds='2013-04-05') with table exchange_part_test2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists2.q
index 23777db3ea..6dfe81a8b0 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists2.q
@@ -9,5 +9,5 @@ ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05', hr='3');
SHOW PARTITIONS exchange_part_test1;
SHOW PARTITIONS exchange_part_test2;
--- exchange_part_test2 table partition (ds='2013-04-05', hr='3') already exists thus this query will fail
+-- exchange_part_test1 table partition (ds='2013-04-05') already exists thus this query will fail
alter table exchange_part_test1 exchange partition (ds='2013-04-05') with table exchange_part_test2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists3.q
index 350bf248ac..60671e52e0 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_exists3.q
@@ -9,5 +9,5 @@ ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05', hr='1');
SHOW PARTITIONS exchange_part_test1;
SHOW PARTITIONS exchange_part_test2;
--- exchange_part_test2 table partition (ds='2013-04-05', hr='1') already exists thus this query will fail
+-- exchange_part_test2 table partition (ds='2013-04-05') already exists thus this query will fail
alter table exchange_part_test1 exchange partition (ds='2013-04-05') with table exchange_part_test2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_missing.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_missing.q
index 81944b3330..38c0eda236 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_missing.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exchange_partition_neg_partition_missing.q
@@ -2,5 +2,5 @@ CREATE TABLE exchange_part_test1 (f1 string) PARTITIONED BY (ds STRING);
CREATE TABLE exchange_part_test2 (f1 string) PARTITIONED BY (ds STRING);
SHOW PARTITIONS exchange_part_test1;
--- exchange_part_test1 partition (ds='2013-04-05') does not exist thus this query will fail
+-- exchange_part_test2 partition (ds='2013-04-05') does not exist thus this query will fail
alter table exchange_part_test1 exchange partition (ds='2013-04-05') with table exchange_part_test2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_00_unsupported_schema.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_00_unsupported_schema.q
index d86ecd5785..6ffc33acb9 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_00_unsupported_schema.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_00_unsupported_schema.q
@@ -4,9 +4,9 @@ set hive.test.mode.prefix=;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'nosuchschema://nosuchauthority/ql/test/data/exports/exim_department';
drop table exim_department;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_01_nonpart_over_loaded.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_01_nonpart_over_loaded.q
index 5f3223152f..970e6463e2 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_01_nonpart_over_loaded.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_01_nonpart_over_loaded.q
@@ -4,9 +4,9 @@ set hive.test.mode.prefix=;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -16,9 +16,9 @@ use importer;
create table exim_department ( dep_id int comment "department identifier")
stored as textfile
tblproperties("maker"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
import from 'ql/test/data/exports/exim_department';
drop table exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_02_all_part_over_overlap.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_02_all_part_over_overlap.q
index d7204dc478..358918363d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_02_all_part_over_overlap.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_02_all_part_over_overlap.q
@@ -6,16 +6,16 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="ka");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="ka");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
@@ -27,12 +27,12 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "iso code", emp_state string comment "free-form text")
stored as textfile
tblproperties("maker"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="ka");
import from 'ql/test/data/exports/exim_employee';
describe extended exim_employee;
select * from exim_employee;
drop table exim_employee;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_03_nonpart_noncompat_colschema.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_03_nonpart_noncompat_colschema.q
index 6cd7eda455..45268c21c0 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_03_nonpart_noncompat_colschema.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_03_nonpart_noncompat_colschema.q
@@ -4,9 +4,9 @@ set hive.test.mode.prefix=;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -18,6 +18,6 @@ create table exim_department ( dep_key int comment "department id")
tblproperties("creator"="krishna");
import from 'ql/test/data/exports/exim_department';
drop table exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_04_nonpart_noncompat_colnumber.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_04_nonpart_noncompat_colnumber.q
index 7f3f577c43..cad6c90fd3 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_04_nonpart_noncompat_colnumber.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_04_nonpart_noncompat_colnumber.q
@@ -4,9 +4,9 @@ set hive.test.mode.prefix=;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -18,6 +18,6 @@ create table exim_department ( dep_id int comment "department id", dep_name stri
tblproperties("creator"="krishna");
import from 'ql/test/data/exports/exim_department';
drop table exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_05_nonpart_noncompat_coltype.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_05_nonpart_noncompat_coltype.q
index d3ec9fff82..f5f904f42a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_05_nonpart_noncompat_coltype.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_05_nonpart_noncompat_coltype.q
@@ -4,9 +4,9 @@ set hive.test.mode.prefix=;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -18,6 +18,6 @@ create table exim_department ( dep_id bigint comment "department id")
tblproperties("creator"="krishna");
import from 'ql/test/data/exports/exim_department';
drop table exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_06_nonpart_noncompat_storage.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_06_nonpart_noncompat_storage.q
index 1cc691fc29..c56329c03f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_06_nonpart_noncompat_storage.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_06_nonpart_noncompat_storage.q
@@ -4,9 +4,9 @@ set hive.test.mode.prefix=;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -18,6 +18,6 @@ create table exim_department ( dep_id int comment "department id")
tblproperties("creator"="krishna");
import from 'ql/test/data/exports/exim_department';
drop table exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_07_nonpart_noncompat_ifof.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_07_nonpart_noncompat_ifof.q
index 27830ad5f9..afaedcd37b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_07_nonpart_noncompat_ifof.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_07_nonpart_noncompat_ifof.q
@@ -4,9 +4,9 @@ set hive.test.mode.prefix=;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -21,6 +21,6 @@ create table exim_department ( dep_id int comment "department id")
tblproperties("creator"="krishna");
import from 'ql/test/data/exports/exim_department';
drop table exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_08_nonpart_noncompat_serde.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_08_nonpart_noncompat_serde.q
index d85048a97a..230b28c402 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_08_nonpart_noncompat_serde.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_08_nonpart_noncompat_serde.q
@@ -4,9 +4,9 @@ set hive.test.mode.prefix=;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -19,6 +19,6 @@ create table exim_department ( dep_id int comment "department id")
tblproperties("creator"="krishna");
import from 'ql/test/data/exports/exim_department';
drop table exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_09_nonpart_noncompat_serdeparam.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_09_nonpart_noncompat_serdeparam.q
index 84b3786a16..c2e00a9663 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_09_nonpart_noncompat_serdeparam.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_09_nonpart_noncompat_serdeparam.q
@@ -4,9 +4,9 @@ set hive.test.mode.prefix=;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -23,6 +23,6 @@ create table exim_department ( dep_id int comment "department id")
tblproperties("creator"="krishna");
import from 'ql/test/data/exports/exim_department';
drop table exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_10_nonpart_noncompat_bucketing.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_10_nonpart_noncompat_bucketing.q
index eaf9c579d5..a6586ead0c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_10_nonpart_noncompat_bucketing.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_10_nonpart_noncompat_bucketing.q
@@ -4,9 +4,9 @@ set hive.test.mode.prefix=;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -19,6 +19,6 @@ create table exim_department ( dep_id int comment "department id")
tblproperties("creator"="krishna");
import from 'ql/test/data/exports/exim_department';
drop table exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_11_nonpart_noncompat_sorting.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_11_nonpart_noncompat_sorting.q
index 092fd77954..990a686ebe 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_11_nonpart_noncompat_sorting.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_11_nonpart_noncompat_sorting.q
@@ -5,9 +5,9 @@ create table exim_department ( dep_id int comment "department id")
clustered by (dep_id) sorted by (dep_id desc) into 10 buckets
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -20,6 +20,6 @@ create table exim_department ( dep_id int comment "department id")
tblproperties("creator"="krishna");
import from 'ql/test/data/exports/exim_department';
drop table exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_13_nonnative_import.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_13_nonnative_import.q
index 05de3d77b0..02537ef022 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_13_nonnative_import.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_13_nonnative_import.q
@@ -4,9 +4,9 @@ set hive.test.mode.prefix=;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -18,7 +18,7 @@ create table exim_department ( dep_id int comment "department id")
tblproperties("creator"="krishna");
import from 'ql/test/data/exports/exim_department';
drop table exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
drop database importer;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_14_nonpart_part.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_14_nonpart_part.q
index dc194ca814..897c674735 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_14_nonpart_part.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_14_nonpart_part.q
@@ -4,9 +4,9 @@ set hive.test.mode.prefix=;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -19,7 +19,7 @@ create table exim_department ( dep_id int comment "department id")
tblproperties("creator"="krishna");
import from 'ql/test/data/exports/exim_department';
drop table exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
drop database importer;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_15_part_nonpart.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_15_part_nonpart.q
index e233707cc4..12013e5ccf 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_15_part_nonpart.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_15_part_nonpart.q
@@ -5,9 +5,9 @@ create table exim_department ( dep_id int comment "department id")
partitioned by (dep_org string)
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department partition (dep_org="hr");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department partition (dep_org="hr");
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -19,7 +19,7 @@ create table exim_department ( dep_id int comment "department id")
tblproperties("creator"="krishna");
import from 'ql/test/data/exports/exim_department';
drop table exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
drop database importer;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_16_part_noncompat_schema.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_16_part_noncompat_schema.q
index a10788e3e3..d8d2b8008c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_16_part_noncompat_schema.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_16_part_noncompat_schema.q
@@ -5,9 +5,9 @@ create table exim_department ( dep_id int comment "department id")
partitioned by (dep_org string)
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department partition (dep_org="hr");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department partition (dep_org="hr");
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -20,7 +20,7 @@ create table exim_department ( dep_id int comment "department id")
tblproperties("creator"="krishna");
import from 'ql/test/data/exports/exim_department';
drop table exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
drop database importer;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_17_part_spec_underspec.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_17_part_spec_underspec.q
index cc4a56ca34..82dcce9455 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_17_part_spec_underspec.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_17_part_spec_underspec.q
@@ -6,16 +6,16 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="ka");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="ka");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
@@ -25,6 +25,6 @@ import table exim_employee partition (emp_country="us") from 'ql/test/data/expor
describe extended exim_employee;
select * from exim_employee;
drop table exim_employee;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_18_part_spec_missing.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_18_part_spec_missing.q
index 140e3bb3b1..d92efeb9a7 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_18_part_spec_missing.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_18_part_spec_missing.q
@@ -6,16 +6,16 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="ka");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="ka");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
@@ -25,6 +25,6 @@ import table exim_employee partition (emp_country="us", emp_state="kl") from 'ql
describe extended exim_employee;
select * from exim_employee;
drop table exim_employee;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_19_external_over_existing.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_19_external_over_existing.q
index 048befe4d3..12d827b9c8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_19_external_over_existing.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_19_external_over_existing.q
@@ -4,9 +4,9 @@ set hive.test.mode.prefix=;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -17,7 +17,7 @@ create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
import external table exim_department from 'ql/test/data/exports/exim_department';
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
drop table exim_department;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_20_managed_location_over_existing.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_20_managed_location_over_existing.q
index 89cbb9ecd8..726dee5395 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_20_managed_location_over_existing.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_20_managed_location_over_existing.q
@@ -4,17 +4,17 @@ set hive.test.mode.prefix=;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
create database importer;
use importer;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/tablestore/exim_department/temp;
-dfs -rmr ../build/ql/test/data/tablestore/exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_department;
create table exim_department ( dep_id int comment "department id")
stored as textfile
@@ -22,9 +22,9 @@ create table exim_department ( dep_id int comment "department id")
tblproperties("creator"="krishna");
import table exim_department from 'ql/test/data/exports/exim_department'
location 'ql/test/data/tablestore2/exim_department';
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
drop table exim_department;
-dfs -rmr ../build/ql/test/data/tablestore/exim_department;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_department;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_21_part_managed_external.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_21_part_managed_external.q
index 0cbfc85258..d187c78202 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_21_part_managed_external.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_21_part_managed_external.q
@@ -6,16 +6,16 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="ka");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="ka");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
@@ -29,7 +29,7 @@ create table exim_employee ( emp_id int comment "employee id")
tblproperties("creator"="krishna");
import external table exim_employee partition (emp_country="us", emp_state="tn")
from 'ql/test/data/exports/exim_employee';
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
drop table exim_employee;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_22_export_authfail.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_22_export_authfail.q
index d9ab0cf0e4..b818686f77 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_22_export_authfail.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_22_export_authfail.q
@@ -5,8 +5,8 @@ create table exim_department ( dep_id int) stored as textfile;
set hive.security.authorization.enabled=true;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
set hive.security.authorization.enabled=false;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_23_import_exist_authfail.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_23_import_exist_authfail.q
index 2dbd534074..4acefb9f0a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_23_import_exist_authfail.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_23_import_exist_authfail.q
@@ -2,9 +2,9 @@ set hive.test.mode=true;
set hive.test.mode.prefix=;
create table exim_department ( dep_id int) stored as textfile;
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -18,5 +18,5 @@ import from 'ql/test/data/exports/exim_department';
set hive.security.authorization.enabled=false;
drop table exim_department;
drop database importer;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_24_import_part_authfail.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_24_import_part_authfail.q
index ccbcee3698..467014e467 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_24_import_part_authfail.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_24_import_part_authfail.q
@@ -7,10 +7,10 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
@@ -26,6 +26,6 @@ set hive.security.authorization.enabled=true;
import from 'ql/test/data/exports/exim_employee';
set hive.security.authorization.enabled=false;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
drop table exim_employee;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_25_import_nonexist_authfail.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_25_import_nonexist_authfail.q
index 50bfe005c4..595fa7e764 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_25_import_nonexist_authfail.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/exim_25_import_nonexist_authfail.q
@@ -3,9 +3,9 @@ set hive.test.mode.prefix=;
set hive.test.mode.nosamplelist=exim_department,exim_employee;
create table exim_department ( dep_id int) stored as textfile;
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -19,5 +19,5 @@ set hive.security.authorization.enabled=false;
select * from exim_department;
drop table exim_department;
drop database importer;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/fetchtask_ioexception.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/fetchtask_ioexception.q
index 9f44f225e9..82230f782e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/fetchtask_ioexception.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/fetchtask_ioexception.q
@@ -2,6 +2,6 @@ CREATE TABLE fetchtask_ioexception (
KEY STRING,
VALUE STRING) STORED AS SEQUENCEFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1_broken.seq' OVERWRITE INTO TABLE fetchtask_ioexception;
+LOAD DATA LOCAL INPATH '../../data/files/kv1_broken.seq' OVERWRITE INTO TABLE fetchtask_ioexception;
SELECT * FROM fetchtask_ioexception;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/file_with_header_footer_negative.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/file_with_header_footer_negative.q
new file mode 100644
index 0000000000..286cf1afb4
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/file_with_header_footer_negative.q
@@ -0,0 +1,13 @@
+dfs ${system:test.dfs.mkdir} hdfs:///tmp/test_file_with_header_footer_negative/;
+
+dfs -copyFromLocal ../data/files/header_footer_table_1 hdfs:///tmp/test_file_with_header_footer_negative/header_footer_table_1;
+
+dfs -copyFromLocal ../data/files/header_footer_table_2 hdfs:///tmp/test_file_with_header_footer_negative/header_footer_table_2;
+
+CREATE EXTERNAL TABLE header_footer_table_1 (name string, message string, id int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' LOCATION 'hdfs:///tmp/test_file_with_header_footer_negative/header_footer_table_1' tblproperties ("skip.header.line.count"="1", "skip.footer.line.count"="200");
+
+SELECT * FROM header_footer_table_1;
+
+DROP TABLE header_footer_table_1;
+
+dfs -rmr hdfs:///tmp/test_file_with_header_footer_negative;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/illegal_partition_type.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/illegal_partition_type.q
index 1cdaffd1f3..1ab828c8be 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/illegal_partition_type.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/illegal_partition_type.q
@@ -1,6 +1,6 @@
-- begin part(string, int) pass(string, string)
CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day int) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' ;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day='second');
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day='second');
select * from tab1;
drop table tab1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/illegal_partition_type3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/illegal_partition_type3.q
new file mode 100644
index 0000000000..49e6a092fc
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/illegal_partition_type3.q
@@ -0,0 +1,4 @@
+create table tab1(c int) partitioned by (i int);
+alter table tab1 add partition(i = "some name");
+
+drop table tab1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/illegal_partition_type4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/illegal_partition_type4.q
new file mode 100644
index 0000000000..50f486e624
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/illegal_partition_type4.q
@@ -0,0 +1,3 @@
+create table tab1(s string) PARTITIONED BY(dt date, st string);
+alter table tab1 add partition (dt=date 'foo', st='foo');
+drop table tab1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/index_compact_entry_limit.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/index_compact_entry_limit.q
index 7d003e3e4b..5bb889c027 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/index_compact_entry_limit.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/index_compact_entry_limit.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
drop index src_index on src;
CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/index_compact_size_limit.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/index_compact_size_limit.q
index d79674539a..c6600e69b6 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/index_compact_size_limit.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/index_compact_size_limit.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
drop index src_index on src;
CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/insert_into5.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/insert_into5.q
new file mode 100644
index 0000000000..c20c168a88
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/insert_into5.q
@@ -0,0 +1,9 @@
+DROP TABLE if exists insert_into5_neg;
+
+CREATE TABLE insert_into5_neg (key int, value string) TBLPROPERTIES ("immutable"="true");
+
+INSERT INTO TABLE insert_into5_neg SELECT * FROM src LIMIT 100;
+
+INSERT INTO TABLE insert_into5_neg SELECT * FROM src LIMIT 100;
+
+DROP TABLE insert_into5_neg;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/insert_into6.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/insert_into6.q
new file mode 100644
index 0000000000..a92ee5ca94
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/insert_into6.q
@@ -0,0 +1,17 @@
+DROP TABLE IF EXISTS insert_into6_neg;
+
+CREATE TABLE insert_into6_neg (key int, value string)
+ PARTITIONED BY (ds string) TBLPROPERTIES("immutable"="true") ;
+
+INSERT INTO TABLE insert_into6_neg PARTITION (ds='1')
+ SELECT * FROM src LIMIT 100;
+
+INSERT INTO TABLE insert_into6_neg PARTITION (ds='2')
+ SELECT * FROM src LIMIT 100;
+
+SELECT COUNT(*) from insert_into6_neg;
+
+INSERT INTO TABLE insert_into6_neg PARTITION (ds='1')
+ SELECT * FROM src LIMIT 100;
+
+DROP TABLE insert_into6_neg;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/insertover_dynapart_ifnotexists.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/insertover_dynapart_ifnotexists.q
index cbf65c4ac6..a8f77c28a8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/insertover_dynapart_ifnotexists.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/insertover_dynapart_ifnotexists.q
@@ -4,6 +4,6 @@ create table srcpart_dp like srcpart;
create table destpart_dp like srcpart;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcpart_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_dp partition(ds='2008-04-08', hr=11);
insert overwrite table destpart_dp partition (ds='2008-04-08', hr) if not exists select key, value, hr from srcpart_dp where ds='2008-04-08';
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/invalid_char_length_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/invalid_char_length_1.q
new file mode 100644
index 0000000000..ba7d164c77
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/invalid_char_length_1.q
@@ -0,0 +1,2 @@
+drop table invalid_char_length_1;
+create table invalid_char_length_1 (c1 char(1000000));
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/invalid_char_length_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/invalid_char_length_2.q
new file mode 100644
index 0000000000..866b43d312
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/invalid_char_length_2.q
@@ -0,0 +1 @@
+select cast(value as char(100000)) from src limit 1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/invalid_char_length_3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/invalid_char_length_3.q
new file mode 100644
index 0000000000..481b630d20
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/invalid_char_length_3.q
@@ -0,0 +1,3 @@
+drop table invalid_char_length_3;
+create table invalid_char_length_3 (c1 char(0));
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/invalid_columns.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/invalid_columns.q
deleted file mode 100644
index 14b3409cb4..0000000000
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/invalid_columns.q
+++ /dev/null
@@ -1,4 +0,0 @@
-ADD JAR ../data/files/TestSerDe.jar;
-CREATE TABLE DELETEJAR(KEY STRING, VALUE STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.TestSerDe'
-STORED AS TEXTFILE
-TBLPROPERTIES('columns'='valid_colname,invalid.colname');
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/join_alt_syntax_comma_on.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/join_alt_syntax_comma_on.q
new file mode 100644
index 0000000000..e39a38e2fc
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/join_alt_syntax_comma_on.q
@@ -0,0 +1,3 @@
+explain select *
+from src s1 ,
+src s2 on s1.key = s2.key;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/join_cond_unqual_ambiguous.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/join_cond_unqual_ambiguous.q
new file mode 100644
index 0000000000..c0da913c28
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/join_cond_unqual_ambiguous.q
@@ -0,0 +1,6 @@
+
+
+explain select s1.key, s2.key
+from src s1, src s2
+where key = s2.key
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/join_cond_unqual_ambiguous_vc.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/join_cond_unqual_ambiguous_vc.q
new file mode 100644
index 0000000000..8e219637eb
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/join_cond_unqual_ambiguous_vc.q
@@ -0,0 +1,5 @@
+
+explain select s1.key, s2.key
+from src s1, src s2
+where INPUT__FILE__NAME = s2.INPUT__FILE__NAME
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/limit_partition.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/limit_partition.q
new file mode 100644
index 0000000000..d59394544c
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/limit_partition.q
@@ -0,0 +1,7 @@
+set hive.limit.query.max.table.partition=1;
+
+explain select * from srcpart limit 1;
+select * from srcpart limit 1;
+
+explain select * from srcpart;
+select * from srcpart;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/limit_partition_stats.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/limit_partition_stats.q
new file mode 100644
index 0000000000..0afd4a965a
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/limit_partition_stats.q
@@ -0,0 +1,18 @@
+set hive.exec.dynamic.partition=true;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.stats.autogather=true;
+set hive.compute.query.using.stats=true;
+
+create table part (c int) partitioned by (d string);
+insert into table part partition (d)
+select hr,ds from srcpart;
+
+set hive.limit.query.max.table.partition=1;
+
+explain select count(*) from part;
+select count(*) from part;
+
+set hive.compute.query.using.stats=false;
+
+explain select count(*) from part;
+select count(*) from part;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_exist_part_authfail.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_exist_part_authfail.q
index f86cd92d9d..eb72d940a5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_exist_part_authfail.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_exist_part_authfail.q
@@ -1,4 +1,4 @@
create table hive_test_src ( col1 string ) partitioned by (pcol1 string) stored as textfile;
alter table hive_test_src add partition (pcol1 = 'test_part');
set hive.security.authorization.enabled=true;
-load data local inpath '../data/files/test.dat' overwrite into table hive_test_src partition (pcol1 = 'test_part');
+load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src partition (pcol1 = 'test_part');
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_non_native.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_non_native.q
index 387aaed9a1..75a5216e00 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_non_native.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_non_native.q
@@ -2,4 +2,4 @@
CREATE TABLE non_native2(key int, value string)
STORED BY 'org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler';
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE non_native2;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE non_native2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_nonpart_authfail.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_nonpart_authfail.q
index d807c69877..32653631ad 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_nonpart_authfail.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_nonpart_authfail.q
@@ -1,3 +1,3 @@
create table hive_test_src ( col1 string ) stored as textfile;
set hive.security.authorization.enabled=true;
-load data local inpath '../data/files/test.dat' overwrite into table hive_test_src ;
+load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src ;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_part_authfail.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_part_authfail.q
index c409d5a94a..315988dc0a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_part_authfail.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_part_authfail.q
@@ -1,3 +1,3 @@
create table hive_test_src ( col1 string ) partitioned by (pcol1 string) stored as textfile;
set hive.security.authorization.enabled=true;
-load data local inpath '../data/files/test.dat' overwrite into table hive_test_src partition (pcol1 = 'test_part');
+load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src partition (pcol1 = 'test_part');
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_part_nospec.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_part_nospec.q
index 356c16a664..81517991b2 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_part_nospec.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_part_nospec.q
@@ -1,2 +1,2 @@
create table hive_test_src ( col1 string ) partitioned by (pcol1 string) stored as textfile;
-load data local inpath '../data/files/test.dat' into table hive_test_src;
+load data local inpath '../../data/files/test.dat' into table hive_test_src;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_stored_as_dirs.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_stored_as_dirs.q
index eed5651cbf..c56f0d408d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_stored_as_dirs.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_stored_as_dirs.q
@@ -4,4 +4,4 @@ set hive.mapred.supports.subdirectories=true;
CREATE TABLE if not exists stored_as_dirs_multiple (col1 STRING, col2 int, col3 STRING)
SKEWED BY (col1, col2) ON (('s1',1), ('s3',3), ('s13',13), ('s78',78)) stored as DIRECTORIES;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE stored_as_dirs_multiple;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE stored_as_dirs_multiple;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_view_failure.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_view_failure.q
index 927f02e82b..64182eac83 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_view_failure.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_view_failure.q
@@ -1,3 +1,3 @@
DROP VIEW xxx11;
CREATE VIEW xxx11 AS SELECT * FROM src;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE xxx11;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE xxx11;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_fileformat.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_fileformat.q
index 16feeca226..f0c3b59d30 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_fileformat.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_fileformat.q
@@ -3,4 +3,4 @@
CREATE TABLE load_wrong_fileformat_T1(name STRING) STORED AS SEQUENCEFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE load_wrong_fileformat_T1;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE load_wrong_fileformat_T1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_fileformat_rc_seq.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_fileformat_rc_seq.q
index 7e589fbfde..4d79bbeb10 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_fileformat_rc_seq.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_fileformat_rc_seq.q
@@ -3,4 +3,4 @@
CREATE TABLE T1(name STRING) STORED AS RCFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.seq' INTO TABLE T1;
\ No newline at end of file
+LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T1;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_fileformat_txt_seq.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_fileformat_txt_seq.q
index ff5ed4e2e3..050c819a2f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_fileformat_txt_seq.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_fileformat_txt_seq.q
@@ -3,4 +3,4 @@
CREATE TABLE T1(name STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.seq' INTO TABLE T1;
\ No newline at end of file
+LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T1;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_noof_part.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_noof_part.q
index ffb64ed643..7f5ad75414 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_noof_part.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/load_wrong_noof_part.q
@@ -1,3 +1,3 @@
CREATE TABLE loadpart1(a STRING, b STRING) PARTITIONED BY (ds STRING,ds1 STRING);
-LOAD DATA LOCAL INPATH '../data1/files/kv1.txt' INTO TABLE loadpart1 PARTITION(ds='2009-05-05');
+LOAD DATA LOCAL INPATH '../../data1/files/kv1.txt' INTO TABLE loadpart1 PARTITION(ds='2009-05-05');
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/local_mapred_error_cache.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/local_mapred_error_cache.q
index 8f4b37a9d4..ed9e21dd8a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/local_mapred_error_cache.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/local_mapred_error_cache.q
@@ -1,4 +1,4 @@
set hive.exec.mode.local.auto=true;
set hive.exec.failure.hooks=org.apache.hadoop.hive.ql.hooks.VerifySessionStateLocalErrorsHook;
-FROM src SELECT TRANSFORM(key, value) USING 'python ../data/scripts/cat_error.py' AS (key, value);
+FROM src SELECT TRANSFORM(key, value) USING 'python ../../data/scripts/cat_error.py' AS (key, value);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/lockneg_query_tbl_in_locked_db.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/lockneg_query_tbl_in_locked_db.q
new file mode 100644
index 0000000000..4966f2b9b2
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/lockneg_query_tbl_in_locked_db.q
@@ -0,0 +1,17 @@
+create database lockneg1;
+use lockneg1;
+
+create table tstsrcpart like default.srcpart;
+
+insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='11')
+select key, value from default.srcpart where ds='2008-04-08' and hr='11';
+
+lock database lockneg1 shared;
+show locks database lockneg1;
+select count(1) from tstsrcpart where ds='2008-04-08' and hr='11';
+
+unlock database lockneg1;
+show locks database lockneg1;
+lock database lockneg1 exclusive;
+show locks database lockneg1;
+select count(1) from tstsrcpart where ds='2008-04-08' and hr='11';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/lockneg_try_db_lock_conflict.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/lockneg_try_db_lock_conflict.q
new file mode 100644
index 0000000000..1f9ad90898
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/lockneg_try_db_lock_conflict.q
@@ -0,0 +1,6 @@
+set hive.lock.numretries=0;
+
+create database lockneg4;
+
+lock database lockneg4 exclusive;
+lock database lockneg4 shared;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/lockneg_try_drop_locked_db.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/lockneg_try_drop_locked_db.q
new file mode 100644
index 0000000000..8cbe31083b
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/lockneg_try_drop_locked_db.q
@@ -0,0 +1,8 @@
+set hive.lock.numretries=0;
+
+create database lockneg9;
+
+lock database lockneg9 shared;
+show locks database lockneg9;
+
+drop database lockneg9;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/lockneg_try_lock_db_in_use.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/lockneg_try_lock_db_in_use.q
new file mode 100644
index 0000000000..4127a6f150
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/lockneg_try_lock_db_in_use.q
@@ -0,0 +1,15 @@
+set hive.lock.numretries=0;
+
+create database lockneg2;
+use lockneg2;
+
+create table tstsrcpart like default.srcpart;
+
+insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='11')
+select key, value from default.srcpart where ds='2008-04-08' and hr='11';
+
+lock table tstsrcpart shared;
+show locks;
+
+lock database lockneg2 exclusive;
+show locks;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/nested_complex_neg.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/nested_complex_neg.q
index ac6c4ee549..09f13f52ae 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/nested_complex_neg.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/nested_complex_neg.q
@@ -10,6 +10,6 @@ simple_string string)
-- This should fail in as extended nesting levels are not enabled using the serdeproperty hive.serialization.extend.nesting.levels
-load data local inpath '../data/files/nested_complex.txt' overwrite into table nestedcomplex;
+load data local inpath '../../data/files/nested_complex.txt' overwrite into table nestedcomplex;
select * from nestedcomplex sort by simple_int;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/nopart_insert.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/nopart_insert.q
index 4841f9e11c..6669bf62d8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/nopart_insert.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/nopart_insert.q
@@ -2,6 +2,6 @@
CREATE TABLE nopart_insert(a STRING, b STRING) PARTITIONED BY (ds STRING);
INSERT OVERWRITE TABLE nopart_insert
-SELECT TRANSFORM(src.key, src.value) USING '../data/scripts/error_script' AS (tkey, tvalue)
+SELECT TRANSFORM(src.key, src.value) USING '../../data/scripts/error_script' AS (tkey, tvalue)
FROM src;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/nopart_load.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/nopart_load.q
index 6e5ad6eb41..966982fd5c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/nopart_load.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/nopart_load.q
@@ -1,5 +1,5 @@
CREATE TABLE nopart_load(a STRING, b STRING) PARTITIONED BY (ds STRING);
-load data local inpath '../data/files/kv1.txt' overwrite into table nopart_load ;
+load data local inpath '../../data/files/kv1.txt' overwrite into table nopart_load ;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/notable_alias3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/notable_alias3.q
deleted file mode 100644
index 6cc3e87288..0000000000
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/notable_alias3.q
+++ /dev/null
@@ -1,4 +0,0 @@
-CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE;
-
-FROM src
-INSERT OVERWRITE TABLE dest1 SELECT '1234', src.key, sum(src.value) WHERE src.key < 100 group by key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_char.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_char.q
new file mode 100644
index 0000000000..745a786726
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_char.q
@@ -0,0 +1,3 @@
+drop table if exists parquet_char;
+
+create table parquet_char (t char(10)) stored as parquet;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_date.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_date.q
new file mode 100644
index 0000000000..89d3602fd3
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_date.q
@@ -0,0 +1,3 @@
+drop table if exists parquet_date;
+
+create table parquet_date (t date) stored as parquet;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_decimal.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_decimal.q
new file mode 100644
index 0000000000..8a4973110a
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_decimal.q
@@ -0,0 +1,3 @@
+drop table if exists parquet_decimal;
+
+create table parquet_decimal (t decimal(4,2)) stored as parquet;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_timestamp.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_timestamp.q
new file mode 100644
index 0000000000..4ef36fa0ef
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_timestamp.q
@@ -0,0 +1,3 @@
+drop table if exists parquet_timestamp;
+
+create table parquet_timestamp (t timestamp) stored as parquet;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_varchar.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_varchar.q
new file mode 100644
index 0000000000..55825f76dc
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/parquet_varchar.q
@@ -0,0 +1,3 @@
+drop table if exists parquet_varchar;
+
+create table parquet_varchar (t varchar(10)) stored as parquet;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/protectmode_part2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/protectmode_part2.q
index 72b55ea25d..3fdc036996 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/protectmode_part2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/protectmode_part2.q
@@ -4,6 +4,6 @@ drop table tbl_protectmode6;
create table tbl_protectmode6 (c1 string,c2 string) partitioned by (p string);
alter table tbl_protectmode6 add partition (p='p1');
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' OVERWRITE INTO TABLE tbl_protectmode6 partition (p='p1');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE tbl_protectmode6 partition (p='p1');
alter table tbl_protectmode6 partition (p='p1') enable offline;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' OVERWRITE INTO TABLE tbl_protectmode6 partition (p='p1');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE tbl_protectmode6 partition (p='p1');
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ptf_negative_AggrFuncsWithNoGBYNoPartDef.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ptf_negative_AggrFuncsWithNoGBYNoPartDef.q
index 542367ace2..ef372259ed 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ptf_negative_AggrFuncsWithNoGBYNoPartDef.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ptf_negative_AggrFuncsWithNoGBYNoPartDef.q
@@ -12,7 +12,7 @@ CREATE TABLE part(
p_comment STRING
);
-LOAD DATA LOCAL INPATH '../data/files/part_tiny.txt' overwrite into table part;
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-- testAggrFuncsWithNoGBYNoPartDef
select p_mfgr,
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ptf_negative_AmbiguousWindowDefn.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ptf_negative_AmbiguousWindowDefn.q
index 95b35113e3..5843042343 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ptf_negative_AmbiguousWindowDefn.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/ptf_negative_AmbiguousWindowDefn.q
@@ -12,7 +12,7 @@ CREATE TABLE part(
p_comment STRING
);
-LOAD DATA LOCAL INPATH '../data/files/part_tiny.txt' overwrite into table part;
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-- testAmbiguousWindowDefn
select p_mfgr, p_name, p_size,
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/regex_col_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/regex_col_1.q
index 8333ddc948..a171961a68 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/regex_col_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/regex_col_1.q
@@ -1,2 +1,3 @@
+set hive.support.quoted.identifiers=none;
EXPLAIN
SELECT `+++` FROM srcpart;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/regex_col_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/regex_col_2.q
index d1aa1f1a95..7bac1c7755 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/regex_col_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/regex_col_2.q
@@ -1,2 +1,3 @@
+set hive.support.quoted.identifiers=none;
EXPLAIN
SELECT `.a.` FROM srcpart;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/regex_col_groupby.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/regex_col_groupby.q
index 53971916e6..300d145508 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/regex_col_groupby.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/regex_col_groupby.q
@@ -1,2 +1,3 @@
+set hive.support.quoted.identifiers=none;
EXPLAIN
SELECT `..`, count(1) FROM srcpart GROUP BY `..`;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/script_broken_pipe1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/script_broken_pipe1.q
deleted file mode 100644
index 6b1c09decf..0000000000
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/script_broken_pipe1.q
+++ /dev/null
@@ -1,3 +0,0 @@
-set hive.exec.script.allow.partial.consumption = false;
--- Tests exception in ScriptOperator.close() by passing to the operator a small amount of data
-SELECT TRANSFORM(*) USING 'true' AS a, b FROM (SELECT TRANSFORM(*) USING 'echo' AS a, b FROM src LIMIT 1) tmp;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/script_error.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/script_error.q
index e46aed03b1..8ca849b82d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/script_error.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/script_error.q
@@ -1,7 +1,7 @@
EXPLAIN
-SELECT TRANSFORM(src.key, src.value) USING '../data/scripts/error_script' AS (tkey, tvalue)
+SELECT TRANSFORM(src.key, src.value) USING '../../data/scripts/error_script' AS (tkey, tvalue)
FROM src;
-SELECT TRANSFORM(src.key, src.value) USING '../data/scripts/error_script' AS (tkey, tvalue)
+SELECT TRANSFORM(src.key, src.value) USING '../../data/scripts/error_script' AS (tkey, tvalue)
FROM src;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/serde_regex2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/serde_regex2.q
index a395574422..d523d03e90 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/serde_regex2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/serde_regex2.q
@@ -16,8 +16,8 @@ WITH SERDEPROPERTIES (
)
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH "../data/files/apache.access.log" INTO TABLE serde_regex;
-LOAD DATA LOCAL INPATH "../data/files/apache.access.2.log" INTO TABLE serde_regex;
+LOAD DATA LOCAL INPATH "../../data/files/apache.access.log" INTO TABLE serde_regex;
+LOAD DATA LOCAL INPATH "../../data/files/apache.access.2.log" INTO TABLE serde_regex;
-- raise an exception
SELECT * FROM serde_regex ORDER BY time;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/set_hiveconf_validation2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/set_hiveconf_validation2.q
new file mode 100644
index 0000000000..579e9408b6
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/set_hiveconf_validation2.q
@@ -0,0 +1,5 @@
+-- should fail: hive.fetch.task.conversion accepts minimal or more
+desc src;
+
+set hive.conf.validation=true;
+set hive.fetch.task.conversion=true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_aggregator_error_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_aggregator_error_1.q
index 401cc37f67..1b2872d3d7 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_aggregator_error_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_aggregator_error_1.q
@@ -6,7 +6,7 @@
create table tmptable(key string, value string);
-set hive.stats.dbclass=dummy;
+set hive.stats.dbclass=custom;
set hive.stats.default.publisher=org.apache.hadoop.hive.ql.stats.DummyStatsPublisher;
set hive.stats.default.aggregator=org.apache.hadoop.hive.ql.stats.DummyStatsAggregator;
set hive.test.dummystats.aggregator=connect;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_aggregator_error_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_aggregator_error_2.q
index c7e63591ad..0fa9ff6820 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_aggregator_error_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_aggregator_error_2.q
@@ -5,7 +5,7 @@
create table tmptable(key string, value string);
-set hive.stats.dbclass=dummy;
+set hive.stats.dbclass=custom;
set hive.stats.default.publisher=org.apache.hadoop.hive.ql.stats.DummyStatsPublisher;
set hive.stats.default.aggregator="";
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_publisher_error_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_publisher_error_1.q
index 7fa0f55f2a..be7c4f72fe 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_publisher_error_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_publisher_error_1.q
@@ -6,7 +6,7 @@
create table tmptable(key string, value string);
-set hive.stats.dbclass=dummy;
+set hive.stats.dbclass=custom;
set hive.stats.default.publisher=org.apache.hadoop.hive.ql.stats.DummyStatsPublisher;
set hive.stats.default.aggregator=org.apache.hadoop.hive.ql.stats.DummyStatsAggregator;
set hive.test.dummystats.publisher=connect;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_publisher_error_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_publisher_error_2.q
index f82d4b54b6..652afe7c5b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_publisher_error_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/stats_publisher_error_2.q
@@ -5,7 +5,7 @@
create table tmptable(key string, value string);
-set hive.stats.dbclass=dummy;
+set hive.stats.dbclass=custom;
set hive.stats.default.publisher="";
set hive.stats.default.aggregator=org.apache.hadoop.hive.ql.stats.DummyStatsAggregator;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_exists_implicit_gby.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_exists_implicit_gby.q
new file mode 100644
index 0000000000..9013df6f93
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_exists_implicit_gby.q
@@ -0,0 +1,10 @@
+
+
+select *
+from src b
+where exists
+ (select count(*)
+ from src a
+ where b.value = a.value and a.key = b.key and a.value > 'val_9'
+ )
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_in_groupby.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_in_groupby.q
new file mode 100644
index 0000000000..a9bc6ee6a3
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_in_groupby.q
@@ -0,0 +1,5 @@
+
+
+select count(*)
+from src
+group by src.key in (select key from src s1 where s1.key > '9')
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_in_select.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_in_select.q
new file mode 100644
index 0000000000..1365389cb2
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_in_select.q
@@ -0,0 +1,6 @@
+
+
+
+select src.key in (select key from src s1 where s1.key > '9')
+from src
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_multiple_cols_in_select.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_multiple_cols_in_select.q
new file mode 100644
index 0000000000..6805c5b16b
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_multiple_cols_in_select.q
@@ -0,0 +1,7 @@
+
+
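+-- presumably rejected because IN expects a single-column subquery, while select * projects multiple columns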
+explain
+ select *
+from src
+where src.key in (select * from src s1 where s1.key > '9')
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_nested_subquery.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_nested_subquery.q
new file mode 100644
index 0000000000..e8c41e6b17
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_nested_subquery.q
@@ -0,0 +1,18 @@
+
+
+CREATE TABLE part(
+ p_partkey INT,
+ p_name STRING,
+ p_mfgr STRING,
+ p_brand STRING,
+ p_type STRING,
+ p_size INT,
+ p_container STRING,
+ p_retailprice DOUBLE,
+ p_comment STRING
+);
+
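+-- presumably rejected because one subquery predicate (EXISTS) is nested inside another (IN), per the test name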
+select *
+from part x
+where x.p_name in (select y.p_name from part y where exists (select z.p_name from part z where y.p_name = z.p_name))
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_notexists_implicit_gby.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_notexists_implicit_gby.q
new file mode 100644
index 0000000000..852b2953ff
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_notexists_implicit_gby.q
@@ -0,0 +1,10 @@
+
+
+select *
+from src b
+where not exists
+ (select sum(1)
+ from src a
+ where b.value = a.value and a.key = b.key and a.value > 'val_9'
+ )
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_shared_alias.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_shared_alias.q
new file mode 100644
index 0000000000..d442f077c0
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_shared_alias.q
@@ -0,0 +1,6 @@
+
+
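+-- presumably rejected because the outer query and the IN subquery both reference src without distinct aliases, per the test name;
+-- an illustrative rewrite (not part of the original test) would alias the two scans:
+-- select * from src a where a.key in (select b.key from src b where b.key > '9');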
+select *
+from src
+where src.key in (select key from src where key > '9')
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_subquery_chain.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_subquery_chain.q
new file mode 100644
index 0000000000..8ea94c5fc6
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_subquery_chain.q
@@ -0,0 +1,6 @@
+
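+-- presumably rejected because two subquery predicates are chained back to back: ... in (...) in (...)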
+explain
+select *
+from src
+where src.key in (select key from src) in (select key from src)
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_unqual_corr_expr.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_unqual_corr_expr.q
new file mode 100644
index 0000000000..99ff9ca703
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_unqual_corr_expr.q
@@ -0,0 +1,6 @@
+
+
+select *
+from src
+where key in (select key from src)
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_windowing_corr.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_windowing_corr.q
new file mode 100644
index 0000000000..105d3d22d9
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_windowing_corr.q
@@ -0,0 +1,26 @@
+DROP TABLE part;
+
+-- data setup
+CREATE TABLE part(
+ p_partkey INT,
+ p_name STRING,
+ p_mfgr STRING,
+ p_brand STRING,
+ p_type STRING,
+ p_size INT,
+ p_container STRING,
+ p_retailprice DOUBLE,
+ p_comment STRING
+);
+
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
+
+
+-- corr and windowing
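+-- presumably rejected because the correlated subquery's select list is a windowing expression (first_value ... over ...)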
+select p_mfgr, p_name, p_size
+from part a
+where a.p_size in
+ (select first_value(p_size) over(partition by p_mfgr order by p_size)
+ from part b
+ where a.p_brand = b.p_brand)
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_with_or_cond.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_with_or_cond.q
new file mode 100644
index 0000000000..c2c322178f
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/subquery_with_or_cond.q
@@ -0,0 +1,5 @@
+
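+-- presumably rejected because the subquery predicate appears as a disjunct (OR); it must be a top-level conjunct in WHERE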
+select count(*)
+from src
+where src.key in (select key from src s1 where s1.key > '9') or src.value is not null
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_case_type_wrong.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_case_type_wrong.q
deleted file mode 100644
index 2fb5ff74cc..0000000000
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_case_type_wrong.q
+++ /dev/null
@@ -1,6 +0,0 @@
-SELECT CASE '1'
- WHEN 1 THEN 2
- WHEN 3 THEN 4
- ELSE 5
- END
-FROM src LIMIT 1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_case_type_wrong2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_case_type_wrong2.q
deleted file mode 100644
index 5772dc1a95..0000000000
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_case_type_wrong2.q
+++ /dev/null
@@ -1,6 +0,0 @@
-SELECT CASE 1
- WHEN 1 THEN '2'
- WHEN 3 THEN 4
- ELSE 5
- END
-FROM src LIMIT 1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_case_type_wrong3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_case_type_wrong3.q
deleted file mode 100644
index 5aaf0188eb..0000000000
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_case_type_wrong3.q
+++ /dev/null
@@ -1,6 +0,0 @@
-SELECT CASE 1
- WHEN 1 THEN NULL
- WHEN 3 THEN '2'
- ELSE 7
- END
-FROM src LIMIT 1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_invalid.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_invalid.q
new file mode 100644
index 0000000000..68050fd95c
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_invalid.q
@@ -0,0 +1 @@
+select default.nonexistfunc() from src;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_local_resource.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_local_resource.q
new file mode 100644
index 0000000000..bcfa217737
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_local_resource.q
@@ -0,0 +1 @@
+create function lookup as 'org.apache.hadoop.hive.ql.udf.UDFFileLookup' using file '../../data/files/sales.txt';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_nonexistent_resource.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_nonexistent_resource.q
new file mode 100644
index 0000000000..d37665dde6
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_nonexistent_resource.q
@@ -0,0 +1 @@
+create function lookup as 'org.apache.hadoop.hive.ql.udf.UDFFileLookup' using file 'nonexistent_file.txt';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_qualified_name.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_qualified_name.q
new file mode 100644
index 0000000000..476dfa21a2
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_qualified_name.q
@@ -0,0 +1 @@
+create temporary function default.myfunc as 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFSum';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_when_type_wrong2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_when_type_wrong2.q
deleted file mode 100644
index 79fa65f63d..0000000000
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_when_type_wrong2.q
+++ /dev/null
@@ -1,6 +0,0 @@
-SELECT CASE
- WHEN 1=2 THEN '2'
- WHEN 3=4 THEN 4
- ELSE 5
- END
-FROM src LIMIT 1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_when_type_wrong3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_when_type_wrong3.q
deleted file mode 100644
index 8bb5fdd7ea..0000000000
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udf_when_type_wrong3.q
+++ /dev/null
@@ -1,6 +0,0 @@
-SELECT CASE
- WHEN 1=2 THEN '2'
- WHEN 3=4 THEN '5'
- ELSE 5.3
- END
-FROM src LIMIT 1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udfnull.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udfnull.q
deleted file mode 100644
index 3c4204f780..0000000000
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/udfnull.q
+++ /dev/null
@@ -1,6 +0,0 @@
-
-CREATE TEMPORARY FUNCTION example_arraysum AS 'org.apache.hadoop.hive.contrib.udf.example.UDFExampleArraySum';
-
-SELECT example_arraysum(lint)FROM src_thrift;
-
-DROP TEMPORARY FUNCTION example_arraysum;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/union.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/union.q
deleted file mode 100644
index e3c5c83089..0000000000
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/union.q
+++ /dev/null
@@ -1,4 +0,0 @@
-explain
-select s1.key as key, s1.value as value from src s1
- UNION ALL
-select s2.key as key, s2.value as value from src s2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/windowing_invalid_udaf.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/windowing_invalid_udaf.q
new file mode 100644
index 0000000000..c5b593e4bb
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/windowing_invalid_udaf.q
@@ -0,0 +1 @@
+select nonexistfunc(key) over () from src limit 1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/windowing_ll_no_neg.q b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/windowing_ll_no_neg.q
new file mode 100644
index 0000000000..15f8fae292
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientnegative/windowing_ll_no_neg.q
@@ -0,0 +1,26 @@
+DROP TABLE IF EXISTS part;
+
+-- data setup
+CREATE TABLE part(
+ p_partkey INT,
+ p_name STRING,
+ p_mfgr STRING,
+ p_brand STRING,
+ p_type STRING,
+ p_size INT,
+ p_container STRING,
+ p_retailprice DOUBLE,
+ p_comment STRING
+);
+
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
+
+
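+-- presumably rejected because lag() is given a negative offset (-1); per the test name, lead/lag offsets must not be negative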
+select p_mfgr, p_name, p_size,
+min(p_retailprice),
+rank() over(distribute by p_mfgr sort by p_name) as r,
+dense_rank() over(distribute by p_mfgr sort by p_name) as dr,
+p_size, p_size - lag(p_size,-1,p_size) over(distribute by p_mfgr sort by p_name) as deltaSz
+from part
+group by p_mfgr, p_name, p_size
+;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter1.q
index 5fd19455f9..312a0177fa 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter1.q
@@ -15,7 +15,7 @@ describe extended alter1;
alter table alter1 set serdeproperties('s1'='10', 's2' ='20');
describe extended alter1;
-add jar ../data/files/TestSerDe.jar;
+add jar ${system:maven.local.repository}/org/apache/hive/hive-it-test-serde/${system:hive.version}/hive-it-test-serde-${system:hive.version}.jar;
alter table alter1 set serde 'org.apache.hadoop.hive.serde2.TestSerDe' with serdeproperties('s1'='9');
describe extended alter1;
@@ -56,7 +56,7 @@ DESCRIBE EXTENDED alter1;
ALTER TABLE alter1 SET SERDEPROPERTIES('s1'='10', 's2' ='20');
DESCRIBE EXTENDED alter1;
-add jar ../data/files/TestSerDe.jar;
+add jar ${system:maven.local.repository}/org/apache/hive/hive-it-test-serde/${system:hive.version}/hive-it-test-serde-${system:hive.version}.jar;
ALTER TABLE alter1 SET SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' WITH SERDEPROPERTIES ('s1'='9');
DESCRIBE EXTENDED alter1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter3.q
index 3cde00748b..91e4e9bad0 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter3.q
@@ -1,5 +1,5 @@
create table alter3_src ( col1 string ) stored as textfile ;
-load data local inpath '../data/files/test.dat' overwrite into table alter3_src ;
+load data local inpath '../../data/files/test.dat' overwrite into table alter3_src ;
create table alter3 ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile;
@@ -34,7 +34,7 @@ USE alter3_db;
SHOW TABLES;
CREATE TABLE alter3_src (col1 STRING) STORED AS TEXTFILE ;
-LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE alter3_src ;
+LOAD DATA LOCAL INPATH '../../data/files/test.dat' OVERWRITE INTO TABLE alter3_src ;
CREATE TABLE alter3 (col1 STRING) PARTITIONED BY (pcol1 STRING, pcol2 STRING) STORED AS SEQUENCEFILE;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter5.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter5.q
index 0d14f228d1..66c9f8dc54 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter5.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter5.q
@@ -3,7 +3,7 @@
--
create table alter5_src ( col1 string ) stored as textfile ;
-load data local inpath '../data/files/test.dat' overwrite into table alter5_src ;
+load data local inpath '../../data/files/test.dat' overwrite into table alter5_src ;
create table alter5 ( col1 string ) partitioned by (dt string);
@@ -32,7 +32,7 @@ USE alter5_db;
SHOW TABLES;
create table alter5_src ( col1 string ) stored as textfile ;
-load data local inpath '../data/files/test.dat' overwrite into table alter5_src ;
+load data local inpath '../../data/files/test.dat' overwrite into table alter5_src ;
create table alter5 ( col1 string ) partitioned by (dt string);
alter table alter5 add partition (dt='a') location 'parta';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_char1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_char1.q
new file mode 100644
index 0000000000..4ecb7e7389
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_char1.q
@@ -0,0 +1,32 @@
+drop table alter_char_1;
+
+create table alter_char_1 (key string, value string);
+insert overwrite table alter_char_1
+ select key, value from src order by key limit 5;
+
+select * from alter_char_1 order by key;
+
+-- change column to char
+alter table alter_char_1 change column value value char(20);
+-- contents should still look the same
+select * from alter_char_1 order by key;
+
+-- change column to smaller char
+alter table alter_char_1 change column value value char(3);
+-- value column should be truncated now
+select * from alter_char_1 order by key;
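+-- illustrative example, assuming src-style values: a value such as 'val_238' now reads back as 'val'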
+
+-- change back to bigger char
+alter table alter_char_1 change column value value char(20);
+-- column values should be full size again
+select * from alter_char_1 order by key;
+
+-- add char column
+alter table alter_char_1 add columns (key2 int, value2 char(10));
+select * from alter_char_1 order by key;
+
+insert overwrite table alter_char_1
+ select key, value, key, value from src order by key limit 5;
+select * from alter_char_1 order by key;
+
+drop table alter_char_1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_char2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_char2.q
new file mode 100644
index 0000000000..7fa9fcef11
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_char2.q
@@ -0,0 +1,22 @@
+
+-- alter column type, with partitioned table
+drop table if exists alter_char2;
+
+create table alter_char2 (
+ c1 char(255)
+) partitioned by (hr int);
+
+insert overwrite table alter_char2 partition (hr=1)
+ select value from src limit 1;
+
+select c1, length(c1) from alter_char2;
+
+alter table alter_char2 change column c1 c1 char(10);
+
+select hr, c1, length(c1) from alter_char2 where hr = 1;
+
+insert overwrite table alter_char2 partition (hr=2)
+ select key from src limit 1;
+
+select hr, c1, length(c1) from alter_char2 where hr = 1;
+select hr, c1, length(c1) from alter_char2 where hr = 2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_concatenate_indexed_table.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_concatenate_indexed_table.q
index 807ef539c8..e1c3780a9b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_concatenate_indexed_table.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_concatenate_indexed_table.q
@@ -1,9 +1,9 @@
set hive.exec.concatenate.check.index =false;
create table src_rc_concatenate_test(key int, value string) stored as rcfile;
-load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_concatenate_test;
-load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_concatenate_test;
-load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_concatenate_test;
+load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_concatenate_test;
+load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_concatenate_test;
+load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_concatenate_test;
show table extended like `src_rc_concatenate_test`;
@@ -26,9 +26,9 @@ create table src_rc_concatenate_test_part(key int, value string) partitioned by
alter table src_rc_concatenate_test_part add partition (ds='2011');
-load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_concatenate_test_part partition (ds='2011');
-load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_concatenate_test_part partition (ds='2011');
-load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_concatenate_test_part partition (ds='2011');
+load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_concatenate_test_part partition (ds='2011');
+load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_concatenate_test_part partition (ds='2011');
+load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_concatenate_test_part partition (ds='2011');
show table extended like `src_rc_concatenate_test_part` partition (ds='2011');
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_db_owner.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_db_owner.q
new file mode 100644
index 0000000000..b224f3339e
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_db_owner.q
@@ -0,0 +1,9 @@
+
+create database db_alter_onr;
+describe database db_alter_onr;
+
+alter database db_alter_onr set owner user user1;
+describe database db_alter_onr;
+
+alter database db_alter_onr set owner role role1;
+describe database db_alter_onr;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_merge.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_merge.q
index c3502739d5..ceabd0830a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_merge.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_merge.q
@@ -1,8 +1,8 @@
create table src_rc_merge_test(key int, value string) stored as rcfile;
-load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test;
-load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test;
-load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test;
+load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test;
+load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test;
+load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test;
show table extended like `src_rc_merge_test`;
@@ -21,9 +21,9 @@ create table src_rc_merge_test_part(key int, value string) partitioned by (ds st
alter table src_rc_merge_test_part add partition (ds='2011');
-load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test_part partition (ds='2011');
-load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test_part partition (ds='2011');
-load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test_part partition (ds='2011');
+load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_part partition (ds='2011');
+load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_part partition (ds='2011');
+load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_part partition (ds='2011');
show table extended like `src_rc_merge_test_part` partition (ds='2011');
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_merge_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_merge_2.q
index 65ddfed13d..e09703d1c6 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_merge_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_merge_2.q
@@ -3,9 +3,9 @@ create table src_rc_merge_test_part(key int, value string) partitioned by (ds st
alter table src_rc_merge_test_part add partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
desc extended src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
-load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
-load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
-load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
+load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
+load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
+load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_part partition (ds='2012-01-03', ts='2012-01-03+14:46:31');
select count(1) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
select sum(hash(key)), sum(hash(value)) from src_rc_merge_test_part where ds='2012-01-03' and ts='2012-01-03+14:46:31';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_merge_stats.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_merge_stats.q
index 23bae55ab2..0af87e2c07 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_merge_stats.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_merge_stats.q
@@ -1,8 +1,8 @@
create table src_rc_merge_test_stat(key int, value string) stored as rcfile;
-load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test_stat;
-load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test_stat;
-load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test_stat;
+load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_stat;
+load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_stat;
+load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_stat;
show table extended like `src_rc_merge_test_stat`;
desc extended src_rc_merge_test_stat;
@@ -21,9 +21,9 @@ create table src_rc_merge_test_part_stat(key int, value string) partitioned by (
alter table src_rc_merge_test_part_stat add partition (ds='2011');
-load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test_part_stat partition (ds='2011');
-load data local inpath '../data/files/smbbucket_2.rc' into table src_rc_merge_test_part_stat partition (ds='2011');
-load data local inpath '../data/files/smbbucket_3.rc' into table src_rc_merge_test_part_stat partition (ds='2011');
+load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test_part_stat partition (ds='2011');
+load data local inpath '../../data/files/smbbucket_2.rc' into table src_rc_merge_test_part_stat partition (ds='2011');
+load data local inpath '../../data/files/smbbucket_3.rc' into table src_rc_merge_test_part_stat partition (ds='2011');
show table extended like `src_rc_merge_test_part_stat` partition (ds='2011');
desc extended src_rc_merge_test_part_stat;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q
index b6d1eb8f2d..5dda4c08fd 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table.q
@@ -1,4 +1,4 @@
-
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S)
create table tst1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets;
alter table tst1 clustered by (key) into 8 buckets;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2.q
index c6a4ad24fc..acc028bdd8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2.q
@@ -1,3 +1,4 @@
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S)
-- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
-- the partition metadata is updated as well.
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
new file mode 100644
index 0000000000..d81430441c
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table2_h23.q
@@ -0,0 +1,85 @@
+-- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S)
+-- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata
+-- the partition metadata is updated as well.
+
+CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING);
+
+DESCRIBE FORMATTED tst1;
+
+SET hive.enforce.bucketing=true;
+SET hive.enforce.sorting=true;
+INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+
+DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+
+-- Test an unbucketed partition gets converted to bucketed
+ALTER TABLE tst1 CLUSTERED BY (key) INTO 8 BUCKETS;
+
+DESCRIBE FORMATTED tst1;
+
+INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+
+DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+
+-- Test an unsorted partition gets converted to sorted
+ALTER TABLE tst1 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS;
+
+DESCRIBE FORMATTED tst1;
+
+INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+
+DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+
+-- Test changing the bucket columns
+ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS;
+
+DESCRIBE FORMATTED tst1;
+
+INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+
+DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+
+-- Test changing the number of buckets
+ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS;
+
+DESCRIBE FORMATTED tst1;
+
+INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+
+DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+
+-- Test changing the sort columns
+ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS;
+
+DESCRIBE FORMATTED tst1;
+
+INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+
+DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+
+-- Test changing the sort order
+ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS;
+
+DESCRIBE FORMATTED tst1;
+
+INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+
+DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+
+-- Test a sorted partition gets converted to unsorted
+ALTER TABLE tst1 CLUSTERED BY (value) INTO 4 BUCKETS;
+
+DESCRIBE FORMATTED tst1;
+
+INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+
+DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
+
+-- Test a bucketed partition gets converted to unbucketed
+ALTER TABLE tst1 NOT CLUSTERED;
+
+DESCRIBE FORMATTED tst1;
+
+INSERT OVERWRITE TABLE tst1 PARTITION (ds = '1') SELECT key, value FROM src;
+
+DESCRIBE FORMATTED tst1 PARTITION (ds = '1');
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
new file mode 100644
index 0000000000..a03992510b
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_numbuckets_partitioned_table_h23.q
@@ -0,0 +1,59 @@
+-- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20,0.20S)
+create table tst1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets;
+
+alter table tst1 clustered by (key) into 8 buckets;
+
+describe formatted tst1;
+
+set hive.enforce.bucketing=true;
+insert overwrite table tst1 partition (ds='1') select key, value from src;
+
+describe formatted tst1 partition (ds = '1');
+
+-- Test changing bucket number
+
+alter table tst1 clustered by (key) into 12 buckets;
+
+insert overwrite table tst1 partition (ds='1') select key, value from src;
+
+describe formatted tst1 partition (ds = '1');
+
+describe formatted tst1;
+
+-- Test changing bucket number of (table/partition)
+
+alter table tst1 into 4 buckets;
+
+describe formatted tst1;
+
+describe formatted tst1 partition (ds = '1');
+
+alter table tst1 partition (ds = '1') into 6 buckets;
+
+describe formatted tst1;
+
+describe formatted tst1 partition (ds = '1');
+
+-- Test adding sort order
+
+alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets;
+
+describe formatted tst1;
+
+-- Test changing sort order
+
+alter table tst1 clustered by (key) sorted by (value desc) into 12 buckets;
+
+describe formatted tst1;
+
+-- Test removing sort order
+
+alter table tst1 clustered by (value) into 12 buckets;
+
+describe formatted tst1;
+
+-- Test removing buckets
+
+alter table tst1 not clustered;
+
+describe formatted tst1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_partition_coltype.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_partition_coltype.q
index 5479afbbd5..19c0f9d1d8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_partition_coltype.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_partition_coltype.q
@@ -10,48 +10,48 @@ desc alter_coltype;
select count(*) from alter_coltype where dt = '100x';
-- alter partition key column data type for dt column.
-alter table alter_coltype partition column (dt int);
+-- alter table alter_coltype partition column (dt int);
-- load a new partition using new data type.
-insert overwrite table alter_coltype partition(dt=10, ts='3.0') select * from src1;
+-- insert overwrite table alter_coltype partition(dt=10, ts='3.0') select * from src1;
-- make sure the partition predicate still works.
-select count(*) from alter_coltype where dt = '100x';
-explain extended select count(*) from alter_coltype where dt = '100x';
+-- select count(*) from alter_coltype where dt = '100x';
+-- explain extended select count(*) from alter_coltype where dt = '100x';
-select count(*) from alter_coltype where dt = 100;
+-- select count(*) from alter_coltype where dt = '100';
-- alter partition key column data type for ts column.
-alter table alter_coltype partition column (ts double);
+-- alter table alter_coltype partition column (ts double);
-alter table alter_coltype partition column (dt string);
+-- alter table alter_coltype partition column (dt string);
-- load a new partition using new data type.
-insert overwrite table alter_coltype partition(dt='100x', ts=3.0) select * from src1;
+-- insert overwrite table alter_coltype partition(dt='100x', ts=3.0) select * from src1;
-- validate partition key column predicate can still work.
-select count(*) from alter_coltype where ts = '6:30pm';
-explain extended select count(*) from alter_coltype where ts = '6:30pm';
+-- select count(*) from alter_coltype where ts = '6:30pm';
+-- explain extended select count(*) from alter_coltype where ts = '6:30pm';
-- validate partition key column predicate on two different partition column data type
-- can still work.
-select count(*) from alter_coltype where ts = 3.0 and dt=10;
-explain extended select count(*) from alter_coltype where ts = 3.0 and dt=10;
+-- select count(*) from alter_coltype where ts = 3.0 and dt=10;
+-- explain extended select count(*) from alter_coltype where ts = 3.0 and dt=10;
-- query where multiple partition values (of different datatypes) are being selected
-select key, value, dt, ts from alter_coltype where dt is not null;
-explain extended select key, value, dt, ts from alter_coltype where dt is not null;
+-- select key, value, dt, ts from alter_coltype where dt is not null;
+-- explain extended select key, value, dt, ts from alter_coltype where dt is not null;
-select count(*) from alter_coltype where ts = 3.0;
+-- select count(*) from alter_coltype where ts = 3.0;
-- make sure the partition predicate still works.
-select count(*) from alter_coltype where dt = '100x' or dt = '10';
-explain extended select count(*) from alter_coltype where dt = '100x' or dt = '10';
+-- select count(*) from alter_coltype where dt = '100x' or dt = '10';
+-- explain extended select count(*) from alter_coltype where dt = '100x' or dt = '10';
-desc alter_coltype;
-desc alter_coltype partition (dt='100x', ts='6:30pm');
-desc alter_coltype partition (dt='100x', ts=3.0);
-desc alter_coltype partition (dt=10, ts=3.0);
+-- desc alter_coltype;
+-- desc alter_coltype partition (dt='100x', ts='6:30pm');
+-- desc alter_coltype partition (dt='100x', ts=3.0);
+-- desc alter_coltype partition (dt=10, ts=3.0);
drop table alter_coltype;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_partition_protect_mode.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_partition_protect_mode.q
index 7bcb9f071c..7a1f3dd51d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_partition_protect_mode.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_partition_protect_mode.q
@@ -2,10 +2,10 @@
create table if not exists alter_part_protect_mode(key string, value string ) partitioned by (year string, month string) stored as textfile ;
-- Load data
-load data local inpath '../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1996', month='10');
-load data local inpath '../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1996', month='12');
-load data local inpath '../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1995', month='09');
-load data local inpath '../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1994', month='07');
+load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1996', month='10');
+load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1996', month='12');
+load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1995', month='09');
+load data local inpath '../../data/files/T1.txt' overwrite into table alter_part_protect_mode partition (year='1994', month='07');
-- offline
alter table alter_part_protect_mode partition (year='1996') disable offline;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_rename_partition.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_rename_partition.q
index d498cd52a5..8ebbe98824 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_rename_partition.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_rename_partition.q
@@ -4,7 +4,7 @@ DROP TABLE alter_rename_partition;
SHOW TABLES;
create table alter_rename_partition_src ( col1 string ) stored as textfile ;
-load data local inpath '../data/files/test.dat' overwrite into table alter_rename_partition_src ;
+load data local inpath '../../data/files/test.dat' overwrite into table alter_rename_partition_src ;
create table alter_rename_partition ( col1 string ) partitioned by (pcol1 string , pcol2 string) stored as sequencefile;
@@ -28,7 +28,7 @@ USE alter_rename_partition_db;
SHOW TABLES;
CREATE TABLE alter_rename_partition_src (col1 STRING) STORED AS TEXTFILE ;
-LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE alter_rename_partition_src ;
+LOAD DATA LOCAL INPATH '../../data/files/test.dat' OVERWRITE INTO TABLE alter_rename_partition_src ;
CREATE TABLE alter_rename_partition (col1 STRING) PARTITIONED BY (pcol1 STRING, pcol2 STRING) STORED AS SEQUENCEFILE;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_varchar2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_varchar2.q
index 5a481e7f8e..b870108bdd 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_varchar2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/alter_varchar2.q
@@ -7,7 +7,7 @@ create table alter_varchar2 (
) partitioned by (hr int);
insert overwrite table alter_varchar2 partition (hr=1)
- select value from src limit 1;
+ select value from src tablesample (1 rows);
select c1, length(c1) from alter_varchar2;
@@ -16,7 +16,9 @@ alter table alter_varchar2 change column c1 c1 varchar(10);
select hr, c1, length(c1) from alter_varchar2 where hr = 1;
insert overwrite table alter_varchar2 partition (hr=2)
- select key from src limit 1;
+ select key from src tablesample (1 rows);
+
+set hive.fetch.task.conversion=more;
select hr, c1, length(c1) from alter_varchar2 where hr = 1;
select hr, c1, length(c1) from alter_varchar2 where hr = 2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ambiguous_col.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ambiguous_col.q
index e7053c1c8e..5ccd2c8c62 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ambiguous_col.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ambiguous_col.q
@@ -1,3 +1,4 @@
+set hive.support.quoted.identifiers=none;
-- TOK_ALLCOLREF
explain select * from (select a.key, a.* from (select * from src) a join (select * from src1) b on (a.key = b.key)) t;
-- DOT
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_filter.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_filter.q
new file mode 100644
index 0000000000..ec973e1596
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_filter.q
@@ -0,0 +1,76 @@
+set hive.stats.fetch.column.stats=true;
+
+create table if not exists loc_staging (
+ state string,
+ locid int,
+ zip bigint,
+ year int
+) row format delimited fields terminated by '|' stored as textfile;
+
+create table loc_orc like loc_staging;
+alter table loc_orc set fileformat orc;
+
+load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging;
+
+insert overwrite table loc_orc select * from loc_staging;
+
+-- numRows: 8 rawDataSize: 796
+explain extended select * from loc_orc;
+
+-- column stats are not COMPLETE, so stats are not updated
+-- numRows: 8 rawDataSize: 796
+explain extended select * from loc_orc where state='OH';
+
+analyze table loc_orc compute statistics for columns state,locid,zip,year;
+
+-- state column has 5 distinct values; the estimate is numRows/countDistincts
+-- numRows: 1 rawDataSize: 102
+explain extended select * from loc_orc where state='OH';
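+-- worked example, using the counts above: 8 rows / 5 distinct values = 1 (rounded down)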
+
+-- not equals comparison shouldn't affect number of rows
+-- numRows: 8 rawDataSize: 804
+explain extended select * from loc_orc where state!='OH';
+explain extended select * from loc_orc where state<>'OH';
+
+-- null checks are treated like a constant equality comparison
+-- numRows: 1 rawDataSize: 102
+explain extended select * from loc_orc where zip is null;
+-- numRows: 1 rawDataSize: 102
+explain extended select * from loc_orc where !(zip is not null);
+
+-- not-null checks are treated as the inverse of null checks
+-- numRows: 7 rawDataSize: 702
+explain extended select * from loc_orc where zip is not null;
+-- numRows: 7 rawDataSize: 702
+explain extended select * from loc_orc where !(zip is null);
+
+-- NOT evaluation. true will pass all rows, false will not pass any rows
+-- numRows: 8 rawDataSize: 804
+explain extended select * from loc_orc where !false;
+-- numRows: 0 rawDataSize: 0
+explain extended select * from loc_orc where !true;
+
+-- OR evaluation. 1 row for OH and 1 row for CA
+-- numRows: 2 rawDataSize: 204
+explain extended select * from loc_orc where state='OH' or state='CA';
+
+-- AND evaluation. rules are applied in cascade: 8/2 = 4, then 4/2 = 2
+-- numRows: 2 rawDataSize: 204
+explain extended select * from loc_orc where year=2001 and year is null;
+-- numRows: 1 rawDataSize: 102
+explain extended select * from loc_orc where year=2001 and state='OH' and state='FL';
+
+-- AND and OR together. left expr will yield 1 row and right will yield 1 row
+-- numRows: 3 rawDataSize: 306
+explain extended select * from loc_orc where (year=2001 and year is null) or (state='CA');
+
+-- AND and OR together. left expr will yield 8 rows and right will yield 1 row
+-- numRows: 1 rawDataSize: 102
+explain extended select * from loc_orc where (year=2001 or year is null) and (state='CA');
+
+-- for all inequality conditions, the rule is rows/3
+-- numRows: 2 rawDataSize: 204
+explain extended select * from loc_orc where locid < 30;
+explain extended select * from loc_orc where locid > 30;
+explain extended select * from loc_orc where locid <= 30;
+explain extended select * from loc_orc where locid >= 30;
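+-- worked example, using the counts above: 8 rows / 3 = 2 (rounded down), matching the numRows: 2 annotation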
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_groupby.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_groupby.q
new file mode 100644
index 0000000000..05cb036b46
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_groupby.q
@@ -0,0 +1,69 @@
+set hive.stats.fetch.column.stats=true;
+
+create table if not exists loc_staging (
+ state string,
+ locid int,
+ zip bigint,
+ year int
+) row format delimited fields terminated by '|' stored as textfile;
+
+create table loc_orc like loc_staging;
+alter table loc_orc set fileformat orc;
+
+load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging;
+
+insert overwrite table loc_orc select * from loc_staging;
+
+-- numRows: 8 rawDataSize: 796
+explain extended select * from loc_orc;
+
+-- partial column stats
+analyze table loc_orc compute statistics for columns state;
+
+-- inner group by: map - numRows: 8 reduce - numRows: 4
+-- outer group by: map - numRows: 4 reduce numRows: 2
+explain extended select a, c, min(b)
+from ( select state as a, locid as b, count(*) as c
+ from loc_orc
+ group by state,locid
+ ) sq1
+group by a,c;
+
+analyze table loc_orc compute statistics for columns state,locid,zip,year;
+
+-- only one distinct value in year column + 1 NULL value
+-- map-side GBY: numRows: 8 (map-side will not do any reduction)
+-- reduce-side GBY: numRows: 2
+explain extended select year from loc_orc group by year;
+
+-- map-side GBY: numRows: 8
+-- reduce-side GBY: numRows: 4
+explain extended select state,locid from loc_orc group by state,locid;
+
+-- map-side GBY numRows: 32 reduce-side GBY numRows: 16
+explain extended select state,locid from loc_orc group by state,locid with cube;
+
+-- map-side GBY numRows: 24 reduce-side GBY numRows: 12
+explain extended select state,locid from loc_orc group by state,locid with rollup;
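+-- worked example, assuming 8 input rows: cube over 2 columns emits 2^2 = 4 grouping sets,
+-- so 8*4 = 32 map-side rows; rollup emits 3, so 8*3 = 24; reduce-side rows are then halved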
+
+-- map-side GBY numRows: 8 reduce-side GBY numRows: 4
+explain extended select state,locid from loc_orc group by state,locid grouping sets((state));
+
+-- map-side GBY numRows: 16 reduce-side GBY numRows: 8
+explain extended select state,locid from loc_orc group by state,locid grouping sets((state),(locid));
+
+-- map-side GBY numRows: 24 reduce-side GBY numRows: 12
+explain extended select state,locid from loc_orc group by state,locid grouping sets((state),(locid),());
+
+-- map-side GBY numRows: 32 reduce-side GBY numRows: 16
+explain extended select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),());
+
+set hive.stats.map.parallelism=10;
+
+-- map-side GBY: numRows: 80 (map-side will not do any reduction)
+-- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2. numRows = min(80/2, 2)
+explain extended select year from loc_orc group by year;
+
+-- map-side GBY numRows: 320 reduce-side GBY numRows: 42 Reason: numDistinct of state and locid are 6,7 resp. numRows = min(320/2, 6*7)
+explain extended select state,locid from loc_orc group by state,locid with cube;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_join.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_join.q
new file mode 100644
index 0000000000..965b0b7ed0
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_join.q
@@ -0,0 +1,81 @@
+set hive.stats.fetch.column.stats=true;
+
+create table if not exists emp_staging (
+ lastname string,
+ deptid int
+) row format delimited fields terminated by '|' stored as textfile;
+
+create table if not exists dept_staging (
+ deptid int,
+ deptname string
+) row format delimited fields terminated by '|' stored as textfile;
+
+create table if not exists loc_staging (
+ state string,
+ locid int,
+ zip bigint,
+ year int
+) row format delimited fields terminated by '|' stored as textfile;
+
+create table if not exists emp_orc like emp_staging;
+alter table emp_orc set fileformat orc;
+
+create table if not exists dept_orc like dept_staging;
+alter table dept_orc set fileformat orc;
+
+create table loc_orc like loc_staging;
+alter table loc_orc set fileformat orc;
+
+LOAD DATA LOCAL INPATH '../../data/files/emp.txt' OVERWRITE INTO TABLE emp_staging;
+LOAD DATA LOCAL INPATH '../../data/files/dept.txt' OVERWRITE INTO TABLE dept_staging;
+LOAD DATA LOCAL INPATH '../../data/files/loc.txt' OVERWRITE INTO TABLE loc_staging;
+
+insert overwrite table emp_orc select * from emp_staging;
+insert overwrite table dept_orc select * from dept_staging;
+insert overwrite table loc_orc select * from loc_staging;
+
+analyze table emp_orc compute statistics for columns lastname,deptid;
+analyze table dept_orc compute statistics for columns deptname,deptid;
+analyze table loc_orc compute statistics for columns state,locid,zip,year;
+
+-- number of rows
+-- emp_orc - 6
+-- dept_orc - 4
+-- loc_orc - 8
+
+-- count distincts for relevant columns (since count distinct values are approximate, in some cases they will be greater than the number of rows)
+-- emp_orc.deptid - 3
+-- emp_orc.lastname - 7
+-- dept_orc.deptid - 6
+-- dept_orc.deptname - 5
+-- loc_orc.locid - 6
+-- loc_orc.state - 7
+
+-- Expected output rows: 4
+-- Reason: #rows = (6*4)/max(3,6)
+explain extended select * from emp_orc e join dept_orc d on (e.deptid = d.deptid);
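+-- worked example, using the counts above: (6*4)/max(3,6) = 24/6 = 4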
+
+-- 3 way join
+-- Expected output rows: 4
+-- Reason: #rows = (6*4*6)/(max(3,6)*max(6,3))
+explain extended select * from emp_orc e join dept_orc d on (e.deptid = d.deptid) join emp_orc e1 on (e.deptid = e1.deptid);
+
+-- Expected output rows: 5
+-- Reason: #rows = (6*4*8)/(max(3,6)*max(6,6))
+explain extended select * from emp_orc e join dept_orc d on (e.deptid = d.deptid) join loc_orc l on (e.deptid = l.locid);
+
+-- join keys of different types
+-- Expected output rows: 4
+-- Reason: #rows = (6*4*8)/(max(3,6)*max(6,7))
+explain extended select * from emp_orc e join dept_orc d on (e.deptid = d.deptid) join loc_orc l on (e.deptid = l.state);
+
+-- multi-attribute join
+-- Expected output rows: 0
+-- Reason: #rows = (6*4)/(max(3,6)*max(7,5))
+explain extended select * from emp_orc e join dept_orc d on (e.deptid = d.deptid and e.lastname = d.deptname);
+
+-- 3 way and multi-attribute join
+-- Expected output rows: 0
+-- Reason: #rows = (6*4*8)/(max(3,6)*max(7,5)*max(3,6)*max(7,7))
+explain extended select * from emp_orc e join dept_orc d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc_orc l on (e.deptid = l.locid and e.lastname = l.state);
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_limit.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_limit.q
new file mode 100644
index 0000000000..0a9f880b5f
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_limit.q
@@ -0,0 +1,30 @@
+set hive.stats.fetch.column.stats=true;
+
+create table if not exists loc_staging (
+ state string,
+ locid int,
+ zip bigint,
+ year int
+) row format delimited fields terminated by '|' stored as textfile;
+
+create table loc_orc like loc_staging;
+alter table loc_orc set fileformat orc;
+
+load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging;
+
+insert overwrite table loc_orc select * from loc_staging;
+
+analyze table loc_orc compute statistics for columns state, locid, zip, year;
+
+-- numRows: 8 rawDataSize: 796
+explain extended select * from loc_orc;
+
+-- numRows: 4 rawDataSize: 396
+explain extended select * from loc_orc limit 4;
+
+-- greater than the available number of rows
+-- numRows: 8 rawDataSize: 796
+explain extended select * from loc_orc limit 16;
+
+-- numRows: 0 rawDataSize: 0
+explain extended select * from loc_orc limit 0;
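+-- worked example for limit 4, assuming a per-row size of floor(796/8) = 99 bytes: 4 * 99 = 396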
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_part.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_part.q
new file mode 100644
index 0000000000..839c7d8496
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_part.q
@@ -0,0 +1,85 @@
+set hive.stats.fetch.column.stats=true;
+set hive.stats.autogather=false;
+set hive.exec.dynamic.partition=true;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+create table if not exists loc_staging (
+ state string,
+ locid int,
+ zip bigint,
+ year string
+) row format delimited fields terminated by '|' stored as textfile;
+
+LOAD DATA LOCAL INPATH '../../data/files/loc.txt' OVERWRITE INTO TABLE loc_staging;
+
+create table if not exists loc_orc (
+ state string,
+ locid int,
+ zip bigint
+) partitioned by(year string) stored as orc;
+
+-- basicStatState: NONE colStatState: NONE
+explain extended select * from loc_orc;
+
+insert overwrite table loc_orc partition(year) select * from loc_staging;
+
+-- stats are disabled. basic stats will report the file size but not the raw data size, so initial statistics will be PARTIAL
+
+-- basicStatState: PARTIAL colStatState: NONE
+explain extended select * from loc_orc;
+
+-- partition level analyze statistics for a specific partition
+analyze table loc_orc partition(year='2001') compute statistics;
+
+-- basicStatState: PARTIAL colStatState: NONE
+explain extended select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__';
+
+-- basicStatState: PARTIAL colStatState: NONE
+explain extended select * from loc_orc;
+
+-- basicStatState: COMPLETE colStatState: NONE
+explain extended select * from loc_orc where year='2001';
+
+-- partition level analyze statistics for all partitions
+analyze table loc_orc partition(year) compute statistics;
+
+-- basicStatState: COMPLETE colStatState: NONE
+explain extended select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__';
+
+-- basicStatState: COMPLETE colStatState: NONE
+explain extended select * from loc_orc;
+
+-- basicStatState: COMPLETE colStatState: NONE
+explain extended select * from loc_orc where year='2001' or year='__HIVE_DEFAULT_PARTITION__';
+
+-- both partitions will be pruned
+-- basicStatState: NONE colStatState: NONE
+explain extended select * from loc_orc where year='2001' and year='__HIVE_DEFAULT_PARTITION__';
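+-- the two equality predicates on year are mutually exclusive, so no partition can satisfy the filter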
+
+-- partition level partial column statistics
+analyze table loc_orc partition(year='2001') compute statistics for columns state,locid;
+
+-- basicStatState: COMPLETE colStatState: NONE
+explain extended select zip from loc_orc;
+
+-- basicStatState: COMPLETE colStatState: PARTIAL
+explain extended select state from loc_orc;
+
+-- column statistics for __HIVE_DEFAULT_PARTITION__ are not supported yet. Hence colStatState reports PARTIAL
+-- basicStatState: COMPLETE colStatState: PARTIAL
+explain extended select state,locid from loc_orc;
+
+-- basicStatState: COMPLETE colStatState: COMPLETE
+explain extended select state,locid from loc_orc where year='2001';
+
+-- basicStatState: COMPLETE colStatState: NONE
+explain extended select state,locid from loc_orc where year!='2001';
+
+-- basicStatState: COMPLETE colStatState: PARTIAL
+explain extended select * from loc_orc;
+
+-- This is to test filter expression evaluation on a partition column
+-- numRows: 2 dataSize: 8 basicStatState: COMPLETE colStatState: COMPLETE
+explain extended select locid from loc_orc where locid>0 and year='2001';
+explain extended select locid,year from loc_orc where locid>0 and year='2001';
+explain extended select * from (select locid,year from loc_orc) test where locid>0 and year='2001';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_select.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_select.q
new file mode 100644
index 0000000000..5fc3f64b90
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_select.q
@@ -0,0 +1,143 @@
+set hive.stats.fetch.column.stats=true;
+
+create table if not exists alltypes (
+ bo1 boolean,
+ ti1 tinyint,
+ si1 smallint,
+ i1 int,
+ bi1 bigint,
+ f1 float,
+ d1 double,
+ de1 decimal,
+ ts1 timestamp,
+ da1 timestamp,
+ s1 string,
+ vc1 varchar(5),
+ m1 map<string, string>,
+ l1 array<int>,
+ st1 struct<c1:int, c2:string>
+) row format delimited fields terminated by '|'
+collection items terminated by ','
+map keys terminated by ':' stored as textfile;
+
+create table alltypes_orc like alltypes;
+alter table alltypes_orc set fileformat orc;
+
+load data local inpath '../../data/files/alltypes.txt' overwrite into table alltypes;
+
+insert overwrite table alltypes_orc select * from alltypes;
+
+-- basicStatState: COMPLETE colStatState: NONE numRows: 2 rawDataSize: 1514
+explain extended select * from alltypes_orc;
+
+-- statistics for complex types are not supported yet
+analyze table alltypes_orc compute statistics for columns bo1, ti1, si1, i1, bi1, f1, d1, s1, vc1;
+
+-- numRows: 2 rawDataSize: 1514
+explain extended select * from alltypes_orc;
+
+-- numRows: 2 rawDataSize: 8
+explain extended select bo1 from alltypes_orc;
+
+-- col alias renaming
+-- numRows: 2 rawDataSize: 8
+explain extended select i1 as int1 from alltypes_orc;
+
+-- numRows: 2 rawDataSize: 174
+explain extended select s1 from alltypes_orc;
+
+-- column statistics for complex types are unsupported, so statistics will not be updated
+-- numRows: 2 rawDataSize: 1514
+explain extended select m1 from alltypes_orc;
+
+-- numRows: 2 rawDataSize: 246
+explain extended select bo1, ti1, si1, i1, bi1, f1, d1,s1 from alltypes_orc;
+
+-- numRows: 2 rawDataSize: 0
+explain extended select null from alltypes_orc;
+
+-- numRows: 2 rawDataSize: 8
+explain extended select 11 from alltypes_orc;
+
+-- numRows: 2 rawDataSize: 16
+explain extended select 11L from alltypes_orc;
+
+-- numRows: 2 rawDataSize: 16
+explain extended select 11.0 from alltypes_orc;
+
+-- numRows: 2 rawDataSize: 178
+explain extended select "hello" from alltypes_orc;
+explain extended select cast("hello" as char(5)) from alltypes_orc;
+explain extended select cast("hello" as varchar(5)) from alltypes_orc;
+
+-- numRows: 2 rawDataSize: 96
+explain extended select unbase64("0xe23") from alltypes_orc;
+
+-- numRows: 2 rawDataSize: 16
+explain extended select cast("1" as TINYINT), cast("20" as SMALLINT) from alltypes_orc;
+
+-- numRows: 2 rawDataSize: 80
+explain extended select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from alltypes_orc;
+
+-- numRows: 2 rawDataSize: 112
+explain extended select cast("1970-12-31 15:59:58.174" as DATE) from alltypes_orc;
+
+-- numRows: 2 rawDataSize: 224
+explain extended select cast("58.174" as DECIMAL) from alltypes_orc;
+
+-- numRows: 2 rawDataSize: 112
+explain extended select array(1,2,3) from alltypes_orc;
+
+-- numRows: 2 rawDataSize: 1508
+explain extended select str_to_map("a=1 b=2 c=3", " ", "=") from alltypes_orc;
+
+-- numRows: 2 rawDataSize: 112
+explain extended select NAMED_STRUCT("a", 11, "b", 11) from alltypes_orc;
+
+-- numRows: 2 rawDataSize: 250
+explain extended select CREATE_UNION(0, "hello") from alltypes_orc;
+
+-- COUNT(*) is projected as a new column. It is not projected as a GenericUDF, so the data size estimate is based on the number of rows
+-- numRows: 1 rawDataSize: 8
+explain extended select count(*) from alltypes_orc;
+
+-- COUNT(1) is projected as a new column. It is not projected as a GenericUDF, so the data size estimate is based on the number of rows
+-- numRows: 1 rawDataSize: 8
+explain extended select count(1) from alltypes_orc;
+
+-- column statistics for complex column types will be missing; the data size will be calculated from the available column statistics
+-- numRows: 2 rawDataSize: 254
+explain extended select *,11 from alltypes_orc;
+
+-- subquery selects
+-- inner select - numRows: 2 rawDataSize: 8
+-- outer select - numRows: 2 rawDataSize: 8
+explain extended select i1 from (select i1 from alltypes_orc limit 10) temp;
+
+-- inner select - numRows: 2 rawDataSize: 16
+-- outer select - numRows: 2 rawDataSize: 8
+explain extended select i1 from (select i1,11 from alltypes_orc limit 10) temp;
+
+-- inner select - numRows: 2 rawDataSize: 16
+-- outer select - numRows: 2 rawDataSize: 186
+explain extended select i1,"hello" from (select i1,11 from alltypes_orc limit 10) temp;
+
+-- inner select - numRows: 2 rawDataSize: 24
+-- outer select - numRows: 2 rawDataSize: 16
+explain extended select x from (select i1,11.0 as x from alltypes_orc limit 10) temp;
+
+-- inner select - numRows: 2 rawDataSize: 104
+-- outer select - numRows: 2 rawDataSize: 186
+explain extended select x,"hello" from (select i1 as x, unbase64("0xe23") as ub from alltypes_orc limit 10) temp;
+
+-- inner select - numRows: 2 rawDataSize: 186
+-- middle select - numRows: 2 rawDataSize: 178
+-- outer select - numRows: 2 rawDataSize: 194
+explain extended select h, 11.0 from (select hell as h from (select i1, "hello" as hell from alltypes_orc limit 10) in1 limit 10) in2;
+
+-- This tests the FILTER operator where the filter expression is a boolean column
+-- numRows: 2 rawDataSize: 8
+explain extended select bo1 from alltypes_orc where bo1;
+
+-- numRows: 0 rawDataSize: 0
+explain extended select bo1 from alltypes_orc where !bo1;
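+
+-- Illustrative sketch, not part of the original test: the negated boolean filter
+-- above can equivalently be written with the NOT keyword
+explain extended select bo1 from alltypes_orc where not bo1;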
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_table.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_table.q
new file mode 100644
index 0000000000..4140fe610d
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_table.q
@@ -0,0 +1,53 @@
+set hive.stats.fetch.column.stats=true;
+set hive.stats.autogather=false;
+
+create table if not exists emp_staging (
+ lastname string,
+ deptid int
+) row format delimited fields terminated by '|' stored as textfile;
+
+create table if not exists emp_orc like emp_staging;
+alter table emp_orc set fileformat orc;
+
+-- basicStatState: NONE colStatState: NONE
+explain extended select * from emp_orc;
+
+LOAD DATA LOCAL INPATH '../../data/files/emp.txt' OVERWRITE INTO TABLE emp_staging;
+
+insert overwrite table emp_orc select * from emp_staging;
+
+-- stats autogather is disabled. Basic stats will report the file size but not the raw data size, so the initial statistics will be PARTIAL
+
+-- basicStatState: PARTIAL colStatState: NONE
+explain extended select * from emp_orc;
+
+-- table level analyze statistics
+analyze table emp_orc compute statistics;
+
+-- basicStatState: COMPLETE colStatState: NONE
+explain extended select * from emp_orc;
+
+-- column level partial statistics
+analyze table emp_orc compute statistics for columns deptid;
+
+-- basicStatState: COMPLETE colStatState: PARTIAL
+explain extended select * from emp_orc;
+
+-- all selected columns have statistics
+-- basicStatState: COMPLETE colStatState: COMPLETE
+explain extended select deptid from emp_orc;
+
+-- column level complete statistics
+analyze table emp_orc compute statistics for columns lastname,deptid;
+
+-- basicStatState: COMPLETE colStatState: COMPLETE
+explain extended select * from emp_orc;
+
+-- basicStatState: COMPLETE colStatState: COMPLETE
+explain extended select lastname from emp_orc;
+
+-- basicStatState: COMPLETE colStatState: COMPLETE
+explain extended select deptid from emp_orc;
+
+-- basicStatState: COMPLETE colStatState: COMPLETE
+explain extended select lastname,deptid from emp_orc;
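+
+-- Illustrative sketch, not part of the original test: with complete column stats,
+-- projecting a constant alongside the analyzed columns should still allow a
+-- data size estimate, since constants have known sizes
+explain extended select lastname, deptid, 1 from emp_orc;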
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_union.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_union.q
new file mode 100644
index 0000000000..586d9e1e2c
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/annotate_stats_union.q
@@ -0,0 +1,55 @@
+set hive.stats.fetch.column.stats=true;
+
+create table if not exists loc_staging (
+ state string,
+ locid int,
+ zip bigint,
+ year int
+) row format delimited fields terminated by '|' stored as textfile;
+
+create table loc_orc like loc_staging;
+alter table loc_orc set fileformat orc;
+
+load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging;
+
+insert overwrite table loc_orc select * from loc_staging;
+
+analyze table loc_orc compute statistics for columns state,locid,zip,year;
+
+-- numRows: 8 rawDataSize: 688
+explain extended select state from loc_orc;
+
+-- numRows: 16 rawDataSize: 1376
+explain extended select * from (select state from loc_orc union all select state from loc_orc) tmp;
+
+-- numRows: 8 rawDataSize: 796
+explain extended select * from loc_orc;
+
+-- numRows: 16 rawDataSize: 1592
+explain extended select * from (select * from loc_orc union all select * from loc_orc) tmp;
+
+create database test;
+use test;
+create table if not exists loc_staging (
+ state string,
+ locid int,
+ zip bigint,
+ year int
+) row format delimited fields terminated by '|' stored as textfile;
+
+create table loc_orc like loc_staging;
+alter table loc_orc set fileformat orc;
+
+load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging;
+
+insert overwrite table loc_orc select * from loc_staging;
+
+analyze table loc_staging compute statistics;
+analyze table loc_staging compute statistics for columns state,locid,zip,year;
+analyze table loc_orc compute statistics for columns state,locid,zip,year;
+
+-- numRows: 16 rawDataSize: 1376
+explain extended select * from (select state from default.loc_orc union all select state from test.loc_orc) temp;
+
+-- numRows: 16 rawDataSize: 1376
+explain extended select * from (select state from test.loc_staging union all select state from test.loc_orc) temp;
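+
+-- Illustrative sketch, not part of the original test: a full-row union across
+-- the two databases, analogous to the single-column union above
+explain extended select * from (select * from default.loc_orc union all select * from test.loc_orc) temp;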
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ansi_sql_arithmetic.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ansi_sql_arithmetic.q
new file mode 100644
index 0000000000..3788301ebb
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ansi_sql_arithmetic.q
@@ -0,0 +1,13 @@
+
+set hive.compat=latest;
+
+-- With ANSI SQL arithmetic enabled, int / int => exact numeric type
+explain select cast(key as int) / cast(key as int) from src limit 1;
+select cast(key as int) / cast(key as int) from src limit 1;
+
+
+set hive.compat=0.12;
+
+-- With ANSI SQL arithmetic disabled, int / int => double
+explain select cast(key as int) / cast(key as int) from src limit 1;
+select cast(key as int) / cast(key as int) from src limit 1;
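+
+-- Illustrative sketch, not part of the original test: the compat switch also
+-- governs division involving an integer literal; under hive.compat=latest the
+-- result is expected to be an exact numeric type, under 0.12 a double
+set hive.compat=latest;
+explain select cast(key as int) / 2 from src limit 1;
+set hive.compat=0.12;
+explain select cast(key as int) / 2 from src limit 1;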
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/archive_corrupt.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/archive_corrupt.q
index b83eab5d38..cc9801d887 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/archive_corrupt.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/archive_corrupt.q
@@ -14,7 +14,7 @@ create table tstsrcpart like srcpart;
-- to be thrown during the LOAD step. This behavior is now tested in
-- clientnegative/archive_corrupt.q
-load data local inpath '../data/files/archive_corrupt.rc' overwrite into table tstsrcpart partition (ds='2008-04-08', hr='11');
+load data local inpath '../../data/files/archive_corrupt.rc' overwrite into table tstsrcpart partition (ds='2008-04-08', hr='11');
insert overwrite table tstsrcpart partition (ds='2008-04-08', hr='12')
select key, value from srcpart where ds='2008-04-08' and hr='12';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q
index ddc06a99bb..50c0faa5e4 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/archive_excludeHadoop20.q
@@ -1,7 +1,7 @@
set hive.archive.enabled = true;
set hive.enforce.bucketing = true;
--- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
+-- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20)
drop table tstsrc;
drop table tstsrcpart;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_1_sql_std.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_1_sql_std.q
new file mode 100644
index 0000000000..79ae17ad5d
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_1_sql_std.q
@@ -0,0 +1,36 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set user.name=hive_admin_user;
+
+create table src_autho_test (key STRING, value STRING) ;
+
+set hive.security.authorization.enabled=true;
+set role ADMIN;
+--table grant to user
+
+grant select on table src_autho_test to user user_sauth;
+
+show grant user user_sauth on table src_autho_test;
+
+
+revoke select on table src_autho_test from user user_sauth;
+show grant user user_sauth on table src_autho_test;
+
+--role
+create role src_role;
+grant role src_role to user user_sauth;
+show role grant user user_sauth;
+
+--table grant to role
+
+grant select on table src_autho_test to role src_role;
+
+show grant role src_role on table src_autho_test;
+revoke select on table src_autho_test from role src_role;
+
+-- drop role
+drop role src_role;
+
+set hive.security.authorization.enabled=false;
+drop table src_autho_test;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_2.q
index 4fc79b9ed2..3353c534e1 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_2.q
@@ -6,7 +6,7 @@ ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="T
set hive.security.authorization.enabled=true;
-- column grant to user
-grant Create on table authorization_part to user hive_test_user;
+grant Create on authorization_part to user hive_test_user;
grant Update on table authorization_part to user hive_test_user;
grant Drop on table authorization_part to user hive_test_user;
grant select on table src_auth_tmp to user hive_test_user;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_9.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_9.q
new file mode 100644
index 0000000000..1abe659fa4
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_9.q
@@ -0,0 +1,17 @@
+-- SORT_BEFORE_DIFF
+
+create table dummy (key string, value string);
+
+grant select on database default to user hive_test_user;
+grant select on table dummy to user hive_test_user;
+grant select (key, value) on table dummy to user hive_test_user;
+
+show grant user hive_test_user on database default;
+show grant user hive_test_user on table dummy;
+show grant user hive_test_user on all;
+
+grant select on database default to user hive_test_user2;
+grant select on table dummy to user hive_test_user2;
+grant select (key, value) on table dummy to user hive_test_user2;
+
+show grant on all;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_admin_almighty1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_admin_almighty1.q
new file mode 100644
index 0000000000..45c4a7dc85
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_admin_almighty1.q
@@ -0,0 +1,17 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set user.name=hive_test_user;
+
+-- actions from admin should work as if admin has all privileges
+
+create table t1(i int);
+set user.name=hive_admin_user;
+
+show current roles;
+set role ADMIN;
+show current roles;
+select * from t1;
+grant all on table t1 to user user1;
+show grant user user1 on table t1;
+drop table t1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_create_func1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_create_func1.q
new file mode 100644
index 0000000000..65a7b339c2
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_create_func1.q
@@ -0,0 +1,14 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=hive_admin_user;
+
+-- admin required for create function
+set role ADMIN;
+
+create temporary function temp_fn as 'org.apache.hadoop.hive.ql.udf.UDFAscii';
+create function perm_fn as 'org.apache.hadoop.hive.ql.udf.UDFAscii';
+
+drop temporary function temp_fn;
+drop function perm_fn;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_create_macro1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_create_macro1.q
new file mode 100644
index 0000000000..fb60500b89
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_create_macro1.q
@@ -0,0 +1,12 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=hive_admin_user;
+
+-- admin required for create macro
+set role ADMIN;
+
+create temporary macro mymacro1(x double) x * x;
+
+drop temporary macro mymacro1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_create_table_owner_privs.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_create_table_owner_privs.q
new file mode 100644
index 0000000000..17f4861cd2
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_create_table_owner_privs.q
@@ -0,0 +1,10 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+
+set user.name=user1;
+
+create table create_table_creator_priv_test(i int);
+
+-- all privileges should have been set for user
+
+show grant user user1 on table create_table_creator_priv_test;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_grant_public_role.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_grant_public_role.q
new file mode 100644
index 0000000000..8473178cd6
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_grant_public_role.q
@@ -0,0 +1,18 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+
+set user.name=user1;
+-- current user has been set (a comment line before the set command causes a parse error!)
+
+CREATE TABLE t_gpr1(i int);
+
+-- all privileges should have been set for user
+
+GRANT ALL ON t_gpr1 TO ROLE public;
+
+SHOW GRANT ON TABLE t_gpr1;
+
+set user.name=user2;
+SHOW CURRENT ROLES;
+-- user2 should be able to describe the table, as public is in the current roles
+DESC t_gpr1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_grant_table_priv.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_grant_table_priv.q
new file mode 100644
index 0000000000..02d364edb4
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_grant_table_priv.q
@@ -0,0 +1,43 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+
+set user.name=user1;
+-- current user has been set (a comment line before the set command causes a parse error!)
+
+CREATE TABLE table_priv1(i int);
+
+-- all privileges should have been set for user
+
+-- grant insert privilege to another user
+GRANT INSERT ON table_priv1 TO USER user2;
+SHOW GRANT USER user2 ON TABLE table_priv1;
+
+-- grant select privilege to another user with grant
+GRANT SELECT ON table_priv1 TO USER user2 with grant option;
+SHOW GRANT USER user2 ON TABLE table_priv1;
+
+set user.name=user2;
+-- change to other user - user2
+-- grant permissions to another user as user2
+GRANT SELECT ON table_priv1 TO USER user3 with grant option;
+SHOW GRANT USER user3 ON TABLE table_priv1;
+
+set user.name=user3;
+-- change to other user - user3
+-- grant permissions to another user as user3
+GRANT SELECT ON table_priv1 TO USER user4 with grant option;
+SHOW GRANT USER user4 ON TABLE table_priv1;
+
+set user.name=user1;
+-- switched back to table owner
+
+-- grant all with grant to user22
+GRANT ALL ON table_priv1 TO USER user22 with grant option;
+SHOW GRANT USER user22 ON TABLE table_priv1;
+
+set user.name=user22;
+
+-- grant all with grant option to user33
+GRANT ALL ON table_priv1 TO USER user33 with grant option;
+SHOW GRANT USER user33 ON TABLE table_priv1;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_index.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_index.q
new file mode 100644
index 0000000000..1f177ffd1f
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_index.q
@@ -0,0 +1,12 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.stats.dbclass=fs;
+set hive.security.authorization.enabled=true;
+create table t1 (a int);
+create index t1_index on table t1(a) as 'COMPACT' WITH DEFERRED REBUILD;
+desc formatted default__t1_t1_index__;
+alter index t1_index on t1 rebuild;
+
+drop table t1;
+
+set hive.security.authorization.enabled=false;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_owner_actions.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_owner_actions.q
new file mode 100644
index 0000000000..85d8b1114b
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_owner_actions.q
@@ -0,0 +1,16 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=user1;
+
+-- actions that require user to be table owner
+create table t1(i int);
+
+ALTER TABLE t1 SET SERDEPROPERTIES ('field.delim' = ',');
+drop table t1;
+
+create table t1(i int);
+create view vt1 as select * from t1;
+
+drop view vt1;
+alter table t1 rename to tnew1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_owner_actions_db.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_owner_actions_db.q
new file mode 100644
index 0000000000..36ab2600dc
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_owner_actions_db.q
@@ -0,0 +1,21 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=hive_admin_user;
+
+set role admin;
+-- create role, db, make role the owner of db
+create role testrole;
+grant role testrole to user hrt_1;
+create database testdb;
+alter database testdb set owner role testrole;
+desc database testdb;
+
+-- actions that require user to be db owner
+-- create table
+use testdb;
+create table foobar (foo string, bar string);
+
+-- drop db
+drop database testdb cascade;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_parts.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_parts.q
new file mode 100644
index 0000000000..bee091b1d3
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_parts.q
@@ -0,0 +1,19 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/a_uri_add_part1;
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/a_uri_add_part2;
+
+
+
+
+-- check add partition without insert privilege
+create table tpart(i int, j int) partitioned by (k string);
+
+alter table tpart add partition (k = '1') location '${system:test.tmp.dir}/a_uri_add_part1/';
+alter table tpart add partition (k = '2') location '${system:test.tmp.dir}/a_uri_add_part2/';
+
+select count(*) from tpart;
+
+analyze table tpart partition (k) compute statistics;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_revoke_table_priv.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_revoke_table_priv.q
new file mode 100644
index 0000000000..ccda3b5157
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_revoke_table_priv.q
@@ -0,0 +1,61 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+
+set user.name=user1;
+-- current user has been set (a comment line before the set command causes a parse error!)
+
+CREATE TABLE table_priv_rev(i int);
+
+-- grant insert privilege to user2
+GRANT INSERT ON table_priv_rev TO USER user2;
+SHOW GRANT USER user2 ON TABLE table_priv_rev;
+SHOW GRANT USER user2 ON ALL;
+
+-- revoke insert privilege from user2
+REVOKE INSERT ON TABLE table_priv_rev FROM USER user2;
+SHOW GRANT USER user2 ON TABLE table_priv_rev;
+
+-- grant all privileges one at a time --
+-- grant insert privilege to user2
+GRANT INSERT ON table_priv_rev TO USER user2;
+SHOW GRANT USER user2 ON TABLE table_priv_rev;
+SHOW GRANT USER user2 ON ALL;
+
+-- grant select privilege to user2, with grant option
+GRANT SELECT ON table_priv_rev TO USER user2 WITH GRANT OPTION;
+SHOW GRANT USER user2 ON TABLE table_priv_rev;
+
+-- grant update privilege to user2
+GRANT UPDATE ON table_priv_rev TO USER user2;
+SHOW GRANT USER user2 ON TABLE table_priv_rev;
+
+-- grant delete privilege to user2
+GRANT DELETE ON table_priv_rev TO USER user2;
+SHOW GRANT USER user2 ON TABLE table_priv_rev;
+
+
+-- start revoking --
+-- revoke update privilege from user2
+REVOKE UPDATE ON TABLE table_priv_rev FROM USER user2;
+SHOW GRANT USER user2 ON TABLE table_priv_rev;
+SHOW GRANT USER user2 ON ALL;
+
+-- revoke DELETE privilege from user2
+REVOKE DELETE ON TABLE table_priv_rev FROM USER user2;
+SHOW GRANT USER user2 ON TABLE table_priv_rev;
+
+-- revoke insert privilege from user2
+REVOKE INSERT ON TABLE table_priv_rev FROM USER user2;
+SHOW GRANT USER user2 ON TABLE table_priv_rev;
+
+-- revoke select privilege from user2
+REVOKE SELECT ON TABLE table_priv_rev FROM USER user2;
+SHOW GRANT USER user2 ON TABLE table_priv_rev;
+SHOW GRANT USER user2 ON ALL;
+
+-- grant all followed by revoke all
+GRANT ALL ON table_priv_rev TO USER user2;
+SHOW GRANT USER user2 ON TABLE table_priv_rev;
+
+REVOKE ALL ON TABLE table_priv_rev FROM USER user2;
+SHOW GRANT USER user2 ON TABLE table_priv_rev;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_role_grant1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_role_grant1.q
new file mode 100644
index 0000000000..f89d0dc985
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_role_grant1.q
@@ -0,0 +1,38 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set user.name=hive_admin_user;
+
+-- enable sql standard authorization
+-- role granting without role keyword
+set role ADMIN;
+create role src_role2;
+grant src_role2 to user user2 ;
+show role grant user user2;
+show roles;
+
+-- revoke role without role keyword
+revoke src_role2 from user user2;
+show role grant user user2;
+show roles;
+
+----------------------------------------
+-- role granting without role keyword, with admin option (syntax check)
+----------------------------------------
+
+create role src_role_wadmin;
+grant src_role_wadmin to user user2 with admin option;
+show role grant user user2;
+
+-- revoke role without role keyword
+revoke src_role_wadmin from user user2;
+show role grant user user2;
+
+
+
+-- drop roles
+show roles;
+drop role src_role2;
+show roles;
+drop role src_role_wadmin;
+show roles;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_role_grant2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_role_grant2.q
new file mode 100644
index 0000000000..984d7ed1d0
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_role_grant2.q
@@ -0,0 +1,34 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+
+set hive.cli.print.header=true;
+set user.name=hive_admin_user;
+set role ADMIN;
+
+----------------------------------------
+-- role granting with admin option
+----------------------------------------
+
+create role src_role_wadmin;
+grant src_role_wadmin to user user2 with admin option;
+show role grant user user2;
+show principals src_role_wadmin;
+
+set user.name=user2;
+set role src_role_wadmin;
+grant src_role_wadmin to user user3;
+show role grant user user3;
+
+set user.name=hive_admin_user;
+set role ADMIN;
+show principals src_role_wadmin;
+
+set user.name=user2;
+set role src_role_wadmin;
+revoke src_role_wadmin from user user3;
+show role grant user user3;
+
+set user.name=hive_admin_user;
+set role ADMIN;
+show principals src_role_wadmin;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_set_show_current_role.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_set_show_current_role.q
new file mode 100644
index 0000000000..6b5af6e94e
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_set_show_current_role.q
@@ -0,0 +1,21 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set user.name=hive_admin_user;
+set role ADMIN;
+show current roles;
+
+create role r1;
+grant role r1 to user hive_admin_user;
+set role r1;
+show current roles;
+
+set role PUBLIC;
+show current roles;
+
+set role ALL;
+show current roles;
+
+set role ADMIN;
+drop role r1;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_view_sqlstd.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_view_sqlstd.q
new file mode 100644
index 0000000000..bd7bbfedf8
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/authorization_view_sqlstd.q
@@ -0,0 +1,66 @@
+set hive.users.in.admin.role=hive_admin_user;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=user1;
+
+-- Test view authorization and 'show grant' variants
+
+create table t1(i int, j int, k int);
+show grant on table t1;
+
+-- protecting certain columns
+create view vt1 as select i,k from t1;
+
+-- protecting certain rows
+create view vt2 as select * from t1 where i > 1;
+
+show grant user user1 on all;
+
+--view grant to user
+-- try with and without table keyword
+
+grant select on vt1 to user user2;
+grant insert on table vt1 to user user3;
+
+show grant user user2 on table vt1;
+show grant user user3 on table vt1;
+
+
+set user.name=user2;
+select * from vt1;
+
+set user.name=user1;
+
+grant all on table vt2 to user user2;
+show grant user user2 on table vt2;
+show grant user user2 on all;
+
+revoke all on vt2 from user user2;
+show grant user user2 on table vt2;
+
+show grant on table vt2;
+
+
+revoke select on table vt1 from user user2;
+show grant user user2 on table vt1;
+
+show grant user user2 on all;
+
+-- grant view privileges to roles, after the next statements
+show grant user user3 on table vt1;
+
+set user.name=hive_admin_user;
+show current roles;
+set role ADMIN;
+create role role_v;
+grant role_v to user user4 ;
+show role grant user user4;
+show roles;
+
+grant all on table vt2 to role role_v;
+show grant role role_v on table vt2;
+
+revoke delete on table vt2 from role role_v;
+show grant role role_v on table vt2;
+show grant on table vt2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join25.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join25.q
index eaf7489a17..b8734abfd1 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join25.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join25.q
@@ -1,3 +1,5 @@
+set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecutePrinter,org.apache.hadoop.hive.ql.hooks.PrintCompletedTasksHook;
+
set hive.auto.convert.join = true;
set hive.mapjoin.localtask.max.memory.usage = 0.0001;
set hive.mapjoin.check.memory.rows = 2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join32.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join32.q
index 289bfbc6da..e7846eeecf 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join32.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join32.q
@@ -32,10 +32,10 @@ from studenttab10k_smb s join votertab10k_smb v
on (s.name = v.name)
group by s.name;
-load data local inpath '../data/files/empty1.txt' into table studenttab10k_smb;
-load data local inpath '../data/files/empty2.txt' into table studenttab10k_smb;
-load data local inpath '../data/files/empty1.txt' into table votertab10k_smb;
-load data local inpath '../data/files/empty2.txt' into table votertab10k_smb;
+load data local inpath '../../data/files/empty1.txt' into table studenttab10k_smb;
+load data local inpath '../../data/files/empty2.txt' into table studenttab10k_smb;
+load data local inpath '../../data/files/empty1.txt' into table votertab10k_smb;
+load data local inpath '../../data/files/empty2.txt' into table votertab10k_smb;
explain select s.name, count(distinct registration)
from studenttab10k_smb s join votertab10k_smb v
@@ -51,10 +51,10 @@ group by s.name;
create table studenttab10k_part (name string, age int, gpa double) partitioned by (p string) clustered by (name) sorted by (name) into 2 buckets;
create table votertab10k_part (name string, age int, registration string, contributions float) partitioned by (p string) clustered by (name) sorted by (name) into 2 buckets;
-load data local inpath '../data/files/empty1.txt' into table studenttab10k_part partition (p='foo');
-load data local inpath '../data/files/empty2.txt' into table studenttab10k_part partition (p='foo');
-load data local inpath '../data/files/empty1.txt' into table votertab10k_part partition (p='foo');
-load data local inpath '../data/files/empty2.txt' into table votertab10k_part partition (p='foo');
+load data local inpath '../../data/files/empty1.txt' into table studenttab10k_part partition (p='foo');
+load data local inpath '../../data/files/empty2.txt' into table studenttab10k_part partition (p='foo');
+load data local inpath '../../data/files/empty1.txt' into table votertab10k_part partition (p='foo');
+load data local inpath '../../data/files/empty2.txt' into table votertab10k_part partition (p='foo');
explain select s.name, count(distinct registration)
from studenttab10k_part s join votertab10k_part v
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join_filters.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join_filters.q
index 458504cdc3..eefd2111c9 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join_filters.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join_filters.q
@@ -1,7 +1,7 @@
set hive.auto.convert.join = true;
CREATE TABLE myinput1(key int, value int);
-LOAD DATA LOCAL INPATH '../data/files/in3.txt' INTO TABLE myinput1;
+LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE myinput1;
SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value;
@@ -38,10 +38,10 @@ SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN my
CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS;
-LOAD DATA LOCAL INPATH '../data/files/in1.txt' into table smb_input1;
-LOAD DATA LOCAL INPATH '../data/files/in2.txt' into table smb_input1;
-LOAD DATA LOCAL INPATH '../data/files/in1.txt' into table smb_input2;
-LOAD DATA LOCAL INPATH '../data/files/in2.txt' into table smb_input2;
+LOAD DATA LOCAL INPATH '../../data/files/in1.txt' into table smb_input1;
+LOAD DATA LOCAL INPATH '../../data/files/in2.txt' into table smb_input1;
+LOAD DATA LOCAL INPATH '../../data/files/in1.txt' into table smb_input2;
+LOAD DATA LOCAL INPATH '../../data/files/in2.txt' into table smb_input2;
SET hive.optimize.bucketmapjoin = true;
SET hive.optimize.bucketmapjoin.sortedmerge = true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join_nulls.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join_nulls.q
index 766348d7c0..d1b7bb4018 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join_nulls.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join_nulls.q
@@ -1,7 +1,7 @@
set hive.auto.convert.join = true;
CREATE TABLE myinput1(key int, value int);
-LOAD DATA LOCAL INPATH '../data/files/in1.txt' INTO TABLE myinput1;
+LOAD DATA LOCAL INPATH '../../data/files/in1.txt' INTO TABLE myinput1;
SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b;
SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join_reordering_values.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join_reordering_values.q
index 46a4a0d533..55bd975803 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join_reordering_values.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join_reordering_values.q
@@ -1,13 +1,13 @@
-- HIVE-5056 RS has expression list for values, but it's ignored in MapJoinProcessor
create table testsrc ( `key` int,`val` string);
-load data local inpath '../data/files/kv1.txt' overwrite into table testsrc;
+load data local inpath '../../data/files/kv1.txt' overwrite into table testsrc;
drop table if exists orderpayment_small;
create table orderpayment_small (`dealid` int,`date` string,`time` string, `cityid` int, `userid` int);
-insert overwrite table orderpayment_small select 748, '2011-03-24', '2011-03-24', 55 ,5372613 from testsrc limit 1;
+insert overwrite table orderpayment_small select 748, '2011-03-24', '2011-03-24', 55 ,5372613 from testsrc tablesample (1 rows);
drop table if exists user_small;
create table user_small( userid int);
-insert overwrite table user_small select key from testsrc limit 100;
+insert overwrite table user_small select key from testsrc tablesample (100 rows);
set hive.auto.convert.join.noconditionaltask.size = 200;
explain extended SELECT
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join_without_localtask.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join_without_localtask.q
new file mode 100644
index 0000000000..f23e227f1e
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_join_without_localtask.q
@@ -0,0 +1,29 @@
+set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecutePrinter,org.apache.hadoop.hive.ql.hooks.PrintCompletedTasksHook;
+set hive.auto.convert.join=true;
+set hive.auto.convert.join.use.nonstaged=true;
+
+set hive.auto.convert.join.noconditionaltask.size=100;
+
+explain
+select a.* from src a join src b on a.key=b.key limit 40;
+
+select a.* from src a join src b on a.key=b.key limit 40;
+
+explain
+select a.* from src a join src b on a.key=b.key join src c on a.value=c.value limit 40;
+
+select a.* from src a join src b on a.key=b.key join src c on a.value=c.value limit 40;
+
+set hive.auto.convert.join.noconditionaltask.size=100;
+
+explain
+select a.* from src a join src b on a.key=b.key join src c on a.value=c.value where a.key>100 limit 40;
+
+select a.* from src a join src b on a.key=b.key join src c on a.value=c.value where a.key>100 limit 40;
+
+set hive.mapjoin.localtask.max.memory.usage = 0.0001;
+set hive.mapjoin.check.memory.rows = 2;
+
+-- the tiny map-join memory limits set above make the local task fail, so this falls back to a common join
+select a.* from src a join src b on a.key=b.key join src c on a.value=c.value where a.key>100 limit 40;
+
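+-- Illustrative sketch, not part of the original test: the explain for the same
+-- query should show the conditional map-join task together with its backup
+-- common-join stage that runs on fallback
+explain
+select a.* from src a join src b on a.key=b.key join src c on a.value=c.value where a.key>100 limit 40;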
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_1.q
index e76b560f41..ddd2c1882e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_1.q
@@ -2,19 +2,19 @@
CREATE TABLE bucket_small (key string, value string) partitioned by (ds string)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
set hive.auto.convert.join=true;
set hive.auto.convert.sortmerge.join=true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_11.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_11.q
index f9fa1e4d4e..da2e26fde7 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_11.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_11.q
@@ -1,19 +1,19 @@
-- small 1 part, 2 bucket & big 2 part, 4 bucket
CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
set hive.auto.convert.join=true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_12.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_12.q
index db53a656a8..f434b33603 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_12.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_12.q
@@ -2,19 +2,19 @@
CREATE TABLE bucket_small (key string, value string) partitioned by (ds string)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
set hive.auto.convert.join=true;
set hive.auto.convert.sortmerge.join=true;
@@ -23,9 +23,9 @@ set hive.optimize.bucketmapjoin.sortedmerge = true;
CREATE TABLE bucket_medium (key string, value string) partitioned by (ds string)
CLUSTERED BY (key) SORTED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_medium partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_medium partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_medium partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_medium partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_medium partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_medium partition(ds='2008-04-08');
explain extended select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key;
select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q
new file mode 100644
index 0000000000..7e9555d791
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q
@@ -0,0 +1,92 @@
+set hive.auto.convert.join=true;
+
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.enforce.bucketing=true;
+set hive.enforce.sorting=true;
+
+set hive.auto.convert.sortmerge.join=true;
+set hive.optimize.bucketmapjoin = true;
+set hive.optimize.bucketmapjoin.sortedmerge = true;
+
+CREATE TABLE stage_bucket_big
+(
+key BIGINT,
+value STRING
+)
+PARTITIONED BY (file_tag STRING);
+
+CREATE TABLE bucket_big
+(
+key BIGINT,
+value STRING
+)
+PARTITIONED BY (day STRING, pri bigint)
+clustered by (key) sorted by (key) into 12 buckets
+stored as RCFile;
+
+CREATE TABLE stage_bucket_small
+(
+key BIGINT,
+value string
+)
+PARTITIONED BY (file_tag STRING);
+
+CREATE TABLE bucket_small
+(
+key BIGINT,
+value string
+)
+PARTITIONED BY (pri bigint)
+clustered by (key) sorted by (key) into 12 buckets
+stored as RCFile;
+
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' overwrite into table stage_bucket_small partition (file_tag='1');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' overwrite into table stage_bucket_small partition (file_tag='2');
+
+insert overwrite table bucket_small partition(pri)
+select
+key,
+value,
+file_tag as pri
+from
+stage_bucket_small
+where file_tag between 1 and 2;
+
+load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' overwrite into table stage_bucket_big partition (file_tag='1');
+
+insert overwrite table bucket_big partition(day,pri)
+select
+key,
+value,
+'day1' as day,
+1 as pri
+from
+stage_bucket_big
+where
+file_tag='1';
+
+select
+a.key ,
+a.value ,
+b.value ,
+'day1' as day,
+1 as pri
+from
+(
+select
+key,
+value
+from bucket_big where day='day1'
+) a
+left outer join
+(
+select
+key,
+value
+from bucket_small
+where pri between 1 and 2
+) b
+on
+(a.key = b.key)
+;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_2.q
index 75339778af..eef5483b53 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_2.q
@@ -1,16 +1,16 @@
-- small 1 part, 4 bucket & big 2 part, 2 bucket
CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
set hive.auto.convert.join=true;
set hive.auto.convert.sortmerge.join=true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_3.q
index 39a695fdf0..c094ecdb6b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_3.q
@@ -1,16 +1,16 @@
-- small 2 part, 2 bucket & big 1 part, 4 bucket
CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
set hive.auto.convert.join=true;
set hive.auto.convert.sortmerge.join=true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_4.q
index 6072272c47..18acfbfb76 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_4.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_4.q
@@ -1,18 +1,18 @@
-- small 2 part, 4 bucket & big 1 part, 2 bucket
CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
set hive.auto.convert.join=true;
set hive.auto.convert.sortmerge.join=true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_5.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_5.q
index a28ce3de5f..98d6df9b19 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_5.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_5.q
@@ -1,13 +1,13 @@
-- small no part, 4 bucket & big no part, 2 bucket
CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small;
-load data local inpath '../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small;
-load data local inpath '../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small;
-load data local inpath '../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small;
+load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small;
+load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small;
+load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small;
+load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small;
CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big;
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big;
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big;
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big;
set hive.auto.convert.sortmerge.join=true;
set hive.optimize.bucketmapjoin = true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_7.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_7.q
index d62f637721..e19cc317f3 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_7.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_7.q
@@ -1,21 +1,21 @@
-- small 2 part, 4 bucket & big 2 part, 2 bucket
CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
set hive.auto.convert.join=true;
set hive.auto.convert.sortmerge.join=true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_8.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_8.q
index 6302a1be14..a66806f21a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_8.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_8.q
@@ -1,21 +1,21 @@
-- small 2 part, 2 bucket & big 2 part, 4 bucket
CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
set hive.auto.convert.join=true;
set hive.auto.convert.sortmerge.join=true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_compression_enabled.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_compression_enabled.q
index 8367206231..cb6f173ccf 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_compression_enabled.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_compression_enabled.q
@@ -35,7 +35,7 @@ TBLPROPERTIES ('avro.schema.literal'='{
]
}');
-LOAD DATA LOCAL INPATH '../data/files/doctors.avro' INTO TABLE doctors4;
+LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4;
set hive.exec.compress.output=true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_evolved_schemas.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_evolved_schemas.q
index 3fe8ff12b4..f723cbcc60 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_evolved_schemas.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_evolved_schemas.q
@@ -36,7 +36,7 @@ TBLPROPERTIES ('avro.schema.literal'='{
DESCRIBE doctors_with_new_field;
-LOAD DATA LOCAL INPATH '../data/files/doctors.avro' INTO TABLE doctors_with_new_field;
+LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors_with_new_field;
SELECT * FROM doctors_with_new_field ORDER BY first_name;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_joins.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_joins.q
index 25b77c0627..4c33a83466 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_joins.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_joins.q
@@ -37,7 +37,7 @@ TBLPROPERTIES ('avro.schema.literal'='{
DESCRIBE doctors4;
-LOAD DATA LOCAL INPATH '../data/files/doctors.avro' INTO TABLE doctors4;
+LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4;
CREATE TABLE episodes
ROW FORMAT
@@ -70,7 +70,7 @@ TBLPROPERTIES ('avro.schema.literal'='{
DESCRIBE episodes;
-LOAD DATA LOCAL INPATH '../data/files/episodes.avro' INTO TABLE episodes;
+LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes;
SELECT e.title, e.air_date, d.first_name, d.last_name, d.extra_field, e.air_date
FROM doctors4 d JOIN episodes e ON (d.number=e.doctor)
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_nullable_fields.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_nullable_fields.q
index 584c6f740b..f90ceb96f5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_nullable_fields.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_nullable_fields.q
@@ -17,7 +17,7 @@ CREATE TABLE test_serializer(string1 STRING,
ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' COLLECTION ITEMS TERMINATED BY ':' MAP KEYS TERMINATED BY '#' LINES TERMINATED BY '\n'
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/csv.txt' INTO TABLE test_serializer;
+LOAD DATA LOCAL INPATH '../../data/files/csv.txt' INTO TABLE test_serializer;
CREATE TABLE as_avro
ROW FORMAT
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_partitioned.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_partitioned.q
index 8e4d40f2bd..6fe5117026 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_partitioned.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_partitioned.q
@@ -28,7 +28,7 @@ TBLPROPERTIES ('avro.schema.literal'='{
]
}');
-LOAD DATA LOCAL INPATH '../data/files/episodes.avro' INTO TABLE episodes;
+LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes;
CREATE TABLE episodes_partitioned
PARTITIONED BY (doctor_pt INT)
@@ -66,7 +66,7 @@ INSERT OVERWRITE TABLE episodes_partitioned PARTITION (doctor_pt) SELECT title,
SELECT * FROM episodes_partitioned WHERE doctor_pt > 6 ORDER BY air_date;
-- Verify that Fetch works in addition to Map
-SELECT * FROM episodes_partitioned LIMIT 5;
+SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5;
-- Fetch w/filter to specific partition
SELECT * FROM episodes_partitioned WHERE doctor_pt = 6;
-- Fetch w/non-existent partition
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_sanity_test.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_sanity_test.q
index e3f8b07b30..dbb999503b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_sanity_test.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/avro_sanity_test.q
@@ -30,7 +30,7 @@ TBLPROPERTIES ('avro.schema.literal'='{
DESCRIBE doctors;
-LOAD DATA LOCAL INPATH '../data/files/doctors.avro' INTO TABLE doctors;
+LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors;
SELECT * FROM doctors ORDER BY number;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/binary_constant.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/binary_constant.q
index e0a8b95401..4f80dc33c9 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/binary_constant.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/binary_constant.q
@@ -1 +1,3 @@
-select cast(cast('a' as binary) as string) from src limit 1;
+set hive.fetch.task.conversion=more;
+
+select cast(cast('a' as binary) as string) from src tablesample (1 rows);
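Editor's note: two harness-level changes land in this tiny test. `set hive.fetch.task.conversion=more;` lets a simple projection run as a plain fetch task instead of launching a MapReduce job, and `tablesample (1 rows)` replaces `limit 1` so the single row is taken at the input level rather than by truncating job output. A hedged sketch of the same pattern, using a hypothetical table t(value string):

set hive.fetch.task.conversion=more;
-- t is hypothetical; any row-oriented table behaves the same way
select upper(value) from t tablesample (2 rows);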
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/binary_table_colserde.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/binary_table_colserde.q
index eadf07d14d..1f5c98a239 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/binary_table_colserde.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/binary_table_colserde.q
@@ -3,6 +3,7 @@ drop table ba_test;
-- Everything in ba_table1.q + columnar serde in RCFILE.
create table ba_test (ba_key binary, ba_val binary) stored as rcfile;
+alter table ba_test set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
describe extended ba_test;
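Editor's note: the added `alter table ... set serde` pins ba_test to the text-based ColumnarSerDe. Presumably this keeps the golden output stable across Hive versions, since newer Hive releases can default RCFile tables to LazyBinaryColumnarSerDe (see hive.default.rcfile.serde). A hedged alternative sketch that pins the serde at creation time instead:

create table ba_test_explicit (ba_key binary, ba_val binary)
row format serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
stored as rcfile;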
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/binarysortable_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/binarysortable_1.q
index a98a2305cf..39c1d25e73 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/binarysortable_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/binarysortable_1.q
@@ -3,7 +3,7 @@ ROW FORMAT DELIMITED
FIELDS TERMINATED BY '9'
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/string.txt' INTO TABLE mytable;
+LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable;
EXPLAIN
SELECT REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(key, '\001', '^A'), '\0', '^@'), '\002', '^B'), value
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_if_with_path_filter.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_if_with_path_filter.q
new file mode 100644
index 0000000000..956a61f7bd
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_if_with_path_filter.q
@@ -0,0 +1,15 @@
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/bmjpathfilter;
+
+create table t1 (dt string) location '${system:test.tmp.dir}/bmjpathfilter/t1';
+Create table t2 (dt string) stored as orc;
+dfs -touchz ${system:test.tmp.dir}/bmjpathfilter/t1/_SUCCESS;
+
+SET hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
+SET hive.optimize.bucketmapjoin=true;
+
+SELECT /*+ MAPJOIN(b) */ a.dt FROM t1 a JOIN t2 b ON (a.dt = b.dt);
+
+SET hive.optimize.bucketmapjoin=false;
+set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
+
+dfs -rmr ${system:test.tmp.dir}/bmjpathfilter;
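Editor's note: the new test plants a zero-length _SUCCESS marker inside t1's directory and then runs a map join under BucketizedHiveInputFormat; the point appears to be that the input format's path filter must skip such bookkeeping files rather than trip over them. A hedged, self-contained sketch of the same scenario with hypothetical names:

dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/pathfilter_demo;
create table demo_src (dt string) location '${system:test.tmp.dir}/pathfilter_demo/demo_src';
dfs -touchz ${system:test.tmp.dir}/pathfilter_demo/demo_src/_SUCCESS;
-- the empty marker file must not contribute rows or break the scan
select count(*) from demo_src;
dfs -rmr ${system:test.tmp.dir}/pathfilter_demo;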
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_map_join_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_map_join_1.q
index 33dd5d5cd2..6bdb09ed64 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_map_join_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_map_join_1.q
@@ -9,8 +9,8 @@ sorted by (key, value) into 1 BUCKETS stored as textfile;
create table table2(key string, value string) clustered by (value, key)
sorted by (value, key) into 1 BUCKETS stored as textfile;
-load data local inpath '../data/files/SortCol1Col2.txt' overwrite into table table1;
-load data local inpath '../data/files/SortCol2Col1.txt' overwrite into table table2;
+load data local inpath '../../data/files/SortCol1Col2.txt' overwrite into table table1;
+load data local inpath '../../data/files/SortCol2Col1.txt' overwrite into table table2;
set hive.optimize.bucketmapjoin = true;
set hive.optimize.bucketmapjoin.sortedmerge = true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_map_join_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_map_join_2.q
index d1097e70a9..07f6d150ea 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_map_join_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_map_join_2.q
@@ -9,8 +9,8 @@ sorted by (key desc, value desc) into 1 BUCKETS stored as textfile;
create table table2(key string, value string) clustered by (value, key)
sorted by (value desc, key desc) into 1 BUCKETS stored as textfile;
-load data local inpath '../data/files/SortCol1Col2.txt' overwrite into table table1;
-load data local inpath '../data/files/SortCol2Col1.txt' overwrite into table table2;
+load data local inpath '../../data/files/SortCol1Col2.txt' overwrite into table table1;
+load data local inpath '../../data/files/SortCol2Col1.txt' overwrite into table table2;
set hive.optimize.bucketmapjoin = true;
set hive.optimize.bucketmapjoin.sortedmerge = true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
new file mode 100644
index 0000000000..c9266a59c3
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_map_join_tez1.q
@@ -0,0 +1,85 @@
+set hive.auto.convert.join=true;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=10000;
+
+CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+
+set hive.enforce.bucketing=true;
+set hive.enforce.sorting = true;
+set hive.optimize.bucketingsorting=false;
+insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part;
+
+CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin;
+
+set hive.convert.join.bucket.mapjoin.tez = true;
+explain
+select a.key, a.value, b.value
+from tab a join tab_part b on a.key = b.key;
+
+-- one side is really bucketed. srcbucket_mapjoin is not really a bucketed table.
+-- In this case the sub-query is chosen as the big table.
+explain
+select a.k1, a.v1, b.value
+from (select sum(substr(srcbucket_mapjoin.value,5)) as v1, key as k1 from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a
+join tab b on a.k1 = b.key;
+
+explain
+select a.k1, a.v1, b.value
+from (select sum(substr(tab.value,5)) as v1, key as k1 from tab_part join tab on tab_part.key = tab.key GROUP BY tab.key) a
+join tab b on a.k1 = b.key;
+
+explain
+select a.k1, a.v1, b.value
+from (select sum(substr(x.value,5)) as v1, x.key as k1 from tab x join tab y on x.key = y.key GROUP BY x.key) a
+join tab_part b on a.k1 = b.key;
+
+-- multi-way join
+explain
+select a.key, a.value, b.value
+from tab_part a join tab b on a.key = b.key join tab c on a.key = c.key;
+
+explain
+select a.key, a.value, c.value
+from (select x.key, x.value from tab_part x join tab y on x.key = y.key) a join tab c on a.key = c.key;
+
+-- in this case sub-query is the small table
+explain
+select a.key, a.value, b.value
+from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a
+join tab_part b on a.key = b.key;
+
+set hive.map.aggr=false;
+explain
+select a.key, a.value, b.value
+from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a
+join tab_part b on a.key = b.key;
+
+-- join on non-bucketed column results in broadcast join.
+explain
+select a.key, a.value, b.value
+from tab a join tab_part b on a.value = b.value;
+
+CREATE TABLE tab1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab1
+select key,value from srcbucket_mapjoin;
+
+explain
+select a.key, a.value, b.value
+from tab1 a join tab_part b on a.key = b.key;
+
+explain select a.key, b.key from tab_part a join tab_part c on a.key = c.key join tab_part b on a.value = b.value;
+
+
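Editor's note: bucket_map_join_tez1.q deliberately sets hive.auto.convert.join.noconditionaltask.size to a tiny 10000 bytes so every candidate join qualifies for conversion, and its EXPLAIN statements are the actual assertions, checked against golden files. A hedged sketch of a negative control under the same setup, flipping the Tez bucket-map-join flag back off:

set hive.convert.join.bucket.mapjoin.tez = false;
explain
select a.key, a.value, b.value
from tab a join tab_part b on a.key = b.key;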
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
new file mode 100644
index 0000000000..a3588ec94c
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucket_map_join_tez2.q
@@ -0,0 +1,50 @@
+set hive.auto.convert.join=true;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=10000;
+
+CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+
+set hive.enforce.bucketing=true;
+set hive.enforce.sorting = true;
+set hive.optimize.bucketingsorting=false;
+insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part;
+
+CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin;
+
+set hive.convert.join.bucket.mapjoin.tez = true;
+
+explain select a.key, b.key from tab_part a join tab_part c on a.key = c.key join tab_part b on a.value = b.value;
+
+CREATE TABLE tab1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab1
+select key,value from srcbucket_mapjoin;
+
+explain
+select a.key, a.value, b.value
+from tab1 a join src b on a.key = b.key;
+
+explain
+select a.key, b.key from (select key from tab_part where key > 1) a join (select key from tab_part where key > 2) b on a.key = b.key;
+
+explain
+select a.key, b.key from (select key from tab_part where key > 1) a left outer join (select key from tab_part where key > 2) b on a.key = b.key;
+
+explain
+select a.key, b.key from (select key from tab_part where key > 1) a right outer join (select key from tab_part where key > 2) b on a.key = b.key;
+
+explain select a.key, b.key from (select distinct key from tab) a join tab b on b.key = a.key;
+
+explain select a.value, b.value from (select distinct value from tab) a join tab b on b.key = a.value;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_1.q
index 5b013995d5..047a2a5230 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_1.q
@@ -1,18 +1,18 @@
-- small 1 part, 2 bucket & big 2 part, 4 bucket
CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
set hive.optimize.bucketmapjoin = true;
explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_2.q
index f952f2ee6d..d58e8448a0 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_2.q
@@ -1,16 +1,16 @@
-- small 1 part, 4 bucket & big 2 part, 2 bucket
CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
set hive.optimize.bucketmapjoin = true;
explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_3.q
index 461fbb196e..fd80174f23 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_3.q
@@ -1,16 +1,16 @@
-- small 2 part, 2 bucket & big 1 part, 4 bucket
CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
set hive.optimize.bucketmapjoin = true;
explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_4.q
index 366da4473b..5d21ea5d63 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_4.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_4.q
@@ -1,18 +1,18 @@
-- small 2 part, 4 bucket & big 1 part, 2 bucket
CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
set hive.optimize.bucketmapjoin = true;
explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_5.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_5.q
index 411fed392a..5078072677 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_5.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_5.q
@@ -1,13 +1,13 @@
-- small no part, 4 bucket & big no part, 2 bucket
CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small;
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small;
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small;
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small;
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small;
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small;
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small;
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small;
CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big;
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big;
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big;
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big;
set hive.optimize.bucketmapjoin = true;
explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_6.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_6.q
index 204d1e9010..0f7c72f4ea 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_6.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_6.q
@@ -1,16 +1,16 @@
-- small no part, 4 bucket & big 2 part, 2 bucket
CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small;
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small;
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small;
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small;
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small;
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small;
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small;
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small;
CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
set hive.optimize.bucketmapjoin = true;
explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_7.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_7.q
index b0bca460f3..c528f61302 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_7.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_7.q
@@ -1,21 +1,21 @@
-- small 2 part, 4 bucket & big 2 part, 2 bucket
CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
set hive.optimize.bucketmapjoin = true;
explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_8.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_8.q
index 9533c5512f..27c55a36f4 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_8.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketcontext_8.q
@@ -1,21 +1,21 @@
-- small 2 part, 2 bucket & big 2 part, 4 bucket
CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09');
CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
set hive.optimize.bucketmapjoin = true;
explain extended select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q
index 4c7f0c9595..d2e12e82d4 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketizedhiveinputformat.q
@@ -3,7 +3,7 @@ set mapred.min.split.size = 64;
CREATE TABLE T1(name STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1;
CREATE TABLE T2(name STRING) STORED AS SEQUENCEFILE;
@@ -26,8 +26,8 @@ EXPLAIN SELECT COUNT(1) FROM T2;
SELECT COUNT(1) FROM T2;
CREATE TABLE T3(name STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE T3;
-LOAD DATA LOCAL INPATH '../data/files/kv2.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T3;
EXPLAIN SELECT COUNT(1) FROM T3;
SELECT COUNT(1) FROM T3;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketizedhiveinputformat_auto.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketizedhiveinputformat_auto.q
index 9cdfe8e6e9..8b7535dce1 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketizedhiveinputformat_auto.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketizedhiveinputformat_auto.q
@@ -1,17 +1,17 @@
CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08');
CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08');
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09');
set hive.optimize.bucketmapjoin = true;
select /* + MAPJOIN(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin1.q
index 2bd8e1e2e5..204e75988f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin1.q
@@ -25,16 +25,16 @@ select /*+mapjoin(a)*/ a.key, a.value, b.value
from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b
on a.key=b.key where b.ds="2008-04-08";
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint);
create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin10.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin10.q
index cf4222bdd7..09c0ae2bfc 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin10.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin10.q
@@ -2,23 +2,23 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (key) INTO 3 BUCKETS;
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS;
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2');
ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 3 BUCKETS;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin11.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin11.q
index e10ab522a2..d330b7718c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin11.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin11.q
@@ -2,25 +2,25 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (key) INTO 4 BUCKETS;
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='2');
CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS;
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='2');
set hive.optimize.bucketmapjoin=true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin12.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin12.q
index 8139000f4a..43a9de4e8e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin12.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin12.q
@@ -2,20 +2,20 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
ALTER TABLE srcbucket_mapjoin_part_2 NOT CLUSTERED;
CREATE TABLE srcbucket_mapjoin_part_3 (key INT, value STRING) PARTITIONED BY (part STRING)
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_3 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_3 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_3 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_3 PARTITION (part='1');
ALTER TABLE srcbucket_mapjoin_part_3 CLUSTERED BY (key) INTO 2 BUCKETS;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin2.q
index fdbadfcb04..108b67a9f7 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin2.q
@@ -1,12 +1,12 @@
CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint);
create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint);
@@ -76,8 +76,8 @@ from bucketmapjoin_hash_result_1 a left outer join bucketmapjoin_hash_result_2 b
on a.key = b.key;
-- HIVE-3210
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09');
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09');
set hive.optimize.bucketmapjoin = true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin3.q
index 8fda802315..78c23d5132 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin3.q
@@ -1,16 +1,16 @@
CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint);
create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin4.q
index c1a8f2ab7b..54626e7c48 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin4.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin4.q
@@ -1,17 +1,17 @@
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint);
create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin5.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin5.q
index 2df49b64f3..72cffc2da2 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin5.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin5.q
@@ -1,22 +1,22 @@
CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09');
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09');
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09');
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-09');
CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09');
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09');
create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint);
create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin7.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin7.q
index 3a96c642b3..a15570b776 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin7.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin7.q
@@ -2,13 +2,13 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (ds STRING, hr STRING)
CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (ds='2008-04-08', hr='0');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (ds='2008-04-08', hr='0');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (ds='2008-04-08', hr='0');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (ds='2008-04-08', hr='0');
CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (ds STRING, hr STRING)
CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (ds='2008-04-08', hr='0');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (ds='2008-04-08', hr='0');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (ds='2008-04-08', hr='0');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (ds='2008-04-08', hr='0');
set hive.optimize.bucketmapjoin=true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin8.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin8.q
index 5e8daa5f8f..f467ea6cb3 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin8.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin8.q
@@ -2,13 +2,13 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 3 BUCKETS;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin9.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin9.q
index 86344a53f5..f1d5f58184 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin9.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin9.q
@@ -2,14 +2,14 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
CREATE TABLE srcbucket_mapjoin_part_1 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_1 PARTITION (part='1');
CREATE TABLE srcbucket_mapjoin_part_2 (key INT, value STRING) PARTITIONED BY (part STRING)
CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS;
@@ -28,8 +28,8 @@ ON a.key = b.key AND a.part = '1' and b.part = '1';
ALTER TABLE srcbucket_mapjoin_part_2 DROP PARTITION (part='1');
ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (value) INTO 2 BUCKETS;
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
-LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part_2 PARTITION (part='1');
ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 BUCKETS;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin_negative.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin_negative.q
index d7634333e2..ea140ddda9 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin_negative.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin_negative.q
@@ -3,13 +3,13 @@
CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin_negative2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin_negative2.q
index 901f056759..e2c0d8c591 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin_negative2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin_negative2.q
@@ -1,12 +1,12 @@
CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09');
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09');
set hive.optimize.bucketmapjoin = true;
create table bucketmapjoin_tmp_result (key string , value1 string, value2 string);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin_negative3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin_negative3.q
index d66e1238bb..6398fff909 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin_negative3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketmapjoin_negative3.q
@@ -8,21 +8,21 @@ create table test2 (key string, value string) clustered by (value) sorted by (va
create table test3 (key string, value string) clustered by (key, value) sorted by (key, value) into 3 buckets;
create table test4 (key string, value string) clustered by (value, key) sorted by (value, key) into 3 buckets;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test1;
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test1;
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test1;
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE test1;
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE test1;
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE test1;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test2;
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test2;
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test2;
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE test2;
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE test2;
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE test2;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test3;
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test3;
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test3;
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE test3;
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE test3;
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE test3;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE test4;
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE test4;
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE test4;
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE test4;
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE test4;
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE test4;
set hive.optimize.bucketmapjoin = true;
-- should be allowed
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cast_to_int.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cast_to_int.q
index 729ffdc868..b1551f2feb 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cast_to_int.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cast_to_int.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
-- cast string floats to integer types
select
cast('1' as float),
@@ -27,4 +29,4 @@ select
cast('127' as tinyint),
cast('1.0a' as int),
cast('-1.-1' as int)
-from src limit 1;
+from src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_1.q
new file mode 100644
index 0000000000..840a84a7b3
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_1.q
@@ -0,0 +1,32 @@
+drop table char1;
+drop table char1_1;
+
+create table char1 (key char(10), value char(20));
+create table char1_1 (key string, value string);
+
+-- load from file
+load data local inpath '../../data/files/srcbucket0.txt' overwrite into table char1;
+select * from char1 order by key, value limit 2;
+
+-- insert overwrite, from same/different length char
+insert overwrite table char1
+ select cast(key as char(10)), cast(value as char(15)) from src order by key, value limit 2;
+select key, value from char1 order by key, value;
+
+-- insert overwrite, from string
+insert overwrite table char1
+ select key, value from src order by key, value limit 2;
+select key, value from char1 order by key, value;
+
+-- insert string from char
+insert overwrite table char1_1
+ select key, value from char1 order by key, value limit 2;
+select key, value from char1_1 order by key, value;
+
+-- respect string length
+insert overwrite table char1
+ select key, cast(value as char(3)) from src order by key, value limit 2;
+select key, value from char1 order by key, value;
+
+drop table char1;
+drop table char1_1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_2.q
new file mode 100644
index 0000000000..3e4900cb7c
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_2.q
@@ -0,0 +1,36 @@
+drop table char_2;
+
+create table char_2 (
+ key char(10),
+ value char(20)
+);
+
+insert overwrite table char_2 select * from src;
+
+select value, sum(cast(key as int)), count(*) numrows
+from src
+group by value
+order by value asc
+limit 5;
+
+-- should match the query from src
+select value, sum(cast(key as int)), count(*) numrows
+from char_2
+group by value
+order by value asc
+limit 5;
+
+select value, sum(cast(key as int)), count(*) numrows
+from src
+group by value
+order by value desc
+limit 5;
+
+-- should match the query from src
+select value, sum(cast(key as int)), count(*) numrows
+from char_2
+group by value
+order by value desc
+limit 5;
+
+drop table char_2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_cast.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_cast.q
new file mode 100644
index 0000000000..7f44d4d508
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_cast.q
@@ -0,0 +1,92 @@
+
+-- Cast from char to other data types
+select
+ cast(cast('11' as string) as tinyint),
+ cast(cast('11' as string) as smallint),
+ cast(cast('11' as string) as int),
+ cast(cast('11' as string) as bigint),
+ cast(cast('11.00' as string) as float),
+ cast(cast('11.00' as string) as double),
+ cast(cast('11.00' as string) as decimal)
+from src limit 1;
+
+select
+ cast(cast('11' as char(10)) as tinyint),
+ cast(cast('11' as char(10)) as smallint),
+ cast(cast('11' as char(10)) as int),
+ cast(cast('11' as char(10)) as bigint),
+ cast(cast('11.00' as char(10)) as float),
+ cast(cast('11.00' as char(10)) as double),
+ cast(cast('11.00' as char(10)) as decimal)
+from src limit 1;
+
+select
+ cast(cast('2011-01-01' as string) as date),
+ cast(cast('2011-01-01 01:02:03' as string) as timestamp)
+from src limit 1;
+
+select
+ cast(cast('2011-01-01' as char(10)) as date),
+ cast(cast('2011-01-01 01:02:03' as char(30)) as timestamp)
+from src limit 1;
+
+-- no tests from string/char to boolean; that conversion doesn't look useful
+select
+ cast(cast('abc123' as string) as string),
+ cast(cast('abc123' as string) as varchar(10)),
+ cast(cast('abc123' as string) as char(10))
+from src limit 1;
+
+select
+ cast(cast('abc123' as char(10)) as string),
+ cast(cast('abc123' as char(10)) as varchar(10)),
+ cast(cast('abc123' as char(10)) as char(10))
+from src limit 1;
+
+select
+ cast(cast('abc123' as varchar(10)) as string),
+ cast(cast('abc123' as varchar(10)) as varchar(10)),
+ cast(cast('abc123' as varchar(10)) as char(10))
+from src limit 1;
+
+-- cast from other types to char
+select
+ cast(cast(11 as tinyint) as string),
+ cast(cast(11 as smallint) as string),
+ cast(cast(11 as int) as string),
+ cast(cast(11 as bigint) as string),
+ cast(cast(11.00 as float) as string),
+ cast(cast(11.00 as double) as string),
+ cast(cast(11.00 as decimal) as string)
+from src limit 1;
+
+select
+ cast(cast(11 as tinyint) as char(10)),
+ cast(cast(11 as smallint) as char(10)),
+ cast(cast(11 as int) as char(10)),
+ cast(cast(11 as bigint) as char(10)),
+ cast(cast(11.00 as float) as char(10)),
+ cast(cast(11.00 as double) as char(10)),
+ cast(cast(11.00 as decimal) as char(10))
+from src limit 1;
+
+select
+ cast(date '2011-01-01' as string),
+ cast(timestamp('2011-01-01 01:02:03') as string)
+from src limit 1;
+
+select
+ cast(date '2011-01-01' as char(10)),
+ cast(timestamp('2011-01-01 01:02:03') as char(30))
+from src limit 1;
+
+select
+ cast(true as string),
+ cast(false as string)
+from src limit 1;
+
+select
+ cast(true as char(10)),
+ cast(false as char(10))
+from src limit 1;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_comparison.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_comparison.q
new file mode 100644
index 0000000000..e1cfdb2bdb
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_comparison.q
@@ -0,0 +1,40 @@
+
+-- Should all be true
+select
+ cast('abc' as char(10)) = cast('abc' as char(10)),
+ cast('abc' as char(10)) <= cast('abc' as char(10)),
+ cast('abc' as char(10)) >= cast('abc' as char(10)),
+ cast('abc' as char(10)) < cast('abd' as char(10)),
+ cast('abc' as char(10)) > cast('abb' as char(10)),
+ cast('abc' as char(10)) <> cast('abb' as char(10))
+from src limit 1;
+
+-- Different char lengths should still compare the same
+select
+ cast('abc' as char(10)) = cast('abc' as char(3)),
+ cast('abc' as char(10)) <= cast('abc' as char(3)),
+ cast('abc' as char(10)) >= cast('abc' as char(3)),
+ cast('abc' as char(10)) < cast('abd' as char(3)),
+ cast('abc' as char(10)) > cast('abb' as char(3)),
+ cast('abc' as char(10)) <> cast('abb' as char(3))
+from src limit 1;
+
+-- Should work with string types as well
+select
+ cast('abc' as char(10)) = 'abc',
+ cast('abc' as char(10)) <= 'abc',
+ cast('abc' as char(10)) >= 'abc',
+ cast('abc' as char(10)) < 'abd',
+ cast('abc' as char(10)) > 'abb',
+ cast('abc' as char(10)) <> 'abb'
+from src limit 1;
+
+-- leading space is significant for char
+select
+ cast(' abc' as char(10)) <> cast('abc' as char(10))
+from src limit 1;
+
+-- trailing space is not significant for char
+select
+ cast('abc ' as char(10)) = cast('abc' as char(10))
+from src limit 1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_join1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_join1.q
new file mode 100644
index 0000000000..373352dee0
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_join1.q
@@ -0,0 +1,35 @@
+drop table char_join1_ch1;
+drop table char_join1_ch2;
+drop table char_join1_str;
+
+create table char_join1_ch1 (
+ c1 int,
+ c2 char(10)
+);
+
+create table char_join1_ch2 (
+ c1 int,
+ c2 char(20)
+);
+
+create table char_join1_str (
+ c1 int,
+ c2 string
+);
+
+load data local inpath '../../data/files/vc1.txt' into table char_join1_ch1;
+load data local inpath '../../data/files/vc1.txt' into table char_join1_ch2;
+load data local inpath '../../data/files/vc1.txt' into table char_join1_str;
+
+-- Join char with same length char
+select * from char_join1_ch1 a join char_join1_ch1 b on (a.c2 = b.c2) order by a.c1;
+
+-- Join char with different length char
+select * from char_join1_ch1 a join char_join1_ch2 b on (a.c2 = b.c2) order by a.c1;
+
+-- Join char with string
+select * from char_join1_ch1 a join char_join1_str b on (a.c2 = b.c2) order by a.c1;
+
+drop table char_join1_ch1;
+drop table char_join1_ch2;
+drop table char_join1_str;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_nested_types.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_nested_types.q
new file mode 100644
index 0000000000..c710b6c731
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_nested_types.q
@@ -0,0 +1,53 @@
+drop table char_nested_1;
+drop table char_nested_array;
+drop table char_nested_map;
+drop table char_nested_struct;
+drop table char_nested_cta;
+drop view char_nested_view;
+
+create table char_nested_1 (key int, value char(20));
+insert overwrite table char_nested_1
+ select key, value from src order by key limit 1;
+
+-- arrays
+create table char_nested_array (c1 array<char(20)>);
+insert overwrite table char_nested_array
+ select array(value, value) from char_nested_1;
+describe char_nested_array;
+select * from char_nested_array;
+
+-- maps
+create table char_nested_map (c1 map<int, char(20)>);
+insert overwrite table char_nested_map
+ select map(key, value) from char_nested_1;
+describe char_nested_map;
+select * from char_nested_map;
+
+-- structs
+create table char_nested_struct (c1 struct<a:int, b:char(20), c:string>);
+insert overwrite table char_nested_struct
+ select named_struct('a', key,
+ 'b', value,
+ 'c', cast(value as string))
+ from char_nested_1;
+describe char_nested_struct;
+select * from char_nested_struct;
+
+-- nested type with create table as
+create table char_nested_cta as
+ select * from char_nested_struct;
+describe char_nested_cta;
+select * from char_nested_cta;
+
+-- nested type with view
+create view char_nested_view as
+ select * from char_nested_struct;
+describe char_nested_view;
+select * from char_nested_view;
+
+drop table char_nested_1;
+drop table char_nested_array;
+drop table char_nested_map;
+drop table char_nested_struct;
+drop table char_nested_cta;
+drop view char_nested_view;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_serde.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_serde.q
new file mode 100644
index 0000000000..4340b4de6d
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_serde.q
@@ -0,0 +1,102 @@
+drop table if exists char_serde_regex;
+drop table if exists char_serde_lb;
+drop table if exists char_serde_ls;
+drop table if exists char_serde_c;
+drop table if exists char_serde_lbc;
+drop table if exists char_serde_orc;
+
+--
+-- RegexSerDe
+--
+create table char_serde_regex (
+ key char(15),
+ value char(20)
+)
+row format serde 'org.apache.hadoop.hive.serde2.RegexSerDe'
+with serdeproperties (
+ "input.regex" = "([^]*)([^]*)"
+)
+stored as textfile;
+
+load data local inpath '../../data/files/srcbucket0.txt' overwrite into table char_serde_regex;
+
+select * from char_serde_regex limit 5;
+select value, count(*) from char_serde_regex group by value limit 5;
+
+--
+-- LazyBinary
+--
+create table char_serde_lb (
+ key char(15),
+ value char(20)
+);
+alter table char_serde_lb set serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe';
+
+insert overwrite table char_serde_lb
+ select key, value from char_serde_regex;
+select * from char_serde_lb limit 5;
+select value, count(*) from char_serde_lb group by value limit 5;
+
+--
+-- LazySimple
+--
+create table char_serde_ls (
+ key char(15),
+ value char(20)
+);
+alter table char_serde_ls set serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe';
+
+insert overwrite table char_serde_ls
+ select key, value from char_serde_lb;
+select * from char_serde_ls limit 5;
+select value, count(*) from char_serde_ls group by value limit 5;
+
+--
+-- Columnar
+--
+create table char_serde_c (
+ key char(15),
+ value char(20)
+) stored as rcfile;
+alter table char_serde_c set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe';
+
+insert overwrite table char_serde_c
+ select key, value from char_serde_ls;
+select * from char_serde_c limit 5;
+select value, count(*) from char_serde_c group by value limit 5;
+
+--
+-- LazyBinaryColumnar
+--
+create table char_serde_lbc (
+ key char(15),
+ value char(20)
+) stored as rcfile;
+alter table char_serde_lbc set serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
+
+insert overwrite table char_serde_lbc
+ select key, value from char_serde_c;
+select * from char_serde_lbc limit 5;
+select value, count(*) from char_serde_lbc group by value limit 5;
+
+--
+-- ORC
+--
+create table char_serde_orc (
+ key char(15),
+ value char(20)
+) stored as orc;
+alter table char_serde_orc set serde 'org.apache.hadoop.hive.ql.io.orc.OrcSerde';
+
+
+insert overwrite table char_serde_orc
+ select key, value from char_serde_lbc;
+select * from char_serde_orc limit 5;
+select value, count(*) from char_serde_orc group by value limit 5;
+
+drop table if exists char_serde_regex;
+drop table if exists char_serde_lb;
+drop table if exists char_serde_ls;
+drop table if exists char_serde_c;
+drop table if exists char_serde_lbc;
+drop table if exists char_serde_orc;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_udf1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_udf1.q
new file mode 100644
index 0000000000..629d41dca9
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_udf1.q
@@ -0,0 +1,156 @@
+drop table char_udf_1;
+
+create table char_udf_1 (c1 string, c2 string, c3 char(10), c4 char(20));
+insert overwrite table char_udf_1
+ select key, value, key, value from src where key = '238' limit 1;
+
+-- UDFs with char support
+select
+ concat(c1, c2),
+ concat(c3, c4),
+ concat(c1, c2) = concat(c3, c4)
+from char_udf_1 limit 1;
+
+select
+ upper(c2),
+ upper(c4),
+ upper(c2) = upper(c4)
+from char_udf_1 limit 1;
+
+select
+ lower(c2),
+ lower(c4),
+ lower(c2) = lower(c4)
+from char_udf_1 limit 1;
+
+-- Scalar UDFs
+select
+ ascii(c2),
+ ascii(c4),
+ ascii(c2) = ascii(c4)
+from char_udf_1 limit 1;
+
+select
+ concat_ws('|', c1, c2),
+ concat_ws('|', c3, c4),
+ concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
+from char_udf_1 limit 1;
+
+select
+ decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
+ decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
+ decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
+from char_udf_1 limit 1;
+
+select
+ instr(c2, '_'),
+ instr(c4, '_'),
+ instr(c2, '_') = instr(c4, '_')
+from char_udf_1 limit 1;
+
+select
+ length(c2),
+ length(c4),
+ length(c2) = length(c4)
+from char_udf_1 limit 1;
+
+select
+ locate('a', 'abcdabcd', 3),
+ locate(cast('a' as char(1)), cast('abcdabcd' as char(10)), 3),
+ locate('a', 'abcdabcd', 3) = locate(cast('a' as char(1)), cast('abcdabcd' as char(10)), 3)
+from char_udf_1 limit 1;
+
+select
+ lpad(c2, 15, ' '),
+ lpad(c4, 15, ' '),
+ lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
+from char_udf_1 limit 1;
+
+select
+ ltrim(c2),
+ ltrim(c4),
+ ltrim(c2) = ltrim(c4)
+from char_udf_1 limit 1;
+
+select
+ regexp(c2, 'val'),
+ regexp(c4, 'val'),
+ regexp(c2, 'val') = regexp(c4, 'val')
+from char_udf_1 limit 1;
+
+select
+ regexp_extract(c2, 'val_([0-9]+)', 1),
+ regexp_extract(c4, 'val_([0-9]+)', 1),
+ regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from char_udf_1 limit 1;
+
+select
+ regexp_replace(c2, 'val', 'replaced'),
+ regexp_replace(c4, 'val', 'replaced'),
+ regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from char_udf_1 limit 1;
+
+select
+ reverse(c2),
+ reverse(c4),
+ reverse(c2) = reverse(c4)
+from char_udf_1 limit 1;
+
+select
+ rpad(c2, 15, ' '),
+ rpad(c4, 15, ' '),
+ rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
+from char_udf_1 limit 1;
+
+select
+ rtrim(c2),
+ rtrim(c4),
+ rtrim(c2) = rtrim(c4)
+from char_udf_1 limit 1;
+
+select
+ sentences('See spot run. See jane run.'),
+ sentences(cast('See spot run. See jane run.' as char(50)))
+from char_udf_1 limit 1;
+
+select
+ split(c2, '_'),
+ split(c4, '_')
+from char_udf_1 limit 1;
+
+select
+ str_to_map('a:1,b:2,c:3',',',':'),
+ str_to_map(cast('a:1,b:2,c:3' as char(20)),',',':')
+from char_udf_1 limit 1;
+
+select
+ substr(c2, 1, 3),
+ substr(c4, 1, 3),
+ substr(c2, 1, 3) = substr(c4, 1, 3)
+from char_udf_1 limit 1;
+
+select
+ trim(c2),
+ trim(c4),
+ trim(c2) = trim(c4)
+from char_udf_1 limit 1;
+
+
+-- Aggregate Functions
+select
+ compute_stats(c2, 16),
+ compute_stats(c4, 16)
+from char_udf_1;
+
+select
+ min(c2),
+ min(c4)
+from char_udf_1;
+
+select
+ max(c2),
+ max(c4)
+from char_udf_1;
+
+
+drop table char_udf_1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_union1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_union1.q
new file mode 100644
index 0000000000..2ce5e89c2a
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_union1.q
@@ -0,0 +1,47 @@
+drop table char_union1_ch1;
+drop table char_union1_ch2;
+drop table char_union1_str;
+
+create table char_union1_ch1 (
+ c1 int,
+ c2 char(10)
+);
+
+create table char_union1_ch2 (
+ c1 int,
+ c2 char(20)
+);
+
+create table char_union1_str (
+ c1 int,
+ c2 string
+);
+
+load data local inpath '../../data/files/vc1.txt' into table char_union1_ch1;
+load data local inpath '../../data/files/vc1.txt' into table char_union1_ch2;
+load data local inpath '../../data/files/vc1.txt' into table char_union1_str;
+
+-- union char with same length char
+select * from (
+ select * from char_union1_ch1
+ union all
+ select * from char_union1_ch1 limit 1
+) q1 sort by c1;
+
+-- union char with different length char
+select * from (
+ select * from char_union1_ch1
+ union all
+ select * from char_union1_ch2 limit 1
+) q1 sort by c1;
+
+-- union char with string
+select * from (
+ select * from char_union1_ch1
+ union all
+ select * from char_union1_str limit 1
+) q1 sort by c1;
+
+drop table char_union1_ch1;
+drop table char_union1_ch2;
+drop table char_union1_str;
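The three unions above exercise common-type resolution for char. As a hedged sketch of the expected widening (assumed behavior, to be checked against the golden files): char(10) with char(20) should widen to char(20), and char with string should fall back to string, e.g.

-- the implicit widening, written out explicitly
select cast(c2 as char(20)) from char_union1_ch1
union all
select c2 from char_union1_ch2;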
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_varchar_udf.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_varchar_udf.q
new file mode 100644
index 0000000000..332b84087e
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/char_varchar_udf.q
@@ -0,0 +1,9 @@
+DROP TABLE IF EXISTS char_varchar_udf;
+
+CREATE TABLE char_varchar_udf (c char(8), vc varchar(10)) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';
+LOAD DATA LOCAL INPATH '../../data/files/char_varchar_udf.txt' INTO TABLE char_varchar_udf;
+
+SELECT ROUND(c, 2), ROUND(vc, 3) FROM char_varchar_udf;
+SELECT AVG(c), AVG(vc), SUM(c), SUM(vc) FROM char_varchar_udf;
+
+DROP TABLE char_varchar_udf;
\ No newline at end of file
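This short test leans on implicit numeric coercion: ROUND, AVG, and SUM accept char/varchar inputs by converting them to double first. An equivalent explicit form, as a sketch under that assumption:

SELECT ROUND(CAST(c AS DOUBLE), 2), ROUND(CAST(vc AS DOUBLE), 3) FROM char_varchar_udf;
SELECT AVG(CAST(c AS DOUBLE)), SUM(CAST(vc AS DOUBLE)) FROM char_varchar_udf;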
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/column_access_stats.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/column_access_stats.q
index 3c8a309991..fbf8bba56e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/column_access_stats.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/column_access_stats.q
@@ -4,7 +4,7 @@ SET hive.stats.collect.scancols=true;
-- This test is used for testing the ColumnAccessAnalyzer
CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/columnstats_partlvl.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/columnstats_partlvl.q
index 98627776d3..9dfe8ffbc3 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/columnstats_partlvl.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/columnstats_partlvl.q
@@ -4,8 +4,8 @@ DROP TABLE Employee_Part;
CREATE TABLE Employee_Part(employeeID int, employeeName String) partitioned by (employeeSalary double)
row format delimited fields terminated by '|' stored as textfile;
-LOAD DATA LOCAL INPATH "../data/files/employee.dat" INTO TABLE Employee_Part partition(employeeSalary=2000.0);
-LOAD DATA LOCAL INPATH "../data/files/employee.dat" INTO TABLE Employee_Part partition(employeeSalary=4000.0);
+LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee_Part partition(employeeSalary=2000.0);
+LOAD DATA LOCAL INPATH "../../data/files/employee.dat" INTO TABLE Employee_Part partition(employeeSalary=4000.0);
explain
analyze table Employee_Part partition (employeeSalary=2000.0) compute statistics for columns employeeID;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/columnstats_tbllvl.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/columnstats_tbllvl.q
index 72d88a67b5..170fbc5191 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/columnstats_tbllvl.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/columnstats_tbllvl.q
@@ -13,7 +13,7 @@ CREATE TABLE UserVisits_web_text_none (
avgTimeOnSite int)
row format delimited fields terminated by '|' stored as textfile;
-LOAD DATA LOCAL INPATH "../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none;
+LOAD DATA LOCAL INPATH "../../data/files/UserVisits.dat" INTO TABLE UserVisits_web_text_none;
explain
analyze table UserVisits_web_text_none compute statistics for columns sourceIP, avgTimeOnSite, adRevenue;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compile_processor.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compile_processor.q
new file mode 100644
index 0000000000..6be02ec8a7
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compile_processor.q
@@ -0,0 +1,12 @@
+
+compile `import org.apache.hadoop.hive.ql.exec.UDF \;
+public class Pyth extends UDF {
+ public double evaluate(double a, double b){
+ return Math.sqrt((a*a) + (b*b)) \;
+ }
+} ` AS GROOVY NAMED Pyth.groovy;
+CREATE TEMPORARY FUNCTION Pyth as 'Pyth';
+
+SELECT Pyth(3,4) FROM src tablesample (1 rows);
+
+DROP TEMPORARY FUNCTION Pyth;
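A note on the backtick block above: the CLI splits input on ';', so every semicolon inside the inline Groovy is escaped as '\;'. The same pattern with a one-line body, as an illustrative sketch (the Twice class is hypothetical, not part of the patch):

compile `import org.apache.hadoop.hive.ql.exec.UDF \;
public class Twice extends UDF {
  public double evaluate(double a){ return 2 * a \; }
} ` AS GROOVY NAMED Twice.groovy;
CREATE TEMPORARY FUNCTION Twice as 'Twice';
SELECT Twice(21) FROM src tablesample (1 rows);
DROP TEMPORARY FUNCTION Twice;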
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_binary.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_binary.q
index c198136027..fd15634f20 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_binary.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_binary.q
@@ -1,7 +1,7 @@
create table tab_binary(a binary);
-- insert some data
-LOAD DATA LOCAL INPATH "../data/files/binary.txt" INTO TABLE tab_binary;
+LOAD DATA LOCAL INPATH "../../data/files/binary.txt" INTO TABLE tab_binary;
select count(*) from tab_binary;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_boolean.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_boolean.q
index dc76f7c7d5..cddb53f8f6 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_boolean.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_boolean.q
@@ -1,7 +1,7 @@
create table tab_bool(a boolean);
-- insert some data
-LOAD DATA LOCAL INPATH "../data/files/bool.txt" INTO TABLE tab_bool;
+LOAD DATA LOCAL INPATH "../../data/files/bool.txt" INTO TABLE tab_bool;
select count(*) from tab_bool;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_decimal.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_decimal.q
new file mode 100644
index 0000000000..77ec066dad
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_decimal.q
@@ -0,0 +1,11 @@
+set hive.stats.autogather=true;
+
+create table tab_decimal(a decimal(10,3));
+
+-- insert some data
+LOAD DATA LOCAL INPATH "../../data/files/decimal.txt" INTO TABLE tab_decimal;
+
+select count(*) from tab_decimal;
+
+-- compute statistical summary of data
+select compute_stats(a, 18) from tab_decimal;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_double.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_double.q
index 6c6dc47e67..7a1e0f6295 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_double.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_double.q
@@ -1,7 +1,7 @@
create table tab_double(a double);
-- insert some data
-LOAD DATA LOCAL INPATH "../data/files/double.txt" INTO TABLE tab_double;
+LOAD DATA LOCAL INPATH "../../data/files/double.txt" INTO TABLE tab_double;
select count(*) from tab_double;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_long.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_long.q
index 7d0a1584a2..6a2070f780 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_long.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_long.q
@@ -1,7 +1,7 @@
create table tab_int(a int);
-- insert some data
-LOAD DATA LOCAL INPATH "../data/files/int.txt" INTO TABLE tab_int;
+LOAD DATA LOCAL INPATH "../../data/files/int.txt" INTO TABLE tab_int;
select count(*) from tab_int;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_string.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_string.q
index f146f6b95b..0023e7f6bd 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_string.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/compute_stats_string.q
@@ -1,7 +1,7 @@
create table tab_string(a string);
-- insert some data
-LOAD DATA LOCAL INPATH "../data/files/string.txt" INTO TABLE tab_string;
+LOAD DATA LOCAL INPATH "../../data/files/string.txt" INTO TABLE tab_string;
select count(*) from tab_string;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/constant_prop.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/constant_prop.q
index ced72d6044..d51b80194e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/constant_prop.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/constant_prop.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
EXPLAIN
SELECT NAMED_STRUCT(
IF(ARRAY_CONTAINS(ARRAY(1, 2), 3), "F1", "B1"), 1,
@@ -7,7 +9,7 @@ SELECT NAMED_STRUCT(
IF(ARRAY_CONTAINS(ARRAY(1, 2), 3), "F1", "B1"), 1,
IF(ARRAY_CONTAINS(MAP_KEYS(MAP("b", "x")), "b"), "F2", "B2"), 2
).F2
- FROM src LIMIT 1;
+ FROM src tablesample (1 rows);
SELECT NAMED_STRUCT(
IF(ARRAY_CONTAINS(ARRAY(1, 2), 3), "F1", "B1"), 1,
@@ -17,4 +19,4 @@ SELECT NAMED_STRUCT(
IF(ARRAY_CONTAINS(ARRAY(1, 2), 3), "F1", "B1"), 1,
IF(ARRAY_CONTAINS(MAP_KEYS(MAP("b", "x")), "b"), "F2", "B2"), 2
).F2
- FROM src LIMIT 1;
+ FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/correlationoptimizer1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/correlationoptimizer1.q
index b3fd3f760e..0596f965ed 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/correlationoptimizer1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/correlationoptimizer1.q
@@ -104,7 +104,7 @@ FROM (SELECT x.key AS key, count(1) AS cnt
set hive.optimize.correlation=false;
-- If the key of a GroupByOperator is the right table's key in
--- a Left Outer Join, we cannot use a single MR to execute these two
+-- a Left Outer Join, we cannot use a single MR to execute these two
-- operators because those keys with a null value are not grouped.
EXPLAIN
SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt))
@@ -130,6 +130,29 @@ FROM (SELECT y.key AS key, count(1) AS cnt
GROUP BY y.key) tmp;
set hive.optimize.correlation=false;
+-- If a column of the key of a GroupByOperator is the right table's key in
+-- a Left Outer Join, we cannot use a single MR to execute these two
+-- operators because those keys with a null value are not grouped.
+EXPLAIN
+SELECT x.key, y.value, count(1) AS cnt
+FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key AND x.value = y.value)
+GROUP BY x.key, y.value;
+
+SELECT x.key, y.value, count(1) AS cnt
+FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key AND x.value = y.value)
+GROUP BY x.key, y.value;
+
+set hive.optimize.correlation=true;
+EXPLAIN
+SELECT x.key, y.value, count(1) AS cnt
+FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key AND x.value = y.value)
+GROUP BY x.key, y.value;
+
+SELECT x.key, y.value, count(1) AS cnt
+FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key AND x.value = y.value)
+GROUP BY x.key, y.value;
+
+set hive.optimize.correlation=false;
-- If the key of a GroupByOperator is the right table's key in
-- a Right Outer Join, these two operators will be executed in
-- the same MR job when Correlation Optimizer is enabled.
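The LEFT OUTER JOIN comments in this hunk rest on one fact: rows of x with no match in y come out of the join with NULL in y's columns, and those NULLs only exist after the join runs. A comment sketch of the blocked single-stage plan (values illustrative):

-- x.key = 'a' has no match in y:
--   join output row: ('a', NULL)
--   GROUP BY (x.key, y.value) must place ('a', NULL) in its own group,
--   but a single MR stage keyed on y's columns before the join
--   never sees that NULL, so the two operators cannot share a job.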
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/correlationoptimizer4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/correlationoptimizer4.q
index 70fcdfc0b4..953d191fc6 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/correlationoptimizer4.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/correlationoptimizer4.q
@@ -1,9 +1,9 @@
CREATE TABLE T1(key INT, val STRING);
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key INT, val STRING);
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
CREATE TABLE T3(key INT, val STRING);
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T3;
set hive.auto.convert.join=false;
set hive.optimize.correlation=false;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/correlationoptimizer5.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/correlationoptimizer5.q
index ac836c0bfd..287c7a389d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/correlationoptimizer5.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/correlationoptimizer5.q
@@ -1,11 +1,11 @@
CREATE TABLE T1(key INT, val STRING);
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1;
CREATE TABLE T2(key INT, val STRING);
-LOAD DATA LOCAL INPATH '../data/files/kv2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T2;
CREATE TABLE T3(key INT, val STRING);
-LOAD DATA LOCAL INPATH '../data/files/kv3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/kv3.txt' INTO TABLE T3;
CREATE TABLE T4(key INT, val STRING);
-LOAD DATA LOCAL INPATH '../data/files/kv5.txt' INTO TABLE T4;
+LOAD DATA LOCAL INPATH '../../data/files/kv5.txt' INTO TABLE T4;
CREATE TABLE dest_co1(key INT, val STRING);
CREATE TABLE dest_co2(key INT, val STRING);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/count.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/count.q
index 0d66a5ec34..74ae9e428b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/count.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/count.q
@@ -1,5 +1,5 @@
create table abcd (a int, b int, c int, d int);
-LOAD DATA LOCAL INPATH '../data/files/in4.txt' INTO TABLE abcd;
+LOAD DATA LOCAL INPATH '../../data/files/in4.txt' INTO TABLE abcd;
select * from abcd;
set hive.map.aggr=true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_func1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_func1.q
new file mode 100644
index 0000000000..ad924d3453
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_func1.q
@@ -0,0 +1,30 @@
+
+-- qtest_get_java_boolean should already be created during test initialization
+select qtest_get_java_boolean('true'), qtest_get_java_boolean('false') from src limit 1;
+
+create database mydb;
+create function mydb.func1 as 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper';
+
+show functions mydb.func1;
+
+select mydb.func1('abc') from src limit 1;
+
+drop function mydb.func1;
+
+-- function should now be gone
+show functions mydb.func1;
+
+-- To test function name resolution
+create function mydb.qtest_get_java_boolean as 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper';
+
+use default;
+-- unqualified function should resolve to one in default db
+select qtest_get_java_boolean('abc'), default.qtest_get_java_boolean('abc'), mydb.qtest_get_java_boolean('abc') from default.src limit 1;
+
+use mydb;
+-- unqualified function should resolve to one in mydb db
+select qtest_get_java_boolean('abc'), default.qtest_get_java_boolean('abc'), mydb.qtest_get_java_boolean('abc') from default.src limit 1;
+
+drop function mydb.qtest_get_java_boolean;
+
+drop database mydb cascade;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_like.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_like.q
index cb4d6578af..13539a65f5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_like.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_like.q
@@ -21,8 +21,8 @@ INSERT OVERWRITE TABLE table2 SELECT key, value FROM src WHERE key = 100;
SELECT * FROM table1;
SELECT * FROM table2;
-CREATE EXTERNAL TABLE table4 (a INT) LOCATION '${system:test.src.data.dir}/files/ext_test';
-CREATE EXTERNAL TABLE table5 LIKE table4 LOCATION '${system:test.src.data.dir}/files/ext_test';
+CREATE EXTERNAL TABLE table4 (a INT) LOCATION '${system:hive.root}/data/files/ext_test';
+CREATE EXTERNAL TABLE table5 LIKE table4 LOCATION '${system:hive.root}/data/files/ext_test';
SELECT * FROM table4;
SELECT * FROM table5;
@@ -31,5 +31,5 @@ DROP TABLE table5;
SELECT * FROM table4;
DROP TABLE table4;
-CREATE EXTERNAL TABLE table4 (a INT) LOCATION '${system:test.src.data.dir}/files/ext_test';
+CREATE EXTERNAL TABLE table4 (a INT) LOCATION '${system:hive.root}/data/files/ext_test';
SELECT * FROM table4;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_merge_compressed.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_merge_compressed.q
index 4418b3430a..483931b6ff 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_merge_compressed.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_merge_compressed.q
@@ -1,6 +1,6 @@
create table src_rc_merge_test(key int, value string) stored as rcfile;
-load data local inpath '../data/files/smbbucket_1.rc' into table src_rc_merge_test;
+load data local inpath '../../data/files/smbbucket_1.rc' into table src_rc_merge_test;
set hive.exec.compress.output = true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_nested_type.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_nested_type.q
index 2debd0d71d..735b139719 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_nested_type.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_nested_type.q
@@ -9,7 +9,7 @@ CREATE TABLE table1 (
DESCRIBE table1;
DESCRIBE EXTENDED table1;
-LOAD DATA LOCAL INPATH '../data/files/create_nested_type.txt' OVERWRITE INTO TABLE table1;
+LOAD DATA LOCAL INPATH '../../data/files/create_nested_type.txt' OVERWRITE INTO TABLE table1;
SELECT * from table1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_struct_table.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_struct_table.q
index dd5aa63e45..1e5d151f28 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_struct_table.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_struct_table.q
@@ -4,7 +4,7 @@ row format delimited
fields terminated by '\t'
collection items terminated by '\001';
-load data local inpath '../data/files/kv1.txt'
+load data local inpath '../../data/files/kv1.txt'
overwrite into table abc;
SELECT strct, strct.a, strct.b FROM abc LIMIT 10;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_union_table.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_union_table.q
index bb0e5b989d..6bc4d29358 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_union_table.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_union_table.q
@@ -4,7 +4,7 @@ strct struct<a:int, b:string, c:string>);
create table abc(mydata uniontype<int,double,array<string>,struct<a:int,b:string>>,
strct struct<a:int, b:string, c:string>);
-load data local inpath '../data/files/union_input.txt'
+load data local inpath '../../data/files/union_input.txt'
overwrite into table abc;
SELECT * FROM abc;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_view_translate.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_view_translate.q
index 2199750f42..11ba9c8afc 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_view_translate.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/create_view_translate.q
@@ -11,3 +11,14 @@ describe formatted w;
drop view v;
drop view w;
+
+
+-- HIVE-4116 Can't use views using map datatype.
+
+CREATE TABLE items (id INT, name STRING, info MAP<STRING,STRING>);
+
+explain
+CREATE VIEW priceview AS SELECT items.id, items.info['price'] FROM items;
+CREATE VIEW priceview AS SELECT items.id, items.info['price'] FROM items;
+
+select * from priceview;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cross_product_check_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cross_product_check_1.q
new file mode 100644
index 0000000000..fb38c947b3
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cross_product_check_1.q
@@ -0,0 +1,26 @@
+
+create table A as
+select * from src;
+
+create table B as
+select * from src
+limit 10;
+
+set hive.auto.convert.join.noconditionaltask.size=100;
+
+explain select * from A join B;
+
+explain select * from B d1 join B d2 on d1.key = d2.key join A;
+
+explain select * from A join
+ (select d1.key
+ from B d1 join B d2 on d1.key = d2.key
+ where 1 = 1 group by d1.key) od1;
+
+explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1;
+
+explain select * from
+(select A.key from A group by key) ss join
+(select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1;
+
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cross_product_check_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cross_product_check_2.q
new file mode 100644
index 0000000000..479d57137e
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cross_product_check_2.q
@@ -0,0 +1,27 @@
+create table A as
+select * from src;
+
+create table B as
+select * from src
+limit 10;
+
+set hive.auto.convert.join=true;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=10000000;
+
+explain select * from A join B;
+
+explain select * from B d1 join B d2 on d1.key = d2.key join A;
+
+explain select * from A join
+ (select d1.key
+ from B d1 join B d2 on d1.key = d2.key
+ where 1 = 1 group by d1.key) od1;
+
+explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1;
+
+explain select * from
+(select A.key from A group by key) ss join
+(select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1;
+
+
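Both cross_product_check files feed the analyzer queries with a missing join condition ('A join B', and 'd1 join d2' without an ON clause). When a Cartesian product is intended, the explicit syntax says so; a one-line sketch:

explain select * from A cross join B;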
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ctas.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ctas.q
index e595904b41..71af40e7e4 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ctas.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ctas.q
@@ -56,7 +56,7 @@ set hive.exec.mode.local.auto=true;
create table nzhang_ctas5 row format delimited fields terminated by ',' lines terminated by '\012' stored as textfile as select key, value from src sort by key, value limit 10;
create table nzhang_ctas6 (key string, `to` string);
-insert overwrite table nzhang_ctas6 select key, value from src limit 10;
+insert overwrite table nzhang_ctas6 select key, value from src tablesample (10 rows);
create table nzhang_ctas7 as select key, `to` from nzhang_ctas6;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ctas_char.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ctas_char.q
new file mode 100644
index 0000000000..ecfe74afd0
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ctas_char.q
@@ -0,0 +1,22 @@
+drop table ctas_char_1;
+drop table ctas_char_2;
+drop view ctas_char_3;
+
+create table ctas_char_1 (key char(10), value string);
+insert overwrite table ctas_char_1
+ select key, value from src sort by key, value limit 5;
+
+-- create table as with char column
+create table ctas_char_2 as select key, value from ctas_char_1;
+
+-- view with char column
+create view ctas_char_3 as select key, value from ctas_char_2;
+
+select key, value from ctas_char_1;
+select * from ctas_char_2;
+select * from ctas_char_3;
+
+
+drop table ctas_char_1;
+drop table ctas_char_2;
+drop view ctas_char_3;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ctas_hadoop20.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ctas_hadoop20.q
index 4961b971db..f39689de03 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ctas_hadoop20.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ctas_hadoop20.q
@@ -58,11 +58,6 @@ create table nzhang_ctas6 (key string, `to` string);
insert overwrite table nzhang_ctas6 select key, value from src limit 10;
create table nzhang_ctas7 as select key, `to` from nzhang_ctas6;
-
-
-
-
-
-
-
-
+create table nzhang_ctas8 as select 3.14BD from nzhang_ctas6 limit 1;
+desc nzhang_ctas8;
+drop table nzhang_ctas8;
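The new nzhang_ctas8 lines check that a suffixed literal carries its type through CTAS: BD marks a decimal literal, so desc should report a decimal column rather than double. A sketch of the distinction, assuming Hive 0.13 literal rules:

select 3.14BD, 3.14 from src tablesample (1 rows);
-- 3.14BD : decimal(3,2) literal
-- 3.14   : double literal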
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cte_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cte_1.q
new file mode 100644
index 0000000000..ca4132f7d3
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cte_1.q
@@ -0,0 +1,28 @@
+explain
+with q1 as ( select key from src where key = '5')
+select *
+from q1
+;
+
+with q1 as ( select key from src where key = '5')
+select *
+from q1
+;
+
+-- in subquery
+explain
+with q1 as ( select key from src where key = '5')
+select * from (select key from q1) a;
+
+with q1 as ( select key from src where key = '5')
+select * from (select key from q1) a;
+
+-- chaining
+explain
+with q1 as ( select key from q2 where key = '5'),
+q2 as ( select key from src where key = '5')
+select * from (select key from q1) a;
+
+with q1 as ( select key from q2 where key = '5'),
+q2 as ( select key from src where key = '5')
+select * from (select key from q1) a; \ No newline at end of file
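On the final name-collision case in cte_2.q: the view carries its own q1 (key = '5') while the outer query defines a different q1 (key = '4'). A comment sketch of the assumed resolution:

-- v1's body:  with q1 as (select key from src where key = '5') select * from q1
-- outer call: with q1 as (select key from src where key = '4') select * from v1
-- assumed result: the view expands in its own scope, so its q1 wins
-- and 'select * from v1' still returns the key = '5' rows.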
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cte_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cte_2.q
new file mode 100644
index 0000000000..b49620aca1
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/cte_2.q
@@ -0,0 +1,56 @@
+
+-- union test
+with q1 as (select * from src where key= '5'),
+q2 as (select * from src s2 where key = '4')
+select * from q1 union all select * from q2
+;
+
+-- insert test
+create table s1 like src;
+with q1 as ( select key, value from src where key = '5')
+from q1
+insert overwrite table s1
+select *
+;
+select * from s1;
+drop table s1;
+
+-- from style
+with q1 as (select * from src where key= '5')
+from q1
+select *
+;
+
+-- ctas
+create table s2 as
+with q1 as ( select key from src where key = '4')
+select * from q1
+;
+
+select * from s2;
+drop table s2;
+
+-- view test
+create view v1 as
+with q1 as ( select key from src where key = '5')
+select * from q1
+;
+
+select * from v1;
+
+drop view v1;
+
+
+-- view test, name collision
+create view v1 as
+with q1 as ( select key from src where key = '5')
+select * from q1
+;
+
+with q1 as ( select key from src where key = '4')
+select * from v1
+;
+
+drop view v1;
+
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/custom_input_output_format.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/custom_input_output_format.q
index ff5e86dc5a..d769d05602 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/custom_input_output_format.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/custom_input_output_format.q
@@ -1,6 +1,7 @@
-ADD JAR ../build/ql/test/test-udfs.jar;
CREATE TABLE src1_rot13_iof(key STRING, value STRING)
STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13InputFormat'
OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13OutputFormat';
+DESCRIBE EXTENDED src1_rot13_iof;
+SELECT * FROM src1 ORDER BY key, value;
INSERT OVERWRITE TABLE src1_rot13_iof SELECT * FROM src1;
-SELECT * FROM src1_rot13_iof;
+SELECT * FROM src1_rot13_iof ORDER BY key, value;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/database.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/database.q
index 9140a42b65..e3ceaccfa5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/database.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/database.q
@@ -58,7 +58,7 @@ SHOW TABLES;
DESCRIBE EXTENDED test_table_like;
-- LOAD and SELECT
-LOAD DATA LOCAL INPATH '../data/files/test.dat'
+LOAD DATA LOCAL INPATH '../../data/files/test.dat'
OVERWRITE INTO TABLE test_table;
SELECT * FROM test_table;
@@ -146,7 +146,7 @@ CREATE TABLE db1.src(key STRING, value STRING)
STORED AS TEXTFILE;
-- LOAD into foreign table
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt'
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt'
OVERWRITE INTO TABLE db1.src;
-- SELECT from foreign table
@@ -158,7 +158,7 @@ PARTITIONED BY (ds STRING, hr STRING)
STORED AS TEXTFILE;
-- LOAD data into Partitioned foreign table
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt'
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt'
OVERWRITE INTO TABLE db1.srcpart
PARTITION (ds='2008-04-08', hr='11');
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/database_drop.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/database_drop.q
index 4e17c7ad5a..1371273245 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/database_drop.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/database_drop.q
@@ -8,13 +8,14 @@ CREATE DATABASE db5;
SHOW DATABASES;
USE db5;
+set hive.stats.dbclass=fs;
dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/dbcascade/temp;
dfs -rmr ${system:test.tmp.dir}/dbcascade;
dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/dbcascade;
-- add a table, index and view
CREATE TABLE temp_tbl (id INT, name STRING);
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE temp_tbl;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE temp_tbl;
CREATE VIEW temp_tbl_view AS SELECT * FROM temp_tbl;
CREATE INDEX idx1 ON TABLE temp_tbl(id) AS 'COMPACT' with DEFERRED REBUILD;
ALTER INDEX idx1 ON temp_tbl REBUILD;
@@ -23,15 +24,15 @@ dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/dbcascade/temp_tbl2;
dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/dbcascade/temp_tbl2_idx2;
-- add a table, index and view with a different storage location
CREATE TABLE temp_tbl2 (id INT, name STRING) LOCATION 'file:${system:test.tmp.dir}/dbcascade/temp_tbl2';
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' into table temp_tbl2;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' into table temp_tbl2;
CREATE VIEW temp_tbl2_view AS SELECT * FROM temp_tbl2;
CREATE INDEX idx2 ON TABLE temp_tbl2(id) AS 'COMPACT' with DEFERRED REBUILD LOCATION 'file:${system:test.tmp.dir}/dbcascade/temp_tbl2_idx2';
ALTER INDEX idx2 ON temp_tbl2 REBUILD;
-- add a partitioned table, index and view
CREATE TABLE part_tab (id INT, name STRING) PARTITIONED BY (ds string);
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE part_tab PARTITION (ds='2008-04-09');
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE part_tab PARTITION (ds='2009-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE part_tab PARTITION (ds='2008-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE part_tab PARTITION (ds='2009-04-09');
CREATE INDEX idx3 ON TABLE part_tab(id) AS 'COMPACT' with DEFERRED REBUILD;
ALTER INDEX idx3 ON part_tab PARTITION (ds='2008-04-09') REBUILD;
ALTER INDEX idx3 ON part_tab PARTITION (ds='2009-04-09') REBUILD;
@@ -41,8 +42,8 @@ dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/dbcascade/part_tab2_idx4;
-- add a partitioned table, index and view with a different storage location
CREATE TABLE part_tab2 (id INT, name STRING) PARTITIONED BY (ds string)
LOCATION 'file:${system:test.tmp.dir}/dbcascade/part_tab2';
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE part_tab2 PARTITION (ds='2008-04-09');
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE part_tab2 PARTITION (ds='2009-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE part_tab2 PARTITION (ds='2008-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE part_tab2 PARTITION (ds='2009-04-09');
CREATE INDEX idx4 ON TABLE part_tab2(id) AS 'COMPACT' with DEFERRED REBUILD
LOCATION 'file:${system:test.tmp.dir}/dbcascade/part_tab2_idx4';
ALTER INDEX idx4 ON part_tab2 PARTITION (ds='2008-04-09') REBUILD;
@@ -56,8 +57,8 @@ dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/dbcascade/part_tab3_idx5;
CREATE TABLE part_tab3 (id INT, name STRING) PARTITIONED BY (ds string)
LOCATION 'file:${system:test.tmp.dir}/dbcascade/part_tab3';
ALTER TABLE part_tab3 ADD PARTITION (ds='2007-04-09') LOCATION 'file:${system:test.tmp.dir}/dbcascade/part_tab3_p1';
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE part_tab3 PARTITION (ds='2008-04-09');
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE part_tab3 PARTITION (ds='2009-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE part_tab3 PARTITION (ds='2008-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE part_tab3 PARTITION (ds='2009-04-09');
CREATE INDEX idx5 ON TABLE part_tab3(id) AS 'COMPACT' with DEFERRED REBUILD
LOCATION 'file:${system:test.tmp.dir}/dbcascade/part_tab3_idx5';
ALTER INDEX idx5 ON part_tab3 PARTITION (ds='2008-04-09') REBUILD;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_1.q
index a2322fc1e4..7d89ac9268 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_1.q
@@ -1,21 +1,23 @@
+set hive.fetch.task.conversion=more;
+
drop table date_1;
create table date_1 (d date);
insert overwrite table date_1
- select cast('2011-01-01' as date) from src limit 1;
+ select cast('2011-01-01' as date) from src tablesample (1 rows);
select * from date_1 limit 1;
select d, count(d) from date_1 group by d;
insert overwrite table date_1
- select date '2011-01-01' from src limit 1;
+ select date '2011-01-01' from src tablesample (1 rows);
select * from date_1 limit 1;
select d, count(d) from date_1 group by d;
insert overwrite table date_1
- select cast(cast('2011-01-01 00:00:00' as timestamp) as date) from src limit 1;
+ select cast(cast('2011-01-01 00:00:00' as timestamp) as date) from src tablesample (1 rows);
select * from date_1 limit 1;
select d, count(d) from date_1 group by d;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_2.q
index 0821e012ba..c5346c87dd 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_2.q
@@ -8,7 +8,7 @@ create table date_2 (
FL_NUM int
);
-LOAD DATA LOCAL INPATH '../data/files/flights_tiny.txt.1' OVERWRITE INTO TABLE date_2;
+LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt.1' OVERWRITE INTO TABLE date_2;
select fl_date, fl_num from date_2 order by fl_date asc, fl_num desc;
select fl_date, fl_num from date_2 order by fl_date desc, fl_num asc;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_3.q
index be25148445..383fb4e990 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_3.q
@@ -7,7 +7,7 @@ create table date_3 (
alter table date_3 add columns (c2 date);
insert overwrite table date_3
- select 1, cast(cast('2011-01-01 00:00:00' as timestamp) as date) from src limit 1;
+ select 1, cast(cast('2011-01-01 00:00:00' as timestamp) as date) from src tablesample (1 rows);
select * from date_3;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_4.q
index 4801a79b87..c840089f2e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_4.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_4.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
drop table date_4;
create table date_4 (d date);
@@ -5,7 +7,7 @@ alter table date_4 set serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-- Test date literal syntax
insert overwrite table date_4
- select date '2011-01-01' from src limit 1;
+ select date '2011-01-01' from src tablesample (1 rows);
select d, date '2011-01-01' from date_4 limit 1;
drop table date_4;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_comparison.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_comparison.q
index bdcb6c1b6e..86c7362e29 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_comparison.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_comparison.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
-- Comparisons against same value
select cast('2011-05-06' as date) >
cast('2011-05-06' as date) from src limit 1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_join1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_join1.q
index a5844b76e9..34bb8c8990 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_join1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_join1.q
@@ -8,7 +8,7 @@ create table date_join1 (
FL_NUM int
);
-LOAD DATA LOCAL INPATH '../data/files/flights_join.txt' OVERWRITE INTO TABLE date_join1;
+LOAD DATA LOCAL INPATH '../../data/files/flights_join.txt' OVERWRITE INTO TABLE date_join1;
-- Note that there are 2 rows with date 2000-11-28, so we should expect 4 rows with that date in the join results
select t1.fl_num, t1.fl_date, t2.fl_num, t2.fl_date
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_serde.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_serde.q
index ffc06d270d..24b4820680 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_serde.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_serde.q
@@ -22,7 +22,7 @@ with serdeproperties (
)
stored as textfile;
-load data local inpath '../data/files/flights_tiny.txt.1' overwrite into table date_serde_regex;
+load data local inpath '../../data/files/flights_tiny.txt.1' overwrite into table date_serde_regex;
select * from date_serde_regex;
select fl_date, count(*) from date_serde_regex group by fl_date;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_udf.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_udf.q
index 9696320a85..c55b9f9147 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_udf.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/date_udf.q
@@ -17,7 +17,7 @@ create table date_udf_flight (
ARR_DELAY float,
FL_NUM int
);
-LOAD DATA LOCAL INPATH '../data/files/flights_tiny.txt.1' OVERWRITE INTO TABLE date_udf_flight;
+LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt.1' OVERWRITE INTO TABLE date_udf_flight;
-- Test UDFs with date input
select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d),
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
new file mode 100644
index 0000000000..6612fe8bab
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
@@ -0,0 +1,12 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.txn.testing=true;
+
+create table T1(key string, val string) stored as textfile;
+
+set hive.txn.testing=true;
+alter table T1 compact 'major';
+
+alter table T1 compact 'minor';
+
+drop table T1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q
new file mode 100644
index 0000000000..599cad9afc
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q
@@ -0,0 +1,14 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.txn.testing=true;
+
+create table T1(key string, val string) partitioned by (ds string) stored as textfile;
+
+alter table T1 add partition (ds = 'today');
+alter table T1 add partition (ds = 'yesterday');
+
+alter table T1 partition (ds = 'today') compact 'major';
+
+alter table T1 partition (ds = 'yesterday') compact 'minor';
+
+drop table T1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q
new file mode 100644
index 0000000000..871d292a59
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q
@@ -0,0 +1,15 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.txn.testing=true;
+
+create database D1;
+
+use D1;
+
+create table T1(key string, val string) stored as textfile;
+
+alter table T1 compact 'major';
+
+alter table T1 compact 'minor';
+
+drop table T1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_ddl1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_ddl1.q
new file mode 100644
index 0000000000..3126bd6e54
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_ddl1.q
@@ -0,0 +1,59 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+create database D1;
+
+alter database D1 set dbproperties('test'='yesthisis');
+
+drop database D1;
+
+create table T1(key string, val string) stored as textfile;
+
+create table T2 like T1;
+
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+
+select * from T1;
+
+create table T3 as select * from T1;
+
+create table T4 (key char(10), val decimal(5,2), b int)
+ partitioned by (ds string)
+ clustered by (b) into 10 buckets
+ stored as orc;
+
+alter table T3 rename to newT3;
+
+alter table T2 set tblproperties ('test'='thisisatest');
+
+alter table T2 set serde 'org.apache.hadoop.hive.ql.io.orc.OrcSerde';
+alter table T2 set serdeproperties ('test'='thisisatest');
+
+alter table T2 clustered by (key) into 32 buckets;
+
+alter table T4 add partition (ds='today');
+
+alter table T4 partition (ds='today') rename to partition(ds='yesterday');
+
+alter table T4 drop partition (ds='yesterday');
+
+alter table T4 add partition (ds='tomorrow');
+
+create table T5 (a string, b int);
+alter table T5 set fileformat orc;
+
+create table T7 (a string, b int);
+alter table T7 set location 'file:///tmp';
+
+alter table T2 touch;
+alter table T4 touch partition (ds='tomorrow');
+
+create view V1 as select key from T1;
+alter view V1 set tblproperties ('test'='thisisatest');
+drop view V1;
+
+
+
+drop table T1;
+drop table T2;
+drop table newT3;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query1.q
new file mode 100644
index 0000000000..970069aca6
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query1.q
@@ -0,0 +1,17 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+create table T1(key string, val string) stored as textfile;
+
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+
+select * from T1;
+
+create table T2(key string, val string) stored as textfile;
+
+insert into table T2 select * from T1;
+
+select * from T2;
+
+drop table T1;
+drop table T2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query2.q
new file mode 100644
index 0000000000..00942e5357
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query2.q
@@ -0,0 +1,17 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+create table T1(key string, val string) stored as textfile;
+
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+
+select * from T1;
+
+create table T2(key string, val string) stored as textfile;
+
+insert overwrite table T2 select * from T1;
+
+select * from T2;
+
+drop table T1;
+drop table T2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query3.q
new file mode 100644
index 0000000000..75b642b549
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query3.q
@@ -0,0 +1,21 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+create table T1(key string, val string) stored as textfile;
+
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+
+select * from T1;
+
+create table T2(key string, val string) partitioned by (pval string) stored as textfile;
+
+insert into table T2 partition (pval = '1') select * from T1;
+
+select * from T2;
+
+insert overwrite table T2 partition (pval = '1') select * from T1;
+
+select * from T2;
+
+drop table T1;
+drop table T2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query4.q
new file mode 100644
index 0000000000..57eb4424ea
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query4.q
@@ -0,0 +1,19 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.exec.dynamic.partition=true;
+
+create table T1(key string, val string) stored as textfile;
+
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+
+select * from T1;
+
+create table T2(key string) partitioned by (val string) stored as textfile;
+
+insert overwrite table T2 partition (val) select key, val from T1;
+
+select * from T2;
+
+drop table T1;
+drop table T2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query5.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query5.q
new file mode 100644
index 0000000000..d22b98fd7d
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_query5.q
@@ -0,0 +1,24 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+create database foo;
+
+use foo;
+
+create table T1(key string, val string) partitioned by (ds string) stored as textfile;
+
+alter table T1 add partition (ds='today');
+
+create view V1 as select key from T1;
+
+show tables;
+
+describe T1;
+
+drop view V1;
+
+drop table T1;
+
+show databases;
+
+drop database foo;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q
new file mode 100644
index 0000000000..7c71fdd9d2
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dbtxnmgr_showlocks.q
@@ -0,0 +1,11 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.txn.testing=true;
+
+show locks;
+
+show locks extended;
+
+show locks default;
+
+show transactions;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_1.q
index 6c689e188a..f52b1923eb 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_1.q
@@ -1,18 +1,22 @@
-drop table decimal_1;
+set hive.fetch.task.conversion=more;
+
+drop table if exists decimal_1;
-create table decimal_1 (t decimal);
+create table decimal_1 (t decimal(4,2), u decimal(5), v decimal);
alter table decimal_1 set serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe';
+desc decimal_1;
+
insert overwrite table decimal_1
- select cast('17.29' as decimal) from src limit 1;
-select cast(t as boolean) from decimal_1 limit 1;
-select cast(t as tinyint) from decimal_1 limit 1;
-select cast(t as smallint) from decimal_1 limit 1;
-select cast(t as int) from decimal_1 limit 1;
-select cast(t as bigint) from decimal_1 limit 1;
-select cast(t as float) from decimal_1 limit 1;
-select cast(t as double) from decimal_1 limit 1;
-select cast(t as string) from decimal_1 limit 1;
-select cast(t as timestamp) from decimal_1 limit 1;
+ select cast('17.29' as decimal(4,2)), 3.1415926BD, 3115926.54321BD from src tablesample (1 rows);
+select cast(t as boolean) from decimal_1;
+select cast(t as tinyint) from decimal_1;
+select cast(t as smallint) from decimal_1;
+select cast(t as int) from decimal_1;
+select cast(t as bigint) from decimal_1;
+select cast(t as float) from decimal_1;
+select cast(t as double) from decimal_1;
+select cast(t as string) from decimal_1;
+select cast(t as timestamp) from decimal_1;
drop table decimal_1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_2.q
index 4890618a0d..2c4d919079 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_2.q
@@ -1,40 +1,42 @@
+set hive.fetch.task.conversion=more;
+
drop table decimal_2;
-create table decimal_2 (t decimal);
+create table decimal_2 (t decimal(18,9));
alter table decimal_2 set serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe';
insert overwrite table decimal_2
- select cast('17.29' as decimal) from src limit 1;
+ select cast('17.29' as decimal(4,2)) from src tablesample (1 rows);
-select cast(t as boolean) from decimal_2 limit 1;
-select cast(t as tinyint) from decimal_2 limit 1;
-select cast(t as smallint) from decimal_2 limit 1;
-select cast(t as int) from decimal_2 limit 1;
-select cast(t as bigint) from decimal_2 limit 1;
-select cast(t as float) from decimal_2 limit 1;
-select cast(t as double) from decimal_2 limit 1;
-select cast(t as string) from decimal_2 limit 1;
+select cast(t as boolean) from decimal_2;
+select cast(t as tinyint) from decimal_2;
+select cast(t as smallint) from decimal_2;
+select cast(t as int) from decimal_2;
+select cast(t as bigint) from decimal_2;
+select cast(t as float) from decimal_2;
+select cast(t as double) from decimal_2;
+select cast(t as string) from decimal_2;
insert overwrite table decimal_2
- select cast('3404045.5044003' as decimal) from src limit 1;
+ select cast('3404045.5044003' as decimal(18,9)) from src tablesample (1 rows);
-select cast(t as boolean) from decimal_2 limit 1;
-select cast(t as tinyint) from decimal_2 limit 1;
-select cast(t as smallint) from decimal_2 limit 1;
-select cast(t as int) from decimal_2 limit 1;
-select cast(t as bigint) from decimal_2 limit 1;
-select cast(t as float) from decimal_2 limit 1;
-select cast(t as double) from decimal_2 limit 1;
-select cast(t as string) from decimal_2 limit 1;
+select cast(t as boolean) from decimal_2;
+select cast(t as tinyint) from decimal_2;
+select cast(t as smallint) from decimal_2;
+select cast(t as int) from decimal_2;
+select cast(t as bigint) from decimal_2;
+select cast(t as float) from decimal_2;
+select cast(t as double) from decimal_2;
+select cast(t as string) from decimal_2;
-select cast(3.14 as decimal) from decimal_2 limit 1;
-select cast(cast(3.14 as float) as decimal) from decimal_2 limit 1;
-select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as decimal) from decimal_2 limit 1;
-select cast(true as decimal) from decimal_2 limit 1;
-select cast(3Y as decimal) from decimal_2 limit 1;
-select cast(3S as decimal) from decimal_2 limit 1;
-select cast(cast(3 as int) as decimal) from decimal_2 limit 1;
-select cast(3L as decimal) from decimal_2 limit 1;
-select cast(0.99999999999999999999 as decimal) from decimal_2 limit 1;
-select cast('0.99999999999999999999' as decimal) from decimal_2 limit 1;
+select cast(3.14 as decimal(4,2)) from decimal_2;
+select cast(cast(3.14 as float) as decimal(4,2)) from decimal_2;
+select cast(cast('2012-12-19 11:12:19.1234567' as timestamp) as decimal(30,8)) from decimal_2;
+select cast(true as decimal) from decimal_2;
+select cast(3Y as decimal) from decimal_2;
+select cast(3S as decimal) from decimal_2;
+select cast(cast(3 as int) as decimal) from decimal_2;
+select cast(3L as decimal) from decimal_2;
+select cast(0.99999999999999999999 as decimal(20,19)) from decimal_2;
+select cast('0.99999999999999999999' as decimal(20,20)) from decimal_2;
drop table decimal_2;
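The precision and scale added to these casts matter because, in Hive 0.13, an unparameterized decimal defaults to decimal(10,0), so a bare cast(... as decimal) silently loses every fractional digit. A hedged illustration (the expected values follow the documented default, not output captured from this test):

-- plain decimal behaves as decimal(10,0): scale 0, so the fraction is rounded away
select cast(0.99999999999999999999 as decimal) from src tablesample (1 rows);
-- decimal(20,20) has room for all twenty fractional digits
select cast(0.99999999999999999999 as decimal(20,20)) from src tablesample (1 rows);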
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_3.q
index 28211e3f14..e4fba06fea 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_3.q
@@ -1,11 +1,11 @@
DROP TABLE IF EXISTS DECIMAL_3;
-CREATE TABLE DECIMAL_3(key decimal, value int)
+CREATE TABLE DECIMAL_3(key decimal(38,18), value int)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ' '
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv7.txt' INTO TABLE DECIMAL_3;
+LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_3;
SELECT * FROM DECIMAL_3 ORDER BY key, value;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_4.q
index e8a89c131c..699ba3cb4f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_4.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_4.q
@@ -1,15 +1,15 @@
DROP TABLE IF EXISTS DECIMAL_4_1;
DROP TABLE IF EXISTS DECIMAL_4_2;
-CREATE TABLE DECIMAL_4_1(key decimal, value int)
+CREATE TABLE DECIMAL_4_1(key decimal(35,25), value int)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ' '
STORED AS TEXTFILE;
-CREATE TABLE DECIMAL_4_2(key decimal, value decimal)
+CREATE TABLE DECIMAL_4_2(key decimal(35,25), value decimal(35,25))
STORED AS ORC;
-LOAD DATA LOCAL INPATH '../data/files/kv7.txt' INTO TABLE DECIMAL_4_1;
+LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_4_1;
INSERT OVERWRITE TABLE DECIMAL_4_2 SELECT key, key * 3 FROM DECIMAL_4_1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_5.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_5.q
new file mode 100644
index 0000000000..70e5db0f70
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_5.q
@@ -0,0 +1,18 @@
+DROP TABLE IF EXISTS DECIMAL_5;
+
+CREATE TABLE DECIMAL_5(key decimal(10,5), value int)
+ROW FORMAT DELIMITED
+ FIELDS TERMINATED BY ' '
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_5;
+
+SELECT key FROM DECIMAL_5 ORDER BY key;
+
+SELECT DISTINCT key FROM DECIMAL_5 ORDER BY key;
+
+SELECT cast(key as decimal) FROM DECIMAL_5;
+
+SELECT cast(key as decimal(6,3)) FROM DECIMAL_5;
+
+DROP TABLE DECIMAL_5;
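The last two queries exercise widening and narrowing casts on the same column. As far as I can tell from Hive's decimal semantics, a value that does not fit the target precision and scale comes back as NULL rather than raising an error, so cast(key as decimal(6,3)) should null out any key with more than three integer digits. A short sketch (the NULL-on-overflow behavior is the assumption here):

select cast(123.456BD as decimal(6,3)) from src tablesample (1 rows);  -- fits exactly
select cast(1234.5BD as decimal(6,3)) from src tablesample (1 rows);   -- four integer digits: expected NULL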
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_6.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_6.q
new file mode 100644
index 0000000000..b58e224256
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_6.q
@@ -0,0 +1,27 @@
+DROP TABLE IF EXISTS DECIMAL_6_1;
+DROP TABLE IF EXISTS DECIMAL_6_2;
+DROP TABLE IF EXISTS DECIMAL_6_3;
+
+CREATE TABLE DECIMAL_6_1(key decimal(10,5), value int)
+ROW FORMAT DELIMITED
+ FIELDS TERMINATED BY ' '
+STORED AS TEXTFILE;
+
+CREATE TABLE DECIMAL_6_2(key decimal(17,4), value int)
+ROW FORMAT DELIMITED
+ FIELDS TERMINATED BY ' '
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_1;
+LOAD DATA LOCAL INPATH '../../data/files/kv9.txt' INTO TABLE DECIMAL_6_2;
+
+SELECT T.key from (
+ SELECT key, value from DECIMAL_6_1
+ UNION ALL
+ SELECT key, value from DECIMAL_6_2
+) T order by T.key;
+
+CREATE TABLE DECIMAL_6_3 AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1 ORDER BY v;
+
+desc DECIMAL_6_3;
+
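The CTAS at the end of this file checks the type Hive derives for decimal arithmetic. Under the SQL-standard rule Hive 0.13 adopts, decimal(p1,s1) + decimal(p2,s2) yields decimal(max(s1,s2) + max(p1-s1, p2-s2) + 1, max(s1,s2)); treating the literal 5.5 as decimal(2,1), key + 5.5 over a decimal(10,5) key should come out as decimal(11,5), which is what the desc above would confirm. A reduced sketch (the derived type is my reading of the rule, not captured output):

-- k should be typed decimal(11,5): five fractional digits, five integer digits, plus one carry digit
create table dec_add_demo as
  select cast(1.5 as decimal(10,5)) + 5.5 as k from src tablesample (1 rows);
desc dec_add_demo;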
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_join.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_join.q
index 589fc6597d..86c14d9351 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_join.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_join.q
@@ -1,6 +1,6 @@
-- HIVE-5292 Join on decimal columns fails
-create table src_dec (key decimal, value string);
-load data local inpath '../data/files/kv1.txt' into table src_dec;
+create table src_dec (key decimal(3,0), value string);
+load data local inpath '../../data/files/kv1.txt' into table src_dec;
select * from src_dec a join src_dec b on a.key=b.key+450;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_precision.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_precision.q
index 403c2be3fb..739352f9ef 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_precision.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_precision.q
@@ -1,11 +1,11 @@
DROP TABLE IF EXISTS DECIMAL_PRECISION;
-CREATE TABLE DECIMAL_PRECISION(dec decimal)
+CREATE TABLE DECIMAL_PRECISION(dec decimal(20,10))
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ' '
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv8.txt' INTO TABLE DECIMAL_PRECISION;
+LOAD DATA LOCAL INPATH '../../data/files/kv8.txt' INTO TABLE DECIMAL_PRECISION;
SELECT * FROM DECIMAL_PRECISION ORDER BY dec;
@@ -15,13 +15,14 @@ SELECT dec, dec / 9 FROM DECIMAL_PRECISION ORDER BY dec;
SELECT dec, dec / 27 FROM DECIMAL_PRECISION ORDER BY dec;
SELECT dec, dec * dec FROM DECIMAL_PRECISION ORDER BY dec;
+EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION;
SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION;
-SELECT dec * cast('123456789012345678901234567890.123456789' as decimal) FROM DECIMAL_PRECISION LIMIT 1;
-SELECT * from DECIMAL_PRECISION WHERE dec > cast('123456789012345678901234567890.123456789' as decimal) LIMIT 1;
-SELECT dec * 123456789012345678901234567890.123456789 FROM DECIMAL_PRECISION LIMIT 1;
+SELECT dec * cast('12345678901234567890.12345678' as decimal(38,18)) FROM DECIMAL_PRECISION LIMIT 1;
+SELECT * from DECIMAL_PRECISION WHERE dec > cast('1234567890123456789012345678.12345678' as decimal(38,18)) LIMIT 1;
+SELECT dec * 12345678901234567890.12345678 FROM DECIMAL_PRECISION LIMIT 1;
-SELECT MIN(cast('123456789012345678901234567890.123456789' as decimal)) FROM DECIMAL_PRECISION;
-SELECT COUNT(cast('123456789012345678901234567890.123456789' as decimal)) FROM DECIMAL_PRECISION;
+SELECT MIN(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION;
+SELECT COUNT(cast('12345678901234567890.12345678' as decimal(38,18))) FROM DECIMAL_PRECISION;
DROP TABLE DECIMAL_PRECISION;
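The shortened literals here are forced by Hive 0.13's cap of 38 on decimal precision: decimal(38,18) leaves only 20 digits before the point, so the old literal '123456789012345678901234567890.123456789', with 30 integer digits, is no longer representable, and the test now uses a 20-digit one. A quick sketch of that boundary (the NULL on overflow is my assumption):

select cast('12345678901234567890.12345678' as decimal(38,18)) from src tablesample (1 rows);  -- 20 integer digits: fits
select cast('123456789012345678901.1' as decimal(38,18)) from src tablesample (1 rows);        -- 21 integer digits: expected NULL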
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_serde.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_serde.q
index 3556807705..cf3a86cd4d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_serde.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_serde.q
@@ -8,7 +8,7 @@ ROW FORMAT DELIMITED
FIELDS TERMINATED BY ' '
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv7.txt' INTO TABLE DECIMAL_TEXT;
+LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_TEXT;
SELECT * FROM DECIMAL_TEXT ORDER BY key, value;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_udf.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_udf.q
index b5ff088d16..0c9f1b86a9 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_udf.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/decimal_udf.q
@@ -1,11 +1,13 @@
+set hive.fetch.task.conversion=more;
+
DROP TABLE IF EXISTS DECIMAL_UDF;
-CREATE TABLE DECIMAL_UDF (key decimal, value int)
+CREATE TABLE DECIMAL_UDF (key decimal(20,10), value int)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ' '
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv7.txt' INTO TABLE DECIMAL_UDF;
+LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF;
-- addition
EXPLAIN SELECT key + key FROM DECIMAL_UDF;
@@ -70,8 +72,8 @@ EXPLAIN SELECT abs(key) FROM DECIMAL_UDF;
SELECT abs(key) FROM DECIMAL_UDF;
-- avg
-EXPLAIN SELECT value, sum(key) / count(key), avg(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value;
-SELECT value, sum(key) / count(key), avg(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value;
+EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value;
+SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value;
-- negative
EXPLAIN SELECT -key FROM DECIMAL_UDF;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/delimiter.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/delimiter.q
index 112ac57c3b..14d508c07d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/delimiter.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/delimiter.q
@@ -3,7 +3,7 @@ row format delimited
fields terminated by '\t'
lines terminated by '\n'
stored as textfile;
-LOAD DATA LOCAL INPATH '../data/files/in7.txt' INTO TABLE impressions;
+LOAD DATA LOCAL INPATH '../../data/files/in7.txt' INTO TABLE impressions;
select * from impressions;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/desc_tbl_part_cols.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/desc_tbl_part_cols.q
new file mode 100644
index 0000000000..89e49311fa
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/desc_tbl_part_cols.q
@@ -0,0 +1,7 @@
+create table t1 (a int, b string) partitioned by (c int, d string);
+describe t1;
+
+set hive.display.partition.cols.separately=false;
+describe t1;
+
+set hive.display.partition.cols.separately=true;
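This new test toggles hive.display.partition.cols.separately, which controls whether describe repeats the partition columns under a dedicated "# Partition Information" section (true, the default) or lists them only inline with the regular columns (false). Roughly, for the table above (a sketch of the output shape, not captured golden-file text):

describe t1;
-- with the flag true, the listing is expected to end with:
-- # Partition Information
-- # col_name   data_type   comment
-- c            int
-- d            string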
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/disable_file_format_check.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/disable_file_format_check.q
index 6ea4156b34..81a5b3a6e6 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/disable_file_format_check.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/disable_file_format_check.q
@@ -1,9 +1,9 @@
set hive.fileformat.check = false;
create table kv_fileformat_check_txt (key string, value string) stored as textfile;
-load data local inpath '../data/files/kv1.seq' overwrite into table kv_fileformat_check_txt;
+load data local inpath '../../data/files/kv1.seq' overwrite into table kv_fileformat_check_txt;
create table kv_fileformat_check_seq (key string, value string) stored as sequencefile;
-load data local inpath '../data/files/kv1.txt' overwrite into table kv_fileformat_check_seq;
+load data local inpath '../../data/files/kv1.txt' overwrite into table kv_fileformat_check_seq;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/disallow_incompatible_type_change_off.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/disallow_incompatible_type_change_off.q
index 2a1e7276ca..991b930d54 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/disallow_incompatible_type_change_off.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/disallow_incompatible_type_change_off.q
@@ -1,7 +1,9 @@
+set hive.fetch.task.conversion=more;
+
SET hive.metastore.disallow.incompatible.col.type.changes=false;
SELECT * FROM src LIMIT 1;
CREATE TABLE test_table123 (a INT, b MAP<STRING, STRING>) PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE;
-INSERT OVERWRITE TABLE test_table123 PARTITION(ds="foo1") SELECT 1, MAP("a1", "b1") FROM src LIMIT 1;
+INSERT OVERWRITE TABLE test_table123 PARTITION(ds="foo1") SELECT 1, MAP("a1", "b1") FROM src tablesample (1 rows);
SELECT * from test_table123 WHERE ds="foo1";
-- This should now work as hive.metastore.disallow.incompatible.col.type.changes is false
ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/distinct_stats.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/distinct_stats.q
new file mode 100644
index 0000000000..725183380b
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/distinct_stats.q
@@ -0,0 +1,20 @@
+set hive.stats.autogather=true;
+
+set hive.compute.query.using.stats=true;
+create table t1 (a string, b string);
+
+insert into table t1 select * from src;
+
+analyze table t1 compute statistics for columns a,b;
+
+explain
+select count(distinct b) from t1 group by a;
+
+explain
+select distinct(b) from t1;
+
+explain
+select a, count(*) from t1 group by a;
+
+drop table t1;
+set hive.compute.query.using.stats = false;
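hive.compute.query.using.stats lets Hive answer simple aggregates such as count(*), min, and max directly from metastore statistics instead of launching a job, which is why this test gathers column stats and then only runs EXPLAIN: the point is whether the plan short-circuits. The same flow in miniature, reusing the t1 schema above (the plan shape is an assumption):

set hive.compute.query.using.stats=true;
analyze table t1 compute statistics for columns a,b;
-- if the stored stats suffice, the plan should contain no map-reduce stage
explain select count(*) from t1;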
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/drop_partitions_filter2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/drop_partitions_filter2.q
index 798aa6d51a..54e6a35b5a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/drop_partitions_filter2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/drop_partitions_filter2.q
@@ -6,7 +6,7 @@ alter table ptestfilter add partition (c=1, d=2);
alter table ptestFilter add partition (c=2, d=1);
alter table ptestfilter add partition (c=2, d=2);
alter table ptestfilter add partition (c=3, d=1);
-alter table ptestfilter add partition (c=3, d=2);
+alter table ptestfilter add partition (c=30, d=2);
show partitions ptestfilter;
alter table ptestfilter drop partition (c=1, d=1);
@@ -15,6 +15,9 @@ show partitions ptestfilter;
alter table ptestfilter drop partition (c=2);
show partitions ptestfilter;
+alter table ptestfilter drop partition (c<4);
+show partitions ptestfilter;
+
drop table ptestfilter;
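The added drop with (c<4) exercises dropping partitions by comparator rather than by exact value: as I read the feature, a non-equality spec removes every remaining partition whose key satisfies the predicate, which is also why one partition was changed to c=30 above, so that something survives the drop. In isolation:

-- removes every remaining partition with c below 4; (c=30, d=2) stays behind
alter table ptestfilter drop partition (c<4);
show partitions ptestfilter;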
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/drop_with_concurrency.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/drop_with_concurrency.q
new file mode 100644
index 0000000000..797a27c23b
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/drop_with_concurrency.q
@@ -0,0 +1,8 @@
+set hive.lock.numretries=1;
+set hive.lock.sleep.between.retries=1;
+set hive.support.concurrency=true;
+set hive.lock.manager=org.apache.hadoop.hive.ql.lockmgr.EmbeddedLockManager;
+
+drop table if exists drop_with_concurrency_1;
+create table drop_with_concurrency_1 (c1 int);
+drop table drop_with_concurrency_1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dynamic_partition_skip_default.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dynamic_partition_skip_default.q
index 397a2200a8..699e58effc 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dynamic_partition_skip_default.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dynamic_partition_skip_default.q
@@ -1,19 +1,19 @@
-create table dynamic_part_table(intcol int) partitioned by (partcol1 int, partcol2 int);
+create table dynamic_part_table(intcol string) partitioned by (partcol1 string, partcol2 string);
set hive.exec.dynamic.partition.mode=nonstrict;
-insert into table dynamic_part_table partition(partcol1, partcol2) select 1, 1, 1 from src where key=150;
+insert into table dynamic_part_table partition(partcol1, partcol2) select '1', '1', '1' from src where key=150;
-insert into table dynamic_part_table partition(partcol1, partcol2) select 1, NULL, 1 from src where key=150;
+insert into table dynamic_part_table partition(partcol1, partcol2) select '1', NULL, '1' from src where key=150;
-insert into table dynamic_part_table partition(partcol1, partcol2) select 1, 1, NULL from src where key=150;
+insert into table dynamic_part_table partition(partcol1, partcol2) select '1', '1', NULL from src where key=150;
-insert into table dynamic_part_table partition(partcol1, partcol2) select 1, NULL, NULL from src where key=150;
+insert into table dynamic_part_table partition(partcol1, partcol2) select '1', NULL, NULL from src where key=150;
-explain extended select intcol from dynamic_part_table where partcol1=1 and partcol2=1;
+explain extended select intcol from dynamic_part_table where partcol1='1' and partcol2='1';
set hive.exec.dynamic.partition.mode=strict;
-explain extended select intcol from dynamic_part_table where partcol1=1 and partcol2=1;
+explain extended select intcol from dynamic_part_table where partcol1='1' and partcol2='1';
-explain extended select intcol from dynamic_part_table where (partcol1=1 and partcol2=1)or (partcol1=1 and partcol2='__HIVE_DEFAULT_PARTITION__');
+explain extended select intcol from dynamic_part_table where (partcol1='1' and partcol2='1') or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__');
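__HIVE_DEFAULT_PARTITION__ is the name Hive substitutes (configurable via hive.exec.default.partition.name) when a dynamic-partition column evaluates to NULL, which is exactly what the NULL inserts above produce and what the final explain filters on. In short, a sketch reusing the test's table:

set hive.exec.dynamic.partition.mode=nonstrict;
-- a NULL partcol2 lands in the partition partcol2=__HIVE_DEFAULT_PARTITION__
insert into table dynamic_part_table partition(partcol1, partcol2)
  select '1', '1', NULL from src where key=150;
select intcol from dynamic_part_table where partcol2='__HIVE_DEFAULT_PARTITION__';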
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
new file mode 100644
index 0000000000..5f1a5ce809
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
@@ -0,0 +1,161 @@
+set hive.optimize.sort.dynamic.partition=true;
+set hive.exec.dynamic.partition=true;
+set hive.exec.max.dynamic.partitions=1000;
+set hive.exec.max.dynamic.partitions.pernode=1000;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.vectorized.execution.enabled=true;
+set hive.enforce.bucketing=false;
+set hive.enforce.sorting=false;
+
+create table over1k(
+ t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+ row format delimited
+ fields terminated by '|';
+
+load data local inpath '../../data/files/over1k' into table over1k;
+
+create table over1k_orc like over1k;
+alter table over1k_orc set fileformat orc;
+insert overwrite table over1k_orc select * from over1k;
+
+create table over1k_part_orc(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (ds string, t tinyint) stored as orc;
+
+create table over1k_part_limit_orc like over1k_part_orc;
+alter table over1k_part_limit_orc set fileformat orc;
+
+create table over1k_part_buck_orc(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (t tinyint)
+ clustered by (si) into 4 buckets stored as orc;
+
+create table over1k_part_buck_sort_orc(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (t tinyint)
+ clustered by (si)
+ sorted by (f) into 4 buckets stored as orc;
+
+-- map-only jobs are converted to map-reduce jobs by the hive.optimize.sort.dynamic.partition optimization
+explain insert overwrite table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si;
+explain insert overwrite table over1k_part_limit_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 limit 10;
+explain insert overwrite table over1k_part_buck_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27;
+explain insert overwrite table over1k_part_buck_sort_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27;
+
+insert overwrite table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si;
+insert overwrite table over1k_part_limit_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 limit 10;
+insert overwrite table over1k_part_buck_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27;
+insert overwrite table over1k_part_buck_sort_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27;
+
+set hive.enforce.bucketing=true;
+set hive.enforce.sorting=true;
+
+-- map-reduce jobs are modified by the hive.optimize.sort.dynamic.partition optimization
+explain insert into table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si;
+explain insert into table over1k_part_limit_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 limit 10;
+explain insert into table over1k_part_buck_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27;
+explain insert into table over1k_part_buck_sort_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27;
+
+insert into table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si;
+insert into table over1k_part_limit_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 limit 10;
+insert into table over1k_part_buck_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27;
+insert into table over1k_part_buck_sort_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27;
+
+desc formatted over1k_part_orc partition(ds="foo",t=27);
+desc formatted over1k_part_orc partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__");
+desc formatted over1k_part_limit_orc partition(ds="foo",t=27);
+desc formatted over1k_part_limit_orc partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__");
+desc formatted over1k_part_buck_orc partition(t=27);
+desc formatted over1k_part_buck_orc partition(t="__HIVE_DEFAULT_PARTITION__");
+desc formatted over1k_part_buck_sort_orc partition(t=27);
+desc formatted over1k_part_buck_sort_orc partition(t="__HIVE_DEFAULT_PARTITION__");
+
+select count(*) from over1k_part_orc;
+select count(*) from over1k_part_limit_orc;
+select count(*) from over1k_part_buck_orc;
+select count(*) from over1k_part_buck_sort_orc;
+
+-- tests for HIVE-6883
+create table over1k_part2_orc(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (ds string, t tinyint);
+
+set hive.optimize.sort.dynamic.partition=false;
+explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i;
+set hive.optimize.sort.dynamic.partition=true;
+explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i;
+
+set hive.optimize.sort.dynamic.partition=false;
+insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i;
+
+desc formatted over1k_part2_orc partition(ds="foo",t=27);
+desc formatted over1k_part2_orc partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__");
+
+select * from over1k_part2_orc;
+select count(*) from over1k_part2_orc;
+
+set hive.optimize.sort.dynamic.partition=true;
+insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i;
+
+desc formatted over1k_part2_orc partition(ds="foo",t=27);
+desc formatted over1k_part2_orc partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__");
+
+select * from over1k_part2_orc;
+select count(*) from over1k_part2_orc;
+
+-- hadoop-1 does not honor the number of reducers in local mode; there is always exactly one reducer, irrespective of the number of buckets.
+-- Hence all records go to one bucket and all other buckets are empty, similar to HIVE-6867. However, hadoop-2 honors the number
+-- of reducers, and records are spread across all of them. To avoid this inconsistency we set the number of buckets to 1 for this test.
+create table over1k_part_buck_sort2_orc(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (t tinyint)
+ clustered by (si)
+ sorted by (f) into 1 buckets;
+
+set hive.optimize.sort.dynamic.partition=false;
+explain insert overwrite table over1k_part_buck_sort2_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27;
+set hive.optimize.sort.dynamic.partition=true;
+explain insert overwrite table over1k_part_buck_sort2_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27;
+
+set hive.optimize.sort.dynamic.partition=false;
+insert overwrite table over1k_part_buck_sort2_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27;
+
+desc formatted over1k_part_buck_sort2_orc partition(t=27);
+desc formatted over1k_part_buck_sort2_orc partition(t="__HIVE_DEFAULT_PARTITION__");
+
+select * from over1k_part_buck_sort2_orc;
+select count(*) from over1k_part_buck_sort2_orc;
+
+set hive.optimize.sort.dynamic.partition=true;
+insert overwrite table over1k_part_buck_sort2_orc partition(t) select si,i,b,f,t from over1k_orc where t is null or t=27;
+
+desc formatted over1k_part_buck_sort2_orc partition(t=27);
+desc formatted over1k_part_buck_sort2_orc partition(t="__HIVE_DEFAULT_PARTITION__");
+
+select * from over1k_part_buck_sort2_orc;
+select count(*) from over1k_part_buck_sort2_orc;
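For context, hive.optimize.sort.dynamic.partition (HIVE-6455) rewrites a dynamic-partition insert so that rows are shuffled and sorted on the partition (and bucket/sort) columns first; each reducer then writes its partitions sequentially and keeps a single record writer open at a time, instead of one per partition, which avoids memory exhaustion when an insert creates many partitions. The paired EXPLAINs above, with the optimization off and then on, exist to show that extra reduce stage appearing. Reduced to its core (the plan shape is the assumption):

set hive.exec.dynamic.partition.mode=nonstrict;
set hive.optimize.sort.dynamic.partition=true;
-- the plan should gain a reduce phase keyed on the dynamic partition column t
explain insert overwrite table over1k_part_orc partition(ds="foo", t)
  select si,i,b,f,t from over1k_orc where t is null or t=27;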
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
new file mode 100644
index 0000000000..52b5d1e0c1
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
@@ -0,0 +1,155 @@
+set hive.optimize.sort.dynamic.partition=true;
+set hive.exec.dynamic.partition=true;
+set hive.exec.max.dynamic.partitions=1000;
+set hive.exec.max.dynamic.partitions.pernode=1000;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.enforce.bucketing=false;
+set hive.enforce.sorting=false;
+
+create table over1k(
+ t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+ row format delimited
+ fields terminated by '|';
+
+load data local inpath '../../data/files/over1k' into table over1k;
+
+create table over1k_part(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (ds string, t tinyint);
+
+create table over1k_part_limit like over1k_part;
+
+create table over1k_part_buck(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (t tinyint)
+ clustered by (si) into 4 buckets;
+
+create table over1k_part_buck_sort(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (t tinyint)
+ clustered by (si)
+ sorted by (f) into 4 buckets;
+
+-- map-only jobs are converted to map-reduce jobs by the hive.optimize.sort.dynamic.partition optimization
+explain insert overwrite table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27;
+explain insert overwrite table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10;
+explain insert overwrite table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+explain insert overwrite table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+
+insert overwrite table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27;
+insert overwrite table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10;
+insert overwrite table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+insert overwrite table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+
+set hive.enforce.bucketing=true;
+set hive.enforce.sorting=true;
+
+-- map-reduce jobs are modified by the hive.optimize.sort.dynamic.partition optimization
+explain insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27;
+explain insert into table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10;
+explain insert into table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+explain insert into table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+
+insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27;
+insert into table over1k_part_limit partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 limit 10;
+insert into table over1k_part_buck partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+insert into table over1k_part_buck_sort partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+
+desc formatted over1k_part partition(ds="foo",t=27);
+desc formatted over1k_part partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__");
+desc formatted over1k_part_limit partition(ds="foo",t=27);
+desc formatted over1k_part_limit partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__");
+desc formatted over1k_part_buck partition(t=27);
+desc formatted over1k_part_buck partition(t="__HIVE_DEFAULT_PARTITION__");
+desc formatted over1k_part_buck_sort partition(t=27);
+desc formatted over1k_part_buck_sort partition(t="__HIVE_DEFAULT_PARTITION__");
+
+select count(*) from over1k_part;
+select count(*) from over1k_part_limit;
+select count(*) from over1k_part_buck;
+select count(*) from over1k_part_buck_sort;
+
+-- tests for HIVE-6883
+create table over1k_part2(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (ds string, t tinyint);
+
+set hive.optimize.sort.dynamic.partition=false;
+explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i;
+set hive.optimize.sort.dynamic.partition=true;
+explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i;
+
+set hive.optimize.sort.dynamic.partition=false;
+insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i;
+
+desc formatted over1k_part2 partition(ds="foo",t=27);
+desc formatted over1k_part2 partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__");
+
+select * from over1k_part2;
+select count(*) from over1k_part2;
+
+set hive.optimize.sort.dynamic.partition=true;
+insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i;
+
+desc formatted over1k_part2 partition(ds="foo",t=27);
+desc formatted over1k_part2 partition(ds="foo",t="__HIVE_DEFAULT_PARTITION__");
+
+select * from over1k_part2;
+select count(*) from over1k_part2;
+
+-- hadoop-1 does not honor the number of reducers in local mode; there is always exactly one reducer, irrespective of the number of buckets.
+-- Hence all records go to one bucket and all other buckets are empty, similar to HIVE-6867. However, hadoop-2 honors the number
+-- of reducers, and records are spread across all of them. To avoid this inconsistency we set the number of buckets to 1 for this test.
+create table over1k_part_buck_sort2(
+ si smallint,
+ i int,
+ b bigint,
+ f float)
+ partitioned by (t tinyint)
+ clustered by (si)
+ sorted by (f) into 1 buckets;
+
+set hive.optimize.sort.dynamic.partition=false;
+explain insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+set hive.optimize.sort.dynamic.partition=true;
+explain insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+
+set hive.optimize.sort.dynamic.partition=false;
+insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+
+desc formatted over1k_part_buck_sort2 partition(t=27);
+desc formatted over1k_part_buck_sort2 partition(t="__HIVE_DEFAULT_PARTITION__");
+
+select * from over1k_part_buck_sort2;
+select count(*) from over1k_part_buck_sort2;
+
+set hive.optimize.sort.dynamic.partition=true;
+insert overwrite table over1k_part_buck_sort2 partition(t) select si,i,b,f,t from over1k where t is null or t=27;
+
+desc formatted over1k_part_buck_sort2 partition(t=27);
+desc formatted over1k_part_buck_sort2 partition(t="__HIVE_DEFAULT_PARTITION__");
+
+select * from over1k_part_buck_sort2;
+select count(*) from over1k_part_buck_sort2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/escape1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/escape1.q
index a7f4cf79c9..d29a7a8f10 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/escape1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/escape1.q
@@ -8,7 +8,7 @@ DROP TABLE escape1;
DROP TABLE escape_raw;
CREATE TABLE escape_raw (s STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/escapetest.txt' INTO TABLE escape_raw;
+LOAD DATA LOCAL INPATH '../../data/files/escapetest.txt' INTO TABLE escape_raw;
SELECT count(*) from escape_raw;
SELECT * from escape_raw;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/escape2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/escape2.q
index 473cbf8c94..24601343b1 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/escape2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/escape2.q
@@ -10,7 +10,7 @@ DROP TABLE IF EXISTS escape2;
DROP TABLE IF EXISTS escape_raw;
CREATE TABLE escape_raw (s STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/escapetest.txt' INTO TABLE escape_raw;
+LOAD DATA LOCAL INPATH '../../data/files/escapetest.txt' INTO TABLE escape_raw;
SELECT count(*) from escape_raw;
SELECT * from escape_raw;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exchange_partition.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exchange_partition.q
index 6e8bf8ebc1..4be6e3f6d8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exchange_partition.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exchange_partition.q
@@ -3,7 +3,7 @@ CREATE TABLE exchange_part_test2 (f1 string) PARTITIONED BY (ds STRING);
SHOW PARTITIONS exchange_part_test1;
SHOW PARTITIONS exchange_part_test2;
-ALTER TABLE exchange_part_test1 ADD PARTITION (ds='2013-04-05');
+ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05');
SHOW PARTITIONS exchange_part_test1;
SHOW PARTITIONS exchange_part_test2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exchange_partition2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exchange_partition2.q
index 27b335a3d7..f346ddeec4 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exchange_partition2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exchange_partition2.q
@@ -3,7 +3,7 @@ CREATE TABLE exchange_part_test2 (f1 string) PARTITIONED BY (ds STRING, hr STRIN
SHOW PARTITIONS exchange_part_test1;
SHOW PARTITIONS exchange_part_test2;
-ALTER TABLE exchange_part_test1 ADD PARTITION (ds='2013-04-05', hr='1');
+ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05', hr='1');
SHOW PARTITIONS exchange_part_test1;
SHOW PARTITIONS exchange_part_test2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exchange_partition3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exchange_partition3.q
index 7b9060d420..7c076cebe8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exchange_partition3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exchange_partition3.q
@@ -3,8 +3,9 @@ CREATE TABLE exchange_part_test2 (f1 string) PARTITIONED BY (ds STRING, hr STRIN
SHOW PARTITIONS exchange_part_test1;
SHOW PARTITIONS exchange_part_test2;
-ALTER TABLE exchange_part_test1 ADD PARTITION (ds='2013-04-05', hr='1');
-ALTER TABLE exchange_part_test1 ADD PARTITION (ds='2013-04-05', hr='2');
+ALTER TABLE exchange_part_test1 ADD PARTITION (ds='2014-01-03', hr='1');
+ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05', hr='1');
+ALTER TABLE exchange_part_test2 ADD PARTITION (ds='2013-04-05', hr='2');
SHOW PARTITIONS exchange_part_test1;
SHOW PARTITIONS exchange_part_test2;
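All three exchange_partition tests were fixed to add the partition to exchange_part_test2 rather than exchange_part_test1. That matches the semantics of ALTER TABLE <dest> EXCHANGE PARTITION (<spec>) WITH TABLE <source> (HIVE-4095), which moves a partition's data from the source table into an identically-schemed destination: the partition must already exist in the source and must not yet exist in the destination. The exchange statement itself sits outside these hunks; presumably it has this shape:

-- moves ds='2013-04-05' out of exchange_part_test2 and into exchange_part_test1 (sketch)
alter table exchange_part_test1 exchange partition (ds='2013-04-05') with table exchange_part_test2;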
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_00_nonpart_empty.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_00_nonpart_empty.q
index 7fa96b629a..8288bbfd86 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_00_nonpart_empty.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_00_nonpart_empty.q
@@ -1,3 +1,6 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+
set hive.test.mode=true;
set hive.test.mode.prefix=;
set hive.test.mode.nosamplelist=exim_department,exim_employee;
@@ -5,8 +8,8 @@ set hive.test.mode.nosamplelist=exim_department,exim_employee;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -16,7 +19,7 @@ use importer;
import from 'ql/test/data/exports/exim_department';
describe extended exim_department;
show table extended like exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
select * from exim_department;
drop table exim_department;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_01_nonpart.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_01_nonpart.q
index 9920e778d1..1e2eed803a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_01_nonpart.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_01_nonpart.q
@@ -5,9 +5,9 @@ set hive.test.mode.nosamplelist=exim_department,exim_employee;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -17,7 +17,7 @@ use importer;
import from 'ql/test/data/exports/exim_department';
describe extended exim_department;
show table extended like exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
select * from exim_department;
drop table exim_department;
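Every exim_* test below follows the same EXPORT/IMPORT round trip, and the path churn in this part of the patch is simply that round trip relocating its scratch directory from the old ant layout (../build/ql/test/...) to the maven layout (target/tmp/ql/test/...). The pattern, stripped of the dfs bookkeeping (the importer database is created in context lines not shown in these hunks):

export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
create database importer;
use importer;
import from 'ql/test/data/exports/exim_department';
select * from exim_department;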
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_02_00_part_empty.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_02_00_part_empty.q
index 4017c83aa3..474a5a4a5b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_02_00_part_empty.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_02_00_part_empty.q
@@ -7,8 +7,8 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
@@ -18,7 +18,7 @@ use importer;
import from 'ql/test/data/exports/exim_employee';
describe extended exim_employee;
show table extended like exim_employee;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
select * from exim_employee;
drop table exim_employee;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_02_part.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_02_part.q
index 21138f0263..dbd2c6bf5c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_02_part.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_02_part.q
@@ -7,10 +7,10 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
@@ -20,7 +20,7 @@ use importer;
import from 'ql/test/data/exports/exim_employee';
describe extended exim_employee;
show table extended like exim_employee;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
select * from exim_employee;
drop table exim_employee;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_03_nonpart_over_compat.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_03_nonpart_over_compat.q
index 5f6bdee83c..47d949aa36 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_03_nonpart_over_compat.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_03_nonpart_over_compat.q
@@ -5,9 +5,9 @@ set hive.test.mode.nosamplelist=exim_department,exim_employee;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -21,6 +21,6 @@ import from 'ql/test/data/exports/exim_department';
describe extended exim_department;
select * from exim_department;
drop table exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_04_all_part.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_04_all_part.q
index 69c6faa30a..b2567fb270 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_04_all_part.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_04_all_part.q
@@ -7,16 +7,16 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="ka");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="ka");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
@@ -26,7 +26,7 @@ use importer;
import from 'ql/test/data/exports/exim_employee';
describe extended exim_employee;
show table extended like exim_employee;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
select * from exim_employee;
drop table exim_employee;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_04_evolved_parts.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_04_evolved_parts.q
index cdc02fa25c..82df69874b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_04_evolved_parts.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_04_evolved_parts.q
@@ -19,8 +19,8 @@ alter table exim_employee set fileformat
outputformat "org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat";
alter table exim_employee add partition (emp_country='in', emp_state='ka');
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
@@ -32,7 +32,7 @@ describe extended exim_employee;
describe extended exim_employee partition (emp_country='in', emp_state='tn');
describe extended exim_employee partition (emp_country='in', emp_state='ka');
show table extended like exim_employee;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
select * from exim_employee;
drop table exim_employee;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_05_some_part.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_05_some_part.q
index 50a59463b1..a2c977356c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_05_some_part.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_05_some_part.q
@@ -7,16 +7,16 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="ka");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="ka");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee partition (emp_state="ka") to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
@@ -26,7 +26,7 @@ use importer;
import from 'ql/test/data/exports/exim_employee';
describe extended exim_employee;
show table extended like exim_employee;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
select * from exim_employee;
drop table exim_employee;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_06_one_part.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_06_one_part.q
index 5136090929..3a61296422 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_06_one_part.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_06_one_part.q
@@ -7,16 +7,16 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="ka");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="ka");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee partition (emp_country="in",emp_state="ka") to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
@@ -26,7 +26,7 @@ use importer;
import from 'ql/test/data/exports/exim_employee';
describe extended exim_employee;
show table extended like exim_employee;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
select * from exim_employee;
drop table exim_employee;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_07_all_part_over_nonoverlap.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_07_all_part_over_nonoverlap.q
index 5b9d4ddc03..8c774d5a8b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_07_all_part_over_nonoverlap.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_07_all_part_over_nonoverlap.q
@@ -7,16 +7,16 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="ka");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="ka");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
@@ -28,12 +28,12 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "iso code", emp_state string comment "free-form text")
stored as textfile
tblproperties("maker"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="al");
import from 'ql/test/data/exports/exim_employee';
describe extended exim_employee;
select * from exim_employee;
drop table exim_employee;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_08_nonpart_rename.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_08_nonpart_rename.q
index 173f1569c5..8a1d945476 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_08_nonpart_rename.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_08_nonpart_rename.q
@@ -5,9 +5,9 @@ set hive.test.mode.nosamplelist=exim_department,exim_employee,exim_imported_dept
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -17,12 +17,12 @@ create table exim_department ( dep_id int comment "department id")
partitioned by (emp_org string)
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department partition (emp_org="hr");
+load data local inpath "../../data/files/test.dat" into table exim_department partition (emp_org="hr");
import table exim_imported_dept from 'ql/test/data/exports/exim_department';
describe extended exim_imported_dept;
select * from exim_imported_dept;
drop table exim_imported_dept;
drop table exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_09_part_spec_nonoverlap.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_09_part_spec_nonoverlap.q
index 178b76674d..53fc2936bb 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_09_part_spec_nonoverlap.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_09_part_spec_nonoverlap.q
@@ -7,16 +7,16 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="ka");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="ka");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
@@ -27,14 +27,14 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="ka");
import table exim_employee partition (emp_country="us", emp_state="tn") from 'ql/test/data/exports/exim_employee';
describe extended exim_employee;
select * from exim_employee;
drop table exim_employee;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_10_external_managed.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_10_external_managed.q
index 413f2aa476..54859eed19 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_10_external_managed.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_10_external_managed.q
@@ -2,18 +2,18 @@ set hive.test.mode=true;
set hive.test.mode.prefix=;
set hive.test.mode.nosamplelist=exim_department,exim_employee;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/tablestore/exim_department/temp;
-dfs -rmr ../build/ql/test/data/tablestore/exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_department;
create external table exim_department ( dep_id int comment "department id")
stored as textfile
location 'ql/test/data/tablestore/exim_department'
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
-dfs -rmr ../build/ql/test/data/tablestore/exim_department;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_department;
create database importer;
use importer;
@@ -22,6 +22,6 @@ import from 'ql/test/data/exports/exim_department';
describe extended exim_department;
select * from exim_department;
drop table exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_11_managed_external.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_11_managed_external.q
index f3b2896a32..4fc39dcff0 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_11_managed_external.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_11_managed_external.q
@@ -5,9 +5,9 @@ set hive.test.mode.nosamplelist=exim_department,exim_employee;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -17,7 +17,7 @@ use importer;
import external table exim_department from 'ql/test/data/exports/exim_department';
describe extended exim_department;
select * from exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
select * from exim_department;
drop table exim_department;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_12_external_location.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_12_external_location.q
index 37d063432e..e4d50ffe5b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_12_external_location.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_12_external_location.q
@@ -5,24 +5,24 @@ set hive.test.mode.nosamplelist=exim_department,exim_employee;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/ql/test/data/exports/exim_department/temp;
+dfs -rmr ${system:test.tmp.dir}/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
create database importer;
use importer;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/tablestore/exim_department/temp;
-dfs -rmr ../build/ql/test/data/tablestore/exim_department;
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/ql/test/data/tablestore/exim_department/temp;
+dfs -rmr ${system:test.tmp.dir}/ql/test/data/tablestore/exim_department;
import external table exim_department from 'ql/test/data/exports/exim_department'
location 'ql/test/data/tablestore/exim_department';
describe extended exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr ${system:test.tmp.dir}/ql/test/data/exports/exim_department;
select * from exim_department;
-dfs -rmr ../build/ql/test/data/tablestore/exim_department;
+dfs -rmr ${system:test.tmp.dir}/ql/test/data/tablestore/exim_department;
select * from exim_department;
drop table exim_department;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_13_managed_location.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_13_managed_location.q
index fb5058b840..909d23794b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_13_managed_location.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_13_managed_location.q
@@ -5,24 +5,24 @@ set hive.test.mode.nosamplelist=exim_department,exim_employee;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
create database importer;
use importer;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/tablestore/exim_department/temp;
-dfs -rmr ../build/ql/test/data/tablestore/exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_department;
import table exim_department from 'ql/test/data/exports/exim_department'
location 'ql/test/data/tablestore/exim_department';
describe extended exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
select * from exim_department;
-dfs -rmr ../build/ql/test/data/tablestore/exim_department;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_department;
select * from exim_department;
drop table exim_department;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_14_managed_location_over_existing.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_14_managed_location_over_existing.q
index 031b6bda6c..dbb5fd9343 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_14_managed_location_over_existing.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_14_managed_location_over_existing.q
@@ -5,17 +5,17 @@ set hive.test.mode.nosamplelist=exim_department,exim_employee;
create table exim_department ( dep_id int comment "department id")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
create database importer;
use importer;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/tablestore/exim_department/temp;
-dfs -rmr ../build/ql/test/data/tablestore/exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_department;
create table exim_department ( dep_id int comment "department id")
stored as textfile
@@ -24,9 +24,9 @@ create table exim_department ( dep_id int comment "department id")
import table exim_department from 'ql/test/data/exports/exim_department'
location 'ql/test/data/tablestore/exim_department';
describe extended exim_department;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
select * from exim_department;
-dfs -rmr ../build/ql/test/data/tablestore/exim_department;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_department;
select * from exim_department;
drop table exim_department;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_15_external_part.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_15_external_part.q
index ff088c70d7..989dd6cf56 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_15_external_part.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_15_external_part.q
@@ -7,24 +7,24 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="ka");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="ka");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
create database importer;
use importer;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/tablestore/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/tablestore/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee;
create external table exim_employee ( emp_id int comment "employee id")
comment "employee table"
@@ -32,17 +32,17 @@ create external table exim_employee ( emp_id int comment "employee id")
stored as textfile
location 'ql/test/data/tablestore/exim_employee'
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="ka");
import external table exim_employee partition (emp_country="us", emp_state="tn")
from 'ql/test/data/exports/exim_employee';
describe extended exim_employee;
select * from exim_employee;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
select * from exim_employee;
-dfs -rmr ../build/ql/test/data/tablestore/exim_employee;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee;
select * from exim_employee;
drop table exim_employee;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_16_part_external.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_16_part_external.q
index 6f4ee7a01c..7eec358850 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_16_part_external.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_16_part_external.q
@@ -7,26 +7,26 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="ka");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="ka");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
create database importer;
use importer;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/tablestore/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/tablestore/exim_employee;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/tablestore2/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/tablestore2/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore2/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/tablestore2/exim_employee;
create external table exim_employee ( emp_id int comment "employee id")
comment "employee table"
@@ -39,11 +39,11 @@ import table exim_employee partition (emp_country="us", emp_state="tn")
location 'ql/test/data/tablestore/exim_employee';
show table extended like exim_employee;
show table extended like exim_employee partition (emp_country="us", emp_state="tn");
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
select * from exim_employee;
-dfs -rmr ../build/ql/test/data/tablestore/exim_employee;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee;
select * from exim_employee;
drop table exim_employee;
-dfs -rmr ../build/ql/test/data/tablestore2/exim_employee;
+dfs -rmr target/tmp/ql/test/data/tablestore2/exim_employee;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_17_part_managed.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_17_part_managed.q
index 56ec152948..20cd7e0513 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_17_part_managed.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_17_part_managed.q
@@ -7,24 +7,24 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="ka");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="ka");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
create database importer;
use importer;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/tablestore/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/tablestore/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee;
create table exim_employee ( emp_id int comment "employee id")
comment "employee table"
@@ -39,9 +39,9 @@ alter table exim_employee add partition (emp_country="us", emp_state="ap")
show table extended like exim_employee;
show table extended like exim_employee partition (emp_country="us", emp_state="tn");
show table extended like exim_employee partition (emp_country="us", emp_state="ap");
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
select * from exim_employee;
-dfs -rmr ../build/ql/test/data/tablestore/exim_employee;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee;
select * from exim_employee;
drop table exim_employee;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_18_part_external.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_18_part_external.q
index 7aa1297dc7..a300b1dbf1 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_18_part_external.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_18_part_external.q
@@ -7,16 +7,16 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="ka");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="ka");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
@@ -29,7 +29,7 @@ describe extended exim_employee;
show table extended like exim_employee;
show table extended like exim_employee partition (emp_country="us", emp_state="tn");
select * from exim_employee;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
select * from exim_employee;
drop table exim_employee;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_19_00_part_external_location.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_19_00_part_external_location.q
index cb9f8efc08..a821c75d70 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_19_00_part_external_location.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_19_00_part_external_location.q
@@ -7,20 +7,20 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-load data local inpath "../data/files/test2.dat"
+load data local inpath "../../data/files/test2.dat"
into table exim_employee partition (emp_country="in", emp_state="ka");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
create database importer;
use importer;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/tablestore/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/tablestore/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee;
import external table exim_employee
from 'ql/test/data/exports/exim_employee'
@@ -29,9 +29,9 @@ describe extended exim_employee;
show table extended like exim_employee;
show table extended like exim_employee partition (emp_country="in", emp_state="tn");
show table extended like exim_employee partition (emp_country="in", emp_state="ka");
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
select * from exim_employee;
-dfs -rmr ../build/ql/test/data/tablestore/exim_employee;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee;
select * from exim_employee;
drop table exim_employee;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_19_part_external_location.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_19_part_external_location.q
index bdbd19df70..be1216453b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_19_part_external_location.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_19_part_external_location.q
@@ -7,24 +7,24 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="ka");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="ka");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
create database importer;
use importer;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/tablestore/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/tablestore/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee;
import external table exim_employee partition (emp_country="us", emp_state="tn")
from 'ql/test/data/exports/exim_employee'
@@ -32,9 +32,9 @@ import external table exim_employee partition (emp_country="us", emp_state="tn")
describe extended exim_employee;
show table extended like exim_employee;
show table extended like exim_employee partition (emp_country="us", emp_state="tn");
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
select * from exim_employee;
-dfs -rmr ../build/ql/test/data/tablestore/exim_employee;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee;
select * from exim_employee;
drop table exim_employee;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_20_part_managed_location.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_20_part_managed_location.q
index eb44961a9b..000904aa66 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_20_part_managed_location.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_20_part_managed_location.q
@@ -7,24 +7,24 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="ka");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="tn");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="us", emp_state="ka");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
create database importer;
use importer;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/tablestore/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/tablestore/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/tablestore/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee;
import table exim_employee partition (emp_country="us", emp_state="tn")
from 'ql/test/data/exports/exim_employee'
@@ -32,9 +32,9 @@ import table exim_employee partition (emp_country="us", emp_state="tn")
describe extended exim_employee;
show table extended like exim_employee;
show table extended like exim_employee partition (emp_country="us", emp_state="tn");
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
select * from exim_employee;
-dfs -rmr ../build/ql/test/data/tablestore/exim_employee;
+dfs -rmr target/tmp/ql/test/data/tablestore/exim_employee;
select * from exim_employee;
drop table exim_employee;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_21_export_authsuccess.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_21_export_authsuccess.q
index 822ed70a38..293a011cb2 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_21_export_authsuccess.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_21_export_authsuccess.q
@@ -2,13 +2,13 @@ set hive.test.mode=true;
set hive.test.mode.prefix=;
create table exim_department ( dep_id int) stored as textfile;
-load data local inpath "../data/files/test.dat" into table exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
set hive.security.authorization.enabled=true;
grant Select on table exim_department to user hive_test_user;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
set hive.security.authorization.enabled=false;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_22_import_exist_authsuccess.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_22_import_exist_authsuccess.q
index 440d08d2dc..03714ab17d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_22_import_exist_authsuccess.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_22_import_exist_authsuccess.q
@@ -3,9 +3,9 @@ set hive.test.mode.prefix=;
set hive.test.mode.nosamplelist=exim_department,exim_employee;
create table exim_department ( dep_id int) stored as textfile;
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -22,5 +22,5 @@ set hive.security.authorization.enabled=false;
select * from exim_department;
drop table exim_department;
drop database importer;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_23_import_part_authsuccess.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_23_import_part_authsuccess.q
index 30fc343dd8..cb6af0efbc 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_23_import_part_authsuccess.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_23_import_part_authsuccess.q
@@ -7,10 +7,10 @@ create table exim_employee ( emp_id int comment "employee id")
partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text")
stored as textfile
tblproperties("creator"="krishna");
-load data local inpath "../data/files/test.dat"
+load data local inpath "../../data/files/test.dat"
into table exim_employee partition (emp_country="in", emp_state="tn");
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_employee/temp;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_employee/temp;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
export table exim_employee to 'ql/test/data/exports/exim_employee';
drop table exim_employee;
@@ -29,6 +29,6 @@ import from 'ql/test/data/exports/exim_employee';
set hive.security.authorization.enabled=false;
select * from exim_employee;
-dfs -rmr ../build/ql/test/data/exports/exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
drop table exim_employee;
drop database importer;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_24_import_nonexist_authsuccess.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_24_import_nonexist_authsuccess.q
index 2dc5af6ce4..8934c47372 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_24_import_nonexist_authsuccess.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_24_import_nonexist_authsuccess.q
@@ -3,9 +3,9 @@ set hive.test.mode.prefix=;
set hive.test.mode.nosamplelist=exim_department,exim_employee;
create table exim_department ( dep_id int) stored as textfile;
-load data local inpath "../data/files/test.dat" into table exim_department;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/exim_department/test;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+load data local inpath "../../data/files/test.dat" into table exim_department;
+dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/test;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
export table exim_department to 'ql/test/data/exports/exim_department';
drop table exim_department;
@@ -20,5 +20,5 @@ set hive.security.authorization.enabled=false;
select * from exim_department;
drop table exim_department;
drop database importer;
-dfs -rmr ../build/ql/test/data/exports/exim_department;
+dfs -rmr target/tmp/ql/test/data/exports/exim_department;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_hidden_files.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_hidden_files.q
new file mode 100644
index 0000000000..f58c9f948d
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/exim_hidden_files.q
@@ -0,0 +1,22 @@
+set hive.test.mode=true;
+set hive.test.mode.prefix=;
+set hive.test.mode.nosamplelist=exim_department,exim_employee;
+
+create table exim_employee ( emp_id int) partitioned by (emp_country string);
+load data local inpath "../../data/files/test.dat" into table exim_employee partition (emp_country="in");
+
+dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/exim_employee/emp_country=in/_logs;
+dfs -touchz ${system:test.warehouse.dir}/exim_employee/emp_country=in/_logs/job.xml;
+export table exim_employee to 'ql/test/data/exports/exim_employee';
+drop table exim_employee;
+
+create database importer;
+use importer;
+
+import from 'ql/test/data/exports/exim_employee';
+describe formatted exim_employee;
+select * from exim_employee;
+dfs -rmr target/tmp/ql/test/data/exports/exim_employee;
+drop table exim_employee;
+drop database importer;
+use default;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/explain_rearrange.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/explain_rearrange.q
new file mode 100644
index 0000000000..ca2da354bd
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/explain_rearrange.q
@@ -0,0 +1,98 @@
+-- query from auto_sortmerge_join_9.q
+
+CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
+CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
+
+set hive.auto.convert.join=true;
+set hive.optimize.bucketmapjoin = true;
+set hive.optimize.bucketmapjoin.sortedmerge = true;
+set hive.auto.convert.sortmerge.join=true;
+set hive.auto.convert.sortmerge.join.to.mapjoin=false;
+
+set hive.explain.dependency.append.tasktype=true;
+
+-- default behavior
+
+explain
+select src1.key, src1.cnt1, src2.cnt1 from
+(
+ select key, count(*) as cnt1 from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq1 group by key
+) src1
+join
+(
+ select key, count(*) as cnt1 from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq2 group by key
+) src2
+on src1.key = src2.key
+order by src1.key, src1.cnt1, src2.cnt1;
+
+set hive.stageid.rearrange=IDONLY;
+
+-- changes id only
+
+explain
+select src1.key, src1.cnt1, src2.cnt1 from
+(
+ select key, count(*) as cnt1 from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq1 group by key
+) src1
+join
+(
+ select key, count(*) as cnt1 from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq2 group by key
+) src2
+on src1.key = src2.key
+order by src1.key, src1.cnt1, src2.cnt1;
+
+set hive.stageid.rearrange=TRAVERSE;
+
+-- assign ids in traverse order
+
+explain
+select src1.key, src1.cnt1, src2.cnt1 from
+(
+ select key, count(*) as cnt1 from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq1 group by key
+) src1
+join
+(
+ select key, count(*) as cnt1 from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq2 group by key
+) src2
+on src1.key = src2.key
+order by src1.key, src1.cnt1, src2.cnt1;
+
+set hive.stageid.rearrange=EXECUTION;
+
+-- assign ids in execution order
+
+explain
+select src1.key, src1.cnt1, src2.cnt1 from
+(
+ select key, count(*) as cnt1 from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq1 group by key
+) src1
+join
+(
+ select key, count(*) as cnt1 from
+ (
+ select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key
+ ) subq2 group by key
+) src2
+on src1.key = src2.key
+order by src1.key, src1.cnt1, src2.cnt1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/external_table_with_space_in_location_path.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/external_table_with_space_in_location_path.q
new file mode 100644
index 0000000000..ad070464f9
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/external_table_with_space_in_location_path.q
@@ -0,0 +1,23 @@
+dfs ${system:test.dfs.mkdir} hdfs:///tmp/test/;
+
+dfs -copyFromLocal ../../data/files/ext_test_space hdfs:///tmp/test/ext_test_space;
+
+CREATE EXTERNAL TABLE spacetest (id int, message string) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' LOCATION 'hdfs:///tmp/test/ext_test_space/folder+with space';
+
+SELECT * FROM spacetest;
+
+SELECT count(*) FROM spacetest;
+
+DROP TABLE spacetest;
+
+CREATE EXTERNAL TABLE spacetestpartition (id int, message string) PARTITIONED BY (day int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';
+
+ALTER TABLE spacetestpartition ADD PARTITION (day=10) LOCATION 'hdfs:///tmp/test/ext_test_space/folder+with space';
+
+SELECT * FROM spacetestpartition;
+
+SELECT count(*) FROM spacetestpartition;
+
+DROP TABLE spacetestpartition;
+
+dfs -rmr hdfs:///tmp/test;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/file_with_header_footer.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/file_with_header_footer.q
new file mode 100644
index 0000000000..8b65c7896d
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/file_with_header_footer.q
@@ -0,0 +1,39 @@
+dfs ${system:test.dfs.mkdir} hdfs:///tmp/test/;
+
+dfs -copyFromLocal ../../data/files/header_footer_table_1 hdfs:///tmp/test/header_footer_table_1;
+
+dfs -copyFromLocal ../../data/files/header_footer_table_2 hdfs:///tmp/test/header_footer_table_2;
+
+dfs -copyFromLocal ../../data/files/header_footer_table_3 hdfs:///tmp/test/header_footer_table_3;
+
+CREATE EXTERNAL TABLE header_footer_table_1 (name string, message string, id int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' LOCATION 'hdfs:///tmp/test/header_footer_table_1' tblproperties ("skip.header.line.count"="1", "skip.footer.line.count"="2");
+
+SELECT * FROM header_footer_table_1;
+
+SELECT * FROM header_footer_table_1 WHERE id < 50;
+
+CREATE EXTERNAL TABLE header_footer_table_2 (name string, message string, id int) PARTITIONED BY (year int, month int, day int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' tblproperties ("skip.header.line.count"="1", "skip.footer.line.count"="2");
+
+ALTER TABLE header_footer_table_2 ADD PARTITION (year=2012, month=1, day=1) location 'hdfs:///tmp/test/header_footer_table_2/2012/01/01';
+
+ALTER TABLE header_footer_table_2 ADD PARTITION (year=2012, month=1, day=2) location 'hdfs:///tmp/test/header_footer_table_2/2012/01/02';
+
+ALTER TABLE header_footer_table_2 ADD PARTITION (year=2012, month=1, day=3) location 'hdfs:///tmp/test/header_footer_table_2/2012/01/03';
+
+SELECT * FROM header_footer_table_2;
+
+SELECT * FROM header_footer_table_2 WHERE id < 50;
+
+CREATE EXTERNAL TABLE emptytable (name string, message string, id int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' LOCATION 'hdfs:///tmp/test/header_footer_table_3' tblproperties ("skip.header.line.count"="1", "skip.footer.line.count"="2");
+
+SELECT * FROM emptytable;
+
+SELECT * FROM emptytable WHERE id < 50;
+
+DROP TABLE header_footer_table_1;
+
+DROP TABLE header_footer_table_2;
+
+DROP TABLE emptytable;
+
+dfs -rmr hdfs:///tmp/test;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/filter_join_breaktask2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/filter_join_breaktask2.q
index f8d855b259..7f4258f7bc 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/filter_join_breaktask2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/filter_join_breaktask2.q
@@ -12,11 +12,11 @@ create table T3 (c0 bigint, c1 bigint, c2 int) partitioned by (ds string);
create table T4 (c0 bigint, c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string, c8 string, c9 string, c10 string, c11 string, c12 string, c13 string, c14 string, c15 string, c16 string, c17 string, c18 string, c19 string, c20 string, c21 string, c22 string, c23 string, c24 string, c25 string, c26 string, c27 string, c28 string, c29 string, c30 string, c31 string, c32 string, c33 string, c34 string, c35 string, c36 string, c37 string, c38 string, c39 string, c40 string, c41 string, c42 string, c43 string, c44 string, c45 string, c46 string, c47 string, c48 string, c49 string, c50 string, c51 string, c52 string, c53 string, c54 string, c55 string, c56 string, c57 string, c58 string, c59 string, c60 string, c61 string, c62 string, c63 string, c64 string, c65 string, c66 string, c67 bigint, c68 string, c69 string, c70 bigint, c71 bigint, c72 bigint, c73 string, c74 string, c75 string, c76 string, c77 string, c78 string, c79 string, c80 string, c81 bigint, c82 bigint, c83 bigint) partitioned by (ds string);
-insert overwrite table T1 partition (ds='2010-04-17') select '5', '1', '1', '1', 0, 0,4 from src limit 1;
+insert overwrite table T1 partition (ds='2010-04-17') select '5', '1', '1', '1', 0, 0,4 from src tablesample (1 rows);
-insert overwrite table T2 partition(ds='2010-04-17') select '5','name', NULL, '2', 'kavin',NULL, '9', 'c', '8', '0', '0', '7', '1','2', '0', '3','2', NULL, '1', NULL, '3','2','0','0','5','10' from src limit 1;
+insert overwrite table T2 partition(ds='2010-04-17') select '5','name', NULL, '2', 'kavin',NULL, '9', 'c', '8', '0', '0', '7', '1','2', '0', '3','2', NULL, '1', NULL, '3','2','0','0','5','10' from src tablesample (1 rows);
-insert overwrite table T3 partition (ds='2010-04-17') select 4,5,0 from src limit 1;
+insert overwrite table T3 partition (ds='2010-04-17') select 4,5,0 from src tablesample (1 rows);
insert overwrite table T4 partition(ds='2010-04-17')
select 4,'1','1','8','4','5','1','0','9','U','2','2', '0','2','1','1','J','C','A','U', '2','s', '2',NULL, NULL, NULL,NULL, NULL, NULL,'1','j', 'S', '6',NULL,'1', '2', 'J', 'g', '1', 'e', '2', '1', '2', 'U', 'P', 'p', '3', '0', '0', '0', '1', '1', '1', '0', '0', '0', '6', '2', 'j',NULL, NULL, NULL,NULL,NULL, NULL, '5',NULL, 'j', 'j', 2, 2, 1, '2', '2', '1', '1', '1', '1', '1', '1', 1, 1, 32,NULL from src limit 1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/filter_numeric.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/filter_numeric.q
new file mode 100644
index 0000000000..69d543f472
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/filter_numeric.q
@@ -0,0 +1,21 @@
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+create table partint(key string, value string) partitioned by (ds string, hr int);
+insert overwrite table partint partition(ds, hr) select * from srcpart where ds = '2008-04-08';
+
+explain select key, value, hr from partint where hr < 11;
+select key, value, hr from partint where hr < 11;
+
+explain select key, value, hr from partint where hr <= 12 and hr > 11;
+select key, value, hr from partint where hr <= 12 and hr > 11;
+
+explain select key, value, hr from partint where hr between 11 and 12;
+select key, value, hr from partint where hr between 11 and 12;
+
+explain select key, value, hr from partint where hr not between 12 and 14;
+select key, value, hr from partint where hr not between 12 and 14;
+
+explain select key, value, hr from partint where hr < 13;
+select key, value, hr from partint where hr < 13;
+
+drop table partint;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/global_limit.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/global_limit.q
index b76cf34120..c8a08af054 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/global_limit.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/global_limit.q
@@ -8,9 +8,9 @@ drop table gl_src_part1;
create table gl_src1 (key int, value string) stored as textfile;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE gl_src1;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE gl_src1;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE gl_src1;
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE gl_src1;
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE gl_src1;
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE gl_src1;
@@ -49,10 +49,10 @@ select key from gl_src2 ORDER BY key ASC limit 10;
-- partition
create table gl_src_part1 (key int, value string) partitioned by (p string) stored as textfile;
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE gl_src_part1 partition(p='11');
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE gl_src_part1 partition(p='12');
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE gl_src_part1 partition(p='12');
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE gl_src_part1 partition(p='12');
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE gl_src_part1 partition(p='11');
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE gl_src_part1 partition(p='12');
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE gl_src_part1 partition(p='12');
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE gl_src_part1 partition(p='12');
select key from gl_src_part1 where p like '1%' ORDER BY key ASC limit 10;
select key from gl_src_part1 where p='11' ORDER BY key ASC limit 10;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby10.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby10.q
index db38d43fe4..7750cb90b5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby10.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby10.q
@@ -6,7 +6,7 @@ CREATE TABLE dest1(key INT, val1 INT, val2 INT);
CREATE TABLE dest2(key INT, val1 INT, val2 INT);
CREATE TABLE INPUT(key INT, value STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv5.txt' INTO TABLE INPUT;
+LOAD DATA LOCAL INPATH '../../data/files/kv5.txt' INTO TABLE INPUT;
EXPLAIN
FROM INPUT
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby12.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby12.q
new file mode 100644
index 0000000000..6e9aee1d11
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby12.q
@@ -0,0 +1,13 @@
+set hive.map.aggr=false;
+
+CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
+
+EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest1 SELECT COUNT(src.key), COUNT(DISTINCT value) GROUP BY src.key;
+
+FROM src
+INSERT OVERWRITE TABLE dest1 SELECT COUNT(src.key), COUNT(DISTINCT value) GROUP BY src.key;
+
+SELECT dest1.* FROM dest1;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_limit.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_limit.q
index 1b6891e33a..55133332a8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_limit.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_limit.q
@@ -5,6 +5,6 @@ CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE;
EXPLAIN
FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key LIMIT 5;
-FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key LIMIT 5;
+FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key ORDER BY src.key LIMIT 5;
SELECT dest1.* FROM dest1 ORDER BY dest1.key ASC , dest1.value ASC;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map.q
index 82cff36422..dde37dfd47 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map.q
@@ -9,4 +9,4 @@ FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) G
FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-SELECT dest1.* FROM dest1;
+SELECT dest1.* FROM dest1 ORDER BY key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map_skew.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map_skew.q
index 874995888b..f346cb7e90 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map_skew.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_map_skew.q
@@ -9,4 +9,4 @@ FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) G
FROM src INSERT OVERWRITE TABLE dest1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-SELECT dest1.* FROM dest1;
+SELECT dest1.* FROM dest1 ORDER BY key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_noskew.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_noskew.q
index 1b10f1e024..c587b5f658 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_noskew.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby1_noskew.q
@@ -9,4 +9,4 @@ FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5))
FROM src INSERT OVERWRITE TABLE dest_g1 SELECT src.key, sum(substr(src.value,5)) GROUP BY src.key;
-SELECT dest_g1.* FROM dest_g1;
+SELECT dest_g1.* FROM dest_g1 ORDER BY key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_limit.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_limit.q
index 4bc263c77f..30499248ca 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_limit.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_limit.q
@@ -1,7 +1,7 @@
set mapred.reduce.tasks=31;
EXPLAIN
-SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key LIMIT 5;
+SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 5;
-SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key LIMIT 5;
+SELECT src.key, sum(substr(src.value,5)) FROM src GROUP BY src.key ORDER BY src.key LIMIT 5;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map.q
index c3cf598fb1..794ec758e9 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map.q
@@ -11,4 +11,4 @@ INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(s
FROM src
INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-SELECT dest1.* FROM dest1;
+SELECT dest1.* FROM dest1 ORDER BY key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map_multi_distinct.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map_multi_distinct.q
index 25e6789b63..55d1a34b3c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map_multi_distinct.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_map_multi_distinct.q
@@ -11,4 +11,15 @@ INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(s
FROM src
INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
-SELECT dest1.* FROM dest1;
+SELECT dest1.* FROM dest1 ORDER BY key;
+
+-- HIVE-5560 when the group by key is used in a distinct function, invalid results are returned
+
+EXPLAIN
+FROM src
+INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
+
+FROM src
+INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
+
+SELECT dest1.* FROM dest1 ORDER BY key;
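A note on the HIVE-5560 queries just added: a DISTINCT aggregate over the grouping expression itself is constant within each group, so it must count exactly one value, which is what made the pre-fix results detectably wrong. A hedged illustration, separate from the patch:

SELECT substr(src.key,1,1) AS g,
       count(DISTINCT substr(src.key,1,1)) AS d   -- always 1: the argument is the group key itself
FROM src
GROUP BY substr(src.key,1,1);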
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew.q
index c3c82d5174..6d7cb61e2d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew.q
@@ -11,4 +11,4 @@ INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr
FROM src
INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))) GROUP BY substr(src.key,1,1);
-SELECT dest_g2.* FROM dest_g2;
+SELECT dest_g2.* FROM dest_g2 ORDER BY key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew_multi_distinct.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew_multi_distinct.q
index b80c271afe..b2450c9ea0 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew_multi_distinct.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby2_noskew_multi_distinct.q
@@ -11,4 +11,4 @@ INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr
FROM src
INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1);
-SELECT dest_g2.* FROM dest_g2;
+SELECT dest_g2.* FROM dest_g2 ORDER BY key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_noskew.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_noskew.q
index 99c2d2d5a1..a1ebf90aad 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_noskew.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby4_noskew.q
@@ -12,5 +12,5 @@ INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,
FROM src
INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1) GROUP BY substr(src.key,1,1);
-SELECT dest1.* FROM dest1;
+SELECT dest1.* FROM dest1 ORDER BY c1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_noskew.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_noskew.q
index be60785d87..e96568b398 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_noskew.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby5_noskew.q
@@ -16,5 +16,5 @@ SELECT src.key, sum(substr(src.value,5))
FROM src
GROUP BY src.key;
-SELECT dest1.* FROM dest1;
+SELECT dest1.* FROM dest1 ORDER BY key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map.q
index fbf761c3ae..ced122fae3 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map.q
@@ -11,6 +11,6 @@ INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
FROM src
INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
-SELECT dest1.* FROM dest1;
+SELECT dest1.* FROM dest1 ORDER BY c1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map_skew.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map_skew.q
index ac79a28605..0d3727b052 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map_skew.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_map_skew.q
@@ -11,6 +11,6 @@ INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
FROM src
INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
-SELECT dest1.* FROM dest1;
+SELECT dest1.* FROM dest1 ORDER BY c1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_noskew.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_noskew.q
index 2c99d362ff..466c13222f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_noskew.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby6_noskew.q
@@ -12,6 +12,6 @@ INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
FROM src
INSERT OVERWRITE TABLE dest1 SELECT DISTINCT substr(src.value,5,1);
-SELECT dest1.* FROM dest1;
+SELECT dest1.* FROM dest1 ORDER BY c1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map.q
index b1457d9349..2b8c5db41e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map.q
@@ -18,5 +18,5 @@ FROM SRC
INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1.* FROM DEST1 ORDER BY key;
+SELECT DEST2.* FROM DEST2 ORDER BY key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_multi_single_reducer.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_multi_single_reducer.q
index 481b3cd084..5895ed4599 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_multi_single_reducer.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_multi_single_reducer.q
@@ -17,5 +17,5 @@ FROM SRC
INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1.* FROM DEST1 ORDER BY key;
+SELECT DEST2.* FROM DEST2 ORDER BY key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_skew.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_skew.q
index a34ac8f64b..ee6d7bf830 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_skew.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_map_skew.q
@@ -17,5 +17,5 @@ FROM SRC
INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1.* FROM DEST1 ORDER BY key;
+SELECT DEST2.* FROM DEST2 ORDER BY key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew.q
index 94a3dcf7ec..8c2308e5d7 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew.q
@@ -18,5 +18,5 @@ FROM SRC
INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key
INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1.* FROM DEST1 ORDER BY key;
+SELECT DEST2.* FROM DEST2 ORDER BY key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew_multi_single_reducer.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew_multi_single_reducer.q
index 802aea244d..e673cc6162 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew_multi_single_reducer.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby7_noskew_multi_single_reducer.q
@@ -14,8 +14,8 @@ INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY S
INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key limit 10;
FROM SRC
-INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key limit 10
-INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key limit 10;
+INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10
+INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, sum(SUBSTR(SRC.value,5)) GROUP BY SRC.key ORDER BY SRC.key limit 10;
SELECT DEST1.* FROM DEST1 ORDER BY key ASC, value ASC;
SELECT DEST2.* FROM DEST2 ORDER BY key ASC, value ASC;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map.q
index 62b6ff5dde..0252e99336 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map.q
@@ -14,6 +14,6 @@ FROM SRC
INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1.* FROM DEST1 ORDER BY key;
+SELECT DEST2.* FROM DEST2 ORDER BY key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map_skew.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map_skew.q
index 846fd01017..b5e1f63a45 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map_skew.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_map_skew.q
@@ -14,6 +14,6 @@ FROM SRC
INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1.* FROM DEST1 ORDER BY key;
+SELECT DEST2.* FROM DEST2 ORDER BY key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_noskew.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_noskew.q
index 20c8bef342..da85504ca1 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_noskew.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby8_noskew.q
@@ -15,5 +15,5 @@ FROM SRC
INSERT OVERWRITE TABLE DEST1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key
INSERT OVERWRITE TABLE DEST2 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) GROUP BY SRC.key;
-SELECT DEST1.* FROM DEST1;
-SELECT DEST2.* FROM DEST2;
+SELECT DEST1.* FROM DEST1 ORDER BY key;
+SELECT DEST2.* FROM DEST2 ORDER BY key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_bigdata.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_bigdata.q
index 7e97f75cec..2e3eddcb1f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_bigdata.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_bigdata.q
@@ -1,7 +1,7 @@
set hive.map.aggr.hash.percentmemory = 0.3;
set hive.mapred.local.mem = 384;
-add file ../data/scripts/dumpdata_script.py;
+add file ../../data/scripts/dumpdata_script.py;
select count(distinct subq.key) from
(FROM src MAP src.key USING 'python dumpdata_script.py' AS key WHERE src.key = 10) subq;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_cube1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_cube1.q
index 46e1f00d0f..099beb4319 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_cube1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_cube1.q
@@ -3,7 +3,7 @@ set hive.groupby.skewindata=false;
CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
EXPLAIN
SELECT key, val, count(1) FROM T1 GROUP BY key, val with cube;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_id1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_id1.q
index bced21f9e4..de4a7c3cb5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_id1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_id1.q
@@ -1,6 +1,6 @@
CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
SELECT key, val, GROUPING__ID from T1 group by key, val with cube;
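For context on the GROUPING__ID query above: WITH CUBE expands a GROUP BY over n columns into all 2^n grouping sets, and GROUPING__ID is a bit vector identifying which grouping set produced each output row. A hedged sketch over the same T1(key, val):

SELECT key, val, GROUPING__ID, count(1)
FROM T1
GROUP BY key, val WITH CUBE;
-- four grouping sets: (key, val), (key), (val), and the grand total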
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_id2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_id2.q
index ffc627c82e..f451f17834 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_id2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_id2.q
@@ -1,6 +1,6 @@
CREATE TABLE T1(key INT, value INT) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/groupby_groupingid.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt' INTO TABLE T1;
set hive.groupby.skewindata = true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets1.q
index 4fba7338f1..804dfb36cf 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets1.q
@@ -1,6 +1,6 @@
CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/grouping_sets.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1;
SELECT * FROM T1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets2.q
index 9f2286cc9b..30f1b420cc 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets2.q
@@ -2,7 +2,7 @@ set hive.new.job.grouping.set.cardinality=2;
CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/grouping_sets.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1;
-- Since 4 grouping sets would be generated for the query below, an additional MR job should be created
EXPLAIN
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets3.q
index 9a00d0a7aa..707737798d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets3.q
@@ -4,8 +4,8 @@
-- additional MR job is created for processing the grouping sets.
CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/grouping_sets1.txt' INTO TABLE T1;
-LOAD DATA LOCAL INPATH '../data/files/grouping_sets2.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/grouping_sets1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/grouping_sets2.txt' INTO TABLE T1;
set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
set hive.new.job.grouping.set.cardinality = 30;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets4.q
index 25f1fcd793..ff83185d81 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets4.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets4.q
@@ -4,7 +4,7 @@ set hive.merge.mapredfiles = false;
CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/grouping_sets.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1;
-- This tests that cubes and rollups work fine inside sub-queries.
EXPLAIN
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets5.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets5.q
index fb0c5913fa..d94bd81f84 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets5.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_grouping_sets5.q
@@ -4,7 +4,7 @@ set hive.merge.mapredfiles = false;
CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/grouping_sets.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INTO TABLE T1;
-- This tests that cubes and rollups work fine where the source is a sub-query
EXPLAIN
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr.q
index f0a8b72b0c..4a199365cf 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr.q
@@ -17,4 +17,4 @@ SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(s
WHERE src.ds = '2008-04-08'
GROUP BY substr(src.key,1,1);
-SELECT dest1.* FROM dest1;
+SELECT dest1.* FROM dest1 ORDER BY key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr_multi_distinct.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr_multi_distinct.q
index b863344485..cb3ee82918 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr_multi_distinct.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_map_ppr_multi_distinct.q
@@ -17,4 +17,4 @@ SELECT substr(src.key,1,1), count(DISTINCT substr(src.value,5)), concat(substr(s
WHERE src.ds = '2008-04-08'
GROUP BY substr(src.key,1,1);
-SELECT dest1.* FROM dest1;
+SELECT dest1.* FROM dest1 ORDER BY key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_resolution.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_resolution.q
new file mode 100644
index 0000000000..663e33b4c7
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_resolution.q
@@ -0,0 +1,61 @@
+
+
+set hive.map.aggr=false;
+set hive.groupby.skewindata=false;
+explain select key, count(*) from src b group by b.key;
+explain select b.key, count(*) from src b group by key;
+
+set hive.map.aggr=false;
+set hive.groupby.skewindata=true;
+explain select key, count(*) from src b group by b.key;
+explain select b.key, count(*) from src b group by key;
+
+set hive.map.aggr=true;
+set hive.groupby.skewindata=false;
+explain select key, count(*) from src b group by b.key;
+explain select b.key, count(*) from src b group by key;
+
+set hive.map.aggr=true;
+set hive.groupby.skewindata=true;
+explain select key, count(*) from src b group by b.key;
+explain select b.key, count(*) from src b group by key;
+
+-- windowing after group by
+select key, count(*), rank() over(order by count(*))
+from src b
+where key < '12'
+group by b.key
+order by b.key;
+
+-- having after group by
+select key, count(*)
+from src b
+group by b.key
+having key < '12'
+order by b.key;
+
+-- having and windowing
+select key, count(*), rank() over(order by count(*))
+from src b
+group by b.key
+having key < '12'
+order by b.key
+;
+
+explain
+select key, count(*), rank() over(order by count(*))
+from src b
+group by b.key
+having key < '12'
+;
+
+-- order by
+select key
+from src t
+where key < '12'
+group by t.key
+order by t.key;
+
+-- cluster by
+EXPLAIN
+SELECT x.key, x.value as key FROM SRC x CLUSTER BY key;
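The four set-combinations at the top of this new file exercise plan resolution under map-side aggregation and skew handling; with hive.groupby.skewindata=true, Hive compiles a group-by into two jobs, a randomly distributed partial aggregation followed by a final aggregation. A hedged probe over the same src table:

set hive.map.aggr=false;
set hive.groupby.skewindata=true;
-- the plan should show two stages: partial aggregation, then final aggregation by key
explain select key, count(*) from src b group by b.key;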
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_rollup1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_rollup1.q
index f79b0c472e..ee8038c7d9 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_rollup1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_rollup1.q
@@ -3,7 +3,7 @@ set hive.groupby.skewindata=false;
CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
EXPLAIN
SELECT key, val, count(1) FROM T1 GROUP BY key, val with rollup;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_1.q
index 911a11ae89..7401a9ca1d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_1.q
@@ -6,7 +6,7 @@ set hive.map.groupby.sorted=true;
CREATE TABLE T1(key STRING, val STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
-- perform an insert to make sure there are 2 files
INSERT OVERWRITE TABLE T1 select key, val from T1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_2.q
index 31b4ec5c74..700a8af915 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_2.q
@@ -6,7 +6,7 @@ set hive.map.groupby.sorted=true;
CREATE TABLE T1(key STRING, val STRING)
CLUSTERED BY (key) SORTED BY (val) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
-- perform an insert to make sure there are 2 files
INSERT OVERWRITE TABLE T1 select key, val from T1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_3.q
index 103c57a123..2ef8447935 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_3.q
@@ -6,7 +6,7 @@ set hive.map.groupby.sorted=true;
CREATE TABLE T1(key STRING, val STRING)
CLUSTERED BY (key) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
-- perform an insert to make sure there are 2 files
INSERT OVERWRITE TABLE T1 select key, val from T1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_4.q
index e43da3c932..3c959e381f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_4.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_4.q
@@ -6,7 +6,7 @@ set hive.map.groupby.sorted=true;
CREATE TABLE T1(key STRING, val STRING)
CLUSTERED BY (key, val) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
-- perform an insert to make sure there are 2 files
INSERT OVERWRITE TABLE T1 select key, val from T1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_5.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_5.q
index bef5e5d2d5..dd05238f1c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_5.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_5.q
@@ -6,7 +6,7 @@ set hive.map.groupby.sorted=true;
CREATE TABLE T1(key STRING, val STRING)
CLUSTERED BY (val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
-- perform an insert to make sure there are 2 files
INSERT OVERWRITE TABLE T1 select key, val from T1;
@@ -30,7 +30,7 @@ DROP TABLE T1;
CREATE TABLE T1(key STRING, val STRING)
CLUSTERED BY (val, key) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
-- perform an insert to make sure there are 2 files
INSERT OVERWRITE TABLE T1 select key, val from T1;
@@ -52,7 +52,7 @@ DROP TABLE T1;
CREATE TABLE T1(key STRING, val STRING)
CLUSTERED BY (val) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
-- perform an insert to make sure there are 2 files
INSERT OVERWRITE TABLE T1 select key, val from T1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_6.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_6.q
index cf076e8125..aa09aec34b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_6.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_6.q
@@ -17,7 +17,7 @@ SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key;
SELECT * FROM outputTbl1 ORDER BY key;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1 PARTITION (ds='2');
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 PARTITION (ds='2');
-- The plan should not be converted to a map-side group since no partition is being accessed
EXPLAIN EXTENDED
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_7.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_7.q
index c2d42154e5..99337859fb 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_7.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_7.q
@@ -6,7 +6,7 @@ set hive.map.groupby.sorted=true;
CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string)
CLUSTERED BY (val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1 PARTITION (ds='1');
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 PARTITION (ds='1');
-- perform an insert to make sure there are 2 files
INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_8.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_8.q
index 121804e60a..f53295e4b2 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_8.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_8.q
@@ -6,7 +6,7 @@ set hive.map.groupby.sorted=true;
CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1 PARTITION (ds='1');
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 PARTITION (ds='1');
-- perform an insert to make sure there are 2 files
INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_9.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_9.q
index 1c3d1cdcc2..296336d0f9 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_9.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_9.q
@@ -6,7 +6,7 @@ set hive.map.groupby.sorted=true;
CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1 PARTITION (ds='1');
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 PARTITION (ds='1');
-- perform an insert to make sure there are 2 files
INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
index 068c26a9c2..db0faa04da 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_skew_1.q
@@ -7,7 +7,7 @@ set hive.groupby.skewindata=true;
CREATE TABLE T1(key STRING, val STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
-- perform an insert to make sure there are 2 files
INSERT OVERWRITE TABLE T1 select key, val from T1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_test_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_test_1.q
index 8efa05e254..4ec138e51a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_test_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/groupby_sort_test_1.q
@@ -7,7 +7,7 @@ set hive.map.groupby.sorted.testmode=true;
CREATE TABLE T1(key STRING, val STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
-- perform an insert to make sure there are 2 files
INSERT OVERWRITE TABLE T1 select key, val from T1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/import_exported_table.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/import_exported_table.q
new file mode 100644
index 0000000000..cb147c5fea
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/import_exported_table.q
@@ -0,0 +1,13 @@
+dfs ${system:test.dfs.mkdir} hdfs:///tmp/test_import_exported_table/;
+dfs ${system:test.dfs.mkdir} hdfs:///tmp/test_import_exported_table/exported_table/;
+dfs ${system:test.dfs.mkdir} hdfs:///tmp/test_import_exported_table/exported_table/data/;
+
+dfs -copyFromLocal ../../data/files/exported_table/_metadata hdfs:///tmp/test_import_exported_table/exported_table;
+dfs -copyFromLocal ../../data/files/exported_table/data/data hdfs:///tmp/test_import_exported_table/exported_table/data;
+
+IMPORT FROM '/tmp/test_import_exported_table/exported_table';
+DESCRIBE j1_41;
+SELECT * from j1_41;
+
+dfs -rmr hdfs:///tmp/test_import_exported_table;
+
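The directory staged above, a _metadata file next to a data/ subtree, is the layout Hive's EXPORT TABLE writes and IMPORT consumes. A hedged sketch of the producing side, assuming a populated j1_41 table:

EXPORT TABLE j1_41 TO '/tmp/test_import_exported_table/exported_table';
-- writes _metadata plus data/ under the target, mirroring the files the
-- dfs -copyFromLocal commands above put in place by hand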
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auth.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auth.q
index 33a1fc581e..03d77f1f19 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auth.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auth.q
@@ -1,15 +1,18 @@
+set hive.stats.dbclass=fs;
SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
create table foobar(key int, value string) PARTITIONED BY (ds string, hr string);
alter table foobar add partition (ds='2008-04-08',hr='12');
-CREATE INDEX srcpart_auth_index ON TABLE foobar(key) as 'BITMAP' WITH DEFERRED REBUILD;
+CREATE INDEX srcpart_AUTH_index ON TABLE foobar(key) as 'BITMAP' WITH DEFERRED REBUILD;
+SHOW INDEXES ON foobar;
+
grant select on table foobar to user hive_test_user;
-grant select on table default__foobar_srcpart_auth_index__ to user hive_test_user;
-grant update on table default__foobar_srcpart_auth_index__ to user hive_test_user;
-grant create on table default__foobar_srcpart_auth_index__ to user hive_test_user;
+grant select on table default__foobar_srcpart_auth_indeX__ to user hive_test_user;
+grant update on table default__foobar_srcpart_auth_indEx__ to user hive_test_user;
+grant create on table default__foobar_srcpart_auth_inDex__ to user hive_test_user;
set hive.security.authorization.enabled=true;
-ALTER INDEX srcpart_auth_index ON foobar PARTITION (ds='2008-04-08',hr='12') REBUILD;
+ALTER INDEX srcpart_auth_INDEX ON foobar PARTITION (ds='2008-04-08',hr='12') REBUILD;
set hive.security.authorization.enabled=false;
DROP INDEX srcpart_auth_index on foobar;
DROP TABLE foobar;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto.q
index cb8a1d6293..77733aac02 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto.q
@@ -3,6 +3,7 @@
-- without indexing
SELECT key, value FROM src WHERE key > 80 AND key < 100 ORDER BY key;
+set hive.stats.dbclass=fs;
CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
ALTER INDEX src_index ON src REBUILD;
@@ -25,4 +26,4 @@ SET hive.optimize.index.filter.compact.minsize=0;
EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 ORDER BY key;
SELECT key, value FROM src WHERE key > 80 AND key < 100 ORDER BY key;
-DROP INDEX src_index on src;
\ No newline at end of file
+DROP INDEX src_index on src;
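Automatic use of the compact index in these tests hinges on the optimizer toggles below; minsize=0 forces the index-backed rewrite even on tiny test inputs. A minimal sketch, assuming src_index has been built as above:

SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
SET hive.optimize.index.filter=true;
SET hive.optimize.index.filter.compact.minsize=0;
EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 ORDER BY key;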
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_empty.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_empty.q
index cb32162d40..41f4a40823 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_empty.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_empty.q
@@ -3,6 +3,7 @@
-- Create temp, and populate it with some values in src.
CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE;
+set hive.stats.dbclass=fs;
-- Build an index on temp.
CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD;
ALTER INDEX temp_index ON temp REBUILD;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_file_format.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_file_format.q
index 790e6c223f..2967bd60d8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_file_format.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_file_format.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
-- test automatic use of index on different file formats
CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
ALTER INDEX src_index ON src REBUILD;
@@ -16,4 +17,4 @@ SET hive.optimize.index.filter.compact.minsize=0;
EXPLAIN SELECT key, value FROM src WHERE key=86 ORDER BY key;
SELECT key, value FROM src WHERE key=86 ORDER BY key;
-DROP INDEX src_index on src;
\ No newline at end of file
+DROP INDEX src_index on src;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_mult_tables.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_mult_tables.q
index 2bf8481f1d..a672e06e79 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_mult_tables.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_mult_tables.q
@@ -4,6 +4,7 @@
EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 ORDER BY a.key;
SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 ORDER BY a.key;
+set hive.stats.dbclass=fs;
CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
ALTER INDEX src_index ON src REBUILD;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_mult_tables_compact.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_mult_tables_compact.q
index 808a04cc36..d78e0fd58a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_mult_tables_compact.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_mult_tables_compact.q
@@ -4,6 +4,7 @@
EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 ORDER BY a.key;
SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 ORDER BY a.key;
+set hive.stats.dbclass=fs;
CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
ALTER INDEX src_index ON src REBUILD;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_multiple.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_multiple.q
index 06e97fa76b..f0a91b4b8a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_multiple.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_multiple.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
-- With multiple indexes, make sure we choose which to use in a consistent order
CREATE INDEX src_key_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
@@ -13,4 +14,4 @@ EXPLAIN SELECT key, value FROM src WHERE key=86 ORDER BY key;
SELECT key, value FROM src WHERE key=86 ORDER BY key;
DROP INDEX src_key_index ON src;
-DROP INDEX src_val_index ON src;
\ No newline at end of file
+DROP INDEX src_val_index ON src;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_partitioned.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_partitioned.q
index 5013d29e73..70166b36c5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_partitioned.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_partitioned.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
-- test automatic use of index on table with partitions
CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD;
ALTER INDEX src_part_index ON srcpart REBUILD;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_self_join.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_self_join.q
index 0984a4a21b..1d9efbbc6d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_self_join.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_self_join.q
@@ -3,6 +3,7 @@
EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 ORDER BY a.key;
SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 ORDER BY a.key;
+set hive.stats.dbclass=fs;
CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
ALTER INDEX src_index ON src REBUILD;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_unused.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_unused.q
index d8f3eda181..acd4194b0e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_unused.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_auto_unused.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
-- test cases where the index should not be used automatically
CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap.q
index f9deb28839..673c835fb9 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
DROP INDEX srcpart_index_proj on srcpart;
EXPLAIN
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap1.q
index 55633d9225..adec8f1b3b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap1.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
EXPLAIN
CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap2.q
index bd15a21fa4..1ffa6eeebb 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap2.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
EXPLAIN
CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
EXPLAIN
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap3.q
index 0d2c811459..e7a093c118 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap3.q
@@ -1,3 +1,6 @@
+set hive.stats.dbclass=counter;
+set hive.stats.autogather=true;
+
EXPLAIN
CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
EXPLAIN
@@ -17,10 +20,10 @@ SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
WHERE key = 0) a
JOIN
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
- WHERE value = "val_0") b
- ON
- a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
+ (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
+ WHERE value = "val_0") b
+ ON
+ a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname;
INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result"
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_auto.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_auto.q
index 672ce29f1b..56cd44dd5b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_auto.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_auto.q
@@ -1,3 +1,6 @@
+set hive.stats.dbclass=counter;
+set hive.stats.autogather=true;
+
-- try the query without indexing, with manual indexing, and with automatic indexing
-- without indexing
SELECT key, value FROM src WHERE key=0 AND value = "val_0" ORDER BY key;
@@ -21,12 +24,12 @@ SELECT * FROM default__src_src2_index__ ORDER BY value;
EXPLAIN
SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets`
FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__
- WHERE key = 0) a
- JOIN
- (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
- WHERE value = "val_0") b
- ON
- a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
+ WHERE key = 0) a
+ JOIN
+ (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src2_index__
+ WHERE value = "val_0") b
+ ON
+ a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT
EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname;
INSERT OVERWRITE DIRECTORY "${system:test.tmp.dir}/index_result"
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_auto_partitioned.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_auto_partitioned.q
index 90d7987594..3b310cee4c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_auto_partitioned.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_auto_partitioned.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
-- test automatic use of index on table with partitions
CREATE INDEX src_part_index ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD;
ALTER INDEX src_part_index ON srcpart REBUILD;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_compression.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_compression.q
index 2f5e5d4fb2..32ecfb9db8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_compression.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_compression.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
SET hive.exec.compress.result=true;
CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD;
ALTER INDEX src_index ON src REBUILD;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_rc.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_rc.q
index 054df51c32..26a351ea31 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_rc.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_bitmap_rc.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE;
INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 11;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact.q
index a936f1127f..98cbec147d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
DROP INDEX srcpart_index_proj on srcpart;
EXPLAIN
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_1.q
index 837033be40..97276f488e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_1.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
EXPLAIN
CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_2.q
index 56119ac11f..1eb3f5c3db 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_2.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE;
INSERT OVERWRITE TABLE srcpart_rc PARTITION (ds='2008-04-08', hr=11) SELECT key, value FROM srcpart WHERE ds = '2008-04-08' AND hr = 11;
@@ -42,4 +43,4 @@ SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
SELECT key, value FROM srcpart_rc WHERE key=100 ORDER BY key;
DROP INDEX srcpart_rc_index on srcpart_rc;
-DROP TABLE srcpart_rc;
\ No newline at end of file
+DROP TABLE srcpart_rc;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_3.q
index f3fcb4af3d..599b4ac114 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_3.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
CREATE TABLE src_index_test_rc (key int, value string) STORED AS RCFILE;
INSERT OVERWRITE TABLE src_index_test_rc SELECT * FROM src;
@@ -16,4 +17,4 @@ SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
SELECT key, value FROM src_index_test_rc WHERE key=100 ORDER BY key;
DROP INDEX src_index on src_index_test_rc;
-DROP TABLE src_index_test_rc;
\ No newline at end of file
+DROP TABLE src_index_test_rc;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_binary_search.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_binary_search.q
index d0d9a32adc..e72b27c781 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_binary_search.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compact_binary_search.q
@@ -1,6 +1,6 @@
SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
SET hive.default.fileformat=TextFile;
-
+set hive.stats.dbclass=fs;
CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
ALTER INDEX src_index ON src REBUILD;
@@ -129,4 +129,4 @@ SELECT * FROM src WHERE key >= '9';
SET hive.exec.post.hooks=;
-DROP INDEX src_index ON src;
\ No newline at end of file
+DROP INDEX src_index ON src;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compression.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compression.q
index 84ed3cc932..963b8f74e5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compression.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_compression.q
@@ -1,4 +1,5 @@
SET hive.exec.compress.result=true;
+set hive.stats.dbclass=fs;
CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD;
ALTER INDEX src_index ON src REBUILD;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_creation.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_creation.q
index 062821e870..ef020b63d5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_creation.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_creation.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
drop index src_index_2 on src;
drop index src_index_3 on src;
drop index src_index_4 on src;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_serde.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_serde.q
index a6fe16ba3b..20186a7400 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_serde.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_serde.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
-- Want to ensure we can build and use indices on tables stored with SerDes
-- Build the (Avro backed) table
CREATE TABLE doctors
@@ -31,7 +32,7 @@ TBLPROPERTIES ('avro.schema.literal'='{
DESCRIBE doctors;
-LOAD DATA LOCAL INPATH '../data/files/doctors.avro' INTO TABLE doctors;
+LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors;
-- Create and build an index
CREATE INDEX doctors_index ON TABLE doctors(number) AS 'COMPACT' WITH DEFERRED REBUILD;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_stale.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_stale.q
index 82e15b97b6..ecab2b7f6b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_stale.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_stale.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
-- test that stale indexes are not used
CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_stale_partitioned.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_stale_partitioned.q
index e7cfeff31f..a93ccf7f95 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_stale_partitioned.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/index_stale_partitioned.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
-- Test if index is actually being used.
-- Create temp, and populate it with some values in src.
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_dyn_part.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_dyn_part.q
index 119994e91b..728b8cc4a9 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_dyn_part.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_bucket_sort_dyn_part.q
@@ -47,12 +47,12 @@ CREATE TABLE srcpart_merge_dp LIKE srcpart;
CREATE TABLE srcpart_merge_dp_rc LIKE srcpart;
ALTER TABLE srcpart_merge_dp_rc SET FILEFORMAT RCFILE;
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=11);
-LOAD DATA LOCAL INPATH '../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=11);
-LOAD DATA LOCAL INPATH '../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=11);
-LOAD DATA LOCAL INPATH '../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=11);
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=11);
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=11);
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=11);
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=11);
-LOAD DATA LOCAL INPATH '../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=12);
+LOAD DATA LOCAL INPATH '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp PARTITION(ds='2008-04-08', hr=12);
INSERT OVERWRITE TABLE srcpart_merge_dp_rc PARTITION (ds = '2008-04-08', hr)
SELECT key, value, hr FROM srcpart_merge_dp WHERE ds = '2008-04-08';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_const_type.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_const_type.q
index a039dc5a2a..ce5ed8419d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_const_type.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/infer_const_type.q
@@ -1,7 +1,7 @@
DROP TABLE infertypes;
CREATE TABLE infertypes(ti TINYINT, si SMALLINT, i INT, bi BIGINT, fl FLOAT, db DOUBLE, str STRING);
-LOAD DATA LOCAL INPATH '../data/files/infer_const_type.txt' OVERWRITE INTO TABLE infertypes;
+LOAD DATA LOCAL INPATH '../../data/files/infer_const_type.txt' OVERWRITE INTO TABLE infertypes;
SELECT * FROM infertypes;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input13.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input13.q
index 40fbc84a98..620e9dcfae 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input13.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input13.q
@@ -7,15 +7,15 @@ FROM src
INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100
INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300
-INSERT OVERWRITE DIRECTORY '../build/ql/test/data/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300;
+INSERT OVERWRITE DIRECTORY 'target/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300;
FROM src
INSERT OVERWRITE TABLE dest1 SELECT src.* WHERE src.key < 100
INSERT OVERWRITE TABLE dest2 SELECT src.key, src.value WHERE src.key >= 100 and src.key < 200
INSERT OVERWRITE TABLE dest3 PARTITION(ds='2008-04-08', hr='12') SELECT src.key WHERE src.key >= 200 and src.key < 300
-INSERT OVERWRITE DIRECTORY '../build/ql/test/data/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300;
+INSERT OVERWRITE DIRECTORY 'target/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300;
SELECT dest1.* FROM dest1;
SELECT dest2.* FROM dest2;
SELECT dest3.* FROM dest3;
-dfs -cat ../build/ql/test/data/warehouse/dest4.out/*;
+dfs -cat ${system:test.warehouse.dir}/dest4.out/*;
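
input13.q stops writing into the old build tree; output now lands under the test warehouse and is read back through a system-property interpolation that the harness substitutes at run time. The pair of statements, restated from the hunk above as a standalone sketch:

FROM src INSERT OVERWRITE DIRECTORY 'target/warehouse/dest4.out' SELECT src.value WHERE src.key >= 300;
dfs -cat ${system:test.warehouse.dir}/dest4.out/*;
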
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input16.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input16.q
index 82e6d81426..4990d0ba1c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input16.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input16.q
@@ -1,6 +1,6 @@
-- TestSerDe is a user defined serde where the default delimiter is Ctrl-B
DROP TABLE INPUT16;
-ADD JAR ../data/files/TestSerDe.jar;
+ADD JAR ${system:maven.local.repository}/org/apache/hive/hive-it-test-serde/${system:hive.version}/hive-it-test-serde-${system:hive.version}.jar;
CREATE TABLE INPUT16(KEY STRING, VALUE STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1_cb.txt' INTO TABLE INPUT16;
+LOAD DATA LOCAL INPATH '../../data/files/kv1_cb.txt' INTO TABLE INPUT16;
SELECT INPUT16.VALUE, INPUT16.KEY FROM INPUT16;
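
TestSerDe.jar is no longer a binary checked in under data/files; the harness now resolves it from the local Maven repository, interpolating both the repository root and the Hive version:

ADD JAR ${system:maven.local.repository}/org/apache/hive/hive-it-test-serde/${system:hive.version}/hive-it-test-serde-${system:hive.version}.jar;
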
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input16_cc.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input16_cc.q
index 5dab4103d8..9272a92c81 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input16_cc.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input16_cc.q
@@ -4,8 +4,8 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-- the user is overwriting it with ctrlC
DROP TABLE INPUT16_CC;
-ADD JAR ../data/files/TestSerDe.jar;
+ADD JAR ${system:maven.local.repository}/org/apache/hive/hive-it-test-serde/${system:hive.version}/hive-it-test-serde-${system:hive.version}.jar;
CREATE TABLE INPUT16_CC(KEY STRING, VALUE STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' with serdeproperties ('testserde.default.serialization.format'='\003', 'dummy.prop.not.used'='dummyy.val') STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1_cc.txt' INTO TABLE INPUT16_CC;
+LOAD DATA LOCAL INPATH '../../data/files/kv1_cc.txt' INTO TABLE INPUT16_CC;
SELECT INPUT16_CC.VALUE, INPUT16_CC.KEY FROM INPUT16_CC;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input19.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input19.q
index fec44e9766..3dc7fec9f6 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input19.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input19.q
@@ -1,5 +1,5 @@
create table apachelog(ipaddress STRING,identd STRING,user_name STRING,finishtime STRING,requestline string,returncode INT,size INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe' WITH SERDEPROPERTIES ( 'serialization.format'= 'org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol', 'quote.delim'= '("|\\[|\\])', 'field.delim'=' ', 'serialization.null.format'='-' ) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/apache.access.log' INTO TABLE apachelog;
+LOAD DATA LOCAL INPATH '../../data/files/apache.access.log' INTO TABLE apachelog;
SELECT a.* FROM apachelog a;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input20.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input20.q
index 0566ab18c2..ff430abb8e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input20.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input20.q
@@ -1,6 +1,6 @@
CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-ADD FILE ../data/scripts/input20_script;
+ADD FILE ../../data/scripts/input20_script.py;
EXPLAIN
FROM (
@@ -12,7 +12,7 @@ FROM (
) tmap
INSERT OVERWRITE TABLE dest1
REDUCE tmap.key, tmap.value
-USING 'input20_script'
+USING 'python input20_script.py'
AS key, value;
FROM (
@@ -24,7 +24,7 @@ FROM (
) tmap
INSERT OVERWRITE TABLE dest1
REDUCE tmap.key, tmap.value
-USING 'input20_script'
+USING 'python input20_script.py'
AS key, value;
SELECT * FROM dest1 SORT BY key, value;
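
The transform script now ships with an explicit .py extension and is run through the interpreter, presumably because the relocated copy is not marked executable. Only the ADD FILE and USING strings change; the surrounding REDUCE clause keeps its shape:

ADD FILE ../../data/scripts/input20_script.py;
REDUCE tmap.key, tmap.value USING 'python input20_script.py' AS key, value;
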
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input21.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input21.q
index d7c814e580..43cd01e684 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input21.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input21.q
@@ -1,7 +1,7 @@
CREATE TABLE src_null(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/null.txt' INTO TABLE src_null;
+LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE src_null;
EXPLAIN SELECT * FROM src_null DISTRIBUTE BY c SORT BY d;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input22.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input22.q
index 853947be57..8803e4dbeb 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input22.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input22.q
@@ -1,5 +1,5 @@
CREATE TABLE INPUT4(KEY STRING, VALUE STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE INPUT4;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4;
EXPLAIN
SELECT a.KEY2
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input33.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input33.q
index 7ab17515af..8b6b215020 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input33.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input33.q
@@ -1,6 +1,6 @@
CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
-ADD FILE ../data/scripts/input20_script;
+ADD FILE ../../data/scripts/input20_script.py;
EXPLAIN
FROM (
@@ -12,7 +12,7 @@ FROM (
) tmap
INSERT OVERWRITE TABLE dest1
REDUCE tmap.key, tmap.value
-USING 'input20_script'
+USING 'python input20_script.py'
AS (key STRING, value STRING);
FROM (
@@ -24,7 +24,7 @@ FROM (
) tmap
INSERT OVERWRITE TABLE dest1
REDUCE tmap.key, tmap.value
-USING 'input20_script'
+USING 'python input20_script.py'
AS (key STRING, value STRING);
SELECT * FROM dest1 SORT BY key, value;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input37.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input37.q
index 6fd136afec..6ded61aa23 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input37.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input37.q
@@ -1,6 +1,6 @@
create table documents(contents string) stored as textfile;
-LOAD DATA LOCAL INPATH '../data/files/docurl.txt' INTO TABLE documents;
+LOAD DATA LOCAL INPATH '../../data/files/docurl.txt' INTO TABLE documents;
select url, count(1)
@@ -8,7 +8,7 @@ FROM
(
FROM documents
MAP documents.contents
- USING 'java -cp ../build/ql/test/classes org.apache.hadoop.hive.scripts.extracturl' AS (url, count)
+ USING 'java -cp ../util/target/classes/ org.apache.hadoop.hive.scripts.extracturl' AS (url, count)
) subq
group by url;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input3_limit.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input3_limit.q
index 3584820aca..f983aca847 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input3_limit.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input3_limit.q
@@ -1,7 +1,7 @@
CREATE TABLE T1(key STRING, value STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE T1;
-LOAD DATA LOCAL INPATH '../data/files/kv2.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, value STRING);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input4.q
index 08d6d97603..1186bbbbe6 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input4.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input4.q
@@ -1,7 +1,7 @@
CREATE TABLE INPUT4(KEY STRING, VALUE STRING) STORED AS TEXTFILE;
EXPLAIN
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE INPUT4;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE INPUT4;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4;
EXPLAIN FORMATTED
SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias;
SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input40.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input40.q
index 4166cb5f94..ab187b5d7e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input40.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input40.q
@@ -2,15 +2,15 @@
create table tmp_insert_test (key string, value string) stored as textfile;
-load data local inpath '../data/files/kv1.txt' into table tmp_insert_test;
+load data local inpath '../../data/files/kv1.txt' into table tmp_insert_test;
select * from tmp_insert_test;
create table tmp_insert_test_p (key string, value string) partitioned by (ds string) stored as textfile;
-load data local inpath '../data/files/kv1.txt' into table tmp_insert_test_p partition (ds = '2009-08-01');
+load data local inpath '../../data/files/kv1.txt' into table tmp_insert_test_p partition (ds = '2009-08-01');
select * from tmp_insert_test_p where ds= '2009-08-01'
order by key, value;
-load data local inpath '../data/files/kv2.txt' into table tmp_insert_test_p partition (ds = '2009-08-01');
+load data local inpath '../../data/files/kv2.txt' into table tmp_insert_test_p partition (ds = '2009-08-01');
select * from tmp_insert_test_p where ds= '2009-08-01'
order by key, value;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input43.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input43.q
index 5512dc3280..3182bbef39 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input43.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input43.q
@@ -1,7 +1,7 @@
drop table tst_src1;
create table tst_src1 like src1;
-load data local inpath '../data/files/kv1.txt' into table tst_src1 ;
+load data local inpath '../../data/files/kv1.txt' into table tst_src1 ;
select count(1) from tst_src1;
-load data local inpath '../data/files/kv1.txt' into table tst_src1 ;
+load data local inpath '../../data/files/kv1.txt' into table tst_src1 ;
select count(1) from tst_src1;
drop table tst_src1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input44.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input44.q
index 4557edc178..2e975e58c1 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input44.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input44.q
@@ -3,4 +3,4 @@ CREATE TABLE dest(key INT, value STRING) STORED AS TEXTFILE;
SET hive.output.file.extension=.txt;
INSERT OVERWRITE TABLE dest SELECT src.* FROM src;
-dfs -cat ../build/ql/test/data/warehouse/dest/*.txt \ No newline at end of file
+dfs -cat ${system:test.warehouse.dir}/dest/*.txt
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input45.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input45.q
index 633a8c6edb..334da264d6 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input45.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input45.q
@@ -2,8 +2,8 @@ SET hive.insert.into.multilevel.dirs=true;
SET hive.output.file.extension=.txt;
-INSERT OVERWRITE DIRECTORY '../build/ql/test/data/x/y/z/' SELECT src.* FROM src;
+INSERT OVERWRITE DIRECTORY 'target/data/x/y/z/' SELECT src.* FROM src;
-dfs -cat ../build/ql/test/data/x/y/z/*.txt;
+dfs -cat ${system:build.dir}/data/x/y/z/*.txt;
-dfs -rmr ../build/ql/test/data/x; \ No newline at end of file
+dfs -rmr ${system:build.dir}/data/x;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input4_cb_delim.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input4_cb_delim.q
index 8c57dd3f25..b18d60aa74 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input4_cb_delim.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input4_cb_delim.q
@@ -1,4 +1,4 @@
CREATE TABLE INPUT4_CB(KEY STRING, VALUE STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\002' LINES TERMINATED BY '\012' STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1_cb.txt' INTO TABLE INPUT4_CB;
+LOAD DATA LOCAL INPATH '../../data/files/kv1_cb.txt' INTO TABLE INPUT4_CB;
SELECT INPUT4_CB.VALUE, INPUT4_CB.KEY FROM INPUT4_CB;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input_dfs.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input_dfs.q
index 4f5824df5c..b108cbd6b2 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input_dfs.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/input_dfs.q
@@ -1,2 +1,2 @@
-dfs -cat ../data/files/kv1.txt;
+dfs -cat ../../data/files/kv1.txt;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/inputddl5.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/inputddl5.q
index 9a7ca5da12..87c55a26d7 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/inputddl5.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/inputddl5.q
@@ -1,7 +1,7 @@
-- test for internationalization
-- kv4.txt contains the utf-8 character 0xE982B5E993AE which we are verifying later on
CREATE TABLE INPUTDDL5(name STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv4.txt' INTO TABLE INPUTDDL5;
+LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE INPUTDDL5;
DESCRIBE INPUTDDL5;
SELECT INPUTDDL5.name from INPUTDDL5;
SELECT count(1) FROM INPUTDDL5 WHERE INPUTDDL5.name = _UTF-8 0xE982B5E993AE;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/inputddl6.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/inputddl6.q
index d33ab8d9a7..6c709399a3 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/inputddl6.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/inputddl6.q
@@ -2,8 +2,8 @@
-- test for describe extended table partition
-- test for alter table drop partition
CREATE TABLE INPUTDDL6(KEY STRING, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE INPUTDDL6 PARTITION (ds='2008-04-09');
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE INPUTDDL6 PARTITION (ds='2008-04-08');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUTDDL6 PARTITION (ds='2008-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUTDDL6 PARTITION (ds='2008-04-08');
DESCRIBE EXTENDED INPUTDDL6;
DESCRIBE EXTENDED INPUTDDL6 PARTITION (ds='2008-04-08');
SHOW PARTITIONS INPUTDDL6;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/inputddl7.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/inputddl7.q
index 8a73935fee..27e587a283 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/inputddl7.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/inputddl7.q
@@ -3,22 +3,22 @@
CREATE TABLE T1(name STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T1;
SELECT COUNT(1) FROM T1;
CREATE TABLE T2(name STRING) STORED AS SEQUENCEFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.seq' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T2;
SELECT COUNT(1) FROM T2;
CREATE TABLE T3(name STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE T3 PARTITION (ds='2008-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE T3 PARTITION (ds='2008-04-09');
SELECT COUNT(1) FROM T3 where T3.ds='2008-04-09';
CREATE TABLE T4(name STRING) PARTITIONED BY(ds STRING) STORED AS SEQUENCEFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.seq' INTO TABLE T4 PARTITION (ds='2008-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.seq' INTO TABLE T4 PARTITION (ds='2008-04-09');
SELECT COUNT(1) FROM T4 where T4.ds='2008-04-09';
DESCRIBE EXTENDED T1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert1_overwrite_partitions.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert1_overwrite_partitions.q
index 6ad70b5673..6b00f977c4 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert1_overwrite_partitions.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert1_overwrite_partitions.q
@@ -1,8 +1,8 @@
CREATE TABLE sourceTable (one string,two string) PARTITIONED BY (ds string,hr string);
-load data local inpath '../data/files/kv1.txt' INTO TABLE sourceTable partition(ds='2011-11-11', hr='11');
+load data local inpath '../../data/files/kv1.txt' INTO TABLE sourceTable partition(ds='2011-11-11', hr='11');
-load data local inpath '../data/files/kv3.txt' INTO TABLE sourceTable partition(ds='2011-11-11', hr='12');
+load data local inpath '../../data/files/kv3.txt' INTO TABLE sourceTable partition(ds='2011-11-11', hr='12');
CREATE TABLE destinTable (one string,two string) PARTITIONED BY (ds string,hr string);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert2_overwrite_partitions.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert2_overwrite_partitions.q
index 598d30eaeb..bd1eb75287 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert2_overwrite_partitions.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert2_overwrite_partitions.q
@@ -4,9 +4,9 @@ CREATE DATABASE db2;
CREATE TABLE db1.sourceTable (one string,two string) PARTITIONED BY (ds string);
-load data local inpath '../data/files/kv1.txt' INTO TABLE db1.sourceTable partition(ds='2011-11-11');
+load data local inpath '../../data/files/kv1.txt' INTO TABLE db1.sourceTable partition(ds='2011-11-11');
-load data local inpath '../data/files/kv3.txt' INTO TABLE db1.sourceTable partition(ds='2011-11-11');
+load data local inpath '../../data/files/kv3.txt' INTO TABLE db1.sourceTable partition(ds='2011-11-11');
CREATE TABLE db2.destinTable (one string,two string) PARTITIONED BY (ds string);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert_into3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert_into3.q
index e58b212e19..4ff0edcd61 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert_into3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert_into3.q
@@ -4,10 +4,10 @@ DROP TABLE insert_into3b;
CREATE TABLE insert_into3a (key int, value string);
CREATE TABLE insert_into3b (key int, value string);
-EXPLAIN FROM src INSERT INTO TABLE insert_into3a SELECT * LIMIT 50
- INSERT INTO TABLE insert_into3b SELECT * LIMIT 100;
-FROM src INSERT INTO TABLE insert_into3a SELECT * LIMIT 50
- INSERT INTO TABLE insert_into3b SELECT * LIMIT 100;
+EXPLAIN FROM src INSERT INTO TABLE insert_into3a SELECT * ORDER BY key, value LIMIT 50
+ INSERT INTO TABLE insert_into3b SELECT * ORDER BY key, value LIMIT 100;
+FROM src INSERT INTO TABLE insert_into3a SELECT * ORDER BY key, value LIMIT 50
+ INSERT INTO TABLE insert_into3b SELECT * ORDER BY key, value LIMIT 100;
SELECT SUM(HASH(c)) FROM (
SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into3a
) t;
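
The ORDER BY inserted ahead of each LIMIT makes the 50 and 100 rows that land in insert_into3a and insert_into3b deterministic, which matters because the golden files checksum the table contents. The general pattern:

FROM src
INSERT INTO TABLE insert_into3a SELECT * ORDER BY key, value LIMIT 50;  -- the same 50 rows on every run
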
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert_overwrite_local_directory_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert_overwrite_local_directory_1.q
index 25c127f67f..6d069f5411 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert_overwrite_local_directory_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/insert_overwrite_local_directory_1.q
@@ -1,40 +1,40 @@
-insert overwrite local directory '../data/files/local_src_table_1'
+insert overwrite local directory '../../data/files/local_src_table_1'
select * from src ;
-dfs -cat ../data/files/local_src_table_1/000000_0;
+dfs -cat ../../data/files/local_src_table_1/000000_0;
-insert overwrite local directory '../data/files/local_src_table_2'
+insert overwrite local directory '../../data/files/local_src_table_2'
row format delimited
FIELDS TERMINATED BY ':'
select * from src ;
-dfs -cat ../data/files/local_src_table_2/000000_0;
+dfs -cat ../../data/files/local_src_table_2/000000_0;
create table array_table (a array<string>, b array<string>)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\t'
COLLECTION ITEMS TERMINATED BY ',';
-load data local inpath "../data/files/array_table.txt" overwrite into table array_table;
+load data local inpath "../../data/files/array_table.txt" overwrite into table array_table;
-insert overwrite local directory '../data/files/local_array_table_1'
+insert overwrite local directory '../../data/files/local_array_table_1'
select * from array_table;
-dfs -cat ../data/files/local_array_table_1/000000_0;
+dfs -cat ../../data/files/local_array_table_1/000000_0;
-insert overwrite local directory '../data/files/local_array_table_2'
+insert overwrite local directory '../../data/files/local_array_table_2'
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ':'
COLLECTION ITEMS TERMINATED BY '#'
select * from array_table;
-dfs -cat ../data/files/local_array_table_2/000000_0;
+dfs -cat ../../data/files/local_array_table_2/000000_0;
-insert overwrite local directory '../data/files/local_array_table_2_withfields'
+insert overwrite local directory '../../data/files/local_array_table_2_withfields'
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ':'
COLLECTION ITEMS TERMINATED BY '#'
select b,a from array_table;
-dfs -cat ../data/files/local_array_table_2_withfields/000000_0;
+dfs -cat ../../data/files/local_array_table_2_withfields/000000_0;
create table map_table (foo STRING , bar MAP<STRING, STRING>)
@@ -44,63 +44,63 @@ COLLECTION ITEMS TERMINATED BY ','
MAP KEYS TERMINATED BY ':'
STORED AS TEXTFILE;
-load data local inpath "../data/files/map_table.txt" overwrite into table map_table;
+load data local inpath "../../data/files/map_table.txt" overwrite into table map_table;
-insert overwrite local directory '../data/files/local_map_table_1'
+insert overwrite local directory '../../data/files/local_map_table_1'
select * from map_table;
-dfs -cat ../data/files/local_map_table_1/000000_0;
+dfs -cat ../../data/files/local_map_table_1/000000_0;
-insert overwrite local directory '../data/files/local_map_table_2'
+insert overwrite local directory '../../data/files/local_map_table_2'
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ':'
COLLECTION ITEMS TERMINATED BY '#'
MAP KEYS TERMINATED BY '='
select * from map_table;
-dfs -cat ../data/files/local_map_table_2/000000_0;
+dfs -cat ../../data/files/local_map_table_2/000000_0;
-insert overwrite local directory '../data/files/local_map_table_2_withfields'
+insert overwrite local directory '../../data/files/local_map_table_2_withfields'
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ':'
COLLECTION ITEMS TERMINATED BY '#'
MAP KEYS TERMINATED BY '='
select bar,foo from map_table;
-dfs -cat ../data/files/local_map_table_2_withfields/000000_0;
+dfs -cat ../../data/files/local_map_table_2_withfields/000000_0;
-insert overwrite local directory '../data/files/local_array_table_3'
+insert overwrite local directory '../../data/files/local_array_table_3'
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.DelimitedJSONSerDe'
STORED AS TEXTFILE
select * from array_table;
-dfs -cat ../data/files/local_array_table_3/000000_0;
+dfs -cat ../../data/files/local_array_table_3/000000_0;
-insert overwrite local directory '../data/files/local_map_table_3'
+insert overwrite local directory '../../data/files/local_map_table_3'
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.DelimitedJSONSerDe'
STORED AS TEXTFILE
select * from map_table;
-dfs -cat ../data/files/local_map_table_3/000000_0;
+dfs -cat ../../data/files/local_map_table_3/000000_0;
-insert overwrite local directory '../data/files/local_rctable'
+insert overwrite local directory '../../data/files/local_rctable'
STORED AS RCFILE
select value,key from src;
dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/local_rctable/temp;
dfs -rmr ${system:test.tmp.dir}/local_rctable;
dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/local_rctable;
-dfs -put ../data/files/local_rctable/000000_0 ${system:test.tmp.dir}/local_rctable/000000_0;
+dfs -put ../../data/files/local_rctable/000000_0 ${system:test.tmp.dir}/local_rctable/000000_0;
create external table local_rctable(value string, key string)
STORED AS RCFILE
LOCATION '${system:test.tmp.dir}/local_rctable';
-insert overwrite local directory '../data/files/local_rctable_out'
+insert overwrite local directory '../../data/files/local_rctable_out'
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\t'
select key,value from local_rctable;
-dfs -cat ../data/files/local_rctable_out/000000_0;
+dfs -cat ../../data/files/local_rctable_out/000000_0;
drop table local_rctable;
drop table array_table;
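
insert_overwrite_local_directory_1.q exercises every output shape of INSERT OVERWRITE LOCAL DIRECTORY, and apart from the path fix-ups nothing changes. The core form it repeats, with /tmp/local_out as a placeholder rather than a path the test uses:

insert overwrite local directory '/tmp/local_out'
row format delimited fields terminated by ':'
select * from src;
dfs -cat /tmp/local_out/000000_0;  -- inspect the single output file
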
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_1to1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_1to1.q
index b403814adb..4d1ae2186e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_1to1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_1to1.q
@@ -1,9 +1,9 @@
CREATE TABLE join_1to1_1(key1 int, key2 int, value int);
-LOAD DATA LOCAL INPATH '../data/files/in5.txt' INTO TABLE join_1to1_1;
+LOAD DATA LOCAL INPATH '../../data/files/in5.txt' INTO TABLE join_1to1_1;
CREATE TABLE join_1to1_2(key1 int, key2 int, value int);
-LOAD DATA LOCAL INPATH '../data/files/in6.txt' INTO TABLE join_1to1_2;
+LOAD DATA LOCAL INPATH '../../data/files/in6.txt' INTO TABLE join_1to1_2;
set hive.outerjoin.supports.filters=false;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_alt_syntax.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_alt_syntax.q
new file mode 100644
index 0000000000..0b0c53803d
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_alt_syntax.q
@@ -0,0 +1,41 @@
+DROP TABLE part;
+
+-- data setup
+CREATE TABLE part(
+ p_partkey INT,
+ p_name STRING,
+ p_mfgr STRING,
+ p_brand STRING,
+ p_type STRING,
+ p_size INT,
+ p_container STRING,
+ p_retailprice DOUBLE,
+ p_comment STRING
+);
+
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
+
+explain select p1.p_name, p2.p_name
+from part p1 , part p2;
+
+explain select p1.p_name, p2.p_name, p3.p_name
+from part p1 ,part p2 ,part p3
+where p1.p_name = p2.p_name and p2.p_name = p3.p_name;
+
+explain select p1.p_name, p2.p_name, p3.p_name
+from part p1 , (select p_name from part) p2 ,part p3
+where p1.p_name = p2.p_name and p2.p_name = p3.p_name;
+
+explain select p1.p_name, p2.p_name, p3.p_name
+from part p1 , part p2 , part p3
+where p2.p_partkey + p1.p_partkey = p1.p_partkey and p3.p_name = p2.p_name;
+
+explain select p1.p_name, p2.p_name, p3.p_name, p4.p_name
+from part p1 , part p2 join part p3 on p2.p_name = p1.p_name join part p4
+where p2.p_name = p3.p_name and p1.p_partkey = p4.p_partkey
+ and p1.p_partkey = p2.p_partkey;
+
+explain select p1.p_name, p2.p_name, p3.p_name, p4.p_name
+from part p1 join part p2 on p2.p_name = p1.p_name , part p3 , part p4
+where p2.p_name = p3.p_name and p1.p_partkey = p4.p_partkey
+ and p1.p_partkey = p2.p_partkey; \ No newline at end of file
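
join_alt_syntax.q is new in this batch: it checks that comma-separated table lists in the FROM clause plan the same way as explicit JOIN chains, including when the two styles are mixed. The equivalence it relies on, restated on the part table the file defines:

explain select p1.p_name, p2.p_name from part p1, part p2 where p1.p_name = p2.p_name;
-- expected to produce the same plan as the explicit form:
explain select p1.p_name, p2.p_name from part p1 join part p2 on p1.p_name = p2.p_name;
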
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_array.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_array.q
index e4d95a51fa..81e984e7eb 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_array.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_array.q
@@ -1,8 +1,8 @@
create table tinyA(a bigint, b bigint) stored as textfile;
create table tinyB(a bigint, bList array<int>) stored as textfile;
-load data local inpath '../data/files/tiny_a.txt' into table tinyA;
-load data local inpath '../data/files/tiny_b.txt' into table tinyB;
+load data local inpath '../../data/files/tiny_a.txt' into table tinyA;
+load data local inpath '../../data/files/tiny_b.txt' into table tinyB;
select * from tinyA;
select * from tinyB;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_casesensitive.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_casesensitive.q
index a9b69c5b1c..0c0962cece 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_casesensitive.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_casesensitive.q
@@ -1,8 +1,8 @@
CREATE TABLE joinone(key1 int, key2 int, value int);
-LOAD DATA LOCAL INPATH '../data/files/in5.txt' INTO TABLE joinone;
+LOAD DATA LOCAL INPATH '../../data/files/in5.txt' INTO TABLE joinone;
CREATE TABLE joinTwo(key1 int, key2 int, value int);
-LOAD DATA LOCAL INPATH '../data/files/in6.txt' INTO TABLE joinTwo;
+LOAD DATA LOCAL INPATH '../../data/files/in6.txt' INTO TABLE joinTwo;
SELECT * FROM joinone JOIN joinTwo ON(joinone.key2=joinTwo.key2) ORDER BY joinone.key1 ASC, joinone.key2 ASC, joinone.value ASC, joinTwo.key1 ASC, joinTwo.key2 ASC, joinTwo.value ASC;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_1.q
new file mode 100644
index 0000000000..7f493671b8
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_1.q
@@ -0,0 +1,30 @@
+DROP TABLE part;
+
+-- data setup
+CREATE TABLE part(
+ p_partkey INT,
+ p_name STRING,
+ p_mfgr STRING,
+ p_brand STRING,
+ p_type STRING,
+ p_size INT,
+ p_container STRING,
+ p_retailprice DOUBLE,
+ p_comment STRING
+);
+
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
+
+
+
+explain select *
+from part p1 join part p2 join part p3 on p1.p_name = p2.p_name and p2.p_name = p3.p_name;
+
+explain select *
+from part p1 join part p2 join part p3 on p2.p_name = p1.p_name and p3.p_name = p2.p_name;
+
+explain select *
+from part p1 join part p2 join part p3 on p2.p_partkey + p1.p_partkey = p1.p_partkey and p3.p_name = p2.p_name;
+
+explain select *
+from part p1 join part p2 join part p3 on p2.p_partkey = 1 and p3.p_name = p2.p_name;
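
join_cond_pushdown_1.q through _4.q, and the _unqual variants further down, probe whether the planner pushes join predicates written in WHERE clauses or late ON clauses back to the join they actually constrain; the _unqual files repeat the queries against part2 and part3, whose column names are unique, so references resolve without table qualifiers. The basic transformation under test:

explain select * from part p1 join part p2 join part p3
where p1.p_name = p2.p_name and p2.p_name = p3.p_name;
-- should plan like the fully qualified chain:
explain select * from part p1 join part p2 on p1.p_name = p2.p_name
                     join part p3 on p2.p_name = p3.p_name;
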
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_2.q
new file mode 100644
index 0000000000..ca280104d9
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_2.q
@@ -0,0 +1,24 @@
+DROP TABLE part;
+
+-- data setup
+CREATE TABLE part(
+ p_partkey INT,
+ p_name STRING,
+ p_mfgr STRING,
+ p_brand STRING,
+ p_type STRING,
+ p_size INT,
+ p_container STRING,
+ p_retailprice DOUBLE,
+ p_comment STRING
+);
+
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
+
+
+explain select *
+from part p1 join part p2 join part p3 on p1.p_name = p2.p_name join part p4 on p2.p_name = p3.p_name and p1.p_name = p4.p_name;
+
+explain select *
+from part p1 join part p2 join part p3 on p2.p_name = p1.p_name join part p4 on p2.p_name = p3.p_name and p1.p_partkey = p4.p_partkey
+ and p1.p_partkey = p2.p_partkey;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_3.q
new file mode 100644
index 0000000000..b308838d62
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_3.q
@@ -0,0 +1,34 @@
+DROP TABLE part;
+
+-- data setup
+CREATE TABLE part(
+ p_partkey INT,
+ p_name STRING,
+ p_mfgr STRING,
+ p_brand STRING,
+ p_type STRING,
+ p_size INT,
+ p_container STRING,
+ p_retailprice DOUBLE,
+ p_comment STRING
+);
+
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
+
+
+
+explain select *
+from part p1 join part p2 join part p3
+where p1.p_name = p2.p_name and p2.p_name = p3.p_name;
+
+explain select *
+from part p1 join part p2 join part p3
+where p2.p_name = p1.p_name and p3.p_name = p2.p_name;
+
+explain select *
+from part p1 join part p2 join part p3
+where p2.p_partkey + p1.p_partkey = p1.p_partkey and p3.p_name = p2.p_name;
+
+explain select *
+from part p1 join part p2 join part p3
+where p2.p_partkey = 1 and p3.p_name = p2.p_name;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_4.q
new file mode 100644
index 0000000000..477682e4e8
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_4.q
@@ -0,0 +1,26 @@
+DROP TABLE part;
+
+-- data setup
+CREATE TABLE part(
+ p_partkey INT,
+ p_name STRING,
+ p_mfgr STRING,
+ p_brand STRING,
+ p_type STRING,
+ p_size INT,
+ p_container STRING,
+ p_retailprice DOUBLE,
+ p_comment STRING
+);
+
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
+
+
+explain select *
+from part p1 join part p2 join part p3 on p1.p_name = p2.p_name join part p4
+where p2.p_name = p3.p_name and p1.p_name = p4.p_name;
+
+explain select *
+from part p1 join part p2 join part p3 on p2.p_name = p1.p_name join part p4
+where p2.p_name = p3.p_name and p1.p_partkey = p4.p_partkey
+ and p1.p_partkey = p2.p_partkey;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual1.q
new file mode 100644
index 0000000000..1013f51a0d
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual1.q
@@ -0,0 +1,52 @@
+DROP TABLE part;
+
+-- data setup
+CREATE TABLE part(
+ p_partkey INT,
+ p_name STRING,
+ p_mfgr STRING,
+ p_brand STRING,
+ p_type STRING,
+ p_size INT,
+ p_container STRING,
+ p_retailprice DOUBLE,
+ p_comment STRING
+);
+
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
+
+create table part2(
+ p2_partkey INT,
+ p2_name STRING,
+ p2_mfgr STRING,
+ p2_brand STRING,
+ p2_type STRING,
+ p2_size INT,
+ p2_container STRING,
+ p2_retailprice DOUBLE,
+ p2_comment STRING
+);
+
+create table part3(
+ p3_partkey INT,
+ p3_name STRING,
+ p3_mfgr STRING,
+ p3_brand STRING,
+ p3_type STRING,
+ p3_size INT,
+ p3_container STRING,
+ p3_retailprice DOUBLE,
+ p3_comment STRING
+);
+
+explain select *
+from part p1 join part2 p2 join part3 p3 on p1.p_name = p2_name and p2_name = p3_name;
+
+explain select *
+from part p1 join part2 p2 join part3 p3 on p2_name = p1.p_name and p3_name = p2_name;
+
+explain select *
+from part p1 join part2 p2 join part3 p3 on p2_partkey + p_partkey = p1.p_partkey and p3_name = p2_name;
+
+explain select *
+from part p1 join part2 p2 join part3 p3 on p2_partkey = 1 and p3_name = p2_name;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual2.q
new file mode 100644
index 0000000000..6232357752
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual2.q
@@ -0,0 +1,47 @@
+DROP TABLE part;
+
+-- data setup
+CREATE TABLE part(
+ p_partkey INT,
+ p_name STRING,
+ p_mfgr STRING,
+ p_brand STRING,
+ p_type STRING,
+ p_size INT,
+ p_container STRING,
+ p_retailprice DOUBLE,
+ p_comment STRING
+);
+
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
+
+create table part2(
+ p2_partkey INT,
+ p2_name STRING,
+ p2_mfgr STRING,
+ p2_brand STRING,
+ p2_type STRING,
+ p2_size INT,
+ p2_container STRING,
+ p2_retailprice DOUBLE,
+ p2_comment STRING
+);
+
+create table part3(
+ p3_partkey INT,
+ p3_name STRING,
+ p3_mfgr STRING,
+ p3_brand STRING,
+ p3_type STRING,
+ p3_size INT,
+ p3_container STRING,
+ p3_retailprice DOUBLE,
+ p3_comment STRING
+);
+
+explain select *
+from part p1 join part2 p2 join part3 p3 on p1.p_name = p2_name join part p4 on p2_name = p3_name and p1.p_name = p4.p_name;
+
+explain select *
+from part p1 join part2 p2 join part3 p3 on p2_name = p1.p_name join part p4 on p2_name = p3_name and p1.p_partkey = p4.p_partkey
+ and p1.p_partkey = p2_partkey;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual3.q
new file mode 100644
index 0000000000..6ac86042c0
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual3.q
@@ -0,0 +1,56 @@
+DROP TABLE part;
+
+-- data setup
+CREATE TABLE part(
+ p_partkey INT,
+ p_name STRING,
+ p_mfgr STRING,
+ p_brand STRING,
+ p_type STRING,
+ p_size INT,
+ p_container STRING,
+ p_retailprice DOUBLE,
+ p_comment STRING
+);
+
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
+
+create table part2(
+ p2_partkey INT,
+ p2_name STRING,
+ p2_mfgr STRING,
+ p2_brand STRING,
+ p2_type STRING,
+ p2_size INT,
+ p2_container STRING,
+ p2_retailprice DOUBLE,
+ p2_comment STRING
+);
+
+create table part3(
+ p3_partkey INT,
+ p3_name STRING,
+ p3_mfgr STRING,
+ p3_brand STRING,
+ p3_type STRING,
+ p3_size INT,
+ p3_container STRING,
+ p3_retailprice DOUBLE,
+ p3_comment STRING
+);
+
+explain select *
+from part p1 join part2 p2 join part3 p3
+where p1.p_name = p2_name and p2_name = p3_name;
+
+explain select *
+from part p1 join part2 p2 join part3 p3
+where p2_name = p1.p_name and p3_name = p2_name;
+
+explain select *
+from part p1 join part2 p2 join part3 p3
+where p2_partkey + p1.p_partkey = p1.p_partkey and p3_name = p2_name;
+
+explain select *
+from part p1 join part2 p2 join part3 p3
+where p2_partkey = 1 and p3_name = p2_name;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual4.q
new file mode 100644
index 0000000000..0db4d5e3cf
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_cond_pushdown_unqual4.q
@@ -0,0 +1,49 @@
+DROP TABLE part;
+
+-- data setup
+CREATE TABLE part(
+ p_partkey INT,
+ p_name STRING,
+ p_mfgr STRING,
+ p_brand STRING,
+ p_type STRING,
+ p_size INT,
+ p_container STRING,
+ p_retailprice DOUBLE,
+ p_comment STRING
+);
+
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
+
+create table part2(
+ p2_partkey INT,
+ p2_name STRING,
+ p2_mfgr STRING,
+ p2_brand STRING,
+ p2_type STRING,
+ p2_size INT,
+ p2_container STRING,
+ p2_retailprice DOUBLE,
+ p2_comment STRING
+);
+
+create table part3(
+ p3_partkey INT,
+ p3_name STRING,
+ p3_mfgr STRING,
+ p3_brand STRING,
+ p3_type STRING,
+ p3_size INT,
+ p3_container STRING,
+ p3_retailprice DOUBLE,
+ p3_comment STRING
+);
+
+explain select *
+from part p1 join part2 p2 join part3 p3 on p1.p_name = p2_name join part p4
+where p2_name = p3_name and p1.p_name = p4.p_name;
+
+explain select *
+from part p1 join part2 p2 join part3 p3 on p2_name = p1.p_name join part p4
+where p2_name = p3_name and p1.p_partkey = p4.p_partkey
+ and p1.p_partkey = p2_partkey;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_filters.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_filters.q
index d54aa950a1..49b6c6f920 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_filters.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_filters.q
@@ -1,5 +1,5 @@
CREATE TABLE myinput1(key int, value int);
-LOAD DATA LOCAL INPATH '../data/files/in3.txt' INTO TABLE myinput1;
+LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE myinput1;
SELECT * FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value ORDER BY a.key ASC, a.value ASC, b.key ASC, b.value ASC;
SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value ORDER BY a.key ASC, a.value ASC, b.key ASC, b.value ASC;
@@ -55,10 +55,10 @@ SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.valu
CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS;
-LOAD DATA LOCAL INPATH '../data/files/in1.txt' into table smb_input1;
-LOAD DATA LOCAL INPATH '../data/files/in2.txt' into table smb_input1;
-LOAD DATA LOCAL INPATH '../data/files/in1.txt' into table smb_input2;
-LOAD DATA LOCAL INPATH '../data/files/in2.txt' into table smb_input2;
+LOAD DATA LOCAL INPATH '../../data/files/in1.txt' into table smb_input1;
+LOAD DATA LOCAL INPATH '../../data/files/in2.txt' into table smb_input1;
+LOAD DATA LOCAL INPATH '../../data/files/in1.txt' into table smb_input2;
+LOAD DATA LOCAL INPATH '../../data/files/in2.txt' into table smb_input2;
SET hive.optimize.bucketmapjoin = true;
SET hive.optimize.bucketmapjoin.sortedmerge = true;
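
The bottom half of join_filters.q, like join_nulls.q and join_nullsafe.q below, switches to sort-merge-bucket joins: both inputs are bucketed and sorted on their join column, and the two flags let Hive merge aligned buckets instead of shuffling. The setup shape, as used above:

CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS;
SET hive.optimize.bucketmapjoin = true;
SET hive.optimize.bucketmapjoin.sortedmerge = true;
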
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_hive_626.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_hive_626.q
index 31b0c8c91c..c4c239cae2 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_hive_626.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_hive_626.q
@@ -13,9 +13,9 @@ delimited fields terminated by ',' stored as textfile;
create table hive_count (bar_id int, n int) row format delimited fields
terminated by ',' stored as textfile;
-load data local inpath '../data/files/hive_626_foo.txt' overwrite into table hive_foo;
-load data local inpath '../data/files/hive_626_bar.txt' overwrite into table hive_bar;
-load data local inpath '../data/files/hive_626_count.txt' overwrite into table hive_count;
+load data local inpath '../../data/files/hive_626_foo.txt' overwrite into table hive_foo;
+load data local inpath '../../data/files/hive_626_bar.txt' overwrite into table hive_bar;
+load data local inpath '../../data/files/hive_626_count.txt' overwrite into table hive_count;
explain
select hive_foo.foo_name, hive_bar.bar_name, n from hive_foo join hive_bar on hive_foo.foo_id =
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_merging.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_merging.q
new file mode 100644
index 0000000000..a0046dbc41
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_merging.q
@@ -0,0 +1,25 @@
+
+
+CREATE TABLE part(
+ p_partkey INT,
+ p_name STRING,
+ p_mfgr STRING,
+ p_brand STRING,
+ p_type STRING,
+ p_size INT,
+ p_container STRING,
+ p_retailprice DOUBLE,
+ p_comment STRING
+);
+
+explain select p1.p_size, p2.p_size
+from part p1 left outer join part p2 on p1.p_partkey = p2.p_partkey
+ right outer join part p3 on p2.p_partkey = p3.p_partkey and
+ p1.p_size > 10
+;
+
+explain select p1.p_size, p2.p_size
+from part p1 left outer join part p2 on p1.p_partkey = p2.p_partkey
+ right outer join part p3 on p2.p_partkey = p3.p_partkey and
+ p1.p_size > 10 and p1.p_size > p2.p_size + 10
+; \ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_nulls.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_nulls.q
index 4ff60713d6..047a769eb8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_nulls.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_nulls.q
@@ -1,5 +1,5 @@
CREATE TABLE myinput1(key int, value int);
-LOAD DATA LOCAL INPATH '../data/files/in1.txt' INTO TABLE myinput1;
+LOAD DATA LOCAL INPATH '../../data/files/in1.txt' INTO TABLE myinput1;
SELECT * FROM myinput1 a JOIN myinput1 b ORDER BY a.key ASC, a.value ASC, b.key ASC, b.value ASC;
SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ORDER BY a.key ASC, a.value ASC, b.key ASC, b.value ASC;
@@ -42,10 +42,10 @@ SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.valu
CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS;
-LOAD DATA LOCAL INPATH '../data/files/in1.txt' into table smb_input1;
-LOAD DATA LOCAL INPATH '../data/files/in2.txt' into table smb_input1;
-LOAD DATA LOCAL INPATH '../data/files/in1.txt' into table smb_input2;
-LOAD DATA LOCAL INPATH '../data/files/in2.txt' into table smb_input2;
+LOAD DATA LOCAL INPATH '../../data/files/in1.txt' into table smb_input1;
+LOAD DATA LOCAL INPATH '../../data/files/in2.txt' into table smb_input1;
+LOAD DATA LOCAL INPATH '../../data/files/in1.txt' into table smb_input2;
+LOAD DATA LOCAL INPATH '../../data/files/in2.txt' into table smb_input2;
SET hive.optimize.bucketmapJOIN = true;
SET hive.optimize.bucketmapJOIN.sortedmerge = true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_nullsafe.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_nullsafe.q
index 05b57bce20..5e22517edb 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_nullsafe.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_nullsafe.q
@@ -1,7 +1,7 @@
set hive.nullsafe.equijoin=true;
CREATE TABLE myinput1(key int, value int);
-LOAD DATA LOCAL INPATH '../data/files/in8.txt' INTO TABLE myinput1;
+LOAD DATA LOCAL INPATH '../../data/files/in8.txt' INTO TABLE myinput1;
-- merging
explain select * from myinput1 a join myinput1 b on a.key<=>b.value ORDER BY a.key, a.value, b.key, b.value;
@@ -31,10 +31,10 @@ SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value OR
-- smbs
CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS;
-LOAD DATA LOCAL INPATH '../data/files/in8.txt' into table smb_input1;
-LOAD DATA LOCAL INPATH '../data/files/in9.txt' into table smb_input1;
-LOAD DATA LOCAL INPATH '../data/files/in8.txt' into table smb_input2;
-LOAD DATA LOCAL INPATH '../data/files/in9.txt' into table smb_input2;
+LOAD DATA LOCAL INPATH '../../data/files/in8.txt' into table smb_input1;
+LOAD DATA LOCAL INPATH '../../data/files/in9.txt' into table smb_input1;
+LOAD DATA LOCAL INPATH '../../data/files/in8.txt' into table smb_input2;
+LOAD DATA LOCAL INPATH '../../data/files/in9.txt' into table smb_input2;
SET hive.optimize.bucketmapJOIN = true;
SET hive.optimize.bucketmapJOIN.sortedmerge = true;
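
join_nullsafe.q is built around the null-safe equality operator <=>, enabled here through hive.nullsafe.equijoin. Unlike =, it treats two NULLs as equal, so rows whose join keys are both NULL survive the join:

select * from myinput1 a join myinput1 b on a.key <=> b.value;  -- NULL <=> NULL is true, so all-NULL keys match
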
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder.q
index b92a79ba07..b209c50b66 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder.q
@@ -6,9 +6,9 @@ CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
-LOAD DATA LOCAL INPATH '../data/files/T3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key
SELECT a.key, a.val, c.key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder2.q
index 238c0adad3..ca1e65ebef 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder2.q
@@ -8,10 +8,10 @@ CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;
CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
-LOAD DATA LOCAL INPATH '../data/files/T3.txt' INTO TABLE T3;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T4;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4;
EXPLAIN
SELECT /*+ STREAMTABLE(a) */ *
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder3.q
index 1bda28fbc3..994be164aa 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder3.q
@@ -8,10 +8,10 @@ CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;
CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
-LOAD DATA LOCAL INPATH '../data/files/T3.txt' INTO TABLE T3;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T4;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4;
EXPLAIN
SELECT /*+ STREAMTABLE(a,c) */ *
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder4.q
index 126f356ef7..16ef2046be 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder4.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_reorder4.q
@@ -2,9 +2,9 @@ CREATE TABLE T1(key1 STRING, val1 STRING) STORED AS TEXTFILE;
CREATE TABLE T2(key2 STRING, val2 STRING) STORED AS TEXTFILE;
CREATE TABLE T3(key3 STRING, val3 STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
-LOAD DATA LOCAL INPATH '../data/files/T3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
set hive.auto.convert.join=true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_star.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_star.q
index 8314161975..c95a13b9ca 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_star.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/join_star.q
@@ -7,14 +7,14 @@ create table dim5(f9 int, f10 int);
create table dim6(f11 int, f12 int);
create table dim7(f13 int, f14 int);
-LOAD DATA LOCAL INPATH '../data/files/fact-data.txt' INTO TABLE fact;
-LOAD DATA LOCAL INPATH '../data/files/dim-data.txt' INTO TABLE dim1;
-LOAD DATA LOCAL INPATH '../data/files/dim-data.txt' INTO TABLE dim2;
-LOAD DATA LOCAL INPATH '../data/files/dim-data.txt' INTO TABLE dim3;
-LOAD DATA LOCAL INPATH '../data/files/dim-data.txt' INTO TABLE dim4;
-LOAD DATA LOCAL INPATH '../data/files/dim-data.txt' INTO TABLE dim5;
-LOAD DATA LOCAL INPATH '../data/files/dim-data.txt' INTO TABLE dim6;
-LOAD DATA LOCAL INPATH '../data/files/dim-data.txt' INTO TABLE dim7;
+LOAD DATA LOCAL INPATH '../../data/files/fact-data.txt' INTO TABLE fact;
+LOAD DATA LOCAL INPATH '../../data/files/dim-data.txt' INTO TABLE dim1;
+LOAD DATA LOCAL INPATH '../../data/files/dim-data.txt' INTO TABLE dim2;
+LOAD DATA LOCAL INPATH '../../data/files/dim-data.txt' INTO TABLE dim3;
+LOAD DATA LOCAL INPATH '../../data/files/dim-data.txt' INTO TABLE dim4;
+LOAD DATA LOCAL INPATH '../../data/files/dim-data.txt' INTO TABLE dim5;
+LOAD DATA LOCAL INPATH '../../data/files/dim-data.txt' INTO TABLE dim6;
+LOAD DATA LOCAL INPATH '../../data/files/dim-data.txt' INTO TABLE dim7;
set hive.auto.convert.join=true;
set hive.auto.convert.join.noconditionaltask=true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/lateral_view_noalias.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/lateral_view_noalias.q
index 11e54a5b04..df7343259a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/lateral_view_noalias.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/lateral_view_noalias.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
--HIVE-2608 Do not require AS a,b,c part in LATERAL VIEW
EXPLAIN SELECT myTab.* from src LATERAL VIEW explode(map('key1', 100, 'key2', 200)) myTab limit 2;
SELECT myTab.* from src LATERAL VIEW explode(map('key1', 100, 'key2', 200)) myTab limit 2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/lateral_view_ppd.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/lateral_view_ppd.q
index 7be86a6f10..65ae518cd5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/lateral_view_ppd.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/lateral_view_ppd.q
@@ -11,3 +11,7 @@ SELECT value, myCol FROM (SELECT * FROM srcpart LATERAL VIEW explode(array(1,2,3
EXPLAIN SELECT value, myCol FROM (SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LATERAL VIEW explode(array(1,2,3)) myTable2 AS myCol2) a WHERE key='0';
SELECT value, myCol FROM (SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LATERAL VIEW explode(array(1,2,3)) myTable2 AS myCol2) a WHERE key='0';
+
+-- HIVE-4293 Predicates following UDTF operator are removed by PPD
+EXPLAIN SELECT value, myCol FROM (SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol WHERE myCol > 1) a WHERE key='0';
+SELECT value, myCol FROM (SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol WHERE myCol > 1) a WHERE key='0';
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/lb_fs_stats.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/lb_fs_stats.q
new file mode 100644
index 0000000000..7f31797f31
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/lb_fs_stats.q
@@ -0,0 +1,19 @@
+set hive.mapred.supports.subdirectories=true;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+set hive.merge.mapfiles=false;
+set hive.merge.mapredfiles=false;
+set mapred.input.dir.recursive=true;
+set hive.stats.dbclass=fs;
+-- Tests truncating a column from a list bucketing table
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+
+CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE;
+
+ALTER TABLE test_tab SKEWED BY (key) ON ("484") STORED AS DIRECTORIES;
+
+INSERT OVERWRITE TABLE test_tab PARTITION (part = '1') SELECT * FROM src;
+
+describe formatted test_tab partition (part='1');
+
+set hive.stats.dbclass=jdbc:derby;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leadlag.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leadlag.q
index f497667710..5623cbfac5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leadlag.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leadlag.q
@@ -13,7 +13,7 @@ CREATE TABLE part(
p_comment STRING
);
-LOAD DATA LOCAL INPATH '../data/files/part_tiny.txt' overwrite into table part;
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
--1. testLagWithPTFWindowing
select p_mfgr, p_name,
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leadlag_queries.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leadlag_queries.q
index 6ef3bdb107..e53abce763 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leadlag_queries.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leadlag_queries.q
@@ -11,7 +11,7 @@ CREATE TABLE part(
p_comment STRING
);
-LOAD DATA LOCAL INPATH '../data/files/part_tiny.txt' overwrite into table part;
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-- 1. testLeadUDAF
select p_mfgr, p_retailprice,
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leftsemijoin.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leftsemijoin.q
index abe3d33171..0c16fb8dfd 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leftsemijoin.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leftsemijoin.q
@@ -9,9 +9,9 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';
CREATE TABLE things (id INT, name STRING) partitioned by (ds string)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';
-load data local inpath '../data/files/sales.txt' INTO TABLE sales;
-load data local inpath '../data/files/things.txt' INTO TABLE things partition(ds='2011-10-23');
-load data local inpath '../data/files/things2.txt' INTO TABLE things partition(ds='2011-10-24');
+load data local inpath '../../data/files/sales.txt' INTO TABLE sales;
+load data local inpath '../../data/files/things.txt' INTO TABLE things partition(ds='2011-10-23');
+load data local inpath '../../data/files/things2.txt' INTO TABLE things partition(ds='2011-10-24');
SELECT name,id FROM sales ORDER BY name ASC, id ASC;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leftsemijoin_mr.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leftsemijoin_mr.q
index 5813ca3c61..c9ebe0e8fa 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leftsemijoin_mr.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/leftsemijoin_mr.q
@@ -1,7 +1,7 @@
CREATE TABLE T1(key INT);
-LOAD DATA LOCAL INPATH '../data/files/leftsemijoin_mr_t1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/leftsemijoin_mr_t1.txt' INTO TABLE T1;
CREATE TABLE T2(key INT);
-LOAD DATA LOCAL INPATH '../data/files/leftsemijoin_mr_t2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/leftsemijoin_mr_t2.txt' INTO TABLE T2;
-- Run this query using TestMinimrCliDriver
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/limit_partition_metadataonly.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/limit_partition_metadataonly.q
new file mode 100644
index 0000000000..e91adab59d
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/limit_partition_metadataonly.q
@@ -0,0 +1,7 @@
+set hive.limit.query.max.table.partition=1;
+
+explain select ds from srcpart where hr=11 and ds='2008-04-08';
+select ds from srcpart where hr=11 and ds='2008-04-08';
+
+explain select distinct hr from srcpart;
+select distinct hr from srcpart;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/limit_pushdown.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/limit_pushdown.q
index e4d0aa06bd..adfe1e63e8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/limit_pushdown.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/limit_pushdown.q
@@ -22,12 +22,17 @@ select value,avg(key + 1) from src group by value order by value limit 20;
-- distincts
explain
-select distinct(key) from src limit 20;
-select distinct(key) from src limit 20;
+select distinct(cdouble) from alltypesorc limit 20;
+select distinct(cdouble) from alltypesorc limit 20;
explain
-select key, count(distinct(key)) from src group by key limit 20;
-select key, count(distinct(key)) from src group by key limit 20;
+select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint limit 20;
+select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint limit 20;
+
+-- multi distinct
+explain
+select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint limit 20;
+select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint limit 20;
-- limit zero
explain
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/limit_pushdown_negative.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/limit_pushdown_negative.q
index a86ddf1404..e17ded1ee1 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/limit_pushdown_negative.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/limit_pushdown_negative.q
@@ -16,7 +16,3 @@ CREATE TABLE dest_3(key STRING, c1 INT);
EXPLAIN FROM src
INSERT OVERWRITE TABLE dest_2 SELECT value, sum(key) GROUP BY value
INSERT OVERWRITE TABLE dest_3 SELECT value, sum(key) GROUP BY value limit 20;
-
--- nagative, multi distinct
-explain
-select count(distinct key)+count(distinct value) from src limit 20;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/list_bucket_dml_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/list_bucket_dml_2.q
index 3a39f42b2a..80aba5d4a5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/list_bucket_dml_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/list_bucket_dml_2.q
@@ -48,13 +48,13 @@ set hive.optimize.listbucketing=true;
explain extended
select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' ORDER BY key, value, ds, hr;
select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' ORDER BY key, value, ds, hr;
-select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484' ORDER BY key, value;
+select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484' ORDER BY key, value, ds, hr;
-- 51 and val_51 in the table so skewed data for 51 and val_14 should be none
-- but query should succeed for 51 or 51 and val_14
-select * from srcpart where ds = '2008-04-08' and key = '51' ORDER BY key, value;
+select * from srcpart where ds = '2008-04-08' and key = '51' ORDER BY key, value, ds, hr;
select * from list_bucketing_static_part where key = '51' ORDER BY key, value, ds, hr;
-select * from srcpart where ds = '2008-04-08' and key = '51' and value = 'val_14' ORDER BY key, value;
+select * from srcpart where ds = '2008-04-08' and key = '51' and value = 'val_14' ORDER BY key, value, ds, hr;
select * from list_bucketing_static_part where key = '51' and value = 'val_14' ORDER BY key, value, ds, hr;
-- queries with < <= > >= should work for skewed test although we don't benefit from pruning
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/list_bucket_dml_4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/list_bucket_dml_4.q
index 918c817e49..380d148ac9 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/list_bucket_dml_4.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/list_bucket_dml_4.q
@@ -65,7 +65,7 @@ set hive.optimize.listbucketing=true;
explain extended
select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' ORDER BY key, value, ds, hr;
select * from list_bucketing_static_part where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484' ORDER BY key, value, ds, hr;
-select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484' ORDER BY key, value;
+select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484' ORDER BY key, value, ds, hr;
-- clean up
drop table list_bucketing_static_part;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_decimal.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_decimal.q
index a6ad4b8485..08b21dc689 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_decimal.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_decimal.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
EXPLAIN SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 99999999999999999.9999999999999BD, 1E-99BD, 1E99BD FROM src LIMIT 1;
SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 99999999999999999.9999999999999BD, 1E-99BD, 1E99BD FROM src LIMIT 1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_double.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_double.q
index 08836127b9..766da699ea 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_double.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_double.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
EXPLAIN SELECT 3.14, -3.14, 3.14e8, 3.14e-8, -3.14e8, -3.14e-8, 3.14e+8, 3.14E8, 3.14E-8 FROM src LIMIT 1;
SELECT 3.14, -3.14, 3.14e8, 3.14e-8, -3.14e8, -3.14e-8, 3.14e+8, 3.14E8, 3.14E-8 FROM src LIMIT 1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_ints.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_ints.q
index 9da622e246..5fd0cfabf3 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_ints.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_ints.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
EXPLAIN SELECT 100, 100Y, 100S, 100L FROM src LIMIT 1;
SELECT 100, 100Y, 100S, 100L FROM src LIMIT 1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_string.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_string.q
index 21f0890ada..c57dc57252 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_string.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/literal_string.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
EXPLAIN SELECT 'face''book', 'face' 'book', 'face'
'book',
"face""book", "face" "book", "face"
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_binary_data.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_binary_data.q
index 7da363183c..653918afc0 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_binary_data.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_binary_data.q
@@ -4,7 +4,7 @@ FIELDS TERMINATED BY '9'
STORED AS TEXTFILE;
-- this query loads native binary data, stores in a table and then queries it. Note that string.txt contains binary data. Also uses transform clause and then length udf.
-LOAD DATA LOCAL INPATH '../data/files/string.txt' INTO TABLE mytable;
+LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable;
create table dest1 (key binary, value int);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part1.q
index 52b4937d4a..5f0a015693 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part1.q
@@ -23,8 +23,8 @@ insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, v
show partitions nzhang_part1;
show partitions nzhang_part2;
-select * from nzhang_part1 where ds is not null and hr is not null;
-select * from nzhang_part2 where ds is not null and hr is not null;
+select * from nzhang_part1 where ds is not null and hr is not null order by ds, hr, key;
+select * from nzhang_part2 where ds is not null and hr is not null order by ds, hr, key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part10.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part10.q
index 9517664675..dd84599e69 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part10.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part10.q
@@ -19,6 +19,6 @@ insert overwrite table nzhang_part10 partition(ds='2008-12-31', hr) select key,
show partitions nzhang_part10;
-select * from nzhang_part10 where ds is not null and hr is not null;
+select * from nzhang_part10 where ds is not null and hr is not null order by ds, hr, key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part3.q
index e4c8c17f63..29f951aa69 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part3.q
@@ -14,6 +14,6 @@ insert overwrite table nzhang_part3 partition (ds, hr) select key, value, ds, hr
insert overwrite table nzhang_part3 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null;
-select * from nzhang_part3 where ds is not null and hr is not null;
+select * from nzhang_part3 where ds is not null and hr is not null order by ds, hr, key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part4.q
index 3f3a0c8d51..942c245db8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part4.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part4.q
@@ -17,8 +17,8 @@ insert overwrite table nzhang_part4 partition (ds, hr) select key, value, ds, hr
insert overwrite table nzhang_part4 partition (ds, hr) select key, value, ds, hr from srcpart where ds is not null and hr is not null;
show partitions nzhang_part4;
-select * from nzhang_part4 where ds='2008-04-08' and hr is not null;
+select * from nzhang_part4 where ds='2008-04-08' and hr is not null order by ds, hr, key;
-select * from nzhang_part4 where ds is not null and hr is not null;
+select * from nzhang_part4 where ds is not null and hr is not null order by ds, hr, key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part8.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part8.q
index 8073500c0b..6768e4373a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part8.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part8.q
@@ -20,5 +20,5 @@ insert overwrite table nzhang_part8 partition(ds='2008-12-31', hr) select key, v
show partitions nzhang_part8;
-select * from nzhang_part8 where ds is not null and hr is not null;
+select * from nzhang_part8 where ds is not null and hr is not null order by ds, hr, key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part9.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part9.q
index 01fa596cdf..4680033cbd 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part9.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_dyn_part9.q
@@ -19,5 +19,5 @@ insert overwrite table nzhang_part9 partition (ds, hr) select key, value, ds, hr
show partitions nzhang_part9;
-select * from nzhang_part9 where ds is not null and hr is not null;
+select * from nzhang_part9 where ds is not null and hr is not null order by ds, hr, key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_exist_part_authsuccess.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_exist_part_authsuccess.q
index 6d2a8b82d3..35eb219808 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_exist_part_authsuccess.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_exist_part_authsuccess.q
@@ -2,4 +2,4 @@ create table hive_test_src ( col1 string ) partitioned by (pcol1 string) stored
alter table hive_test_src add partition (pcol1 = 'test_part');
set hive.security.authorization.enabled=true;
grant Update on table hive_test_src to user hive_test_user;
-load data local inpath '../data/files/test.dat' overwrite into table hive_test_src partition (pcol1 = 'test_part');
+load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src partition (pcol1 = 'test_part');
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_file_with_space_in_the_name.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_file_with_space_in_the_name.q
index 3b8951a1a7..6bac47fb90 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_file_with_space_in_the_name.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_file_with_space_in_the_name.q
@@ -2,4 +2,5 @@
CREATE TABLE load_file_with_space_in_the_name(name STRING, age INT);
-LOAD DATA LOCAL INPATH '../data/files/person age.txt' INTO TABLE load_file_with_space_in_the_name;
+LOAD DATA LOCAL INPATH '../../data/files/person age.txt' INTO TABLE load_file_with_space_in_the_name;
+LOAD DATA LOCAL INPATH '../../data/files/person+age.txt' INTO TABLE load_file_with_space_in_the_name;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_fs.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_fs.q
index c1ac29c172..2f06ca464f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_fs.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_fs.q
@@ -2,9 +2,9 @@
create table load_overwrite (key string, value string) stored as textfile location 'file:${system:test.tmp.dir}/load_overwrite';
create table load_overwrite2 (key string, value string) stored as textfile location 'file:${system:test.tmp.dir}/load2_overwrite2';
-load data local inpath '../data/files/kv1.txt' into table load_overwrite;
-load data local inpath '../data/files/kv2.txt' into table load_overwrite;
-load data local inpath '../data/files/kv3.txt' into table load_overwrite;
+load data local inpath '../../data/files/kv1.txt' into table load_overwrite;
+load data local inpath '../../data/files/kv2.txt' into table load_overwrite;
+load data local inpath '../../data/files/kv3.txt' into table load_overwrite;
show table extended like load_overwrite;
desc extended load_overwrite;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_fs2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_fs2.q
index 7255324d16..a75758a072 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_fs2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_fs2.q
@@ -4,17 +4,17 @@
create table result (key string, value string);
create table loader (key string, value string);
-load data local inpath '../data/files/kv1.txt' into table loader;
+load data local inpath '../../data/files/kv1.txt' into table loader;
load data inpath '/build/ql/test/data/warehouse/loader/kv1.txt' into table result;
show table extended like result;
-load data local inpath '../data/files/kv1.txt' into table loader;
+load data local inpath '../../data/files/kv1.txt' into table loader;
load data inpath '/build/ql/test/data/warehouse/loader/kv1.txt' into table result;
show table extended like result;
-load data local inpath '../data/files/kv1.txt' into table loader;
+load data local inpath '../../data/files/kv1.txt' into table loader;
load data inpath '/build/ql/test/data/warehouse/loader/kv1.txt' into table result;
show table extended like result;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_fs_overwrite.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_fs_overwrite.q
new file mode 100644
index 0000000000..51a803130a
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_fs_overwrite.q
@@ -0,0 +1,20 @@
+--HIVE 6209
+
+drop table target;
+drop table temp;
+
+create table target (key string, value string) stored as textfile location 'file:${system:test.tmp.dir}/target';
+create table temp (key string, value string) stored as textfile location 'file:${system:test.tmp.dir}/temp';
+
+set fs.pfile.impl.disable.cache=false;
+
+load data local inpath '../../data/files/kv1.txt' into table temp;
+load data inpath '${system:test.tmp.dir}/temp/kv1.txt' overwrite into table target;
+select count(*) from target;
+
+load data local inpath '../../data/files/kv2.txt' into table temp;
+load data inpath '${system:test.tmp.dir}/temp/kv2.txt' overwrite into table target;
+select count(*) from target;
+
+drop table target;
+drop table temp;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_hdfs_file_with_space_in_the_name.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_hdfs_file_with_space_in_the_name.q
index cce297cca4..55ac1a8185 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_hdfs_file_with_space_in_the_name.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_hdfs_file_with_space_in_the_name.q
@@ -1,9 +1,10 @@
-dfs -mkdir hdfs:///tmp/test/;
+dfs ${system:test.dfs.mkdir} hdfs:///tmp/test_load_hdfs_file_with_space_in_the_name/;
-dfs -copyFromLocal ../data/files hdfs:///tmp/test/.;
+dfs -copyFromLocal ../../data/files hdfs:///tmp/test_load_hdfs_file_with_space_in_the_name/.;
CREATE TABLE load_file_with_space_in_the_name(name STRING, age INT);
-LOAD DATA INPATH 'hdfs:///tmp/test/files/person age.txt' INTO TABLE load_file_with_space_in_the_name;
+LOAD DATA INPATH 'hdfs:///tmp/test_load_hdfs_file_with_space_in_the_name/files/person age.txt' INTO TABLE load_file_with_space_in_the_name;
+LOAD DATA INPATH 'hdfs:///tmp/test_load_hdfs_file_with_space_in_the_name/files/person+age.txt' INTO TABLE load_file_with_space_in_the_name;
-dfs -rmr hdfs:///tmp/test;
+dfs -rmr hdfs:///tmp/test_load_hdfs_file_with_space_in_the_name;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_nonpart_authsuccess.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_nonpart_authsuccess.q
index 40d8210e57..fdee45114b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_nonpart_authsuccess.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_nonpart_authsuccess.q
@@ -1,4 +1,4 @@
create table hive_test_src ( col1 string ) stored as textfile;
set hive.security.authorization.enabled=true;
grant Update on table hive_test_src to user hive_test_user;
-load data local inpath '../data/files/test.dat' overwrite into table hive_test_src ;
+load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src ;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_overwrite.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_overwrite.q
index 73853f15a9..080c78496a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_overwrite.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_overwrite.q
@@ -5,11 +5,11 @@ show table extended like load_overwrite;
select count(*) from load_overwrite;
-load data local inpath '../data/files/kv1.txt' into table load_overwrite;
+load data local inpath '../../data/files/kv1.txt' into table load_overwrite;
show table extended like load_overwrite;
select count(*) from load_overwrite;
-load data local inpath '../data/files/kv1.txt' overwrite into table load_overwrite;
+load data local inpath '../../data/files/kv1.txt' overwrite into table load_overwrite;
show table extended like load_overwrite;
select count(*) from load_overwrite;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_part_authsuccess.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_part_authsuccess.q
index ff54324a5a..cee5873ca5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_part_authsuccess.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/load_part_authsuccess.q
@@ -1,4 +1,4 @@
create table hive_test_src ( col1 string ) partitioned by (pcol1 string) stored as textfile;
set hive.security.authorization.enabled=true;
grant Update on table hive_test_src to user hive_test_user;
-load data local inpath '../data/files/test.dat' overwrite into table hive_test_src partition (pcol1 = 'test_part');
+load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src partition (pcol1 = 'test_part');
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/loadpart1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/loadpart1.q
index 0813bb23c3..735befef6f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/loadpart1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/loadpart1.q
@@ -2,7 +2,7 @@
create table hive_test_src ( col1 string ) stored as textfile ;
-load data local inpath '../data/files/test.dat' overwrite into table hive_test_src ;
+load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src ;
create table hive_test_dst ( col1 string ) partitioned by ( pcol1 string , pcol2 string) stored as sequencefile;
insert overwrite table hive_test_dst partition ( pcol1='test_part', pCol2='test_Part') select col1 from hive_test_src ;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/loadpart2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/loadpart2.q
new file mode 100644
index 0000000000..a252eaa00d
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/loadpart2.q
@@ -0,0 +1,9 @@
+
+create table hive_test ( col1 string ) partitioned by ( pcol1 string , pcol2 string) stored as textfile;
+load data local inpath '../../data/files/test.dat' overwrite into table hive_test partition (pcol1='part1',pcol2='part1') ;
+load data local inpath '../../data/files/test.dat' overwrite into table hive_test partition (pcol2='part2',pcol1='part2') ;
+select * from hive_test where pcol1='part1' and pcol2='part1';
+select * from hive_test where pcol1='part2' and pcol2='part2';
+
+
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/loadpart_err.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/loadpart_err.q
index 6e4df21547..cc9c1fec3b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/loadpart_err.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/loadpart_err.q
@@ -1,6 +1,6 @@
set hive.cli.errors.ignore=true;
-ADD FILE ../data/scripts/error_script;
+ADD FILE ../../data/scripts/error_script;
-- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19, 0.20, 0.20S, 0.23)
-- (this test is flaky so it is currently disabled for all Hadoop versions)
@@ -14,7 +14,7 @@ FROM src;
DESCRIBE loadpart1;
SHOW PARTITIONS loadpart1;
-LOAD DATA LOCAL INPATH '../data1/files/kv1.txt' INTO TABLE loadpart1 PARTITION(ds='2009-05-05');
+LOAD DATA LOCAL INPATH '../../data1/files/kv1.txt' INTO TABLE loadpart1 PARTITION(ds='2009-05-05');
SHOW PARTITIONS loadpart1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/macro.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/macro.q
index fd0f7f2b0c..47b05ff449 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/macro.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/macro.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
CREATE TEMPORARY MACRO SIGMOID (x DOUBLE) 1.0 / (1.0 + EXP(-x));
SELECT SIGMOID(2) FROM src LIMIT 1;
EXPLAIN SELECT SIGMOID(2) FROM src LIMIT 1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_addjar.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_addjar.q
new file mode 100644
index 0000000000..f56f074616
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_addjar.q
@@ -0,0 +1,14 @@
+
+set hive.auto.convert.join=true;
+set hive.auto.convert.join.use.nonstaged=false;
+
+add jar ${system:maven.local.repository}/org/apache/hive/hcatalog/hive-hcatalog-core/${system:hive.version}/hive-hcatalog-core-${system:hive.version}.jar;
+
+CREATE TABLE t1 (a string, b string)
+ROW FORMAT SERDE 'org.apache.hive.hcatalog.data.JsonSerDe'
+;
+LOAD DATA LOCAL INPATH "../../data/files/sample.json" INTO TABLE t1;
+select * from src join t1 on src.key =t1.a;
+drop table t1;
+set hive.auto.convert.join=false;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_decimal.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_decimal.q
new file mode 100644
index 0000000000..b65a7be2d2
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_decimal.q
@@ -0,0 +1,35 @@
+set hive.auto.convert.join=true;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=10000000;
+
+CREATE TABLE over1k(t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k;
+
+CREATE TABLE t1(dec decimal(4,2)) STORED AS ORC;
+INSERT INTO TABLE t1 select dec from over1k;
+CREATE TABLE t2(dec decimal(4,0)) STORED AS ORC;
+INSERT INTO TABLE t2 select dec from over1k;
+
+explain
+select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec);
+
+set hive.mapjoin.optimized.keys=false;
+
+select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec);
+
+set hive.mapjoin.optimized.keys=true;
+
+select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec);
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_hook.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_hook.q
index d6811d4932..a9e1960a5b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_hook.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_hook.q
@@ -1,4 +1,5 @@
-set hive.exec.post.hooks = org.apache.hadoop.hive.ql.hooks.MapJoinCounterHook ;
+set hive.exec.post.hooks = org.apache.hadoop.hive.ql.hooks.MapJoinCounterHook,org.apache.hadoop.hive.ql.hooks.PrintCompletedTasksHook;
+
drop table dest1;
CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q
index 3f87db28ed..1eb95f6378 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_mapjoin.q
@@ -6,6 +6,14 @@ set hive.auto.convert.join.noconditionaltask.size=10000;
explain select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key);
-explain select count(*) from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) group by ds;
+explain
+select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) where srcpart.value > 'val_450';
+select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) where srcpart.value > 'val_450';
+
+explain
+select count(*) from srcpart join src on (srcpart.value=src.value) join src src1 on (srcpart.key=src1.key) group by ds;
+select count(*) from srcpart join src on (srcpart.value=src.value) join src src1 on (srcpart.key=src1.key) group by ds;
+
+set hive.mapjoin.lazy.hashtable=false;
select count(*) from srcpart join src src on (srcpart.value=src.value) join src src1 on (srcpart.key=src1.key) group by ds;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_memcheck.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_memcheck.q
new file mode 100644
index 0000000000..b23361724e
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_memcheck.q
@@ -0,0 +1,16 @@
+
+set hive.auto.convert.join = true;
+
+create table src0 like src;
+insert into table src0 select * from src where src.key < 10;
+
+set hive.mapjoin.check.memory.rows=1;
+
+explain
+select src1.key as k1, src1.value as v1, src2.key, src2.value
+from src0 src1 inner join src0 src2 on src1.key = src2.key order by k1, v1;
+
+select src1.key as k1, src1.value as v1, src2.key, src2.value
+from src0 src1 inner join src0 src2 on src1.key = src2.key order by k1, v1;
+
+drop table src0;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_subquery2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_subquery2.q
index 9980946057..aed8990523 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_subquery2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mapjoin_subquery2.q
@@ -11,9 +11,9 @@ ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';
CREATE TABLE z (id INT, name STRING)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t';
-load data local inpath '../data/files/x.txt' INTO TABLE x;
-load data local inpath '../data/files/y.txt' INTO TABLE y;
-load data local inpath '../data/files/z.txt' INTO TABLE z;
+load data local inpath '../../data/files/x.txt' INTO TABLE x;
+load data local inpath '../../data/files/y.txt' INTO TABLE y;
+load data local inpath '../../data/files/z.txt' INTO TABLE z;
set hive.auto.convert.join=true;
set hive.auto.convert.join.noconditionaltask=true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge3.q
index aacd0cd68f..c5c7ea202f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge3.q
@@ -37,7 +37,7 @@ where ds is not null;
show partitions merge_src_part2;
-select * from merge_src_part2 where ds is not null ORDER BY key ASC, value ASC;
+select * from merge_src_part2 where ds is not null ORDER BY key ASC, value ASC, ds ASC;
drop table merge_src_part2;
@@ -54,4 +54,4 @@ select key, value, ds;
show partitions merge_src_part2;
-select * from merge_src_part2 where ds is not null ORDER BY key ASC, value ASC;
+select * from merge_src_part2 where ds is not null ORDER BY key ASC, value ASC, ds ASC;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge4.q
index 744783bd62..5a167aa0f0 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge4.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge4.q
@@ -11,14 +11,14 @@ insert overwrite table nzhang_part partition (ds='2010-08-15', hr) select key, v
insert overwrite table nzhang_part partition (ds='2010-08-15', hr) select key, value, hr from srcpart where ds='2008-04-08';
-select * from nzhang_part;
+select * from nzhang_part ORDER BY key, value, ds, hr;
explain
insert overwrite table nzhang_part partition (ds='2010-08-15', hr=11) select key, value from srcpart where ds='2008-04-08';
insert overwrite table nzhang_part partition (ds='2010-08-15', hr=11) select key, value from srcpart where ds='2008-04-08';
-select * from nzhang_part;
+select * from nzhang_part ORDER BY key, value, ds, hr;
explain
insert overwrite table nzhang_part partition (ds='2010-08-15', hr)
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition.q
index 1379426b60..ae319865f6 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition.q
@@ -5,15 +5,16 @@ create table srcpart_merge_dp like srcpart;
create table merge_dynamic_part like srcpart;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
set hive.merge.mapfiles=false;
set hive.merge.mapredfiles=false;
set hive.merge.smallfiles.avgsize=1000000000;
+set hive.optimize.sort.dynamic.partition=false;
explain
insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp where ds='2008-04-08';
insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp where ds='2008-04-08';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition2.q
index b51c70ed03..73a71e6265 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition2.q
@@ -5,12 +5,12 @@ create table srcpart_merge_dp like srcpart;
create table merge_dynamic_part like srcpart;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../data/files/srcbucket0.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
-load data local inpath '../data/files/srcbucket1.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket0.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket1.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
@@ -18,6 +18,7 @@ set hive.merge.mapfiles=true;
set hive.merge.mapredfiles=true;
set hive.merge.smallfiles.avgsize=3000;
set hive.exec.compress.output=false;
+set hive.optimize.sort.dynamic.partition=false;
explain
insert overwrite table merge_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart_merge_dp where ds='2008-04-08';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition3.q
index b3bcf01ea0..43be59e5d5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition3.q
@@ -5,20 +5,20 @@ create table srcpart_merge_dp like srcpart;
create table merge_dynamic_part like srcpart;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
-
-load data local inpath '../data/files/kv1.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=11);
-load data local inpath '../data/files/kv2.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=11);
-load data local inpath '../data/files/kv1.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=12);
-load data local inpath '../data/files/kv2.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=12);
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
+
+load data local inpath '../../data/files/kv1.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=11);
+load data local inpath '../../data/files/kv2.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=11);
+load data local inpath '../../data/files/kv1.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=12);
+load data local inpath '../../data/files/kv2.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-09', hr=12);
show partitions srcpart_merge_dp;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition4.q
index ef769a042d..589717096a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition4.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition4.q
@@ -9,15 +9,15 @@ alter table srcpart_merge_dp_rc set fileformat RCFILE;
create table merge_dynamic_part like srcpart;
alter table merge_dynamic_part set fileformat RCFILE;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
insert overwrite table srcpart_merge_dp_rc partition (ds = '2008-04-08', hr)
select key, value, hr from srcpart_merge_dp where ds = '2008-04-08';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition5.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition5.q
index a196fa0528..9f64724563 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition5.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/merge_dynamic_partition5.q
@@ -8,12 +8,12 @@ alter table srcpart_merge_dp_rc set fileformat RCFILE;
create table merge_dynamic_part like srcpart;
alter table merge_dynamic_part set fileformat RCFILE;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=11);
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcpart_merge_dp partition(ds='2008-04-08', hr=12);
insert overwrite table srcpart_merge_dp_rc partition (ds = '2008-04-08', hr)
select key, value, hr from srcpart_merge_dp where ds = '2008-04-08';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/metadata_export_drop.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/metadata_export_drop.q
index 41be152e78..e2da61a783 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/metadata_export_drop.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/metadata_export_drop.q
@@ -1,8 +1,8 @@
create table tmp_meta_export_listener_drop_test (foo string);
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/exports/HIVE-3427;
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/data/exports/HIVE-3427;
set hive.metastore.pre.event.listeners=org.apache.hadoop.hive.ql.parse.MetaDataExportListener;
-set hive.metadata.export.location=../build/ql/test/data/exports/HIVE-3427;
+set hive.metadata.export.location=${system:test.tmp.dir}/data/exports/HIVE-3427;
set hive.move.exported.metadata.to.trash=false;
drop table tmp_meta_export_listener_drop_test;
-dfs -rmr ../build/ql/test/data/exports/HIVE-3427;
+dfs -rmr ${system:test.tmp.dir}/data/exports/HIVE-3427;
set hive.metastore.pre.event.listeners=;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/metadata_only_queries.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/metadata_only_queries.q
new file mode 100644
index 0000000000..b549a56232
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/metadata_only_queries.q
@@ -0,0 +1,77 @@
+set hive.stats.dbclass=fs;
+set hive.compute.query.using.stats=true;
+set hive.stats.autogather=true;
+create table over10k(
+ t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal,
+ bin binary)
+ row format delimited
+ fields terminated by '|';
+
+load data local inpath '../../data/files/over10k' into table over10k;
+
+create table stats_tbl(
+ t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal,
+ bin binary);
+
+create table stats_tbl_part(
+ t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal,
+ bin binary) partitioned by (dt string);
+
+
+insert overwrite table stats_tbl select * from over10k;
+
+insert into table stats_tbl_part partition (dt='2010') select * from over10k where t>0 and t<30;
+insert into table stats_tbl_part partition (dt='2011') select * from over10k where t>30 and t<60;
+insert into table stats_tbl_part partition (dt='2012') select * from over10k where t>60;
+
+explain
+select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b) from stats_tbl;
+explain
+select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b) from stats_tbl_part;
+
+analyze table stats_tbl compute statistics for columns t,si,i,b,f,d,bo,s,bin;
+analyze table stats_tbl_part partition(dt='2010') compute statistics for columns t,si,i,b,f,d,bo,s,bin;
+analyze table stats_tbl_part partition(dt='2011') compute statistics for columns t,si,i,b,f,d,bo,s,bin;
+analyze table stats_tbl_part partition(dt='2012') compute statistics for columns t,si,i,b,f,d,bo,s,bin;
+
+explain
+select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl;
+select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl;
+explain
+select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part;
+select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part;
+
+explain select count(ts) from stats_tbl_part;
+
+drop table stats_tbl;
+drop table stats_tbl_part;
+
+set hive.compute.query.using.stats=false;
+set hive.stats.dbclass=jdbc:derby;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q
new file mode 100644
index 0000000000..09f4bff616
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/metadata_only_queries_with_filters.q
@@ -0,0 +1,51 @@
+set hive.stats.dbclass=fs;
+set hive.compute.query.using.stats=true;
+create table over10k(
+ t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal,
+ bin binary)
+ row format delimited
+ fields terminated by '|';
+
+load data local inpath '../../data/files/over10k' into table over10k;
+
+create table stats_tbl_part(
+ t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal,
+ bin binary) partitioned by (dt int);
+
+
+from over10k
+insert overwrite table stats_tbl_part partition (dt=2010) select t,si,i,b,f,d,bo,s,ts,dec,bin where t>0 and t<30
+insert overwrite table stats_tbl_part partition (dt=2014) select t,si,i,b,f,d,bo,s,ts,dec,bin where t > 30 and t<60;
+
+analyze table stats_tbl_part partition(dt) compute statistics;
+analyze table stats_tbl_part partition(dt=2010) compute statistics for columns t,si,i,b,f,d,bo,s,bin;
+analyze table stats_tbl_part partition(dt=2014) compute statistics for columns t,si,i,b,f,d,bo,s,bin;
+
+explain
+select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt = 2010;
+select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt = 2010;
+explain
+select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010;
+select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010;
+
+drop table stats_tbl_part;
+set hive.compute.query.using.stats=false;
+set hive.stats.dbclass=jdbc:derby;
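The FROM ... INSERT ... INSERT form above scans over10k once and fans the rows out into two partitions. The same multi-insert pattern in isolation, on hypothetical tables multi_a and multi_b:

create table multi_a(key string);
create table multi_b(key string);

-- one scan of src feeds both inserts, each with its own filter
from src
insert overwrite table multi_a select key where key < '100'
insert overwrite table multi_b select key where key >= '100';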
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mi.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mi.q
index 067c143c0e..2a6059b3a3 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mi.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mi.q
@@ -15,7 +15,7 @@ GROUP BY key, value, ds, hr;
show partitions nzhang_t1;
show partitions nzhang_t2;
-select * from nzhang_t1;
-select * from nzhang_t2;
+select * from nzhang_t1 order by key, value;
+select * from nzhang_t2 order by key, value;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mrr.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mrr.q
new file mode 100644
index 0000000000..9f068cc713
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/mrr.q
@@ -0,0 +1,59 @@
+-- simple query with multiple reduce stages
+EXPLAIN SELECT key, count(value) as cnt FROM src GROUP BY key ORDER BY cnt;
+SELECT key, count(value) as cnt FROM src GROUP BY key ORDER BY cnt;
+
+set hive.auto.convert.join=false;
+-- join query with multiple reduce stages
+EXPLAIN SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt;
+SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt;
+
+set hive.auto.convert.join=true;
+-- same query with broadcast join
+EXPLAIN SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt;
+SELECT s2.key, count(distinct s2.value) as cnt FROM src s1 join src s2 on (s1.key = s2.key) GROUP BY s2.key ORDER BY cnt;
+
+set hive.auto.convert.join=false;
+-- query with multiple branches in the task dag
+EXPLAIN
+SELECT *
+FROM
+ (SELECT key, count(value) as cnt
+ FROM src GROUP BY key ORDER BY cnt) s1
+ JOIN
+ (SELECT key, count(value) as cnt
+ FROM src GROUP BY key ORDER BY cnt) s2
+ JOIN
+ (SELECT key, count(value) as cnt
+ FROM src GROUP BY key ORDER BY cnt) s3
+ ON (s1.key = s2.key and s1.key = s3.key)
+WHERE
+ s1.cnt > 1
+ORDER BY s1.key;
+
+SELECT *
+FROM
+ (SELECT key, count(value) as cnt
+ FROM src GROUP BY key ORDER BY cnt) s1
+ JOIN
+ (SELECT key, count(value) as cnt
+ FROM src GROUP BY key ORDER BY cnt) s2
+ JOIN
+ (SELECT key, count(value) as cnt
+ FROM src GROUP BY key ORDER BY cnt) s3
+ ON (s1.key = s2.key and s1.key = s3.key)
+WHERE
+ s1.cnt > 1
+ORDER BY s1.key;
+
+set hive.auto.convert.join=true;
+-- query with broadcast join in the reduce stage
+EXPLAIN
+SELECT *
+FROM
+ (SELECT key, count(value) as cnt FROM src GROUP BY key) s1
+ JOIN src ON (s1.key = src.key);
+
+SELECT *
+FROM
+ (SELECT key, count(value) as cnt FROM src GROUP BY key) s1
+ JOIN src ON (s1.key = src.key);
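mrr.q runs each join twice, flipping hive.auto.convert.join to compare a shuffle join against a broadcast (map-side) join over the same data. The toggle pattern on its own, as a sketch:

set hive.auto.convert.join=false;
-- shuffle join: both inputs are redistributed by key to the reducers
explain select count(*) from src a join src b on (a.key = b.key);

set hive.auto.convert.join=true;
-- map join: the smaller input is built into an in-memory hash table
explain select count(*) from src a join src b on (a.key = b.key);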
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/multiMapJoin1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/multiMapJoin1.q
index 9a0a792a91..455f550ae3 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/multiMapJoin1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/multiMapJoin1.q
@@ -1,3 +1,5 @@
+set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecutePrinter,org.apache.hadoop.hive.ql.hooks.PrintCompletedTasksHook;
+
create table smallTbl1(key string, value string);
insert overwrite table smallTbl1 select * from src where key < 10;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/multiMapJoin2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/multiMapJoin2.q
index ce6cf6d8d6..141db4db0a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/multiMapJoin2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/multiMapJoin2.q
@@ -1,3 +1,4 @@
+set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecutePrinter,org.apache.hadoop.hive.ql.hooks.PrintCompletedTasksHook;
set hive.auto.convert.join=true;
set hive.auto.convert.join.noconditionaltask=true;
set hive.auto.convert.join.noconditionaltask.size=6000;
@@ -187,3 +188,28 @@ FROM part_table x JOIN src1 y ON (x.key = y.key);
SELECT count(*)
FROM part_table x JOIN src1 y ON (x.key = y.key);
+set hive.auto.convert.join.noconditionaltask.size=10000000;
+set hive.optimize.correlation=false;
+-- HIVE-5891 Alias conflict when merging multiple mapjoin tasks into their common
+-- child mapred task
+EXPLAIN
+SELECT * FROM (
+ SELECT c.key FROM
+ (SELECT a.key FROM src a JOIN src b ON a.key=b.key GROUP BY a.key) tmp
+ JOIN src c ON tmp.key=c.key
+ UNION ALL
+ SELECT c.key FROM
+ (SELECT a.key FROM src a JOIN src b ON a.key=b.key GROUP BY a.key) tmp
+ JOIN src c ON tmp.key=c.key
+) x;
+
+SELECT * FROM (
+ SELECT c.key FROM
+ (SELECT a.key FROM src a JOIN src b ON a.key=b.key GROUP BY a.key) tmp
+ JOIN src c ON tmp.key=c.key
+ UNION ALL
+ SELECT c.key FROM
+ (SELECT a.key FROM src a JOIN src b ON a.key=b.key GROUP BY a.key) tmp
+ JOIN src c ON tmp.key=c.key
+) x;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nested_complex.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nested_complex.q
index b94fbb7b8b..6fd76b859e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nested_complex.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nested_complex.q
@@ -17,6 +17,6 @@ describe nestedcomplex;
describe extended nestedcomplex;
-load data local inpath '../data/files/nested_complex.txt' overwrite into table nestedcomplex;
+load data local inpath '../../data/files/nested_complex.txt' overwrite into table nestedcomplex;
select * from nestedcomplex sort by simple_int;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/newline.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/newline.q
index 722ecf6d97..11168fcd3b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/newline.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/newline.q
@@ -1,4 +1,4 @@
-add file ../data/scripts/newline.py;
+add file ../../data/scripts/newline.py;
set hive.transform.escape.input=true;
create table tmp_tmp(key string, value string) stored as rcfile;
@@ -10,10 +10,10 @@ select * from tmp_tmp ORDER BY key ASC, value ASC;
drop table tmp_tmp;
-add file ../data/scripts/escapednewline.py;
-add file ../data/scripts/escapedtab.py;
-add file ../data/scripts/doubleescapedtab.py;
-add file ../data/scripts/escapedcarriagereturn.py;
+add file ../../data/scripts/escapednewline.py;
+add file ../../data/scripts/escapedtab.py;
+add file ../../data/scripts/doubleescapedtab.py;
+add file ../../data/scripts/escapedcarriagereturn.py;
create table tmp_tmp(key string, value string) stored as rcfile;
insert overwrite table tmp_tmp
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nonmr_fetch_threshold.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nonmr_fetch_threshold.q
new file mode 100644
index 0000000000..e6343e2f53
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nonmr_fetch_threshold.q
@@ -0,0 +1,9 @@
+set hive.fetch.task.conversion=more;
+
+explain select * from srcpart where ds='2008-04-08' AND hr='11' limit 10;
+explain select cast(key as int) * 10, upper(value) from src limit 10;
+
+set hive.fetch.task.conversion.threshold=100;
+
+explain select * from srcpart where ds='2008-04-08' AND hr='11' limit 10;
+explain select cast(key as int) * 10, upper(value) from src limit 10;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nonreserved_keywords_input37.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nonreserved_keywords_input37.q
index 9cb89da373..e33b4bfcab 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nonreserved_keywords_input37.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nonreserved_keywords_input37.q
@@ -1,12 +1,12 @@
CREATE TABLE table(string string) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/docurl.txt' INTO TABLE table;
+LOAD DATA LOCAL INPATH '../../data/files/docurl.txt' INTO TABLE table;
SELECT table, count(1)
FROM
(
FROM table
SELECT TRANSFORM (table.string)
- USING 'java -cp ../build/ql/test/classes org.apache.hadoop.hive.scripts.extracturl' AS (table, count)
+ USING 'java -cp ../util/target/classes/ org.apache.hadoop.hive.scripts.extracturl' AS (table, count)
) subq
GROUP BY table;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/notable_alias3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/notable_alias3.q
new file mode 100644
index 0000000000..aa79674409
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/notable_alias3.q
@@ -0,0 +1,4 @@
+CREATE TABLE dest1(c string, key INT, value DOUBLE) STORED AS TEXTFILE;
+
+FROM src
+INSERT OVERWRITE TABLE dest1 SELECT '1234', src.key, sum(src.value) WHERE src.key < 100 group by key;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/null_cast.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/null_cast.q
index 48c39b81fd..bd0cb8d12d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/null_cast.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/null_cast.q
@@ -2,10 +2,10 @@ EXPLAIN SELECT ARRAY(NULL, 0),
ARRAY(NULL, ARRAY()),
ARRAY(NULL, MAP()),
ARRAY(NULL, STRUCT(0))
- FROM src LIMIT 1;
+ FROM src tablesample (1 rows);
SELECT ARRAY(NULL, 0),
ARRAY(NULL, ARRAY()),
ARRAY(NULL, MAP()),
ARRAY(NULL, STRUCT(0))
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
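Several hunks in this patch replace LIMIT n with tablesample (n rows). The latter takes the first n rows from each input split, so for the single-file src table used by these tests it returns a deterministic n-row sample regardless of plan shape. A one-line sketch:

-- first 3 rows of each input split of src; deterministic for a single split
select key, value from src tablesample (3 rows);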
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/null_column.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/null_column.q
index fa4a863944..4b43d608e8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/null_column.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/null_column.q
@@ -3,7 +3,7 @@
create table temp_null(a int) stored as textfile;
-load data local inpath '../data/files/test.dat' overwrite into table temp_null;
+load data local inpath '../../data/files/test.dat' overwrite into table temp_null;
select null, null from temp_null;
@@ -15,8 +15,8 @@ create table tt_b(a int, b string) row format serde "org.apache.hadoop.hive.serd
insert overwrite table tt_b select null, null from temp_null;
select * from tt_b;
-insert overwrite directory "../build/ql/test/data/warehouse/null_columns.out" select null, null from temp_null;
-dfs -cat ../build/ql/test/data/warehouse/null_columns.out/*;
+insert overwrite directory "target/warehouse/null_columns.out" select null, null from temp_null;
+dfs -cat ${system:test.warehouse.dir}/null_columns.out/*;
create table temp_null2 (key string, value string) partitioned by (ds string);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullformat.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullformat.q
new file mode 100644
index 0000000000..c9a7dab5eb
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullformat.q
@@ -0,0 +1,24 @@
+-- base table with null data
+DROP TABLE IF EXISTS base_tab;
+CREATE TABLE base_tab(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE base_tab;
+DESCRIBE EXTENDED base_tab;
+
+-- table with non-default null format
+DROP TABLE IF EXISTS null_tab1;
+EXPLAIN CREATE TABLE null_tab1(a STRING, b STRING) ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull';
+CREATE TABLE null_tab1(a STRING, b STRING) ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull';
+DESCRIBE EXTENDED null_tab1;
+SHOW CREATE TABLE null_tab1;
+
+-- load null data from another table and verify that the null is stored in the expected format
+INSERT OVERWRITE TABLE null_tab1 SELECT a,b FROM base_tab;
+dfs -cat ${system:test.warehouse.dir}/null_tab1/*;
+SELECT * FROM null_tab1;
+-- alter the null format and verify that the old null format is no longer in effect
+ALTER TABLE null_tab1 SET SERDEPROPERTIES ( 'serialization.null.format'='foo');
+SELECT * FROM null_tab1;
+
+
+DROP TABLE null_tab1;
+DROP TABLE base_tab;
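nullformat.q pins the on-disk NULL marker at create time with NULL DEFINED AS and then retargets it through the serde property. The two knobs side by side, on a hypothetical table null_demo:

-- write NULLs to the text files as the literal token 'fooNull'
create table null_demo(a string) row format delimited null defined as 'fooNull';
insert overwrite table null_demo select if(key = '0', null, key) from src;

-- change only the serde-level marker; existing files still contain
-- 'fooNull', which now reads back as an ordinary string, not NULL
alter table null_demo set serdeproperties ('serialization.null.format'='foo');
select * from null_demo limit 5;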
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullformatCTAS.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullformatCTAS.q
new file mode 100644
index 0000000000..d077981d02
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullformatCTAS.q
@@ -0,0 +1,24 @@
+-- base table with null data
+DROP TABLE IF EXISTS base_tab;
+CREATE TABLE base_tab(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE base_tab;
+DESCRIBE EXTENDED base_tab;
+
+-- table with non-default null format
+DROP TABLE IF EXISTS null_tab3;
+EXPLAIN CREATE TABLE null_tab3 ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull'
+ AS SELECT a, b FROM base_tab;
+CREATE TABLE null_tab3 ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull'
+ AS SELECT a, b FROM base_tab;
+DESCRIBE EXTENDED null_tab3;
+SHOW CREATE TABLE null_tab3;
+
+dfs -cat ${system:test.warehouse.dir}/null_tab3/*;
+SELECT * FROM null_tab3;
+-- alter the null format and verify that the old null format is no longer in effect
+ALTER TABLE null_tab3 SET SERDEPROPERTIES ( 'serialization.null.format'='foo');
+SELECT * FROM null_tab3;
+
+
+DROP TABLE null_tab3;
+DROP TABLE base_tab;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullformatdir.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullformatdir.q
new file mode 100644
index 0000000000..d29863839f
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullformatdir.q
@@ -0,0 +1,21 @@
+-- base table with null data
+DROP TABLE IF EXISTS base_tab;
+CREATE TABLE base_tab(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE;
+LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE base_tab;
+DESCRIBE EXTENDED base_tab;
+
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/hive_test/nullformat/tmp;
+dfs -rmr ${system:test.tmp.dir}/hive_test/nullformat/*;
+INSERT OVERWRITE LOCAL DIRECTORY '${system:test.tmp.dir}/hive_test/nullformat'
+ ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull' SELECT a,b FROM base_tab;
+dfs -cat ${system:test.tmp.dir}/hive_test/nullformat/000000_0;
+
+-- load the exported data back into a table with same null format and verify null values
+DROP TABLE IF EXISTS null_tab2;
+CREATE TABLE null_tab2(a STRING, b STRING) ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull';
+LOAD DATA LOCAL INPATH '${system:test.tmp.dir}/hive_test/nullformat/000000_0' INTO TABLE null_tab2;
+SELECT * FROM null_tab2;
+
+
+dfs -rmr ${system:test.tmp.dir}/hive_test/nullformat;
+DROP TABLE base_tab;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullgroup3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullgroup3.q
index a5bc9ff1f3..19e5b10786 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullgroup3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullgroup3.q
@@ -1,28 +1,28 @@
CREATE TABLE tstparttbl(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-09');
-LOAD DATA LOCAL INPATH '../data/files/nullfile.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-08');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-08');
explain
select count(1) from tstparttbl;
select count(1) from tstparttbl;
CREATE TABLE tstparttbl2(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-09');
-LOAD DATA LOCAL INPATH '../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-08');
+LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-08');
explain
select count(1) from tstparttbl2;
select count(1) from tstparttbl2;
DROP TABLE tstparttbl;
CREATE TABLE tstparttbl(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-09');
-LOAD DATA LOCAL INPATH '../data/files/nullfile.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-08');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl PARTITION (ds='2008-04-08');
explain
select count(1) from tstparttbl;
select count(1) from tstparttbl;
DROP TABLE tstparttbl2;
CREATE TABLE tstparttbl2(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-09');
-LOAD DATA LOCAL INPATH '../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-08');
+LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE tstparttbl2 PARTITION (ds='2008-04-08');
explain
select count(1) from tstparttbl2;
select count(1) from tstparttbl2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullgroup5.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullgroup5.q
index 12773b6159..b4b68fb8b6 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullgroup5.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullgroup5.q
@@ -1,10 +1,10 @@
CREATE TABLE tstparttbl(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE tstparttbl PARTITION (ds='2009-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl PARTITION (ds='2009-04-09');
CREATE TABLE tstparttbl2(KEY STRING, VALUE STRING) PARTITIONED BY(ds string) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE tstparttbl2 PARTITION (ds='2009-04-09');
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE tstparttbl2 PARTITION (ds='2009-04-09');
explain
select u.* from
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullscript.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullscript.q
index 95c9e1df37..11f4a7a78f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullscript.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/nullscript.q
@@ -1,7 +1,7 @@
CREATE TABLE nullscript(KEY STRING, VALUE STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE nullscript;
-LOAD DATA LOCAL INPATH '../data/files/nullfile.txt' INTO TABLE nullscript;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE nullscript;
+LOAD DATA LOCAL INPATH '../../data/files/nullfile.txt' INTO TABLE nullscript;
explain
select transform(key) using 'cat' as key1 from nullscript;
select transform(key) using 'cat' as key1 from nullscript;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/num_op_type_conv.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/num_op_type_conv.q
index 7f858d3e6f..d51c2107e1 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/num_op_type_conv.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/num_op_type_conv.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
EXPLAIN SELECT null + 7, 1.0 - null, null + null,
CAST(21 AS BIGINT) % CAST(5 AS TINYINT),
CAST(21 AS BIGINT) % CAST(21 AS BIGINT),
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ops_comparison.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ops_comparison.q
index b685ae6c4e..ec9e8076fd 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ops_comparison.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ops_comparison.q
@@ -1,3 +1,4 @@
+set hive.fetch.task.conversion=more;
select 1.0 < 2.0 from src limit 1;
select 2.0 < 2.0 from src limit 1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/optrstat_groupby.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/optrstat_groupby.q
deleted file mode 100644
index 5993041405..0000000000
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/optrstat_groupby.q
+++ /dev/null
@@ -1,6 +0,0 @@
-SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.OptrStatGroupByHook;
-SET hive.exec.mode.local.auto=false;
-SET hive.task.progress=true;
--- This test executes the OptrStatGroupBy hook which prints the optr level
--- stats of GROUPBY optr present is the plan of below query
-SELECT count(1) FROM src;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_analyze.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_analyze.q
new file mode 100644
index 0000000000..915f4f0d71
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_analyze.q
@@ -0,0 +1,179 @@
+CREATE TABLE orc_create_people_staging (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp,
+ state string);
+
+LOAD DATA LOCAL INPATH '../../data/files/orc_create_people.txt' OVERWRITE INTO TABLE orc_create_people_staging;
+
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+set hive.stats.autogather=false;
+-- non-partitioned table
+-- partial scan gather
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp,
+ state string)
+STORED AS orc;
+
+INSERT OVERWRITE TABLE orc_create_people SELECT * FROM orc_create_people_staging ORDER BY id;
+
+set hive.stats.autogather = true;
+analyze table orc_create_people compute statistics partialscan;
+
+desc formatted orc_create_people;
+
+drop table orc_create_people;
+
+-- auto stats gather
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp,
+ state string)
+STORED AS orc;
+
+INSERT OVERWRITE TABLE orc_create_people SELECT * FROM orc_create_people_staging ORDER BY id;
+
+desc formatted orc_create_people;
+
+drop table orc_create_people;
+
+set hive.stats.autogather=false;
+-- partitioned table
+-- partial scan gather
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp)
+PARTITIONED BY (state string)
+STORED AS orc;
+
+INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
+ SELECT * FROM orc_create_people_staging ORDER BY id;
+
+set hive.stats.autogather = true;
+analyze table orc_create_people partition(state) compute statistics partialscan;
+
+desc formatted orc_create_people partition(state="Ca");
+desc formatted orc_create_people partition(state="Or");
+
+drop table orc_create_people;
+
+-- auto stats gather
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp)
+PARTITIONED BY (state string)
+STORED AS orc;
+
+INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
+ SELECT * FROM orc_create_people_staging ORDER BY id;
+
+desc formatted orc_create_people partition(state="Ca");
+desc formatted orc_create_people partition(state="Or");
+
+drop table orc_create_people;
+
+set hive.stats.autogather=false;
+-- partitioned and bucketed table
+-- partial scan gather
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp)
+PARTITIONED BY (state string)
+clustered by (first_name)
+sorted by (last_name)
+into 4 buckets
+STORED AS orc;
+
+INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
+ SELECT * FROM orc_create_people_staging ORDER BY id;
+
+set hive.stats.autogather = true;
+analyze table orc_create_people partition(state) compute statistics partialscan;
+
+desc formatted orc_create_people partition(state="Ca");
+desc formatted orc_create_people partition(state="Or");
+
+drop table orc_create_people;
+
+-- auto stats gather
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp)
+PARTITIONED BY (state string)
+clustered by (first_name)
+sorted by (last_name)
+into 4 buckets
+STORED AS orc;
+
+INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
+ SELECT * FROM orc_create_people_staging ORDER BY id;
+
+desc formatted orc_create_people partition(state="Ca");
+desc formatted orc_create_people partition(state="Or");
+
+drop table orc_create_people;
+
+set hive.stats.autogather=false;
+-- create table with partitions containing text and ORC files.
+-- ORC files implement StatsProvidingRecordReader but text files do not.
+-- So the partition containing the text file should not have statistics.
+CREATE TABLE orc_create_people (
+ id int,
+ first_name string,
+ last_name string,
+ address string,
+ salary decimal,
+ start_date timestamp)
+PARTITIONED BY (state string)
+STORED AS orc;
+
+INSERT OVERWRITE TABLE orc_create_people PARTITION (state)
+ SELECT * FROM orc_create_people_staging ORDER BY id;
+
+-- set the table to text format
+ALTER TABLE orc_create_people SET SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe';
+ALTER TABLE orc_create_people SET FILEFORMAT TEXTFILE;
+
+-- load the text data into a new partition
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE orc_create_people PARTITION(state="OH");
+
+-- set the table back to orc
+ALTER TABLE orc_create_people SET SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde';
+ALTER TABLE orc_create_people SET FILEFORMAT ORC;
+
+set hive.stats.autogather = true;
+analyze table orc_create_people partition(state) compute statistics noscan;
+
+desc formatted orc_create_people partition(state="Ca");
+desc formatted orc_create_people partition(state="OH");
+
+drop table orc_create_people;
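orc_analyze.q exercises the different stats-gathering modes. For reference, the three ANALYZE variants differ only in how much data they read; a sketch on a hypothetical ORC table t_orc:

-- full scan: reads the rows themselves
analyze table t_orc compute statistics;

-- partialscan: reads only file metadata such as ORC footers for row counts
analyze table t_orc compute statistics partialscan;

-- noscan: reads no data at all; only file counts and sizes are recorded
analyze table t_orc compute statistics noscan;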
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_create.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_create.q
index 6aca548644..a82c1a55d5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_create.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_create.q
@@ -40,7 +40,7 @@ set hive.default.fileformat=orc;
CREATE TABLE orc_create (key INT, value STRING)
PARTITIONED BY (ds string);
-set hive.default.fileformat=text;
+set hive.default.fileformat=TextFile;
DESCRIBE FORMATTED orc_create;
@@ -53,7 +53,7 @@ CREATE TABLE orc_create_complex (
DESCRIBE FORMATTED orc_create_complex;
-LOAD DATA LOCAL INPATH '../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging;
+LOAD DATA LOCAL INPATH '../../data/files/orc_create.txt' OVERWRITE INTO TABLE orc_create_staging;
SELECT * from orc_create_staging;
@@ -70,16 +70,20 @@ CREATE TABLE orc_create_people_staging (
first_name string,
last_name string,
address string,
+ salary decimal,
+ start_date timestamp,
state string);
-LOAD DATA LOCAL INPATH '../data/files/orc_create_people.txt'
+LOAD DATA LOCAL INPATH '../../data/files/orc_create_people.txt'
OVERWRITE INTO TABLE orc_create_people_staging;
CREATE TABLE orc_create_people (
id int,
first_name string,
last_name string,
- address string)
+ address string,
+ salary decimal,
+ start_date timestamp)
PARTITIONED BY (state string)
STORED AS orc;
@@ -92,9 +96,26 @@ SET hive.optimize.index.filter=true;
-- test predicate push down with partition pruning
SELECT COUNT(*) FROM orc_create_people where id < 10 and state = 'Ca';
+-- test predicate push down
+SELECT COUNT(*) FROM orc_create_people where id = 50;
+SELECT COUNT(*) FROM orc_create_people where id between 10 and 20;
+SELECT COUNT(*) FROM orc_create_people where id > 10 and id < 100;
+SELECT COUNT(*) FROM orc_create_people where (id + 1) = 20;
+SELECT COUNT(*) FROM orc_create_people where (id + 10) < 200;
+SELECT COUNT(*) FROM orc_create_people where id < 30 or first_name = "Rafael";
+SELECT COUNT(*) FROM orc_create_people
+ where length(substr(first_name, 1, 2)) <= 2 and last_name like '%';
+SELECT COUNT(*) FROM orc_create_people where salary = 200.00;
+SELECT COUNT(*) FROM orc_create_people WHERE start_date IS NULL;
+SELECT COUNT(*) FROM orc_create_people WHERE YEAR(start_date) = 2014;
+
+-- test predicate push down with partition pruning
+SELECT COUNT(*) FROM orc_create_people where salary = 200.00 and state = 'Ca';
+
-- test predicate push down with no column projection
SELECT id, first_name, last_name, address
- FROM orc_create_people WHERE id > 90;
+ FROM orc_create_people WHERE id > 90
+ ORDER BY id, first_name, last_name;
DROP TABLE orc_create;
DROP TABLE orc_create_complex;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_dictionary_threshold.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_dictionary_threshold.q
index f916012b83..a0eaab75fa 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_dictionary_threshold.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_dictionary_threshold.q
@@ -19,7 +19,7 @@ SELECT * FROM test_orc;
ALTER TABLE test_orc SET SERDEPROPERTIES ('orc.stripe.size' = '1');
CREATE TABLE src_thousand(key STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1kv2.cogroup.txt'
+LOAD DATA LOCAL INPATH '../../data/files/kv1kv2.cogroup.txt'
INTO TABLE src_thousand;
set hive.exec.orc.dictionary.key.size.threshold=0.5;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_diff_part_cols.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_diff_part_cols.q
index cbfd7b359f..0c8861e41c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_diff_part_cols.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_diff_part_cols.q
@@ -10,7 +10,7 @@ set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
-- to another partition
-- This can produce unexpected results with CombineHiveInputFormat
-INSERT OVERWRITE TABLE test_orc PARTITION (part = '1') SELECT key FROM src LIMIT 5;
+INSERT OVERWRITE TABLE test_orc PARTITION (part = '1') SELECT key FROM src tablesample (5 rows);
ALTER TABLE test_orc ADD COLUMNS (cnt INT);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_diff_part_cols2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_diff_part_cols2.q
new file mode 100644
index 0000000000..f7e80a75b3
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_diff_part_cols2.q
@@ -0,0 +1,11 @@
+-- Create a table with one column, write to it, then add an additional column
+-- This can break reads
+
+CREATE TABLE test_orc (key STRING)
+STORED AS ORC;
+
+INSERT OVERWRITE TABLE test_orc SELECT key FROM src LIMIT 5;
+
+ALTER TABLE test_orc ADD COLUMNS (value STRING);
+
+SELECT * FROM test_orc order by key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_empty_strings.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_empty_strings.q
index 0ef57d18cc..34cd6d47df 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_empty_strings.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_empty_strings.q
@@ -3,13 +3,13 @@ ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde'
STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat'
OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat';
-INSERT OVERWRITE TABLE test_orc SELECT '' FROM src limit 10;
+INSERT OVERWRITE TABLE test_orc SELECT '' FROM src tablesample (10 rows);
-- Test reading a column which is just empty strings
SELECT * FROM test_orc;
-INSERT OVERWRITE TABLE test_orc SELECT IF (key % 3 = 0, key, '') FROM src limit 10;
+INSERT OVERWRITE TABLE test_orc SELECT IF (key % 3 = 0, key, '') FROM src tablesample (10 rows);
-- Test reading a column which has some empty strings
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ends_with_nulls.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ends_with_nulls.q
index 6685da7a82..83c5a0505e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ends_with_nulls.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ends_with_nulls.q
@@ -10,7 +10,7 @@ ALTER TABLE test_orc SET SERDEPROPERTIES ('orc.row.index.stride' = '1000');
-- to last index stride are the same (there's only two index strides)
CREATE TABLE src_null(a STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/nulls.txt' INTO TABLE src_null;
+LOAD DATA LOCAL INPATH '../../data/files/nulls.txt' INTO TABLE src_null;
INSERT OVERWRITE TABLE test_orc SELECT a FROM src_null;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_min_max.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_min_max.q
new file mode 100644
index 0000000000..b81adf2af3
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_min_max.q
@@ -0,0 +1,32 @@
+create table if not exists alltypes (
+ bo boolean,
+ ti tinyint,
+ si smallint,
+ i int,
+ bi bigint,
+ f float,
+ d double,
+ de decimal(10,3),
+ ts timestamp,
+ da date,
+ s string,
+ c char(5),
+ vc varchar(5),
+ m map<string, string>,
+ l array<int>,
+ st struct<c1:int, c2:string>
+) row format delimited fields terminated by '|'
+collection items terminated by ','
+map keys terminated by ':' stored as textfile;
+
+create table alltypes_orc like alltypes;
+alter table alltypes_orc set fileformat orc;
+
+load data local inpath '../../data/files/alltypes2.txt' overwrite into table alltypes;
+
+insert overwrite table alltypes_orc select * from alltypes;
+
+select min(bo), max(bo), min(ti), max(ti), min(si), max(si), min(i), max(i), min(bi), max(bi), min(f), max(f), min(d), max(d), min(de), max(de), min(ts), max(ts), min(da), max(da), min(s), max(s), min(c), max(c), min(vc), max(vc) from alltypes;
+
+select min(bo), max(bo), min(ti), max(ti), min(si), max(si), min(i), max(i), min(bi), max(bi), min(f), max(f), min(d), max(d), min(de), max(de), min(ts), max(ts), min(da), max(da), min(s), max(s), min(c), max(c), min(vc), max(vc) from alltypes_orc;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_char.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_char.q
new file mode 100644
index 0000000000..1f5f54ae19
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_char.q
@@ -0,0 +1,76 @@
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+SET mapred.min.split.size=1000;
+SET mapred.max.split.size=5000;
+
+create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), da date) stored as orc tblproperties("orc.stripe.size"="16777216");
+
+insert overwrite table newtypesorc select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl;
+
+set hive.optimize.index.filter=false;
+
+-- char data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
+select sum(hash(*)) from newtypesorc where c="apple";
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where c="apple";
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where c!="apple";
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where c!="apple";
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where c<"hello";
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where c<"hello";
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where c<="hello";
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where c<="hello";
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where c="apple ";
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where c="apple ";
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where c in ("apple", "carrot");
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where c in ("apple", "carrot");
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where c in ("apple", "hello");
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where c in ("apple", "hello");
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where c in ("carrot");
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where c in ("carrot");
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where c between "apple" and "carrot";
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where c between "apple" and "carrot";
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where c between "apple" and "zombie";
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where c between "apple" and "zombie";
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where c between "carrot" and "carrot1";
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where c between "carrot" and "carrot1";
+
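Every predicate in orc_ppd_char.q runs twice, once with hive.optimize.index.filter off and once with it on, and the sum(hash(*)) checksum must match: row-group pruning may skip data, but it must never change the result. The verification pattern in isolation, for one of the predicates above:

set hive.optimize.index.filter=false;
-- baseline: every row group is read and every row evaluated
select sum(hash(*)) from newtypesorc where c="apple";

set hive.optimize.index.filter=true;
-- with pushdown: ORC min/max indexes prune row groups up front; the
-- checksum over the surviving rows must equal the baseline
select sum(hash(*)) from newtypesorc where c="apple";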
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_date.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_date.q
new file mode 100644
index 0000000000..c34be867e4
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_date.q
@@ -0,0 +1,97 @@
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+SET mapred.min.split.size=1000;
+SET mapred.max.split.size=5000;
+
+create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), da date) stored as orc tblproperties("orc.stripe.size"="16777216");
+
+insert overwrite table newtypesorc select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl;
+
+-- date data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
+select sum(hash(*)) from newtypesorc where da='1970-02-20';
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where da='1970-02-20';
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where da=cast('1970-02-20' as date);
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where da=cast('1970-02-20' as date);
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where da=cast('1970-02-20' as varchar(20));
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where da=cast('1970-02-20' as varchar(20));
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where da!='1970-02-20';
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where da!='1970-02-20';
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where da<'1970-02-27';
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where da<'1970-02-27';
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where da<'1970-02-29';
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where da<'1970-02-29';
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where da<'1970-02-15';
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where da<'1970-02-15';
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where da<='1970-02-20';
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where da<='1970-02-20';
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where da<='1970-02-27';
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where da<='1970-02-27';
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where da in (cast('1970-02-21' as date), cast('1970-02-27' as date));
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where da in (cast('1970-02-21' as date), cast('1970-02-27' as date));
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where da in (cast('1970-02-20' as date), cast('1970-02-27' as date));
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where da in (cast('1970-02-20' as date), cast('1970-02-27' as date));
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where da in (cast('1970-02-21' as date), cast('1970-02-22' as date));
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where da in (cast('1970-02-21' as date), cast('1970-02-22' as date));
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where da between '1970-02-19' and '1970-02-22';
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where da between '1970-02-19' and '1970-02-22';
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where da between '1970-02-19' and '1970-02-28';
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where da between '1970-02-19' and '1970-02-28';
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where da between '1970-02-18' and '1970-02-19';
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where da between '1970-02-18' and '1970-02-19';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_decimal.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_decimal.q
new file mode 100644
index 0000000000..a93590eacc
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_decimal.q
@@ -0,0 +1,151 @@
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+SET mapred.min.split.size=1000;
+SET mapred.max.split.size=5000;
+
+create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), da date) stored as orc tblproperties("orc.stripe.size"="16777216");
+
+insert overwrite table newtypesorc select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl;
+
+-- decimal data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
+select sum(hash(*)) from newtypesorc where d=0.22;
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d=0.22;
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d='0.22';
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d='0.22';
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d=cast('0.22' as float);
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d=cast('0.22' as float);
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d!=0.22;
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d!=0.22;
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d!='0.22';
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d!='0.22';
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d!=cast('0.22' as float);
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d!=cast('0.22' as float);
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d<11.22;
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d<11.22;
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d<'11.22';
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d<'11.22';
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d<cast('11.22' as float);
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d<cast('11.22' as float);
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d<1;
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d<1;
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d<=11.22;
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d<=11.22;
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d<='11.22';
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d<='11.22';
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d<=cast('11.22' as float);
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d<=cast('11.22' as float);
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d<=12;
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d<=12;
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d in ('0.22', '1.0');
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d in ('0.22', '1.0');
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d in ('0.22', '11.22');
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d in ('0.22', '11.22');
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d in ('0.9', '1.0');
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d in ('0.9', '1.0');
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d in ('0.9', 0.22);
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d in ('0.9', 0.22);
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d in ('0.9', 0.22, cast('11.22' as float));
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d in ('0.9', 0.22, cast('11.22' as float));
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d between 0 and 1;
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d between 0 and 1;
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d between 0 and 1000;
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d between 0 and 1000;
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d between 0 and '2.0';
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d between 0 and '2.0';
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d between 0 and cast(3 as float);
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d between 0 and cast(3 as float);
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where d between 1 and cast(30 as char(10));
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where d between 1 and cast(30 as char(10));
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_varchar.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_varchar.q
new file mode 100644
index 0000000000..0fecc664e4
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_ppd_varchar.q
@@ -0,0 +1,76 @@
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+SET mapred.min.split.size=1000;
+SET mapred.max.split.size=5000;
+
+create table newtypesorc(c char(10), v varchar(10), d decimal(5,3), da date) stored as orc tblproperties("orc.stripe.size"="16777216");
+
+insert overwrite table newtypesorc select * from (select cast("apple" as char(10)), cast("bee" as varchar(10)), 0.22, cast("1970-02-20" as date) from src src1 union all select cast("hello" as char(10)), cast("world" as varchar(10)), 11.22, cast("1970-02-27" as date) from src src2) uniontbl;
+
+set hive.optimize.index.filter=false;
+
+-- varchar data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests)
+select sum(hash(*)) from newtypesorc where v="bee";
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where v="bee";
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where v!="bee";
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where v!="bee";
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where v<"world";
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where v<"world";
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where v<="world";
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where v<="world";
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where v="bee ";
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where v="bee ";
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where v in ("bee", "orange");
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where v in ("bee", "orange");
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where v in ("bee", "world");
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where v in ("bee", "world");
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where v in ("orange");
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where v in ("orange");
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where v between "bee" and "orange";
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where v between "bee" and "orange";
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where v between "bee" and "zombie";
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where v between "bee" and "zombie";
+
+set hive.optimize.index.filter=false;
+select sum(hash(*)) from newtypesorc where v between "orange" and "pine";
+
+set hive.optimize.index.filter=true;
+select sum(hash(*)) from newtypesorc where v between "orange" and "pine";
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_predicate_pushdown.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_predicate_pushdown.q
index f5f25f00c9..a267bfe8e1 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_predicate_pushdown.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_predicate_pushdown.q
@@ -7,7 +7,7 @@ CREATE TABLE orc_pred(t tinyint,
bo boolean,
s string,
ts timestamp,
- dec decimal,
+ dec decimal(4,2),
bin binary)
STORED AS ORC;
@@ -22,12 +22,12 @@ CREATE TABLE staging(t tinyint,
bo boolean,
s string,
ts timestamp,
- dec decimal,
+ dec decimal(4,2),
bin binary)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/over1k' OVERWRITE INTO TABLE staging;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging;
INSERT INTO TABLE orc_pred select * from staging;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_split_elimination.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_split_elimination.q
new file mode 100644
index 0000000000..54eb23e776
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_split_elimination.q
@@ -0,0 +1,168 @@
+create table orc_split_elim (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc;
+
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim;
+
+SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+SET mapred.min.split.size=1000;
+SET mapred.max.split.size=5000;
+SET hive.optimize.index.filter=false;
+
+-- The above table will have 5 splits with the following stats
+-- Stripe 1:
+-- Column 0: count: 5000
+-- Column 1: count: 5000 min: 2 max: 100 sum: 499902
+-- Column 2: count: 5000 min: foo max: zebra sum: 24998
+-- Column 3: count: 5000 min: 0.8 max: 8.0 sum: 39992.8
+-- Column 4: count: 5000 min: 0 max: 1.2 sum: 1.2
+-- Column 5: count: 5000
+-- Stripe 2:
+-- Column 0: count: 5000
+-- Column 1: count: 5000 min: 13 max: 100 sum: 499913
+-- Column 2: count: 5000 min: bar max: zebra sum: 24998
+-- Column 3: count: 5000 min: 8.0 max: 80.0 sum: 40072.0
+-- Column 4: count: 5000 min: 0 max: 2.2 sum: 2.2
+-- Column 5: count: 5000
+-- Stripe 3:
+-- Column 0: count: 5000
+-- Column 1: count: 5000 min: 29 max: 100 sum: 499929
+-- Column 2: count: 5000 min: cat max: zebra sum: 24998
+-- Column 3: count: 5000 min: 8.0 max: 8.0 sum: 40000.0
+-- Column 4: count: 5000 min: 0 max: 3.3 sum: 3.3
+-- Column 5: count: 5000
+-- Stripe 4:
+-- Column 0: count: 5000
+-- Column 1: count: 5000 min: 70 max: 100 sum: 499970
+-- Column 2: count: 5000 min: dog max: zebra sum: 24998
+-- Column 3: count: 5000 min: 1.8 max: 8.0 sum: 39993.8
+-- Column 4: count: 5000 min: 0 max: 4.4 sum: 4.4
+-- Column 5: count: 5000
+-- Stripe 5:
+-- Column 0: count: 5000
+-- Column 1: count: 5000 min: 5 max: 100 sum: 499905
+-- Column 2: count: 5000 min: eat max: zebra sum: 24998
+-- Column 3: count: 5000 min: 0.8 max: 8.0 sum: 39992.8
+-- Column 4: count: 5000 min: 0 max: 5.5 sum: 5.5
+-- Column 5: count: 5000
+
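+-- (Illustrative aside: the stripe stats above can be inspected with Hive's ORC
+-- file dump utility, e.g. `hive --orcfiledump <path>/orc_split_elim.orc`,
+-- assuming that tool is available in this build. Split elimination compares the
+-- pushed-down predicate against each stripe's min/max and skips any stripe in
+-- which no row could match.)
+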
+-- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=0;
+
+SET hive.optimize.index.filter=true;
+-- 0 mapper
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=0;
+SET hive.optimize.index.filter=false;
+
+-- 5 mappers. count should be 0
+select count(*) from orc_split_elim where userid<=0;
+
+SET hive.optimize.index.filter=true;
+-- 0 mapper
+select count(*) from orc_split_elim where userid<=0;
+SET hive.optimize.index.filter=false;
+
+-- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=2 order by userid;
+
+SET hive.optimize.index.filter=true;
+-- 1 mapper
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=2 order by userid;
+SET hive.optimize.index.filter=false;
+
+-- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=5 order by userid;
+
+SET hive.optimize.index.filter=true;
+-- 2 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=5 order by userid;
+SET hive.optimize.index.filter=false;
+
+-- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=13 order by userid;
+
+SET hive.optimize.index.filter=true;
+-- 3 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=13 order by userid;
+SET hive.optimize.index.filter=false;
+
+-- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=29 order by userid;
+
+SET hive.optimize.index.filter=true;
+-- 4 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=29 order by userid;
+SET hive.optimize.index.filter=false;
+
+-- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=70 order by userid;
+
+SET hive.optimize.index.filter=true;
+-- 5 mappers
+select userid,string1,subtype,decimal1,ts from orc_split_elim where userid<=70 order by userid;
+SET hive.optimize.index.filter=false;
+
+-- partitioned table
+create table orc_split_elim_part (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) partitioned by (country string, year int) stored as orc;
+
+alter table orc_split_elim_part add partition(country='us', year=2000);
+alter table orc_split_elim_part add partition(country='us', year=2001);
+
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim_part partition(country='us', year=2000);
+load data local inpath '../../data/files/orc_split_elim.orc' into table orc_split_elim_part partition(country='us', year=2001);
+
+-- 10 mappers - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' order by userid;
+
+SET hive.optimize.index.filter=true;
+-- 2 mappers - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' order by userid;
+SET hive.optimize.index.filter=false;
+
+-- 10 mappers - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and (year=2000 or year=2001) order by userid;
+
+SET hive.optimize.index.filter=true;
+-- 2 mappers - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and (year=2000 or year=2001) order by userid;
+SET hive.optimize.index.filter=false;
+
+-- 10 mappers - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and year=2000 order by userid;
+
+SET hive.optimize.index.filter=true;
+-- 1 mapper - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=2 and country='us' and year=2000 order by userid;
+SET hive.optimize.index.filter=false;
+
+-- 10 mappers - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' order by userid;
+
+SET hive.optimize.index.filter=true;
+-- 4 mappers - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' order by userid;
+SET hive.optimize.index.filter=false;
+
+-- 10 mappers - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and (year=2000 or year=2001) order by userid;
+
+SET hive.optimize.index.filter=true;
+-- 4 mappers - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and (year=2000 or year=2001) order by userid;
+SET hive.optimize.index.filter=false;
+
+-- 10 mappers - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and year=2000 order by userid;
+
+SET hive.optimize.index.filter=true;
+-- 2 mappers - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=5 and country='us' and year=2000 order by userid;
+SET hive.optimize.index.filter=false;
+
+-- 0 mapper - no split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='in' order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='us' and year=2002 order by userid;
+
+SET hive.optimize.index.filter=true;
+-- 0 mapper - split elimination
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='in' order by userid;
+select userid,string1,subtype,decimal1,ts from orc_split_elim_part where userid<=70 and country='us' and year=2002 order by userid;
+SET hive.optimize.index.filter=false;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_vectorization_ppd.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_vectorization_ppd.q
new file mode 100644
index 0000000000..9bdad86e41
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/orc_vectorization_ppd.q
@@ -0,0 +1,69 @@
+-- create table with 1000 rows
+create table srcorc(key string, value string) stored as textfile;
+insert overwrite table srcorc select * from src;
+insert into table srcorc select * from src;
+
+-- load the table so that each row group has 1000 rows and stripes 1 & 2 have 5000 & 2000 rows respectively
+create table if not exists vectororc
+(s1 string,
+s2 string,
+d double,
+s3 string)
+stored as ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="100000", "orc.compress.size"="10000");
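+
+-- (Illustrative note: orc.row.index.stride=1000 means each row group holds
+-- 1000 rows, so the 7000 rows inserted below form seven 1000-row groups; the
+-- row-group comments ahead of each query refer to these groups.)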
+
+-- each insert creates a separate orc file
+insert overwrite table vectororc select "apple", "a", rand(1), "zoo" from srcorc;
+insert into table vectororc select null, "b", rand(2), "zoo" from srcorc;
+insert into table vectororc select null, "c", rand(3), "zoo" from srcorc;
+insert into table vectororc select "apple", "d", rand(4), "zoo" from srcorc;
+insert into table vectororc select null, "e", rand(5), "z" from srcorc;
+insert into table vectororc select "apple", "f", rand(6), "z" from srcorc;
+insert into table vectororc select null, "g", rand(7), "zoo" from srcorc;
+
+-- since the vectororc table has multiple orc files, we load them into a single file using another table
+create table if not exists testorc
+(s1 string,
+s2 string,
+d double,
+s3 string)
+stored as ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="100000", "orc.compress.size"="10000");
+insert overwrite table testorc select * from vectororc order by s2;
+
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+set hive.optimize.index.filter=true;
+
+set hive.vectorized.execution.enabled=false;
+-- row groups (1,4) from stripe 1 and row group (1) from stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s1 is not null;
+set hive.vectorized.execution.enabled=true;
+-- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s1 is not null;
+
+set hive.vectorized.execution.enabled=false;
+-- row groups (2,3,5) from stripe 1 and row group (2) from stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g");
+set hive.vectorized.execution.enabled=true;
+-- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g");
+
+set hive.vectorized.execution.enabled=false;
+-- last row group of stripe 1 and first row group of stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s3="z";
+set hive.vectorized.execution.enabled=true;
+-- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s3="z";
+
+set hive.vectorized.execution.enabled=false;
+-- first row group of stripe 1 and last row group of stripe 2
+-- PPD ONLY
+select count(*),int(sum(d)) from testorc where s2="a" or s2="g";
+set hive.vectorized.execution.enabled=true;
+-- VECTORIZATION + PPD
+select count(*),int(sum(d)) from testorc where s2="a" or s2="g";
+
+drop table srcorc;
+drop table vectororc;
+drop table testorc;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/order_within_subquery.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/order_within_subquery.q
new file mode 100644
index 0000000000..7fc9b44cbc
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/order_within_subquery.q
@@ -0,0 +1,19 @@
+CREATE TABLE part(
+ p_partkey INT,
+ p_name STRING,
+ p_mfgr STRING,
+ p_brand STRING,
+ p_type STRING,
+ p_size INT,
+ p_container STRING,
+ p_retailprice DOUBLE,
+ p_comment STRING
+);
+
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
+
+
+select t1.p_name, t2.p_name
+from (select * from part order by p_size limit 10) t1 join part t2 on t1.p_partkey = t2.p_partkey and t1.p_size = t2.p_size
+where t1.p_partkey < 100000;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parallel_orderby.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parallel_orderby.q
index 5e09395901..73c3940644 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parallel_orderby.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parallel_orderby.q
@@ -1,6 +1,6 @@
create table src5 (key string, value string);
-load data local inpath '../data/files/kv5.txt' into table src5;
-load data local inpath '../data/files/kv5.txt' into table src5;
+load data local inpath '../../data/files/kv5.txt' into table src5;
+load data local inpath '../../data/files/kv5.txt' into table src5;
set mapred.reduce.tasks = 4;
set hive.optimize.sampling.orderby=true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parquet_create.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parquet_create.q
new file mode 100644
index 0000000000..0b976bdbaf
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parquet_create.q
@@ -0,0 +1,36 @@
+DROP TABLE parquet_create_staging;
+DROP TABLE parquet_create;
+
+CREATE TABLE parquet_create_staging (
+ id int,
+ str string,
+ mp MAP<STRING,STRING>,
+ lst ARRAY<STRING>,
+ strct STRUCT<A:STRING,B:STRING>
+) ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|'
+COLLECTION ITEMS TERMINATED BY ','
+MAP KEYS TERMINATED BY ':';
+
+CREATE TABLE parquet_create (
+ id int,
+ str string,
+ mp MAP<STRING,STRING>,
+ lst ARRAY<STRING>,
+ strct STRUCT<A:STRING,B:STRING>
+) STORED AS PARQUET;
+
+DESCRIBE FORMATTED parquet_create;
+
+LOAD DATA LOCAL INPATH '../../data/files/parquet_create.txt' OVERWRITE INTO TABLE parquet_create_staging;
+
+SELECT * FROM parquet_create_staging;
+
+INSERT OVERWRITE TABLE parquet_create SELECT * FROM parquet_create_staging;
+
+SELECT * FROM parquet_create group by id;
+SELECT id, count(0) FROM parquet_create group by id;
+SELECT str from parquet_create;
+SELECT mp from parquet_create;
+SELECT lst from parquet_create;
+SELECT strct from parquet_create;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parquet_ctas.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parquet_ctas.q
new file mode 100644
index 0000000000..652aef1b2b
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parquet_ctas.q
@@ -0,0 +1,24 @@
+drop table staging;
+drop table parquet_ctas;
+drop table parquet_ctas_advanced;
+drop table parquet_ctas_alias;
+drop table parquet_ctas_mixed;
+
+create table staging (key int, value string) stored as textfile;
+insert into table staging select * from src order by key limit 10;
+
+create table parquet_ctas stored as parquet as select * from staging;
+describe parquet_ctas;
+select * from parquet_ctas;
+
+create table parquet_ctas_advanced stored as parquet as select key+1,concat(value,"value") from staging;
+describe parquet_ctas_advanced;
+select * from parquet_ctas_advanced;
+
+create table parquet_ctas_alias stored as parquet as select key+1 as mykey,concat(value,"value") as myvalue from staging;
+describe parquet_ctas_alias;
+select * from parquet_ctas_alias;
+
+create table parquet_ctas_mixed stored as parquet as select key,key+1,concat(value,"value") as myvalue from staging;
+describe parquet_ctas_mixed;
+select * from parquet_ctas_mixed;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parquet_partitioned.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parquet_partitioned.q
new file mode 100644
index 0000000000..5d4f68ea43
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parquet_partitioned.q
@@ -0,0 +1,34 @@
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.exec.dynamic.partition=true;
+
+DROP TABLE parquet_partitioned_staging;
+DROP TABLE parquet_partitioned;
+
+CREATE TABLE parquet_partitioned_staging (
+ id int,
+ str string,
+ part string
+) ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|';
+
+CREATE TABLE parquet_partitioned (
+ id int,
+ str string
+) PARTITIONED BY (part string)
+STORED AS PARQUET;
+
+DESCRIBE FORMATTED parquet_partitioned;
+
+LOAD DATA LOCAL INPATH '../../data/files/parquet_partitioned.txt' OVERWRITE INTO TABLE parquet_partitioned_staging;
+
+SELECT * FROM parquet_partitioned_staging;
+
+INSERT OVERWRITE TABLE parquet_partitioned PARTITION (part) SELECT * FROM parquet_partitioned_staging;
+
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+SELECT * FROM parquet_partitioned ORDER BY id, str;
+SELECT part, COUNT(0) FROM parquet_partitioned GROUP BY part;
+
+set hive.input.format=org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
+SELECT * FROM parquet_partitioned ORDER BY id, str;
+SELECT part, COUNT(0) FROM parquet_partitioned GROUP BY part;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parquet_types.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parquet_types.q
new file mode 100644
index 0000000000..5d6333c934
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/parquet_types.q
@@ -0,0 +1,38 @@
+DROP TABLE parquet_types_staging;
+DROP TABLE parquet_types;
+
+CREATE TABLE parquet_types_staging (
+ cint int,
+ ctinyint tinyint,
+ csmallint smallint,
+ cfloat float,
+ cdouble double,
+ cstring1 string
+) ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|';
+
+CREATE TABLE parquet_types (
+ cint int,
+ ctinyint tinyint,
+ csmallint smallint,
+ cfloat float,
+ cdouble double,
+ cstring1 string
+) STORED AS PARQUET;
+
+LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging;
+
+INSERT OVERWRITE TABLE parquet_types SELECT * FROM parquet_types_staging;
+
+SELECT * FROM parquet_types;
+
+SELECT ctinyint,
+ MAX(cint),
+ MIN(csmallint),
+ COUNT(cstring1),
+ AVG(cfloat),
+ STDDEV_POP(cdouble)
+FROM parquet_types
+GROUP BY ctinyint
+ORDER BY ctinyint
+;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partcols1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partcols1.q
index b7f8c64d42..03a5760e69 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partcols1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partcols1.q
@@ -1,7 +1,7 @@
create table test1(col1 string) partitioned by (partitionId int);
insert overwrite table test1 partition (partitionId=1)
- select key from src limit 10;
+ select key from src tablesample (10 rows);
FROM (
FROM test1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_date.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_date.q
index 8738afdfa0..70a7b25215 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_date.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_date.q
@@ -1,45 +1,58 @@
drop table partition_date_1;
-create table partition_date_1 (key string, value string) partitioned by (dt date, region int);
+create table partition_date_1 (key string, value string) partitioned by (dt date, region string);
+
+insert overwrite table partition_date_1 partition(dt='2000-01-01', region= '1')
+ select * from src tablesample (10 rows);
+insert overwrite table partition_date_1 partition(dt='2000-01-01', region= '2')
+ select * from src tablesample (5 rows);
+insert overwrite table partition_date_1 partition(dt='2013-12-10', region= '2020-20-20')
+ select * from src tablesample (5 rows);
+insert overwrite table partition_date_1 partition(dt='2013-08-08', region= '1')
+ select * from src tablesample (20 rows);
+insert overwrite table partition_date_1 partition(dt='2013-08-08', region= '10')
+ select * from src tablesample (11 rows);
-insert overwrite table partition_date_1 partition(dt='2000-01-01', region=1)
- select * from src limit 10;
-insert overwrite table partition_date_1 partition(dt='2000-01-01', region=2)
- select * from src limit 5;
-insert overwrite table partition_date_1 partition(dt='2013-08-08', region=1)
- select * from src limit 20;
-insert overwrite table partition_date_1 partition(dt='2013-08-08', region=10)
- select * from src limit 11;
select distinct dt from partition_date_1;
-select * from partition_date_1 where dt = '2000-01-01' and region = 2 order by key,value;
+select * from partition_date_1 where dt = '2000-01-01' and region = '2' order by key,value;
-- 15
select count(*) from partition_date_1 where dt = date '2000-01-01';
-- 15. Also try with string value in predicate
select count(*) from partition_date_1 where dt = '2000-01-01';
-- 5
-select count(*) from partition_date_1 where dt = date '2000-01-01' and region = 2;
+select count(*) from partition_date_1 where dt = date '2000-01-01' and region = '2';
-- 11
-select count(*) from partition_date_1 where dt = date '2013-08-08' and region = 10;
+select count(*) from partition_date_1 where dt = date '2013-08-08' and region = '10';
-- 30
-select count(*) from partition_date_1 where region = 1;
+select count(*) from partition_date_1 where region = '1';
-- 0
-select count(*) from partition_date_1 where dt = date '2000-01-01' and region = 3;
+select count(*) from partition_date_1 where dt = date '2000-01-01' and region = '3';
-- 0
select count(*) from partition_date_1 where dt = date '1999-01-01';
-- Try other comparison operations
-- 20
-select count(*) from partition_date_1 where dt > date '2000-01-01' and region = 1;
+select count(*) from partition_date_1 where dt > date '2000-01-01' and region = '1';
-- 10
-select count(*) from partition_date_1 where dt < date '2000-01-02' and region = 1;
+select count(*) from partition_date_1 where dt < date '2000-01-02' and region = '1';
-- 20
-select count(*) from partition_date_1 where dt >= date '2000-01-02' and region = 1;
+select count(*) from partition_date_1 where dt >= date '2000-01-02' and region = '1';
-- 10
-select count(*) from partition_date_1 where dt <= date '2000-01-01' and region = 1;
+select count(*) from partition_date_1 where dt <= date '2000-01-01' and region = '1';
-- 20
-select count(*) from partition_date_1 where dt <> date '2000-01-01' and region = 1;
+select count(*) from partition_date_1 where dt <> date '2000-01-01' and region = '1';
+-- 10
+select count(*) from partition_date_1 where dt between date '1999-12-30' and date '2000-01-03' and region = '1';
+
+
+-- Try a string key with date-like strings
+
+-- 5
+select count(*) from partition_date_1 where region = '2020-20-20';
+-- 5
+select count(*) from partition_date_1 where region > '2010-01-01';
drop table partition_date_1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_date2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_date2.q
index 9b84b59608..c932ed1023 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_date2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_date2.q
@@ -3,7 +3,7 @@ drop table partition_date2_1;
create table partition_date2_1 (key string, value string) partitioned by (dt date, region int);
-- test date literal syntax
-from (select * from src limit 1) x
+from (select * from src tablesample (1 rows)) x
insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=1) select *
insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=2) select *
insert overwrite table partition_date2_1 partition(dt=date '1999-01-01', region=2) select *;
@@ -13,7 +13,7 @@ select * from partition_date2_1;
-- insert overwrite
insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=2)
- select 'changed_key', 'changed_value' from src limit 2;
+ select 'changed_key', 'changed_value' from src tablesample (2 rows);
select * from partition_date2_1;
-- truncate
@@ -41,7 +41,7 @@ alter table partition_date2_1 partition(dt=date '1980-01-02', region=3)
describe extended partition_date2_1 partition(dt=date '1980-01-02', region=3);
insert overwrite table partition_date2_1 partition(dt=date '1980-01-02', region=3)
- select * from src limit 2;
+ select * from src tablesample (2 rows);
select * from partition_date2_1 order by key,value,dt,region;
-- alter table set location
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_decode_name.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_decode_name.q
index ba193cd51a..a8381a4200 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_decode_name.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_decode_name.q
@@ -1,9 +1,9 @@
create table sc as select *
-from (select '2011-01-11', '2011-01-11+14:18:26' from src limit 1
+from (select '2011-01-11', '2011-01-11+14:18:26' from src tablesample (1 rows)
union all
- select '2011-01-11', '2011-01-11+15:18:26' from src limit 1
+ select '2011-01-11', '2011-01-11+15:18:26' from src tablesample (1 rows)
union all
- select '2011-01-11', '2011-01-11+16:18:26' from src limit 1 ) s;
+ select '2011-01-11', '2011-01-11+16:18:26' from src tablesample (1 rows) ) s;
create table sc_part (key string) partitioned by (ts string) stored as rcfile;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_special_char.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_special_char.q
index 81344334df..b0b1ff4db6 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_special_char.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_special_char.q
@@ -1,9 +1,9 @@
create table sc as select *
-from (select '2011-01-11', '2011-01-11+14:18:26' from src limit 1
+from (select '2011-01-11', '2011-01-11+14:18:26' from src tablesample (1 rows)
union all
- select '2011-01-11', '2011-01-11+15:18:26' from src limit 1
+ select '2011-01-11', '2011-01-11+15:18:26' from src tablesample (1 rows)
union all
- select '2011-01-11', '2011-01-11+16:18:26' from src limit 1 ) s;
+ select '2011-01-11', '2011-01-11+16:18:26' from src tablesample (1 rows) ) s;
create table sc_part (key string) partitioned by (ts string) stored as rcfile;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_type_check.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_type_check.q
index 7f1accadac..c9bca99b9c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_type_check.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_type_check.q
@@ -2,14 +2,14 @@ set hive.typecheck.on.insert = true;
-- begin part(string, string) pass(string, int)
CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day string) stored as textfile;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day=2);
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day=2);
select * from tab1;
drop table tab1;
-- begin part(string, int) pass(string, string)
CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day int) stored as textfile;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day='2');
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day='2');
select * from tab1;
drop table tab1;
@@ -17,7 +17,7 @@ drop table tab1;
-- begin part(string, date) pass(string, date)
create table tab1 (id1 int, id2 string) PARTITIONED BY(month string,day date) stored as textfile;
alter table tab1 add partition (month='June', day='2008-01-01');
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day='2008-01-01');
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' overwrite into table tab1 PARTITION(month='June', day='2008-01-01');
select id1, id2, day from tab1 where day='2008-01-01';
drop table tab1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_varchar1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_varchar1.q
index d700b1cbf8..22aadd3b53 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_varchar1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_varchar1.q
@@ -3,13 +3,13 @@ drop table partition_varchar_1;
create table partition_varchar_1 (key string, value varchar(20)) partitioned by (dt varchar(10), region int);
insert overwrite table partition_varchar_1 partition(dt='2000-01-01', region=1)
- select * from src limit 10;
+ select * from src tablesample (10 rows);
insert overwrite table partition_varchar_1 partition(dt='2000-01-01', region=2)
- select * from src limit 5;
+ select * from src tablesample (5 rows);
insert overwrite table partition_varchar_1 partition(dt='2013-08-08', region=1)
- select * from src limit 20;
+ select * from src tablesample (20 rows);
insert overwrite table partition_varchar_1 partition(dt='2013-08-08', region=10)
- select * from src limit 11;
+ select * from src tablesample (11 rows);
select distinct dt from partition_varchar_1;
select * from partition_varchar_1 where dt = '2000-01-01' and region = 2 order by key,value;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_varchar2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_varchar2.q
new file mode 100644
index 0000000000..92cb742f15
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_varchar2.q
@@ -0,0 +1,10 @@
+drop table partition_varchar_2;
+
+create table partition_varchar_2 (key string, value varchar(20)) partitioned by (dt varchar(15), region int);
+
+insert overwrite table partition_varchar_2 partition(dt='2000-01-01', region=1)
+ select * from src order by key limit 1;
+
+select * from partition_varchar_2 where cast(dt as varchar(10)) = '2000-01-01';
+
+drop table partition_varchar_2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_wise_fileformat17.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_wise_fileformat17.q
index e9b574c1ca..3cf488fb03 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_wise_fileformat17.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_wise_fileformat17.q
@@ -3,9 +3,9 @@
-- CustomSerDe(1, 2, 3) irrespective of the inserted values
DROP TABLE PW17;
-ADD JAR ../build/ql/test/test-serdes.jar;
+ADD JAR ${system:maven.local.repository}/org/apache/hive/hive-it-custom-serde/${system:hive.version}/hive-it-custom-serde-${system:hive.version}.jar;
CREATE TABLE PW17(USER STRING, COMPLEXDT ARRAY<INT>) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1';
-LOAD DATA LOCAL INPATH '../data/files/pw17.txt' INTO TABLE PW17 PARTITION (YEAR='1');
+LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE PW17 PARTITION (YEAR='1');
ALTER TABLE PW17 PARTITION(YEAR='1') SET SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe2';
ALTER TABLE PW17 SET SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1';
-- Without the fix for HIVE-5199, this will throw a cast exception via FetchOperator
@@ -14,13 +14,13 @@ SELECT * FROM PW17;
-- Test for non-partitioned table.
DROP TABLE PW17_2;
CREATE TABLE PW17_2(USER STRING, COMPLEXDT ARRAY<INT>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1';
-LOAD DATA LOCAL INPATH '../data/files/pw17.txt' INTO TABLE PW17_2;
+LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE PW17_2;
-- Without the fix for HIVE-5199, this will throw a cast exception via MapOperator
SELECT COUNT(*) FROM PW17_2;
DROP TABLE PW17_3;
CREATE TABLE PW17_3(USER STRING, COMPLEXDT ARRAY<ARRAY<INT> >) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3';
-LOAD DATA LOCAL INPATH '../data/files/pw17.txt' INTO TABLE PW17_3 PARTITION (YEAR='1');
+LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE PW17_3 PARTITION (YEAR='1');
ALTER TABLE PW17_3 PARTITION(YEAR='1') SET SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe2';
ALTER TABLE PW17_3 SET SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3';
-- Without the fix for HIVE-5285, this will throw a cast exception via FetchOperator
@@ -28,7 +28,7 @@ SELECT * FROM PW17;
DROP TABLE PW17_4;
CREATE TABLE PW17_4(USER STRING, COMPLEXDT ARRAY<ARRAY<INT> >) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3';
-LOAD DATA LOCAL INPATH '../data/files/pw17.txt' INTO TABLE PW17_4;
+LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE PW17_4;
-- Without the fix for HIVE-5285, this will throw a cast exception via MapOperator
SELECT COUNT(*) FROM PW17_4;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_wise_fileformat18.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_wise_fileformat18.q
new file mode 100644
index 0000000000..40ed2585f5
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/partition_wise_fileformat18.q
@@ -0,0 +1,19 @@
+-- HIVE-5202 : Tests for SettableUnionObjectInspectors
+-- CustomSerDe(4,5) are used here.
+-- The final results should be all NULL columns deserialized using
+-- CustomSerDe(4, 5) irrespective of the inserted values
+
+DROP TABLE PW18;
+ADD JAR ${system:maven.local.repository}/org/apache/hive/hive-it-custom-serde/${system:hive.version}/hive-it-custom-serde-${system:hive.version}.jar;
+CREATE TABLE PW18(USER STRING, COMPLEXDT UNIONTYPE<INT, DOUBLE>) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5';
+LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE PW18 PARTITION (YEAR='1');
+ALTER TABLE PW18 PARTITION(YEAR='1') SET SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe4';
+-- Without the fix for HIVE-5202, this will throw an unsupported data type exception.
+SELECT * FROM PW18;
+
+-- Test for non-partitioned table.
+DROP TABLE PW18_2;
+CREATE TABLE PW18_2(USER STRING, COMPLEXDT UNIONTYPE<INT, DOUBLE>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5';
+LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE PW18_2;
+-- Without the fix for HIVE-5202, this will throw an unsupported data type exception
+SELECT COUNT(*) FROM PW18_2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/pcr.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/pcr.q
index 09a39ae4e4..3be0ff23b8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/pcr.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/pcr.q
@@ -127,7 +127,7 @@ create table ab(strct struct<a:int, b:string>)
row format delimited
fields terminated by '\t'
collection items terminated by '\001';
-load data local inpath '../data/files/kv1.txt'
+load data local inpath '../../data/files/kv1.txt'
overwrite into table ab;
-- Create partitioned table with struct data:
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_join4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_join4.q
new file mode 100644
index 0000000000..475d45c19f
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_join4.q
@@ -0,0 +1,22 @@
+create table dual(a string);
+
+set hive.optimize.ppd=true;
+drop table if exists test_tbl ;
+
+create table test_tbl (id string,name string);
+
+insert into table test_tbl
+select 'a','b' from dual;
+
+explain
+select t2.*
+from
+(select id,name from (select id,name from test_tbl) t1 sort by id) t2
+join test_tbl t3 on (t2.id=t3.id )
+where t2.name='c' and t3.id='a';
+
+select t2.*
+from
+(select id,name from (select id,name from test_tbl) t1 sort by id) t2
+join test_tbl t3 on (t2.id=t3.id )
+where t2.name='c' and t3.id='a';
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_multi_insert.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_multi_insert.q
index a802df1b98..06fe7ce580 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_multi_insert.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_multi_insert.q
@@ -10,18 +10,18 @@ FROM src a JOIN src b ON (a.key = b.key)
INSERT OVERWRITE TABLE mi1 SELECT a.* WHERE a.key < 100
INSERT OVERWRITE TABLE mi2 SELECT a.key, a.value WHERE a.key >= 100 and a.key < 200
INSERT OVERWRITE TABLE mi3 PARTITION(ds='2008-04-08', hr='12') SELECT a.key WHERE a.key >= 200 and a.key < 300
-INSERT OVERWRITE DIRECTORY '../build/ql/test/data/warehouse/mi4.out' SELECT a.value WHERE a.key >= 300;
+INSERT OVERWRITE DIRECTORY 'target/warehouse/mi4.out' SELECT a.value WHERE a.key >= 300;
FROM src a JOIN src b ON (a.key = b.key)
INSERT OVERWRITE TABLE mi1 SELECT a.* WHERE a.key < 100
INSERT OVERWRITE TABLE mi2 SELECT a.key, a.value WHERE a.key >= 100 and a.key < 200
INSERT OVERWRITE TABLE mi3 PARTITION(ds='2008-04-08', hr='12') SELECT a.key WHERE a.key >= 200 and a.key < 300
-INSERT OVERWRITE DIRECTORY '../build/ql/test/data/warehouse/mi4.out' SELECT a.value WHERE a.key >= 300;
+INSERT OVERWRITE DIRECTORY 'target/warehouse/mi4.out' SELECT a.value WHERE a.key >= 300;
SELECT mi1.* FROM mi1;
SELECT mi2.* FROM mi2;
SELECT mi3.* FROM mi3;
-dfs -cat ../build/ql/test/data/warehouse/mi4.out/*;
+dfs -cat ${system:test.warehouse.dir}/mi4.out/*;
set hive.ppd.remove.duplicatefilters=true;
@@ -31,15 +31,15 @@ FROM src a JOIN src b ON (a.key = b.key)
INSERT OVERWRITE TABLE mi1 SELECT a.* WHERE a.key < 100
INSERT OVERWRITE TABLE mi2 SELECT a.key, a.value WHERE a.key >= 100 and a.key < 200
INSERT OVERWRITE TABLE mi3 PARTITION(ds='2008-04-08', hr='12') SELECT a.key WHERE a.key >= 200 and a.key < 300
-INSERT OVERWRITE DIRECTORY '../build/ql/test/data/warehouse/mi4.out' SELECT a.value WHERE a.key >= 300;
+INSERT OVERWRITE DIRECTORY 'target/warehouse/mi4.out' SELECT a.value WHERE a.key >= 300;
FROM src a JOIN src b ON (a.key = b.key)
INSERT OVERWRITE TABLE mi1 SELECT a.* WHERE a.key < 100
INSERT OVERWRITE TABLE mi2 SELECT a.key, a.value WHERE a.key >= 100 and a.key < 200
INSERT OVERWRITE TABLE mi3 PARTITION(ds='2008-04-08', hr='12') SELECT a.key WHERE a.key >= 200 and a.key < 300
-INSERT OVERWRITE DIRECTORY '../build/ql/test/data/warehouse/mi4.out' SELECT a.value WHERE a.key >= 300;
+INSERT OVERWRITE DIRECTORY 'target/warehouse/mi4.out' SELECT a.value WHERE a.key >= 300;
SELECT mi1.* FROM mi1;
SELECT mi2.* FROM mi2;
SELECT mi3.* FROM mi3;
-dfs -cat ../build/ql/test/data/warehouse/mi4.out/*;
+dfs -cat ${system:test.warehouse.dir}/mi4.out/*;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_transform.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_transform.q
index 65a498d021..530ef9c4d8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_transform.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_transform.q
@@ -36,3 +36,12 @@ FROM (
CLUSTER BY tkey
) tmap
SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100;
+
+-- test described in HIVE-4598
+
+EXPLAIN
+FROM (
+ FROM ( SELECT * FROM src ) mapout REDUCE * USING 'cat' AS x,y
+) reduced
+insert overwrite local directory '/tmp/a' select * where x='a' or x='b'
+insert overwrite local directory '/tmp/b' select * where x='c' or x='d';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_udtf.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_udtf.q
new file mode 100644
index 0000000000..d90532cfa4
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_udtf.q
@@ -0,0 +1,12 @@
+explain
+SELECT value from (
+ select explode(array(key, value)) as (value) from (
+ select * FROM src WHERE key > 400
+ ) A
+) B WHERE value < 450;
+
+SELECT value from (
+ select explode(array(key, value)) as (value) from (
+ select * FROM src WHERE key > 400
+ ) A
+) B WHERE value < 450;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_union_view.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_union_view.q
index d635e2d171..a7606c5a5f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_union_view.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppd_union_view.q
@@ -5,26 +5,26 @@ drop view v;
create table t1_new (key string, value string) partitioned by (ds string);
insert overwrite table t1_new partition (ds = '2011-10-15')
-select 'key1', 'value1' from src limit 1;
+select 'key1', 'value1' from src tablesample (1 rows);
insert overwrite table t1_new partition (ds = '2011-10-16')
-select 'key2', 'value2' from src limit 1;
+select 'key2', 'value2' from src tablesample (1 rows);
create table t1_old (keymap string, value string) partitioned by (ds string);
insert overwrite table t1_old partition (ds = '2011-10-13')
-select 'keymap3', 'value3' from src limit 1;
+select 'keymap3', 'value3' from src tablesample (1 rows);
insert overwrite table t1_old partition (ds = '2011-10-14')
-select 'keymap4', 'value4' from src limit 1;
+select 'keymap4', 'value4' from src tablesample (1 rows);
create table t1_mapping (key string, keymap string) partitioned by (ds string);
insert overwrite table t1_mapping partition (ds = '2011-10-13')
-select 'key3', 'keymap3' from src limit 1;
+select 'key3', 'keymap3' from src tablesample (1 rows);
insert overwrite table t1_mapping partition (ds = '2011-10-14')
-select 'key4', 'keymap4' from src limit 1;
+select 'key4', 'keymap4' from src tablesample (1 rows);
create view t1 partitioned on (ds) as
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppr_pushdown.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppr_pushdown.q
index 860dd631ce..440005fdee 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppr_pushdown.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppr_pushdown.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
create table ppr_test (key string) partitioned by (ds string);
alter table ppr_test add partition (ds = '1234');
@@ -9,14 +11,14 @@ alter table ppr_test add partition (ds = '12:4');
alter table ppr_test add partition (ds = '12%4');
alter table ppr_test add partition (ds = '12*4');
-insert overwrite table ppr_test partition(ds = '1234') select * from (select '1234' from src limit 1 union all select 'abcd' from src limit 1) s;
-insert overwrite table ppr_test partition(ds = '1224') select * from (select '1224' from src limit 1 union all select 'abcd' from src limit 1) s;
-insert overwrite table ppr_test partition(ds = '1214') select * from (select '1214' from src limit 1 union all select 'abcd' from src limit 1) s;
-insert overwrite table ppr_test partition(ds = '12+4') select * from (select '12+4' from src limit 1 union all select 'abcd' from src limit 1) s;
-insert overwrite table ppr_test partition(ds = '12.4') select * from (select '12.4' from src limit 1 union all select 'abcd' from src limit 1) s;
-insert overwrite table ppr_test partition(ds = '12:4') select * from (select '12:4' from src limit 1 union all select 'abcd' from src limit 1) s;
-insert overwrite table ppr_test partition(ds = '12%4') select * from (select '12%4' from src limit 1 union all select 'abcd' from src limit 1) s;
-insert overwrite table ppr_test partition(ds = '12*4') select * from (select '12*4' from src limit 1 union all select 'abcd' from src limit 1) s;
+insert overwrite table ppr_test partition(ds = '1234') select * from (select '1234' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
+insert overwrite table ppr_test partition(ds = '1224') select * from (select '1224' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
+insert overwrite table ppr_test partition(ds = '1214') select * from (select '1214' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
+insert overwrite table ppr_test partition(ds = '12+4') select * from (select '12+4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
+insert overwrite table ppr_test partition(ds = '12.4') select * from (select '12.4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
+insert overwrite table ppr_test partition(ds = '12:4') select * from (select '12:4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
+insert overwrite table ppr_test partition(ds = '12%4') select * from (select '12%4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
+insert overwrite table ppr_test partition(ds = '12*4') select * from (select '12*4' from src tablesample (1 rows) union all select 'abcd' from src tablesample (1 rows)) s;
select * from ppr_test where ds = '1234' order by key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppr_pushdown2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppr_pushdown2.q
index 67c0da0dfc..8c60906538 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppr_pushdown2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ppr_pushdown2.q
@@ -1,24 +1,26 @@
+set hive.fetch.task.conversion=more;
+
create table ppr_test (key string) partitioned by (ds string);
-insert overwrite table ppr_test partition(ds='2') select '2' from src limit 1;
-insert overwrite table ppr_test partition(ds='22') select '22' from src limit 1;
+insert overwrite table ppr_test partition(ds='2') select '2' from src tablesample (1 rows);
+insert overwrite table ppr_test partition(ds='22') select '22' from src tablesample (1 rows);
select * from ppr_test where ds = '2';
select * from ppr_test where ds = '22';
create table ppr_test2 (key string) partitioned by (ds string, s string);
-insert overwrite table ppr_test2 partition(ds='1', s='2') select '1' from src limit 1;
-insert overwrite table ppr_test2 partition(ds='2', s='1') select '2' from src limit 1;
+insert overwrite table ppr_test2 partition(ds='1', s='2') select '1' from src tablesample (1 rows);
+insert overwrite table ppr_test2 partition(ds='2', s='1') select '2' from src tablesample (1 rows);
select * from ppr_test2 where s = '1';
select * from ppr_test2 where ds = '1';
create table ppr_test3 (key string) partitioned by (col string, ol string, l string);
-insert overwrite table ppr_test3 partition(col='1', ol='2', l = '3') select '1' from src limit 1;
-insert overwrite table ppr_test3 partition(col='1', ol='1', l = '2') select '2' from src limit 1;
-insert overwrite table ppr_test3 partition(col='1', ol='2', l = '1') select '3' from src limit 1;
+insert overwrite table ppr_test3 partition(col='1', ol='2', l = '3') select '1' from src tablesample (1 rows);
+insert overwrite table ppr_test3 partition(col='1', ol='1', l = '2') select '2' from src tablesample (1 rows);
+insert overwrite table ppr_test3 partition(col='1', ol='2', l = '1') select '3' from src tablesample (1 rows);
select * from ppr_test3 where l = '1';
select * from ppr_test3 where l = '2';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/progress_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/progress_1.q
index ad908a02ad..22ee92634d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/progress_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/progress_1.q
@@ -2,7 +2,7 @@ set hive.heartbeat.interval=5;
CREATE TABLE PROGRESS_1(key int, value string) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv6.txt' INTO TABLE PROGRESS_1;
+LOAD DATA LOCAL INPATH '../../data/files/kv6.txt' INTO TABLE PROGRESS_1;
select count(1) from PROGRESS_1 t1 join PROGRESS_1 t2 on t1.key=t2.key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf.q
index eea5415d68..d56b412355 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf.q
@@ -13,7 +13,7 @@ CREATE TABLE part(
p_comment STRING
);
-LOAD DATA LOCAL INPATH '../data/files/part_tiny.txt' overwrite into table part;
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
--1. test1
select p_mfgr, p_name, p_size,
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_decimal.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_decimal.q
index 03f435e453..9799534ff4 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_decimal.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_decimal.q
@@ -9,11 +9,11 @@ CREATE TABLE part(
p_type STRING,
p_size INT,
p_container STRING,
- p_retailprice DECIMAL,
+ p_retailprice DECIMAL(6,2),
p_comment STRING
);
-LOAD DATA LOCAL INPATH '../data/files/part_tiny.txt' overwrite into table part;
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-- 1. aggregate functions with decimal type
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_general_queries.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_general_queries.q
index 885c3b3d43..4fe9710d0d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_general_queries.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_general_queries.q
@@ -13,7 +13,7 @@ CREATE TABLE part(
p_comment STRING
);
-LOAD DATA LOCAL INPATH '../data/files/part_tiny.txt' overwrite into table part;
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-- 1. testNoPTFNoWindowing
select p_mfgr, p_name, p_size
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_matchpath.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_matchpath.q
index 72eeb104d5..0cde350f73 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_matchpath.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_matchpath.q
@@ -10,7 +10,7 @@ ARR_DELAY float,
FL_NUM string
);
-LOAD DATA LOCAL INPATH '../data/files/flights_tiny.txt' OVERWRITE INTO TABLE flights_tiny;
+LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt' OVERWRITE INTO TABLE flights_tiny;
-- 1. basic Matchpath test
select origin_city_name, fl_num, year, month, day_of_month, sz, tpath
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_rcfile.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_rcfile.q
index 535a233a9e..a68c578848 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_rcfile.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_rcfile.q
@@ -12,7 +12,7 @@ CREATE TABLE part_rc(
p_comment STRING
) STORED AS RCFILE ;
-LOAD DATA LOCAL INPATH '../data/files/part.rc' overwrite into table part_rc;
+LOAD DATA LOCAL INPATH '../../data/files/part.rc' overwrite into table part_rc;
-- testWindowingPTFWithPartRC
select p_mfgr, p_name, p_size,
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_register_tblfn.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_register_tblfn.q
index a2140cd049..4b508e9df0 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_register_tblfn.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_register_tblfn.q
@@ -10,7 +10,7 @@ ARR_DELAY float,
FL_NUM string
);
-LOAD DATA LOCAL INPATH '../data/files/flights_tiny.txt' OVERWRITE INTO TABLE flights_tiny;
+LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt' OVERWRITE INTO TABLE flights_tiny;
create temporary function matchpathtest as 'org.apache.hadoop.hive.ql.udf.ptf.MatchPath$MatchPathResolver';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_seqfile.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_seqfile.q
index 4aa8ce11be..c5d65f0efa 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_seqfile.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ptf_seqfile.q
@@ -12,7 +12,7 @@ CREATE TABLE part_seq(
p_comment STRING
) STORED AS SEQUENCEFILE ;
-LOAD DATA LOCAL INPATH '../data/files/part.seq' overwrite into table part_seq;
+LOAD DATA LOCAL INPATH '../../data/files/part.seq' overwrite into table part_seq;
-- testWindowingPTFWithPartSeqFile
select p_mfgr, p_name, p_size,
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q
index f198baa6e4..57e8cc673c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/ql_rewrite_gbtoidx.q
@@ -1,3 +1,5 @@
+set hive.stats.dbclass=counter;
+set hive.stats.autogather=true;
DROP TABLE lineitem;
CREATE TABLE lineitem (L_ORDERKEY INT,
@@ -19,7 +21,7 @@ CREATE TABLE lineitem (L_ORDERKEY INT,
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '|';
-LOAD DATA LOCAL INPATH '../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem;
+LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem;
CREATE INDEX lineitem_lshipdate_idx ON TABLE lineitem(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)");
ALTER INDEX lineitem_lshipdate_idx ON lineitem REBUILD;
@@ -156,7 +158,7 @@ DROP INDEX tbl_part_index on tblpart;
DROP TABLE tblpart;
CREATE TABLE tbl(key int, value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|';
-LOAD DATA LOCAL INPATH '../data/files/tbl.txt' OVERWRITE INTO TABLE tbl;
+LOAD DATA LOCAL INPATH '../../data/files/tbl.txt' OVERWRITE INTO TABLE tbl;
CREATE INDEX tbl_key_idx ON TABLE tbl(key) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(key)");
ALTER INDEX tbl_key_idx ON tbl REBUILD;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quote2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quote2.q
index 65b9f8776d..c93902ab3e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quote2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quote2.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
EXPLAIN
SELECT
'abc', "abc",
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_alter.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_alter.q
new file mode 100644
index 0000000000..a34a25af4b
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_alter.q
@@ -0,0 +1,21 @@
+
+set hive.support.quoted.identifiers=column;
+
+create table src_b3(`x+1` string, `!@#$%^&*()_q` string) ;
+
+alter table src_b3
+clustered by (`!@#$%^&*()_q`) sorted by (`!@#$%^&*()_q`) into 2 buckets
+;
+
+
+-- alter partition
+create table src_p3(`x+1` string, `y&y` string) partitioned by (`!@#$%^&*()_q` string);
+
+insert overwrite table src_p3 partition(`!@#$%^&*()_q`='a') select * from src;
+show partitions src_p3;
+
+alter table src_p3 add if not exists partition(`!@#$%^&*()_q`='b');
+show partitions src_p3;
+
+alter table src_p3 partition(`!@#$%^&*()_q`='b') rename to partition(`!@#$%^&*()_q`='c');
+show partitions src_p3;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_basic.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_basic.q
new file mode 100644
index 0000000000..680868e549
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_basic.q
@@ -0,0 +1,34 @@
+
+set hive.support.quoted.identifiers=column;
+
+-- basic
+create table t1(`x+1` string, `y&y` string, `!@#$%^&*()_q` string);
+describe t1;
+select `x+1`, `y&y`, `!@#$%^&*()_q` from t1;
+explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1;
+explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1 where `!@#$%^&*()_q` = '1';
+explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&y`, `!@#$%^&*()_q` having `!@#$%^&*()_q` = '1';
+explain select `x+1`, `y&y`, `!@#$%^&*()_q`, rank() over(partition by `!@#$%^&*()_q` order by `y&y`)
+from t1 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&y`, `!@#$%^&*()_q` having `!@#$%^&*()_q` = '1';
+
+-- case insensitive
+explain select `X+1`, `Y&y`, `!@#$%^&*()_Q`, rank() over(partition by `!@#$%^&*()_q` order by `y&y`)
+from t1 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&Y`, `!@#$%^&*()_q` having `!@#$%^&*()_Q` = '1';
+
+
+-- escaped back ticks
+create table t4(`x+1``` string, `y&y` string);
+describe t4;
+insert into table t4 select * from src;
+select `x+1```, `y&y`, rank() over(partition by `x+1``` order by `y&y`)
+from t4 where `x+1``` = '10' group by `x+1```, `y&y` having `x+1``` = '10';
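+
+-- (Illustrative note: inside a quoted identifier a doubled back tick escapes a
+-- literal back tick, so `x+1``` refers to a column literally named x+1`.)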
+
+-- view
+create view v1 as
+select `x+1```, `y&y`
+from t4 where `x+1``` < '200';
+
+select `x+1```, `y&y`, rank() over(partition by `x+1``` order by `y&y`)
+from v1
+group by `x+1```, `y&y`
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_partition.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_partition.q
new file mode 100644
index 0000000000..e9416ae282
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_partition.q
@@ -0,0 +1,24 @@
+
+set hive.support.quoted.identifiers=column;
+
+
+create table src_p(`x+1` string, `y&y` string) partitioned by (`!@#$%^&*()_q` string);
+insert overwrite table src_p partition(`!@#$%^&*()_q`='a') select * from src;
+
+show partitions src_p;
+
+explain select `x+1`, `y&y`, `!@#$%^&*()_q`
+from src_p where `!@#$%^&*()_q` = 'a' and `x+1`='10'
+group by `x+1`, `y&y`, `!@#$%^&*()_q` having `!@#$%^&*()_q` = 'a'
+;
+
+set hive.exec.dynamic.partition.mode=nonstrict
+;
+
+create table src_p2(`x+1` string) partitioned by (`!@#$%^&*()_q` string);
+
+insert overwrite table src_p2 partition(`!@#$%^&*()_q`)
+select key, value as `!@#$%^&*()_q` from src where key < '200'
+;
+
+show partitions src_p2; \ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_skew.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_skew.q
new file mode 100644
index 0000000000..5c95967411
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_skew.q
@@ -0,0 +1,26 @@
+
+set hive.support.quoted.identifiers=column;
+
+set hive.mapred.supports.subdirectories=true;
+set hive.internal.ddl.list.bucketing.enable=true;
+set hive.optimize.skewjoin.compiletime = true;
+
+CREATE TABLE T1(`!@#$%^&*()_q` string, `y&y` string)
+SKEWED BY (`!@#$%^&*()_q`) ON ((2)) STORED AS TEXTFILE
+;
+
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+
+CREATE TABLE T2(`!@#$%^&*()_q` string, `y&y` string)
+SKEWED BY (`!@#$%^&*()_q`) ON ((2)) STORED AS TEXTFILE
+;
+
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T2;
+
+-- a simple join query with skew on both the tables on the join key
+-- adding an order by at the end to make the results deterministic
+
+EXPLAIN
+SELECT a.*, b.* FROM T1 a JOIN T2 b ON a. `!@#$%^&*()_q` = b. `!@#$%^&*()_q`
+;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_smb.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_smb.q
new file mode 100644
index 0000000000..38d1b99c4b
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_smb.q
@@ -0,0 +1,34 @@
+
+set hive.support.quoted.identifiers=column;
+
+
+set hive.enforce.bucketing = true;
+set hive.enforce.sorting = true;
+create table src_b(`x+1` string, `!@#$%^&*()_q` string)
+clustered by (`!@#$%^&*()_q`) sorted by (`!@#$%^&*()_q`) into 2 buckets
+;
+
+insert overwrite table src_b
+select * from src
+;
+
+create table src_b2(`x+1` string, `!@#$%^&*()_q` string)
+clustered by (`!@#$%^&*()_q`) sorted by (`!@#$%^&*()_q`) into 2 buckets
+;
+
+insert overwrite table src_b2
+select * from src
+;
+
+set hive.auto.convert.join=true;
+set hive.auto.convert.sortmerge.join=true;
+set hive.optimize.bucketmapjoin = true;
+set hive.optimize.bucketmapjoin.sortedmerge = true;
+
+set hive.auto.convert.sortmerge.join.to.mapjoin=false;
+set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ;
+
+select a.`x+1`, a.`!@#$%^&*()_q`, b.`x+1`, b.`!@#$%^&*()_q`
+from src_b a join src_b2 b on a.`!@#$%^&*()_q` = b.`!@#$%^&*()_q`
+where a.`x+1` < '11'
+; \ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_tblproperty.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_tblproperty.q
new file mode 100644
index 0000000000..d64e9cb9d5
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/quotedid_tblproperty.q
@@ -0,0 +1,8 @@
+ADD JAR ${system:maven.local.repository}/org/apache/hive/hive-it-test-serde/${system:hive.version}/hive-it-test-serde-${system:hive.version}.jar;
+
+CREATE TABLE xyz(KEY STRING, VALUE STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.TestSerDe'
+STORED AS TEXTFILE
+TBLPROPERTIES('columns'='valid_colname,invalid.colname')
+;
+
+describe xyz; \ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_bigdata.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_bigdata.q
index 3e83e6693b..df460c89aa 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_bigdata.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/rcfile_bigdata.q
@@ -1,7 +1,7 @@
set hive.map.aggr.hash.percentmemory = 0.3;
set hive.mapred.local.mem = 256;
-add file ../data/scripts/dumpdata_script.py;
+add file ../../data/scripts/dumpdata_script.py;
CREATE table columnTable_Bigdata (key STRING, value STRING)
ROW FORMAT SERDE
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/regex_col.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/regex_col.q
index 9cfcee5a59..1c311fc478 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/regex_col.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/regex_col.q
@@ -1,3 +1,5 @@
+set hive.support.quoted.identifiers=none;
+
EXPLAIN
SELECT * FROM srcpart;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/remote_script.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/remote_script.q
index 926601c647..c4fcaaf95b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/remote_script.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/remote_script.q
@@ -1,4 +1,4 @@
-dfs -put ../data/scripts/newline.py /newline.py;
+dfs -put ../../data/scripts/newline.py /newline.py;
add file hdfs:///newline.py;
set hive.transform.escape.input=true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/repair.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/repair.q
index 8d04d3e991..df199b0d77 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/repair.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/repair.q
@@ -1,10 +1,12 @@
+DROP TABLE IF EXISTS repairtable;
+
CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING, p2 STRING);
MSCK TABLE repairtable;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/warehouse/repairtable/p1=a/p2=a;
-dfs ${system:test.dfs.mkdir} ../build/ql/test/data/warehouse/repairtable/p1=b/p2=a;
-dfs -touchz ../build/ql/test/data/warehouse/repairtable/p1=b/p2=a/datafile;
+dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=a/p2=a;
+dfs ${system:test.dfs.mkdir} ${system:test.warehouse.dir}/repairtable/p1=b/p2=a;
+dfs -touchz ${system:test.warehouse.dir}/repairtable/p1=b/p2=a/datafile;
MSCK TABLE repairtable;
@@ -12,4 +14,4 @@ MSCK REPAIR TABLE repairtable;
MSCK TABLE repairtable;
-
+DROP TABLE repairtable;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/root_dir_external_table.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/root_dir_external_table.q
new file mode 100644
index 0000000000..a0514c86ff
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/root_dir_external_table.q
@@ -0,0 +1,11 @@
+dfs ${system:test.dfs.mkdir} hdfs:///tmp/test_root_dir_external_table;
+
+insert overwrite directory "hdfs:///tmp/test_root_dir_external_table" select key from src where (key < 20) order by key;
+
+dfs -cp /tmp/test_root_dir_external_table/000000_0 /000000_0;
+dfs -rmr hdfs:///tmp/test_root_dir_external_table;
+
+create external table roottable (key string) row format delimited fields terminated by '\\t' stored as textfile location 'hdfs:///';
+select count(*) from roottable;
+
+dfs -rmr /000000_0; \ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/schemeAuthority2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/schemeAuthority2.q
index ecd4d13d0e..b3c38bf577 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/schemeAuthority2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/schemeAuthority2.q
@@ -1,5 +1,5 @@
-dfs -mkdir file:///tmp/test;
-dfs -mkdir hdfs:///tmp/test;
+dfs ${system:test.dfs.mkdir} file:///tmp/test;
+dfs ${system:test.dfs.mkdir} hdfs:///tmp/test;
create external table dynPart (key string) partitioned by (value string, value2 string) row format delimited fields terminated by '\\t' stored as textfile;
insert overwrite local directory "/tmp/test" select key from src where (key = 10) order by key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/scriptfile1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/scriptfile1.q
index 4f65016f30..2dfb12951f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/scriptfile1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/scriptfile1.q
@@ -1,7 +1,9 @@
set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+
+-- EXCLUDE_OS_WINDOWS
CREATE TABLE dest1(key INT, value STRING);
-ADD FILE src/test/scripts/testgrep;
+ADD FILE ../../ql/src/test/scripts/testgrep;
FROM (
FROM src
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/scriptfile1_win.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/scriptfile1_win.q
new file mode 100644
index 0000000000..0008ae51c4
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/scriptfile1_win.q
@@ -0,0 +1,16 @@
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+-- INCLUDE_OS_WINDOWS
+
+CREATE TABLE dest1(key INT, value STRING);
+
+ADD FILE src/test/scripts/testgrep_win.bat;
+
+FROM (
+ FROM src
+ SELECT TRANSFORM(src.key, src.value)
+ USING 'testgrep_win.bat' AS (tkey, tvalue)
+ CLUSTER BY tkey
+) tmap
+INSERT OVERWRITE TABLE dest1 SELECT tmap.tkey, tmap.tvalue;
+
+SELECT dest1.* FROM dest1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/select_dummy_source.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/select_dummy_source.q
new file mode 100644
index 0000000000..25a1a81283
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/select_dummy_source.q
@@ -0,0 +1,33 @@
+explain
+select 'a', 100;
+select 'a', 100;
+
+-- evaluation
+explain
+select 1 + 1;
+select 1 + 1;
+
+-- explode (not possible for lateral view)
+explain
+select explode(array('a', 'b'));
+select explode(array('a', 'b'));
+
+set hive.fetch.task.conversion=more;
+
+explain
+select 'a', 100;
+select 'a', 100;
+
+explain
+select 1 + 1;
+select 1 + 1;
+
+explain
+select explode(array('a', 'b'));
+select explode(array('a', 'b'));
+
+-- subquery
+explain
+select 2 + 3,x from (select 1 + 2 x) X;
+select 2 + 3,x from (select 1 + 2 x) X;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/serde_regex.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/serde_regex.q
index 2a287bd877..accdb54744 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/serde_regex.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/serde_regex.q
@@ -31,8 +31,8 @@ WITH SERDEPROPERTIES (
)
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH "../data/files/apache.access.log" INTO TABLE serde_regex;
-LOAD DATA LOCAL INPATH "../data/files/apache.access.2.log" INTO TABLE serde_regex;
+LOAD DATA LOCAL INPATH "../../data/files/apache.access.log" INTO TABLE serde_regex;
+LOAD DATA LOCAL INPATH "../../data/files/apache.access.2.log" INTO TABLE serde_regex;
SELECT * FROM serde_regex ORDER BY time;
@@ -42,7 +42,7 @@ DROP TABLE serde_regex;
EXPLAIN
CREATE TABLE serde_regex1(
- key decimal,
+ key decimal(38,18),
value int)
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.RegexSerDe'
WITH SERDEPROPERTIES (
@@ -51,7 +51,7 @@ WITH SERDEPROPERTIES (
STORED AS TEXTFILE;
CREATE TABLE serde_regex1(
- key decimal,
+ key decimal(38,18),
value int)
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.RegexSerDe'
WITH SERDEPROPERTIES (
@@ -59,7 +59,7 @@ WITH SERDEPROPERTIES (
)
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH "../data/files/kv7.txt" INTO TABLE serde_regex1;
+LOAD DATA LOCAL INPATH "../../data/files/kv7.txt" INTO TABLE serde_regex1;
SELECT key, value FROM serde_regex1 ORDER BY key, value;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/set_processor_namespaces.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/set_processor_namespaces.q
index 7e3d1f4d8a..d10239c31a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/set_processor_namespaces.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/set_processor_namespaces.q
@@ -24,7 +24,7 @@ set b=a;
set c=${hiveconf:${hiveconf:b}};
set c;
-set jar=${system:build.ivy.lib.dir}/default/derby-${system:derby.version}.jar;
+set jar=${system:maven.local.repository}/org/apache/derby/derby/${system:derby.version}/derby-${system:derby.version}.jar;
add file ${hiveconf:jar};
delete file ${hiveconf:jar};
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_indexes_edge_cases.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_indexes_edge_cases.q
index 5fcdf97e2d..9758c16caa 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_indexes_edge_cases.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_indexes_edge_cases.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
DROP TABLE show_idx_empty;
DROP TABLE show_idx_full;
@@ -24,4 +25,4 @@ SHOW INDEXES ON show_idx_empty;
DROP INDEX idx_1 on show_idx_full;
DROP INDEX idx_2 on show_idx_full;
DROP TABLE show_idx_empty;
-DROP TABLE show_idx_full; \ No newline at end of file
+DROP TABLE show_idx_full;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_indexes_syntax.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_indexes_syntax.q
index ab588937e1..bb43c5e138 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_indexes_syntax.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_indexes_syntax.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
DROP TABLE show_idx_t1;
CREATE TABLE show_idx_t1(KEY STRING, VALUE STRING);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_partitions.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_partitions.q
index 7fa7b828bd..1fc1d8e1f2 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_partitions.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_partitions.q
@@ -1,4 +1,5 @@
SHOW PARTITIONS srcpart;
+SHOW PARTITIONS default.srcpart;
SHOW PARTITIONS srcpart PARTITION(hr='11');
SHOW PARTITIONS srcpart PARTITION(ds='2008-04-08');
-SHOW PARTITIONS srcpart PARTITION(ds='2008-04-08', hr='12'); \ No newline at end of file
+SHOW PARTITIONS srcpart PARTITION(ds='2008-04-08', hr='12');
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_roles.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_roles.q
new file mode 100644
index 0000000000..d8ce96a37d
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_roles.q
@@ -0,0 +1,4 @@
+create role role1;
+create role role2;
+
+show roles;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_tablestatus.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_tablestatus.q
index 9184d6da89..55fb7b67ff 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_tablestatus.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/show_tablestatus.q
@@ -1,3 +1,4 @@
+set hive.support.quoted.identifiers=none;
EXPLAIN
SHOW TABLE EXTENDED IN default LIKE `src`;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoin.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoin.q
index ad917beeef..47535eab63 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoin.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoin.q
@@ -13,10 +13,10 @@ CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;
CREATE TABLE T4(key STRING, val STRING) STORED AS TEXTFILE;
CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
-LOAD DATA LOCAL INPATH '../data/files/T3.txt' INTO TABLE T3;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T4;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T4;
EXPLAIN
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoin_noskew.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoin_noskew.q
new file mode 100644
index 0000000000..b8ca592ab7
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoin_noskew.q
@@ -0,0 +1,9 @@
+set hive.auto.convert.join=false;
+set hive.optimize.skewjoin=true;
+
+explain
+create table noskew as select a.* from src a join src b on a.key=b.key order by a.key limit 30;
+
+create table noskew as select a.* from src a join src b on a.key=b.key order by a.key limit 30;
+
+select * from noskew;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q
index 03eab4cd6d..fc07742cd7 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoin_union_remove_1.q
@@ -20,12 +20,12 @@ set mapred.input.dir.recursive=true;
CREATE TABLE T1(key STRING, val STRING)
SKEWED BY (key) ON ((2)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING)
SKEWED BY (key) ON ((3)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- a simple join query with skew on both the tables on the join key
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q
index 9cb919531f..50cfc61962 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoin_union_remove_2.q
@@ -12,16 +12,16 @@ set mapred.input.dir.recursive=true;
CREATE TABLE T1(key STRING, val STRING)
SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING)
SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
-- This is to test the union->selectstar->filesink and skewjoin optimization
-- Union of 3 map-reduce subqueries is performed for the skew join
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt1.q
index af446bb65c..504ba8be2a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt1.q
@@ -5,12 +5,12 @@ set hive.optimize.skewjoin.compiletime = true;
CREATE TABLE T1(key STRING, val STRING)
SKEWED BY (key) ON ((2)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING)
SKEWED BY (key) ON ((3)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- a simple join query with skew on both the tables on the join key
-- adding an order by at the end to make the results deterministic
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt10.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt10.q
index 199f3201af..f35af90170 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt10.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt10.q
@@ -4,7 +4,7 @@ set hive.optimize.skewjoin.compiletime = true;
CREATE TABLE T1(key STRING, value STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
drop table array_valued_T1;
create table array_valued_T1 (key string, value array<string>) SKEWED BY (key) ON ((8));
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt11.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt11.q
index ef61fb22f1..9e00bdcd76 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt11.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt11.q
@@ -5,11 +5,11 @@ set hive.optimize.skewjoin.compiletime = true;
CREATE TABLE T1(key STRING, val STRING)
SKEWED BY (key) ON ((2)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- This test is to verify the skew join compile optimization when the join is followed
-- by a union. Both sides of a union consist of a join, which should have used
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt12.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt12.q
index b5d9d9bc46..171995069b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt12.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt12.q
@@ -5,12 +5,12 @@ set hive.optimize.skewjoin.compiletime = true;
CREATE TABLE T1(key STRING, val STRING)
SKEWED BY (key, val) ON ((2, 12), (8, 18)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING)
SKEWED BY (key, val) ON ((3, 13), (8, 18)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- Both the join tables are skewed by 2 keys, and one of the skewed values
-- is common to both the tables. The join key matches the skewed key set.
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt13.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt13.q
index 0634c4f4ff..5ef217c900 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt13.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt13.q
@@ -4,16 +4,16 @@ set hive.optimize.skewjoin.compiletime = true;
CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
CREATE TABLE T3(key STRING, val STRING)
SKEWED BY (val) ON ((12)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
-- This test is for skewed join compile time optimization for more than 2 tables.
-- The join key for table 3 is different from the join key used for joining
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt14.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt14.q
index 0f031dd4fc..df1a26bcc7 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt14.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt14.q
@@ -5,16 +5,16 @@ set hive.optimize.skewjoin.compiletime = true;
CREATE TABLE T1(key STRING, val STRING)
SKEWED BY (key) ON ((2)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
CREATE TABLE T3(key STRING, val STRING)
SKEWED BY (val) ON ((12)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
-- This test is for skewed join compile time optimization for more than 2 tables.
-- The join key for table 3 is different from the join key used for joining
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt15.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt15.q
index d5474a455e..1db5472396 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt15.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt15.q
@@ -4,7 +4,7 @@ set hive.optimize.skewjoin.compiletime = true;
CREATE TABLE tmpT1(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE tmpT1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1;
-- testing skew on other data types - int
CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2));
@@ -12,7 +12,7 @@ INSERT OVERWRITE TABLE T1 SELECT key, val FROM tmpT1;
CREATE TABLE tmpT2(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE tmpT2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE tmpT2;
CREATE TABLE T2(key INT, val STRING) SKEWED BY (key) ON ((3));
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt16.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt16.q
index 46b4f6d6e6..915de612de 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt16.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt16.q
@@ -5,12 +5,12 @@ set hive.optimize.skewjoin.compiletime = true;
CREATE TABLE T1(key STRING, val STRING)
SKEWED BY (key, val) ON ((2, 12)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING)
SKEWED BY (key) ON ((3)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- One of the tables is skewed by 2 columns, and the other table is
-- skewed by one column. This join is performed on both the columns
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt17.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt17.q
index 0592ca8c3e..2ee79cc758 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt17.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt17.q
@@ -5,12 +5,12 @@ set hive.optimize.skewjoin.compiletime = true;
CREATE TABLE T1(key STRING, val STRING)
SKEWED BY (key, val) ON ((2, 12)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING)
SKEWED BY (key) ON ((2)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- One of the tables is skewed by 2 columns, and the other table is
-- skewed by one column. This join is performed on the first skewed column
@@ -31,12 +31,12 @@ DROP TABLE T2;
CREATE TABLE T1(key STRING, val STRING)
SKEWED BY (key, val) ON ((2, 12)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING)
SKEWED BY (key) ON ((2)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- One of the tables is skewed by 2 columns, and the other table is
-- skewed by one column. This join is performed on both the columns
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt18.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt18.q
index 433fea336d..9d06cc0306 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt18.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt18.q
@@ -4,7 +4,7 @@ set hive.optimize.skewjoin.compiletime = true;
CREATE TABLE tmpT1(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE tmpT1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmpT1;
-- testing skew on other data types - int
CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2));
@@ -16,7 +16,7 @@ INSERT OVERWRITE TABLE T1 SELECT key, val FROM tmpT1;
CREATE TABLE T2(key STRING, val STRING)
SKEWED BY (key) ON ((3)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- Once HIVE-3445 is fixed, the compile time skew join optimization would be
-- applicable here. Till the above jira is fixed, it would be performed as a
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt19.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt19.q
index 0b11ebe4cb..075645f89d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt19.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt19.q
@@ -6,11 +6,11 @@ CREATE TABLE T1(key STRING, val STRING)
CLUSTERED BY (key) INTO 4 BUCKETS
SKEWED BY (key) ON ((2)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- add a test where the skewed key is also the bucketized key
-- it should not matter, and the compile time skewed join
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt2.q
index 34fcdbfac4..f7acaad18e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt2.q
@@ -5,12 +5,12 @@ set hive.optimize.skewjoin.compiletime = true;
CREATE TABLE T1(key STRING, val STRING)
SKEWED BY (key) ON ((2), (7)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING)
SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- a simple query with skew on both the tables on the join key
-- multiple skew values are present for the skewed keys
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt20.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt20.q
index f217052881..9b908ce21b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt20.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt20.q
@@ -6,11 +6,11 @@ CREATE TABLE T1(key STRING, val STRING)
CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS
SKEWED BY (key) ON ((2)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- add a test where the skewed key is also the bucketized/sorted key
-- it should not matter, and the compile time skewed join
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt3.q
index f6002ad498..22ea4f0621 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt3.q
@@ -5,12 +5,12 @@ set hive.optimize.skewjoin.compiletime = true;
CREATE TABLE T1(key STRING, val STRING)
SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING)
SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- a simple query with skew on both the tables. One of the skewed
-- values is common to both the tables. The skewed value should not be
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt4.q
index ca83c44608..8496b1aa79 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt4.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt4.q
@@ -5,11 +5,11 @@ set hive.optimize.skewjoin.compiletime = true;
CREATE TABLE T1(key STRING, val STRING)
SKEWED BY (key) ON ((2)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- only one of the tables of the join (the left table of the join) is skewed
-- the skewed filter would still be applied to both the tables
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt5.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt5.q
index 3d7884c5e3..152de5bde7 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt5.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt5.q
@@ -5,12 +5,12 @@ set hive.optimize.skewjoin.compiletime = true;
CREATE TABLE T1(key STRING, val STRING)
SKEWED BY (key, val) ON ((2, 12)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING)
SKEWED BY (key) ON ((3)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- One of the tables is skewed by 2 columns, and the other table is
-- skewed by one column. This join is performed on the first skewed column
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt6.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt6.q
index 36cf8ceeae..2e261bde66 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt6.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt6.q
@@ -5,12 +5,12 @@ set hive.optimize.skewjoin.compiletime = true;
CREATE TABLE T1(key STRING, val STRING)
SKEWED BY (key, val) ON ((2, 12), (8, 18)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING)
SKEWED BY (key, val) ON ((3, 13), (8, 18)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- Both the join tables are skewed by 2 keys, and one of the skewed values
-- is common to both the tables. The join key is a subset of the skewed key set:
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt7.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt7.q
index cf84f67b6a..e4d9605f6f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt7.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt7.q
@@ -5,16 +5,16 @@ set hive.optimize.skewjoin.compiletime = true;
CREATE TABLE T1(key STRING, val STRING)
SKEWED BY (key) ON ((2), (8)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING)
SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
-- This test is for validating skewed join compile time optimization for more than
-- 2 tables. The join key is the same, and so a 3-way join would be performed.
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt8.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt8.q
index d0ac845f86..85746d9611 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt8.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt8.q
@@ -4,16 +4,16 @@ set hive.optimize.skewjoin.compiletime = true;
CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING)
SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
-- This test is for validating skewed join compile time optimization for more than
-- 2 tables. The join key is the same, and so a 3-way join would be performed.
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt9.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt9.q
index 04834033a1..889ab6c3f5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt9.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/skewjoinopt9.q
@@ -5,11 +5,11 @@ set hive.optimize.skewjoin.compiletime = true;
CREATE TABLE T1(key STRING, val STRING)
SKEWED BY (key) ON ((2)) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
-- no skew join compile time optimization would be performed if one of the
-- join sources is a sub-query consisting of a union all
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_1.q
index 359513e424..9dee4110f5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_1.q
@@ -6,9 +6,9 @@ create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (k
create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
-load data local inpath '../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1;
-load data local inpath '../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
-load data local inpath '../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;
+load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1;
+load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
+load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;
set hive.optimize.bucketmapjoin = true;
set hive.optimize.bucketmapjoin.sortedmerge = true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_10.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_10.q
index a79ebf62d0..1fbe2090ea 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_10.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_10.q
@@ -6,11 +6,11 @@ alter table tmp_smb_bucket_10 add partition (ds = '2');
-- add dummy files to make sure that the number of files in each partition is the same as the number of buckets
-load data local inpath '../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1');
-load data local inpath '../data/files/smbbucket_2.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1');
+load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1');
+load data local inpath '../../data/files/smbbucket_2.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1');
-load data local inpath '../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='2');
-load data local inpath '../data/files/smbbucket_2.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='2');
+load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='2');
+load data local inpath '../../data/files/smbbucket_2.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='2');
set hive.optimize.bucketmapjoin = true;
set hive.optimize.bucketmapjoin.sortedmerge = true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_2.q
index 9d86314879..e2b24333ad 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_2.q
@@ -6,9 +6,9 @@ create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (k
create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
-load data local inpath '../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1;
-load data local inpath '../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
-load data local inpath '../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;
+load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1;
+load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
+load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;
set hive.optimize.bucketmapjoin = true;
set hive.optimize.bucketmapjoin.sortedmerge = true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_25.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_25.q
index 8b534e85ae..e43174bc07 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_25.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_25.q
@@ -10,9 +10,9 @@ create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (k
create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
-load data local inpath '../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1;
-load data local inpath '../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
-load data local inpath '../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;
+load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1;
+load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
+load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;
explain
select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_3.q
index 73b21fae25..b379706cc8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_3.q
@@ -6,9 +6,9 @@ create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (k
create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
-load data local inpath '../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1;
-load data local inpath '../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
-load data local inpath '../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;
+load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1;
+load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
+load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;
set hive.optimize.bucketmapjoin = true;
set hive.optimize.bucketmapjoin.sortedmerge = true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_4.q
index 83143b170e..2b3f67ea4e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_4.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_4.q
@@ -6,9 +6,9 @@ create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (k
create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
-load data local inpath '../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1;
-load data local inpath '../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
-load data local inpath '../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;
+load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1;
+load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
+load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;
set hive.optimize.bucketmapjoin = true;
set hive.optimize.bucketmapjoin.sortedmerge = true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_5.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_5.q
index 61ec084f64..406604e621 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_5.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_5.q
@@ -6,9 +6,9 @@ create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (k
create table smb_bucket_2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
create table smb_bucket_3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE;
-load data local inpath '../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1;
-load data local inpath '../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
-load data local inpath '../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;
+load data local inpath '../../data/files/smbbucket_1.rc' overwrite into table smb_bucket_1;
+load data local inpath '../../data/files/smbbucket_2.rc' overwrite into table smb_bucket_2;
+load data local inpath '../../data/files/smbbucket_3.rc' overwrite into table smb_bucket_3;
set hive.optimize.bucketmapjoin = true;
set hive.optimize.bucketmapjoin.sortedmerge = true;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_7.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_7.q
index 1488b1f949..ca1c7491b7 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_7.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_7.q
@@ -15,8 +15,8 @@ create table smb_join_results(k1 int, v1 string, k2 int, v2 string);
create table smb_join_results_empty_bigtable(k1 int, v1 string, k2 int, v2 string);
create table normal_join_results(k1 int, v1 string, k2 int, v2 string);
-load data local inpath '../data/files/empty1.txt' into table smb_bucket4_1;
-load data local inpath '../data/files/empty2.txt' into table smb_bucket4_1;
+load data local inpath '../../data/files/empty1.txt' into table smb_bucket4_1;
+load data local inpath '../../data/files/empty2.txt' into table smb_bucket4_1;
insert overwrite table smb_bucket4_2
select * from src;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_8.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_8.q
index 6f282ed441..f296057d43 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_8.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_8.q
@@ -5,7 +5,7 @@ set hive.exec.reducers.max = 1;
create table smb_bucket_input (key int, value string) stored as rcfile;
-load data local inpath '../data/files/smb_bucket_input.rc' into table smb_bucket_input;
+load data local inpath '../../data/files/smb_bucket_input.rc' into table smb_bucket_input;
CREATE TABLE smb_bucket4_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/source.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/source.q
index 6fe3d211a0..76ca152ef5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/source.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/source.q
@@ -1 +1 @@
-source ../data/files/source.txt;
+source ../../data/files/source.txt;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/split.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/split.q
new file mode 100644
index 0000000000..f5d7ff8fdd
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/split.q
@@ -0,0 +1,8 @@
+DROP TABLE tmp_jo_tab_test;
+CREATE table tmp_jo_tab_test (message_line STRING)
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/input.txt'
+OVERWRITE INTO TABLE tmp_jo_tab_test;
+
+select size(split(message_line, '\t')) from tmp_jo_tab_test;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats1.q
index 0b783de153..359d27b315 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats1.q
@@ -26,5 +26,5 @@ DESCRIBE FORMATTED tmptable;
-- Load a file into an existing table
-- Some stats (numFiles, totalSize) should be updated correctly
-- Some other stats (numRows, rawDataSize) should be cleared
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE tmptable;
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE tmptable;
DESCRIBE FORMATTED tmptable; \ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats11.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats11.q
index 6618c913ea..d037c003b7 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats11.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats11.q
@@ -2,25 +2,25 @@ set datanucleus.cache.collections=false;
set hive.stats.autogather=true;
CREATE TABLE srcbucket_mapjoin(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin;
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin;
CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
explain
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
desc formatted srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
desc formatted srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
desc formatted srcbucket_mapjoin_part partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
desc formatted srcbucket_mapjoin_part partition(ds='2008-04-08');
CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
-load data local inpath '../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-08');
create table bucketmapjoin_hash_result_1 (key bigint , value1 bigint, value2 bigint);
create table bucketmapjoin_hash_result_2 (key bigint , value1 bigint, value2 bigint);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats18.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats18.q
index 425de64c26..e773cd7494 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats18.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats18.q
@@ -13,7 +13,7 @@ insert overwrite table stats_part partition (ds='2010-04-08', hr = '13') select
-- Some other stats (numRows, rawDataSize) should be cleared
desc formatted stats_part partition (ds='2010-04-08', hr='13');
-load data local inpath '../data/files/srcbucket20.txt' INTO TABLE stats_part partition (ds='2010-04-08', hr='13');
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE stats_part partition (ds='2010-04-08', hr='13');
desc formatted stats_part partition (ds='2010-04-08', hr='13');
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats19.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats19.q
index da4af9655d..51514bd773 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats19.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats19.q
@@ -1,7 +1,7 @@
set datanucleus.cache.collections=false;
set hive.stats.autogather=true;
set hive.stats.reliable=true;
-set hive.stats.dbclass=dummy;
+set hive.stats.dbclass=custom;
set hive.stats.default.publisher=org.apache.hadoop.hive.ql.stats.DummyStatsPublisher;
set hive.stats.default.aggregator=org.apache.hadoop.hive.ql.stats.KeyVerifyingStatsAggregator;
@@ -56,7 +56,7 @@ insert overwrite table stats_part partition (ds='2010-04-08', hr = '13') select
desc formatted stats_part partition (ds='2010-04-08', hr = '13');
-set hive.stats.dbclass=dummy;
+set hive.stats.dbclass=custom;
set hive.stats.default.publisher=org.apache.hadoop.hive.ql.stats.DummyStatsPublisher;
set hive.stats.default.aggregator=org.apache.hadoop.hive.ql.stats.KeyVerifyingStatsAggregator;
set hive.stats.key.prefix.max.length=0;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats3.q
index 5962348d9c..fd7e0eaca8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats3.q
@@ -5,9 +5,9 @@ drop table hive_test_dst;
create table hive_test_src ( col1 string ) stored as textfile ;
explain extended
-load data local inpath '../data/files/test.dat' overwrite into table hive_test_src ;
+load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src ;
-load data local inpath '../data/files/test.dat' overwrite into table hive_test_src ;
+load data local inpath '../../data/files/test.dat' overwrite into table hive_test_src ;
desc formatted hive_test_src;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats4.q
index 62580042d4..80a67f405c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats4.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats4.q
@@ -25,8 +25,8 @@ insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, v
show partitions nzhang_part1;
show partitions nzhang_part2;
-select * from nzhang_part1 where ds is not null and hr is not null;
-select * from nzhang_part2 where ds is not null and hr is not null;
+select * from nzhang_part1 where ds is not null and hr is not null order by ds, hr, key;
+select * from nzhang_part2 where ds is not null and hr is not null order by ds, hr, key;
describe formatted nzhang_part1 partition(ds='2008-04-08',hr=11);
describe formatted nzhang_part1 partition(ds='2008-04-08',hr=12);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_aggregator_error_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_aggregator_error_1.q
index 4e7d3dc547..5e6b0aaa12 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_aggregator_error_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_aggregator_error_1.q
@@ -1,12 +1,12 @@
-- In this test, there is a dummy stats aggregator which throws an error when various
--- methods are called (as indicated by the parameter hive.test.dummystats.agregator)
+-- methods are called (as indicated by the parameter hive.test.dummystats.aggregator)
-- Since stats need not be reliable (by setting hive.stats.reliable to false), the
-- insert statements succeed. The insert statement succeeds even if the stats aggregator
-- is set to null, since stats need not be reliable.
create table tmptable(key string, value string);
-set hive.stats.dbclass=dummy;
+set hive.stats.dbclass=custom;
set hive.stats.default.publisher=org.apache.hadoop.hive.ql.stats.DummyStatsPublisher;
set hive.stats.default.aggregator=org.apache.hadoop.hive.ql.stats.DummyStatsAggregator;
set hive.stats.reliable=false;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_counter.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_counter.q
new file mode 100644
index 0000000000..3c1f132a68
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_counter.q
@@ -0,0 +1,16 @@
+set hive.stats.dbclass=counter;
+set hive.stats.autogather=false;
+
+-- by analyze
+create table dummy1 as select * from src;
+
+analyze table dummy1 compute statistics;
+desc formatted dummy1;
+
+set hive.stats.dbclass=counter;
+set hive.stats.autogather=true;
+
+-- by autogather
+create table dummy2 as select * from src;
+
+desc formatted dummy2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_counter_partitioned.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_counter_partitioned.q
new file mode 100644
index 0000000000..e1274c0cb5
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_counter_partitioned.q
@@ -0,0 +1,45 @@
+set hive.stats.dbclass=counter;
+set hive.stats.autogather=true;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+-- partitioned table analyze
+
+create table dummy (key string, value string) partitioned by (ds string, hr string);
+
+load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='12');
+load data local inpath '../../data/files/kv1.txt' into table dummy partition (ds='2008',hr='11');
+
+analyze table dummy partition (ds,hr) compute statistics;
+describe formatted dummy partition (ds='2008', hr='11');
+describe formatted dummy partition (ds='2008', hr='12');
+
+drop table dummy;
+
+-- static partitioned table on insert
+
+create table dummy (key string, value string) partitioned by (ds string, hr string);
+
+insert overwrite table dummy partition (ds='10',hr='11') select * from src;
+insert overwrite table dummy partition (ds='10',hr='12') select * from src;
+
+describe formatted dummy partition (ds='10', hr='11');
+describe formatted dummy partition (ds='10', hr='12');
+
+drop table dummy;
+
+-- dynamic partitioned table on insert
+
+create table dummy (key int) partitioned by (hr int);
+
+CREATE TABLE tbl(key int, value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|';
+LOAD DATA LOCAL INPATH '../../data/files/tbl.txt' OVERWRITE INTO TABLE tbl;
+
+insert overwrite table dummy partition (hr) select * from tbl;
+
+describe formatted dummy partition (hr=1997);
+describe formatted dummy partition (hr=1994);
+describe formatted dummy partition (hr=1998);
+describe formatted dummy partition (hr=1996);
+
+drop table tbl;
+drop table dummy;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_invalidation.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_invalidation.q
new file mode 100644
index 0000000000..a7fce6e3e5
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_invalidation.q
@@ -0,0 +1,15 @@
+set hive.stats.autogather=true;
+
+CREATE TABLE stats_invalid (key string, value string);
+
+insert overwrite table stats_invalid
+select * from src;
+
+analyze table stats_invalid compute statistics for columns key,value;
+
+desc formatted stats_invalid;
+alter table stats_invalid add columns (new_col string);
+
+desc formatted stats_invalid;
+drop table stats_invalid;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_list_bucket.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_list_bucket.q
new file mode 100644
index 0000000000..5982643741
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_list_bucket.q
@@ -0,0 +1,45 @@
+
+set hive.mapred.supports.subdirectories=true;
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+
+drop table stats_list_bucket;
+drop table stats_list_bucket_1;
+
+create table stats_list_bucket (
+ c1 string,
+ c2 string
+) partitioned by (ds string, hr string)
+skewed by (c1, c2) on (('466','val_466'),('287','val_287'),('82','val_82'))
+stored as directories
+stored as rcfile;
+
+set hive.stats.key.prefix.max.length=1;
+
+-- Make sure we use hashed IDs during stats publishing.
+-- Try partitioned table with list bucketing.
+-- The stats should show 500 rows loaded, as many rows as the src table has.
+
+insert overwrite table stats_list_bucket partition (ds = '2008-04-08', hr = '11')
+ select key, value from src;
+
+desc formatted stats_list_bucket partition (ds = '2008-04-08', hr = '11');
+
+-- Also try non-partitioned table with list bucketing.
+-- Stats should show the same number of rows.
+
+create table stats_list_bucket_1 (
+ c1 string,
+ c2 string
+)
+skewed by (c1, c2) on (('466','val_466'),('287','val_287'),('82','val_82'))
+stored as directories
+stored as rcfile;
+
+insert overwrite table stats_list_bucket_1
+ select key, value from src;
+
+desc formatted stats_list_bucket_1;
+
+drop table stats_list_bucket;
+drop table stats_list_bucket_1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_noscan_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_noscan_2.q
index c934fb2051..b106b30476 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_noscan_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_noscan_2.q
@@ -1,12 +1,12 @@
-- test analyze table compute statistics [noscan] on external table
-- 1 test table
-CREATE EXTERNAL TABLE anaylyze_external (a INT) LOCATION '${system:test.src.data.dir}/files/ext_test';
+CREATE EXTERNAL TABLE anaylyze_external (a INT) LOCATION '${system:hive.root}/data/files/ext_test';
SELECT * FROM anaylyze_external;
-analyze table anaylyze_external compute statistics;
-describe formatted anaylyze_external;
analyze table anaylyze_external compute statistics noscan;
describe formatted anaylyze_external;
+analyze table anaylyze_external compute statistics;
+describe formatted anaylyze_external;
drop table anaylyze_external;
-- 2 test partition
@@ -21,10 +21,10 @@ CREATE EXTERNAL TABLE anaylyze_external (key string, val string) partitioned by
ALTER TABLE anaylyze_external ADD PARTITION (insertdate='2008-01-01') location 'pfile://${system:test.tmp.dir}/texternal/2008-01-01';
select count(*) from anaylyze_external where insertdate='2008-01-01';
-- analyze
-analyze table anaylyze_external PARTITION (insertdate='2008-01-01') compute statistics;
-describe formatted anaylyze_external PARTITION (insertdate='2008-01-01');
analyze table anaylyze_external PARTITION (insertdate='2008-01-01') compute statistics noscan;
describe formatted anaylyze_external PARTITION (insertdate='2008-01-01');
+analyze table anaylyze_external PARTITION (insertdate='2008-01-01') compute statistics;
+describe formatted anaylyze_external PARTITION (insertdate='2008-01-01');
dfs -rmr ${system:test.tmp.dir}/texternal;
drop table anaylyze_external;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_only_null.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_only_null.q
new file mode 100644
index 0000000000..b47bc48958
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_only_null.q
@@ -0,0 +1,41 @@
+set hive.stats.dbclass=fs;
+set hive.compute.query.using.stats=true;
+set hive.stats.autogather=true;
+CREATE TABLE temps_null(a double, b int, c STRING, d smallint) STORED AS TEXTFILE;
+
+CREATE TABLE stats_null(a double, b int, c STRING, d smallint) STORED AS TEXTFILE;
+
+CREATE TABLE stats_null_part(a double, b int, c STRING, d smallint) partitioned by (dt string) STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE temps_null;
+
+insert overwrite table stats_null select * from temps_null;
+insert into table stats_null_part partition(dt='2010') select * from temps_null where d <=5;
+
+insert into table stats_null_part partition(dt='2011') select * from temps_null where d > 5;
+explain
+select count(*), count(a), count(b), count(c), count(d) from stats_null;
+explain
+select count(*), count(a), count(b), count(c), count(d) from stats_null_part;
+
+
+analyze table stats_null compute statistics for columns a,b,c,d;
+analyze table stats_null_part partition(dt='2010') compute statistics for columns a,b,c,d;
+analyze table stats_null_part partition(dt='2011') compute statistics for columns a,b,c,d;
+
+describe formatted stats_null_part partition (dt='2010');
+describe formatted stats_null_part partition (dt='2011');
+
+explain
+select count(*), count(a), count(b), count(c), count(d) from stats_null;
+explain
+select count(*), count(a), count(b), count(c), count(d) from stats_null_part;
+
+
+select count(*), count(a), count(b), count(c), count(d) from stats_null;
+select count(*), count(a), count(b), count(c), count(d) from stats_null_part;
+drop table stats_null;
+drop table stats_null_part;
+drop table temps_null;
+set hive.compute.query.using.stats=false;
+set hive.stats.dbclass=jdbc:derby;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_publisher_error_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_publisher_error_1.q
index 6d383f213d..513b8e75a0 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_publisher_error_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/stats_publisher_error_1.q
@@ -6,7 +6,7 @@
create table tmptable(key string, value string);
-set hive.stats.dbclass=dummy;
+set hive.stats.dbclass=custom;
set hive.stats.default.publisher=org.apache.hadoop.hive.ql.stats.DummyStatsPublisher;
set hive.stats.default.aggregator=org.apache.hadoop.hive.ql.stats.DummyStatsAggregator;
set hive.stats.reliable=false;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/statsfs.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/statsfs.q
new file mode 100644
index 0000000000..82a2295ac2
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/statsfs.q
@@ -0,0 +1,63 @@
+set hive.stats.dbclass=fs;
+
+-- stats computation on partitioned table with analyze command
+
+create table t1 (key string, value string) partitioned by (ds string);
+load data local inpath '../../data/files/kv1.txt' into table t1 partition (ds = '2010');
+load data local inpath '../../data/files/kv1.txt' into table t1 partition (ds = '2011');
+
+analyze table t1 partition (ds) compute statistics;
+
+describe formatted t1 partition (ds='2010');
+describe formatted t1 partition (ds='2011');
+
+drop table t1;
+
+-- stats computation on partitioned table with autogather on insert query
+
+create table t1 (key string, value string) partitioned by (ds string);
+
+insert into table t1 partition (ds='2010') select * from src;
+insert into table t1 partition (ds='2011') select * from src;
+
+describe formatted t1 partition (ds='2010');
+describe formatted t1 partition (ds='2011');
+
+drop table t1;
+
+-- analyze stmt on unpartitioned table
+
+create table t1 (key string, value string);
+load data local inpath '../../data/files/kv1.txt' into table t1;
+
+analyze table t1 compute statistics;
+
+describe formatted t1 ;
+
+drop table t1;
+
+-- stats computation on unpartitioned table with autogather on insert query
+
+create table t1 (key string, value string);
+
+insert into table t1 select * from src;
+
+describe formatted t1 ;
+
+drop table t1;
+
+-- stats computation on partitioned table with autogather on insert query with dynamic partitioning
+
+
+create table t1 (key string, value string) partitioned by (ds string, hr string);
+
+set hive.exec.dynamic.partition.mode=nonstrict;
+insert into table t1 partition (ds,hr) select * from srcpart;
+
+describe formatted t1 partition (ds='2008-04-08',hr='11');
+describe formatted t1 partition (ds='2008-04-09',hr='12');
+
+drop table t1;
+set hive.exec.dynamic.partition.mode=strict;
+
+set hive.stats.dbclass=jdbc:derby;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/str_to_map.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/str_to_map.q
index c3b206bba6..ae83407f84 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/str_to_map.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/str_to_map.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
desc function str_to_map;
desc function extended str_to_map;
@@ -19,7 +21,7 @@ limit 3;
drop table tbl_s2m;
-create table tbl_s2m as select 'ABC=CC_333=444' as t from src limit 3;
+create table tbl_s2m as select 'ABC=CC_333=444' as t from src tablesample (3 rows);
select str_to_map(t,'_','=')['333'] from tbl_s2m;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subq.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subq.q
index 3fb1558a90..14fa321c11 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subq.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subq.q
@@ -2,12 +2,12 @@ EXPLAIN
FROM (
FROM src select src.* WHERE src.key < 100
) unioninput
-INSERT OVERWRITE DIRECTORY '../build/ql/test/data/warehouse/union.out' SELECT unioninput.*;
+INSERT OVERWRITE DIRECTORY 'target/warehouse/union.out' SELECT unioninput.*;
FROM (
FROM src select src.* WHERE src.key < 100
) unioninput
-INSERT OVERWRITE DIRECTORY '../build/ql/test/data/warehouse/union.out' SELECT unioninput.*;
+INSERT OVERWRITE DIRECTORY 'target/warehouse/union.out' SELECT unioninput.*;
-dfs -cat ../build/ql/test/data/warehouse/union.out/*;
+dfs -cat ${system:test.warehouse.dir}/union.out/*;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subq_where_serialization.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subq_where_serialization.q
new file mode 100644
index 0000000000..1d53982542
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subq_where_serialization.q
@@ -0,0 +1,5 @@
+set hive.auto.convert.join=true;
+set hive.auto.convert.join.noconditionaltask.size=10000000;
+explain select src.key from src where src.key in ( select distinct key from src);
+
+set hive.auto.convert.join=false;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_alias.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_alias.q
new file mode 100644
index 0000000000..ffc33dc2cf
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_alias.q
@@ -0,0 +1,16 @@
+EXPLAIN
+FROM (
+ FROM src select src.* WHERE src.key < 100
+) as unioninput
+INSERT OVERWRITE DIRECTORY 'target/warehouse/union.out' SELECT unioninput.*;
+
+EXPLAIN
+SELECT * FROM
+( SELECT * FROM
+ ( SELECT * FROM src as s ) as src1
+) as src2;
+
+SELECT * FROM
+( SELECT * FROM
+ ( SELECT * FROM src as s ) as src1
+) as src2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_exists.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_exists.q
new file mode 100644
index 0000000000..f812e36070
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_exists.q
@@ -0,0 +1,45 @@
+
+
+-- no agg, corr
+explain
+select *
+from src b
+where exists
+ (select a.key
+ from src a
+ where b.value = a.value and a.key = b.key and a.value > 'val_9'
+ )
+;
+
+select *
+from src b
+where exists
+ (select a.key
+ from src a
+ where b.value = a.value and a.key = b.key and a.value > 'val_9'
+ )
+;
+
+-- view test
+create view cv1 as
+select *
+from src b
+where exists
+ (select a.key
+ from src a
+ where b.value = a.value and a.key = b.key and a.value > 'val_9')
+;
+
+select * from cv1
+;
+
+-- sq in from
+select *
+from (select *
+ from src b
+ where exists
+ (select a.key
+ from src a
+ where b.value = a.value and a.key = b.key and a.value > 'val_9')
+ ) a
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_exists_having.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_exists_having.q
new file mode 100644
index 0000000000..690aa10527
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_exists_having.q
@@ -0,0 +1,60 @@
+
+
+-- no agg, corr
+explain
+select b.key, count(*)
+from src b
+group by b.key
+having exists
+ (select a.key
+ from src a
+ where a.key = b.key and a.value > 'val_9'
+ )
+;
+
+select b.key, count(*)
+from src b
+group by b.key
+having exists
+ (select a.key
+ from src a
+ where a.key = b.key and a.value > 'val_9'
+ )
+;
+
+-- view test
+create view cv1 as
+select b.key, count(*) as c
+from src b
+group by b.key
+having exists
+ (select a.key
+ from src a
+ where a.key = b.key and a.value > 'val_9'
+ )
+;
+
+select * from cv1;
+
+-- sq in from
+select *
+from (select b.key, count(*)
+ from src b
+ group by b.key
+ having exists
+ (select a.key
+ from src a
+ where a.key = b.key and a.value > 'val_9'
+ )
+) a
+;
+
+-- join on agg
+select b.key, min(b.value)
+from src b
+group by b.key
+having exists ( select a.key
+ from src a
+ where a.value > 'val_9' and a.value = min(b.value)
+ )
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_in.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_in.q
new file mode 100644
index 0000000000..69f40f9b8c
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_in.q
@@ -0,0 +1,163 @@
+DROP TABLE part;
+
+-- data setup
+CREATE TABLE part(
+ p_partkey INT,
+ p_name STRING,
+ p_mfgr STRING,
+ p_brand STRING,
+ p_type STRING,
+ p_size INT,
+ p_container STRING,
+ p_retailprice DOUBLE,
+ p_comment STRING
+);
+
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
+
+DROP TABLE lineitem;
+CREATE TABLE lineitem (L_ORDERKEY INT,
+ L_PARTKEY INT,
+ L_SUPPKEY INT,
+ L_LINENUMBER INT,
+ L_QUANTITY DOUBLE,
+ L_EXTENDEDPRICE DOUBLE,
+ L_DISCOUNT DOUBLE,
+ L_TAX DOUBLE,
+ L_RETURNFLAG STRING,
+ L_LINESTATUS STRING,
+ l_shipdate STRING,
+ L_COMMITDATE STRING,
+ L_RECEIPTDATE STRING,
+ L_SHIPINSTRUCT STRING,
+ L_SHIPMODE STRING,
+ L_COMMENT STRING)
+ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|';
+
+LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem;
+
+-- non agg, non corr
+explain
+ select *
+from src
+where src.key in (select key from src s1 where s1.key > '9')
+;
+
+select *
+from src
+where src.key in (select key from src s1 where s1.key > '9')
+order by key
+;
+
+-- non agg, corr
+explain
+select *
+from src b
+where b.key in
+ (select a.key
+ from src a
+ where b.value = a.value and a.key > '9'
+ )
+;
+
+select *
+from src b
+where b.key in
+ (select a.key
+ from src a
+ where b.value = a.value and a.key > '9'
+ )
+order by b.key
+;
+
+-- agg, non corr
+explain
+select p_name, p_size
+from
+part where part.p_size in
+ (select avg(p_size)
+ from (select p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a
+ where r <= 2
+ )
+;
+select p_name, p_size
+from
+part where part.p_size in
+ (select avg(p_size)
+ from (select p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a
+ where r <= 2
+ )
+order by p_name
+;
+
+-- agg, corr
+explain
+select p_mfgr, p_name, p_size
+from part b where b.p_size in
+ (select min(p_size)
+ from (select p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a
+ where r <= 2 and b.p_mfgr = a.p_mfgr
+ )
+;
+
+select p_mfgr, p_name, p_size
+from part b where b.p_size in
+ (select min(p_size)
+ from (select p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a
+ where r <= 2 and b.p_mfgr = a.p_mfgr
+ )
+order by p_mfgr, p_name, p_size
+;
+
+-- distinct, corr
+explain
+select *
+from src b
+where b.key in
+ (select distinct a.key
+ from src a
+ where b.value = a.value and a.key > '9'
+ )
+;
+
+select *
+from src b
+where b.key in
+ (select distinct a.key
+ from src a
+ where b.value = a.value and a.key > '9'
+ )
+order by b.key
+;
+
+-- non agg, non corr, windowing
+select p_mfgr, p_name, p_size
+from part
+where part.p_size in
+ (select first_value(p_size) over(partition by p_mfgr order by p_size) from part)
+order by p_mfgr, p_name, p_size
+;
+
+-- non agg, non corr, with join in Parent Query
+explain
+select p.p_partkey, li.l_suppkey
+from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey
+where li.l_linenumber = 1 and
+ li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR')
+;
+
+select p.p_partkey, li.l_suppkey
+from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey
+where li.l_linenumber = 1 and
+ li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR')
+order by p.p_partkey, li.l_suppkey
+;
+
+-- non agg, corr, with join in Parent Query
+select p.p_partkey, li.l_suppkey
+from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey
+where li.l_linenumber = 1 and
+ li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber)
+order by p.p_partkey, li.l_suppkey
+;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_in_having.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_in_having.q
new file mode 100644
index 0000000000..84045568f4
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_in_having.q
@@ -0,0 +1,104 @@
+
+-- data setup
+CREATE TABLE part(
+ p_partkey INT,
+ p_name STRING,
+ p_mfgr STRING,
+ p_brand STRING,
+ p_type STRING,
+ p_size INT,
+ p_container STRING,
+ p_retailprice DOUBLE,
+ p_comment STRING
+);
+
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
+
+-- non agg, non corr
+explain
+ select key, count(*)
+from src
+group by key
+having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.key )
+;
+
+
+select s1.key, count(*) from src s1 where s1.key > '9' group by s1.key order by s1.key;
+
+select key, count(*)
+from src
+group by key
+having count(*) in (select count(*) from src s1 where s1.key = '90' group by s1.key )
+order by key
+;
+
+-- non agg, corr
+explain
+ select key, value, count(*)
+from src b
+group by key, value
+having count(*) in (select count(*) from src s1 where s1.key > '9' and s1.value = b.value group by s1.key )
+;
+
+-- agg, non corr
+explain
+select p_mfgr, avg(p_size)
+from part b
+group by b.p_mfgr
+having b.p_mfgr in
+ (select p_mfgr
+ from part
+ group by p_mfgr
+ having max(p_size) - min(p_size) < 20
+ )
+;
+
+-- join on agg
+select b.key, min(b.value)
+from src b
+group by b.key
+having b.key in ( select a.key
+ from src a
+ where a.value > 'val_9' and a.value = min(b.value)
+ )
+order by b.key
+;
+
+-- where and having
+-- Plan is:
+-- Stage 1: b semijoin sq1:src (subquery in where)
+-- Stage 2: group by Stage 1 o/p
+-- Stage 5: group by on sq2:src (subquery in having)
+-- Stage 6: Stage 2 o/p semijoin Stage 5
+explain
+select key, value, count(*)
+from src b
+where b.key in (select key from src where src.key > '8')
+group by key, value
+having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.key )
+;
+
+set hive.auto.convert.join=true;
+-- Plan is:
+-- Stage 5: group by on sq2:src (subquery in having)
+-- Stage 10: hashtable for sq1:src (subquery in where)
+-- Stage 2: b map-side semijoin Stage 10 o/p
+-- Stage 3: Stage 2 semijoin Stage 5
+-- Stage 9: construct hashtable for Stage 5 o/p
+-- Stage 6: Stage 2 map-side semijoin Stage 9
+explain
+select key, value, count(*)
+from src b
+where b.key in (select key from src where src.key > '8')
+group by key, value
+having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.key )
+;
+
+-- non agg, non corr, windowing
+explain
+select p_mfgr, p_name, avg(p_size)
+from part
+group by p_mfgr, p_name
+having p_name in
+ (select first_value(p_name) over(partition by p_mfgr order by p_size) from part)
+;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_multiinsert.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_multiinsert.q
new file mode 100644
index 0000000000..ed36d9ef6e
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_multiinsert.q
@@ -0,0 +1,82 @@
+set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecutePrinter,org.apache.hadoop.hive.ql.hooks.PrintCompletedTasksHook;
+
+CREATE TABLE src_4(
+ key STRING,
+ value STRING
+)
+;
+
+CREATE TABLE src_5(
+ key STRING,
+ value STRING
+)
+;
+
+explain
+from src b
+INSERT OVERWRITE TABLE src_4
+ select *
+ where b.key in
+ (select a.key
+ from src a
+ where b.value = a.value and a.key > '9'
+ )
+INSERT OVERWRITE TABLE src_5
+ select *
+ where b.key not in ( select key from src s1 where s1.key > '2')
+ order by key
+;
+
+from src b
+INSERT OVERWRITE TABLE src_4
+ select *
+ where b.key in
+ (select a.key
+ from src a
+ where b.value = a.value and a.key > '9'
+ )
+INSERT OVERWRITE TABLE src_5
+ select *
+ where b.key not in ( select key from src s1 where s1.key > '2')
+ order by key
+;
+
+select * from src_4
+;
+select * from src_5
+;
+set hive.auto.convert.join=true;
+
+explain
+from src b
+INSERT OVERWRITE TABLE src_4
+ select *
+ where b.key in
+ (select a.key
+ from src a
+ where b.value = a.value and a.key > '9'
+ )
+INSERT OVERWRITE TABLE src_5
+ select *
+ where b.key not in ( select key from src s1 where s1.key > '2')
+ order by key
+;
+
+from src b
+INSERT OVERWRITE TABLE src_4
+ select *
+ where b.key in
+ (select a.key
+ from src a
+ where b.value = a.value and a.key > '9'
+ )
+INSERT OVERWRITE TABLE src_5
+ select *
+ where b.key not in ( select key from src s1 where s1.key > '2')
+ order by key
+;
+
+select * from src_4
+;
+select * from src_5
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_notexists.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_notexists.q
new file mode 100644
index 0000000000..43a801fa96
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_notexists.q
@@ -0,0 +1,41 @@
+
+
+-- no agg, corr
+explain
+select *
+from src b
+where not exists
+ (select a.key
+ from src a
+ where b.value = a.value and a.key = b.key and a.value > 'val_2'
+ )
+;
+
+select *
+from src b
+where not exists
+ (select a.key
+ from src a
+ where b.value = a.value and a.key = b.key and a.value > 'val_2'
+ )
+;
+
+-- distinct, corr
+explain
+select *
+from src b
+where not exists
+ (select distinct a.key
+ from src a
+ where b.value = a.value and a.value > 'val_2'
+ )
+;
+
+select *
+from src b
+where not exists
+ (select a.key
+ from src a
+ where b.value = a.value and a.value > 'val_2'
+ )
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_notexists_having.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_notexists_having.q
new file mode 100644
index 0000000000..7205d17bc4
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_notexists_having.q
@@ -0,0 +1,46 @@
+
+
+-- no agg, corr
+explain
+select *
+from src b
+group by key, value
+having not exists
+ (select a.key
+ from src a
+ where b.value = a.value and a.key = b.key and a.value > 'val_12'
+ )
+;
+
+select *
+from src b
+group by key, value
+having not exists
+ (select a.key
+ from src a
+ where b.value = a.value and a.key = b.key and a.value > 'val_12'
+ )
+;
+
+
+-- distinct, corr
+explain
+select *
+from src b
+group by key, value
+having not exists
+ (select distinct a.key
+ from src a
+ where b.value = a.value and a.value > 'val_12'
+ )
+;
+
+select *
+from src b
+group by key, value
+having not exists
+ (select distinct a.key
+ from src a
+ where b.value = a.value and a.value > 'val_12'
+ )
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_notin.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_notin.q
new file mode 100644
index 0000000000..d5f6086031
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_notin.q
@@ -0,0 +1,143 @@
+DROP TABLE part;
+
+-- data setup
+CREATE TABLE part(
+ p_partkey INT,
+ p_name STRING,
+ p_mfgr STRING,
+ p_brand STRING,
+ p_type STRING,
+ p_size INT,
+ p_container STRING,
+ p_retailprice DOUBLE,
+ p_comment STRING
+);
+
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
+
+DROP TABLE lineitem;
+CREATE TABLE lineitem (L_ORDERKEY INT,
+ L_PARTKEY INT,
+ L_SUPPKEY INT,
+ L_LINENUMBER INT,
+ L_QUANTITY DOUBLE,
+ L_EXTENDEDPRICE DOUBLE,
+ L_DISCOUNT DOUBLE,
+ L_TAX DOUBLE,
+ L_RETURNFLAG STRING,
+ L_LINESTATUS STRING,
+ l_shipdate STRING,
+ L_COMMITDATE STRING,
+ L_RECEIPTDATE STRING,
+ L_SHIPINSTRUCT STRING,
+ L_SHIPMODE STRING,
+ L_COMMENT STRING)
+ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|';
+
+LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem;
+
+-- non agg, non corr
+explain
+select *
+from src
+where src.key not in
+ ( select key from src s1
+ where s1.key > '2'
+ )
+;
+
+select *
+from src
+where src.key not in ( select key from src s1 where s1.key > '2')
+order by key
+;
+
+-- non agg, corr
+explain
+select p_mfgr, b.p_name, p_size
+from part b
+where b.p_name not in
+ (select p_name
+ from (select p_mfgr, p_name, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a
+ where r <= 2 and b.p_mfgr = a.p_mfgr
+ )
+;
+
+select p_mfgr, b.p_name, p_size
+from part b
+where b.p_name not in
+ (select p_name
+ from (select p_mfgr, p_name, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a
+ where r <= 2 and b.p_mfgr = a.p_mfgr
+ )
+order by p_mfgr, b.p_name
+;
+
+-- agg, non corr
+explain
+select p_name, p_size
+from
+part where part.p_size not in
+ (select avg(p_size)
+ from (select p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a
+ where r <= 2
+ )
+;
+select p_name, p_size
+from
+part where part.p_size not in
+ (select avg(p_size)
+ from (select p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a
+ where r <= 2
+ )
+order by p_name, p_size
+;
+
+-- agg, corr
+explain
+select p_mfgr, p_name, p_size
+from part b where b.p_size not in
+ (select min(p_size)
+ from (select p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a
+ where r <= 2 and b.p_mfgr = a.p_mfgr
+ )
+;
+
+select p_mfgr, p_name, p_size
+from part b where b.p_size not in
+ (select min(p_size)
+ from (select p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a
+ where r <= 2 and b.p_mfgr = a.p_mfgr
+ )
+order by p_mfgr, p_size
+;
+
+-- non agg, non corr, Group By in Parent Query
+select li.l_partkey, count(*)
+from lineitem li
+where li.l_linenumber = 1 and
+ li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR')
+group by li.l_partkey
+;
+
+-- alternate not in syntax
+select *
+from src
+where not src.key in ( select key from src s1 where s1.key > '2')
+order by key
+;
+
+-- null check
+create view T1_v as
+select key from src where key <'11';
+
+create view T2_v as
+select case when key > '104' then null else key end as key from T1_v;
+
+explain
+select *
+from T1_v where T1_v.key not in (select T2_v.key from T2_v);
+
+select *
+from T1_v where T1_v.key not in (select T2_v.key from T2_v);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_notin_having.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_notin_having.q
new file mode 100644
index 0000000000..a586f02272
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_notin_having.q
@@ -0,0 +1,74 @@
+DROP TABLE part;
+
+-- data setup
+CREATE TABLE part(
+ p_partkey INT,
+ p_name STRING,
+ p_mfgr STRING,
+ p_brand STRING,
+ p_type STRING,
+ p_size INT,
+ p_container STRING,
+ p_retailprice DOUBLE,
+ p_comment STRING
+);
+
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
+
+
+-- non agg, non corr
+explain
+select key, count(*)
+from src
+group by key
+having key not in
+ ( select key from src s1
+ where s1.key > '12'
+ )
+;
+
+-- non agg, corr
+explain
+select b.p_mfgr, min(p_retailprice)
+from part b
+group by b.p_mfgr
+having b.p_mfgr not in
+ (select p_mfgr
+ from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a
+ where min(p_retailprice) = l and r - l > 600
+ )
+;
+
+select b.p_mfgr, min(p_retailprice)
+from part b
+group by b.p_mfgr
+having b.p_mfgr not in
+ (select p_mfgr
+ from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a
+ where min(p_retailprice) = l and r - l > 600
+ )
+;
+
+-- agg, non corr
+explain
+select b.p_mfgr, min(p_retailprice)
+from part b
+group by b.p_mfgr
+having b.p_mfgr not in
+ (select p_mfgr
+ from part a
+ group by p_mfgr
+ having max(p_retailprice) - min(p_retailprice) > 600
+ )
+;
+
+select b.p_mfgr, min(p_retailprice)
+from part b
+group by b.p_mfgr
+having b.p_mfgr not in
+ (select p_mfgr
+ from part a
+ group by p_mfgr
+ having max(p_retailprice) - min(p_retailprice) > 600
+ )
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q
new file mode 100644
index 0000000000..749435c005
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_unqualcolumnrefs.q
@@ -0,0 +1,83 @@
+DROP TABLE part;
+
+-- data setup
+CREATE TABLE part(
+ p_partkey INT,
+ p_name STRING,
+ p_mfgr STRING,
+ p_brand STRING,
+ p_type STRING,
+ p_size INT,
+ p_container STRING,
+ p_retailprice DOUBLE,
+ p_comment STRING
+);
+
+create table src11 (key1 string, value1 string);
+
+create table part2(
+ p2_partkey INT,
+ p2_name STRING,
+ p2_mfgr STRING,
+ p2_brand STRING,
+ p2_type STRING,
+ p2_size INT,
+ p2_container STRING,
+ p2_retailprice DOUBLE,
+ p2_comment STRING
+);
+
+-- non agg, corr
+explain select * from src11 where src11.key1 in (select key from src where src11.value1 = value and key > '9');
+
+explain select * from src a where a.key in (select key from src where a.value = value and key > '9');
+
+-- agg, corr
+explain
+select p_mfgr, p_name, p_size
+from part b where b.p_size in
+ (select min(p2_size)
+ from (select p2_mfgr, p2_size, rank() over(partition by p2_mfgr order by p2_size) as r from part2) a
+ where r <= 2 and b.p_mfgr = p2_mfgr
+ )
+;
+
+
+explain
+select p_mfgr, p_name, p_size
+from part b where b.p_size in
+ (select min(p_size)
+ from (select p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a
+ where r <= 2 and b.p_mfgr = p_mfgr
+ )
+;
+
+-- distinct, corr
+explain
+select *
+from src b
+where b.key in
+ (select distinct key
+ from src
+ where b.value = value and key > '9'
+ )
+;
+
+-- non agg, corr, having
+explain
+ select key, value, count(*)
+from src b
+group by key, value
+having count(*) in (select count(*) from src where src.key > '9' and src.value = b.value group by key )
+;
+
+-- non agg, corr
+explain
+select p_mfgr, b.p_name, p_size
+from part b
+where b.p_name not in
+ (select p_name
+ from (select p_mfgr, p_name, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a
+ where r <= 2 and b.p_mfgr = p_mfgr
+ )
+;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_views.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_views.q
new file mode 100644
index 0000000000..9f6712fc18
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/subquery_views.q
@@ -0,0 +1,48 @@
+
+
+-- exists test
+create view cv1 as
+select *
+from src b
+where exists
+ (select a.key
+ from src a
+ where b.value = a.value and a.key = b.key and a.value > 'val_9')
+;
+
+select *
+from cv1 where cv1.key in (select key from cv1 c where c.key > '95') order by key;
+;
+
+
+-- not in test
+create view cv2 as
+select *
+from src b
+where b.key not in
+ (select a.key
+ from src a
+ where b.value = a.value and a.key = b.key and a.value > 'val_11'
+ )
+;
+
+select *
+from cv2 where cv2.key in (select key from cv2 c where c.key < '11') order by key;
+;
+
+-- in where + having
+create view cv3 as
+select key, value, count(*)
+from src b
+where b.key in (select key from src where src.key > '8')
+group by key, value
+having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.key )
+;
+
+select * from cv3 order by key;
+
+
+-- join of subquery views
+select *
+from cv3
+where cv3.key in (select key from cv1) order by key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/symlink_text_input_format.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/symlink_text_input_format.q
index bb9d6f34ed..d633b97f4c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/symlink_text_input_format.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/symlink_text_input_format.q
@@ -1,12 +1,12 @@
-
+DROP TABLE IF EXISTS symlink_text_input_format;
EXPLAIN
CREATE TABLE symlink_text_input_format (key STRING, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.SymlinkTextInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat';
CREATE TABLE symlink_text_input_format (key STRING, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.SymlinkTextInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat';
-dfs -cp ../data/files/symlink1.txt ../build/ql/test/data/warehouse/symlink_text_input_format/symlink1.txt;
-dfs -cp ../data/files/symlink2.txt ../build/ql/test/data/warehouse/symlink_text_input_format/symlink2.txt;
+dfs -cp ../../data/files/symlink1.txt ${system:test.warehouse.dir}/symlink_text_input_format/symlink1.txt;
+dfs -cp ../../data/files/symlink2.txt ${system:test.warehouse.dir}/symlink_text_input_format/symlink2.txt;
EXPLAIN SELECT * FROM symlink_text_input_format order by key, value;
@@ -20,5 +20,4 @@ EXPLAIN SELECT count(1) FROM symlink_text_input_format;
SELECT count(1) FROM symlink_text_input_format;
-
-
+DROP TABLE symlink_text_input_format;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/table_access_keys_stats.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/table_access_keys_stats.q
index 8b1a390149..23209d85e4 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/table_access_keys_stats.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/table_access_keys_stats.q
@@ -4,7 +4,7 @@ SET hive.stats.collect.tablekeys=true;
-- This test is used for testing the TableAccessAnalyzer
CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/test_boolean_whereclause.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/test_boolean_whereclause.q
index d2da5ac174..a4f0fdb1c1 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/test_boolean_whereclause.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/test_boolean_whereclause.q
@@ -1,5 +1,5 @@
create table if not exists test_boolean(dummy tinyint);
-insert overwrite table test_boolean select 1 from src limit 1;
+insert overwrite table test_boolean select 1 from src tablesample (1 rows);
SELECT 1
FROM (
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_dml.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_dml.q
new file mode 100644
index 0000000000..87d251f40f
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_dml.q
@@ -0,0 +1,40 @@
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+-- CTAS
+EXPLAIN CREATE TABLE tmp_src AS SELECT * FROM (SELECT value, count(value) AS cnt FROM src GROUP BY value) f1 ORDER BY cnt;
+CREATE TABLE tmp_src AS SELECT * FROM (SELECT value, count(value) AS cnt FROM src GROUP BY value) f1 ORDER BY cnt;
+
+SELECT * FROM tmp_src;
+
+-- dyn partitions
+CREATE TABLE tmp_src_part (c string) PARTITIONED BY (d int);
+EXPLAIN INSERT INTO TABLE tmp_src_part PARTITION (d) SELECT * FROM tmp_src;
+INSERT INTO TABLE tmp_src_part PARTITION (d) SELECT * FROM tmp_src;
+
+SELECT * FROM tmp_src_part;
+
+-- multi insert
+CREATE TABLE even (c int, d string);
+CREATE TABLE odd (c int, d string);
+
+EXPLAIN
+FROM src
+INSERT INTO TABLE even SELECT key, value WHERE key % 2 = 0
+INSERT INTO TABLE odd SELECT key, value WHERE key % 2 = 1;
+
+FROM src
+INSERT INTO TABLE even SELECT key, value WHERE key % 2 = 0
+INSERT INTO TABLE odd SELECT key, value WHERE key % 2 = 1;
+
+SELECT * FROM even;
+SELECT * FROM odd;
+
+-- create empty table
+CREATE TABLE empty STORED AS orc AS SELECT * FROM tmp_src_part WHERE d = -1000;
+SELECT * FROM empty;
+
+-- drop the tables
+DROP TABLE even;
+DROP TABLE odd;
+DROP TABLE tmp_src;
+DROP TABLE tmp_src_part;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_fsstat.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_fsstat.q
new file mode 100644
index 0000000000..7f2e28fbaf
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_fsstat.q
@@ -0,0 +1,19 @@
+set hive.execution.engine=tez;
+CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE t1 (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE t1 partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE t1 partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE t1 partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE t1 partition(ds='2008-04-08');
+
+set hive.enforce.bucketing=true;
+set hive.enforce.sorting = true;
+set hive.optimize.bucketingsorting=false;
+set hive.stats.dbclass=fs;
+
+insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from t1;
+describe formatted tab_part partition(ds='2008-04-08');
+
+set hive.stats.dbclass=jdbc:derby;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_insert_overwrite_local_directory_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_insert_overwrite_local_directory_1.q
new file mode 100644
index 0000000000..d7a652fb8c
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_insert_overwrite_local_directory_1.q
@@ -0,0 +1,5 @@
+insert overwrite local directory '${system:test.tmp.dir}/tez_local_src_table_1'
+select * from src order by key limit 10 ;
+dfs -cat file:${system:test.tmp.dir}/tez_local_src_table_1/000000_0 ;
+
+dfs -rmr file:${system:test.tmp.dir}/tez_local_src_table_1/ ;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_join_tests.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_join_tests.q
new file mode 100644
index 0000000000..f309e3fe0e
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_join_tests.q
@@ -0,0 +1,12 @@
+explain
+select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key;
+
+select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key;
+select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key)) x right outer join src c on (x.value = c.value) order by x.key;
+select * from src1 a left outer join src b on (a.key = b.key) right outer join src c on (a.value = c.value) order by a.key;
+select * from src1 a left outer join src b on (a.key = b.key) left outer join src c on (a.value = c.value) order by a.key;
+select * from src1 a left outer join src b on (a.key = b.key) join src c on (a.key = c.key);
+select * from src1 a join src b on (a.key = b.key) join src c on (a.key = c.key);
+
+select count(*) from src1 a join src b on (a.key = b.key) join src c on (a.key = c.key);
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_joins_explain.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_joins_explain.q
new file mode 100644
index 0000000000..9193843824
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_joins_explain.q
@@ -0,0 +1,5 @@
+explain
+select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key;
+
+select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_schema_evolution.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_schema_evolution.q
new file mode 100644
index 0000000000..2f1c73f8e5
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_schema_evolution.q
@@ -0,0 +1,14 @@
+create table test (key int, value string) partitioned by (p int) stored as textfile;
+
+insert into table test partition (p=1) select * from src limit 10;
+
+alter table test set fileformat orc;
+
+insert into table test partition (p=2) select * from src limit 10;
+
+describe test;
+
+select * from test where p=1 and key > 0;
+select * from test where p=2 and key > 0;
+select * from test where key > 0;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_union.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_union.q
new file mode 100644
index 0000000000..f80d94c4a1
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/tez_union.q
@@ -0,0 +1,94 @@
+set hive.auto.convert.join=true;
+
+explain
+select s1.key as key, s1.value as value from src s1 join src s3 on s1.key=s3.key
+UNION ALL
+select s2.key as key, s2.value as value from src s2;
+
+create table ut as
+select s1.key as key, s1.value as value from src s1 join src s3 on s1.key=s3.key
+UNION ALL
+select s2.key as key, s2.value as value from src s2;
+
+select * from ut order by key, value limit 20;
+drop table ut;
+
+set hive.auto.convert.join=false;
+
+explain
+with u as (select * from src union all select * from src)
+select count(*) from (select u1.key as k1, u2.key as k2 from
+u as u1 join u as u2 on (u1.key = u2.key)) a;
+
+create table ut as
+with u as (select * from src union all select * from src)
+select count(*) as cnt from (select u1.key as k1, u2.key as k2 from
+u as u1 join u as u2 on (u1.key = u2.key)) a;
+
+select * from ut order by cnt limit 20;
+drop table ut;
+
+set hive.auto.convert.join=true;
+
+explain select s1.key as skey, u1.key as ukey from
+src s1
+join (select * from src union all select * from src) u1 on s1.key = u1.key;
+
+create table ut as
+select s1.key as skey, u1.key as ukey from
+src s1
+join (select * from src union all select * from src) u1 on s1.key = u1.key;
+
+select * from ut order by skey, ukey limit 20;
+drop table ut;
+
+explain select s1.key as skey, u1.key as ukey, s8.key as lkey from
+src s1
+join (select s2.key as key from src s2 join src s3 on s2.key = s3.key
+ union all select s4.key from src s4 join src s5 on s4.key = s5.key
+ union all select s6.key from src s6 join src s7 on s6.key = s7.key) u1 on (s1.key = u1.key)
+join src s8 on (u1.key = s8.key)
+order by lkey;
+
+create table ut as
+select s1.key as skey, u1.key as ukey, s8.key as lkey from
+src s1
+join (select s2.key as key from src s2 join src s3 on s2.key = s3.key
+ union all select s4.key from src s4 join src s5 on s4.key = s5.key
+ union all select s6.key from src s6 join src s7 on s6.key = s7.key) u1 on (s1.key = u1.key)
+join src s8 on (u1.key = s8.key)
+order by lkey;
+
+select * from ut order by skey, ukey, lkey limit 100;
+
+drop table ut;
+
+explain
+select s2.key as key from src s2 join src s3 on s2.key = s3.key
+union all select s4.key from src s4 join src s5 on s4.key = s5.key;
+
+create table ut as
+select s2.key as key from src s2 join src s3 on s2.key = s3.key
+union all select s4.key from src s4 join src s5 on s4.key = s5.key;
+
+select * from ut order by key limit 30;
+
+drop table ut;
+
+explain
+select * from
+(select * from src union all select * from src) u
+left outer join src s on u.key = s.key;
+
+explain
+select u.key as ukey, s.key as skey from
+(select * from src union all select * from src) u
+right outer join src s on u.key = s.key;
+
+create table ut as
+select u.key as ukey, s.key as skey from
+(select * from src union all select * from src) u
+right outer join src s on u.key = s.key;
+
+select * from ut order by ukey, skey limit 20;
+drop table ut;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_1.q
index f2c3b596af..ce79eefaae 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_1.q
@@ -1,10 +1,12 @@
+set hive.fetch.task.conversion=more;
+
drop table timestamp_1;
create table timestamp_1 (t timestamp);
alter table timestamp_1 set serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe';
-insert overwrite table timestamp_1
- select cast('2011-01-01 01:01:01' as timestamp) from src limit 1;
+insert overwrite table timestamp_1
+ select cast('2011-01-01 01:01:01' as timestamp) from src tablesample (1 rows);
select cast(t as boolean) from timestamp_1 limit 1;
select cast(t as tinyint) from timestamp_1 limit 1;
select cast(t as smallint) from timestamp_1 limit 1;
@@ -15,7 +17,7 @@ select cast(t as double) from timestamp_1 limit 1;
select cast(t as string) from timestamp_1 limit 1;
insert overwrite table timestamp_1
- select '2011-01-01 01:01:01' from src limit 1;
+ select '2011-01-01 01:01:01' from src tablesample (1 rows);
select cast(t as boolean) from timestamp_1 limit 1;
select cast(t as tinyint) from timestamp_1 limit 1;
select cast(t as smallint) from timestamp_1 limit 1;
@@ -26,7 +28,7 @@ select cast(t as double) from timestamp_1 limit 1;
select cast(t as string) from timestamp_1 limit 1;
insert overwrite table timestamp_1
- select '2011-01-01 01:01:01.1' from src limit 1;
+ select '2011-01-01 01:01:01.1' from src tablesample (1 rows);
select cast(t as boolean) from timestamp_1 limit 1;
select cast(t as tinyint) from timestamp_1 limit 1;
select cast(t as smallint) from timestamp_1 limit 1;
@@ -37,7 +39,7 @@ select cast(t as double) from timestamp_1 limit 1;
select cast(t as string) from timestamp_1 limit 1;
insert overwrite table timestamp_1
- select '2011-01-01 01:01:01.0001' from src limit 1;
+ select '2011-01-01 01:01:01.0001' from src tablesample (1 rows);
select cast(t as boolean) from timestamp_1 limit 1;
select cast(t as tinyint) from timestamp_1 limit 1;
select cast(t as smallint) from timestamp_1 limit 1;
@@ -48,7 +50,7 @@ select cast(t as double) from timestamp_1 limit 1;
select cast(t as string) from timestamp_1 limit 1;
insert overwrite table timestamp_1
- select '2011-01-01 01:01:01.000100000' from src limit 1;
+ select '2011-01-01 01:01:01.000100000' from src tablesample (1 rows);
select cast(t as boolean) from timestamp_1 limit 1;
select cast(t as tinyint) from timestamp_1 limit 1;
select cast(t as smallint) from timestamp_1 limit 1;
@@ -59,7 +61,7 @@ select cast(t as double) from timestamp_1 limit 1;
select cast(t as string) from timestamp_1 limit 1;
insert overwrite table timestamp_1
- select '2011-01-01 01:01:01.001000011' from src limit 1;
+ select '2011-01-01 01:01:01.001000011' from src tablesample (1 rows);
select cast(t as boolean) from timestamp_1 limit 1;
select cast(t as tinyint) from timestamp_1 limit 1;
select cast(t as smallint) from timestamp_1 limit 1;
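Throughout the rewritten tests, from src limit 1 becomes from src tablesample (1 rows). A minimal before/after sketch, assuming the standard populated src test table; the sampled form takes its rows while scanning the input split rather than through a LIMIT operator after the scan:

select cast('2011-01-01 01:01:01' as timestamp) from src limit 1;
select cast('2011-01-01 01:01:01' as timestamp) from src tablesample (1 rows);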
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_2.q
index b93208f48c..351f5ca519 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_2.q
@@ -1,10 +1,12 @@
+set hive.fetch.task.conversion=more;
+
drop table timestamp_2;
create table timestamp_2 (t timestamp);
alter table timestamp_2 set serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe';
-insert overwrite table timestamp_2
- select cast('2011-01-01 01:01:01' as timestamp) from src limit 1;
+insert overwrite table timestamp_2
+ select cast('2011-01-01 01:01:01' as timestamp) from src tablesample (1 rows);
select cast(t as boolean) from timestamp_2 limit 1;
select cast(t as tinyint) from timestamp_2 limit 1;
select cast(t as smallint) from timestamp_2 limit 1;
@@ -15,7 +17,7 @@ select cast(t as double) from timestamp_2 limit 1;
select cast(t as string) from timestamp_2 limit 1;
insert overwrite table timestamp_2
- select '2011-01-01 01:01:01' from src limit 1;
+ select '2011-01-01 01:01:01' from src tablesample (1 rows);
select cast(t as boolean) from timestamp_2 limit 1;
select cast(t as tinyint) from timestamp_2 limit 1;
select cast(t as smallint) from timestamp_2 limit 1;
@@ -26,7 +28,7 @@ select cast(t as double) from timestamp_2 limit 1;
select cast(t as string) from timestamp_2 limit 1;
insert overwrite table timestamp_2
- select '2011-01-01 01:01:01.1' from src limit 1;
+ select '2011-01-01 01:01:01.1' from src tablesample (1 rows);
select cast(t as boolean) from timestamp_2 limit 1;
select cast(t as tinyint) from timestamp_2 limit 1;
select cast(t as smallint) from timestamp_2 limit 1;
@@ -37,7 +39,7 @@ select cast(t as double) from timestamp_2 limit 1;
select cast(t as string) from timestamp_2 limit 1;
insert overwrite table timestamp_2
- select '2011-01-01 01:01:01.0001' from src limit 1;
+ select '2011-01-01 01:01:01.0001' from src tablesample (1 rows);
select cast(t as boolean) from timestamp_2 limit 1;
select cast(t as tinyint) from timestamp_2 limit 1;
select cast(t as smallint) from timestamp_2 limit 1;
@@ -48,7 +50,7 @@ select cast(t as double) from timestamp_2 limit 1;
select cast(t as string) from timestamp_2 limit 1;
insert overwrite table timestamp_2
- select '2011-01-01 01:01:01.000100000' from src limit 1;
+ select '2011-01-01 01:01:01.000100000' from src tablesample (1 rows);
select cast(t as boolean) from timestamp_2 limit 1;
select cast(t as tinyint) from timestamp_2 limit 1;
select cast(t as smallint) from timestamp_2 limit 1;
@@ -59,7 +61,7 @@ select cast(t as double) from timestamp_2 limit 1;
select cast(t as string) from timestamp_2 limit 1;
insert overwrite table timestamp_2
- select '2011-01-01 01:01:01.001000011' from src limit 1;
+ select '2011-01-01 01:01:01.001000011' from src tablesample (1 rows);
select cast(t as boolean) from timestamp_2 limit 1;
select cast(t as tinyint) from timestamp_2 limit 1;
select cast(t as smallint) from timestamp_2 limit 1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_3.q
index cda724f9e8..0e1a8d5526 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_3.q
@@ -1,10 +1,12 @@
+set hive.fetch.task.conversion=more;
+
drop table timestamp_3;
create table timestamp_3 (t timestamp);
alter table timestamp_3 set serde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe';
-insert overwrite table timestamp_3
- select cast(cast('1.3041352164485E9' as double) as timestamp) from src limit 1;
+insert overwrite table timestamp_3
+ select cast(cast('1.3041352164485E9' as double) as timestamp) from src tablesample (1 rows);
select cast(t as boolean) from timestamp_3 limit 1;
select cast(t as tinyint) from timestamp_3 limit 1;
select cast(t as smallint) from timestamp_3 limit 1;
@@ -14,4 +16,6 @@ select cast(t as float) from timestamp_3 limit 1;
select cast(t as double) from timestamp_3 limit 1;
select cast(t as string) from timestamp_3 limit 1;
+select t, sum(t), count(*), sum(t)/count(*), avg(t) from timestamp_3 group by t;
+
drop table timestamp_3;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_comparison.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_comparison.q
index f64ae48b85..30fee3cbf6 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_comparison.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_comparison.q
@@ -1,5 +1,6 @@
+set hive.fetch.task.conversion=more;
-select cast('2011-05-06 07:08:09' as timestamp) >
+select cast('2011-05-06 07:08:09' as timestamp) >
cast('2011-05-06 07:08:09' as timestamp) from src limit 1;
select cast('2011-05-06 07:08:09' as timestamp) <
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_lazy.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_lazy.q
index 7a1005295e..e9a0cfae7c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_lazy.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_lazy.q
@@ -1,6 +1,6 @@
drop table timestamp_lazy;
create table timestamp_lazy (t timestamp, key string, value string);
-insert overwrite table timestamp_lazy select cast('2011-01-01 01:01:01' as timestamp), key, value from src limit 5;
+insert overwrite table timestamp_lazy select cast('2011-01-01 01:01:01' as timestamp), key, value from src tablesample (5 rows);
select t,key,value from timestamp_lazy ORDER BY key ASC, value ASC;
select t,key,value from timestamp_lazy distribute by t sort by key ASC, value ASC;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_null.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_null.q
index efd5bc4b78..36f35413e9 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_null.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_null.q
@@ -1,6 +1,6 @@
DROP TABLE IF EXISTS timestamp_null;
CREATE TABLE timestamp_null (t1 TIMESTAMP);
-LOAD DATA LOCAL INPATH '../data/files/test.dat' OVERWRITE INTO TABLE timestamp_null;
+LOAD DATA LOCAL INPATH '../../data/files/test.dat' OVERWRITE INTO TABLE timestamp_null;
SELECT * FROM timestamp_null LIMIT 1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_udf.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_udf.q
index 2620acefee..ade9fb408c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_udf.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/timestamp_udf.q
@@ -1,13 +1,15 @@
+set hive.fetch.task.conversion=more;
+
drop table timestamp_udf;
drop table timestamp_udf_string;
create table timestamp_udf (t timestamp);
create table timestamp_udf_string (t string);
-from src
+from (select * from src tablesample (1 rows)) s
insert overwrite table timestamp_udf
- select '2011-05-06 07:08:09.1234567' limit 1
+ select '2011-05-06 07:08:09.1234567'
insert overwrite table timestamp_udf_string
- select '2011-05-06 07:08:09.1234567' limit 1;
+ select '2011-05-06 07:08:09.1234567';
-- Test UDFs with Timestamp input
select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t),
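The rewrite above also leans on Hive's multi-insert form, where one FROM source feeds several INSERT targets, so a single tablesample on the shared subquery replaces the per-branch limit 1 clauses. A minimal sketch with hypothetical target tables t_a and t_b:

from (select * from src tablesample (1 rows)) s
insert overwrite table t_a select s.key
insert overwrite table t_b select s.value;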
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/transform1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/transform1.q
index 962077c2ca..3bed2b6727 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/transform1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/transform1.q
@@ -12,7 +12,7 @@ SELECT transform(*) USING 'cat' AS (col array<bigint>) FROM transform1_t1;
create table transform1_t2(col array<int>);
insert overwrite table transform1_t2
-select array(1,2,3) from src limit 1;
+select array(1,2,3) from src tablesample (1 rows);
EXPLAIN
SELECT transform('0\0021\0022') USING 'cat' AS (col array<int>) FROM transform1_t2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/truncate_column.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/truncate_column.q
index d756b47e46..0bfb23ead6 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/truncate_column.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/truncate_column.q
@@ -5,7 +5,7 @@ ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' STORED A
set hive.stats.autogather=true;
-INSERT OVERWRITE TABLE test_tab SELECT * FROM src LIMIT 10;
+INSERT OVERWRITE TABLE test_tab SELECT * FROM src tablesample (10 rows);
DESC FORMATTED test_tab;
@@ -20,7 +20,7 @@ DESC FORMATTED test_tab;
SELECT * FROM test_tab ORDER BY value;
-- Truncate multiple columns
-INSERT OVERWRITE TABLE test_tab SELECT * FROM src LIMIT 10;
+INSERT OVERWRITE TABLE test_tab SELECT * FROM src tablesample (10 rows);
TRUNCATE TABLE test_tab COLUMNS (key, value);
@@ -40,7 +40,7 @@ SELECT * FROM test_tab ORDER BY value;
-- Test truncating with a binary serde
ALTER TABLE test_tab SET SERDE 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe';
-INSERT OVERWRITE TABLE test_tab SELECT * FROM src LIMIT 10;
+INSERT OVERWRITE TABLE test_tab SELECT * FROM src tablesample (10 rows);
DESC FORMATTED test_tab;
@@ -65,7 +65,7 @@ SELECT * FROM test_tab ORDER BY value;
-- Test truncating a partition
CREATE TABLE test_tab_part (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE;
-INSERT OVERWRITE TABLE test_tab_part PARTITION (part = '1') SELECT * FROM src LIMIT 10;
+INSERT OVERWRITE TABLE test_tab_part PARTITION (part = '1') SELECT * FROM src tablesample (10 rows);
DESC FORMATTED test_tab_part PARTITION (part = '1');
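For orientation, a condensed sketch of the column truncation being tested, with a hypothetical table tc; the assumption, taken from the flow of the test, is that truncation applies only to RCFile-backed tables and that truncated columns read back as NULL:

create table tc (k string, v string) stored as rcfile;
insert overwrite table tc select * from src tablesample (10 rows);
truncate table tc columns (v);
select k, v from tc;  -- k intact, v expected to come back NULL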
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/truncate_column_merge.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/truncate_column_merge.q
index a7aab357ea..7a59efc4d1 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/truncate_column_merge.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/truncate_column_merge.q
@@ -2,9 +2,9 @@
CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE;
-INSERT OVERWRITE TABLE test_tab SELECT * FROM src LIMIT 5;
+INSERT OVERWRITE TABLE test_tab SELECT * FROM src tablesample (5 rows);
-INSERT INTO TABLE test_tab SELECT * FROM src LIMIT 5;
+INSERT INTO TABLE test_tab SELECT * FROM src tablesample (5 rows);
-- The value should be 2 indicating the table has 2 files
SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/truncate_table.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/truncate_table.q
index c0e81e9ac0..975c0f1ae8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/truncate_table.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/truncate_table.q
@@ -1,5 +1,5 @@
create table src_truncate (key string, value string);
-load data local inpath '../data/files/kv1.txt' into table src_truncate;;
+load data local inpath '../../data/files/kv1.txt' into table src_truncate;
create table srcpart_truncate (key string, value string) partitioned by (ds string, hr string);
alter table srcpart_truncate add partition (ds='2008-04-08', hr='11');
@@ -7,10 +7,10 @@ alter table srcpart_truncate add partition (ds='2008-04-08', hr='12');
alter table srcpart_truncate add partition (ds='2008-04-09', hr='11');
alter table srcpart_truncate add partition (ds='2008-04-09', hr='12');
-load data local inpath '../data/files/kv1.txt' into table srcpart_truncate partition (ds='2008-04-08', hr='11');
-load data local inpath '../data/files/kv1.txt' into table srcpart_truncate partition (ds='2008-04-08', hr='12');
-load data local inpath '../data/files/kv1.txt' into table srcpart_truncate partition (ds='2008-04-09', hr='11');
-load data local inpath '../data/files/kv1.txt' into table srcpart_truncate partition (ds='2008-04-09', hr='12');
+load data local inpath '../../data/files/kv1.txt' into table srcpart_truncate partition (ds='2008-04-08', hr='11');
+load data local inpath '../../data/files/kv1.txt' into table srcpart_truncate partition (ds='2008-04-08', hr='12');
+load data local inpath '../../data/files/kv1.txt' into table srcpart_truncate partition (ds='2008-04-09', hr='11');
+load data local inpath '../../data/files/kv1.txt' into table srcpart_truncate partition (ds='2008-04-09', hr='12');
set hive.fetch.task.conversion=more;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/type_cast_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/type_cast_1.q
index a1607320c7..4d1d978f82 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/type_cast_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/type_cast_1.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
EXPLAIN
SELECT IF(false, 1, cast(2 as smallint)) + 3 FROM src LIMIT 1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/type_conversions_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/type_conversions_1.q
index 63dd66ebed..4c4a828fe0 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/type_conversions_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/type_conversions_1.q
@@ -1,3 +1,4 @@
+set hive.fetch.task.conversion=more;
-- casting from null should yield null
select
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/type_widening.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/type_widening.q
index 0d36bc44fe..b18c01425c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/type_widening.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/type_widening.q
@@ -1,3 +1,4 @@
+set hive.fetch.task.conversion=more;
-- Check for int, bigint automatic type widening conversions in UDFs, UNIONS
EXPLAIN SELECT COALESCE(0, 9223372036854775807) FROM src LIMIT 1;
SELECT COALESCE(0, 9223372036854775807) FROM src LIMIT 1;
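A companion sketch (not in the patch) showing the same int-to-bigint promotion through UNION ALL, assuming src:

select * from (
  select 0 as x from src tablesample (1 rows)
  union all
  select 9223372036854775807 as x from src tablesample (1 rows)
) u;  -- x resolves to bigint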
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_collect_set.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_collect_set.q
index 45aaa022dc..04bea32101 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_collect_set.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_collect_set.q
@@ -1,6 +1,9 @@
DESCRIBE FUNCTION collect_set;
DESCRIBE FUNCTION EXTENDED collect_set;
+DESCRIBE FUNCTION collect_list;
+DESCRIBE FUNCTION EXTENDED collect_list;
+
set hive.map.aggr = false;
set hive.groupby.skewindata = false;
@@ -8,6 +11,10 @@ SELECT key, collect_set(value)
FROM src
GROUP BY key ORDER BY key limit 20;
+SELECT key, collect_list(value)
+FROM src
+GROUP BY key ORDER BY key limit 20;
+
set hive.map.aggr = true;
set hive.groupby.skewindata = false;
@@ -15,6 +22,10 @@ SELECT key, collect_set(value)
FROM src
GROUP BY key ORDER BY key limit 20;
+SELECT key, collect_list(value)
+FROM src
+GROUP BY key ORDER BY key limit 20;
+
set hive.map.aggr = false;
set hive.groupby.skewindata = true;
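collect_list, newly covered above, is the duplicate-preserving counterpart of collect_set. A one-line illustration, relying on the standard src table where key '0' occurs three times with value 'val_0':

select key, collect_set(value), collect_list(value)
from src where key = '0' group by key;
-- expected: ["val_0"] versus ["val_0","val_0","val_0"]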
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_context_ngrams.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_context_ngrams.q
index dda7aaa5f2..f065385688 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_context_ngrams.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_context_ngrams.q
@@ -1,5 +1,5 @@
CREATE TABLE kafka (contents STRING);
-LOAD DATA LOCAL INPATH '../data/files/text-en.txt' INTO TABLE kafka;
+LOAD DATA LOCAL INPATH '../../data/files/text-en.txt' INTO TABLE kafka;
set mapred.reduce.tasks=1;
set hive.exec.reducers.max=1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_corr.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_corr.q
index 6cc9ce2630..a2edec4d64 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_corr.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_corr.q
@@ -2,7 +2,7 @@ DROP TABLE covar_tab;
CREATE TABLE covar_tab (a INT, b INT, c INT)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/covar_tab.txt' OVERWRITE
+LOAD DATA LOCAL INPATH '../../data/files/covar_tab.txt' OVERWRITE
INTO TABLE covar_tab;
DESCRIBE FUNCTION corr;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_covar_pop.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_covar_pop.q
index 0f5d5f35bf..a9937bae3c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_covar_pop.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_covar_pop.q
@@ -2,7 +2,7 @@ DROP TABLE covar_tab;
CREATE TABLE covar_tab (a INT, b INT, c INT)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/covar_tab.txt' OVERWRITE
+LOAD DATA LOCAL INPATH '../../data/files/covar_tab.txt' OVERWRITE
INTO TABLE covar_tab;
DESCRIBE FUNCTION covar_pop;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_covar_samp.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_covar_samp.q
index 72b9c4bd40..2b50d8f238 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_covar_samp.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_covar_samp.q
@@ -2,7 +2,7 @@ DROP TABLE covar_tab;
CREATE TABLE covar_tab (a INT, b INT, c INT)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/covar_tab.txt' OVERWRITE
+LOAD DATA LOCAL INPATH '../../data/files/covar_tab.txt' OVERWRITE
INTO TABLE covar_tab;
DESCRIBE FUNCTION covar_samp;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_ngrams.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_ngrams.q
index 31ffd29a88..6a2fde52e4 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_ngrams.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_ngrams.q
@@ -1,5 +1,5 @@
CREATE TABLE kafka (contents STRING);
-LOAD DATA LOCAL INPATH '../data/files/text-en.txt' INTO TABLE kafka;
+LOAD DATA LOCAL INPATH '../../data/files/text-en.txt' INTO TABLE kafka;
set mapred.reduce.tasks=1;
set hive.exec.reducers.max=1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_percentile.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_percentile.q
new file mode 100644
index 0000000000..8ebf01dcec
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_percentile.q
@@ -0,0 +1 @@
+select percentile(cast(key as bigint), 0.3) from src;
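percentile also accepts an array of percentile points and returns an array of values; a sketch over the same keys:

select percentile(cast(key as bigint), array(0.25, 0.5, 0.75)) from src;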
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_percentile_approx_20.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_percentile_approx_20.q
index 66c408d71b..5b8ad7a08f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_percentile_approx_20.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_percentile_approx_20.q
@@ -1,10 +1,10 @@
-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.20, 0.20S)
CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket;
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket;
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket;
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket;
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket;
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket;
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket;
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket;
create table t1 (result double);
create table t2 (result double);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q
index 07bfb6e1fb..1efa2951ef 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q
@@ -2,10 +2,10 @@
-- 0.23 changed the input order of data in the reducer task, which affects the result of percentile_approx
CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE;
-load data local inpath '../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket;
-load data local inpath '../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket;
-load data local inpath '../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket;
-load data local inpath '../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket;
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket;
+load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket;
+load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket;
+load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket;
create table t1 (result double);
create table t2 (result double);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_sum_list.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_sum_list.q
new file mode 100644
index 0000000000..0d86a42128
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udaf_sum_list.q
@@ -0,0 +1,6 @@
+-- HIVE-5279
+-- GenericUDAFSumList has a Converter which does not have a default constructor
+-- After
+create temporary function sum_list as 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFSumList';
+
+select sum_list(array(key, key)) from src;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_E.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_E.q
index 113af61062..41bdec0827 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_E.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_E.q
@@ -1,14 +1,16 @@
-explain
-select E() FROM src LIMIT 1;
+set hive.fetch.task.conversion=more;
+
+explain
+select E() FROM src tablesample (1 rows);
-select E() FROM src LIMIT 1;
+select E() FROM src tablesample (1 rows);
DESCRIBE FUNCTION E;
DESCRIBE FUNCTION EXTENDED E;
explain
-select E() FROM src LIMIT 1;
+select E() FROM src tablesample (1 rows);
-select E() FROM src LIMIT 1;
+select E() FROM src tablesample (1 rows);
DESCRIBE FUNCTION E;
DESCRIBE FUNCTION EXTENDED E;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_PI.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_PI.q
index 1fde7df5d2..945483ecbf 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_PI.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_PI.q
@@ -1,14 +1,16 @@
-explain
-select PI() FROM src LIMIT 1;
+set hive.fetch.task.conversion=more;
+
+explain
+select PI() FROM src tablesample (1 rows);
-select PI() FROM src LIMIT 1;
+select PI() FROM src tablesample (1 rows);
DESCRIBE FUNCTION PI;
DESCRIBE FUNCTION EXTENDED PI;
explain
-select PI() FROM src LIMIT 1;
+select PI() FROM src tablesample (1 rows);
-select PI() FROM src LIMIT 1;
+select PI() FROM src tablesample (1 rows);
DESCRIBE FUNCTION PI;
DESCRIBE FUNCTION EXTENDED PI;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_abs.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_abs.q
index f4f227d0dc..0c06a5b6cd 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_abs.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_abs.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION abs;
DESCRIBE FUNCTION EXTENDED abs;
@@ -7,7 +9,7 @@ EXPLAIN SELECT
abs(123),
abs(-9223372036854775807),
abs(9223372036854775807)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
abs(0),
@@ -15,16 +17,16 @@ SELECT
abs(123),
abs(-9223372036854775807),
abs(9223372036854775807)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
EXPLAIN SELECT
abs(0.0),
abs(-3.14159265),
abs(3.14159265)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
abs(0.0),
abs(-3.14159265),
abs(3.14159265)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_acos.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_acos.q
index 625a2aa5c6..f9adc16931 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_acos.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_acos.q
@@ -1,14 +1,16 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION acos;
DESCRIBE FUNCTION EXTENDED acos;
SELECT acos(null)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT acos(0)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT acos(-0.5), asin(0.66)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT acos(2)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_array.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_array.q
index fca8fe8d1c..5a6a1830b1 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_array.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_array.q
@@ -1,8 +1,10 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION array;
DESCRIBE FUNCTION EXTENDED array;
EXPLAIN SELECT array(), array()[1], array(1, 2, 3), array(1, 2, 3)[2], array(1,"a", 2, 3), array(1,"a", 2, 3)[2],
-array(array(1), array(2), array(3), array(4))[1][0] FROM src LIMIT 1;
+array(array(1), array(2), array(3), array(4))[1][0] FROM src tablesample (1 rows);
SELECT array(), array()[1], array(1, 2, 3), array(1, 2, 3)[2], array(1,"a", 2, 3), array(1,"a", 2, 3)[2],
-array(array(1), array(2), array(3), array(4))[1][0] FROM src LIMIT 1;
+array(array(1), array(2), array(3), array(4))[1][0] FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_array_contains.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_array_contains.q
index 937bb0be03..d2dad64406 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_array_contains.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_array_contains.q
@@ -1,9 +1,11 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION array_contains;
DESCRIBE FUNCTION EXTENDED array_contains;
-- evaluates function for array of primitives
-SELECT array_contains(array(1, 2, 3), 1) FROM src LIMIT 1;
+SELECT array_contains(array(1, 2, 3), 1) FROM src tablesample (1 rows);
-- evaluates function for nested arrays
SELECT array_contains(array(array(1,2), array(2,3), array(3,4)), array(1,2))
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_ascii.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_ascii.q
index 53b389fd38..3d885a2563 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_ascii.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_ascii.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION ascii;
DESCRIBE FUNCTION EXTENDED ascii;
@@ -5,10 +7,10 @@ EXPLAIN SELECT
ascii('Facebook'),
ascii(''),
ascii('!')
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
ascii('Facebook'),
ascii(''),
ascii('!')
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_asin.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_asin.q
index f95a5f57df..73b77d10f0 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_asin.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_asin.q
@@ -1,14 +1,16 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION asin;
DESCRIBE FUNCTION EXTENDED asin;
SELECT asin(null)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT asin(0)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT asin(-0.5), asin(0.66)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT asin(2)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_atan.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_atan.q
index d4ef03deb8..090438cb0f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_atan.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_atan.q
@@ -1,16 +1,18 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION atan;
DESCRIBE FUNCTION EXTENDED atan;
SELECT atan(null)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT atan(1), atan(6), atan(-1.0)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
DESCRIBE FUNCTION atan;
DESCRIBE FUNCTION EXTENDED atan;
SELECT atan(null)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT atan(1), atan(6), atan(-1.0)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_between.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_between.q
index eb3ccea82e..b22ee9c3ce 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_between.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_between.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
describe function between;
describe function extended between;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bin.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bin.q
index 2b9ad62a39..c5a7ac1a60 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bin.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bin.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION bin;
DESCRIBE FUNCTION EXTENDED bin;
@@ -5,7 +7,7 @@ SELECT
bin(1),
bin(0),
bin(99992421)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
-- Negative numbers should be treated as two's complement (64 bit).
-SELECT bin(-5) FROM src LIMIT 1;
+SELECT bin(-5) FROM src tablesample (1 rows);
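Per the comment above, bin() renders a negative input as its 64-bit two's-complement bit pattern, so bin(-5) is sixty-one 1s followed by 011:

select bin(-5) from src tablesample (1 rows);
-- 1111111111111111111111111111111111111111111111111111111111111011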
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bitmap_and.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bitmap_and.q
index 7ea50dac31..ed7711cd6d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bitmap_and.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bitmap_and.q
@@ -1,11 +1,13 @@
-select ewah_bitmap_and(array(13,2,4,8589934592,4096,0), array(13,2,4,8589934592,4096,0)) from src limit 1;
-select ewah_bitmap_and(array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0)) from src limit 1;
+set hive.fetch.task.conversion=more;
+
+select ewah_bitmap_and(array(13,2,4,8589934592,4096,0), array(13,2,4,8589934592,4096,0)) from src tablesample (1 rows);
+select ewah_bitmap_and(array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0)) from src tablesample (1 rows);
drop table bitmap_test;
create table bitmap_test (a array<bigint>, b array<bigint>);
insert overwrite table bitmap_test
-select array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0) from src limit 10;
+select array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0) from src tablesample (10 rows);
select ewah_bitmap_and(a,b) from bitmap_test;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bitmap_empty.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bitmap_empty.q
index 88e961683e..142b248cdd 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bitmap_empty.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bitmap_empty.q
@@ -1,3 +1,5 @@
-select ewah_bitmap_empty(array(13,2,4,8589934592,0,0)) from src limit 1;
+set hive.fetch.task.conversion=more;
-select ewah_bitmap_empty(array(13,2,4,8589934592,4096,0)) from src limit 1;
+select ewah_bitmap_empty(array(13,2,4,8589934592,0,0)) from src tablesample (1 rows);
+
+select ewah_bitmap_empty(array(13,2,4,8589934592,4096,0)) from src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bitmap_or.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bitmap_or.q
index 0b71e681a5..00785b73fa 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bitmap_or.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_bitmap_or.q
@@ -1,11 +1,13 @@
-select ewah_bitmap_or(array(13,2,4,8589934592,4096,0), array(13,2,4,8589934592,4096,0)) from src limit 1;
-select ewah_bitmap_or(array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0)) from src limit 1;
+set hive.fetch.task.conversion=more;
+
+select ewah_bitmap_or(array(13,2,4,8589934592,4096,0), array(13,2,4,8589934592,4096,0)) from src tablesample (1 rows);
+select ewah_bitmap_or(array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0)) from src tablesample (1 rows);
drop table bitmap_test;
create table bitmap_test (a array<bigint>, b array<bigint>);
insert overwrite table bitmap_test
-select array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0) from src limit 10;
+select array(13,2,4,8589934592,4096,0), array(8,2,4,8589934592,128,0) from src tablesample (10 rows);
select ewah_bitmap_or(a,b) from bitmap_test;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_case.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_case.q
index 4f71e70e1f..43573bfb12 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_case.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_case.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION case;
DESCRIBE FUNCTION EXTENDED case;
@@ -27,7 +29,7 @@ SELECT CASE 1
WHEN 22 THEN 23
WHEN 21 THEN 24
END
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT CASE 1
WHEN 1 THEN 2
@@ -54,10 +56,27 @@ SELECT CASE 1
WHEN 22 THEN 23
WHEN 21 THEN 24
END
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
-- verify that short-circuiting is working correctly for CASE
-- we should never get to the ELSE branch, which would raise an exception
SELECT CASE 1 WHEN 1 THEN 'yo'
ELSE reflect('java.lang.String', 'bogus', 1) END
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
+
+-- Allow compatible types across WHEN comparands and return values
+SELECT CASE 1
+ WHEN 1 THEN 123.0BD
+ ELSE 0.0BD
+ END,
+ CASE 1
+ WHEN 1.0 THEN 123
+ WHEN 2 THEN 1.0
+ ELSE 222.02BD
+ END,
+ CASE 'abc'
+ WHEN cast('abc' as varchar(3)) THEN 'abcd'
+ WHEN 'efg' THEN cast('efgh' as varchar(10))
+ ELSE cast('ijkl' as char(4))
+ END
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_case_thrift.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_case_thrift.q
index 736bb053cd..2aa76f1f1d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_case_thrift.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_case_thrift.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
EXPLAIN
SELECT CASE src_thrift.lint[0]
WHEN 0 THEN src_thrift.lint[0] + 1
@@ -14,7 +16,7 @@ SELECT CASE src_thrift.lint[0]
WHEN '0' THEN src_thrift.lstring
ELSE NULL
END)[0]
-FROM src_thrift LIMIT 3;
+FROM src_thrift tablesample (3 rows);
SELECT CASE src_thrift.lint[0]
WHEN 0 THEN src_thrift.lint[0] + 1
@@ -31,4 +33,4 @@ SELECT CASE src_thrift.lint[0]
WHEN '0' THEN src_thrift.lstring
ELSE NULL
END)[0]
-FROM src_thrift LIMIT 3;
+FROM src_thrift tablesample (3 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_coalesce.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_coalesce.q
index 48ca29cbc3..d3c417babd 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_coalesce.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_coalesce.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION coalesce;
DESCRIBE FUNCTION EXTENDED coalesce;
@@ -20,7 +22,7 @@ SELECT COALESCE(1),
COALESCE(NULL, 2.0, 3.0),
COALESCE(2.0, NULL, 3.0),
COALESCE(IF(TRUE, NULL, 0), NULL)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT COALESCE(1),
COALESCE(1, 2),
@@ -40,7 +42,7 @@ SELECT COALESCE(1),
COALESCE(NULL, 2.0, 3.0),
COALESCE(2.0, NULL, 3.0),
COALESCE(IF(TRUE, NULL, 0), NULL)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
EXPLAIN
SELECT COALESCE(src_thrift.lint[1], 999),
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_compare_java_string.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_compare_java_string.q
index 6c12f81304..c7983b8eb2 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_compare_java_string.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_compare_java_string.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
EXPLAIN
CREATE TEMPORARY FUNCTION test_udf_get_java_string AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestGetJavaString';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_concat.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_concat.q
index f642f6a2d0..e35a1cfa17 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_concat.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_concat.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION concat;
DESCRIBE FUNCTION EXTENDED concat;
@@ -12,4 +14,10 @@ SELECT
concat(1, 2),
concat(1),
concat('1234', 'abc', 'extra argument')
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
+
+-- binary/mixed
+SELECT
+ concat(cast('ab' as binary), cast('cd' as binary)),
+ concat('ab', cast('cd' as binary))
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_concat_ws.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_concat_ws.q
index 6a0ce20dc3..538dfae06f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_concat_ws.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_concat_ws.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION concat_ws;
DESCRIBE FUNCTION EXTENDED concat_ws;
@@ -24,7 +26,7 @@ SELECT concat_ws('.', array('www', 'face', 'book', 'com'), '1234'),
concat_ws('_', array('www', 'face'), array('book', 'com', '1234')),
concat_ws('**', 'www', array('face'), array('book', 'com', '1234')),
concat_ws('[]', array('www'), 'face', array('book', 'com', '1234')),
- concat_ws('AAA', array('www'), array('face', 'book', 'com'), '1234') FROM dest1 LIMIT 1;
+ concat_ws('AAA', array('www'), array('face', 'book', 'com'), '1234') FROM dest1 tablesample (1 rows);
SELECT concat_ws('.', array('www', 'face', 'book', 'com'), '1234'),
concat_ws('-', 'www', array('face', 'book', 'com'), '1234'),
@@ -32,7 +34,7 @@ SELECT concat_ws('.', array('www', 'face', 'book', 'com'), '1234'),
concat_ws('_', array('www', 'face'), array('book', 'com', '1234')),
concat_ws('**', 'www', array('face'), array('book', 'com', '1234')),
concat_ws('[]', array('www'), 'face', array('book', 'com', '1234')),
- concat_ws('AAA', array('www'), array('face', 'book', 'com'), '1234') FROM dest1 LIMIT 1;
+ concat_ws('AAA', array('www'), array('face', 'book', 'com'), '1234') FROM dest1 tablesample (1 rows);
SELECT concat_ws(NULL, array('www', 'face', 'book', 'com'), '1234'),
concat_ws(NULL, 'www', array('face', 'book', 'com'), '1234'),
@@ -40,4 +42,4 @@ SELECT concat_ws(NULL, array('www', 'face', 'book', 'com'), '1234'),
concat_ws(NULL, array('www', 'face'), array('book', 'com', '1234')),
concat_ws(NULL, 'www', array('face'), array('book', 'com', '1234')),
concat_ws(NULL, array('www'), 'face', array('book', 'com', '1234')),
- concat_ws(NULL, array('www'), array('face', 'book', 'com'), '1234') FROM dest1 LIMIT 1;
+ concat_ws(NULL, array('www'), array('face', 'book', 'com'), '1234') FROM dest1 tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_conv.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_conv.q
index 212bcfb579..c6d6cf8600 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_conv.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_conv.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION conv;
DESCRIBE FUNCTION EXTENDED conv;
@@ -9,7 +11,7 @@ SELECT
conv('22', 10, 10),
conv('110011', 2, 16),
conv('facebook', 36, 16)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
-- Test negative numbers. If to_base is positive, the number should be handled
-- as a two's complement (64-bit)
@@ -18,7 +20,7 @@ SELECT
conv('1011', 2, -16),
conv('-1', 10, 16),
conv('-15', 10, 16)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
-- Test overflow. If a number is too large, the result should be -1 (if signed)
-- or MAX_LONG (if unsigned)
@@ -27,7 +29,7 @@ SELECT
conv('9223372036854775807', 36, -16),
conv('-9223372036854775807', 36, 16),
conv('-9223372036854775807', 36, -16)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
-- Test with invalid input. If one of the bases is invalid, the result should
-- be NULL. If there is an invalid digit in the number, the longest valid
@@ -37,7 +39,7 @@ SELECT
conv('131', 1, 5),
conv('515', 5, 100),
conv('10', -2, 2)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
-- Perform the same tests with number arguments.
@@ -45,31 +47,31 @@ SELECT
conv(4521, 10, 36),
conv(22, 10, 10),
conv(110011, 2, 16)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
conv(-641, 10, -10),
conv(1011, 2, -16),
conv(-1, 10, 16),
conv(-15, 10, 16)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
conv(9223372036854775807, 36, 16),
conv(9223372036854775807, 36, -16),
conv(-9223372036854775807, 36, 16),
conv(-9223372036854775807, 36, -16)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
conv(123455, 3, 10),
conv(131, 1, 5),
conv(515, 5, 100),
conv('10', -2, 2)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
-- Make sure that state is properly reset.
SELECT conv(key, 10, 16),
conv(key, 16, 10)
-FROM src LIMIT 3;
+FROM src tablesample (3 rows);
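Pulling the comments above together — a negative number converted to a positive base comes back as 64-bit two's complement, and an out-of-range base yields NULL; a short sketch:

select conv('-1', 10, 16),  -- 'FFFFFFFFFFFFFFFF'
       conv('131', 1, 5)    -- NULL: 1 is not a valid base
from src tablesample (1 rows);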
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_cos.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_cos.q
index 7887c4c260..11ef8d7d87 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_cos.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_cos.q
@@ -1,8 +1,10 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION cos;
DESCRIBE FUNCTION EXTENDED cos;
SELECT cos(null)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT cos(0.98), cos(1.57), cos(-0.5)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_current_database.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_current_database.q
new file mode 100644
index 0000000000..4ada035d87
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_current_database.q
@@ -0,0 +1,26 @@
+DESCRIBE FUNCTION current_database;
+
+explain
+select current_database();
+select current_database();
+
+create database xxx;
+use xxx;
+
+explain
+select current_database();
+select current_database();
+
+set hive.fetch.task.conversion=more;
+
+use default;
+
+explain
+select current_database();
+select current_database();
+
+use xxx;
+
+explain
+select current_database();
+select current_database();
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_degrees.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_degrees.q
index 014ca1c6e6..d5360fe3b2 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_degrees.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_degrees.q
@@ -1,14 +1,16 @@
-explain
-select degrees(PI()) FROM src LIMIT 1;
+set hive.fetch.task.conversion=more;
+
+explain
+select degrees(PI()) FROM src tablesample (1 rows);
-select degrees(PI()) FROM src LIMIT 1;
+select degrees(PI()) FROM src tablesample (1 rows);
DESCRIBE FUNCTION degrees;
DESCRIBE FUNCTION EXTENDED degrees;
explain
-select degrees(PI()) FROM src LIMIT 1;
+select degrees(PI()) FROM src tablesample (1 rows);
-select degrees(PI()) FROM src LIMIT 1;
+select degrees(PI()) FROM src tablesample (1 rows);
DESCRIBE FUNCTION degrees;
DESCRIBE FUNCTION EXTENDED degrees;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_div.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_div.q
index 4229e625a0..b0d2844d42 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_div.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_div.q
@@ -1,4 +1,6 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION div;
DESCRIBE FUNCTION EXTENDED div;
-SELECT 3 DIV 2 FROM SRC LIMIT 1;
+SELECT 3 DIV 2 FROM SRC tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_divide.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_divide.q
index dc4b2e7884..d36ba99118 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_divide.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_divide.q
@@ -1,4 +1,6 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION /;
DESCRIBE FUNCTION EXTENDED /;
-SELECT 3 / 2 FROM SRC LIMIT 1;
+SELECT 3 / 2 FROM SRC tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_elt.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_elt.q
index c32340ac89..fae764965a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_elt.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_elt.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION elt;
DESCRIBE FUNCTION EXTENDED elt;
@@ -13,7 +15,7 @@ SELECT elt(2, 'abc', 'defg'),
elt(null, 'abc', 'defg'),
elt(0, 'abc', 'defg'),
elt(3, 'abc', 'defg')
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT elt(2, 'abc', 'defg'),
elt(3, 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg'),
@@ -26,4 +28,4 @@ SELECT elt(2, 'abc', 'defg'),
elt(null, 'abc', 'defg'),
elt(0, 'abc', 'defg'),
elt(3, 'abc', 'defg')
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_equal.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_equal.q
index f5e9a7df4b..ea9b18bf1f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_equal.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_equal.q
@@ -1,12 +1,14 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION =;
DESCRIBE FUNCTION EXTENDED =;
DESCRIBE FUNCTION ==;
DESCRIBE FUNCTION EXTENDED ==;
-SELECT true=false, false=true, false=false, true=true, NULL=NULL, true=NULL, NULL=true, false=NULL, NULL=false FROM src LIMIT 1;
+SELECT true=false, false=true, false=false, true=true, NULL=NULL, true=NULL, NULL=true, false=NULL, NULL=false FROM src tablesample (1 rows);
DESCRIBE FUNCTION <=>;
DESCRIBE FUNCTION EXTENDED <=>;
-SELECT true<=>false, false<=>true, false<=>false, true<=>true, NULL<=>NULL, true<=>NULL, NULL<=>true, false<=>NULL, NULL<=>false FROM src LIMIT 1;
+SELECT true<=>false, false<=>true, false<=>false, true<=>true, NULL<=>NULL, true<=>NULL, NULL<=>true, false<=>NULL, NULL<=>false FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_explode.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_explode.q
index 19af288ff8..ae651644a7 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_explode.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_explode.q
@@ -1,22 +1,24 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION explode;
DESCRIBE FUNCTION EXTENDED explode;
-EXPLAIN EXTENDED SELECT explode(array(1,2,3)) AS myCol FROM src LIMIT 3;
-EXPLAIN EXTENDED SELECT a.myCol, count(1) FROM (SELECT explode(array(1,2,3)) AS myCol FROM src LIMIT 3) a GROUP BY a.myCol;
+EXPLAIN EXTENDED SELECT explode(array(1,2,3)) AS myCol FROM src tablesample (1 rows);
+EXPLAIN EXTENDED SELECT a.myCol, count(1) FROM (SELECT explode(array(1,2,3)) AS myCol FROM src tablesample (1 rows)) a GROUP BY a.myCol;
-SELECT explode(array(1,2,3)) AS myCol FROM src LIMIT 3;
-SELECT explode(array(1,2,3)) AS (myCol) FROM src LIMIT 3;
-SELECT a.myCol, count(1) FROM (SELECT explode(array(1,2,3)) AS myCol FROM src LIMIT 3) a GROUP BY a.myCol;
+SELECT explode(array(1,2,3)) AS myCol FROM src tablesample (1 rows);
+SELECT explode(array(1,2,3)) AS (myCol) FROM src tablesample (1 rows);
+SELECT a.myCol, count(1) FROM (SELECT explode(array(1,2,3)) AS myCol FROM src tablesample (1 rows)) a GROUP BY a.myCol;
-EXPLAIN EXTENDED SELECT explode(map(1,'one',2,'two',3,'three')) AS (key,val) FROM src LIMIT 3;
-EXPLAIN EXTENDED SELECT a.key, a.val, count(1) FROM (SELECT explode(map(1,'one',2,'two',3,'three')) AS (key,val) FROM src LIMIT 3) a GROUP BY a.key, a.val;
+EXPLAIN EXTENDED SELECT explode(map(1,'one',2,'two',3,'three')) AS (key,val) FROM src tablesample (1 rows);
+EXPLAIN EXTENDED SELECT a.key, a.val, count(1) FROM (SELECT explode(map(1,'one',2,'two',3,'three')) AS (key,val) FROM src tablesample (1 rows)) a GROUP BY a.key, a.val;
-SELECT explode(map(1,'one',2,'two',3,'three')) AS (key,val) FROM src LIMIT 3;
-SELECT a.key, a.val, count(1) FROM (SELECT explode(map(1,'one',2,'two',3,'three')) AS (key,val) FROM src LIMIT 3) a GROUP BY a.key, a.val;
+SELECT explode(map(1,'one',2,'two',3,'three')) AS (key,val) FROM src tablesample (1 rows);
+SELECT a.key, a.val, count(1) FROM (SELECT explode(map(1,'one',2,'two',3,'three')) AS (key,val) FROM src tablesample (1 rows)) a GROUP BY a.key, a.val;
drop table lazy_array_map;
create table lazy_array_map (map_col map<int,string>, array_col array<string>);
-INSERT OVERWRITE TABLE lazy_array_map select map(1,'one',2,'two',3,'three'), array('100','200','300') FROM src LIMIT 1;
+INSERT OVERWRITE TABLE lazy_array_map select map(1,'one',2,'two',3,'three'), array('100','200','300') FROM src tablesample (1 rows);
SELECT array_col, myCol from lazy_array_map lateral view explode(array_col) X AS myCol;
SELECT map_col, myKey, myValue from lazy_array_map lateral view explode(map_col) X AS myKey, myValue;
\ No newline at end of file
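A compact sketch of the lateral-view pattern behind the two final selects, using an inline hypothetical array rather than the lazy_array_map table:

select x.col
from (select array(1,2,3) as a from src tablesample (1 rows)) t
lateral view explode(t.a) x as col;  -- three rows: 1, 2, 3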
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_field.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_field.q
index e995f5cf3c..be92c024d3 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_field.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_field.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION field;
DESCRIBE FUNCTION EXTENDED field;
@@ -5,7 +7,7 @@ SELECT
field("x", "a", "b", "c", "d"),
field(NULL, "a", "b", "c", "d"),
field(0, 1, 2, 3, 4)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
field("a", "a", "b", "c", "d"),
@@ -13,7 +15,7 @@ SELECT
field("c", "a", "b", "c", "d"),
field("d", "a", "b", "c", "d"),
field("d", "a", "b", NULL, "d")
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
field(1, 1, 2, 3, 4),
@@ -21,11 +23,11 @@ SELECT
field(3, 1, 2, 3, 4),
field(4, 1, 2, 3, 4),
field(4, 1, 2, NULL, 4)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
CREATE TABLE test_table(col1 STRING, col2 STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE test_table;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE test_table;
select col1,col2,
field("66",col1),
@@ -42,7 +44,7 @@ from test_table where col1="86" or col1="66";
CREATE TABLE test_table1(col1 int, col2 string) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt' INTO TABLE test_table1;
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE test_table1;
select col1,col2,
field(66,col1),
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_find_in_set.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_find_in_set.q
index eac2c6e91c..72c65b4198 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_find_in_set.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_find_in_set.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION find_in_set;
DESCRIBE FUNCTION EXTENDED find_in_set;
@@ -6,18 +8,18 @@ FROM src1 SELECT find_in_set(src1.key,concat(src1.key,',',src1.value));
FROM src1 SELECT find_in_set(src1.key,concat(src1.key,',',src1.value));
-SELECT find_in_set('ab','ab,abc,abcde') FROM src1 LIMIT 1;
-SELECT find_in_set('ab','abc,ab,bbb') FROM src1 LIMIT 1;
-SELECT find_in_set('ab','def,abc,ab') FROM src1 LIMIT 1;
-SELECT find_in_set('ab','abc,abd,abf') FROM src1 LIMIT 1;
-SELECT find_in_set(null,'a,b,c') FROM src1 LIMIT 1;
-SELECT find_in_set('a',null) FROM src1 LIMIT 1;
-SELECT find_in_set('', '') FROM src1 LIMIT 1;
-SELECT find_in_set('',',') FROM src1 LIMIT 1;
-SELECT find_in_set('','a,,b') FROM src1 LIMIT 1;
-SELECT find_in_set('','a,b,') FROM src1 LIMIT 1;
-SELECT find_in_set(',','a,b,d,') FROM src1 LIMIT 1;
-SELECT find_in_set('a','') FROM src1 LIMIT 1;
-SELECT find_in_set('a,','a,b,c,d') FROM src1 LIMIT 1;
+SELECT find_in_set('ab','ab,abc,abcde') FROM src1 tablesample (1 rows);
+SELECT find_in_set('ab','abc,ab,bbb') FROM src1 tablesample (1 rows);
+SELECT find_in_set('ab','def,abc,ab') FROM src1 tablesample (1 rows);
+SELECT find_in_set('ab','abc,abd,abf') FROM src1 tablesample (1 rows);
+SELECT find_in_set(null,'a,b,c') FROM src1 tablesample (1 rows);
+SELECT find_in_set('a',null) FROM src1 tablesample (1 rows);
+SELECT find_in_set('', '') FROM src1 tablesample (1 rows);
+SELECT find_in_set('',',') FROM src1 tablesample (1 rows);
+SELECT find_in_set('','a,,b') FROM src1 tablesample (1 rows);
+SELECT find_in_set('','a,b,') FROM src1 tablesample (1 rows);
+SELECT find_in_set(',','a,b,d,') FROM src1 tablesample (1 rows);
+SELECT find_in_set('a','') FROM src1 tablesample (1 rows);
+SELECT find_in_set('a,','a,b,c,d') FROM src1 tablesample (1 rows);
SELECT * FROM src1 WHERE NOT find_in_set(key,'311,128,345,2,956')=0;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_format_number.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_format_number.q
index e2084cddf0..2504bd0b68 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_format_number.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_format_number.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
use default;
-- Test format_number() UDF
@@ -7,26 +9,26 @@ DESCRIBE FUNCTION EXTENDED format_number;
EXPLAIN
SELECT format_number(12332.123456, 4),
format_number(12332.1,4),
- format_number(12332.2,0) FROM src limit 1;
+ format_number(12332.2,0) FROM src tablesample (1 rows);
SELECT format_number(12332.123456, 4),
format_number(12332.1,4),
format_number(12332.2,0)
-FROM src limit 1;
+FROM src tablesample (1 rows);
-- positive numbers
SELECT format_number(0.123456789, 12),
format_number(12345678.123456789, 5),
format_number(1234567.123456789, 7),
format_number(123456.123456789, 0)
-FROM src limit 1;
+FROM src tablesample (1 rows);
-- negative numbers
SELECT format_number(-123456.123456789, 0),
format_number(-1234567.123456789, 2),
format_number(-0.123456789, 15),
format_number(-12345.123456789, 4)
-FROM src limit 1;
+FROM src tablesample (1 rows);
-- zeros
SELECT format_number(0.0, 4),
@@ -34,7 +36,7 @@ SELECT format_number(0.0, 4),
format_number(000.0000, 1),
format_number(00000.0000, 1),
format_number(-00.0, 4)
-FROM src limit 1;
+FROM src tablesample (1 rows);
-- integers
SELECT format_number(0, 0),
@@ -42,7 +44,7 @@ SELECT format_number(0, 0),
format_number(12, 2),
format_number(123, 5),
format_number(1234, 7)
-FROM src limit 1;
+FROM src tablesample (1 rows);
-- long and double boundary
-- 9223372036854775807 is LONG_MAX
@@ -54,4 +56,4 @@ SELECT format_number(-9223372036854775807, 10),
format_number(9223372036854775807, 20),
format_number(4.9E-324, 324),
format_number(1.7976931348623157E308, 308)
-FROM src limit 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_get_json_object.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_get_json_object.q
index 464f2df3dc..05f7f5a981 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_get_json_object.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_get_json_object.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION get_json_object;
DESCRIBE FUNCTION EXTENDED get_json_object;
@@ -5,6 +7,8 @@ CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE;
FROM src INSERT OVERWRITE TABLE dest1 SELECT ' abc ' WHERE src.key = 86;
+set hive.fetch.task.conversion=more;
+
EXPLAIN
SELECT get_json_object(src_json.json, '$.owner') FROM src_json;
@@ -33,8 +37,8 @@ SELECT get_json_object(src_json.json, '$.fb:testid') FROM src_json;
CREATE TABLE dest2(c1 STRING) STORED AS RCFILE;
-INSERT OVERWRITE TABLE dest2 SELECT '{"a":"b\nc"}' FROM src LIMIT 1;
+INSERT OVERWRITE TABLE dest2 SELECT '{"a":"b\nc"}' FROM src tablesample (1 rows);
SELECT * FROM dest2;
-SELECT get_json_object(c1, '$.a') FROM dest2;
\ No newline at end of file
+SELECT get_json_object(c1, '$.a') FROM dest2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_greaterthan.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_greaterthan.q
index aea110a943..230bd244ff 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_greaterthan.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_greaterthan.q
@@ -1,4 +1,6 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION >;
DESCRIBE FUNCTION EXTENDED >;
-SELECT true>false, false>true, false>false, true>true FROM src LIMIT 1;
\ No newline at end of file
+SELECT true>false, false>true, false>false, true>true FROM src tablesample (1 rows);
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_greaterthanorequal.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_greaterthanorequal.q
index 8de165b9ea..025eed7dd5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_greaterthanorequal.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_greaterthanorequal.q
@@ -1,4 +1,6 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION >=;
DESCRIBE FUNCTION EXTENDED >=;
-SELECT true>=false, false>=true, false>=false, true>=true FROM src LIMIT 1;
\ No newline at end of file
+SELECT true>=false, false>=true, false>=false, true>=true FROM src tablesample (1 rows);
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_hash.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_hash.q
index faf372218a..5814a1779b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_hash.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_hash.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION hash;
DESCRIBE FUNCTION EXTENDED hash;
@@ -7,11 +9,11 @@ SELECT hash(CAST(1 AS TINYINT)), hash(CAST(2 AS SMALLINT)),
hash(CAST(1.25 AS FLOAT)), hash(CAST(16.0 AS DOUBLE)),
hash('400'), hash('abc'), hash(TRUE), hash(FALSE),
hash(1, 2, 3)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT hash(CAST(1 AS TINYINT)), hash(CAST(2 AS SMALLINT)),
hash(3), hash(CAST('123456789012' AS BIGINT)),
hash(CAST(1.25 AS FLOAT)), hash(CAST(16.0 AS DOUBLE)),
hash('400'), hash('abc'), hash(TRUE), hash(FALSE),
hash(1, 2, 3)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_hex.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_hex.q
index 37e035ad42..0e5457965a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_hex.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_hex.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION hex;
DESCRIBE FUNCTION EXTENDED hex;
@@ -7,14 +9,14 @@ SELECT
hex('Facebook'),
hex('\0'),
hex('qwertyuiopasdfghjkl')
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
-- If the argument is a number, hex should convert it to hexadecimal.
SELECT
hex(1),
hex(0),
hex(4207849477)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
-- Negative numbers should be treated as two's complement (64 bit).
-SELECT hex(-5) FROM src LIMIT 1;
+SELECT hex(-5) FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_hour.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_hour.q
index c5c366daa8..b9811e6c6f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_hour.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_hour.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION hour;
DESCRIBE FUNCTION EXTENDED hour;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_if.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_if.q
index 4f7c8b4a36..d9285ff7ce 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_if.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_if.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION if;
DESCRIBE FUNCTION EXTENDED if;
@@ -8,7 +10,7 @@ SELECT IF(TRUE, 1, 2) AS COL1,
IF(2=2, 1, NULL) AS COL4,
IF(2=2, NULL, 1) AS COL5,
IF(IF(TRUE, NULL, FALSE), 1, 2) AS COL6
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT IF(TRUE, 1, 2) AS COL1,
@@ -17,7 +19,7 @@ SELECT IF(TRUE, 1, 2) AS COL1,
IF(2=2, 1, NULL) AS COL4,
IF(2=2, NULL, 1) AS COL5,
IF(IF(TRUE, NULL, FALSE), 1, 2) AS COL6
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
-- Type conversions
EXPLAIN
@@ -25,10 +27,10 @@ SELECT IF(TRUE, CAST(128 AS SMALLINT), CAST(1 AS TINYINT)) AS COL1,
IF(FALSE, 1, 1.1) AS COL2,
IF(FALSE, 1, 'ABC') AS COL3,
IF(FALSE, 'ABC', 12.3) AS COL4
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT IF(TRUE, CAST(128 AS SMALLINT), CAST(1 AS TINYINT)) AS COL1,
IF(FALSE, 1, 1.1) AS COL2,
IF(FALSE, 1, 'ABC') AS COL3,
IF(FALSE, 'ABC', 12.3) AS COL4
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_in.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_in.q
index 7577813874..a7ce3c6f0b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_in.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_in.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
SELECT 1 IN (1, 2, 3),
4 IN (1, 2, 3),
array(1,2,3) IN (array(1,2,3)),
@@ -8,6 +10,6 @@ SELECT 1 IN (1, 2, 3),
1 IN (1, 2, 3) OR false IN(false),
NULL IN (1, 2, 3),
4 IN (1, 2, 3, NULL),
- (1+3) IN (5, 6, (1+2) + 1) FROM src LIMIT 1;
+ (1+3) IN (5, 6, (1+2) + 1) FROM src tablesample (1 rows);
SELECT key FROM src WHERE key IN ("238", 86);
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_in_file.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_in_file.q
index 4da478908d..9d9efe8e23 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_in_file.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_in_file.q
@@ -1,12 +1,12 @@
DESCRIBE FUNCTION in_file;
EXPLAIN
-SELECT in_file("303", "../data/files/test2.dat"),
- in_file("304", "../data/files/test2.dat"),
- in_file(CAST(NULL AS STRING), "../data/files/test2.dat")
+SELECT in_file("303", "../../data/files/test2.dat"),
+ in_file("304", "../../data/files/test2.dat"),
+ in_file(CAST(NULL AS STRING), "../../data/files/test2.dat")
FROM src LIMIT 1;
-SELECT in_file("303", "../data/files/test2.dat"),
- in_file("304", "../data/files/test2.dat"),
- in_file(CAST(NULL AS STRING), "../data/files/test2.dat")
+SELECT in_file("303", "../../data/files/test2.dat"),
+ in_file("304", "../../data/files/test2.dat"),
+ in_file(CAST(NULL AS STRING), "../../data/files/test2.dat")
FROM src LIMIT 1;
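The `../data/files` paths become `../../data/files` throughout, presumably because the 0.13 harness runs the query tests one directory deeper than before, so the relative hop to the shared data files gains a level. The shape, as it now appears in every LOAD statement:

    LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE test_table;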
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_index.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_index.q
index 9079d0e574..6844f9d0bf 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_index.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_index.q
@@ -1,2 +1,3 @@
+set hive.support.quoted.identifiers=none;
DESCRIBE FUNCTION `index`;
DESCRIBE FUNCTION EXTENDED `index`;
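Hive 0.13 enables quoted-identifier support by default, under which a back-quoted token is parsed as a column identifier; setting `hive.support.quoted.identifiers=none` appears to restore the old parsing so that `index` can still name the UDF here. A sketch:

    set hive.support.quoted.identifiers=none;
    DESCRIBE FUNCTION `index`;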
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_inline.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_inline.q
index 39006f4b9b..95d55f71c1 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_inline.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_inline.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
describe function inline;
explain SELECT inline(
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_instr.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_instr.q
index 20ed8e4ea0..790a1049d1 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_instr.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_instr.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION instr;
DESCRIBE FUNCTION EXTENDED instr;
@@ -15,7 +17,7 @@ SELECT instr('abcd', 'abc'),
instr(CAST(16.0 AS DOUBLE), '.0'),
instr(null, 'abc'),
instr('abcd', null)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT instr('abcd', 'abc'),
instr('abcabc', 'ccc'),
@@ -30,4 +32,4 @@ SELECT instr('abcd', 'abc'),
instr(CAST(16.0 AS DOUBLE), '.0'),
instr(null, 'abc'),
instr('abcd', null)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_isnull_isnotnull.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_isnull_isnotnull.q
index d1569cc7f6..efb834efdc 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_isnull_isnotnull.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_isnull_isnotnull.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION isnull;
DESCRIBE FUNCTION EXTENDED isnull;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_java_method.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_java_method.q
index 2f28be1e9d..51280b2567 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_java_method.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_java_method.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION java_method;
DESCRIBE FUNCTION EXTENDED java_method;
@@ -11,7 +13,7 @@ SELECT java_method("java.lang.String", "valueOf", 1),
java_method("java.lang.Math", "round", 2.5),
java_method("java.lang.Math", "exp", 1.0),
java_method("java.lang.Math", "floor", 1.9)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT java_method("java.lang.String", "valueOf", 1),
@@ -21,5 +23,5 @@ SELECT java_method("java.lang.String", "valueOf", 1),
java_method("java.lang.Math", "round", 2.5),
java_method("java.lang.Math", "exp", 1.0),
java_method("java.lang.Math", "floor", 1.9)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_length.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_length.q
index b84307970d..4413751ae6 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_length.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_length.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION length;
DESCRIBE FUNCTION EXTENDED length;
@@ -9,6 +11,6 @@ DROP TABLE dest1;
-- Test with non-ascii characters.
CREATE TABLE dest1(name STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv4.txt' INTO TABLE dest1;
+LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1;
EXPLAIN SELECT length(dest1.name) FROM dest1;
SELECT length(dest1.name) FROM dest1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_lessthan.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_lessthan.q
index a2577006a8..03326777bf 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_lessthan.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_lessthan.q
@@ -1,4 +1,6 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION <;
DESCRIBE FUNCTION EXTENDED <;
-SELECT true<false, false<true, false<false, true<true FROM src LIMIT 1;
+SELECT true<false, false<true, false<false, true<true FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_lessthanorequal.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_lessthanorequal.q
index e741b12e72..ae7109fb92 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_lessthanorequal.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_lessthanorequal.q
@@ -1,4 +1,6 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION <=;
DESCRIBE FUNCTION EXTENDED <=;
-SELECT true<=false, false<=true, false<=false, true<=true FROM src LIMIT 1;
+SELECT true<=false, false<=true, false<=false, true<=true FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_like.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_like.q
index 02c2924fca..12983bdceb 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_like.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_like.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION like;
DESCRIBE FUNCTION EXTENDED like;
@@ -18,4 +20,4 @@ SELECT '1+2' LIKE '_+_',
'112' LIKE '1+_',
'|||' LIKE '|_|',
'+++' LIKE '1+_'
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_locate.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_locate.q
index 80148d0923..68216d597d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_locate.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_locate.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION locate;
DESCRIBE FUNCTION EXTENDED locate;
@@ -19,7 +21,7 @@ SELECT locate('abc', 'abcd'),
locate('abc', null),
locate('abc', 'abcd', null),
locate('abc', 'abcd', 'invalid number')
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT locate('abc', 'abcd'),
locate('ccc', 'abcabc'),
@@ -38,4 +40,4 @@ SELECT locate('abc', 'abcd'),
locate('abc', null),
locate('abc', 'abcd', null),
locate('abc', 'abcd', 'invalid number')
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_logic_java_boolean.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_logic_java_boolean.q
index 508f9fd558..a4aa6bc8e8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_logic_java_boolean.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_logic_java_boolean.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
EXPLAIN
CREATE TEMPORARY FUNCTION test_udf_get_java_boolean AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestGetJavaBoolean';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_lpad.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_lpad.q
index 8879231aee..937c92a29e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_lpad.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_lpad.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION lpad;
DESCRIBE FUNCTION EXTENDED lpad;
@@ -5,10 +7,10 @@ EXPLAIN SELECT
lpad('hi', 1, '?'),
lpad('hi', 5, '.'),
lpad('hi', 6, '123')
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
lpad('hi', 1, '?'),
lpad('hi', 5, '.'),
lpad('hi', 6, '123')
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_map.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_map.q
index e975131bd5..e1923b9ae0 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_map.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_map.q
@@ -1,8 +1,10 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION map;
DESCRIBE FUNCTION EXTENDED map;
EXPLAIN SELECT map(), map(1, "a", 2, "b", 3, "c"), map(1, 2, "a", "b"),
-map(1, "a", 2, "b", 3, "c")[2], map(1, 2, "a", "b")["a"], map(1, array("a"))[1][0] FROM src LIMIT 1;
+map(1, "a", 2, "b", 3, "c")[2], map(1, 2, "a", "b")["a"], map(1, array("a"))[1][0] FROM src tablesample (1 rows);
SELECT map(), map(1, "a", 2, "b", 3, "c"), map(1, 2, "a", "b"),
-map(1, "a", 2, "b", 3, "c")[2], map(1, 2, "a", "b")["a"], map(1, array("a"))[1][0] FROM src LIMIT 1;
+map(1, "a", 2, "b", 3, "c")[2], map(1, 2, "a", "b")["a"], map(1, array("a"))[1][0] FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_map_keys.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_map_keys.q
index 7ae8d78823..78778e73cd 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_map_keys.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_map_keys.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
use default;
-- Test map_keys() UDF
@@ -5,7 +7,7 @@ DESCRIBE FUNCTION map_keys;
DESCRIBE FUNCTION EXTENDED map_keys;
-- Evaluate function against INT valued keys
-SELECT map_keys(map(1, "a", 2, "b", 3, "c")) FROM src LIMIT 1;
+SELECT map_keys(map(1, "a", 2, "b", 3, "c")) FROM src tablesample (1 rows);
-- Evaluate function against STRING valued keys
-SELECT map_keys(map("a", 1, "b", 2, "c", 3)) FROM src LIMIT 1;
+SELECT map_keys(map("a", 1, "b", 2, "c", 3)) FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_map_values.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_map_values.q
index e25b9bc669..4b55873fb9 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_map_values.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_map_values.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
use default;
-- Test map_values() UDF
@@ -5,7 +7,7 @@ DESCRIBE FUNCTION map_values;
DESCRIBE FUNCTION EXTENDED map_values;
-- Evaluate function against STRING valued values
-SELECT map_values(map(1, "a", 2, "b", 3, "c")) FROM src LIMIT 1;
+SELECT map_values(map(1, "a", 2, "b", 3, "c")) FROM src tablesample (1 rows);
-- Evaluate function against INT valued keys
-SELECT map_values(map("a", 1, "b", 2, "c", 3)) FROM src LIMIT 1;
+SELECT map_values(map("a", 1, "b", 2, "c", 3)) FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_named_struct.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_named_struct.q
index bbf0f67d81..ad6fd7a021 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_named_struct.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_named_struct.q
@@ -1,9 +1,11 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION named_struct;
DESCRIBE FUNCTION EXTENDED named_struct;
EXPLAIN
SELECT named_struct("foo", 1, "bar", 2),
- named_struct("foo", 1, "bar", 2).foo FROM src LIMIT 1;
+ named_struct("foo", 1, "bar", 2).foo FROM src tablesample (1 rows);
SELECT named_struct("foo", 1, "bar", 2),
- named_struct("foo", 1, "bar", 2).foo FROM src LIMIT 1;
+ named_struct("foo", 1, "bar", 2).foo FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_negative.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_negative.q
index 6c06938214..b038c8cad5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_negative.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_negative.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION negative;
DESCRIBE FUNCTION EXTENDED negative;
@@ -5,9 +7,9 @@ DESCRIBE FUNCTION EXTENDED negative;
DESCRIBE FUNCTION -;
DESCRIBE FUNCTION EXTENDED -;
-select - null from src limit 1;
-select - cast(null as int) from src limit 1;
-select - cast(null as smallint) from src limit 1;
-select - cast(null as bigint) from src limit 1;
-select - cast(null as double) from src limit 1;
-select - cast(null as float) from src limit 1;
+select - null from src tablesample (1 rows);
+select - cast(null as int) from src tablesample (1 rows);
+select - cast(null as smallint) from src tablesample (1 rows);
+select - cast(null as bigint) from src tablesample (1 rows);
+select - cast(null as double) from src tablesample (1 rows);
+select - cast(null as float) from src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_notequal.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_notequal.q
index e1a15098db..138110f6ed 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_notequal.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_notequal.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION <>;
DESCRIBE FUNCTION EXTENDED <>;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_notop.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_notop.q
index 88396545de..dceab7edaa 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_notop.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_notop.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
SELECT 1 NOT IN (1, 2, 3),
4 NOT IN (1, 2, 3),
1 = 2 NOT IN (true, false),
@@ -7,4 +9,4 @@ SELECT 1 NOT IN (1, 2, 3),
"abc" NOT RLIKE "^bc",
"abc" NOT REGEXP "^ab",
"abc" NOT REGEXP "^bc",
- 1 IN (1, 2) AND "abc" NOT LIKE "bc%" FROM src LIMIT 1;
+ 1 IN (1, 2) AND "abc" NOT LIKE "bc%" FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_nvl.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_nvl.q
index 0133b4b90b..97162576df 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_nvl.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_nvl.q
@@ -1,3 +1,4 @@
+set hive.fetch.task.conversion=more;
DESCRIBE FUNCTION nvl;
DESCRIBE FUNCTION EXTENDED nvl;
@@ -5,9 +6,9 @@ DESCRIBE FUNCTION EXTENDED nvl;
EXPLAIN
SELECT NVL( 1 , 2 ) AS COL1,
NVL( NULL, 5 ) AS COL2
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT NVL( 1 , 2 ) AS COL1,
NVL( NULL, 5 ) AS COL2
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_pmod.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_pmod.q
index 9ff73d42b4..d42a2f337e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_pmod.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_pmod.q
@@ -1,20 +1,22 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION pmod;
DESCRIBE FUNCTION EXTENDED pmod;
SELECT pmod(null, null)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT pmod(-100,9), pmod(-50,101), pmod(-1000,29)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT pmod(100,19), pmod(50,125), pmod(300,15)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
-SELECT pmod(CAST(-100 AS TINYINT),CAST(9 AS TINYINT)), pmod(CAST(-50 AS TINYINT),CAST(101 AS TINYINT)), pmod(CAST(-100 AS TINYINT),CAST(29 AS TINYINT)) FROM src LIMIT 1;
-SELECT pmod(CAST(-100 AS SMALLINT),CAST(9 AS SMALLINT)), pmod(CAST(-50 AS SMALLINT),CAST(101 AS SMALLINT)), pmod(CAST(-100 AS SMALLINT),CAST(29 AS SMALLINT)) FROM src LIMIT 1;
-SELECT pmod(CAST(-100 AS BIGINT),CAST(9 AS BIGINT)), pmod(CAST(-50 AS BIGINT),CAST(101 AS BIGINT)), pmod(CAST(-100 AS BIGINT),CAST(29 AS BIGINT)) FROM src LIMIT 1;
+SELECT pmod(CAST(-100 AS TINYINT),CAST(9 AS TINYINT)), pmod(CAST(-50 AS TINYINT),CAST(101 AS TINYINT)), pmod(CAST(-100 AS TINYINT),CAST(29 AS TINYINT)) FROM src tablesample (1 rows);
+SELECT pmod(CAST(-100 AS SMALLINT),CAST(9 AS SMALLINT)), pmod(CAST(-50 AS SMALLINT),CAST(101 AS SMALLINT)), pmod(CAST(-100 AS SMALLINT),CAST(29 AS SMALLINT)) FROM src tablesample (1 rows);
+SELECT pmod(CAST(-100 AS BIGINT),CAST(9 AS BIGINT)), pmod(CAST(-50 AS BIGINT),CAST(101 AS BIGINT)), pmod(CAST(-100 AS BIGINT),CAST(29 AS BIGINT)) FROM src tablesample (1 rows);
-SELECT pmod(CAST(-100.91 AS FLOAT),CAST(9.8 AS FLOAT)), pmod(CAST(-50.1 AS FLOAT),CAST(101.8 AS FLOAT)), pmod(CAST(-100.91 AS FLOAT),CAST(29.75 AS FLOAT)) FROM src LIMIT 1;
-SELECT pmod(CAST(-100.91 AS DOUBLE),CAST(9.8 AS DOUBLE)), pmod(CAST(-50.1 AS DOUBLE),CAST(101.8 AS DOUBLE)), pmod(CAST(-100.91 AS DOUBLE),CAST(29.75 AS DOUBLE)) FROM src LIMIT 1;
-SELECT pmod(CAST(-100.91 AS DECIMAL),CAST(9.8 AS DECIMAL)), pmod(CAST(-50.1 AS DECIMAL),CAST(101.8 AS DECIMAL)), pmod(CAST(-100.91 AS DECIMAL),CAST(29.75 AS DECIMAL)) FROM src LIMIT 1;
+SELECT pmod(CAST(-100.91 AS FLOAT),CAST(9.8 AS FLOAT)), pmod(CAST(-50.1 AS FLOAT),CAST(101.8 AS FLOAT)), pmod(CAST(-100.91 AS FLOAT),CAST(29.75 AS FLOAT)) FROM src tablesample (1 rows);
+SELECT pmod(CAST(-100.91 AS DOUBLE),CAST(9.8 AS DOUBLE)), pmod(CAST(-50.1 AS DOUBLE),CAST(101.8 AS DOUBLE)), pmod(CAST(-100.91 AS DOUBLE),CAST(29.75 AS DOUBLE)) FROM src tablesample (1 rows);
+SELECT pmod(CAST(-100.91 AS DECIMAL(5,2)),CAST(9.8 AS DECIMAL(2,1))), pmod(CAST(-50.1 AS DECIMAL(3,1)),CAST(101.8 AS DECIMAL(4,1))), pmod(CAST(-100.91 AS DECIMAL(5,2)),CAST(29.75 AS DECIMAL(4,2))) FROM src tablesample (1 rows);
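The DECIMAL casts now carry explicit precision and scale. In Hive 0.13 a bare DECIMAL defaults to DECIMAL(10,0), so the old unparameterized casts would round away the fractional part. A hedged one-line check, assuming those defaults:

    SELECT CAST(-100.91 AS DECIMAL),       -- -101 under the DECIMAL(10,0) default
           CAST(-100.91 AS DECIMAL(5,2))   -- -100.91
    FROM src TABLESAMPLE (1 ROWS);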
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_printf.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_printf.q
index 99e89ccbf7..115e4e56f1 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_printf.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_printf.q
@@ -4,21 +4,23 @@ use default;
DESCRIBE FUNCTION printf;
DESCRIBE FUNCTION EXTENDED printf;
+set hive.fetch.task.conversion=more;
+
EXPLAIN
-SELECT printf("Hello World %d %s", 100, "days") FROM src LIMIT 1;
+SELECT printf("Hello World %d %s", 100, "days") FROM src tablesample (1 rows);
-- Test Primitive Types
-SELECT printf("Hello World %d %s", 100, "days") FROM src LIMIT 1;
-SELECT printf("All Type Test: %b, %c, %d, %e, %+10.4f, %g, %h, %s, %a", false, 65, 15000, 12.3400, 27183.240051, 2300.41, 50, "corret", 256.125) FROM src LIMIT 1;
+SELECT printf("Hello World %d %s", 100, "days") FROM src tablesample (1 rows);
+SELECT printf("All Type Test: %b, %c, %d, %e, %+10.4f, %g, %h, %s, %a", false, 65, 15000, 12.3400, 27183.240051, 2300.41, 50, "corret", 256.125) FROM src tablesample (1 rows);
-- Test NULL Values
-SELECT printf("Color %s, String Null: %s, number1 %d, number2 %05d, Integer Null: %d, hex %#x, float %5.2f Double Null: %f\n", "red", NULL, 123456, 89, NULL, 255, 3.14159, NULL) FROM src LIMIT 1;
+SELECT printf("Color %s, String Null: %s, number1 %d, number2 %05d, Integer Null: %d, hex %#x, float %5.2f Double Null: %f\n", "red", NULL, 123456, 89, NULL, 255, 3.14159, NULL) FROM src tablesample (1 rows);
-- Test Timestamp
create table timestamp_udf (t timestamp);
-from src
+from (select * from src tablesample (1 rows)) s
insert overwrite table timestamp_udf
- select '2011-05-06 07:08:09.1234567' limit 1;
+ select '2011-05-06 07:08:09.1234567';
select printf("timestamp: %s", t) from timestamp_udf;
drop table timestamp_udf;
@@ -27,7 +29,7 @@ CREATE TABLE binay_udf(key binary, value int)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '9'
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/string.txt' INTO TABLE binay_udf;
+LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE binay_udf;
create table dest1 (key binary, value int);
insert overwrite table dest1 select transform(*) using 'cat' as key binary, value int from binay_udf;
select value, printf("format key: %s", key) from dest1;
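For INSERT ... SELECT statements the one-row sample moves into a subquery over the source table, since TABLESAMPLE attaches to a table reference rather than to the insert's SELECT list. The resulting shape, as in the timestamp_udf hunk above:

    from (select * from src tablesample (1 rows)) s
    insert overwrite table timestamp_udf
      select '2011-05-06 07:08:09.1234567';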
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_radians.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_radians.q
index 001d1cf8b6..19242bd757 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_radians.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_radians.q
@@ -1,16 +1,18 @@
-explain
-select radians(57.2958) FROM src LIMIT 1;
+set hive.fetch.task.conversion=more;
+
+explain
+select radians(57.2958) FROM src tablesample (1 rows);
-select radians(57.2958) FROM src LIMIT 1;
-select radians(143.2394) FROM src LIMIT 1;
+select radians(57.2958) FROM src tablesample (1 rows);
+select radians(143.2394) FROM src tablesample (1 rows);
DESCRIBE FUNCTION radians;
DESCRIBE FUNCTION EXTENDED radians;
explain
-select radians(57.2958) FROM src LIMIT 1;
+select radians(57.2958) FROM src tablesample (1 rows);
-select radians(57.2958) FROM src LIMIT 1;
-select radians(143.2394) FROM src LIMIT 1;
+select radians(57.2958) FROM src tablesample (1 rows);
+select radians(143.2394) FROM src tablesample (1 rows);
DESCRIBE FUNCTION radians;
DESCRIBE FUNCTION EXTENDED radians;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_reflect.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_reflect.q
index f357ff5049..cef1e4a5d9 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_reflect.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_reflect.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION reflect;
DESCRIBE FUNCTION EXTENDED reflect;
@@ -10,7 +12,7 @@ SELECT reflect("java.lang.String", "valueOf", 1),
reflect("java.lang.Math", "exp", 1.0),
reflect("java.lang.Math", "floor", 1.9),
reflect("java.lang.Integer", "valueOf", key, 16)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT reflect("java.lang.String", "valueOf", 1),
@@ -21,4 +23,4 @@ SELECT reflect("java.lang.String", "valueOf", 1),
reflect("java.lang.Math", "exp", 1.0),
reflect("java.lang.Math", "floor", 1.9),
reflect("java.lang.Integer", "valueOf", key, 16)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_reflect2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_reflect2.q
index 9ffd755292..a65294b335 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_reflect2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_reflect2.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION reflect2;
DESCRIBE FUNCTION EXTENDED reflect2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_regexp.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_regexp.q
index 3aee10963d..12b685b32c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_regexp.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_regexp.q
@@ -1,6 +1,8 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION regexp;
DESCRIBE FUNCTION EXTENDED regexp;
SELECT 'fofo' REGEXP '^fo', 'fo\no' REGEXP '^fo\no$', 'Bn' REGEXP '^Ba*n', 'afofo' REGEXP 'fo',
'afofo' REGEXP '^fo', 'Baan' REGEXP '^Ba?n', 'axe' REGEXP 'pi|apa', 'pip' REGEXP '^(pi)*$'
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_repeat.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_repeat.q
index 162085f4c7..91474bac2a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_repeat.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_repeat.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION repeat;
DESCRIBE FUNCTION EXTENDED repeat;
@@ -6,11 +8,11 @@ EXPLAIN SELECT
repeat("", 4),
repeat("asd", 0),
repeat("asdf", -1)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
repeat("Facebook", 3),
repeat("", 4),
repeat("asd", 0),
repeat("asdf", -1)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_reverse.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_reverse.q
index 81f765ec59..89aafe3443 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_reverse.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_reverse.q
@@ -11,5 +11,5 @@ DROP TABLE dest1;
-- kv4.txt contains the text 0xE982B5E993AE, which should be reversed to
-- 0xE993AEE982B5
CREATE TABLE dest1(name STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv4.txt' INTO TABLE dest1;
+LOAD DATA LOCAL INPATH '../../data/files/kv4.txt' INTO TABLE dest1;
SELECT count(1) FROM dest1 WHERE reverse(dest1.name) = _UTF-8 0xE993AEE982B5;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_round.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_round.q
index 18ebba8708..88b22749a3 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_round.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_round.q
@@ -1,15 +1,17 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION round;
DESCRIBE FUNCTION EXTENDED round;
SELECT round(null), round(null, 0), round(125, null),
round(1.0/0.0, 0), round(power(-1.0,0.5), 0)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
round(55555), round(55555, 0), round(55555, 1), round(55555, 2), round(55555, 3),
round(55555, -1), round(55555, -2), round(55555, -3), round(55555, -4),
round(55555, -5), round(55555, -6), round(55555, -7), round(55555, -8)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
round(125.315), round(125.315, 0),
@@ -18,7 +20,7 @@ SELECT
round(-125.315), round(-125.315, 0),
round(-125.315, 1), round(-125.315, 2), round(-125.315, 3), round(-125.315, 4),
round(-125.315, -1), round(-125.315, -2), round(-125.315, -3), round(-125.315, -4)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
round(3.141592653589793, -15), round(3.141592653589793, -16),
@@ -38,7 +40,7 @@ SELECT
round(3.141592653589793, 12), round(3.141592653589793, 13),
round(3.141592653589793, 13), round(3.141592653589793, 14),
round(3.141592653589793, 15), round(3.141592653589793, 16)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
-SELECT round(1809242.3151111344, 9), round(-1809242.3151111344, 9)
-FROM src LIMIT 1;
+SELECT round(1809242.3151111344, 9), round(-1809242.3151111344, 9), round(1809242.3151111344BD, 9), round(-1809242.3151111344BD, 9)
+FROM src tablesample (1 rows);
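The added BD suffix types a literal as DECIMAL rather than DOUBLE, so rounding to nine fractional digits stays exact instead of passing through a binary floating-point approximation. A minimal sketch:

    SELECT round(1809242.3151111344, 9),      -- DOUBLE arithmetic
           round(1809242.3151111344BD, 9)     -- DECIMAL arithmetic, exact
    FROM src TABLESAMPLE (1 ROWS);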
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_round_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_round_2.q
index 6be30855ae..43988c1225 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_round_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_round_2.q
@@ -1,8 +1,10 @@
--- test for NaN (not-a-number)
+set hive.fetch.task.conversion=more;
+
+-- test for NaN (not-a-number)
create table tstTbl1(n double);
insert overwrite table tstTbl1
-select 'NaN' from src limit 1;
+select 'NaN' from src tablesample (1 rows);
select * from tstTbl1;
@@ -10,4 +12,4 @@ select round(n, 1) from tstTbl1;
select round(n) from tstTbl1;
-- test for Infinity
-select round(1/0), round(1/0, 2), round(1.0/0.0), round(1.0/0.0, 2) from src limit 1;
+select round(1/0), round(1/0, 2), round(1.0/0.0), round(1.0/0.0, 2) from src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_round_3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_round_3.q
index 50a1f44419..f042b6f3fa 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_round_3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_round_3.q
@@ -1,14 +1,16 @@
+set hive.fetch.task.conversion=more;
+
-- test for TINYINT
-select round(-128), round(127), round(0) from src limit 1;
+select round(-128), round(127), round(0) from src tablesample (1 rows);
-- test for SMALLINT
-select round(-32768), round(32767), round(-129), round(128) from src limit 1;
+select round(-32768), round(32767), round(-129), round(128) from src tablesample (1 rows);
-- test for INT
-select round(cast(negative(pow(2, 31)) as INT)), round(cast((pow(2, 31) - 1) as INT)), round(-32769), round(32768) from src limit 1;
+select round(cast(negative(pow(2, 31)) as INT)), round(cast((pow(2, 31) - 1) as INT)), round(-32769), round(32768) from src tablesample (1 rows);
-- test for BIGINT
-select round(cast(negative(pow(2, 63)) as BIGINT)), round(cast((pow(2, 63) - 1) as BIGINT)), round(cast(negative(pow(2, 31) + 1) as BIGINT)), round(cast(pow(2, 31) as BIGINT)) from src limit 1;
+select round(cast(negative(pow(2, 63)) as BIGINT)), round(cast((pow(2, 63) - 1) as BIGINT)), round(cast(negative(pow(2, 31) + 1) as BIGINT)), round(cast(pow(2, 31) as BIGINT)) from src tablesample (1 rows);
-- test for DOUBLE
-select round(126.1), round(126.7), round(32766.1), round(32766.7) from src limit 1;
+select round(126.1), round(126.7), round(32766.1), round(32766.7) from src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_rpad.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_rpad.q
index 01e5fbd429..4ee69e8985 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_rpad.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_rpad.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION rpad;
DESCRIBE FUNCTION EXTENDED rpad;
@@ -5,10 +7,10 @@ EXPLAIN SELECT
rpad('hi', 1, '?'),
rpad('hi', 5, '.'),
rpad('hi', 6, '123')
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
rpad('hi', 1, '?'),
rpad('hi', 5, '.'),
rpad('hi', 6, '123')
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_second.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_second.q
index 1943188086..f63426d7e7 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_second.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_second.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION second;
DESCRIBE FUNCTION EXTENDED second;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_sign.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_sign.q
index abceb3439e..b1602e8a25 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_sign.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_sign.q
@@ -1,20 +1,22 @@
-explain
-select sign(0) FROM src LIMIT 1;
-select sign(0) FROM src LIMIT 1;
+set hive.fetch.task.conversion=more;
+
+explain
+select sign(0) FROM src tablesample (1 rows);
+select sign(0) FROM src tablesample (1 rows);
-select sign(-45) FROM src LIMIT 1;
+select sign(-45) FROM src tablesample (1 rows);
-select sign(46) FROM src LIMIT 1;
+select sign(46) FROM src tablesample (1 rows);
DESCRIBE FUNCTION sign;
DESCRIBE FUNCTION EXTENDED sign;
explain
-select sign(0) FROM src LIMIT 1;
-select sign(0) FROM src LIMIT 1;
+select sign(0) FROM src tablesample (1 rows);
+select sign(0) FROM src tablesample (1 rows);
-select sign(-45) FROM src LIMIT 1;
+select sign(-45) FROM src tablesample (1 rows);
-select sign(46) FROM src LIMIT 1;
+select sign(46) FROM src tablesample (1 rows);
DESCRIBE FUNCTION sign;
DESCRIBE FUNCTION EXTENDED sign;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_sin.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_sin.q
index abb7cac8da..79745be772 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_sin.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_sin.q
@@ -1,8 +1,10 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION sin;
DESCRIBE FUNCTION EXTENDED sin;
SELECT sin(null)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT sin(0.98), sin(1.57), sin(-0.5)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_size.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_size.q
index 8aaa68a6e3..f6f76a30e7 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_size.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_size.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION size;
DESCRIBE FUNCTION EXTENDED size;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_sort_array.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_sort_array.q
index ef0973212a..313bcf8a1e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_sort_array.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_sort_array.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
use default;
-- Test sort_array() UDF
@@ -6,16 +8,16 @@ DESCRIBE FUNCTION EXTENDED sort_array;
-- Evaluate function against STRING valued keys
EXPLAIN
-SELECT sort_array(array("b", "d", "c", "a")) FROM src LIMIT 1;
+SELECT sort_array(array("b", "d", "c", "a")) FROM src tablesample (1 rows);
-SELECT sort_array(array("f", "a", "g", "c", "b", "d", "e")) FROM src LIMIT 1;
-SELECT sort_array(sort_array(array("hadoop distributed file system", "enterprise databases", "hadoop map-reduce"))) FROM src LIMIT 1;
+SELECT sort_array(array("f", "a", "g", "c", "b", "d", "e")) FROM src tablesample (1 rows);
+SELECT sort_array(sort_array(array("hadoop distributed file system", "enterprise databases", "hadoop map-reduce"))) FROM src tablesample (1 rows);
-- Evaluate function against INT valued keys
-SELECT sort_array(array(2, 9, 7, 3, 5, 4, 1, 6, 8)) FROM src LIMIT 1;
+SELECT sort_array(array(2, 9, 7, 3, 5, 4, 1, 6, 8)) FROM src tablesample (1 rows);
-- Evaluate function against FLOAT valued keys
-SELECT sort_array(sort_array(array(2.333, 9, 1.325, 2.003, 0.777, -3.445, 1))) FROM src LIMIT 1;
+SELECT sort_array(sort_array(array(2.333, 9, 1.325, 2.003, 0.777, -3.445, 1))) FROM src tablesample (1 rows);
-- Test it against data in a table.
CREATE TABLE dest1 (
@@ -30,7 +32,7 @@ CREATE TABLE dest1 (
timestamps ARRAY<TIMESTAMP>
) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/primitive_type_arrays.txt' OVERWRITE INTO TABLE dest1;
+LOAD DATA LOCAL INPATH '../../data/files/primitive_type_arrays.txt' OVERWRITE INTO TABLE dest1;
SELECT sort_array(tinyints), sort_array(smallints), sort_array(ints),
sort_array(bigints), sort_array(booleans), sort_array(floats),
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_space.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_space.q
index cf6466fb63..cc616f784f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_space.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_space.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION space;
DESCRIBE FUNCTION EXTENDED space;
@@ -7,7 +9,7 @@ EXPLAIN SELECT
space(1),
space(-1),
space(-100)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
length(space(10)),
@@ -15,7 +17,7 @@ SELECT
length(space(1)),
length(space(-1)),
length(space(-100))
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
space(10),
@@ -23,5 +25,5 @@ SELECT
space(1),
space(-1),
space(-100)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_split.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_split.q
index f79901736c..55919eac74 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_split.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_split.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION split;
DESCRIBE FUNCTION EXTENDED split;
@@ -6,11 +8,11 @@ EXPLAIN SELECT
split('oneAtwoBthreeC', '[ABC]'),
split('', '.'),
split(50401020, 0)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
split('a b c', ' '),
split('oneAtwoBthreeC', '[ABC]'),
split('', '.'),
split(50401020, 0)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_struct.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_struct.q
index 3ee19c8369..ee2135b509 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_struct.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_struct.q
@@ -1,9 +1,11 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION struct;
DESCRIBE FUNCTION EXTENDED struct;
EXPLAIN
SELECT struct(1), struct(1, "a"), struct(1, "b", 1.5).col1, struct(1, struct("a", 1.5)).col2.col1
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT struct(1), struct(1, "a"), struct(1, "b", 1.5).col1, struct(1, struct("a", 1.5)).col2.col1
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_substr.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_substr.q
index 32757bef51..2d04f904bb 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_substr.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_substr.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION substr;
DESCRIBE FUNCTION EXTENDED substr;
@@ -5,7 +7,7 @@ SELECT
substr(null, 1), substr(null, 1, 1),
substr('ABC', null), substr('ABC', null, 1),
substr('ABC', 1, null)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
substr('ABC', 1, 0), substr('ABC', 1, -1), substr('ABC', 2, -100),
@@ -14,7 +16,7 @@ SELECT
substr('ABC', 100), substr('ABC', 100, 100),
substr('ABC', -100), substr('ABC', -100, 100),
substr('ABC', 2147483647), substr('ABC', 2147483647, 2147483647)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
substr('ABCDEFG', 3, 4), substr('ABCDEFG', -5, 4),
@@ -22,7 +24,7 @@ SELECT
substr('ABC', 0), substr('ABC', 1), substr('ABC', 2), substr('ABC', 3),
substr('ABC', 1, 2147483647), substr('ABC', 2, 2147483647),
substr('A', 0), substr('A', 1), substr('A', -1)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
substr('ABC', 0, 1), substr('ABC', 0, 2), substr('ABC', 0, 3), substr('ABC', 0, 4),
@@ -30,14 +32,14 @@ SELECT
substr('ABC', 2, 1), substr('ABC', 2, 2), substr('ABC', 2, 3), substr('ABC', 2, 4),
substr('ABC', 3, 1), substr('ABC', 3, 2), substr('ABC', 3, 3), substr('ABC', 3, 4),
substr('ABC', 4, 1)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT
substr('ABC', -1, 1), substr('ABC', -1, 2), substr('ABC', -1, 3), substr('ABC', -1, 4),
substr('ABC', -2, 1), substr('ABC', -2, 2), substr('ABC', -2, 3), substr('ABC', -2, 4),
substr('ABC', -3, 1), substr('ABC', -3, 2), substr('ABC', -3, 3), substr('ABC', -3, 4),
substr('ABC', -4, 1)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
-- substring() is a synonym of substr(), so just perform some basic tests
SELECT
@@ -46,7 +48,7 @@ SELECT
substring('ABC', 0), substring('ABC', 1), substring('ABC', 2), substring('ABC', 3),
substring('ABC', 1, 2147483647), substring('ABC', 2, 2147483647),
substring('A', 0), substring('A', 1), substring('A', -1)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
-- test for binary substr
SELECT
@@ -63,7 +65,7 @@ SELECT
substr(ABC, -3, 1), substr(ABC, -3, 2), substr(ABC, -3, 3), substr(ABC, -3, 4),
substr(ABC, -4, 1)
FROM (
- select CAST(concat(substr(value, 1, 0), 'ABC') as BINARY) as ABC from src LIMIT 1
+ select CAST(concat(substr(value, 1, 0), 'ABC') as BINARY) as ABC from src tablesample (1 rows)
) X;
-- test UTF-8 substr
@@ -72,4 +74,4 @@ SELECT
substr("abc 玩", 5),
substr("abc 玩玩玩 abc", 5),
substr("abc 玩玩玩 abc", 5, 3)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_tan.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_tan.q
index f103da9ecb..3980fe83fb 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_tan.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_tan.q
@@ -1,16 +1,18 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION tan;
DESCRIBE FUNCTION EXTENDED tan;
SELECT tan(null)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT tan(1), tan(6), tan(-1.0)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
DESCRIBE FUNCTION tan;
DESCRIBE FUNCTION EXTENDED tan;
SELECT tan(null)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT tan(1), tan(6), tan(-1.0)
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_testlength.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_testlength.q
index 322a061d64..c94a52133d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_testlength.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_testlength.q
@@ -1,12 +1,10 @@
+set hive.fetch.task.conversion=more;
+
EXPLAIN
CREATE TEMPORARY FUNCTION testlength AS 'org.apache.hadoop.hive.ql.udf.UDFTestLength';
CREATE TEMPORARY FUNCTION testlength AS 'org.apache.hadoop.hive.ql.udf.UDFTestLength';
-CREATE TABLE dest1(len INT);
-
-FROM src INSERT OVERWRITE TABLE dest1 SELECT testlength(src.value);
-
-SELECT dest1.* FROM dest1;
+SELECT testlength(src.value) FROM src;
DROP TEMPORARY FUNCTION testlength;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_testlength2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_testlength2.q
index 6de270902f..27e46c24a8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_testlength2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_testlength2.q
@@ -1,12 +1,10 @@
+set hive.fetch.task.conversion=more;
+
EXPLAIN
CREATE TEMPORARY FUNCTION testlength2 AS 'org.apache.hadoop.hive.ql.udf.UDFTestLength2';
CREATE TEMPORARY FUNCTION testlength2 AS 'org.apache.hadoop.hive.ql.udf.UDFTestLength2';
-CREATE TABLE dest1(len INT);
-
-FROM src INSERT OVERWRITE TABLE dest1 SELECT testlength2(src.value);
-
-SELECT dest1.* FROM dest1;
+SELECT testlength2(src.value) FROM src;
DROP TEMPORARY FUNCTION testlength2;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_boolean.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_boolean.q
index ca23f719f9..8bea7abcbc 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_boolean.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_boolean.q
@@ -1,44 +1,46 @@
--- 'true' cases:
+set hive.fetch.task.conversion=more;
-SELECT CAST(CAST(1 AS TINYINT) AS BOOLEAN) FROM src LIMIT 1;
-SELECT CAST(CAST(2 AS SMALLINT) AS BOOLEAN) FROM src LIMIT 1;
-SELECT CAST(CAST(-4 AS INT) AS BOOLEAN) FROM src LIMIT 1;
-SELECT CAST(CAST(-444 AS BIGINT) AS BOOLEAN) FROM src LIMIT 1;
+-- 'true' cases:
-SELECT CAST(CAST(7.0 AS FLOAT) AS BOOLEAN) FROM src LIMIT 1;
-SELECT CAST(CAST(-8.0 AS DOUBLE) AS BOOLEAN) FROM src LIMIT 1;
-SELECT CAST(CAST(-99.0 AS DECIMAL) AS BOOLEAN) FROM src LIMIT 1;
+SELECT CAST(CAST(1 AS TINYINT) AS BOOLEAN) FROM src tablesample (1 rows);
+SELECT CAST(CAST(2 AS SMALLINT) AS BOOLEAN) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-4 AS INT) AS BOOLEAN) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-444 AS BIGINT) AS BOOLEAN) FROM src tablesample (1 rows);
-SELECT CAST(CAST('Foo' AS STRING) AS BOOLEAN) FROM src LIMIT 1;
+SELECT CAST(CAST(7.0 AS FLOAT) AS BOOLEAN) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-8.0 AS DOUBLE) AS BOOLEAN) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-99.0 AS DECIMAL) AS BOOLEAN) FROM src tablesample (1 rows);
-SELECT CAST(CAST('2011-05-06 07:08:09' as timestamp) AS BOOLEAN) FROM src LIMIT 1;
+SELECT CAST(CAST('Foo' AS STRING) AS BOOLEAN) FROM src tablesample (1 rows);
+
+SELECT CAST(CAST('2011-05-06 07:08:09' as timestamp) AS BOOLEAN) FROM src tablesample (1 rows);
-- 'false' cases:
-SELECT CAST(CAST(0 AS TINYINT) AS BOOLEAN) FROM src LIMIT 1;
-SELECT CAST(CAST(0 AS SMALLINT) AS BOOLEAN) FROM src LIMIT 1;
-SELECT CAST(CAST(0 AS INT) AS BOOLEAN) FROM src LIMIT 1;
-SELECT CAST(CAST(0 AS BIGINT) AS BOOLEAN) FROM src LIMIT 1;
+SELECT CAST(CAST(0 AS TINYINT) AS BOOLEAN) FROM src tablesample (1 rows);
+SELECT CAST(CAST(0 AS SMALLINT) AS BOOLEAN) FROM src tablesample (1 rows);
+SELECT CAST(CAST(0 AS INT) AS BOOLEAN) FROM src tablesample (1 rows);
+SELECT CAST(CAST(0 AS BIGINT) AS BOOLEAN) FROM src tablesample (1 rows);
-SELECT CAST(CAST(0.0 AS FLOAT) AS BOOLEAN) FROM src LIMIT 1;
-SELECT CAST(CAST(0.0 AS DOUBLE) AS BOOLEAN) FROM src LIMIT 1;
-SELECT CAST(CAST(0.0 AS DECIMAL) AS BOOLEAN) FROM src LIMIT 1;
+SELECT CAST(CAST(0.0 AS FLOAT) AS BOOLEAN) FROM src tablesample (1 rows);
+SELECT CAST(CAST(0.0 AS DOUBLE) AS BOOLEAN) FROM src tablesample (1 rows);
+SELECT CAST(CAST(0.0 AS DECIMAL) AS BOOLEAN) FROM src tablesample (1 rows);
-SELECT CAST(CAST('' AS STRING) AS BOOLEAN) FROM src LIMIT 1;
+SELECT CAST(CAST('' AS STRING) AS BOOLEAN) FROM src tablesample (1 rows);
-SELECT CAST(CAST(0 as timestamp) AS BOOLEAN) FROM src LIMIT 1;
+SELECT CAST(CAST(0 as timestamp) AS BOOLEAN) FROM src tablesample (1 rows);
-- 'NULL' cases:
-SELECT CAST(NULL AS BOOLEAN) FROM src LIMIT 1;
+SELECT CAST(NULL AS BOOLEAN) FROM src tablesample (1 rows);
-SELECT CAST(CAST(NULL AS TINYINT) AS BOOLEAN) FROM src LIMIT 1;
-SELECT CAST(CAST(NULL AS SMALLINT) AS BOOLEAN) FROM src LIMIT 1;
-SELECT CAST(CAST(NULL AS INT) AS BOOLEAN) FROM src LIMIT 1;
-SELECT CAST(CAST(NULL AS BIGINT) AS BOOLEAN) FROM src LIMIT 1;
+SELECT CAST(CAST(NULL AS TINYINT) AS BOOLEAN) FROM src tablesample (1 rows);
+SELECT CAST(CAST(NULL AS SMALLINT) AS BOOLEAN) FROM src tablesample (1 rows);
+SELECT CAST(CAST(NULL AS INT) AS BOOLEAN) FROM src tablesample (1 rows);
+SELECT CAST(CAST(NULL AS BIGINT) AS BOOLEAN) FROM src tablesample (1 rows);
-SELECT CAST(CAST(NULL AS FLOAT) AS BOOLEAN) FROM src LIMIT 1;
-SELECT CAST(CAST(NULL AS DOUBLE) AS BOOLEAN) FROM src LIMIT 1;
-SELECT CAST(CAST(NULL AS DECIMAL) AS BOOLEAN) FROM src LIMIT 1;
+SELECT CAST(CAST(NULL AS FLOAT) AS BOOLEAN) FROM src tablesample (1 rows);
+SELECT CAST(CAST(NULL AS DOUBLE) AS BOOLEAN) FROM src tablesample (1 rows);
+SELECT CAST(CAST(NULL AS DECIMAL) AS BOOLEAN) FROM src tablesample (1 rows);
-SELECT CAST(CAST(NULL AS STRING) AS BOOLEAN) FROM src LIMIT 1;
-SELECT CAST(CAST(NULL as timestamp) AS BOOLEAN) FROM src LIMIT 1;
+SELECT CAST(CAST(NULL AS STRING) AS BOOLEAN) FROM src tablesample (1 rows);
+SELECT CAST(CAST(NULL as timestamp) AS BOOLEAN) FROM src tablesample (1 rows);
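Read as a group, the cases above pin down Hive's boolean coercion rules: any non-zero numeric, any non-empty string ('Foo' included), and a non-epoch timestamp all cast to true; zero in every numeric width, the empty string, and timestamp 0 cast to false; and NULL of any type survives the cast as NULL. Three representative probes:

SELECT CAST(CAST(-4 AS INT) AS BOOLEAN) FROM src tablesample (1 rows);    -- true
SELECT CAST(CAST('' AS STRING) AS BOOLEAN) FROM src tablesample (1 rows); -- false
SELECT CAST(CAST(NULL AS INT) AS BOOLEAN) FROM src tablesample (1 rows);  -- NULL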
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_byte.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_byte.q
index ded930d111..aa0a250e46 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_byte.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_byte.q
@@ -1,15 +1,17 @@
--- Conversion of main primitive types to Byte type:
-SELECT CAST(NULL AS TINYINT) FROM src LIMIT 1;
+set hive.fetch.task.conversion=more;
-SELECT CAST(TRUE AS TINYINT) FROM src LIMIT 1;
+-- Conversion of main primitive types to Byte type:
+SELECT CAST(NULL AS TINYINT) FROM src tablesample (1 rows);
-SELECT CAST(CAST(-18 AS SMALLINT) AS TINYINT) FROM src LIMIT 1;
-SELECT CAST(-129 AS TINYINT) FROM src LIMIT 1;
-SELECT CAST(CAST(-1025 AS BIGINT) AS TINYINT) FROM src LIMIT 1;
+SELECT CAST(TRUE AS TINYINT) FROM src tablesample (1 rows);
-SELECT CAST(CAST(-3.14 AS DOUBLE) AS TINYINT) FROM src LIMIT 1;
-SELECT CAST(CAST(-3.14 AS FLOAT) AS TINYINT) FROM src LIMIT 1;
-SELECT CAST(CAST(-3.14 AS DECIMAL) AS TINYINT) FROM src LIMIT 1;
+SELECT CAST(CAST(-18 AS SMALLINT) AS TINYINT) FROM src tablesample (1 rows);
+SELECT CAST(-129 AS TINYINT) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-1025 AS BIGINT) AS TINYINT) FROM src tablesample (1 rows);
-SELECT CAST('-38' AS TINYINT) FROM src LIMIT 1;
+SELECT CAST(CAST(-3.14 AS DOUBLE) AS TINYINT) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-3.14 AS FLOAT) AS TINYINT) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-3.14 AS DECIMAL) AS TINYINT) FROM src tablesample (1 rows);
+
+SELECT CAST('-38' AS TINYINT) FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_double.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_double.q
index b0a248ad70..005ec9d24e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_double.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_double.q
@@ -1,15 +1,17 @@
--- Conversion of main primitive types to Double type:
-SELECT CAST(NULL AS DOUBLE) FROM src LIMIT 1;
+set hive.fetch.task.conversion=more;
-SELECT CAST(TRUE AS DOUBLE) FROM src LIMIT 1;
+-- Conversion of main primitive types to Double type:
+SELECT CAST(NULL AS DOUBLE) FROM src tablesample (1 rows);
-SELECT CAST(CAST(-7 AS TINYINT) AS DOUBLE) FROM src LIMIT 1;
-SELECT CAST(CAST(-18 AS SMALLINT) AS DOUBLE) FROM src LIMIT 1;
-SELECT CAST(-129 AS DOUBLE) FROM src LIMIT 1;
-SELECT CAST(CAST(-1025 AS BIGINT) AS DOUBLE) FROM src LIMIT 1;
+SELECT CAST(TRUE AS DOUBLE) FROM src tablesample (1 rows);
-SELECT CAST(CAST(-3.14 AS FLOAT) AS DOUBLE) FROM src LIMIT 1;
-SELECT CAST(CAST(-3.14 AS DECIMAL) AS DOUBLE) FROM src LIMIT 1;
+SELECT CAST(CAST(-7 AS TINYINT) AS DOUBLE) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-18 AS SMALLINT) AS DOUBLE) FROM src tablesample (1 rows);
+SELECT CAST(-129 AS DOUBLE) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-1025 AS BIGINT) AS DOUBLE) FROM src tablesample (1 rows);
-SELECT CAST('-38.14' AS DOUBLE) FROM src LIMIT 1;
+SELECT CAST(CAST(-3.14 AS FLOAT) AS DOUBLE) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-3.14 AS DECIMAL(3,2)) AS DOUBLE) FROM src tablesample (1 rows);
+
+SELECT CAST('-38.14' AS DOUBLE) FROM src tablesample (1 rows);
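Alongside the mechanical rewrites, the decimal probe gains an explicit precision and scale: DECIMAL becomes DECIMAL(3,2). In Hive 0.13 a bare DECIMAL defaults to (10,0), so -3.14 would lose its fraction before the outer cast; (3,2) preserves the value the golden file asserts. The integral conversions (udf_to_byte, udf_to_long, udf_to_short) keep the bare DECIMAL, presumably because their targets discard the fraction anyway. In isolation:

-- bare DECIMAL means DECIMAL(10,0) under Hive 13: the fraction is gone before the outer cast
SELECT CAST(CAST(-3.14 AS DECIMAL) AS DOUBLE) FROM src tablesample (1 rows);
-- pinned precision and scale keep -3.14 intact
SELECT CAST(CAST(-3.14 AS DECIMAL(3,2)) AS DOUBLE) FROM src tablesample (1 rows);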
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_float.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_float.q
index c91d18cc2f..95671f15fe 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_float.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_float.q
@@ -1,15 +1,17 @@
--- Conversion of main primitive types to Float type:
-SELECT CAST(NULL AS FLOAT) FROM src LIMIT 1;
+set hive.fetch.task.conversion=more;
-SELECT CAST(TRUE AS FLOAT) FROM src LIMIT 1;
+-- Conversion of main primitive types to Float type:
+SELECT CAST(NULL AS FLOAT) FROM src tablesample (1 rows);
-SELECT CAST(CAST(-7 AS TINYINT) AS FLOAT) FROM src LIMIT 1;
-SELECT CAST(CAST(-18 AS SMALLINT) AS FLOAT) FROM src LIMIT 1;
-SELECT CAST(-129 AS FLOAT) FROM src LIMIT 1;
-SELECT CAST(CAST(-1025 AS BIGINT) AS FLOAT) FROM src LIMIT 1;
+SELECT CAST(TRUE AS FLOAT) FROM src tablesample (1 rows);
-SELECT CAST(CAST(-3.14 AS DOUBLE) AS FLOAT) FROM src LIMIT 1;
-SELECT CAST(CAST(-3.14 AS DECIMAL) AS FLOAT) FROM src LIMIT 1;
+SELECT CAST(CAST(-7 AS TINYINT) AS FLOAT) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-18 AS SMALLINT) AS FLOAT) FROM src tablesample (1 rows);
+SELECT CAST(-129 AS FLOAT) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-1025 AS BIGINT) AS FLOAT) FROM src tablesample (1 rows);
-SELECT CAST('-38.14' AS FLOAT) FROM src LIMIT 1;
+SELECT CAST(CAST(-3.14 AS DOUBLE) AS FLOAT) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-3.14 AS DECIMAL(3,2)) AS FLOAT) FROM src tablesample (1 rows);
+
+SELECT CAST('-38.14' AS FLOAT) FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_long.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_long.q
index 45dc6f8bd3..706411a398 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_long.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_long.q
@@ -1,15 +1,17 @@
--- Conversion of main primitive types to Long type:
-SELECT CAST(NULL AS BIGINT) FROM src LIMIT 1;
+set hive.fetch.task.conversion=more;
-SELECT CAST(TRUE AS BIGINT) FROM src LIMIT 1;
+-- Conversion of main primitive types to Long type:
+SELECT CAST(NULL AS BIGINT) FROM src tablesample (1 rows);
-SELECT CAST(CAST(-7 AS TINYINT) AS BIGINT) FROM src LIMIT 1;
-SELECT CAST(CAST(-18 AS SMALLINT) AS BIGINT) FROM src LIMIT 1;
-SELECT CAST(-129 AS BIGINT) FROM src LIMIT 1;
+SELECT CAST(TRUE AS BIGINT) FROM src tablesample (1 rows);
-SELECT CAST(CAST(-3.14 AS DOUBLE) AS BIGINT) FROM src LIMIT 1;
-SELECT CAST(CAST(-3.14 AS FLOAT) AS BIGINT) FROM src LIMIT 1;
-SELECT CAST(CAST(-3.14 AS DECIMAL) AS BIGINT) FROM src LIMIT 1;
+SELECT CAST(CAST(-7 AS TINYINT) AS BIGINT) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-18 AS SMALLINT) AS BIGINT) FROM src tablesample (1 rows);
+SELECT CAST(-129 AS BIGINT) FROM src tablesample (1 rows);
-SELECT CAST('-38' AS BIGINT) FROM src LIMIT 1;
+SELECT CAST(CAST(-3.14 AS DOUBLE) AS BIGINT) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-3.14 AS FLOAT) AS BIGINT) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-3.14 AS DECIMAL) AS BIGINT) FROM src tablesample (1 rows);
+
+SELECT CAST('-38' AS BIGINT) FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_short.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_short.q
index 7d843c1ea1..5cc4e57c8c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_short.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_short.q
@@ -1,15 +1,17 @@
--- Conversion of main primitive types to Short type:
-SELECT CAST(NULL AS SMALLINT) FROM src LIMIT 1;
+set hive.fetch.task.conversion=more;
-SELECT CAST(TRUE AS SMALLINT) FROM src LIMIT 1;
+-- Conversion of main primitive types to Short type:
+SELECT CAST(NULL AS SMALLINT) FROM src tablesample (1 rows);
-SELECT CAST(CAST(-18 AS TINYINT) AS SMALLINT) FROM src LIMIT 1;
-SELECT CAST(-129 AS SMALLINT) FROM src LIMIT 1;
-SELECT CAST(CAST(-1025 AS BIGINT) AS SMALLINT) FROM src LIMIT 1;
+SELECT CAST(TRUE AS SMALLINT) FROM src tablesample (1 rows);
-SELECT CAST(CAST(-3.14 AS DOUBLE) AS SMALLINT) FROM src LIMIT 1;
-SELECT CAST(CAST(-3.14 AS FLOAT) AS SMALLINT) FROM src LIMIT 1;
-SELECT CAST(CAST(-3.14 AS DECIMAL) AS SMALLINT) FROM src LIMIT 1;
+SELECT CAST(CAST(-18 AS TINYINT) AS SMALLINT) FROM src tablesample (1 rows);
+SELECT CAST(-129 AS SMALLINT) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-1025 AS BIGINT) AS SMALLINT) FROM src tablesample (1 rows);
-SELECT CAST('-38' AS SMALLINT) FROM src LIMIT 1;
+SELECT CAST(CAST(-3.14 AS DOUBLE) AS SMALLINT) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-3.14 AS FLOAT) AS SMALLINT) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-3.14 AS DECIMAL) AS SMALLINT) FROM src tablesample (1 rows);
+
+SELECT CAST('-38' AS SMALLINT) FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_string.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_string.q
index 3b585e7170..ac4b5242e1 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_string.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_string.q
@@ -1,16 +1,18 @@
--- Conversion of main primitive types to String type:
-SELECT CAST(NULL AS STRING) FROM src LIMIT 1;
+set hive.fetch.task.conversion=more;
-SELECT CAST(TRUE AS STRING) FROM src LIMIT 1;
+-- Conversion of main primitive types to String type:
+SELECT CAST(NULL AS STRING) FROM src tablesample (1 rows);
-SELECT CAST(CAST(1 AS TINYINT) AS STRING) FROM src LIMIT 1;
-SELECT CAST(CAST(-18 AS SMALLINT) AS STRING) FROM src LIMIT 1;
-SELECT CAST(-129 AS STRING) FROM src LIMIT 1;
-SELECT CAST(CAST(-1025 AS BIGINT) AS STRING) FROM src LIMIT 1;
+SELECT CAST(TRUE AS STRING) FROM src tablesample (1 rows);
-SELECT CAST(CAST(-3.14 AS DOUBLE) AS STRING) FROM src LIMIT 1;
-SELECT CAST(CAST(-3.14 AS FLOAT) AS STRING) FROM src LIMIT 1;
-SELECT CAST(CAST(-3.14 AS DECIMAL) AS STRING) FROM src LIMIT 1;
+SELECT CAST(CAST(1 AS TINYINT) AS STRING) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-18 AS SMALLINT) AS STRING) FROM src tablesample (1 rows);
+SELECT CAST(-129 AS STRING) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-1025 AS BIGINT) AS STRING) FROM src tablesample (1 rows);
-SELECT CAST('Foo' AS STRING) FROM src LIMIT 1;
+SELECT CAST(CAST(-3.14 AS DOUBLE) AS STRING) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-3.14 AS FLOAT) AS STRING) FROM src tablesample (1 rows);
+SELECT CAST(CAST(-3.14 AS DECIMAL(3,2)) AS STRING) FROM src tablesample (1 rows);
+
+SELECT CAST('Foo' AS STRING) FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_unix_timestamp.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_unix_timestamp.q
index 3024074bba..0a2758edfc 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_unix_timestamp.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_to_unix_timestamp.q
@@ -1,8 +1,10 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION to_unix_timestamp;
DESCRIBE FUNCTION EXTENDED to_unix_timestamp;
create table oneline(key int, value string);
-load data local inpath '../data/files/things.txt' into table oneline;
+load data local inpath '../../data/files/things.txt' into table oneline;
SELECT
'2009-03-20 11:30:01',
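The other change here, '../data/files' widening to '../../data/files', repeats in every LOAD DATA and dfs -copyFromLocal statement for the rest of the suite: the 0.13 harness evidently runs the .q files from a directory one level deeper relative to the shared fixtures. The pattern with this test's fixture file:

-- Hive 12 layout
-- load data local inpath '../data/files/things.txt' into table oneline;
-- Hive 13 layout: one extra level up to reach data/files
load data local inpath '../../data/files/things.txt' into table oneline;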
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_translate.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_translate.q
index cba6ff90f6..21d799882f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_translate.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_translate.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION translate;
DESCRIBE FUNCTION EXTENDED translate;
@@ -10,28 +12,28 @@ FROM src INSERT OVERWRITE TABLE table_translate SELECT 'abcd', 'ahd', '12' WHERE
-- Run some queries on constant input parameters
SELECT translate('abcd', 'ab', '12'),
- translate('abcd', 'abc', '12') FROM src LIMIT 1;
+ translate('abcd', 'abc', '12') FROM src tablesample (1 rows);
-- Run some queries where first parameter being a table column while the other two being constants
SELECT translate(table_input.input, 'ab', '12'),
- translate(table_input.input, 'abc', '12') FROM table_input LIMIT 1;
+ translate(table_input.input, 'abc', '12') FROM table_input tablesample (1 rows);
-- Run some queries where all parameters are coming from table columns
-SELECT translate(input_string, from_string, to_string) FROM table_translate LIMIT 1;
+SELECT translate(input_string, from_string, to_string) FROM table_translate tablesample (1 rows);
-- Run some queries where some parameters are NULL
SELECT translate(NULL, 'ab', '12'),
translate('abcd', NULL, '12'),
translate('abcd', 'ab', NULL),
- translate(NULL, NULL, NULL) FROM src LIMIT 1;
+ translate(NULL, NULL, NULL) FROM src tablesample (1 rows);
-- Run some queries where the same character appears several times in the from string (2nd argument) of the UDF
SELECT translate('abcd', 'aba', '123'),
- translate('abcd', 'aba', '12') FROM src LIMIT 1;
+ translate('abcd', 'aba', '12') FROM src tablesample (1 rows);
-- Run some queries for the ignorant case when the 3rd parameter has more characters than the second one
-SELECT translate('abcd', 'abc', '1234') FROM src LIMIT 1;
+SELECT translate('abcd', 'abc', '1234') FROM src tablesample (1 rows);
-- Test proper function over UTF-8 characters
-SELECT translate('Àbcd', 'À', 'Ã') FROM src LIMIT 1;
+SELECT translate('Àbcd', 'À', 'Ã') FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_unhex.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_unhex.q
index e80021c8b2..257e469ffb 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_unhex.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_unhex.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION unhex;
DESCRIBE FUNCTION EXTENDED unhex;
@@ -9,11 +11,11 @@ SELECT
unhex('61'),
unhex('2D34'),
unhex('')
-FROM src limit 1;
+FROM src tablesample (1 rows);
-- Bad inputs
SELECT
unhex('MySQL'),
unhex('G123'),
unhex('\0')
-FROM src limit 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_union.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_union.q
index 9140d22fb5..3876beb17d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_union.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_union.q
@@ -1,11 +1,13 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION create_union;
DESCRIBE FUNCTION EXTENDED create_union;
EXPLAIN
SELECT create_union(0, key), create_union(if(key<100, 0, 1), 2.0, value),
create_union(1, "a", struct(2, "b"))
-FROM src LIMIT 2;
+FROM src tablesample (2 rows);
SELECT create_union(0, key), create_union(if(key<100, 0, 1), 2.0, value),
create_union(1, "a", struct(2, "b"))
-FROM src LIMIT 2;
+FROM src tablesample (2 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_unix_timestamp.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_unix_timestamp.q
index 89288a1193..1664329c33 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_unix_timestamp.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_unix_timestamp.q
@@ -1,8 +1,10 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION unix_timestamp;
DESCRIBE FUNCTION EXTENDED unix_timestamp;
create table oneline(key int, value string);
-load data local inpath '../data/files/things.txt' into table oneline;
+load data local inpath '../../data/files/things.txt' into table oneline;
SELECT
'2009-03-20 11:30:01',
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_using.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_using.q
new file mode 100644
index 0000000000..093187ddc3
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_using.q
@@ -0,0 +1,15 @@
+dfs ${system:test.dfs.mkdir} hdfs:///tmp/udf_using;
+
+dfs -copyFromLocal ../../data/files/sales.txt hdfs:///tmp/udf_using/sales.txt;
+
+create function lookup as 'org.apache.hadoop.hive.ql.udf.UDFFileLookup' using file 'hdfs:///tmp/udf_using/sales.txt';
+
+create table udf_using (c1 string);
+insert overwrite table udf_using select 'Joe' from src limit 2;
+
+select c1, lookup(c1) from udf_using;
+
+drop table udf_using;
+drop function lookup;
+
+dfs -rmr hdfs:///tmp/udf_using;
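udf_using.q is new to the 0.13 suite and covers permanent functions with attached resources: CREATE FUNCTION ... USING FILE (note the absence of the TEMPORARY keyword) registers a catalog-backed UDF whose implementation reads the sidecar file staged into HDFS, here a sales.txt lookup table consumed by the test-only UDFFileLookup class. The registration and its required cleanup, reformatted for readability:

create function lookup as 'org.apache.hadoop.hive.ql.udf.UDFFileLookup'
  using file 'hdfs:///tmp/udf_using/sales.txt';
-- permanent functions persist in the metastore, so the test drops the function explicitly
drop function lookup;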
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_weekofyear.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_weekofyear.q
index 4b7b4ea55a..abb0a2d7d2 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_weekofyear.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_weekofyear.q
@@ -1,6 +1,8 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION weekofyear;
DESCRIBE FUNCTION EXTENDED weekofyear;
SELECT weekofyear('1980-01-01'), weekofyear('1980-01-06'), weekofyear('1980-01-07'), weekofyear('1980-12-31'),
weekofyear('1984-1-1'), weekofyear('2008-02-20 00:00:00'), weekofyear('1980-12-28 23:59:59'), weekofyear('1980-12-29 23:59:59')
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_when.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_when.q
index d94a594f02..4eb7f6918a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_when.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_when.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION when;
DESCRIBE FUNCTION EXTENDED when;
@@ -27,7 +29,7 @@ SELECT CASE
WHEN 25=26 THEN 27
WHEN 28=28 THEN NULL
END
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
SELECT CASE
WHEN 1=1 THEN 2
@@ -54,4 +56,21 @@ SELECT CASE
WHEN 25=26 THEN 27
WHEN 28=28 THEN NULL
END
-FROM src LIMIT 1;
+FROM src tablesample (1 rows);
+
+-- Allow compatible types to be used in return value
+SELECT CASE
+ WHEN 1=1 THEN 123.0BD
+ ELSE 0.0BD
+ END,
+ CASE
+ WHEN 1=1 THEN 123
+ WHEN 1=2 THEN 1.0
+ ELSE 222.02BD
+ END,
+ CASE
+ WHEN 1=1 THEN 'abcd'
+ WHEN 1=2 THEN cast('efgh' as varchar(10))
+ ELSE cast('ijkl' as char(4))
+ END
+FROM src tablesample (1 rows);
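The appended block checks return-type reconciliation across CASE branches. The BD suffix marks a decimal literal, so the first expression is decimal throughout; the second mixes int, double, and decimal branches, which Hive resolves to a common numeric type; the third mixes string, varchar(10), and char(4), which presumably resolve to string. The same rule in a single hedged probe:

-- int, double, and decimal branches are reconciled to one result type
SELECT CASE WHEN 1=2 THEN 1 WHEN 1=3 THEN 2.0 ELSE 3.14BD END
FROM src tablesample (1 rows);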
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath.q
index fca1ba11d8..1ad38abcf6 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath.q
@@ -1,8 +1,10 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION xpath ;
DESCRIBE FUNCTION EXTENDED xpath ;
-SELECT xpath ('<a><b>b1</b><b>b2</b><b>b3</b><c>c1</c><c>c2</c></a>', 'a/text()') FROM src LIMIT 1 ;
-SELECT xpath ('<a><b>b1</b><b>b2</b><b>b3</b><c>c1</c><c>c2</c></a>', 'a/*/text()') FROM src LIMIT 1 ;
-SELECT xpath ('<a><b>b1</b><b>b2</b><b>b3</b><c>c1</c><c>c2</c></a>', 'a/b/text()') FROM src LIMIT 1 ;
-SELECT xpath ('<a><b>b1</b><b>b2</b><b>b3</b><c>c1</c><c>c2</c></a>', 'a/c/text()') FROM src LIMIT 1 ;
-SELECT xpath ('<a><b class="bb">b1</b><b>b2</b><b>b3</b><c class="bb">c1</c><c>c2</c></a>', 'a/*[@class="bb"]/text()') FROM src LIMIT 1 ; \ No newline at end of file
+SELECT xpath ('<a><b>b1</b><b>b2</b><b>b3</b><c>c1</c><c>c2</c></a>', 'a/text()') FROM src tablesample (1 rows) ;
+SELECT xpath ('<a><b>b1</b><b>b2</b><b>b3</b><c>c1</c><c>c2</c></a>', 'a/*/text()') FROM src tablesample (1 rows) ;
+SELECT xpath ('<a><b>b1</b><b>b2</b><b>b3</b><c>c1</c><c>c2</c></a>', 'a/b/text()') FROM src tablesample (1 rows) ;
+SELECT xpath ('<a><b>b1</b><b>b2</b><b>b3</b><c>c1</c><c>c2</c></a>', 'a/c/text()') FROM src tablesample (1 rows) ;
+SELECT xpath ('<a><b class="bb">b1</b><b>b2</b><b>b3</b><c class="bb">c1</c><c>c2</c></a>', 'a/*[@class="bb"]/text()') FROM src tablesample (1 rows) ; \ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_boolean.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_boolean.q
index 3a6e613eb6..6e3ff244b0 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_boolean.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_boolean.q
@@ -1,9 +1,11 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION xpath_boolean ;
DESCRIBE FUNCTION EXTENDED xpath_boolean ;
-SELECT xpath_boolean ('<a><b>b</b></a>', 'a/b') FROM src LIMIT 1 ;
-SELECT xpath_boolean ('<a><b>b</b></a>', 'a/c') FROM src LIMIT 1 ;
-SELECT xpath_boolean ('<a><b>b</b></a>', 'a/b = "b"') FROM src LIMIT 1 ;
-SELECT xpath_boolean ('<a><b>b</b></a>', 'a/b = "c"') FROM src LIMIT 1 ;
-SELECT xpath_boolean ('<a><b>10</b></a>', 'a/b < 10') FROM src LIMIT 1 ;
-SELECT xpath_boolean ('<a><b>10</b></a>', 'a/b = 10') FROM src LIMIT 1 ;
+SELECT xpath_boolean ('<a><b>b</b></a>', 'a/b') FROM src tablesample (1 rows) ;
+SELECT xpath_boolean ('<a><b>b</b></a>', 'a/c') FROM src tablesample (1 rows) ;
+SELECT xpath_boolean ('<a><b>b</b></a>', 'a/b = "b"') FROM src tablesample (1 rows) ;
+SELECT xpath_boolean ('<a><b>b</b></a>', 'a/b = "c"') FROM src tablesample (1 rows) ;
+SELECT xpath_boolean ('<a><b>10</b></a>', 'a/b < 10') FROM src tablesample (1 rows) ;
+SELECT xpath_boolean ('<a><b>10</b></a>', 'a/b = 10') FROM src tablesample (1 rows) ;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_double.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_double.q
index 4328747f44..68441762e5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_double.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_double.q
@@ -1,14 +1,16 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION xpath_number ;
DESCRIBE FUNCTION EXTENDED xpath_number ;
DESCRIBE FUNCTION xpath_double ;
DESCRIBE FUNCTION EXTENDED xpath_double ;
-SELECT xpath_double ('<a>this is not a number</a>', 'a') FROM src LIMIT 1 ;
-SELECT xpath_double ('<a>this 2 is not a number</a>', 'a') FROM src LIMIT 1 ;
-SELECT xpath_double ('<a><b>2000000000</b><c>40000000000</c></a>', 'a/b * a/c') FROM src LIMIT 1 ;
-SELECT xpath_double ('<a>try a boolean</a>', 'a = 10') FROM src LIMIT 1 ;
-SELECT xpath_double ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'a/b') FROM src LIMIT 1 ;
-SELECT xpath_double ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/*)') FROM src LIMIT 1 ;
-SELECT xpath_double ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b)') FROM src LIMIT 1 ;
-SELECT xpath_double ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b[@class="odd"])') FROM src LIMIT 1 ; \ No newline at end of file
+SELECT xpath_double ('<a>this is not a number</a>', 'a') FROM src tablesample (1 rows) ;
+SELECT xpath_double ('<a>this 2 is not a number</a>', 'a') FROM src tablesample (1 rows) ;
+SELECT xpath_double ('<a><b>2000000000</b><c>40000000000</c></a>', 'a/b * a/c') FROM src tablesample (1 rows) ;
+SELECT xpath_double ('<a>try a boolean</a>', 'a = 10') FROM src tablesample (1 rows) ;
+SELECT xpath_double ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'a/b') FROM src tablesample (1 rows) ;
+SELECT xpath_double ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/*)') FROM src tablesample (1 rows) ;
+SELECT xpath_double ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b)') FROM src tablesample (1 rows) ;
+SELECT xpath_double ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b[@class="odd"])') FROM src tablesample (1 rows) ; \ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_float.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_float.q
index 1f1482833c..4596a32260 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_float.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_float.q
@@ -1,11 +1,13 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION xpath_float ;
DESCRIBE FUNCTION EXTENDED xpath_float ;
-SELECT xpath_float ('<a>this is not a number</a>', 'a') FROM src LIMIT 1 ;
-SELECT xpath_float ('<a>this 2 is not a number</a>', 'a') FROM src LIMIT 1 ;
-SELECT xpath_float ('<a><b>2000000000</b><c>40000000000</c></a>', 'a/b * a/c') FROM src LIMIT 1 ;
-SELECT xpath_float ('<a>try a boolean</a>', 'a = 10') FROM src LIMIT 1 ;
-SELECT xpath_float ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'a/b') FROM src LIMIT 1 ;
-SELECT xpath_float ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/*)') FROM src LIMIT 1 ;
-SELECT xpath_float ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b)') FROM src LIMIT 1 ;
-SELECT xpath_float ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b[@class="odd"])') FROM src LIMIT 1 ; \ No newline at end of file
+SELECT xpath_float ('<a>this is not a number</a>', 'a') FROM src tablesample (1 rows) ;
+SELECT xpath_float ('<a>this 2 is not a number</a>', 'a') FROM src tablesample (1 rows) ;
+SELECT xpath_float ('<a><b>2000000000</b><c>40000000000</c></a>', 'a/b * a/c') FROM src tablesample (1 rows) ;
+SELECT xpath_float ('<a>try a boolean</a>', 'a = 10') FROM src tablesample (1 rows) ;
+SELECT xpath_float ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'a/b') FROM src tablesample (1 rows) ;
+SELECT xpath_float ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/*)') FROM src tablesample (1 rows) ;
+SELECT xpath_float ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b)') FROM src tablesample (1 rows) ;
+SELECT xpath_float ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b[@class="odd"])') FROM src tablesample (1 rows) ; \ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_int.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_int.q
index 9b50bd9eeb..9f3898f114 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_int.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_int.q
@@ -1,11 +1,13 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION xpath_int ;
DESCRIBE FUNCTION EXTENDED xpath_int ;
-SELECT xpath_int ('<a>this is not a number</a>', 'a') FROM src LIMIT 1 ;
-SELECT xpath_int ('<a>this 2 is not a number</a>', 'a') FROM src LIMIT 1 ;
-SELECT xpath_int ('<a><b>2000000000</b><c>40000000000</c></a>', 'a/b * a/c') FROM src LIMIT 1 ;
-SELECT xpath_int ('<a>try a boolean</a>', 'a = 10') FROM src LIMIT 1 ;
-SELECT xpath_int ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'a/b') FROM src LIMIT 1 ;
-SELECT xpath_int ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/*)') FROM src LIMIT 1 ;
-SELECT xpath_int ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b)') FROM src LIMIT 1 ;
-SELECT xpath_int ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b[@class="odd"])') FROM src LIMIT 1 ; \ No newline at end of file
+SELECT xpath_int ('<a>this is not a number</a>', 'a') FROM src tablesample (1 rows) ;
+SELECT xpath_int ('<a>this 2 is not a number</a>', 'a') FROM src tablesample (1 rows) ;
+SELECT xpath_int ('<a><b>2000000000</b><c>40000000000</c></a>', 'a/b * a/c') FROM src tablesample (1 rows) ;
+SELECT xpath_int ('<a>try a boolean</a>', 'a = 10') FROM src tablesample (1 rows) ;
+SELECT xpath_int ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'a/b') FROM src tablesample (1 rows) ;
+SELECT xpath_int ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/*)') FROM src tablesample (1 rows) ;
+SELECT xpath_int ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b)') FROM src tablesample (1 rows) ;
+SELECT xpath_int ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b[@class="odd"])') FROM src tablesample (1 rows) ; \ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_long.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_long.q
index 04ee61b1f0..3a335937c5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_long.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_long.q
@@ -1,11 +1,13 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION xpath_long ;
DESCRIBE FUNCTION EXTENDED xpath_long ;
-SELECT xpath_long ('<a>this is not a number</a>', 'a') FROM src LIMIT 1 ;
-SELECT xpath_long ('<a>this 2 is not a number</a>', 'a') FROM src LIMIT 1 ;
-SELECT xpath_long ('<a><b>2000000000</b><c>40000000000</c></a>', 'a/b * a/c') FROM src LIMIT 1 ;
-SELECT xpath_long ('<a>try a boolean</a>', 'a = 10') FROM src LIMIT 1 ;
-SELECT xpath_long ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'a/b') FROM src LIMIT 1 ;
-SELECT xpath_long ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/*)') FROM src LIMIT 1 ;
-SELECT xpath_long ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b)') FROM src LIMIT 1 ;
-SELECT xpath_long ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b[@class="odd"])') FROM src LIMIT 1 ;
+SELECT xpath_long ('<a>this is not a number</a>', 'a') FROM src tablesample (1 rows) ;
+SELECT xpath_long ('<a>this 2 is not a number</a>', 'a') FROM src tablesample (1 rows) ;
+SELECT xpath_long ('<a><b>2000000000</b><c>40000000000</c></a>', 'a/b * a/c') FROM src tablesample (1 rows) ;
+SELECT xpath_long ('<a>try a boolean</a>', 'a = 10') FROM src tablesample (1 rows) ;
+SELECT xpath_long ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'a/b') FROM src tablesample (1 rows) ;
+SELECT xpath_long ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/*)') FROM src tablesample (1 rows) ;
+SELECT xpath_long ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b)') FROM src tablesample (1 rows) ;
+SELECT xpath_long ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b[@class="odd"])') FROM src tablesample (1 rows) ;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_short.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_short.q
index 6a1abdc0bd..073056e72e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_short.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_short.q
@@ -1,11 +1,13 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION xpath_short ;
DESCRIBE FUNCTION EXTENDED xpath_short ;
-SELECT xpath_short ('<a>this is not a number</a>', 'a') FROM src LIMIT 1 ;
-SELECT xpath_short ('<a>this 2 is not a number</a>', 'a') FROM src LIMIT 1 ;
-SELECT xpath_short ('<a><b>2000000000</b><c>40000000000</c></a>', 'a/b * a/c') FROM src LIMIT 1 ;
-SELECT xpath_short ('<a>try a boolean</a>', 'a = 10') FROM src LIMIT 1 ;
-SELECT xpath_short ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'a/b') FROM src LIMIT 1 ;
-SELECT xpath_short ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/*)') FROM src LIMIT 1 ;
-SELECT xpath_short ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b)') FROM src LIMIT 1 ;
-SELECT xpath_short ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b[@class="odd"])') FROM src LIMIT 1 ; \ No newline at end of file
+SELECT xpath_short ('<a>this is not a number</a>', 'a') FROM src tablesample (1 rows) ;
+SELECT xpath_short ('<a>this 2 is not a number</a>', 'a') FROM src tablesample (1 rows) ;
+SELECT xpath_short ('<a><b>2000000000</b><c>40000000000</c></a>', 'a/b * a/c') FROM src tablesample (1 rows) ;
+SELECT xpath_short ('<a>try a boolean</a>', 'a = 10') FROM src tablesample (1 rows) ;
+SELECT xpath_short ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'a/b') FROM src tablesample (1 rows) ;
+SELECT xpath_short ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/*)') FROM src tablesample (1 rows) ;
+SELECT xpath_short ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b)') FROM src tablesample (1 rows) ;
+SELECT xpath_short ('<a><b class="odd">1</b><b class="even">2</b><b class="odd">4</b><c>8</c></a>', 'sum(a/b[@class="odd"])') FROM src tablesample (1 rows) ; \ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_string.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_string.q
index ebbc913b71..1f1731c67d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_string.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udf_xpath_string.q
@@ -1,11 +1,13 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION xpath_string ;
DESCRIBE FUNCTION EXTENDED xpath_string ;
-SELECT xpath_string ('<a><b>bb</b><c>cc</c></a>', 'a') FROM src LIMIT 1 ;
-SELECT xpath_string ('<a><b>bb</b><c>cc</c></a>', 'a/b') FROM src LIMIT 1 ;
-SELECT xpath_string ('<a><b>bb</b><c>cc</c></a>', 'a/c') FROM src LIMIT 1 ;
-SELECT xpath_string ('<a><b>bb</b><c>cc</c></a>', 'a/d') FROM src LIMIT 1 ;
-SELECT xpath_string ('<a><b>b1</b><b>b2</b></a>', '//b') FROM src LIMIT 1 ;
-SELECT xpath_string ('<a><b>b1</b><b>b2</b></a>', 'a/b[1]') FROM src LIMIT 1 ;
-SELECT xpath_string ('<a><b>b1</b><b>b2</b></a>', 'a/b[2]') FROM src LIMIT 1 ;
-SELECT xpath_string ('<a><b>b1</b><b id="b_2">b2</b></a>', 'a/b[@id="b_2"]') FROM src LIMIT 1 ;
+SELECT xpath_string ('<a><b>bb</b><c>cc</c></a>', 'a') FROM src tablesample (1 rows) ;
+SELECT xpath_string ('<a><b>bb</b><c>cc</c></a>', 'a/b') FROM src tablesample (1 rows) ;
+SELECT xpath_string ('<a><b>bb</b><c>cc</c></a>', 'a/c') FROM src tablesample (1 rows) ;
+SELECT xpath_string ('<a><b>bb</b><c>cc</c></a>', 'a/d') FROM src tablesample (1 rows) ;
+SELECT xpath_string ('<a><b>b1</b><b>b2</b></a>', '//b') FROM src tablesample (1 rows) ;
+SELECT xpath_string ('<a><b>b1</b><b>b2</b></a>', 'a/b[1]') FROM src tablesample (1 rows) ;
+SELECT xpath_string ('<a><b>b1</b><b>b2</b></a>', 'a/b[2]') FROM src tablesample (1 rows) ;
+SELECT xpath_string ('<a><b>b1</b><b id="b_2">b2</b></a>', 'a/b[@id="b_2"]') FROM src tablesample (1 rows) ;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udtf_explode.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udtf_explode.q
index 638a4e9ca5..1d405b3560 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udtf_explode.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udtf_explode.q
@@ -1,3 +1,5 @@
+set hive.fetch.task.conversion=more;
+
DESCRIBE FUNCTION explode;
DESCRIBE FUNCTION EXTENDED explode;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udtf_json_tuple.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udtf_json_tuple.q
index 712d9598c7..93d829d4ed 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udtf_json_tuple.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udtf_json_tuple.q
@@ -2,17 +2,17 @@ create table json_t (key string, jstring string);
insert overwrite table json_t
select * from (
- select '1', '{"f1": "value1", "f2": "value2", "f3": 3, "f5": 5.23}' from src limit 1
+ select '1', '{"f1": "value1", "f2": "value2", "f3": 3, "f5": 5.23}' from src tablesample (1 rows)
union all
- select '2', '{"f1": "value12", "f3": "value3", "f2": 2, "f4": 4.01}' from src limit 1
+ select '2', '{"f1": "value12", "f3": "value3", "f2": 2, "f4": 4.01}' from src tablesample (1 rows)
union all
- select '3', '{"f1": "value13", "f4": "value44", "f3": "value33", "f2": 2, "f5": 5.01}' from src limit 1
+ select '3', '{"f1": "value13", "f4": "value44", "f3": "value33", "f2": 2, "f5": 5.01}' from src tablesample (1 rows)
union all
- select '4', cast(null as string) from src limit 1
+ select '4', cast(null as string) from src tablesample (1 rows)
union all
- select '5', '{"f1": "", "f5": null}' from src limit 1
+ select '5', '{"f1": "", "f5": null}' from src tablesample (1 rows)
union all
- select '6', '[invalid JSON string]' from src limit 1
+ select '6', '[invalid JSON string]' from src tablesample (1 rows)
) s;
explain
@@ -40,7 +40,7 @@ select f2, count(*) from json_t a lateral view json_tuple(a.jstring, 'f1', 'f2',
CREATE TABLE dest1(c1 STRING) STORED AS RCFILE;
-INSERT OVERWRITE TABLE dest1 SELECT '{"a":"b\nc"}' FROM src LIMIT 1;
+INSERT OVERWRITE TABLE dest1 SELECT '{"a":"b\nc"}' FROM src tablesample (1 rows);
SELECT * FROM dest1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udtf_parse_url_tuple.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udtf_parse_url_tuple.q
index 055e39b297..0870cbc4a9 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udtf_parse_url_tuple.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udtf_parse_url_tuple.q
@@ -2,17 +2,17 @@ create table url_t (key string, fullurl string);
insert overwrite table url_t
select * from (
- select '1', 'http://facebook.com/path1/p.php?k1=v1&k2=v2#Ref1' from src limit 1
+ select '1', 'http://facebook.com/path1/p.php?k1=v1&k2=v2#Ref1' from src tablesample (1 rows)
union all
- select '2', 'https://www.socs.uts.edu.au:80/MosaicDocs-old/url-primer.html?k1=tps#chapter1' from src limit 1
+ select '2', 'https://www.socs.uts.edu.au:80/MosaicDocs-old/url-primer.html?k1=tps#chapter1' from src tablesample (1 rows)
union all
- select '3', 'ftp://sites.google.com/a/example.com/site/page' from src limit 1
+ select '3', 'ftp://sites.google.com/a/example.com/site/page' from src tablesample (1 rows)
union all
- select '4', cast(null as string) from src limit 1
+ select '4', cast(null as string) from src tablesample (1 rows)
union all
- select '5', 'htttp://' from src limit 1
+ select '5', 'htttp://' from src tablesample (1 rows)
union all
- select '6', '[invalid url string]' from src limit 1
+ select '6', '[invalid url string]' from src tablesample (1 rows)
) s;
describe function parse_url_tuple;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udtf_posexplode.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udtf_posexplode.q
new file mode 100644
index 0000000000..343f08ba6f
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/udtf_posexplode.q
@@ -0,0 +1,15 @@
+CREATE TABLE employees (
+name STRING,
+salary FLOAT,
+subordinates ARRAY<STRING>,
+deductions MAP<STRING, FLOAT>,
+address STRUCT<street:STRING, city:STRING, state:STRING, zip:INT>);
+
+LOAD DATA LOCAL INPATH '../../data/files/posexplode_data.txt' INTO TABLE employees;
+
+SELECT
+ name, pos, sub
+FROM
+ employees
+LATERAL VIEW
+ posexplode(subordinates) subView AS pos, sub;
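posexplode is explode plus an element index: the lateral view yields one (name, pos, sub) row per entry of the subordinates array, with pos counting from zero. For a hypothetical employee John whose subordinates array holds ['Lisa', 'Todd'], the query would return:

-- name  pos  sub
-- John  0    Lisa
-- John  1    Todd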
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union.q
index 91bbd1b07f..525eccbbfe 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union.q
@@ -6,13 +6,13 @@ FROM (
UNION ALL
FROM src SELECT src.* WHERE src.key > 100
) unioninput
-INSERT OVERWRITE DIRECTORY '../build/ql/test/data/warehouse/union.out' SELECT unioninput.*;
+INSERT OVERWRITE DIRECTORY 'target/warehouse/union.out' SELECT unioninput.*;
FROM (
FROM src select src.key, src.value WHERE src.key < 100
UNION ALL
FROM src SELECT src.* WHERE src.key > 100
) unioninput
-INSERT OVERWRITE DIRECTORY '../build/ql/test/data/warehouse/union.out' SELECT unioninput.*;
+INSERT OVERWRITE DIRECTORY 'target/warehouse/union.out' SELECT unioninput.*;
-dfs -cat ../build/ql/test/data/warehouse/union.out/*;
+dfs -cat ${system:test.warehouse.dir}/union.out/*;
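The output directory moves off the hard-coded ../build/ql/test/data/warehouse path: the two writes use a relative target/warehouse location, and the read-back uses Hive's variable interpolation so the harness can supply the warehouse root at run time. The substitution form:

-- the harness defines test.warehouse.dir; Hive expands ${system:...} before execution
dfs -cat ${system:test.warehouse.dir}/union.out/*;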
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union34.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union34.q
index 36bc865be8..238b583fd8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union34.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union34.q
@@ -3,7 +3,7 @@ create table src10_2 (key string, value string);
create table src10_3 (key string, value string);
create table src10_4 (key string, value string);
-from (select * from src limit 10) a
+from (select * from src tablesample (10 rows)) a
insert overwrite table src10_1 select *
insert overwrite table src10_2 select *
insert overwrite table src10_3 select *
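union34.q keeps Hive's multi-table insert shape, now fed by a row-count sample: one FROM clause over the ten sampled rows fans out to four INSERT OVERWRITE targets, so src is read once rather than once per target. Schematically, with two of the four targets:

from (select * from src tablesample (10 rows)) a
insert overwrite table src10_1 select *
insert overwrite table src10_2 select *;
-- ...and likewise for src10_3 and src10_4 in the full test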
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_date.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_date.q
index e332a8af7b..dd6f08e5b4 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_date.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_date.q
@@ -17,8 +17,8 @@ create table union_date_2 (
FL_NUM int
);
-LOAD DATA LOCAL INPATH '../data/files/flights_join.txt' OVERWRITE INTO TABLE union_date_1;
-LOAD DATA LOCAL INPATH '../data/files/flights_join.txt' OVERWRITE INTO TABLE union_date_2;
+LOAD DATA LOCAL INPATH '../../data/files/flights_join.txt' OVERWRITE INTO TABLE union_date_1;
+LOAD DATA LOCAL INPATH '../../data/files/flights_join.txt' OVERWRITE INTO TABLE union_date_2;
select * from (
select fl_num, fl_date from union_date_1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_null.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_null.q
index 70147070fe..4368b8a5b6 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_null.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_null.q
@@ -1,2 +1,5 @@
-- HIVE-2901
select x from (select value as x from src union all select NULL as x from src)a limit 10;
+
+-- HIVE-4837
+select * from (select null as N from src1 group by key UNION ALL select null as N from src1 group by key ) a;
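The HIVE-4837 case unions two aggregated branches that each project a bare NULL, whose inferred type is void; the regression it guards presumably involved that void-typed column misbehaving when carried through UNION ALL. A long-standing workaround, sketched hypothetically here, is to pin the type explicitly:

select * from (
  select cast(null as string) as N from src1 group by key
  union all
  select cast(null as string) as N from src1 group by key
) a;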
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_1.q
index c9f920c648..c87b3fef1e 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_1.q
@@ -19,7 +19,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint) stored as textfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_10.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_10.q
index a072fe3b56..6701952717 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_10.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_10.q
@@ -24,7 +24,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint) stored as rcfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_11.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_11.q
index 6250a20210..4b2fa42f1b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_11.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_11.q
@@ -24,7 +24,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint) stored as rcfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_12.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_12.q
index 168eac34a0..69d0d0af9f 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_12.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_12.q
@@ -24,7 +24,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint) stored as rcfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_13.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_13.q
index a88a83e152..7605f0ec26 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_13.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_13.q
@@ -24,7 +24,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint) stored as rcfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_14.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_14.q
index e588e8fd62..a4fdfc8ee4 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_14.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_14.q
@@ -25,7 +25,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint) stored as rcfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_15.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_15.q
index 237f2e7629..e3c937b7b1 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_15.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_15.q
@@ -25,7 +25,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1 partition (ds)
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_16.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_16.q
index 06d5043f9c..537078b080 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_16.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_16.q
@@ -25,7 +25,7 @@ set hive.exec.dynamic.partition=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile ;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1 partition (ds)
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_17.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_17.q
index 65b8255c3e..d70f3d3da3 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_17.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_17.q
@@ -22,7 +22,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1 partition (ds)
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_18.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_18.q
index 265acfd357..478650038c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_18.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_18.q
@@ -24,7 +24,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, ds string) stored as textfile;
create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1 partition (ds)
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_19.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_19.q
index 1450caa2d0..8c45953ed8 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_19.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_19.q
@@ -19,7 +19,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint) stored as textfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_2.q
index 015c146eca..83cd2887ab 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_2.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_2.q
@@ -20,7 +20,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint) stored as textfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_20.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_20.q
index ac72788927..f80f7c1dfd 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_20.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_20.q
@@ -20,7 +20,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(values bigint, key string) stored as textfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_21.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_21.q
index f897446241..8963c2576c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_21.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_21.q
@@ -20,7 +20,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string) stored as textfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_22.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_22.q
index f01053d27d..b0c1ccd73d 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_22.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_22.q
@@ -19,7 +19,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint, values2 bigint) stored as textfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_23.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_23.q
index 805dd76274..a1b989a0eb 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_23.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_23.q
@@ -20,7 +20,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint) stored as textfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_24.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_24.q
index 36fd947d60..ec561e0979 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_24.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_24.q
@@ -18,7 +18,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key double, values bigint) stored as textfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
EXPLAIN
INSERT OVERWRITE TABLE outputTbl1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_3.q
index da0f1c0473..9617f737bc 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_3.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_3.q
@@ -20,7 +20,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint) stored as textfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_4.q
index 18d4730a12..cae323b1ef 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_4.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_4.q
@@ -20,7 +20,7 @@ set hive.merge.smallfiles.avgsize=1;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint) stored as textfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_5.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_5.q
index a6fbeb03dd..5df84e145c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_5.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_5.q
@@ -22,7 +22,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint) stored as textfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_6.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_6.q
index 7ae5af30c1..bfce26d0fb 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_6.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_6.q
@@ -17,7 +17,7 @@ create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint) stored as textfile;
create table outputTbl2(key string, values bigint) stored as textfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
FROM (
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_7.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_7.q
index 5a639ca117..3a956747a4 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_7.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_7.q
@@ -21,7 +21,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint) stored as rcfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_8.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_8.q
index 79b84e8118..a83a43e466 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_8.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_8.q
@@ -22,7 +22,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint) stored as rcfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_9.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_9.q
index f6038384f3..e71f6dd001 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_9.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_remove_9.q
@@ -22,7 +22,7 @@ set mapred.input.dir.recursive=true;
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, values bigint) stored as rcfile;
-load data local inpath '../data/files/T1.txt' into table inputTbl1;
+load data local inpath '../../data/files/T1.txt' into table inputTbl1;
explain
insert overwrite table outputTbl1
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_top_level.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_top_level.q
new file mode 100644
index 0000000000..6a4b45fbc4
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_top_level.q
@@ -0,0 +1,106 @@
+-- top level
+explain
+select * from (
+select key, 0 as value from src where key % 3 == 0 limit 3
+union all
+select key, 1 as value from src where key % 3 == 1 limit 3
+union all
+select key, 2 as value from src where key % 3 == 2 limit 3
+) q1 order by key;
+
+select * from (
+select key, 0 as value from src where key % 3 == 0 limit 3
+union all
+select key, 1 as value from src where key % 3 == 1 limit 3
+union all
+select key, 2 as value from src where key % 3 == 2 limit 3
+) q1 order by key;
+
+explain
+select * from (
+select s1.key as k, s2.value as v from src s1 join src s2 on (s1.key = s2.key) limit 10
+union all
+select s1.key as k, s2.value as v from src s1 join src s2 on (s1.key = s2.key) limit 10
+) q1 order by k;
+
+select * from (
+select s1.key as k, s2.value as v from src s1 join src s2 on (s1.key = s2.key) limit 10
+union all
+select s1.key as k, s2.value as v from src s1 join src s2 on (s1.key = s2.key) limit 10
+) q1 order by k;
+
+-- ctas
+explain
+create table union_top as
+select key, 0 as value from src where key % 3 == 0 limit 3
+union all
+select key, 1 as value from src where key % 3 == 1 limit 3
+union all
+select key, 2 as value from src where key % 3 == 2 limit 3;
+
+create table union_top as
+select key, 0 as value from src where key % 3 == 0 limit 3
+union all
+select key, 1 as value from src where key % 3 == 1 limit 3
+union all
+select key, 2 as value from src where key % 3 == 2 limit 3;
+
+select * from union_top order by key;
+
+truncate table union_top;
+
+-- insert into
+explain
+insert into table union_top
+select key, 0 as value from src where key % 3 == 0 limit 3
+union all
+select key, 1 as value from src where key % 3 == 1 limit 3
+union all
+select key, 2 as value from src where key % 3 == 2 limit 3;
+
+insert into table union_top
+select key, 0 as value from src where key % 3 == 0 limit 3
+union all
+select key, 1 as value from src where key % 3 == 1 limit 3
+union all
+select key, 2 as value from src where key % 3 == 2 limit 3;
+
+select * from union_top order by key;
+
+explain
+insert overwrite table union_top
+select key, 0 as value from src where key % 3 == 0 limit 3
+union all
+select key, 1 as value from src where key % 3 == 1 limit 3
+union all
+select key, 2 as value from src where key % 3 == 2 limit 3;
+
+insert overwrite table union_top
+select key, 0 as value from src where key % 3 == 0 limit 3
+union all
+select key, 1 as value from src where key % 3 == 1 limit 3
+union all
+select key, 2 as value from src where key % 3 == 2 limit 3;
+
+select * from union_top order by key;
+
+-- create view
+explain
+create view union_top_view as
+select key, 0 as value from src where key % 3 == 0 limit 3
+union all
+select key, 1 as value from src where key % 3 == 1 limit 3
+union all
+select key, 2 as value from src where key % 3 == 2 limit 3;
+
+create view union_top_view as
+select key, 0 as value from src where key % 3 == 0 limit 3
+union all
+select key, 1 as value from src where key % 3 == 1 limit 3
+union all
+select key, 2 as value from src where key % 3 == 2 limit 3;
+
+select * from union_top_view order by key;
+
+drop table union_top;
+drop view union_top_view;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_view.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_view.q
index 4f8bafe37a..b727199cf4 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_view.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/union_view.q
@@ -1,3 +1,4 @@
+set hive.stats.dbclass=fs;
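+-- (fs presumably publishes table/partition stats via the filesystem rather than
+-- an external stats database, keeping the test self-contained)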
CREATE TABLE src_union_1 (key int, value string) PARTITIONED BY (ds string);
CREATE INDEX src_union_1_key_idx ON TABLE src_union_1(key) AS 'COMPACT' WITH DEFERRED REBUILD;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/uniquejoin.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/uniquejoin.q
index 51bcf22bfa..3bc8ef931b 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/uniquejoin.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/uniquejoin.q
@@ -2,9 +2,9 @@ CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE;
CREATE TABLE T2(key STRING, val STRING) STORED AS TEXTFILE;
CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/T1.txt' INTO TABLE T1;
-LOAD DATA LOCAL INPATH '../data/files/T2.txt' INTO TABLE T2;
-LOAD DATA LOCAL INPATH '../data/files/T3.txt' INTO TABLE T3;
+LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1;
+LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2;
+LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3;
FROM UNIQUEJOIN PRESERVE T1 a (a.key), PRESERVE T2 b (b.key), PRESERVE T3 c (c.key)
SELECT a.key, b.key, c.key;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_1.q
index 53273b3187..bed61f381c 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_1.q
@@ -5,7 +5,7 @@ create table varchar1 (key varchar(10), value varchar(20));
create table varchar1_1 (key string, value string);
-- load from file
-load data local inpath '../data/files/srcbucket0.txt' overwrite into table varchar1;
+load data local inpath '../../data/files/srcbucket0.txt' overwrite into table varchar1;
select * from varchar1 order by key, value limit 2;
-- insert overwrite, from same/different length varchar
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_cast.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_cast.q
index 550f3dc728..c356b1dbca 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_cast.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_cast.q
@@ -1,3 +1,4 @@
+set hive.fetch.task.conversion=more;
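+-- ('more' likely extends fetch-task conversion to simple selects with expressions,
+-- so the casts below can run as a plain fetch rather than a full job)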
-- Cast from varchar to other data types
select
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_comparison.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_comparison.q
index b6c6f40641..05cad852a2 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_comparison.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_comparison.q
@@ -1,3 +1,4 @@
+set hive.fetch.task.conversion=more;
-- Should all be true
select
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_join1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_join1.q
index 6a19efaa3c..94226879c5 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_join1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_join1.q
@@ -17,9 +17,9 @@ create table varchar_join1_str (
c2 string
);
-load data local inpath '../data/files/vc1.txt' into table varchar_join1_vc1;
-load data local inpath '../data/files/vc1.txt' into table varchar_join1_vc2;
-load data local inpath '../data/files/vc1.txt' into table varchar_join1_str;
+load data local inpath '../../data/files/vc1.txt' into table varchar_join1_vc1;
+load data local inpath '../../data/files/vc1.txt' into table varchar_join1_vc2;
+load data local inpath '../../data/files/vc1.txt' into table varchar_join1_str;
-- Join varchar with same length varchar
select * from varchar_join1_vc1 a join varchar_join1_vc1 b on (a.c2 = b.c2) order by a.c1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_serde.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_serde.q
index 7351b688cf..ea2a022b94 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_serde.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_serde.q
@@ -18,7 +18,7 @@ with serdeproperties (
)
stored as textfile;
-load data local inpath '../data/files/srcbucket0.txt' overwrite into table varchar_serde_regex;
+load data local inpath '../../data/files/srcbucket0.txt' overwrite into table varchar_serde_regex;
select * from varchar_serde_regex limit 5;
select value, count(*) from varchar_serde_regex group by value limit 5;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_union1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_union1.q
index cf90eab33e..dd3cffe0db 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_union1.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/varchar_union1.q
@@ -17,9 +17,9 @@ create table varchar_union1_str (
c2 string
);
-load data local inpath '../data/files/vc1.txt' into table varchar_union1_vc1;
-load data local inpath '../data/files/vc1.txt' into table varchar_union1_vc2;
-load data local inpath '../data/files/vc1.txt' into table varchar_union1_str;
+load data local inpath '../../data/files/vc1.txt' into table varchar_union1_vc1;
+load data local inpath '../../data/files/vc1.txt' into table varchar_union1_vc2;
+load data local inpath '../../data/files/vc1.txt' into table varchar_union1_str;
-- union varchar with same length varchar
select * from (
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_between_in.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_between_in.q
new file mode 100644
index 0000000000..1bc66118f8
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_between_in.q
@@ -0,0 +1,35 @@
+SET hive.vectorized.execution.enabled=true;
+
+CREATE TABLE decimal_date_test STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, CAST(CAST((CAST(cint AS BIGINT) *ctinyint) AS TIMESTAMP) AS DATE) AS cdate FROM alltypesorc ORDER BY cdate;
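+-- (the CTAS above derives decimal and date columns from cdouble/cint so the
+-- vectorized IN/BETWEEN paths below have non-trivial decimal and date data)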
+
+EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate;
+
+EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE));
+
+EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1;
+
+EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568);
+
+EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate;
+
+EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate;
+
+EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1;
+
+EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351;
+
+SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate;
+
+SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE));
+
+SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1;
+
+SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568);
+
+SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate;
+
+SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate;
+
+SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1;
+
+SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_coalesce.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_coalesce.q
new file mode 100644
index 0000000000..052ab716be
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_coalesce.q
@@ -0,0 +1,32 @@
+SET hive.vectorized.execution.enabled=true;
+EXPLAIN SELECT cdouble, cstring1, cint, cfloat, csmallint, coalesce(cdouble, cstring1, cint, cfloat, csmallint)
+FROM alltypesorc
+WHERE (cdouble IS NULL) LIMIT 10;
+
+SELECT cdouble, cstring1, cint, cfloat, csmallint, coalesce(cdouble, cstring1, cint, cfloat, csmallint)
+FROM alltypesorc
+WHERE (cdouble IS NULL) LIMIT 10;
+
+EXPLAIN SELECT ctinyint, cdouble, cint, coalesce(ctinyint+10, (cdouble+log2(cint)), 0)
+FROM alltypesorc
+WHERE (ctinyint IS NULL) LIMIT 10;
+
+SELECT ctinyint, cdouble, cint, coalesce(ctinyint+10, (cdouble+log2(cint)), 0)
+FROM alltypesorc
+WHERE (ctinyint IS NULL) LIMIT 10;
+
+EXPLAIN SELECT cfloat, cbigint, coalesce(cfloat, cbigint, 0)
+FROM alltypesorc
+WHERE (cfloat IS NULL AND cbigint IS NULL) LIMIT 10;
+
+SELECT cfloat, cbigint, coalesce(cfloat, cbigint, 0)
+FROM alltypesorc
+WHERE (cfloat IS NULL AND cbigint IS NULL) LIMIT 10;
+
+EXPLAIN SELECT ctimestamp1, ctimestamp2, coalesce(ctimestamp1, ctimestamp2)
+FROM alltypesorc
+WHERE ctimestamp1 IS NOT NULL OR ctimestamp2 IS NOT NULL LIMIT 10;
+
+SELECT ctimestamp1, ctimestamp2, coalesce(ctimestamp1, ctimestamp2)
+FROM alltypesorc
+WHERE ctimestamp1 IS NOT NULL OR ctimestamp2 IS NOT NULL LIMIT 10;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_aggregate.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_aggregate.q
new file mode 100644
index 0000000000..eb9146e95d
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_aggregate.q
@@ -0,0 +1,20 @@
+CREATE TABLE decimal_vgby STORED AS ORC AS
+ SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1,
+ CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2,
+ cint
+ FROM alltypesorc;
+
+SET hive.vectorized.execution.enabled=true;
+
+EXPLAIN SELECT cint,
+ COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1),
+ COUNT(cdecimal2), MAX(cdecimal2), MIN(cdecimal2), SUM(cdecimal2), AVG(cdecimal2), STDDEV_POP(cdecimal2), STDDEV_SAMP(cdecimal2)
+ FROM decimal_vgby
+ GROUP BY cint
+ HAVING COUNT(*) > 1;
+SELECT cint,
+ COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1),
+ COUNT(cdecimal2), MAX(cdecimal2), MIN(cdecimal2), SUM(cdecimal2), AVG(cdecimal2), STDDEV_POP(cdecimal2), STDDEV_SAMP(cdecimal2)
+ FROM decimal_vgby
+ GROUP BY cint
+ HAVING COUNT(*) > 1; \ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_cast.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_cast.q
new file mode 100644
index 0000000000..ea7a5b817d
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_cast.q
@@ -0,0 +1,5 @@
+SET hive.vectorized.execution.enabled=true;
+
+EXPLAIN SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS DECIMAL(20,10)), CAST(cint AS DECIMAL(23,14)), CAST(cboolean1 AS DECIMAL(5,2)), CAST(ctimestamp1 AS DECIMAL(15,0)) FROM alltypesorc WHERE cdouble IS NOT NULL AND cint IS NOT NULL AND cboolean1 IS NOT NULL AND ctimestamp1 IS NOT NULL LIMIT 10;
+
+SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS DECIMAL(20,10)), CAST(cint AS DECIMAL(23,14)), CAST(cboolean1 AS DECIMAL(5,2)), CAST(ctimestamp1 AS DECIMAL(15,0)) FROM alltypesorc WHERE cdouble IS NOT NULL AND cint IS NOT NULL AND cboolean1 IS NOT NULL AND ctimestamp1 IS NOT NULL LIMIT 10;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_expressions.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_expressions.q
new file mode 100644
index 0000000000..a74b17bd12
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_expressions.q
@@ -0,0 +1,5 @@
+CREATE TABLE decimal_test STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc;
+SET hive.vectorized.execution.enabled=true;
+EXPLAIN SELECT cdecimal1 + cdecimal2, cdecimal1 - (2*cdecimal2), ((cdecimal1+2.34)/cdecimal2), (cdecimal1 * (cdecimal2/3.4)), cdecimal1 % 10, CAST(cdecimal1 AS INT), CAST(cdecimal2 AS SMALLINT), CAST(cdecimal2 AS TINYINT), CAST(cdecimal1 AS BIGINT), CAST (cdecimal1 AS BOOLEAN), CAST(cdecimal2 AS DOUBLE), CAST(cdecimal1 AS FLOAT), CAST(cdecimal2 AS STRING), CAST(cdecimal1 AS TIMESTAMP) FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL LIMIT 10;
+
+SELECT cdecimal1 + cdecimal2, cdecimal1 - (2*cdecimal2), ((cdecimal1+2.34)/cdecimal2), (cdecimal1 * (cdecimal2/3.4)), cdecimal1 % 10, CAST(cdecimal1 AS INT), CAST(cdecimal2 AS SMALLINT), CAST(cdecimal2 AS TINYINT), CAST(cdecimal1 AS BIGINT), CAST (cdecimal1 AS BOOLEAN), CAST(cdecimal2 AS DOUBLE), CAST(cdecimal1 AS FLOAT), CAST(cdecimal2 AS STRING), CAST(cdecimal1 AS TIMESTAMP) FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL LIMIT 10;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q
new file mode 100644
index 0000000000..d8b3d1a9ac
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_mapjoin.q
@@ -0,0 +1,19 @@
+CREATE TABLE decimal_mapjoin STORED AS ORC AS
+ SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1,
+ CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2,
+ cint
+ FROM alltypesorc;
+
+SET hive.auto.convert.join=true;
+SET hive.auto.convert.join.nonconditionaltask=true;
+SET hive.auto.convert.join.nonconditionaltask.size=1000000000;
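+-- (the oversized threshold above presumably guarantees the self-join below is
+-- converted to a map join, so the vectorized map-join path is exercised)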
+SET hive.vectorized.execution.enabled=true;
+
+EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
+ FROM decimal_mapjoin l
+ JOIN decimal_mapjoin r ON l.cint = r.cint
+ WHERE l.cint = 6981;
+SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
+ FROM decimal_mapjoin l
+ JOIN decimal_mapjoin r ON l.cint = r.cint
+ WHERE l.cint = 6981; \ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q
new file mode 100644
index 0000000000..6e2c0b1fd4
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_decimal_math_funcs.q
@@ -0,0 +1,77 @@
+CREATE TABLE decimal_test STORED AS ORC AS SELECT cbigint, cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2 FROM alltypesorc;
+SET hive.vectorized.execution.enabled=true;
+
+-- Test math functions in vectorized mode to verify they run correctly end-to-end.
+
+explain
+select
+ cdecimal1
+ ,Round(cdecimal1, 2)
+ ,Round(cdecimal1)
+ ,Floor(cdecimal1)
+ ,Ceil(cdecimal1)
+ ,Exp(cdecimal1)
+ ,Ln(cdecimal1)
+ ,Log10(cdecimal1)
+ -- Use log2 as a representative function to test all input types.
+ ,Log2(cdecimal1)
+ -- Use 15601.0 to test zero handling, as there are no zeroes in the table
+ ,Log2(cdecimal1 - 15601.0)
+ ,Log(2.0, cdecimal1)
+ ,Pow(log2(cdecimal1), 2.0)
+ ,Power(log2(cdecimal1), 2.0)
+ ,Sqrt(cdecimal1)
+ ,Abs(cdecimal1)
+ ,Sin(cdecimal1)
+ ,Asin(cdecimal1)
+ ,Cos(cdecimal1)
+ ,ACos(cdecimal1)
+ ,Atan(cdecimal1)
+ ,Degrees(cdecimal1)
+ ,Radians(cdecimal1)
+ ,Positive(cdecimal1)
+ ,Negative(cdecimal1)
+ ,Sign(cdecimal1)
+ -- Test nesting
+ ,cos(-sin(log(cdecimal1)) + 3.14159)
+from decimal_test
+-- limit output to a reasonably small number of rows
+where cbigint % 500 = 0
+-- test use of a math function in the WHERE clause
+and sin(cdecimal1) >= -1.0;
+
+select
+ cdecimal1
+ ,Round(cdecimal1, 2)
+ ,Round(cdecimal1)
+ ,Floor(cdecimal1)
+ ,Ceil(cdecimal1)
+ ,Exp(cdecimal1)
+ ,Ln(cdecimal1)
+ ,Log10(cdecimal1)
+ -- Use log2 as a representative function to test all input types.
+ ,Log2(cdecimal1)
+ -- Use 15601.0 to test zero handling, as there are no zeroes in the table
+ ,Log2(cdecimal1 - 15601.0)
+ ,Log(2.0, cdecimal1)
+ ,Pow(log2(cdecimal1), 2.0)
+ ,Power(log2(cdecimal1), 2.0)
+ ,Sqrt(cdecimal1)
+ ,Abs(cdecimal1)
+ ,Sin(cdecimal1)
+ ,Asin(cdecimal1)
+ ,Cos(cdecimal1)
+ ,ACos(cdecimal1)
+ ,Atan(cdecimal1)
+ ,Degrees(cdecimal1)
+ ,Radians(cdecimal1)
+ ,Positive(cdecimal1)
+ ,Negative(cdecimal1)
+ ,Sign(cdecimal1)
+ -- Test nesting
+ ,cos(-sin(log(cdecimal1)) + 3.14159)
+from decimal_test
+-- limit output to a reasonably small number of rows
+where cbigint % 500 = 0
+-- test use of a math function in the WHERE clause
+and sin(cdecimal1) >= -1.0;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_left_outer_join.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_left_outer_join.q
new file mode 100644
index 0000000000..6e96690497
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_left_outer_join.q
@@ -0,0 +1,21 @@
+set hive.vectorized.execution.enabled=true;
+set hive.auto.convert.join=true;
+explain
+select count(*) from (select c.ctinyint
+from alltypesorc c
+left outer join alltypesorc cd
+ on cd.cint = c.cint
+left outer join alltypesorc hd
+ on hd.ctinyint = c.ctinyint
+) t1
+;
+select count(*) from (select c.ctinyint
+from alltypesorc c
+left outer join alltypesorc cd
+ on cd.cint = c.cint
+left outer join alltypesorc hd
+ on hd.ctinyint = c.ctinyint
+) t1;
+
+set hive.auto.convert.join=false;
+set hive.vectorized.execution.enabled=false;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_non_string_partition.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_non_string_partition.q
new file mode 100644
index 0000000000..fc1dc6d3b8
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vector_non_string_partition.q
@@ -0,0 +1,17 @@
+SET hive.vectorized.execution.enabled=true;
+CREATE TABLE non_string_part(cint INT, cstring1 STRING, cdouble DOUBLE, ctimestamp1 TIMESTAMP) PARTITIONED BY (ctinyint tinyint) STORED AS ORC;
+SET hive.exec.dynamic.partition.mode=nonstrict;
+SET hive.exec.dynamic.partition=true;
+
+INSERT OVERWRITE TABLE non_string_part PARTITION(ctinyint) SELECT cint, cstring1, cdouble, ctimestamp1, ctinyint FROM alltypesorc
+WHERE ctinyint IS NULL AND cdouble IS NOT NULL ORDER BY cdouble;
+
+SHOW PARTITIONS non_string_part;
+
+EXPLAIN SELECT cint, ctinyint FROM non_string_part WHERE cint > 0 ORDER BY cint LIMIT 10;
+
+SELECT cint, ctinyint FROM non_string_part WHERE cint > 0 ORDER BY cint LIMIT 10;
+
+EXPLAIN SELECT cint, cstring1 FROM non_string_part WHERE cint > 0 ORDER BY cint, cstring1 LIMIT 10;
+
+SELECT cint, cstring1 FROM non_string_part WHERE cint > 0 ORDER BY cint, cstring1 LIMIT 10;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_0.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_0.q
new file mode 100644
index 0000000000..39fba7d1ac
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_0.q
@@ -0,0 +1,27 @@
+SET hive.vectorized.execution.enabled=true;
+SELECT AVG(cbigint),
+ (-(AVG(cbigint))),
+ (-6432 + AVG(cbigint)),
+ STDDEV_POP(cbigint),
+ (-((-6432 + AVG(cbigint)))),
+ ((-((-6432 + AVG(cbigint)))) + (-6432 + AVG(cbigint))),
+ VAR_SAMP(cbigint),
+ (-((-6432 + AVG(cbigint)))),
+ (-6432 + (-((-6432 + AVG(cbigint))))),
+ (-((-6432 + AVG(cbigint)))),
+ ((-((-6432 + AVG(cbigint)))) / (-((-6432 + AVG(cbigint))))),
+ COUNT(*),
+ SUM(cfloat),
+ (VAR_SAMP(cbigint) % STDDEV_POP(cbigint)),
+ (-(VAR_SAMP(cbigint))),
+ ((-((-6432 + AVG(cbigint)))) * (-(AVG(cbigint)))),
+ MIN(ctinyint),
+ (-(MIN(ctinyint)))
+FROM alltypesorc
+WHERE (((cstring2 LIKE '%b%')
+ OR ((79.553 != cint)
+ OR (cbigint < cdouble)))
+ OR ((ctinyint >= csmallint)
+ AND ((cboolean2 = 1)
+ AND (3569 = ctinyint))));
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_1.q
new file mode 100644
index 0000000000..745aa06f2f
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_1.q
@@ -0,0 +1,21 @@
+SET hive.vectorized.execution.enabled=true;
+SELECT VAR_POP(ctinyint),
+ (VAR_POP(ctinyint) / -26.28),
+ SUM(cfloat),
+ (-1.389 + SUM(cfloat)),
+ (SUM(cfloat) * (-1.389 + SUM(cfloat))),
+ MAX(ctinyint),
+ (-((SUM(cfloat) * (-1.389 + SUM(cfloat))))),
+ MAX(cint),
+ (MAX(cint) * 79.553),
+ VAR_SAMP(cdouble),
+ (10.175 % (-((SUM(cfloat) * (-1.389 + SUM(cfloat)))))),
+ COUNT(cint),
+ (-563 % MAX(cint))
+FROM alltypesorc
+WHERE (((cdouble > ctinyint)
+ AND (cboolean2 > 0))
+ OR ((cbigint < ctinyint)
+ OR ((cint > cbigint)
+ OR (cboolean1 < 0))));
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_10.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_10.q
new file mode 100644
index 0000000000..720f38d02b
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_10.q
@@ -0,0 +1,24 @@
+SET hive.vectorized.execution.enabled=true;
+SELECT cdouble,
+ ctimestamp1,
+ ctinyint,
+ cboolean1,
+ cstring1,
+ (-(cdouble)),
+ (cdouble + csmallint),
+ ((cdouble + csmallint) % 33),
+ (-(cdouble)),
+ (ctinyint % cdouble),
+ (ctinyint % csmallint),
+ (-(cdouble)),
+ (cbigint * (ctinyint % csmallint)),
+ (9763215.5639 - (cdouble + csmallint)),
+ (-((-(cdouble))))
+FROM alltypesorc
+WHERE (((cstring2 <= '10')
+ OR ((ctinyint > cdouble)
+ AND (-5638.15 >= ctinyint)))
+ OR ((cdouble > 6981)
+ AND ((csmallint = 9763215.5639)
+ OR (cstring1 LIKE '%a'))));
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_11.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_11.q
new file mode 100644
index 0000000000..329ed28948
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_11.q
@@ -0,0 +1,15 @@
+SET hive.vectorized.execution.enabled=true;
+SELECT cstring1,
+ cboolean1,
+ cdouble,
+ ctimestamp1,
+ (-3728 * csmallint),
+ (cdouble - 9763215.5639),
+ (-(cdouble)),
+ ((-(cdouble)) + 6981),
+ (cdouble * -5638.15)
+FROM alltypesorc
+WHERE ((cstring2 = cstring1)
+ OR ((ctimestamp1 IS NULL)
+ AND (cstring1 LIKE '%a')));
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_12.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_12.q
new file mode 100644
index 0000000000..fef62fc7a0
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_12.q
@@ -0,0 +1,32 @@
+SET hive.vectorized.execution.enabled=true;
+SELECT cbigint,
+ cboolean1,
+ cstring1,
+ ctimestamp1,
+ cdouble,
+ (-6432 * cdouble),
+ (-(cbigint)),
+ COUNT(cbigint),
+ (cbigint * COUNT(cbigint)),
+ STDDEV_SAMP(cbigint),
+ ((-6432 * cdouble) / -6432),
+ (-(((-6432 * cdouble) / -6432))),
+ AVG(cdouble),
+ (-((-6432 * cdouble))),
+ (-5638.15 + cbigint),
+ SUM(cbigint),
+ (AVG(cdouble) / (-6432 * cdouble)),
+ AVG(cdouble),
+ (-((-(((-6432 * cdouble) / -6432))))),
+ (((-6432 * cdouble) / -6432) + (-((-6432 * cdouble)))),
+ STDDEV_POP(cdouble)
+FROM alltypesorc
+WHERE (((ctimestamp1 IS NULL)
+ AND ((cboolean1 >= cboolean2)
+ OR (ctinyint != csmallint)))
+ AND ((cstring1 LIKE '%a')
+ OR ((cboolean2 <= 1)
+ AND (cbigint >= csmallint))))
+GROUP BY cbigint, cboolean1, cstring1, ctimestamp1, cdouble
+ORDER BY ctimestamp1, cdouble, cbigint, cstring1;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_13.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_13.q
new file mode 100644
index 0000000000..fad2585d22
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_13.q
@@ -0,0 +1,31 @@
+SET hive.vectorized.execution.enabled=true;
+SELECT cboolean1,
+ ctinyint,
+ ctimestamp1,
+ cfloat,
+ cstring1,
+ (-(ctinyint)),
+ MAX(ctinyint),
+ ((-(ctinyint)) + MAX(ctinyint)),
+ SUM(cfloat),
+ (SUM(cfloat) * ((-(ctinyint)) + MAX(ctinyint))),
+ (-(SUM(cfloat))),
+ (79.553 * cfloat),
+ STDDEV_POP(cfloat),
+ (-(SUM(cfloat))),
+ STDDEV_POP(ctinyint),
+ (((-(ctinyint)) + MAX(ctinyint)) - 10.175),
+ (-((-(SUM(cfloat))))),
+ (-26.28 / (-((-(SUM(cfloat)))))),
+ MAX(cfloat),
+ ((SUM(cfloat) * ((-(ctinyint)) + MAX(ctinyint))) / ctinyint),
+ MIN(ctinyint)
+FROM alltypesorc
+WHERE (((cfloat < 3569)
+ AND ((10.175 >= cdouble)
+ AND (cboolean1 != 1)))
+ OR ((ctimestamp1 > -29071)
+ AND ((ctimestamp2 != -29071)
+ AND (ctinyint < 9763215.5639))))
+GROUP BY cboolean1, ctinyint, ctimestamp1, cfloat, cstring1;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_14.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_14.q
new file mode 100644
index 0000000000..a121c64edc
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_14.q
@@ -0,0 +1,33 @@
+SET hive.vectorized.execution.enabled=true;
+SELECT ctimestamp1,
+ cfloat,
+ cstring1,
+ cboolean1,
+ cdouble,
+ (-26.28 + cdouble),
+ (-((-26.28 + cdouble))),
+ STDDEV_SAMP((-((-26.28 + cdouble)))),
+ (cfloat * -26.28),
+ MAX(cfloat),
+ (-(cfloat)),
+ (-(MAX(cfloat))),
+ ((-((-26.28 + cdouble))) / 10.175),
+ STDDEV_POP(cfloat),
+ COUNT(cfloat),
+ (-(((-((-26.28 + cdouble))) / 10.175))),
+ (-1.389 % STDDEV_SAMP((-((-26.28 + cdouble))))),
+ (cfloat - cdouble),
+ VAR_POP(cfloat),
+ (VAR_POP(cfloat) % 10.175),
+ VAR_SAMP(cfloat),
+ (-((cfloat - cdouble)))
+FROM alltypesorc
+WHERE (((ctinyint <= cbigint)
+ AND ((cint <= cdouble)
+ OR (ctimestamp2 < ctimestamp1)))
+ AND ((cdouble < ctinyint)
+ AND ((cbigint > -257)
+ OR (cfloat < cint))))
+GROUP BY ctimestamp1, cfloat, cstring1, cboolean1, cdouble
+ORDER BY cstring1, cfloat, cdouble, ctimestamp1;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_15.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_15.q
new file mode 100644
index 0000000000..7daad0b2c5
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_15.q
@@ -0,0 +1,31 @@
+SET hive.vectorized.execution.enabled=true;
+SELECT cfloat,
+ cboolean1,
+ cdouble,
+ cstring1,
+ ctinyint,
+ cint,
+ ctimestamp1,
+ STDDEV_SAMP(cfloat),
+ (-26.28 - cint),
+ MIN(cdouble),
+ (cdouble * 79.553),
+ (33 % cfloat),
+ STDDEV_SAMP(ctinyint),
+ VAR_POP(ctinyint),
+ (-23 % cdouble),
+ (-(ctinyint)),
+ VAR_SAMP(cint),
+ (cint - cfloat),
+ (-23 % ctinyint),
+ (-((-26.28 - cint))),
+ STDDEV_POP(cint)
+FROM alltypesorc
+WHERE (((cstring2 LIKE '%ss%')
+ OR (cstring1 LIKE '10%'))
+ OR ((cint >= -75)
+ AND ((ctinyint = csmallint)
+ AND (cdouble >= -3728))))
+GROUP BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1
+ORDER BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_16.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_16.q
new file mode 100644
index 0000000000..39a9402680
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_16.q
@@ -0,0 +1,20 @@
+SET hive.vectorized.execution.enabled=true;
+SELECT cstring1,
+ cdouble,
+ ctimestamp1,
+ (cdouble - 9763215.5639),
+ (-((cdouble - 9763215.5639))),
+ COUNT(cdouble),
+ STDDEV_SAMP(cdouble),
+ (-(STDDEV_SAMP(cdouble))),
+ (STDDEV_SAMP(cdouble) * COUNT(cdouble)),
+ MIN(cdouble),
+ (9763215.5639 / cdouble),
+ (COUNT(cdouble) / -1.389),
+ STDDEV_SAMP(cdouble)
+FROM alltypesorc
+WHERE ((cstring2 LIKE '%b%')
+ AND ((cdouble >= -1.389)
+ OR (cstring1 < 'a')))
+GROUP BY cstring1, cdouble, ctimestamp1;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_2.q
new file mode 100644
index 0000000000..b8647a4344
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_2.q
@@ -0,0 +1,23 @@
+SET hive.vectorized.execution.enabled=true;
+SELECT AVG(csmallint),
+ (AVG(csmallint) % -563),
+ (AVG(csmallint) + 762),
+ SUM(cfloat),
+ VAR_POP(cbigint),
+ (-(VAR_POP(cbigint))),
+ (SUM(cfloat) - AVG(csmallint)),
+ COUNT(*),
+ (-((SUM(cfloat) - AVG(csmallint)))),
+ (VAR_POP(cbigint) - 762),
+ MIN(ctinyint),
+ ((-(VAR_POP(cbigint))) + MIN(ctinyint)),
+ AVG(cdouble),
+ (((-(VAR_POP(cbigint))) + MIN(ctinyint)) - SUM(cfloat))
+FROM alltypesorc
+WHERE (((ctimestamp1 < ctimestamp2)
+ AND ((cstring2 LIKE 'b%')
+ AND (cfloat <= -5638.15)))
+ OR ((cdouble < ctinyint)
+ AND ((-10669 != ctimestamp2)
+ OR (359 > cint))));
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_3.q
new file mode 100644
index 0000000000..1d53994519
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_3.q
@@ -0,0 +1,25 @@
+SET hive.vectorized.execution.enabled=true;
+SELECT STDDEV_SAMP(csmallint),
+ (STDDEV_SAMP(csmallint) - 10.175),
+ STDDEV_POP(ctinyint),
+ (STDDEV_SAMP(csmallint) * (STDDEV_SAMP(csmallint) - 10.175)),
+ (-(STDDEV_POP(ctinyint))),
+ (STDDEV_SAMP(csmallint) % 79.553),
+ (-((STDDEV_SAMP(csmallint) * (STDDEV_SAMP(csmallint) - 10.175)))),
+ STDDEV_SAMP(cfloat),
+ (-(STDDEV_SAMP(csmallint))),
+ SUM(cfloat),
+ ((-((STDDEV_SAMP(csmallint) * (STDDEV_SAMP(csmallint) - 10.175)))) / (STDDEV_SAMP(csmallint) - 10.175)),
+ (-((STDDEV_SAMP(csmallint) - 10.175))),
+ AVG(cint),
+ (-3728 - STDDEV_SAMP(csmallint)),
+ STDDEV_POP(cint),
+ (AVG(cint) / STDDEV_SAMP(cfloat))
+FROM alltypesorc
+WHERE (((cint <= cfloat)
+ AND ((79.553 != cbigint)
+ AND (ctimestamp2 = -29071)))
+ OR ((cbigint > cdouble)
+ AND ((79.553 <= csmallint)
+ AND (ctimestamp1 > ctimestamp2))));
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_4.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_4.q
new file mode 100644
index 0000000000..1eb324d190
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_4.q
@@ -0,0 +1,23 @@
+SET hive.vectorized.execution.enabled=true;
+SELECT SUM(cint),
+ (SUM(cint) * -563),
+ (-3728 + SUM(cint)),
+ STDDEV_POP(cdouble),
+ (-(STDDEV_POP(cdouble))),
+ AVG(cdouble),
+ ((SUM(cint) * -563) % SUM(cint)),
+ (((SUM(cint) * -563) % SUM(cint)) / AVG(cdouble)),
+ VAR_POP(cdouble),
+ (-((((SUM(cint) * -563) % SUM(cint)) / AVG(cdouble)))),
+ ((-3728 + SUM(cint)) - (SUM(cint) * -563)),
+ MIN(ctinyint),
+ MIN(ctinyint),
+ (MIN(ctinyint) * (-((((SUM(cint) * -563) % SUM(cint)) / AVG(cdouble)))))
+FROM alltypesorc
+WHERE (((csmallint >= cint)
+ OR ((-89010 >= ctinyint)
+ AND (cdouble > 79.553)))
+ OR ((-563 != cbigint)
+ AND ((ctinyint != cbigint)
+ OR (-3728 >= cdouble))));
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_5.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_5.q
new file mode 100644
index 0000000000..826b20fd4f
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_5.q
@@ -0,0 +1,20 @@
+SET hive.vectorized.execution.enabled=true;
+SELECT MAX(csmallint),
+ (MAX(csmallint) * -75),
+ COUNT(*),
+ ((MAX(csmallint) * -75) / COUNT(*)),
+ (6981 * MAX(csmallint)),
+ MIN(csmallint),
+ (-(MIN(csmallint))),
+ (197 % ((MAX(csmallint) * -75) / COUNT(*))),
+ SUM(cint),
+ MAX(ctinyint),
+ (-(MAX(ctinyint))),
+ ((-(MAX(ctinyint))) + MAX(ctinyint))
+FROM alltypesorc
+WHERE (((cboolean2 IS NOT NULL)
+ AND (cstring1 LIKE '%b%'))
+ OR ((ctinyint = cdouble)
+ AND ((ctimestamp2 IS NOT NULL)
+ AND (cstring2 LIKE 'a'))));
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_6.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_6.q
new file mode 100644
index 0000000000..2b59f10ed8
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_6.q
@@ -0,0 +1,21 @@
+SET hive.vectorized.execution.enabled=true;
+SELECT cboolean1,
+ cfloat,
+ cstring1,
+ (988888 * csmallint),
+ (-(csmallint)),
+ (-(cfloat)),
+ (-26.28 / cfloat),
+ (cfloat * 359),
+ (cint % ctinyint),
+ (-(cdouble)),
+ (ctinyint - -75),
+ (762 * (cint % ctinyint))
+FROM alltypesorc
+WHERE ((ctinyint != 0)
+ AND ((((cboolean1 <= 0)
+ AND (cboolean2 >= cboolean1))
+ OR ((cbigint IS NOT NULL)
+ AND ((cstring2 LIKE '%a')
+ OR (cfloat <= -257))))));
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_7.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_7.q
new file mode 100644
index 0000000000..20c1148659
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_7.q
@@ -0,0 +1,25 @@
+SET hive.vectorized.execution.enabled=true;
+SELECT cboolean1,
+ cbigint,
+ csmallint,
+ ctinyint,
+ ctimestamp1,
+ cstring1,
+ (cbigint + cbigint),
+ (csmallint % -257),
+ (-(csmallint)),
+ (-(ctinyint)),
+ ((-(ctinyint)) + 17),
+ (cbigint * (-(csmallint))),
+ (cint % csmallint),
+ (-(ctinyint)),
+ ((-(ctinyint)) % ctinyint)
+FROM alltypesorc
+WHERE ((ctinyint != 0)
+ AND (((ctimestamp1 <= 0)
+ OR ((ctinyint = cint)
+ OR (cstring2 LIKE 'ss')))
+ AND ((988888 < cdouble)
+ OR ((ctimestamp2 > -29071)
+ AND (3569 >= cdouble)))));
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_8.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_8.q
new file mode 100644
index 0000000000..98b3385f16
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_8.q
@@ -0,0 +1,23 @@
+SET hive.vectorized.execution.enabled=true;
+SELECT ctimestamp1,
+ cdouble,
+ cboolean1,
+ cstring1,
+ cfloat,
+ (-(cdouble)),
+ (-5638.15 - cdouble),
+ (cdouble * -257),
+ (cint + cfloat),
+ ((-(cdouble)) + cbigint),
+ (-(cdouble)),
+ (-1.389 - cfloat),
+ (-(cfloat)),
+ ((-5638.15 - cdouble) + (cint + cfloat))
+FROM alltypesorc
+WHERE (((cstring2 IS NOT NULL)
+ AND ((ctimestamp1 <= -29071)
+ AND (ctimestamp2 != 16558)))
+ OR ((cfloat < -6432)
+ OR ((cboolean1 IS NOT NULL)
+ AND (cdouble = 988888))));
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_9.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_9.q
new file mode 100644
index 0000000000..252c426d64
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_9.q
@@ -0,0 +1,24 @@
+SET hive.vectorized.execution.enabled=true;
+SELECT cfloat,
+ cstring1,
+ cint,
+ ctimestamp1,
+ cdouble,
+ cbigint,
+ (cfloat / ctinyint),
+ (cint % cbigint),
+ (-(cdouble)),
+ (cdouble + (cfloat / ctinyint)),
+ (cdouble / cint),
+ (-((-(cdouble)))),
+ (9763215.5639 % cbigint),
+ (2563.58 + (-((-(cdouble)))))
+FROM alltypesorc
+WHERE (((cbigint > -23)
+ AND ((cdouble != 988888)
+ OR (cint > -863.257)))
+ AND ((ctinyint >= 33)
+ OR ((csmallint >= cbigint)
+ OR (cfloat = cdouble))))
+ORDER BY cbigint, cfloat;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_decimal_date.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_decimal_date.q
new file mode 100644
index 0000000000..2b82a5aa04
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_decimal_date.q
@@ -0,0 +1,4 @@
+CREATE TABLE date_decimal_test STORED AS ORC AS SELECT cint, cdouble, CAST (CAST (cint AS TIMESTAMP) AS DATE) AS cdate, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal FROM alltypesorc;
+SET hive.vectorized.execution.enabled=true;
+EXPLAIN SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10;
+SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_div0.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_div0.q
new file mode 100644
index 0000000000..69e388a28e
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_div0.q
@@ -0,0 +1,24 @@
+SET hive.vectorized.execution.enabled = true;
+
+-- TODO: add more stuff here after HIVE-5918 is fixed, such as cbigint and constants
+explain
+select cdouble / 0.0 from alltypesorc limit 100;
+select cdouble / 0.0 from alltypesorc limit 100;
+
+-- There are no zeros in the table, but there is 988888, so use it as zero
+
+-- TODO: add more stuff here after HIVE-5918 is fixed, such as cbigint and constants as numerators
+explain
+select (cbigint - 988888L) as s1, cdouble / (cbigint - 988888L) as s2, 1.2 / (cbigint - 988888L)
+from alltypesorc where cbigint > 0 and cbigint < 100000000 order by s1, s2 limit 100;
+select (cbigint - 988888L) as s1, cdouble / (cbigint - 988888L) as s2, 1.2 / (cbigint - 988888L)
+from alltypesorc where cbigint > 0 and cbigint < 100000000 order by s1, s2 limit 100;
+
+-- There are no zeros in the table, but there is -200.0, so use it as zero
+
+explain
+select (cdouble + 200.0) as s1, cbigint / (cdouble + 200.0) as s2, (cdouble + 200.0) / (cdouble + 200.0), cbigint / (cdouble + 200.0), 1 / (cdouble + 200.0), 1.2 / (cdouble + 200.0)
+from alltypesorc where cdouble >= -500 and cdouble < -199 order by s1, s2 limit 100;
+select (cdouble + 200.0) as s1, cbigint / (cdouble + 200.0) as s2, (cdouble + 200.0) / (cdouble + 200.0), cbigint / (cdouble + 200.0), 1 / (cdouble + 200.0), 1.2 / (cdouble + 200.0)
+from alltypesorc where cdouble >= -500 and cdouble < -199 order by s1, s2 limit 100;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_limit.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_limit.q
new file mode 100644
index 0000000000..094a8d26a3
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_limit.q
@@ -0,0 +1,37 @@
+SET hive.vectorized.execution.enabled=true;
+explain SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7;
+SELECT cbigint, cdouble FROM alltypesorc WHERE cbigint < cdouble and cint > 0 limit 7;
+
+set hive.optimize.reducededuplication.min.reducer=1;
+set hive.limit.pushdown.memory.usage=0.3f;
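+-- (min.reducer=1 and a non-zero pushdown memory fraction presumably enable the
+-- top-N limit pushdown paths exercised below)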
+
+-- HIVE-3562: some limits can be pushed down to the map stage (parts copied from limit_pushdown)
+
+explain
+select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20;
+select ctinyint,cdouble,csmallint from alltypesorc where ctinyint is not null order by ctinyint,cdouble limit 20;
+
+-- deduped RS
+explain
+select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20;
+select ctinyint,avg(cdouble + 1) from alltypesorc group by ctinyint order by ctinyint limit 20;
+
+-- distincts
+explain
+select distinct(ctinyint) from alltypesorc limit 20;
+select distinct(ctinyint) from alltypesorc limit 20;
+
+explain
+select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint limit 20;
+select ctinyint, count(distinct(cdouble)) from alltypesorc group by ctinyint limit 20;
+
+-- limit zero
+explain
+select ctinyint,cdouble from alltypesorc order by ctinyint limit 0;
+select ctinyint,cdouble from alltypesorc order by ctinyint limit 0;
+
+-- 2MR (applied to last RS)
+explain
+select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20;
+select cdouble, sum(ctinyint) as sum from alltypesorc where ctinyint is not null group by cdouble order by sum, cdouble limit 20;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_nested_udf.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_nested_udf.q
new file mode 100644
index 0000000000..bb50f9b853
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_nested_udf.q
@@ -0,0 +1,3 @@
+SET hive.vectorized.execution.enabled=true;
+SELECT SUM(abs(ctinyint)) from alltypesorc;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_not.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_not.q
new file mode 100644
index 0000000000..bfd3dd7221
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_not.q
@@ -0,0 +1,27 @@
+SET hive.vectorized.execution.enabled=true;
+SELECT AVG(cbigint),
+ (-(AVG(cbigint))),
+ (-6432 + AVG(cbigint)),
+ STDDEV_POP(cbigint),
+ (-((-6432 + AVG(cbigint)))),
+ ((-((-6432 + AVG(cbigint)))) + (-6432 + AVG(cbigint))),
+ VAR_SAMP(cbigint),
+ (-((-6432 + AVG(cbigint)))),
+ (-6432 + (-((-6432 + AVG(cbigint))))),
+ (-((-6432 + AVG(cbigint)))),
+ ((-((-6432 + AVG(cbigint)))) / (-((-6432 + AVG(cbigint))))),
+ COUNT(*),
+ SUM(cfloat),
+ (VAR_SAMP(cbigint) % STDDEV_POP(cbigint)),
+ (-(VAR_SAMP(cbigint))),
+ ((-((-6432 + AVG(cbigint)))) * (-(AVG(cbigint)))),
+ MIN(ctinyint),
+ (-(MIN(ctinyint)))
+FROM alltypesorc
+WHERE (((cstring2 LIKE '%b%')
+ OR ((79.553 != cint)
+ OR (NOT(cbigint >= cdouble))))
+ OR ((ctinyint >= csmallint)
+ AND (NOT ((cboolean2 != 1)
+ OR (3569 != ctinyint)))));
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_part.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_part.q
new file mode 100644
index 0000000000..0e34585b5a
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_part.q
@@ -0,0 +1,7 @@
+SET hive.vectorized.execution.enabled=true;
+CREATE TABLE alltypesorc_part(ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cfloat float, cdouble double, cstring1 string, cstring2 string, ctimestamp1 timestamp, ctimestamp2 timestamp, cboolean1 boolean, cboolean2 boolean) partitioned by (ds string) STORED AS ORC;
+insert overwrite table alltypesorc_part partition (ds='2011') select * from alltypesorc limit 100;
+insert overwrite table alltypesorc_part partition (ds='2012') select * from alltypesorc limit 100;
+
+select count(cdouble), cint from alltypesorc_part where ds='2011' group by cint limit 10;
+select count(*) from alltypesorc_part A join alltypesorc_part B on A.ds=B.ds;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_part_project.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_part_project.q
new file mode 100644
index 0000000000..c68ce56fd4
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_part_project.q
@@ -0,0 +1,7 @@
+SET hive.vectorized.execution.enabled=true;
+CREATE TABLE alltypesorc_part(ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cfloat float, cdouble double, cstring1 string, cstring2 string, ctimestamp1 timestamp, ctimestamp2 timestamp, cboolean1 boolean, cboolean2 boolean) partitioned by (ds string) STORED AS ORC;
+insert overwrite table alltypesorc_part partition (ds='2011') select * from alltypesorc limit 100;
+insert overwrite table alltypesorc_part partition (ds='2012') select * from alltypesorc limit 100;
+
+explain select (cdouble+2) c1 from alltypesorc_part order by c1 limit 10;
+select (cdouble+2) c1 from alltypesorc_part order by c1 limit 10;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_pushdown.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_pushdown.q
new file mode 100644
index 0000000000..bafe550478
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_pushdown.q
@@ -0,0 +1,4 @@
+SET hive.vectorized.execution.enabled=true;
+SET hive.optimize.index.filter=true;
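+-- (index filtering presumably lets eligible predicates be pushed into the ORC
+-- reader, which is the pushdown behavior this test targets)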
+explain SELECT AVG(cbigint) FROM alltypesorc WHERE cbigint < cdouble;
+SELECT AVG(cbigint) FROM alltypesorc WHERE cbigint < cdouble;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_short_regress.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_short_regress.q
new file mode 100644
index 0000000000..638a31ff41
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorization_short_regress.q
@@ -0,0 +1,852 @@
+SET hive.vectorized.execution.enabled=true;
+
+-- If you look at ql/src/test/org/apache/hadoop/hive/ql/exec/vector/util/OrcFileGenerator.java,
+-- which is the data generation class, you'll see that those values are specified in
+-- initializeFixedPointValues for each data type. When I created the queries I used those values
+-- where I needed scalar values to ensure that when the queries executed their predicates would be
+-- filtering on values that are guaranteed to exist.
+
+-- Beyond those values, all the other data in the alltypesorc file is random, but there is a
+-- specific pattern to the data that is important for coverage. In ORC, and subsequently in
+-- vectorization, there are a number of optimizations for certain data patterns: AllValues, NoNulls,
+-- RepeatingValue, RepeatingNull. The data in alltypesorc is generated such that each column has
+-- exactly 3 batches of each data pattern. This gives us coverage for the vector expression
+-- optimizations and ensures the metadata is appropriately set on the row batch objects, which are
+-- reused across batches.
+
+-- For the queries themselves, in order to efficiently cover as much of the new vectorization
+-- functionality as I could, I used a number of different techniques to create the
+-- vectorization_short_regress.q test suite, primarily equivalence classes and pairwise
+-- combinations.
+
+-- First I divided the search space into a number of dimensions such as type, aggregate function,
+-- filter operation, arithmetic operation, etc. The types were explored as equivalence classes of
+-- long, double, time, string, and bool. Also, rather than creating a very large number of small
+-- queries, the resulting vectors were grouped by compatible dimensions to reduce the number of
+-- queries.
+
+-- TargetTypeClasses: Long, Timestamp, Double, String, Bool
+-- Functions: Avg, Sum, StDevP, StDev, Var, Min, Count
+-- ArithmeticOps: Add, Multiply, Subtract, Divide
+-- FilterOps: Equal, NotEqual, GreaterThan, LessThan, LessThanOrEqual
+-- GroupBy: NoGroupByProjectAggs
+EXPLAIN SELECT AVG(cint),
+ (AVG(cint) + -3728),
+ (-((AVG(cint) + -3728))),
+ (-((-((AVG(cint) + -3728))))),
+ ((-((-((AVG(cint) + -3728))))) * (AVG(cint) + -3728)),
+ SUM(cdouble),
+ (-(AVG(cint))),
+ STDDEV_POP(cint),
+ (((-((-((AVG(cint) + -3728))))) * (AVG(cint) + -3728)) * (-((-((AVG(cint) + -3728)))))),
+ STDDEV_SAMP(csmallint),
+ (-(STDDEV_POP(cint))),
+ (STDDEV_POP(cint) - (-((-((AVG(cint) + -3728)))))),
+ ((STDDEV_POP(cint) - (-((-((AVG(cint) + -3728)))))) * STDDEV_POP(cint)),
+ VAR_SAMP(cint),
+ AVG(cfloat),
+ (10.175 - VAR_SAMP(cint)),
+ (-((10.175 - VAR_SAMP(cint)))),
+ ((-(STDDEV_POP(cint))) / -563),
+ STDDEV_SAMP(cint),
+ (-(((-(STDDEV_POP(cint))) / -563))),
+ (AVG(cint) / SUM(cdouble)),
+ MIN(ctinyint),
+ COUNT(csmallint),
+ (MIN(ctinyint) / ((-(STDDEV_POP(cint))) / -563)),
+ (-((AVG(cint) / SUM(cdouble))))
+FROM alltypesorc
+WHERE ((762 = cbigint)
+ OR ((csmallint < cfloat)
+ AND ((ctimestamp2 > -10669)
+ AND (cdouble != cint)))
+ OR (cstring1 = 'a')
+ OR ((cbigint <= -1.389)
+ AND ((cstring2 != 'a')
+ AND ((79.553 != cint)
+ AND (cboolean2 != cboolean1)))));
+SELECT AVG(cint),
+ (AVG(cint) + -3728),
+ (-((AVG(cint) + -3728))),
+ (-((-((AVG(cint) + -3728))))),
+ ((-((-((AVG(cint) + -3728))))) * (AVG(cint) + -3728)),
+ SUM(cdouble),
+ (-(AVG(cint))),
+ STDDEV_POP(cint),
+ (((-((-((AVG(cint) + -3728))))) * (AVG(cint) + -3728)) * (-((-((AVG(cint) + -3728)))))),
+ STDDEV_SAMP(csmallint),
+ (-(STDDEV_POP(cint))),
+ (STDDEV_POP(cint) - (-((-((AVG(cint) + -3728)))))),
+ ((STDDEV_POP(cint) - (-((-((AVG(cint) + -3728)))))) * STDDEV_POP(cint)),
+ VAR_SAMP(cint),
+ AVG(cfloat),
+ (10.175 - VAR_SAMP(cint)),
+ (-((10.175 - VAR_SAMP(cint)))),
+ ((-(STDDEV_POP(cint))) / -563),
+ STDDEV_SAMP(cint),
+ (-(((-(STDDEV_POP(cint))) / -563))),
+ (AVG(cint) / SUM(cdouble)),
+ MIN(ctinyint),
+ COUNT(csmallint),
+ (MIN(ctinyint) / ((-(STDDEV_POP(cint))) / -563)),
+ (-((AVG(cint) / SUM(cdouble))))
+FROM alltypesorc
+WHERE ((762 = cbigint)
+ OR ((csmallint < cfloat)
+ AND ((ctimestamp2 > -10669)
+ AND (cdouble != cint)))
+ OR (cstring1 = 'a')
+ OR ((cbigint <= -1.389)
+ AND ((cstring2 != 'a')
+ AND ((79.553 != cint)
+ AND (cboolean2 != cboolean1)))));
+
+-- TargetTypeClasses: Long, Bool, Double, String, Timestamp
+-- Functions: Max, VarP, StDevP, Avg, Min, StDev, Var
+-- ArithmeticOps: Divide, Multiply, Remainder, Subtract
+-- FilterOps: LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual, Like, RLike
+-- GroupBy: NoGroupByProjectAggs
+EXPLAIN SELECT MAX(cint),
+ (MAX(cint) / -3728),
+ (MAX(cint) * -3728),
+ VAR_POP(cbigint),
+ (-((MAX(cint) * -3728))),
+ STDDEV_POP(csmallint),
+ (-563 % (MAX(cint) * -3728)),
+ (VAR_POP(cbigint) / STDDEV_POP(csmallint)),
+ (-(STDDEV_POP(csmallint))),
+ MAX(cdouble),
+ AVG(ctinyint),
+ (STDDEV_POP(csmallint) - 10.175),
+ MIN(cint),
+ ((MAX(cint) * -3728) % (STDDEV_POP(csmallint) - 10.175)),
+ (-(MAX(cdouble))),
+ MIN(cdouble),
+ (MAX(cdouble) % -26.28),
+ STDDEV_SAMP(csmallint),
+ (-((MAX(cint) / -3728))),
+ ((-((MAX(cint) * -3728))) % (-563 % (MAX(cint) * -3728))),
+ ((MAX(cint) / -3728) - AVG(ctinyint)),
+ (-((MAX(cint) * -3728))),
+ VAR_SAMP(cint)
+FROM alltypesorc
+WHERE (((cbigint <= 197)
+ AND (cint < cbigint))
+ OR ((cdouble >= -26.28)
+ AND (csmallint > cdouble))
+ OR ((ctinyint > cfloat)
+ AND (cstring1 RLIKE '.*ss.*'))
+ OR ((cfloat > 79.553)
+ AND (cstring2 LIKE '10%')));
+SELECT MAX(cint),
+ (MAX(cint) / -3728),
+ (MAX(cint) * -3728),
+ VAR_POP(cbigint),
+ (-((MAX(cint) * -3728))),
+ STDDEV_POP(csmallint),
+ (-563 % (MAX(cint) * -3728)),
+ (VAR_POP(cbigint) / STDDEV_POP(csmallint)),
+ (-(STDDEV_POP(csmallint))),
+ MAX(cdouble),
+ AVG(ctinyint),
+ (STDDEV_POP(csmallint) - 10.175),
+ MIN(cint),
+ ((MAX(cint) * -3728) % (STDDEV_POP(csmallint) - 10.175)),
+ (-(MAX(cdouble))),
+ MIN(cdouble),
+ (MAX(cdouble) % -26.28),
+ STDDEV_SAMP(csmallint),
+ (-((MAX(cint) / -3728))),
+ ((-((MAX(cint) * -3728))) % (-563 % (MAX(cint) * -3728))),
+ ((MAX(cint) / -3728) - AVG(ctinyint)),
+ (-((MAX(cint) * -3728))),
+ VAR_SAMP(cint)
+FROM alltypesorc
+WHERE (((cbigint <= 197)
+ AND (cint < cbigint))
+ OR ((cdouble >= -26.28)
+ AND (csmallint > cdouble))
+ OR ((ctinyint > cfloat)
+ AND (cstring1 RLIKE '.*ss.*'))
+ OR ((cfloat > 79.553)
+ AND (cstring2 LIKE '10%')));
+
+-- TargetTypeClasses: String, Long, Bool, Double, Timestamp
+-- Functions: VarP, Count, Max, StDevP, StDev, Avg
+-- ArithmeticOps: Subtract, Remainder, Multiply, Add
+-- FilterOps: Equal, LessThanOrEqual, GreaterThan, Like, LessThan
+-- GroupBy: NoGroupByProjectAggs
+EXPLAIN SELECT VAR_POP(cbigint),
+ (-(VAR_POP(cbigint))),
+ (VAR_POP(cbigint) - (-(VAR_POP(cbigint)))),
+ COUNT(*),
+ (COUNT(*) % 79.553),
+ MAX(ctinyint),
+ (COUNT(*) - (-(VAR_POP(cbigint)))),
+ (-((-(VAR_POP(cbigint))))),
+ (-1 % (-(VAR_POP(cbigint)))),
+ COUNT(*),
+ (-(COUNT(*))),
+ STDDEV_POP(csmallint),
+ (-((-((-(VAR_POP(cbigint))))))),
+ (762 * (-(COUNT(*)))),
+ MAX(cint),
+ (MAX(ctinyint) + (762 * (-(COUNT(*))))),
+ ((-(VAR_POP(cbigint))) + MAX(cint)),
+ STDDEV_SAMP(cdouble),
+ ((-(COUNT(*))) % COUNT(*)),
+ COUNT(ctinyint),
+ AVG(ctinyint),
+ (-3728 % (MAX(ctinyint) + (762 * (-(COUNT(*))))))
+FROM alltypesorc
+WHERE ((ctimestamp1 = ctimestamp2)
+ OR (762 = cfloat)
+ OR (cstring1 = 'ss')
+ OR ((csmallint <= cbigint)
+ AND (1 = cboolean2))
+ OR ((cboolean1 IS NOT NULL)
+ AND ((ctimestamp2 IS NOT NULL)
+ AND (cstring2 > 'a'))));
+SELECT VAR_POP(cbigint),
+ (-(VAR_POP(cbigint))),
+ (VAR_POP(cbigint) - (-(VAR_POP(cbigint)))),
+ COUNT(*),
+ (COUNT(*) % 79.553),
+ MAX(ctinyint),
+ (COUNT(*) - (-(VAR_POP(cbigint)))),
+ (-((-(VAR_POP(cbigint))))),
+ (-1 % (-(VAR_POP(cbigint)))),
+ COUNT(*),
+ (-(COUNT(*))),
+ STDDEV_POP(csmallint),
+ (-((-((-(VAR_POP(cbigint))))))),
+ (762 * (-(COUNT(*)))),
+ MAX(cint),
+ (MAX(ctinyint) + (762 * (-(COUNT(*))))),
+ ((-(VAR_POP(cbigint))) + MAX(cint)),
+ STDDEV_SAMP(cdouble),
+ ((-(COUNT(*))) % COUNT(*)),
+ COUNT(ctinyint),
+ AVG(ctinyint),
+ (-3728 % (MAX(ctinyint) + (762 * (-(COUNT(*))))))
+FROM alltypesorc
+WHERE ((ctimestamp1 = ctimestamp2)
+ OR (762 = cfloat)
+ OR (cstring1 = 'ss')
+ OR ((csmallint <= cbigint)
+ AND (1 = cboolean2))
+ OR ((cboolean1 IS NOT NULL)
+ AND ((ctimestamp2 IS NOT NULL)
+ AND (cstring2 > 'a'))));
+
+-- TargetTypeClasses: String, Bool, Timestamp, Long, Double
+-- Functions: Avg, Max, StDev, VarP
+-- ArithmeticOps: Add, Divide, Remainder, Multiply
+-- FilterOps: LessThanOrEqual, NotEqual, GreaterThanOrEqual, LessThan, Equal
+-- GroupBy: NoGroupByProjectAggs
+EXPLAIN SELECT AVG(ctinyint),
+ (AVG(ctinyint) + 6981),
+ ((AVG(ctinyint) + 6981) + AVG(ctinyint)),
+ MAX(cbigint),
+ (((AVG(ctinyint) + 6981) + AVG(ctinyint)) / AVG(ctinyint)),
+ (-((AVG(ctinyint) + 6981))),
+ STDDEV_SAMP(cint),
+ (AVG(ctinyint) % (-((AVG(ctinyint) + 6981)))),
+ VAR_POP(cint),
+ VAR_POP(cbigint),
+ (-(MAX(cbigint))),
+ ((-(MAX(cbigint))) / STDDEV_SAMP(cint)),
+ MAX(cfloat),
+ (VAR_POP(cbigint) * -26.28)
+FROM alltypesorc
+WHERE (((ctimestamp2 <= ctimestamp1)
+ AND ((cbigint != cdouble)
+ AND ('ss' <= cstring1)))
+ OR ((csmallint < ctinyint)
+ AND (ctimestamp1 >= 0))
+ OR (cfloat = 17));
+SELECT AVG(ctinyint),
+ (AVG(ctinyint) + 6981),
+ ((AVG(ctinyint) + 6981) + AVG(ctinyint)),
+ MAX(cbigint),
+ (((AVG(ctinyint) + 6981) + AVG(ctinyint)) / AVG(ctinyint)),
+ (-((AVG(ctinyint) + 6981))),
+ STDDEV_SAMP(cint),
+ (AVG(ctinyint) % (-((AVG(ctinyint) + 6981)))),
+ VAR_POP(cint),
+ VAR_POP(cbigint),
+ (-(MAX(cbigint))),
+ ((-(MAX(cbigint))) / STDDEV_SAMP(cint)),
+ MAX(cfloat),
+ (VAR_POP(cbigint) * -26.28)
+FROM alltypesorc
+WHERE (((ctimestamp2 <= ctimestamp1)
+ AND ((cbigint != cdouble)
+ AND ('ss' <= cstring1)))
+ OR ((csmallint < ctinyint)
+ AND (ctimestamp1 >= 0))
+ OR (cfloat = 17));
+
+-- TargetTypeClasses: Timestamp, String, Long, Double, Bool
+-- Functions: Max, Avg, Min, Var, StDev, Count, StDevP, Sum
+-- ArithmeticOps: Multiply, Subtract, Add, Divide
+-- FilterOps: Like, NotEqual, LessThan, GreaterThanOrEqual, GreaterThan, RLike
+-- GroupBy: NoGroupByProjectColumns
+EXPLAIN SELECT cint,
+ cdouble,
+ ctimestamp2,
+ cstring1,
+ cboolean2,
+ ctinyint,
+ cfloat,
+ ctimestamp1,
+ csmallint,
+ cbigint,
+ (-3728 * cbigint),
+ (-(cint)),
+ (-863.257 - cint),
+ (-(csmallint)),
+ (csmallint - (-(csmallint))),
+ ((csmallint - (-(csmallint))) + (-(csmallint))),
+ (cint / cint),
+ ((-863.257 - cint) - -26.28),
+ (-(cfloat)),
+ (cdouble * -89010),
+ (ctinyint / 988888),
+ (-(ctinyint)),
+ (79.553 / ctinyint)
+FROM alltypesorc
+WHERE (((cstring1 RLIKE 'a.*')
+ AND (cstring2 LIKE '%ss%'))
+ OR ((1 != cboolean2)
+ AND ((csmallint < 79.553)
+ AND (-257 != ctinyint)))
+ OR ((cdouble > ctinyint)
+ AND (cfloat >= cint))
+ OR ((cint < cbigint)
+ AND (ctinyint > cbigint)));
+SELECT cint,
+ cdouble,
+ ctimestamp2,
+ cstring1,
+ cboolean2,
+ ctinyint,
+ cfloat,
+ ctimestamp1,
+ csmallint,
+ cbigint,
+ (-3728 * cbigint),
+ (-(cint)),
+ (-863.257 - cint),
+ (-(csmallint)),
+ (csmallint - (-(csmallint))),
+ ((csmallint - (-(csmallint))) + (-(csmallint))),
+ (cint / cint),
+ ((-863.257 - cint) - -26.28),
+ (-(cfloat)),
+ (cdouble * -89010),
+ (ctinyint / 988888),
+ (-(ctinyint)),
+ (79.553 / ctinyint)
+FROM alltypesorc
+WHERE (((cstring1 RLIKE 'a.*')
+ AND (cstring2 LIKE '%ss%'))
+ OR ((1 != cboolean2)
+ AND ((csmallint < 79.553)
+ AND (-257 != ctinyint)))
+ OR ((cdouble > ctinyint)
+ AND (cfloat >= cint))
+ OR ((cint < cbigint)
+ AND (ctinyint > cbigint)));
+
+-- TargetTypeClasses: Long, String, Double, Bool, Timestamp
+-- Functions: VarP, Var, StDev, StDevP, Max, Sum
+-- ArithmeticOps: Divide, Remainder, Subtract, Multiply
+-- FilterOps: Equal, LessThanOrEqual, LessThan, Like, GreaterThanOrEqual, NotEqual, GreaterThan
+-- GroupBy: NoGroupByProjectColumns
+EXPLAIN SELECT cint,
+ cbigint,
+ cstring1,
+ cboolean1,
+ cfloat,
+ cdouble,
+ ctimestamp2,
+ csmallint,
+ cstring2,
+ cboolean2,
+ (cint / cbigint),
+ (cbigint % 79.553),
+ (-((cint / cbigint))),
+ (10.175 % cfloat),
+ (-(cfloat)),
+ (cfloat - (-(cfloat))),
+ ((cfloat - (-(cfloat))) % -6432),
+ (cdouble * csmallint),
+ (-(cdouble)),
+ (-(cbigint)),
+ (cfloat - (cint / cbigint)),
+ (-(csmallint)),
+ (3569 % cbigint),
+ (359 - cdouble),
+ (-(csmallint))
+FROM alltypesorc
+WHERE (((197 > ctinyint)
+ AND (cint = cbigint))
+ OR (cbigint = 359)
+ OR (cboolean1 < 0)
+ OR ((cstring1 LIKE '%ss')
+ AND (cfloat <= ctinyint)));
+
+SELECT cint,
+ cbigint,
+ cstring1,
+ cboolean1,
+ cfloat,
+ cdouble,
+ ctimestamp2,
+ csmallint,
+ cstring2,
+ cboolean2,
+ (cint / cbigint),
+ (cbigint % 79.553),
+ (-((cint / cbigint))),
+ (10.175 % cfloat),
+ (-(cfloat)),
+ (cfloat - (-(cfloat))),
+ ((cfloat - (-(cfloat))) % -6432),
+ (cdouble * csmallint),
+ (-(cdouble)),
+ (-(cbigint)),
+ (cfloat - (cint / cbigint)),
+ (-(csmallint)),
+ (3569 % cbigint),
+ (359 - cdouble),
+ (-(csmallint))
+FROM alltypesorc
+WHERE (((197 > ctinyint)
+ AND (cint = cbigint))
+ OR (cbigint = 359)
+ OR (cboolean1 < 0)
+ OR ((cstring1 LIKE '%ss')
+ AND (cfloat <= ctinyint)));
+
+-- TargetTypeClasses: String, Bool, Double, Long, Timestamp
+-- Functions: Sum, Max, Avg, Var, StDevP, VarP
+-- ArithmeticOps: Add, Subtract, Divide, Multiply, Remainder
+-- FilterOps: NotEqual, GreaterThanOrEqual, Like, LessThanOrEqual, Equal, GreaterThan
+-- GroupBy: NoGroupByProjectColumns
+EXPLAIN SELECT cint,
+ cstring1,
+ cboolean2,
+ ctimestamp2,
+ cdouble,
+ cfloat,
+ cbigint,
+ csmallint,
+ cboolean1,
+ (cint + csmallint),
+ (cbigint - ctinyint),
+ (-(cbigint)),
+ (-(cfloat)),
+ ((cbigint - ctinyint) + cbigint),
+ (cdouble / cdouble),
+ (-(cdouble)),
+ ((cint + csmallint) * (-(cbigint))),
+ ((-(cdouble)) + cbigint),
+ (-1.389 / ctinyint),
+ (cbigint % cdouble),
+ (-(csmallint)),
+ (csmallint + (cint + csmallint))
+FROM alltypesorc
+WHERE (((csmallint > -26.28)
+ AND (cstring2 LIKE 'ss'))
+ OR ((cdouble <= cbigint)
+ AND ((cstring1 >= 'ss')
+ AND (cint != cdouble)))
+ OR (ctinyint = -89010)
+ OR ((cbigint <= cfloat)
+ AND (-26.28 <= csmallint)))
+ORDER BY cboolean1, cstring1, ctimestamp2, cfloat, cbigint, cstring1, cdouble, cint, csmallint, cdouble;
+SELECT cint,
+ cstring1,
+ cboolean2,
+ ctimestamp2,
+ cdouble,
+ cfloat,
+ cbigint,
+ csmallint,
+ cboolean1,
+ (cint + csmallint),
+ (cbigint - ctinyint),
+ (-(cbigint)),
+ (-(cfloat)),
+ ((cbigint - ctinyint) + cbigint),
+ (cdouble / cdouble),
+ (-(cdouble)),
+ ((cint + csmallint) * (-(cbigint))),
+ ((-(cdouble)) + cbigint),
+ (-1.389 / ctinyint),
+ (cbigint % cdouble),
+ (-(csmallint)),
+ (csmallint + (cint + csmallint))
+FROM alltypesorc
+WHERE (((csmallint > -26.28)
+ AND (cstring2 LIKE 'ss'))
+ OR ((cdouble <= cbigint)
+ AND ((cstring1 >= 'ss')
+ AND (cint != cdouble)))
+ OR (ctinyint = -89010)
+ OR ((cbigint <= cfloat)
+ AND (-26.28 <= csmallint)))
+ORDER BY cboolean1, cstring1, ctimestamp2, cfloat, cbigint, cstring1, cdouble, cint, csmallint, cdouble;
+
+-- TargetTypeClasses: Long, String, Double, Timestamp
+-- Functions: Avg, Min, StDevP, Sum, Var
+-- ArithmeticOps: Divide, Subtract, Multiply, Remainder
+-- FilterOps: GreaterThan, LessThan, LessThanOrEqual, GreaterThanOrEqual, Like
+-- GroupBy: NoGroupByProjectColumns
+EXPLAIN SELECT ctimestamp1,
+ cstring2,
+ cdouble,
+ cfloat,
+ cbigint,
+ csmallint,
+ (cbigint / 3569),
+ (-257 - csmallint),
+ (-6432 * cfloat),
+ (-(cdouble)),
+ (cdouble * 10.175),
+ ((-6432 * cfloat) / cfloat),
+ (-(cfloat)),
+ (cint % csmallint),
+ (-(cdouble)),
+ (cdouble * (-(cdouble)))
+FROM alltypesorc
+WHERE (((-1.389 >= cint)
+ AND ((csmallint < ctinyint)
+ AND (-6432 > csmallint)))
+ OR ((cdouble >= cfloat)
+ AND (cstring2 <= 'a'))
+ OR ((cstring1 LIKE 'ss%')
+ AND (10.175 > cbigint)))
+ORDER BY csmallint, cstring2, cdouble;
+SELECT ctimestamp1,
+ cstring2,
+ cdouble,
+ cfloat,
+ cbigint,
+ csmallint,
+ (cbigint / 3569),
+ (-257 - csmallint),
+ (-6432 * cfloat),
+ (-(cdouble)),
+ (cdouble * 10.175),
+ ((-6432 * cfloat) / cfloat),
+ (-(cfloat)),
+ (cint % csmallint),
+ (-(cdouble)),
+ (cdouble * (-(cdouble)))
+FROM alltypesorc
+WHERE (((-1.389 >= cint)
+ AND ((csmallint < ctinyint)
+ AND (-6432 > csmallint)))
+ OR ((cdouble >= cfloat)
+ AND (cstring2 <= 'a'))
+ OR ((cstring1 LIKE 'ss%')
+ AND (10.175 > cbigint)))
+ORDER BY csmallint, cstring2, cdouble;
+
+-- TargetTypeClasses: Double, String, Long
+-- Functions: StDev, Sum, VarP, Count
+-- ArithmeticOps: Remainder, Divide, Subtract
+-- FilterOps: GreaterThanOrEqual, Equal, LessThanOrEqual
+-- GroupBy: GroupBy
+EXPLAIN SELECT csmallint,
+ (csmallint % -75),
+ STDDEV_SAMP(csmallint),
+ (-1.389 / csmallint),
+ SUM(cbigint),
+ ((csmallint % -75) / SUM(cbigint)),
+ (-((csmallint % -75))),
+ VAR_POP(ctinyint),
+ (-((-((csmallint % -75))))),
+ COUNT(*),
+ (COUNT(*) - -89010)
+FROM alltypesorc
+WHERE (((csmallint >= -257))
+ AND ((-6432 = csmallint)
+ OR ((cint >= cdouble)
+ AND (ctinyint <= cint))))
+GROUP BY csmallint
+ORDER BY csmallint;
+SELECT csmallint,
+ (csmallint % -75),
+ STDDEV_SAMP(csmallint),
+ (-1.389 / csmallint),
+ SUM(cbigint),
+ ((csmallint % -75) / SUM(cbigint)),
+ (-((csmallint % -75))),
+ VAR_POP(ctinyint),
+ (-((-((csmallint % -75))))),
+ COUNT(*),
+ (COUNT(*) - -89010)
+FROM alltypesorc
+WHERE (((csmallint >= -257))
+ AND ((-6432 = csmallint)
+ OR ((cint >= cdouble)
+ AND (ctinyint <= cint))))
+GROUP BY csmallint
+ORDER BY csmallint;
+
+-- TargetTypeClasses: Long, Double, Timestamp
+-- Functions: Var, Count, Sum, VarP, StDevP
+-- ArithmeticOps: Multiply, Add, Subtract, Remainder
+-- FilterOps: GreaterThan, LessThan, Equal, LessThanOrEqual, GreaterThanOrEqual
+-- GroupBy: GroupBy
+EXPLAIN SELECT cdouble,
+ VAR_SAMP(cdouble),
+ (2563.58 * VAR_SAMP(cdouble)),
+ (-(VAR_SAMP(cdouble))),
+ COUNT(cfloat),
+ ((2563.58 * VAR_SAMP(cdouble)) + -5638.15),
+ ((-(VAR_SAMP(cdouble))) * ((2563.58 * VAR_SAMP(cdouble)) + -5638.15)),
+ SUM(cfloat),
+ VAR_POP(cdouble),
+ (cdouble - (-(VAR_SAMP(cdouble)))),
+ STDDEV_POP(cdouble),
+ (cdouble + VAR_SAMP(cdouble)),
+ (cdouble * 762),
+ SUM(cdouble),
+ (-863.257 % (cdouble * 762)),
+ SUM(cdouble)
+FROM alltypesorc
+WHERE (((cdouble > 2563.58))
+ AND (((cbigint >= cint)
+ AND ((csmallint < cint)
+ AND (cfloat < -5638.15)))
+ OR (2563.58 = ctinyint)
+ OR ((cdouble <= cbigint)
+ AND (-5638.15 > cbigint))))
+GROUP BY cdouble
+ORDER BY cdouble;
+SELECT cdouble,
+ VAR_SAMP(cdouble),
+ (2563.58 * VAR_SAMP(cdouble)),
+ (-(VAR_SAMP(cdouble))),
+ COUNT(cfloat),
+ ((2563.58 * VAR_SAMP(cdouble)) + -5638.15),
+ ((-(VAR_SAMP(cdouble))) * ((2563.58 * VAR_SAMP(cdouble)) + -5638.15)),
+ SUM(cfloat),
+ VAR_POP(cdouble),
+ (cdouble - (-(VAR_SAMP(cdouble)))),
+ STDDEV_POP(cdouble),
+ (cdouble + VAR_SAMP(cdouble)),
+ (cdouble * 762),
+ SUM(cdouble),
+ (-863.257 % (cdouble * 762)),
+ SUM(cdouble)
+FROM alltypesorc
+WHERE (((cdouble > 2563.58))
+ AND (((cbigint >= cint)
+ AND ((csmallint < cint)
+ AND (cfloat < -5638.15)))
+ OR (2563.58 = ctinyint)
+ OR ((cdouble <= cbigint)
+ AND (-5638.15 > cbigint))))
+GROUP BY cdouble
+ORDER BY cdouble;
+
+-- TargetTypeClasses: Bool, Timestamp, String, Double, Long
+-- Functions: StDevP, Avg, Count, Min, Var, VarP, Sum
+-- ArithmeticOps: Multiply, Subtract, Add, Divide, Remainder
+-- FilterOps: NotEqual, LessThan, Like, Equal, RLike
+-- GroupBy: GroupBy
+EXPLAIN SELECT ctimestamp1,
+ cstring1,
+ STDDEV_POP(cint),
+ (STDDEV_POP(cint) * 10.175),
+ (-(STDDEV_POP(cint))),
+ AVG(csmallint),
+ (-(STDDEV_POP(cint))),
+ (-26.28 - STDDEV_POP(cint)),
+ COUNT(*),
+ (-(COUNT(*))),
+ ((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))),
+ MIN(ctinyint),
+ (((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))) * (-(COUNT(*)))),
+ (-((STDDEV_POP(cint) * 10.175))),
+ VAR_SAMP(csmallint),
+ (VAR_SAMP(csmallint) + (((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))) * (-(COUNT(*))))),
+ (-((-(STDDEV_POP(cint))))),
+ ((-(COUNT(*))) / STDDEV_POP(cint)),
+ VAR_POP(cfloat),
+ (10.175 / AVG(csmallint)),
+ AVG(cint),
+ VAR_SAMP(cfloat),
+ ((VAR_SAMP(csmallint) + (((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))) * (-(COUNT(*))))) - (((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))) * (-(COUNT(*))))),
+ (-((-((STDDEV_POP(cint) * 10.175))))),
+ AVG(cfloat),
+ (((VAR_SAMP(csmallint) + (((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))) * (-(COUNT(*))))) - (((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))) * (-(COUNT(*))))) * 10.175),
+ (10.175 % (10.175 / AVG(csmallint))),
+ (-(MIN(ctinyint))),
+ MIN(cdouble),
+ VAR_POP(csmallint),
+ (-(((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))))),
+ ((-(STDDEV_POP(cint))) % AVG(cfloat)),
+ (-26.28 / (-(MIN(ctinyint)))),
+ STDDEV_POP(ctinyint),
+ SUM(cint),
+ ((VAR_SAMP(csmallint) + (((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))) * (-(COUNT(*))))) / VAR_POP(cfloat)),
+ (-((-(COUNT(*))))),
+ COUNT(*),
+ ((VAR_SAMP(csmallint) + (((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))) * (-(COUNT(*))))) % -26.28)
+FROM alltypesorc
+WHERE (((ctimestamp1 != 0))
+ AND ((((-257 != ctinyint)
+ AND (cboolean2 IS NOT NULL))
+ AND ((cstring1 RLIKE '.*ss')
+ AND (-10669 < ctimestamp1)))
+ OR (ctimestamp2 = -10669)
+ OR ((ctimestamp1 < 0)
+ AND (cstring2 LIKE '%b%'))
+ OR (cdouble = cint)
+ OR ((cboolean1 IS NULL)
+ AND (cfloat < cint))))
+GROUP BY ctimestamp1, cstring1;
+SELECT ctimestamp1,
+ cstring1,
+ STDDEV_POP(cint),
+ (STDDEV_POP(cint) * 10.175),
+ (-(STDDEV_POP(cint))),
+ AVG(csmallint),
+ (-(STDDEV_POP(cint))),
+ (-26.28 - STDDEV_POP(cint)),
+ COUNT(*),
+ (-(COUNT(*))),
+ ((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))),
+ MIN(ctinyint),
+ (((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))) * (-(COUNT(*)))),
+ (-((STDDEV_POP(cint) * 10.175))),
+ VAR_SAMP(csmallint),
+ (VAR_SAMP(csmallint) + (((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))) * (-(COUNT(*))))),
+ (-((-(STDDEV_POP(cint))))),
+ ((-(COUNT(*))) / STDDEV_POP(cint)),
+ VAR_POP(cfloat),
+ (10.175 / AVG(csmallint)),
+ AVG(cint),
+ VAR_SAMP(cfloat),
+ ((VAR_SAMP(csmallint) + (((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))) * (-(COUNT(*))))) - (((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))) * (-(COUNT(*))))),
+ (-((-((STDDEV_POP(cint) * 10.175))))),
+ AVG(cfloat),
+ (((VAR_SAMP(csmallint) + (((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))) * (-(COUNT(*))))) - (((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))) * (-(COUNT(*))))) * 10.175),
+ (10.175 % (10.175 / AVG(csmallint))),
+ (-(MIN(ctinyint))),
+ MIN(cdouble),
+ VAR_POP(csmallint),
+ (-(((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))))),
+ ((-(STDDEV_POP(cint))) % AVG(cfloat)),
+ (-26.28 / (-(MIN(ctinyint)))),
+ STDDEV_POP(ctinyint),
+ SUM(cint),
+ ((VAR_SAMP(csmallint) + (((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))) * (-(COUNT(*))))) / VAR_POP(cfloat)),
+ (-((-(COUNT(*))))),
+ COUNT(*),
+ ((VAR_SAMP(csmallint) + (((-26.28 - STDDEV_POP(cint)) * (-(STDDEV_POP(cint)))) * (-(COUNT(*))))) % -26.28)
+FROM alltypesorc
+WHERE (((ctimestamp1 != 0))
+ AND ((((-257 != ctinyint)
+ AND (cboolean2 IS NOT NULL))
+ AND ((cstring1 RLIKE '.*ss')
+ AND (-10669 < ctimestamp1)))
+ OR (ctimestamp2 = -10669)
+ OR ((ctimestamp1 < 0)
+ AND (cstring2 LIKE '%b%'))
+ OR (cdouble = cint)
+ OR ((cboolean1 IS NULL)
+ AND (cfloat < cint))))
+GROUP BY ctimestamp1, cstring1;
+
+-- TargetTypeClasses: Double, Long, String, Timestamp, Bool
+-- Functions: Max, Sum, Var, Avg, Min, VarP, StDev, StDevP
+-- ArithmeticOps: Divide, Subtract, Remainder, Add, Multiply
+-- FilterOps: GreaterThan, LessThanOrEqual, Equal, LessThan, GreaterThanOrEqual, NotEqual, Like, RLike
+-- GroupBy: GroupBy
+EXPLAIN SELECT cboolean1,
+ MAX(cfloat),
+ (-(MAX(cfloat))),
+ (-26.28 / MAX(cfloat)),
+ SUM(cbigint),
+ (SUM(cbigint) - 10.175),
+ VAR_SAMP(cint),
+ (VAR_SAMP(cint) % MAX(cfloat)),
+ (10.175 + (-(MAX(cfloat)))),
+ AVG(cdouble),
+ ((SUM(cbigint) - 10.175) + VAR_SAMP(cint)),
+ MIN(cbigint),
+ VAR_POP(cbigint),
+ (-((10.175 + (-(MAX(cfloat)))))),
+ (79.553 / VAR_POP(cbigint)),
+ (VAR_SAMP(cint) % (79.553 / VAR_POP(cbigint))),
+ (-((10.175 + (-(MAX(cfloat)))))),
+ SUM(cint),
+ STDDEV_SAMP(ctinyint),
+ (-1.389 * MIN(cbigint)),
+ (SUM(cint) - (-1.389 * MIN(cbigint))),
+ STDDEV_POP(csmallint),
+ (-((SUM(cint) - (-1.389 * MIN(cbigint))))),
+ AVG(cint),
+ (-(AVG(cint))),
+ (AVG(cint) * SUM(cint))
+FROM alltypesorc
+WHERE (((cboolean1 IS NOT NULL))
+ AND (((cdouble < csmallint)
+ AND ((cboolean2 = cboolean1)
+ AND (cbigint <= -863.257)))
+ OR ((cint >= -257)
+ AND ((cstring1 IS NOT NULL)
+ AND (cboolean1 >= 1)))
+ OR (cstring2 RLIKE 'b')
+ OR ((csmallint >= ctinyint)
+ AND (ctimestamp2 IS NULL))))
+GROUP BY cboolean1
+ORDER BY cboolean1;
+SELECT cboolean1,
+ MAX(cfloat),
+ (-(MAX(cfloat))),
+ (-26.28 / MAX(cfloat)),
+ SUM(cbigint),
+ (SUM(cbigint) - 10.175),
+ VAR_SAMP(cint),
+ (VAR_SAMP(cint) % MAX(cfloat)),
+ (10.175 + (-(MAX(cfloat)))),
+ AVG(cdouble),
+ ((SUM(cbigint) - 10.175) + VAR_SAMP(cint)),
+ MIN(cbigint),
+ VAR_POP(cbigint),
+ (-((10.175 + (-(MAX(cfloat)))))),
+ (79.553 / VAR_POP(cbigint)),
+ (VAR_SAMP(cint) % (79.553 / VAR_POP(cbigint))),
+ (-((10.175 + (-(MAX(cfloat)))))),
+ SUM(cint),
+ STDDEV_SAMP(ctinyint),
+ (-1.389 * MIN(cbigint)),
+ (SUM(cint) - (-1.389 * MIN(cbigint))),
+ STDDEV_POP(csmallint),
+ (-((SUM(cint) - (-1.389 * MIN(cbigint))))),
+ AVG(cint),
+ (-(AVG(cint))),
+ (AVG(cint) * SUM(cint))
+FROM alltypesorc
+WHERE (((cboolean1 IS NOT NULL))
+ AND (((cdouble < csmallint)
+ AND ((cboolean2 = cboolean1)
+ AND (cbigint <= -863.257)))
+ OR ((cint >= -257)
+ AND ((cstring1 IS NOT NULL)
+ AND (cboolean1 >= 1)))
+ OR (cstring2 RLIKE 'b')
+ OR ((csmallint >= ctinyint)
+ AND (ctimestamp2 IS NULL))))
+GROUP BY cboolean1
+ORDER BY cboolean1;
+
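In miniature, the dimension-combination pattern described in the header comments looks like the sketch below: one representative per dimension (type class, aggregate, arithmetic op, filter op) folded into a single query rather than one query per combination. Illustration only, against the same alltypesorc table:

  SELECT AVG(cint) + 1,      -- Long class, Avg, Add
         SUM(cdouble) * 2,   -- Double class, Sum, Multiply
         MIN(ctinyint)       -- Long class, Min
  FROM alltypesorc
  WHERE (cstring1 LIKE 'a%') -- FilterOp: Like
     OR (cbigint > 0);       -- FilterOp: GreaterThan
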
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_bucketmapjoin1.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_bucketmapjoin1.q
new file mode 100644
index 0000000000..e309713795
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_bucketmapjoin1.q
@@ -0,0 +1,46 @@
+create table vsmb_bucket_1(key int, value string)
+ CLUSTERED BY (key)
+ SORTED BY (key) INTO 1 BUCKETS
+ STORED AS ORC;
+create table vsmb_bucket_2(key int, value string)
+ CLUSTERED BY (key)
+ SORTED BY (key) INTO 1 BUCKETS
+ STORED AS ORC;
+
+create table vsmb_bucket_RC(key int, value string)
+ CLUSTERED BY (key)
+ SORTED BY (key) INTO 1 BUCKETS
+ STORED AS RCFILE;
+
+create table vsmb_bucket_TXT(key int, value string)
+ CLUSTERED BY (key)
+ SORTED BY (key) INTO 1 BUCKETS
+ STORED AS TEXTFILE;
+
+insert into table vsmb_bucket_1 select cint, cstring1 from alltypesorc limit 2;
+insert into table vsmb_bucket_2 select cint, cstring1 from alltypesorc limit 2;
+insert into table vsmb_bucket_RC select cint, cstring1 from alltypesorc limit 2;
+insert into table vsmb_bucket_TXT select cint, cstring1 from alltypesorc limit 2;
+
+set hive.vectorized.execution.enabled=true;
+set hive.optimize.bucketmapjoin = true;
+set hive.optimize.bucketmapjoin.sortedmerge = true;
+set hive.auto.convert.sortmerge.join.noconditionaltask = true;
+set hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
+
+explain
+select /*+MAPJOIN(a)*/ * from vsmb_bucket_1 a join vsmb_bucket_2 b on a.key = b.key;
+select /*+MAPJOIN(a)*/ * from vsmb_bucket_1 a join vsmb_bucket_2 b on a.key = b.key;
+
+explain
+select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_RC b on a.key = b.key;
+select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_RC b on a.key = b.key;
+
+-- RCFile does not yet provide a vectorized reader out-of-the-box
+-- explain
+-- select /*+MAPJOIN(b)*/ * from vsmb_bucket_RC a join vsmb_bucket_2 b on a.key = b.key;
+-- select /*+MAPJOIN(b)*/ * from vsmb_bucket_RC a join vsmb_bucket_2 b on a.key = b.key;
+
+explain
+select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_TXT b on a.key = b.key;
+select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_TXT b on a.key = b.key;
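
The settings above convert the hinted map joins into sort-merge bucket (SMB) map joins, which require both sides to be bucketed and sorted on the join key with compatible bucket counts; the one-bucket DDL above satisfies that. A sketch for confirming the conversion, assuming the vsmb_bucket_* tables created above:

  -- The extended plan should show a sorted-merge bucket join operator
  -- rather than a plain map join.
  EXPLAIN EXTENDED
  select /*+MAPJOIN(a)*/ * from vsmb_bucket_1 a join vsmb_bucket_2 b on a.key = b.key;
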
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_case.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_case.q
new file mode 100644
index 0000000000..e448d51f6b
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_case.q
@@ -0,0 +1,37 @@
+set hive.vectorized.execution.enabled = true
+;
+explain
+select
+ csmallint,
+ case
+ when csmallint = 418 then "a"
+ when csmallint = 12205 then "b"
+ else "c"
+ end,
+ case csmallint
+ when 418 then "a"
+ when 12205 then "b"
+ else "c"
+ end
+from alltypesorc
+where csmallint = 418
+or csmallint = 12205
+or csmallint = 10583
+;
+select
+ csmallint,
+ case
+ when csmallint = 418 then "a"
+ when csmallint = 12205 then "b"
+ else "c"
+ end,
+ case csmallint
+ when 418 then "a"
+ when 12205 then "b"
+ else "c"
+ end
+from alltypesorc
+where csmallint = 418
+or csmallint = 12205
+or csmallint = 10583
+;
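
Both CASE forms above vectorize, but they are not interchangeable around NULLs: the simple form compares with =, so a NULL operand never matches any WHEN branch. A small sketch of the difference:

  select
    case when csmallint is null then "null" else "value" end, -- searched form can yield "null"
    case csmallint when null then "null" else "value" end     -- simple form: always "value"
  from alltypesorc
  limit 5;
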
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_casts.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_casts.q
new file mode 100644
index 0000000000..3f818b1853
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_casts.q
@@ -0,0 +1,149 @@
+SET hive.vectorized.execution.enabled = true;
+
+-- Test type casting in vectorized mode to verify end-to-end functionality.
+
+explain
+select
+-- to boolean
+ cast (ctinyint as boolean)
+ ,cast (csmallint as boolean)
+ ,cast (cint as boolean)
+ ,cast (cbigint as boolean)
+ ,cast (cfloat as boolean)
+ ,cast (cdouble as boolean)
+ ,cast (cboolean1 as boolean)
+ ,cast (cbigint * 0 as boolean)
+ ,cast (ctimestamp1 as boolean)
+ ,cast (cstring1 as boolean)
+-- to int family
+ ,cast (ctinyint as int)
+ ,cast (csmallint as int)
+ ,cast (cint as int)
+ ,cast (cbigint as int)
+ ,cast (cfloat as int)
+ ,cast (cdouble as int)
+ ,cast (cboolean1 as int)
+ ,cast (ctimestamp1 as int)
+ ,cast (cstring1 as int)
+ ,cast (substr(cstring1, 1, 1) as int)
+ ,cast (cfloat as tinyint)
+ ,cast (cfloat as smallint)
+ ,cast (cfloat as bigint)
+-- to float family
+ ,cast (ctinyint as double)
+ ,cast (csmallint as double)
+ ,cast (cint as double)
+ ,cast (cbigint as double)
+ ,cast (cfloat as double)
+ ,cast (cdouble as double)
+ ,cast (cboolean1 as double)
+ ,cast (ctimestamp1 as double)
+ ,cast (cstring1 as double)
+ ,cast (substr(cstring1, 1, 1) as double)
+ ,cast (cint as float)
+ ,cast (cdouble as float)
+-- to timestamp
+ ,cast (ctinyint as timestamp)
+ ,cast (csmallint as timestamp)
+ ,cast (cint as timestamp)
+ ,cast (cbigint as timestamp)
+ ,cast (cfloat as timestamp)
+ ,cast (cdouble as timestamp)
+ ,cast (cboolean1 as timestamp)
+ ,cast (cbigint * 0 as timestamp)
+ ,cast (ctimestamp1 as timestamp)
+ ,cast (cstring1 as timestamp)
+ ,cast (substr(cstring1, 1, 1) as timestamp)
+-- to string
+ ,cast (ctinyint as string)
+ ,cast (csmallint as string)
+ ,cast (cint as string)
+ ,cast (cbigint as string)
+ ,cast (cfloat as string)
+ ,cast (cdouble as string)
+ ,cast (cboolean1 as string)
+ ,cast (cbigint * 0 as string)
+ ,cast (ctimestamp1 as string)
+ ,cast (cstring1 as string)
+-- nested and expression arguments
+ ,cast (cast (cfloat as int) as float)
+ ,cast (cint * 2 as double)
+ ,cast (sin(cfloat) as string)
+ ,cast (cint as float) + cast(cboolean1 as double)
+from alltypesorc
+-- limit output to a reasonably small number of rows
+where cbigint % 250 = 0;
+
+
+select
+-- to boolean
+ cast (ctinyint as boolean)
+ ,cast (csmallint as boolean)
+ ,cast (cint as boolean)
+ ,cast (cbigint as boolean)
+ ,cast (cfloat as boolean)
+ ,cast (cdouble as boolean)
+ ,cast (cboolean1 as boolean)
+ ,cast (cbigint * 0 as boolean)
+ ,cast (ctimestamp1 as boolean)
+ ,cast (cstring1 as boolean)
+-- to int family
+ ,cast (ctinyint as int)
+ ,cast (csmallint as int)
+ ,cast (cint as int)
+ ,cast (cbigint as int)
+ ,cast (cfloat as int)
+ ,cast (cdouble as int)
+ ,cast (cboolean1 as int)
+ ,cast (ctimestamp1 as int)
+ ,cast (cstring1 as int)
+ ,cast (substr(cstring1, 1, 1) as int)
+ ,cast (cfloat as tinyint)
+ ,cast (cfloat as smallint)
+ ,cast (cfloat as bigint)
+-- to float family
+ ,cast (ctinyint as double)
+ ,cast (csmallint as double)
+ ,cast (cint as double)
+ ,cast (cbigint as double)
+ ,cast (cfloat as double)
+ ,cast (cdouble as double)
+ ,cast (cboolean1 as double)
+ ,cast (ctimestamp1 as double)
+ ,cast (cstring1 as double)
+ ,cast (substr(cstring1, 1, 1) as double)
+ ,cast (cint as float)
+ ,cast (cdouble as float)
+-- to timestamp
+ ,cast (ctinyint as timestamp)
+ ,cast (csmallint as timestamp)
+ ,cast (cint as timestamp)
+ ,cast (cbigint as timestamp)
+ ,cast (cfloat as timestamp)
+ ,cast (cdouble as timestamp)
+ ,cast (cboolean1 as timestamp)
+ ,cast (cbigint * 0 as timestamp)
+ ,cast (ctimestamp1 as timestamp)
+ ,cast (cstring1 as timestamp)
+ ,cast (substr(cstring1, 1, 1) as timestamp)
+-- to string
+ ,cast (ctinyint as string)
+ ,cast (csmallint as string)
+ ,cast (cint as string)
+ ,cast (cbigint as string)
+ ,cast (cfloat as string)
+ ,cast (cdouble as string)
+ ,cast (cboolean1 as string)
+ ,cast (cbigint * 0 as string)
+ ,cast (ctimestamp1 as string)
+ ,cast (cstring1 as string)
+-- nested and expression arguments
+ ,cast (cast (cfloat as int) as float)
+ ,cast (cint * 2 as double)
+ ,cast (sin(cfloat) as string)
+ ,cast (cint as float) + cast(cboolean1 as double)
+from alltypesorc
+-- limit output to a reasonably small number of rows
+where cbigint % 250 = 0;
+
+ 
\ No newline at end of file
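
One detail worth noting about the nested-cast block above: cast (cast (cfloat as int) as float) is not an identity, because the inner cast truncates toward zero (assuming Hive's Java-style narrowing for float-to-int). A small sketch:

  -- The float -> int -> float round trip drops the fractional part.
  select cfloat, cast(cast(cfloat as int) as float)
  from alltypesorc
  where cbigint % 250 = 0
  limit 5;
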
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_context.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_context.q
new file mode 100644
index 0000000000..381e4255ca
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_context.q
@@ -0,0 +1,47 @@
+create table store(s_store_sk int, s_city string)
+stored as orc;
+insert overwrite table store
+select cint, cstring1
+from alltypesorc
+where cint not in (
+-3728, -563, 762, 6981, 253665376, 528534767, 626923679);
+create table store_sales(ss_store_sk int, ss_hdemo_sk int, ss_net_profit double)
+stored as orc;
+insert overwrite table store_sales
+select cint, cint, cdouble
+from alltypesorc
+where cint not in (
+-3728, -563, 762, 6981, 253665376, 528534767, 626923679);
+create table household_demographics(hd_demo_sk int)
+stored as orc;
+insert overwrite table household_demographics
+select cint
+from alltypesorc
+where cint not in (
+-3728, -563, 762, 6981, 253665376, 528534767, 626923679);
+set hive.auto.convert.join=true;
+set hive.vectorized.execution.enabled=true;
+
+
+explain
+select store.s_city, ss_net_profit
+from store_sales
+JOIN store ON store_sales.ss_store_sk = store.s_store_sk
+JOIN household_demographics ON store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
+limit 100
+;
+
+select store.s_city, ss_net_profit
+from store_sales
+JOIN store ON store_sales.ss_store_sk = store.s_store_sk
+JOIN household_demographics ON store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
+limit 100
+;
+
+set hive.auto.convert.join=false;
+set hive.vectorized.execution.enabled=false;
+
+drop table store;
+drop table store_sales;
+drop table household_demographics;
+
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_date_funcs.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_date_funcs.q
new file mode 100644
index 0000000000..b7aa3c28ac
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_date_funcs.q
@@ -0,0 +1,122 @@
+SET hive.vectorized.execution.enabled = true;
+
+-- Test date functions in vectorized mode to verify they run correctly end-to-end.
+
+CREATE TABLE date_udf_flight (
+ origin_city_name STRING,
+ dest_city_name STRING,
+ fl_date DATE,
+ arr_delay FLOAT,
+ fl_num INT
+);
+LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt.1' OVERWRITE INTO TABLE date_udf_flight;
+
+CREATE TABLE date_udf_flight_orc (
+ fl_date DATE,
+ fl_time TIMESTAMP
+) STORED AS ORC;
+
+INSERT INTO TABLE date_udf_flight_orc SELECT fl_date, to_utc_timestamp(fl_date, 'America/Los_Angeles') FROM date_udf_flight;
+
+SELECT * FROM date_udf_flight_orc;
+
+EXPLAIN SELECT
+ to_unix_timestamp(fl_time),
+ year(fl_time),
+ month(fl_time),
+ day(fl_time),
+ dayofmonth(fl_time),
+ weekofyear(fl_time),
+ date(fl_time),
+ to_date(fl_time),
+ date_add(fl_time, 2),
+ date_sub(fl_time, 2),
+ datediff(fl_time, "2000-01-01")
+FROM date_udf_flight_orc;
+
+SELECT
+ to_unix_timestamp(fl_time),
+ year(fl_time),
+ month(fl_time),
+ day(fl_time),
+ dayofmonth(fl_time),
+ weekofyear(fl_time),
+ date(fl_time),
+ to_date(fl_time),
+ date_add(fl_time, 2),
+ date_sub(fl_time, 2),
+ datediff(fl_time, "2000-01-01")
+FROM date_udf_flight_orc;
+
+EXPLAIN SELECT
+ to_unix_timestamp(fl_date),
+ year(fl_date),
+ month(fl_date),
+ day(fl_date),
+ dayofmonth(fl_date),
+ weekofyear(fl_date),
+ date(fl_date),
+ to_date(fl_date),
+ date_add(fl_date, 2),
+ date_sub(fl_date, 2),
+ datediff(fl_date, "2000-01-01")
+FROM date_udf_flight_orc;
+
+SELECT
+ to_unix_timestamp(fl_date),
+ year(fl_date),
+ month(fl_date),
+ day(fl_date),
+ dayofmonth(fl_date),
+ weekofyear(fl_date),
+ date(fl_date),
+ to_date(fl_date),
+ date_add(fl_date, 2),
+ date_sub(fl_date, 2),
+ datediff(fl_date, "2000-01-01")
+FROM date_udf_flight_orc;
+
+EXPLAIN SELECT
+ year(fl_time) = year(fl_date),
+ month(fl_time) = month(fl_date),
+ day(fl_time) = day(fl_date),
+ dayofmonth(fl_time) = dayofmonth(fl_date),
+ weekofyear(fl_time) = weekofyear(fl_date),
+ date(fl_time) = date(fl_date),
+ to_date(fl_time) = to_date(fl_date),
+ date_add(fl_time, 2) = date_add(fl_date, 2),
+ date_sub(fl_time, 2) = date_sub(fl_date, 2),
+ datediff(fl_time, "2000-01-01") = datediff(fl_date, "2000-01-01")
+FROM date_udf_flight_orc;
+
+-- Should all be true or NULL
+SELECT
+ year(fl_time) = year(fl_date),
+ month(fl_time) = month(fl_date),
+ day(fl_time) = day(fl_date),
+ dayofmonth(fl_time) = dayofmonth(fl_date),
+ weekofyear(fl_time) = weekofyear(fl_date),
+ date(fl_time) = date(fl_date),
+ to_date(fl_time) = to_date(fl_date),
+ date_add(fl_time, 2) = date_add(fl_date, 2),
+ date_sub(fl_time, 2) = date_sub(fl_date, 2),
+ datediff(fl_time, "2000-01-01") = datediff(fl_date, "2000-01-01")
+FROM date_udf_flight_orc;
+
+EXPLAIN SELECT
+ fl_date,
+ to_date(date_add(fl_date, 2)),
+ to_date(date_sub(fl_date, 2)),
+ datediff(fl_date, date_add(fl_date, 2)),
+ datediff(fl_date, date_sub(fl_date, 2)),
+ datediff(date_add(fl_date, 2), date_sub(fl_date, 2))
+FROM date_udf_flight_orc LIMIT 10;
+
+SELECT
+ fl_date,
+ to_date(date_add(fl_date, 2)),
+ to_date(date_sub(fl_date, 2)),
+ datediff(fl_date, date_add(fl_date, 2)),
+ datediff(fl_date, date_sub(fl_date, 2)),
+ datediff(date_add(fl_date, 2), date_sub(fl_date, 2))
+FROM date_udf_flight_orc LIMIT 10;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_distinct_gby.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_distinct_gby.q
new file mode 100644
index 0000000000..6e622007e5
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_distinct_gby.q
@@ -0,0 +1,12 @@
+SET hive.vectorized.execution.enabled=true;
+
+SET hive.map.groupby.sorted=true;
+
+create table dtest(a int, b int) clustered by (a) sorted by (a) into 1 buckets stored as orc;
+insert into table dtest select c,b from (select array(300,300,300,300,300) as a, 1 as b from src limit 1) y lateral view explode(a) t1 as c;
+
+explain select sum(distinct a), count(distinct a) from dtest;
+select sum(distinct a), count(distinct a) from dtest;
+
+explain select sum(distinct cint), count(distinct cint), avg(distinct cint), std(distinct cint) from alltypesorc;
+select sum(distinct cint), count(distinct cint), avg(distinct cint), std(distinct cint) from alltypesorc;
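
The distinct aggregates above can also be written by de-duplicating first, which is occasionally handy when comparing vectorized and row-mode plans. An equivalent formulation, sketched against the same dtest table:

  -- Equivalent to sum(distinct a), count(distinct a) on dtest.
  select sum(a), count(a) from (select distinct a from dtest) t;
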
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_mapjoin.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_mapjoin.q
new file mode 100644
index 0000000000..f390c2caaf
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_mapjoin.q
@@ -0,0 +1,12 @@
+SET hive.vectorized.execution.enabled=true;
+SET hive.auto.convert.join=true;
+SET hive.auto.convert.join.nonconditionaltask=true;
+SET hive.auto.convert.join.nonconditionaltask.size=1000000000;
+
+EXPLAIN SELECT COUNT(t1.cint), MAX(t2.cint), MIN(t1.cint), AVG(t1.cint+t2.cint)
+ FROM alltypesorc t1
+ JOIN alltypesorc t2 ON t1.cint = t2.cint;
+
+SELECT COUNT(t1.cint), MAX(t2.cint), MIN(t1.cint), AVG(t1.cint+t2.cint)
+ FROM alltypesorc t1
+  JOIN alltypesorc t2 ON t1.cint = t2.cint;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_math_funcs.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_math_funcs.q
new file mode 100644
index 0000000000..d6b0824679
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_math_funcs.q
@@ -0,0 +1,107 @@
+SET hive.vectorized.execution.enabled = true;
+
+-- Test math functions in vectorized mode to verify they run correctly end-to-end.
+
+explain
+select
+ cdouble
+ ,Round(cdouble, 2)
+ ,Floor(cdouble)
+ ,Ceil(cdouble)
+ ,Rand()
+ ,Rand(98007)
+ ,Exp(ln(cdouble))
+ ,Ln(cdouble)
+ ,Ln(cfloat)
+ ,Log10(cdouble)
+ -- Use log2 as a representative function to test all input types.
+ ,Log2(cdouble)
+ -- Use 15601.0 to test zero handling, as there are no zeroes in the table
+ ,Log2(cdouble - 15601.0)
+ ,Log2(cfloat)
+ ,Log2(cbigint)
+ ,Log2(cint)
+ ,Log2(csmallint)
+ ,Log2(ctinyint)
+ ,Log(2.0, cdouble)
+ ,Pow(log2(cdouble), 2.0)
+ ,Power(log2(cdouble), 2.0)
+ ,Sqrt(cdouble)
+ ,Sqrt(cbigint)
+ ,Bin(cbigint)
+ ,Hex(cdouble)
+ ,Conv(cbigint, 10, 16)
+ ,Abs(cdouble)
+ ,Abs(ctinyint)
+ ,Pmod(cint, 3)
+ ,Sin(cdouble)
+ ,Asin(cdouble)
+ ,Cos(cdouble)
+ ,ACos(cdouble)
+ ,Atan(cdouble)
+ ,Degrees(cdouble)
+ ,Radians(cdouble)
+ ,Positive(cdouble)
+ ,Positive(cbigint)
+ ,Negative(cdouble)
+ ,Sign(cdouble)
+ ,Sign(cbigint)
+ -- Test nesting
+ ,cos(-sin(log(cdouble)) + 3.14159)
+from alltypesorc
+-- limit output to a reasonably small number of rows
+where cbigint % 500 = 0
+-- test use of a math function in the WHERE clause
+and sin(cfloat) >= -1.0;
+
+select
+ cdouble
+ ,Round(cdouble, 2)
+ ,Floor(cdouble)
+ ,Ceil(cdouble)
+ -- Omit rand() from runtime test because it's nondeterministic.
+ -- ,Rand()
+ ,Rand(98007)
+ ,Exp(ln(cdouble))
+ ,Ln(cdouble)
+ ,Ln(cfloat)
+ ,Log10(cdouble)
+ -- Use log2 as a representative function to test all input types.
+ ,Log2(cdouble)
+ -- Use 15601.0 to test zero handling, as there are no zeroes in the table
+ ,Log2(cdouble - 15601.0)
+ ,Log2(cfloat)
+ ,Log2(cbigint)
+ ,Log2(cint)
+ ,Log2(csmallint)
+ ,Log2(ctinyint)
+ ,Log(2.0, cdouble)
+ ,Pow(log2(cdouble), 2.0)
+ ,Power(log2(cdouble), 2.0)
+ ,Sqrt(cdouble)
+ ,Sqrt(cbigint)
+ ,Bin(cbigint)
+ ,Hex(cdouble)
+ ,Conv(cbigint, 10, 16)
+ ,Abs(cdouble)
+ ,Abs(ctinyint)
+ ,Pmod(cint, 3)
+ ,Sin(cdouble)
+ ,Asin(cdouble)
+ ,Cos(cdouble)
+ ,ACos(cdouble)
+ ,Atan(cdouble)
+ ,Degrees(cdouble)
+ ,Radians(cdouble)
+ ,Positive(cdouble)
+ ,Positive(cbigint)
+ ,Negative(cdouble)
+ ,Sign(cdouble)
+ ,Sign(cbigint)
+ -- Test nesting
+ ,cos(-sin(log(cdouble)) + 3.14159)
+from alltypesorc
+-- limit output to a reasonably small number of rows
+where cbigint % 500 = 0
+-- test use of a math function in the WHERE clause
+and sin(cfloat) >= -1.0;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_nested_mapjoin.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_nested_mapjoin.q
new file mode 100644
index 0000000000..ce4227cf0a
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_nested_mapjoin.q
@@ -0,0 +1,8 @@
+SET hive.vectorized.execution.enabled=true;
+SET hive.auto.convert.join=true;
+SET hive.auto.convert.join.nonconditionaltask=true;
+SET hive.auto.convert.join.nonconditionaltask.size=1000000000;
+
+explain select sum(t1.td) from (select v1.csmallint as tsi, v1.cdouble as td from alltypesorc v1, alltypesorc v2 where v1.ctinyint=v2.ctinyint) t1 join alltypesorc v3 on t1.tsi=v3.csmallint;
+
+select sum(t1.td) from (select v1.csmallint as tsi, v1.cdouble as td from alltypesorc v1, alltypesorc v2 where v1.ctinyint=v2.ctinyint) t1 join alltypesorc v3 on t1.tsi=v3.csmallint;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_rcfile_columnar.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_rcfile_columnar.q
new file mode 100644
index 0000000000..488d2f3885
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_rcfile_columnar.q
@@ -0,0 +1,18 @@
+-- This query must pass even when a vectorized reader is not available for
+-- RC files. The query must fall back to non-vectorized mode and run successfully.
+
+CREATE table columnTable (key STRING, value STRING)
+ROW FORMAT SERDE
+ 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'
+STORED AS
+ INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat'
+ OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat';
+
+FROM src
+INSERT OVERWRITE TABLE columnTable SELECT src.key, src.value LIMIT 10;
+describe columnTable;
+
+SET hive.vectorized.execution.enabled=true;
+
+SELECT key, value FROM columnTable ORDER BY key;
+
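One way to confirm the fallback described in the header comment, assuming the columnTable created above: with vectorization enabled, the plan for the RCFile scan should not be marked as executing in vectorized mode.

  SET hive.vectorized.execution.enabled=true;
  -- Expect ordinary row-mode execution for the RCFile scan in the plan.
  EXPLAIN SELECT key, value FROM columnTable ORDER BY key;
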
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_shufflejoin.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_shufflejoin.q
new file mode 100644
index 0000000000..6b60aa08c5
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_shufflejoin.q
@@ -0,0 +1,10 @@
+SET hive.vectorized.execution.enabled=true;
+SET hive.auto.convert.join=false;
+
+EXPLAIN SELECT COUNT(t1.cint), MAX(t2.cint), MIN(t1.cint), AVG(t1.cint+t2.cint)
+ FROM alltypesorc t1
+ JOIN alltypesorc t2 ON t1.cint = t2.cint;
+
+SELECT COUNT(t1.cint), MAX(t2.cint), MIN(t1.cint), AVG(t1.cint+t2.cint)
+ FROM alltypesorc t1
+  JOIN alltypesorc t2 ON t1.cint = t2.cint;
\ No newline at end of file
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_string_funcs.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_string_funcs.q
new file mode 100644
index 0000000000..96fe53da1e
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_string_funcs.q
@@ -0,0 +1,46 @@
+SET hive.vectorized.execution.enabled = true;
+
+-- Test string functions in vectorized mode to verify end-to-end functionality.
+
+explain
+select
+ substr(cstring1, 1, 2)
+ ,substr(cstring1, 2)
+ ,lower(cstring1)
+ ,upper(cstring1)
+ ,ucase(cstring1)
+ ,length(cstring1)
+ ,trim(cstring1)
+ ,ltrim(cstring1)
+ ,rtrim(cstring1)
+ ,concat(cstring1, cstring2)
+ ,concat('>', cstring1)
+ ,concat(cstring1, '<')
+ ,concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2))
+from alltypesorc
+-- Limit the number of rows of output to a reasonable amount.
+where cbigint % 237 = 0
+-- Test function use in the WHERE clause.
+and length(substr(cstring1, 1, 2)) <= 2
+and cstring1 like '%';
+
+select
+ substr(cstring1, 1, 2)
+ ,substr(cstring1, 2)
+ ,lower(cstring1)
+ ,upper(cstring1)
+ ,ucase(cstring1)
+ ,length(cstring1)
+ ,trim(cstring1)
+ ,ltrim(cstring1)
+ ,rtrim(cstring1)
+ ,concat(cstring1, cstring2)
+ ,concat('>', cstring1)
+ ,concat(cstring1, '<')
+ ,concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2))
+from alltypesorc
+-- Limit the number of rows of output to a reasonable amount.
+where cbigint % 237 = 0
+-- Test function use in the WHERE clause.
+and length(substr(cstring1, 1, 2)) <= 2
+and cstring1 like '%';
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q
new file mode 100644
index 0000000000..95eedd3b58
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q
@@ -0,0 +1,124 @@
+SET hive.vectorized.execution.enabled = true;
+
+-- Test timestamp functions in vectorized mode to verify they run correctly end-to-end.
+
+CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, stimestamp1 string) STORED AS ORC;
+
+INSERT OVERWRITE TABLE alltypesorc_string
+SELECT
+ to_utc_timestamp(ctimestamp1, 'America/Los_Angeles'),
+ CAST(to_utc_timestamp(ctimestamp1, 'America/Los_Angeles') AS STRING)
+FROM alltypesorc
+LIMIT 40;
+
+CREATE TABLE alltypesorc_wrong(stimestamp1 string) STORED AS ORC;
+
+INSERT INTO TABLE alltypesorc_wrong SELECT 'abcd' FROM alltypesorc LIMIT 1;
+INSERT INTO TABLE alltypesorc_wrong SELECT '2000:01:01 00-00-00' FROM alltypesorc LIMIT 1;
+INSERT INTO TABLE alltypesorc_wrong SELECT '0000-00-00 99:99:99' FROM alltypesorc LIMIT 1;
+
+EXPLAIN SELECT
+ to_unix_timestamp(ctimestamp1) AS c1,
+ year(ctimestamp1),
+ month(ctimestamp1),
+ day(ctimestamp1),
+ dayofmonth(ctimestamp1),
+ weekofyear(ctimestamp1),
+ hour(ctimestamp1),
+ minute(ctimestamp1),
+ second(ctimestamp1)
+FROM alltypesorc_string
+ORDER BY c1;
+
+SELECT
+ to_unix_timestamp(ctimestamp1) AS c1,
+ year(ctimestamp1),
+ month(ctimestamp1),
+ day(ctimestamp1),
+ dayofmonth(ctimestamp1),
+ weekofyear(ctimestamp1),
+ hour(ctimestamp1),
+ minute(ctimestamp1),
+ second(ctimestamp1)
+FROM alltypesorc_string
+ORDER BY c1;
+
+EXPLAIN SELECT
+ to_unix_timestamp(stimestamp1) AS c1,
+ year(stimestamp1),
+ month(stimestamp1),
+ day(stimestamp1),
+ dayofmonth(stimestamp1),
+ weekofyear(stimestamp1),
+ hour(stimestamp1),
+ minute(stimestamp1),
+ second(stimestamp1)
+FROM alltypesorc_string
+ORDER BY c1;
+
+SELECT
+ to_unix_timestamp(stimestamp1) AS c1,
+ year(stimestamp1),
+ month(stimestamp1),
+ day(stimestamp1),
+ dayofmonth(stimestamp1),
+ weekofyear(stimestamp1),
+ hour(stimestamp1),
+ minute(stimestamp1),
+ second(stimestamp1)
+FROM alltypesorc_string
+ORDER BY c1;
+
+EXPLAIN SELECT
+ to_unix_timestamp(ctimestamp1) = to_unix_timestamp(stimestamp1) AS c1,
+ year(ctimestamp1) = year(stimestamp1),
+ month(ctimestamp1) = month(stimestamp1),
+ day(ctimestamp1) = day(stimestamp1),
+ dayofmonth(ctimestamp1) = dayofmonth(stimestamp1),
+ weekofyear(ctimestamp1) = weekofyear(stimestamp1),
+ hour(ctimestamp1) = hour(stimestamp1),
+ minute(ctimestamp1) = minute(stimestamp1),
+ second(ctimestamp1) = second(stimestamp1)
+FROM alltypesorc_string
+ORDER BY c1;
+
+-- Should all be true or NULL
+SELECT
+ to_unix_timestamp(ctimestamp1) = to_unix_timestamp(stimestamp1) AS c1,
+ year(ctimestamp1) = year(stimestamp1),
+ month(ctimestamp1) = month(stimestamp1),
+ day(ctimestamp1) = day(stimestamp1),
+ dayofmonth(ctimestamp1) = dayofmonth(stimestamp1),
+ weekofyear(ctimestamp1) = weekofyear(stimestamp1),
+ hour(ctimestamp1) = hour(stimestamp1),
+ minute(ctimestamp1) = minute(stimestamp1),
+ second(ctimestamp1) = second(stimestamp1)
+FROM alltypesorc_string
+ORDER BY c1;
+
+-- Wrong format. Should all be NULL.
+EXPLAIN SELECT
+ to_unix_timestamp(stimestamp1) AS c1,
+ year(stimestamp1),
+ month(stimestamp1),
+ day(stimestamp1),
+ dayofmonth(stimestamp1),
+ weekofyear(stimestamp1),
+ hour(stimestamp1),
+ minute(stimestamp1),
+ second(stimestamp1)
+FROM alltypesorc_wrong
+ORDER BY c1;
+
+SELECT
+ to_unix_timestamp(stimestamp1) AS c1,
+ year(stimestamp1),
+ month(stimestamp1),
+ day(stimestamp1),
+ dayofmonth(stimestamp1),
+ weekofyear(stimestamp1),
+ hour(stimestamp1),
+ minute(stimestamp1),
+ second(stimestamp1)
+FROM alltypesorc_wrong
+ORDER BY c1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/view.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/view.q
index 4e3d057292..bc193554f9 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/view.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/view.q
@@ -4,13 +4,13 @@ USE db1;
CREATE TABLE table1 (key STRING, value STRING)
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt'
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt'
OVERWRITE INTO TABLE table1;
CREATE TABLE table2 (key STRING, value STRING)
STORED AS TEXTFILE;
-LOAD DATA LOCAL INPATH '../data/files/kv1.txt'
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt'
OVERWRITE INTO TABLE table2;
-- relative reference, no alias
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/view_cast.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/view_cast.q
index b0b078ec62..95517c3bcd 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/view_cast.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/view_cast.q
@@ -1,11 +1,11 @@
DROP TABLE IF EXISTS atab;
CREATE TABLE IF NOT EXISTS atab (ks_uid BIGINT, sr_uid STRING, sr_id STRING, tstamp STRING, m_id STRING, act STRING, at_sr_uid STRING, tstamp_type STRING, original_m_id STRING, original_tstamp STRING, registered_flag TINYINT, at_ks_uid BIGINT) PARTITIONED BY (dt STRING,nt STRING);
-LOAD DATA LOCAL INPATH '../data/files/v1.txt' INTO TABLE atab PARTITION (dt='20130312', nt='tw');
-LOAD DATA LOCAL INPATH '../data/files/v1.txt' INTO TABLE atab PARTITION (dt='20130311', nt='tw');
+LOAD DATA LOCAL INPATH '../../data/files/v1.txt' INTO TABLE atab PARTITION (dt='20130312', nt='tw');
+LOAD DATA LOCAL INPATH '../../data/files/v1.txt' INTO TABLE atab PARTITION (dt='20130311', nt='tw');
DROP TABLE IF EXISTS mstab;
CREATE TABLE mstab(ks_uid INT, csc INT) PARTITIONED BY (dt STRING);
-LOAD DATA LOCAL INPATH '../data/files/v2.txt' INTO TABLE mstab PARTITION (dt='20130311');
+LOAD DATA LOCAL INPATH '../../data/files/v2.txt' INTO TABLE mstab PARTITION (dt='20130311');
DROP VIEW IF EXISTS aa_view_tw;
CREATE VIEW aa_view_tw AS SELECT ks_uid, sr_id, act, at_ks_uid, at_sr_uid, from_unixtime(CAST(CAST( tstamp as BIGINT)/1000 AS BIGINT),'yyyyMMdd') AS act_date, from_unixtime(CAST(CAST( original_tstamp AS BIGINT)/1000 AS BIGINT),'yyyyMMdd') AS content_creation_date FROM atab WHERE dt='20130312' AND nt='tw' AND ks_uid != at_ks_uid;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing.q
index a7297db610..2f22145518 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing.q
@@ -13,7 +13,7 @@ CREATE TABLE part(
p_comment STRING
);
-LOAD DATA LOCAL INPATH '../data/files/part_tiny.txt' overwrite into table part;
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-- 1. testWindowing
select p_mfgr, p_name, p_size,
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_adjust_rowcontainer_sz.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_adjust_rowcontainer_sz.q
index 9c7625dcd7..67cab9f7b2 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_adjust_rowcontainer_sz.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_adjust_rowcontainer_sz.q
@@ -13,7 +13,7 @@ CREATE TABLE part(
p_comment STRING
);
-LOAD DATA LOCAL INPATH '../data/files/part_tiny.txt' overwrite into table part;
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
set hive.join.cache.size=1;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_columnPruning.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_columnPruning.q
index 7c4ab38614..24f9ff73a3 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_columnPruning.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_columnPruning.q
@@ -13,7 +13,7 @@ CREATE TABLE part(
p_comment STRING
);
-LOAD DATA LOCAL INPATH '../data/files/part_tiny.txt' overwrite into table part;
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
-- 1. testQueryLevelPartitionColsNotInSelect
select p_size,
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_expressions.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_expressions.q
index 2c3339095f..7e27c6b1c0 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_expressions.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_expressions.q
@@ -13,7 +13,7 @@ CREATE TABLE part(
p_comment STRING
);
-LOAD DATA LOCAL INPATH '../data/files/part_tiny.txt' overwrite into table part;
+LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part;
drop table over10k;
@@ -27,12 +27,12 @@ create table over10k(
bo boolean,
s string,
ts timestamp,
- dec decimal,
+ dec decimal(4,2),
bin binary)
row format delimited
fields terminated by '|';
-load data local inpath '../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k;
select p_mfgr, p_retailprice, p_size,
round(sum(p_retailprice) over w1 , 2) = round(sum(lag(p_retailprice,1,0.0)) over w1 + last_value(p_retailprice) over w1 , 2),
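The decimal edits in this and the following hunks track a Hive 0.13 behavior change: DECIMAL now takes explicit precision and scale, and a bare DECIMAL is shorthand for DECIMAL(10,0), which discards fractional digits. A hedged sketch of the difference (dec_demo is a hypothetical table; src is Hive's standard two-column test table):

-- DECIMAL(4,2) keeps two fractional digits in [-99.99, 99.99];
-- bare DECIMAL, i.e. DECIMAL(10,0) in Hive 0.13, rounds to a whole number
CREATE TABLE dec_demo (d1 DECIMAL, d2 DECIMAL(4,2));
INSERT INTO TABLE dec_demo SELECT 12.34, 12.34 FROM src LIMIT 1;
SELECT d1, d2 FROM dec_demo;  -- expected: d1 = 12, d2 = 12.34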
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_multipartitioning.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_multipartitioning.q
index bb371e9009..1c6e1aac37 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_multipartitioning.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_multipartitioning.q
@@ -10,12 +10,12 @@ create table over10k(
bo boolean,
s string,
ts timestamp,
- dec decimal,
+ dec decimal(4,2),
bin binary)
row format delimited
fields terminated by '|';
-load data local inpath '../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k;
select s, rank() over (partition by s order by si), sum(b) over (partition by s order by si) from over10k limit 100;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_navfn.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_navfn.q
index 8a9d001259..05da2ba7ef 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_navfn.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_navfn.q
@@ -9,13 +9,13 @@ create table over10k(
d double,
bo boolean,
s string,
- ts timestamp,
- dec decimal,
+ ts timestamp,
+ dec decimal(4,2),
bin binary)
row format delimited
fields terminated by '|';
-load data local inpath '../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k;
select s, row_number() over (partition by d order by dec) from over10k limit 100;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_ntile.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_ntile.q
index 505c259f4b..73e8192ee6 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_ntile.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_ntile.q
@@ -10,12 +10,12 @@ create table over10k(
bo boolean,
s string,
ts timestamp,
- dec decimal,
+ dec decimal(4,2),
bin binary)
row format delimited
fields terminated by '|';
-load data local inpath '../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k;
select i, ntile(10) over (partition by s order by i) from over10k limit 100;
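row_number, rank, and ntile are the analytic functions these tests exercise; over the same partition they differ in how they handle ties and bucketing. A short sketch against the over10k table defined above (column names as declared there; output is illustrative):

-- row_number assigns a distinct sequence per row; rank repeats on ties and leaves gaps;
-- ntile(4) splits each partition into four roughly equal buckets
SELECT s,
       row_number() OVER (PARTITION BY s ORDER BY i) AS rn,
       rank()       OVER (PARTITION BY s ORDER BY i) AS rk,
       ntile(4)     OVER (PARTITION BY s ORDER BY i) AS quartile
FROM over10k
LIMIT 20;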
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_rank.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_rank.q
index bf76867813..4b951179e0 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_rank.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_rank.q
@@ -10,12 +10,12 @@ create table over10k(
bo boolean,
s string,
ts timestamp,
- dec decimal,
+ dec decimal(4,2),
bin binary)
row format delimited
fields terminated by '|';
-load data local inpath '../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k;
select s, rank() over (partition by f order by t) from over10k limit 100;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_udaf.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_udaf.q
index f22b992cd4..0173ab7a3a 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_udaf.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_udaf.q
@@ -15,7 +15,7 @@ create table over10k(
row format delimited
fields terminated by '|';
-load data local inpath '../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k;
select s, min(i) over (partition by s) from over10k limit 100;
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_udaf2.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_udaf2.q
new file mode 100644
index 0000000000..b813657bae
--- /dev/null
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_udaf2.q
@@ -0,0 +1,4 @@
+-- user-added aggregates should be usable as windowing functions
+create temporary function mysum as 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFSum';
+
+select sum(key) over (), mysum(key) over () from src limit 1;
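The new test registers a built-in UDAF implementation class under a user-chosen alias and then invokes it with an OVER clause, the scenario this test pins down. The same session-scoped pattern works for any resolvable UDAF class; a hedged sketch using another class from the same package (myavg is an arbitrary alias):

-- register an aggregate class under a session-local alias, use it as a window function, then drop it
CREATE TEMPORARY FUNCTION myavg AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFAverage';
SELECT myavg(key) OVER (PARTITION BY value) FROM src LIMIT 10;
DROP TEMPORARY FUNCTION myavg;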
diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_windowspec.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_windowspec.q
index 7cc1367306..6d8ce67045 100644
--- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_windowspec.q
+++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/windowing_windowspec.q
@@ -15,7 +15,7 @@ create table over10k(
row format delimited
fields terminated by '|';
-load data local inpath '../data/files/over10k' into table over10k;
+load data local inpath '../../data/files/over10k' into table over10k;
select s, sum(b) over (partition by i order by s,b rows unbounded preceding) from over10k limit 100;
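For reference, rows unbounded preceding in the final query is shorthand for a frame running from the start of the partition to the current row, which turns sum into a running total. The explicit BETWEEN form is equivalent; a sketch against over10k:

-- both frames compute the same cumulative sum per partition
SELECT s,
       sum(b) OVER (PARTITION BY i ORDER BY s, b ROWS UNBOUNDED PRECEDING) AS run1,
       sum(b) OVER (PARTITION BY i ORDER BY s, b
                    ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS run2
FROM over10k
LIMIT 100;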