Skip to content

Commit

Permalink
HIVE-22709: NullPointerException during query compilation after HIVE-22578 (Jason Dere, reviewed by Prasanth Jayachandran)
Browse files Browse the repository at this point in the history
  • Loading branch information
Jason Dere committed Jan 9, 2020
1 parent 0760b2f commit f8e583f
Show file tree
Hide file tree
Showing 3 changed files with 153 additions and 4 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -12464,7 +12464,7 @@ void analyzeInternal(final ASTNode astToAnalyze, Supplier<PlannerContext> pcf) t
boolean isCacheEnabled = isResultsCacheEnabled();
QueryResultsCache.LookupInfo lookupInfo = null;
if (isCacheEnabled && !needsTransform && queryTypeCanUseCache()) {
lookupInfo = createLookupInfoForQuery(ast);
lookupInfo = createLookupInfoForQuery(astToAnalyze);
if (checkResultsCache(lookupInfo, false)) {
return;
}
Expand All @@ -12476,9 +12476,9 @@ void analyzeInternal(final ASTNode astToAnalyze, Supplier<PlannerContext> pcf) t
// If we use CBO and we may apply masking/filtering policies, we create a copy of the ast.
// The reason is that the generation of the operator tree may modify the initial ast,
// but if we need to parse for a second time, we would like to parse the unmodified ast.
astForMasking = (ASTNode) ParseDriver.adaptor.dupTree(ast);
astForMasking = (ASTNode) ParseDriver.adaptor.dupTree(astToAnalyze);
} else {
astForMasking = ast;
astForMasking = astToAnalyze;
}

// 2. Gen OP Tree from resolved Parse Tree
Expand Down Expand Up @@ -12510,7 +12510,7 @@ void analyzeInternal(final ASTNode astToAnalyze, Supplier<PlannerContext> pcf) t
// In the case that row or column masking/filtering was required, we do not support caching.
// TODO: Enable caching for queries with masking/filtering
if (isCacheEnabled && needsTransform && !usesMasking && queryTypeCanUseCache()) {
lookupInfo = createLookupInfoForQuery(ast);
lookupInfo = createLookupInfoForQuery(astToAnalyze);
if (checkResultsCache(lookupInfo, false)) {
return;
}
Expand Down
29 changes: 29 additions & 0 deletions ql/src/test/queries/clientpositive/results_cache_with_auth.q
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@

-- Setup results cache
set hive.compute.query.using.stats=false;
set hive.query.results.cache.enabled=true;
set hive.query.results.cache.nontransactional.tables.enabled=true;

-- Setup auth
set hive.test.authz.sstd.hs2.mode=true;
set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
set hive.security.authorization.enabled=true;

create table results_cache_with_auth_t1 (c1 string);
insert into results_cache_with_auth_t1 values ('abc');

explain
select count(*) from results_cache_with_auth_t1;

select count(*) from results_cache_with_auth_t1;

set test.comment="Cache should be used for this query";
set test.comment;
explain
select count(*) from results_cache_with_auth_t1;

select count(*) from results_cache_with_auth_t1;

set hive.security.authorization.enabled=false;
drop table results_cache_with_auth_t1;
120 changes: 120 additions & 0 deletions ql/src/test/results/clientpositive/results_cache_with_auth.q.out
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
PREHOOK: query: create table results_cache_with_auth_t1 (c1 string)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@results_cache_with_auth_t1
POSTHOOK: query: create table results_cache_with_auth_t1 (c1 string)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@results_cache_with_auth_t1
PREHOOK: query: insert into results_cache_with_auth_t1 values ('abc')
PREHOOK: type: QUERY
PREHOOK: Input: _dummy_database@_dummy_table
PREHOOK: Output: default@results_cache_with_auth_t1
POSTHOOK: query: insert into results_cache_with_auth_t1 values ('abc')
POSTHOOK: type: QUERY
POSTHOOK: Input: _dummy_database@_dummy_table
POSTHOOK: Output: default@results_cache_with_auth_t1
POSTHOOK: Lineage: results_cache_with_auth_t1.c1 SCRIPT []
PREHOOK: query: explain
select count(*) from results_cache_with_auth_t1
PREHOOK: type: QUERY
PREHOOK: Input: default@results_cache_with_auth_t1
#### A masked pattern was here ####
POSTHOOK: query: explain
select count(*) from results_cache_with_auth_t1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@results_cache_with_auth_t1
#### A masked pattern was here ####
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1

STAGE PLANS:
Stage: Stage-1
Map Reduce
Map Operator Tree:
TableScan
alias: results_cache_with_auth_t1
Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
null sort order:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: bigint)
Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
mode: mergepartial
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink

PREHOOK: query: select count(*) from results_cache_with_auth_t1
PREHOOK: type: QUERY
PREHOOK: Input: default@results_cache_with_auth_t1
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from results_cache_with_auth_t1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@results_cache_with_auth_t1
#### A masked pattern was here ####
1
test.comment="Cache should be used for this query"
PREHOOK: query: explain
select count(*) from results_cache_with_auth_t1
PREHOOK: type: QUERY
PREHOOK: Input: default@results_cache_with_auth_t1
#### A masked pattern was here ####
POSTHOOK: query: explain
select count(*) from results_cache_with_auth_t1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@results_cache_with_auth_t1
#### A masked pattern was here ####
STAGE DEPENDENCIES:
Stage-0 is a root stage

STAGE PLANS:
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
Cached Query Result: true

PREHOOK: query: select count(*) from results_cache_with_auth_t1
PREHOOK: type: QUERY
PREHOOK: Input: default@results_cache_with_auth_t1
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from results_cache_with_auth_t1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@results_cache_with_auth_t1
#### A masked pattern was here ####
1
PREHOOK: query: drop table results_cache_with_auth_t1
PREHOOK: type: DROPTABLE
PREHOOK: Input: default@results_cache_with_auth_t1
PREHOOK: Output: default@results_cache_with_auth_t1
POSTHOOK: query: drop table results_cache_with_auth_t1
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@results_cache_with_auth_t1
POSTHOOK: Output: default@results_cache_with_auth_t1

0 comments on commit f8e583f

Please sign in to comment.