/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
  
  
  package org.apache.hadoop.hive.ql;
  
  import java.io.DataInput;
  import java.io.FileInputStream;
  import java.io.FileOutputStream;
  import java.io.PrintWriter;
  import java.io.Serializable;
  import java.util.ArrayList;
  import java.util.HashMap;
  import java.util.HashSet;
  import java.util.LinkedHashMap;
  import java.util.LinkedList;
  import java.util.List;
  import java.util.Map;
  import java.util.Queue;
  import java.util.Set;
  import java.util.concurrent.ConcurrentLinkedQueue;
  
  import org.apache.commons.lang.StringUtils;
  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;
  import  org.apache.hadoop.fs.FSDataInputStream;
  import  org.apache.hadoop.fs.Path;
  import  org.apache.hadoop.hive.common.JavaUtils;
  import  org.apache.hadoop.hive.conf.HiveConf;
  import  org.apache.hadoop.hive.metastore.MetaStoreUtils;
  import  org.apache.hadoop.hive.metastore.api.FieldSchema;
  import  org.apache.hadoop.hive.metastore.api.Schema;
  import  org.apache.hadoop.hive.ql.exec.ConditionalTask;
  import  org.apache.hadoop.hive.ql.exec.ExecDriver;
  import  org.apache.hadoop.hive.ql.exec.FetchTask;
  import  org.apache.hadoop.hive.ql.exec.MapRedTask;
  import  org.apache.hadoop.hive.ql.exec.MoveTask;
  import  org.apache.hadoop.hive.ql.exec.Operator;
  import  org.apache.hadoop.hive.ql.exec.StatsTask;
  import  org.apache.hadoop.hive.ql.exec.TableScanOperator;
  import  org.apache.hadoop.hive.ql.exec.Task;
  import  org.apache.hadoop.hive.ql.exec.TaskFactory;
  import  org.apache.hadoop.hive.ql.exec.TaskResult;
  import  org.apache.hadoop.hive.ql.exec.TaskRunner;
  import  org.apache.hadoop.hive.ql.exec.Utilities;
  import  org.apache.hadoop.hive.ql.history.HiveHistory.Keys;
  import  org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
  import  org.apache.hadoop.hive.ql.hooks.Hook;
  import  org.apache.hadoop.hive.ql.hooks.HookContext;
  import  org.apache.hadoop.hive.ql.hooks.PostExecute;
  import  org.apache.hadoop.hive.ql.hooks.PreExecute;
  import  org.apache.hadoop.hive.ql.hooks.ReadEntity;
  import  org.apache.hadoop.hive.ql.hooks.WriteEntity;
  import  org.apache.hadoop.hive.ql.lockmgr.HiveLock;
  import  org.apache.hadoop.hive.ql.lockmgr.HiveLockManager;
  import  org.apache.hadoop.hive.ql.lockmgr.HiveLockManagerCtx;
  import  org.apache.hadoop.hive.ql.lockmgr.HiveLockMode;
  import  org.apache.hadoop.hive.ql.lockmgr.HiveLockObj;
  import  org.apache.hadoop.hive.ql.lockmgr.HiveLockObject;
  import  org.apache.hadoop.hive.ql.lockmgr.HiveLockObject.HiveLockObjectData;
  import  org.apache.hadoop.hive.ql.lockmgr.LockException;
  import  org.apache.hadoop.hive.ql.log.PerfLogger;
  import  org.apache.hadoop.hive.ql.metadata.AuthorizationException;
  import  org.apache.hadoop.hive.ql.metadata.DummyPartition;
  import  org.apache.hadoop.hive.ql.metadata.Hive;
  import  org.apache.hadoop.hive.ql.metadata.HiveException;
  import  org.apache.hadoop.hive.ql.metadata.Partition;
  import  org.apache.hadoop.hive.ql.metadata.Table;
  import  org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
  import  org.apache.hadoop.hive.ql.parse.ASTNode;
  import  org.apache.hadoop.hive.ql.parse.AbstractSemanticAnalyzerHook;
  import  org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
  import  org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
  import  org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContextImpl;
  import  org.apache.hadoop.hive.ql.parse.ImportSemanticAnalyzer;
  import  org.apache.hadoop.hive.ql.parse.ParseContext;
  import  org.apache.hadoop.hive.ql.parse.ParseDriver;
  import  org.apache.hadoop.hive.ql.parse.ParseUtils;
  import  org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
  import  org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
  import  org.apache.hadoop.hive.ql.parse.SemanticAnalyzerFactory;
  import  org.apache.hadoop.hive.ql.parse.SemanticException;
  import  org.apache.hadoop.hive.ql.parse.VariableSubstitution;
 import  org.apache.hadoop.hive.ql.plan.ConditionalResolver;
 import  org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles;
 import  org.apache.hadoop.hive.ql.plan.HiveOperation;
 import  org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import  org.apache.hadoop.hive.ql.plan.TableDesc;
 import  org.apache.hadoop.hive.ql.processors.CommandProcessor;
 import  org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import  org.apache.hadoop.hive.ql.session.SessionState;
 import  org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
 import  org.apache.hadoop.hive.serde2.ByteStream;
 import  org.apache.hadoop.hive.shims.ShimLoader;
 import  org.apache.hadoop.mapred.ClusterStatus;
 import  org.apache.hadoop.mapred.JobClient;
 import  org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.util.ReflectionUtils;

 // hivesterix execution engine (package path assumed from the hivesterix project layout)
 import edu.uci.ics.hivesterix.runtime.exec.HyracksExecutionEngine;
 import edu.uci.ics.hivesterix.runtime.exec.IExecutionEngine;
 
 
  @SuppressWarnings({ "deprecation", "unchecked", "rawtypes" })
 public class Driver implements CommandProcessor {
 
     // hivesterix
     private IExecutionEngine engine;
     private boolean hivesterix = false;
     private Set<Task> executedConditionalTsks = new HashSet<Task>();
 
     static final private Log LOG = LogFactory.getLog(Driver.class.getName());
      static final private LogHelper console = new LogHelper(LOG);
 
     private static final Object compileMonitor = new Object();
 
     private int maxRows = 100;
     ByteStream.Output bos = new ByteStream.Output();
 
     private HiveConf conf;
     private DataInput resStream;
     private Context ctx;
     private QueryPlan plan;
     private Schema schema;
     private HiveLockManager hiveLockMgr;
 
     private String errorMessage;
     private String SQLState;
 
     // A limit on the number of threads that can be launched
     private int maxthreads;
     private static final int SLEEP_TIME = 2000;
      protected int tryCount = Integer.MAX_VALUE;

    
      /**
       * For backwards compatibility with current tests.
       */
      public Driver(HiveConf conf) {
          this.conf = conf;
      }
 
      public Driver() {
          if (SessionState.get() != null) {
              conf = SessionState.get().getConf();
          }

          // hivesterix
          engine = new HyracksExecutionEngine(conf);
      }
 
     // hivesterix: plan printer
      public Driver(HiveConf conf, PrintWriter planPrinter) {
          this.conf = conf;
          engine = new HyracksExecutionEngine(conf, planPrinter);
     }
 
      public void clear() {
          this.hivesterix = false;
          this.executedConditionalTsks.clear();
     }
 
      private boolean checkLockManager() {
          boolean supportConcurrency = conf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY);
          if (!supportConcurrency) {
              return false;
          }
          if (hiveLockMgr == null) {
              try {
                  setLockManager();
              } catch (SemanticException e) {
                  errorMessage = "FAILED: Error in semantic analysis: " + e.getMessage();
                  SQLState = ErrorMsg.findSQLState(e.getMessage());
                  console.printError(errorMessage + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
                  return false;
              }
          }
          // We set the lock manager on the ctx here because each query has its own ctx
          // object, while the hiveLockMgr is shared across the same Driver instance,
          // which can run multiple queries.
          ctx.setHiveLockMgr(hiveLockMgr);
          return hiveLockMgr != null;
     }
 
      private void setLockManager() throws SemanticException {
          boolean supportConcurrency = conf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY);
          if (supportConcurrency) {
              String lockMgr = conf.getVar(HiveConf.ConfVars.HIVE_LOCK_MANAGER);
              if ((lockMgr == null) || (lockMgr.isEmpty())) {
                  throw new SemanticException(ErrorMsg.LOCKMGR_NOT_SPECIFIED.getMsg());
              }

              try {
                  hiveLockMgr = (HiveLockManager) ReflectionUtils.newInstance(conf.getClassByName(lockMgr), conf);
                  hiveLockMgr.setContext(new HiveLockManagerCtx(conf));
              } catch (Exception e) {
                  // set hiveLockMgr to null just in case this invalid manager gets set on
                  // the next query's ctx
                  if (hiveLockMgr != null) {
                      try {
                          hiveLockMgr.close();
                      } catch (LockException e1) {
                          // nothing we can do here
                      }
                      hiveLockMgr = null;
                  }
                  throw new SemanticException(ErrorMsg.LOCKMGR_NOT_INITIALIZED.getMsg() + e.getMessage());
              }
          }
     }
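
      // Example (illustrative, not part of the original source): with concurrency enabled,
      // the lock manager class is loaded reflectively from configuration, e.g.
      //   hive.support.concurrency=true
      //   hive.lock.manager=org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager
      // The configured class must implement HiveLockManager; otherwise setLockManager()
      // fails with LOCKMGR_NOT_INITIALIZED and the manager is reset to null.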
 
     public void init() {
         Operator.resetId();
     }

    
      /**
       * Return the status information about the Map-Reduce cluster.
       */
     public ClusterStatus getClusterStatus() throws Exception {
         ClusterStatus cs;
         try {
              JobConf job = new JobConf(conf, ExecDriver.class);
             JobClient jc = new JobClient(job);
             cs = jc.getClusterStatus();
         } catch (Exception e) {
             e.printStackTrace();
             throw e;
         }
          LOG.info("Returning cluster status: " + cs.toString());
         return cs;
     }
 
     public Schema getSchema() {
          return schema;
     }

    
      /**
       * Get a Schema with fields represented with native Hive types.
       */
     public static Schema getSchema(BaseSemanticAnalyzer sem, HiveConf conf) {
         Schema schema = null;
 
         // If we have a plan, prefer its logical result schema if it's
         // available; otherwise, try digging out a fetch task; failing that,
         // give up.
         if (sem == null) {
             // can't get any info without a plan
         } else if (sem.getResultSchema() != null) {
             List<FieldSchema> lst = sem.getResultSchema();
              schema = new Schema(lst, null);
         } else if (sem.getFetchTask() != null) {
             FetchTask ft = sem.getFetchTask();
             TableDesc td = ft.getTblDesc();
              // partitioned tables don't have tableDesc set on the FetchTask. Instead
              // they have a list of PartitionDesc objects, each with a table desc.
              // Let's try to fetch the desc for the first partition and use its
              // deserializer.
             if (td == null && ft.getWork() != null && ft.getWork().getPartDesc() != null) {
                 if (ft.getWork().getPartDesc().size() > 0) {
                     td = ft.getWork().getPartDesc().get(0).getTableDesc();
                 }
             }
 
             if (td == null) {
                  LOG.info("No returning schema.");
             } else {
                 String tableName = "result";
                 List<FieldSchema> lst = null;
                 try {
                      lst = MetaStoreUtils.getFieldsFromDeserializer(tableName, td.getDeserializer());
                 } catch (Exception e) {
                      LOG.warn("Error getting schema: " + org.apache.hadoop.util.StringUtils.stringifyException(e));
                 }
                 if (lst != null) {
                      schema = new Schema(lst, null);
                 }
             }
         }
         if (schema == null) {
             schema = new Schema();
         }
          LOG.info("Returning Hive schema: " + schema);
         return schema;
     }

    
      /**
       * Get a Schema with fields represented with Thrift DDL types.
       */
     public Schema getThriftSchema() throws Exception {
         Schema schema;
         try {
             schema = getSchema();
             if (schema != null) {
                 List<FieldSchema> lst = schema.getFieldSchemas();
                 // Go over the schema and convert type to thrift type
                 if (lst != null) {
                     for (FieldSchema f : lst) {
                         f.setType(MetaStoreUtils.typeToThriftType(f.getType()));
                     }
                 }
             }
         } catch (Exception e) {
             e.printStackTrace();
             throw e;
         }
          LOG.info("Returning Thrift schema: " + schema);
         return schema;
     }

    
      /**
       * Return the maximum number of rows returned by getResults.
       */
     public int getMaxRows() {
          return maxRows;
     }

    
      /**
       * Set the maximum number of rows returned by getResults.
       */
     public void setMaxRows(int maxRows) {
          this.maxRows = maxRows;
     }
 
     public boolean hasReduceTasks(List<Task<? extends Serializable>> tasks) {
         if (tasks == null) {
             return false;
         }
 
         boolean hasReduce = false;
          for (Task<? extends Serializable> task : tasks) {
             if (task.hasReduce()) {
                 return true;
             }
 
             hasReduce = (hasReduce || hasReduceTasks(task.getChildTasks()));
         }
         return hasReduce;
     }

    
      /**
       * Compile a new query. Any currently-planned query associated with this Driver is
       * discarded.
       *
       * @param command The SQL query to compile
       */
     public int compile(String command) {
          return compile(command, true);
     }

    
      /**
       * Hold state variables specific to each query being executed, which may not be
       * consistent in the overall SessionState.
       */
     private static class QueryState {
         private HiveOperation op;
         private String cmd;
         private boolean init = false;

        
          /**
           * Initialize the queryState with the query state variables.
           */
          public void init(HiveOperation op, String cmd) {
              this.op = op;
              this.cmd = cmd;
              this.init = true;
         }
 
         public boolean isInitialized() {
              return this.init;
         }
 
         public HiveOperation getOp() {
              return this.op;
         }
 
         public String getCmd() {
              return this.cmd;
         }
     }
 
     public void saveSession(QueryState qs) {
         SessionState oldss = SessionState.get();
         if (oldss != null && oldss.getHiveOperation() != null) {
             qs.init(oldss.getHiveOperation(), oldss.getCmd());
         }
     }
 
     public void restoreSession(QueryState qs) {
         SessionState ss = SessionState.get();
         if (ss != null && qs != null && qs.isInitialized()) {
             ss.setCmd(qs.getCmd());
             ss.setCommandType(qs.getOp());
         }
     }

    
      /**
       * Compile a new query, but potentially reset the taskID counter. Not resetting the
       * task counter is useful for generating re-entrant QL queries.
       *
       * @param command      The HiveQL query to compile
       * @param resetTaskIds Resets the taskID counter if true
       * @return 0 for ok
       */
      public int compile(String command, boolean resetTaskIds) {
          PerfLogger perfLogger = PerfLogger.getPerfLogger();
          perfLogger.PerfLogBegin(LOG, PerfLogger.COMPILE);
 
         //holder for parent command type/string when executing reentrant queries
         QueryState queryState = new QueryState();
 
          if (plan != null) {
              close();
              plan = null;
          }
 
         if (resetTaskIds) {
             TaskFactory.resetId();
         }
         saveSession(queryState);
 
         try {
              command = new VariableSubstitution().substitute(conf, command);
              ctx = new Context(conf);
              ctx.setTryCount(getTryCount());
              ctx.setCmd(command);
              ctx.setHDFSCleanup(true);
 
             ParseDriver pd = new ParseDriver();
              ASTNode tree = pd.parse(command, ctx);
             tree = ParseUtils.findRootNonNullToken(tree);
 
              BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(conf, tree);
             List<AbstractSemanticAnalyzerHook> saHooks = getHooks(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK,
                     AbstractSemanticAnalyzerHook.class);
 
             // Do semantic analysis and plan generation
             if (saHooks != null) {
                 HiveSemanticAnalyzerHookContext hookCtx = new HiveSemanticAnalyzerHookContextImpl();
                  hookCtx.setConf(conf);
                 for (AbstractSemanticAnalyzerHook hook : saHooks) {
                      tree = hook.preAnalyze(hookCtx, tree);
                 }
                 sem.analyze(tree);
                 hookCtx.update(sem);
                 for (AbstractSemanticAnalyzerHook hook : saHooks) {
                      hook.postAnalyze(hookCtx, sem.getRootTasks());
                 }
             } else {
                 sem.analyze(tree);
             }
 
              LOG.info("Semantic Analysis Completed");
 
             // validate the plan
             sem.validate();
 
              plan = new QueryPlan(command, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN));
 
             // test Only - serialize the query plan and deserialize it
             if ("true".equalsIgnoreCase(System.getProperty("test.serialize.qplan"))) {
 
                  String queryPlanFileName = ctx.getLocalScratchDir(true) + Path.SEPARATOR_CHAR + "queryplan.xml";
                  LOG.info("query plan = " + queryPlanFileName);
                 queryPlanFileName = new Path(queryPlanFileName).toUri().getPath();
 
                 // serialize the queryPlan
                 FileOutputStream fos = new FileOutputStream(queryPlanFileName);
                  Utilities.serializeQueryPlan(plan, fos);
                 fos.close();
 
                 // deserialize the queryPlan
                 FileInputStream fis = new FileInputStream(queryPlanFileName);
                  QueryPlan newPlan = Utilities.deserializeQueryPlan(fis, conf);
                 fis.close();
 
                 // Use the deserialized plan
                  plan = newPlan;
             }
 
             // initialize FetchTask right here
              if (plan.getFetchTask() != null) {
                  plan.getFetchTask().initialize(conf, plan, null);
             }
 
             // get the output schema
              schema = getSchema(sem, conf);
 
             //do the authorization check
              if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
                 try {
                      perfLogger.PerfLogBegin(LOG, PerfLogger.DO_AUTHORIZATION);
                     doAuthorization(sem);
                 } catch (AuthorizationException authExp) {
                      console.printError("Authorization failed:" + authExp.getMessage()
                             + ". Use show grant to get more details.");
                     return 403;
                 } finally {
                      perfLogger.PerfLogEnd(LOG, PerfLogger.DO_AUTHORIZATION);
                 }
             }
 
             //restore state after we're done executing a specific query
             // hyracks run
             if (sem instanceof SemanticAnalyzer && command.toLowerCase().indexOf("create") < 0) {
                  int engineRet = engine.compileJob(sem.getRootTasks());
                 if (engineRet == 0) {
                      hivesterix = true;
                 }
             }
             return 0;
         } catch (Exception e) {
              ErrorMsg error = ErrorMsg.getErrorMsg(e.getMessage());
              errorMessage = "FAILED: " + e.getClass().getSimpleName();
              if (error != ErrorMsg.GENERIC_ERROR) {
                  errorMessage += " [Error " + error.getErrorCode() + "]:";
              }
              errorMessage += " " + e.getMessage();
              SQLState = error.getSQLState();
              console.printError(errorMessage + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
             return error.getErrorCode();
         } finally {
              perfLogger.PerfLogEnd(LOG, PerfLogger.COMPILE);
             restoreSession(queryState);
         }
     }
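
      // Editorial summary (not in the original source): compile() above runs variable
      // substitution -> parse to an AST -> semantic-analyzer hooks (preAnalyze) ->
      // semantic analysis -> hooks (postAnalyze) -> plan validation -> QueryPlan
      // construction -> output schema -> optional authorization check -> optional
      // hivesterix/Hyracks job compilation for queries that do not contain "create".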
 
      private void doAuthorization(BaseSemanticAnalyzer sem) throws HiveException, AuthorizationException {
         HashSet<ReadEntity> inputs = sem.getInputs();
         HashSet<WriteEntity> outputs = sem.getOutputs();
         SessionState ss = SessionState.get();
         HiveOperation op = ss.getHiveOperation();
         Hive db = sem.getDb();
         if (op != null) {
             if (op.equals(HiveOperation.CREATETABLE_AS_SELECT) || op.equals(HiveOperation.CREATETABLE)) {
                 ss.getAuthorizer().authorize(db.getDatabase(db.getCurrentDatabase()), null,
                         HiveOperation.CREATETABLE_AS_SELECT.getOutputRequiredPrivileges());
             } else {
                 if (op.equals(HiveOperation.IMPORT)) {
                     ImportSemanticAnalyzer isa = (ImportSemanticAnalyzer) sem;
                     if (!isa.existsTable()) {
                         ss.getAuthorizer().authorize(db.getDatabase(db.getCurrentDatabase()), null,
                                 HiveOperation.CREATETABLE_AS_SELECT.getOutputRequiredPrivileges());
                     }
                 }
             }
             if (outputs != null && outputs.size() > 0) {
                 for (WriteEntity write : outputs) {
 
                     if (write.getType() == WriteEntity.Type.PARTITION) {
                         Partition part = db.getPartition(write.getTable(), write.getPartition().getSpec(), false);
                         if (part != null) {
                              ss.getAuthorizer().authorize(write.getPartition(), null, op.getOutputRequiredPrivileges());
                             continue;
                         }
                     }
 
                     if (write.getTable() != null) {
                          ss.getAuthorizer().authorize(write.getTable(), null, op.getOutputRequiredPrivileges());
                     }
                 }
 
             }
         }
 
         if (inputs != null && inputs.size() > 0) {
 
             Map<Table, List<String>> tab2Cols = new HashMap<Table, List<String>>();
             Map<Partition, List<String>> part2Cols = new HashMap<Partition, List<String>>();
 
              Map<String, Boolean> tableUsePartLevelAuth = new HashMap<String, Boolean>();
             for (ReadEntity read : inputs) {
                 Table tbl = read.getTable();
                 if ((read.getPartition() != null) || (tbl.isPartitioned())) {
                     String tblName = tbl.getTableName();
                     if (tableUsePartLevelAuth.get(tblName) == null) {
                         boolean usePartLevelPriv = (tbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE") != null && ("TRUE"
                                 .equalsIgnoreCase(tbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))));
                         if (usePartLevelPriv) {
                              tableUsePartLevelAuth.put(tblName, Boolean.TRUE);
                          } else {
                              tableUsePartLevelAuth.put(tblName, Boolean.FALSE);
                         }
                     }
                 }
             }
 
             if (op.equals(HiveOperation.CREATETABLE_AS_SELECT) || op.equals(HiveOperation.QUERY)) {
                 SemanticAnalyzer querySem = (SemanticAnalyzer) sem;
                 ParseContext parseCtx = querySem.getParseContext();
                 Map<TableScanOperator, Table> tsoTopMap = parseCtx.getTopToTable();
 
                 for (Map.Entry<String, Operator<? extends OperatorDesc>> topOpMap : querySem.getParseContext()
                         .getTopOps().entrySet()) {
                     Operator<? extends OperatorDesc> topOp = topOpMap.getValue();
                     if (topOp instanceof TableScanOperator && tsoTopMap.containsKey(topOp)) {
                         TableScanOperator tableScanOp = (TableScanOperator) topOp;
                         Table tbl = tsoTopMap.get(tableScanOp);
                          List<Integer> neededColumnIds = tableScanOp.getNeededColumnIDs();
                          List<FieldSchema> columns = tbl.getCols();
                          List<String> cols = new ArrayList<String>();
                         if (neededColumnIds != null && neededColumnIds.size() > 0) {
                             for (int i = 0; i < neededColumnIds.size(); i++) {
                                 cols.add(columns.get(neededColumnIds.get(i)).getName());
                             }
                         } else {
                             for (int i = 0; i < columns.size(); i++) {
                                 cols.add(columns.get(i).getName());
                             }
                         }
                          // The map may not contain all sources: the input list may have been
                          // optimized out or be non-existent, though such sources may still be
                          // referenced by the TableScanOperator. If the entry is null, the partition
                          // probably doesn't exist, so fall back to table-level permissions.
                          if (tbl.isPartitioned() && tableUsePartLevelAuth.get(tbl.getTableName()) == Boolean.TRUE) {
                             String alias_id = topOpMap.getKey();
                             PrunedPartitionList partsList = PartitionPruner.prune(parseCtx.getTopToTable().get(topOp),
                                     parseCtx.getOpToPartPruner().get(topOp), parseCtx.getConf(), alias_id,
                                     parseCtx.getPrunedPartitions());
                             Set<Partition> parts = new HashSet<Partition>();
                             parts.addAll(partsList.getConfirmedPartns());
                             parts.addAll(partsList.getUnknownPartns());
                             for (Partition part : parts) {
                                  List<String> existingCols = part2Cols.get(part);
                                 if (existingCols == null) {
                                     existingCols = new ArrayList<String>();
                                 }
                                 existingCols.addAll(cols);
                                  part2Cols.put(part, existingCols);
                             }
                         } else {
                              List<String> existingCols = tab2Cols.get(tbl);
                             if (existingCols == null) {
                                 existingCols = new ArrayList<String>();
                             }
                             existingCols.addAll(cols);
                              tab2Cols.put(tbl, existingCols);
                         }
                     }
                 }
             }
 
             // cache the results for table authorization
              Set<String> tableAuthChecked = new HashSet<String>();
             for (ReadEntity read : inputs) {
                 Table tbl = read.getTable();
                 if (read.getPartition() != null) {
                     Partition partition = read.getPartition();
                     tbl = partition.getTable();
                     // use partition level authorization
                      if (tableUsePartLevelAuth.get(tbl.getTableName()) == Boolean.TRUE) {
                          List<String> cols = part2Cols.get(partition);
                         if (cols != null && cols.size() > 0) {
                              ss.getAuthorizer().authorize(partition.getTable(), partition, cols,
                                     op.getInputRequiredPrivileges(), null);
                         } else {
                              ss.getAuthorizer().authorize(partition, op.getInputRequiredPrivileges(), null);
                         }
                         continue;
                     }
                 }
 
                 // if we reach here, it means it needs to do a table authorization
                  // check, and the table authorization may have already happened because of other
                 // partitions
                 if (tbl != null && !tableAuthChecked.contains(tbl.getTableName())
                          && !(tableUsePartLevelAuth.get(tbl.getTableName()) == Boolean.TRUE)) {
                      List<String> cols = tab2Cols.get(tbl);
                     if (cols != null && cols.size() > 0) {
                          ss.getAuthorizer().authorize(tbl, null, cols, op.getInputRequiredPrivileges(), null);
                     } else {
                          ss.getAuthorizer().authorize(tbl, op.getInputRequiredPrivileges(), null);
                     }
                     tableAuthChecked.add(tbl.getTableName());
                 }
             }
 
         }
     }

    

      /**
       * @return The current query plan associated with this Driver, if any.
       */
     public QueryPlan getPlan() {
          return plan;
     }

    

      /**
       * Get the list of objects to be locked. If a partition needs to be locked (in any
       * mode), all its parents should also be locked in SHARED mode.
       *
       * @param t    The table to be locked
       * @param p    The partition to be locked
       * @param mode The mode of the lock (SHARED/EXCLUSIVE)
       */
      private List<HiveLockObj> getLockObjects(Table t, Partition p, HiveLockMode mode) throws SemanticException {
         List<HiveLockObj> locks = new LinkedList<HiveLockObj>();
 
          HiveLockObjectData lockData = new HiveLockObjectData(plan.getQueryId(), String.valueOf(System
                  .currentTimeMillis()), "IMPLICIT", plan.getQueryStr());
 
         if (t != null) {
              locks.add(new HiveLockObj(new HiveLockObject(t, lockData), mode));
             mode = HiveLockMode.SHARED;
             locks.add(new HiveLockObj(new HiveLockObject(t.getDbName(), lockData), mode));
             return locks;
         }
 
         if (p != null) {
             if (!(p instanceof DummyPartition)) {
                  locks.add(new HiveLockObj(new HiveLockObject(p, lockData), mode));
             }
 
             // All the parents are locked in shared mode
             mode = HiveLockMode.SHARED;
 
             // For dummy partitions, only partition name is needed
             String name = p.getName();
 
             if (p instanceof DummyPartition) {
                 name = p.getName().split("@")[2];
             }
 
             String partialName = "";
             String[] partns = name.split("/");
             int len = p instanceof DummyPartition ? partns.length : partns.length - 1;
              Map<String, String> partialSpec = new LinkedHashMap<String, String>();
              for (int idx = 0; idx < len; idx++) {
                 String partn = partns[idx];
                 partialName += partn;
                 String[] nameValue = partn.split("=");
                 assert (nameValue.length == 2);
                 partialSpec.put(nameValue[0], nameValue[1]);
                 try {
                      locks.add(new HiveLockObj(new HiveLockObject(new DummyPartition(p.getTable(), p.getTable()
                              .getDbName() + "/" + p.getTable().getTableName() + "/" + partialName, partialSpec),
                              lockData), mode));
                     partialName += "/";
                 } catch (HiveException e) {
                     throw new SemanticException(e.getMessage());
                 }
             }
 
             locks.add(new HiveLockObj(new HiveLockObject(p.getTable(), lockData), mode));
             locks.add(new HiveLockObj(new HiveLockObject(p.getTable().getDbName(), lockData), mode));
         }
         return locks;
     }
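
      // Illustrative example (editorial, not in the original source): for a partition of
      // table "tab" in database "db" with spec ds=2012-01-01/hr=12, an EXCLUSIVE request
      // yields roughly:
      //   db/tab/ds=2012-01-01/hr=12  -> EXCLUSIVE  (the partition itself)
      //   db/tab/ds=2012-01-01        -> SHARED     (parent partial partition)
      //   db/tab                      -> SHARED     (table)
      //   db                          -> SHARED     (database)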

    
      /**
       * Acquire read and write locks needed by the statement. The list of objects to be
       * locked is obtained from the inputs and outputs populated by the compiler. The
       * lock acquisition scheme is pretty simple: if all the locks cannot be obtained,
       * error out. Deadlock is avoided by making sure that the locks are
       * lexicographically sorted.
       */
     public int acquireReadWriteLocks() {
         PerfLogger perfLogger = PerfLogger.getPerfLogger();
          perfLogger.PerfLogBegin(LOG, PerfLogger.ACQUIRE_READ_WRITE_LOCKS);
 
         try {
              boolean supportConcurrency = conf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY);
             if (!supportConcurrency) {
                 return 0;
             }
 
             List<HiveLockObj> lockObjects = new ArrayList<HiveLockObj>();
 
             // Sort all the inputs, outputs.
             // If a lock needs to be acquired on any partition, a read lock needs to be acquired on all
             // its parents also
              for (ReadEntity input : plan.getInputs()) {
                 if (input.getType() == ReadEntity.Type.TABLE) {
                     lockObjects.addAll(getLockObjects(input.getTable(), null, HiveLockMode.SHARED));
                 } else {
                      lockObjects.addAll(getLockObjects(null, input.getPartition(), HiveLockMode.SHARED));
                 }
             }
 
              for (WriteEntity output : plan.getOutputs()) {
                 List<HiveLockObj> lockObj = null;
                 if (output.getTyp() == WriteEntity.Type.TABLE) {
                      lockObj = getLockObjects(output.getTable(), null, output.isComplete() ? HiveLockMode.EXCLUSIVE
                             : HiveLockMode.SHARED);
                 } else if (output.getTyp() == WriteEntity.Type.PARTITION) {
                      lockObj = getLockObjects(null, output.getPartition(), HiveLockMode.EXCLUSIVE);
                 }
                 // In case of dynamic queries, it is possible to have incomplete dummy partitions
                 else if (output.getTyp() == WriteEntity.Type.DUMMYPARTITION) {
                      lockObj = getLockObjects(null, output.getPartition(), HiveLockMode.SHARED);
                 }
 
                 if (lockObj != null) {
                     lockObjects.addAll(lockObj);
                      ctx.getOutputLockObjects().put(output, lockObj);
                 }
             }
 
              if (lockObjects.isEmpty() && !ctx.isNeedLockMgr()) {
                 return 0;
             }
 
              HiveLockObjectData lockData = new HiveLockObjectData(plan.getQueryId(), String.valueOf(System
                      .currentTimeMillis()), "IMPLICIT", plan.getQueryStr());
 
             // Lock the database also
             try {
                 Hive db = Hive.get();
                 lockObjects.add(new HiveLockObj(new HiveLockObject(db.getCurrentDatabase(), lockData),
                         HiveLockMode.SHARED));
             } catch (HiveException e) {
                 throw new SemanticException(e.getMessage());
             }
 
              List<HiveLock> hiveLocks = ctx.getHiveLockMgr().lock(lockObjects, false);
 
             if (hiveLocks == null) {
                 throw new SemanticException(ErrorMsg.LOCK_CANNOT_BE_ACQUIRED.getMsg());
             } else {
                  ctx.setHiveLocks(hiveLocks);
             }
 
             return (0);
          } catch (SemanticException e) {
              errorMessage = "FAILED: Error in acquiring locks: " + e.getMessage();
              SQLState = ErrorMsg.findSQLState(e.getMessage());
              console.printError(errorMessage + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
             return (10);
          } catch (LockException e) {
              errorMessage = "FAILED: Error in acquiring locks: " + e.getMessage();
              SQLState = ErrorMsg.findSQLState(e.getMessage());
              console.printError(errorMessage + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
             return (10);
         } finally {
              perfLogger.PerfLogEnd(LOG, PerfLogger.ACQUIRE_READ_WRITE_LOCKS);
         }
     }
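
      // Editorial sketch of the deadlock-avoidance argument from the javadoc above: the
      // lock manager sorts the requested HiveLockObj list lexicographically before
      // acquiring, so two queries that both need {db/tabA, db/tabB} request db/tabA
      // first and db/tabB second. Neither holds the "later" lock while waiting on the
      // "earlier" one, so a circular wait cannot form.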

    

      /**
       * Release all the locks specified. If some of the locks have already been
       * released, ignore them.
       *
       * @param hiveLocks list of hive locks to be released
       */
     private void releaseLocks(List<HiveLock> hiveLocks) {
         PerfLogger perfLogger = PerfLogger.getPerfLogger();
          perfLogger.PerfLogBegin(LOG, PerfLogger.RELEASE_LOCKS);

          if (hiveLocks != null) {
              ctx.getHiveLockMgr().releaseLocks(hiveLocks);
          }
          ctx.setHiveLocks(null);

          perfLogger.PerfLogEnd(LOG, PerfLogger.RELEASE_LOCKS);
     }
 
      public CommandProcessorResponse run(String command) throws CommandNeedRetryException {
          errorMessage = null;
          SQLState = null;

          if (!validateConfVariables()) {
              return new CommandProcessorResponse(12, errorMessage, SQLState);
          }

          HiveDriverRunHookContext hookContext = new HiveDriverRunHookContextImpl(conf, command);
         // Get all the driver run hooks and pre-execute them.
         List<HiveDriverRunHook> driverRunHooks;
         try {
             driverRunHooks = getHooks(HiveConf.ConfVars.HIVE_DRIVER_RUN_HOOKS, HiveDriverRunHook.class);
             for (HiveDriverRunHook driverRunHook : driverRunHooks) {
                 driverRunHook.preDriverRun(hookContext);
             }
          } catch (Exception e) {
              errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
              SQLState = ErrorMsg.findSQLState(e.getMessage());
              console.printError(errorMessage + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
              return new CommandProcessorResponse(12, errorMessage, SQLState);
         }
 
         // Reset the perf logger
         PerfLogger perfLogger = PerfLogger.getPerfLogger(true);
          perfLogger.PerfLogBegin(LOG, PerfLogger.DRIVER_RUN);
          perfLogger.PerfLogBegin(LOG, PerfLogger.TIME_TO_SUBMIT);

          int ret;
          synchronized (compileMonitor) {
              ret = compile(command);
          }
          if (ret != 0) {
              releaseLocks(ctx.getHiveLocks());
              return new CommandProcessorResponse(ret, errorMessage, SQLState);
         }
 
         boolean requireLock = false;
         boolean ckLock = checkLockManager();
 
         if (ckLock) {
              boolean lockOnlyMapred = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_LOCK_MAPRED_ONLY);
              if (lockOnlyMapred) {
                  Queue<Task<? extends Serializable>> taskQueue = new LinkedList<Task<? extends Serializable>>();
                  taskQueue.addAll(plan.getRootTasks());
                  while (taskQueue.peek() != null) {
                      Task<? extends Serializable> tsk = taskQueue.remove();
                     requireLock = requireLock || tsk.requireLock();
                     if (requireLock) {
                         break;
                     }
                     if (tsk instanceof ConditionalTask) {
                         taskQueue.addAll(((ConditionalTask) tsk).getListTasks());
                     }
                     if (tsk.getChildTasks() != null) {
                         taskQueue.addAll(tsk.getChildTasks());
                     }
                      // do not add the backup task here, because the backup task should be the
                      // same type as the original task
                 }
             } else {
                 requireLock = true;
             }
         }
 
         if (requireLock) {
             ret = acquireReadWriteLocks();
             if (ret != 0) {
                  releaseLocks(ctx.getHiveLocks());
                  return new CommandProcessorResponse(ret, errorMessage, SQLState);
             }
         }
 
         ret = execute();
         if (ret != 0) {
             //if needRequireLock is false, the release here will do nothing because there is no lock
              releaseLocks(ctx.getHiveLocks());
              return new CommandProcessorResponse(ret, errorMessage, SQLState);
         }
 
         //if needRequireLock is false, the release here will do nothing because there is no lock
          releaseLocks(ctx.getHiveLocks());
 
          perfLogger.PerfLogEnd(LOG, PerfLogger.DRIVER_RUN);
         perfLogger.close();
 
         // Take all the driver run hooks and post-execute them.
         try {
             for (HiveDriverRunHook driverRunHook : driverRunHooks) {
                 driverRunHook.postDriverRun(hookContext);
             }
          } catch (Exception e) {
              errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
              SQLState = ErrorMsg.findSQLState(e.getMessage());
              console.printError(errorMessage + "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
              return new CommandProcessorResponse(12, errorMessage, SQLState);
         }
 
         return new CommandProcessorResponse(ret);
     }
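
      // Typical caller usage (illustrative sketch in the style of Hive's CLI driver; not
      // part of this file):
      //   Driver driver = new Driver(new HiveConf(SessionState.class));
      //   CommandProcessorResponse resp = driver.run("SELECT * FROM src");
      //   if (resp.getResponseCode() == 0) {
      //       ArrayList<String> results = new ArrayList<String>();
      //       while (driver.getResults(results)) {
      //           // consume rows, then results.clear();
      //       }
      //   }
      //   driver.close();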

    
      /**
       * Validate configuration variables.
       *
       * @return true if the configuration is valid, false otherwise
       */
     private boolean validateConfVariables() {
         boolean valid = true;
          if ((!conf.getBoolVar(HiveConf.ConfVars.HIVE_HADOOP_SUPPORTS_SUBDIRECTORIES))
                  && ((conf.getBoolVar(HiveConf.ConfVars.HADOOPMAPREDINPUTDIRRECURSIVE))
                          || (conf.getBoolVar(HiveConf.ConfVars.HIVEOPTLISTBUCKETING)) || ((conf
                              .getBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_UNION_REMOVE))))) {
              errorMessage = "FAILED: Hive Internal Error: " + ErrorMsg.SUPPORT_DIR_MUST_TRUE_FOR_LIST_BUCKETING.getMsg();
              SQLState = ErrorMsg.findSQLState(errorMessage);
              console.printError(errorMessage + "\n");
             valid = false;
         }
         return valid;
     }

    
     /**
      * Returns a set of hooks specified in a configuration variable.
      * See {@link #getHooks(HiveConf.ConfVars, Class)}.
      *
      * @param hookConfVar
      * @return a list of the hooks
      * @throws Exception
      */
     private List<Hook> getHooks(HiveConf.ConfVars hookConfVar) throws Exception {
        return getHooks(hookConfVar, Hook.class);
    }

    
     /**
      * Returns the hooks specified in a configuration variable. The hooks are returned
      * in a list in the order they were specified in the configuration variable.
      *
      * @param hookConfVar The configuration variable specifying a comma separated list
      *                    of the hook class names.
      * @param clazz       The super type of the hooks.
      * @return A list of the hooks cast as the type specified in clazz, in the order
      *         they are listed in the value of hookConfVar.
      * @throws Exception
      */
     private <T extends Hook> List<T> getHooks(HiveConf.ConfVars hookConfVar, Class<T> clazz) throws Exception {
        List<T> hooks = new ArrayList<T>();
         String csHooks = conf.getVar(hookConfVar);
        if (csHooks == null) {
            return hooks;
        }
        csHooks = csHooks.trim();
        if (csHooks.equals("")) {
            return hooks;
        }
        String[] hookClasses = csHooks.split(",");
        for (String hookClass : hookClasses) {
            try {
                T hook = (T) Class.forName(hookClass.trim(), true, JavaUtils.getClassLoader()).newInstance();
                hooks.add(hook);
            } catch (ClassNotFoundException e) {
                 console.printError(hookConfVar.varname + " Class not found:" + e.getMessage());
                throw e;
            }
        }
        return hooks;
    }
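
     // Example (illustrative): hooks are configured as comma-separated class names, e.g.
     //   hive.exec.pre.hooks=com.example.AuditPreHook,com.example.MetricsPreHook
     // (both class names above are hypothetical). Each listed class needs a public
     // no-arg constructor and must be assignable to the requested hook type, or
     // newInstance()/the cast in this method will fail.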
    public int execute() throws CommandNeedRetryException {
        // execute hivesterix plan
         if (hivesterix) {
             hivesterix = false;
             int ret = engine.executeJob();
             if (ret != 0)
                 return ret;
         }
         PerfLogger perfLogger = PerfLogger.getPerfLogger();
         perfLogger.PerfLogBegin(LOG, PerfLogger.DRIVER_EXECUTE);
         boolean noName = StringUtils.isEmpty(conf.getVar(HiveConf.ConfVars.HADOOPJOBNAME));
         int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
         String queryId = plan.getQueryId();
         String queryStr = plan.getQueryStr();
         conf.setVar(HiveConf.ConfVars.HIVEQUERYID, queryId);
         conf.setVar(HiveConf.ConfVars.HIVEQUERYSTRING, queryStr);
         conf.set("mapreduce.workflow.id", "hive_" + queryId);
         conf.set("mapreduce.workflow.name", queryStr);
         maxthreads = HiveConf.getIntVar(conf, HiveConf.ConfVars.EXECPARALLETHREADNUMBER);
        try {
             LOG.info("Starting command: " + queryStr);
             plan.setStarted();
             if (SessionState.get() != null) {
                 SessionState.get().getHiveHistory().startQuery(queryStr, conf.getVar(HiveConf.ConfVars.HIVEQUERYID));
                 SessionState.get().getHiveHistory().logPlanProgress(plan);
             }
             resStream = null;
             HookContext hookContext = new HookContext(plan, conf, ctx.getPathToCS());
            hookContext.setHookType(HookContext.HookType.PRE_EXEC_HOOK);
            for (Hook peh : getHooks(HiveConf.ConfVars.PREEXECHOOKS)) {
                if (peh instanceof ExecuteWithHookContext) {
                     perfLogger.PerfLogBegin(LOG, PerfLogger.PRE_HOOK + peh.getClass().getName());
                     ((ExecuteWithHookContext) peh).run(hookContext);
                     perfLogger.PerfLogEnd(LOG, PerfLogger.PRE_HOOK + peh.getClass().getName());
                 } else if (peh instanceof PreExecute) {
                     perfLogger.PerfLogBegin(LOG, PerfLogger.PRE_HOOK + peh.getClass().getName());
                     ((PreExecute) peh).run(SessionState.get(), plan.getInputs(), plan.getOutputs(), ShimLoader
                             .getHadoopShims().getUGIForConf(conf));
                     perfLogger.PerfLogEnd(LOG, PerfLogger.PRE_HOOK + peh.getClass().getName());
                }
            }
             int jobs = Utilities.getMRTasks(plan.getRootTasks()).size();
             if (jobs > 0) {
                 console.printInfo("Total MapReduce jobs = " + jobs);
            }
            if (SessionState.get() != null) {
                SessionState.get().getHiveHistory()
                        .setQueryProperty(queryId, Keys.QUERY_NUM_TASKS, String.valueOf(jobs));
                 SessionState.get().getHiveHistory().setIdToTableMap(plan.getIdToTableNameMap());
            }
             String jobname = Utilities.abbreviate(queryStr, maxlen - 6);
            // A runtime that launches runnable tasks as separate Threads through
            // TaskRunners
            // As soon as a task isRunnable, it is put in a queue
            // At any time, at most maxthreads tasks can be running
            // The main thread polls the TaskRunners to check if they have finished.
            Queue<Task<? extends Serializable>> runnable = new ConcurrentLinkedQueue<Task<? extends Serializable>>();
            Map<TaskResult, TaskRunner> running = new HashMap<TaskResult, TaskRunner>();
             DriverContext driverCxt = new DriverContext(runnable, ctx);
             ctx.setHDFSCleanup(true);
             SessionState.get().setLastMapRedStatsList(new ArrayList<MapRedStats>());
             SessionState.get().setStackTraces(new HashMap<String, List<List<String>>>());
             SessionState.get().setLocalMapRedErrors(new HashMap<String, List<String>>());
             // Add root Tasks to runnable
             for (Task<? extends Serializable> tsk : plan.getRootTasks()) {
                // This should never happen, if it does, it's a bug with the potential to produce
                // incorrect results.
                assert tsk.getParentTasks() == null || tsk.getParentTasks().isEmpty();
                driverCxt.addToRunnable(tsk);
            }
             perfLogger.PerfLogEnd(LOG, PerfLogger.TIME_TO_SUBMIT);
            // Loop while you either have tasks running, or tasks queued up
            while (running.size() != 0 || runnable.peek() != null) {
                 // Launch up to maxthreads tasks
                 while (runnable.peek() != null && running.size() < maxthreads) {
                     Task<? extends Serializable> tsk = runnable.remove();
                     launchTask(tsk, queryId, noName, running, jobname, jobs, driverCxt);
                }
                // poll the Tasks to see which one completed
                TaskResult tskRes = pollTasks(running.keySet());
                TaskRunner tskRun = running.remove(tskRes);
                 Task<? extends Serializable> tsk = tskRun.getTask();
                hookContext.addCompleteTask(tskRun);
                int exitVal = tskRes.getExitVal();
                if (exitVal != 0) {
                    if (tsk.ifRetryCmdWhenFail()) {
                        if (!running.isEmpty()) {
                            taskCleanup(running);
                        }
                         // in case we decided to run everything in local mode, restore the
                         // jobtracker setting to its initial value
                         ctx.restoreOriginalTracker();
                        throw new CommandNeedRetryException();
                    }
                     Task<? extends Serializable> backupTask = tsk.getAndInitBackupTask();
                     if (backupTask != null) {
                         errorMessage = "FAILED: Execution Error, return code " + exitVal + " from "
                                 + tsk.getClass().getName();
                         ErrorMsg em = ErrorMsg.getErrorMsg(exitVal);
                         if (em != null) {
                             errorMessage += ". " + em.getMsg();
                         }
                         console.printError(errorMessage);
                         errorMessage = "ATTEMPT: Execute BackupTask: " + backupTask.getClass().getName();
                         console.printError(errorMessage);
                        // add backup task to runnable
                        if (DriverContext.isLaunchable(backupTask)) {
                            driverCxt.addToRunnable(backupTask);
                        }
                        continue;
                    } else {
                        hookContext.setHookType(HookContext.HookType.ON_FAILURE_HOOK);
                        // Get all the failure execution hooks and execute them.
                        for (Hook ofh : getHooks(HiveConf.ConfVars.ONFAILUREHOOKS)) {
                             perfLogger.PerfLogBegin(LOG, PerfLogger.FAILURE_HOOK + ofh.getClass().getName());
                             ((ExecuteWithHookContext) ofh).run(hookContext);
                             perfLogger.PerfLogEnd(LOG, PerfLogger.FAILURE_HOOK + ofh.getClass().getName());
                         }
                         errorMessage = "FAILED: Execution Error, return code " + exitVal + " from "
                                 + tsk.getClass().getName();
                         ErrorMsg em = ErrorMsg.getErrorMsg(exitVal);
                         if (em != null) {
                             errorMessage += ". " + em.getMsg();
                         }
                         SQLState = "08S01";
                         console.printError(errorMessage);
                        if (!running.isEmpty()) {
                            taskCleanup(running);
                        }