protected void destruct() { if (lockMgr != null) { try { lockMgr.close(); } catch (LockException e) { // Not much I can do about it. LOG.warn("Got exception when closing lock manager " + e.getMessage()); } } }
@Override public HiveLockManager getLockManager() throws LockException { if (lockMgr == null) { boolean supportConcurrency = conf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY); if (supportConcurrency) { String lockMgrName = conf.getVar(HiveConf.ConfVars.HIVE_LOCK_MANAGER); if ((lockMgrName == null) || (lockMgrName.isEmpty())) { throw new LockException(ErrorMsg.LOCKMGR_NOT_SPECIFIED.getMsg()); } try { LOG.info("Creating lock manager of type " + lockMgrName); lockMgr = (HiveLockManager) ReflectionUtils.newInstance(conf.getClassByName(lockMgrName), conf); lockMgr.setContext(new HiveLockManagerCtx(conf)); } catch (Exception e) { // set hiveLockMgr to null just in case this invalid manager got set to // next query's ctx. if (lockMgr != null) { try { lockMgr.close(); } catch (LockException e1) { // nothing can do here } lockMgr = null; } throw new LockException(ErrorMsg.LOCKMGR_NOT_INITIALIZED.getMsg() + e.getMessage()); } } else { LOG.info("Concurrency mode is disabled, not creating a lock manager"); return null; } } // Force a re-read of the configuration file. This is done because // different queries in the session may be using the same lock manager. lockMgr.refresh(); return lockMgr; }
/**
 * Acquires all locks required to run the given query plan.
 *
 * <p>Builds a list of {@link HiveLockObj} from the plan's inputs (always
 * SHARED mode) and outputs (mode decided by {@code getWriteEntityLockMode}),
 * de-duplicates it, and asks the lock manager to acquire the whole list.
 * Acquired locks are recorded in {@code ctx}; per-output lock objects are
 * recorded in {@code ctx.getOutputLockObjects()}.
 *
 * @param plan the query plan whose read/write entities need locking
 * @param ctx the driver context; receives the acquired locks
 * @param username caller's user name — NOTE(review): not referenced in this
 *        body; presumably used by overriding/other implementations — confirm
 * @throws LockException if the lock manager cannot acquire the locks
 */
@Override
public void acquireLocks(QueryPlan plan, Context ctx, String username) throws LockException {
  // Make sure we've built the lock manager
  getLockManager();
  // If the lock manager is still null, then it means we aren't using a
  // lock manager
  if (lockMgr == null) return;
  List<HiveLockObj> lockObjects = new ArrayList<HiveLockObj>();
  // Sort all the inputs, outputs.
  // If a lock needs to be acquired on any partition, a read lock needs to be acquired on all
  // its parents also
  for (ReadEntity input : plan.getInputs()) {
    // Inputs that don't need locking (e.g. flagged by the planner) are skipped.
    if (!input.needsLock()) {
      continue;
    }
    LOG.debug("Adding " + input.getName() + " to list of lock inputs");
    // All read entities are locked in SHARED mode; exactly one of
    // database/table/partition is passed to getLockObjects, the rest null.
    if (input.getType() == ReadEntity.Type.DATABASE) {
      lockObjects.addAll(
          getLockObjects(plan, input.getDatabase(), null, null, HiveLockMode.SHARED));
    } else if (input.getType() == ReadEntity.Type.TABLE) {
      lockObjects.addAll(getLockObjects(plan, null, input.getTable(), null, HiveLockMode.SHARED));
    } else {
      // Anything that is neither DATABASE nor TABLE is treated as a partition.
      lockObjects.addAll(
          getLockObjects(plan, null, null, input.getPartition(), HiveLockMode.SHARED));
    }
  }
  for (WriteEntity output : plan.getOutputs()) {
    // A null mode means this output requires no lock at all.
    HiveLockMode lockMode = getWriteEntityLockMode(output);
    if (lockMode == null) {
      continue;
    }
    LOG.debug("Adding " + output.getName() + " to list of lock outputs");
    List<HiveLockObj> lockObj = null;
    // NOTE(review): getType() is used for the DATABASE check but getTyp() for
    // the others — presumably both accessors exist on WriteEntity/Entity and
    // agree; confirm against the Entity class before unifying.
    if (output.getType() == WriteEntity.Type.DATABASE) {
      // Database locks go straight into the combined list and are NOT
      // recorded per-output in ctx (lockObj stays null).
      lockObjects.addAll(getLockObjects(plan, output.getDatabase(), null, null, lockMode));
    } else if (output.getTyp() == WriteEntity.Type.TABLE) {
      lockObj = getLockObjects(plan, null, output.getTable(), null, lockMode);
    } else if (output.getTyp() == WriteEntity.Type.PARTITION) {
      lockObj = getLockObjects(plan, null, null, output.getPartition(), lockMode);
    }
    // In case of dynamic queries, it is possible to have incomplete dummy partitions
    else if (output.getTyp() == WriteEntity.Type.DUMMYPARTITION) {
      // Dummy partitions only ever take a SHARED lock, regardless of lockMode.
      lockObj = getLockObjects(plan, null, null, output.getPartition(), HiveLockMode.SHARED);
    }
    if (lockObj != null) {
      lockObjects.addAll(lockObj);
      // Remember which lock objects belong to this output so they can be
      // found (e.g. for release) later.
      ctx.getOutputLockObjects().put(output, lockObj);
    }
  }
  // Nothing to lock and the context doesn't force a lock manager: done.
  if (lockObjects.isEmpty() && !ctx.isNeedLockMgr()) {
    return;
  }
  // Drop duplicate lock requests before asking the manager to acquire them.
  dedupLockObjects(lockObjects);
  List<HiveLock> hiveLocks = lockMgr.lock(lockObjects, false);
  if (hiveLocks == null) {
    // The manager signals failure-to-acquire by returning null.
    throw new LockException(ErrorMsg.LOCK_CANNOT_BE_ACQUIRED.getMsg());
  } else {
    ctx.setHiveLocks(hiveLocks);
  }
}