/** * Check if the given read entity is a table that has parents of type Table Hive compiler performs * a query rewrite by replacing view with its definition. In the process, tt captures both the * original view and the tables/view that it selects from . The access authorization is only * interested in the top level views and not the underlying tables. * * @param readEntity * @return */ private boolean isChildTabForView(ReadEntity readEntity) { // If this is a table added for view, then we need to skip that if (!readEntity.getType().equals(Type.TABLE) && !readEntity.getType().equals(Type.PARTITION)) { return false; } if (readEntity.getParents() != null && readEntity.getParents().size() > 0) { for (ReadEntity parentEntity : readEntity.getParents()) { if (!parentEntity.getType().equals(Type.TABLE)) { return false; } } return true; } else { return false; } }
/**
 * Appends authorization hierarchies for the given read entity to {@code inputHierarchy}. For
 * tables and partitions, one hierarchy is added per accessed column (server -> entity path ->
 * column); for every other entity type, a single hierarchy without a column element is added.
 *
 * @param inputHierarchy the list of authorizable hierarchies to append to
 * @param entity the read entity whose hierarchy (and columns, if any) should be added
 */
private void addColumnHierarchy(
    List<List<DBModelAuthorizable>> inputHierarchy, ReadEntity entity) {
  // Common prefix shared by every hierarchy produced here: auth server + entity path.
  List<DBModelAuthorizable> baseHierarchy = new ArrayList<DBModelAuthorizable>();
  baseHierarchy.add(hiveAuthzBinding.getAuthServer());
  baseHierarchy.addAll(getAuthzHierarchyFromEntity(entity));

  Type entityType = entity.getType();
  if (entityType.equals(Type.TABLE) || entityType.equals(Type.PARTITION)) {
    // Column-level access: emit one hierarchy per accessed column.
    for (String columnName : entity.getAccessedColumns()) {
      List<DBModelAuthorizable> columnHierarchy =
          new ArrayList<DBModelAuthorizable>(baseHierarchy);
      columnHierarchy.add(new Column(columnName));
      inputHierarchy.add(columnHierarchy);
    }
  } else {
    // No column granularity for this entity type; add the entity hierarchy as-is.
    inputHierarchy.add(baseHierarchy);
  }
}
/**
 * Acquires locks for every input and output entity of the given query plan and stores the
 * resulting locks on the context. Inputs are locked SHARED; the mode for each output comes from
 * {@code getWriteEntityLockMode}. A no-op when no lock manager is configured.
 *
 * @param plan the compiled query plan whose inputs/outputs need locking
 * @param ctx query context that receives the acquired locks and per-output lock objects
 * @param username requesting user (unused in this implementation)
 * @throws LockException if the lock manager cannot grant the requested locks
 */
@Override
public void acquireLocks(QueryPlan plan, Context ctx, String username) throws LockException {
  // Make sure we've built the lock manager
  getLockManager();
  // If the lock manager is still null, then it means we aren't using a
  // lock manager
  if (lockMgr == null) return;

  List<HiveLockObj> lockObjects = new ArrayList<HiveLockObj>();

  // Sort all the inputs, outputs.
  // If a lock needs to be acquired on any partition, a read lock needs to be acquired on all
  // its parents also
  for (ReadEntity input : plan.getInputs()) {
    // Entities that don't need a lock (e.g. not materialized) are skipped entirely.
    if (!input.needsLock()) {
      continue;
    }
    LOG.debug("Adding " + input.getName() + " to list of lock inputs");
    // All reads take SHARED locks, at database, table, or partition granularity.
    if (input.getType() == ReadEntity.Type.DATABASE) {
      lockObjects.addAll(
          getLockObjects(plan, input.getDatabase(), null, null, HiveLockMode.SHARED));
    } else if (input.getType() == ReadEntity.Type.TABLE) {
      lockObjects.addAll(getLockObjects(plan, null, input.getTable(), null, HiveLockMode.SHARED));
    } else {
      // Anything else is treated as a partition read.
      lockObjects.addAll(
          getLockObjects(plan, null, null, input.getPartition(), HiveLockMode.SHARED));
    }
  }

  for (WriteEntity output : plan.getOutputs()) {
    // A null mode means this output type does not need a lock.
    HiveLockMode lockMode = getWriteEntityLockMode(output);
    if (lockMode == null) {
      continue;
    }
    LOG.debug("Adding " + output.getName() + " to list of lock outputs");
    List<HiveLockObj> lockObj = null;
    // NOTE(review): the DATABASE branch calls getType() while the later branches call getTyp()
    // — presumably both return the same entity type on Hive's Entity class; confirm upstream.
    if (output.getType() == WriteEntity.Type.DATABASE) {
      // Database outputs go straight into lockObjects and are NOT recorded per-output below.
      lockObjects.addAll(getLockObjects(plan, output.getDatabase(), null, null, lockMode));
    } else if (output.getTyp() == WriteEntity.Type.TABLE) {
      lockObj = getLockObjects(plan, null, output.getTable(), null, lockMode);
    } else if (output.getTyp() == WriteEntity.Type.PARTITION) {
      lockObj = getLockObjects(plan, null, null, output.getPartition(), lockMode);
    }
    // In case of dynamic queries, it is possible to have incomplete dummy partitions
    else if (output.getTyp() == WriteEntity.Type.DUMMYPARTITION) {
      // Dummy partitions only take a SHARED lock regardless of the computed write mode.
      lockObj = getLockObjects(plan, null, null, output.getPartition(), HiveLockMode.SHARED);
    }
    if (lockObj != null) {
      lockObjects.addAll(lockObj);
      // Remember which lock objects belong to this output so callers can release/inspect them.
      ctx.getOutputLockObjects().put(output, lockObj);
    }
  }

  // Nothing to lock and the context doesn't force a lock manager: done.
  if (lockObjects.isEmpty() && !ctx.isNeedLockMgr()) {
    return;
  }

  // Collapse duplicate lock requests before calling the lock manager.
  dedupLockObjects(lockObjects);
  List<HiveLock> hiveLocks = lockMgr.lock(lockObjects, false);

  // A null result means the manager could not grant the locks (non-blocking call above).
  if (hiveLocks == null) {
    throw new LockException(ErrorMsg.LOCK_CANNOT_BE_ACQUIRED.getMsg());
  } else {
    ctx.setHiveLocks(hiveLocks);
  }
}