private void renameTable(HiveMetaStoreBridge dgiBridge, HiveEventContext event) throws Exception {
    // There is no easy way to get the new table name directly; it has to be derived from the outputs
    assert event.getInputs() != null && event.getInputs().size() == 1;
    assert event.getOutputs() != null && event.getOutputs().size() > 0;

    // Create the entity if it does not exist; otherwise update it
    ReadEntity oldEntity = event.getInputs().iterator().next();
    Table oldTable = oldEntity.getTable();

    for (WriteEntity writeEntity : event.getOutputs()) {
      if (writeEntity.getType() == Entity.Type.TABLE) {
        Table newTable = writeEntity.getTable();
        // Hive includes both the old and the new table name in the outputs, so skip the
        // output entry that still carries the old name
        if (!newTable.getDbName().equals(oldTable.getDbName())
            || !newTable.getTableName().equals(oldTable.getTableName())) {
          final String oldQualifiedName =
              dgiBridge.getTableQualifiedName(dgiBridge.getClusterName(), oldTable);
          final String newQualifiedName =
              dgiBridge.getTableQualifiedName(dgiBridge.getClusterName(), newTable);

          // Create/update the old table entity: create an entity with the old qualified name and
          // the old table name if it doesn't exist; if it exists, it will be updated.
          // We always use the new entity while creating the table, since some flags and
          // attributes of the table are not set in the input entity, and
          // Hive.getTable(oldTableName) fails because the table no longer exists in Hive.
          final LinkedHashMap<Type, Referenceable> tables =
              createOrUpdateEntities(dgiBridge, event, writeEntity, true);
          Referenceable tableEntity = tables.get(Type.TABLE);

          // Reset the regular columns' qualified names to the old name, and create a partial
          // notification request that replaces the old column qualified names with the new ones
          // so any existing traits are retained
          replaceColumnQFName(
              event,
              (List<Referenceable>) tableEntity.get(HiveDataModelGenerator.COLUMNS),
              oldQualifiedName,
              newQualifiedName);

          // Reset the partition key columns' qualified names to the old name, and create a
          // partial notification request that replaces the old column qualified names with the
          // new ones so any existing traits are retained
          replaceColumnQFName(
              event,
              (List<Referenceable>) tableEntity.get(HiveDataModelGenerator.PART_COLS),
              oldQualifiedName,
              newQualifiedName);

          // Reset the SD qualified name to the old name, and create a partial notification
          // request that replaces the old SD qualified name with the new one so any existing
          // traits are retained
          replaceSDQFName(event, tableEntity, oldQualifiedName, newQualifiedName);

          // Reset the table qualified name to the old name, and create a partial notification
          // request that replaces the old table qualified name with the new one
          replaceTableQFName(
              event, oldTable, newTable, tableEntity, oldQualifiedName, newQualifiedName);
        }
      }
    }
  }
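
For reference, the rename logic above hinges on Atlas-style qualified names. A minimal sketch of the assumed `<db>.<table>@<cluster>` format follows; the helper is hypothetical and only illustrates the string shape, not the real HiveMetaStoreBridge implementation:

// Hypothetical helper: the assumed qualified-name format is
// <dbName>.<tableName>@<clusterName>, with db and table lower-cased.
static String tableQualifiedName(String clusterName, String dbName, String tableName) {
  return String.format("%s.%s@%s", dbName.toLowerCase(), tableName.toLowerCase(), clusterName);
}

// Example: renaming sales.orders to sales.orders_v2 on cluster "primary" maps
// "sales.orders@primary" -> "sales.orders_v2@primary"; the replace*QFName calls
// then rewrite the column/SD/table names that embed the old qualified name.
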
  private void handleExternalTables(
      final HiveMetaStoreBridge dgiBridge,
      final HiveEventContext event,
      final LinkedHashMap<Type, Referenceable> tables)
      throws HiveException, MalformedURLException {
    List<Referenceable> entities = new ArrayList<>();
    final Entity hiveEntity = getEntityByType(event.getOutputs(), Type.TABLE);
    Table hiveTable = hiveEntity.getTable();
    // Refresh the table to pick up the correct data location
    hiveTable = dgiBridge.hiveClient.getTable(hiveTable.getDbName(), hiveTable.getTableName());

    if (hiveTable != null && TableType.EXTERNAL_TABLE.equals(hiveTable.getTableType())) {
      // Compute the location only after the null check to avoid a NullPointerException
      final String location = lower(hiveTable.getDataLocation().toString());
      LOG.info("Registering external table process {} ", event.getQueryStr());
      final ReadEntity dfsEntity = new ReadEntity();
      dfsEntity.setTyp(Type.DFS_DIR);
      dfsEntity.setName(location);

      SortedMap<Entity, Referenceable> inputs =
          new TreeMap<Entity, Referenceable>(entityComparator) {
            {
              put(dfsEntity, dgiBridge.fillHDFSDataSet(location));
            }
          };

      SortedMap<Entity, Referenceable> outputs =
          new TreeMap<Entity, Referenceable>(entityComparator) {
            {
              put(hiveEntity, tables.get(Type.TABLE));
            }
          };

      Referenceable processReferenceable =
          getProcessReferenceable(dgiBridge, event, inputs, outputs);
      String tableQualifiedName =
          dgiBridge.getTableQualifiedName(dgiBridge.getClusterName(), hiveTable);

      if (isCreateOp(event)) {
        processReferenceable.set(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, tableQualifiedName);
      }
      entities.addAll(tables.values());
      entities.add(processReferenceable);
      event.addMessage(new HookNotification.EntityUpdateRequest(event.getUser(), entities));
    }
  }
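
The two TreeMaps above depend on an `entityComparator` that is not shown. A minimal sketch of a plausible definition, assuming entities are ordered case-insensitively by name (the comparator in the actual source may differ):

  // Assumed ordering for the sorted input/output maps: case-insensitive by
  // entity name, with null names treated as empty strings.
  private static final Comparator<Entity> entityComparator =
      new Comparator<Entity>() {
        @Override
        public int compare(Entity o1, Entity o2) {
          String n1 = o1.getName() != null ? o1.getName() : "";
          String n2 = o2.getName() != null ? o2.getName() : "";
          return n1.toLowerCase().compareTo(n2.toLowerCase());
        }
      };
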
 /**
  * Gets Authorizables from the inputs and adds them to inputHierarchy.
  *
  * @param inputHierarchy the hierarchy of authorizables to add to
  * @param inputs the read entities of the statement
  */
 private void getInputHierarchyFromInputs(
     List<List<DBModelAuthorizable>> inputHierarchy, Set<ReadEntity> inputs) {
   for (ReadEntity readEntity : inputs) {
     // skip the tables/view that are part of expanded view definition
     // skip the Hive generated dummy entities created for queries like 'select <expr>'
     if (isChildTabForView(readEntity) || isDummyEntity(readEntity)) {
       continue;
     }
     if (readEntity.getAccessedColumns() != null && !readEntity.getAccessedColumns().isEmpty()) {
       addColumnHierarchy(inputHierarchy, readEntity);
     } else {
       List<DBModelAuthorizable> entityHierarchy = new ArrayList<DBModelAuthorizable>();
       entityHierarchy.add(hiveAuthzBinding.getAuthServer());
       entityHierarchy.addAll(getAuthzHierarchyFromEntity(readEntity));
       inputHierarchy.add(entityHierarchy);
     }
   }
 }
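
Both branches above delegate to `getAuthzHierarchyFromEntity`, which is not shown. A rough sketch of the mapping it is assumed to perform (the real Sentry implementation covers more entity types and URI validation):

  // Hypothetical sketch: map a Hive entity onto Sentry DB-model authorizables.
  private List<DBModelAuthorizable> getAuthzHierarchyFromEntity(Entity entity) {
    List<DBModelAuthorizable> objectHierarchy = new ArrayList<DBModelAuthorizable>();
    switch (entity.getType()) {
      case TABLE:
        objectHierarchy.add(new Database(entity.getTable().getDbName()));
        objectHierarchy.add(new Table(entity.getTable().getTableName()));
        break;
      case PARTITION:
        objectHierarchy.add(new Database(entity.getPartition().getTable().getDbName()));
        objectHierarchy.add(new Table(entity.getPartition().getTable().getTableName()));
        break;
      case DFS_DIR:
      case LOCAL_DIR:
        objectHierarchy.add(new AccessURI(entity.getName()));
        break;
      default:
        throw new UnsupportedOperationException(
            "Unsupported entity type " + entity.getType());
    }
    return objectHierarchy;
  }
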
  /**
   * Adds a column-level hierarchy to inputHierarchy.
   *
   * @param inputHierarchy the hierarchy of authorizables to add to
   * @param entity the read entity whose accessed columns are expanded
   */
  private void addColumnHierarchy(
      List<List<DBModelAuthorizable>> inputHierarchy, ReadEntity entity) {
    List<DBModelAuthorizable> entityHierarchy = new ArrayList<DBModelAuthorizable>();
    entityHierarchy.add(hiveAuthzBinding.getAuthServer());
    entityHierarchy.addAll(getAuthzHierarchyFromEntity(entity));

    switch (entity.getType()) {
      case TABLE:
      case PARTITION:
        List<String> cols = entity.getAccessedColumns();
        for (String col : cols) {
          List<DBModelAuthorizable> colHierarchy =
              new ArrayList<DBModelAuthorizable>(entityHierarchy);
          colHierarchy.add(new Column(col));
          inputHierarchy.add(colHierarchy);
        }
        break;
      default:
        inputHierarchy.add(entityHierarchy);
    }
  }
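
As an illustration, for a statement that reads columns c1 and c2 of table db1.t1 (hypothetical names), the method above adds one row per accessed column:

  // inputHierarchy after addColumnHierarchy(inputHierarchy, entity), conceptually:
  //   [authServer, db1, t1, c1]
  //   [authServer, db1, t1, c2]
  // Non-table entities fall through to the default case and contribute a single
  // row without a Column element.
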
 /**
  * Checks whether the given read entity is a table whose parents are all of type Table. The Hive
  * compiler performs a query rewrite that replaces a view with its definition; in the process it
  * captures both the original view and the tables/views that it selects from. Access
  * authorization is only interested in the top-level views, not the underlying tables.
  *
  * @param readEntity the read entity to check
  * @return true if the entity is a table pulled in only through a view expansion
  */
 private boolean isChildTabForView(ReadEntity readEntity) {
   // If this is a table added for view, then we need to skip that
   if (!readEntity.getType().equals(Type.TABLE) && !readEntity.getType().equals(Type.PARTITION)) {
     return false;
   }
   if (readEntity.getParents() != null && readEntity.getParents().size() > 0) {
     for (ReadEntity parentEntity : readEntity.getParents()) {
       if (!parentEntity.getType().equals(Type.TABLE)) {
         return false;
       }
     }
     return true;
   } else {
     return false;
   }
 }
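
A concrete example of the parent relationship this check relies on (hypothetical object names):

 // Given:  CREATE VIEW v1 AS SELECT * FROM t1;
 //         SELECT * FROM v1;
 // The compiler's inputs contain both v1 and t1, and t1.getParents() returns
 // {v1}. isChildTabForView(t1) therefore returns true, so t1 is skipped and
 // only the top-level view v1 is authorized.
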
  /**
   * Converts the input/output entities into authorizables, generates authorizables for cases like
   * database and metadata operations where the compiler doesn't capture entities, and invokes the
   * Hive binding to validate permissions.
   *
   * @param context the semantic analyzer hook context
   * @param stmtAuthObject the privileges required by the statement
   * @param stmtOperation the Hive operation being authorized
   * @throws AuthorizationException if the user lacks the required privileges
   */
  private void authorizeWithHiveBindings(
      HiveSemanticAnalyzerHookContext context,
      HiveAuthzPrivileges stmtAuthObject,
      HiveOperation stmtOperation)
      throws AuthorizationException {
    Set<ReadEntity> inputs = context.getInputs();
    Set<WriteEntity> outputs = context.getOutputs();
    List<List<DBModelAuthorizable>> inputHierarchy = new ArrayList<List<DBModelAuthorizable>>();
    List<List<DBModelAuthorizable>> outputHierarchy = new ArrayList<List<DBModelAuthorizable>>();

    if (LOG.isDebugEnabled()) {
      LOG.debug("stmtAuthObject.getOperationScope() = " + stmtAuthObject.getOperationScope());
      LOG.debug("context.getInputs() = " + context.getInputs());
      LOG.debug("context.getOutputs() = " + context.getOutputs());
    }

    // Workaround to allow DESCRIBE <table> to be executed with only column-level privileges,
    // while still authorizing DESCRIBE [EXTENDED|FORMATTED] as table-level. This is done by
    // treating DESCRIBE <table> the same as SHOW COLUMNS, which only requires column-level
    // privileges.
    if (isDescTableBasic) {
      stmtAuthObject = HiveAuthzPrivilegesMap.getHiveAuthzPrivileges(HiveOperation.SHOWCOLUMNS);
    }

    switch (stmtAuthObject.getOperationScope()) {
      case SERVER:
        // validate server-level privileges if applicable, e.g. create UDF, register jar, etc.
        List<DBModelAuthorizable> serverHierarchy = new ArrayList<DBModelAuthorizable>();
        serverHierarchy.add(hiveAuthzBinding.getAuthServer());
        inputHierarchy.add(serverHierarchy);
        break;
      case DATABASE:
        // workaround for database scope statements (create/alter/drop db)
        List<DBModelAuthorizable> dbHierarchy = new ArrayList<DBModelAuthorizable>();
        dbHierarchy.add(hiveAuthzBinding.getAuthServer());
        dbHierarchy.add(currDB);
        inputHierarchy.add(dbHierarchy);
        outputHierarchy.add(dbHierarchy);

        getInputHierarchyFromInputs(inputHierarchy, inputs);
        break;
      case TABLE:
        // workaround for add partitions
        if (partitionURI != null) {
          inputHierarchy.add(ImmutableList.of(hiveAuthzBinding.getAuthServer(), partitionURI));
        }

        getInputHierarchyFromInputs(inputHierarchy, inputs);
        for (WriteEntity writeEntity : outputs) {
          if (filterWriteEntity(writeEntity)) {
            continue;
          }
          List<DBModelAuthorizable> entityHierarchy = new ArrayList<DBModelAuthorizable>();
          entityHierarchy.add(hiveAuthzBinding.getAuthServer());
          entityHierarchy.addAll(getAuthzHierarchyFromEntity(writeEntity));
          outputHierarchy.add(entityHierarchy);
        }
        // workaround for metadata queries.
        // Capture the table name in pre-analyze and include that in the input entity list
        if (currTab != null) {
          List<DBModelAuthorizable> externalAuthorizableHierarchy =
              new ArrayList<DBModelAuthorizable>();
          externalAuthorizableHierarchy.add(hiveAuthzBinding.getAuthServer());
          externalAuthorizableHierarchy.add(currDB);
          externalAuthorizableHierarchy.add(currTab);
          inputHierarchy.add(externalAuthorizableHierarchy);
        }

        // workaround for DDL statements
        // Capture the table name in pre-analyze and include that in the output entity list
        if (currOutTab != null) {
          List<DBModelAuthorizable> externalAuthorizableHierarchy =
              new ArrayList<DBModelAuthorizable>();
          externalAuthorizableHierarchy.add(hiveAuthzBinding.getAuthServer());
          externalAuthorizableHierarchy.add(currOutDB);
          externalAuthorizableHierarchy.add(currOutTab);
          outputHierarchy.add(externalAuthorizableHierarchy);
        }
        break;
      case FUNCTION:
        /* The 'FUNCTION' privilege scope is currently used for:
         *  - CREATE TEMP FUNCTION
         *  - DROP TEMP FUNCTION
         */
        if (udfURI != null) {
          List<DBModelAuthorizable> udfUriHierarchy = new ArrayList<DBModelAuthorizable>();
          udfUriHierarchy.add(hiveAuthzBinding.getAuthServer());
          udfUriHierarchy.add(udfURI);
          inputHierarchy.add(udfUriHierarchy);
          for (WriteEntity writeEntity : outputs) {
            List<DBModelAuthorizable> entityHierarchy = new ArrayList<DBModelAuthorizable>();
            entityHierarchy.add(hiveAuthzBinding.getAuthServer());
            entityHierarchy.addAll(getAuthzHierarchyFromEntity(writeEntity));
            outputHierarchy.add(entityHierarchy);
          }
        }
        break;
      case CONNECT:
        /* 'CONNECT' is an implicit privilege scope currently used for:
         *  - USE <db>
         *  It is allowed when the user has any privilege on the current database. For application
         *  backward compatibility, we allow (optional) implicit connect permission on the
         *  'default' db.
         */
        List<DBModelAuthorizable> connectHierarchy = new ArrayList<DBModelAuthorizable>();
        connectHierarchy.add(hiveAuthzBinding.getAuthServer());
        // by default allow connect access to default db
        Table currTbl = Table.ALL;
        Column currCol = Column.ALL;
        if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(currDB.getName())
            && "false"
                .equalsIgnoreCase(
                    authzConf.get(
                        HiveAuthzConf.AuthzConfVars.AUTHZ_RESTRICT_DEFAULT_DB.getVar(), "false"))) {
          currDB = Database.ALL;
          currTbl = Table.SOME;
        }

        connectHierarchy.add(currDB);
        connectHierarchy.add(currTbl);
        connectHierarchy.add(currCol);

        inputHierarchy.add(connectHierarchy);
        outputHierarchy.add(connectHierarchy);
        break;
      case COLUMN:
        for (ReadEntity readEntity : inputs) {
          if (readEntity.getAccessedColumns() != null
              && !readEntity.getAccessedColumns().isEmpty()) {
            addColumnHierarchy(inputHierarchy, readEntity);
          } else {
            List<DBModelAuthorizable> entityHierarchy = new ArrayList<DBModelAuthorizable>();
            entityHierarchy.add(hiveAuthzBinding.getAuthServer());
            entityHierarchy.addAll(getAuthzHierarchyFromEntity(readEntity));
            entityHierarchy.add(Column.ALL);
            inputHierarchy.add(entityHierarchy);
          }
        }
        break;
      default:
        throw new AuthorizationException(
            "Unknown operation scope type " + stmtAuthObject.getOperationScope().toString());
    }

    HiveAuthzBinding binding = null;
    try {
      binding = getHiveBindingWithPrivilegeCache(hiveAuthzBinding, context.getUserName());
    } catch (SemanticException e) {
      // Fall back to the original hiveAuthzBinding
      binding = hiveAuthzBinding;
    }
    // validate permission
    binding.authorize(
        stmtOperation, stmtAuthObject, getCurrentSubject(context), inputHierarchy, outputHierarchy);
  }
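
To make the CONNECT branch concrete: for `USE db1` with the default-db relaxation not applying, the hierarchies handed to binding.authorize() would conceptually be (hypothetical names):

  // inputHierarchy == outputHierarchy == [[authServer, db1, Table.ALL, Column.ALL]]
  // i.e. any privilege on any table/column of db1 is enough to connect to it.
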
  @Override
  public void acquireLocks(QueryPlan plan, Context ctx, String username) throws LockException {
    // Make sure we've built the lock manager
    getLockManager();

    // If the lock manager is still null, then it means we aren't using a
    // lock manager
    if (lockMgr == null) return;

    List<HiveLockObj> lockObjects = new ArrayList<HiveLockObj>();

    // Sort the inputs and outputs into lock objects.
    // If a lock needs to be acquired on any partition, a read lock also needs to be acquired on
    // all of its parents
    for (ReadEntity input : plan.getInputs()) {
      if (!input.needsLock()) {
        continue;
      }
      LOG.debug("Adding " + input.getName() + " to list of lock inputs");
      if (input.getType() == ReadEntity.Type.DATABASE) {
        lockObjects.addAll(
            getLockObjects(plan, input.getDatabase(), null, null, HiveLockMode.SHARED));
      } else if (input.getType() == ReadEntity.Type.TABLE) {
        lockObjects.addAll(getLockObjects(plan, null, input.getTable(), null, HiveLockMode.SHARED));
      } else {
        lockObjects.addAll(
            getLockObjects(plan, null, null, input.getPartition(), HiveLockMode.SHARED));
      }
    }

    for (WriteEntity output : plan.getOutputs()) {
      HiveLockMode lockMode = getWriteEntityLockMode(output);
      if (lockMode == null) {
        continue;
      }
      LOG.debug("Adding " + output.getName() + " to list of lock outputs");
      List<HiveLockObj> lockObj = null;
      if (output.getType() == WriteEntity.Type.DATABASE) {
        lockObjects.addAll(getLockObjects(plan, output.getDatabase(), null, null, lockMode));
      } else if (output.getTyp() == WriteEntity.Type.TABLE) {
        lockObj = getLockObjects(plan, null, output.getTable(), null, lockMode);
      } else if (output.getTyp() == WriteEntity.Type.PARTITION) {
        lockObj = getLockObjects(plan, null, null, output.getPartition(), lockMode);
      } else if (output.getTyp() == WriteEntity.Type.DUMMYPARTITION) {
        // In case of dynamic queries, it is possible to have incomplete dummy partitions
        lockObj = getLockObjects(plan, null, null, output.getPartition(), HiveLockMode.SHARED);
      }

      if (lockObj != null) {
        lockObjects.addAll(lockObj);
        ctx.getOutputLockObjects().put(output, lockObj);
      }
    }

    if (lockObjects.isEmpty() && !ctx.isNeedLockMgr()) {
      return;
    }

    dedupLockObjects(lockObjects);
    List<HiveLock> hiveLocks = lockMgr.lock(lockObjects, false);

    if (hiveLocks == null) {
      throw new LockException(ErrorMsg.LOCK_CANNOT_BE_ACQUIRED.getMsg());
    } else {
      ctx.setHiveLocks(hiveLocks);
    }
  }
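
`dedupLockObjects` is not shown above; a plausible sketch, assuming it keys on the lock name and keeps the stricter mode when duplicates collide (this mirrors, but is not guaranteed to match, Hive's actual helper):

  // Sketch: collapse duplicate lock objects by name, preferring EXCLUSIVE over
  // SHARED so the strongest requested mode wins.
  static void dedupLockObjects(List<HiveLockObj> lockObjects) {
    Map<String, HiveLockObj> lockMap = new LinkedHashMap<String, HiveLockObj>();
    for (HiveLockObj lockObj : lockObjects) {
      HiveLockObj found = lockMap.get(lockObj.getName());
      if (found == null || lockObj.getMode() == HiveLockMode.EXCLUSIVE) {
        lockMap.put(lockObj.getName(), lockObj);
      }
    }
    lockObjects.clear();
    lockObjects.addAll(lockMap.values());
  }
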