Example #1
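  // In-memory implementation: rejects the call if the parent database does not exist or a table
  // with the same spec is already registered, then stores the table keyed by its HiveObjectSpec.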
  @Override
  public void createTable(Table table) throws HiveMetastoreException {
    if (!existsDb(table.getDbName())) {
      throw new HiveMetastoreException("DB " + table.getDbName() + " does not exist!");
    }

    HiveObjectSpec tableSpec = new HiveObjectSpec(table.getDbName(), table.getTableName());
    if (specToTable.containsKey(tableSpec)) {
      throw new HiveMetastoreException("Table already exists: " + tableSpec);
    }
    specToTable.put(tableSpec, table);
  }
Example #2
  @Override
  public synchronized void createTable(Table table) {
    SchemaTableName schemaTableName = new SchemaTableName(table.getDbName(), table.getTableName());
    Table tableCopy = table.deepCopy();
    if (tableCopy.getSd() == null) {
      tableCopy.setSd(new StorageDescriptor());
    } else if (tableCopy.getSd().getLocation() != null) {
      File directory = new File(new Path(tableCopy.getSd().getLocation()).toUri());
      checkArgument(directory.exists(), "Table directory does not exist");
      checkArgument(
          isParentDir(directory, baseDirectory),
          "Table directory must be inside of the metastore base directory");
    }

    if (relations.putIfAbsent(schemaTableName, tableCopy) != null) {
      throw new TableAlreadyExistsException(schemaTableName);
    }

    if (tableCopy.getTableType().equals(TableType.VIRTUAL_VIEW.name())) {
      views.put(schemaTableName, tableCopy);
    }

    PrincipalPrivilegeSet privileges = table.getPrivileges();
    if (privileges != null) {
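      // Re-apply each user's and role's privilege grants to the newly created table.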
      for (Entry<String, List<PrivilegeGrantInfo>> entry :
          privileges.getUserPrivileges().entrySet()) {
        String user = entry.getKey();
        Set<HivePrivilegeInfo> userPrivileges =
            entry
                .getValue()
                .stream()
                .map(HivePrivilegeInfo::parsePrivilege)
                .flatMap(Collection::stream)
                .collect(toImmutableSet());
        setTablePrivileges(user, USER, table.getDbName(), table.getTableName(), userPrivileges);
      }
      for (Entry<String, List<PrivilegeGrantInfo>> entry :
          privileges.getRolePrivileges().entrySet()) {
        String role = entry.getKey();
        Set<HivePrivilegeInfo> rolePrivileges =
            entry
                .getValue()
                .stream()
                .map(HivePrivilegeInfo::parsePrivilege)
                .flatMap(Collection::stream)
                .collect(toImmutableSet());
        setTablePrivileges(role, ROLE, table.getDbName(), table.getTableName(), rolePrivileges);
      }
    }
  }
Example #3
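  // Renames insert the table under the new name before removing the old entry;
  // same-name calls simply replace the existing definition.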
  @Override
  public void alterTable(String databaseName, String tableName, Table newTable) {
    SchemaTableName oldName = new SchemaTableName(databaseName, tableName);
    SchemaTableName newName = new SchemaTableName(newTable.getDbName(), newTable.getTableName());

    // if the name did not change, this is a simple schema change
    if (oldName.equals(newName)) {
      if (relations.replace(oldName, newTable) == null) {
        throw new TableNotFoundException(oldName);
      }
      return;
    }

    // remove old table definition and add the new one
    // TODO: use locking to do this properly
    Table table = relations.get(oldName);
    if (table == null) {
      throw new TableNotFoundException(oldName);
    }

    if (relations.putIfAbsent(newName, newTable) != null) {
      throw new TableAlreadyExistsException(newName);
    }
    relations.remove(oldName);
  }
Example #4
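  // Converts a Thrift metastore API Table into the internal Table representation;
  // a storage descriptor is required.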
  public static Table fromMetastoreApiTable(org.apache.hadoop.hive.metastore.api.Table table) {
    StorageDescriptor storageDescriptor = table.getSd();
    if (storageDescriptor == null) {
      throw new PrestoException(HIVE_INVALID_METADATA, "Table is missing storage descriptor");
    }

    Table.Builder tableBuilder =
        Table.builder()
            .setDatabaseName(table.getDbName())
            .setTableName(table.getTableName())
            .setOwner(nullToEmpty(table.getOwner()))
            .setTableType(table.getTableType())
            .setDataColumns(
                storageDescriptor
                    .getCols()
                    .stream()
                    .map(MetastoreUtil::fromMetastoreApiFieldSchema)
                    .collect(toList()))
            .setPartitionColumns(
                table
                    .getPartitionKeys()
                    .stream()
                    .map(MetastoreUtil::fromMetastoreApiFieldSchema)
                    .collect(toList()))
            .setParameters(
                table.getParameters() == null ? ImmutableMap.of() : table.getParameters())
            .setViewOriginalText(Optional.ofNullable(emptyToNull(table.getViewOriginalText())))
            .setViewExpandedText(Optional.ofNullable(emptyToNull(table.getViewExpandedText())));

    fromMetastoreApiStorageDescriptor(
        storageDescriptor, tableBuilder.getStorageBuilder(), table.getTableName());

    return tableBuilder.build();
  }
Example #5
 /**
  * Removes the cache directive associated with the table from HDFS, uncaching all data. Also
  * updates the table's metadata. No-op if the table is not cached.
  */
 public static void uncacheTbl(org.apache.hadoop.hive.metastore.api.Table table)
     throws ImpalaRuntimeException {
   Preconditions.checkNotNull(table);
   LOG.debug("Uncaching table: " + table.getDbName() + "." + table.getTableName());
   Long id = getCacheDirectiveId(table.getParameters());
   if (id == null) return;
   HdfsCachingUtil.removeDirective(id);
   table.getParameters().remove(CACHE_DIR_ID_PROP_NAME);
   table.getParameters().remove(CACHE_DIR_REPLICATION_PROP_NAME);
 }
Example #6
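 // In-memory implementation: moves the table entry from the old spec to the new spec in the backing map.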
 @Override
 public void alterTable(String dbName, String tableName, Table table)
     throws HiveMetastoreException {
   HiveObjectSpec existingTableSpec = new HiveObjectSpec(dbName, tableName);
   HiveObjectSpec newTableSpec = new HiveObjectSpec(table.getDbName(), table.getTableName());
   if (!specToTable.containsKey(existingTableSpec)) {
     throw new HiveMetastoreException("Unknown table: " + existingTableSpec);
   }
   Table removedTable = specToTable.remove(existingTableSpec);
   if (removedTable == null) {
     throw new RuntimeException("Shouldn't happen!");
   }
   specToTable.put(newTableSpec, table);
 }
Example #7
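 // Thrift-backed implementation: runs the metastore call through a retry wrapper and
 // translates Thrift exceptions into Presto exceptions.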
 @Override
 public void createTable(Table table) {
   try {
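     // Exceptions listed in stopOn() are not retried.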
     retry()
         .stopOn(
             AlreadyExistsException.class,
             InvalidObjectException.class,
             MetaException.class,
             NoSuchObjectException.class)
         .stopOnIllegalExceptions()
         .run(
             "createTable",
             stats
                 .getCreateTable()
                 .wrap(
                     () -> {
                       try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) {
                         client.createTable(table);
                       }
                       return null;
                     }));
   } catch (AlreadyExistsException e) {
     throw new TableAlreadyExistsException(
         new SchemaTableName(table.getDbName(), table.getTableName()));
   } catch (NoSuchObjectException e) {
     throw new SchemaNotFoundException(table.getDbName());
   } catch (TException e) {
     throw new PrestoException(HIVE_METASTORE_ERROR, e);
   } catch (Exception e) {
     if (e instanceof InterruptedException) {
       Thread.currentThread().interrupt();
     }
     throw Throwables.propagate(e);
   } finally {
     invalidateTable(table.getDbName(), table.getTableName());
   }
 }
Example #8
 // This method is completely copied from Hive's HBaseStorageHandler.java.
 private String getHBaseTableName(org.apache.hadoop.hive.metastore.api.Table tbl) {
   // Give preference to TBLPROPERTIES over SERDEPROPERTIES
   // (really we should only use TBLPROPERTIES, so this is just
   // for backwards compatibility with the original specs).
   String tableName = tbl.getParameters().get(HBaseSerDe.HBASE_TABLE_NAME);
   if (tableName == null) {
     tableName = tbl.getSd().getSerdeInfo().getParameters().get(HBaseSerDe.HBASE_TABLE_NAME);
   }
   if (tableName == null) {
     tableName = tbl.getDbName() + "." + tbl.getTableName();
     if (tableName.startsWith(DEFAULT_PREFIX)) {
       tableName = tableName.substring(DEFAULT_PREFIX.length());
     }
   }
   return tableName;
 }
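
 // Test helper: asserts that two Table instances agree on table name, database name, and storage location.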
 private void validateTable(Table expectedTable, Table actualTable) {
   assertEquals(expectedTable.getTableName(), actualTable.getTableName());
   assertEquals(expectedTable.getDbName(), actualTable.getDbName());
   assertEquals(expectedTable.getSd().getLocation(), actualTable.getSd().getLocation());
 }