Example #1
  private void addPartition(
      String databaseName, String tableName, CatalogProtos.PartitionDescProto partitionDescProto) {
    HiveCatalogStoreClientPool.HiveCatalogStoreClient client = null;
    try {

      client = clientPool.getClient();

      Partition partition = new Partition();
      partition.setDbName(databaseName);
      partition.setTableName(tableName);

      List<String> values = Lists.newArrayList();
      for (CatalogProtos.PartitionKeyProto keyProto : partitionDescProto.getPartitionKeysList()) {
        values.add(keyProto.getPartitionValue());
      }
      partition.setValues(values);

      Table table = client.getHiveClient().getTable(databaseName, tableName);
      StorageDescriptor sd = table.getSd();
      sd.setLocation(partitionDescProto.getPath());
      partition.setSd(sd);

      client.getHiveClient().add_partition(partition);
    } catch (Exception e) {
      throw new TajoInternalError(e);
    } finally {
      if (client != null) {
        client.release();
      }
    }
  }
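A minimal usage sketch of the same sequence against a raw Hive metastore client; the client, database/table names, and path below are hypothetical:

  // Hypothetical sketch: add a partition directly through a metastore client,
  // mirroring the steps in addPartition() above.
  Partition partition = new Partition();
  partition.setDbName("default");                          // hypothetical database
  partition.setTableName("logs");                          // hypothetical table
  partition.setValues(Arrays.asList("2015-01-01"));        // one value per partition key
  StorageDescriptor sd = client.getTable("default", "logs").getSd();
  sd.setLocation("hdfs:///warehouse/logs/dt=2015-01-01");  // hypothetical partition path
  partition.setSd(sd);
  client.add_partition(partition);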
  @Override
  public void alterTable(String databaseName, String tableName, Table newTable) {
    SchemaTableName oldName = new SchemaTableName(databaseName, tableName);
    SchemaTableName newName = new SchemaTableName(newTable.getDbName(), newTable.getTableName());

    // if the name did not change, this is a simple schema change
    if (oldName.equals(newName)) {
      if (relations.replace(oldName, newTable) == null) {
        throw new TableNotFoundException(oldName);
      }
      return;
    }

    // remove old table definition and add the new one
    // TODO: use locking to do this properly
    Table table = relations.get(oldName);
    if (table == null) {
      throw new TableNotFoundException(oldName);
    }

    if (relations.putIfAbsent(newName, newTable) != null) {
      throw new TableAlreadyExistsException(newName);
    }
    relations.remove(oldName);
  }
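The TODO above flags a race: between putIfAbsent and remove, a concurrent reader can observe the table under both names. One hedged way to close it, assuming renames are rare enough that a single lock is acceptable:

  // Sketch only: serialize renames so the putIfAbsent/remove pair cannot
  // interleave with a concurrent rename of the same tables.
  private final Object renameLock = new Object();

  private void renameAtomically(SchemaTableName oldName, SchemaTableName newName, Table newTable) {
    synchronized (renameLock) {
      if (!relations.containsKey(oldName)) {
        throw new TableNotFoundException(oldName);
      }
      if (relations.putIfAbsent(newName, newTable) != null) {
        throw new TableAlreadyExistsException(newName);
      }
      relations.remove(oldName);
    }
  }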
Example #3
  private boolean existColumn(
      final String databaseName, final String tableName, final String columnName)
      throws CatalogException {
    boolean exist = false;
    HiveCatalogStoreClientPool.HiveCatalogStoreClient client = null;

    try {

      client = clientPool.getClient();
      Table table = client.getHiveClient().getTable(databaseName, tableName);
      List<FieldSchema> columns = table.getSd().getCols();

      for (final FieldSchema currentColumn : columns) {
        if (currentColumn.getName().equalsIgnoreCase(columnName)) {
          exist = true;
          break;
        }
      }

    } catch (NoSuchObjectException nsoe) {
      // Table does not exist; fall through and return false.
    } catch (Exception e) {
      throw new TajoInternalError(e);
    } finally {
      if (client != null) {
        client.release();
      }
    }

    return exist;
  }
  @Override
  public void preCreateTable(Table tbl) throws MetaException {

    boolean isExternal = MetaStoreUtils.isExternalTable(tbl);
    if (isExternal) {
      Log.info("Creating External table for Splice...");
    }

    String inputTableName = tbl.getParameters().get(MRConstants.SPLICE_TABLE_NAME);
    if (inputTableName == null)
      throw new MetaException(
          "Missing required parameter " + MRConstants.SPLICE_TABLE_NAME);

    // We could support a user-defined column mapping, but it does not seem
    // necessary yet: we map all columns from the Splice table to the Hive table.
    String connStr = tbl.getParameters().get(MRConstants.SPLICE_JDBC_STR);
    if (connStr == null)
      throw new MetaException("Missing required parameter " + MRConstants.SPLICE_JDBC_STR);
    if (sqlUtil == null) sqlUtil = SMSQLUtil.getInstance(connStr);
    // inputTableName is guaranteed non-null here; the missing-parameter case throws above.
    inputTableName = inputTableName.trim();
    checkTableExists(inputTableName);
  }
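For illustration, a caller would supply both parameters in the table's property map before creation; the keys come from MRConstants, while the literal values below are hypothetical:

  Map<String, String> params = new HashMap<String, String>();
  params.put(MRConstants.SPLICE_TABLE_NAME, "SPLICE.MY_TABLE");                     // hypothetical value
  params.put(MRConstants.SPLICE_JDBC_STR, "jdbc:splice://localhost:1527/splicedb"); // hypothetical value
  tbl.setParameters(params);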
Example #5
  private void renameColumn(
      String databaseName, String tableName, CatalogProtos.AlterColumnProto alterColumnProto) {
    HiveCatalogStoreClientPool.HiveCatalogStoreClient client = null;
    try {

      client = clientPool.getClient();
      Table table = client.getHiveClient().getTable(databaseName, tableName);
      List<FieldSchema> columns = table.getSd().getCols();

      for (final FieldSchema currentColumn : columns) {
        if (currentColumn.getName().equalsIgnoreCase(alterColumnProto.getOldColumnName())) {
          currentColumn.setName(alterColumnProto.getNewColumnName());
        }
      }
      client.getHiveClient().alter_table(databaseName, tableName, table);

    } catch (NoSuchObjectException nsoe) {
      // Table does not exist; nothing to rename.
    } catch (Exception e) {
      throw new TajoInternalError(e);
    } finally {
      if (client != null) {
        client.release();
      }
    }
  }
 private Optional<Table> loadTable(HiveTableName hiveTableName) throws Exception {
   try {
     return retry()
         .stopOn(NoSuchObjectException.class, HiveViewNotSupportedException.class)
         .stopOnIllegalExceptions()
         .run(
             "getTable",
             stats
                 .getGetTable()
                 .wrap(
                     () -> {
                       try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) {
                         Table table =
                             client.getTable(
                                 hiveTableName.getDatabaseName(), hiveTableName.getTableName());
                         if (table.getTableType().equals(TableType.VIRTUAL_VIEW.name())
                             && (!isPrestoView(table))) {
                           throw new HiveViewNotSupportedException(
                               new SchemaTableName(
                                   hiveTableName.getDatabaseName(), hiveTableName.getTableName()));
                         }
                         return Optional.of(table);
                       }
                     }));
   } catch (NoSuchObjectException e) {
     return Optional.empty();
   } catch (TException e) {
     throw new PrestoException(HIVE_METASTORE_ERROR, e);
   }
 }
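A hedged usage sketch; the HiveTableName factory method and the table names are assumptions:

  // A missing table surfaces as Optional.empty(); transport failures surface
  // as PrestoException(HIVE_METASTORE_ERROR).
  Optional<Table> table = loadTable(HiveTableName.table("web", "clicks"));
  if (!table.isPresent()) {
    throw new TableNotFoundException(new SchemaTableName("web", "clicks"));
  }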
  private void validateAlterTableColumns(
      Table expectedOldTable, Table expectedNewTable, Table actualOldTable, Table actualNewTable) {
    validateAlterTable(expectedOldTable, expectedNewTable, actualOldTable, actualNewTable);

    assertEquals(expectedOldTable.getSd().getCols(), actualOldTable.getSd().getCols());
    assertEquals(expectedNewTable.getSd().getCols(), actualNewTable.getSd().getCols());
  }
 /**
  * Caches the location of the given Hive Metastore Table and updates the table's properties with
  * the submitted cache directive ID. The caller is responsible for not caching the same table
  * twice, as HDFS will create a second cache directive even if it is similar to an already
  * existing one.
  *
  * <p>Returns the ID of the submitted cache directive and throws if there is an error submitting.
  */
 public static long submitCacheTblDirective(
     org.apache.hadoop.hive.metastore.api.Table table, String poolName, short replication)
     throws ImpalaRuntimeException {
   long id =
       HdfsCachingUtil.submitDirective(
           new Path(table.getSd().getLocation()), poolName, replication);
   if (id != -1) table.putToParameters(CACHE_DIR_ID_PROP_NAME, Long.toString(id));
   table.putToParameters(CACHE_DIR_REPLICATION_PROP_NAME, Long.toString(replication));
   return id;
 }
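A usage sketch under assumed setup; msClient, the pool name, and the table names are hypothetical:

  // Submit the directive, then persist the updated parameters back to the
  // metastore so the cache directive ID survives restarts.
  org.apache.hadoop.hive.metastore.api.Table tbl = msClient.getTable("default", "events");
  long directiveId = submitCacheTblDirective(tbl, "testPool", (short) 1);
  msClient.alter_table("default", "events", tbl);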
 /**
  * Updates the cache directive for a table and the associated metastore parameters. Returns the
  * cache directive ID.
  */
 public static long modifyCacheDirective(
     Long id, org.apache.hadoop.hive.metastore.api.Table table, String poolName, short replication)
     throws ImpalaRuntimeException {
   Preconditions.checkNotNull(id);
   HdfsCachingUtil.modifyCacheDirective(
       id, new Path(table.getSd().getLocation()), poolName, replication);
   table.putToParameters(CACHE_DIR_ID_PROP_NAME, Long.toString(id));
   table.putToParameters(CACHE_DIR_REPLICATION_PROP_NAME, Long.toString(replication));
   return id;
 }
 /**
  * Removes the cache directive associated with the table from HDFS, uncaching all data. Also
  * updates the table's metadata. No-op if the table is not cached.
  */
 public static void uncacheTbl(org.apache.hadoop.hive.metastore.api.Table table)
     throws ImpalaRuntimeException {
   Preconditions.checkNotNull(table);
   LOG.debug("Uncaching table: " + table.getDbName() + "." + table.getTableName());
   Long id = getCacheDirectiveId(table.getParameters());
   if (id == null) return;
   HdfsCachingUtil.removeDirective(id);
   table.getParameters().remove(CACHE_DIR_ID_PROP_NAME);
   table.getParameters().remove(CACHE_DIR_REPLICATION_PROP_NAME);
 }
Example #11
  /** Performs a deep copy on <i>other</i>. */
  public Table(Table other) {
    __isset_bit_vector.clear();
    __isset_bit_vector.or(other.__isset_bit_vector);
    if (other.isSetTableName()) {
      this.tableName = other.tableName;
    }
    if (other.isSetDbName()) {
      this.dbName = other.dbName;
    }
    if (other.isSetOwner()) {
      this.owner = other.owner;
    }
    this.createTime = other.createTime;
    this.lastAccessTime = other.lastAccessTime;
    this.retention = other.retention;
    if (other.isSetSd()) {
      this.sd = new StorageDescriptor(other.sd);
    }
    if (other.isSetPartitionKeys()) {
      List<FieldSchema> __this__partitionKeys = new ArrayList<FieldSchema>();
      for (FieldSchema other_element : other.partitionKeys) {
        __this__partitionKeys.add(new FieldSchema(other_element));
      }
      this.partitionKeys = __this__partitionKeys;
    }
    if (other.isSetParameters()) {
      Map<String, String> __this__parameters = new HashMap<String, String>();
      for (Map.Entry<String, String> other_element : other.parameters.entrySet()) {

        String other_element_key = other_element.getKey();
        String other_element_value = other_element.getValue();

        String __this__parameters_copy_key = other_element_key;

        String __this__parameters_copy_value = other_element_value;

        __this__parameters.put(__this__parameters_copy_key, __this__parameters_copy_value);
      }
      this.parameters = __this__parameters;
    }
    if (other.isSetViewOriginalText()) {
      this.viewOriginalText = other.viewOriginalText;
    }
    if (other.isSetViewExpandedText()) {
      this.viewExpandedText = other.viewExpandedText;
    }
    if (other.isSetTableType()) {
      this.tableType = other.tableType;
    }
    if (other.isSetPrivileges()) {
      this.privileges = new PrincipalPrivilegeSet(other.privileges);
    }
  }
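An illustrative check of the copy semantics above (sketch only): container fields such as parameters are cloned, while String fields are shared, which is safe because String is immutable.

  Table original = new Table();
  original.setTableName("t1");
  Map<String, String> params = new HashMap<String, String>();
  params.put("comment", "a");
  original.setParameters(params);

  Table copy = new Table(original);
  copy.getParameters().put("comment", "b");
  // original.getParameters() still maps "comment" -> "a"; the copy owns its map.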
  @Override
  public void createTable(Table table) throws HiveMetastoreException {
    if (!existsDb(table.getDbName())) {
      throw new HiveMetastoreException("DB " + table.getDbName() + " does not exist!");
    }

    HiveObjectSpec tableSpec = new HiveObjectSpec(table.getDbName(), table.getTableName());
    if (specToTable.containsKey(tableSpec)) {
      throw new HiveMetastoreException("Table already exists: " + tableSpec);
    }
    specToTable.put(tableSpec, table);
  }
 @Override
 public void alterTable(String dbName, String tableName, Table table)
     throws HiveMetastoreException {
   HiveObjectSpec existingTableSpec = new HiveObjectSpec(dbName, tableName);
   HiveObjectSpec newTableSpec = new HiveObjectSpec(table.getDbName(), table.getTableName());
   if (!specToTable.containsKey(existingTableSpec)) {
     throw new HiveMetastoreException("Unknown table: " + existingTableSpec);
   }
   Table removedTable = specToTable.remove(existingTableSpec);
   if (removedTable == null) {
     throw new RuntimeException("Shouldn't happen!");
   }
   specToTable.put(newTableSpec, table);
 }
  @Override
  public void createTable(ConnectorSession session, ConnectorTableMetadata tableMetadata) {
    checkArgument(!isNullOrEmpty(tableMetadata.getOwner()), "Table owner is null or empty");

    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();

    ImmutableList.Builder<String> columnNames = ImmutableList.builder();
    ImmutableList.Builder<Type> columnTypes = ImmutableList.builder();

    buildColumnInfo(tableMetadata, columnNames, columnTypes);

    ImmutableList.Builder<FieldSchema> partitionKeys = ImmutableList.builder();
    ImmutableList.Builder<FieldSchema> columns = ImmutableList.builder();

    List<String> names = columnNames.build();
    List<String> typeNames =
        columnTypes
            .build()
            .stream()
            .map(HiveType::toHiveType)
            .map(HiveType::getHiveTypeName)
            .collect(toList());

    for (int i = 0; i < names.size(); i++) {
      if (tableMetadata.getColumns().get(i).isPartitionKey()) {
        partitionKeys.add(new FieldSchema(names.get(i), typeNames.get(i), null));
      } else {
        columns.add(new FieldSchema(names.get(i), typeNames.get(i), null));
      }
    }

    Path targetPath = getTargetPath(schemaName, tableName, schemaTableName);

    HiveStorageFormat hiveStorageFormat = getHiveStorageFormat(session, this.hiveStorageFormat);
    SerDeInfo serdeInfo = new SerDeInfo();
    serdeInfo.setName(tableName);
    serdeInfo.setSerializationLib(hiveStorageFormat.getSerDe());

    StorageDescriptor sd = new StorageDescriptor();
    sd.setLocation(targetPath.toString());

    sd.setCols(columns.build());
    sd.setSerdeInfo(serdeInfo);
    sd.setInputFormat(hiveStorageFormat.getInputFormat());
    sd.setOutputFormat(hiveStorageFormat.getOutputFormat());

    Table table = new Table();
    table.setDbName(schemaName);
    table.setTableName(tableName);
    table.setOwner(tableMetadata.getOwner());
    table.setTableType(TableType.MANAGED_TABLE.toString());
    String tableComment = "Created by Presto";
    table.setParameters(ImmutableMap.of("comment", tableComment));
    table.setPartitionKeys(partitionKeys.build());
    table.setSd(sd);

    metastore.createTable(table);
  }
 private ConnectorTableMetadata getTableMetadata(SchemaTableName tableName) {
   try {
     Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName());
     if (table.getTableType().equals(TableType.VIRTUAL_VIEW.name())) {
       throw new TableNotFoundException(tableName);
     }
     List<HiveColumnHandle> handles = hiveColumnHandles(typeManager, connectorId, table, false);
     List<ColumnMetadata> columns =
         ImmutableList.copyOf(transform(handles, columnMetadataGetter(table, typeManager)));
     return new ConnectorTableMetadata(tableName, columns, table.getOwner());
   } catch (NoSuchObjectException e) {
     throw new TableNotFoundException(tableName);
   }
 }
Example #16
    /**
     * Validates and gets the query parameter for JDO filter pushdown based on the column and the
     * constant stored in this node.
     *
     * @param table The table.
     * @param partColIndex The index of the column to check.
     * @param filterBuilder filter builder used to report error, if any.
     * @return The parameter string.
     */
    private String getJdoFilterPushdownParam(
        Table table, int partColIndex, FilterBuilder filterBuilder, boolean canPushDownIntegral)
        throws MetaException {
      boolean isIntegralSupported = canPushDownIntegral && canJdoUseStringsWithIntegral();
      String colType = table.getPartitionKeys().get(partColIndex).getType();
      // Can only support partitions whose types are string, or maybe integers
      if (!colType.equals(serdeConstants.STRING_TYPE_NAME)
          && (!isIntegralSupported || !serdeConstants.IntegralTypes.contains(colType))) {
        filterBuilder.setError(
            "Filtering is supported only on partition keys of type "
                + "string"
                + (isIntegralSupported ? ", or integral types" : ""));
        return null;
      }

      // There's no support for date cast in JDO. Let's convert it to string; the date
      // columns have been excluded above, so it will either compare w/string or fail.
      Object val = value;
      if (value instanceof Date) {
        val = HiveMetaStore.PARTITION_DATE_FORMAT.get().format((Date) value);
      }
      boolean isStringValue = val instanceof String;
      if (!isStringValue && (!isIntegralSupported || !(val instanceof Long))) {
        filterBuilder.setError(
            "Filtering is supported only on partition keys of type "
                + "string"
                + (isIntegralSupported ? ", or integral types" : ""));
        return null;
      }

      return isStringValue ? (String) val : Long.toString((Long) val);
    }
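For illustration only: the Date-to-String conversion above relies on Hive's thread-local partition date format; the "yyyy-MM-dd" pattern below is an assumption matching Hive's partition date layout.

  // Hypothetical rendering of a Date constant before JDO comparison, since
  // JDO offers no date cast.
  SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd");  // assumed pattern
  String param = fmt.format(new java.util.Date());            // e.g. "2015-06-01"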
Example #17
 // This method is completely copied from Hive's HBaseStorageHandler.java.
 private String getHBaseTableName(org.apache.hadoop.hive.metastore.api.Table tbl) {
   // Give preference to TBLPROPERTIES over SERDEPROPERTIES
   // (really we should only use TBLPROPERTIES, so this is just
   // for backwards compatibility with the original specs).
   String tableName = tbl.getParameters().get(HBaseSerDe.HBASE_TABLE_NAME);
   if (tableName == null) {
     tableName = tbl.getSd().getSerdeInfo().getParameters().get(HBaseSerDe.HBASE_TABLE_NAME);
   }
   if (tableName == null) {
     tableName = tbl.getDbName() + "." + tbl.getTableName();
     if (tableName.startsWith(DEFAULT_PREFIX)) {
       tableName = tableName.substring(DEFAULT_PREFIX.length());
     }
   }
   return tableName;
 }
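A small sketch of the fallback path (table setup is hypothetical):

  // With no hbase.table.name in either TBLPROPERTIES or SERDEPROPERTIES, the
  // name falls back to "db.table", minus the default-database prefix.
  Table tbl = new Table();
  tbl.setDbName("default");
  tbl.setTableName("users");
  tbl.setParameters(new HashMap<String, String>());
  StorageDescriptor sd = new StorageDescriptor();
  SerDeInfo serdeInfo = new SerDeInfo();
  serdeInfo.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(serdeInfo);
  tbl.setSd(sd);
  // Assuming DEFAULT_PREFIX is "default.", getHBaseTableName(tbl) returns "users".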
Example #18
 @Override
 public Integer run() throws Exception {
   if (jobContext.getCoreExitCode() != 0) {
     log("Job 运行失败,不进行产出目录清理");
     return 0;
   }
   SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd");
   Calendar cal = Calendar.getInstance();
   cal.add(Calendar.DAY_OF_YEAR, keepDays * (-1));
   Date limit = format.parse(format.format(cal.getTime()));
   for (String tableName : tables) {
     Table t = tableManager.getTable(tableName);
     int ptIndex = -1;
     for (FieldSchema fs : t.getPartitionKeys()) {
       ptIndex++;
       if (fs.getName().equalsIgnoreCase("pt")) {
         break;
       }
     }
     if (ptIndex < 0) {
       log("表" + tableName + "不含pt分区字段,不进行历史分区清理");
       continue;
     }
     List<Partition> parts = tableManager.getPartitions(tableName, null);
     for (Partition p : parts) {
       Date ptDate = null;
       try {
         ptDate = format.parse(StringUtils.substring(p.getValues().get(ptIndex), 0, 8));
       } catch (Exception e) {
         log("分区字段格式非法:");
         log(e);
       }
       if (ptDate == null) {
         log("解析分区时间失败。" + p.getValues().get(ptIndex));
         continue;
       }
       if (ptDate.before(limit)) {
         if (!tableManager.dropPartition(tableName, p.getValues(), true)) {
           log("drop partition failed.table[" + tableName + "],part_vals=[" + p.getValues());
         } else {
           log("drop partition ok. Table[" + tableName + "],part_vals=[" + p.getValues() + "]");
         }
       }
     }
   }
   return 0;
 }
  @Before
  public void createTable() throws Exception {
    // Use Junit's Assume to skip running this fixture against any storage formats whose
    // SerDe is in the disabled serdes list.
    Assume.assumeTrue(!DISABLED_SERDES.contains(serdeClass));

    String databaseName = (dbName == null) ? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName;
    try {
      client.dropTable(databaseName, tableName);
    } catch (Exception e) {
      // Can fail with NoSuchObjectException.
    }

    Table tbl = new Table();
    tbl.setDbName(databaseName);
    tbl.setTableName(tableName);
    if (isTableExternal()) {
      tbl.setTableType(TableType.EXTERNAL_TABLE.toString());
    } else {
      tbl.setTableType(TableType.MANAGED_TABLE.toString());
    }
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(getTableColumns());

    tbl.setPartitionKeys(getPartitionKeys());
    tbl.setSd(sd);

    sd.setBucketCols(new ArrayList<String>(2));
    sd.setSerdeInfo(new SerDeInfo());
    sd.getSerdeInfo().setName(tbl.getTableName());
    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
    sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
    if (isTableExternal()) {
      sd.getSerdeInfo().getParameters().put("EXTERNAL", "TRUE");
    }
    sd.getSerdeInfo().setSerializationLib(serdeClass);
    sd.setInputFormat(inputFormatClass);
    sd.setOutputFormat(outputFormatClass);

    Map<String, String> tableParams = new HashMap<String, String>();
    if (isTableExternal()) {
      tableParams.put("EXTERNAL", "TRUE");
    }
    if (isTableImmutable()) {
      tableParams.put(hive_metastoreConstants.IS_IMMUTABLE, "true");
    }
    StatsSetupConst.setBasicStatsState(tableParams, StatsSetupConst.TRUE);
    tbl.setParameters(tableParams);

    client.createTable(tbl);
  }
Example #20
  private void renameTable(String databaseName, String tableName, String newTableName) {
    HiveCatalogStoreClientPool.HiveCatalogStoreClient client = null;
    try {
      client = clientPool.getClient();
      Table newTable = client.getHiveClient().getTable(databaseName, tableName);
      newTable.setTableName(newTableName);
      client.getHiveClient().alter_table(databaseName, tableName, newTable);

    } catch (NoSuchObjectException nsoe) {
      // Table does not exist; nothing to rename.
    } catch (Exception e) {
      throw new TajoInternalError(e);
    } finally {
      if (client != null) {
        client.release();
      }
    }
  }
  /*
   * Builds a TDescribeTableResult that contains the result of a DESCRIBE FORMATTED
   * <table> command. For the formatted describe output the goal is to be exactly the
   * same as what Hive (via HiveServer2) outputs, for compatibility reasons. To do this,
   * Hive's MetadataFormatUtils class is used to build the results.
   */
  private static TDescribeTableResult describeTableFormatted(Table table) {
    TDescribeTableResult descResult = new TDescribeTableResult();
    descResult.results = Lists.newArrayList();

    org.apache.hadoop.hive.metastore.api.Table msTable = table.getMetaStoreTable().deepCopy();
    // Fixup the metastore table so the output of DESCRIBE FORMATTED matches Hive's.
    // This is to distinguish between empty comments and no comments (value is null).
    for (FieldSchema fs : msTable.getSd().getCols()) {
      fs.setComment(table.getColumn(fs.getName()).getComment());
    }
    for (FieldSchema fs : msTable.getPartitionKeys()) {
      fs.setComment(table.getColumn(fs.getName()).getComment());
    }

    // To avoid initializing any of the SerDe classes in the metastore table Thrift
    // struct, create the ql.metadata.Table object by calling the empty c'tor and
    // then calling setTTable().
    org.apache.hadoop.hive.ql.metadata.Table hiveTable =
        new org.apache.hadoop.hive.ql.metadata.Table();
    hiveTable.setTTable(msTable);
    StringBuilder sb = new StringBuilder();
    // First add all the columns (includes partition columns).
    sb.append(
        MetaDataFormatUtils.getAllColumnsInformation(
            msTable.getSd().getCols(), msTable.getPartitionKeys()));
    // Add the extended table metadata information.
    sb.append(MetaDataFormatUtils.getTableInformation(hiveTable));

    for (String line : sb.toString().split("\n")) {
      // To match Hive's HiveServer2 output, split each line into multiple column
      // values based on the field delimiter.
      String[] columns = line.split(MetaDataFormatUtils.FIELD_DELIM);
      TResultRow resultRow = new TResultRow();
      for (int i = 0; i < NUM_DESC_FORMATTED_RESULT_COLS; ++i) {
        TColumnValue colVal = new TColumnValue();
        colVal.setString_val(null);
        if (columns.length > i) {
          // Add the column value.
          colVal.setString_val(columns[i]);
        }
        resultRow.addToColVals(colVal);
      }
      descResult.results.add(resultRow);
    }
    return descResult;
  }
  private static Function<HiveColumnHandle, ColumnMetadata> columnMetadataGetter(
      Table table, final TypeManager typeManager) {
    ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
    for (FieldSchema field : concat(table.getSd().getCols(), table.getPartitionKeys())) {
      if (field.getComment() != null) {
        builder.put(field.getName(), field.getComment());
      }
    }
    final Map<String, String> columnComment = builder.build();

    return input ->
        new ColumnMetadata(
            input.getName(),
            typeManager.getType(input.getTypeSignature()),
            input.isPartitionKey(),
            columnComment.get(input.getName()),
            false);
  }
  /**
   * Returns the partition name (e.g. ds=1/hr=2) given a Table and Partition object. For simplicity,
   * this does not handle special characters properly.
   *
   * @param table the table that the partition belongs to
   * @param partition the partition to get the name for
   * @return the name of the partition
   * @throws HiveMetastoreException if the schema between the table and partition do not match
   */
  private String getPartitionName(Table table, Partition partition) throws HiveMetastoreException {
    if (table.getPartitionKeys().size() != partition.getValues().size()) {
      throw new HiveMetastoreException(
          "Partition column mismatch: "
              + "table has "
              + table.getPartitionKeys().size()
              + " columns "
              + "while partition has "
              + partition.getValues().size()
              + " values");
    }

    List<String> keyValues = new ArrayList<>();
    int keyValueIndex = 0;
    for (FieldSchema field : table.getPartitionKeys()) {
      keyValues.add(field.getName() + "=" + partition.getValues().get(keyValueIndex));
      keyValueIndex++;
    }
    return StringUtils.join(keyValues, "/");
  }
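An illustrative call matching the javadoc's ds=1/hr=2 example (objects built inline, same-class context assumed):

  Table table = new Table();
  table.setPartitionKeys(Arrays.asList(
      new FieldSchema("ds", "string", null),
      new FieldSchema("hr", "string", null)));
  Partition partition = new Partition();
  partition.setValues(Arrays.asList("1", "2"));
  String name = getPartitionName(table, partition);  // "ds=1/hr=2"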
 /**
  * Returns the table parameter 'transient_lastDdlTime', or -1 if it's not set. TODO: move this to
  * a metastore helper class.
  */
 public static long getLastDdlTime(org.apache.hadoop.hive.metastore.api.Table msTbl) {
   Preconditions.checkNotNull(msTbl);
   Map<String, String> params = msTbl.getParameters();
   String lastDdlTimeStr = params.get("transient_lastDdlTime");
   if (lastDdlTimeStr != null) {
     try {
       return Long.parseLong(lastDdlTimeStr);
      } catch (NumberFormatException e) {
        // Malformed value; fall through and return -1.
      }
   }
   return -1;
 }
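A hedged usage sketch; msClient and cachedLastDdlTime are hypothetical:

  // Compare against a previously cached value to decide whether table
  // metadata must be reloaded.
  long lastDdlTime = getLastDdlTime(msClient.getTable("default", "events"));
  if (lastDdlTime > cachedLastDdlTime) {
    // the table changed since the last load; refresh metadata here
  }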
 private String getPartitionName(Table table, List<String> values) {
   StringBuilder sb = new StringBuilder();
   int index = 0;
   for (FieldSchema fs : table.getPartitionKeys()) {
     if (index > 0) {
       sb.append("/");
     }
     sb.append(fs.getName());
     sb.append("=");
     sb.append(values.get(index));
     index++;
   }
   return sb.toString();
 }
  private static List<String> listAllDataPaths(
      HiveMetastore metastore, String schemaName, String tableName) {
    ImmutableList.Builder<String> locations = ImmutableList.builder();
    Table table = metastore.getTable(schemaName, tableName).get();
    if (table.getSd().getLocation() != null) {
      // For unpartitioned table, there should be nothing directly under this directory.
      // But including this location in the set makes the directory content assert more
      // extensive, which is desirable.
      locations.add(table.getSd().getLocation());
    }

    Optional<List<String>> partitionNames = metastore.getPartitionNames(schemaName, tableName);
    if (partitionNames.isPresent()) {
      metastore
          .getPartitionsByNames(schemaName, tableName, partitionNames.get())
          .stream()
          .map(partition -> partition.getSd().getLocation())
          .filter(location -> !location.startsWith(table.getSd().getLocation()))
          .forEach(locations::add);
    }

    return locations.build();
  }
  @Override
  public void dropTable(ConnectorTableHandle tableHandle) {
    HiveTableHandle handle = checkType(tableHandle, HiveTableHandle.class, "tableHandle");
    SchemaTableName tableName = schemaTableName(tableHandle);

    if (!allowDropTable) {
      throw new PrestoException(PERMISSION_DENIED, "DROP TABLE is disabled in this Hive catalog");
    }

    try {
      Table table = metastore.getTable(handle.getSchemaName(), handle.getTableName());
      if (!handle.getSession().getUser().equals(table.getOwner())) {
        throw new PrestoException(
            PERMISSION_DENIED,
            format(
                "Unable to drop table '%s': owner of the table is different from session user",
                table));
      }
      metastore.dropTable(handle.getSchemaName(), handle.getTableName());
    } catch (NoSuchObjectException e) {
      throw new TableNotFoundException(tableName);
    }
  }
Example #28
    /**
     * Get partition column index in the table partition column list that corresponds to the key
     * that is being filtered on by this tree node.
     *
     * @param table The table.
     * @param filterBuilder filter builder used to report error, if any.
     * @return The index.
     */
    public int getPartColIndexForFilter(Table table, FilterBuilder filterBuilder)
        throws MetaException {
      int partitionColumnIndex;
      assert (table.getPartitionKeys().size() > 0);
      for (partitionColumnIndex = 0;
          partitionColumnIndex < table.getPartitionKeys().size();
          ++partitionColumnIndex) {
        if (table
            .getPartitionKeys()
            .get(partitionColumnIndex)
            .getName()
            .equalsIgnoreCase(keyName)) {
          break;
        }
      }
      if (partitionColumnIndex == table.getPartitionKeys().size()) {
        filterBuilder.setError(
            "Specified key <" + keyName + "> is not a partitioning key for the table");
        return -1;
      }

      return partitionColumnIndex;
    }
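The same scan restated as a standalone helper over the Thrift Table type (a sketch, not the Hive implementation; the error path goes through FilterBuilder in the method above):

  static int partColIndex(Table table, String keyName) {
    List<FieldSchema> keys = table.getPartitionKeys();
    for (int i = 0; i < keys.size(); i++) {
      if (keys.get(i).getName().equalsIgnoreCase(keyName)) {
        return i;
      }
    }
    return -1;  // keyName is not a partitioning key
  }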
 @Override
 public void createTable(Table table) {
   try {
     retry()
         .stopOn(
             AlreadyExistsException.class,
             InvalidObjectException.class,
             MetaException.class,
             NoSuchObjectException.class)
         .stopOnIllegalExceptions()
         .run(
             "createTable",
             stats
                 .getCreateTable()
                 .wrap(
                     () -> {
                       try (HiveMetastoreClient client = clientProvider.createMetastoreClient()) {
                         client.createTable(table);
                       }
                       return null;
                     }));
   } catch (AlreadyExistsException e) {
     throw new TableAlreadyExistsException(
         new SchemaTableName(table.getDbName(), table.getTableName()));
   } catch (NoSuchObjectException e) {
     throw new SchemaNotFoundException(table.getDbName());
   } catch (TException e) {
     throw new PrestoException(HIVE_METASTORE_ERROR, e);
   } catch (Exception e) {
     if (e instanceof InterruptedException) {
       Thread.currentThread().interrupt();
     }
     throw Throwables.propagate(e);
   } finally {
     invalidateTable(table.getDbName(), table.getTableName());
   }
 }
  @Override
  public Map<SchemaTableName, String> getViews(ConnectorSession session, SchemaTablePrefix prefix) {
    ImmutableMap.Builder<SchemaTableName, String> views = ImmutableMap.builder();
    List<SchemaTableName> tableNames;
    if (prefix.getTableName() != null) {
      tableNames =
          ImmutableList.of(new SchemaTableName(prefix.getSchemaName(), prefix.getTableName()));
    } else {
      tableNames = listViews(session, prefix.getSchemaName());
    }

    for (SchemaTableName schemaTableName : tableNames) {
      try {
        Table table =
            metastore.getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName());
        if (HiveUtil.isPrestoView(table)) {
          views.put(schemaTableName, decodeViewData(table.getViewOriginalText()));
        }
      } catch (NoSuchObjectException ignored) {
      }
    }

    return views.build();
  }