Example #1
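 /**
  * Converts the internal Partition representation into the Thrift
  * org.apache.hadoop.hive.metastore.api.Partition expected by the Hive metastore, copying the
  * database and table names, partition values, storage descriptor, and parameters.
  */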
 public static org.apache.hadoop.hive.metastore.api.Partition toMetastoreApiPartition(
     Partition partition) {
   org.apache.hadoop.hive.metastore.api.Partition result =
       new org.apache.hadoop.hive.metastore.api.Partition();
   result.setDbName(partition.getDatabaseName());
   result.setTableName(partition.getTableName());
   result.setValues(partition.getValues());
   result.setSd(
       makeStorageDescriptor(
           partition.getTableName(), partition.getColumns(), partition.getStorage()));
   result.setParameters(partition.getParameters());
   return result;
 }
Example #2
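  /**
   * Looks up a single partition in the Hive metastore and converts it into Tajo's
   * CatalogProtos.PartitionDescProto, returning null when the partition does not exist.
   */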
  @Override
  public CatalogProtos.PartitionDescProto getPartition(
      String databaseName, String tableName, String partitionName) throws CatalogException {
    HiveCatalogStoreClientPool.HiveCatalogStoreClient client = null;
    CatalogProtos.PartitionDescProto.Builder builder = null;

    try {
      client = clientPool.getClient();

      Partition partition =
          client.getHiveClient().getPartition(databaseName, tableName, partitionName);
      builder = CatalogProtos.PartitionDescProto.newBuilder();
      builder.setPartitionName(partitionName);
      builder.setPath(partition.getSd().getLocation());

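      // A Hive partition name has the form "col1=value1/col2=value2/..."; split it so the
      // column name of each segment can be paired with the corresponding partition value.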
      String[] partitionNames = partitionName.split("/");

      for (int i = 0; i < partition.getValues().size(); i++) {
        String value = partition.getValues().get(i);
        CatalogProtos.PartitionKeyProto.Builder keyBuilder =
            CatalogProtos.PartitionKeyProto.newBuilder();

        String columnName = partitionNames[i].split("=")[0];
        keyBuilder.setColumnName(columnName);
        keyBuilder.setPartitionValue(value);
        builder.addPartitionKeys(keyBuilder);
      }
    } catch (NoSuchObjectException e) {
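      // A missing partition is reported by the metastore as NoSuchObjectException;
      // surface it to callers as a null result rather than an error.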
      return null;
    } catch (Exception e) {
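      // Wrap any other metastore or client failure in Tajo's internal error type.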
      throw new TajoInternalError(e);
    } finally {
      if (client != null) {
        client.release();
      }
    }
    return builder.build();
  }