Code example #1
File: MetaDataClient.java  Project: QimengZou/phoenix
 public MutationState dropTable(DropTableStatement statement) throws SQLException {
   connection.rollback();
   boolean wasAutoCommit = connection.getAutoCommit();
   try {
     TableName tableNameNode = statement.getTableName();
     String schemaName = tableNameNode.getSchemaName();
     String tableName = tableNameNode.getTableName();
     byte[] key = SchemaUtil.getTableKey(schemaName, tableName);
     Long scn = connection.getSCN();
      // FIXME: Remove when unintentionally deprecated method is fixed (HBASE-7870).
      // FIXME: the version of the Delete constructor without the lock args was introduced
      // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
      // of the client.
      @SuppressWarnings("deprecation")
      List<Mutation> tableMetaData =
          Collections.<Mutation>singletonList(
              new Delete(key, scn == null ? HConstants.LATEST_TIMESTAMP : scn, null));
     MetaDataMutationResult result =
         connection.getQueryServices().dropTable(tableMetaData, statement.isView());
     MutationCode code = result.getMutationCode();
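      // The drop request comes back with a MutationCode describing how it was
      // resolved; each case below maps to a client-side exception or cache cleanup.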
     switch (code) {
       case TABLE_NOT_FOUND:
         if (!statement.ifExists()) {
           throw new TableNotFoundException(schemaName, tableName);
         }
         break;
       case NEWER_TABLE_FOUND:
         throw new NewerTableAlreadyExistsException(schemaName, tableName);
       case UNALLOWED_TABLE_MUTATION:
         throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE)
             .setSchemaName(schemaName)
             .setTableName(tableName)
             .build()
             .buildException();
       default:
         try {
           connection.removeTable(schemaName, tableName);
         } catch (TableNotFoundException e) { // Ignore - just means wasn't cached
         }
         if (!statement.isView()) {
           connection.setAutoCommit(true);
           // Delete everything in the column. You'll still be able to do queries at earlier
           // timestamps
           long ts = (scn == null ? result.getMutationTime() : scn);
           // Create empty table and schema - they're only used to get the name from
           // PName name, PTableType type, long timeStamp, long sequenceNumber, List<PColumn>
           // columns
           PTable table = result.getTable();
           PSchema schema =
               new PSchemaImpl(
                   schemaName,
                   ImmutableMap.<String, PTable>of(table.getName().getString(), table));
           TableRef tableRef = new TableRef(null, table, schema, ts);
           MutationPlan plan =
               new PostDDLCompiler(connection)
                   .compile(tableRef, null, Collections.<PColumn>emptyList(), ts);
           return connection.getQueryServices().updateData(plan);
         }
         break;
     }
     return new MutationState(0, connection);
   } finally {
     connection.setAutoCommit(wasAutoCommit);
   }
 }
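
Usage note (not part of the original source): MetaDataClient.dropTable is normally reached through the Phoenix JDBC driver, where executing DROP TABLE DDL produces the DropTableStatement handled above. A minimal sketch, assuming a locally reachable cluster; the JDBC URL and table name are placeholders:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    // "jdbc:phoenix:localhost" and MY_SCHEMA.MY_TABLE are placeholders for this sketch.
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
        Statement stmt = conn.createStatement()) {
      // IF EXISTS corresponds to statement.ifExists() above: a missing table is
      // silently ignored instead of surfacing as TableNotFoundException.
      stmt.execute("DROP TABLE IF EXISTS MY_SCHEMA.MY_TABLE");
    }
  }
}
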
Code example #2
File: MetaDataClient.java  Project: QimengZou/phoenix
  public MutationState createTable(CreateTableStatement statement, byte[][] splits)
      throws SQLException {
    PTableType tableType = statement.getTableType();
    boolean isView = tableType == PTableType.VIEW;
    if (isView && !statement.getProps().isEmpty()) {
      throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_TABLE_CONFIG)
          .build()
          .buildException();
    }
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
      connection.setAutoCommit(false);
      TableName tableNameNode = statement.getTableName();
      String schemaName = tableNameNode.getSchemaName();
      String tableName = tableNameNode.getTableName();

      PrimaryKeyConstraint pkConstraint = statement.getPrimaryKeyConstraint();
      String pkName = null;
      Set<String> pkColumns = Collections.<String>emptySet();
      Iterator<String> pkColumnsIterator = Iterators.emptyIterator();
      if (pkConstraint != null) {
        pkColumns = pkConstraint.getColumnNames();
        pkColumnsIterator = pkColumns.iterator();
        pkName = pkConstraint.getName();
      }

      List<ColumnDef> colDefs = statement.getColumnDefs();
      List<PColumn> columns = Lists.newArrayListWithExpectedSize(colDefs.size());
      PreparedStatement colUpsert = connection.prepareStatement(INSERT_COLUMN);
      int columnOrdinal = 0;
      Map<String, PName> familyNames = Maps.newLinkedHashMap();
      boolean isPK = false;
      for (ColumnDef colDef : colDefs) {
        if (colDef.isPK()) {
          if (isPK) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_ALREADY_EXISTS)
                .setColumnName(colDef.getColumnDefName().getColumnName().getName())
                .build()
                .buildException();
          }
          isPK = true;
        }
        PColumn column = newColumn(columnOrdinal++, colDef, pkConstraint);
        if (SchemaUtil.isPKColumn(column)) {
          // TODO: remove this constraint?
          if (!pkColumns.isEmpty()
              && !column.getName().getString().equals(pkColumnsIterator.next())) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_OUT_OF_ORDER)
                .setSchemaName(schemaName)
                .setTableName(tableName)
                .setColumnName(column.getName().getString())
                .build()
                .buildException();
          }
        }
        columns.add(column);
        if (colDef.getDataType() == PDataType.BINARY && colDefs.size() > 1) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.BINARY_IN_ROW_KEY)
              .setSchemaName(schemaName)
              .setTableName(tableName)
              .setColumnName(column.getName().getString())
              .build()
              .buildException();
        }
        if (column.getFamilyName() != null) {
          familyNames.put(column.getFamilyName().getString(), column.getFamilyName());
        }
      }
      if (!isPK && pkColumns.isEmpty()) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_MISSING)
            .setSchemaName(schemaName)
            .setTableName(tableName)
            .build()
            .buildException();
      }

      List<Pair<byte[], Map<String, Object>>> familyPropList =
          Lists.newArrayListWithExpectedSize(familyNames.size());
      Map<String, Object> commonFamilyProps = Collections.emptyMap();
      Map<String, Object> tableProps = Collections.emptyMap();
      if (!statement.getProps().isEmpty()) {
        if (statement.isView()) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES)
              .build()
              .buildException();
        }
        for (String familyName : statement.getProps().keySet()) {
          if (!familyName.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY)) {
            if (familyNames.get(familyName) == null) {
              throw new SQLExceptionInfo.Builder(SQLExceptionCode.PROPERTIES_FOR_FAMILY)
                  .setFamilyName(familyName)
                  .build()
                  .buildException();
            }
          }
        }
        commonFamilyProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());
        tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());

        Collection<Pair<String, Object>> props =
            statement.getProps().get(QueryConstants.ALL_FAMILY_PROPERTIES_KEY);
        // Somewhat hacky way of determining if property is for HColumnDescriptor or
        // HTableDescriptor
        HColumnDescriptor defaultDescriptor =
            new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
        for (Pair<String, Object> prop : props) {
          if (defaultDescriptor.getValue(prop.getFirst()) != null) {
            commonFamilyProps.put(prop.getFirst(), prop.getSecond());
          } else {
            tableProps.put(prop.getFirst(), prop.getSecond());
          }
        }
      }

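      // Build the per-family property list: every declared column family gets the
      // common (ALL_FAMILY_PROPERTIES_KEY) properties, overridden by any
      // family-specific properties supplied in the statement.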
      for (PName familyName : familyNames.values()) {
        Collection<Pair<String, Object>> props = statement.getProps().get(familyName.getString());
        if (props.isEmpty()) {
          familyPropList.add(
              new Pair<byte[], Map<String, Object>>(familyName.getBytes(), commonFamilyProps));
        } else {
          Map<String, Object> combinedFamilyProps =
              Maps.newHashMapWithExpectedSize(props.size() + commonFamilyProps.size());
          combinedFamilyProps.putAll(commonFamilyProps);
          for (Pair<String, Object> prop : props) {
            combinedFamilyProps.put(prop.getFirst(), prop.getSecond());
          }
          familyPropList.add(
              new Pair<byte[], Map<String, Object>>(familyName.getBytes(), combinedFamilyProps));
        }
      }

      // Bootstrapping for our SYSTEM.TABLE that creates itself before it exists
      if (tableType == PTableType.SYSTEM) {
        PTable table =
            new PTableImpl(
                new PNameImpl(tableName),
                tableType,
                MetaDataProtocol.MIN_TABLE_TIMESTAMP,
                0,
                QueryConstants.SYSTEM_TABLE_PK_NAME,
                null,
                columns);
        connection.addTable(schemaName, table);
      }

      for (PColumn column : columns) {
        addColumnMutation(schemaName, tableName, column, colUpsert);
      }

      Integer saltBucketNum = (Integer) tableProps.remove(PhoenixDatabaseMetaData.SALT_BUCKETS);
      if (saltBucketNum != null
          && (saltBucketNum <= 0 || saltBucketNum > SaltingUtil.MAX_BUCKET_NUM)) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_BUCKET_NUM)
            .build()
            .buildException();
      }

      PreparedStatement tableUpsert = connection.prepareStatement(CREATE_TABLE);
      tableUpsert.setString(1, schemaName);
      tableUpsert.setString(2, tableName);
      tableUpsert.setString(3, tableType.getSerializedValue());
      tableUpsert.setInt(4, 0);
      tableUpsert.setInt(5, columnOrdinal);
      if (saltBucketNum != null) {
        tableUpsert.setInt(6, saltBucketNum);
      } else {
        tableUpsert.setNull(6, Types.INTEGER);
      }
      tableUpsert.setString(7, pkName);
      tableUpsert.execute();

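      // The column and table upserts above were only buffered locally (auto-commit is
      // off); capture them as mutations, roll back the local state, and send them to
      // the query services as a single createTable call.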
      final List<Mutation> tableMetaData = connection.getMutationState().toMutations();
      connection.rollback();

      MetaDataMutationResult result =
          connection
              .getQueryServices()
              .createTable(tableMetaData, isView, tableProps, familyPropList, splits);
      MutationCode code = result.getMutationCode();
      switch (code) {
        case TABLE_ALREADY_EXISTS:
          connection.addTable(schemaName, result.getTable());
          if (!statement.ifNotExists()) {
            throw new TableAlreadyExistsException(schemaName, tableName);
          }
          break;
        case NEWER_TABLE_FOUND:
          // TODO: add table if in result?
          throw new NewerTableAlreadyExistsException(schemaName, tableName);
        case UNALLOWED_TABLE_MUTATION:
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE)
              .setSchemaName(schemaName)
              .setTableName(tableName)
              .build()
              .buildException();
        default:
          PTable table =
              new PTableImpl(
                  new PNameImpl(tableName),
                  tableType,
                  result.getMutationTime(),
                  0,
                  pkName,
                  saltBucketNum,
                  columns);
          connection.addTable(schemaName, table);
          if (tableType == PTableType.USER) {
            connection.setAutoCommit(true);
            // Delete everything in the column. You'll still be able to do queries at earlier
            // timestamps
            Long scn = connection.getSCN();
            long ts = (scn == null ? result.getMutationTime() : scn);
            PSchema schema =
                new PSchemaImpl(
                    schemaName,
                    ImmutableMap.<String, PTable>of(table.getName().getString(), table));
            TableRef tableRef = new TableRef(null, table, schema, ts);
            byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(table.getColumnFamilies());
            MutationPlan plan =
                new PostDDLCompiler(connection).compile(tableRef, emptyCF, null, ts);
            return connection.getQueryServices().updateData(plan);
          }
          break;
      }
      return new MutationState(0, connection);
    } finally {
      connection.setAutoCommit(wasAutoCommit);
    }
  }
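
Usage note (not part of the original source): createTable is likewise driven by CREATE TABLE DDL issued over JDBC. A minimal sketch with a placeholder table that touches the main branches above: IF NOT EXISTS maps to statement.ifNotExists(), the CONSTRAINT clause populates the PrimaryKeyConstraint, and SALT_BUCKETS becomes the saltBucketNum table property that is range-checked against SaltingUtil.MAX_BUCKET_NUM:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder connection URL and table name.
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
        Statement stmt = conn.createStatement()) {
      stmt.execute(
          "CREATE TABLE IF NOT EXISTS MY_SCHEMA.MY_TABLE ("
              + " ID BIGINT NOT NULL,"
              + " NAME VARCHAR,"
              + " CONSTRAINT PK PRIMARY KEY (ID)"
              + ") SALT_BUCKETS = 4");
    }
  }
}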