@Override
public void write(DataOutput output) throws IOException {
    Bytes.writeByteArray(output, name.getBytes());
    WritableUtils.writeVInt(output, type.ordinal());
    WritableUtils.writeVLong(output, sequenceNumber);
    output.writeLong(timeStamp);
    Bytes.writeByteArray(
        output, pkName == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(pkName));
    WritableUtils.writeVInt(output, allColumns.size());
    for (int i = 0; i < allColumns.size(); i++) {
        PColumn column = allColumns.get(i);
        column.write(output);
    }
    stats.write(output);
}
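A minimal round-trip sketch of this serialization, assuming PTableImpl follows the usual Hadoop Writable contract with a matching readFields(DataInput) and a no-argument constructor (both are assumptions here, not shown above):

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class PTableWritableRoundTrip {
    // Serializes a table with the write() method above and reads it back.
    public static PTableImpl roundTrip(PTableImpl table) throws IOException {
        DataOutputBuffer out = new DataOutputBuffer();
        table.write(out); // name, type, sequence number, timestamp, PK name, columns, stats

        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());

        PTableImpl copy = new PTableImpl(); // assumed no-arg constructor for deserialization
        copy.readFields(in);                // assumed Writable counterpart of write()
        return copy;
    }
}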
public MutationState createTable(CreateTableStatement statement, byte[][] splits) throws SQLException {
    PTableType tableType = statement.getTableType();
    boolean isView = tableType == PTableType.VIEW;
    if (isView && !statement.getProps().isEmpty()) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_TABLE_CONFIG)
            .build()
            .buildException();
    }
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        connection.setAutoCommit(false);
        TableName tableNameNode = statement.getTableName();
        String schemaName = tableNameNode.getSchemaName();
        String tableName = tableNameNode.getTableName();

        PrimaryKeyConstraint pkConstraint = statement.getPrimaryKeyConstraint();
        String pkName = null;
        Set<String> pkColumns = Collections.<String>emptySet();
        Iterator<String> pkColumnsIterator = Iterators.emptyIterator();
        if (pkConstraint != null) {
            pkColumns = pkConstraint.getColumnNames();
            pkColumnsIterator = pkColumns.iterator();
            pkName = pkConstraint.getName();
        }

        List<ColumnDef> colDefs = statement.getColumnDefs();
        List<PColumn> columns = Lists.newArrayListWithExpectedSize(colDefs.size());
        PreparedStatement colUpsert = connection.prepareStatement(INSERT_COLUMN);
        int columnOrdinal = 0;
        Map<String, PName> familyNames = Maps.newLinkedHashMap();
        boolean isPK = false;
        for (ColumnDef colDef : colDefs) {
            if (colDef.isPK()) {
                if (isPK) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_ALREADY_EXISTS)
                        .setColumnName(colDef.getColumnDefName().getColumnName().getName())
                        .build()
                        .buildException();
                }
                isPK = true;
            }
            PColumn column = newColumn(columnOrdinal++, colDef, pkConstraint);
            if (SchemaUtil.isPKColumn(column)) {
                // TODO: remove this constraint?
                if (!pkColumns.isEmpty()
                        && !column.getName().getString().equals(pkColumnsIterator.next())) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_OUT_OF_ORDER)
                        .setSchemaName(schemaName)
                        .setTableName(tableName)
                        .setColumnName(column.getName().getString())
                        .build()
                        .buildException();
                }
            }
            columns.add(column);
            if (colDef.getDataType() == PDataType.BINARY && colDefs.size() > 1) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.BINARY_IN_ROW_KEY)
                    .setSchemaName(schemaName)
                    .setTableName(tableName)
                    .setColumnName(column.getName().getString())
                    .build()
                    .buildException();
            }
            if (column.getFamilyName() != null) {
                familyNames.put(column.getFamilyName().getString(), column.getFamilyName());
            }
        }
        if (!isPK && pkColumns.isEmpty()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_MISSING)
                .setSchemaName(schemaName)
                .setTableName(tableName)
                .build()
                .buildException();
        }

        List<Pair<byte[], Map<String, Object>>> familyPropList =
            Lists.newArrayListWithExpectedSize(familyNames.size());
        Map<String, Object> commonFamilyProps = Collections.emptyMap();
        Map<String, Object> tableProps = Collections.emptyMap();
        if (!statement.getProps().isEmpty()) {
            if (statement.isView()) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES)
                    .build()
                    .buildException();
            }
            for (String familyName : statement.getProps().keySet()) {
                if (!familyName.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY)) {
                    if (familyNames.get(familyName) == null) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.PROPERTIES_FOR_FAMILY)
                            .setFamilyName(familyName)
                            .build()
                            .buildException();
                    }
                }
            }
            commonFamilyProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());
            tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());
            Collection<Pair<String, Object>> props =
                statement.getProps().get(QueryConstants.ALL_FAMILY_PROPERTIES_KEY);
            // Somewhat hacky way of determining if property is for HColumnDescriptor or
            // HTableDescriptor
            HColumnDescriptor defaultDescriptor =
                new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
            for (Pair<String, Object> prop : props) {
                if (defaultDescriptor.getValue(prop.getFirst()) != null) {
                    commonFamilyProps.put(prop.getFirst(), prop.getSecond());
                } else {
                    tableProps.put(prop.getFirst(), prop.getSecond());
                }
            }
        }

        for (PName familyName : familyNames.values()) {
            Collection<Pair<String, Object>> props = statement.getProps().get(familyName.getString());
            if (props.isEmpty()) {
                familyPropList.add(
                    new Pair<byte[], Map<String, Object>>(familyName.getBytes(), commonFamilyProps));
            } else {
                Map<String, Object> combinedFamilyProps =
                    Maps.newHashMapWithExpectedSize(props.size() + commonFamilyProps.size());
                combinedFamilyProps.putAll(commonFamilyProps);
                for (Pair<String, Object> prop : props) {
                    combinedFamilyProps.put(prop.getFirst(), prop.getSecond());
                }
                familyPropList.add(
                    new Pair<byte[], Map<String, Object>>(familyName.getBytes(), combinedFamilyProps));
            }
        }

        // Bootstrapping for our SYSTEM.TABLE that creates itself before it exists
        if (tableType == PTableType.SYSTEM) {
            PTable table = new PTableImpl(
                new PNameImpl(tableName),
                tableType,
                MetaDataProtocol.MIN_TABLE_TIMESTAMP,
                0,
                QueryConstants.SYSTEM_TABLE_PK_NAME,
                null,
                columns);
            connection.addTable(schemaName, table);
        }

        for (PColumn column : columns) {
            addColumnMutation(schemaName, tableName, column, colUpsert);
        }

        Integer saltBucketNum = (Integer) tableProps.remove(PhoenixDatabaseMetaData.SALT_BUCKETS);
        if (saltBucketNum != null
                && (saltBucketNum <= 0 || saltBucketNum > SaltingUtil.MAX_BUCKET_NUM)) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_BUCKET_NUM)
                .build()
                .buildException();
        }

        PreparedStatement tableUpsert = connection.prepareStatement(CREATE_TABLE);
        tableUpsert.setString(1, schemaName);
        tableUpsert.setString(2, tableName);
        tableUpsert.setString(3, tableType.getSerializedValue());
        tableUpsert.setInt(4, 0);
        tableUpsert.setInt(5, columnOrdinal);
        if (saltBucketNum != null) {
            tableUpsert.setInt(6, saltBucketNum);
        } else {
            tableUpsert.setNull(6, Types.INTEGER);
        }
        tableUpsert.setString(7, pkName);
        tableUpsert.execute();

        final List<Mutation> tableMetaData = connection.getMutationState().toMutations();
        connection.rollback();

        MetaDataMutationResult result = connection
            .getQueryServices()
            .createTable(tableMetaData, isView, tableProps, familyPropList, splits);
        MutationCode code = result.getMutationCode();
        switch (code) {
            case TABLE_ALREADY_EXISTS:
                connection.addTable(schemaName, result.getTable());
                if (!statement.ifNotExists()) {
                    throw new TableAlreadyExistsException(schemaName, tableName);
                }
                break;
            case NEWER_TABLE_FOUND:
                // TODO: add table if in result?
                throw new NewerTableAlreadyExistsException(schemaName, tableName);
            case UNALLOWED_TABLE_MUTATION:
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE)
                    .setSchemaName(schemaName)
                    .setTableName(tableName)
                    .build()
                    .buildException();
            default:
                PTable table = new PTableImpl(
                    new PNameImpl(tableName),
                    tableType,
                    result.getMutationTime(),
                    0,
                    pkName,
                    saltBucketNum,
                    columns);
                connection.addTable(schemaName, table);
                if (tableType == PTableType.USER) {
                    connection.setAutoCommit(true);
                    // Delete everything in the column. You'll still be able to do queries at
                    // earlier timestamps.
                    Long scn = connection.getSCN();
                    long ts = (scn == null ? result.getMutationTime() : scn);
                    PSchema schema = new PSchemaImpl(
                        schemaName,
                        ImmutableMap.<String, PTable>of(table.getName().getString(), table));
                    TableRef tableRef = new TableRef(null, table, schema, ts);
                    byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(table.getColumnFamilies());
                    MutationPlan plan = new PostDDLCompiler(connection).compile(tableRef, emptyCF, null, ts);
                    return connection.getQueryServices().updateData(plan);
                }
                break;
        }
        return new MutationState(0, connection);
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
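For orientation, a hedged sketch of how this code path is typically reached: createTable() backs a CREATE TABLE statement issued through the Phoenix JDBC driver. The JDBC URL, schema and table names, and the property choices below are illustrative assumptions, not taken from the code above.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class CreateTableExample {
    public static void main(String[] args) throws Exception {
        // Illustrative connection string; the actual URL depends on the deployment.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // SALT_BUCKETS feeds the saltBucketNum handling above; a per-family
            // property (a.VERSIONS) would end up in familyPropList, the rest in tableProps.
            stmt.execute(
                "CREATE TABLE IF NOT EXISTS example_schema.example_table (\n"
                + "  id VARCHAR NOT NULL PRIMARY KEY,\n"
                + "  a.val VARCHAR\n"
                + ") SALT_BUCKETS=4, a.VERSIONS=3");
        }
    }
}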
@Override
public String toString() {
    return name.getString();
}
@Override
public int newKey(ImmutableBytesWritable key, byte[][] values) {
    int i = 0;
    TrustedByteArrayOutputStream os =
        new TrustedByteArrayOutputStream(SchemaUtil.estimateKeyLength(this));
    try {
        List<PColumn> columns = getPKColumns();
        int nColumns = columns.size();
        PColumn lastPKColumn = columns.get(nColumns - 1);
        while (i < values.length && i < nColumns) {
            PColumn column = columns.get(i);
            PDataType type = column.getDataType();
            // This will throw if the value is null and the type doesn't allow null
            byte[] byteValue = values[i++];
            if (byteValue == null) {
                byteValue = ByteUtil.EMPTY_BYTE_ARRAY;
            }
            // An empty byte array return value means null. Do this,
            // since a type may have multiple representations of null.
            // For example, VARCHAR treats both null and an empty string
            // as null. This way we don't need to leak that part of the
            // implementation outside of PDataType by checking the value
            // here.
            if (byteValue.length == 0 && !column.isNullable()) {
                throw new ConstraintViolationException(
                    name.getString() + "." + column.getName().getString() + " may not be null");
            }
            Integer byteSize = column.getByteSize();
            if (type.isFixedWidth()) { // TODO: handle multi-byte characters
                if (byteValue.length != byteSize) {
                    throw new ConstraintViolationException(
                        name.getString() + "." + column.getName().getString()
                            + " must be " + byteSize + " bytes ("
                            + SchemaUtil.toString(type, byteValue) + ")");
                }
            } else if (byteSize != null && byteValue.length > byteSize) {
                throw new ConstraintViolationException(
                    name.getString() + "." + column.getName().getString()
                        + " may not exceed " + byteSize + " bytes ("
                        + SchemaUtil.toString(type, byteValue) + ")");
            }
            os.write(byteValue, 0, byteValue.length);
            // Separate variable length column values in key with zero byte
            if (!type.isFixedWidth() && column != lastPKColumn) {
                os.write(SEPARATOR_BYTE);
            }
        }
        // If some non-null PK values aren't set, then throw
        if (i < nColumns) {
            PColumn column = columns.get(i);
            PDataType type = column.getDataType();
            if (type.isFixedWidth() || !column.isNullable()) {
                throw new ConstraintViolationException(
                    name.getString() + "." + column.getName().getString() + " may not be null");
            }
            // Separate variable length column values in key with zero byte
            if (column != lastPKColumn) {
                os.write(SEPARATOR_BYTE);
            }
        }
        key.set(os.getBuffer(), 0, os.size());
        return i;
    } finally {
        try {
            os.close();
        } catch (IOException e) {
            throw new RuntimeException(e); // Impossible
        }
    }
}
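To make the key layout concrete, here is a small standalone illustration (plain Java, not Phoenix API) of what newKey() produces for a two-column primary key: values are concatenated in PK order, and each variable-length value that is not the last PK column is followed by a zero separator byte. The column names, types, and the fixed-width encoding of the second value are made-up examples.

import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;

public class RowKeyLayoutExample {
    private static final byte SEPARATOR_BYTE = 0;

    public static void main(String[] args) {
        // Hypothetical PK (host VARCHAR, ts BIGINT): the VARCHAR is variable-width and not
        // the last PK column, so it is followed by a separator; the fixed-width value is not.
        byte[] host = "web1".getBytes(StandardCharsets.UTF_8);
        byte[] ts = new byte[] {0, 0, 0, 0, 0, 0, 0, 42}; // stand-in for an 8-byte fixed-width value

        ByteArrayOutputStream key = new ByteArrayOutputStream();
        key.write(host, 0, host.length);
        key.write(SEPARATOR_BYTE);
        key.write(ts, 0, ts.length);

        System.out.println("row key length = " + key.size()); // 4 + 1 + 8 = 13
    }
}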