private static void setValues(
     byte[][] values,
     int[] pkSlotIndex,
     int[] columnIndexes,
     PTable table,
     Map<ImmutableBytesPtr, Map<PColumn, byte[]>> mutation) {
   Map<PColumn, byte[]> columnValues = Maps.newHashMapWithExpectedSize(columnIndexes.length);
   byte[][] pkValues = new byte[table.getPKColumns().size()][];
    // If the table uses salting, the first byte is the salt byte; set a placeholder
    // byte here and fill in the actual value later in PRowImpl.
   if (table.getBucketNum() != null) {
     pkValues[0] = new byte[] {0};
   }
   for (int i = 0; i < values.length; i++) {
     byte[] value = values[i];
     PColumn column = table.getColumns().get(columnIndexes[i]);
     if (SchemaUtil.isPKColumn(column)) {
       pkValues[pkSlotIndex[i]] = value;
     } else {
       columnValues.put(column, value);
     }
   }
   ImmutableBytesPtr ptr = new ImmutableBytesPtr();
   table.newKey(ptr, pkValues);
   mutation.put(ptr, columnValues);
 }
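
The single zero byte written into pkValues[0] above is only a placeholder; the actual salt byte is computed later from the assembled row key. Below is a rough, dependency-free sketch of the salting idea. Phoenix's real logic lives in SaltingUtil and may differ; the hash function here is purely an assumption for illustration.

import java.util.Arrays;

// Illustrative sketch only: a salt byte is derived from a hash of the row key
// modulo the bucket count, then prepended so writes spread across regions.
public class SaltSketch {
  static byte saltByte(byte[] rowKey, int numBuckets) {
    // Assumption: any stable hash works for this demo; Phoenix uses its own.
    int hash = Arrays.hashCode(rowKey);
    return (byte) Math.abs(hash % numBuckets);
  }

  public static void main(String[] args) {
    byte[] key = "row-1".getBytes();
    byte[] salted = new byte[key.length + 1];
    salted[0] = saltByte(key, 4); // fills the placeholder slot left by setValues
    System.arraycopy(key, 0, salted, 1, key.length);
    System.out.println(Arrays.toString(salted));
  }
}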
Example #2
 @Override
 public List<Mutation>
     toRowMutations() { // TODO: change to List<Mutation> once it implements Row
   List<Mutation> mutations = new ArrayList<Mutation>(3);
   if (deleteRow != null) {
      // Include only the deleteRow mutation if present, because it takes precedence over all others
     mutations.add(deleteRow);
   } else {
      // Because we cannot enforce a NOT NULL constraint on a KV column (since we don't
      // know if the row exists when we upsert it), we instead add a KV that is always
      // empty. This allows us to imitate SQL semantics given the way HBase works.
     setValues.add(
         SchemaUtil.getEmptyColumnFamily(getColumnFamilies()),
         QueryConstants.EMPTY_COLUMN_BYTES,
         ts,
         ByteUtil.EMPTY_BYTE_ARRAY);
     mutations.add(setValues);
     if (!unsetValues.isEmpty()) {
       mutations.add(unsetValues);
     }
   }
   return mutations;
 }
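
The always-empty KV mentioned in the comment above is worth a standalone illustration: HBase has no notion of a row that exists with all columns null, so a marker cell must be written on every upsert to keep the row visible to scans. A minimal sketch using the same 0.94-era client API as these examples; the family and qualifier names are placeholders, not Phoenix's actual constants.

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class EmptyKvSketch {
  static final byte[] FAMILY = Bytes.toBytes("0"); // assumed empty column family
  static final byte[] EMPTY_QUALIFIER = Bytes.toBytes("_0"); // assumed marker qualifier

  static Put markRowExists(byte[] rowKey, long ts) {
    Put put = new Put(rowKey);
    // Even if every SQL column is null, this cell keeps the row visible to scans.
    put.add(FAMILY, EMPTY_QUALIFIER, ts, new byte[0]);
    return put;
  }
}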
Example #3
  @Override
  protected ResultIterator newIterator() throws SQLException {
    // Hack to set state on scan to make upgrade happen
    int upgradeColumnCount =
        SchemaUtil.upgradeColumnCount(
            context.getConnection().getURL(), context.getConnection().getClientInfo());
    if (upgradeColumnCount > 0) {
      context.getScan().setAttribute(SchemaUtil.UPGRADE_TO_2_0, Bytes.toBytes(upgradeColumnCount));
    }
    if (groupBy.isEmpty()) {
      UngroupedAggregateRegionObserver.serializeIntoScan(context.getScan());
    }
    ParallelIterators parallelIterators =
        new ParallelIterators(
            context, tableRef, statement, projection, groupBy, null, wrapParallelIteratorFactory());
    splits = parallelIterators.getSplits();

    AggregatingResultIterator aggResultIterator;
    // No need to merge sort for ungrouped aggregation
    if (groupBy.isEmpty()) {
      aggResultIterator =
          new UngroupedAggregatingResultIterator(
              new ConcatResultIterator(parallelIterators), aggregators);
    } else {
      aggResultIterator =
          new GroupedAggregatingResultIterator(
              new MergeSortRowKeyResultIterator(parallelIterators), aggregators);
    }

    if (having != null) {
      aggResultIterator = new FilterAggregatingResultIterator(aggResultIterator, having);
    }

    if (statement.isDistinct()
        && statement.isAggregate()) { // Dedup on client if select distinct and aggregation
      aggResultIterator = new DistinctAggregatingResultIterator(aggResultIterator, getProjector());
    }

    ResultIterator resultScanner = aggResultIterator;
    if (orderBy.getOrderByExpressions().isEmpty()) {
      if (limit != null) {
        resultScanner = new LimitingResultIterator(aggResultIterator, limit);
      }
    } else {
      int thresholdBytes =
          getConnectionQueryServices(context.getConnection().getQueryServices())
              .getProps()
              .getInt(
                  QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB,
                  QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
      resultScanner =
          new OrderedAggregatingResultIterator(
              aggResultIterator, orderBy.getOrderByExpressions(), thresholdBytes, limit);
    }

    if (context.getSequenceManager().getSequenceCount() > 0) {
      resultScanner = new SequenceResultIterator(resultScanner, context.getSequenceManager());
    }
    return resultScanner;
  }
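
newIterator() above builds the final scanner by stacking decorators: aggregation, then HAVING filtering, then client-side dedup, then limiting or ordering, then sequence handling. Each wrapper adds one post-processing step without the layers knowing about each other. A dependency-free sketch of one such decorator, with hypothetical names:

import java.util.Iterator;
import java.util.List;

interface RowIterator extends Iterator<List<Object>> {}

// Mirrors the role of LimitingResultIterator above: stops delegating once the
// limit is exhausted, leaving the wrapped iterator untouched otherwise.
class LimitingIterator implements RowIterator {
  private final RowIterator delegate;
  private int remaining;

  LimitingIterator(RowIterator delegate, int limit) {
    this.delegate = delegate;
    this.remaining = limit;
  }

  @Override
  public boolean hasNext() {
    return remaining > 0 && delegate.hasNext();
  }

  @Override
  public List<Object> next() {
    remaining--;
    return delegate.next();
  }
}
// Usage mirrors the chain above: RowIterator it = new LimitingIterator(aggIt, 10);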
 @Override
 public MetaDataMutationResult updateIndexState(
     List<Mutation> tableMetadata, String parentTableName) throws SQLException {
   byte[][] rowKeyMetadata = new byte[3][];
   SchemaUtil.getVarChars(tableMetadata.get(0).getRow(), rowKeyMetadata);
   KeyValue newKV = tableMetadata.get(0).getFamilyMap().get(TABLE_FAMILY_BYTES).get(0);
   PIndexState newState =
       PIndexState.fromSerializedValue(newKV.getBuffer()[newKV.getValueOffset()]);
   String schemaName = Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]);
   String indexName = Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]);
   String indexTableName = SchemaUtil.getTableName(schemaName, indexName);
   PTable index = metaData.getTable(indexTableName);
   index =
       PTableImpl.makePTable(
           index,
           newState == PIndexState.USABLE
               ? PIndexState.ACTIVE
               : newState == PIndexState.UNUSABLE ? PIndexState.INACTIVE : newState);
   return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, index);
 }
Example #6
  public MutationState dropColumn(DropColumnStatement statement) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
      connection.setAutoCommit(false);
      TableName tableNameNode = statement.getTableName();
      String schemaName = tableNameNode.getSchemaName();
      String tableName = tableNameNode.getTableName();
      PTable table = getLatestTable(schemaName, tableName); // TODO: Do in resolver?
      boolean retried = false;
      while (true) {
        final ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
        ColumnRef columnRef = null;
        try {
          columnRef = resolver.resolveColumn((ColumnParseNode) statement.getColumnRef());
        } catch (ColumnNotFoundException e) {
          if (statement.ifExists()) {
            return new MutationState(0, connection);
          }
          throw e;
        }
        TableRef tableRef = columnRef.getTableRef();
        PColumn columnToDrop = columnRef.getColumn();
        if (SchemaUtil.isPKColumn(columnToDrop)) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_PK)
              .setColumnName(columnToDrop.getName().getString())
              .build()
              .buildException();
        }
        int columnCount = table.getColumns().size() - 1;
        String familyName = null;
        List<String> binds = Lists.newArrayListWithExpectedSize(4);
        StringBuilder buf =
            new StringBuilder(
                "DELETE FROM " + TYPE_SCHEMA + ".\"" + TYPE_TABLE + "\" WHERE " + TABLE_SCHEM_NAME);
        if (schemaName == null || schemaName.length() == 0) {
          buf.append(" IS NULL AND ");
        } else {
          buf.append(" = ? AND ");
          binds.add(schemaName);
        }
        buf.append(TABLE_NAME_NAME + " = ? AND " + COLUMN_NAME + " = ? AND " + TABLE_CAT_NAME);
        binds.add(tableName);
        binds.add(columnToDrop.getName().getString());
        if (columnToDrop.getFamilyName() == null) {
          buf.append(" IS NULL");
        } else {
          buf.append(" = ?");
          binds.add(familyName = columnToDrop.getFamilyName().getString());
        }

        PreparedStatement colDelete = connection.prepareStatement(buf.toString());
        for (int i = 0; i < binds.size(); i++) {
          colDelete.setString(i + 1, binds.get(i));
        }
        colDelete.execute();

        PreparedStatement colUpdate = connection.prepareStatement(UPDATE_COLUMN_POSITION);
        colUpdate.setString(1, schemaName);
        colUpdate.setString(2, tableName);
        for (int i = columnToDrop.getPosition() + 1; i < table.getColumns().size(); i++) {
          PColumn column = table.getColumns().get(i);
          colUpdate.setString(3, column.getName().getString());
          colUpdate.setString(
              4, column.getFamilyName() == null ? null : column.getFamilyName().getString());
          colUpdate.setInt(5, i);
          colUpdate.execute();
        }
        final long seqNum = table.getSequenceNumber() + 1;
        PreparedStatement tableUpsert = connection.prepareStatement(MUTATE_TABLE);
        tableUpsert.setString(1, schemaName);
        tableUpsert.setString(2, tableName);
        tableUpsert.setString(3, table.getType().getSerializedValue());
        tableUpsert.setLong(4, seqNum);
        tableUpsert.setInt(5, columnCount);
        tableUpsert.execute();

        final List<Mutation> tableMetaData = connection.getMutationState().toMutations();
        connection.rollback();
        // If we're dropping the last KV column, we have to pass an indication along to
        // the dropColumn call to populate a new empty KV column
        byte[] emptyCF = null;
        if (table.getType() != PTableType.VIEW
            && !SchemaUtil.isPKColumn(columnToDrop)
            && table.getColumnFamilies().get(0).getName().equals(columnToDrop.getFamilyName())
            && table.getColumnFamilies().get(0).getColumns().size() == 1) {
          emptyCF =
              SchemaUtil.getEmptyColumnFamily(
                  table.getColumnFamilies().subList(1, table.getColumnFamilies().size()));
        }
        MetaDataMutationResult result =
            connection
                .getQueryServices()
                .dropColumn(
                    tableMetaData,
                    emptyCF != null
                            && Bytes.compareTo(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES) == 0
                        ? emptyCF
                        : null);
        try {
          MutationCode code = processMutationResult(schemaName, tableName, result);
          if (code == MutationCode.COLUMN_NOT_FOUND) {
            connection.addTable(schemaName, result.getTable());
            if (!statement.ifExists()) {
              throw new ColumnNotFoundException(
                  schemaName, tableName, familyName, columnToDrop.getName().getString());
            }
            return new MutationState(0, connection);
          }
          connection.removeColumn(
              schemaName,
              tableName,
              familyName,
              columnToDrop.getName().getString(),
              seqNum,
              result.getMutationTime());
          // If we have a VIEW, then only delete the metadata, and leave the table data alone
          if (table.getType() != PTableType.VIEW) {
            connection.setAutoCommit(true);
            Long scn = connection.getSCN();
            // Delete everything in the column. You'll still be able to do queries at earlier
            // timestamps
            long ts = (scn == null ? result.getMutationTime() : scn);
            MutationPlan plan =
                new PostDDLCompiler(connection)
                    .compile(tableRef, emptyCF, Collections.singletonList(columnToDrop), ts);
            return connection.getQueryServices().updateData(plan);
          }
          return new MutationState(0, connection);
        } catch (ConcurrentTableMutationException e) {
          if (retried) {
            throw e;
          }
          table = connection.getPMetaData().getSchema(schemaName).getTable(tableName);
          retried = true;
        }
      }
    } finally {
      connection.setAutoCommit(wasAutoCommit);
    }
  }
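
dropColumn above and addColumn below share the same optimistic concurrency pattern: attempt the metadata mutation, and on ConcurrentTableMutationException refresh the cached table and retry exactly once. Reduced to a skeleton with stand-in types (the exception name is Phoenix's; everything else here is hypothetical):

final class RetryOnceSketch {
  static class ConcurrentTableMutationException extends RuntimeException {}

  interface Metadata {} // stand-in for PTable

  interface Store {
    Object mutate(Metadata m) throws ConcurrentTableMutationException;

    Metadata reload(); // stand-in for re-reading the latest table metadata
  }

  static Object mutateWithRetry(Store store, Metadata table) {
    boolean retried = false;
    while (true) {
      try {
        return store.mutate(table);
      } catch (ConcurrentTableMutationException e) {
        if (retried) {
          throw e; // second conflict: surface the error
        }
        table = store.reload(); // refresh and try once more
        retried = true;
      }
    }
  }
}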
Example #7
  public MutationState addColumn(AddColumnStatement statement) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
      connection.setAutoCommit(false);
      TableName tableNameNode = statement.getTableName();
      String schemaName = tableNameNode.getSchemaName();
      String tableName = tableNameNode.getTableName();

      PTable table = getLatestTable(schemaName, tableName);
      PSchema schema = connection.getPMetaData().getSchema(schemaName);
      boolean retried = false;
      while (true) {
        int ordinalPosition = table.getColumns().size();

        // TODO: disallow adding columns if last column is fixed width and nullable
        List<PColumn> columns = Lists.newArrayListWithExpectedSize(1);
        ColumnDef colDef = statement.getColumnDef();
        if (!colDef.isNull() && colDef.isPK()) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.NOT_NULLABLE_COLUMN_IN_ROW_KEY)
              .setColumnName(colDef.getColumnDefName().getColumnName().getName())
              .build()
              .buildException();
        }

        PreparedStatement colUpsert = connection.prepareStatement(INSERT_COLUMN);
        Pair<byte[], Map<String, Object>> family = null;
        PColumn column = newColumn(ordinalPosition++, colDef, null);
        addColumnMutation(schemaName, tableName, column, colUpsert);
        columns.add(column);
        if (column.getFamilyName() != null) {
          family =
              new Pair<byte[], Map<String, Object>>(
                  column.getFamilyName().getBytes(), statement.getProps());
        }
        final long seqNum = table.getSequenceNumber() + 1;
        PreparedStatement tableUpsert = connection.prepareStatement(MUTATE_TABLE);
        tableUpsert.setString(1, schemaName);
        tableUpsert.setString(2, tableName);
        tableUpsert.setString(3, table.getType().getSerializedValue());
        tableUpsert.setLong(4, seqNum);
        tableUpsert.setInt(5, ordinalPosition);
        tableUpsert.execute();

        final List<Mutation> tableMetaData = connection.getMutationState().toMutations();
        connection.rollback();
        byte[] emptyCF = null;
        if (table.getType() != PTableType.VIEW
            && family != null
            && table.getColumnFamilies().isEmpty()) {
          emptyCF = family.getFirst();
        }
        MetaDataMutationResult result =
            connection
                .getQueryServices()
                .addColumn(tableMetaData, table.getType() == PTableType.VIEW, family);
        try {
          MutationCode code = processMutationResult(schemaName, tableName, result);
          if (code == MutationCode.COLUMN_ALREADY_EXISTS) {
            connection.addTable(schemaName, result.getTable());
            if (!statement.ifNotExists()) {
              throw new ColumnAlreadyExistsException(
                  schemaName, tableName, SchemaUtil.findExistingColumn(result.getTable(), columns));
            }
            return new MutationState(0, connection);
          }
          connection.addColumn(schemaName, tableName, columns, seqNum, result.getMutationTime());
          if (emptyCF != null) {
            Long scn = connection.getSCN();
            connection.setAutoCommit(true);
            // Delete everything in the column. You'll still be able to do queries at earlier
            // timestamps
            long ts = (scn == null ? result.getMutationTime() : scn);
            TableRef tableRef = new TableRef(null, table, schema, ts);
            MutationPlan plan =
                new PostDDLCompiler(connection).compile(tableRef, emptyCF, null, ts);
            return connection.getQueryServices().updateData(plan);
          }
          return new MutationState(0, connection);
        } catch (ConcurrentTableMutationException e) {
          if (retried) {
            throw e;
          }
          table = connection.getPMetaData().getSchema(schemaName).getTable(tableName);
          retried = true;
        }
      }
    } finally {
      connection.setAutoCommit(wasAutoCommit);
    }
  }
Example #8
 public MutationState dropTable(DropTableStatement statement) throws SQLException {
   connection.rollback();
   boolean wasAutoCommit = connection.getAutoCommit();
   try {
     TableName tableNameNode = statement.getTableName();
     String schemaName = tableNameNode.getSchemaName();
     String tableName = tableNameNode.getTableName();
     byte[] key = SchemaUtil.getTableKey(schemaName, tableName);
     Long scn = connection.getSCN();
     @SuppressWarnings(
         "deprecation") // FIXME: Remove when unintentionally deprecated method is fixed
                        // (HBASE-7870).
     // FIXME: the version of the Delete constructor without the lock args was introduced
     // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
     // of the client.
     List<Mutation> tableMetaData =
         Collections.<Mutation>singletonList(
             new Delete(key, scn == null ? HConstants.LATEST_TIMESTAMP : scn, null));
     MetaDataMutationResult result =
         connection.getQueryServices().dropTable(tableMetaData, statement.isView());
     MutationCode code = result.getMutationCode();
     switch (code) {
       case TABLE_NOT_FOUND:
         if (!statement.ifExists()) {
           throw new TableNotFoundException(schemaName, tableName);
         }
         break;
       case NEWER_TABLE_FOUND:
         throw new NewerTableAlreadyExistsException(schemaName, tableName);
       case UNALLOWED_TABLE_MUTATION:
         throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE)
             .setSchemaName(schemaName)
             .setTableName(tableName)
             .build()
             .buildException();
       default:
         try {
           connection.removeTable(schemaName, tableName);
         } catch (TableNotFoundException e) { // Ignore - just means wasn't cached
         }
         if (!statement.isView()) {
           connection.setAutoCommit(true);
           // Delete everything in the column. You'll still be able to do queries at earlier
           // timestamps
           long ts = (scn == null ? result.getMutationTime() : scn);
           // Create empty table and schema - they're only used to get the name from
           // PName name, PTableType type, long timeStamp, long sequenceNumber, List<PColumn>
           // columns
           PTable table = result.getTable();
           PSchema schema =
               new PSchemaImpl(
                   schemaName,
                   ImmutableMap.<String, PTable>of(table.getName().getString(), table));
           TableRef tableRef = new TableRef(null, table, schema, ts);
           MutationPlan plan =
               new PostDDLCompiler(connection)
                   .compile(tableRef, null, Collections.<PColumn>emptyList(), ts);
           return connection.getQueryServices().updateData(plan);
         }
         break;
     }
     return new MutationState(0, connection);
   } finally {
     connection.setAutoCommit(wasAutoCommit);
   }
 }
Example #9
  public MutationState createTable(CreateTableStatement statement, byte[][] splits)
      throws SQLException {
    PTableType tableType = statement.getTableType();
    boolean isView = tableType == PTableType.VIEW;
    if (isView && !statement.getProps().isEmpty()) {
      throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_TABLE_CONFIG)
          .build()
          .buildException();
    }
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
      connection.setAutoCommit(false);
      TableName tableNameNode = statement.getTableName();
      String schemaName = tableNameNode.getSchemaName();
      String tableName = tableNameNode.getTableName();

      PrimaryKeyConstraint pkConstraint = statement.getPrimaryKeyConstraint();
      String pkName = null;
      Set<String> pkColumns = Collections.<String>emptySet();
      Iterator<String> pkColumnsIterator = Iterators.emptyIterator();
      if (pkConstraint != null) {
        pkColumns = pkConstraint.getColumnNames();
        pkColumnsIterator = pkColumns.iterator();
        pkName = pkConstraint.getName();
      }

      List<ColumnDef> colDefs = statement.getColumnDefs();
      List<PColumn> columns = Lists.newArrayListWithExpectedSize(colDefs.size());
      PreparedStatement colUpsert = connection.prepareStatement(INSERT_COLUMN);
      int columnOrdinal = 0;
      Map<String, PName> familyNames = Maps.newLinkedHashMap();
      boolean isPK = false;
      for (ColumnDef colDef : colDefs) {
        if (colDef.isPK()) {
          if (isPK) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_ALREADY_EXISTS)
                .setColumnName(colDef.getColumnDefName().getColumnName().getName())
                .build()
                .buildException();
          }
          isPK = true;
        }
        PColumn column = newColumn(columnOrdinal++, colDef, pkConstraint);
        if (SchemaUtil.isPKColumn(column)) {
          // TODO: remove this constraint?
          if (!pkColumns.isEmpty()
              && !column.getName().getString().equals(pkColumnsIterator.next())) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_OUT_OF_ORDER)
                .setSchemaName(schemaName)
                .setTableName(tableName)
                .setColumnName(column.getName().getString())
                .build()
                .buildException();
          }
        }
        columns.add(column);
        if (colDef.getDataType() == PDataType.BINARY && colDefs.size() > 1) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.BINARY_IN_ROW_KEY)
              .setSchemaName(schemaName)
              .setTableName(tableName)
              .setColumnName(column.getName().getString())
              .build()
              .buildException();
        }
        if (column.getFamilyName() != null) {
          familyNames.put(column.getFamilyName().getString(), column.getFamilyName());
        }
      }
      if (!isPK && pkColumns.isEmpty()) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_MISSING)
            .setSchemaName(schemaName)
            .setTableName(tableName)
            .build()
            .buildException();
      }

      List<Pair<byte[], Map<String, Object>>> familyPropList =
          Lists.newArrayListWithExpectedSize(familyNames.size());
      Map<String, Object> commonFamilyProps = Collections.emptyMap();
      Map<String, Object> tableProps = Collections.emptyMap();
      if (!statement.getProps().isEmpty()) {
        if (statement.isView()) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES)
              .build()
              .buildException();
        }
        for (String familyName : statement.getProps().keySet()) {
          if (!familyName.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY)) {
            if (familyNames.get(familyName) == null) {
              throw new SQLExceptionInfo.Builder(SQLExceptionCode.PROPERTIES_FOR_FAMILY)
                  .setFamilyName(familyName)
                  .build()
                  .buildException();
            }
          }
        }
        commonFamilyProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());
        tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());

        Collection<Pair<String, Object>> props =
            statement.getProps().get(QueryConstants.ALL_FAMILY_PROPERTIES_KEY);
        // Somewhat hacky way of determining if property is for HColumnDescriptor or
        // HTableDescriptor
        HColumnDescriptor defaultDescriptor =
            new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
        for (Pair<String, Object> prop : props) {
          if (defaultDescriptor.getValue(prop.getFirst()) != null) {
            commonFamilyProps.put(prop.getFirst(), prop.getSecond());
          } else {
            tableProps.put(prop.getFirst(), prop.getSecond());
          }
        }
      }

      for (PName familyName : familyNames.values()) {
        Collection<Pair<String, Object>> props = statement.getProps().get(familyName.getString());
        if (props.isEmpty()) {
          familyPropList.add(
              new Pair<byte[], Map<String, Object>>(familyName.getBytes(), commonFamilyProps));
        } else {
          Map<String, Object> combinedFamilyProps =
              Maps.newHashMapWithExpectedSize(props.size() + commonFamilyProps.size());
          combinedFamilyProps.putAll(commonFamilyProps);
          for (Pair<String, Object> prop : props) {
            combinedFamilyProps.put(prop.getFirst(), prop.getSecond());
          }
          familyPropList.add(
              new Pair<byte[], Map<String, Object>>(familyName.getBytes(), combinedFamilyProps));
        }
      }

      // Bootstrapping for our SYSTEM.TABLE that creates itself before it exists
      if (tableType == PTableType.SYSTEM) {
        PTable table =
            new PTableImpl(
                new PNameImpl(tableName),
                tableType,
                MetaDataProtocol.MIN_TABLE_TIMESTAMP,
                0,
                QueryConstants.SYSTEM_TABLE_PK_NAME,
                null,
                columns);
        connection.addTable(schemaName, table);
      }

      for (PColumn column : columns) {
        addColumnMutation(schemaName, tableName, column, colUpsert);
      }

      Integer saltBucketNum = (Integer) tableProps.remove(PhoenixDatabaseMetaData.SALT_BUCKETS);
      if (saltBucketNum != null
          && (saltBucketNum <= 0 || saltBucketNum > SaltingUtil.MAX_BUCKET_NUM)) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_BUCKET_NUM)
            .build()
            .buildException();
      }

      PreparedStatement tableUpsert = connection.prepareStatement(CREATE_TABLE);
      tableUpsert.setString(1, schemaName);
      tableUpsert.setString(2, tableName);
      tableUpsert.setString(3, tableType.getSerializedValue());
      tableUpsert.setInt(4, 0);
      tableUpsert.setInt(5, columnOrdinal);
      if (saltBucketNum != null) {
        tableUpsert.setInt(6, saltBucketNum);
      } else {
        tableUpsert.setNull(6, Types.INTEGER);
      }
      tableUpsert.setString(7, pkName);
      tableUpsert.execute();

      final List<Mutation> tableMetaData = connection.getMutationState().toMutations();
      connection.rollback();

      MetaDataMutationResult result =
          connection
              .getQueryServices()
              .createTable(tableMetaData, isView, tableProps, familyPropList, splits);
      MutationCode code = result.getMutationCode();
      switch (code) {
        case TABLE_ALREADY_EXISTS:
          connection.addTable(schemaName, result.getTable());
          if (!statement.ifNotExists()) {
            throw new TableAlreadyExistsException(schemaName, tableName);
          }
          break;
        case NEWER_TABLE_FOUND:
          // TODO: add table if in result?
          throw new NewerTableAlreadyExistsException(schemaName, tableName);
        case UNALLOWED_TABLE_MUTATION:
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE)
              .setSchemaName(schemaName)
              .setTableName(tableName)
              .build()
              .buildException();
        default:
          PTable table =
              new PTableImpl(
                  new PNameImpl(tableName),
                  tableType,
                  result.getMutationTime(),
                  0,
                  pkName,
                  saltBucketNum,
                  columns);
          connection.addTable(schemaName, table);
          if (tableType == PTableType.USER) {
            connection.setAutoCommit(true);
            // Delete everything in the column. You'll still be able to do queries at earlier
            // timestamps
            Long scn = connection.getSCN();
            long ts = (scn == null ? result.getMutationTime() : scn);
            PSchema schema =
                new PSchemaImpl(
                    schemaName,
                    ImmutableMap.<String, PTable>of(table.getName().getString(), table));
            TableRef tableRef = new TableRef(null, table, schema, ts);
            byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(table.getColumnFamilies());
            MutationPlan plan =
                new PostDDLCompiler(connection).compile(tableRef, emptyCF, null, ts);
            return connection.getQueryServices().updateData(plan);
          }
          break;
      }
      return new MutationState(0, connection);
    } finally {
      connection.setAutoCommit(wasAutoCommit);
    }
  }
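
createTable routes each declared property to either the column family or the table descriptor by probing a default HColumnDescriptor for the key (the "somewhat hacky" check in the code above). A map-based sketch of that routing decision; the key names below are assumptions for illustration, not an exhaustive list.

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class PropRoutingSketch {
  public static void main(String[] args) {
    // Stand-in for "keys a default HColumnDescriptor already knows about".
    Set<String> familyKeys = Set.of("TTL", "KEEP_DELETED_CELLS");
    Map<String, Object> declared = Map.of("TTL", 86400, "SALT_BUCKETS", 4);

    Map<String, Object> familyProps = new HashMap<>();
    Map<String, Object> tableProps = new HashMap<>();
    // Known family keys go to the column family; everything else to the table.
    declared.forEach((k, v) -> (familyKeys.contains(k) ? familyProps : tableProps).put(k, v));

    System.out.println("family=" + familyProps + " table=" + tableProps);
  }
}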
/**
 * Tests using native HBase types (i.e. Bytes.toBytes methods) for integers and longs. Phoenix can
 * support this if the numbers are positive.
 *
 * @author jtaylor
 * @since 0.1
 */
public class NativeHBaseTypesTest extends BaseClientManagedTimeTest {
  private static final byte[] HBASE_NATIVE_BYTES =
      SchemaUtil.getTableNameAsBytes(HBASE_NATIVE_SCHEMA_NAME, HBASE_NATIVE);
  private static final byte[] FAMILY_NAME = Bytes.toBytes(SchemaUtil.normalizeIdentifier("1"));
  private static final byte[][] SPLITS = new byte[][] {Bytes.toBytes(20), Bytes.toBytes(30)};
  private static final long ts = nextTimestamp();

  @BeforeClass
  public static void doBeforeTestSetup() throws Exception {
    HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES).getAdmin();
    try {
      try {
        admin.disableTable(HBASE_NATIVE_BYTES);
        admin.deleteTable(HBASE_NATIVE_BYTES);
      } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
      }
      HTableDescriptor descriptor = new HTableDescriptor(HBASE_NATIVE_BYTES);
      HColumnDescriptor columnDescriptor = new HColumnDescriptor(FAMILY_NAME);
      columnDescriptor.setKeepDeletedCells(true);
      descriptor.addFamily(columnDescriptor);
      admin.createTable(descriptor, SPLITS);
      initTableValues();
    } finally {
      admin.close();
    }
  }

  private static void initTableValues() throws Exception {
    ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES);
    HTableInterface hTable =
        services.getTable(SchemaUtil.getTableNameAsBytes(HBASE_NATIVE_SCHEMA_NAME, HBASE_NATIVE));
    try {
      // Insert rows using standard HBase mechanism with standard HBase "types"
      List<Row> mutations = new ArrayList<Row>();
      byte[] family = Bytes.toBytes("1");
      byte[] uintCol = Bytes.toBytes("UINT_COL");
      byte[] ulongCol = Bytes.toBytes("ULONG_COL");
      byte[] key, bKey;
      Put put;

      key = ByteUtil.concat(Bytes.toBytes(10), Bytes.toBytes(100L), Bytes.toBytes("a"));
      put = new Put(key);
      put.add(family, uintCol, ts - 2, Bytes.toBytes(5));
      put.add(family, ulongCol, ts - 2, Bytes.toBytes(50L));
      mutations.add(put);
      put = new Put(key);
      put.add(family, uintCol, ts, Bytes.toBytes(10));
      put.add(family, ulongCol, ts, Bytes.toBytes(100L));
      mutations.add(put);

      bKey = key = ByteUtil.concat(Bytes.toBytes(20), Bytes.toBytes(200L), Bytes.toBytes("b"));
      put = new Put(key);
      put.add(family, uintCol, ts - 4, Bytes.toBytes(5000));
      put.add(family, ulongCol, ts - 4, Bytes.toBytes(50000L));
      mutations.add(put);
      @SuppressWarnings(
          "deprecation") // FIXME: Remove when unintentionally deprecated method is fixed
                         // (HBASE-7870).
      // FIXME: the version of the Delete constructor without the lock args was introduced
      // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
      // of the client.
      Delete del = new Delete(key, ts - 2, null);
      mutations.add(del);
      put = new Put(key);
      put.add(family, uintCol, ts, Bytes.toBytes(2000));
      put.add(family, ulongCol, ts, Bytes.toBytes(20000L));
      mutations.add(put);

      key = ByteUtil.concat(Bytes.toBytes(30), Bytes.toBytes(300L), Bytes.toBytes("c"));
      put = new Put(key);
      put.add(family, uintCol, ts, Bytes.toBytes(3000));
      put.add(family, ulongCol, ts, Bytes.toBytes(30000L));
      mutations.add(put);

      key = ByteUtil.concat(Bytes.toBytes(40), Bytes.toBytes(400L), Bytes.toBytes("d"));
      put = new Put(key);
      put.add(family, uintCol, ts, Bytes.toBytes(4000));
      put.add(family, ulongCol, ts, Bytes.toBytes(40000L));
      mutations.add(put);

      hTable.batch(mutations);

      Result r = hTable.get(new Get(bKey));
      assertFalse(r.isEmpty());
    } finally {
      hTable.close();
    }
    // Create Phoenix table after HBase table was created through the native APIs
    // The timestamp of the table creation must be later than the timestamp of the data
    ensureTableCreated(getUrl(), HBASE_NATIVE, null, ts + 1);
  }

  @Test
  public void testRangeQuery1() throws Exception {
    String query =
        "SELECT uint_key, ulong_key, string_key FROM HBASE_NATIVE WHERE uint_key > 20 and ulong_key >= 400";
    String url =
        PHOENIX_JDBC_URL
            + ";"
            + PhoenixRuntime.CURRENT_SCN_ATTRIB
            + "="
            + (ts + 5); // Run query at timestamp 5
    Properties props = new Properties(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(url, props);
    try {
      PreparedStatement statement = conn.prepareStatement(query);
      ResultSet rs = statement.executeQuery();
      assertTrue(rs.next());
      assertEquals(40, rs.getInt(1));
      assertEquals(400L, rs.getLong(2));
      assertEquals("d", rs.getString(3));
      assertFalse(rs.next());
    } finally {
      conn.close();
    }
  }

  @Test
  public void testRangeQuery2() throws Exception {
    String query =
        "SELECT uint_key, ulong_key, string_key FROM HBASE_NATIVE WHERE uint_key > 20 and uint_key < 40";
    String url =
        PHOENIX_JDBC_URL
            + ";"
            + PhoenixRuntime.CURRENT_SCN_ATTRIB
            + "="
            + (ts + 5); // Run query at timestamp 5
    Properties props = new Properties(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(url, props);
    try {
      PreparedStatement statement = conn.prepareStatement(query);
      ResultSet rs = statement.executeQuery();
      assertTrue(rs.next());
      assertEquals(30, rs.getInt(1));
      assertEquals(300L, rs.getLong(2));
      assertEquals("c", rs.getString(3));
      assertFalse(rs.next());
    } finally {
      conn.close();
    }
  }

  @Test
  public void testRangeQuery3() throws Exception {
    String query =
        "SELECT uint_key, ulong_key, string_key FROM HBASE_NATIVE WHERE ulong_key > 200 and ulong_key < 400";
    String url =
        PHOENIX_JDBC_URL
            + ";"
            + PhoenixRuntime.CURRENT_SCN_ATTRIB
            + "="
            + (ts + 5); // Run query at timestamp 5
    Properties props = new Properties(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(url, props);
    try {
      PreparedStatement statement = conn.prepareStatement(query);
      ResultSet rs = statement.executeQuery();
      assertTrue(rs.next());
      assertEquals(30, rs.getInt(1));
      assertEquals(300L, rs.getLong(2));
      assertEquals("c", rs.getString(3));
      assertFalse(rs.next());
    } finally {
      conn.close();
    }
  }

  @Test
  public void testNegativeAgainstUnsignedNone() throws Exception {
    String query = "SELECT uint_key, ulong_key, string_key FROM HBASE_NATIVE WHERE ulong_key < -1";
    String url =
        PHOENIX_JDBC_URL
            + ";"
            + PhoenixRuntime.CURRENT_SCN_ATTRIB
            + "="
            + (ts + 5); // Run query at timestamp 5
    Properties props = new Properties(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(url, props);
    try {
      PreparedStatement statement = conn.prepareStatement(query);
      ResultSet rs = statement.executeQuery();
      assertFalse(rs.next());
    } finally {
      conn.close();
    }
  }

  @Test
  public void testNegativeAgainstUnsignedAll() throws Exception {
    String query = "SELECT string_key FROM HBASE_NATIVE WHERE ulong_key > -100";
    String url =
        PHOENIX_JDBC_URL
            + ";"
            + PhoenixRuntime.CURRENT_SCN_ATTRIB
            + "="
            + (ts + 5); // Run query at timestamp 5
    Properties props = new Properties(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(url, props);
    try {
      PreparedStatement statement = conn.prepareStatement(query);
      ResultSet rs = statement.executeQuery();
      assertTrue(rs.next());
      assertEquals("a", rs.getString(1));
      assertTrue(rs.next());
      assertEquals("b", rs.getString(1));
      assertTrue(rs.next());
      assertEquals("c", rs.getString(1));
      assertTrue(rs.next());
      assertEquals("d", rs.getString(1));
      assertFalse(rs.next());
    } finally {
      conn.close();
    }
  }

  @Test
  public void testNegativeAddNegativeValue() throws Exception {
    String url =
        PHOENIX_JDBC_URL
            + ";"
            + PhoenixRuntime.CURRENT_SCN_ATTRIB
            + "="
            + (ts + 5); // Run query at timestamp 5
    Properties props = new Properties(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(url, props);
    try {
      PreparedStatement stmt =
          conn.prepareStatement(
              "UPSERT INTO HBASE_NATIVE(uint_key,ulong_key,string_key, uint_col) VALUES(?,?,?,?)");
      stmt.setInt(1, -1);
      stmt.setLong(2, 2L);
      stmt.setString(3, "foo");
      stmt.setInt(4, 3);
      stmt.execute();
      fail();
    } catch (SQLException e) {
      assertTrue(e.getMessage().contains("Type mismatch"));
    }
  }

  @Test
  public void testNegativeCompareNegativeValue() throws Exception {
    String query = "SELECT string_key FROM HBASE_NATIVE WHERE uint_key > 100000";
    String url =
        PHOENIX_JDBC_URL
            + ";"
            + PhoenixRuntime.CURRENT_SCN_ATTRIB
            + "="
            + (ts + 7); // Run query at timestamp 7
    Properties props = new Properties(TEST_PROPERTIES);
    PhoenixConnection conn =
        DriverManager.getConnection(url, props).unwrap(PhoenixConnection.class);
    HTableInterface hTable =
        conn.getQueryServices()
            .getTable(SchemaUtil.getTableNameAsBytes(HBASE_NATIVE_SCHEMA_NAME, HBASE_NATIVE));

    List<Row> mutations = new ArrayList<Row>();
    byte[] family = Bytes.toBytes("1");
    byte[] uintCol = Bytes.toBytes("UINT_COL");
    byte[] ulongCol = Bytes.toBytes("ULONG_COL");
    byte[] key;
    Put put;

    // Need to use native APIs because the Phoenix APIs wouldn't let you insert a
    // negative number for an unsigned type
    key = ByteUtil.concat(Bytes.toBytes(-10), Bytes.toBytes(100L), Bytes.toBytes("e"));
    put = new Put(key);
    // Insert at a later timestamp than the other queries in this test use, so that
    // we don't affect them
    put.add(family, uintCol, ts + 6, Bytes.toBytes(10));
    put.add(family, ulongCol, ts + 6, Bytes.toBytes(100L));
    put.add(family, QueryConstants.EMPTY_COLUMN_BYTES, ts + 6, ByteUtil.EMPTY_BYTE_ARRAY);
    mutations.add(put);
    hTable.batch(mutations);

    // Demonstrates a weakness of HBase Bytes serialization: negative numbers
    // show up as bigger than positive numbers
    PreparedStatement statement = conn.prepareStatement(query);
    ResultSet rs = statement.executeQuery();
    assertTrue(rs.next());
    assertEquals("e", rs.getString(1));
    assertFalse(rs.next());
  }
}
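
testNegativeCompareNegativeValue works because Bytes.toBytes(int) emits two's-complement big-endian bytes and HBase compares row keys as unsigned bytes, so -10 sorts after every non-negative int. A self-contained demonstration of that ordering, without any HBase dependency:

import java.nio.ByteBuffer;

public class ByteOrderDemo {
  // Lexicographic unsigned comparison, the same ordering HBase applies to keys.
  static int compareUnsigned(byte[] a, byte[] b) {
    for (int i = 0; i < a.length; i++) {
      int cmp = (a[i] & 0xFF) - (b[i] & 0xFF);
      if (cmp != 0) {
        return cmp;
      }
    }
    return 0;
  }

  public static void main(String[] args) {
    byte[] neg = ByteBuffer.allocate(4).putInt(-10).array(); // 0xFFFFFFF6
    byte[] pos = ByteBuffer.allocate(4).putInt(100000).array(); // 0x000186A0
    // Prints true: -10 sorts AFTER 100000 under unsigned byte comparison,
    // which is why the row keyed with -10 matches "uint_key > 100000".
    System.out.println(compareUnsigned(neg, pos) > 0);
  }
}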
Example #12
  /**
   * Builds the projection for the scan
   *
   * @param context query context kept between compilation of different query clauses
   * @param statement TODO
   * @param groupBy compiled GROUP BY clause
   * @param targetColumns list of columns, parallel to aliasedNodes, that are being set for an
   *     UPSERT SELECT statement. Used to coerce expression types to the expected target type.
   * @return projector used to access row values during scan
   * @throws SQLException
   */
  public static RowProjector compile(
      StatementContext context,
      SelectStatement statement,
      GroupBy groupBy,
      List<? extends PDatum> targetColumns)
      throws SQLException {
    List<AliasedNode> aliasedNodes = statement.getSelect();
    // Setup projected columns in Scan
    SelectClauseVisitor selectVisitor = new SelectClauseVisitor(context, groupBy);
    List<ExpressionProjector> projectedColumns = new ArrayList<ExpressionProjector>();
    TableRef tableRef = context.getResolver().getTables().get(0);
    PTable table = tableRef.getTable();
    boolean isWildcard = false;
    Scan scan = context.getScan();
    int index = 0;
    List<Expression> projectedExpressions = Lists.newArrayListWithExpectedSize(aliasedNodes.size());
    List<byte[]> projectedFamilies = Lists.newArrayListWithExpectedSize(aliasedNodes.size());
    for (AliasedNode aliasedNode : aliasedNodes) {
      ParseNode node = aliasedNode.getNode();
      // TODO: visitor?
      if (node instanceof WildcardParseNode) {
        if (statement.isAggregate()) {
          ExpressionCompiler.throwNonAggExpressionInAggException(node.toString());
        }
        isWildcard = true;
        if (tableRef.getTable().getType() == PTableType.INDEX
            && ((WildcardParseNode) node).isRewrite()) {
          projectAllIndexColumns(context, tableRef, projectedExpressions, projectedColumns);
        } else {
          projectAllTableColumns(context, tableRef, projectedExpressions, projectedColumns);
        }
      } else if (node instanceof FamilyWildcardParseNode) {
        // Project everything for SELECT cf.*
        // TODO: support cf.* expressions for multiple tables the same way with *.
        String cfName = ((FamilyWildcardParseNode) node).getName();
        // Delay projecting to the scan: when any other column in the column family gets
        // added to the scan, it overwrites the fact that we want to project the entire
        // column family. Instead, we do the projection at the end.
        // TODO: consider having a ScanUtil.addColumn and ScanUtil.addFamily to work
        // around this, as this code depends on this function being the last place where
        // columns are projected (which is currently true, but could change).
        projectedFamilies.add(Bytes.toBytes(cfName));
        if (tableRef.getTable().getType() == PTableType.INDEX
            && ((FamilyWildcardParseNode) node).isRewrite()) {
          projectIndexColumnFamily(
              context, cfName, tableRef, projectedExpressions, projectedColumns);
        } else {
          projectTableColumnFamily(
              context, cfName, tableRef, projectedExpressions, projectedColumns);
        }
      } else {
        Expression expression = node.accept(selectVisitor);
        projectedExpressions.add(expression);
        if (index < targetColumns.size()) {
          PDatum targetColumn = targetColumns.get(index);
          if (targetColumn.getDataType() != expression.getDataType()) {
            PDataType targetType = targetColumn.getDataType();
            // Check if coercion is allowed using the more relaxed isCastableTo check, since
            // we promote INTEGER to LONG during expression evaluation and then convert back
            // to INTEGER on UPSERT SELECT (and we don't have an actual value we can
            // specifically check against).
            if (expression.getDataType() != null
                && !expression.getDataType().isCastableTo(targetType)) {
              throw new ArgumentTypeMismatchException(
                  targetType, expression.getDataType(), "column: " + targetColumn);
            }
            expression = CoerceExpression.create(expression, targetType);
          }
        }
        if (node instanceof BindParseNode) {
          context.getBindManager().addParamMetaData((BindParseNode) node, expression);
        }
        if (!node.isStateless()) {
          if (!selectVisitor.isAggregate() && statement.isAggregate()) {
            ExpressionCompiler.throwNonAggExpressionInAggException(expression.toString());
          }
        }
        String columnAlias =
            aliasedNode.getAlias() != null
                ? aliasedNode.getAlias()
                : SchemaUtil.normalizeIdentifier(aliasedNode.getNode().getAlias());
        boolean isCaseSensitive =
            (columnAlias != null
                    && (aliasedNode.isCaseSensitve() || SchemaUtil.isCaseSensitive(columnAlias)))
                || selectVisitor.isCaseSensitive;
        String name = columnAlias == null ? expression.toString() : columnAlias;
        projectedColumns.add(
            new ExpressionProjector(
                name, table.getName().getString(), expression, isCaseSensitive));
      }
      selectVisitor.reset();
      index++;
    }

    table = context.getCurrentTable().getTable(); // switch to current table for scan projection
    // TODO make estimatedByteSize more accurate by counting the joined columns.
    int estimatedKeySize = table.getRowKeySchema().getEstimatedValueLength();
    int estimatedByteSize = 0;
    for (Map.Entry<byte[], NavigableSet<byte[]>> entry : scan.getFamilyMap().entrySet()) {
      PColumnFamily family = table.getColumnFamily(entry.getKey());
      if (entry.getValue() == null) {
        for (PColumn column : family.getColumns()) {
          Integer byteSize = column.getByteSize();
          estimatedByteSize +=
              SizedUtil.KEY_VALUE_SIZE
                  + estimatedKeySize
                  + (byteSize == null ? RowKeySchema.ESTIMATED_VARIABLE_LENGTH_SIZE : byteSize);
        }
      } else {
        for (byte[] cq : entry.getValue()) {
          PColumn column = family.getColumn(cq);
          Integer byteSize = column.getByteSize();
          estimatedByteSize +=
              SizedUtil.KEY_VALUE_SIZE
                  + estimatedKeySize
                  + (byteSize == null ? RowKeySchema.ESTIMATED_VARIABLE_LENGTH_SIZE : byteSize);
        }
      }
    }

    selectVisitor.compile();
    // Since we don't have the empty key value in read-only tables,
    // we must project everything.
    boolean isProjectEmptyKeyValue =
        table.getType() != PTableType.VIEW && table.getViewType() != ViewType.MAPPED && !isWildcard;
    if (isProjectEmptyKeyValue) {
      for (byte[] family : projectedFamilies) {
        projectColumnFamily(table, scan, family);
      }
    } else {
      /*
       * TODO: this could be optimized by detecting:
       * - if a column is projected that's not in the where clause
       * - if a column is grouped by that's not in the where clause
       * - if we're not using IS NULL or CASE WHEN expressions
       */
      projectAllColumnFamilies(table, scan);
    }
    return new RowProjector(projectedColumns, estimatedByteSize, isProjectEmptyKeyValue);
  }
  public MutationPlan compile(UpsertStatement upsert, List<Object> binds) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    final int maxSize =
        services
            .getProps()
            .getInt(
                QueryServices.MAX_MUTATION_SIZE_ATTRIB,
                QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    final ColumnResolver resolver = FromCompiler.getResolver(upsert, connection);
    final TableRef tableRef = resolver.getTables().get(0);
    PTable table = tableRef.getTable();
    if (table.getType() == PTableType.VIEW) {
      throw new ReadOnlyTableException("Mutations not allowed for a view (" + tableRef + ")");
    }
    Scan scan = new Scan();
    final StatementContext context =
        new StatementContext(connection, resolver, binds, upsert.getBindCount(), scan);
    // Setup array of column indexes parallel to values that are going to be set
    List<ColumnName> columnNodes = upsert.getColumns();
    List<PColumn> allColumns = table.getColumns();

    int[] columnIndexesToBe;
    int[] pkSlotIndexesToBe;
    PColumn[] targetColumns;
    // Allow a full-row upsert if no columns (or only dynamic ones) are specified and the value count matches
    if (columnNodes.isEmpty()
        || columnNodes.size() == upsert.getTable().getDynamicColumns().size()) {
      columnIndexesToBe = new int[allColumns.size()];
      pkSlotIndexesToBe = new int[columnIndexesToBe.length];
      targetColumns = new PColumn[columnIndexesToBe.length];
      int j = table.getBucketNum() == null ? 0 : 1; // Skip over the salting byte.
      for (int i = 0; i < allColumns.size(); i++) {
        columnIndexesToBe[i] = i;
        targetColumns[i] = allColumns.get(i);
        if (SchemaUtil.isPKColumn(allColumns.get(i))) {
          pkSlotIndexesToBe[i] = j++;
        }
      }
    } else {
      columnIndexesToBe = new int[columnNodes.size()];
      pkSlotIndexesToBe = new int[columnIndexesToBe.length];
      targetColumns = new PColumn[columnIndexesToBe.length];
      Arrays.fill(
          columnIndexesToBe,
          -1); // TODO: necessary? So we'll get an AIOB exception if it's not replaced
      Arrays.fill(
          pkSlotIndexesToBe,
          -1); // TODO: necessary? So we'll get an AIOB exception if it's not replaced
      BitSet pkColumnsSet = new BitSet(table.getPKColumns().size());
      for (int i = 0; i < columnNodes.size(); i++) {
        ColumnName colName = columnNodes.get(i);
        ColumnRef ref =
            resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName());
        columnIndexesToBe[i] = ref.getColumnPosition();
        targetColumns[i] = ref.getColumn();
        if (SchemaUtil.isPKColumn(ref.getColumn())) {
          pkColumnsSet.set(pkSlotIndexesToBe[i] = ref.getPKSlotPosition());
        }
      }
      int i = table.getBucketNum() == null ? 0 : 1;
      for (; i < table.getPKColumns().size(); i++) {
        PColumn pkCol = table.getPKColumns().get(i);
        if (!pkColumnsSet.get(i)) {
          if (!pkCol.isNullable()) {
            throw new ConstraintViolationException(
                table.getName().getString()
                    + "."
                    + pkCol.getName().getString()
                    + " may not be null");
          }
        }
      }
    }

    List<ParseNode> valueNodes = upsert.getValues();
    QueryPlan plan = null;
    RowProjector projector = null;
    int nValuesToSet;
    boolean runOnServer = false;
    if (valueNodes == null) {
      SelectStatement select = upsert.getSelect();
      assert (select != null);
      TableRef selectTableRef = FromCompiler.getResolver(select, connection).getTables().get(0);
      boolean sameTable = tableRef.equals(selectTableRef);
      // Pass scan through if same table in upsert and select so that projection is computed
      // correctly
      QueryCompiler compiler =
          new QueryCompiler(connection, 0, sameTable ? scan : new Scan(), targetColumns);
      plan = compiler.compile(select, binds);
      projector = plan.getProjector();
      nValuesToSet = projector.getColumnCount();
      // Cannot auto commit if doing aggregation or topN or salted
      // Salted causes problems because the row may end up living on a different region
      runOnServer =
          plan.getGroupBy() == null
              && sameTable
              && select.getOrderBy().isEmpty()
              && table.getBucketNum() == null;
    } else {
      nValuesToSet = valueNodes.size();
    }
    final QueryPlan queryPlan = plan;
    // Resize down to allow a subset of columns to be specifiable
    if (columnNodes.isEmpty()) {
      columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, nValuesToSet);
      pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, nValuesToSet);
    }

    if (nValuesToSet != columnIndexesToBe.length) {
      throw new SQLExceptionInfo.Builder(SQLExceptionCode.UPSERT_COLUMN_NUMBERS_MISMATCH)
          .setMessage(
              "Numbers of columns: "
                  + columnIndexesToBe.length
                  + ". Number of values: "
                  + nValuesToSet)
          .build()
          .buildException();
    }

    final int[] columnIndexes = columnIndexesToBe;
    final int[] pkSlotIndexes = pkSlotIndexesToBe;

    // TODO: break this up into multiple functions
    ////////////////////////////////////////////////////////////////////
    // UPSERT SELECT
    /////////////////////////////////////////////////////////////////////
    if (valueNodes == null) {
      /* We can run the upsert in a coprocessor if:
       * 1) the into table matches the from table
       * 2) the select query isn't doing aggregation
       * 3) autoCommit is on
       * 4) the query isn't topN
       * Otherwise, run the query to pull the data from the server
       * and populate the MutationState (up to a limit).
       */
      final boolean isAutoCommit = connection.getAutoCommit();
      runOnServer &= isAutoCommit;

      ////////////////////////////////////////////////////////////////////
      // UPSERT SELECT run server-side (maybe)
      /////////////////////////////////////////////////////////////////////
      if (runOnServer) {
        // At most, this array will grow by the number of PK columns
        int[] allColumnsIndexes = Arrays.copyOf(columnIndexes, columnIndexes.length + nValuesToSet);
        int[] reverseColumnIndexes = new int[table.getColumns().size()];
        List<Expression> projectedExpressions =
            Lists.newArrayListWithExpectedSize(reverseColumnIndexes.length);
        Arrays.fill(reverseColumnIndexes, -1);
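        // reverseColumnIndexes maps a table column position back to its slot in
        // projectedExpressions; -1 marks columns that aren't being projected.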
        for (int i = 0; i < nValuesToSet; i++) {
          projectedExpressions.add(projector.getColumnProjector(i).getExpression());
          reverseColumnIndexes[columnIndexes[i]] = i;
        }
        /*
         * Order the projected columns and projected expressions with PK columns
         * leading, in slot position order.
         */
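        // Skip the leading salt byte slot when the table is salted.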
        int offset = table.getBucketNum() == null ? 0 : 1;
        for (int i = 0; i < table.getPKColumns().size() - offset; i++) {
          PColumn column = table.getPKColumns().get(i + offset);
          int pos = reverseColumnIndexes[column.getPosition()];
          if (pos == -1) {
            // Last PK column may be fixed width and nullable
            // We don't want to insert a null expression b/c
            // it's not valid to set a fixed width type to null.
            if (column.getDataType().isFixedWidth()) {
              continue;
            }
            // Add literal null for missing PK columns
            pos = projectedExpressions.size();
            Expression literalNull = LiteralExpression.newConstant(null, column.getDataType());
            projectedExpressions.add(literalNull);
            allColumnsIndexes[pos] = column.getPosition();
          }
          // Swap select expression at pos with i
          Collections.swap(projectedExpressions, i, pos);
          // Swap column indexes and reverse column indexes too
          int tempPos = allColumnsIndexes[i];
          allColumnsIndexes[i] = allColumnsIndexes[pos];
          allColumnsIndexes[pos] = tempPos;
          reverseColumnIndexes[tempPos] = reverseColumnIndexes[i];
          reverseColumnIndexes[i] = i;
        }
        // If any pk slots are changing, be conservative and don't run this server side.
        // If the row ends up living in a different region, we'll get an error otherwise.
        for (int i = 0; i < table.getPKColumns().size(); i++) {
          PColumn column = table.getPKColumns().get(i);
          Expression source = projectedExpressions.get(i);
          if (source == null
              || !source.equals(
                  new ColumnRef(tableRef, column.getPosition()).newColumnExpression())) {
            // TODO: we could check the region boundaries to see if the pk will still be in it.
            runOnServer = false; // bail on running server side, since PK may be changing
            break;
          }
        }

        ////////////////////////////////////////////////////////////////////
        // UPSERT SELECT run server-side
        /////////////////////////////////////////////////////////////////////
        if (runOnServer) {
          // Iterate through columns being projected
          List<PColumn> projectedColumns =
              Lists.newArrayListWithExpectedSize(projectedExpressions.size());
          for (int i = 0; i < projectedExpressions.size(); i++) {
            // Must make new column if position has changed
            PColumn column = allColumns.get(allColumnsIndexes[i]);
            projectedColumns.add(column.getPosition() == i ? column : new PColumnImpl(column, i));
          }
          // Build table from projectedColumns
          PTable projectedTable =
              PTableImpl.makePTable(
                  table.getName(),
                  table.getType(),
                  table.getTimeStamp(),
                  table.getSequenceNumber(),
                  table.getPKName(),
                  table.getBucketNum(),
                  projectedColumns);

          // Remove projection of the empty column, since it can lead to problems when
          // building another projection using this same scan.
          // TODO: move projection code to a later stage, like QueryPlan.newScanner,
          // to prevent having to do this.
          ScanUtil.removeEmptyColumnFamily(context.getScan(), table);
          List<AliasedNode> select =
              Collections.<AliasedNode>singletonList(
                  NODE_FACTORY.aliasedNode(
                      null,
                      NODE_FACTORY.function(
                          CountAggregateFunction.NORMALIZED_NAME, LiteralParseNode.STAR)));
          // Ignore order by - it has no impact
          final RowProjector aggProjector =
              ProjectionCompiler.getRowProjector(
                  context, select, false, GroupBy.EMPTY_GROUP_BY, OrderBy.EMPTY_ORDER_BY, null);
          /*
           * Transfer over the PTable representing the subset of columns selected, but all PK columns.
           * Move the columns setting the PK first, in pkSlot order, adding a LiteralExpression of null for any missing ones.
           * Transfer over the List<Expression> for the projection.
           * In the region scan, evaluate the expressions in order, collecting the first n columns for
           * the PK and collecting the non-PK columns in the mutation Map.
           * Create the PRow and get the mutations, adding them to the batch.
           */
          scan.setAttribute(
              UngroupedAggregateRegionObserver.UPSERT_SELECT_TABLE,
              UngroupedAggregateRegionObserver.serialize(projectedTable));
          scan.setAttribute(
              UngroupedAggregateRegionObserver.UPSERT_SELECT_EXPRS,
              UngroupedAggregateRegionObserver.serialize(projectedExpressions));
          final QueryPlan aggPlan =
              new AggregatePlan(
                  context,
                  tableRef,
                  projector,
                  null,
                  GroupBy.EMPTY_GROUP_BY,
                  false,
                  null,
                  OrderBy.EMPTY_ORDER_BY);
          return new MutationPlan() {

            @Override
            public PhoenixConnection getConnection() {
              return connection;
            }

            @Override
            public ParameterMetaData getParameterMetaData() {
              return context.getBindManager().getParameterMetaData();
            }

            @Override
            public MutationState execute() throws SQLException {
              Scanner scanner = aggPlan.getScanner();
              ResultIterator iterator = scanner.iterator();
              try {
                Tuple row = iterator.next();
                ImmutableBytesWritable ptr = context.getTempPtr();
                final long mutationCount =
                    (Long) aggProjector.getColumnProjector(0).getValue(row, PDataType.LONG, ptr);
                return new MutationState(maxSize, connection) {
                  @Override
                  public long getUpdateCount() {
                    return mutationCount;
                  }
                };
              } finally {
                iterator.close();
              }
            }

            @Override
            public ExplainPlan getExplainPlan() throws SQLException {
              List<String> queryPlanSteps = aggPlan.getExplainPlan().getPlanSteps();
              List<String> planSteps =
                  Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
              planSteps.add("UPSERT ROWS");
              planSteps.addAll(queryPlanSteps);
              return new ExplainPlan(planSteps);
            }
          };
        }
      }

      ////////////////////////////////////////////////////////////////////
      // UPSERT SELECT run client-side
      /////////////////////////////////////////////////////////////////////
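      // Cap each client-side batch at the connection's mutate batch size,
      // never exceeding maxSize.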
      final int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
      return new MutationPlan() {

        @Override
        public PhoenixConnection getConnection() {
          return connection;
        }

        @Override
        public ParameterMetaData getParameterMetaData() {
          return context.getBindManager().getParameterMetaData();
        }

        @Override
        public MutationState execute() throws SQLException {
          byte[][] values = new byte[columnIndexes.length][];
          Scanner scanner = queryPlan.getScanner();
          int estSize = scanner.getEstimatedSize();
          int rowCount = 0;
          Map<ImmutableBytesPtr, Map<PColumn, byte[]>> mutation =
              Maps.newHashMapWithExpectedSize(estSize);
          ResultSet rs = new PhoenixResultSet(scanner, statement);
          PTable table = tableRef.getTable();
          PColumn column;
          while (rs.next()) {
            for (int i = 0; i < values.length; i++) {
              column = table.getColumns().get(columnIndexes[i]);
              byte[] byteValue = rs.getBytes(i + 1);
              Object value = rs.getObject(i + 1);
              int rsPrecision = rs.getMetaData().getPrecision(i + 1);
              Integer precision = rsPrecision == 0 ? null : rsPrecision;
              int rsScale = rs.getMetaData().getScale(i + 1);
              Integer scale = rsScale == 0 ? null : rsScale;
              // If the column being projected into sorts descending, invert the
              // bits of the value so the bytes are stored in descending order.
              if (column.getColumnModifier() == ColumnModifier.SORT_DESC) {
                byte[] tempByteValue = Arrays.copyOf(byteValue, byteValue.length);
                byteValue =
                    ColumnModifier.SORT_DESC.apply(byteValue, tempByteValue, 0, byteValue.length);
              }
              // We are guaranteed that the two columns will have compatible types,
              // as we checked that before.
              if (!column
                  .getDataType()
                  .isSizeCompatible(
                      column.getDataType(),
                      value,
                      byteValue,
                      precision,
                      column.getMaxLength(),
                      scale,
                      column.getScale())) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_INCOMPATIBLE_WITH_TYPE)
                    .setColumnName(column.getName().getString())
                    .build()
                    .buildException();
              }
              values[i] =
                  column
                      .getDataType()
                      .coerceBytes(
                          byteValue,
                          value,
                          column.getDataType(),
                          precision,
                          scale,
                          column.getMaxLength(),
                          column.getScale());
            }
            setValues(values, pkSlotIndexes, columnIndexes, table, mutation);
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size
            if (isAutoCommit && rowCount % batchSize == 0) {
              MutationState state = new MutationState(tableRef, mutation, 0, maxSize, connection);
              connection.getMutationState().join(state);
              connection.commit();
              mutation.clear();
            }
          }
          // If auto commit is true, this last batch will be committed upon return
          return new MutationState(
              tableRef, mutation, rowCount / batchSize * batchSize, maxSize, connection);
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
          List<String> queryPlanSteps = queryPlan.getExplainPlan().getPlanSteps();
          List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
          planSteps.add("UPSERT SELECT");
          planSteps.addAll(queryPlanSteps);
          return new ExplainPlan(planSteps);
        }
      };
    }

    ////////////////////////////////////////////////////////////////////
    // UPSERT VALUES
    /////////////////////////////////////////////////////////////////////
    int nodeIndex = 0;
    // Allocate an array for only the values being set; any columns that
    // aren't specified are left unset (and so must be nullable).
    UpsertValuesCompiler expressionBuilder = new UpsertValuesCompiler(context);
    final byte[][] values = new byte[nValuesToSet][];
    for (ParseNode valueNode : valueNodes) {
      if (!valueNode.isConstant()) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.VALUE_IN_UPSERT_NOT_CONSTANT)
            .build()
            .buildException();
      }
      PColumn column = allColumns.get(columnIndexes[nodeIndex]);
      expressionBuilder.setColumn(column);
      LiteralExpression literalExpression = (LiteralExpression) valueNode.accept(expressionBuilder);
      byte[] byteValue = literalExpression.getBytes();
      if (literalExpression.getDataType() != null) {
        // If the ColumnModifier of the literal expression doesn't match that of
        // the column being upserted into, invert the bits.
        if (literalExpression.getColumnModifier() != column.getColumnModifier()) {
          byte[] tempByteValue = Arrays.copyOf(byteValue, byteValue.length);
          byteValue = ColumnModifier.SORT_DESC.apply(byteValue, tempByteValue, 0, byteValue.length);
        }
        if (!literalExpression
            .getDataType()
            .isCoercibleTo(column.getDataType(), literalExpression.getValue())) {
          throw new TypeMismatchException(
              literalExpression.getDataType(),
              column.getDataType(),
              "expression: " + literalExpression.toString() + " in column " + column);
        }
        if (!column
            .getDataType()
            .isSizeCompatible(
                literalExpression.getDataType(),
                literalExpression.getValue(),
                byteValue,
                literalExpression.getMaxLength(),
                column.getMaxLength(),
                literalExpression.getScale(),
                column.getScale())) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_INCOMPATIBLE_WITH_TYPE)
              .setColumnName(column.getName().getString())
              .setMessage("value=" + literalExpression.toString())
              .build()
              .buildException();
        }
      }
      byteValue =
          column
              .getDataType()
              .coerceBytes(
                  byteValue,
                  literalExpression.getValue(),
                  literalExpression.getDataType(),
                  literalExpression.getMaxLength(),
                  literalExpression.getScale(),
                  column.getMaxLength(),
                  column.getScale());
      values[nodeIndex] = byteValue;
      nodeIndex++;
    }
    return new MutationPlan() {

      @Override
      public PhoenixConnection getConnection() {
        return connection;
      }

      @Override
      public ParameterMetaData getParameterMetaData() {
        return context.getBindManager().getParameterMetaData();
      }

      @Override
      public MutationState execute() {
        Map<ImmutableBytesPtr, Map<PColumn, byte[]>> mutation = Maps.newHashMapWithExpectedSize(1);
        setValues(values, pkSlotIndexes, columnIndexes, tableRef.getTable(), mutation);
        return new MutationState(tableRef, mutation, 0, maxSize, connection);
      }

      @Override
      public ExplainPlan getExplainPlan() throws SQLException {
        return new ExplainPlan(Collections.singletonList("PUT SINGLE ROW"));
      }
    };
  }
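
For orientation, the MutationPlan returned above is driven by the JDBC layer roughly as follows. This is a minimal sketch under assumed names (the compileUpsert helper and the upsertStatement variable are hypothetical); in Phoenix this wiring lives inside the statement implementation.

 // Hypothetical driver sketch, not Phoenix source: execute the compiled plan,
 // merge the resulting MutationState into the connection, and commit if
 // auto-commit is enabled.
 MutationPlan plan = compileUpsert(connection, upsertStatement); // compileUpsert is assumed for illustration
 MutationState state = plan.execute();
 connection.getMutationState().join(state);
 if (connection.getAutoCommit()) {
   connection.commit();
 }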
Example no. 14
 @Override
 public int newKey(ImmutableBytesWritable key, byte[][] values) {
   int i = 0;
   TrustedByteArrayOutputStream os =
       new TrustedByteArrayOutputStream(SchemaUtil.estimateKeyLength(this));
   try {
     List<PColumn> columns = getPKColumns();
     int nColumns = columns.size();
     PColumn lastPKColumn = columns.get(nColumns - 1);
     while (i < values.length && i < nColumns) {
       PColumn column = columns.get(i);
       PDataType type = column.getDataType();
        // Normalize a null value to an empty byte array; the nullability
        // check below will throw if the column doesn't allow null.
        byte[] byteValue = values[i++];
        if (byteValue == null) {
          byteValue = ByteUtil.EMPTY_BYTE_ARRAY;
        }
       // An empty byte array return value means null. Do this,
        // since a type may have multiple representations of null.
       // For example, VARCHAR treats both null and an empty string
       // as null. This way we don't need to leak that part of the
       // implementation outside of PDataType by checking the value
       // here.
       if (byteValue.length == 0 && !column.isNullable()) {
         throw new ConstraintViolationException(
             name.getString() + "." + column.getName().getString() + " may not be null");
       }
       Integer byteSize = column.getByteSize();
       if (type.isFixedWidth()) { // TODO: handle multi-byte characters
         if (byteValue.length != byteSize) {
           throw new ConstraintViolationException(
               name.getString()
                   + "."
                   + column.getName().getString()
                   + " must be "
                   + byteSize
                   + " bytes ("
                   + SchemaUtil.toString(type, byteValue)
                   + ")");
         }
       } else if (byteSize != null && byteValue.length > byteSize) {
         throw new ConstraintViolationException(
             name.getString()
                 + "."
                 + column.getName().getString()
                 + " may not exceed "
                 + byteSize
                 + " bytes ("
                 + SchemaUtil.toString(type, byteValue)
                 + ")");
       }
       os.write(byteValue, 0, byteValue.length);
       // Separate variable length column values in key with zero byte
       if (!type.isFixedWidth() && column != lastPKColumn) {
         os.write(SEPARATOR_BYTE);
       }
     }
      // If a remaining PK column is fixed width or non-nullable, it must have been set, so throw
     if (i < nColumns) {
       PColumn column = columns.get(i);
       PDataType type = column.getDataType();
       if (type.isFixedWidth() || !column.isNullable()) {
         throw new ConstraintViolationException(
             name.getString() + "." + column.getName().getString() + " may not be null");
       }
       // Separate variable length column values in key with zero byte
       if (column != lastPKColumn) {
         os.write(SEPARATOR_BYTE);
       }
     }
     key.set(os.getBuffer(), 0, os.size());
     return i;
   } finally {
     try {
       os.close();
     } catch (IOException e) {
       throw new RuntimeException(e); // Impossible
     }
   }
 }
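
To make the separator rule in newKey concrete: variable-length PK values are joined with a zero byte, and the last column never gets a trailing separator. Below is a simplified standalone sketch of just that rule (not the Phoenix implementation; fixed-width and nullability handling are omitted).

 // Simplified sketch of the variable-width concatenation in newKey above:
 // values are joined with a zero separator byte; the last column gets none.
 static byte[] concatVarWidthKey(byte[][] values) {
   java.io.ByteArrayOutputStream os = new java.io.ByteArrayOutputStream();
   for (int i = 0; i < values.length; i++) {
     os.write(values[i], 0, values[i].length);
     if (i < values.length - 1) {
       os.write(0); // SEPARATOR_BYTE
     }
   }
   return os.toByteArray();
 }
 // For PK values ("a", "bc"), the resulting row key bytes are
 // { 'a', 0x00, 'b', 'c' }.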
Example no. 15
 private void assertColumnMetaData(ResultSet rs, String schema, String table, String column)
     throws SQLException {
   assertEquals(schema, rs.getString("TABLE_SCHEM"));
   assertEquals(table, rs.getString("TABLE_NAME"));
   assertEquals(SchemaUtil.normalizeIdentifier(column), rs.getString("COLUMN_NAME"));
 }
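
A typical call site for this helper, sketched with assumed schema, table, and column names (FOO, BAR, A, B) and an assumed open connection conn:

 // Hypothetical usage: verify the column metadata rows returned for FOO.BAR.
 // The names here are assumed for illustration only.
 ResultSet rs = conn.getMetaData().getColumns(null, "FOO", "BAR", null);
 assertTrue(rs.next());
 assertColumnMetaData(rs, "FOO", "BAR", "A");
 assertTrue(rs.next());
 assertColumnMetaData(rs, "FOO", "BAR", "B");
 assertFalse(rs.next());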