Code Example #1
 @Override
 public MetaDataMutationResult updateIndexState(
     List<Mutation> tableMetadata, String parentTableName) throws SQLException {
   byte[][] rowKeyMetadata = new byte[3][];
   SchemaUtil.getVarChars(tableMetadata.get(0).getRow(), rowKeyMetadata);
   Mutation m = MetaDataUtil.getTableHeaderRow(tableMetadata);
   ImmutableBytesWritable ptr = new ImmutableBytesWritable();
   if (!MetaDataUtil.getMutationValue(m, INDEX_STATE_BYTES, kvBuilder, ptr)) {
     throw new IllegalStateException();
   }
   PIndexState newState = PIndexState.fromSerializedValue(ptr.get()[ptr.getOffset()]);
   byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
   String schemaName = Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]);
   String indexName = Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]);
   String indexTableName = SchemaUtil.getTableName(schemaName, indexName);
   PName tenantId = tenantIdBytes.length == 0 ? null : PNameFactory.newName(tenantIdBytes);
   PTable index = metaData.getTableRef(new PTableKey(tenantId, indexTableName)).getTable();
   index =
       PTableImpl.makePTable(
           index,
           newState == PIndexState.USABLE
               ? PIndexState.ACTIVE
               : newState == PIndexState.UNUSABLE ? PIndexState.INACTIVE : newState);
   return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, index);
 }
Code Example #2
 @Test
 public void testGetTableName() {
   String tableDisplayName = SchemaUtil.getTableName("schemaName", "tableName");
    assertEquals("schemaName.tableName", tableDisplayName);
    tableDisplayName = SchemaUtil.getTableName(null, "tableName");
    assertEquals("tableName", tableDisplayName);
 }
Code Example #3
File: SchemaUtil.java Project: gkanade/phoenix
 /**
  * Normalizes a full table name. Uses {@link #normalizeIdentifier(String)} on the schema and
  * table name parts separately.
  *
  * @param fullTableName the schema-qualified (or unqualified) table name
  * @return the normalized full table name
  */
 public static String normalizeFullTableName(String fullTableName) {
   String schemaName = SchemaUtil.getSchemaNameFromFullName(fullTableName);
   String tableName = SchemaUtil.getTableNameFromFullName(fullTableName);
   String normalizedTableName = StringUtil.EMPTY_STRING;
   if (!schemaName.isEmpty()) {
     normalizedTableName = normalizeIdentifier(schemaName) + QueryConstants.NAME_SEPARATOR;
   }
   return normalizedTableName + normalizeIdentifier(tableName);
 }
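
For illustration, a hedged sketch of what this normalization yields under Phoenix's usual identifier rules (unquoted identifiers are upper-cased, double-quoted identifiers keep their case with the quotes stripped; the expected values below are assumptions for illustration, not output captured from the project):

 String a = SchemaUtil.normalizeFullTableName("mySchema.myTable");
 // expected: "MYSCHEMA.MYTABLE" (both parts unquoted, hence upper-cased)
 String b = SchemaUtil.normalizeFullTableName("\"mySchema\".\"myTable\"");
 // expected: "mySchema.myTable" (quotes stripped, case preserved)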
Code Example #4
 public FunctionParseNode functionDistinct(String name, List<ParseNode> args) {
   if (CountAggregateFunction.NAME.equals(SchemaUtil.normalizeIdentifier(name))) {
     BuiltInFunctionInfo info =
         getInfo(SchemaUtil.normalizeIdentifier(DistinctCountAggregateFunction.NAME), args);
     return new DistinctCountParseNode(DistinctCountAggregateFunction.NAME, args, info);
   } else {
     throw new UnsupportedOperationException("DISTINCT not supported with " + name);
   }
 }
Code Example #5
 private static byte[] getTableName(List<Mutation> tableMetaData, byte[] physicalTableName) {
   if (physicalTableName != null) {
     return physicalTableName;
   }
   byte[][] rowKeyMetadata = new byte[3][];
   Mutation m = MetaDataUtil.getTableHeaderRow(tableMetaData);
   byte[] key = m.getRow();
   SchemaUtil.getVarChars(key, rowKeyMetadata);
   byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
   byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
   return SchemaUtil.getTableNameAsBytes(schemaBytes, tableBytes);
 }
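
The row key parsed here follows the SYSTEM.CATALOG layout: tenant id, schema name, and table name stored as null-separated VARCHARs. A minimal sketch of composing such a key, assuming Phoenix's ByteUtil.concat and QueryConstants.SEPARATOR_BYTE_ARRAY (the variable names echo the surrounding examples and are illustrative only):

 byte[] headerRowKey =
     ByteUtil.concat(
         tenantIdBytes, QueryConstants.SEPARATOR_BYTE_ARRAY, // tenant id, then \x00
         schemaBytes, QueryConstants.SEPARATOR_BYTE_ARRAY, // schema name, then \x00
         tableBytes); // table name
 // SchemaUtil.getVarChars(headerRowKey, rowKeyMetadata) splits it back into the three parts.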
Code Example #6
 private static void setValues(
     byte[][] values,
     int[] pkSlotIndex,
     int[] columnIndexes,
     PTable table,
     Map<ImmutableBytesPtr, Map<PColumn, byte[]>> mutation) {
   Map<PColumn, byte[]> columnValues = Maps.newHashMapWithExpectedSize(columnIndexes.length);
   byte[][] pkValues = new byte[table.getPKColumns().size()][];
   // If the table uses salting, the first byte is the salting byte, set to an empty array
   // here and we will fill in the byte later in PRowImpl.
   if (table.getBucketNum() != null) {
     pkValues[0] = new byte[] {0};
   }
   for (int i = 0; i < values.length; i++) {
     byte[] value = values[i];
     PColumn column = table.getColumns().get(columnIndexes[i]);
     if (SchemaUtil.isPKColumn(column)) {
       pkValues[pkSlotIndex[i]] = value;
     } else {
       columnValues.put(column, value);
     }
   }
   ImmutableBytesPtr ptr = new ImmutableBytesPtr();
   table.newKey(ptr, pkValues);
   mutation.put(ptr, columnValues);
 }
Code Example #7
  private static void initTableValues() throws Exception {
    ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), TEST_PROPERTIES);
    HTableInterface hTable =
        services.getTable(
            SchemaUtil.getTableNameAsBytes(
                HBASE_DYNAMIC_COLUMNS_SCHEMA_NAME, HBASE_DYNAMIC_COLUMNS));
    try {
      // Insert rows using standard HBase mechanism with standard HBase "types"
      List<Row> mutations = new ArrayList<Row>();
      byte[] dv = Bytes.toBytes("DV");
      byte[] first = Bytes.toBytes("F");
      byte[] f1v1 = Bytes.toBytes("F1V1");
      byte[] f1v2 = Bytes.toBytes("F1V2");
      byte[] f2v1 = Bytes.toBytes("F2V1");
      byte[] f2v2 = Bytes.toBytes("F2V2");
      byte[] key = Bytes.toBytes("entry1");

      Put put = new Put(key);
      put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dv, Bytes.toBytes("default"));
      put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, first, Bytes.toBytes("first"));
      put.add(FAMILY_NAME, f1v1, Bytes.toBytes("f1value1"));
      put.add(FAMILY_NAME, f1v2, Bytes.toBytes("f1value2"));
      put.add(FAMILY_NAME2, f2v1, Bytes.toBytes("f2value1"));
      put.add(FAMILY_NAME2, f2v2, Bytes.toBytes("f2value2"));
      mutations.add(put);

      hTable.batch(mutations);

    } finally {
      hTable.close();
    }
    // Create Phoenix table after HBase table was created through the native APIs
    // The timestamp of the table creation must be later than the timestamp of the data
    ensureTableCreated(getUrl(), HBASE_DYNAMIC_COLUMNS);
  }
Code Example #8
 public static PTable getTable(Connection conn, String name) throws SQLException {
   PTable table = null;
   PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
   try {
     table = pconn.getMetaDataCache().getTable(new PTableKey(pconn.getTenantId(), name));
   } catch (TableNotFoundException e) {
     String schemaName = SchemaUtil.getSchemaNameFromFullName(name);
     String tableName = SchemaUtil.getTableNameFromFullName(name);
     MetaDataMutationResult result = new MetaDataClient(pconn).updateCache(schemaName, tableName);
     if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
       throw e;
     }
     table = result.getTable();
   }
   return table;
 }
Code Example #9
 /*
  * Map all unique <family, name> pairs to an index. The table name is part of the
  * TableRowkey, so we do not care about it here.
  */
 private void initColumnIndexes() throws SQLException {
    columnIndexes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
   int columnIndex = 0;
   for (int index = 0; index < logicalNames.size(); index++) {
     PTable table = PhoenixRuntime.getTable(conn, logicalNames.get(index));
     List<PColumn> cls = table.getColumns();
     for (int i = 0; i < cls.size(); i++) {
       PColumn c = cls.get(i);
        byte[] family = new byte[0];
        if (c.getFamilyName() != null) { // PK columns have no family name and keep the empty one
          family = c.getFamilyName().getBytes();
        }
       byte[] name = c.getName().getBytes();
       byte[] cfn = Bytes.add(family, QueryConstants.NAMESPACE_SEPARATOR_BYTES, name);
       if (!columnIndexes.containsKey(cfn)) {
          columnIndexes.put(cfn, Integer.valueOf(columnIndex));
         columnIndex++;
       }
     }
     byte[] emptyColumnFamily = SchemaUtil.getEmptyColumnFamily(table);
     byte[] cfn =
         Bytes.add(
             emptyColumnFamily,
             QueryConstants.NAMESPACE_SEPARATOR_BYTES,
             QueryConstants.EMPTY_COLUMN_BYTES);
      columnIndexes.put(cfn, Integer.valueOf(columnIndex));
     columnIndex++;
   }
 }
Code Example #10
File: SchemaUtil.java Project: gkanade/phoenix
 /**
  * Imperfect estimate of row size given a PTable.
  * TODO: keep the row count in the stats table and use total size / row count instead.
  *
  * @param table the table whose row size to estimate
  * @return estimate of the size in bytes of a row
  */
 public static long estimateRowSize(PTable table) {
   int keyLength = estimateKeyLength(table);
   long rowSize = 0;
   for (PColumn column : table.getColumns()) {
     if (!SchemaUtil.isPKColumn(column)) {
       PDataType type = column.getDataType();
       Integer maxLength = column.getMaxLength();
       int valueLength =
           !type.isFixedWidth()
               ? VAR_KV_LENGTH_ESTIMATE
               : maxLength == null ? type.getByteSize() : maxLength;
       rowSize +=
           KeyValue.getKeyValueDataStructureSize(
               keyLength,
               column.getFamilyName().getBytes().length,
               column.getName().getBytes().length,
               valueLength);
     }
   }
   // Empty key value
   rowSize +=
       KeyValue.getKeyValueDataStructureSize(
           keyLength,
           getEmptyColumnFamily(table).length,
           QueryConstants.EMPTY_COLUMN_BYTES.length,
           0);
   return rowSize;
 }
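
A hedged usage sketch tying this to PhoenixRuntime.getTable from Code Example #8 (the connection and table name are hypothetical):

 PTable table = PhoenixRuntime.getTable(conn, "MY_SCHEMA.MY_TABLE"); // hypothetical table
 long approxBytesPerRow = SchemaUtil.estimateRowSize(table);
 // e.g. divide a per-region size budget by this to roughly gauge rows per region

Note that the trailing addition accounts for Phoenix's empty key value, the marker cell written to the empty column family for every row, whose value length is zero, so only key-side overhead is counted.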
Code Example #11
 @Test
 public void testGetColumnName() {
   String columnDisplayName;
    columnDisplayName =
        SchemaUtil.getMetaDataEntityName("schemaName", "tableName", "familyName", "columnName");
    assertEquals("schemaName.tableName.familyName.columnName", columnDisplayName);
    columnDisplayName =
        SchemaUtil.getMetaDataEntityName(null, "tableName", "familyName", "columnName");
    assertEquals("tableName.familyName.columnName", columnDisplayName);
    columnDisplayName =
        SchemaUtil.getMetaDataEntityName("schemaName", "tableName", null, "columnName");
    assertEquals("schemaName.tableName.columnName", columnDisplayName);
    columnDisplayName = SchemaUtil.getMetaDataEntityName(null, null, "familyName", "columnName");
    assertEquals("familyName.columnName", columnDisplayName);
    columnDisplayName = SchemaUtil.getMetaDataEntityName(null, null, null, "columnName");
    assertEquals("columnName", columnDisplayName);
 }
Code Example #12
File: FromCompiler.java Project: jffnothing/phoenix
 protected TableRef createTableRef(NamedTableNode tableNode, boolean updateCacheImmediately)
     throws SQLException {
   String tableName = tableNode.getName().getTableName();
   String schemaName = tableNode.getName().getSchemaName();
   long timeStamp = QueryConstants.UNSET_TIMESTAMP;
   String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
   PName tenantId = connection.getTenantId();
   PTable theTable = null;
   if (updateCacheImmediately || connection.getAutoCommit()) {
     MetaDataMutationResult result = client.updateCache(schemaName, tableName);
     timeStamp = result.getMutationTime();
     theTable = result.getTable();
     if (theTable == null) {
       throw new TableNotFoundException(schemaName, tableName, timeStamp);
     }
   } else {
     try {
       theTable = connection.getMetaDataCache().getTable(new PTableKey(tenantId, fullTableName));
     } catch (TableNotFoundException e1) {
       if (tenantId != null) { // Check with null tenantId next
         try {
           theTable = connection.getMetaDataCache().getTable(new PTableKey(null, fullTableName));
         } catch (TableNotFoundException e2) {
         }
       }
     }
     // We always attempt to update the cache in the event of a TableNotFoundException
     if (theTable == null) {
       MetaDataMutationResult result = client.updateCache(schemaName, tableName);
       if (result.wasUpdated()) {
         timeStamp = result.getMutationTime();
         theTable = result.getTable();
       }
     }
     if (theTable == null) {
       throw new TableNotFoundException(schemaName, tableName, timeStamp);
     }
   }
   // Add any dynamic columns to the table declaration
   List<ColumnDef> dynamicColumns = tableNode.getDynamicColumns();
   theTable = addDynamicColumns(dynamicColumns, theTable);
   TableRef tableRef =
       new TableRef(tableNode.getAlias(), theTable, timeStamp, !dynamicColumns.isEmpty());
   if (logger.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
     logger.debug(
         "Re-resolved stale table "
             + fullTableName
             + " with seqNum "
             + tableRef.getTable().getSequenceNumber()
             + " at timestamp "
             + tableRef.getTable().getTimeStamp()
             + " with "
             + tableRef.getTable().getColumns().size()
             + " columns: "
             + tableRef.getTable().getColumns());
   }
   return tableRef;
 }
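
In the non-immediate path above, resolution proceeds from the tenant-specific metadata cache to the global (null-tenant) cache, and finally falls back to a server-side updateCache call; TableNotFoundException is thrown only when all of these miss.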
Code Example #13
  public SelectStatement select(
      List<SelectStatement> statements,
      List<OrderByNode> orderBy,
      LimitNode limit,
      int bindCount,
      boolean isAggregate) {
    if (statements.size() == 1)
      return select(statements.get(0), orderBy, limit, bindCount, isAggregate);

    // Get a list of adjusted aliases from a non-wildcard sub-select if any.
    // We do not check the number of select nodes among all sub-selects, as
    // it will be done later at compile stage. Empty or different aliases
     // are ignored, since they cannot be referenced by outer queries.
    List<String> aliases = Lists.<String>newArrayList();
    Map<String, UDFParseNode> udfParseNodes = new HashMap<String, UDFParseNode>(1);
    for (int i = 0; i < statements.size() && aliases.isEmpty(); i++) {
      SelectStatement subselect = statements.get(i);
      udfParseNodes.putAll(subselect.getUdfParseNodes());
      if (!subselect.hasWildcard()) {
        for (AliasedNode aliasedNode : subselect.getSelect()) {
          String alias = aliasedNode.getAlias();
          if (alias == null) {
            alias = SchemaUtil.normalizeIdentifier(aliasedNode.getNode().getAlias());
          }
          aliases.add(alias == null ? createTempAlias() : alias);
        }
      }
    }

    List<AliasedNode> aliasedNodes;
    if (aliases.isEmpty()) {
      aliasedNodes = Lists.newArrayList(aliasedNode(null, wildcard()));
    } else {
      aliasedNodes = Lists.newArrayListWithExpectedSize(aliases.size());
      for (String alias : aliases) {
        aliasedNodes.add(aliasedNode(alias, column(null, alias, alias)));
      }
    }

    return select(
        null,
        HintNode.EMPTY_HINT_NODE,
        false,
        aliasedNodes,
        null,
        null,
        null,
        orderBy,
        limit,
        bindCount,
        false,
        false,
        statements,
        udfParseNodes);
  }
Code Example #14
 private static KeyValueSchema buildSchema(PTable table) {
   KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0);
   if (table != null) {
     for (PColumn column : table.getColumns()) {
       if (!SchemaUtil.isPKColumn(column)) {
         builder.addField(column);
       }
     }
   }
   return builder.build();
 }
Code Example #15
 public LiteralParseNode literal(String value, String sqlTypeName) throws SQLException {
   PDataType expectedType =
       sqlTypeName == null
           ? null
           : PDataType.fromSqlTypeName(SchemaUtil.normalizeIdentifier(sqlTypeName));
   if (expectedType == null || !expectedType.isCoercibleTo(PTimestamp.INSTANCE)) {
     throw TypeMismatchException.newException(expectedType, PTimestamp.INSTANCE);
   }
   Object typedValue = expectedType.toObject(value);
   return new LiteralParseNode(typedValue);
 }
Code Example #16
File: FromCompiler.java Project: jffnothing/phoenix
 @Override
 public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
   String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
   List<TableRef> tableRefs = tableMap.get(fullTableName);
   if (tableRefs.size() == 0) {
     throw new TableNotFoundException(fullTableName);
   } else if (tableRefs.size() > 1) {
     throw new AmbiguousTableException(tableName);
   } else {
     return tableRefs.get(0);
   }
 }
Code Example #17
File: SchemaUtil.java Project: gkanade/phoenix
 // Estimate the remaining key length for the slots from position pos onward in the schema.
 private static int estimatePartLength(int pos, Iterator<PColumn> iterator) {
   int length = 0;
   while (iterator.hasNext()) {
     PColumn column = iterator.next();
     if (column.getDataType().isFixedWidth()) {
       length += SchemaUtil.getFixedByteSize(column);
     } else {
       length += 1; // SEPARATOR byte.
     }
   }
   return length;
 }
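
As a worked illustration with assumed column types (not from the source): if the remaining slots are a fixed-width INTEGER and a variable-length VARCHAR, the estimate is 4 + 1 = 5 bytes, since a fixed-width column contributes its full byte size while a variable-length column contributes only its one-byte separator, its value length being unknown at this point.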
Code Example #18
   /**
    * Provides a mechanism to run SQL scripts against a Phoenix cluster. The arguments are:
    * 1) the connection URL string, and 2) one or more paths to either SQL scripts or CSV files.
    * If a CurrentSCN property is set on the connection URL, it is incremented between files,
    * with each file being processed by a new connection at the incremented timestamp value.
    */
  public static void main(String[] args) {

    ExecutionCommand execCmd = ExecutionCommand.parseArgs(args);
    String jdbcUrl = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + execCmd.getConnectionString();

    PhoenixConnection conn = null;
    try {
      Properties props = new Properties();
      conn = DriverManager.getConnection(jdbcUrl, props).unwrap(PhoenixConnection.class);

      for (String inputFile : execCmd.getInputFiles()) {
        if (inputFile.endsWith(SQL_FILE_EXT)) {
          PhoenixRuntime.executeStatements(
              conn, new FileReader(inputFile), Collections.emptyList());
        } else if (inputFile.endsWith(CSV_FILE_EXT)) {

          String tableName = execCmd.getTableName();
          if (tableName == null) {
            tableName =
                SchemaUtil.normalizeIdentifier(
                    inputFile.substring(
                        inputFile.lastIndexOf(File.separatorChar) + 1,
                        inputFile.length() - CSV_FILE_EXT.length()));
          }
          CSVCommonsLoader csvLoader =
              new CSVCommonsLoader(
                  conn,
                  tableName,
                  execCmd.getColumns(),
                  execCmd.isStrict(),
                  execCmd.getFieldDelimiter(),
                  execCmd.getQuoteCharacter(),
                  execCmd.getEscapeCharacter(),
                  execCmd.getArrayElementSeparator());
          csvLoader.upsert(inputFile);
        }
      }
    } catch (Throwable t) {
      t.printStackTrace();
    } finally {
      if (conn != null) {
        try {
          conn.close();
        } catch (SQLException e) {
          // JVM is about to exit anyway, so just swallow the exception.
        }
      }
      System.exit(0);
    }
  }
Code Example #19
 public TupleProjector(ProjectedPTableWrapper projected) {
   List<PColumn> columns = projected.getTable().getColumns();
   expressions = new Expression[columns.size() - projected.getTable().getPKColumns().size()];
    // We do not count minNullableIndex here because we might do a later merge.
   KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0);
   int i = 0;
   for (PColumn column : projected.getTable().getColumns()) {
     if (!SchemaUtil.isPKColumn(column)) {
       builder.addField(column);
       expressions[i++] = projected.getSourceExpression(column);
     }
   }
   schema = builder.build();
   valueSet = ValueBitSet.newInstance(schema);
 }
Code Example #20
 @Override
 public MetaDataMutationResult getTable(
     PName tenantId,
     byte[] schemaBytes,
     byte[] tableBytes,
     long tableTimestamp,
     long clientTimestamp)
     throws SQLException {
    // Return a result that will cause the client to use its own metadata instead of needing
    // to get anything from the server (since we don't have a connection)
   try {
     String fullTableName = SchemaUtil.getTableName(schemaBytes, tableBytes);
     PTable table = metaData.getTableRef(new PTableKey(tenantId, fullTableName)).getTable();
     return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, table, true);
   } catch (TableNotFoundException e) {
     return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, 0, null);
   }
 }
Code Example #21
File: ScanUtil.java Project: hzg0102/phoenix
 private static byte[] nextKey(byte[] key, RowKeySchema schema, ImmutableBytesWritable ptr) {
   int pos = 0;
   int maxOffset = schema.iterator(key, ptr);
   while (schema.next(ptr, pos, maxOffset) != null) {
     pos++;
   }
   Field field = schema.getField(pos - 1);
   if (!field.getDataType().isFixedWidth()) {
     byte[] newLowerRange = new byte[key.length + 1];
     System.arraycopy(key, 0, newLowerRange, 0, key.length);
     newLowerRange[key.length] =
         SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), key.length == 0, field);
     key = newLowerRange;
   } else {
     key = Arrays.copyOf(key, key.length);
   }
   ByteUtil.nextKey(key, key.length);
   return key;
 }
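
A hedged walk-through of the two branches, with key bytes invented for illustration: for a trailing variable-length field, key {'a','b'} first becomes {'a','b',0x00} and ByteUtil.nextKey then increments the appended separator to give {'a','b',0x01}, the smallest possible key greater than "ab" given that the zero separator byte cannot appear inside a variable-length value; for a trailing fixed-width field, the key is copied and its last byte is incremented, so {'a','b'} becomes {'a','c'}.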
Code Example #22
File: FromCompiler.java Project: jffnothing/phoenix
 protected PTable addDynamicColumns(List<ColumnDef> dynColumns, PTable theTable)
     throws SQLException {
   if (!dynColumns.isEmpty()) {
     List<PColumn> allcolumns = new ArrayList<PColumn>();
     List<PColumn> existingColumns = theTable.getColumns();
     // Need to skip the salting column, as it's added in the makePTable call below
     allcolumns.addAll(
         theTable.getBucketNum() == null
             ? existingColumns
             : existingColumns.subList(1, existingColumns.size()));
      // Position is still based on the full column list, including the salting column
     int position = existingColumns.size();
     PName defaultFamilyName = PNameFactory.newName(SchemaUtil.getEmptyColumnFamily(theTable));
     for (ColumnDef dynColumn : dynColumns) {
       PName familyName = defaultFamilyName;
       PName name = PNameFactory.newName(dynColumn.getColumnDefName().getColumnName());
       String family = dynColumn.getColumnDefName().getFamilyName();
       if (family != null) {
         theTable.getColumnFamily(family); // Verifies that column family exists
         familyName = PNameFactory.newName(family);
       }
       allcolumns.add(
           new PColumnImpl(
               name,
               familyName,
               dynColumn.getDataType(),
               dynColumn.getMaxLength(),
               dynColumn.getScale(),
               dynColumn.isNull(),
               position,
               dynColumn.getSortOrder(),
               dynColumn.getArraySize(),
               null,
               false));
       position++;
     }
     theTable = PTableImpl.makePTable(theTable, allcolumns);
   }
   return theTable;
 }
Code Example #23
File: SchemaUtil.java Project: gkanade/phoenix
  // Go through each slot in the schema and try to match it with the split byte array. If the
  // split does not conform to the schema, extend its length to match the schema.
 private static byte[] processSplit(byte[] split, LinkedHashSet<PColumn> pkColumns) {
   int pos = 0, offset = 0, maxOffset = split.length;
   Iterator<PColumn> iterator = pkColumns.iterator();
   while (pos < pkColumns.size()) {
     PColumn column = iterator.next();
     if (column.getDataType().isFixedWidth()) { // Fixed width
       int length = SchemaUtil.getFixedByteSize(column);
       if (maxOffset - offset < length) {
         // The split truncates the field. Fill in the rest of the part and any fields that
         // are missing after this field.
         int fillInLength = length - (maxOffset - offset);
         fillInLength += estimatePartLength(pos + 1, iterator);
         return ByteUtil.fillKey(split, split.length + fillInLength);
       }
       // Account for this field, move to next position;
       offset += length;
       pos++;
     } else { // Variable length
       // If we are the last slot, then we are done. Nothing needs to be filled in.
       if (pos == pkColumns.size() - 1) {
         break;
       }
       while (offset < maxOffset && split[offset] != QueryConstants.SEPARATOR_BYTE) {
         offset++;
       }
       if (offset == maxOffset) {
         // The var-length field does not end with a separator and it's not the last field.
         int fillInLength = 1; // SEPARATOR byte for the current var-length slot.
         fillInLength += estimatePartLength(pos + 1, iterator);
         return ByteUtil.fillKey(split, split.length + fillInLength);
       }
       // Move to the next position;
       offset += 1; // skip separator;
       pos++;
     }
   }
   return split;
 }
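
A worked example under an assumed schema, an INTEGER pk column followed by a VARCHAR (neither taken from the source): a 2-byte split falls 2 bytes short of the 4-byte INTEGER, and the VARCHAR slot is estimated at 1 separator byte, so fillInLength becomes 2 + 1 = 3 and ByteUtil.fillKey pads the split from 2 to 5 bytes.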
Code Example #24
File: ScanRanges.java Project: zxs/phoenix
 private static List<byte[]> getPointKeys(
     List<List<KeyRange>> ranges, int[] slotSpan, RowKeySchema schema, Integer bucketNum) {
   if (ranges == null || ranges.isEmpty()) {
     return Collections.emptyList();
   }
   boolean isSalted = bucketNum != null;
   int count = 1;
   int offset = isSalted ? 1 : 0;
   // Skip salt byte range in the first position if salted
   for (int i = offset; i < ranges.size(); i++) {
     count *= ranges.get(i).size();
   }
   List<byte[]> keys = Lists.newArrayListWithExpectedSize(count);
   int[] position = new int[ranges.size()];
   int maxKeyLength = SchemaUtil.getMaxKeyLength(schema, ranges);
   int length;
   byte[] key = new byte[maxKeyLength];
   do {
     length =
         ScanUtil.setKey(
             schema,
             ranges,
             slotSpan,
             position,
             Bound.LOWER,
             key,
             offset,
             offset,
             ranges.size(),
             offset);
     if (isSalted) {
       key[0] = SaltingUtil.getSaltingByte(key, offset, length, bucketNum);
     }
     keys.add(Arrays.copyOf(key, length + offset));
   } while (incrementKey(ranges, position));
   return keys;
 }
Code Example #25
File: FromCompiler.java Project: jffnothing/phoenix
 public static ColumnResolver getResolverForCreation(
     final CreateTableStatement statement, final PhoenixConnection connection)
     throws SQLException {
   TableName baseTable = statement.getBaseTableName();
   if (baseTable == null) {
     return EMPTY_TABLE_RESOLVER;
   }
   NamedTableNode tableNode =
       NamedTableNode.create(null, baseTable, Collections.<ColumnDef>emptyList());
   // Always use non-tenant-specific connection here
   try {
     SingleTableColumnResolver visitor =
         new SingleTableColumnResolver(connection, tableNode, true);
     return visitor;
   } catch (TableNotFoundException e) {
     // Used for mapped VIEW, since we won't be able to resolve that.
     // Instead, we create a table with just the dynamic columns.
     // A tenant-specific connection may not create a mapped VIEW.
     if (connection.getTenantId() == null && statement.getTableType() == PTableType.VIEW) {
       ConnectionQueryServices services = connection.getQueryServices();
       byte[] fullTableName =
           SchemaUtil.getTableNameAsBytes(baseTable.getSchemaName(), baseTable.getTableName());
       HTableInterface htable = null;
       try {
         htable = services.getTable(fullTableName);
       } catch (UnsupportedOperationException ignore) {
         throw e; // For Connectionless
       } finally {
         if (htable != null) Closeables.closeQuietly(htable);
       }
       tableNode = NamedTableNode.create(null, baseTable, statement.getColumnDefs());
       return new SingleTableColumnResolver(connection, tableNode, e.getTimeStamp());
     }
     throw e;
   }
 }
Code Example #26
 private void assertColumnMetaData(ResultSet rs, String schema, String table, String column)
     throws SQLException {
   assertEquals(schema, rs.getString("TABLE_SCHEM"));
   assertEquals(table, rs.getString("TABLE_NAME"));
   assertEquals(SchemaUtil.normalizeIdentifier(column), rs.getString("COLUMN_NAME"));
 }
Code Example #27
  public MutationPlan compile(UpsertStatement upsert) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    final int maxSize =
        services
            .getProps()
            .getInt(
                QueryServices.MAX_MUTATION_SIZE_ATTRIB,
                QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    final ColumnResolver resolver = FromCompiler.getResolverForMutation(upsert, connection);
    final TableRef tableRef = resolver.getTables().get(0);
    final PTable table = tableRef.getTable();
    if (table.getType() == PTableType.VIEW) {
      if (table.getViewType().isReadOnly()) {
        throw new ReadOnlyTableException(
            table.getSchemaName().getString(), table.getTableName().getString());
      }
    }
    boolean isSalted = table.getBucketNum() != null;
    final boolean isTenantSpecific = table.isMultiTenant() && connection.getTenantId() != null;
    final boolean isSharedViewIndex = table.getViewIndexId() != null;
    String tenantId = isTenantSpecific ? connection.getTenantId().getString() : null;
    int posOffset = isSalted ? 1 : 0;
    // Setup array of column indexes parallel to values that are going to be set
    List<ColumnName> columnNodes = upsert.getColumns();
    final List<PColumn> allColumns = table.getColumns();
    Set<PColumn> addViewColumnsToBe = Collections.emptySet();
    Set<PColumn> overlapViewColumnsToBe = Collections.emptySet();

    int[] columnIndexesToBe;
    int nColumnsToSet = 0;
    int[] pkSlotIndexesToBe;
    List<PColumn> targetColumns;
    if (table.getViewType() == ViewType.UPDATABLE) {
      addViewColumnsToBe = Sets.newLinkedHashSetWithExpectedSize(allColumns.size());
      for (PColumn column : allColumns) {
        if (column.getViewConstant() != null) {
          addViewColumnsToBe.add(column);
        }
      }
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
     // Allow a full-row upsert if no columns (or only dynamic ones) are specified and the value count matches
    if (columnNodes.isEmpty()
        || columnNodes.size() == upsert.getTable().getDynamicColumns().size()) {
      nColumnsToSet = allColumns.size() - posOffset;
      columnIndexesToBe = new int[nColumnsToSet];
      pkSlotIndexesToBe = new int[columnIndexesToBe.length];
      targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
      targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
      int minPKPos = 0;
      if (isTenantSpecific) {
        PColumn tenantColumn = table.getPKColumns().get(minPKPos);
        columnIndexesToBe[minPKPos] = tenantColumn.getPosition();
        targetColumns.set(minPKPos, tenantColumn);
        minPKPos++;
      }
      if (isSharedViewIndex) {
        PColumn indexIdColumn = table.getPKColumns().get(minPKPos);
        columnIndexesToBe[minPKPos] = indexIdColumn.getPosition();
        targetColumns.set(minPKPos, indexIdColumn);
        minPKPos++;
      }
      for (int i = posOffset, j = 0; i < allColumns.size(); i++) {
        PColumn column = allColumns.get(i);
        if (SchemaUtil.isPKColumn(column)) {
          pkSlotIndexesToBe[i - posOffset] = j + posOffset;
          if (j++ < minPKPos) { // Skip, as it's already been set above
            continue;
          }
          minPKPos = 0;
        }
        columnIndexesToBe[i - posOffset + minPKPos] = i;
        targetColumns.set(i - posOffset + minPKPos, column);
      }
      if (!addViewColumnsToBe.isEmpty()) {
        // All view columns overlap in this case
        overlapViewColumnsToBe = addViewColumnsToBe;
        addViewColumnsToBe = Collections.emptySet();
      }
    } else {
      // Size for worst case
      int numColsInUpsert = columnNodes.size();
      nColumnsToSet =
          numColsInUpsert
              + addViewColumnsToBe.size()
              + (isTenantSpecific ? 1 : 0)
              + (isSharedViewIndex ? 1 : 0);
      columnIndexesToBe = new int[nColumnsToSet];
      pkSlotIndexesToBe = new int[columnIndexesToBe.length];
      targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
      targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
      // TODO: necessary? Filling with -1 means we get an ArrayIndexOutOfBoundsException
      // if an entry is not replaced below.
      Arrays.fill(columnIndexesToBe, -1);
      Arrays.fill(pkSlotIndexesToBe, -1);
      BitSet pkColumnsSet = new BitSet(table.getPKColumns().size());
      int i = 0;
      // Add the tenant column directly, since attempting to resolve it would fail
      if (isTenantSpecific) {
        PColumn tenantColumn = table.getPKColumns().get(i + posOffset);
        columnIndexesToBe[i] = tenantColumn.getPosition();
        pkColumnsSet.set(pkSlotIndexesToBe[i] = i + posOffset);
        targetColumns.set(i, tenantColumn);
        i++;
      }
      if (isSharedViewIndex) {
        PColumn indexIdColumn = table.getPKColumns().get(i + posOffset);
        columnIndexesToBe[i] = indexIdColumn.getPosition();
        pkColumnsSet.set(pkSlotIndexesToBe[i] = i + posOffset);
        targetColumns.set(i, indexIdColumn);
        i++;
      }
      for (ColumnName colName : columnNodes) {
        ColumnRef ref =
            resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName());
        PColumn column = ref.getColumn();
        if (IndexUtil.getViewConstantValue(column, ptr)) {
          if (overlapViewColumnsToBe.isEmpty()) {
            overlapViewColumnsToBe = Sets.newHashSetWithExpectedSize(addViewColumnsToBe.size());
          }
          nColumnsToSet--;
          overlapViewColumnsToBe.add(column);
          addViewColumnsToBe.remove(column);
        }
        columnIndexesToBe[i] = ref.getColumnPosition();
        targetColumns.set(i, column);
        if (SchemaUtil.isPKColumn(column)) {
          pkColumnsSet.set(pkSlotIndexesToBe[i] = ref.getPKSlotPosition());
        }
        i++;
      }
      for (PColumn column : addViewColumnsToBe) {
        columnIndexesToBe[i] = column.getPosition();
        targetColumns.set(i, column);
        if (SchemaUtil.isPKColumn(column)) {
          pkColumnsSet.set(pkSlotIndexesToBe[i] = SchemaUtil.getPKPosition(table, column));
        }
        i++;
      }
      for (i = posOffset; i < table.getPKColumns().size(); i++) {
        PColumn pkCol = table.getPKColumns().get(i);
        if (!pkColumnsSet.get(i)) {
          if (!pkCol.isNullable()) {
            throw new ConstraintViolationException(
                table.getName().getString()
                    + "."
                    + pkCol.getName().getString()
                    + " may not be null");
          }
        }
      }
    }

    List<ParseNode> valueNodes = upsert.getValues();
    QueryPlan plan = null;
    RowProjector rowProjectorToBe = null;
    final int nValuesToSet;
    boolean sameTable = false;
    boolean runOnServer = false;
    UpsertingParallelIteratorFactory upsertParallelIteratorFactoryToBe = null;
    final boolean isAutoCommit = connection.getAutoCommit();
    if (valueNodes == null) {
      SelectStatement select = upsert.getSelect();
      assert (select != null);
      select = SubselectRewriter.flatten(select, connection);
      ColumnResolver selectResolver = FromCompiler.getResolverForQuery(select, connection);
      select = StatementNormalizer.normalize(select, selectResolver);
      select = prependTenantAndViewConstants(table, select, tenantId, addViewColumnsToBe);
      sameTable =
          select.getFrom().size() == 1 && tableRef.equals(selectResolver.getTables().get(0));
      /* We can run the upsert in a coprocessor if:
       * 1) from has only 1 table and the into table matches from table
       * 2) the select query isn't doing aggregation
       * 3) autoCommit is on
       * 4) the table is not immutable, as the client is the one that figures out the additional
       *    puts for index tables.
       * 5) no limit clause
       * Otherwise, run the query to pull the data from the server
       * and populate the MutationState (up to a limit).
       */
      runOnServer =
          sameTable
              && isAutoCommit
              && !table.isImmutableRows()
              && !select.isAggregate()
              && !select.isDistinct()
              && select.getLimit() == null
              && table.getBucketNum() == null;
      ParallelIteratorFactory parallelIteratorFactory;
      if (select.isAggregate() || select.isDistinct() || select.getLimit() != null) {
        parallelIteratorFactory = null;
      } else {
        // We can pipeline the upsert select instead of spooling everything to disk first,
        // if we don't have any post processing that's required.
        parallelIteratorFactory =
            upsertParallelIteratorFactoryToBe =
                new UpsertingParallelIteratorFactory(connection, tableRef);
      }
      // If we may be able to run on the server, add a hint that favors using the data table
      // if all else is equal.
      // TODO: it'd be nice if we could figure out in advance if the PK is potentially changing,
      // as this would disallow running on the server. We currently use the row projector we
      // get back to figure this out.
      HintNode hint = upsert.getHint();
      if (!upsert.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
        hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
      }
      select = SelectStatement.create(select, hint);
      // Pass scan through if same table in upsert and select so that projection is computed
      // correctly
      // Use optimizer to choose the best plan
      plan =
          new QueryOptimizer(services)
              .optimize(statement, select, selectResolver, targetColumns, parallelIteratorFactory);
      runOnServer &= plan.getTableRef().equals(tableRef);
      rowProjectorToBe = plan.getProjector();
      nValuesToSet = rowProjectorToBe.getColumnCount();
      // Cannot auto commit if doing aggregation or topN or salted
      // Salted causes problems because the row may end up living on a different region
    } else {
      nValuesToSet =
          valueNodes.size()
              + addViewColumnsToBe.size()
              + (isTenantSpecific ? 1 : 0)
              + (isSharedViewIndex ? 1 : 0);
    }
    final RowProjector projector = rowProjectorToBe;
    final UpsertingParallelIteratorFactory upsertParallelIteratorFactory =
        upsertParallelIteratorFactoryToBe;
    final QueryPlan queryPlan = plan;
    // Resize down to allow a subset of columns to be specifiable
    if (columnNodes.isEmpty() && columnIndexesToBe.length >= nValuesToSet) {
      nColumnsToSet = nValuesToSet;
      columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, nValuesToSet);
      pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, nValuesToSet);
    }

    if (nValuesToSet != nColumnsToSet) {
      throw new SQLExceptionInfo.Builder(SQLExceptionCode.UPSERT_COLUMN_NUMBERS_MISMATCH)
          .setMessage(
              "Numbers of columns: " + nColumnsToSet + ". Number of values: " + nValuesToSet)
          .build()
          .buildException();
    }

    final int[] columnIndexes = columnIndexesToBe;
    final int[] pkSlotIndexes = pkSlotIndexesToBe;
    final Set<PColumn> addViewColumns = addViewColumnsToBe;
    final Set<PColumn> overlapViewColumns = overlapViewColumnsToBe;

    // TODO: break this up into multiple functions
    ////////////////////////////////////////////////////////////////////
    // UPSERT SELECT
    /////////////////////////////////////////////////////////////////////
    if (valueNodes == null) {
      // Before we re-order, check that for updatable view columns
      // the projected expression either matches the column name or
      // is a constant with the same required value.
      throwIfNotUpdatable(tableRef, overlapViewColumnsToBe, targetColumns, projector, sameTable);

      ////////////////////////////////////////////////////////////////////
      // UPSERT SELECT run server-side (maybe)
      /////////////////////////////////////////////////////////////////////
      if (runOnServer) {
        // At most this array will grow bigger by the number of PK columns
        int[] allColumnsIndexes = Arrays.copyOf(columnIndexes, columnIndexes.length + nValuesToSet);
        int[] reverseColumnIndexes = new int[table.getColumns().size()];
        List<Expression> projectedExpressions =
            Lists.newArrayListWithExpectedSize(reverseColumnIndexes.length);
        Arrays.fill(reverseColumnIndexes, -1);
        for (int i = 0; i < nValuesToSet; i++) {
          projectedExpressions.add(projector.getColumnProjector(i).getExpression());
          reverseColumnIndexes[columnIndexes[i]] = i;
        }
        /*
         * Order projected columns and projected expressions with PK columns
         * leading order by slot position
         */
        int offset = table.getBucketNum() == null ? 0 : 1;
        for (int i = 0; i < table.getPKColumns().size() - offset; i++) {
          PColumn column = table.getPKColumns().get(i + offset);
          int pos = reverseColumnIndexes[column.getPosition()];
          if (pos == -1) {
            // Last PK column may be fixed width and nullable
            // We don't want to insert a null expression b/c
            // it's not valid to set a fixed width type to null.
            if (column.getDataType().isFixedWidth()) {
              continue;
            }
            // Add literal null for missing PK columns
            pos = projectedExpressions.size();
            Expression literalNull =
                LiteralExpression.newConstant(null, column.getDataType(), true);
            projectedExpressions.add(literalNull);
            allColumnsIndexes[pos] = column.getPosition();
          }
          // Swap select expression at pos with i
          Collections.swap(projectedExpressions, i, pos);
          // Swap column indexes and reverse column indexes too
          int tempPos = allColumnsIndexes[i];
          allColumnsIndexes[i] = allColumnsIndexes[pos];
          allColumnsIndexes[pos] = tempPos;
          reverseColumnIndexes[tempPos] = reverseColumnIndexes[i];
          reverseColumnIndexes[i] = i;
        }
        // If any pk slots are changing, be conservative and don't run this server side.
        // If the row ends up living in a different region, we'll get an error otherwise.
        for (int i = 0; i < table.getPKColumns().size(); i++) {
          PColumn column = table.getPKColumns().get(i);
          Expression source = projectedExpressions.get(i);
          if (source == null
              || !source.equals(
                  new ColumnRef(tableRef, column.getPosition()).newColumnExpression())) {
            // TODO: we could check the region boundaries to see if the pk will still be in it.
            runOnServer = false; // bail on running server side, since PK may be changing
            break;
          }
        }

        ////////////////////////////////////////////////////////////////////
        // UPSERT SELECT run server-side
        /////////////////////////////////////////////////////////////////////
        if (runOnServer) {
          // Iterate through columns being projected
          List<PColumn> projectedColumns =
              Lists.newArrayListWithExpectedSize(projectedExpressions.size());
          for (int i = 0; i < projectedExpressions.size(); i++) {
            // Must make new column if position has changed
            PColumn column = allColumns.get(allColumnsIndexes[i]);
            projectedColumns.add(column.getPosition() == i ? column : new PColumnImpl(column, i));
          }
          // Build table from projectedColumns
          PTable projectedTable = PTableImpl.makePTable(table, projectedColumns);

          SelectStatement select =
              SelectStatement.create(SelectStatement.COUNT_ONE, upsert.getHint());
          final RowProjector aggProjector =
              ProjectionCompiler.compile(queryPlan.getContext(), select, GroupBy.EMPTY_GROUP_BY);
          /*
           * Transfer over PTable representing subset of columns selected, but all PK columns.
           * Move columns setting PK first in pkSlot order, adding LiteralExpression of null for any missing ones.
           * Transfer over List<Expression> for projection.
           * In the region scan, evaluate expressions in order, collecting the first n columns for the PK and collecting non-PK columns into the mutation Map
           * Create the PRow and get the mutations, adding them to the batch
           */
          final StatementContext context = queryPlan.getContext();
          final Scan scan = context.getScan();
          scan.setAttribute(
              BaseScannerRegionObserver.UPSERT_SELECT_TABLE,
              UngroupedAggregateRegionObserver.serialize(projectedTable));
          scan.setAttribute(
              BaseScannerRegionObserver.UPSERT_SELECT_EXPRS,
              UngroupedAggregateRegionObserver.serialize(projectedExpressions));
          // Ignore order by - it has no impact
          final QueryPlan aggPlan =
              new AggregatePlan(
                  context,
                  select,
                  tableRef,
                  aggProjector,
                  null,
                  OrderBy.EMPTY_ORDER_BY,
                  null,
                  GroupBy.EMPTY_GROUP_BY,
                  null);
          return new MutationPlan() {

            @Override
            public PhoenixConnection getConnection() {
              return connection;
            }

            @Override
            public ParameterMetaData getParameterMetaData() {
              return queryPlan.getContext().getBindManager().getParameterMetaData();
            }

            @Override
            public StatementContext getContext() {
              return queryPlan.getContext();
            }

            @Override
            public MutationState execute() throws SQLException {
              ImmutableBytesWritable ptr = context.getTempPtr();
              tableRef.getTable().getIndexMaintainers(ptr);
              ServerCache cache = null;
              try {
                if (ptr.getLength() > 0) {
                  IndexMetaDataCacheClient client =
                      new IndexMetaDataCacheClient(connection, tableRef);
                  cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
                  byte[] uuidValue = cache.getId();
                  scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                }
                ResultIterator iterator = aggPlan.iterator();
                try {
                  Tuple row = iterator.next();
                  final long mutationCount =
                      (Long) aggProjector.getColumnProjector(0).getValue(row, PDataType.LONG, ptr);
                  return new MutationState(maxSize, connection) {
                    @Override
                    public long getUpdateCount() {
                      return mutationCount;
                    }
                  };
                } finally {
                  iterator.close();
                }
              } finally {
                if (cache != null) {
                  cache.close();
                }
              }
            }

            @Override
            public ExplainPlan getExplainPlan() throws SQLException {
              List<String> queryPlanSteps = aggPlan.getExplainPlan().getPlanSteps();
              List<String> planSteps =
                  Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
              planSteps.add("UPSERT ROWS");
              planSteps.addAll(queryPlanSteps);
              return new ExplainPlan(planSteps);
            }
          };
        }
      }

      ////////////////////////////////////////////////////////////////////
      // UPSERT SELECT run client-side
      /////////////////////////////////////////////////////////////////////
      return new MutationPlan() {

        @Override
        public PhoenixConnection getConnection() {
          return connection;
        }

        @Override
        public ParameterMetaData getParameterMetaData() {
          return queryPlan.getContext().getBindManager().getParameterMetaData();
        }

        @Override
        public StatementContext getContext() {
          return queryPlan.getContext();
        }

        @Override
        public MutationState execute() throws SQLException {
          ResultIterator iterator = queryPlan.iterator();
          if (upsertParallelIteratorFactory == null) {
            return upsertSelect(
                statement, tableRef, projector, iterator, columnIndexes, pkSlotIndexes);
          }
          upsertParallelIteratorFactory.setRowProjector(projector);
          upsertParallelIteratorFactory.setColumnIndexes(columnIndexes);
          upsertParallelIteratorFactory.setPkSlotIndexes(pkSlotIndexes);
          Tuple tuple;
          long totalRowCount = 0;
          while ((tuple = iterator.next()) != null) { // Runs query
            Cell kv = tuple.getValue(0);
            totalRowCount +=
                PDataType.LONG
                    .getCodec()
                    .decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
          }
          // Return the total number of rows that have been updated. In the case of auto commit
          // being off, the mutations will all be in the mutation state of the current connection.
          return new MutationState(maxSize, statement.getConnection(), totalRowCount);
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
          List<String> queryPlanSteps = queryPlan.getExplainPlan().getPlanSteps();
          List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
          planSteps.add("UPSERT SELECT");
          planSteps.addAll(queryPlanSteps);
          return new ExplainPlan(planSteps);
        }
      };
    }

    ////////////////////////////////////////////////////////////////////
    // UPSERT VALUES
    /////////////////////////////////////////////////////////////////////
    int nodeIndex = 0;
    // initialize values with constant byte values first
    final byte[][] values = new byte[nValuesToSet][];
    if (isTenantSpecific) {
      values[nodeIndex++] = connection.getTenantId().getBytes();
    }
    if (isSharedViewIndex) {
      values[nodeIndex++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
    }
    final int nodeIndexOffset = nodeIndex;
    // Allocate array based on size of all columns in table,
    // since some values may not be set (if they're nullable).
    final StatementContext context = new StatementContext(statement, resolver, new Scan());
    UpsertValuesCompiler expressionBuilder = new UpsertValuesCompiler(context);
    final List<Expression> constantExpressions =
        Lists.newArrayListWithExpectedSize(valueNodes.size());
    // First build all the expressions, as with sequences we want to collect them all first
    // and initialize them in one batch
    for (ParseNode valueNode : valueNodes) {
      if (!valueNode.isStateless()) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.VALUE_IN_UPSERT_NOT_CONSTANT)
            .build()
            .buildException();
      }
      PColumn column = allColumns.get(columnIndexes[nodeIndex]);
      expressionBuilder.setColumn(column);
      Expression expression = valueNode.accept(expressionBuilder);
      if (expression.getDataType() != null
          && !expression.getDataType().isCastableTo(column.getDataType())) {
        throw TypeMismatchException.newException(
            expression.getDataType(),
            column.getDataType(),
            "expression: " + expression.toString() + " in column " + column);
      }
      constantExpressions.add(expression);
      nodeIndex++;
    }
    return new MutationPlan() {

      @Override
      public PhoenixConnection getConnection() {
        return connection;
      }

      @Override
      public ParameterMetaData getParameterMetaData() {
        return context.getBindManager().getParameterMetaData();
      }

      @Override
      public StatementContext getContext() {
        return context;
      }

      @Override
      public MutationState execute() throws SQLException {
        ImmutableBytesWritable ptr = context.getTempPtr();
        final SequenceManager sequenceManager = context.getSequenceManager();
        // Next evaluate all the expressions
        int nodeIndex = nodeIndexOffset;
        Tuple tuple =
            sequenceManager.getSequenceCount() == 0 ? null : sequenceManager.newSequenceTuple(null);
        for (Expression constantExpression : constantExpressions) {
          PColumn column = allColumns.get(columnIndexes[nodeIndex]);
          constantExpression.evaluate(tuple, ptr);
          Object value = null;
          if (constantExpression.getDataType() != null) {
            value =
                constantExpression
                    .getDataType()
                    .toObject(
                        ptr,
                        constantExpression.getSortOrder(),
                        constantExpression.getMaxLength(),
                        constantExpression.getScale());
            if (!constantExpression.getDataType().isCoercibleTo(column.getDataType(), value)) {
              throw TypeMismatchException.newException(
                  constantExpression.getDataType(),
                  column.getDataType(),
                  "expression: " + constantExpression.toString() + " in column " + column);
            }
            if (!column
                .getDataType()
                .isSizeCompatible(
                    ptr,
                    value,
                    constantExpression.getDataType(),
                    constantExpression.getMaxLength(),
                    constantExpression.getScale(),
                    column.getMaxLength(),
                    column.getScale())) {
              throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY)
                  .setColumnName(column.getName().getString())
                  .setMessage("value=" + constantExpression.toString())
                  .build()
                  .buildException();
            }
          }
          column
              .getDataType()
              .coerceBytes(
                  ptr,
                  value,
                  constantExpression.getDataType(),
                  constantExpression.getMaxLength(),
                  constantExpression.getScale(),
                  constantExpression.getSortOrder(),
                  column.getMaxLength(),
                  column.getScale(),
                  column.getSortOrder());
          if (overlapViewColumns.contains(column)
              && Bytes.compareTo(
                      ptr.get(),
                      ptr.getOffset(),
                      ptr.getLength(),
                      column.getViewConstant(),
                      0,
                      column.getViewConstant().length - 1)
                  != 0) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_UPDATE_VIEW_COLUMN)
                .setColumnName(column.getName().getString())
                .setMessage("value=" + constantExpression.toString())
                .build()
                .buildException();
          }
          values[nodeIndex] = ByteUtil.copyKeyBytesIfNecessary(ptr);
          nodeIndex++;
        }
        // Add columns based on view
        for (PColumn column : addViewColumns) {
          if (IndexUtil.getViewConstantValue(column, ptr)) {
            values[nodeIndex++] = ByteUtil.copyKeyBytesIfNecessary(ptr);
          } else {
            throw new IllegalStateException();
          }
        }
        Map<ImmutableBytesPtr, Map<PColumn, byte[]>> mutation = Maps.newHashMapWithExpectedSize(1);
        setValues(values, pkSlotIndexes, columnIndexes, tableRef.getTable(), mutation);
        return new MutationState(tableRef, mutation, 0, maxSize, connection);
      }

      @Override
      public ExplainPlan getExplainPlan() throws SQLException {
        List<String> planSteps = Lists.newArrayListWithExpectedSize(2);
        if (context.getSequenceManager().getSequenceCount() > 0) {
          planSteps.add(
              "CLIENT RESERVE " + context.getSequenceManager().getSequenceCount() + " SEQUENCES");
        }
        planSteps.add("PUT SINGLE ROW");
        return new ExplainPlan(planSteps);
      }
    };
  }
Code Example #28
File: ScanUtil.java Project: hzg0102/phoenix
  public static int setKey(
      RowKeySchema schema,
      List<List<KeyRange>> slots,
      int[] slotSpan,
      int[] position,
      Bound bound,
      byte[] key,
      int byteOffset,
      int slotStartIndex,
      int slotEndIndex,
      int schemaStartIndex) {
    int offset = byteOffset;
    boolean lastInclusiveUpperSingleKey = false;
    boolean anyInclusiveUpperRangeKey = false;
    // The index used for slots should be incremented by 1,
    // but the index for the field it represents in the schema
    // should be incremented by 1 + the value at the current slotSpan index;
    // slotSpan stores the number of columns beyond one that the range spans.
    Field field = null;
    int i = slotStartIndex, fieldIndex = ScanUtil.getRowKeyPosition(slotSpan, slotStartIndex);
    for (i = slotStartIndex; i < slotEndIndex; i++) {
      // Build up the key by appending the bound of each key range
      // from the current position of each slot.
      KeyRange range = slots.get(i).get(position[i]);
      // Use last slot in a multi-span column to determine if fixed width
      field = schema.getField(fieldIndex + slotSpan[i]);
      boolean isFixedWidth = field.getDataType().isFixedWidth();
      fieldIndex += slotSpan[i] + 1;
      /*
       * If the current slot is unbound then stop if:
       * 1) setting the upper bound. There's no value in
       *    continuing because nothing will be filtered.
       * 2) setting the lower bound when the type is fixed length
       *    for the same reason. However, if the type is variable width
       *    continue building the key because null values will be filtered
       *    since our separator byte will be appended and incremented.
       */
      if (range.isUnbound(bound) && (bound == Bound.UPPER || isFixedWidth)) {
        break;
      }
      byte[] bytes = range.getRange(bound);
      System.arraycopy(bytes, 0, key, offset, bytes.length);
      offset += bytes.length;
      /*
       * We must add a terminator to a variable length key even for the last PK column if
       * the lower key is non-inclusive or the upper key is inclusive. Otherwise, we'd be
       * incrementing the key value itself, and thus bumping it up too much.
       */
      boolean inclusiveUpper = range.isInclusive(bound) && bound == Bound.UPPER;
      boolean exclusiveLower = !range.isInclusive(bound) && bound == Bound.LOWER;
      // If we are setting the upper bound using an inclusive single key, we remember
      // to increment the key if we exit the loop after this iteration.
      //
      // We also remember to increment the key if we are setting the upper bound with an
      // inclusive range key.
      //
      // The two flags cannot be combined into one: for a single-key inclusive slot
      // followed by a range-exclusive slot, no increment is needed at the end, but a
      // combined flag would be set to true by the single inclusive key in the middle
      // of the key slots.
      lastInclusiveUpperSingleKey = range.isSingleKey() && inclusiveUpper;
      anyInclusiveUpperRangeKey |= !range.isSingleKey() && inclusiveUpper;
      // A null or empty byte array is always represented as a zero byte
      byte sepByte =
          SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), bytes.length == 0, field);

      if (!isFixedWidth
          && (fieldIndex < schema.getMaxFields()
              || inclusiveUpper
              || exclusiveLower
              || sepByte == QueryConstants.DESC_SEPARATOR_BYTE)) {
        key[offset++] = sepByte;
        // Set lastInclusiveUpperSingleKey back to false if this is the last pk column
        // as we don't want to increment the null byte in this case
        lastInclusiveUpperSingleKey &= i < schema.getMaxFields() - 1;
      }
      // If we are setting the lower bound with an exclusive range key, we need to bump the
      // slot up for each key part. For an upper bound, we bump up an inclusive key, but
      // only after the last key part.
      if (exclusiveLower) {
        if (!ByteUtil.nextKey(key, offset)) {
          // Special case for not being able to increment.
          // In this case we return a negative byteOffset to
          // remove this part from the key being formed. Since the
          // key has overflowed, this means that we should not
          // have an end key specified.
          return -byteOffset;
        }
        // We're filtering on values being non null here, but we still need the 0xFF
        // terminator, since DESC keys ignore the last byte as it's expected to be
        // the terminator. Without this, we'd ignore the separator byte that was
        // just added and incremented.
        if (!isFixedWidth
            && bytes.length == 0
            && SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), false, field)
                == QueryConstants.DESC_SEPARATOR_BYTE) {
          key[offset++] = QueryConstants.DESC_SEPARATOR_BYTE;
        }
      }
    }
    if (lastInclusiveUpperSingleKey || anyInclusiveUpperRangeKey) {
      if (!ByteUtil.nextKey(key, offset)) {
        // Special case for not being able to increment.
        // In this case we return a negative byteOffset to
        // remove this part from the key being formed. Since the
        // key has overflowed, this means that we should not
        // have an end key specified.
        return -byteOffset;
      }
    }
    // Remove trailing separator bytes, since the columns may have been added
    // after the table has data, in which case there won't be a separator
    // byte.
    if (bound == Bound.LOWER) {
      while (--i >= schemaStartIndex
          && offset > byteOffset
          && !(field = schema.getField(--fieldIndex)).getDataType().isFixedWidth()
          && field.getSortOrder() == SortOrder.ASC
          && key[offset - 1] == QueryConstants.SEPARATOR_BYTE) {
        offset--;
        fieldIndex -= slotSpan[i];
      }
    }
    return offset - byteOffset;
  }
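setKey above bumps the key with ByteUtil.nextKey for an exclusive lower bound or a trailing inclusive upper bound, and signals overflow by returning a negated byteOffset. The following is a minimal, self-contained sketch of that increment, not Phoenix's actual ByteUtil implementation, just an illustration of the overflow case the negative return value covers:

public class NextKeySketch {
  // Increment key[0..length) as an unsigned big-endian number.
  // Returns false when every byte is 0xFF, i.e. the increment overflows
  // and no usable bound can be formed -- the case setKey reports by
  // returning -byteOffset.
  static boolean nextKey(byte[] key, int length) {
    int i = length - 1;
    while (i >= 0 && key[i] == (byte) 0xFF) {
      key[i--] = 0; // carry into the next more-significant byte
    }
    if (i < 0) {
      return false; // overflowed past the most significant byte
    }
    key[i]++;
    return true;
  }

  public static void main(String[] args) {
    byte[] k1 = {0x01, (byte) 0xFF};
    System.out.println(nextKey(k1, k1.length)); // true; key becomes 0x02 0x00
    byte[] k2 = {(byte) 0xFF, (byte) 0xFF};
    System.out.println(nextKey(k2, k2.length)); // false; cannot increment
  }
}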
Code example #29
File: SchemaUtil.java Project: gkanade/phoenix
  public static boolean isSystemTable(byte[] fullTableName) {
    String schemaName = SchemaUtil.getSchemaNameFromFullName(fullTableName);
    return QueryConstants.SYSTEM_SCHEMA_NAME.equals(schemaName);
  }
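A brief usage sketch, assuming the standard Phoenix packages and that QueryConstants.SYSTEM_SCHEMA_NAME is "SYSTEM" (the table names here are illustrative):

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.util.SchemaUtil;

public class IsSystemTableSketch {
  public static void main(String[] args) {
    // SYSTEM.CATALOG lives in the SYSTEM schema, so it qualifies.
    System.out.println(SchemaUtil.isSystemTable(Bytes.toBytes("SYSTEM.CATALOG")));   // true
    System.out.println(SchemaUtil.isSystemTable(Bytes.toBytes("MYSCHEMA.MYTABLE"))); // false
  }
}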
Code example #30
File: RowKeySchemaTest.java Project: CHATTG1/phoenix
  private void assertIteration(String dataColumns, String pk, Object[] values, String dataProps)
      throws Exception {
    String schemaName = "";
    String tableName = "T";
    Connection conn = DriverManager.getConnection(getUrl());
    String fullTableName =
        SchemaUtil.getTableName(
            SchemaUtil.normalizeIdentifier(schemaName), SchemaUtil.normalizeIdentifier(tableName));
    conn.createStatement()
        .execute(
            "CREATE TABLE "
                + fullTableName
                + "("
                + dataColumns
                + " CONSTRAINT pk PRIMARY KEY ("
                + pk
                + "))  "
                + (dataProps.isEmpty() ? "" : dataProps));
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), fullTableName));
    StringBuilder buf = new StringBuilder("UPSERT INTO " + fullTableName + " VALUES(");
    for (int i = 0; i < values.length; i++) {
      buf.append("?,");
    }
    buf.setCharAt(buf.length() - 1, ')');
    PreparedStatement stmt = conn.prepareStatement(buf.toString());
    for (int i = 0; i < values.length; i++) {
      stmt.setObject(i + 1, values[i]);
    }
    stmt.execute();
    Iterator<Pair<byte[], List<KeyValue>>> iterator =
        PhoenixRuntime.getUncommittedDataIterator(conn);
    List<KeyValue> dataKeyValues = iterator.next().getSecond();
    KeyValue keyValue = dataKeyValues.get(0);

    List<SortOrder> sortOrders = Lists.newArrayListWithExpectedSize(table.getPKColumns().size());
    for (PColumn col : table.getPKColumns()) {
      sortOrders.add(col.getSortOrder());
    }
    RowKeySchema schema = table.getRowKeySchema();
    int minOffset = keyValue.getRowOffset();
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    int nExpectedValues = values.length;
    for (int i = values.length - 1; i >= 0; i--) {
      if (values[i] == null) {
        nExpectedValues--;
      } else {
        break;
      }
    }
    int i = 0;
    int maxOffset =
        schema.iterator(keyValue.getRowArray(), minOffset, keyValue.getRowLength(), ptr);
    for (i = 0; i < schema.getFieldCount(); i++) {
      Boolean hasValue = schema.next(ptr, i, maxOffset);
      if (hasValue == null) {
        break;
      }
      assertTrue(hasValue);
      PDataType type = PDataType.fromLiteral(values[i]);
      SortOrder sortOrder = sortOrders.get(i);
      Object value = type.toObject(ptr, schema.getField(i).getDataType(), sortOrder);
      assertEquals(values[i], value);
    }
    assertEquals(nExpectedValues, i);
    assertNull(schema.next(ptr, i, maxOffset));

    for (i--; i >= 0; i--) {
      Boolean hasValue = schema.previous(ptr, i, minOffset);
      if (hasValue == null) {
        break;
      }
      assertTrue(hasValue);
      PDataType type = PDataType.fromLiteral(values[i]);
      SortOrder sortOrder = sortOrders.get(i);
      Object value = type.toObject(ptr, schema.getField(i).getDataType(), sortOrder);
      assertEquals(values[i], value);
    }
    assertEquals(-1, i);
    assertNull(schema.previous(ptr, i, minOffset));
    // Close only after the uncommitted data has been read; closing earlier
    // would mean preparing and executing the upsert on a closed connection.
    conn.close();
  }
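A hedged example of how this helper might be invoked from a test (the column definitions and values are hypothetical): because the generated UPSERT binds one parameter per column and the assertions walk the row-key fields, every column should be part of the primary key so the supplied values line up with the iterated fields.

@Test
public void testTwoColumnPKIteration() throws Exception {
  // Hypothetical invocation: both columns are in the PK, so values[]
  // matches the row-key fields walked by schema.next()/previous().
  assertIteration("k1 VARCHAR, k2 INTEGER NOT NULL", "k1, k2", new Object[] {"abc", 1}, "");
}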