private static void setValues(
    byte[][] values,
    int[] pkSlotIndex,
    int[] columnIndexes,
    PTable table,
    Map<ImmutableBytesPtr, Map<PColumn, byte[]>> mutation) {
  Map<PColumn, byte[]> columnValues = Maps.newHashMapWithExpectedSize(columnIndexes.length);
  byte[][] pkValues = new byte[table.getPKColumns().size()][];
  // If the table uses salting, the first byte is the salting byte; set it to a
  // single placeholder byte here and we will fill in the byte later in PRowImpl.
  if (table.getBucketNum() != null) {
    pkValues[0] = new byte[] {0};
  }
  for (int i = 0; i < values.length; i++) {
    byte[] value = values[i];
    PColumn column = table.getColumns().get(columnIndexes[i]);
    if (SchemaUtil.isPKColumn(column)) {
      pkValues[pkSlotIndex[i]] = value;
    } else {
      columnValues.put(column, value);
    }
  }
  ImmutableBytesPtr ptr = new ImmutableBytesPtr();
  table.newKey(ptr, pkValues);
  mutation.put(ptr, columnValues);
}
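A hypothetical illustration of the parallel arrays setValues expects; the statement, table shape, and placeholder names below are invented for this sketch and do not appear in the source:

// For UPSERT INTO T (PK2, COL1, PK1) VALUES (...) against a non-salted table
// with primary key (PK1, PK2), the arrays are parallel by position:
//   values        = { <pk2 bytes>,  <col1 bytes>, <pk1 bytes> }
//   columnIndexes = { pos(PK2),     pos(COL1),    pos(PK1)    } // into table.getColumns()
//   pkSlotIndexes = { 1,            <unused>,     0           } // row-key slot per PK column
// setValues assembles the row key from the PK slots via table.newKey(ptr, pkValues)
// and collects the non-PK bytes into the per-row column map consumed by MutationState.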
/**
 * Encode the primary key values from the table as a byte array. The values must be in the same
 * order as the primary key constraint. If the connection and table are both tenant-specific, the
 * tenant ID column must not be present in the values.
 *
 * @param conn an open connection
 * @param fullTableName the full table name
 * @param values the values of the primary key columns ordered in the same order as the primary
 *     key constraint
 * @return the encoded byte array
 * @throws SQLException if the table cannot be found or the incorrect number of values is
 *     provided
 * @see #decodePK(Connection, String, byte[]) to decode the byte[] back to the values
 */
public static byte[] encodePK(Connection conn, String fullTableName, Object[] values)
    throws SQLException {
  PTable table = getTable(conn, fullTableName);
  PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
  int offset =
      (table.getBucketNum() == null ? 0 : 1)
          + (table.isMultiTenant() && pconn.getTenantId() != null ? 1 : 0);
  List<PColumn> pkColumns = table.getPKColumns();
  if (pkColumns.size() - offset != values.length) {
    throw new SQLException(
        "Expected " + (pkColumns.size() - offset) + " but got " + values.length);
  }
  PDataType type = null;
  TrustedByteArrayOutputStream output =
      new TrustedByteArrayOutputStream(table.getRowKeySchema().getEstimatedValueLength());
  try {
    for (int i = offset; i < pkColumns.size(); i++) {
      if (type != null && !type.isFixedWidth()) {
        output.write(QueryConstants.SEPARATOR_BYTE);
      }
      type = pkColumns.get(i).getDataType();
      byte[] value = type.toBytes(values[i - offset]);
      output.write(value);
    }
    return output.toByteArray();
  } finally {
    try {
      output.close();
    } catch (IOException e) {
      throw new RuntimeException(e); // Impossible
    }
  }
}
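A minimal usage sketch, assuming these statics are exposed on Phoenix's PhoenixRuntime utility class and that MY_TABLE is a hypothetical table with primary key (ID INTEGER, NAME VARCHAR). The decodePK counterpart is the one referenced by the javadoc above:

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.phoenix.util.PhoenixRuntime;

class EncodePKSketch {
  static void run() throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
      // PK values in constraint order; on a tenant-specific connection to a
      // multi-tenant table, the leading tenant ID column is omitted.
      byte[] rowKey = PhoenixRuntime.encodePK(conn, "MY_TABLE", new Object[] {42, "foo"});
      Object[] pkValues = PhoenixRuntime.decodePK(conn, "MY_TABLE", rowKey); // {42, "foo"}
    }
  }
}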
public static short getMaxKeySeq(PTable table) {
  int offset = 0;
  if (table.getBucketNum() != null) {
    offset++;
  }
  // TODO: for tenant-specific table on tenant-specific connection,
  // we should subtract one for tenant column and another one for
  // index ID
  return (short) (table.getPKColumns().size() - offset);
}
/**
 * Estimate the max key length in bytes of the PK for a given table
 *
 * @param table the table
 * @return the max PK length
 */
public static int estimateKeyLength(PTable table) {
  int maxKeyLength = 0;
  // Calculate the max length of a key (variable-width parts are costed at VAR_LENGTH_ESTIMATE)
  int i = 0;
  List<PColumn> columns = table.getPKColumns();
  while (i < columns.size()) {
    PColumn keyColumn = columns.get(i++);
    PDataType type = keyColumn.getDataType();
    Integer maxLength = keyColumn.getMaxLength();
    maxKeyLength +=
        !type.isFixedWidth()
            ? VAR_LENGTH_ESTIMATE
            : maxLength == null ? type.getByteSize() : maxLength;
  }
  return maxKeyLength;
}
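A worked example of the arithmetic, for a hypothetical table:

// For a primary key of (ID INTEGER, CREATED DATE, NAME VARCHAR),
// estimateKeyLength returns 4 + 8 + VAR_LENGTH_ESTIMATE: INTEGER and DATE are
// fixed width (4 and 8 bytes respectively), while VARCHAR is variable width
// and falls back to the VAR_LENGTH_ESTIMATE constant.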
public MutationPlan compile(UpsertStatement upsert) throws SQLException {
  final PhoenixConnection connection = statement.getConnection();
  ConnectionQueryServices services = connection.getQueryServices();
  final int maxSize =
      services
          .getProps()
          .getInt(
              QueryServices.MAX_MUTATION_SIZE_ATTRIB,
              QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
  final ColumnResolver resolver = FromCompiler.getResolverForMutation(upsert, connection);
  final TableRef tableRef = resolver.getTables().get(0);
  final PTable table = tableRef.getTable();
  if (table.getType() == PTableType.VIEW) {
    if (table.getViewType().isReadOnly()) {
      throw new ReadOnlyTableException(
          table.getSchemaName().getString(), table.getTableName().getString());
    }
  }
  boolean isSalted = table.getBucketNum() != null;
  final boolean isTenantSpecific = table.isMultiTenant() && connection.getTenantId() != null;
  final boolean isSharedViewIndex = table.getViewIndexId() != null;
  String tenantId = isTenantSpecific ? connection.getTenantId().getString() : null;
  int posOffset = isSalted ? 1 : 0;
  // Set up an array of column indexes parallel to the values that are going to be set
  List<ColumnName> columnNodes = upsert.getColumns();
  final List<PColumn> allColumns = table.getColumns();
  Set<PColumn> addViewColumnsToBe = Collections.emptySet();
  Set<PColumn> overlapViewColumnsToBe = Collections.emptySet();
  int[] columnIndexesToBe;
  int nColumnsToSet = 0;
  int[] pkSlotIndexesToBe;
  List<PColumn> targetColumns;
  if (table.getViewType() == ViewType.UPDATABLE) {
    addViewColumnsToBe = Sets.newLinkedHashSetWithExpectedSize(allColumns.size());
    for (PColumn column : allColumns) {
      if (column.getViewConstant() != null) {
        addViewColumnsToBe.add(column);
      }
    }
  }
  ImmutableBytesWritable ptr = new ImmutableBytesWritable();
  // Allow a full-row upsert if no columns (or only dynamic ones) are specified
  // and the value counts match
  if (columnNodes.isEmpty()
      || columnNodes.size() == upsert.getTable().getDynamicColumns().size()) {
    nColumnsToSet = allColumns.size() - posOffset;
    columnIndexesToBe = new int[nColumnsToSet];
    pkSlotIndexesToBe = new int[columnIndexesToBe.length];
    targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
    targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
    int minPKPos = 0;
    if (isTenantSpecific) {
      PColumn tenantColumn = table.getPKColumns().get(minPKPos);
      columnIndexesToBe[minPKPos] = tenantColumn.getPosition();
      targetColumns.set(minPKPos, tenantColumn);
      minPKPos++;
    }
    if (isSharedViewIndex) {
      PColumn indexIdColumn = table.getPKColumns().get(minPKPos);
      columnIndexesToBe[minPKPos] = indexIdColumn.getPosition();
      targetColumns.set(minPKPos, indexIdColumn);
      minPKPos++;
    }
    for (int i = posOffset, j = 0; i < allColumns.size(); i++) {
      PColumn column = allColumns.get(i);
      if (SchemaUtil.isPKColumn(column)) {
        pkSlotIndexesToBe[i - posOffset] = j + posOffset;
        if (j++ < minPKPos) { // Skip, as it's already been set above
          continue;
        }
        minPKPos = 0;
      }
      columnIndexesToBe[i - posOffset + minPKPos] = i;
      targetColumns.set(i - posOffset + minPKPos, column);
    }
    if (!addViewColumnsToBe.isEmpty()) {
      // All view columns overlap in this case
      overlapViewColumnsToBe = addViewColumnsToBe;
      addViewColumnsToBe = Collections.emptySet();
    }
  } else {
    // Size for the worst case
    int numColsInUpsert = columnNodes.size();
    nColumnsToSet =
        numColsInUpsert
            + addViewColumnsToBe.size()
            + (isTenantSpecific ? 1 : 0)
            + (isSharedViewIndex ? 1 : 0);
    columnIndexesToBe = new int[nColumnsToSet];
    pkSlotIndexesToBe = new int[columnIndexesToBe.length];
    targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
    targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
    // TODO: necessary? So we'll get an AIOB exception if it's not replaced
    Arrays.fill(columnIndexesToBe, -1);
    Arrays.fill(pkSlotIndexesToBe, -1);
    BitSet pkColumnsSet = new BitSet(table.getPKColumns().size());
    int i = 0;
    // Add the tenant column directly, as we don't want to resolve it (that would fail)
    if (isTenantSpecific) {
      PColumn tenantColumn = table.getPKColumns().get(i + posOffset);
      columnIndexesToBe[i] = tenantColumn.getPosition();
      pkColumnsSet.set(pkSlotIndexesToBe[i] = i + posOffset);
      targetColumns.set(i, tenantColumn);
      i++;
    }
    if (isSharedViewIndex) {
      PColumn indexIdColumn = table.getPKColumns().get(i + posOffset);
      columnIndexesToBe[i] = indexIdColumn.getPosition();
      pkColumnsSet.set(pkSlotIndexesToBe[i] = i + posOffset);
      targetColumns.set(i, indexIdColumn);
      i++;
    }
    for (ColumnName colName : columnNodes) {
      ColumnRef ref =
          resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName());
      PColumn column = ref.getColumn();
      if (IndexUtil.getViewConstantValue(column, ptr)) {
        if (overlapViewColumnsToBe.isEmpty()) {
          overlapViewColumnsToBe = Sets.newHashSetWithExpectedSize(addViewColumnsToBe.size());
        }
        nColumnsToSet--;
        overlapViewColumnsToBe.add(column);
        addViewColumnsToBe.remove(column);
      }
      columnIndexesToBe[i] = ref.getColumnPosition();
      targetColumns.set(i, column);
      if (SchemaUtil.isPKColumn(column)) {
        pkColumnsSet.set(pkSlotIndexesToBe[i] = ref.getPKSlotPosition());
      }
      i++;
    }
    for (PColumn column : addViewColumnsToBe) {
      columnIndexesToBe[i] = column.getPosition();
      targetColumns.set(i, column);
      if (SchemaUtil.isPKColumn(column)) {
        pkColumnsSet.set(pkSlotIndexesToBe[i] = SchemaUtil.getPKPosition(table, column));
      }
      i++;
    }
    for (i = posOffset; i < table.getPKColumns().size(); i++) {
      PColumn pkCol = table.getPKColumns().get(i);
      if (!pkColumnsSet.get(i)) {
        if (!pkCol.isNullable()) {
          throw new ConstraintViolationException(
              table.getName().getString()
                  + "."
                  + pkCol.getName().getString()
                  + " may not be null");
        }
      }
    }
  }
  List<ParseNode> valueNodes = upsert.getValues();
  QueryPlan plan = null;
  RowProjector rowProjectorToBe = null;
  final int nValuesToSet;
  boolean sameTable = false;
  boolean runOnServer = false;
  UpsertingParallelIteratorFactory upsertParallelIteratorFactoryToBe = null;
  final boolean isAutoCommit = connection.getAutoCommit();
  if (valueNodes == null) {
    SelectStatement select = upsert.getSelect();
    assert (select != null);
    select = SubselectRewriter.flatten(select, connection);
    ColumnResolver selectResolver = FromCompiler.getResolverForQuery(select, connection);
    select = StatementNormalizer.normalize(select, selectResolver);
    select = prependTenantAndViewConstants(table, select, tenantId, addViewColumnsToBe);
    sameTable =
        select.getFrom().size() == 1 && tableRef.equals(selectResolver.getTables().get(0));
    /* We can run the upsert in a coprocessor if:
     * 1) the FROM has only one table and the INTO table matches the FROM table
     * 2) the select query isn't doing aggregation
     * 3) autoCommit is on
     * 4) the table is not immutable, as the client is the one that figures out the
     *    additional puts for index tables.
     * 5) there is no LIMIT clause
     * Otherwise, run the query to pull the data from the server
     * and populate the MutationState (up to a limit).
     */
    runOnServer =
        sameTable
            && isAutoCommit
            && !table.isImmutableRows()
            && !select.isAggregate()
            && !select.isDistinct()
            && select.getLimit() == null
            && table.getBucketNum() == null;
    ParallelIteratorFactory parallelIteratorFactory;
    if (select.isAggregate() || select.isDistinct() || select.getLimit() != null) {
      parallelIteratorFactory = null;
    } else {
      // We can pipeline the upsert select instead of spooling everything to disk first,
      // if we don't have any post-processing that's required.
      parallelIteratorFactory =
          upsertParallelIteratorFactoryToBe =
              new UpsertingParallelIteratorFactory(connection, tableRef);
    }
    // If we may be able to run on the server, add a hint that favors using the data table
    // if all else is equal.
    // TODO: it'd be nice if we could figure out in advance if the PK is potentially changing,
    // as this would disallow running on the server. We currently use the row projector we
    // get back to figure this out.
    HintNode hint = upsert.getHint();
    if (!upsert.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
      hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
    }
    select = SelectStatement.create(select, hint);
    // Pass the scan through if the upsert and select use the same table, so that the
    // projection is computed correctly. Use the optimizer to choose the best plan.
    plan =
        new QueryOptimizer(services)
            .optimize(statement, select, selectResolver, targetColumns, parallelIteratorFactory);
    runOnServer &= plan.getTableRef().equals(tableRef);
    rowProjectorToBe = plan.getProjector();
    nValuesToSet = rowProjectorToBe.getColumnCount();
    // Cannot auto commit if doing aggregation or topN or salted.
    // Salted causes problems because the row may end up living on a different region.
  } else {
    nValuesToSet =
        valueNodes.size()
            + addViewColumnsToBe.size()
            + (isTenantSpecific ? 1 : 0)
            + (isSharedViewIndex ? 1 : 0);
  }
  final RowProjector projector = rowProjectorToBe;
  final UpsertingParallelIteratorFactory upsertParallelIteratorFactory =
      upsertParallelIteratorFactoryToBe;
  final QueryPlan queryPlan = plan;
  // Resize down to allow a subset of columns to be specifiable
  if (columnNodes.isEmpty() && columnIndexesToBe.length >= nValuesToSet) {
    nColumnsToSet = nValuesToSet;
    columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, nValuesToSet);
    pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, nValuesToSet);
  }
  if (nValuesToSet != nColumnsToSet) {
    throw new SQLExceptionInfo.Builder(SQLExceptionCode.UPSERT_COLUMN_NUMBERS_MISMATCH)
        .setMessage(
            "Number of columns: " + nColumnsToSet + ". Number of values: " + nValuesToSet)
        .build()
        .buildException();
  }
  final int[] columnIndexes = columnIndexesToBe;
  final int[] pkSlotIndexes = pkSlotIndexesToBe;
  final Set<PColumn> addViewColumns = addViewColumnsToBe;
  final Set<PColumn> overlapViewColumns = overlapViewColumnsToBe;
  // TODO: break this up into multiple functions
  ////////////////////////////////////////////////////////////////////
  // UPSERT SELECT
  ////////////////////////////////////////////////////////////////////
  if (valueNodes == null) {
    // Before we re-order, check that for updatable view columns
    // the projected expression either matches the column name or
    // is a constant with the same required value.
    throwIfNotUpdatable(tableRef, overlapViewColumnsToBe, targetColumns, projector, sameTable);
    ////////////////////////////////////////////////////////////////////
    // UPSERT SELECT run server-side (maybe)
    ////////////////////////////////////////////////////////////////////
    if (runOnServer) {
      // At most this array will grow bigger by the number of PK columns
      int[] allColumnsIndexes =
          Arrays.copyOf(columnIndexes, columnIndexes.length + nValuesToSet);
      int[] reverseColumnIndexes = new int[table.getColumns().size()];
      List<Expression> projectedExpressions =
          Lists.newArrayListWithExpectedSize(reverseColumnIndexes.length);
      Arrays.fill(reverseColumnIndexes, -1);
      for (int i = 0; i < nValuesToSet; i++) {
        projectedExpressions.add(projector.getColumnProjector(i).getExpression());
        reverseColumnIndexes[columnIndexes[i]] = i;
      }
      /*
       * Order the projected columns and projected expressions with the PK columns
       * leading, ordered by slot position
       */
      int offset = table.getBucketNum() == null ? 0 : 1;
      for (int i = 0; i < table.getPKColumns().size() - offset; i++) {
        PColumn column = table.getPKColumns().get(i + offset);
        int pos = reverseColumnIndexes[column.getPosition()];
        if (pos == -1) {
          // The last PK column may be fixed width and nullable.
          // We don't want to insert a null expression because
          // it's not valid to set a fixed width type to null.
          if (column.getDataType().isFixedWidth()) {
            continue;
          }
          // Add a literal null for missing PK columns
          pos = projectedExpressions.size();
          Expression literalNull = LiteralExpression.newConstant(null, column.getDataType(), true);
          projectedExpressions.add(literalNull);
          allColumnsIndexes[pos] = column.getPosition();
        }
        // Swap the select expression at pos with i
        Collections.swap(projectedExpressions, i, pos);
        // Swap the column indexes and reverse column indexes too
        int tempPos = allColumnsIndexes[i];
        allColumnsIndexes[i] = allColumnsIndexes[pos];
        allColumnsIndexes[pos] = tempPos;
        reverseColumnIndexes[tempPos] = reverseColumnIndexes[i];
        reverseColumnIndexes[i] = i;
      }
      // If any pk slots are changing, be conservative and don't run this server-side.
      // If the row ends up living in a different region, we'll get an error otherwise.
      for (int i = 0; i < table.getPKColumns().size(); i++) {
        PColumn column = table.getPKColumns().get(i);
        Expression source = projectedExpressions.get(i);
        if (source == null
            || !source.equals(
                new ColumnRef(tableRef, column.getPosition()).newColumnExpression())) {
          // TODO: we could check the region boundaries to see if the pk will still be in it.
          runOnServer = false; // bail on running server side, since the PK may be changing
          break;
        }
      }
      ////////////////////////////////////////////////////////////////////
      // UPSERT SELECT run server-side
      ////////////////////////////////////////////////////////////////////
      if (runOnServer) {
        // Iterate through the columns being projected
        List<PColumn> projectedColumns =
            Lists.newArrayListWithExpectedSize(projectedExpressions.size());
        for (int i = 0; i < projectedExpressions.size(); i++) {
          // Must make a new column if the position has changed
          PColumn column = allColumns.get(allColumnsIndexes[i]);
          projectedColumns.add(column.getPosition() == i ? column : new PColumnImpl(column, i));
        }
        // Build the table from projectedColumns
        PTable projectedTable = PTableImpl.makePTable(table, projectedColumns);
        SelectStatement select =
            SelectStatement.create(SelectStatement.COUNT_ONE, upsert.getHint());
        final RowProjector aggProjector =
            ProjectionCompiler.compile(queryPlan.getContext(), select, GroupBy.EMPTY_GROUP_BY);
        /*
         * Transfer over the PTable representing the subset of columns selected, but all PK
         * columns. Move the columns setting the PK first in pkSlot order, adding a
         * LiteralExpression of null for any missing ones. Transfer over the List<Expression>
         * for the projection. In the region scan, evaluate the expressions in order,
         * collecting the first n columns for the PK and the non-PK columns in the mutation
         * Map. Create the PRow and get the mutations, adding them to the batch.
         */
        final StatementContext context = queryPlan.getContext();
        final Scan scan = context.getScan();
        scan.setAttribute(
            BaseScannerRegionObserver.UPSERT_SELECT_TABLE,
            UngroupedAggregateRegionObserver.serialize(projectedTable));
        scan.setAttribute(
            BaseScannerRegionObserver.UPSERT_SELECT_EXPRS,
            UngroupedAggregateRegionObserver.serialize(projectedExpressions));
        // Ignore order by - it has no impact
        final QueryPlan aggPlan =
            new AggregatePlan(
                context,
                select,
                tableRef,
                aggProjector,
                null,
                OrderBy.EMPTY_ORDER_BY,
                null,
                GroupBy.EMPTY_GROUP_BY,
                null);
        return new MutationPlan() {
          @Override
          public PhoenixConnection getConnection() {
            return connection;
          }

          @Override
          public ParameterMetaData getParameterMetaData() {
            return queryPlan.getContext().getBindManager().getParameterMetaData();
          }

          @Override
          public StatementContext getContext() {
            return queryPlan.getContext();
          }

          @Override
          public MutationState execute() throws SQLException {
            ImmutableBytesWritable ptr = context.getTempPtr();
            tableRef.getTable().getIndexMaintainers(ptr);
            ServerCache cache = null;
            try {
              if (ptr.getLength() > 0) {
                IndexMetaDataCacheClient client =
                    new IndexMetaDataCacheClient(connection, tableRef);
                cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
                byte[] uuidValue = cache.getId();
                scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
              }
              ResultIterator iterator = aggPlan.iterator();
              try {
                Tuple row = iterator.next();
                final long mutationCount =
                    (Long) aggProjector.getColumnProjector(0).getValue(row, PDataType.LONG, ptr);
                return new MutationState(maxSize, connection) {
                  @Override
                  public long getUpdateCount() {
                    return mutationCount;
                  }
                };
              } finally {
                iterator.close();
              }
            } finally {
              if (cache != null) {
                cache.close();
              }
            }
          }

          @Override
          public ExplainPlan getExplainPlan() throws SQLException {
            List<String> queryPlanSteps = aggPlan.getExplainPlan().getPlanSteps();
            List<String> planSteps =
                Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
            planSteps.add("UPSERT ROWS");
            planSteps.addAll(queryPlanSteps);
            return new ExplainPlan(planSteps);
          }
        };
      }
    }
    ////////////////////////////////////////////////////////////////////
    // UPSERT SELECT run client-side
    ////////////////////////////////////////////////////////////////////
    return new MutationPlan() {
      @Override
      public PhoenixConnection getConnection() {
        return connection;
      }

      @Override
      public ParameterMetaData getParameterMetaData() {
        return queryPlan.getContext().getBindManager().getParameterMetaData();
      }

      @Override
      public StatementContext getContext() {
        return queryPlan.getContext();
      }

      @Override
      public MutationState execute() throws SQLException {
        ResultIterator iterator = queryPlan.iterator();
        if (upsertParallelIteratorFactory == null) {
          return upsertSelect(
              statement, tableRef, projector, iterator, columnIndexes, pkSlotIndexes);
        }
        upsertParallelIteratorFactory.setRowProjector(projector);
        upsertParallelIteratorFactory.setColumnIndexes(columnIndexes);
        upsertParallelIteratorFactory.setPkSlotIndexes(pkSlotIndexes);
        Tuple tuple;
        long totalRowCount = 0;
        while ((tuple = iterator.next()) != null) { // Runs query
          Cell kv = tuple.getValue(0);
          totalRowCount +=
              PDataType.LONG
                  .getCodec()
                  .decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
        }
        // Return the total number of rows that have been updated. In the case of auto
        // commit being off, the mutations will all be in the mutation state of the
        // current connection.
        return new MutationState(maxSize, statement.getConnection(), totalRowCount);
      }

      @Override
      public ExplainPlan getExplainPlan() throws SQLException {
        List<String> queryPlanSteps = queryPlan.getExplainPlan().getPlanSteps();
        List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
        planSteps.add("UPSERT SELECT");
        planSteps.addAll(queryPlanSteps);
        return new ExplainPlan(planSteps);
      }
    };
  }
  ////////////////////////////////////////////////////////////////////
  // UPSERT VALUES
  ////////////////////////////////////////////////////////////////////
  int nodeIndex = 0;
  // Initialize values with constant byte values first
  final byte[][] values = new byte[nValuesToSet][];
  if (isTenantSpecific) {
    values[nodeIndex++] = connection.getTenantId().getBytes();
  }
  if (isSharedViewIndex) {
    values[nodeIndex++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
  }
  final int nodeIndexOffset = nodeIndex;
  // Allocate the array based on the size of all columns in the table,
  // since some values may not be set (if they're nullable).
  final StatementContext context = new StatementContext(statement, resolver, new Scan());
  UpsertValuesCompiler expressionBuilder = new UpsertValuesCompiler(context);
  final List<Expression> constantExpressions =
      Lists.newArrayListWithExpectedSize(valueNodes.size());
  // First build all the expressions, as with sequences we want to collect them all first
  // and initialize them in one batch
  for (ParseNode valueNode : valueNodes) {
    if (!valueNode.isStateless()) {
      throw new SQLExceptionInfo.Builder(SQLExceptionCode.VALUE_IN_UPSERT_NOT_CONSTANT)
          .build()
          .buildException();
    }
    PColumn column = allColumns.get(columnIndexes[nodeIndex]);
    expressionBuilder.setColumn(column);
    Expression expression = valueNode.accept(expressionBuilder);
    if (expression.getDataType() != null
        && !expression.getDataType().isCastableTo(column.getDataType())) {
      throw TypeMismatchException.newException(
          expression.getDataType(),
          column.getDataType(),
          "expression: " + expression.toString() + " in column " + column);
    }
    constantExpressions.add(expression);
    nodeIndex++;
  }
  return new MutationPlan() {
    @Override
    public PhoenixConnection getConnection() {
      return connection;
    }

    @Override
    public ParameterMetaData getParameterMetaData() {
      return context.getBindManager().getParameterMetaData();
    }

    @Override
    public StatementContext getContext() {
      return context;
    }

    @Override
    public MutationState execute() throws SQLException {
      ImmutableBytesWritable ptr = context.getTempPtr();
      final SequenceManager sequenceManager = context.getSequenceManager();
      // Next evaluate all the expressions
      int nodeIndex = nodeIndexOffset;
      Tuple tuple =
          sequenceManager.getSequenceCount() == 0
              ? null
              : sequenceManager.newSequenceTuple(null);
      for (Expression constantExpression : constantExpressions) {
        PColumn column = allColumns.get(columnIndexes[nodeIndex]);
        constantExpression.evaluate(tuple, ptr);
        Object value = null;
        if (constantExpression.getDataType() != null) {
          value =
              constantExpression
                  .getDataType()
                  .toObject(
                      ptr,
                      constantExpression.getSortOrder(),
                      constantExpression.getMaxLength(),
                      constantExpression.getScale());
          if (!constantExpression.getDataType().isCoercibleTo(column.getDataType(), value)) {
            throw TypeMismatchException.newException(
                constantExpression.getDataType(),
                column.getDataType(),
                "expression: " + constantExpression.toString() + " in column " + column);
          }
          if (!column
              .getDataType()
              .isSizeCompatible(
                  ptr,
                  value,
                  constantExpression.getDataType(),
                  constantExpression.getMaxLength(),
                  constantExpression.getScale(),
                  column.getMaxLength(),
                  column.getScale())) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY)
                .setColumnName(column.getName().getString())
                .setMessage("value=" + constantExpression.toString())
                .build()
                .buildException();
          }
        }
        column
            .getDataType()
            .coerceBytes(
                ptr,
                value,
                constantExpression.getDataType(),
                constantExpression.getMaxLength(),
                constantExpression.getScale(),
                constantExpression.getSortOrder(),
                column.getMaxLength(),
                column.getScale(),
                column.getSortOrder());
        if (overlapViewColumns.contains(column)
            && Bytes.compareTo(
                    ptr.get(),
                    ptr.getOffset(),
                    ptr.getLength(),
                    column.getViewConstant(),
                    0,
                    column.getViewConstant().length - 1)
                != 0) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_UPDATE_VIEW_COLUMN)
              .setColumnName(column.getName().getString())
              .setMessage("value=" + constantExpression.toString())
              .build()
              .buildException();
        }
        values[nodeIndex] = ByteUtil.copyKeyBytesIfNecessary(ptr);
        nodeIndex++;
      }
      // Add columns based on the view
      for (PColumn column : addViewColumns) {
        if (IndexUtil.getViewConstantValue(column, ptr)) {
          values[nodeIndex++] = ByteUtil.copyKeyBytesIfNecessary(ptr);
        } else {
          throw new IllegalStateException();
        }
      }
      Map<ImmutableBytesPtr, Map<PColumn, byte[]>> mutation =
          Maps.newHashMapWithExpectedSize(1);
      setValues(values, pkSlotIndexes, columnIndexes, tableRef.getTable(), mutation);
      return new MutationState(tableRef, mutation, 0, maxSize, connection);
    }

    @Override
    public ExplainPlan getExplainPlan() throws SQLException {
      List<String> planSteps = Lists.newArrayListWithExpectedSize(2);
      if (context.getSequenceManager().getSequenceCount() > 0) {
        planSteps.add(
            "CLIENT RESERVE " + context.getSequenceManager().getSequenceCount() + " SEQUENCES");
      }
      planSteps.add("PUT SINGLE ROW");
      return new ExplainPlan(planSteps);
    }
  };
}
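A minimal sketch, not from the source, of the three execution paths compile() can produce. The table T and its columns are hypothetical; the dispatch rules are the ones implemented above:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

class UpsertPathsSketch {
  static void run() throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
        Statement stmt = conn.createStatement()) {
      conn.setAutoCommit(true);
      // 1) UPSERT VALUES: a single-row MutationPlan that evaluates the constant
      //    expressions client-side (explain plan: "PUT SINGLE ROW").
      stmt.executeUpdate("UPSERT INTO T (ID, NAME) VALUES (1, 'a')");
      // 2) UPSERT SELECT with aggregation/DISTINCT/LIMIT, a different target table,
      //    an immutable or salted table, or autocommit off: runs client-side,
      //    streaming rows back and re-upserting them ("UPSERT SELECT").
      stmt.executeUpdate("UPSERT INTO T SELECT ID, NAME FROM T LIMIT 10");
      // 3) UPSERT SELECT from the same non-salted, mutable table with autocommit on
      //    and an unchanged PK: may run entirely in the region server coprocessor
      //    ("UPSERT ROWS").
      stmt.executeUpdate("UPSERT INTO T SELECT ID, UPPER(NAME) FROM T");
    }
  }
}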
public static int getPKPosition(PTable table, PColumn column) {
  // TODO: when PColumn has getPKPosition, use that instead
  return table.getPKColumns().indexOf(column);
}
private static QueryPlan addPlan(
    PhoenixStatement statement,
    SelectStatement select,
    PTable index,
    List<? extends PDatum> targetColumns,
    ParallelIteratorFactory parallelIteratorFactory,
    QueryPlan dataPlan,
    boolean isHinted)
    throws SQLException {
  int nColumns = dataPlan.getProjector().getColumnCount();
  String tableAlias = dataPlan.getTableRef().getTableAlias();
  // Double quote in case it's case sensitive
  String alias = tableAlias == null ? null : '"' + tableAlias + '"';
  String schemaName = index.getParentSchemaName().getString();
  schemaName = schemaName.length() == 0 ? null : '"' + schemaName + '"';
  String tableName = '"' + index.getTableName().getString() + '"';
  TableNode table = FACTORY.namedTable(alias, FACTORY.table(schemaName, tableName));
  SelectStatement indexSelect = FACTORY.select(select, table);
  ColumnResolver resolver =
      FromCompiler.getResolverForQuery(indexSelect, statement.getConnection());
  // We will or will not do tuple projection according to the data plan.
  boolean isProjected =
      dataPlan.getContext().getResolver().getTables().get(0).getTable().getType()
          == PTableType.PROJECTED;
  // Check the index state of the now potentially updated index table to make sure it's active
  if (PIndexState.ACTIVE.equals(resolver.getTables().get(0).getTable().getIndexState())) {
    try {
      // Translate nodes that match expressions that are indexed to the associated
      // column parse node
      indexSelect =
          ParseNodeRewriter.rewrite(
              indexSelect,
              new IndexExpressionParseNodeRewriter(
                  index, statement.getConnection(), indexSelect.getUdfParseNodes()));
      QueryCompiler compiler =
          new QueryCompiler(
              statement,
              indexSelect,
              resolver,
              targetColumns,
              parallelIteratorFactory,
              dataPlan.getContext().getSequenceManager(),
              isProjected);
      QueryPlan plan = compiler.compile();
      // If the query doesn't have a where clause and some of the columns to project are
      // missing from the index, then we'd need to fetch the missing columns from the main
      // table for each row in the local index. That amounts to a full scan of both the
      // local index and the data table, which is inefficient, so we don't use the index.
      // If all the columns to project are present in the index, then we can use the index
      // even if the query doesn't have a where clause.
      if (index.getIndexType() == IndexType.LOCAL
          && indexSelect.getWhere() == null
          && !plan.getContext().getDataColumns().isEmpty()) {
        return null;
      }
      // Checking the number of columns handles the wildcard cases correctly, as in that
      // case the index must contain all columns from the data table to be able to be used.
      if (plan.getTableRef().getTable().getIndexState() == PIndexState.ACTIVE) {
        if (plan.getProjector().getColumnCount() == nColumns) {
          return plan;
        } else if (index.getIndexType() == IndexType.GLOBAL) {
          throw new ColumnNotFoundException("*");
        }
      }
    } catch (ColumnNotFoundException e) {
      /* Means that a column is being used that's not in our index.
       * Since we currently don't keep stats, we don't know the selectivity of the index.
       * For now, if this is a hinted plan, we will try rewriting the query as a subquery;
       * otherwise we just don't use this index (as opposed to trying to join back from
       * the index table to the data table).
       */
      SelectStatement dataSelect = (SelectStatement) dataPlan.getStatement();
      ParseNode where = dataSelect.getWhere();
      if (isHinted && where != null) {
        StatementContext context = new StatementContext(statement, resolver);
        WhereConditionRewriter whereRewriter =
            new WhereConditionRewriter(FromCompiler.getResolver(dataPlan.getTableRef()), context);
        where = where.accept(whereRewriter);
        if (where != null) {
          PTable dataTable = dataPlan.getTableRef().getTable();
          List<PColumn> pkColumns = dataTable.getPKColumns();
          List<AliasedNode> aliasedNodes =
              Lists.<AliasedNode>newArrayListWithExpectedSize(pkColumns.size());
          List<ParseNode> nodes =
              Lists.<ParseNode>newArrayListWithExpectedSize(pkColumns.size());
          boolean isSalted = dataTable.getBucketNum() != null;
          boolean isTenantSpecific =
              dataTable.isMultiTenant() && statement.getConnection().getTenantId() != null;
          int posOffset = (isSalted ? 1 : 0) + (isTenantSpecific ? 1 : 0);
          for (int i = posOffset; i < pkColumns.size(); i++) {
            PColumn column = pkColumns.get(i);
            String indexColName = IndexUtil.getIndexColumnName(column);
            ParseNode indexColNode =
                new ColumnParseNode(null, '"' + indexColName + '"', indexColName);
            PDataType indexColType = IndexUtil.getIndexColumnDataType(column);
            PDataType dataColType = column.getDataType();
            if (indexColType != dataColType) {
              indexColNode = FACTORY.cast(indexColNode, dataColType, null, null);
            }
            aliasedNodes.add(FACTORY.aliasedNode(null, indexColNode));
            nodes.add(new ColumnParseNode(null, '"' + column.getName().getString() + '"'));
          }
          SelectStatement innerSelect =
              FACTORY.select(
                  indexSelect.getFrom(),
                  indexSelect.getHint(),
                  false,
                  aliasedNodes,
                  where,
                  null,
                  null,
                  null,
                  null,
                  null,
                  indexSelect.getBindCount(),
                  false,
                  indexSelect.hasSequence(),
                  Collections.<SelectStatement>emptyList(),
                  indexSelect.getUdfParseNodes());
          ParseNode outerWhere =
              FACTORY.in(
                  nodes.size() == 1 ? nodes.get(0) : FACTORY.rowValueConstructor(nodes),
                  FACTORY.subquery(innerSelect, false),
                  false,
                  true);
          ParseNode extractedCondition = whereRewriter.getExtractedCondition();
          if (extractedCondition != null) {
            outerWhere = FACTORY.and(Lists.newArrayList(outerWhere, extractedCondition));
          }
          HintNode hint =
              HintNode.combine(
                  HintNode.subtract(
                      indexSelect.getHint(),
                      new Hint[] {Hint.INDEX, Hint.NO_CHILD_PARENT_JOIN_OPTIMIZATION}),
                  FACTORY.hint("NO_INDEX"));
          SelectStatement query = FACTORY.select(dataSelect, hint, outerWhere);
          ColumnResolver queryResolver =
              FromCompiler.getResolverForQuery(query, statement.getConnection());
          query = SubqueryRewriter.transform(query, queryResolver, statement.getConnection());
          queryResolver = FromCompiler.getResolverForQuery(query, statement.getConnection());
          query = StatementNormalizer.normalize(query, queryResolver);
          QueryPlan plan =
              new QueryCompiler(
                      statement,
                      query,
                      queryResolver,
                      targetColumns,
                      parallelIteratorFactory,
                      dataPlan.getContext().getSequenceManager(),
                      isProjected)
                  .compile();
          return plan;
        }
      }
    }
  }
  return null;
}
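A hedged sketch of the subquery rewrite this fallback performs. The schema is hypothetical, and the real code builds parse nodes rather than SQL text:

// Hypothetical schema: table T with PK (K1, K2), columns V1 and V2, and an
// index IDX on (V1) that does not cover V2. A hinted query over IDX that
// references V2 hits ColumnNotFoundException above; the fallback then
// rewrites it, roughly, from:
String hinted =
    "SELECT /*+ INDEX(T IDX) */ V2 FROM T WHERE V1 = 'x'";
// into a NO_INDEX query over the data table whose where clause pulls the
// matching PKs from the index via an IN subquery:
String rewritten =
    "SELECT /*+ NO_INDEX */ V2 FROM T "
        + "WHERE (K1, K2) IN (SELECT K1, K2 FROM IDX WHERE V1 = 'x')";
// In the generated inner select, the PK and V1 references actually use the
// index table's rewritten column names (via IndexUtil.getIndexColumnName),
// with CASTs added when an index column's type differs from the data column's.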
@Override
protected RegionScanner doPostScannerOpen(
    final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner s)
    throws IOException {
  byte[] isUngroupedAgg = scan.getAttribute(BaseScannerRegionObserver.UNGROUPED_AGG);
  if (isUngroupedAgg == null) {
    return s;
  }
  final ScanProjector p = ScanProjector.deserializeProjectorFromScan(scan);
  final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
  RegionScanner theScanner = s;
  if (p != null || j != null) {
    theScanner =
        new HashJoinRegionScanner(s, p, j, ScanUtil.getTenantId(scan), c.getEnvironment());
  }
  final RegionScanner innerScanner = theScanner;
  byte[] indexUUID = scan.getAttribute(PhoenixIndexCodec.INDEX_UUID);
  PTable projectedTable = null;
  List<Expression> selectExpressions = null;
  byte[] upsertSelectTable = scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE);
  boolean isUpsert = false;
  boolean isDelete = false;
  byte[] deleteCQ = null;
  byte[] deleteCF = null;
  byte[][] values = null;
  byte[] emptyCF = null;
  ImmutableBytesWritable ptr = null;
  if (upsertSelectTable != null) {
    isUpsert = true;
    projectedTable = deserializeTable(upsertSelectTable);
    selectExpressions =
        deserializeExpressions(scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_EXPRS));
    values = new byte[projectedTable.getPKColumns().size()][];
    ptr = new ImmutableBytesWritable();
  } else {
    byte[] isDeleteAgg = scan.getAttribute(BaseScannerRegionObserver.DELETE_AGG);
    isDelete = isDeleteAgg != null && Bytes.compareTo(PDataType.TRUE_BYTES, isDeleteAgg) == 0;
    if (!isDelete) {
      deleteCF = scan.getAttribute(BaseScannerRegionObserver.DELETE_CF);
      deleteCQ = scan.getAttribute(BaseScannerRegionObserver.DELETE_CQ);
    }
    emptyCF = scan.getAttribute(BaseScannerRegionObserver.EMPTY_CF);
  }
  int batchSize = 0;
  long ts = scan.getTimeRange().getMax();
  HRegion region = c.getEnvironment().getRegion();
  List<Mutation> mutations = Collections.emptyList();
  if (isDelete || isUpsert || (deleteCQ != null && deleteCF != null) || emptyCF != null) {
    // TODO: size better
    mutations = Lists.newArrayListWithExpectedSize(1024);
    batchSize =
        c.getEnvironment()
            .getConfiguration()
            .getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
  }
  Aggregators aggregators =
      ServerAggregators.deserialize(
          scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS),
          c.getEnvironment().getConfiguration());
  Aggregator[] rowAggregators = aggregators.getAggregators();
  boolean hasMore;
  boolean hasAny = false;
  MultiKeyValueTuple result = new MultiKeyValueTuple();
  if (logger.isInfoEnabled()) {
    logger.info("Starting ungrouped coprocessor scan " + scan);
  }
  long rowCount = 0;
  region.startRegionOperation();
  try {
    do {
      List<Cell> results = new ArrayList<Cell>();
      // Results are potentially returned even when the return value of s.next is false,
      // since this is an indication of whether or not there are more values after the
      // ones returned
      hasMore = innerScanner.nextRaw(results);
      if (!results.isEmpty()) {
        rowCount++;
        result.setKeyValues(results);
        try {
          if (isDelete) {
            // FIXME: the version of the Delete constructor without the lock args was
            // introduced in 0.94.4, thus if we try to use it here we can no longer use
            // the 0.94.2 version of the client.
            Cell firstKV = results.get(0);
            Delete delete =
                new Delete(
                    firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength(), ts);
            mutations.add(delete);
          } else if (isUpsert) {
            Arrays.fill(values, null);
            int i = 0;
            List<PColumn> projectedColumns = projectedTable.getColumns();
            for (; i < projectedTable.getPKColumns().size(); i++) {
              Expression expression = selectExpressions.get(i);
              if (expression.evaluate(result, ptr)) {
                values[i] = ptr.copyBytes();
                // If the SortOrder from the expression in the SELECT doesn't match
                // the column being projected into, then invert the bits.
                if (expression.getSortOrder() != projectedColumns.get(i).getSortOrder()) {
                  SortOrder.invert(values[i], 0, values[i], 0, values[i].length);
                }
              }
            }
            projectedTable.newKey(ptr, values);
            PRow row = projectedTable.newRow(kvBuilder, ts, ptr);
            for (; i < projectedColumns.size(); i++) {
              Expression expression = selectExpressions.get(i);
              if (expression.evaluate(result, ptr)) {
                PColumn column = projectedColumns.get(i);
                Object value = expression.getDataType().toObject(ptr, column.getSortOrder());
                // We are guaranteed that the two columns will have the same type.
                if (!column
                    .getDataType()
                    .isSizeCompatible(
                        ptr,
                        value,
                        column.getDataType(),
                        expression.getMaxLength(),
                        expression.getScale(),
                        column.getMaxLength(),
                        column.getScale())) {
                  throw new ValueTypeIncompatibleException(
                      column.getDataType(), column.getMaxLength(), column.getScale());
                }
                column
                    .getDataType()
                    .coerceBytes(
                        ptr,
                        value,
                        expression.getDataType(),
                        expression.getMaxLength(),
                        expression.getScale(),
                        expression.getSortOrder(),
                        column.getMaxLength(),
                        column.getScale(),
                        column.getSortOrder());
                byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
                row.setValue(column, bytes);
              }
            }
            for (Mutation mutation : row.toRowMutations()) {
              mutations.add(mutation);
            }
          } else if (deleteCF != null && deleteCQ != null) {
            // No need to search for the delete column, since we project only it
            // if no empty key value is being set
            if (emptyCF == null || result.getValue(deleteCF, deleteCQ) != null) {
              Delete delete =
                  new Delete(
                      results.get(0).getRowArray(),
                      results.get(0).getRowOffset(),
                      results.get(0).getRowLength());
              delete.deleteColumns(deleteCF, deleteCQ, ts);
              mutations.add(delete);
            }
          }
          if (emptyCF != null) {
            /*
             * If we've specified an emptyCF, then we need to insert an empty
             * key value "retroactively" for any key value that is visible at
             * the timestamp that the DDL was issued. Key values that are not
             * visible at this timestamp will not ever be projected up to
             * scans past this timestamp, so they don't need to be considered.
             * We insert one empty key value per row per timestamp.
             */
            Set<Long> timeStamps = Sets.newHashSetWithExpectedSize(results.size());
            for (Cell kv : results) {
              long kvts = kv.getTimestamp();
              if (!timeStamps.contains(kvts)) {
                Put put = new Put(kv.getRowArray(), kv.getRowOffset(), kv.getRowLength());
                put.add(
                    emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts, ByteUtil.EMPTY_BYTE_ARRAY);
                mutations.add(put);
              }
            }
          }
          // Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in the config
          if (!mutations.isEmpty() && batchSize > 0 && mutations.size() % batchSize == 0) {
            commitBatch(region, mutations, indexUUID);
            mutations.clear();
          }
        } catch (ConstraintViolationException e) {
          // Log and ignore in count
          logger.error(
              "Failed to create row in "
                  + region.getRegionNameAsString()
                  + " with values "
                  + SchemaUtil.toString(values),
              e);
          continue;
        }
        aggregators.aggregate(rowAggregators, result);
        hasAny = true;
      }
    } while (hasMore);
  } finally {
    innerScanner.close();
    region.closeRegionOperation();
  }
  if (logger.isInfoEnabled()) {
    logger.info("Finished scanning " + rowCount + " rows for ungrouped coprocessor scan " + scan);
  }
  if (!mutations.isEmpty()) {
    commitBatch(region, mutations, indexUUID);
  }
  final boolean hadAny = hasAny;
  KeyValue keyValue = null;
  if (hadAny) {
    byte[] value = aggregators.toBytes(rowAggregators);
    keyValue =
        KeyValueUtil.newKeyValue(
            UNGROUPED_AGG_ROW_KEY,
            SINGLE_COLUMN_FAMILY,
            SINGLE_COLUMN,
            AGG_TIMESTAMP,
            value,
            0,
            value.length);
  }
  final KeyValue aggKeyValue = keyValue;
  RegionScanner scanner =
      new BaseRegionScanner() {
        private boolean done = !hadAny;

        @Override
        public HRegionInfo getRegionInfo() {
          return innerScanner.getRegionInfo();
        }

        @Override
        public boolean isFilterDone() {
          return done;
        }

        @Override
        public void close() throws IOException {
          innerScanner.close();
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
          if (done) return false;
          done = true;
          results.add(aggKeyValue);
          return false;
        }

        @Override
        public long getMaxResultSize() {
          return scan.getMaxResultSize();
        }
      };
  return scanner;
}
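A simplified, illustrative summary of the scan-attribute dispatch implemented above. The attribute names are the BaseScannerRegionObserver constants used in the method; the helper itself is hypothetical (it exists nowhere in Phoenix) and flattens some details, e.g. the TRUE_BYTES comparison for DELETE_AGG and the fact that EMPTY_CF can combine with the delete-column case:

static String describeUngroupedScan(Scan scan) {
  if (scan.getAttribute(BaseScannerRegionObserver.UNGROUPED_AGG) == null) {
    return "pass-through";              // scanner returned unchanged
  }
  if (scan.getAttribute(BaseScannerRegionObserver.UPSERT_SELECT_TABLE) != null) {
    return "server-side UPSERT SELECT"; // re-project rows via UPSERT_SELECT_EXPRS
  }
  if (scan.getAttribute(BaseScannerRegionObserver.DELETE_AGG) != null) {
    return "row DELETE";                // whole-row deletes
  }
  if (scan.getAttribute(BaseScannerRegionObserver.DELETE_CF) != null
      && scan.getAttribute(BaseScannerRegionObserver.DELETE_CQ) != null) {
    return "single-column DELETE";      // delete one column per row
  }
  if (scan.getAttribute(BaseScannerRegionObserver.EMPTY_CF) != null) {
    return "retroactive empty-KV insert"; // e.g. after DDL changes the empty column family
  }
  return "ungrouped aggregation only";  // aggregate rows, no mutations
}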