public SingleTableColumnResolver(PhoenixConnection connection, NamedTableNode table, long timeStamp)
    throws SQLException {
  super(connection);
  List<PColumnFamily> families =
      Lists.newArrayListWithExpectedSize(table.getDynamicColumns().size());
  for (ColumnDef def : table.getDynamicColumns()) {
    if (def.getColumnDefName().getFamilyName() != null) {
      families.add(
          new PColumnFamilyImpl(
              PNameFactory.newName(def.getColumnDefName().getFamilyName()),
              Collections.<PColumn>emptyList()));
    }
  }
  Long scn = connection.getSCN();
  PTable theTable =
      new PTableImpl(
          connection.getTenantId(),
          table.getName().getSchemaName(),
          table.getName().getTableName(),
          scn == null ? HConstants.LATEST_TIMESTAMP : scn,
          families);
  theTable = this.addDynamicColumns(table.getDynamicColumns(), theTable);
  alias = null;
  tableRefs =
      ImmutableList.of(
          new TableRef(alias, theTable, timeStamp, !table.getDynamicColumns().isEmpty()));
}
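This resolver backs statements that declare dynamic columns inline, which is why it synthesizes a column family for every family-qualified dynamic column before building the PTable. A minimal sketch of the kind of statement involved follows; the EVENTS table and the CF.EXTRA dynamic column are hypothetical names used only for illustration.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DynamicColumnUpsertSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative only: table EVENTS and dynamic column CF.EXTRA are assumptions.
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
        Statement stmt = conn.createStatement()) {
      // The dynamic column is declared inline with its type; its family name ("CF")
      // is what ends up in the families list built by the resolver's constructor.
      stmt.executeUpdate("UPSERT INTO EVENTS (ID, CF.EXTRA VARCHAR) VALUES (1, 'dynamic value')");
      conn.commit();
    }
  }
}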
/**
 * Encode the primary key values from the table as a byte array. The values must be in the same
 * order as the primary key constraint. If the connection and table are both tenant-specific, the
 * tenant ID column must not be present in the values.
 *
 * @param conn an open connection
 * @param fullTableName the full table name
 * @param values the values of the primary key columns ordered in the same order as the primary
 *     key constraint
 * @return the encoded byte array
 * @throws SQLException if the table cannot be found or an incorrect number of values is provided
 * @see #decodePK(Connection, String, byte[]) to decode the byte[] back to the values
 */
public static byte[] encodePK(Connection conn, String fullTableName, Object[] values)
    throws SQLException {
  PTable table = getTable(conn, fullTableName);
  PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
  int offset =
      (table.getBucketNum() == null ? 0 : 1)
          + (table.isMultiTenant() && pconn.getTenantId() != null ? 1 : 0);
  List<PColumn> pkColumns = table.getPKColumns();
  if (pkColumns.size() - offset != values.length) {
    throw new SQLException(
        "Expected " + (pkColumns.size() - offset) + " but got " + values.length);
  }
  PDataType type = null;
  TrustedByteArrayOutputStream output =
      new TrustedByteArrayOutputStream(table.getRowKeySchema().getEstimatedValueLength());
  try {
    for (int i = offset; i < pkColumns.size(); i++) {
      if (type != null && !type.isFixedWidth()) {
        output.write(QueryConstants.SEPARATOR_BYTE);
      }
      type = pkColumns.get(i).getDataType();
      byte[] value = type.toBytes(values[i - offset]);
      output.write(value);
    }
    return output.toByteArray();
  } finally {
    try {
      output.close();
    } catch (IOException e) {
      throw new RuntimeException(e); // Impossible
    }
  }
}
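A short usage sketch for encodePK. The table MY_SCHEMA.MY_TABLE and its two-column primary key (ORG_ID VARCHAR, EVENT_ID BIGINT) are assumptions made up for the example; the values must follow the PRIMARY KEY constraint order and omit the tenant ID column on a tenant-specific connection.

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.phoenix.util.PhoenixRuntime;

public class EncodePKSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
      // Hypothetical PK (ORG_ID VARCHAR, EVENT_ID BIGINT), supplied in constraint order.
      Object[] pkValues = new Object[] {"org1", 42L};
      byte[] rowKey = PhoenixRuntime.encodePK(conn, "MY_SCHEMA.MY_TABLE", pkValues);
      // rowKey is the serialized row key; decodePK (below) reverses the encoding.
      System.out.println("Encoded row key length: " + rowKey.length);
    }
  }
}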
protected TableRef createTableRef(NamedTableNode tableNode, boolean updateCacheImmediately)
    throws SQLException {
  String tableName = tableNode.getName().getTableName();
  String schemaName = tableNode.getName().getSchemaName();
  long timeStamp = QueryConstants.UNSET_TIMESTAMP;
  String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
  PName tenantId = connection.getTenantId();
  PTable theTable = null;
  if (updateCacheImmediately || connection.getAutoCommit()) {
    MetaDataMutationResult result = client.updateCache(schemaName, tableName);
    timeStamp = result.getMutationTime();
    theTable = result.getTable();
    if (theTable == null) {
      throw new TableNotFoundException(schemaName, tableName, timeStamp);
    }
  } else {
    try {
      theTable = connection.getMetaDataCache().getTable(new PTableKey(tenantId, fullTableName));
    } catch (TableNotFoundException e1) {
      if (tenantId != null) { // Check with null tenantId next
        try {
          theTable = connection.getMetaDataCache().getTable(new PTableKey(null, fullTableName));
        } catch (TableNotFoundException e2) {
        }
      }
    }
    // We always attempt to update the cache in the event of a TableNotFoundException
    if (theTable == null) {
      MetaDataMutationResult result = client.updateCache(schemaName, tableName);
      if (result.wasUpdated()) {
        timeStamp = result.getMutationTime();
        theTable = result.getTable();
      }
    }
    if (theTable == null) {
      throw new TableNotFoundException(schemaName, tableName, timeStamp);
    }
  }
  // Add any dynamic columns to the table declaration
  List<ColumnDef> dynamicColumns = tableNode.getDynamicColumns();
  theTable = addDynamicColumns(dynamicColumns, theTable);
  TableRef tableRef =
      new TableRef(tableNode.getAlias(), theTable, timeStamp, !dynamicColumns.isEmpty());
  if (logger.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
    logger.debug(
        "Re-resolved stale table "
            + fullTableName
            + " with seqNum "
            + tableRef.getTable().getSequenceNumber()
            + " at timestamp "
            + tableRef.getTable().getTimeStamp()
            + " with "
            + tableRef.getTable().getColumns().size()
            + " columns: "
            + tableRef.getTable().getColumns());
  }
  return tableRef;
}
/**
 * Validates that the meta data is valid against the server meta data if we haven't yet done so.
 * Otherwise, for every UPSERT VALUES call, we'd need to hit the server to see if the meta data
 * has changed.
 *
 * @return the server timestamps to use for the upsert, one per table being mutated
 * @throws SQLException if the table or any columns no longer exist
 */
private long[] validate() throws SQLException {
  int i = 0;
  Long scn = connection.getSCN();
  PName tenantId = connection.getTenantId();
  MetaDataClient client = new MetaDataClient(connection);
  long[] timeStamps = new long[this.mutations.size()];
  for (Map.Entry<TableRef, Map<ImmutableBytesPtr, Map<PColumn, byte[]>>> entry :
      mutations.entrySet()) {
    TableRef tableRef = entry.getKey();
    long serverTimeStamp = tableRef.getTimeStamp();
    PTable table = tableRef.getTable();
    // If we're auto committing, we've already validated the schema when we got the
    // ColumnResolver, so no need to do it again here.
    if (!connection.getAutoCommit()) {
      MetaDataMutationResult result =
          client.updateCache(table.getSchemaName().getString(), table.getTableName().getString());
      long timestamp = result.getMutationTime();
      if (timestamp != QueryConstants.UNSET_TIMESTAMP) {
        serverTimeStamp = timestamp;
        if (result.wasUpdated()) {
          // TODO: use bitset?
          table =
              connection
                  .getMetaDataCache()
                  .getTable(new PTableKey(tenantId, table.getName().getString()));
          PColumn[] columns = new PColumn[table.getColumns().size()];
          for (Map.Entry<ImmutableBytesPtr, Map<PColumn, byte[]>> rowEntry :
              entry.getValue().entrySet()) {
            Map<PColumn, byte[]> valueEntry = rowEntry.getValue();
            if (valueEntry != PRow.DELETE_MARKER) {
              for (PColumn column : valueEntry.keySet()) {
                columns[column.getPosition()] = column;
              }
            }
          }
          for (PColumn column : columns) {
            if (column != null) {
              table
                  .getColumnFamily(column.getFamilyName().getString())
                  .getColumn(column.getName().getString());
            }
          }
          tableRef.setTable(table);
        }
      }
    }
    timeStamps[i++] =
        scn == null
            ? serverTimeStamp == QueryConstants.UNSET_TIMESTAMP
                ? HConstants.LATEST_TIMESTAMP
                : serverTimeStamp
            : scn;
  }
  return timeStamps;
}
public static PTable getTable(Connection conn, String name) throws SQLException {
  PTable table = null;
  PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
  try {
    table = pconn.getMetaDataCache().getTable(new PTableKey(pconn.getTenantId(), name));
  } catch (TableNotFoundException e) {
    String schemaName = SchemaUtil.getSchemaNameFromFullName(name);
    String tableName = SchemaUtil.getTableNameFromFullName(name);
    MetaDataMutationResult result = new MetaDataClient(pconn).updateCache(schemaName, tableName);
    if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
      throw e;
    }
    table = result.getTable();
  }
  return table;
}
/**
 * Decode a byte array value back into the Object values of the primary key constraint. If the
 * connection and table are both tenant-specific, the tenant ID column is not expected to have
 * been encoded and will not appear in the returned values.
 *
 * @param conn an open connection
 * @param name the full table name
 * @param value the value that was encoded with {@link #encodePK(Connection, String, Object[])}
 * @return the Object values encoded in the byte array value
 * @throws SQLException
 */
public static Object[] decodePK(Connection conn, String name, byte[] value) throws SQLException {
  PTable table = getTable(conn, name);
  PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
  int offset =
      (table.getBucketNum() == null ? 0 : 1)
          + (table.isMultiTenant() && pconn.getTenantId() != null ? 1 : 0);
  RowKeySchema schema = table.getRowKeySchema();
  int nValues = schema.getMaxFields() - offset;
  Object[] values = new Object[nValues];
  ImmutableBytesWritable ptr = new ImmutableBytesWritable();
  int i = 0;
  schema.iterator(value, ptr, offset);
  while (i < nValues && schema.next(ptr, i, value.length) != null) {
    values[i] = schema.getField(i).getDataType().toObject(ptr);
    i++;
  }
  return values;
}
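A round-trip sketch pairing decodePK with encodePK, reusing the hypothetical MY_SCHEMA.MY_TABLE with PK (ORG_ID, EVENT_ID) from the earlier example. The decoded values come back in PRIMARY KEY constraint order, minus the tenant ID column when the connection is tenant-specific.

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Arrays;
import org.apache.phoenix.util.PhoenixRuntime;

public class DecodePKSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
      // Hypothetical table and PK values; encode then decode to recover the originals.
      byte[] rowKey =
          PhoenixRuntime.encodePK(conn, "MY_SCHEMA.MY_TABLE", new Object[] {"org1", 42L});
      Object[] decoded = PhoenixRuntime.decodePK(conn, "MY_SCHEMA.MY_TABLE", rowKey);
      System.out.println(Arrays.toString(decoded)); // expected: [org1, 42]
    }
  }
}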
@Test
public void testGetSplitsWithSkipScanFilter() throws Exception {
  byte[][] splits = new byte[][] {Ka1A, Ka1B, Ka1E, Ka1G, Ka1I, Ka2A};
  createTestTable(getUrl(), DDL, splits, null);
  Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
  Connection conn = DriverManager.getConnection(getUrl(), props);
  PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
  PTable table =
      pconn.getMetaDataCache().getTable(new PTableKey(pconn.getTenantId(), TABLE_NAME));
  TableRef tableRef = new TableRef(table);
  List<HRegionLocation> regions =
      pconn
          .getQueryServices()
          .getAllTableRegions(tableRef.getTable().getPhysicalName().getBytes());
  List<KeyRange> ranges = getSplits(tableRef, scan, regions, scanRanges);
  assertEquals(
      "Unexpected number of splits: " + ranges.size(), expectedSplits.size(), ranges.size());
  for (int i = 0; i < expectedSplits.size(); i++) {
    assertEquals(expectedSplits.get(i), ranges.get(i));
  }
}
public static ColumnResolver getResolverForCreation(
    final CreateTableStatement statement, final PhoenixConnection connection)
    throws SQLException {
  TableName baseTable = statement.getBaseTableName();
  if (baseTable == null) {
    return EMPTY_TABLE_RESOLVER;
  }
  NamedTableNode tableNode =
      NamedTableNode.create(null, baseTable, Collections.<ColumnDef>emptyList());
  // Always use non-tenant-specific connection here
  try {
    SingleTableColumnResolver visitor =
        new SingleTableColumnResolver(connection, tableNode, true);
    return visitor;
  } catch (TableNotFoundException e) {
    // Used for mapped VIEW, since we won't be able to resolve that.
    // Instead, we create a table with just the dynamic columns.
    // A tenant-specific connection may not create a mapped VIEW.
    if (connection.getTenantId() == null && statement.getTableType() == PTableType.VIEW) {
      ConnectionQueryServices services = connection.getQueryServices();
      byte[] fullTableName =
          SchemaUtil.getTableNameAsBytes(baseTable.getSchemaName(), baseTable.getTableName());
      HTableInterface htable = null;
      try {
        htable = services.getTable(fullTableName);
      } catch (UnsupportedOperationException ignore) {
        throw e; // For Connectionless
      } finally {
        if (htable != null) Closeables.closeQuietly(htable);
      }
      tableNode = NamedTableNode.create(null, baseTable, statement.getColumnDefs());
      return new SingleTableColumnResolver(connection, tableNode, e.getTimeStamp());
    }
    throw e;
  }
}
public MutationPlan compile(UpsertStatement upsert) throws SQLException {
  final PhoenixConnection connection = statement.getConnection();
  ConnectionQueryServices services = connection.getQueryServices();
  final int maxSize =
      services
          .getProps()
          .getInt(
              QueryServices.MAX_MUTATION_SIZE_ATTRIB,
              QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
  final ColumnResolver resolver = FromCompiler.getResolverForMutation(upsert, connection);
  final TableRef tableRef = resolver.getTables().get(0);
  final PTable table = tableRef.getTable();
  if (table.getType() == PTableType.VIEW) {
    if (table.getViewType().isReadOnly()) {
      throw new ReadOnlyTableException(
          table.getSchemaName().getString(), table.getTableName().getString());
    }
  }
  boolean isSalted = table.getBucketNum() != null;
  final boolean isTenantSpecific = table.isMultiTenant() && connection.getTenantId() != null;
  final boolean isSharedViewIndex = table.getViewIndexId() != null;
  String tenantId = isTenantSpecific ? connection.getTenantId().getString() : null;
  int posOffset = isSalted ? 1 : 0;
  // Setup array of column indexes parallel to values that are going to be set
  List<ColumnName> columnNodes = upsert.getColumns();
  final List<PColumn> allColumns = table.getColumns();
  Set<PColumn> addViewColumnsToBe = Collections.emptySet();
  Set<PColumn> overlapViewColumnsToBe = Collections.emptySet();
  int[] columnIndexesToBe;
  int nColumnsToSet = 0;
  int[] pkSlotIndexesToBe;
  List<PColumn> targetColumns;
  if (table.getViewType() == ViewType.UPDATABLE) {
    addViewColumnsToBe = Sets.newLinkedHashSetWithExpectedSize(allColumns.size());
    for (PColumn column : allColumns) {
      if (column.getViewConstant() != null) {
        addViewColumnsToBe.add(column);
      }
    }
  }
  ImmutableBytesWritable ptr = new ImmutableBytesWritable();
  // Allow full row upsert if no columns or only dynamic ones are specified and values count match
  if (columnNodes.isEmpty()
      || columnNodes.size() == upsert.getTable().getDynamicColumns().size()) {
    nColumnsToSet = allColumns.size() - posOffset;
    columnIndexesToBe = new int[nColumnsToSet];
    pkSlotIndexesToBe = new int[columnIndexesToBe.length];
    targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
    targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
    int minPKPos = 0;
    if (isTenantSpecific) {
      PColumn tenantColumn = table.getPKColumns().get(minPKPos);
      columnIndexesToBe[minPKPos] = tenantColumn.getPosition();
      targetColumns.set(minPKPos, tenantColumn);
      minPKPos++;
    }
    if (isSharedViewIndex) {
      PColumn indexIdColumn = table.getPKColumns().get(minPKPos);
      columnIndexesToBe[minPKPos] = indexIdColumn.getPosition();
      targetColumns.set(minPKPos, indexIdColumn);
      minPKPos++;
    }
    for (int i = posOffset, j = 0; i < allColumns.size(); i++) {
      PColumn column = allColumns.get(i);
      if (SchemaUtil.isPKColumn(column)) {
        pkSlotIndexesToBe[i - posOffset] = j + posOffset;
        if (j++ < minPKPos) { // Skip, as it's already been set above
          continue;
        }
        minPKPos = 0;
      }
      columnIndexesToBe[i - posOffset + minPKPos] = i;
      targetColumns.set(i - posOffset + minPKPos, column);
    }
    if (!addViewColumnsToBe.isEmpty()) {
      // All view columns overlap in this case
      overlapViewColumnsToBe = addViewColumnsToBe;
      addViewColumnsToBe = Collections.emptySet();
    }
  } else {
    // Size for worst case
    int numColsInUpsert = columnNodes.size();
    nColumnsToSet =
        numColsInUpsert
            + addViewColumnsToBe.size()
            + (isTenantSpecific ? 1 : 0)
            + (isSharedViewIndex ? 1 : 0);
    columnIndexesToBe = new int[nColumnsToSet];
    pkSlotIndexesToBe = new int[columnIndexesToBe.length];
    targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
    targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
    Arrays.fill(columnIndexesToBe, -1); // TODO: necessary? So we'll get an AIOB exception if it's not replaced
    Arrays.fill(pkSlotIndexesToBe, -1); // TODO: necessary? So we'll get an AIOB exception if it's not replaced
    BitSet pkColumnsSet = new BitSet(table.getPKColumns().size());
    int i = 0;
    // Add tenant column directly, as we don't want to resolve it as this will fail
    if (isTenantSpecific) {
      PColumn tenantColumn = table.getPKColumns().get(i + posOffset);
      columnIndexesToBe[i] = tenantColumn.getPosition();
      pkColumnsSet.set(pkSlotIndexesToBe[i] = i + posOffset);
      targetColumns.set(i, tenantColumn);
      i++;
    }
    if (isSharedViewIndex) {
      PColumn indexIdColumn = table.getPKColumns().get(i + posOffset);
      columnIndexesToBe[i] = indexIdColumn.getPosition();
      pkColumnsSet.set(pkSlotIndexesToBe[i] = i + posOffset);
      targetColumns.set(i, indexIdColumn);
      i++;
    }
    for (ColumnName colName : columnNodes) {
      ColumnRef ref =
          resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName());
      PColumn column = ref.getColumn();
      if (IndexUtil.getViewConstantValue(column, ptr)) {
        if (overlapViewColumnsToBe.isEmpty()) {
          overlapViewColumnsToBe = Sets.newHashSetWithExpectedSize(addViewColumnsToBe.size());
        }
        nColumnsToSet--;
        overlapViewColumnsToBe.add(column);
        addViewColumnsToBe.remove(column);
      }
      columnIndexesToBe[i] = ref.getColumnPosition();
      targetColumns.set(i, column);
      if (SchemaUtil.isPKColumn(column)) {
        pkColumnsSet.set(pkSlotIndexesToBe[i] = ref.getPKSlotPosition());
      }
      i++;
    }
    for (PColumn column : addViewColumnsToBe) {
      columnIndexesToBe[i] = column.getPosition();
      targetColumns.set(i, column);
      if (SchemaUtil.isPKColumn(column)) {
        pkColumnsSet.set(pkSlotIndexesToBe[i] = SchemaUtil.getPKPosition(table, column));
      }
      i++;
    }
    for (i = posOffset; i < table.getPKColumns().size(); i++) {
      PColumn pkCol = table.getPKColumns().get(i);
      if (!pkColumnsSet.get(i)) {
        if (!pkCol.isNullable()) {
          throw new ConstraintViolationException(
              table.getName().getString()
                  + "."
                  + pkCol.getName().getString()
                  + " may not be null");
        }
      }
    }
  }
  List<ParseNode> valueNodes = upsert.getValues();
  QueryPlan plan = null;
  RowProjector rowProjectorToBe = null;
  final int nValuesToSet;
  boolean sameTable = false;
  boolean runOnServer = false;
  UpsertingParallelIteratorFactory upsertParallelIteratorFactoryToBe = null;
  final boolean isAutoCommit = connection.getAutoCommit();
  if (valueNodes == null) {
    SelectStatement select = upsert.getSelect();
    assert (select != null);
    select = SubselectRewriter.flatten(select, connection);
    ColumnResolver selectResolver = FromCompiler.getResolverForQuery(select, connection);
    select = StatementNormalizer.normalize(select, selectResolver);
    select = prependTenantAndViewConstants(table, select, tenantId, addViewColumnsToBe);
    sameTable =
        select.getFrom().size() == 1 && tableRef.equals(selectResolver.getTables().get(0));
    /* We can run the upsert in a coprocessor if:
     * 1) from has only 1 table and the into table matches from table
     * 2) the select query isn't doing aggregation
     * 3) autoCommit is on
     * 4) the table is not immutable, as the client is the one that figures out the additional
     *    puts for index tables.
     * 5) no limit clause
     * Otherwise, run the query to pull the data from the server
     * and populate the MutationState (up to a limit).
     */
    runOnServer =
        sameTable
            && isAutoCommit
            && !table.isImmutableRows()
            && !select.isAggregate()
            && !select.isDistinct()
            && select.getLimit() == null
            && table.getBucketNum() == null;
    ParallelIteratorFactory parallelIteratorFactory;
    if (select.isAggregate() || select.isDistinct() || select.getLimit() != null) {
      parallelIteratorFactory = null;
    } else {
      // We can pipeline the upsert select instead of spooling everything to disk first,
      // if we don't have any post processing that's required.
      parallelIteratorFactory =
          upsertParallelIteratorFactoryToBe =
              new UpsertingParallelIteratorFactory(connection, tableRef);
    }
    // If we may be able to run on the server, add a hint that favors using the data table
    // if all else is equal.
    // TODO: it'd be nice if we could figure out in advance if the PK is potentially changing,
    // as this would disallow running on the server. We currently use the row projector we
    // get back to figure this out.
    HintNode hint = upsert.getHint();
    if (!upsert.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
      hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
    }
    select = SelectStatement.create(select, hint);
    // Pass scan through if same table in upsert and select so that projection is computed
    // correctly
    // Use optimizer to choose the best plan
    plan =
        new QueryOptimizer(services)
            .optimize(statement, select, selectResolver, targetColumns, parallelIteratorFactory);
    runOnServer &= plan.getTableRef().equals(tableRef);
    rowProjectorToBe = plan.getProjector();
    nValuesToSet = rowProjectorToBe.getColumnCount();
    // Cannot auto commit if doing aggregation or topN or salted
    // Salted causes problems because the row may end up living on a different region
  } else {
    nValuesToSet =
        valueNodes.size()
            + addViewColumnsToBe.size()
            + (isTenantSpecific ? 1 : 0)
            + (isSharedViewIndex ? 1 : 0);
  }
  final RowProjector projector = rowProjectorToBe;
  final UpsertingParallelIteratorFactory upsertParallelIteratorFactory =
      upsertParallelIteratorFactoryToBe;
  final QueryPlan queryPlan = plan;
  // Resize down to allow a subset of columns to be specifiable
  if (columnNodes.isEmpty() && columnIndexesToBe.length >= nValuesToSet) {
    nColumnsToSet = nValuesToSet;
    columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, nValuesToSet);
    pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, nValuesToSet);
  }
  if (nValuesToSet != nColumnsToSet) {
    throw new SQLExceptionInfo.Builder(SQLExceptionCode.UPSERT_COLUMN_NUMBERS_MISMATCH)
        .setMessage(
            "Numbers of columns: " + nColumnsToSet + ". Number of values: " + nValuesToSet)
        .build()
        .buildException();
  }
  final int[] columnIndexes = columnIndexesToBe;
  final int[] pkSlotIndexes = pkSlotIndexesToBe;
  final Set<PColumn> addViewColumns = addViewColumnsToBe;
  final Set<PColumn> overlapViewColumns = overlapViewColumnsToBe;
  // TODO: break this up into multiple functions
  ////////////////////////////////////////////////////////////////////
  // UPSERT SELECT
  /////////////////////////////////////////////////////////////////////
  if (valueNodes == null) {
    // Before we re-order, check that for updatable view columns
    // the projected expression either matches the column name or
    // is a constant with the same required value.
    throwIfNotUpdatable(tableRef, overlapViewColumnsToBe, targetColumns, projector, sameTable);
    ////////////////////////////////////////////////////////////////////
    // UPSERT SELECT run server-side (maybe)
    /////////////////////////////////////////////////////////////////////
    if (runOnServer) {
      // At most this array will grow bigger by the number of PK columns
      int[] allColumnsIndexes = Arrays.copyOf(columnIndexes, columnIndexes.length + nValuesToSet);
      int[] reverseColumnIndexes = new int[table.getColumns().size()];
      List<Expression> projectedExpressions =
          Lists.newArrayListWithExpectedSize(reverseColumnIndexes.length);
      Arrays.fill(reverseColumnIndexes, -1);
      for (int i = 0; i < nValuesToSet; i++) {
        projectedExpressions.add(projector.getColumnProjector(i).getExpression());
        reverseColumnIndexes[columnIndexes[i]] = i;
      }
      /*
       * Order projected columns and projected expressions with PK columns
       * leading order by slot position
       */
      int offset = table.getBucketNum() == null ? 0 : 1;
      for (int i = 0; i < table.getPKColumns().size() - offset; i++) {
        PColumn column = table.getPKColumns().get(i + offset);
        int pos = reverseColumnIndexes[column.getPosition()];
        if (pos == -1) {
          // Last PK column may be fixed width and nullable
          // We don't want to insert a null expression b/c
          // it's not valid to set a fixed width type to null.
          if (column.getDataType().isFixedWidth()) {
            continue;
          }
          // Add literal null for missing PK columns
          pos = projectedExpressions.size();
          Expression literalNull =
              LiteralExpression.newConstant(null, column.getDataType(), true);
          projectedExpressions.add(literalNull);
          allColumnsIndexes[pos] = column.getPosition();
        }
        // Swap select expression at pos with i
        Collections.swap(projectedExpressions, i, pos);
        // Swap column indexes and reverse column indexes too
        int tempPos = allColumnsIndexes[i];
        allColumnsIndexes[i] = allColumnsIndexes[pos];
        allColumnsIndexes[pos] = tempPos;
        reverseColumnIndexes[tempPos] = reverseColumnIndexes[i];
        reverseColumnIndexes[i] = i;
      }
      // If any pk slots are changing, be conservative and don't run this server side.
      // If the row ends up living in a different region, we'll get an error otherwise.
      for (int i = 0; i < table.getPKColumns().size(); i++) {
        PColumn column = table.getPKColumns().get(i);
        Expression source = projectedExpressions.get(i);
        if (source == null
            || !source.equals(
                new ColumnRef(tableRef, column.getPosition()).newColumnExpression())) {
          // TODO: we could check the region boundaries to see if the pk will still be in it.
          runOnServer = false; // bail on running server side, since PK may be changing
          break;
        }
      }
      ////////////////////////////////////////////////////////////////////
      // UPSERT SELECT run server-side
      /////////////////////////////////////////////////////////////////////
      if (runOnServer) {
        // Iterate through columns being projected
        List<PColumn> projectedColumns =
            Lists.newArrayListWithExpectedSize(projectedExpressions.size());
        for (int i = 0; i < projectedExpressions.size(); i++) {
          // Must make new column if position has changed
          PColumn column = allColumns.get(allColumnsIndexes[i]);
          projectedColumns.add(column.getPosition() == i ? column : new PColumnImpl(column, i));
        }
        // Build table from projectedColumns
        PTable projectedTable = PTableImpl.makePTable(table, projectedColumns);
        SelectStatement select =
            SelectStatement.create(SelectStatement.COUNT_ONE, upsert.getHint());
        final RowProjector aggProjector =
            ProjectionCompiler.compile(queryPlan.getContext(), select, GroupBy.EMPTY_GROUP_BY);
        /*
         * Transfer over PTable representing subset of columns selected, but all PK columns.
         * Move columns setting PK first in pkSlot order, adding LiteralExpression of null for any missing ones.
         * Transfer over List<Expression> for projection.
         * In region scan, evaluate expressions in order, collecting first n columns for PK and collection non PK in mutation Map
         * Create the PRow and get the mutations, adding them to the batch
         */
        final StatementContext context = queryPlan.getContext();
        final Scan scan = context.getScan();
        scan.setAttribute(
            BaseScannerRegionObserver.UPSERT_SELECT_TABLE,
            UngroupedAggregateRegionObserver.serialize(projectedTable));
        scan.setAttribute(
            BaseScannerRegionObserver.UPSERT_SELECT_EXPRS,
            UngroupedAggregateRegionObserver.serialize(projectedExpressions));
        // Ignore order by - it has no impact
        final QueryPlan aggPlan =
            new AggregatePlan(
                context,
                select,
                tableRef,
                aggProjector,
                null,
                OrderBy.EMPTY_ORDER_BY,
                null,
                GroupBy.EMPTY_GROUP_BY,
                null);
        return new MutationPlan() {
          @Override
          public PhoenixConnection getConnection() {
            return connection;
          }

          @Override
          public ParameterMetaData getParameterMetaData() {
            return queryPlan.getContext().getBindManager().getParameterMetaData();
          }

          @Override
          public StatementContext getContext() {
            return queryPlan.getContext();
          }

          @Override
          public MutationState execute() throws SQLException {
            ImmutableBytesWritable ptr = context.getTempPtr();
            tableRef.getTable().getIndexMaintainers(ptr);
            ServerCache cache = null;
            try {
              if (ptr.getLength() > 0) {
                IndexMetaDataCacheClient client =
                    new IndexMetaDataCacheClient(connection, tableRef);
                cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
                byte[] uuidValue = cache.getId();
                scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
              }
              ResultIterator iterator = aggPlan.iterator();
              try {
                Tuple row = iterator.next();
                final long mutationCount =
                    (Long) aggProjector.getColumnProjector(0).getValue(row, PDataType.LONG, ptr);
                return new MutationState(maxSize, connection) {
                  @Override
                  public long getUpdateCount() {
                    return mutationCount;
                  }
                };
              } finally {
                iterator.close();
              }
            } finally {
              if (cache != null) {
                cache.close();
              }
            }
          }

          @Override
          public ExplainPlan getExplainPlan() throws SQLException {
            List<String> queryPlanSteps = aggPlan.getExplainPlan().getPlanSteps();
            List<String> planSteps =
                Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
            planSteps.add("UPSERT ROWS");
            planSteps.addAll(queryPlanSteps);
            return new ExplainPlan(planSteps);
          }
        };
      }
    }
    ////////////////////////////////////////////////////////////////////
    // UPSERT SELECT run client-side
    /////////////////////////////////////////////////////////////////////
    return new MutationPlan() {
      @Override
      public PhoenixConnection getConnection() {
        return connection;
      }

      @Override
      public ParameterMetaData getParameterMetaData() {
        return queryPlan.getContext().getBindManager().getParameterMetaData();
      }

      @Override
      public StatementContext getContext() {
        return queryPlan.getContext();
      }

      @Override
      public MutationState execute() throws SQLException {
        ResultIterator iterator = queryPlan.iterator();
        if (upsertParallelIteratorFactory == null) {
          return upsertSelect(
              statement, tableRef, projector, iterator, columnIndexes, pkSlotIndexes);
        }
        upsertParallelIteratorFactory.setRowProjector(projector);
        upsertParallelIteratorFactory.setColumnIndexes(columnIndexes);
        upsertParallelIteratorFactory.setPkSlotIndexes(pkSlotIndexes);
        Tuple tuple;
        long totalRowCount = 0;
        while ((tuple = iterator.next()) != null) { // Runs query
          Cell kv = tuple.getValue(0);
          totalRowCount +=
              PDataType.LONG
                  .getCodec()
                  .decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
        }
        // Return total number of rows that have been updated. In the case of auto commit being
        // off, the mutations will all be in the mutation state of the current connection.
        return new MutationState(maxSize, statement.getConnection(), totalRowCount);
      }

      @Override
      public ExplainPlan getExplainPlan() throws SQLException {
        List<String> queryPlanSteps = queryPlan.getExplainPlan().getPlanSteps();
        List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
        planSteps.add("UPSERT SELECT");
        planSteps.addAll(queryPlanSteps);
        return new ExplainPlan(planSteps);
      }
    };
  }
  ////////////////////////////////////////////////////////////////////
  // UPSERT VALUES
  /////////////////////////////////////////////////////////////////////
  int nodeIndex = 0;
  // initialize values with constant byte values first
  final byte[][] values = new byte[nValuesToSet][];
  if (isTenantSpecific) {
    values[nodeIndex++] = connection.getTenantId().getBytes();
  }
  if (isSharedViewIndex) {
    values[nodeIndex++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
  }
  final int nodeIndexOffset = nodeIndex;
  // Allocate array based on size of all columns in table,
  // since some values may not be set (if they're nullable).
  final StatementContext context = new StatementContext(statement, resolver, new Scan());
  UpsertValuesCompiler expressionBuilder = new UpsertValuesCompiler(context);
  final List<Expression> constantExpressions =
      Lists.newArrayListWithExpectedSize(valueNodes.size());
  // First build all the expressions, as with sequences we want to collect them all first
  // and initialize them in one batch
  for (ParseNode valueNode : valueNodes) {
    if (!valueNode.isStateless()) {
      throw new SQLExceptionInfo.Builder(SQLExceptionCode.VALUE_IN_UPSERT_NOT_CONSTANT)
          .build()
          .buildException();
    }
    PColumn column = allColumns.get(columnIndexes[nodeIndex]);
    expressionBuilder.setColumn(column);
    Expression expression = valueNode.accept(expressionBuilder);
    if (expression.getDataType() != null
        && !expression.getDataType().isCastableTo(column.getDataType())) {
      throw TypeMismatchException.newException(
          expression.getDataType(),
          column.getDataType(),
          "expression: " + expression.toString() + " in column " + column);
    }
    constantExpressions.add(expression);
    nodeIndex++;
  }
  return new MutationPlan() {
    @Override
    public PhoenixConnection getConnection() {
      return connection;
    }

    @Override
    public ParameterMetaData getParameterMetaData() {
      return context.getBindManager().getParameterMetaData();
    }

    @Override
    public StatementContext getContext() {
      return context;
    }

    @Override
    public MutationState execute() throws SQLException {
      ImmutableBytesWritable ptr = context.getTempPtr();
      final SequenceManager sequenceManager = context.getSequenceManager();
      // Next evaluate all the expressions
      int nodeIndex = nodeIndexOffset;
      Tuple tuple =
          sequenceManager.getSequenceCount() == 0 ? null : sequenceManager.newSequenceTuple(null);
      for (Expression constantExpression : constantExpressions) {
        PColumn column = allColumns.get(columnIndexes[nodeIndex]);
        constantExpression.evaluate(tuple, ptr);
        Object value = null;
        if (constantExpression.getDataType() != null) {
          value =
              constantExpression
                  .getDataType()
                  .toObject(
                      ptr,
                      constantExpression.getSortOrder(),
                      constantExpression.getMaxLength(),
                      constantExpression.getScale());
          if (!constantExpression.getDataType().isCoercibleTo(column.getDataType(), value)) {
            throw TypeMismatchException.newException(
                constantExpression.getDataType(),
                column.getDataType(),
                "expression: " + constantExpression.toString() + " in column " + column);
          }
          if (!column
              .getDataType()
              .isSizeCompatible(
                  ptr,
                  value,
                  constantExpression.getDataType(),
                  constantExpression.getMaxLength(),
                  constantExpression.getScale(),
                  column.getMaxLength(),
                  column.getScale())) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY)
                .setColumnName(column.getName().getString())
                .setMessage("value=" + constantExpression.toString())
                .build()
                .buildException();
          }
        }
        column
            .getDataType()
            .coerceBytes(
                ptr,
                value,
                constantExpression.getDataType(),
                constantExpression.getMaxLength(),
                constantExpression.getScale(),
                constantExpression.getSortOrder(),
                column.getMaxLength(),
                column.getScale(),
                column.getSortOrder());
        if (overlapViewColumns.contains(column)
            && Bytes.compareTo(
                    ptr.get(),
                    ptr.getOffset(),
                    ptr.getLength(),
                    column.getViewConstant(),
                    0,
                    column.getViewConstant().length - 1)
                != 0) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_UPDATE_VIEW_COLUMN)
              .setColumnName(column.getName().getString())
              .setMessage("value=" + constantExpression.toString())
              .build()
              .buildException();
        }
        values[nodeIndex] = ByteUtil.copyKeyBytesIfNecessary(ptr);
        nodeIndex++;
      }
      // Add columns based on view
      for (PColumn column : addViewColumns) {
        if (IndexUtil.getViewConstantValue(column, ptr)) {
          values[nodeIndex++] = ByteUtil.copyKeyBytesIfNecessary(ptr);
        } else {
          throw new IllegalStateException();
        }
      }
      Map<ImmutableBytesPtr, Map<PColumn, byte[]>> mutation = Maps.newHashMapWithExpectedSize(1);
      setValues(values, pkSlotIndexes, columnIndexes, tableRef.getTable(), mutation);
      return new MutationState(tableRef, mutation, 0, maxSize, connection);
    }

    @Override
    public ExplainPlan getExplainPlan() throws SQLException {
      List<String> planSteps = Lists.newArrayListWithExpectedSize(2);
      if (context.getSequenceManager().getSequenceCount() > 0) {
        planSteps.add(
            "CLIENT RESERVE " + context.getSequenceManager().getSequenceCount() + " SEQUENCES");
      }
      planSteps.add("PUT SINGLE ROW");
      return new ExplainPlan(planSteps);
    }
  };
}
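For orientation, a hedged sketch of the two UPSERT SELECT shapes the plan above distinguishes, using a hypothetical METRICS table that is assumed to be unsalted, mutable, and not a view: with auto-commit on, a same-table select with no aggregation, DISTINCT, or LIMIT can be pushed to the server, while anything else runs on the client and is fed through the MutationState.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class UpsertSelectPathsSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative only: METRICS and its columns ID, VAL are assumptions.
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
      conn.setAutoCommit(true);
      try (Statement stmt = conn.createStatement()) {
        // Candidate for the server-side path: same table, no aggregation/DISTINCT/LIMIT,
        // and the PK column is copied through unchanged.
        stmt.executeUpdate("UPSERT INTO METRICS(ID, VAL) SELECT ID, VAL * 2 FROM METRICS");
        // Forced onto the client-side path by the LIMIT clause.
        stmt.executeUpdate("UPSERT INTO METRICS(ID, VAL) SELECT ID, VAL FROM METRICS LIMIT 10");
      }
    }
  }
}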
@SuppressWarnings("deprecation")
public void commit() throws SQLException {
  int i = 0;
  byte[] tenantId = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
  long[] serverTimeStamps = validate();
  Iterator<Map.Entry<TableRef, Map<ImmutableBytesPtr, Map<PColumn, byte[]>>>> iterator =
      this.mutations.entrySet().iterator();
  List<Map.Entry<TableRef, Map<ImmutableBytesPtr, Map<PColumn, byte[]>>>> committedList =
      Lists.newArrayListWithCapacity(this.mutations.size());
  // add tracing for this operation
  TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables");
  Span span = trace.getSpan();
  while (iterator.hasNext()) {
    Map.Entry<TableRef, Map<ImmutableBytesPtr, Map<PColumn, byte[]>>> entry = iterator.next();
    Map<ImmutableBytesPtr, Map<PColumn, byte[]>> valuesMap = entry.getValue();
    TableRef tableRef = entry.getKey();
    PTable table = tableRef.getTable();
    table.getIndexMaintainers(tempPtr);
    boolean hasIndexMaintainers = tempPtr.getLength() > 0;
    boolean isDataTable = true;
    long serverTimestamp = serverTimeStamps[i++];
    Iterator<Pair<byte[], List<Mutation>>> mutationsIterator =
        addRowMutations(tableRef, valuesMap, serverTimestamp, false);
    while (mutationsIterator.hasNext()) {
      Pair<byte[], List<Mutation>> pair = mutationsIterator.next();
      byte[] htableName = pair.getFirst();
      List<Mutation> mutations = pair.getSecond();
      // create a span per target table
      // TODO maybe we can be smarter about the table name to string here?
      Span child =
          Tracing.child(span, "Writing mutation batch for table: " + Bytes.toString(htableName));
      int retryCount = 0;
      boolean shouldRetry = false;
      do {
        ServerCache cache = null;
        if (hasIndexMaintainers && isDataTable) {
          byte[] attribValue = null;
          byte[] uuidValue;
          if (IndexMetaDataCacheClient.useIndexMetadataCache(
              connection, mutations, tempPtr.getLength())) {
            IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
            cache = client.addIndexMetadataCache(mutations, tempPtr);
            child.addTimelineAnnotation("Updated index metadata cache");
            uuidValue = cache.getId();
            // If we haven't retried yet, retry for this case only, as it's possible that
            // a split will occur after we send the index metadata cache to all known
            // region servers.
            shouldRetry = true;
          } else {
            attribValue = ByteUtil.copyKeyBytesIfNecessary(tempPtr);
            uuidValue = ServerCacheClient.generateId();
          }
          // Either set the UUID to be able to access the index metadata from the cache
          // or set the index metadata directly on the Mutation
          for (Mutation mutation : mutations) {
            if (tenantId != null) {
              mutation.setAttribute(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
            }
            mutation.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
            if (attribValue != null) {
              mutation.setAttribute(PhoenixIndexCodec.INDEX_MD, attribValue);
            }
          }
        }
        SQLException sqlE = null;
        HTableInterface hTable = connection.getQueryServices().getTable(htableName);
        try {
          if (logger.isDebugEnabled()) logMutationSize(hTable, mutations);
          long startTime = System.currentTimeMillis();
          child.addTimelineAnnotation("Attempt " + retryCount);
          hTable.batch(mutations);
          child.stop();
          shouldRetry = false;
          if (logger.isDebugEnabled())
            logger.debug(
                "Total time for batch call of "
                    + mutations.size()
                    + " mutations into "
                    + table.getName().getString()
                    + ": "
                    + (System.currentTimeMillis() - startTime)
                    + " ms");
          committedList.add(entry);
        } catch (Exception e) {
          SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
          if (inferredE != null) {
            if (shouldRetry
                && retryCount == 0
                && inferredE.getErrorCode()
                    == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
              // Swallow this exception once, as it's possible that we split after sending the
              // index metadata and one of the region servers doesn't have it. This will cause it
              // to have it the next go around. If it fails again, we don't retry.
              String msg =
                  "Swallowing exception and retrying after clearing meta cache on connection. "
                      + inferredE;
              logger.warn(msg);
              connection.getQueryServices().clearTableRegionCache(htableName);
              // add a new child span as this one failed
              child.addTimelineAnnotation(msg);
              child.stop();
              child = Tracing.child(span, "Failed batch, attempting retry");
              continue;
            }
            e = inferredE;
          }
          // Throw to client with both what was committed so far and what is left to be committed.
          // That way, client can either undo what was done or try again with what was not done.
          sqlE =
              new CommitException(
                  e,
                  this,
                  new MutationState(committedList, this.sizeOffset, this.maxSize, this.connection));
        } finally {
          try {
            hTable.close();
          } catch (IOException e) {
            if (sqlE != null) {
              sqlE.setNextException(ServerUtil.parseServerException(e));
            } else {
              sqlE = ServerUtil.parseServerException(e);
            }
          } finally {
            try {
              if (cache != null) {
                cache.close();
              }
            } finally {
              if (sqlE != null) {
                throw sqlE;
              }
            }
          }
        }
      } while (shouldRetry && retryCount++ < 1);
      isDataTable = false;
    }
    numRows -= entry.getValue().size();
    iterator.remove(); // Remove batches as we process them
  }
  trace.close();
  assert (numRows == 0);
  assert (this.mutations.isEmpty());
}
private void assertIteration(String dataColumns, String pk, Object[] values, String dataProps)
    throws Exception {
  String schemaName = "";
  String tableName = "T";
  Connection conn = DriverManager.getConnection(getUrl());
  String fullTableName =
      SchemaUtil.getTableName(
          SchemaUtil.normalizeIdentifier(schemaName), SchemaUtil.normalizeIdentifier(tableName));
  conn.createStatement()
      .execute(
          "CREATE TABLE "
              + fullTableName
              + "("
              + dataColumns
              + " CONSTRAINT pk PRIMARY KEY ("
              + pk
              + ")) "
              + (dataProps.isEmpty() ? "" : dataProps));
  PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
  PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), fullTableName));
  conn.close();
  StringBuilder buf = new StringBuilder("UPSERT INTO " + fullTableName + " VALUES(");
  for (int i = 0; i < values.length; i++) {
    buf.append("?,");
  }
  buf.setCharAt(buf.length() - 1, ')');
  PreparedStatement stmt = conn.prepareStatement(buf.toString());
  for (int i = 0; i < values.length; i++) {
    stmt.setObject(i + 1, values[i]);
  }
  stmt.execute();
  Iterator<Pair<byte[], List<KeyValue>>> iterator =
      PhoenixRuntime.getUncommittedDataIterator(conn);
  List<KeyValue> dataKeyValues = iterator.next().getSecond();
  KeyValue keyValue = dataKeyValues.get(0);
  List<SortOrder> sortOrders = Lists.newArrayListWithExpectedSize(table.getPKColumns().size());
  for (PColumn col : table.getPKColumns()) {
    sortOrders.add(col.getSortOrder());
  }
  RowKeySchema schema = table.getRowKeySchema();
  int minOffset = keyValue.getRowOffset();
  ImmutableBytesWritable ptr = new ImmutableBytesWritable();
  int nExpectedValues = values.length;
  for (int i = values.length - 1; i >= 0; i--) {
    if (values[i] == null) {
      nExpectedValues--;
    } else {
      break;
    }
  }
  int i = 0;
  int maxOffset =
      schema.iterator(keyValue.getRowArray(), minOffset, keyValue.getRowLength(), ptr);
  for (i = 0; i < schema.getFieldCount(); i++) {
    Boolean hasValue = schema.next(ptr, i, maxOffset);
    if (hasValue == null) {
      break;
    }
    assertTrue(hasValue);
    PDataType type = PDataType.fromLiteral(values[i]);
    SortOrder sortOrder = sortOrders.get(i);
    Object value = type.toObject(ptr, schema.getField(i).getDataType(), sortOrder);
    assertEquals(values[i], value);
  }
  assertEquals(nExpectedValues, i);
  assertNull(schema.next(ptr, i, maxOffset));
  for (i--; i >= 0; i--) {
    Boolean hasValue = schema.previous(ptr, i, minOffset);
    if (hasValue == null) {
      break;
    }
    assertTrue(hasValue);
    PDataType type = PDataType.fromLiteral(values[i]);
    SortOrder sortOrder = sortOrders.get(i);
    Object value = type.toObject(ptr, schema.getField(i).getDataType(), sortOrder);
    assertEquals(values[i], value);
  }
  assertEquals(-1, i);
  assertNull(schema.previous(ptr, i, minOffset));
}