/**
 * Encode the primary key values from the table as a byte array. The values must be in the same
 * order as the primary key constraint. If the connection and table are both tenant-specific, the
 * tenant ID column must not be present in the values.
 *
 * @param conn an open connection
 * @param fullTableName the full table name
 * @param values the values of the primary key columns ordered in the same order as the primary
 *     key constraint
 * @return the encoded byte array
 * @throws SQLException if the table cannot be found or an incorrect number of values is provided
 * @see #decodePK(Connection, String, byte[]) to decode the byte[] back to the values
 */
public static byte[] encodePK(Connection conn, String fullTableName, Object[] values)
    throws SQLException {
  PTable table = getTable(conn, fullTableName);
  PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
  int offset =
      (table.getBucketNum() == null ? 0 : 1)
          + (table.isMultiTenant() && pconn.getTenantId() != null ? 1 : 0);
  List<PColumn> pkColumns = table.getPKColumns();
  if (pkColumns.size() - offset != values.length) {
    throw new SQLException(
        "Expected " + (pkColumns.size() - offset) + " but got " + values.length);
  }
  PDataType type = null;
  TrustedByteArrayOutputStream output =
      new TrustedByteArrayOutputStream(table.getRowKeySchema().getEstimatedValueLength());
  try {
    for (int i = offset; i < pkColumns.size(); i++) {
      if (type != null && !type.isFixedWidth()) {
        output.write(QueryConstants.SEPARATOR_BYTE);
      }
      type = pkColumns.get(i).getDataType();
      byte[] value = type.toBytes(values[i - offset]);
      output.write(value);
    }
    return output.toByteArray();
  } finally {
    try {
      output.close();
    } catch (IOException e) {
      throw new RuntimeException(e); // Impossible
    }
  }
}
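/*
 * Usage sketch (added for illustration, not from the original source; the JDBC URL, table
 * name, and values are hypothetical). Encodes a two-column PK and decodes it back with the
 * companion decodePK further below. Values must follow PK constraint order, and the tenant ID
 * column is omitted on a tenant-specific connection to a multi-tenant table.
 */
private static void encodePKRoundTrip() throws SQLException {
  try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
    byte[] encoded =
        PhoenixRuntime.encodePK(conn, "MY_SCHEMA.MY_TABLE", new Object[] {"org1", 42L});
    Object[] decoded = PhoenixRuntime.decodePK(conn, "MY_SCHEMA.MY_TABLE", encoded);
    assert decoded.length == 2; // same values, in PK constraint order
  }
}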
/*
 * Map all unique <family, name> pairs to an index. The table name is part of TableRowkey,
 * so we do not care about it here.
 */
private void initColumnIndexes() throws SQLException {
  columnIndexes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
  int columnIndex = 0;
  for (int index = 0; index < logicalNames.size(); index++) {
    PTable table = PhoenixRuntime.getTable(conn, logicalNames.get(index));
    List<PColumn> cls = table.getColumns();
    for (int i = 0; i < cls.size(); i++) {
      PColumn c = cls.get(i);
      byte[] family = new byte[0];
      if (c.getFamilyName() != null) { // PK columns have no family; leave it empty for them
        family = c.getFamilyName().getBytes();
      }
      byte[] name = c.getName().getBytes();
      byte[] cfn = Bytes.add(family, QueryConstants.NAMESPACE_SEPARATOR_BYTES, name);
      if (!columnIndexes.containsKey(cfn)) {
        columnIndexes.put(cfn, Integer.valueOf(columnIndex));
        columnIndex++;
      }
    }
    byte[] emptyColumnFamily = SchemaUtil.getEmptyColumnFamily(table);
    byte[] cfn =
        Bytes.add(
            emptyColumnFamily,
            QueryConstants.NAMESPACE_SEPARATOR_BYTES,
            QueryConstants.EMPTY_COLUMN_BYTES);
    columnIndexes.put(cfn, Integer.valueOf(columnIndex));
    columnIndex++;
  }
}
public static String findExistingColumn(PTable table, List<PColumn> columns) {
  for (PColumn column : columns) {
    PName familyName = column.getFamilyName();
    if (familyName == null) {
      try {
        return table.getPKColumn(column.getName().getString()).getName().getString();
      } catch (ColumnNotFoundException e) {
        continue;
      }
    } else {
      try {
        return table
            .getColumnFamily(familyName.getString())
            .getColumn(column.getName().getString())
            .getName()
            .getString();
      } catch (ColumnFamilyNotFoundException e) {
        continue; // Shouldn't happen
      } catch (ColumnNotFoundException e) {
        continue;
      }
    }
  }
  return null;
}
public OrderPreservingTracker(
    StatementContext context,
    GroupBy groupBy,
    Ordering ordering,
    int nNodes,
    TupleProjector projector) {
  this.context = context;
  if (groupBy.isEmpty()) {
    PTable table = context.getResolver().getTables().get(0).getTable();
    this.isOrderPreserving = table.rowKeyOrderOptimizable();
    boolean isSalted = table.getBucketNum() != null;
    boolean isMultiTenant =
        context.getConnection().getTenantId() != null && table.isMultiTenant();
    boolean isSharedViewIndex = table.getViewIndexId() != null;
    // TODO: util for this offset, as it's computed in numerous places
    this.pkPositionOffset =
        (isSalted ? 1 : 0) + (isMultiTenant ? 1 : 0) + (isSharedViewIndex ? 1 : 0);
  } else {
    this.isOrderPreserving = true;
    this.pkPositionOffset = 0;
  }
  this.groupBy = groupBy;
  this.visitor = new TrackOrderPreservingExpressionVisitor(projector);
  this.orderPreservingInfos = Lists.newArrayListWithExpectedSize(nNodes);
  this.ordering = ordering;
  this.projector = projector;
}
private static void setValues(
    byte[][] values,
    int[] pkSlotIndex,
    int[] columnIndexes,
    PTable table,
    Map<ImmutableBytesPtr, Map<PColumn, byte[]>> mutation) {
  Map<PColumn, byte[]> columnValues = Maps.newHashMapWithExpectedSize(columnIndexes.length);
  byte[][] pkValues = new byte[table.getPKColumns().size()][];
  // If the table uses salting, the first byte is the salting byte: set a one-byte
  // placeholder here; the actual byte is filled in later in PRowImpl.
  if (table.getBucketNum() != null) {
    pkValues[0] = new byte[] {0};
  }
  for (int i = 0; i < values.length; i++) {
    byte[] value = values[i];
    PColumn column = table.getColumns().get(columnIndexes[i]);
    if (SchemaUtil.isPKColumn(column)) {
      pkValues[pkSlotIndex[i]] = value;
    } else {
      columnValues.put(column, value);
    }
  }
  ImmutableBytesPtr ptr = new ImmutableBytesPtr();
  table.newKey(ptr, pkValues);
  mutation.put(ptr, columnValues);
}
/**
 * Returns the column info for the given column in the given table.
 *
 * @param table the table containing the column
 * @param columnName user-specified column name; may be family-qualified or bare
 * @return the ColumnInfo associated with the column in the table
 * @throws SQLException if a parameter is null, or if the column is not found or is ambiguous
 */
public static ColumnInfo getColumnInfo(PTable table, String columnName) throws SQLException {
  if (table == null) {
    throw new SQLException("Table must not be null.");
  }
  if (columnName == null) {
    throw new SQLException("columnName must not be null.");
  }
  columnName = columnName.trim().toUpperCase();
  PColumn pColumn = null;
  if (columnName.contains(QueryConstants.NAME_SEPARATOR)) {
    String[] tokens = columnName.split(QueryConstants.NAME_SEPARATOR_REGEX);
    if (tokens.length != 2) {
      throw new SQLException(
          String.format(
              "Unable to process column %s, expected family-qualified name.", columnName));
    }
    String familyName = tokens[0];
    String familyColumn = tokens[1];
    PColumnFamily family = table.getColumnFamily(familyName);
    pColumn = family.getColumn(familyColumn);
  } else {
    pColumn = table.getColumn(columnName);
  }
  return getColumnInfo(pColumn);
}
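/*
 * Usage sketch (added for illustration; the table and column names are hypothetical).
 * Shows that getColumnInfo normalizes case and accepts both bare and family-qualified
 * names; an ambiguous or missing name surfaces as a SQLException.
 */
private static void getColumnInfoExample(Connection conn) throws SQLException {
  PTable table = PhoenixRuntime.getTable(conn, "MY_SCHEMA.MY_TABLE"); // hypothetical table
  ColumnInfo pk = getColumnInfo(table, "id"); // bare name, normalized to ID
  ColumnInfo val = getColumnInfo(table, "CF1.VAL"); // family-qualified name
  System.out.println(pk.getColumnName() + ", " + val.getColumnName());
}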
/**
 * Check that no columns in our updatable VIEW are changing values.
 *
 * @param tableRef the table being upserted into
 * @param overlapViewColumns view constant columns that overlap the upserted columns
 * @param targetColumns the columns being set
 * @param projector the row projector for the select
 * @throws SQLException if a view constant column would be changed
 */
private static void throwIfNotUpdatable(
    TableRef tableRef,
    Set<PColumn> overlapViewColumns,
    List<PColumn> targetColumns,
    RowProjector projector,
    boolean sameTable)
    throws SQLException {
  PTable table = tableRef.getTable();
  if (table.getViewType() == ViewType.UPDATABLE && !overlapViewColumns.isEmpty()) {
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    for (int i = 0; i < targetColumns.size(); i++) {
      PColumn targetColumn = targetColumns.get(i);
      if (overlapViewColumns.contains(targetColumn)) {
        Expression source = projector.getColumnProjector(i).getExpression();
        if (source.isStateless()) {
          source.evaluate(null, ptr);
          if (Bytes.compareTo(
                  ptr.get(),
                  ptr.getOffset(),
                  ptr.getLength(),
                  targetColumn.getViewConstant(),
                  0,
                  targetColumn.getViewConstant().length - 1)
              == 0) {
            continue;
          }
        }
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_UPDATE_VIEW_COLUMN)
            .setColumnName(targetColumn.getName().getString())
            .build()
            .buildException();
      }
    }
  }
}
public static ImmutableBytesPtr getEmptyColumnFamilyPtr(PTable table) {
  List<PColumnFamily> families = table.getColumnFamilies();
  return families.isEmpty()
      ? table.getDefaultFamilyName() == null
          ? QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES_PTR
          : table.getDefaultFamilyName().getBytesPtr()
      : families.get(0).getName().getBytesPtr();
}
public static byte[] getTableKey(PTable dataTable) {
  PName tenantId = dataTable.getTenantId();
  PName schemaName = dataTable.getSchemaName();
  return getTableKey(
      tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes(),
      schemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : schemaName.getBytes(),
      dataTable.getTableName().getBytes());
}
public static String getEmptyColumnFamilyAsString(PTable table) {
  List<PColumnFamily> families = table.getColumnFamilies();
  return families.isEmpty()
      ? table.getDefaultFamilyName() == null
          ? QueryConstants.DEFAULT_COLUMN_FAMILY
          : table.getDefaultFamilyName().getString()
      : families.get(0).getName().getString();
}
@Test
public void testCreateTableToBeTransactional() throws Exception {
  Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
  Connection conn = DriverManager.getConnection(getUrl(), props);
  String ddl =
      "CREATE TABLE TEST_TRANSACTIONAL_TABLE (k varchar primary key) transactional=true";
  conn.createStatement().execute(ddl);
  PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
  PTable table = pconn.getTable(new PTableKey(null, "TEST_TRANSACTIONAL_TABLE"));
  HTableInterface htable =
      pconn.getQueryServices().getTable(Bytes.toBytes("TEST_TRANSACTIONAL_TABLE"));
  assertTrue(table.isTransactional());
  assertTrue(
      htable
          .getTableDescriptor()
          .getCoprocessors()
          .contains(TransactionProcessor.class.getName()));

  try {
    ddl = "ALTER TABLE TEST_TRANSACTIONAL_TABLE SET transactional=false";
    conn.createStatement().execute(ddl);
    fail();
  } catch (SQLException e) {
    assertEquals(SQLExceptionCode.TX_MAY_NOT_SWITCH_TO_NON_TX.getErrorCode(), e.getErrorCode());
  }

  HBaseAdmin admin = pconn.getQueryServices().getAdmin();
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("TXN_TEST_EXISTING"));
  desc.addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES));
  admin.createTable(desc);
  ddl = "CREATE TABLE TXN_TEST_EXISTING (k varchar primary key) transactional=true";
  conn.createStatement().execute(ddl);
  assertEquals(
      Boolean.TRUE.toString(),
      admin
          .getTableDescriptor(TableName.valueOf("TXN_TEST_EXISTING"))
          .getValue(TxConstants.READ_NON_TX_DATA));

  // Even though the HBase metadata matches the existing metadata, an existing
  // transactional table may not be re-declared without transactional=true.
  ddl = "CREATE TABLE IF NOT EXISTS TEST_TRANSACTIONAL_TABLE (k varchar primary key)";
  try {
    conn.createStatement().execute(ddl);
    fail();
  } catch (SQLException e) {
    assertEquals(SQLExceptionCode.TX_MAY_NOT_SWITCH_TO_NON_TX.getErrorCode(), e.getErrorCode());
  }
  ddl += " transactional=true";
  conn.createStatement().execute(ddl);
  table = pconn.getTable(new PTableKey(null, "TEST_TRANSACTIONAL_TABLE"));
  htable = pconn.getQueryServices().getTable(Bytes.toBytes("TEST_TRANSACTIONAL_TABLE"));
  assertTrue(table.isTransactional());
  assertTrue(
      htable
          .getTableDescriptor()
          .getCoprocessors()
          .contains(TransactionProcessor.class.getName()));
}
public static short getMaxKeySeq(PTable table) {
  int offset = 0;
  if (table.getBucketNum() != null) {
    offset++;
  }
  // TODO: for a tenant-specific table on a tenant-specific connection,
  // we should subtract one for the tenant column and another one for
  // the index ID
  return (short) (table.getPKColumns().size() - offset);
}
@BeforeClass
public static void doSetup() throws Exception {
  startServer(getUrl());
  ensureTableCreated(getUrl(), ATABLE_NAME);
  ensureTableCreated(getUrl(), ENTITY_HISTORY_TABLE_NAME);
  ensureTableCreated(getUrl(), FUNKY_NAME);
  ensureTableCreated(getUrl(), PTSDB_NAME);
  ensureTableCreated(getUrl(), PTSDB2_NAME);
  ensureTableCreated(getUrl(), PTSDB3_NAME);
  ensureTableCreated(getUrl(), MULTI_CF_NAME);
  ensureTableCreated(getUrl(), JOIN_ORDER_TABLE_FULL_NAME);
  ensureTableCreated(getUrl(), JOIN_CUSTOMER_TABLE_FULL_NAME);
  ensureTableCreated(getUrl(), JOIN_ITEM_TABLE_FULL_NAME);
  ensureTableCreated(getUrl(), JOIN_SUPPLIER_TABLE_FULL_NAME);
  ensureTableCreated(getUrl(), TABLE_WITH_ARRAY);
  Properties props = new Properties();
  props.setProperty(
      PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(HConstants.LATEST_TIMESTAMP));
  PhoenixConnection conn =
      DriverManager.getConnection(PHOENIX_CONNECTIONLESS_JDBC_URL, props)
          .unwrap(PhoenixConnection.class);
  try {
    PTable table = conn.getTable(new PTableKey(null, ATABLE_NAME));
    ATABLE = table;
    ORGANIZATION_ID =
        new ColumnRef(new TableRef(table), table.getColumn("ORGANIZATION_ID").getPosition())
            .newColumnExpression();
    ENTITY_ID =
        new ColumnRef(new TableRef(table), table.getColumn("ENTITY_ID").getPosition())
            .newColumnExpression();
    A_INTEGER =
        new ColumnRef(new TableRef(table), table.getColumn("A_INTEGER").getPosition())
            .newColumnExpression();
    A_STRING =
        new ColumnRef(new TableRef(table), table.getColumn("A_STRING").getPosition())
            .newColumnExpression();
    B_STRING =
        new ColumnRef(new TableRef(table), table.getColumn("B_STRING").getPosition())
            .newColumnExpression();
    A_DATE =
        new ColumnRef(new TableRef(table), table.getColumn("A_DATE").getPosition())
            .newColumnExpression();
    A_TIME =
        new ColumnRef(new TableRef(table), table.getColumn("A_TIME").getPosition())
            .newColumnExpression();
    A_TIMESTAMP =
        new ColumnRef(new TableRef(table), table.getColumn("A_TIMESTAMP").getPosition())
            .newColumnExpression();
    X_DECIMAL =
        new ColumnRef(new TableRef(table), table.getColumn("X_DECIMAL").getPosition())
            .newColumnExpression();
  } finally {
    conn.close();
  }
}
/**
 * Imperfect estimate of row size given a PTable.
 * TODO: keep the row count in the stats table and use total size / row count instead.
 *
 * @param table the table to estimate
 * @return estimate of the size in bytes of a row
 */
public static long estimateRowSize(PTable table) {
  int keyLength = estimateKeyLength(table);
  long rowSize = 0;
  for (PColumn column : table.getColumns()) {
    if (!SchemaUtil.isPKColumn(column)) {
      PDataType type = column.getDataType();
      Integer maxLength = column.getMaxLength();
      int valueLength =
          !type.isFixedWidth()
              ? VAR_KV_LENGTH_ESTIMATE
              : maxLength == null ? type.getByteSize() : maxLength;
      rowSize +=
          KeyValue.getKeyValueDataStructureSize(
              keyLength,
              column.getFamilyName().getBytes().length,
              column.getName().getBytes().length,
              valueLength);
    }
  }
  // Empty key value
  rowSize +=
      KeyValue.getKeyValueDataStructureSize(
          keyLength,
          getEmptyColumnFamily(table).length,
          QueryConstants.EMPTY_COLUMN_BYTES.length,
          0);
  return rowSize;
}
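/*
 * Usage sketch (added for illustration; the region size is hypothetical). Because the
 * estimate charges the full row key to every KeyValue, wide rows are dominated by
 * keyLength times the non-PK column count. One plausible use is a back-of-the-envelope
 * rows-per-region figure:
 */
private static long estimateRowsPerRegion(PTable table, long regionBytes) {
  long bytesPerRow = estimateRowSize(table);
  return regionBytes / bytesPerRow; // e.g. regionBytes = 256L << 20 for a 256 MB region
}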
/**
 * Get the list of ColumnInfos that contain the column name and its associated PDataType for an
 * import. The supplied list of columns can be null -- if it is non-null, it represents a
 * user-supplied list of columns to be imported.
 *
 * @param conn Phoenix connection from which metadata will be read
 * @param tableName Phoenix table name whose columns are to be checked. Can include a schema name.
 * @param columns user-supplied list of import columns, can be null
 */
public static List<ColumnInfo> generateColumnInfo(
    Connection conn, String tableName, List<String> columns) throws SQLException {
  PTable table = PhoenixRuntime.getTable(conn, tableName);
  List<ColumnInfo> columnInfoList = Lists.newArrayList();
  Set<String> unresolvedColumnNames = new TreeSet<String>();
  if (columns == null) {
    // Use all columns in the table
    for (PColumn pColumn : table.getColumns()) {
      int sqlType = pColumn.getDataType().getResultSetSqlType();
      columnInfoList.add(new ColumnInfo(pColumn.toString(), sqlType));
    }
  } else {
    // Collect the names we can't resolve so they can all be reported at once below
    for (int i = 0; i < columns.size(); i++) {
      String columnName = columns.get(i);
      try {
        ColumnInfo columnInfo = PhoenixRuntime.getColumnInfo(table, columnName);
        columnInfoList.add(columnInfo);
      } catch (ColumnNotFoundException cnfe) {
        unresolvedColumnNames.add(columnName.trim());
      } catch (AmbiguousColumnException ace) {
        unresolvedColumnNames.add(columnName.trim());
      }
    }
  }
  // If any columns could not be resolved, error out.
  if (unresolvedColumnNames.size() > 0) {
    StringBuilder exceptionMessage = new StringBuilder();
    boolean first = true;
    exceptionMessage.append("Unable to resolve these column names:\n");
    for (String col : unresolvedColumnNames) {
      if (first) first = false;
      else exceptionMessage.append(",");
      exceptionMessage.append(col);
    }
    exceptionMessage.append("\nAvailable columns with column families:\n");
    first = true;
    for (PColumn pColumn : table.getColumns()) {
      if (first) first = false;
      else exceptionMessage.append(",");
      exceptionMessage.append(pColumn.toString());
    }
    throw new SQLException(exceptionMessage.toString());
  }
  return columnInfoList;
}
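/*
 * Usage sketch (added for illustration; the table name and header are hypothetical).
 * Resolving a user-supplied column list up front means a bad import header fails fast,
 * with every unresolvable name reported in a single exception, rather than mid-load.
 */
private static List<ColumnInfo> resolveImportColumns(Connection conn) throws SQLException {
  List<String> csvHeader = java.util.Arrays.asList("ID", "CF1.VAL"); // hypothetical header
  return generateColumnInfo(conn, "MY_SCHEMA.TARGET_TABLE", csvHeader);
}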
@Override
public Void visit(NamedTableNode tableNode) throws SQLException {
  String alias = tableNode.getAlias();
  TableRef tableRef = createTableRef(tableNode, true);
  PTable theTable = tableRef.getTable();
  if (alias != null) {
    tableMap.put(alias, tableRef);
  }
  String name = theTable.getName().getString();
  // Avoid having one name mapped to two identical TableRefs.
  if (alias == null || !alias.equals(name)) {
    tableMap.put(name, tableRef);
  }
  tables.add(tableRef);
  return null;
}
/**
 * Decode a byte array value back into the Object values of the primary key constraint. If the
 * connection and table are both tenant-specific, the tenant ID column is not expected to have
 * been encoded and will not appear in the returned values.
 *
 * @param conn an open connection
 * @param name the full table name
 * @param value the value that was encoded with {@link #encodePK(Connection, String, Object[])}
 * @return the Object values encoded in the byte array value
 * @throws SQLException if the table cannot be found
 */
public static Object[] decodePK(Connection conn, String name, byte[] value) throws SQLException {
  PTable table = getTable(conn, name);
  PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
  int offset =
      (table.getBucketNum() == null ? 0 : 1)
          + (table.isMultiTenant() && pconn.getTenantId() != null ? 1 : 0);
  RowKeySchema schema = table.getRowKeySchema();
  int nValues = schema.getMaxFields() - offset;
  Object[] values = new Object[nValues];
  ImmutableBytesWritable ptr = new ImmutableBytesWritable();
  int i = 0;
  schema.iterator(value, ptr, offset);
  while (i < nValues && schema.next(ptr, i, value.length) != null) {
    values[i] = schema.getField(i).getDataType().toObject(ptr);
    i++;
  }
  return values;
}
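/*
 * Usage sketch (added for illustration; the offset computation mirrors the one inside
 * encodePK/decodePK above). Pairs each decoded value with its PK column name, skipping
 * the salt byte and, on a tenant-specific connection to a multi-tenant table, the tenant
 * ID column, just as decodePK does.
 */
private static void printDecodedPK(Connection conn, String fullTableName, byte[] encodedPK)
    throws SQLException {
  PTable table = PhoenixRuntime.getTable(conn, fullTableName);
  PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
  int offset =
      (table.getBucketNum() == null ? 0 : 1)
          + (table.isMultiTenant() && pconn.getTenantId() != null ? 1 : 0);
  Object[] values = PhoenixRuntime.decodePK(conn, fullTableName, encodedPK);
  List<PColumn> pkColumns = table.getPKColumns();
  for (int i = 0; i < values.length; i++) {
    System.out.println(pkColumns.get(i + offset).getName().getString() + " = " + values[i]);
  }
}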
public static boolean columnExists(PTable table, String columnName) {
  try {
    table.getColumn(columnName);
    return true;
  } catch (ColumnNotFoundException e) {
    return false;
  } catch (AmbiguousColumnException e) {
    return true;
  }
}
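/*
 * Usage sketch (added for illustration; the column name and DDL are hypothetical).
 * Note that an ambiguous name -- the same column name in two families -- counts as
 * existing, so callers needing an unambiguous match should resolve through
 * getColumnInfo instead and let AmbiguousColumnException surface.
 */
private static void addColumnIfMissing(Connection conn, PTable table) throws SQLException {
  if (!columnExists(table, "NEW_COL")) { // hypothetical column
    conn.createStatement()
        .execute("ALTER TABLE " + table.getName().getString() + " ADD NEW_COL VARCHAR");
  }
}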
private static KeyValueSchema buildSchema(PTable table) {
  KeyValueSchemaBuilder builder = new KeyValueSchemaBuilder(0);
  if (table != null) {
    for (PColumn column : table.getColumns()) {
      if (!SchemaUtil.isPKColumn(column)) {
        builder.addField(column);
      }
    }
  }
  return builder.build();
}
protected PTable addDynamicColumns(List<ColumnDef> dynColumns, PTable theTable)
    throws SQLException {
  if (!dynColumns.isEmpty()) {
    List<PColumn> allcolumns = new ArrayList<PColumn>();
    List<PColumn> existingColumns = theTable.getColumns();
    // Need to skip the salting column, as it's added in the makePTable call below
    allcolumns.addAll(
        theTable.getBucketNum() == null
            ? existingColumns
            : existingColumns.subList(1, existingColumns.size()));
    // Positions are still computed with the salting column included
    int position = existingColumns.size();
    PName defaultFamilyName = PNameFactory.newName(SchemaUtil.getEmptyColumnFamily(theTable));
    for (ColumnDef dynColumn : dynColumns) {
      PName familyName = defaultFamilyName;
      PName name = PNameFactory.newName(dynColumn.getColumnDefName().getColumnName());
      String family = dynColumn.getColumnDefName().getFamilyName();
      if (family != null) {
        theTable.getColumnFamily(family); // Verifies that the column family exists
        familyName = PNameFactory.newName(family);
      }
      allcolumns.add(
          new PColumnImpl(
              name,
              familyName,
              dynColumn.getDataType(),
              dynColumn.getMaxLength(),
              dynColumn.getScale(),
              dynColumn.isNull(),
              position,
              dynColumn.getSortOrder(),
              dynColumn.getArraySize(),
              null,
              false));
      position++;
    }
    theTable = PTableImpl.makePTable(theTable, allcolumns);
  }
  return theTable;
}
/**
 * Validates that the meta data is valid against the server meta data if we haven't yet done so.
 * Otherwise, for every UPSERT VALUES call, we'd need to hit the server to see if the meta data
 * has changed.
 *
 * @return the server time to use for the upsert
 * @throws SQLException if the table or any columns no longer exist
 */
private long[] validate() throws SQLException {
  int i = 0;
  Long scn = connection.getSCN();
  PName tenantId = connection.getTenantId();
  MetaDataClient client = new MetaDataClient(connection);
  long[] timeStamps = new long[this.mutations.size()];
  for (Map.Entry<TableRef, Map<ImmutableBytesPtr, Map<PColumn, byte[]>>> entry :
      mutations.entrySet()) {
    TableRef tableRef = entry.getKey();
    long serverTimeStamp = tableRef.getTimeStamp();
    PTable table = tableRef.getTable();
    // If we're auto committing, we've already validated the schema when we got the
    // ColumnResolver, so no need to do it again here.
    if (!connection.getAutoCommit()) {
      MetaDataMutationResult result =
          client.updateCache(table.getSchemaName().getString(), table.getTableName().getString());
      long timestamp = result.getMutationTime();
      if (timestamp != QueryConstants.UNSET_TIMESTAMP) {
        serverTimeStamp = timestamp;
        if (result.wasUpdated()) {
          // TODO: use bitset?
          table =
              connection
                  .getMetaDataCache()
                  .getTable(new PTableKey(tenantId, table.getName().getString()));
          PColumn[] columns = new PColumn[table.getColumns().size()];
          for (Map.Entry<ImmutableBytesPtr, Map<PColumn, byte[]>> rowEntry :
              entry.getValue().entrySet()) {
            Map<PColumn, byte[]> valueEntry = rowEntry.getValue();
            if (valueEntry != PRow.DELETE_MARKER) {
              for (PColumn column : valueEntry.keySet()) {
                columns[column.getPosition()] = column;
              }
            }
          }
          for (PColumn column : columns) {
            if (column != null) {
              table
                  .getColumnFamily(column.getFamilyName().getString())
                  .getColumn(column.getName().getString());
            }
          }
          tableRef.setTable(table);
        }
      }
    }
    timeStamps[i++] =
        scn == null
            ? serverTimeStamp == QueryConstants.UNSET_TIMESTAMP
                ? HConstants.LATEST_TIMESTAMP
                : serverTimeStamp
            : scn;
  }
  return timeStamps;
}
private static SelectStatement prependTenantAndViewConstants(
    PTable table, SelectStatement select, String tenantId, Set<PColumn> addViewColumns) {
  if ((!table.isMultiTenant() || tenantId == null)
      && table.getViewIndexId() == null
      && addViewColumns.isEmpty()) {
    return select;
  }
  List<AliasedNode> selectNodes =
      newArrayListWithCapacity(select.getSelect().size() + 1 + addViewColumns.size());
  if (table.isMultiTenant() && tenantId != null) {
    selectNodes.add(new AliasedNode(null, new LiteralParseNode(tenantId)));
  }
  if (table.getViewIndexId() != null) {
    selectNodes.add(new AliasedNode(null, new LiteralParseNode(table.getViewIndexId())));
  }
  selectNodes.addAll(select.getSelect());
  for (PColumn column : addViewColumns) {
    byte[] byteValue = column.getViewConstant();
    Object value = column.getDataType().toObject(byteValue, 0, byteValue.length - 1);
    selectNodes.add(new AliasedNode(null, new LiteralParseNode(value)));
  }
  return SelectStatement.create(select, selectNodes);
}
/**
 * Estimate the max key length in bytes of the PK for a given table.
 *
 * @param table the table
 * @return the max PK length
 */
public static int estimateKeyLength(PTable table) {
  int maxKeyLength = 0;
  // Calculate the max length of a key; variable-width parts contribute a fixed estimate
  int i = 0;
  List<PColumn> columns = table.getPKColumns();
  while (i < columns.size()) {
    PColumn keyColumn = columns.get(i++);
    PDataType type = keyColumn.getDataType();
    Integer maxLength = keyColumn.getMaxLength();
    maxKeyLength +=
        !type.isFixedWidth()
            ? VAR_LENGTH_ESTIMATE
            : maxLength == null ? type.getByteSize() : maxLength;
  }
  return maxKeyLength;
}
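/*
 * Worked example (added for illustration; the schema is hypothetical). For a PK of
 * (host CHAR(15), created_at UNSIGNED_LONG, name VARCHAR), the estimate is
 * 15 + 8 + VAR_LENGTH_ESTIMATE: fixed-width parts use their declared or intrinsic byte
 * size, and each variable-width part contributes the flat VAR_LENGTH_ESTIMATE constant.
 */
private static void estimateKeyLengthExample(PTable hostEventsTable) {
  int estimate = estimateKeyLength(hostEventsTable);
  assert estimate == 15 + 8 + VAR_LENGTH_ESTIMATE;
}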
public static int getPKPosition(PTable table, PColumn column) {
  // TODO: when PColumn has getPKPosition, use that instead
  return table.getPKColumns().indexOf(column);
}
public MutationPlan compile(UpsertStatement upsert) throws SQLException {
  final PhoenixConnection connection = statement.getConnection();
  ConnectionQueryServices services = connection.getQueryServices();
  final int maxSize =
      services
          .getProps()
          .getInt(
              QueryServices.MAX_MUTATION_SIZE_ATTRIB,
              QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
  final ColumnResolver resolver = FromCompiler.getResolverForMutation(upsert, connection);
  final TableRef tableRef = resolver.getTables().get(0);
  final PTable table = tableRef.getTable();
  if (table.getType() == PTableType.VIEW) {
    if (table.getViewType().isReadOnly()) {
      throw new ReadOnlyTableException(
          table.getSchemaName().getString(), table.getTableName().getString());
    }
  }
  boolean isSalted = table.getBucketNum() != null;
  final boolean isTenantSpecific = table.isMultiTenant() && connection.getTenantId() != null;
  final boolean isSharedViewIndex = table.getViewIndexId() != null;
  String tenantId = isTenantSpecific ? connection.getTenantId().getString() : null;
  int posOffset = isSalted ? 1 : 0;
  // Set up an array of column indexes parallel to the values that are going to be set
  List<ColumnName> columnNodes = upsert.getColumns();
  final List<PColumn> allColumns = table.getColumns();
  Set<PColumn> addViewColumnsToBe = Collections.emptySet();
  Set<PColumn> overlapViewColumnsToBe = Collections.emptySet();
  int[] columnIndexesToBe;
  int nColumnsToSet = 0;
  int[] pkSlotIndexesToBe;
  List<PColumn> targetColumns;
  if (table.getViewType() == ViewType.UPDATABLE) {
    addViewColumnsToBe = Sets.newLinkedHashSetWithExpectedSize(allColumns.size());
    for (PColumn column : allColumns) {
      if (column.getViewConstant() != null) {
        addViewColumnsToBe.add(column);
      }
    }
  }
  ImmutableBytesWritable ptr = new ImmutableBytesWritable();
  // Allow a full row upsert if no columns (or only dynamic ones) are specified and the
  // value count matches
  if (columnNodes.isEmpty()
      || columnNodes.size() == upsert.getTable().getDynamicColumns().size()) {
    nColumnsToSet = allColumns.size() - posOffset;
    columnIndexesToBe = new int[nColumnsToSet];
    pkSlotIndexesToBe = new int[columnIndexesToBe.length];
    targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
    targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
    int minPKPos = 0;
    if (isTenantSpecific) {
      PColumn tenantColumn = table.getPKColumns().get(minPKPos);
      columnIndexesToBe[minPKPos] = tenantColumn.getPosition();
      targetColumns.set(minPKPos, tenantColumn);
      minPKPos++;
    }
    if (isSharedViewIndex) {
      PColumn indexIdColumn = table.getPKColumns().get(minPKPos);
      columnIndexesToBe[minPKPos] = indexIdColumn.getPosition();
      targetColumns.set(minPKPos, indexIdColumn);
      minPKPos++;
    }
    for (int i = posOffset, j = 0; i < allColumns.size(); i++) {
      PColumn column = allColumns.get(i);
      if (SchemaUtil.isPKColumn(column)) {
        pkSlotIndexesToBe[i - posOffset] = j + posOffset;
        if (j++ < minPKPos) { // Skip, as it's already been set above
          continue;
        }
        minPKPos = 0;
      }
      columnIndexesToBe[i - posOffset + minPKPos] = i;
      targetColumns.set(i - posOffset + minPKPos, column);
    }
    if (!addViewColumnsToBe.isEmpty()) {
      // All view columns overlap in this case
      overlapViewColumnsToBe = addViewColumnsToBe;
      addViewColumnsToBe = Collections.emptySet();
    }
  } else {
    // Size for the worst case
    int numColsInUpsert = columnNodes.size();
    nColumnsToSet =
        numColsInUpsert
            + addViewColumnsToBe.size()
            + (isTenantSpecific ? 1 : 0)
            + (isSharedViewIndex ? 1 : 0);
    columnIndexesToBe = new int[nColumnsToSet];
    pkSlotIndexesToBe = new int[columnIndexesToBe.length];
    targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
    targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
    Arrays.fill(
        columnIndexesToBe,
        -1); // TODO: necessary? So we'll get an AIOB exception if it's not replaced
    Arrays.fill(
        pkSlotIndexesToBe,
        -1); // TODO: necessary? So we'll get an AIOB exception if it's not replaced
    BitSet pkColumnsSet = new BitSet(table.getPKColumns().size());
    int i = 0;
    // Add the tenant column directly, as we don't want to resolve it (resolution would fail)
    if (isTenantSpecific) {
      PColumn tenantColumn = table.getPKColumns().get(i + posOffset);
      columnIndexesToBe[i] = tenantColumn.getPosition();
      pkColumnsSet.set(pkSlotIndexesToBe[i] = i + posOffset);
      targetColumns.set(i, tenantColumn);
      i++;
    }
    if (isSharedViewIndex) {
      PColumn indexIdColumn = table.getPKColumns().get(i + posOffset);
      columnIndexesToBe[i] = indexIdColumn.getPosition();
      pkColumnsSet.set(pkSlotIndexesToBe[i] = i + posOffset);
      targetColumns.set(i, indexIdColumn);
      i++;
    }
    for (ColumnName colName : columnNodes) {
      ColumnRef ref =
          resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName());
      PColumn column = ref.getColumn();
      if (IndexUtil.getViewConstantValue(column, ptr)) {
        if (overlapViewColumnsToBe.isEmpty()) {
          overlapViewColumnsToBe = Sets.newHashSetWithExpectedSize(addViewColumnsToBe.size());
        }
        nColumnsToSet--;
        overlapViewColumnsToBe.add(column);
        addViewColumnsToBe.remove(column);
      }
      columnIndexesToBe[i] = ref.getColumnPosition();
      targetColumns.set(i, column);
      if (SchemaUtil.isPKColumn(column)) {
        pkColumnsSet.set(pkSlotIndexesToBe[i] = ref.getPKSlotPosition());
      }
      i++;
    }
    for (PColumn column : addViewColumnsToBe) {
      columnIndexesToBe[i] = column.getPosition();
      targetColumns.set(i, column);
      if (SchemaUtil.isPKColumn(column)) {
        pkColumnsSet.set(pkSlotIndexesToBe[i] = SchemaUtil.getPKPosition(table, column));
      }
      i++;
    }
    for (i = posOffset; i < table.getPKColumns().size(); i++) {
      PColumn pkCol = table.getPKColumns().get(i);
      if (!pkColumnsSet.get(i)) {
        if (!pkCol.isNullable()) {
          throw new ConstraintViolationException(
              table.getName().getString()
                  + "."
                  + pkCol.getName().getString()
                  + " may not be null");
        }
      }
    }
  }
  List<ParseNode> valueNodes = upsert.getValues();
  QueryPlan plan = null;
  RowProjector rowProjectorToBe = null;
  final int nValuesToSet;
  boolean sameTable = false;
  boolean runOnServer = false;
  UpsertingParallelIteratorFactory upsertParallelIteratorFactoryToBe = null;
  final boolean isAutoCommit = connection.getAutoCommit();
  if (valueNodes == null) {
    SelectStatement select = upsert.getSelect();
    assert (select != null);
    select = SubselectRewriter.flatten(select, connection);
    ColumnResolver selectResolver = FromCompiler.getResolverForQuery(select, connection);
    select = StatementNormalizer.normalize(select, selectResolver);
    select = prependTenantAndViewConstants(table, select, tenantId, addViewColumnsToBe);
    sameTable =
        select.getFrom().size() == 1 && tableRef.equals(selectResolver.getTables().get(0));
    /* We can run the upsert in a coprocessor if:
     * 1) the FROM has only one table and the INTO table matches the FROM table
     * 2) the select query isn't doing aggregation
     * 3) autoCommit is on
     * 4) the table is not immutable, as the client is the one that figures out the additional
     *    puts for index tables
     * 5) there is no limit clause
     * Otherwise, run the query to pull the data from the server
     * and populate the MutationState (up to a limit).
     */
    runOnServer =
        sameTable
            && isAutoCommit
            && !table.isImmutableRows()
            && !select.isAggregate()
            && !select.isDistinct()
            && select.getLimit() == null
            && table.getBucketNum() == null;
    ParallelIteratorFactory parallelIteratorFactory;
    if (select.isAggregate() || select.isDistinct() || select.getLimit() != null) {
      parallelIteratorFactory = null;
    } else {
      // We can pipeline the upsert select instead of spooling everything to disk first,
      // if we don't have any post processing that's required.
      parallelIteratorFactory =
          upsertParallelIteratorFactoryToBe =
              new UpsertingParallelIteratorFactory(connection, tableRef);
    }
    // If we may be able to run on the server, add a hint that favors using the data table
    // if all else is equal.
    // TODO: it'd be nice if we could figure out in advance if the PK is potentially changing,
    // as this would disallow running on the server. We currently use the row projector we
    // get back to figure this out.
    HintNode hint = upsert.getHint();
    if (!upsert.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
      hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
    }
    select = SelectStatement.create(select, hint);
    // Pass the scan through if the upsert and select use the same table, so that the
    // projection is computed correctly.
    // Use the optimizer to choose the best plan.
    plan =
        new QueryOptimizer(services)
            .optimize(statement, select, selectResolver, targetColumns, parallelIteratorFactory);
    runOnServer &= plan.getTableRef().equals(tableRef);
    rowProjectorToBe = plan.getProjector();
    nValuesToSet = rowProjectorToBe.getColumnCount();
    // Cannot auto commit if doing aggregation or topN or salted.
    // Salted causes problems because the row may end up living in a different region.
  } else {
    nValuesToSet =
        valueNodes.size()
            + addViewColumnsToBe.size()
            + (isTenantSpecific ? 1 : 0)
            + (isSharedViewIndex ? 1 : 0);
  }
  final RowProjector projector = rowProjectorToBe;
  final UpsertingParallelIteratorFactory upsertParallelIteratorFactory =
      upsertParallelIteratorFactoryToBe;
  final QueryPlan queryPlan = plan;
  // Resize down to allow a subset of columns to be specifiable
  if (columnNodes.isEmpty() && columnIndexesToBe.length >= nValuesToSet) {
    nColumnsToSet = nValuesToSet;
    columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, nValuesToSet);
    pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, nValuesToSet);
  }
  if (nValuesToSet != nColumnsToSet) {
    throw new SQLExceptionInfo.Builder(SQLExceptionCode.UPSERT_COLUMN_NUMBERS_MISMATCH)
        .setMessage(
            "Number of columns: " + nColumnsToSet + ". Number of values: " + nValuesToSet)
        .build()
        .buildException();
  }
  final int[] columnIndexes = columnIndexesToBe;
  final int[] pkSlotIndexes = pkSlotIndexesToBe;
  final Set<PColumn> addViewColumns = addViewColumnsToBe;
  final Set<PColumn> overlapViewColumns = overlapViewColumnsToBe;
  // TODO: break this up into multiple functions
  ////////////////////////////////////////////////////////////////////
  // UPSERT SELECT
  ////////////////////////////////////////////////////////////////////
  if (valueNodes == null) {
    // Before we re-order, check that for updatable view columns
    // the projected expression either matches the column name or
    // is a constant with the same required value.
    throwIfNotUpdatable(tableRef, overlapViewColumnsToBe, targetColumns, projector, sameTable);
    ////////////////////////////////////////////////////////////////////
    // UPSERT SELECT run server-side (maybe)
    ////////////////////////////////////////////////////////////////////
    if (runOnServer) {
      // At most this array will grow bigger by the number of PK columns
      int[] allColumnsIndexes = Arrays.copyOf(columnIndexes, columnIndexes.length + nValuesToSet);
      int[] reverseColumnIndexes = new int[table.getColumns().size()];
      List<Expression> projectedExpressions =
          Lists.newArrayListWithExpectedSize(reverseColumnIndexes.length);
      Arrays.fill(reverseColumnIndexes, -1);
      for (int i = 0; i < nValuesToSet; i++) {
        projectedExpressions.add(projector.getColumnProjector(i).getExpression());
        reverseColumnIndexes[columnIndexes[i]] = i;
      }
      /*
       * Order the projected columns and projected expressions with the PK columns
       * leading, ordered by slot position.
       */
      int offset = table.getBucketNum() == null ? 0 : 1;
      for (int i = 0; i < table.getPKColumns().size() - offset; i++) {
        PColumn column = table.getPKColumns().get(i + offset);
        int pos = reverseColumnIndexes[column.getPosition()];
        if (pos == -1) {
          // The last PK column may be fixed width and nullable.
          // We don't want to insert a null expression b/c
          // it's not valid to set a fixed width type to null.
          if (column.getDataType().isFixedWidth()) {
            continue;
          }
          // Add a literal null for missing PK columns
          pos = projectedExpressions.size();
          Expression literalNull = LiteralExpression.newConstant(null, column.getDataType(), true);
          projectedExpressions.add(literalNull);
          allColumnsIndexes[pos] = column.getPosition();
        }
        // Swap the select expression at pos with i
        Collections.swap(projectedExpressions, i, pos);
        // Swap the column indexes and reverse column indexes too
        int tempPos = allColumnsIndexes[i];
        allColumnsIndexes[i] = allColumnsIndexes[pos];
        allColumnsIndexes[pos] = tempPos;
        reverseColumnIndexes[tempPos] = reverseColumnIndexes[i];
        reverseColumnIndexes[i] = i;
      }
      // If any PK slots are changing, be conservative and don't run this server-side.
      // If the row ends up living in a different region, we'd get an error otherwise.
      for (int i = 0; i < table.getPKColumns().size(); i++) {
        PColumn column = table.getPKColumns().get(i);
        Expression source = projectedExpressions.get(i);
        if (source == null
            || !source.equals(
                new ColumnRef(tableRef, column.getPosition()).newColumnExpression())) {
          // TODO: we could check the region boundaries to see if the PK will still be in it.
          runOnServer = false; // Bail on running server-side, since the PK may be changing
          break;
        }
      }
      ////////////////////////////////////////////////////////////////////
      // UPSERT SELECT run server-side
      ////////////////////////////////////////////////////////////////////
      if (runOnServer) {
        // Iterate through the columns being projected
        List<PColumn> projectedColumns =
            Lists.newArrayListWithExpectedSize(projectedExpressions.size());
        for (int i = 0; i < projectedExpressions.size(); i++) {
          // Must make a new column if the position has changed
          PColumn column = allColumns.get(allColumnsIndexes[i]);
          projectedColumns.add(column.getPosition() == i ? column : new PColumnImpl(column, i));
        }
        // Build a table from projectedColumns
        PTable projectedTable = PTableImpl.makePTable(table, projectedColumns);
        SelectStatement select =
            SelectStatement.create(SelectStatement.COUNT_ONE, upsert.getHint());
        final RowProjector aggProjector =
            ProjectionCompiler.compile(queryPlan.getContext(), select, GroupBy.EMPTY_GROUP_BY);
        /*
         * Transfer over the PTable representing the subset of columns selected, but all PK columns.
         * Move the columns setting the PK first in pkSlot order, adding a LiteralExpression of null
         * for any missing ones.
         * Transfer over the List<Expression> for the projection.
         * In the region scan, evaluate the expressions in order, collecting the first n columns for
         * the PK and collecting the non-PK columns in the mutation Map.
         * Create the PRow and get the mutations, adding them to the batch.
         */
        final StatementContext context = queryPlan.getContext();
        final Scan scan = context.getScan();
        scan.setAttribute(
            BaseScannerRegionObserver.UPSERT_SELECT_TABLE,
            UngroupedAggregateRegionObserver.serialize(projectedTable));
        scan.setAttribute(
            BaseScannerRegionObserver.UPSERT_SELECT_EXPRS,
            UngroupedAggregateRegionObserver.serialize(projectedExpressions));
        // Ignore ORDER BY - it has no impact
        final QueryPlan aggPlan =
            new AggregatePlan(
                context,
                select,
                tableRef,
                aggProjector,
                null,
                OrderBy.EMPTY_ORDER_BY,
                null,
                GroupBy.EMPTY_GROUP_BY,
                null);
        return new MutationPlan() {
          @Override
          public PhoenixConnection getConnection() {
            return connection;
          }

          @Override
          public ParameterMetaData getParameterMetaData() {
            return queryPlan.getContext().getBindManager().getParameterMetaData();
          }

          @Override
          public StatementContext getContext() {
            return queryPlan.getContext();
          }

          @Override
          public MutationState execute() throws SQLException {
            ImmutableBytesWritable ptr = context.getTempPtr();
            tableRef.getTable().getIndexMaintainers(ptr);
            ServerCache cache = null;
            try {
              if (ptr.getLength() > 0) {
                IndexMetaDataCacheClient client =
                    new IndexMetaDataCacheClient(connection, tableRef);
                cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
                byte[] uuidValue = cache.getId();
                scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
              }
              ResultIterator iterator = aggPlan.iterator();
              try {
                Tuple row = iterator.next();
                final long mutationCount =
                    (Long) aggProjector.getColumnProjector(0).getValue(row, PDataType.LONG, ptr);
                return new MutationState(maxSize, connection) {
                  @Override
                  public long getUpdateCount() {
                    return mutationCount;
                  }
                };
              } finally {
                iterator.close();
              }
            } finally {
              if (cache != null) {
                cache.close();
              }
            }
          }

          @Override
          public ExplainPlan getExplainPlan() throws SQLException {
            List<String> queryPlanSteps = aggPlan.getExplainPlan().getPlanSteps();
            List<String> planSteps =
                Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
            planSteps.add("UPSERT ROWS");
            planSteps.addAll(queryPlanSteps);
            return new ExplainPlan(planSteps);
          }
        };
      }
    }
    ////////////////////////////////////////////////////////////////////
    // UPSERT SELECT run client-side
    ////////////////////////////////////////////////////////////////////
    return new MutationPlan() {
      @Override
      public PhoenixConnection getConnection() {
        return connection;
      }

      @Override
      public ParameterMetaData getParameterMetaData() {
        return queryPlan.getContext().getBindManager().getParameterMetaData();
      }

      @Override
      public StatementContext getContext() {
        return queryPlan.getContext();
      }

      @Override
      public MutationState execute() throws SQLException {
        ResultIterator iterator = queryPlan.iterator();
        if (upsertParallelIteratorFactory == null) {
          return upsertSelect(statement, tableRef, projector, iterator, columnIndexes, pkSlotIndexes);
        }
        upsertParallelIteratorFactory.setRowProjector(projector);
        upsertParallelIteratorFactory.setColumnIndexes(columnIndexes);
        upsertParallelIteratorFactory.setPkSlotIndexes(pkSlotIndexes);
        Tuple tuple;
        long totalRowCount = 0;
        while ((tuple = iterator.next()) != null) { // Runs the query
          Cell kv = tuple.getValue(0);
          totalRowCount +=
              PDataType.LONG
                  .getCodec()
                  .decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
        }
        // Return the total number of rows that have been updated. If auto commit is off,
        // the mutations will all be in the mutation state of the current connection.
        return new MutationState(maxSize, statement.getConnection(), totalRowCount);
      }

      @Override
      public ExplainPlan getExplainPlan() throws SQLException {
        List<String> queryPlanSteps = queryPlan.getExplainPlan().getPlanSteps();
        List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
        planSteps.add("UPSERT SELECT");
        planSteps.addAll(queryPlanSteps);
        return new ExplainPlan(planSteps);
      }
    };
  }

  ////////////////////////////////////////////////////////////////////
  // UPSERT VALUES
  ////////////////////////////////////////////////////////////////////
  int nodeIndex = 0;
  // Initialize values with constant byte values first
  final byte[][] values = new byte[nValuesToSet][];
  if (isTenantSpecific) {
    values[nodeIndex++] = connection.getTenantId().getBytes();
  }
  if (isSharedViewIndex) {
    values[nodeIndex++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
  }
  final int nodeIndexOffset = nodeIndex;
  // Allocate the array based on the size of all columns in the table,
  // since some values may not be set (if they're nullable).
  final StatementContext context = new StatementContext(statement, resolver, new Scan());
  UpsertValuesCompiler expressionBuilder = new UpsertValuesCompiler(context);
  final List<Expression> constantExpressions =
      Lists.newArrayListWithExpectedSize(valueNodes.size());
  // First build all the expressions, as with sequences we want to collect them all first
  // and initialize them in one batch
  for (ParseNode valueNode : valueNodes) {
    if (!valueNode.isStateless()) {
      throw new SQLExceptionInfo.Builder(SQLExceptionCode.VALUE_IN_UPSERT_NOT_CONSTANT)
          .build()
          .buildException();
    }
    PColumn column = allColumns.get(columnIndexes[nodeIndex]);
    expressionBuilder.setColumn(column);
    Expression expression = valueNode.accept(expressionBuilder);
    if (expression.getDataType() != null
        && !expression.getDataType().isCastableTo(column.getDataType())) {
      throw TypeMismatchException.newException(
          expression.getDataType(),
          column.getDataType(),
          "expression: " + expression.toString() + " in column " + column);
    }
    constantExpressions.add(expression);
    nodeIndex++;
  }
  return new MutationPlan() {
    @Override
    public PhoenixConnection getConnection() {
      return connection;
    }

    @Override
    public ParameterMetaData getParameterMetaData() {
      return context.getBindManager().getParameterMetaData();
    }

    @Override
    public StatementContext getContext() {
      return context;
    }

    @Override
    public MutationState execute() throws SQLException {
      ImmutableBytesWritable ptr = context.getTempPtr();
      final SequenceManager sequenceManager = context.getSequenceManager();
      // Next evaluate all the expressions
      int nodeIndex = nodeIndexOffset;
      Tuple tuple =
          sequenceManager.getSequenceCount() == 0 ? null : sequenceManager.newSequenceTuple(null);
      for (Expression constantExpression : constantExpressions) {
        PColumn column = allColumns.get(columnIndexes[nodeIndex]);
        constantExpression.evaluate(tuple, ptr);
        Object value = null;
        if (constantExpression.getDataType() != null) {
          value =
              constantExpression
                  .getDataType()
                  .toObject(
                      ptr,
                      constantExpression.getSortOrder(),
                      constantExpression.getMaxLength(),
                      constantExpression.getScale());
          if (!constantExpression.getDataType().isCoercibleTo(column.getDataType(), value)) {
            throw TypeMismatchException.newException(
                constantExpression.getDataType(),
                column.getDataType(),
                "expression: " + constantExpression.toString() + " in column " + column);
          }
          if (!column
              .getDataType()
              .isSizeCompatible(
                  ptr,
                  value,
                  constantExpression.getDataType(),
                  constantExpression.getMaxLength(),
                  constantExpression.getScale(),
                  column.getMaxLength(),
                  column.getScale())) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY)
                .setColumnName(column.getName().getString())
                .setMessage("value=" + constantExpression.toString())
                .build()
                .buildException();
          }
        }
        column
            .getDataType()
            .coerceBytes(
                ptr,
                value,
                constantExpression.getDataType(),
                constantExpression.getMaxLength(),
                constantExpression.getScale(),
                constantExpression.getSortOrder(),
                column.getMaxLength(),
                column.getScale(),
                column.getSortOrder());
        if (overlapViewColumns.contains(column)
            && Bytes.compareTo(
                    ptr.get(),
                    ptr.getOffset(),
                    ptr.getLength(),
                    column.getViewConstant(),
                    0,
                    column.getViewConstant().length - 1)
                != 0) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_UPDATE_VIEW_COLUMN)
              .setColumnName(column.getName().getString())
              .setMessage("value=" + constantExpression.toString())
              .build()
              .buildException();
        }
        values[nodeIndex] = ByteUtil.copyKeyBytesIfNecessary(ptr);
        nodeIndex++;
      }
      // Add columns based on the view
      for (PColumn column : addViewColumns) {
        if (IndexUtil.getViewConstantValue(column, ptr)) {
          values[nodeIndex++] = ByteUtil.copyKeyBytesIfNecessary(ptr);
        } else {
          throw new IllegalStateException();
        }
      }
      Map<ImmutableBytesPtr, Map<PColumn, byte[]>> mutation = Maps.newHashMapWithExpectedSize(1);
      setValues(values, pkSlotIndexes, columnIndexes, tableRef.getTable(), mutation);
      return new MutationState(tableRef, mutation, 0, maxSize, connection);
    }

    @Override
    public ExplainPlan getExplainPlan() throws SQLException {
      List<String> planSteps = Lists.newArrayListWithExpectedSize(2);
      if (context.getSequenceManager().getSequenceCount() > 0) {
        planSteps.add(
            "CLIENT RESERVE " + context.getSequenceManager().getSequenceCount() + " SEQUENCES");
      }
      planSteps.add("PUT SINGLE ROW");
      return new ExplainPlan(planSteps);
    }
  };
}
private static MutationState upsertSelect(
    PhoenixStatement statement,
    TableRef tableRef,
    RowProjector projector,
    ResultIterator iterator,
    int[] columnIndexes,
    int[] pkSlotIndexes)
    throws SQLException {
  try {
    PhoenixConnection connection = statement.getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    int maxSize =
        services
            .getProps()
            .getInt(
                QueryServices.MAX_MUTATION_SIZE_ATTRIB,
                QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
    boolean isAutoCommit = connection.getAutoCommit();
    byte[][] values = new byte[columnIndexes.length][];
    int rowCount = 0;
    Map<ImmutableBytesPtr, Map<PColumn, byte[]>> mutation =
        Maps.newHashMapWithExpectedSize(batchSize);
    PTable table = tableRef.getTable();
    ResultSet rs = new PhoenixResultSet(iterator, projector, statement);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    while (rs.next()) {
      for (int i = 0; i < values.length; i++) {
        PColumn column = table.getColumns().get(columnIndexes[i]);
        byte[] bytes = rs.getBytes(i + 1);
        ptr.set(bytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : bytes);
        Object value = rs.getObject(i + 1);
        int rsPrecision = rs.getMetaData().getPrecision(i + 1);
        Integer precision = rsPrecision == 0 ? null : rsPrecision;
        int rsScale = rs.getMetaData().getScale(i + 1);
        Integer scale = rsScale == 0 ? null : rsScale;
        // We are guaranteed that the two columns will have compatible types,
        // as we checked that before.
        if (!column
            .getDataType()
            .isSizeCompatible(
                ptr,
                value,
                column.getDataType(),
                precision,
                scale,
                column.getMaxLength(),
                column.getScale())) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY)
              .setColumnName(column.getName().getString())
              .setMessage("value=" + column.getDataType().toStringLiteral(ptr, null))
              .build()
              .buildException();
        }
        column
            .getDataType()
            .coerceBytes(
                ptr,
                value,
                column.getDataType(),
                precision,
                scale,
                SortOrder.getDefault(),
                column.getMaxLength(),
                column.getScale(),
                column.getSortOrder());
        values[i] = ByteUtil.copyKeyBytesIfNecessary(ptr);
      }
      setValues(values, pkSlotIndexes, columnIndexes, table, mutation);
      rowCount++;
      // Commit a batch if auto commit is true and we're at our batch size
      if (isAutoCommit && rowCount % batchSize == 0) {
        MutationState state = new MutationState(tableRef, mutation, 0, maxSize, connection);
        connection.getMutationState().join(state);
        connection.commit();
        mutation.clear();
      }
    }
    // If auto commit is true, this last batch will be committed upon return
    return new MutationState(
        tableRef, mutation, rowCount / batchSize * batchSize, maxSize, connection);
  } finally {
    iterator.close();
  }
}
@SuppressWarnings("deprecation") public void commit() throws SQLException { int i = 0; byte[] tenantId = connection.getTenantId() == null ? null : connection.getTenantId().getBytes(); long[] serverTimeStamps = validate(); Iterator<Map.Entry<TableRef, Map<ImmutableBytesPtr, Map<PColumn, byte[]>>>> iterator = this.mutations.entrySet().iterator(); List<Map.Entry<TableRef, Map<ImmutableBytesPtr, Map<PColumn, byte[]>>>> committedList = Lists.newArrayListWithCapacity(this.mutations.size()); // add tracing for this operation TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables"); Span span = trace.getSpan(); while (iterator.hasNext()) { Map.Entry<TableRef, Map<ImmutableBytesPtr, Map<PColumn, byte[]>>> entry = iterator.next(); Map<ImmutableBytesPtr, Map<PColumn, byte[]>> valuesMap = entry.getValue(); TableRef tableRef = entry.getKey(); PTable table = tableRef.getTable(); table.getIndexMaintainers(tempPtr); boolean hasIndexMaintainers = tempPtr.getLength() > 0; boolean isDataTable = true; long serverTimestamp = serverTimeStamps[i++]; Iterator<Pair<byte[], List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, serverTimestamp, false); while (mutationsIterator.hasNext()) { Pair<byte[], List<Mutation>> pair = mutationsIterator.next(); byte[] htableName = pair.getFirst(); List<Mutation> mutations = pair.getSecond(); // create a span per target table // TODO maybe we can be smarter about the table name to string here? Span child = Tracing.child(span, "Writing mutation batch for table: " + Bytes.toString(htableName)); int retryCount = 0; boolean shouldRetry = false; do { ServerCache cache = null; if (hasIndexMaintainers && isDataTable) { byte[] attribValue = null; byte[] uuidValue; if (IndexMetaDataCacheClient.useIndexMetadataCache( connection, mutations, tempPtr.getLength())) { IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef); cache = client.addIndexMetadataCache(mutations, tempPtr); child.addTimelineAnnotation("Updated index metadata cache"); uuidValue = cache.getId(); // If we haven't retried yet, retry for this case only, as it's possible that // a split will occur after we send the index metadata cache to all known // region servers. 
shouldRetry = true; } else { attribValue = ByteUtil.copyKeyBytesIfNecessary(tempPtr); uuidValue = ServerCacheClient.generateId(); } // Either set the UUID to be able to access the index metadata from the cache // or set the index metadata directly on the Mutation for (Mutation mutation : mutations) { if (tenantId != null) { mutation.setAttribute(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId); } mutation.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue); if (attribValue != null) { mutation.setAttribute(PhoenixIndexCodec.INDEX_MD, attribValue); } } } SQLException sqlE = null; HTableInterface hTable = connection.getQueryServices().getTable(htableName); try { if (logger.isDebugEnabled()) logMutationSize(hTable, mutations); long startTime = System.currentTimeMillis(); child.addTimelineAnnotation("Attempt " + retryCount); hTable.batch(mutations); child.stop(); shouldRetry = false; if (logger.isDebugEnabled()) logger.debug( "Total time for batch call of " + mutations.size() + " mutations into " + table.getName().getString() + ": " + (System.currentTimeMillis() - startTime) + " ms"); committedList.add(entry); } catch (Exception e) { SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e); if (inferredE != null) { if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) { // Swallow this exception once, as it's possible that we split after sending the // index metadata // and one of the region servers doesn't have it. This will cause it to have it the // next go around. // If it fails again, we don't retry. String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE; logger.warn(msg); connection.getQueryServices().clearTableRegionCache(htableName); // add a new child span as this one failed child.addTimelineAnnotation(msg); child.stop(); child = Tracing.child(span, "Failed batch, attempting retry"); continue; } e = inferredE; } // Throw to client with both what was committed so far and what is left to be committed. // That way, client can either undo what was done or try again with what was not done. sqlE = new CommitException( e, this, new MutationState( committedList, this.sizeOffset, this.maxSize, this.connection)); } finally { try { hTable.close(); } catch (IOException e) { if (sqlE != null) { sqlE.setNextException(ServerUtil.parseServerException(e)); } else { sqlE = ServerUtil.parseServerException(e); } } finally { try { if (cache != null) { cache.close(); } } finally { if (sqlE != null) { throw sqlE; } } } } } while (shouldRetry && retryCount++ < 1); isDataTable = false; } numRows -= entry.getValue().size(); iterator.remove(); // Remove batches as we process them } trace.close(); assert (numRows == 0); assert (this.mutations.isEmpty()); }
public ServerCache addServerCache(
    ScanRanges keyRanges,
    final ImmutableBytesWritable cachePtr,
    final byte[] txState,
    final ServerCacheFactory cacheFactory,
    final TableRef cacheUsingTableRef)
    throws SQLException {
  ConnectionQueryServices services = connection.getQueryServices();
  MemoryChunk chunk = services.getMemoryManager().allocate(cachePtr.getLength());
  List<Closeable> closeables = new ArrayList<Closeable>();
  closeables.add(chunk);
  ServerCache hashCacheSpec = null;
  SQLException firstException = null;
  final byte[] cacheId = generateId();
  /* Execute the endpoint in parallel on each server to send the compressed hash cache. */
  // TODO: generalize and package as a per region server EndPoint caller
  // (ideally this would be functionality provided by the coprocessor framework)
  boolean success = false;
  ExecutorService executor = services.getExecutor();
  List<Future<Boolean>> futures = Collections.emptyList();
  try {
    final PTable cacheUsingTable = cacheUsingTableRef.getTable();
    List<HRegionLocation> locations =
        services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes());
    int nRegions = locations.size();
    // Size these based on the worst case
    futures = new ArrayList<Future<Boolean>>(nRegions);
    Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions);
    for (HRegionLocation entry : locations) {
      // Keep track of servers we've sent to and only send once
      byte[] regionStartKey = entry.getRegionInfo().getStartKey();
      byte[] regionEndKey = entry.getRegionInfo().getEndKey();
      if (!servers.contains(entry)
          && keyRanges.intersects(
              regionStartKey,
              regionEndKey,
              cacheUsingTable.getIndexType() == IndexType.LOCAL
                  ? ScanUtil.getRowKeyOffset(regionStartKey, regionEndKey)
                  : 0,
              true)) {
        // Call the RPC once per server
        servers.add(entry);
        if (LOG.isDebugEnabled()) {
          LOG.debug(
              addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));
        }
        final byte[] key = entry.getRegionInfo().getStartKey();
        final HTableInterface htable =
            services.getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
        closeables.add(htable);
        futures.add(
            executor.submit(
                new JobCallable<Boolean>() {
                  @Override
                  public Boolean call() throws Exception {
                    final Map<byte[], AddServerCacheResponse> results;
                    try {
                      results =
                          htable.coprocessorService(
                              ServerCachingService.class,
                              key,
                              key,
                              new Batch.Call<ServerCachingService, AddServerCacheResponse>() {
                                @Override
                                public AddServerCacheResponse call(ServerCachingService instance)
                                    throws IOException {
                                  ServerRpcController controller = new ServerRpcController();
                                  BlockingRpcCallback<AddServerCacheResponse> rpcCallback =
                                      new BlockingRpcCallback<AddServerCacheResponse>();
                                  AddServerCacheRequest.Builder builder =
                                      AddServerCacheRequest.newBuilder();
                                  if (connection.getTenantId() != null) {
                                    try {
                                      byte[] tenantIdBytes =
                                          ScanUtil.getTenantIdBytes(
                                              cacheUsingTable.getRowKeySchema(),
                                              cacheUsingTable.getBucketNum() != null,
                                              connection.getTenantId(),
                                              cacheUsingTable.isMultiTenant());
                                      builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                                    } catch (SQLException e) {
                                      // Propagate as IOException so the coprocessor call
                                      // surfaces the failure
                                      throw new IOException(e);
                                    }
                                  }
                                  builder.setCacheId(ByteStringer.wrap(cacheId));
                                  builder.setCachePtr(
                                      org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
                                  ServerCacheFactoryProtos.ServerCacheFactory.Builder
                                      svrCacheFactoryBuilder =
                                          ServerCacheFactoryProtos.ServerCacheFactory.newBuilder();
                                  svrCacheFactoryBuilder.setClassName(
                                      cacheFactory.getClass().getName());
                                  builder.setCacheFactory(svrCacheFactoryBuilder.build());
                                  builder.setTxState(HBaseZeroCopyByteString.wrap(txState));
                                  instance.addServerCache(controller, builder.build(), rpcCallback);
                                  if (controller.getFailedOn() != null) {
                                    throw controller.getFailedOn();
                                  }
                                  return rpcCallback.get();
                                }
                              });
                    } catch (Throwable t) {
                      throw new Exception(t);
                    }
                    if (results != null && results.size() == 1) {
                      return results.values().iterator().next().getReturn();
                    }
                    return false;
                  }

                  /**
                   * Defines the grouping for round robin behavior. All threads spawned to process
                   * this scan will be grouped together and time sliced with other simultaneously
                   * executing parallel scans.
                   */
                  @Override
                  public Object getJobId() {
                    return ServerCacheClient.this;
                  }

                  @Override
                  public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                    return NO_OP_INSTANCE;
                  }
                }));
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug(
              addCustomAnnotations(
                  "NOT adding cache entry to be sent for "
                      + entry
                      + " since one already exists for that entry",
                  connection));
        }
      }
    }
    hashCacheSpec = new ServerCache(cacheId, servers, cachePtr.getLength());
    // Execute in parallel
    int timeoutMs =
        services
            .getProps()
            .getInt(
                QueryServices.THREAD_TIMEOUT_MS_ATTRIB,
                QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
    for (Future<Boolean> future : futures) {
      future.get(timeoutMs, TimeUnit.MILLISECONDS);
    }
    cacheUsingTableRefMap.put(Bytes.mapKey(cacheId), cacheUsingTableRef);
    success = true;
  } catch (SQLException e) {
    firstException = e;
  } catch (Exception e) {
    firstException = new SQLException(e);
  } finally {
    try {
      if (!success) {
        SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec));
        for (Future<Boolean> future : futures) {
          future.cancel(true);
        }
      }
    } finally {
      try {
        Closeables.closeAll(closeables);
      } catch (IOException e) {
        if (firstException == null) {
          firstException = new SQLException(e);
        }
      } finally {
        if (firstException != null) {
          throw firstException;
        }
      }
    }
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug(
        addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));
  }
  return hashCacheSpec;
}
public static boolean hasRowTimestampColumn(PTable table) {
  return table.getRowTimestampColPos() > 0;
}
public static boolean isMetaTable(PTable table) {
  return PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA.equals(table.getSchemaName().getString())
      && PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE.equals(table.getTableName().getString());
}