Example No. 1
 public SingleTableColumnResolver(
     PhoenixConnection connection, NamedTableNode table, long timeStamp) throws SQLException {
   super(connection);
   List<PColumnFamily> families =
       Lists.newArrayListWithExpectedSize(table.getDynamicColumns().size());
   for (ColumnDef def : table.getDynamicColumns()) {
     if (def.getColumnDefName().getFamilyName() != null) {
       families.add(
           new PColumnFamilyImpl(
               PNameFactory.newName(def.getColumnDefName().getFamilyName()),
               Collections.<PColumn>emptyList()));
     }
   }
   Long scn = connection.getSCN();
   PTable theTable =
       new PTableImpl(
           connection.getTenantId(),
           table.getName().getSchemaName(),
           table.getName().getTableName(),
           scn == null ? HConstants.LATEST_TIMESTAMP : scn,
           families);
   theTable = this.addDynamicColumns(table.getDynamicColumns(), theTable);
   alias = null;
   tableRefs =
       ImmutableList.of(
           new TableRef(alias, theTable, timeStamp, !table.getDynamicColumns().isEmpty()));
 }
Example No. 2
  /**
   * Data is batched up based on the connection batch size. Each column's PDataType is read from
   * the metadata and used to convert the column value to the correct type before the upsert.
   *
   * <p>The format is determined by the supplied csvParser.
   *
   * @param csvParser CSVParser instance
   * @throws Exception
   */
  public void upsert(CSVParser csvParser) throws Exception {
    List<ColumnInfo> columnInfoList = buildColumnInfoList(csvParser);

    boolean wasAutoCommit = conn.getAutoCommit();
    try {
      conn.setAutoCommit(false);
      long start = System.currentTimeMillis();
      CsvUpsertListener upsertListener = new CsvUpsertListener(conn, conn.getMutateBatchSize());
      CsvUpsertExecutor csvUpsertExecutor =
          CsvUpsertExecutor.create(
              conn, tableName, columnInfoList, upsertListener, arrayElementSeparator);

      csvUpsertExecutor.execute(csvParser);
      csvUpsertExecutor.close();

      conn.commit();
      double elapsedDuration = ((System.currentTimeMillis() - start) / 1000.0);
      System.out.println(
          "CSV Upsert complete. " + upsertListener.getTotalUpsertCount() + " rows upserted");
      System.out.println("Time: " + elapsedDuration + " sec(s)\n");

    } finally {

      // release reader resources.
      if (csvParser != null) {
        csvParser.close();
      }
      if (wasAutoCommit) {
        conn.setAutoCommit(true);
      }
    }
  }
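
A minimal caller sketch for the method above. It assumes only a commons-csv CSVParser and that this upsert(CSVParser) overload is reachable from the calling code; in Phoenix the loader is normally driven through its public upsert(String fileName) entry point, as Example No. 10 shows. The file name is illustrative.

import java.io.FileReader;
import java.io.Reader;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.phoenix.util.CSVCommonsLoader;

// Hypothetical driver: build a commons-csv parser over a file and hand it to upsert(CSVParser).
// "loader" stands for the instance that defines the method above and already holds the connection.
void loadCsv(CSVCommonsLoader loader) throws Exception {
  Reader in = new FileReader("data.csv"); // illustrative path
  CSVParser csvParser = CSVFormat.DEFAULT.withHeader().parse(in);
  loader.upsert(csvParser); // rows are batched by the connection's mutate batch size
}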
Example No. 3
 /**
  * Runs a series of semicolon-terminated SQL statements using the connection provided, returning
  * the number of SQL statements executed. Note that if the connection has specified an SCN through
  * the {@link org.apache.phoenix.util.PhoenixRuntime#CURRENT_SCN_ATTRIB} connection property, then
  * the timestamp is bumped up by one after each statement execution.
  *
  * @param conn an open JDBC connection
  * @param reader a reader for semicolon-separated SQL statements
  * @param binds the binds for all statements
  * @return the number of SQL statements that were executed
  * @throws IOException
  * @throws SQLException
  */
 public static int executeStatements(Connection conn, Reader reader, List<Object> binds)
     throws IOException, SQLException {
   PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
   // Turn auto-commit on when running scripts in case there's DML
   pconn.setAutoCommit(true);
   return pconn.executeStatements(reader, binds, System.out);
 }
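
A hedged usage sketch for executeStatements: run a semicolon-terminated SQL script from disk, the same pattern Example No. 10 below uses. The connection URL and script path are hypothetical.

import java.io.FileReader;
import java.io.Reader;
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Collections;
import org.apache.phoenix.util.PhoenixRuntime;

// Illustrative only: execute every statement in a script file and report how many ran.
static void runScript() throws Exception {
  Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
  Reader reader = new FileReader("schema.sql");
  try {
    int executed = PhoenixRuntime.executeStatements(conn, reader, Collections.emptyList());
    System.out.println(executed + " statements executed");
  } finally {
    reader.close();
    conn.close();
  }
}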
Example No. 4
  /**
   * Get the list of uncommitted KeyValues for the connection. Currently used to write a
   * Phoenix-compliant HFile from a map/reduce job.
   *
   * @param conn an open JDBC connection
   * @return the list of HBase mutations for uncommitted data
   * @throws SQLException
   */
  public static Iterator<Pair<byte[], List<KeyValue>>> getUncommittedDataIterator(
      Connection conn, boolean includeMutableIndexes) throws SQLException {
    final PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    final Iterator<Pair<byte[], List<Mutation>>> iterator =
        pconn.getMutationState().toMutations(includeMutableIndexes);
    return new Iterator<Pair<byte[], List<KeyValue>>>() {

      @Override
      public boolean hasNext() {
        return iterator.hasNext();
      }

      @Override
      public Pair<byte[], List<KeyValue>> next() {
        Pair<byte[], List<Mutation>> pair = iterator.next();
        List<KeyValue> keyValues =
            Lists.newArrayListWithExpectedSize(
                pair.getSecond().size() * 5); // Guess-timate 5 key values per row
        for (Mutation mutation : pair.getSecond()) {
          for (List<Cell> keyValueList : mutation.getFamilyCellMap().values()) {
            for (Cell keyValue : keyValueList) {
              keyValues.add(org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(keyValue));
            }
          }
        }
        Collections.sort(keyValues, pconn.getKeyValueBuilder().getKeyValueComparator());
        return new Pair<byte[], List<KeyValue>>(pair.getFirst(), keyValues);
      }

      @Override
      public void remove() {
        throw new UnsupportedOperationException();
      }
    };
  }
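
A hedged sketch of how a caller (for example, a map/reduce writer) might consume this iterator: upsert with auto-commit off, drain the uncommitted KeyValues, then roll back so nothing reaches the server, the same rollback-after-draining pattern the CSV mapper later in this listing follows. The table, columns, and connection URL are hypothetical.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.util.PhoenixRuntime;

// Illustrative only: collect the KeyValues an UPSERT would produce without ever committing it.
static void dumpUncommitted() throws Exception {
  Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
  try {
    conn.setAutoCommit(false);
    PreparedStatement stmt = conn.prepareStatement("UPSERT INTO MY_TABLE (ID, NAME) VALUES (?, ?)");
    stmt.setInt(1, 1);
    stmt.setString(2, "example");
    stmt.execute();
    Iterator<Pair<byte[], List<KeyValue>>> it =
        PhoenixRuntime.getUncommittedDataIterator(conn, false);
    while (it.hasNext()) {
      Pair<byte[], List<KeyValue>> pair = it.next();
      // getFirst() is the physical HBase table name; getSecond() holds the sorted KeyValues for it
      System.out.println(Bytes.toString(pair.getFirst()) + ": " + pair.getSecond().size() + " cells");
    }
    conn.rollback(); // nothing is ever sent to the server
  } finally {
    conn.close();
  }
}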
Example No. 5
 /**
  * Encode the primary key values from the table as a byte array. The values must be in the same
  * order as the primary key constraint. If the connection and table are both tenant-specific, the
  * tenant ID column must not be present in the values.
  *
  * @param conn an open connection
  * @param fullTableName the full table name
  * @param values the values of the primary key columns ordered in the same order as the primary
  *     key constraint
  * @return the encoded byte array
  * @throws SQLException if the table cannot be found or an incorrect number of values is
  *     provided
  * @see #decodePK(Connection, String, byte[]) to decode the byte[] back to the values
  */
 public static byte[] encodePK(Connection conn, String fullTableName, Object[] values)
     throws SQLException {
   PTable table = getTable(conn, fullTableName);
   PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
   int offset =
       (table.getBucketNum() == null ? 0 : 1)
           + (table.isMultiTenant() && pconn.getTenantId() != null ? 1 : 0);
   List<PColumn> pkColumns = table.getPKColumns();
   if (pkColumns.size() - offset != values.length) {
     throw new SQLException(
         "Expected " + (pkColumns.size() - offset) + " but got " + values.length);
   }
   PDataType type = null;
   TrustedByteArrayOutputStream output =
       new TrustedByteArrayOutputStream(table.getRowKeySchema().getEstimatedValueLength());
   try {
     for (int i = offset; i < pkColumns.size(); i++) {
       if (type != null && !type.isFixedWidth()) {
         output.write(QueryConstants.SEPARATOR_BYTE);
       }
       type = pkColumns.get(i).getDataType();
       byte[] value = type.toBytes(values[i - offset]);
       output.write(value);
     }
     return output.toByteArray();
   } finally {
     try {
       output.close();
     } catch (IOException e) {
       throw new RuntimeException(e); // Impossible
     }
   }
 }
  @Override
  protected void map(NullWritable key, PhoenixIndexDBWritable record, Context context)
      throws IOException, InterruptedException {

    context.getCounter(PhoenixJobCounters.INPUT_RECORDS).increment(1);

    try {
      final List<Object> values = record.getValues();
      indxWritable.setValues(values);
      indxWritable.write(this.pStatement);
      this.pStatement.execute();

      final PhoenixConnection pconn = connection.unwrap(PhoenixConnection.class);
      MutationState currentMutationState = pconn.getMutationState();
      if (mutationState == null) {
        mutationState = currentMutationState;
        return;
      }
      // Keep accumulating Mutations until we reach the batch size
      mutationState.join(currentMutationState);

      // Write Mutation Batch
      if (context.getCounter(PhoenixJobCounters.INPUT_RECORDS).getValue() % batchSize == 0) {
        writeBatch(mutationState, context);
        mutationState = null;
      }

      // Make sure progress is reported to Application Master.
      context.progress();
    } catch (SQLException e) {
      LOG.error(" Error {}  while read/write of a record ", e.getMessage());
      context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
      throw new RuntimeException(e);
    }
  }
Example No. 7
 protected TableRef createTableRef(NamedTableNode tableNode, boolean updateCacheImmediately)
     throws SQLException {
   String tableName = tableNode.getName().getTableName();
   String schemaName = tableNode.getName().getSchemaName();
   long timeStamp = QueryConstants.UNSET_TIMESTAMP;
   String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
   PName tenantId = connection.getTenantId();
   PTable theTable = null;
   if (updateCacheImmediately || connection.getAutoCommit()) {
     MetaDataMutationResult result = client.updateCache(schemaName, tableName);
     timeStamp = result.getMutationTime();
     theTable = result.getTable();
     if (theTable == null) {
       throw new TableNotFoundException(schemaName, tableName, timeStamp);
     }
   } else {
     try {
       theTable = connection.getMetaDataCache().getTable(new PTableKey(tenantId, fullTableName));
     } catch (TableNotFoundException e1) {
       if (tenantId != null) { // Check with null tenantId next
         try {
           theTable = connection.getMetaDataCache().getTable(new PTableKey(null, fullTableName));
         } catch (TableNotFoundException e2) {
         }
       }
     }
     // We always attempt to update the cache in the event of a TableNotFoundException
     if (theTable == null) {
       MetaDataMutationResult result = client.updateCache(schemaName, tableName);
       if (result.wasUpdated()) {
         timeStamp = result.getMutationTime();
         theTable = result.getTable();
       }
     }
     if (theTable == null) {
       throw new TableNotFoundException(schemaName, tableName, timeStamp);
     }
   }
   // Add any dynamic columns to the table declaration
   List<ColumnDef> dynamicColumns = tableNode.getDynamicColumns();
   theTable = addDynamicColumns(dynamicColumns, theTable);
   TableRef tableRef =
       new TableRef(tableNode.getAlias(), theTable, timeStamp, !dynamicColumns.isEmpty());
   if (logger.isDebugEnabled() && timeStamp != QueryConstants.UNSET_TIMESTAMP) {
     logger.debug(
         "Re-resolved stale table "
             + fullTableName
             + " with seqNum "
             + tableRef.getTable().getSequenceNumber()
             + " at timestamp "
             + tableRef.getTable().getTimeStamp()
             + " with "
             + tableRef.getTable().getColumns().size()
             + " columns: "
             + tableRef.getTable().getColumns());
   }
   return tableRef;
 }
Example No. 8
 /**
  * Validates the client-side metadata against the server metadata if we haven't yet done so.
  * Otherwise, for every UPSERT VALUES call, we'd need to hit the server to see if the metadata
  * has changed.
  *
  * @return the server timestamps to use for the upserts, one per mutated table
  * @throws SQLException if the table or any columns no longer exist
  */
 private long[] validate() throws SQLException {
   int i = 0;
   Long scn = connection.getSCN();
   PName tenantId = connection.getTenantId();
   MetaDataClient client = new MetaDataClient(connection);
   long[] timeStamps = new long[this.mutations.size()];
   for (Map.Entry<TableRef, Map<ImmutableBytesPtr, Map<PColumn, byte[]>>> entry :
       mutations.entrySet()) {
     TableRef tableRef = entry.getKey();
     long serverTimeStamp = tableRef.getTimeStamp();
     PTable table = tableRef.getTable();
      // If we're auto committing, we've already validated the schema when we got the
      // ColumnResolver, so no need to do it again here.
     if (!connection.getAutoCommit()) {
       MetaDataMutationResult result =
           client.updateCache(table.getSchemaName().getString(), table.getTableName().getString());
       long timestamp = result.getMutationTime();
       if (timestamp != QueryConstants.UNSET_TIMESTAMP) {
         serverTimeStamp = timestamp;
         if (result.wasUpdated()) {
           // TODO: use bitset?
           table =
               connection
                   .getMetaDataCache()
                   .getTable(new PTableKey(tenantId, table.getName().getString()));
           PColumn[] columns = new PColumn[table.getColumns().size()];
           for (Map.Entry<ImmutableBytesPtr, Map<PColumn, byte[]>> rowEntry :
               entry.getValue().entrySet()) {
             Map<PColumn, byte[]> valueEntry = rowEntry.getValue();
             if (valueEntry != PRow.DELETE_MARKER) {
               for (PColumn column : valueEntry.keySet()) {
                 columns[column.getPosition()] = column;
               }
             }
           }
           for (PColumn column : columns) {
             if (column != null) {
               table
                   .getColumnFamily(column.getFamilyName().getString())
                   .getColumn(column.getName().getString());
             }
           }
           tableRef.setTable(table);
         }
       }
     }
     timeStamps[i++] =
         scn == null
             ? serverTimeStamp == QueryConstants.UNSET_TIMESTAMP
                 ? HConstants.LATEST_TIMESTAMP
                 : serverTimeStamp
             : scn;
   }
   return timeStamps;
 }
 @BeforeClass
 public static void doSetup() throws Exception {
   startServer(getUrl());
   ensureTableCreated(getUrl(), ATABLE_NAME);
   ensureTableCreated(getUrl(), ENTITY_HISTORY_TABLE_NAME);
   ensureTableCreated(getUrl(), FUNKY_NAME);
   ensureTableCreated(getUrl(), PTSDB_NAME);
   ensureTableCreated(getUrl(), PTSDB2_NAME);
   ensureTableCreated(getUrl(), PTSDB3_NAME);
   ensureTableCreated(getUrl(), MULTI_CF_NAME);
   ensureTableCreated(getUrl(), JOIN_ORDER_TABLE_FULL_NAME);
   ensureTableCreated(getUrl(), JOIN_CUSTOMER_TABLE_FULL_NAME);
   ensureTableCreated(getUrl(), JOIN_ITEM_TABLE_FULL_NAME);
   ensureTableCreated(getUrl(), JOIN_SUPPLIER_TABLE_FULL_NAME);
   ensureTableCreated(getUrl(), TABLE_WITH_ARRAY);
   Properties props = new Properties();
   props.setProperty(
       PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(HConstants.LATEST_TIMESTAMP));
   PhoenixConnection conn =
       DriverManager.getConnection(PHOENIX_CONNECTIONLESS_JDBC_URL, props)
           .unwrap(PhoenixConnection.class);
   try {
     PTable table = conn.getTable(new PTableKey(null, ATABLE_NAME));
     ATABLE = table;
     ORGANIZATION_ID =
         new ColumnRef(new TableRef(table), table.getColumn("ORGANIZATION_ID").getPosition())
             .newColumnExpression();
     ENTITY_ID =
         new ColumnRef(new TableRef(table), table.getColumn("ENTITY_ID").getPosition())
             .newColumnExpression();
     A_INTEGER =
         new ColumnRef(new TableRef(table), table.getColumn("A_INTEGER").getPosition())
             .newColumnExpression();
     A_STRING =
         new ColumnRef(new TableRef(table), table.getColumn("A_STRING").getPosition())
             .newColumnExpression();
     B_STRING =
         new ColumnRef(new TableRef(table), table.getColumn("B_STRING").getPosition())
             .newColumnExpression();
     A_DATE =
         new ColumnRef(new TableRef(table), table.getColumn("A_DATE").getPosition())
             .newColumnExpression();
     A_TIME =
         new ColumnRef(new TableRef(table), table.getColumn("A_TIME").getPosition())
             .newColumnExpression();
     A_TIMESTAMP =
         new ColumnRef(new TableRef(table), table.getColumn("A_TIMESTAMP").getPosition())
             .newColumnExpression();
     X_DECIMAL =
         new ColumnRef(new TableRef(table), table.getColumn("X_DECIMAL").getPosition())
             .newColumnExpression();
   } finally {
     conn.close();
   }
 }
Example No. 10
  /**
   * Provides a mechanism to run SQL scripts and load CSV files from the command line. The
   * arguments are: 1) the connection URL string, and 2) one or more paths to either SQL scripts or
   * CSV files. If a CurrentSCN property is set on the connection URL, it is incremented between
   * files, with each file being processed by a new connection at the incremented timestamp value.
   */
  public static void main(String[] args) {

    ExecutionCommand execCmd = ExecutionCommand.parseArgs(args);
    String jdbcUrl = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + execCmd.getConnectionString();

    PhoenixConnection conn = null;
    try {
      Properties props = new Properties();
      conn = DriverManager.getConnection(jdbcUrl, props).unwrap(PhoenixConnection.class);

      for (String inputFile : execCmd.getInputFiles()) {
        if (inputFile.endsWith(SQL_FILE_EXT)) {
          PhoenixRuntime.executeStatements(
              conn, new FileReader(inputFile), Collections.emptyList());
        } else if (inputFile.endsWith(CSV_FILE_EXT)) {

          String tableName = execCmd.getTableName();
          if (tableName == null) {
            tableName =
                SchemaUtil.normalizeIdentifier(
                    inputFile.substring(
                        inputFile.lastIndexOf(File.separatorChar) + 1,
                        inputFile.length() - CSV_FILE_EXT.length()));
          }
          CSVCommonsLoader csvLoader =
              new CSVCommonsLoader(
                  conn,
                  tableName,
                  execCmd.getColumns(),
                  execCmd.isStrict(),
                  execCmd.getFieldDelimiter(),
                  execCmd.getQuoteCharacter(),
                  execCmd.getEscapeCharacter(),
                  execCmd.getArrayElementSeparator());
          csvLoader.upsert(inputFile);
        }
      }
    } catch (Throwable t) {
      t.printStackTrace();
    } finally {
      if (conn != null) {
        try {
          conn.close();
        } catch (SQLException e) {
          // The JVM is about to shut down anyway, so just swallow the exception.
        }
      }
      System.exit(0);
    }
  }
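
A hedged sketch of driving this entry point; the ZooKeeper quorum and file names are hypothetical, and note that the method ends the JVM via System.exit(0) in its finally block, so it is meant to run as its own process.

// Illustrative only: run a DDL script, then bulk-load a CSV. Only the connection quorum is
// passed; the method prepends the JDBC_PROTOCOL prefix itself. The CSV is loaded into a table
// named after the file (data.csv -> DATA) unless ExecutionCommand supplies a table name.
public static void launch() {
  main(new String[] {"localhost:2181", "create_schema.sql", "data.csv"});
  // Never returns normally: main() calls System.exit(0) in its finally block.
}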
  @Override
  protected void setup(Context context) throws IOException, InterruptedException {

    Configuration conf = context.getConfiguration();

    // pass client configuration into driver
    Properties clientInfos = new Properties();
    for (Map.Entry<String, String> entry : conf) {
      clientInfos.setProperty(entry.getKey(), entry.getValue());
    }

    try {
      conn = (PhoenixConnection) QueryUtil.getConnectionOnServer(clientInfos, conf);
      // We are dependent on rolling back before performing commits, so we need to be sure
      // that auto-commit is not turned on
      conn.setAutoCommit(false);

      final String tableNamesConf = conf.get(TABLE_NAMES_CONFKEY);
      final String logicalNamesConf = conf.get(LOGICAL_NAMES_CONFKEY);
      tableNames = TargetTableRefFunctions.NAMES_FROM_JSON.apply(tableNamesConf);
      logicalNames = TargetTableRefFunctions.NAMES_FROM_JSON.apply(logicalNamesConf);

      initColumnIndexes();
    } catch (SQLException | ClassNotFoundException e) {
      throw new RuntimeException(e);
    }

    upsertListener =
        new MapperUpsertListener<RECORD>(
            context, conf.getBoolean(IGNORE_INVALID_ROW_CONFKEY, true));
    upsertExecutor = buildUpsertExecutor(conf);
    preUpdateProcessor = PhoenixConfigurationUtil.loadPreUpsertProcessor(conf);
  }
Example No. 12
 public static PTable getTable(Connection conn, String name) throws SQLException {
   PTable table = null;
   PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
   try {
     table = pconn.getMetaDataCache().getTable(new PTableKey(pconn.getTenantId(), name));
   } catch (TableNotFoundException e) {
     String schemaName = SchemaUtil.getSchemaNameFromFullName(name);
     String tableName = SchemaUtil.getTableNameFromFullName(name);
     MetaDataMutationResult result = new MetaDataClient(pconn).updateCache(schemaName, tableName);
     if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
       throw e;
     }
     table = result.getTable();
   }
   return table;
 }
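
A hedged usage sketch for the helper above: resolve a table, falling back to a server metadata refresh when it is not in the local cache, and inspect its primary key columns, the same PTable accessors encodePK in Example No. 5 relies on. The table name is illustrative.

import java.sql.Connection;
import java.sql.SQLException;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PTable;

// Illustrative only: print the primary key column names of a hypothetical table.
static void printPkColumns(Connection conn) throws SQLException {
  PTable table = getTable(conn, "MY_SCHEMA.MY_TABLE");
  for (PColumn pkColumn : table.getPKColumns()) {
    System.out.println(pkColumn.getName().getString());
  }
}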
Example No. 13
  private Iterator<Pair<byte[], List<Mutation>>> addRowMutations(
      final TableRef tableRef,
      final Map<ImmutableBytesPtr, Map<PColumn, byte[]>> values,
      long timestamp,
      boolean includeMutableIndexes) {
    final List<Mutation> mutations = Lists.newArrayListWithExpectedSize(values.size());
    Iterator<Map.Entry<ImmutableBytesPtr, Map<PColumn, byte[]>>> iterator =
        values.entrySet().iterator();
    while (iterator.hasNext()) {
      Map.Entry<ImmutableBytesPtr, Map<PColumn, byte[]>> rowEntry = iterator.next();
      ImmutableBytesPtr key = rowEntry.getKey();
      PRow row = tableRef.getTable().newRow(connection.getKeyValueBuilder(), timestamp, key);
      if (rowEntry.getValue() == PRow.DELETE_MARKER) { // means delete
        row.delete();
      } else {
        for (Map.Entry<PColumn, byte[]> valueEntry : rowEntry.getValue().entrySet()) {
          row.setValue(valueEntry.getKey(), valueEntry.getValue());
        }
      }
      mutations.addAll(row.toRowMutations());
    }
    // Only maintain tables with immutable rows through this client-side mechanism
    final Iterator<PTable> indexes =
        (tableRef.getTable().isImmutableRows() || includeMutableIndexes)
            ? IndexMaintainer.nonDisabledIndexIterator(tableRef.getTable().getIndexes().iterator())
            : Iterators.<PTable>emptyIterator();
    return new Iterator<Pair<byte[], List<Mutation>>>() {
      boolean isFirst = true;

      @Override
      public boolean hasNext() {
        return isFirst || indexes.hasNext();
      }

      @Override
      public Pair<byte[], List<Mutation>> next() {
        if (isFirst) {
          isFirst = false;
          return new Pair<byte[], List<Mutation>>(
              tableRef.getTable().getPhysicalName().getBytes(), mutations);
        }
        PTable index = indexes.next();
        List<Mutation> indexMutations;
        try {
          indexMutations =
              IndexUtil.generateIndexData(
                  tableRef.getTable(), index, mutations, tempPtr, connection.getKeyValueBuilder());
        } catch (SQLException e) {
          throw new IllegalDataException(e);
        }
        return new Pair<byte[], List<Mutation>>(index.getPhysicalName().getBytes(), indexMutations);
      }

      @Override
      public void remove() {
        throw new UnsupportedOperationException();
      }
    };
  }
 /**
  * Remove the cached table from all region servers
  *
  * @param cacheId unique identifier for the hash join (returned from {@link #addHashCache(HTable,
  *     Scan, Set)})
  * @param servers list of servers upon which table was cached (filled in by {@link
  *     #addHashCache(HTable, Scan, Set)})
  * @throws SQLException
  * @throws IllegalStateException if hashed table cannot be removed on any region server on which
  *     it was added
  */
 private void removeServerCache(final byte[] cacheId, Set<HRegionLocation> servers)
     throws SQLException {
   ConnectionQueryServices services = connection.getQueryServices();
   Throwable lastThrowable = null;
   TableRef cacheUsingTableRef = cacheUsingTableRefMap.get(Bytes.mapKey(cacheId));
   byte[] tableName = cacheUsingTableRef.getTable().getPhysicalName().getBytes();
   HTableInterface iterateOverTable = services.getTable(tableName);
   List<HRegionLocation> locations = services.getAllTableRegions(tableName);
   Set<HRegionLocation> remainingOnServers = new HashSet<HRegionLocation>(servers);
    /*
     * Allow for the possibility that the region we used to decide where to send our cache has
     * split and been relocated to another region server *after* we sent it, but before we removed
     * it. To accommodate this, we iterate through the current metadata boundaries and remove the
     * cache once for each server that we originally sent to.
     */
   if (LOG.isDebugEnabled()) {
     LOG.debug("Removing Cache " + cacheId + " from servers.");
   }
   for (HRegionLocation entry : locations) {
     if (remainingOnServers.contains(entry)) { // Call once per server
       try {
         byte[] key = entry.getRegionInfo().getStartKey();
         iterateOverTable.coprocessorService(
             ServerCachingService.class,
             key,
             key,
             new Batch.Call<ServerCachingService, RemoveServerCacheResponse>() {
               @Override
               public RemoveServerCacheResponse call(ServerCachingService instance)
                   throws IOException {
                 ServerRpcController controller = new ServerRpcController();
                 BlockingRpcCallback<RemoveServerCacheResponse> rpcCallback =
                     new BlockingRpcCallback<RemoveServerCacheResponse>();
                 RemoveServerCacheRequest.Builder builder = RemoveServerCacheRequest.newBuilder();
                 if (connection.getTenantId() != null) {
                   builder.setTenantId(
                       HBaseZeroCopyByteString.wrap(connection.getTenantId().getBytes()));
                 }
                 builder.setCacheId(HBaseZeroCopyByteString.wrap(cacheId));
                 instance.removeServerCache(controller, builder.build(), rpcCallback);
                 if (controller.getFailedOn() != null) {
                   throw controller.getFailedOn();
                 }
                 return rpcCallback.get();
               }
             });
         remainingOnServers.remove(entry);
       } catch (Throwable t) {
         lastThrowable = t;
         LOG.error("Error trying to remove hash cache for " + entry, t);
       }
     }
   }
   if (!remainingOnServers.isEmpty()) {
     LOG.warn("Unable to remove hash cache for " + remainingOnServers, lastThrowable);
   }
 }
Example No. 15
 /**
  * Decode a byte array value back into the Object values of the primary key constraint. If the
  * connection and table are both tenant-specific, the tenant ID column is not expected to have
  * been encoded and will not appear in the returned values.
  *
  * @param conn an open connection
  * @param name the full table name
  * @param value the value that was encoded with {@link #encodePK(Connection, String, Object[])}
  * @return the Object values encoded in the byte array value
  * @throws SQLException
  */
 public static Object[] decodePK(Connection conn, String name, byte[] value) throws SQLException {
   PTable table = getTable(conn, name);
   PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
   int offset =
       (table.getBucketNum() == null ? 0 : 1)
           + (table.isMultiTenant() && pconn.getTenantId() != null ? 1 : 0);
   RowKeySchema schema = table.getRowKeySchema();
   int nValues = schema.getMaxFields() - offset;
   Object[] values = new Object[nValues];
   ImmutableBytesWritable ptr = new ImmutableBytesWritable();
   int i = 0;
   schema.iterator(value, ptr, offset);
   while (i < nValues && schema.next(ptr, i, value.length) != null) {
     values[i] = schema.getField(i).getDataType().toObject(ptr);
     i++;
   }
   return values;
 }
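
A hedged round-trip sketch combining this method with encodePK from Example No. 5, assuming both statics live on the same utility class (PhoenixRuntime in Phoenix). The table name and key values are illustrative; on a tenant-specific connection the tenant ID column is neither passed in nor returned.

import java.sql.Connection;
import java.sql.SQLException;

// Illustrative only: encode the primary key of a hypothetical table, then decode it back.
static Object[] roundTripPK(Connection conn) throws SQLException {
  byte[] encoded = encodePK(conn, "MY_SCHEMA.MY_TABLE", new Object[] {"org1", "entity1"});
  return decodePK(conn, "MY_SCHEMA.MY_TABLE", encoded); // yields {"org1", "entity1"} again
}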
 @Override
 protected void cleanup(Context context) throws IOException, InterruptedException {
   try {
     if (conn != null) {
       conn.close();
     }
   } catch (SQLException e) {
     throw new RuntimeException(e);
   }
 }
  @SuppressWarnings("deprecation")
  @Override
  protected void map(LongWritable key, Text value, Context context)
      throws IOException, InterruptedException {
    if (conn == null) {
      throw new RuntimeException("Connection not initialized.");
    }
    try {
      RECORD record = null;
      try {
        record = getLineParser().parse(value.toString());
      } catch (IOException e) {
        context.getCounter(COUNTER_GROUP_NAME, "Parser errors").increment(1L);
        return;
      }

      if (record == null) {
        context.getCounter(COUNTER_GROUP_NAME, "Empty records").increment(1L);
        return;
      }
      upsertExecutor.execute(ImmutableList.<RECORD>of(record));
      Map<Integer, List<KeyValue>> map = new HashMap<>();
      Iterator<Pair<byte[], List<KeyValue>>> uncommittedDataIterator =
          PhoenixRuntime.getUncommittedDataIterator(conn, true);
      while (uncommittedDataIterator.hasNext()) {
        Pair<byte[], List<KeyValue>> kvPair = uncommittedDataIterator.next();
        List<KeyValue> keyValueList = kvPair.getSecond();
        keyValueList = preUpdateProcessor.preUpsert(kvPair.getFirst(), keyValueList);
        byte[] first = kvPair.getFirst();
        // Create a list of KV for each table
        for (int i = 0; i < tableNames.size(); i++) {
          if (Bytes.compareTo(Bytes.toBytes(tableNames.get(i)), first) == 0) {
            if (!map.containsKey(i)) {
              map.put(i, new ArrayList<KeyValue>());
            }
            List<KeyValue> list = map.get(i);
            for (KeyValue kv : keyValueList) {
              list.add(kv);
            }
            break;
          }
        }
      }
      for (Map.Entry<Integer, List<KeyValue>> rowEntry : map.entrySet()) {
        int tableIndex = rowEntry.getKey();
        List<KeyValue> lkv = rowEntry.getValue();
        // All KeyValues for the table are combined into a single byte array
        writeAggregatedRow(context, tableNames.get(tableIndex), lkv);
      }
      conn.rollback();
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
Example No. 18
 protected static PhoenixConnection addMetaDataColumn(
     PhoenixConnection conn, long scn, String columnDef) throws SQLException {
   PhoenixConnection metaConnection = null;
   Statement stmt = null;
   try {
     metaConnection = new PhoenixConnection(conn.getQueryServices(), conn, scn);
     try {
       stmt = metaConnection.createStatement();
       stmt.executeUpdate("ALTER TABLE SYSTEM.\"TABLE\" ADD IF NOT EXISTS " + columnDef);
       return metaConnection;
     } finally {
       if (stmt != null) {
         stmt.close();
       }
     }
   } finally {
     if (metaConnection != null) {
       metaConnection.close();
     }
   }
 }
  @Test
  public void testGetSplitsWithSkipScanFilter() throws Exception {
    byte[][] splits = new byte[][] {Ka1A, Ka1B, Ka1E, Ka1G, Ka1I, Ka2A};
    createTestTable(getUrl(), DDL, splits, null);
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);

    PTable table =
        pconn.getMetaDataCache().getTable(new PTableKey(pconn.getTenantId(), TABLE_NAME));
    TableRef tableRef = new TableRef(table);
    List<HRegionLocation> regions =
        pconn
            .getQueryServices()
            .getAllTableRegions(tableRef.getTable().getPhysicalName().getBytes());
    List<KeyRange> ranges = getSplits(tableRef, scan, regions, scanRanges);
    assertEquals(
        "Unexpected number of splits: " + ranges.size(), expectedSplits.size(), ranges.size());
    for (int i = 0; i < expectedSplits.size(); i++) {
      assertEquals(expectedSplits.get(i), ranges.get(i));
    }
  }
Example No. 20
 @Override
 public void upsertDone(long upsertCount) {
   totalUpserts = upsertCount;
   if (upsertCount % upsertBatchSize == 0) {
     if (upsertCount % 1000 == 0) {
       LOG.info("Processed upsert #{}", upsertCount);
     }
     try {
       LOG.info("Committing after {} records", upsertCount);
       conn.commit();
     } catch (SQLException e) {
       throw new RuntimeException(e);
     }
   }
 }
Example No. 21
 public static ColumnResolver getResolverForCreation(
     final CreateTableStatement statement, final PhoenixConnection connection)
     throws SQLException {
   TableName baseTable = statement.getBaseTableName();
   if (baseTable == null) {
     return EMPTY_TABLE_RESOLVER;
   }
   NamedTableNode tableNode =
       NamedTableNode.create(null, baseTable, Collections.<ColumnDef>emptyList());
   // Always use non-tenant-specific connection here
   try {
     SingleTableColumnResolver visitor =
         new SingleTableColumnResolver(connection, tableNode, true);
     return visitor;
   } catch (TableNotFoundException e) {
     // Used for mapped VIEW, since we won't be able to resolve that.
     // Instead, we create a table with just the dynamic columns.
     // A tenant-specific connection may not create a mapped VIEW.
     if (connection.getTenantId() == null && statement.getTableType() == PTableType.VIEW) {
       ConnectionQueryServices services = connection.getQueryServices();
       byte[] fullTableName =
           SchemaUtil.getTableNameAsBytes(baseTable.getSchemaName(), baseTable.getTableName());
       HTableInterface htable = null;
       try {
         htable = services.getTable(fullTableName);
       } catch (UnsupportedOperationException ignore) {
         throw e; // For Connectionless
       } finally {
         if (htable != null) Closeables.closeQuietly(htable);
       }
       tableNode = NamedTableNode.create(null, baseTable, statement.getColumnDefs());
       return new SingleTableColumnResolver(connection, tableNode, e.getTimeStamp());
     }
     throw e;
   }
 }
Example No. 22
  @Test
  public void testCreateTableToBeTransactional() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    String ddl = "CREATE TABLE TEST_TRANSACTIONAL_TABLE (k varchar primary key) transactional=true";
    conn.createStatement().execute(ddl);
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    PTable table = pconn.getTable(new PTableKey(null, "TEST_TRANSACTIONAL_TABLE"));
    HTableInterface htable =
        pconn.getQueryServices().getTable(Bytes.toBytes("TEST_TRANSACTIONAL_TABLE"));
    assertTrue(table.isTransactional());
    assertTrue(
        htable
            .getTableDescriptor()
            .getCoprocessors()
            .contains(TransactionProcessor.class.getName()));

    try {
      ddl = "ALTER TABLE TEST_TRANSACTIONAL_TABLE SET transactional=false";
      conn.createStatement().execute(ddl);
      fail();
    } catch (SQLException e) {
      assertEquals(SQLExceptionCode.TX_MAY_NOT_SWITCH_TO_NON_TX.getErrorCode(), e.getErrorCode());
    }

    HBaseAdmin admin = pconn.getQueryServices().getAdmin();
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("TXN_TEST_EXISTING"));
    desc.addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES));
    admin.createTable(desc);
    ddl = "CREATE TABLE TXN_TEST_EXISTING (k varchar primary key) transactional=true";
    conn.createStatement().execute(ddl);
    assertEquals(
        Boolean.TRUE.toString(),
        admin
            .getTableDescriptor(TableName.valueOf("TXN_TEST_EXISTING"))
            .getValue(TxConstants.READ_NON_TX_DATA));

    // CREATE TABLE IF NOT EXISTS without transactional=true may not switch the existing
    // transactional table back to non-transactional; adding transactional=true below succeeds
    // because the HBase metadata then matches the existing metadata.
    ddl = "CREATE TABLE IF NOT EXISTS TEST_TRANSACTIONAL_TABLE (k varchar primary key)";
    try {
      conn.createStatement().execute(ddl);
      fail();
    } catch (SQLException e) {
      assertEquals(SQLExceptionCode.TX_MAY_NOT_SWITCH_TO_NON_TX.getErrorCode(), e.getErrorCode());
    }
    ddl += " transactional=true";
    conn.createStatement().execute(ddl);
    table = pconn.getTable(new PTableKey(null, "TEST_TRANSACTIONAL_TABLE"));
    htable = pconn.getQueryServices().getTable(Bytes.toBytes("TEST_TRANSACTIONAL_TABLE"));
    assertTrue(table.isTransactional());
    assertTrue(
        htable
            .getTableDescriptor()
            .getCoprocessors()
            .contains(TransactionProcessor.class.getName()));
  }
Example No. 23
  public Iterator<Pair<byte[], List<Mutation>>> toMutations(final boolean includeMutableIndexes) {
    final Iterator<Map.Entry<TableRef, Map<ImmutableBytesPtr, Map<PColumn, byte[]>>>> iterator =
        this.mutations.entrySet().iterator();
    if (!iterator.hasNext()) {
      return Iterators.emptyIterator();
    }
    Long scn = connection.getSCN();
    final long timestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
    return new Iterator<Pair<byte[], List<Mutation>>>() {
      private Map.Entry<TableRef, Map<ImmutableBytesPtr, Map<PColumn, byte[]>>> current =
          iterator.next();
      private Iterator<Pair<byte[], List<Mutation>>> innerIterator = init();

      private Iterator<Pair<byte[], List<Mutation>>> init() {
        return addRowMutations(
            current.getKey(), current.getValue(), timestamp, includeMutableIndexes);
      }

      @Override
      public boolean hasNext() {
        return innerIterator.hasNext() || iterator.hasNext();
      }

      @Override
      public Pair<byte[], List<Mutation>> next() {
        if (!innerIterator.hasNext()) {
          current = iterator.next();
        }
        return innerIterator.next();
      }

      @Override
      public void remove() {
        throw new UnsupportedOperationException();
      }
    };
  }
Example No. 24
 private static MutationState upsertSelect(
     PhoenixStatement statement,
     TableRef tableRef,
     RowProjector projector,
     ResultIterator iterator,
     int[] columnIndexes,
     int[] pkSlotIndexes)
     throws SQLException {
   try {
     PhoenixConnection connection = statement.getConnection();
     ConnectionQueryServices services = connection.getQueryServices();
     int maxSize =
         services
             .getProps()
             .getInt(
                 QueryServices.MAX_MUTATION_SIZE_ATTRIB,
                 QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
     int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
     boolean isAutoCommit = connection.getAutoCommit();
     byte[][] values = new byte[columnIndexes.length][];
     int rowCount = 0;
     Map<ImmutableBytesPtr, Map<PColumn, byte[]>> mutation =
         Maps.newHashMapWithExpectedSize(batchSize);
     PTable table = tableRef.getTable();
     ResultSet rs = new PhoenixResultSet(iterator, projector, statement);
     ImmutableBytesWritable ptr = new ImmutableBytesWritable();
     while (rs.next()) {
       for (int i = 0; i < values.length; i++) {
         PColumn column = table.getColumns().get(columnIndexes[i]);
         byte[] bytes = rs.getBytes(i + 1);
         ptr.set(bytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : bytes);
         Object value = rs.getObject(i + 1);
         int rsPrecision = rs.getMetaData().getPrecision(i + 1);
         Integer precision = rsPrecision == 0 ? null : rsPrecision;
         int rsScale = rs.getMetaData().getScale(i + 1);
         Integer scale = rsScale == 0 ? null : rsScale;
          // We are guaranteed that the two columns have compatible types,
          // as we checked that earlier.
         if (!column
             .getDataType()
             .isSizeCompatible(
                 ptr,
                 value,
                 column.getDataType(),
                 precision,
                 scale,
                 column.getMaxLength(),
                 column.getScale())) {
           throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY)
               .setColumnName(column.getName().getString())
               .setMessage("value=" + column.getDataType().toStringLiteral(ptr, null))
               .build()
               .buildException();
         }
         column
             .getDataType()
             .coerceBytes(
                 ptr,
                 value,
                 column.getDataType(),
                 precision,
                 scale,
                 SortOrder.getDefault(),
                 column.getMaxLength(),
                 column.getScale(),
                 column.getSortOrder());
         values[i] = ByteUtil.copyKeyBytesIfNecessary(ptr);
       }
       setValues(values, pkSlotIndexes, columnIndexes, table, mutation);
       rowCount++;
       // Commit a batch if auto commit is true and we're at our batch size
       if (isAutoCommit && rowCount % batchSize == 0) {
         MutationState state = new MutationState(tableRef, mutation, 0, maxSize, connection);
         connection.getMutationState().join(state);
         connection.commit();
         mutation.clear();
       }
     }
     // If auto commit is true, this last batch will be committed upon return
     return new MutationState(
         tableRef, mutation, rowCount / batchSize * batchSize, maxSize, connection);
   } finally {
     iterator.close();
   }
 }
  @Override
  public PeekingResultIterator newIterator(
      final StatementContext parentContext,
      ResultIterator iterator,
      Scan scan,
      String tableName,
      QueryPlan plan)
      throws SQLException {
    final PhoenixConnection clonedConnection = new PhoenixConnection(this.connection);

    MutationState state = mutate(parentContext, iterator, clonedConnection);

    long totalRowCount = state.getUpdateCount();
    if (clonedConnection.getAutoCommit()) {
      clonedConnection.getMutationState().join(state);
      state = clonedConnection.getMutationState();
    }
    final MutationState finalState = state;

    byte[] value = PLong.INSTANCE.toBytes(totalRowCount);
    KeyValue keyValue =
        KeyValueUtil.newKeyValue(
            UNGROUPED_AGG_ROW_KEY,
            SINGLE_COLUMN_FAMILY,
            SINGLE_COLUMN,
            AGG_TIMESTAMP,
            value,
            0,
            value.length);
    final Tuple tuple = new SingleKeyValueTuple(keyValue);
    return new PeekingResultIterator() {
      private boolean done = false;

      @Override
      public Tuple next() throws SQLException {
        if (done) {
          return null;
        }
        done = true;
        return tuple;
      }

      @Override
      public void explain(List<String> planSteps) {}

      @Override
      public void close() throws SQLException {
        try {
          /*
           * Join the child mutation states in close, since this is called in a single threaded manner
           * after the parallel results have been processed.
           * If auto-commit is on for the cloned child connection, then the finalState here is an empty mutation
           * state (with no mutations). However, it still has the metrics for mutation work done by the
           * mutating-iterator. Joining the mutation state makes sure those metrics are passed over
           * to the parent connection.
           */
          MutatingParallelIteratorFactory.this.connection.getMutationState().join(finalState);
        } finally {
          clonedConnection.close();
        }
      }

      @Override
      public Tuple peek() throws SQLException {
        return done ? null : tuple;
      }
    };
  }
Example No. 26
  public ServerCache addServerCache(
      ScanRanges keyRanges,
      final ImmutableBytesWritable cachePtr,
      final byte[] txState,
      final ServerCacheFactory cacheFactory,
      final TableRef cacheUsingTableRef)
      throws SQLException {
    ConnectionQueryServices services = connection.getQueryServices();
    MemoryChunk chunk = services.getMemoryManager().allocate(cachePtr.getLength());
    List<Closeable> closeables = new ArrayList<Closeable>();
    closeables.add(chunk);
    ServerCache hashCacheSpec = null;
    SQLException firstException = null;
    final byte[] cacheId = generateId();
    /** Execute EndPoint in parallel on each server to send compressed hash cache */
    // TODO: generalize and package as a per region server EndPoint caller
    // (ideally this would be functionality provided by the coprocessor framework)
    boolean success = false;
    ExecutorService executor = services.getExecutor();
    List<Future<Boolean>> futures = Collections.emptyList();
    try {
      final PTable cacheUsingTable = cacheUsingTableRef.getTable();
      List<HRegionLocation> locations =
          services.getAllTableRegions(cacheUsingTable.getPhysicalName().getBytes());
      int nRegions = locations.size();
      // Size these based on worst case
      futures = new ArrayList<Future<Boolean>>(nRegions);
      Set<HRegionLocation> servers = new HashSet<HRegionLocation>(nRegions);
      for (HRegionLocation entry : locations) {
        // Keep track of servers we've sent to and only send once
        byte[] regionStartKey = entry.getRegionInfo().getStartKey();
        byte[] regionEndKey = entry.getRegionInfo().getEndKey();
        if (!servers.contains(entry)
            && keyRanges.intersects(
                regionStartKey,
                regionEndKey,
                cacheUsingTable.getIndexType() == IndexType.LOCAL
                    ? ScanUtil.getRowKeyOffset(regionStartKey, regionEndKey)
                    : 0,
                true)) {
          // Call RPC once per server
          servers.add(entry);
          if (LOG.isDebugEnabled()) {
            LOG.debug(
                addCustomAnnotations("Adding cache entry to be sent for " + entry, connection));
          }
          final byte[] key = entry.getRegionInfo().getStartKey();
          final HTableInterface htable =
              services.getTable(cacheUsingTableRef.getTable().getPhysicalName().getBytes());
          closeables.add(htable);
          futures.add(
              executor.submit(
                  new JobCallable<Boolean>() {

                    @Override
                    public Boolean call() throws Exception {
                      final Map<byte[], AddServerCacheResponse> results;
                      try {
                        results =
                            htable.coprocessorService(
                                ServerCachingService.class,
                                key,
                                key,
                                new Batch.Call<ServerCachingService, AddServerCacheResponse>() {
                                  @Override
                                  public AddServerCacheResponse call(ServerCachingService instance)
                                      throws IOException {
                                    ServerRpcController controller = new ServerRpcController();
                                    BlockingRpcCallback<AddServerCacheResponse> rpcCallback =
                                        new BlockingRpcCallback<AddServerCacheResponse>();
                                    AddServerCacheRequest.Builder builder =
                                        AddServerCacheRequest.newBuilder();
                                    if (connection.getTenantId() != null) {
                                      try {
                                        byte[] tenantIdBytes =
                                            ScanUtil.getTenantIdBytes(
                                                cacheUsingTable.getRowKeySchema(),
                                                cacheUsingTable.getBucketNum() != null,
                                                connection.getTenantId(),
                                                cacheUsingTable.isMultiTenant());
                                        builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                                      } catch (SQLException e) {
                                        throw new IOException(e);
                                      }
                                    }
                                    builder.setCacheId(ByteStringer.wrap(cacheId));
                                    builder.setCachePtr(
                                        org.apache.phoenix.protobuf.ProtobufUtil.toProto(cachePtr));
                                    ServerCacheFactoryProtos.ServerCacheFactory.Builder
                                        svrCacheFactoryBuider =
                                            ServerCacheFactoryProtos.ServerCacheFactory
                                                .newBuilder();
                                    svrCacheFactoryBuider.setClassName(
                                        cacheFactory.getClass().getName());
                                    builder.setCacheFactory(svrCacheFactoryBuider.build());
                                    builder.setTxState(HBaseZeroCopyByteString.wrap(txState));
                                    instance.addServerCache(
                                        controller, builder.build(), rpcCallback);
                                    if (controller.getFailedOn() != null) {
                                      throw controller.getFailedOn();
                                    }
                                    return rpcCallback.get();
                                  }
                                });
                      } catch (Throwable t) {
                        throw new Exception(t);
                      }
                      if (results != null && results.size() == 1) {
                        return results.values().iterator().next().getReturn();
                      }
                      return false;
                    }

                    /**
                     * Defines the grouping for round robin behavior. All threads spawned to process
                     * this scan will be grouped together and time sliced with other simultaneously
                     * executing parallel scans.
                     */
                    @Override
                    public Object getJobId() {
                      return ServerCacheClient.this;
                    }

                    @Override
                    public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                      return NO_OP_INSTANCE;
                    }
                  }));
        } else {
          if (LOG.isDebugEnabled()) {
            LOG.debug(
                addCustomAnnotations(
                    "NOT adding cache entry to be sent for "
                        + entry
                        + " since one already exists for that entry",
                    connection));
          }
        }
      }

      hashCacheSpec = new ServerCache(cacheId, servers, cachePtr.getLength());
      // Execute in parallel
      int timeoutMs =
          services
              .getProps()
              .getInt(
                  QueryServices.THREAD_TIMEOUT_MS_ATTRIB,
                  QueryServicesOptions.DEFAULT_THREAD_TIMEOUT_MS);
      for (Future<Boolean> future : futures) {
        future.get(timeoutMs, TimeUnit.MILLISECONDS);
      }

      cacheUsingTableRefMap.put(Bytes.mapKey(cacheId), cacheUsingTableRef);
      success = true;
    } catch (SQLException e) {
      firstException = e;
    } catch (Exception e) {
      firstException = new SQLException(e);
    } finally {
      try {
        if (!success) {
          SQLCloseables.closeAllQuietly(Collections.singletonList(hashCacheSpec));
          for (Future<Boolean> future : futures) {
            future.cancel(true);
          }
        }
      } finally {
        try {
          Closeables.closeAll(closeables);
        } catch (IOException e) {
          if (firstException == null) {
            firstException = new SQLException(e);
          }
        } finally {
          if (firstException != null) {
            throw firstException;
          }
        }
      }
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug(
          addCustomAnnotations("Cache " + cacheId + " successfully added to servers.", connection));
    }
    return hashCacheSpec;
  }
Example No. 27
  @SuppressWarnings("deprecation")
  public void commit() throws SQLException {
    int i = 0;
    byte[] tenantId = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
    long[] serverTimeStamps = validate();
    Iterator<Map.Entry<TableRef, Map<ImmutableBytesPtr, Map<PColumn, byte[]>>>> iterator =
        this.mutations.entrySet().iterator();
    List<Map.Entry<TableRef, Map<ImmutableBytesPtr, Map<PColumn, byte[]>>>> committedList =
        Lists.newArrayListWithCapacity(this.mutations.size());

    // add tracing for this operation
    TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables");
    Span span = trace.getSpan();
    while (iterator.hasNext()) {
      Map.Entry<TableRef, Map<ImmutableBytesPtr, Map<PColumn, byte[]>>> entry = iterator.next();
      Map<ImmutableBytesPtr, Map<PColumn, byte[]>> valuesMap = entry.getValue();
      TableRef tableRef = entry.getKey();
      PTable table = tableRef.getTable();
      table.getIndexMaintainers(tempPtr);
      boolean hasIndexMaintainers = tempPtr.getLength() > 0;
      boolean isDataTable = true;
      long serverTimestamp = serverTimeStamps[i++];
      Iterator<Pair<byte[], List<Mutation>>> mutationsIterator =
          addRowMutations(tableRef, valuesMap, serverTimestamp, false);
      while (mutationsIterator.hasNext()) {
        Pair<byte[], List<Mutation>> pair = mutationsIterator.next();
        byte[] htableName = pair.getFirst();
        List<Mutation> mutations = pair.getSecond();

        // create a span per target table
        // TODO maybe we can be smarter about the table name to string here?
        Span child =
            Tracing.child(span, "Writing mutation batch for table: " + Bytes.toString(htableName));

        int retryCount = 0;
        boolean shouldRetry = false;
        do {
          ServerCache cache = null;
          if (hasIndexMaintainers && isDataTable) {
            byte[] attribValue = null;
            byte[] uuidValue;
            if (IndexMetaDataCacheClient.useIndexMetadataCache(
                connection, mutations, tempPtr.getLength())) {
              IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
              cache = client.addIndexMetadataCache(mutations, tempPtr);
              child.addTimelineAnnotation("Updated index metadata cache");
              uuidValue = cache.getId();
              // If we haven't retried yet, retry for this case only, as it's possible that
              // a split will occur after we send the index metadata cache to all known
              // region servers.
              shouldRetry = true;
            } else {
              attribValue = ByteUtil.copyKeyBytesIfNecessary(tempPtr);
              uuidValue = ServerCacheClient.generateId();
            }
            // Either set the UUID to be able to access the index metadata from the cache
            // or set the index metadata directly on the Mutation
            for (Mutation mutation : mutations) {
              if (tenantId != null) {
                mutation.setAttribute(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
              }
              mutation.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
              if (attribValue != null) {
                mutation.setAttribute(PhoenixIndexCodec.INDEX_MD, attribValue);
              }
            }
          }

          SQLException sqlE = null;
          HTableInterface hTable = connection.getQueryServices().getTable(htableName);
          try {
            if (logger.isDebugEnabled()) logMutationSize(hTable, mutations);
            long startTime = System.currentTimeMillis();
            child.addTimelineAnnotation("Attempt " + retryCount);
            hTable.batch(mutations);
            child.stop();
            shouldRetry = false;
            if (logger.isDebugEnabled())
              logger.debug(
                  "Total time for batch call of  "
                      + mutations.size()
                      + " mutations into "
                      + table.getName().getString()
                      + ": "
                      + (System.currentTimeMillis() - startTime)
                      + " ms");
            committedList.add(entry);
          } catch (Exception e) {
            SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
            if (inferredE != null) {
              if (shouldRetry
                  && retryCount == 0
                  && inferredE.getErrorCode()
                      == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
                // Swallow this exception once, as it's possible that we split after sending the
                // index metadata and one of the region servers doesn't have it. This will cause it
                // to have it the next go around. If it fails again, we don't retry.
                String msg =
                    "Swallowing exception and retrying after clearing meta cache on connection. "
                        + inferredE;
                logger.warn(msg);
                connection.getQueryServices().clearTableRegionCache(htableName);

                // add a new child span as this one failed
                child.addTimelineAnnotation(msg);
                child.stop();
                child = Tracing.child(span, "Failed batch, attempting retry");

                continue;
              }
              e = inferredE;
            }
            // Throw to client with both what was committed so far and what is left to be committed.
            // That way, client can either undo what was done or try again with what was not done.
            sqlE =
                new CommitException(
                    e,
                    this,
                    new MutationState(
                        committedList, this.sizeOffset, this.maxSize, this.connection));
          } finally {
            try {
              hTable.close();
            } catch (IOException e) {
              if (sqlE != null) {
                sqlE.setNextException(ServerUtil.parseServerException(e));
              } else {
                sqlE = ServerUtil.parseServerException(e);
              }
            } finally {
              try {
                if (cache != null) {
                  cache.close();
                }
              } finally {
                if (sqlE != null) {
                  throw sqlE;
                }
              }
            }
          }
        } while (shouldRetry && retryCount++ < 1);
        isDataTable = false;
      }
      numRows -= entry.getValue().size();
      iterator.remove(); // Remove batches as we process them
    }
    trace.close();
    assert (numRows == 0);
    assert (this.mutations.isEmpty());
  }
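
The commit loop above is driven by ordinary JDBC usage. A minimal, illustrative sketch (the table name MY_TABLE and the URL are assumptions, not taken from the example; java.sql imports omitted, as in the surrounding snippets):

  try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
    conn.setAutoCommit(false); // mutations accumulate in MutationState until commit()
    try (PreparedStatement ps =
        conn.prepareStatement("UPSERT INTO MY_TABLE (ID, NAME) VALUES (?, ?)")) {
      ps.setLong(1, 1L);
      ps.setString(2, "first");
      ps.executeUpdate();
    }
    conn.commit(); // flushes the batched mutations; retries once on INDEX_METADATA_NOT_FOUND
  }
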
Example #28
0
  private void assertIteration(String dataColumns, String pk, Object[] values, String dataProps)
      throws Exception {
    String schemaName = "";
    String tableName = "T";
    Connection conn = DriverManager.getConnection(getUrl());
    String fullTableName =
        SchemaUtil.getTableName(
            SchemaUtil.normalizeIdentifier(schemaName), SchemaUtil.normalizeIdentifier(tableName));
    conn.createStatement()
        .execute(
            "CREATE TABLE "
                + fullTableName
                + "("
                + dataColumns
                + " CONSTRAINT pk PRIMARY KEY ("
                + pk
                + "))  "
                + (dataProps.isEmpty() ? "" : dataProps));
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), fullTableName));
    // The connection stays open here; it is still needed for the UPSERT and the
    // uncommitted-data iterator below, and is closed at the end of the method.
    StringBuilder buf = new StringBuilder("UPSERT INTO " + fullTableName + " VALUES(");
    for (int i = 0; i < values.length; i++) {
      buf.append("?,");
    }
    buf.setCharAt(buf.length() - 1, ')');
    PreparedStatement stmt = conn.prepareStatement(buf.toString());
    for (int i = 0; i < values.length; i++) {
      stmt.setObject(i + 1, values[i]);
    }
    stmt.execute();
    Iterator<Pair<byte[], List<KeyValue>>> iterator =
        PhoenixRuntime.getUncommittedDataIterator(conn);
    List<KeyValue> dataKeyValues = iterator.next().getSecond();
    KeyValue keyValue = dataKeyValues.get(0);

    List<SortOrder> sortOrders = Lists.newArrayListWithExpectedSize(table.getPKColumns().size());
    for (PColumn col : table.getPKColumns()) {
      sortOrders.add(col.getSortOrder());
    }
    RowKeySchema schema = table.getRowKeySchema();
    int minOffset = keyValue.getRowOffset();
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    int nExpectedValues = values.length;
    for (int i = values.length - 1; i >= 0; i--) {
      if (values[i] == null) {
        nExpectedValues--;
      } else {
        break;
      }
    }
    int i = 0;
    int maxOffset =
        schema.iterator(keyValue.getRowArray(), minOffset, keyValue.getRowLength(), ptr);
    for (i = 0; i < schema.getFieldCount(); i++) {
      Boolean hasValue = schema.next(ptr, i, maxOffset);
      if (hasValue == null) {
        break;
      }
      assertTrue(hasValue);
      PDataType type = PDataType.fromLiteral(values[i]);
      SortOrder sortOrder = sortOrders.get(i);
      Object value = type.toObject(ptr, schema.getField(i).getDataType(), sortOrder);
      assertEquals(values[i], value);
    }
    assertEquals(nExpectedValues, i);
    assertNull(schema.next(ptr, i, maxOffset));

    for (i--; i >= 0; i--) {
      Boolean hasValue = schema.previous(ptr, i, minOffset);
      if (hasValue == null) {
        break;
      }
      assertTrue(hasValue);
      PDataType type = PDataType.fromLiteral(values[i]);
      SortOrder sortOrder = sortOrders.get(i);
      Object value = type.toObject(ptr, schema.getField(i).getDataType(), sortOrder);
      assertEquals(values[i], value);
    }
    assertEquals(-1, i);
    assertNull(schema.previous(ptr, i, minOffset));
    conn.close();
  }
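
A hypothetical call to this helper (the column definitions, primary key, and values below are invented for illustration and are not taken from the original tests):

  assertIteration("k1 VARCHAR NOT NULL, k2 BIGINT NOT NULL", "k1, k2",
      new Object[] { "abc", 123L }, "");
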
Example #29
0
  public MutationPlan compile(UpsertStatement upsert) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    final int maxSize =
        services
            .getProps()
            .getInt(
                QueryServices.MAX_MUTATION_SIZE_ATTRIB,
                QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    final ColumnResolver resolver = FromCompiler.getResolverForMutation(upsert, connection);
    final TableRef tableRef = resolver.getTables().get(0);
    final PTable table = tableRef.getTable();
    if (table.getType() == PTableType.VIEW) {
      if (table.getViewType().isReadOnly()) {
        throw new ReadOnlyTableException(
            table.getSchemaName().getString(), table.getTableName().getString());
      }
    }
    boolean isSalted = table.getBucketNum() != null;
    final boolean isTenantSpecific = table.isMultiTenant() && connection.getTenantId() != null;
    final boolean isSharedViewIndex = table.getViewIndexId() != null;
    String tenantId = isTenantSpecific ? connection.getTenantId().getString() : null;
    int posOffset = isSalted ? 1 : 0;
    // Setup array of column indexes parallel to values that are going to be set
    List<ColumnName> columnNodes = upsert.getColumns();
    final List<PColumn> allColumns = table.getColumns();
    Set<PColumn> addViewColumnsToBe = Collections.emptySet();
    Set<PColumn> overlapViewColumnsToBe = Collections.emptySet();

    int[] columnIndexesToBe;
    int nColumnsToSet = 0;
    int[] pkSlotIndexesToBe;
    List<PColumn> targetColumns;
    if (table.getViewType() == ViewType.UPDATABLE) {
      addViewColumnsToBe = Sets.newLinkedHashSetWithExpectedSize(allColumns.size());
      for (PColumn column : allColumns) {
        if (column.getViewConstant() != null) {
          addViewColumnsToBe.add(column);
        }
      }
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    // Allow a full-row upsert if no columns, or only dynamic ones, are specified and the value count matches
    if (columnNodes.isEmpty()
        || columnNodes.size() == upsert.getTable().getDynamicColumns().size()) {
      nColumnsToSet = allColumns.size() - posOffset;
      columnIndexesToBe = new int[nColumnsToSet];
      pkSlotIndexesToBe = new int[columnIndexesToBe.length];
      targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
      targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
      int minPKPos = 0;
      if (isTenantSpecific) {
        PColumn tenantColumn = table.getPKColumns().get(minPKPos);
        columnIndexesToBe[minPKPos] = tenantColumn.getPosition();
        targetColumns.set(minPKPos, tenantColumn);
        minPKPos++;
      }
      if (isSharedViewIndex) {
        PColumn indexIdColumn = table.getPKColumns().get(minPKPos);
        columnIndexesToBe[minPKPos] = indexIdColumn.getPosition();
        targetColumns.set(minPKPos, indexIdColumn);
        minPKPos++;
      }
      for (int i = posOffset, j = 0; i < allColumns.size(); i++) {
        PColumn column = allColumns.get(i);
        if (SchemaUtil.isPKColumn(column)) {
          pkSlotIndexesToBe[i - posOffset] = j + posOffset;
          if (j++ < minPKPos) { // Skip, as it's already been set above
            continue;
          }
          minPKPos = 0;
        }
        columnIndexesToBe[i - posOffset + minPKPos] = i;
        targetColumns.set(i - posOffset + minPKPos, column);
      }
      if (!addViewColumnsToBe.isEmpty()) {
        // All view columns overlap in this case
        overlapViewColumnsToBe = addViewColumnsToBe;
        addViewColumnsToBe = Collections.emptySet();
      }
    } else {
      // Size for worst case
      int numColsInUpsert = columnNodes.size();
      nColumnsToSet =
          numColsInUpsert
              + addViewColumnsToBe.size()
              + (isTenantSpecific ? 1 : 0)
              + (isSharedViewIndex ? 1 : 0);
      columnIndexesToBe = new int[nColumnsToSet];
      pkSlotIndexesToBe = new int[columnIndexesToBe.length];
      targetColumns = Lists.newArrayListWithExpectedSize(columnIndexesToBe.length);
      targetColumns.addAll(Collections.<PColumn>nCopies(columnIndexesToBe.length, null));
      // TODO: necessary? So we'll get an ArrayIndexOutOfBoundsException if a slot isn't replaced
      Arrays.fill(columnIndexesToBe, -1);
      Arrays.fill(pkSlotIndexesToBe, -1);
      BitSet pkColumnsSet = new BitSet(table.getPKColumns().size());
      int i = 0;
      // Add the tenant column directly; resolving it through the resolver would fail
      if (isTenantSpecific) {
        PColumn tenantColumn = table.getPKColumns().get(i + posOffset);
        columnIndexesToBe[i] = tenantColumn.getPosition();
        pkColumnsSet.set(pkSlotIndexesToBe[i] = i + posOffset);
        targetColumns.set(i, tenantColumn);
        i++;
      }
      if (isSharedViewIndex) {
        PColumn indexIdColumn = table.getPKColumns().get(i + posOffset);
        columnIndexesToBe[i] = indexIdColumn.getPosition();
        pkColumnsSet.set(pkSlotIndexesToBe[i] = i + posOffset);
        targetColumns.set(i, indexIdColumn);
        i++;
      }
      for (ColumnName colName : columnNodes) {
        ColumnRef ref =
            resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName());
        PColumn column = ref.getColumn();
        if (IndexUtil.getViewConstantValue(column, ptr)) {
          if (overlapViewColumnsToBe.isEmpty()) {
            overlapViewColumnsToBe = Sets.newHashSetWithExpectedSize(addViewColumnsToBe.size());
          }
          nColumnsToSet--;
          overlapViewColumnsToBe.add(column);
          addViewColumnsToBe.remove(column);
        }
        columnIndexesToBe[i] = ref.getColumnPosition();
        targetColumns.set(i, column);
        if (SchemaUtil.isPKColumn(column)) {
          pkColumnsSet.set(pkSlotIndexesToBe[i] = ref.getPKSlotPosition());
        }
        i++;
      }
      for (PColumn column : addViewColumnsToBe) {
        columnIndexesToBe[i] = column.getPosition();
        targetColumns.set(i, column);
        if (SchemaUtil.isPKColumn(column)) {
          pkColumnsSet.set(pkSlotIndexesToBe[i] = SchemaUtil.getPKPosition(table, column));
        }
        i++;
      }
      for (i = posOffset; i < table.getPKColumns().size(); i++) {
        PColumn pkCol = table.getPKColumns().get(i);
        if (!pkColumnsSet.get(i)) {
          if (!pkCol.isNullable()) {
            throw new ConstraintViolationException(
                table.getName().getString()
                    + "."
                    + pkCol.getName().getString()
                    + " may not be null");
          }
        }
      }
    }

    List<ParseNode> valueNodes = upsert.getValues();
    QueryPlan plan = null;
    RowProjector rowProjectorToBe = null;
    final int nValuesToSet;
    boolean sameTable = false;
    boolean runOnServer = false;
    UpsertingParallelIteratorFactory upsertParallelIteratorFactoryToBe = null;
    final boolean isAutoCommit = connection.getAutoCommit();
    if (valueNodes == null) {
      SelectStatement select = upsert.getSelect();
      assert (select != null);
      select = SubselectRewriter.flatten(select, connection);
      ColumnResolver selectResolver = FromCompiler.getResolverForQuery(select, connection);
      select = StatementNormalizer.normalize(select, selectResolver);
      select = prependTenantAndViewConstants(table, select, tenantId, addViewColumnsToBe);
      sameTable =
          select.getFrom().size() == 1 && tableRef.equals(selectResolver.getTables().get(0));
      /* We can run the upsert in a coprocessor if:
       * 1) from has only 1 table and the into table matches from table
       * 2) the select query isn't doing aggregation
       * 3) autoCommit is on
       * 4) the table is not immutable, as the client is the one that figures out the additional
       *    puts for index tables.
       * 5) no limit clause
       * Otherwise, run the query to pull the data from the server
       * and populate the MutationState (up to a limit).
       */
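      // Illustrative example (not from the original source): with auto-commit on, a statement like
      //   UPSERT INTO T SELECT * FROM T WHERE V > 10
      // over a mutable, unsalted table with no LIMIT, DISTINCT, or aggregation satisfies these conditions.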
      runOnServer =
          sameTable
              && isAutoCommit
              && !table.isImmutableRows()
              && !select.isAggregate()
              && !select.isDistinct()
              && select.getLimit() == null
              && table.getBucketNum() == null;
      ParallelIteratorFactory parallelIteratorFactory;
      if (select.isAggregate() || select.isDistinct() || select.getLimit() != null) {
        parallelIteratorFactory = null;
      } else {
        // We can pipeline the upsert select instead of spooling everything to disk first,
        // if we don't have any post processing that's required.
        parallelIteratorFactory =
            upsertParallelIteratorFactoryToBe =
                new UpsertingParallelIteratorFactory(connection, tableRef);
      }
      // If we may be able to run on the server, add a hint that favors using the data table
      // if all else is equal.
      // TODO: it'd be nice if we could figure out in advance if the PK is potentially changing,
      // as this would disallow running on the server. We currently use the row projector we
      // get back to figure this out.
      HintNode hint = upsert.getHint();
      if (!upsert.getHint().hasHint(Hint.USE_INDEX_OVER_DATA_TABLE)) {
        hint = HintNode.create(hint, Hint.USE_DATA_OVER_INDEX_TABLE);
      }
      select = SelectStatement.create(select, hint);
      // Pass scan through if same table in upsert and select so that projection is computed
      // correctly
      // Use optimizer to choose the best plan
      plan =
          new QueryOptimizer(services)
              .optimize(statement, select, selectResolver, targetColumns, parallelIteratorFactory);
      runOnServer &= plan.getTableRef().equals(tableRef);
      rowProjectorToBe = plan.getProjector();
      nValuesToSet = rowProjectorToBe.getColumnCount();
      // Cannot auto-commit if doing aggregation, TopN, or if the table is salted.
      // Salting causes problems because the row may end up living in a different region.
    } else {
      nValuesToSet =
          valueNodes.size()
              + addViewColumnsToBe.size()
              + (isTenantSpecific ? 1 : 0)
              + (isSharedViewIndex ? 1 : 0);
    }
    final RowProjector projector = rowProjectorToBe;
    final UpsertingParallelIteratorFactory upsertParallelIteratorFactory =
        upsertParallelIteratorFactoryToBe;
    final QueryPlan queryPlan = plan;
    // Resize down to allow a subset of columns to be specifiable
    if (columnNodes.isEmpty() && columnIndexesToBe.length >= nValuesToSet) {
      nColumnsToSet = nValuesToSet;
      columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, nValuesToSet);
      pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, nValuesToSet);
    }

    if (nValuesToSet != nColumnsToSet) {
      throw new SQLExceptionInfo.Builder(SQLExceptionCode.UPSERT_COLUMN_NUMBERS_MISMATCH)
          .setMessage(
              "Numbers of columns: " + nColumnsToSet + ". Number of values: " + nValuesToSet)
          .build()
          .buildException();
    }

    final int[] columnIndexes = columnIndexesToBe;
    final int[] pkSlotIndexes = pkSlotIndexesToBe;
    final Set<PColumn> addViewColumns = addViewColumnsToBe;
    final Set<PColumn> overlapViewColumns = overlapViewColumnsToBe;

    // TODO: break this up into multiple functions
    ////////////////////////////////////////////////////////////////////
    // UPSERT SELECT
    /////////////////////////////////////////////////////////////////////
    if (valueNodes == null) {
      // Before we re-order, check that for updatable view columns
      // the projected expression either matches the column name or
      // is a constant with the same required value.
      throwIfNotUpdatable(tableRef, overlapViewColumnsToBe, targetColumns, projector, sameTable);

      ////////////////////////////////////////////////////////////////////
      // UPSERT SELECT run server-side (maybe)
      /////////////////////////////////////////////////////////////////////
      if (runOnServer) {
        // At most this array will grow bigger by the number of PK columns
        int[] allColumnsIndexes = Arrays.copyOf(columnIndexes, columnIndexes.length + nValuesToSet);
        int[] reverseColumnIndexes = new int[table.getColumns().size()];
        List<Expression> projectedExpressions =
            Lists.newArrayListWithExpectedSize(reverseColumnIndexes.length);
        Arrays.fill(reverseColumnIndexes, -1);
        for (int i = 0; i < nValuesToSet; i++) {
          projectedExpressions.add(projector.getColumnProjector(i).getExpression());
          reverseColumnIndexes[columnIndexes[i]] = i;
        }
        /*
         * Order projected columns and projected expressions with PK columns
         * leading, in slot-position order
         */
        int offset = table.getBucketNum() == null ? 0 : 1;
        for (int i = 0; i < table.getPKColumns().size() - offset; i++) {
          PColumn column = table.getPKColumns().get(i + offset);
          int pos = reverseColumnIndexes[column.getPosition()];
          if (pos == -1) {
            // Last PK column may be fixed width and nullable
            // We don't want to insert a null expression b/c
            // it's not valid to set a fixed width type to null.
            if (column.getDataType().isFixedWidth()) {
              continue;
            }
            // Add literal null for missing PK columns
            pos = projectedExpressions.size();
            Expression literalNull =
                LiteralExpression.newConstant(null, column.getDataType(), true);
            projectedExpressions.add(literalNull);
            allColumnsIndexes[pos] = column.getPosition();
          }
          // Swap select expression at pos with i
          Collections.swap(projectedExpressions, i, pos);
          // Swap column indexes and reverse column indexes too
          int tempPos = allColumnsIndexes[i];
          allColumnsIndexes[i] = allColumnsIndexes[pos];
          allColumnsIndexes[pos] = tempPos;
          reverseColumnIndexes[tempPos] = reverseColumnIndexes[i];
          reverseColumnIndexes[i] = i;
        }
        // If any pk slots are changing, be conservative and don't run this server side.
        // If the row ends up living in a different region, we'll get an error otherwise.
        for (int i = 0; i < table.getPKColumns().size(); i++) {
          PColumn column = table.getPKColumns().get(i);
          Expression source = projectedExpressions.get(i);
          if (source == null
              || !source.equals(
                  new ColumnRef(tableRef, column.getPosition()).newColumnExpression())) {
            // TODO: we could check the region boundaries to see if the pk will still be in it.
            runOnServer = false; // bail on running server side, since PK may be changing
            break;
          }
        }

        ////////////////////////////////////////////////////////////////////
        // UPSERT SELECT run server-side
        /////////////////////////////////////////////////////////////////////
        if (runOnServer) {
          // Iterate through columns being projected
          List<PColumn> projectedColumns =
              Lists.newArrayListWithExpectedSize(projectedExpressions.size());
          for (int i = 0; i < projectedExpressions.size(); i++) {
            // Must make new column if position has changed
            PColumn column = allColumns.get(allColumnsIndexes[i]);
            projectedColumns.add(column.getPosition() == i ? column : new PColumnImpl(column, i));
          }
          // Build table from projectedColumns
          PTable projectedTable = PTableImpl.makePTable(table, projectedColumns);

          SelectStatement select =
              SelectStatement.create(SelectStatement.COUNT_ONE, upsert.getHint());
          final RowProjector aggProjector =
              ProjectionCompiler.compile(queryPlan.getContext(), select, GroupBy.EMPTY_GROUP_BY);
          /*
           * Transfer over PTable representing subset of columns selected, but all PK columns.
           * Move columns setting PK first in pkSlot order, adding LiteralExpression of null for any missing ones.
           * Transfer over List<Expression> for projection.
           * In the region scan, evaluate expressions in order, collecting the first n columns for the PK and collecting non-PK columns into the mutation Map
           * Create the PRow and get the mutations, adding them to the batch
           */
          final StatementContext context = queryPlan.getContext();
          final Scan scan = context.getScan();
          scan.setAttribute(
              BaseScannerRegionObserver.UPSERT_SELECT_TABLE,
              UngroupedAggregateRegionObserver.serialize(projectedTable));
          scan.setAttribute(
              BaseScannerRegionObserver.UPSERT_SELECT_EXPRS,
              UngroupedAggregateRegionObserver.serialize(projectedExpressions));
          // Ignore order by - it has no impact
          final QueryPlan aggPlan =
              new AggregatePlan(
                  context,
                  select,
                  tableRef,
                  aggProjector,
                  null,
                  OrderBy.EMPTY_ORDER_BY,
                  null,
                  GroupBy.EMPTY_GROUP_BY,
                  null);
          return new MutationPlan() {

            @Override
            public PhoenixConnection getConnection() {
              return connection;
            }

            @Override
            public ParameterMetaData getParameterMetaData() {
              return queryPlan.getContext().getBindManager().getParameterMetaData();
            }

            @Override
            public StatementContext getContext() {
              return queryPlan.getContext();
            }

            @Override
            public MutationState execute() throws SQLException {
              ImmutableBytesWritable ptr = context.getTempPtr();
              tableRef.getTable().getIndexMaintainers(ptr);
              ServerCache cache = null;
              try {
                if (ptr.getLength() > 0) {
                  IndexMetaDataCacheClient client =
                      new IndexMetaDataCacheClient(connection, tableRef);
                  cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
                  byte[] uuidValue = cache.getId();
                  scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                }
                ResultIterator iterator = aggPlan.iterator();
                try {
                  Tuple row = iterator.next();
                  final long mutationCount =
                      (Long) aggProjector.getColumnProjector(0).getValue(row, PDataType.LONG, ptr);
                  return new MutationState(maxSize, connection) {
                    @Override
                    public long getUpdateCount() {
                      return mutationCount;
                    }
                  };
                } finally {
                  iterator.close();
                }
              } finally {
                if (cache != null) {
                  cache.close();
                }
              }
            }

            @Override
            public ExplainPlan getExplainPlan() throws SQLException {
              List<String> queryPlanSteps = aggPlan.getExplainPlan().getPlanSteps();
              List<String> planSteps =
                  Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
              planSteps.add("UPSERT ROWS");
              planSteps.addAll(queryPlanSteps);
              return new ExplainPlan(planSteps);
            }
          };
        }
      }

      ////////////////////////////////////////////////////////////////////
      // UPSERT SELECT run client-side
      /////////////////////////////////////////////////////////////////////
      return new MutationPlan() {

        @Override
        public PhoenixConnection getConnection() {
          return connection;
        }

        @Override
        public ParameterMetaData getParameterMetaData() {
          return queryPlan.getContext().getBindManager().getParameterMetaData();
        }

        @Override
        public StatementContext getContext() {
          return queryPlan.getContext();
        }

        @Override
        public MutationState execute() throws SQLException {
          ResultIterator iterator = queryPlan.iterator();
          if (upsertParallelIteratorFactory == null) {
            return upsertSelect(
                statement, tableRef, projector, iterator, columnIndexes, pkSlotIndexes);
          }
          upsertParallelIteratorFactory.setRowProjector(projector);
          upsertParallelIteratorFactory.setColumnIndexes(columnIndexes);
          upsertParallelIteratorFactory.setPkSlotIndexes(pkSlotIndexes);
          Tuple tuple;
          long totalRowCount = 0;
          while ((tuple = iterator.next()) != null) { // Runs query
            Cell kv = tuple.getValue(0);
            totalRowCount +=
                PDataType.LONG
                    .getCodec()
                    .decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
          }
          // Return total number of rows that have been updated. In the case of auto commit
          // being off, the mutations will all be in the mutation state of the current connection.
          return new MutationState(maxSize, statement.getConnection(), totalRowCount);
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
          List<String> queryPlanSteps = queryPlan.getExplainPlan().getPlanSteps();
          List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
          planSteps.add("UPSERT SELECT");
          planSteps.addAll(queryPlanSteps);
          return new ExplainPlan(planSteps);
        }
      };
    }

    ////////////////////////////////////////////////////////////////////
    // UPSERT VALUES
    /////////////////////////////////////////////////////////////////////
    int nodeIndex = 0;
    // initialize values with constant byte values first
    final byte[][] values = new byte[nValuesToSet][];
    if (isTenantSpecific) {
      values[nodeIndex++] = connection.getTenantId().getBytes();
    }
    if (isSharedViewIndex) {
      values[nodeIndex++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
    }
    final int nodeIndexOffset = nodeIndex;
    // Allocate array based on size of all columns in table,
    // since some values may not be set (if they're nullable).
    final StatementContext context = new StatementContext(statement, resolver, new Scan());
    UpsertValuesCompiler expressionBuilder = new UpsertValuesCompiler(context);
    final List<Expression> constantExpressions =
        Lists.newArrayListWithExpectedSize(valueNodes.size());
    // First build all the expressions, as with sequences we want to collect them all first
    // and initialize them in one batch
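    // Illustrative example (not from the original source): in a statement such as
    //   UPSERT INTO T VALUES (NEXT VALUE FOR my_seq, 'x')
    // the sequence expression is stateless, and its value is reserved together with any other
    // sequences when the tuple is created at execution time below.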
    for (ParseNode valueNode : valueNodes) {
      if (!valueNode.isStateless()) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.VALUE_IN_UPSERT_NOT_CONSTANT)
            .build()
            .buildException();
      }
      PColumn column = allColumns.get(columnIndexes[nodeIndex]);
      expressionBuilder.setColumn(column);
      Expression expression = valueNode.accept(expressionBuilder);
      if (expression.getDataType() != null
          && !expression.getDataType().isCastableTo(column.getDataType())) {
        throw TypeMismatchException.newException(
            expression.getDataType(),
            column.getDataType(),
            "expression: " + expression.toString() + " in column " + column);
      }
      constantExpressions.add(expression);
      nodeIndex++;
    }
    return new MutationPlan() {

      @Override
      public PhoenixConnection getConnection() {
        return connection;
      }

      @Override
      public ParameterMetaData getParameterMetaData() {
        return context.getBindManager().getParameterMetaData();
      }

      @Override
      public StatementContext getContext() {
        return context;
      }

      @Override
      public MutationState execute() throws SQLException {
        ImmutableBytesWritable ptr = context.getTempPtr();
        final SequenceManager sequenceManager = context.getSequenceManager();
        // Next evaluate all the expressions
        int nodeIndex = nodeIndexOffset;
        Tuple tuple =
            sequenceManager.getSequenceCount() == 0 ? null : sequenceManager.newSequenceTuple(null);
        for (Expression constantExpression : constantExpressions) {
          PColumn column = allColumns.get(columnIndexes[nodeIndex]);
          constantExpression.evaluate(tuple, ptr);
          Object value = null;
          if (constantExpression.getDataType() != null) {
            value =
                constantExpression
                    .getDataType()
                    .toObject(
                        ptr,
                        constantExpression.getSortOrder(),
                        constantExpression.getMaxLength(),
                        constantExpression.getScale());
            if (!constantExpression.getDataType().isCoercibleTo(column.getDataType(), value)) {
              throw TypeMismatchException.newException(
                  constantExpression.getDataType(),
                  column.getDataType(),
                  "expression: " + constantExpression.toString() + " in column " + column);
            }
            if (!column
                .getDataType()
                .isSizeCompatible(
                    ptr,
                    value,
                    constantExpression.getDataType(),
                    constantExpression.getMaxLength(),
                    constantExpression.getScale(),
                    column.getMaxLength(),
                    column.getScale())) {
              throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY)
                  .setColumnName(column.getName().getString())
                  .setMessage("value=" + constantExpression.toString())
                  .build()
                  .buildException();
            }
          }
          column
              .getDataType()
              .coerceBytes(
                  ptr,
                  value,
                  constantExpression.getDataType(),
                  constantExpression.getMaxLength(),
                  constantExpression.getScale(),
                  constantExpression.getSortOrder(),
                  column.getMaxLength(),
                  column.getScale(),
                  column.getSortOrder());
          if (overlapViewColumns.contains(column)
              && Bytes.compareTo(
                      ptr.get(),
                      ptr.getOffset(),
                      ptr.getLength(),
                      column.getViewConstant(),
                      0,
                      column.getViewConstant().length - 1)
                  != 0) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_UPDATE_VIEW_COLUMN)
                .setColumnName(column.getName().getString())
                .setMessage("value=" + constantExpression.toString())
                .build()
                .buildException();
          }
          values[nodeIndex] = ByteUtil.copyKeyBytesIfNecessary(ptr);
          nodeIndex++;
        }
        // Add columns based on view
        for (PColumn column : addViewColumns) {
          if (IndexUtil.getViewConstantValue(column, ptr)) {
            values[nodeIndex++] = ByteUtil.copyKeyBytesIfNecessary(ptr);
          } else {
            throw new IllegalStateException();
          }
        }
        Map<ImmutableBytesPtr, Map<PColumn, byte[]>> mutation = Maps.newHashMapWithExpectedSize(1);
        setValues(values, pkSlotIndexes, columnIndexes, tableRef.getTable(), mutation);
        return new MutationState(tableRef, mutation, 0, maxSize, connection);
      }

      @Override
      public ExplainPlan getExplainPlan() throws SQLException {
        List<String> planSteps = Lists.newArrayListWithExpectedSize(2);
        if (context.getSequenceManager().getSequenceCount() > 0) {
          planSteps.add(
              "CLIENT RESERVE " + context.getSequenceManager().getSequenceCount() + " SEQUENCES");
        }
        planSteps.add("PUT SINGLE ROW");
        return new ExplainPlan(planSteps);
      }
    };
  }
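
As a rough usage sketch (the table T and its columns K and V are assumptions for illustration; java.sql imports omitted), the two statement shapes this compiler turns into a MutationPlan are:

  try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
    // UPSERT VALUES: constant (stateless) expressions only; handled by the final MutationPlan above
    conn.setAutoCommit(false);
    conn.createStatement().executeUpdate("UPSERT INTO T (K, V) VALUES ('a', 1)");
    conn.commit();
    // UPSERT SELECT: compiled around a QueryPlan; with auto-commit on and no aggregation,
    // DISTINCT, or LIMIT over a mutable, unsalted table, it may run entirely server-side
    conn.setAutoCommit(true);
    conn.createStatement().executeUpdate("UPSERT INTO T (K, V) SELECT K, V + 1 FROM T");
  }
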
  // TODO: share this with ConnectionQueryServicesImpl
  @Override
  public void init(String url, Properties props) throws SQLException {
    if (initialized) {
      if (initializationException != null) {
        throw initializationException;
      }
      return;
    }
    synchronized (this) {
      if (initialized) {
        if (initializationException != null) {
          throw initializationException;
        }
        return;
      }
      SQLException sqlE = null;
      PhoenixConnection metaConnection = null;
      try {
        Properties scnProps = PropertiesUtil.deepCopy(props);
        scnProps.setProperty(
            PhoenixRuntime.CURRENT_SCN_ATTRIB,
            Long.toString(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP));
        scnProps.remove(PhoenixRuntime.TENANT_ID_ATTRIB);
        String globalUrl = JDBCUtil.removeProperty(url, PhoenixRuntime.TENANT_ID_ATTRIB);
        metaConnection = new PhoenixConnection(this, globalUrl, scnProps, newEmptyMetaData());
        try {
          metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_TABLE_METADATA);
        } catch (NewerTableAlreadyExistsException ignore) {
          // Ignore, as this will happen if the SYSTEM.TABLE already exists at this fixed timestamp.
          // A TableAlreadyExistsException is not thrown, since the table only exists *after* this
          // fixed timestamp.
        }
        try {
          int nSaltBuckets = getSequenceSaltBuckets();
          String createTableStatement = Sequence.getCreateTableStatement(nSaltBuckets);
          metaConnection.createStatement().executeUpdate(createTableStatement);
        } catch (NewerTableAlreadyExistsException ignore) {
          // Ignore, as this will happen if the SYSTEM.SEQUENCE already exists at this fixed
          // timestamp.
          // A TableAlreadyExistsException is not thrown, since the table only exists *after* this
          // fixed timestamp.
        }
        try {
          metaConnection
              .createStatement()
              .executeUpdate(QueryConstants.CREATE_STATS_TABLE_METADATA);
        } catch (NewerTableAlreadyExistsException ignore) {
          // Ignore, as this will happen if the SYSTEM.STATS table already exists at this fixed
          // timestamp.
          // A TableAlreadyExistsException is not thrown, since the table only exists *after* this
          // fixed timestamp.
        }

        try {
          metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_FUNCTION_METADATA);
        } catch (NewerTableAlreadyExistsException ignore) {
        }
      } catch (SQLException e) {
        sqlE = e;
      } finally {
        try {
          if (metaConnection != null) metaConnection.close();
        } catch (SQLException e) {
          if (sqlE != null) {
            sqlE.setNextException(e);
          } else {
            sqlE = e;
          }
        } finally {
          try {
            if (sqlE != null) {
              initializationException = sqlE;
              throw sqlE;
            }
          } finally {
            initialized = true;
          }
        }
      }
    }
  }
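
This init method is not normally invoked directly; it runs when the driver obtains its ConnectionQueryServices for the first connection. A minimal sketch (the URL is an assumption; any Phoenix JDBC URL applies):

  Properties props = new Properties();
  // The first DriverManager.getConnection(...) for this services instance ends up calling
  // init(url, props), which creates the SYSTEM tables above if they do not already exist.
  Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
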