Example #1
  public MutationState dropColumn(DropColumnStatement statement) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
      connection.setAutoCommit(false);
      TableName tableNameNode = statement.getTableName();
      String schemaName = tableNameNode.getSchemaName();
      String tableName = tableNameNode.getTableName();
      PTable table = getLatestTable(schemaName, tableName); // TODO: Do in resolver?
      boolean retried = false;
      while (true) {
        final ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
        ColumnRef columnRef = null;
        try {
          columnRef = resolver.resolveColumn((ColumnParseNode) statement.getColumnRef());
        } catch (ColumnNotFoundException e) {
          if (statement.ifExists()) {
            return new MutationState(0, connection);
          }
          throw e;
        }
        TableRef tableRef = columnRef.getTableRef();
        PColumn columnToDrop = columnRef.getColumn();
        if (SchemaUtil.isPKColumn(columnToDrop)) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_PK)
              .setColumnName(columnToDrop.getName().getString())
              .build()
              .buildException();
        }
        int columnCount = table.getColumns().size() - 1;
        String familyName = null;
        List<String> binds = Lists.newArrayListWithExpectedSize(4);
        StringBuilder buf =
            new StringBuilder(
                "DELETE FROM " + TYPE_SCHEMA + ".\"" + TYPE_TABLE + "\" WHERE " + TABLE_SCHEM_NAME);
        if (schemaName == null || schemaName.length() == 0) {
          buf.append(" IS NULL AND ");
        } else {
          buf.append(" = ? AND ");
          binds.add(schemaName);
        }
        buf.append(TABLE_NAME_NAME + " = ? AND " + COLUMN_NAME + " = ? AND " + TABLE_CAT_NAME);
        binds.add(tableName);
        binds.add(columnToDrop.getName().getString());
        if (columnToDrop.getFamilyName() == null) {
          buf.append(" IS NULL");
        } else {
          buf.append(" = ?");
          binds.add(familyName = columnToDrop.getFamilyName().getString());
        }

        PreparedStatement colDelete = connection.prepareStatement(buf.toString());
        for (int i = 0; i < binds.size(); i++) {
          colDelete.setString(i + 1, binds.get(i));
        }
        colDelete.execute();

        PreparedStatement colUpdate = connection.prepareStatement(UPDATE_COLUMN_POSITION);
        colUpdate.setString(1, schemaName);
        colUpdate.setString(2, tableName);
        for (int i = columnToDrop.getPosition() + 1; i < table.getColumns().size(); i++) {
          PColumn column = table.getColumns().get(i);
          colUpdate.setString(3, column.getName().getString());
          colUpdate.setString(
              4, column.getFamilyName() == null ? null : column.getFamilyName().getString());
          colUpdate.setInt(5, i);
          colUpdate.execute();
        }
        final long seqNum = table.getSequenceNumber() + 1;
        PreparedStatement tableUpsert = connection.prepareStatement(MUTATE_TABLE);
        tableUpsert.setString(1, schemaName);
        tableUpsert.setString(2, tableName);
        tableUpsert.setString(3, table.getType().getSerializedValue());
        tableUpsert.setLong(4, seqNum);
        tableUpsert.setInt(5, columnCount);
        tableUpsert.execute();

        final List<Mutation> tableMetaData = connection.getMutationState().toMutations();
        connection.rollback();
        // If we're dropping the last KV column, we have to pass an indication along to the
        // dropColumn call to populate a new empty KV column
        byte[] emptyCF = null;
        if (table.getType() != PTableType.VIEW
            && !SchemaUtil.isPKColumn(columnToDrop)
            && table.getColumnFamilies().get(0).getName().equals(columnToDrop.getFamilyName())
            && table.getColumnFamilies().get(0).getColumns().size() == 1) {
          emptyCF =
              SchemaUtil.getEmptyColumnFamily(
                  table.getColumnFamilies().subList(1, table.getColumnFamilies().size()));
        }
        MetaDataMutationResult result =
            connection
                .getQueryServices()
                .dropColumn(
                    tableMetaData,
                    emptyCF != null
                            && Bytes.compareTo(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES) == 0
                        ? emptyCF
                        : null);
        try {
          MutationCode code = processMutationResult(schemaName, tableName, result);
          if (code == MutationCode.COLUMN_NOT_FOUND) {
            connection.addTable(schemaName, result.getTable());
            if (!statement.ifExists()) {
              throw new ColumnNotFoundException(
                  schemaName, tableName, familyName, columnToDrop.getName().getString());
            }
            return new MutationState(0, connection);
          }
          connection.removeColumn(
              schemaName,
              tableName,
              familyName,
              columnToDrop.getName().getString(),
              seqNum,
              result.getMutationTime());
          // If we have a VIEW, then only delete the metadata, and leave the table data alone
          if (table.getType() != PTableType.VIEW) {
            connection.setAutoCommit(true);
            Long scn = connection.getSCN();
            // Delete everything in the column. You'll still be able to do queries at earlier
            // timestamps
            long ts = (scn == null ? result.getMutationTime() : scn);
            MutationPlan plan =
                new PostDDLCompiler(connection)
                    .compile(tableRef, emptyCF, Collections.singletonList(columnToDrop), ts);
            return connection.getQueryServices().updateData(plan);
          }
          return new MutationState(0, connection);
        } catch (ConcurrentTableMutationException e) {
          if (retried) {
            throw e;
          }
          table = connection.getPMetaData().getSchema(schemaName).getTable(tableName);
          retried = true;
        }
      }
    } finally {
      connection.setAutoCommit(wasAutoCommit);
    }
  }
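For context, a minimal sketch of how this method is typically reached: Phoenix parses ALTER TABLE ... DROP COLUMN DDL issued over JDBC and dispatches it here. The connection URL, schema, table, and column names below are hypothetical.

import java.sql.Connection;
import java.sql.DriverManager;

public class DropColumnExample {
  public static void main(String[] args) throws Exception {
    // Assumes the Phoenix JDBC driver is on the classpath and an HBase
    // quorum is reachable at localhost; all names here are hypothetical.
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
      // IF EXISTS corresponds to statement.ifExists() above: a missing column
      // yields an empty MutationState instead of a ColumnNotFoundException.
      conn.createStatement().execute(
          "ALTER TABLE my_schema.my_table DROP COLUMN IF EXISTS a.my_col");
    }
  }
}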
Example #2
  public MutationState createTable(CreateTableStatement statement, byte[][] splits)
      throws SQLException {
    PTableType tableType = statement.getTableType();
    boolean isView = tableType == PTableType.VIEW;
    if (isView && !statement.getProps().isEmpty()) {
      throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_TABLE_CONFIG)
          .build()
          .buildException();
    }
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
      connection.setAutoCommit(false);
      TableName tableNameNode = statement.getTableName();
      String schemaName = tableNameNode.getSchemaName();
      String tableName = tableNameNode.getTableName();

      PrimaryKeyConstraint pkConstraint = statement.getPrimaryKeyConstraint();
      String pkName = null;
      Set<String> pkColumns = Collections.<String>emptySet();
      Iterator<String> pkColumnsIterator = Iterators.emptyIterator();
      if (pkConstraint != null) {
        pkColumns = pkConstraint.getColumnNames();
        pkColumnsIterator = pkColumns.iterator();
        pkName = pkConstraint.getName();
      }

      List<ColumnDef> colDefs = statement.getColumnDefs();
      List<PColumn> columns = Lists.newArrayListWithExpectedSize(colDefs.size());
      PreparedStatement colUpsert = connection.prepareStatement(INSERT_COLUMN);
      int columnOrdinal = 0;
      Map<String, PName> familyNames = Maps.newLinkedHashMap();
      boolean isPK = false;
      for (ColumnDef colDef : colDefs) {
        if (colDef.isPK()) {
          if (isPK) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_ALREADY_EXISTS)
                .setColumnName(colDef.getColumnDefName().getColumnName().getName())
                .build()
                .buildException();
          }
          isPK = true;
        }
        PColumn column = newColumn(columnOrdinal++, colDef, pkConstraint);
        if (SchemaUtil.isPKColumn(column)) {
          // TODO: remove this constraint?
          if (!pkColumns.isEmpty()
              && !column.getName().getString().equals(pkColumnsIterator.next())) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_OUT_OF_ORDER)
                .setSchemaName(schemaName)
                .setTableName(tableName)
                .setColumnName(column.getName().getString())
                .build()
                .buildException();
          }
        }
        columns.add(column);
        if (colDef.getDataType() == PDataType.BINARY && colDefs.size() > 1) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.BINARY_IN_ROW_KEY)
              .setSchemaName(schemaName)
              .setTableName(tableName)
              .setColumnName(column.getName().getString())
              .build()
              .buildException();
        }
        if (column.getFamilyName() != null) {
          familyNames.put(column.getFamilyName().getString(), column.getFamilyName());
        }
      }
      if (!isPK && pkColumns.isEmpty()) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_MISSING)
            .setSchemaName(schemaName)
            .setTableName(tableName)
            .build()
            .buildException();
      }

      List<Pair<byte[], Map<String, Object>>> familyPropList =
          Lists.newArrayListWithExpectedSize(familyNames.size());
      Map<String, Object> commonFamilyProps = Collections.emptyMap();
      Map<String, Object> tableProps = Collections.emptyMap();
      if (!statement.getProps().isEmpty()) {
        if (statement.isView()) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.VIEW_WITH_PROPERTIES)
              .build()
              .buildException();
        }
        for (String familyName : statement.getProps().keySet()) {
          if (!familyName.equals(QueryConstants.ALL_FAMILY_PROPERTIES_KEY)) {
            if (familyNames.get(familyName) == null) {
              throw new SQLExceptionInfo.Builder(SQLExceptionCode.PROPERTIES_FOR_FAMILY)
                  .setFamilyName(familyName)
                  .build()
                  .buildException();
            }
          }
        }
        commonFamilyProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());
        tableProps = Maps.newHashMapWithExpectedSize(statement.getProps().size());

        Collection<Pair<String, Object>> props =
            statement.getProps().get(QueryConstants.ALL_FAMILY_PROPERTIES_KEY);
        // Somewhat hacky way of determining if property is for HColumnDescriptor or
        // HTableDescriptor
        HColumnDescriptor defaultDescriptor =
            new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
        for (Pair<String, Object> prop : props) {
          if (defaultDescriptor.getValue(prop.getFirst()) != null) {
            commonFamilyProps.put(prop.getFirst(), prop.getSecond());
          } else {
            tableProps.put(prop.getFirst(), prop.getSecond());
          }
        }
      }

      for (PName familyName : familyNames.values()) {
        Collection<Pair<String, Object>> props = statement.getProps().get(familyName.getString());
        if (props.isEmpty()) {
          familyPropList.add(
              new Pair<byte[], Map<String, Object>>(familyName.getBytes(), commonFamilyProps));
        } else {
          Map<String, Object> combinedFamilyProps =
              Maps.newHashMapWithExpectedSize(props.size() + commonFamilyProps.size());
          combinedFamilyProps.putAll(commonFamilyProps);
          for (Pair<String, Object> prop : props) {
            combinedFamilyProps.put(prop.getFirst(), prop.getSecond());
          }
          familyPropList.add(
              new Pair<byte[], Map<String, Object>>(familyName.getBytes(), combinedFamilyProps));
        }
      }

      // Bootstrapping for our SYSTEM.TABLE that creates itself before it exists
      if (tableType == PTableType.SYSTEM) {
        PTable table =
            new PTableImpl(
                new PNameImpl(tableName),
                tableType,
                MetaDataProtocol.MIN_TABLE_TIMESTAMP,
                0,
                QueryConstants.SYSTEM_TABLE_PK_NAME,
                null,
                columns);
        connection.addTable(schemaName, table);
      }

      for (PColumn column : columns) {
        addColumnMutation(schemaName, tableName, column, colUpsert);
      }

      Integer saltBucketNum = (Integer) tableProps.remove(PhoenixDatabaseMetaData.SALT_BUCKETS);
      if (saltBucketNum != null
          && (saltBucketNum <= 0 || saltBucketNum > SaltingUtil.MAX_BUCKET_NUM)) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.INVALID_BUCKET_NUM)
            .build()
            .buildException();
      }

      PreparedStatement tableUpsert = connection.prepareStatement(CREATE_TABLE);
      tableUpsert.setString(1, schemaName);
      tableUpsert.setString(2, tableName);
      tableUpsert.setString(3, tableType.getSerializedValue());
      tableUpsert.setInt(4, 0);
      tableUpsert.setInt(5, columnOrdinal);
      if (saltBucketNum != null) {
        tableUpsert.setInt(6, saltBucketNum);
      } else {
        tableUpsert.setNull(6, Types.INTEGER);
      }
      tableUpsert.setString(7, pkName);
      tableUpsert.execute();

      final List<Mutation> tableMetaData = connection.getMutationState().toMutations();
      connection.rollback();

      MetaDataMutationResult result =
          connection
              .getQueryServices()
              .createTable(tableMetaData, isView, tableProps, familyPropList, splits);
      MutationCode code = result.getMutationCode();
      switch (code) {
        case TABLE_ALREADY_EXISTS:
          connection.addTable(schemaName, result.getTable());
          if (!statement.ifNotExists()) {
            throw new TableAlreadyExistsException(schemaName, tableName);
          }
          break;
        case NEWER_TABLE_FOUND:
          // TODO: add table if in result?
          throw new NewerTableAlreadyExistsException(schemaName, tableName);
        case UNALLOWED_TABLE_MUTATION:
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE)
              .setSchemaName(schemaName)
              .setTableName(tableName)
              .build()
              .buildException();
        default:
          PTable table =
              new PTableImpl(
                  new PNameImpl(tableName),
                  tableType,
                  result.getMutationTime(),
                  0,
                  pkName,
                  saltBucketNum,
                  columns);
          connection.addTable(schemaName, table);
          if (tableType == PTableType.USER) {
            connection.setAutoCommit(true);
            // Delete everything in the table. You'll still be able to do queries at earlier
            // timestamps
            Long scn = connection.getSCN();
            long ts = (scn == null ? result.getMutationTime() : scn);
            PSchema schema =
                new PSchemaImpl(
                    schemaName,
                    ImmutableMap.<String, PTable>of(table.getName().getString(), table));
            TableRef tableRef = new TableRef(null, table, schema, ts);
            byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(table.getColumnFamilies());
            MutationPlan plan =
                new PostDDLCompiler(connection).compile(tableRef, emptyCF, null, ts);
            return connection.getQueryServices().updateData(plan);
          }
          break;
      }
      return new MutationState(0, connection);
    } finally {
      connection.setAutoCommit(wasAutoCommit);
    }
  }
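A hedged usage sketch for the method above: CREATE TABLE DDL over JDBC, with SALT_BUCKETS landing in tableProps and a family-scoped property landing in familyPropList. All names are hypothetical.

import java.sql.Connection;
import java.sql.DriverManager;

public class CreateTableExample {
  public static void main(String[] args) throws Exception {
    // Assumes the Phoenix JDBC driver and a local HBase quorum; hypothetical names.
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
      // SALT_BUCKETS is pulled out of tableProps above and validated against
      // SaltingUtil.MAX_BUCKET_NUM; a.VERSIONS is an HColumnDescriptor property
      // scoped to column family 'a', so it ends up in familyPropList.
      conn.createStatement().execute(
          "CREATE TABLE IF NOT EXISTS my_schema.my_table ("
              + " id BIGINT NOT NULL PRIMARY KEY,"
              + " a.val VARCHAR,"
              + " b.num DECIMAL(10,2))"
              + " SALT_BUCKETS=4, a.VERSIONS=3");
      // IF NOT EXISTS corresponds to statement.ifNotExists() above: an existing
      // table is tolerated instead of raising TableAlreadyExistsException.
    }
  }
}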
Example #3
  public MutationState addColumn(AddColumnStatement statement) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
      connection.setAutoCommit(false);
      TableName tableNameNode = statement.getTableName();
      String schemaName = tableNameNode.getSchemaName();
      String tableName = tableNameNode.getTableName();

      PTable table = getLatestTable(schemaName, tableName);
      PSchema schema = connection.getPMetaData().getSchema(schemaName);
      boolean retried = false;
      while (true) {
        int ordinalPosition = table.getColumns().size();

        // TODO: disallow adding columns if last column is fixed width and nullable
        List<PColumn> columns = Lists.newArrayListWithExpectedSize(1);
        ColumnDef colDef = statement.getColumnDef();
        if (!colDef.isNull() && colDef.isPK()) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.NOT_NULLABLE_COLUMN_IN_ROW_KEY)
              .setColumnName(colDef.getColumnDefName().getColumnName().getName())
              .build()
              .buildException();
        }

        PreparedStatement colUpsert = connection.prepareStatement(INSERT_COLUMN);
        Pair<byte[], Map<String, Object>> family = null;
        PColumn column = newColumn(ordinalPosition++, colDef, null);
        addColumnMutation(schemaName, tableName, column, colUpsert);
        columns.add(column);
        if (column.getFamilyName() != null) {
          family =
              new Pair<byte[], Map<String, Object>>(
                  column.getFamilyName().getBytes(), statement.getProps());
        }
        final long seqNum = table.getSequenceNumber() + 1;
        PreparedStatement tableUpsert = connection.prepareStatement(MUTATE_TABLE);
        tableUpsert.setString(1, schemaName);
        tableUpsert.setString(2, tableName);
        tableUpsert.setString(3, table.getType().getSerializedValue());
        tableUpsert.setLong(4, seqNum);
        tableUpsert.setInt(5, ordinalPosition);
        tableUpsert.execute();

        final List<Mutation> tableMetaData = connection.getMutationState().toMutations();
        connection.rollback();
        byte[] emptyCF = null;
        if (table.getType() != PTableType.VIEW
            && family != null
            && table.getColumnFamilies().isEmpty()) {
          emptyCF = family.getFirst();
        }
        MetaDataMutationResult result =
            connection
                .getQueryServices()
                .addColumn(tableMetaData, table.getType() == PTableType.VIEW, family);
        try {
          MutationCode code = processMutationResult(schemaName, tableName, result);
          if (code == MutationCode.COLUMN_ALREADY_EXISTS) {
            connection.addTable(schemaName, result.getTable());
            if (!statement.ifNotExists()) {
              throw new ColumnAlreadyExistsException(
                  schemaName, tableName, SchemaUtil.findExistingColumn(result.getTable(), columns));
            }
            return new MutationState(0, connection);
          }
          connection.addColumn(schemaName, tableName, columns, seqNum, result.getMutationTime());
          if (emptyCF != null) {
            Long scn = connection.getSCN();
            connection.setAutoCommit(true);
            // Populate the new empty KV column in existing rows, since we're adding the
            // table's first column family. You'll still be able to do queries at earlier
            // timestamps
            long ts = (scn == null ? result.getMutationTime() : scn);
            TableRef tableRef = new TableRef(null, table, schema, ts);
            MutationPlan plan =
                new PostDDLCompiler(connection).compile(tableRef, emptyCF, null, ts);
            return connection.getQueryServices().updateData(plan);
          }
          return new MutationState(0, connection);
        } catch (ConcurrentTableMutationException e) {
          if (retried) {
            throw e;
          }
          table = connection.getPMetaData().getSchema(schemaName).getTable(tableName);
          retried = true;
        }
      }
    } finally {
      connection.setAutoCommit(wasAutoCommit);
    }
  }
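A minimal usage sketch for addColumn, again over JDBC (hypothetical names).

import java.sql.Connection;
import java.sql.DriverManager;

public class AddColumnExample {
  public static void main(String[] args) throws Exception {
    // Assumes the Phoenix JDBC driver and a local HBase quorum; hypothetical names.
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
      // IF NOT EXISTS corresponds to statement.ifNotExists() above: an existing
      // column yields an empty MutationState instead of ColumnAlreadyExistsException.
      conn.createStatement().execute(
          "ALTER TABLE my_schema.my_table ADD IF NOT EXISTS b.new_col INTEGER");
    }
  }
}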
  public MutationPlan compile(UpsertStatement upsert, List<Object> binds) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    final int maxSize =
        services
            .getProps()
            .getInt(
                QueryServices.MAX_MUTATION_SIZE_ATTRIB,
                QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    final ColumnResolver resolver = FromCompiler.getResolver(upsert, connection);
    final TableRef tableRef = resolver.getTables().get(0);
    PTable table = tableRef.getTable();
    if (table.getType() == PTableType.VIEW) {
      throw new ReadOnlyTableException("Mutations not allowed for a view (" + tableRef + ")");
    }
    Scan scan = new Scan();
    final StatementContext context =
        new StatementContext(connection, resolver, binds, upsert.getBindCount(), scan);
    // Set up an array of column indexes parallel to the values that are going to be set
    List<ColumnName> columnNodes = upsert.getColumns();
    List<PColumn> allColumns = table.getColumns();

    int[] columnIndexesToBe;
    int[] pkSlotIndexesToBe;
    PColumn[] targetColumns;
    // Allow a full-row upsert if no columns (or only dynamic ones) are specified and the value count matches
    if (columnNodes.isEmpty()
        || columnNodes.size() == upsert.getTable().getDynamicColumns().size()) {
      columnIndexesToBe = new int[allColumns.size()];
      pkSlotIndexesToBe = new int[columnIndexesToBe.length];
      targetColumns = new PColumn[columnIndexesToBe.length];
      int j = table.getBucketNum() == null ? 0 : 1; // Skip over the salting byte.
      for (int i = 0; i < allColumns.size(); i++) {
        columnIndexesToBe[i] = i;
        targetColumns[i] = allColumns.get(i);
        if (SchemaUtil.isPKColumn(allColumns.get(i))) {
          pkSlotIndexesToBe[i] = j++;
        }
      }
    } else {
      columnIndexesToBe = new int[columnNodes.size()];
      pkSlotIndexesToBe = new int[columnIndexesToBe.length];
      targetColumns = new PColumn[columnIndexesToBe.length];
      // TODO: necessary? Filling with -1 ensures an ArrayIndexOutOfBoundsException
      // if a slot isn't replaced below.
      Arrays.fill(columnIndexesToBe, -1);
      Arrays.fill(pkSlotIndexesToBe, -1);
      BitSet pkColumnsSet = new BitSet(table.getPKColumns().size());
      for (int i = 0; i < columnNodes.size(); i++) {
        ColumnName colName = columnNodes.get(i);
        ColumnRef ref =
            resolver.resolveColumn(null, colName.getFamilyName(), colName.getColumnName());
        columnIndexesToBe[i] = ref.getColumnPosition();
        targetColumns[i] = ref.getColumn();
        if (SchemaUtil.isPKColumn(ref.getColumn())) {
          pkColumnsSet.set(pkSlotIndexesToBe[i] = ref.getPKSlotPosition());
        }
      }
      int i = table.getBucketNum() == null ? 0 : 1;
      for (; i < table.getPKColumns().size(); i++) {
        PColumn pkCol = table.getPKColumns().get(i);
        if (!pkColumnsSet.get(i)) {
          if (!pkCol.isNullable()) {
            throw new ConstraintViolationException(
                table.getName().getString()
                    + "."
                    + pkCol.getName().getString()
                    + " may not be null");
          }
        }
      }
    }

    List<ParseNode> valueNodes = upsert.getValues();
    QueryPlan plan = null;
    RowProjector projector = null;
    int nValuesToSet;
    boolean runOnServer = false;
    if (valueNodes == null) {
      SelectStatement select = upsert.getSelect();
      assert (select != null);
      TableRef selectTableRef = FromCompiler.getResolver(select, connection).getTables().get(0);
      boolean sameTable = tableRef.equals(selectTableRef);
      // Pass the scan through if the upsert and select are on the same table, so that the
      // projection is computed correctly
      QueryCompiler compiler =
          new QueryCompiler(connection, 0, sameTable ? scan : new Scan(), targetColumns);
      plan = compiler.compile(select, binds);
      projector = plan.getProjector();
      nValuesToSet = projector.getColumnCount();
      // Cannot run server-side if doing aggregation or topN, or if the table is salted.
      // Salting causes problems because the row may end up living in a different region.
      runOnServer =
          plan.getGroupBy() == null
              && sameTable
              && select.getOrderBy().isEmpty()
              && table.getBucketNum() == null;
    } else {
      nValuesToSet = valueNodes.size();
    }
    final QueryPlan queryPlan = plan;
    // Resize down to allow a subset of columns to be specifiable
    if (columnNodes.isEmpty()) {
      columnIndexesToBe = Arrays.copyOf(columnIndexesToBe, nValuesToSet);
      pkSlotIndexesToBe = Arrays.copyOf(pkSlotIndexesToBe, nValuesToSet);
    }

    if (nValuesToSet != columnIndexesToBe.length) {
      throw new SQLExceptionInfo.Builder(SQLExceptionCode.UPSERT_COLUMN_NUMBERS_MISMATCH)
          .setMessage(
              "Numbers of columns: "
                  + columnIndexesToBe.length
                  + ". Number of values: "
                  + nValuesToSet)
          .build()
          .buildException();
    }

    final int[] columnIndexes = columnIndexesToBe;
    final int[] pkSlotIndexes = pkSlotIndexesToBe;

    // TODO: break this up into multiple functions
    ////////////////////////////////////////////////////////////////////
    // UPSERT SELECT
    /////////////////////////////////////////////////////////////////////
    if (valueNodes == null) {
      /* We can run the upsert in a coprocessor if:
       * 1) the target (INTO) table matches the source (FROM) table
       * 2) the select query isn't doing aggregation
       * 3) autoCommit is on
       * 4) the query isn't a topN query
       * Otherwise, run the query to pull the data from the server
       * and populate the MutationState (up to a limit).
       */
      final boolean isAutoCommit = connection.getAutoCommit();
      runOnServer &= isAutoCommit;

      ////////////////////////////////////////////////////////////////////
      // UPSERT SELECT run server-side (maybe)
      /////////////////////////////////////////////////////////////////////
      if (runOnServer) {
        // This array will grow by at most the number of PK columns
        int[] allColumnsIndexes = Arrays.copyOf(columnIndexes, columnIndexes.length + nValuesToSet);
        int[] reverseColumnIndexes = new int[table.getColumns().size()];
        List<Expression> projectedExpressions =
            Lists.newArrayListWithExpectedSize(reverseColumnIndexes.length);
        Arrays.fill(reverseColumnIndexes, -1);
        for (int i = 0; i < nValuesToSet; i++) {
          projectedExpressions.add(projector.getColumnProjector(i).getExpression());
          reverseColumnIndexes[columnIndexes[i]] = i;
        }
        /*
         * Order the projected columns and projected expressions with the PK columns
         * leading, in slot-position order
         */
        int offset = table.getBucketNum() == null ? 0 : 1;
        for (int i = 0; i < table.getPKColumns().size() - offset; i++) {
          PColumn column = table.getPKColumns().get(i + offset);
          int pos = reverseColumnIndexes[column.getPosition()];
          if (pos == -1) {
            // The last PK column may be fixed width and nullable.
            // We don't want to insert a null expression because
            // it's not valid to set a fixed width type to null.
            if (column.getDataType().isFixedWidth()) {
              continue;
            }
            // Add literal null for missing PK columns
            pos = projectedExpressions.size();
            Expression literalNull = LiteralExpression.newConstant(null, column.getDataType());
            projectedExpressions.add(literalNull);
            allColumnsIndexes[pos] = column.getPosition();
          }
          // Swap select expression at pos with i
          Collections.swap(projectedExpressions, i, pos);
          // Swap column indexes and reverse column indexes too
          int tempPos = allColumnsIndexes[i];
          allColumnsIndexes[i] = allColumnsIndexes[pos];
          allColumnsIndexes[pos] = tempPos;
          reverseColumnIndexes[tempPos] = reverseColumnIndexes[i];
          reverseColumnIndexes[i] = i;
        }
        // If any pk slots are changing, be conservative and don't run this server side.
        // If the row ends up living in a different region, we'll get an error otherwise.
        for (int i = 0; i < table.getPKColumns().size(); i++) {
          PColumn column = table.getPKColumns().get(i);
          Expression source = projectedExpressions.get(i);
          if (source == null
              || !source.equals(
                  new ColumnRef(tableRef, column.getPosition()).newColumnExpression())) {
            // TODO: we could check the region boundaries to see if the pk will still be in it.
            runOnServer = false; // bail on running server side, since PK may be changing
            break;
          }
        }

        ////////////////////////////////////////////////////////////////////
        // UPSERT SELECT run server-side
        /////////////////////////////////////////////////////////////////////
        if (runOnServer) {
          // Iterate through columns being projected
          List<PColumn> projectedColumns =
              Lists.newArrayListWithExpectedSize(projectedExpressions.size());
          for (int i = 0; i < projectedExpressions.size(); i++) {
            // Must make new column if position has changed
            PColumn column = allColumns.get(allColumnsIndexes[i]);
            projectedColumns.add(column.getPosition() == i ? column : new PColumnImpl(column, i));
          }
          // Build table from projectedColumns
          PTable projectedTable =
              PTableImpl.makePTable(
                  table.getName(),
                  table.getType(),
                  table.getTimeStamp(),
                  table.getSequenceNumber(),
                  table.getPKName(),
                  table.getBucketNum(),
                  projectedColumns);

          // Remove the projection of the empty column, since it can lead to problems when
          // building another projection using this same scan. TODO: move the projection code
          // to a later stage, like QueryPlan.newScanner, to prevent having to do this.
          ScanUtil.removeEmptyColumnFamily(context.getScan(), table);
          List<AliasedNode> select =
              Collections.<AliasedNode>singletonList(
                  NODE_FACTORY.aliasedNode(
                      null,
                      NODE_FACTORY.function(
                          CountAggregateFunction.NORMALIZED_NAME, LiteralParseNode.STAR)));
          // Ignore order by - it has no impact
          final RowProjector aggProjector =
              ProjectionCompiler.getRowProjector(
                  context, select, false, GroupBy.EMPTY_GROUP_BY, OrderBy.EMPTY_ORDER_BY, null);
          /*
           * Transfer over the PTable representing the subset of columns selected, but all PK columns.
           * Move columns so the PK comes first, in pkSlot order, adding a LiteralExpression of null for any missing ones.
           * Transfer over the List<Expression> for the projection.
           * In the region scan, evaluate the expressions in order, collecting the first n columns for the PK and the non-PK columns into the mutation Map.
           * Create the PRow and get the mutations, adding them to the batch.
           */
          scan.setAttribute(
              UngroupedAggregateRegionObserver.UPSERT_SELECT_TABLE,
              UngroupedAggregateRegionObserver.serialize(projectedTable));
          scan.setAttribute(
              UngroupedAggregateRegionObserver.UPSERT_SELECT_EXPRS,
              UngroupedAggregateRegionObserver.serialize(projectedExpressions));
          final QueryPlan aggPlan =
              new AggregatePlan(
                  context,
                  tableRef,
                  projector,
                  null,
                  GroupBy.EMPTY_GROUP_BY,
                  false,
                  null,
                  OrderBy.EMPTY_ORDER_BY);
          return new MutationPlan() {

            @Override
            public PhoenixConnection getConnection() {
              return connection;
            }

            @Override
            public ParameterMetaData getParameterMetaData() {
              return context.getBindManager().getParameterMetaData();
            }

            @Override
            public MutationState execute() throws SQLException {
              Scanner scanner = aggPlan.getScanner();
              ResultIterator iterator = scanner.iterator();
              try {
                Tuple row = iterator.next();
                ImmutableBytesWritable ptr = context.getTempPtr();
                final long mutationCount =
                    (Long) aggProjector.getColumnProjector(0).getValue(row, PDataType.LONG, ptr);
                return new MutationState(maxSize, connection) {
                  @Override
                  public long getUpdateCount() {
                    return mutationCount;
                  }
                };
              } finally {
                iterator.close();
              }
            }

            @Override
            public ExplainPlan getExplainPlan() throws SQLException {
              List<String> queryPlanSteps = aggPlan.getExplainPlan().getPlanSteps();
              List<String> planSteps =
                  Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
              planSteps.add("UPSERT ROWS");
              planSteps.addAll(queryPlanSteps);
              return new ExplainPlan(planSteps);
            }
          };
        }
      }

      ////////////////////////////////////////////////////////////////////
      // UPSERT SELECT run client-side
      /////////////////////////////////////////////////////////////////////
      final int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
      return new MutationPlan() {

        @Override
        public PhoenixConnection getConnection() {
          return connection;
        }

        @Override
        public ParameterMetaData getParameterMetaData() {
          return context.getBindManager().getParameterMetaData();
        }

        @Override
        public MutationState execute() throws SQLException {
          byte[][] values = new byte[columnIndexes.length][];
          Scanner scanner = queryPlan.getScanner();
          int estSize = scanner.getEstimatedSize();
          int rowCount = 0;
          Map<ImmutableBytesPtr, Map<PColumn, byte[]>> mutation =
              Maps.newHashMapWithExpectedSize(estSize);
          ResultSet rs = new PhoenixResultSet(scanner, statement);
          PTable table = tableRef.getTable();
          PColumn column;
          while (rs.next()) {
            for (int i = 0; i < values.length; i++) {
              column = table.getColumns().get(columnIndexes[i]);
              byte[] byteValue = rs.getBytes(i + 1);
              Object value = rs.getObject(i + 1);
              int rsPrecision = rs.getMetaData().getPrecision(i + 1);
              Integer precision = rsPrecision == 0 ? null : rsPrecision;
              int rsScale = rs.getMetaData().getScale(i + 1);
              Integer scale = rsScale == 0 ? null : rsScale;
              // If the column being projected into is sorted descending, invert the bits
              // (the expression output from the SELECT is in ascending order).
              if (column.getColumnModifier() == ColumnModifier.SORT_DESC) {
                byte[] tempByteValue = Arrays.copyOf(byteValue, byteValue.length);
                byteValue =
                    ColumnModifier.SORT_DESC.apply(byteValue, tempByteValue, 0, byteValue.length);
              }
              // We are guaranteed that the two columns have compatible types,
              // as we checked that before.
              if (!column
                  .getDataType()
                  .isSizeCompatible(
                      column.getDataType(),
                      value,
                      byteValue,
                      precision,
                      column.getMaxLength(),
                      scale,
                      column.getScale())) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_INCOMPATIBLE_WITH_TYPE)
                    .setColumnName(column.getName().getString())
                    .build()
                    .buildException();
              }
              values[i] =
                  column
                      .getDataType()
                      .coerceBytes(
                          byteValue,
                          value,
                          column.getDataType(),
                          precision,
                          scale,
                          column.getMaxLength(),
                          column.getScale());
            }
            setValues(values, pkSlotIndexes, columnIndexes, table, mutation);
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size
            if (isAutoCommit && rowCount % batchSize == 0) {
              MutationState state = new MutationState(tableRef, mutation, 0, maxSize, connection);
              connection.getMutationState().join(state);
              connection.commit();
              mutation.clear();
            }
          }
          // If auto commit is true, this last batch will be committed upon return
          return new MutationState(
              tableRef, mutation, rowCount / batchSize * batchSize, maxSize, connection);
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
          List<String> queryPlanSteps = queryPlan.getExplainPlan().getPlanSteps();
          List<String> planSteps = Lists.newArrayListWithExpectedSize(queryPlanSteps.size() + 1);
          planSteps.add("UPSERT SELECT");
          planSteps.addAll(queryPlanSteps);
          return new ExplainPlan(planSteps);
        }
      };
    }

    ////////////////////////////////////////////////////////////////////
    // UPSERT VALUES
    /////////////////////////////////////////////////////////////////////
    int nodeIndex = 0;
    // Allocate the values array based on the number of values being set,
    // since some columns may not be set (if they're nullable).
    UpsertValuesCompiler expressionBuilder = new UpsertValuesCompiler(context);
    final byte[][] values = new byte[nValuesToSet][];
    for (ParseNode valueNode : valueNodes) {
      if (!valueNode.isConstant()) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.VALUE_IN_UPSERT_NOT_CONSTANT)
            .build()
            .buildException();
      }
      PColumn column = allColumns.get(columnIndexes[nodeIndex]);
      expressionBuilder.setColumn(column);
      LiteralExpression literalExpression = (LiteralExpression) valueNode.accept(expressionBuilder);
      byte[] byteValue = literalExpression.getBytes();
      if (literalExpression.getDataType() != null) {
        // If the ColumnModifier of the value expression doesn't match that of the
        // column being projected into, invert the bits.
        if (literalExpression.getColumnModifier() != column.getColumnModifier()) {
          byte[] tempByteValue = Arrays.copyOf(byteValue, byteValue.length);
          byteValue = ColumnModifier.SORT_DESC.apply(byteValue, tempByteValue, 0, byteValue.length);
        }
        if (!literalExpression
            .getDataType()
            .isCoercibleTo(column.getDataType(), literalExpression.getValue())) {
          throw new TypeMismatchException(
              literalExpression.getDataType(),
              column.getDataType(),
              "expression: " + literalExpression.toString() + " in column " + column);
        }
        if (!column
            .getDataType()
            .isSizeCompatible(
                literalExpression.getDataType(),
                literalExpression.getValue(),
                byteValue,
                literalExpression.getMaxLength(),
                column.getMaxLength(),
                literalExpression.getScale(),
                column.getScale())) {
          throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_INCOMPATIBLE_WITH_TYPE)
              .setColumnName(column.getName().getString())
              .setMessage("value=" + literalExpression.toString())
              .build()
              .buildException();
        }
      }
      byteValue =
          column
              .getDataType()
              .coerceBytes(
                  byteValue,
                  literalExpression.getValue(),
                  literalExpression.getDataType(),
                  literalExpression.getMaxLength(),
                  literalExpression.getScale(),
                  column.getMaxLength(),
                  column.getScale());
      values[nodeIndex] = byteValue;
      nodeIndex++;
    }
    return new MutationPlan() {

      @Override
      public PhoenixConnection getConnection() {
        return connection;
      }

      @Override
      public ParameterMetaData getParameterMetaData() {
        return context.getBindManager().getParameterMetaData();
      }

      @Override
      public MutationState execute() {
        Map<ImmutableBytesPtr, Map<PColumn, byte[]>> mutation = Maps.newHashMapWithExpectedSize(1);
        setValues(values, pkSlotIndexes, columnIndexes, tableRef.getTable(), mutation);
        return new MutationState(tableRef, mutation, 0, maxSize, connection);
      }

      @Override
      public ExplainPlan getExplainPlan() throws SQLException {
        return new ExplainPlan(Collections.singletonList("PUT SINGLE ROW"));
      }
    };
  }
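For context, a hedged sketch of the two statement shapes this compiler distinguishes, issued over JDBC (hypothetical names):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public class UpsertExample {
  public static void main(String[] args) throws Exception {
    // Assumes the Phoenix JDBC driver and a local HBase quorum; hypothetical names.
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
      // UPSERT VALUES: compiled by the final branch above into a single-row
      // MutationPlan ("PUT SINGLE ROW" in the explain plan). Values must be
      // constants or bind parameters.
      PreparedStatement upsert = conn.prepareStatement(
          "UPSERT INTO my_schema.my_table (id, a.val) VALUES (?, ?)");
      upsert.setLong(1, 1L);
      upsert.setString(2, "hello");
      upsert.executeUpdate();
      conn.commit();

      // UPSERT SELECT: with autoCommit on, the same unsalted table on both sides,
      // no aggregation or ORDER BY, and the PK expressions unchanged, the plan
      // above can run entirely server-side in a coprocessor.
      conn.setAutoCommit(true);
      conn.createStatement().executeUpdate(
          "UPSERT INTO my_schema.my_table (id, a.val)"
              + " SELECT id, a.val || '!' FROM my_schema.my_table");
    }
  }
}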