Code example #1
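A Mapper.map() implementation (apparently from an Apache Phoenix MapReduce index job) that runs one Phoenix UPSERT per input record, accumulates the uncommitted mutations in a shared MutationState, and flushes a batch via writeBatch() (code example #2) every batchSize records.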
  @Override
  protected void map(NullWritable key, PhoenixIndexDBWritable record, Context context)
      throws IOException, InterruptedException {

    context.getCounter(PhoenixJobCounters.INPUT_RECORDS).increment(1);

    try {
      final List<Object> values = record.getValues();
      indxWritable.setValues(values);
      indxWritable.write(this.pStatement);
      this.pStatement.execute();
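      // Executing the UPSERT does not commit anything; the resulting
      // mutations are buffered in the connection's MutationState,
      // which is retrieved below.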

      final PhoenixConnection pconn = connection.unwrap(PhoenixConnection.class);
      MutationState currentMutationState = pconn.getMutationState();
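      // First record seen: adopt the connection's MutationState as the accumulator.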
      if (mutationState == null) {
        mutationState = currentMutationState;
        return;
      }
      // Keep accumulating Mutations till batch size
      mutationState.join(currentMutationState);

      // Write Mutation Batch
      if (context.getCounter(PhoenixJobCounters.INPUT_RECORDS).getValue() % batchSize == 0) {
        writeBatch(mutationState, context);
        mutationState = null;
      }

      // Make sure progress is reported to Application Master.
      context.progress();
    } catch (SQLException e) {
      LOG.error(" Error {}  while read/write of a record ", e.getMessage());
      context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
      throw new RuntimeException(e);
    }
  }
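
Note that map() flushes only on exact multiples of batchSize, so the records accumulated since the last flush still have to be written out when the task ends. A minimal sketch of a matching cleanup() override, assuming the same mutationState field, LOG, counters, and the writeBatch() helper shown in code example #2:

  @Override
  protected void cleanup(Context context) throws IOException, InterruptedException {
    try {
      // Flush the final partial batch left over from map().
      if (mutationState != null) {
        writeBatch(mutationState, context);
        mutationState = null;
      }
      // (A full implementation would also close the JDBC connection here.)
    } catch (SQLException e) {
      LOG.error("Error {} while flushing the final batch", e.getMessage());
      context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
      throw new RuntimeException(e);
    }
  }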
Code example #2
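The writeBatch() helper used above: it drains the accumulated MutationState to the record writer, increments the OUTPUT_RECORDS counter by the size of each per-table mutation list, and rolls back the connection to discard the now-written local buffer.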
  private void writeBatch(MutationState mutationState, Context context)
      throws IOException, SQLException, InterruptedException {
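    // toMutations(true, null) exposes the buffered changes as per-table
    // (table name, HBase mutation list) pairs, including mutable-index mutations.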
    final Iterator<Pair<byte[], List<Mutation>>> iterator = mutationState.toMutations(true, null);
    while (iterator.hasNext()) {
      Pair<byte[], List<Mutation>> mutationPair = iterator.next();

      writer.write(mutationPair.getSecond());
      context
          .getCounter(PhoenixJobCounters.OUTPUT_RECORDS)
          .increment(mutationPair.getSecond().size());
    }
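    // The mutations were handed to the writer above; roll back to clear the
    // connection's local buffer before the next batch is accumulated.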
    connection.rollback();
  }
Code example #3
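MutatingParallelIteratorFactory.newIterator(): runs the mutation work on a cloned PhoenixConnection, then returns a one-tuple PeekingResultIterator whose single row encodes the update count; close() joins the child MutationState back into the parent connection.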
  @Override
  public PeekingResultIterator newIterator(
      final StatementContext parentContext,
      ResultIterator iterator,
      Scan scan,
      String tableName,
      QueryPlan plan)
      throws SQLException {
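    // Clone the connection so the mutation work below runs with its own
    // MutationState, isolated from the parent connection until close().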
    final PhoenixConnection clonedConnection = new PhoenixConnection(this.connection);

    MutationState state = mutate(parentContext, iterator, clonedConnection);

    long totalRowCount = state.getUpdateCount();
    if (clonedConnection.getAutoCommit()) {
      clonedConnection.getMutationState().join(state);
      state = clonedConnection.getMutationState();
    }
    final MutationState finalState = state;

    byte[] value = PLong.INSTANCE.toBytes(totalRowCount);
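    // Package the count in the standard ungrouped-aggregate single-cell shape
    // so callers can read it like any other aggregate result row.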
    KeyValue keyValue =
        KeyValueUtil.newKeyValue(
            UNGROUPED_AGG_ROW_KEY,
            SINGLE_COLUMN_FAMILY,
            SINGLE_COLUMN,
            AGG_TIMESTAMP,
            value,
            0,
            value.length);
    final Tuple tuple = new SingleKeyValueTuple(keyValue);
    return new PeekingResultIterator() {
      private boolean done = false;

      @Override
      public Tuple next() throws SQLException {
        if (done) {
          return null;
        }
        done = true;
        return tuple;
      }

      @Override
      public void explain(List<String> planSteps) {}

      @Override
      public void close() throws SQLException {
        try {
          /*
           * Join the child mutation states in close, since this is called in a single-threaded manner
           * after the parallel results have been processed.
           * If auto-commit is on for the cloned child connection, then the finalState here is an empty mutation
           * state (with no mutations). However, it still has the metrics for mutation work done by the
           * mutating-iterator. Joining the mutation state makes sure those metrics are passed over
           * to the parent connection.
           */
          MutatingParallelIteratorFactory.this.connection.getMutationState().join(finalState);
        } finally {
          clonedConnection.close();
        }
      }

      @Override
      public Tuple peek() throws SQLException {
        return done ? null : tuple;
      }
    };
  }