Example #1
 @Override
 public void execute(Tuple tuple) {
   try {
     boolean forceFlush = false;
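     // Tick tuples arrive on a timer; use them to flush a batch that never fills up on its own.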
     if (TupleUtils.isTick(tuple)) {
       LOG.debug(
           "TICK received! current batch status [{}/{}]",
           tupleBatch.size(),
           options.getBatchSize());
       collector.ack(tuple);
       forceFlush = true;
     } else {
       List<String> partitionVals = options.getMapper().mapPartitions(tuple);
       HiveEndPoint endPoint = HiveUtils.makeEndPoint(partitionVals, options);
       HiveWriter writer = getOrCreateWriter(endPoint);
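       // The heartbeat flag is set periodically elsewhere; when it flips, send heartbeats
       // so that idle writers' Hive transactions are kept alive.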
       if (timeToSendHeartBeat.compareAndSet(true, false)) {
         enableHeartBeatOnAllWriters();
       }
       writer.write(options.getMapper().mapRecord(tuple));
       tupleBatch.add(tuple);
       if (tupleBatch.size() >= options.getBatchSize()) forceFlush = true;
     }
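     // Flush on a full batch or on a tick; buffered tuples are acked only after the flush succeeds.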
     if (forceFlush && !tupleBatch.isEmpty()) {
       flushAllWriters(true);
       LOG.info("acknowledging tuples after writers flushed ");
       for (Tuple t : tupleBatch) collector.ack(t);
       tupleBatch.clear();
     }
   } catch (Exception e) {
     this.collector.reportError(e);
     collector.fail(tuple);
     try {
       flushAndCloseWriters();
       LOG.info("acknowledging tuples after writers flushed and closed");
       for (Tuple t : tupleBatch) collector.ack(t);
       tupleBatch.clear();
     } catch (Exception e1) {
       // If flushAndClose fails, assume the tuples are lost and do not ack them.
       LOG.warn("Error while flushing and closing writers, tuples will NOT be acknowledged");
       for (Tuple t : tupleBatch) collector.fail(t);
       tupleBatch.clear();
     }
   }
 }
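
Example #1 depends on Storm actually delivering tick tuples to the bolt; TupleUtils.isTick only returns true if the topology requests them. A minimal sketch of how a bolt can do that, assuming a hypothetical tickTupleIntervalSecs field (the real storm-hive bolt derives this interval from its options; uses org.apache.storm.Config and java.util.HashMap):

 @Override
 public Map<String, Object> getComponentConfiguration() {
   // Ask Storm to send this bolt a tick tuple every tickTupleIntervalSecs seconds.
   Map<String, Object> conf = new HashMap<>();
   conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, tickTupleIntervalSecs);
   return conf;
 }
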
Example #2
 @Override
 public void execute(Tuple tuple) {
   try {
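     // Unlike Example #1, each tuple is acked as soon as its record is written,
     // so records acked before a failed flush are not replayed and may be lost.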
     List<String> partitionVals = options.getMapper().mapPartitions(tuple);
     HiveEndPoint endPoint = HiveUtils.makeEndPoint(partitionVals, options);
     HiveWriter writer = getOrCreateWriter(endPoint);
     if (timeToSendHeartBeat.compareAndSet(true, false)) {
       enableHeartBeatOnAllWriters();
     }
     writer.write(options.getMapper().mapRecord(tuple));
     currentBatchSize++;
     if (currentBatchSize >= options.getBatchSize()) {
       flushAllWriters();
       currentBatchSize = 0;
     }
     collector.ack(tuple);
   } catch (Exception e) {
     this.collector.reportError(e);
     collector.fail(tuple);
     flushAndCloseWriters();
   }
 }
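
For context, a bolt with an execute method like these is wired into a topology roughly as follows. This is a minimal sketch assuming the storm-hive HiveBolt, HiveOptions, and DelimitedRecordHiveMapper classes; EventSpout, the field names, and the metastore URI are hypothetical placeholders:

 public static void main(String[] args) throws Exception {
   // Map the tuple fields "id" and "name" onto columns of the target Hive table.
   DelimitedRecordHiveMapper mapper = new DelimitedRecordHiveMapper()
       .withColumnFields(new Fields("id", "name"));

   // options.getBatchSize() in the examples above comes from this withBatchSize setting.
   HiveOptions hiveOptions =
       new HiveOptions("thrift://metastore:9083", "default", "events", mapper)
           .withTxnsPerBatch(10)
           .withBatchSize(1000);

   TopologyBuilder builder = new TopologyBuilder();
   builder.setSpout("eventSpout", new EventSpout()); // hypothetical spout
   builder.setBolt("hiveBolt", new HiveBolt(hiveOptions)).shuffleGrouping("eventSpout");

   StormSubmitter.submitTopology("hive-topology", new Config(), builder.createTopology());
 }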