Example #1
 @Override
 public void execute(Tuple tuple) {
   if (TupleUtils.isTick(tuple)) {
     LOG.debug("Received tick tuple, triggering emit of current window counts");
     emitCurrentWindowCounts();
   } else {
     countObjAndAck(tuple);
   }
 }
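A bolt only receives tick tuples if it requests them in its component configuration. A minimal sketch of the usual setup (the 10-second interval is illustrative, not taken from the example):

 @Override
 public Map<String, Object> getComponentConfiguration() {
   // Ask Storm to deliver a tick tuple to this bolt every 10 seconds.
   Config conf = new Config();
   conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 10);
   return conf;
 }

TupleUtils.isTick (org.apache.storm.utils.TupleUtils) just checks that the tuple arrived from Storm's system component on the system tick stream, so the check is cheap enough to run on every tuple.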
Example #2
  @Override
  public void execute(final Tuple input) {
    if (TupleUtils.isTick(input)) {
      collector.ack(input);
      return; // Do not try to send ticks to Kafka
    }
    K key = null;
    V message = null;
    String topic = null;
    try {
      key = mapper.getKeyFromTuple(input);
      message = mapper.getMessageFromTuple(input);
      topic = topicSelector.getTopic(input);
      if (topic != null) {
        Callback callback = null;

        if (!fireAndForget && async) {
          callback =
              new Callback() {
                @Override
                public void onCompletion(RecordMetadata ignored, Exception e) {
                  synchronized (collector) {
                    if (e != null) {
                      collector.reportError(e);
                      collector.fail(input);
                    } else {
                      collector.ack(input);
                    }
                  }
                }
              };
        }
        Future<RecordMetadata> result =
            producer.send(new ProducerRecord<K, V>(topic, key, message), callback);
        if (!async) {
          try {
            result.get();
            collector.ack(input);
          } catch (ExecutionException err) {
            collector.reportError(err);
            collector.fail(input);
          }
        } else if (fireAndForget) {
          collector.ack(input);
        }
      } else {
        LOG.warn("skipping key = " + key + ", topic selector returned null.");
        collector.ack(input);
      }
    } catch (Exception ex) {
      collector.reportError(ex);
      collector.fail(input);
    }
  }
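Two flags drive the acking above: when async and fireAndForget are both set, the tuple is acked as soon as the send is handed to the producer; when only async is set, the ack or fail is deferred to the producer callback; otherwise the bolt blocks on result.get(). A sketch of wiring such a bolt into a topology, assuming the storm-kafka-client builders (the broker address, topic name, and field names are placeholders):

  Properties props = new Properties();
  props.put("bootstrap.servers", "localhost:9092");
  props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
  props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

  KafkaBolt<String, String> bolt = new KafkaBolt<String, String>()
      .withProducerProperties(props)
      .withTopicSelector(new DefaultTopicSelector("events"))
      .withTupleToKafkaMapper(new FieldNameBasedTupleToKafkaMapper<>("key", "message"));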
Example #3
 @Override
 public void execute(Tuple tuple) {
   try {
     boolean forceFlush = false;
     if (TupleUtils.isTick(tuple)) {
       LOG.debug(
           "TICK received! current batch status [{}/{}]",
           tupleBatch.size(),
           options.getBatchSize());
       collector.ack(tuple);
       forceFlush = true;
     } else {
       List<String> partitionVals = options.getMapper().mapPartitions(tuple);
       HiveEndPoint endPoint = HiveUtils.makeEndPoint(partitionVals, options);
       HiveWriter writer = getOrCreateWriter(endPoint);
       if (timeToSendHeartBeat.compareAndSet(true, false)) {
         enableHeartBeatOnAllWriters();
       }
       writer.write(options.getMapper().mapRecord(tuple));
       tupleBatch.add(tuple);
       if (tupleBatch.size() >= options.getBatchSize()) {
         forceFlush = true;
       }
     }
     if (forceFlush && !tupleBatch.isEmpty()) {
       flushAllWriters(true);
       LOG.info("acknowledging tuples after writers flushed");
       for (Tuple t : tupleBatch) {
         collector.ack(t);
       }
       tupleBatch.clear();
     }
   } catch (Exception e) {
     this.collector.reportError(e);
     collector.fail(tuple);
     try {
       flushAndCloseWriters();
       LOG.info("acknowledging tuples after writers flushed and closed");
       for (Tuple t : tupleBatch) {
         collector.ack(t);
       }
       tupleBatch.clear();
     } catch (Exception e1) {
       // If flushAndClose fails, assume the tuples are lost: fail them so Storm replays them
       LOG.warn("Error while flushing and closing writers, tuples will NOT be acknowledged");
       for (Tuple t : tupleBatch) {
         collector.fail(t);
       }
       tupleBatch.clear();
     }
   }
 }
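Example #3 flushes on two triggers, a full batch or a tick tuple, which bounds how long buffered tuples can sit unacked. The skeleton of that pattern with the Hive-specific writer management stripped out (writer, batch, and batchSize here are hypothetical stand-ins):

 @Override
 public void execute(Tuple tuple) {
   try {
     boolean flush = TupleUtils.isTick(tuple);
     if (flush) {
       collector.ack(tuple); // ack the tick itself immediately
     } else {
       batch.add(tuple);    // track the tuple first so a failed write still fails it below
       writer.write(tuple); // buffer the record; ack only after a successful flush
       flush = batch.size() >= batchSize;
     }
     if (flush && !batch.isEmpty()) {
       writer.flush(); // make the buffered records durable
       for (Tuple t : batch) {
         collector.ack(t);
       }
       batch.clear();
     }
   } catch (Exception e) {
     collector.reportError(e);
     for (Tuple t : batch) {
       collector.fail(t); // fail the whole batch so Storm replays it
     }
     batch.clear();
   }
 }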
Example #4
  @Override
  public void execute(Tuple tuple) {
    if (TupleUtils.isTick(tuple)) {
      System.out.println(state);
      // TODO emit config according to current stats
      collector.ack(tuple); // ack the tick, as in the other examples
      return;
    }
    // Obtain the message fields from the upstream bolt.
    int srcTaskId = (Integer) tuple.getValueByField("taskId");
    int anomalyType = (Integer) tuple.getValueByField("anomalyType");
    // Check against all known anomaly codes.
    switch (anomalyType) {
      case 1:
        { // 1 means lots of hits to a single port
          Integer port = (Integer) tuple.getValueByField("anomalyData");
          /*
          if (changed == false) {
              // 11 is the code to add port to list of blocked ports
              int code = 11;
              if (lvl0.contains(srcTaskId)) {
                  emitBulkConfig(lvl1, code, port);
              } else{
                  emitBulkConfig(lvl0, code, port);
              }
              changed = true;
          }

          //System.out.println("Problem with port " + port); // for testing
          */

          state.addPortHit(port, srcTaskId);
          break;
        }
      case 2:
        {
          /* 2 means hits to an unexpected port */
          int port = (Integer) tuple.getValueByField("anomalyData");
          // System.out.println("got " + count + " from " + srcTaskId); // for testing
          state.addUnexpPortHit(port, srcTaskId);
          break;
        }
      case 3:
        {
          /* 3 means lots of hits to the same dest IP */
          InetAddress ip = (InetAddress) tuple.getValueByField("anomalyData");
          state.addIpHit(ip, srcTaskId);
          break;
        }
      case 4:
        {
          /* 4 means hits to an unexpected IP */
          InetAddress ip = (InetAddress) tuple.getValueByField("anomalyData");
          /*
          if (changed1 == false) {
              int code = 10; // 10 means change the general scanning pattern for that bolt
              if (lvl0.contains(srcTaskId)) {
                  emitBulkConfig(lvl1, code, 1);
                  emitBulkConfig(lvl0, code, 0);
              } else{
                  emitBulkConfig(lvl1, code, 0);
                  emitBulkConfig(lvl0, code, 1);
              }
              changed1 = true;
          }
          */
          state.addUnexpIpHit(ip, srcTaskId);
          break;
        }
      case 5:
        {
          /* 5 means a dropped packet */
          InetAddress ip = (InetAddress) tuple.getValueByField("anomalyData");
          state.addDropPacket(ip, srcTaskId);
          // System.out.println("Dropped: " + ip.getHostAddress()); // for testing
          break;
        }
      case 6:
        {
          /* 6 means anomalous flag traffic */
          InetAddress ip = (InetAddress) tuple.getValueByField("anomalyData");
          state.addBadFlag(ip, srcTaskId);
          break;
        }
    }
    collector.ack(tuple);
  }
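Example #4 depends on upstream detector bolts emitting exactly the three fields it reads. A minimal sketch of that contract (the field names come from the getValueByField calls above; the detector-side emit, with anomaly code 1 and a suspiciousPort variable, is a hypothetical illustration):

  @Override
  public void declareOutputFields(OutputFieldsDeclarer declarer) {
    // Must match the getValueByField lookups in Example #4.
    declarer.declare(new Fields("taskId", "anomalyType", "anomalyData"));
  }

  // In the detector's execute(), e.g. after spotting heavy traffic to a single port:
  collector.emit(tuple, new Values(context.getThisTaskId(), 1, suspiciousPort));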