Example #1
0
 private void doProcessItems() throws ProcessingException {
   // Bail out early: no work may be performed while this node's operations are disabled.
   if (!cluster.areOperationsEnabled()) {
     return;
   }

   // Dispatch the quarantined items to the configured processing strategy;
   // they are removed from the list as they are processed.
   final boolean batchingActive = config.isBatchingEnabled() && config.getBatchSize() > 0;
   if (batchingActive) {
     processBatchedItems();
   } else {
     processListSnapshot();
   }

   // If a stop was requested and the list has fully drained, acknowledge the stop.
   if (toolkitList.isEmpty() && stopState == STOP_STATE.STOP_REQUESTED) {
     signalStop();
   }
 }
Example #2
0
  /**
   * Processes items from the bucket, honoring the configured batching, maximum fall-behind
   * delay, and rate limit. Execution of this method does not guarantee that items from a
   * non-empty bucket will be processed: it may return early when the bucket is empty, when a
   * batch has not yet filled up within the allowed delay, or when processing now would exceed
   * the rate limit.
   *
   * @throws ProcessingException if the underlying item processing fails
   */
  private void processItems() throws ProcessingException {
    final int workSize;

    bucketWriteLock.lock();
    try {
      // set some state related to this processing run
      lastProcessingTimeMillis = baselinedCurrentTimeMillis();

      // snapshot the amount of pending work while holding the write lock
      workSize = toolkitList.size();
      // if there's no work that needs to be done, stop the processing
      if (0 == workSize) {
        LOGGER.warn(getThreadName() + " : processItems : nothing to process");
        return;
      }
      // filter might remove items from list, so this should be with-in writeLock
      filterQuarantined();
    } finally {
      bucketWriteLock.unlock();
    }

    // if the batching is enabled and work size is smaller than batch size, don't process anything
    // as long as the
    // max allowed fall behind delay hasn't expired
    final int batchSize = config.getBatchSize();
    if (config.isBatchingEnabled() && batchSize > 0) {
      // wait for another round if the batch size hasn't been filled up yet and the max write delay
      // hasn't expired yet. NOTE: when stopState is not NORMAL (a stop is in flight), this early
      // return is skipped so remaining items are flushed instead of waiting for a full batch.
      if (workSize < batchSize
          && config.getMaxAllowedFallBehind() > lastProcessingTimeMillis - lastWorkDoneMillis) {
        bucketReadLock.lock();
        try {
          if (stopState == STOP_STATE.NORMAL) {
            LOGGER.warn(
                getThreadName()
                    + " : processItems : only "
                    + workSize
                    + " work items available, waiting for "
                    + batchSize
                    + " items to fill up a batch");
            return;
          }
        } finally {
          bucketReadLock.unlock();
        }
      }

      // enforce the rate limit and wait for another round if too much would be processed compared
      // to
      // the last time when a batch was executed. As above, the rate limit is only enforced while
      // stopState is NORMAL; during shutdown the remaining items are processed regardless.
      final int rateLimit = config.getRateLimit();
      if (rateLimit > 0) {
        final long secondsSinceLastWorkDone;
        final int effectiveBatchSize;
        bucketReadLock.lock();
        try {
          if (stopState == STOP_STATE.NORMAL) {
            secondsSinceLastWorkDone = (baselinedCurrentTimeMillis() - lastWorkDoneMillis) / 1000;
            // effective batch size may differ from the configured one; determined by helper
            effectiveBatchSize = determineBatchSize();
            // rateLimit is in items per second (presumably — TODO confirm against config docs)
            long maxBatchSizeSinceLastWorkDone = rateLimit * secondsSinceLastWorkDone;
            if (effectiveBatchSize > maxBatchSizeSinceLastWorkDone) {
              LOGGER.warn(
                  getThreadName()
                      + " : processItems() : last work was done "
                      + secondsSinceLastWorkDone
                      + " seconds ago, processing "
                      + effectiveBatchSize
                      + " batch items would exceed the rate limit of "
                      + rateLimit
                      + ", waiting for a while.");
              return;
            }
          }
        } finally {
          bucketReadLock.unlock();
        }
      }
    }

    // record that work is actually being done before delegating to the processing step
    bucketWriteLock.lock();
    try {
      lastWorkDoneMillis = baselinedCurrentTimeMillis();
    } finally {
      bucketWriteLock.unlock();
    }

    doProcessItems();
  }