Example #1
  private void processBatchedItems() throws ProcessingException {
    final int effectiveBatchSize = determineBatchSize();
    List<E> batch = getItemsFromQueue(effectiveBatchSize);
    final int retryAttempts = config.getRetryAttempts();
    int executionsLeft = retryAttempts + 1;
    while (executionsLeft-- > 0) {
      try {
        processor.process(batch);
        break;
      } catch (final RuntimeException e) {
        LOGGER.warn(
            "processBatchedItems caught error while processing batch of "
                + batch.size()
                + " error "
                + e);
        if (executionsLeft <= 0) {
          for (E item : batch) {
            try {
              processor.throwAway(item, e);
            } catch (final Throwable th) {
              LOGGER.warn(
                  "processBatchedItems caught error while throwing away an item: "
                      + item
                      + " error "
                      + th);
            }
          }
        } else {
          LOGGER.warn(
              getThreadName()
                  + " : processBatchedItems() : exception during processing, retrying in "
                  + config.getRetryAttemptDelay()
                  + " milliseconds, "
                  + executionsLeft
                  + " retries left : "
                  + e);
          try {
            Thread.sleep(config.getRetryAttemptDelay());
          } catch (InterruptedException e1) {
            Thread.currentThread().interrupt();
            throw e;
          }
        }
      }
    }

    removeFromQueue(effectiveBatchSize);
  }
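
The loop above is a bounded-retry pattern: attempt the work up to retryAttempts + 1 times, sleep config.getRetryAttemptDelay() milliseconds between attempts, and hand the failure to processor.throwAway(...) once the attempts are exhausted (the single-item variant in Example #6 follows the same shape). A minimal standalone sketch of the pattern, using hypothetical names (RetryingExecutor, Work, DiscardHandler) that are not part of the original API:

  import java.util.concurrent.TimeUnit;

  final class RetryingExecutor {
    interface Work { void run(); }
    interface DiscardHandler { void discard(RuntimeException cause); }

    // Runs work up to retryAttempts + 1 times, sleeping retryDelayMillis between
    // attempts; once the attempts are exhausted the failure is handed to
    // onExhausted instead of being rethrown.
    static void runWithRetries(Work work, DiscardHandler onExhausted,
                               int retryAttempts, long retryDelayMillis) {
      int executionsLeft = retryAttempts + 1;
      while (executionsLeft-- > 0) {
        try {
          work.run();
          return; // success, stop retrying
        } catch (RuntimeException e) {
          if (executionsLeft <= 0) {
            onExhausted.discard(e); // attempts exhausted: discard, don't rethrow
          } else {
            try {
              TimeUnit.MILLISECONDS.sleep(retryDelayMillis);
            } catch (InterruptedException ie) {
              Thread.currentThread().interrupt(); // preserve the interrupt status
              throw e; // abort retrying and surface the original failure
            }
          }
        }
      }
    }
  }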
Example #2
 private void doProcessItems() throws ProcessingException {
   // process the quarantined items and remove them as they're processed
   // don't process work if this node's operations have been disabled
   if (!cluster.areOperationsEnabled()) {
     return;
   }
   if (config.isBatchingEnabled() && config.getBatchSize() > 0) {
     processBatchedItems();
   } else {
     processListSnapshot();
   }
   if (toolkitList.isEmpty() && stopState == STOP_STATE.STOP_REQUESTED) {
     signalStop();
   }
 }
Example #3
 // Do not take any clustered write lock in this path.
 public void add(final E item) {
   if (null == item) return;
   int maxQueueSize = config.getMaxQueueSize();
   bucketWriteLock.lock();
   boolean interrupted = false;
   try {
     if (maxQueueSize != UNLIMITED_QUEUE_SIZE) {
       while (!isCancelled() && toolkitList.size() >= maxQueueSize) {
         try {
           bucketNotFull.await();
         } catch (final InterruptedException e) {
           interrupted = true;
         }
       }
     }
     // record whether the list was empty so waiting readers are only signalled
     // on the empty-to-non-empty transition
     boolean signalNotEmpty = toolkitList.isEmpty();
     toolkitList.unlockedAdd(item);
     if (signalNotEmpty) {
       bucketNotEmpty.signalAll();
     }
   } finally {
     bucketWriteLock.unlock();
     if (interrupted) {
       Thread.currentThread().interrupt();
     }
   }
 }
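
add() is the producer half of a bounded buffer built on two Conditions: it blocks on bucketNotFull while the queue is at capacity, signals bucketNotEmpty on the empty-to-non-empty transition, and defers restoring the interrupt status until the item is added and the lock released, so an interrupted caller still gets its item enqueued. The consumer half does not appear in these examples; a minimal sketch of what it presumably looks like (the BoundedBucket class, the takeOne name, and the notFull signalling are assumptions, not taken from the original code):

  import java.util.ArrayDeque;
  import java.util.Deque;
  import java.util.concurrent.locks.Condition;
  import java.util.concurrent.locks.ReentrantLock;

  // Hypothetical consumer counterpart to Example #3's add(); illustrative only.
  final class BoundedBucket<E> {
    private final Deque<E> items = new ArrayDeque<>();
    private final int maxQueueSize;
    private final ReentrantLock lock = new ReentrantLock();
    private final Condition notEmpty = lock.newCondition(); // producer signals this
    private final Condition notFull = lock.newCondition();  // consumer signals this

    BoundedBucket(int maxQueueSize) { this.maxQueueSize = maxQueueSize; }

    // Blocks until an item is available, then wakes producers waiting for space.
    E takeOne() throws InterruptedException {
      lock.lock();
      try {
        while (items.isEmpty()) {
          notEmpty.await(); // mirror of bucketNotFull.await() in add()
        }
        boolean wasFull = items.size() >= maxQueueSize;
        E item = items.removeFirst();
        if (wasFull) {
          notFull.signalAll(); // wake producers blocked at capacity
        }
        return item;
      } finally {
        lock.unlock();
      }
    }
  }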
Example #4
 private int determineBatchSize() {
   // never request more items than the queue currently holds
   return Math.min(config.getBatchSize(), toolkitList.size());
 }
Example #5
 @Override
 public synchronized AsyncCoordinator getOrCreateAsyncCoordinator(
     final Ehcache cache, final AsyncConfig config) {
   final String fullAsyncName = toolkitInstanceFactory.getFullAsyncName(cache);
   final ToolkitCache<String, AsyncConfig> configMap =
       toolkitInstanceFactory.getOrCreateAsyncConfigMap();
   AsyncConfig oldConfig = configMap.putIfAbsent(fullAsyncName, config);
   if (oldConfig != null && !oldConfig.equals(config)) {
     throw new IllegalArgumentException(
         "can not get AsyncCoordinator "
             + fullAsyncName
             + " for same name but different configs.\nExisting config\n"
             + oldConfig
             + "\nNew Config\n"
             + config);
   }
   AsyncCoordinatorImpl async = localMap.get(fullAsyncName);
   if (async != null) {
     if (oldConfig == null) {
       throw new IllegalArgumentException(
           "AsyncCoordinator "
               + fullAsyncName
               + " created for this node but entry not present in configMap");
     }
   } else {
      Callback stopCallback =
         new Callback() {
           @Override
           public void callback() {
             synchronized (AsyncCoordinatorFactoryImpl.this) {
               localMap.remove(fullAsyncName);
             }
           }
         };
      async = new AsyncCoordinatorImpl(fullAsyncName, config, toolkitInstanceFactory, stopCallback);
     localMap.put(fullAsyncName, async);
   }
   return async;
 }
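
The factory layers two maps: a cluster-wide configMap that enforces one config per name through putIfAbsent (first writer wins, later callers must present an equal config), and a node-local localMap caching the live coordinator. The validation idiom can be sketched in isolation with a plain ConcurrentHashMap (ConfigRegistry and register are illustrative names, not from the original code):

  import java.util.concurrent.ConcurrentHashMap;
  import java.util.concurrent.ConcurrentMap;

  final class ConfigRegistry<C> {
    private final ConcurrentMap<String, C> configs = new ConcurrentHashMap<>();

    // Atomically registers config under name; if a different config is already
    // registered under the same name, fail loudly instead of replacing it.
    void register(String name, C config) {
      C existing = configs.putIfAbsent(name, config);
      if (existing != null && !existing.equals(config)) {
        throw new IllegalArgumentException(
            "a different config is already registered under '" + name + "'");
      }
    }
  }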
Example #6
 private void processSingleItem() throws ProcessingException {
   // process the next item
   final E item = getItemsFromQueue(1).get(0);
   final int retryAttempts = config.getRetryAttempts();
   int executionsLeft = retryAttempts + 1;
   while (executionsLeft-- > 0) {
     try {
       processor.process(item);
       break;
     } catch (final RuntimeException e) {
       if (executionsLeft <= 0) {
         try {
           processor.throwAway(item, e);
         } catch (final Throwable th) {
           LOGGER.warn(
               "processSingleItem caught error while throwing away an item: " + item + " " + th);
         }
       } else {
         LOGGER.warn(
             getThreadName()
                 + " : processSingleItem() : exception during processing, retrying in "
                  + config.getRetryAttemptDelay()
                 + " milliseconds, "
                 + executionsLeft
                 + " retries left : "
                 + e.getMessage());
         try {
           Thread.sleep(config.getRetryAttemptDelay());
         } catch (InterruptedException e1) {
           Thread.currentThread().interrupt();
           throw e;
         }
       }
     }
   }
   removeFromQueue(1);
 }
Example #7
 public ProcessingBucket(
     String bucketName,
     AsyncConfig config,
     ToolkitListInternal<E> toolkitList,
     ClusterInfo cluster,
     ItemProcessor<E> processor,
     boolean workingOnDeadBucket) {
   this.bucketName = bucketName;
   this.config = config;
   this.cluster = cluster;
   this.processor = processor;
   this.toolkitList = toolkitList;
   this.baselineTimestampMillis = System.currentTimeMillis();
   ReentrantReadWriteLock bucketLock = new ReentrantReadWriteLock();
   this.bucketReadLock = bucketLock.readLock();
   this.bucketWriteLock = bucketLock.writeLock();
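    // the conditions below are created from the write lock, so await/signal always
    // runs while holding bucketWriteLock (see add() and processItems())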
   this.bucketNotEmpty = bucketWriteLock.newCondition();
   this.bucketNotFull = bucketWriteLock.newCondition();
   this.stoppedButBucketNotEmpty = bucketWriteLock.newCondition();
   this.workDelay = new AtomicLong(config.getWorkDelay());
   this.workingOnDeadBucket = workingOnDeadBucket;
   this.processingWorkerRunnable = new ProcessingWorker(threadNamePrefix + bucketName);
   this.destroyAfterStop = true;
 }
Example #8
  /**
   * Processes items from the bucket. A single execution of this method does not guarantee that
   * items from a non-empty bucket will be processed.
   */
  private void processItems() throws ProcessingException {
    final int workSize;

    bucketWriteLock.lock();
    try {
      // set some state related to this processing run
      lastProcessingTimeMillis = baselinedCurrentTimeMillis();

      workSize = toolkitList.size();
      // if there's no work that needs to be done, stop the processing
      if (0 == workSize) {
        LOGGER.warn(getThreadName() + " : processItems : nothing to process");
        return;
      }
      // filter might remove items from list, so this should be with-in writeLock
      filterQuarantined();
    } finally {
      bucketWriteLock.unlock();
    }

    // if batching is enabled and the work size is smaller than the batch size, don't process
    // anything as long as the max allowed fall-behind delay hasn't expired
    final int batchSize = config.getBatchSize();
    if (config.isBatchingEnabled() && batchSize > 0) {
      // wait for another round if the batch size hasn't been filled up yet and the max write delay
      // hasn't expired yet
      if (workSize < batchSize
          && config.getMaxAllowedFallBehind() > lastProcessingTimeMillis - lastWorkDoneMillis) {
        bucketReadLock.lock();
        try {
          if (stopState == STOP_STATE.NORMAL) {
            LOGGER.warn(
                getThreadName()
                    + " : processItems : only "
                    + workSize
                    + " work items available, waiting for "
                    + batchSize
                    + " items to fill up a batch");
            return;
          }
        } finally {
          bucketReadLock.unlock();
        }
      }

      // enforce the rate limit and wait for another round if too much would be processed
      // compared to the last time a batch was executed
      final int rateLimit = config.getRateLimit();
      if (rateLimit > 0) {
        final long secondsSinceLastWorkDone;
        final int effectiveBatchSize;
        bucketReadLock.lock();
        try {
          if (stopState == STOP_STATE.NORMAL) {
            secondsSinceLastWorkDone = (baselinedCurrentTimeMillis() - lastWorkDoneMillis) / 1000;
            effectiveBatchSize = determineBatchSize();
            long maxBatchSizeSinceLastWorkDone = rateLimit * secondsSinceLastWorkDone;
            if (effectiveBatchSize > maxBatchSizeSinceLastWorkDone) {
              LOGGER.warn(
                  getThreadName()
                      + " : processItems() : last work was done "
                      + secondsSinceLastWorkDone
                      + " seconds ago, processing "
                      + effectiveBatchSize
                      + " batch items would exceed the rate limit of "
                      + rateLimit
                      + ", waiting for a while.");
              return;
            }
          }
        } finally {
          bucketReadLock.unlock();
        }
      }
    }

    bucketWriteLock.lock();
    try {
      lastWorkDoneMillis = baselinedCurrentTimeMillis();
    } finally {
      bucketWriteLock.unlock();
    }

    doProcessItems();
  }
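
The rate limit caps throughput at rateLimit items per elapsed second: with rateLimit = 100 and the last batch finished 2 seconds ago, any batch larger than 100 * 2 = 200 items is deferred to a later round. The gate reduces to a one-line predicate (exceedsRateLimit is an illustrative name, following the same bare-method style as the examples above):

  // Illustrative reduction of the rate-limit gate in processItems().
  static boolean exceedsRateLimit(
      int rateLimit, long secondsSinceLastWorkDone, int effectiveBatchSize) {
    long maxBatchSinceLastWork = rateLimit * secondsSinceLastWorkDone; // e.g. 100/s * 2s = 200
    return effectiveBatchSize > maxBatchSinceLastWork; // e.g. 250 > 200 -> wait
  }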