public DistStageAck executeOnSlave() {
    if (!isServiceRunning()) {
      log.info("Not running test on this slave as service is not running.");
      return successfulResponse();
    }
    runningTest = (RunningTest) slaveState.get(RunningTest.nameFor(testName));
    if (runningTest == null) {
      runningTest = new RunningTest();
      slaveState.put(RunningTest.nameFor(testName), runningTest);
      slaveState.addServiceListener(runningTest);
    } else if (runningTest.isTerminated()) {
      return errorResponse("The test was terminated in previous iteration");
    }
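    // Prepare the stage and apply its thread limits, exception-logging options and selector to
    // the shared RunningTest instance before ramp-up starts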
    prepare();
    runningTest.setMinThreadCreationDelay(minThreadCreationDelay);
    runningTest.setMinWaitingThreads(rampUpMinWaitingThreads);
    runningTest.setMaxThreads(maxThreads);
    runningTest.setLogExceptions(logRequestExceptions, logTransactionExceptions);
    runningTest.updateSelector(createSelector());

    log.info("Starting test " + testName + " ramp-up");
    int minThreads = Math.max(this.minThreads, rampUpMinWaitingThreads + 1);
    while (runningTest.getUsedThreads() < minThreads) {
      // one of the started threads may have already created its sibling;
      // fail silently if adding another stressor would exceed the limit
      runningTest.addStressor(true);
    }
    int lastThreads = 0;
    long now = TimeService.currentTimeMillis();
    long lastThreadsChange = now;
    long startTime = now;
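    // Poll once per second until the thread count has been stable for rampUpMinSteadyPeriod and
    // at least rampUpMinDuration has elapsed; give up once rampUpMaxDuration (if set) is exceeded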
    while (!runningTest.isTerminated()) {
      int currentThreads = runningTest.getUsedThreads();
      if (currentThreads != lastThreads) {
        lastThreadsChange = now;
        lastThreads = currentThreads;
      }
      if (now < startTime + rampUpMinDuration || now < lastThreadsChange + rampUpMinSteadyPeriod) {
        if (rampUpMaxDuration > 0 && now > startTime + rampUpMaxDuration) {
          break;
        }
        try {
          Thread.sleep(1000);
        } catch (InterruptedException e) {
          log.warn("Interruptions should not happen.", e);
        }
      } else {
        break;
      }
      now = TimeService.currentTimeMillis();
    }
    if (runningTest.isTerminated()) {
      return errorResponse("Test was terminated during ramp-up");
    } else if (rampUpMaxDuration > 0 && now >= startTime + rampUpMaxDuration) {
      return errorResponse("Ramp-up has not stabilized within timeout");
    } else if (runningTest.isReachedMax()) {
      return errorResponse("Max thread count reached during ramp-up");
    }
    return successfulResponse();
  }
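 // Watchdog loop: every 'period' ms take a snapshot of all thread stacks, skip threads whose name
 // does not match 'mask' or whose stack is shorter than 'shortStack' frames, and trace the threads
 // that isStuck() flags (or all matching threads when onlyStuck is false)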
 @Override
 public void run() {
   while (true) {
     log("Running check");
     long pre = TimeService.nanoTime();
     Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
     long post = TimeService.nanoTime();
     log("Thread.getAllStackTraces() took " + (post - pre) + " nanoseconds");
     for (Thread t : stacks.keySet()) {
       String name = t.getName();
       if (!name.equals(WATCHDOG) && (mask == null || name.contains(mask))) {
         StackTraceElement[] stack = stacks.get(t);
         if (stack.length < shortStack) {
           continue;
         }
         boolean stuck = isStuck(t, stack);
         if (!onlyStuck || stuck) {
           traceStack(t, stack, stuck);
         }
       }
     }
     lastStacks = stacks;
     try {
       Thread.sleep(period);
     } catch (InterruptedException e) {
       log.error("Thread has been interrupted", e);
       Thread.currentThread().interrupt();
     }
   }
 }
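 // Returns true when the event history matches the expected state: with 'changed' set, the newest
 // event must be younger than 'period' ms; with it unset, there must be no such recent event (an
 // empty history counts as unchanged)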
 private boolean check(List<Event> history) {
   if (history.isEmpty()) return !changed;
   long lastChange = history.get(history.size() - 1).getTime().getTime();
   long current = TimeService.currentTimeMillis();
   boolean hasChanged = lastChange + period > current;
   return hasChanged == changed;
 }
  @Override
  public DistStageAck executeOnSlave() {
    random = new Random(randomSeed + slaveState.getSlaveIndex());

    if (ramPercentage > 0 && valueCount > 0) {
      return errorResponse(
          "Either valueCount or ramPercentageDataSize should be specified, but not both");
    }

    if (ramPercentage > 1) {
      return errorResponse("The percentage of RAM can not be greater than one.");
    }

    if (shareWords && !limitWordCount) {
      return errorResponse(
          "The shareWords property can only be true when limitWordCount is also true.");
    }

    if (limitWordCount && !stringData) {
      return errorResponse(
          "The limitWordCount property can only be true when stringData is also true.");
    }

    if (valueByteOverhead == -1 && cacheInformation == null) {
      return errorResponse("The valueByteOverhead property must be supplied for this cache.");
    }

    /*
     * If valueByteOverhead is not specified, then try to retrieve the byte overhead from the
     * CacheWrapper
     */
    if (valueByteOverhead == -1 && cacheInformation != null) {
      valueByteOverhead = cacheInformation.getCache(null).getEntryOverhead();
    }

    runtime = Runtime.getRuntime();
    int valueSizeWithOverhead = valueByteOverhead;
    /*
     * String data takes twice the space of a byte array of the same length, since Java chars are
     * two bytes each
     */
    if (stringData) {
      valueSizeWithOverhead += (valueSize * 2);
    } else {
      valueSizeWithOverhead += valueSize;
    }

    if (ramPercentage > 0) {
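      // Fill the configured fraction of the JVM heap: derive this node's put count from
      // Runtime.maxMemory() and the per-entry footprint (value size plus overhead)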
      System.gc();
      targetMemoryUse = (long) (runtime.maxMemory() * ramPercentage);
      log.trace("targetMemoryUse: " + Utils.kbString(targetMemoryUse));

      nodePutCount = (long) Math.ceil((double) targetMemoryUse / valueSizeWithOverhead);
    } else {
      long totalPutCount = valueCount;
      if (targetMemoryUse > 0) {
        if (targetMemoryUse % valueSizeWithOverhead != 0) {
          log.warn(
              "The supplied value for targetMemoryUse ("
                  + targetMemoryUse
                  + ") is not evenly divisible by the value size plus byte overhead ("
                  + valueSizeWithOverhead
                  + ")");
        }
        totalPutCount = targetMemoryUse / valueSizeWithOverhead;
      }
      nodePutCount = totalPutCount / slaveState.getClusterSize();
      /*
       * Add one to the nodePutCount on each slave with an index less than the remainder so that
       * the correct total number of values is written to the cache
       */
      if ((totalPutCount % slaveState.getClusterSize() != 0)
          && slaveState.getSlaveIndex() < (totalPutCount % slaveState.getClusterSize())) {
        nodePutCount++;
      }
    }

    long putCount = nodePutCount;
    long bytesWritten = 0;
    BasicOperations.Cache<String, Object> cache = basicOperations.getCache(bucket);
    try {
      byte[] buffer = new byte[valueSize];
      Statistics stats = new DefaultStatistics(new DefaultOperationStats());
      stats.begin();
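      // Write this node's share of entries; each key embeds the slave index, the remaining put
      // count and a nanoTime component so that keys are unique across slaves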
      while (putCount > 0) {
        String key =
            Integer.toString(slaveState.getSlaveIndex())
                + "-"
                + putCount
                + ":"
                + TimeService.nanoTime();

        long start = -1;
        boolean success = false;
        String cacheData = null;

        if (stringData) {
          cacheData = generateRandomStringData(valueSize);
        } else {
          random.nextBytes(buffer);
        }

        for (int i = 0; i < putRetryCount; ++i) {
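          // Time each attempt individually; a failed put falls through to the catch block below,
          // which sleeps before the next of up to putRetryCount attempts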
          try {
            if (stringData) {
              if (putCount % 5000 == 0) {
                log.info(i + ": Writing string length " + valueSize + " to cache key: " + key);
              }

              start = TimeService.nanoTime();
              cache.put(key, cacheData);
            } else {
              if (putCount % 5000 == 0) {
                log.info(i + ": Writing " + valueSize + " bytes to cache key: " + key);
              }

              start = TimeService.nanoTime();
              cache.put(key, buffer);
            }
            long durationNanos = TimeService.nanoTime() - start;
            stats.registerRequest(durationNanos, BasicOperations.PUT);
            if (printWriteStatistics) {
              log.info(
                  "Put on slave"
                      + slaveState.getSlaveIndex()
                      + " took "
                      + Utils.prettyPrintTime(durationNanos, TimeUnit.NANOSECONDS));
            }
            success = true;
            break;
          } catch (Exception e) {
            // If the put fails, sleep before retrying so that a staggered attempt has a chance to succeed
            Thread.sleep(maxSleepInterval * 1000);
          }
        }
        if (!success) {
          return errorResponse(
              "Failed to insert entry into cache",
              new RuntimeException(
                  String.format("Failed to insert entry after %d attempts.", putRetryCount)));
        }

        if (stringData) {
          bytesWritten += (valueSize * 2);
        } else {
          bytesWritten += valueSize;
        }

        putCount--;
      }
      stats.end();
      System.gc();
      log.info(
          "Memory - free: "
              + Utils.kbString(runtime.freeMemory())
              + " - max: "
              + Utils.kbString(runtime.maxMemory())
              + "- total: "
              + Utils.kbString(runtime.totalMemory()));
      log.debug(
          "nodePutCount = "
              + nodePutCount
              + "; bytesWritten = "
              + bytesWritten
              + "; targetMemoryUse = "
              + targetMemoryUse
              + "; countOfWordsInData = "
              + countOfWordsInData);
      return new DataInsertAck(
          slaveState,
          nodePutCount,
          cacheInformation.getCache(null).getLocallyStoredSize(),
          bytesWritten,
          targetMemoryUse,
          countOfWordsInData,
          wordCount,
          stats);
    } catch (Exception e) {
      return errorResponse("An exception occurred", e);
    } finally {
      // Log the word counts for this node
      if (stringData && !wordCount.isEmpty()) {
        log.debug("Word counts for node" + slaveState.getSlaveIndex());
        log.debug("--------------------");
        for (Map.Entry<String, Integer> entry : wordCount.entrySet()) {
          log.debug("key: " + entry.getKey() + "; value: " + entry.getValue());
        }
        log.debug("--------------------");
      }
    }
  }