@Override
public StageResult processAckOnMaster(List<DistStageAck> acks) {
   StageResult result = super.processAckOnMaster(acks);
   if (result.isError())
      return result;

   log.info("--------------------");
   if (ramPercentage > 0) {
      if (stringData) {
         log.info("Filled cache with String objects totaling " + Math.round(ramPercentage * 100)
               + "% of the Java heap");
      } else {
         log.info("Filled cache with byte arrays totaling " + Math.round(ramPercentage * 100)
               + "% of the Java heap");
      }
   }
   if (ramPercentage < 0 && targetMemoryUse > 0) {
      if (stringData) {
         log.info("Filled cache with String objects totaling " + Utils.kbString(targetMemoryUse));
      } else {
         log.info("Filled cache with byte arrays totaling " + Utils.kbString(targetMemoryUse));
      }
   }
   if (valueCount > 0) {
      if (stringData) {
         log.info("Filled cache with " + Utils.kbString((valueSize * 2) * valueCount) + " of " + valueSize
               + " character String objects");
      } else {
         log.info("Filled cache with " + Utils.kbString(valueSize * valueCount) + " of "
               + Utils.kbString(valueSize) + " byte arrays");
      }
   }

   Report report = masterState.getReport();
   Report.Test test = report.createTest("Random_Data_Stage", null, true);
   int testIteration = test.getIterations().size();

   Map<Integer, Report.SlaveResult> nodeKeyCountsResult = new HashMap<Integer, Report.SlaveResult>();
   Map<Integer, Report.SlaveResult> nodeTargetMemoryUseResult = new HashMap<Integer, Report.SlaveResult>();
   Map<Integer, Report.SlaveResult> nodeCountOfWordsInDataResult = new HashMap<Integer, Report.SlaveResult>();
   Map<Integer, Report.SlaveResult> nodeBytesWritten = new HashMap<Integer, Report.SlaveResult>();

   long totalValues = 0;
   long totalBytes = 0;
   long totalNodeWordCount = 0;
   Map<String, Integer> clusterWordCount = new TreeMap<String, Integer>();
   for (DataInsertAck ack : Projections.instancesOf(acks, DataInsertAck.class)) {
      if (ack.wordCount != null) {
         // Merge this slave's word counts into the cluster-wide totals
         for (Map.Entry<String, Integer> entry : ack.wordCount.entrySet()) {
            if (clusterWordCount.containsKey(entry.getKey())) {
               clusterWordCount.put(entry.getKey(), clusterWordCount.get(entry.getKey()) + entry.getValue());
            } else {
               clusterWordCount.put(entry.getKey(), entry.getValue());
            }
         }
      }

      nodeKeyCountsResult.put(ack.getSlaveIndex(), new Report.SlaveResult(Long.toString(ack.nodeKeyCount), false));
      nodeBytesWritten.put(ack.getSlaveIndex(), new Report.SlaveResult(Long.toString(ack.bytesWritten), false));
      test.addStatistics(testIteration, ack.getSlaveIndex(), Collections.singletonList(ack.nodePutStats));

      totalValues += ack.nodePutCount;
      totalBytes += ack.bytesWritten;
      String logInfo = "Slave " + ack.getSlaveIndex() + " wrote " + ack.nodePutCount
            + " values to the cache with a total size of " + Utils.kbString(ack.bytesWritten);
      if (ramPercentage > 0) {
         logInfo += "; targetMemoryUse = " + Utils.kbString(ack.targetMemoryUse);
         nodeTargetMemoryUseResult.put(ack.getSlaveIndex(),
               new Report.SlaveResult(Long.toString(ack.targetMemoryUse), false));
      }
      if (stringData) {
         logInfo += "; countOfWordsInData = " + ack.countOfWordsInData;
         nodeCountOfWordsInDataResult.put(ack.getSlaveIndex(),
               new Report.SlaveResult(Long.toString(ack.countOfWordsInData), false));
         // Accumulate the aggregate used for the "Count of words in data per node"
         // test result; without this the reported total is always zero
         totalNodeWordCount += ack.countOfWordsInData;
      }
      log.info(logInfo);
   }

   log.info("The cache contains " + totalValues + " values with a total size of " + Utils.kbString(totalBytes));
   if (limitWordCount) {
      int totalWordCount = maxWordCount;
      if (!shareWords) {
         totalWordCount = maxWordCount * slaveState.getClusterSize();
      }
      log.info("Up to " + totalWordCount + " words were generated with a maximum length of " + maxWordLength
            + " characters");
   }
   if
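   // Dump the merged, cluster-wide word counts. Every distinct word is logged
   // on its own line, so this can be very verbose when the word set is large
   // or unbounded.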
   (!clusterWordCount.isEmpty()) {
      log.info(clusterWordCount.size() + " words were actually generated");
      log.info("--------------------");
      log.info("Cluster wide word count:");
      for (String key : clusterWordCount.keySet()) {
         log.info("word: " + key + "; count: " + clusterWordCount.get(key));
      }
      // TODO Will this take too much memory?
      // masterState.put(RANDOMDATA_CLUSTER_WORDCOUNT_KEY, clusterWordCount);
   }
   log.info("--------------------");

   masterState.put(RANDOMDATA_TOTALBYTES_KEY, totalBytes);
   test.addResult(testIteration, new Report.TestResult("Kilobytes written per node", nodeBytesWritten,
         Utils.kbString(totalBytes), false));
   test.addResult(testIteration, new Report.TestResult("Key count per node", nodeKeyCountsResult, "", false));
   if (!nodeTargetMemoryUseResult.isEmpty()) {
      test.addResult(testIteration, new Report.TestResult("Target memory use per node", nodeTargetMemoryUseResult,
            Utils.kbString(totalBytes), false));
   }
   if (!nodeCountOfWordsInDataResult.isEmpty()) {
      test.addResult(testIteration, new Report.TestResult("Count of words in data per node",
            nodeCountOfWordsInDataResult, Long.toString(totalNodeWordCount), false));
   }

   return StageResult.SUCCESS;
}
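/*
 * A side note on the word-count merge loop in processAckOnMaster: on Java 8+
 * the containsKey/get/put sequence can be collapsed into a single call. A
 * minimal, behaviorally equivalent sketch (assuming the ack never carries
 * null counts):
 *
 *    for (Map.Entry<String, Integer> entry : ack.wordCount.entrySet()) {
 *       clusterWordCount.merge(entry.getKey(), entry.getValue(), Integer::sum);
 *    }
 */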
@Override
public DistStageAck executeOnSlave() {
   random = new Random(randomSeed + slaveState.getSlaveIndex());

   if (ramPercentage > 0 && valueCount > 0) {
      return errorResponse("Either valueCount or ramPercentage should be specified, but not both");
   }

   if (ramPercentage > 1) {
      return errorResponse("The percentage of RAM cannot be greater than one.");
   }

   if (shareWords && !limitWordCount) {
      return errorResponse("The shareWords property can only be true when limitWordCount is also true.");
   }

   if (limitWordCount && !stringData) {
      return errorResponse("The limitWordCount property can only be true when stringData is also true.");
   }

   if (valueByteOverhead == -1 && cacheInformation == null) {
      return errorResponse("The valueByteOverhead property must be supplied for this cache.");
   }

   /*
    * If valueByteOverhead is not specified, then try to retrieve the byte overhead from the
    * CacheWrapper
    */
   if (valueByteOverhead == -1 && cacheInformation != null) {
      valueByteOverhead = cacheInformation.getCache(null).getEntryOverhead();
   }

   runtime = Runtime.getRuntime();
   int valueSizeWithOverhead = valueByteOverhead;
   /*
    * String data is twice the size of a byte array, since each Java char is two bytes
    */
   if (stringData) {
      valueSizeWithOverhead += (valueSize * 2);
   } else {
      valueSizeWithOverhead += valueSize;
   }

   if (ramPercentage > 0) {
      System.gc();
      targetMemoryUse = (long) (runtime.maxMemory() * ramPercentage);
      log.trace("targetMemoryUse: " + Utils.kbString(targetMemoryUse));
      // Cast to double before dividing; Math.ceil over an integer quotient is a no-op
      nodePutCount = (long) Math.ceil((double) targetMemoryUse / valueSizeWithOverhead);
   } else {
      long totalPutCount = valueCount;
      if (targetMemoryUse > 0) {
         if (targetMemoryUse % valueSizeWithOverhead != 0) {
            log.warn("The supplied value for targetMemoryUse (" + targetMemoryUse
                  + ") is not evenly divisible by the value size plus byte overhead ("
                  + valueSizeWithOverhead + ")");
         }
         totalPutCount = targetMemoryUse / valueSizeWithOverhead;
      }
      // Base share per node; the remainder is distributed just below
      nodePutCount = totalPutCount / slaveState.getClusterSize();
      /*
       * Add one to the nodePutCount on each slave with an index less than the remainder so that
       * the correct number of values are written to the cache
       */
      if ((totalPutCount % slaveState.getClusterSize() != 0)
            && slaveState.getSlaveIndex() < (totalPutCount % slaveState.getClusterSize())) {
         nodePutCount++;
      }
   }
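   /*
    * Worked example of the split above (illustrative numbers, not from the
    * source): totalPutCount = 10 on a 3-node cluster gives a base
    * nodePutCount of 10 / 3 = 3 with a remainder of 1, so slave 0 writes
    * 4 values and slaves 1 and 2 write 3 each, 10 in total.
    */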
   long putCount = nodePutCount;
   long bytesWritten = 0;
   BasicOperations.Cache<String, Object> cache = basicOperations.getCache(bucket);
   try {
      byte[] buffer = new byte[valueSize];
      Statistics stats = new DefaultStatistics(new DefaultOperationStats());
      stats.begin();
      while (putCount > 0) {
         String key = Integer.toString(slaveState.getSlaveIndex()) + "-" + putCount + ":"
               + TimeService.nanoTime();
         long start = -1;
         boolean success = false;
         String cacheData = null;

         if (stringData) {
            cacheData = generateRandomStringData(valueSize);
         } else {
            random.nextBytes(buffer);
         }

         for (int i = 0; i < putRetryCount; ++i) {
            try {
               if (stringData) {
                  if (putCount % 5000 == 0) {
                     log.info(i + ": Writing string length " + valueSize + " to cache key: " + key);
                  }
                  start = TimeService.nanoTime();
                  cache.put(key, cacheData);
               } else {
                  if (putCount % 5000 == 0) {
                     log.info(i + ": Writing " + valueSize + " bytes to cache key: " + key);
                  }
                  start = TimeService.nanoTime();
                  cache.put(key, buffer);
               }
               long durationNanos = TimeService.nanoTime() - start;
               stats.registerRequest(durationNanos, BasicOperations.PUT);
               if (printWriteStatistics) {
                  log.info("Put on slave " + slaveState.getSlaveIndex() + " took "
                        + Utils.prettyPrintTime(durationNanos, TimeUnit.NANOSECONDS));
               }
               success = true;
               break;
            } catch (Exception e) {
               // If the put fails, sleep to see if staggering the put will succeed
               Thread.sleep(maxSleepInterval * 1000);
            }
         }
         if (!success) {
            return errorResponse("Failed to insert entry into cache",
                  new RuntimeException(String.format("Failed to insert entry %d times.", putRetryCount)));
         }

         if (stringData) {
            bytesWritten += (valueSize * 2);
         } else {
            bytesWritten += valueSize;
         }

         putCount--;
      }
      stats.end();
      System.gc();
      log.info("Memory - free: " + Utils.kbString(runtime.freeMemory()) + " - max: "
            + Utils.kbString(runtime.maxMemory()) + " - total: " + Utils.kbString(runtime.totalMemory()));
      log.debug("nodePutCount = " + nodePutCount + "; bytesWritten = " + bytesWritten + "; targetMemoryUse = "
            + targetMemoryUse + "; countOfWordsInData = " + countOfWordsInData);
      return new DataInsertAck(slaveState, nodePutCount, cacheInformation.getCache(null).getLocallyStoredSize(),
            bytesWritten, targetMemoryUse, countOfWordsInData, wordCount, stats);
   } catch (Exception e) {
      return errorResponse("An exception occurred", e);
   } finally {
      // Log the word counts for this node
      if (stringData && !wordCount.isEmpty()) {
         log.debug("Word counts for node " + slaveState.getSlaveIndex());
         log.debug("--------------------");
         for (Map.Entry<String, Integer> entry : wordCount.entrySet()) {
            log.debug("key: " + entry.getKey() + "; value: " + entry.getValue());
         }
         log.debug("--------------------");
      }
   }
}
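/*
 * generateRandomStringData(int) is defined elsewhere in this class. Purely as
 * an illustration of the general shape -- not the stage's actual
 * implementation -- a sketch that fills the requested length with
 * space-separated words of random lowercase letters might look like:
 *
 *    private String generateRandomStringDataSketch(int length) {
 *       StringBuilder sb = new StringBuilder(length);
 *       while (sb.length() < length) {
 *          // Hypothetical word length cap of 8 characters, clamped to the remaining space
 *          int wordLength = Math.min(1 + random.nextInt(8), length - sb.length());
 *          for (int i = 0; i < wordLength; i++) {
 *             sb.append((char) ('a' + random.nextInt(26)));
 *          }
 *          if (sb.length() < length) {
 *             sb.append(' ');
 *          }
 *       }
 *       return sb.toString();
 *    }
 */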