Code Example #1
 @Destroy
 public void destroy() {
   Utils.shutdownAndWait(scheduledExecutor);
   Utils.setField(ForkJoinPool.class, "factory", ForkJoinPool.commonPool(), null);
   ForkJoinPool common = ForkJoinPool.commonPool();
   Utils.setField(ForkJoinPool.class, "common", ForkJoinPool.commonPool(), null);
   Utils.shutdownAndWait(common);
 }
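
The Utils helpers above are not shown on this page; the following is a minimal sketch of what shutdownAndWait and setField presumably look like, inferred purely from their call sites (class, field name, target instance, new value). It is an illustrative assumption, not radargun's actual implementation.

 // Hypothetical sketch: shut the executor down and block until it terminates.
 public static void shutdownAndWait(java.util.concurrent.ExecutorService executor) {
   executor.shutdown();
   try {
     executor.awaitTermination(1, java.util.concurrent.TimeUnit.MINUTES);
   } catch (InterruptedException e) {
     Thread.currentThread().interrupt();
   }
 }

 // Hypothetical sketch: force-set a (possibly private) field on the given target;
 // for static fields like ForkJoinPool.common the target is ignored. Static final
 // fields may need extra tricks on some JDKs.
 public static void setField(Class<?> clazz, String fieldName, Object target, Object value) {
   try {
     java.lang.reflect.Field field = clazz.getDeclaredField(fieldName);
     field.setAccessible(true);
     field.set(target, value);
   } catch (Exception e) {
     throw new IllegalStateException("Cannot set field " + fieldName, e);
   }
 }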
Code Example #2
File: Master.java Project: rvansa/radargun
  private int executeStage(Configuration configuration, Cluster cluster, int stageId) {
    Stage stage =
        masterConfig
            .getScenario()
            .getStage(
                stageId,
                state,
                getCurrentExtras(masterConfig, configuration, cluster),
                state.getReport());
    InitHelper.init(stage);
    StageResult result;
    try {
      if (stage instanceof MasterStage) {
        result = executeMasterStage((MasterStage) stage);
      } else if (stage instanceof DistStage) {
        result = executeDistStage(stageId, (DistStage) stage);
      } else {
        log.error("Stage '" + stage.getName() + "' is neither master nor distributed");
        return -1;
      }
    } finally {
      InitHelper.destroy(stage);
    }

    if (result == StageResult.SUCCESS) {
      return stageId + 1;
    } else if (result == StageResult.FAIL || result == StageResult.EXIT) {
      returnCode = masterConfig.getConfigurations().indexOf(configuration) + 1;
      if (result == StageResult.EXIT) {
        exitFlag = true;
      }
      return -1;
    } else if (result == StageResult.BREAK || result == StageResult.CONTINUE) {
      Stack<String> repeatNames = (Stack<String>) state.get(RepeatStage.REPEAT_NAMES);
      String nextLabel;
      if (repeatNames == null || repeatNames.isEmpty()) {
        log.warn("BREAK or CONTINUE used out of any repeat.");
        return -1;
      } else if (result == StageResult.BREAK) {
        nextLabel = Utils.concat(".", "repeat", repeatNames.peek(), "end");
      } else if (result == StageResult.CONTINUE) {
        nextLabel = Utils.concat(".", "repeat", repeatNames.peek(), "begin");
      } else throw new IllegalStateException();
      int nextStageId = masterConfig.getScenario().getLabel(nextLabel);
      if (nextStageId < 0) {
        log.error("No label '" + nextLabel + "' defined");
      }
      return nextStageId;
    } else {
      throw new IllegalStateException("Result does not match to any type.");
    }
  }
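
Utils.concat is used above to build labels such as "repeat.<name>.end". A plausible sketch, assuming it simply joins its parts with the given separator (the real radargun implementation may differ):

  // Hypothetical sketch: join parts with a separator, e.g.
  // concat(".", "repeat", "outer", "end") -> "repeat.outer.end"
  public static String concat(String separator, String... parts) {
    StringBuilder sb = new StringBuilder();
    for (String part : parts) {
      if (sb.length() > 0) sb.append(separator);
      sb.append(part);
    }
    return sb.toString();
  }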
Code Example #3
 private void init() {
   if (initialized) return;
   if (!service.isRunning()) {
     throw new IllegalStateException("init() can be called only when the service is started");
   }
   EntityManagerFactory emf = service.getEntityManagerFactory();
   try {
     // TODO: find getter somewhere
     jta =
         PersistenceUnitTransactionType.JTA.equals(
             Utils.getField(EntityManagerFactoryImpl.class, emf, "transactionType"));
   } catch (Exception e) {
     throw new RuntimeException(e);
   }
   initialized = true;
 }
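
A minimal sketch of the assumed Utils.getField counterpart, matching the call above (declaring class, instance, field name); this is an assumption for illustration, not the project's actual code:

 // Hypothetical sketch: read a (possibly private) field reflectively.
 public static Object getField(Class<?> clazz, Object target, String fieldName) throws Exception {
   java.lang.reflect.Field field = clazz.getDeclaredField(fieldName);
   field.setAccessible(true);
   return field.get(target);
 }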
Code Example #4
  public static void generate(ClusterTimeSeriesReport report, String reportDir, String fileName)
      throws IOException {
    File root = new File(reportDir);
    if (!root.exists()) {
      if (!root.mkdirs()) {
        log.warn(
            "Could not create root dir: "
                + root.getAbsolutePath()
                + ". This might result in reports not being generated");
      } else {
        log.info("Created root directory: " + root);
      }
    }
    File chartFile = new File(root, fileName + ".png");
    Utils.backupFile(chartFile);

    ChartUtilities.saveChartAsPNG(chartFile, createChart(report), 1024, 768);

    log.info("Chart saved as " + chartFile);
  }
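
Utils.backupFile presumably moves any existing chart out of the way before saveChartAsPNG writes a new one. A rough sketch under that assumption (the actual backup naming in radargun may differ):

  // Hypothetical sketch: rename an existing file so it is not overwritten.
  public static void backupFile(java.io.File file) {
    if (file.exists()) {
      java.io.File backup = new java.io.File(
          file.getParentFile(), file.getName() + "." + System.currentTimeMillis() + ".bak");
      if (!file.renameTo(backup)) {
        System.err.println("Could not back up " + file.getAbsolutePath());
      }
    }
  }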
Code Example #5
  @Override
  public StageResult processAckOnMaster(List<DistStageAck> acks) {
    StageResult result = super.processAckOnMaster(acks);
    if (result.isError()) return result;

    log.info("--------------------");
    if (ramPercentage > 0) {
      if (stringData) {
        log.info(
            "Filled cache with String objects totaling "
                + Math.round(ramPercentage * 100)
                + "% of the Java heap");
      } else {
        log.info(
            "Filled cache with byte arrays totaling "
                + Math.round(ramPercentage * 100)
                + "% of the Java heap");
      }
    }
    if (ramPercentage < 0 && targetMemoryUse > 0) {
      if (stringData) {
        log.info("Filled cache with String objects totaling " + Utils.kbString(targetMemoryUse));
      } else {
        log.info("Filled cache with byte arrays totaling " + Utils.kbString(targetMemoryUse));
      }
    }
    if (valueCount > 0) {
      if (stringData) {
        log.info(
            "Filled cache with "
                + Utils.kbString(2L * valueSize * valueCount) // long arithmetic avoids int overflow
                + " of "
                + valueSize
                + " character String objects");
      } else {
        log.info(
            "Filled cache with "
                + Utils.kbString((long) valueSize * valueCount)
                + " of "
                + Utils.kbString(valueSize)
                + " byte arrays");
      }
    }

    Report report = masterState.getReport();
    Report.Test test = report.createTest("Random_Data_Stage", null, true);
    int testIteration = test.getIterations().size();

    Map<Integer, Report.SlaveResult> nodeKeyCountsResult = new HashMap<>();
    Map<Integer, Report.SlaveResult> nodeTargetMemoryUseResult = new HashMap<>();
    Map<Integer, Report.SlaveResult> nodeCountOfWordsInDataResult = new HashMap<>();
    Map<Integer, Report.SlaveResult> nodeBytesWritten = new HashMap<>();

    long totalValues = 0;
    long totalBytes = 0;
    long totalNodeWordCount = 0;
    Map<String, Integer> clusterWordCount = new TreeMap<>();
    for (DataInsertAck ack : Projections.instancesOf(acks, DataInsertAck.class)) {
      if (ack.wordCount != null) {
        for (Map.Entry<String, Integer> entry : ack.wordCount.entrySet()) {
          if (clusterWordCount.containsKey(entry.getKey())) {
            clusterWordCount.put(
                entry.getKey(), clusterWordCount.get(entry.getKey()) + entry.getValue());
          } else {
            clusterWordCount.put(entry.getKey(), entry.getValue());
          }
        }
      }

      nodeKeyCountsResult.put(
          ack.getSlaveIndex(), new Report.SlaveResult(Long.toString(ack.nodeKeyCount), false));
      nodeBytesWritten.put(
          ack.getSlaveIndex(), new Report.SlaveResult(Long.toString(ack.bytesWritten), false));
      test.addStatistics(
          testIteration, ack.getSlaveIndex(), Collections.singletonList(ack.nodePutStats));

      totalValues += ack.nodePutCount;
      totalBytes += ack.bytesWritten;
      String logInfo =
          "Slave "
              + ack.getSlaveIndex()
              + " wrote "
              + ack.nodePutCount
              + " values to the cache with a total size of "
              + Utils.kbString(ack.bytesWritten);
      if (ramPercentage > 0) {
        logInfo += "; targetMemoryUse = " + Utils.kbString(ack.targetMemoryUse);
        nodeTargetMemoryUseResult.put(
            ack.getSlaveIndex(), new Report.SlaveResult(Long.toString(ack.targetMemoryUse), false));
      }
      if (stringData) {
        logInfo += "; countOfWordsInData = " + ack.countOfWordsInData;
        totalNodeWordCount += ack.countOfWordsInData; // accumulate the total reported below
        nodeCountOfWordsInDataResult.put(
            ack.getSlaveIndex(),
            new Report.SlaveResult(Long.toString(ack.countOfWordsInData), false));
      }
      log.info(logInfo);
    }
    log.info(
        "The cache contains "
            + totalValues
            + " values with a total size of "
            + Utils.kbString(totalBytes));
    if (limitWordCount) {
      int totalWordCount = maxWordCount;
      if (!shareWords) {
        totalWordCount = maxWordCount * slaveState.getClusterSize();
      }
      log.info(
          "Up to "
              + totalWordCount
              + " words were generated with a maximum length of "
              + maxWordLength
              + " characters");
    }
    if (!clusterWordCount.isEmpty()) {
      log.info(clusterWordCount.size() + " words were actually generated");
      log.info("--------------------");
      log.info("Cluster wide word count:");
      for (Map.Entry<String, Integer> entry : clusterWordCount.entrySet()) {
        log.info("word: " + entry.getKey() + "; count: " + entry.getValue());
      }
      // TODO Will this take too much memory?
      //         masterState.put(RANDOMDATA_CLUSTER_WORDCOUNT_KEY, clusterWordCount);
    }
    log.info("--------------------");

    masterState.put(RANDOMDATA_TOTALBYTES_KEY, totalBytes);
    test.addResult(
        testIteration,
        new Report.TestResult(
            "Kilobytes written per node", nodeBytesWritten, Utils.kbString(totalBytes), false));

    test.addResult(
        testIteration, new Report.TestResult("Key count per node", nodeKeyCountsResult, "", false));
    if (!nodeTargetMemoryUseResult.isEmpty()) {
      test.addResult(
          testIteration,
          new Report.TestResult(
              "Target memory use per node",
              nodeTargetMemoryUseResult,
              Utils.kbString(totalBytes),
              false));
    }
    if (!nodeCountOfWordsInDataResult.isEmpty()) {
      test.addResult(
          testIteration,
          new Report.TestResult(
              "Count of words in data per node",
              nodeCountOfWordsInDataResult,
              Long.toString(totalNodeWordCount),
              false));
    }

    return StageResult.SUCCESS;
  }
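
Utils.kbString appears throughout these examples. Judging by the log messages, it renders a byte count in kilobytes; a minimal sketch under that assumption:

  // Hypothetical sketch: format a byte count as kilobytes.
  public static String kbString(long bytes) {
    return (bytes / 1024) + " kb";
  }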
Code Example #6
  @Override
  public DistStageAck executeOnSlave() {
    random = new Random(randomSeed + slaveState.getSlaveIndex());

    if (ramPercentage > 0 && valueCount > 0) {
      return errorResponse(
          "Either valueCount or ramPercentageDataSize should be specified, but not both");
    }

    if (ramPercentage > 1) {
      return errorResponse("The percentage of RAM can not be greater than one.");
    }

    if (shareWords && !limitWordCount) {
      return errorResponse(
          "The shareWords property can only be true when limitWordCount is also true.");
    }

    if (limitWordCount && !stringData) {
      return errorResponse(
          "The limitWordCount property can only be true when stringData is also true.");
    }

    if (valueByteOverhead == -1 && cacheInformation == null) {
      return errorResponse("The valueByteOverhead property must be supplied for this cache.");
    }

    /*
     * If valueByteOverhead is not specified, then try to retrieve the byte overhead from the
     * CacheWrapper
     */
    if (valueByteOverhead == -1 && cacheInformation != null) {
      valueByteOverhead = cacheInformation.getCache(null).getEntryOverhead();
    }

    runtime = Runtime.getRuntime();
    int valueSizeWithOverhead = valueByteOverhead;
    /*
     * String data takes twice the bytes of a byte array of the same length, since Java chars
     * are two bytes each
     */
    if (stringData) {
      valueSizeWithOverhead += (valueSize * 2);
    } else {
      valueSizeWithOverhead += valueSize;
    }

    if (ramPercentage > 0) {
      System.gc();
      targetMemoryUse = (long) (runtime.maxMemory() * ramPercentage);
      log.trace("targetMemoryUse: " + Utils.kbString(targetMemoryUse));

      // Use floating-point division; otherwise the long division truncates and Math.ceil is a no-op
      nodePutCount = (long) Math.ceil((double) targetMemoryUse / valueSizeWithOverhead);
    } else {
      long totalPutCount = valueCount;
      if (targetMemoryUse > 0) {
        if (targetMemoryUse % valueSizeWithOverhead != 0) {
          log.warn(
              "The supplied value for targetMemoryUse ("
                  + targetMemoryUse
                  + ") is not evenly divisible by the value size plus byte overhead ("
                  + valueSizeWithOverhead
                  + ")");
        }
        totalPutCount = targetMemoryUse / valueSizeWithOverhead;
      }
      // Integer division; the remainder is distributed to the low-index slaves below
      nodePutCount = totalPutCount / slaveState.getClusterSize();
      /*
       * Add one to the nodePutCount on each slave with an index less than the remainder so that
       * the correct total number of values is written, e.g. 10 puts across 3 slaves gives 3 per
       * slave plus one extra on slave 0.
       */
      if ((totalPutCount % slaveState.getClusterSize() != 0)
          && slaveState.getSlaveIndex() < (totalPutCount % slaveState.getClusterSize())) {
        nodePutCount++;
      }
    }

    long putCount = nodePutCount;
    long bytesWritten = 0;
    BasicOperations.Cache<String, Object> cache = basicOperations.getCache(bucket);
    try {
      byte[] buffer = new byte[valueSize];
      Statistics stats = new DefaultStatistics(new DefaultOperationStats());
      stats.begin();
      while (putCount > 0) {
        String key =
            Integer.toString(slaveState.getSlaveIndex())
                + "-"
                + putCount
                + ":"
                + TimeService.nanoTime();

        long start = -1;
        boolean success = false;
        String cacheData = null;

        if (stringData) {
          cacheData = generateRandomStringData(valueSize);
        } else {
          random.nextBytes(buffer);
        }

        for (int i = 0; i < putRetryCount; ++i) {
          try {
            if (stringData) {
              if (putCount % 5000 == 0) {
                log.info(i + ": Writing string length " + valueSize + " to cache key: " + key);
              }

              start = TimeService.nanoTime();
              cache.put(key, cacheData);
            } else {
              if (putCount % 5000 == 0) {
                log.info(i + ": Writing " + valueSize + " bytes to cache key: " + key);
              }

              start = TimeService.nanoTime();
              cache.put(key, buffer);
            }
            long durationNanos = TimeService.nanoTime() - start;
            stats.registerRequest(durationNanos, BasicOperations.PUT);
            if (printWriteStatistics) {
              log.info(
                  "Put on slave"
                      + slaveState.getSlaveIndex()
                      + " took "
                      + Utils.prettyPrintTime(durationNanos, TimeUnit.NANOSECONDS));
            }
            success = true;
            break;
          } catch (Exception e) {
            // If the put fails, sleep to see if staggering the put will succeed
            Thread.sleep(maxSleepInterval * 1000);
          }
        }
        if (!success) {
          return errorResponse(
              "Failed to insert entry into cache",
              new RuntimeException(
                  String.format("Failed to insert entry %d times.", putRetryCount)));
        }

        if (stringData) {
          bytesWritten += (valueSize * 2);
        } else {
          bytesWritten += valueSize;
        }

        putCount--;
      }
      stats.end();
      System.gc();
      log.info(
          "Memory - free: "
              + Utils.kbString(runtime.freeMemory())
              + " - max: "
              + Utils.kbString(runtime.maxMemory())
              + "- total: "
              + Utils.kbString(runtime.totalMemory()));
      log.debug(
          "nodePutCount = "
              + nodePutCount
              + "; bytesWritten = "
              + bytesWritten
              + "; targetMemoryUse = "
              + targetMemoryUse
              + "; countOfWordsInData = "
              + countOfWordsInData);
      return new DataInsertAck(
          slaveState,
          nodePutCount,
          cacheInformation.getCache(null).getLocallyStoredSize(),
          bytesWritten,
          targetMemoryUse,
          countOfWordsInData,
          wordCount,
          stats);
    } catch (Exception e) {
      return errorResponse("An exception occurred", e);
    } finally {
      // Log the word counts for this node
      if (stringData && !wordCount.isEmpty()) {
        log.debug("Word counts for node" + slaveState.getSlaveIndex());
        log.debug("--------------------");
        for (Map.Entry<String, Integer> entry : wordCount.entrySet()) {
          log.debug("key: " + entry.getKey() + "; value: " + entry.getValue());
        }
        log.debug("--------------------");
      }
    }
  }
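
generateRandomStringData is a private helper of this stage and is not shown on this page. A rough sketch of what it might do, assuming it fills a string of the requested size with random lowercase words and tallies each word in the stage's wordCount map (the real helper also honors limitWordCount and shareWords, which this sketch ignores):

  // Hypothetical sketch only; not the stage's actual implementation.
  private String generateRandomStringData(int dataSize) {
    StringBuilder sb = new StringBuilder(dataSize);
    while (sb.length() < dataSize) {
      int wordLength = 1 + random.nextInt(maxWordLength);
      StringBuilder word = new StringBuilder(wordLength);
      for (int i = 0; i < wordLength; i++) {
        word.append((char) ('a' + random.nextInt(26)));
      }
      String w = word.toString();
      Integer count = wordCount.get(w);
      wordCount.put(w, count == null ? 1 : count + 1);
      if (sb.length() > 0) sb.append(' ');
      sb.append(w);
    }
    return sb.substring(0, dataSize);
  }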
Code Example #7
 public String formatTime(double value) {
   return Utils.prettyPrintTime((long) value, TimeUnit.NANOSECONDS).replace(" ", "&nbsp;");
 }
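
Utils.prettyPrintTime is assumed here to render a duration in a human-friendly unit, which is why formatTime then protects the embedded spaces with &nbsp;. A minimal sketch of that assumed behavior (not radargun's actual formatting):

 // Hypothetical sketch: pick a readable unit for a duration.
 public static String prettyPrintTime(long duration, TimeUnit unit) {
   long nanos = unit.toNanos(duration);
   if (nanos < 1000) return nanos + " ns";
   long micros = nanos / 1000;
   if (micros < 1000) return micros + " us";
   long millis = micros / 1000;
   if (millis < 1000) return millis + " ms";
   long seconds = millis / 1000;
   if (seconds < 60) return seconds + " seconds";
   return (seconds / 60) + " minutes " + (seconds % 60) + " seconds";
 }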
Code Example #8
File: Master.java Project: rvansa/radargun
  public void run() throws Exception {
    try {
      connection =
          new RemoteSlaveConnection(
              masterConfig.getMaxClusterSize(), masterConfig.getHost(), masterConfig.getPort());
      connection.establish();
      state.setMaxClusterSize(masterConfig.getMaxClusterSize());
      // let's create reporters now to fail soon in case of misconfiguration
      ArrayList<Reporter> reporters = new ArrayList<>();
      for (ReporterConfiguration reporterConfiguration : masterConfig.getReporters()) {
        for (ReporterConfiguration.Report report : reporterConfiguration.getReports()) {
          reporters.add(
              ReporterHelper.createReporter(reporterConfiguration.type, report.getProperties()));
        }
      }

      long benchmarkStart = System.currentTimeMillis();
      for (Configuration configuration : masterConfig.getConfigurations()) {
        log.info("Started benchmarking configuration '" + configuration.name + "'");
        state.setConfigName(configuration.name);
        long configStart = System.currentTimeMillis();
        for (Cluster cluster : masterConfig.getClusters()) {
          int clusterSize = cluster.getSize();
          log.info("Starting scenario on " + cluster);
          connection.sendCluster(cluster);
          connection.sendConfiguration(configuration);
          // the slaves are restarted here, so the cluster and configuration must be sent again
          connection.restartSlaves(clusterSize);
          connection.sendCluster(cluster);
          connection.sendConfiguration(configuration);
          connection.sendScenario(masterConfig.getScenario(), clusterSize);
          state.setCluster(cluster);
          state.setReport(new Report(configuration, cluster));
          long clusterStart = System.currentTimeMillis();
          int stageCount = masterConfig.getScenario().getStageCount();
          int scenarioDestroyId = stageCount - 2;
          int scenarioCleanupId = stageCount - 1;
          try {
            try {
              // ScenarioDestroy and ScenarioCleanup are special ones, executed always
              int nextStageId = 0;
              do {
                nextStageId = executeStage(configuration, cluster, nextStageId);
              } while (nextStageId >= 0 && nextStageId < scenarioDestroyId);
              // run ScenarioDestroy
            } finally {
              executeStage(configuration, cluster, scenarioDestroyId);
            }
          } finally {
            // run ScenarioCleanup
            executeStage(configuration, cluster, scenarioCleanupId);
          }
          log.info(
              "Finished scenario on "
                  + cluster
                  + " in "
                  + Utils.getMillisDurationString(System.currentTimeMillis() - clusterStart));
          state.getReport().addTimelines(connection.receiveTimelines(clusterSize));
          reports.add(state.getReport());
          if (exitFlag) {
            break;
          }
        }
        log.info(
            "Finished benchmarking configuration '"
                + configuration.name
                + "' in "
                + Utils.getMillisDurationString(System.currentTimeMillis() - configStart));
        if (exitFlag) {
          log.info("Exiting whole benchmark");
          break;
        }
      }
      log.info(
          "Executed all benchmarks in "
              + Utils.getMillisDurationString(System.currentTimeMillis() - benchmarkStart)
              + ", reporting...");
      for (Reporter reporter : reporters) {
        try {
          log.info("Running reporter " + reporter);
          reporter.run(Collections.unmodifiableList(reports));
        } catch (Exception e) {
          log.error("Error in reporter " + reporter, e);
          returnCode = 127;
        } finally {
          InitHelper.destroy(reporter);
        }
      }
      String reportersMessage =
          reporters.isEmpty()
              ? "No reporters have been specified."
              : "All reporters have been executed, exiting.";
      log.info(reportersMessage);
    } catch (Throwable e) {
      log.error("Exception in Master.run: ", e);
      returnCode = 127;
    } finally {
      if (connection != null) {
        connection.release();
      }
      ShutDownHook.exit(returnCode);
    }
  }
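
Utils.getMillisDurationString is assumed to format an elapsed time in milliseconds as a readable string; a minimal sketch under that assumption (the actual format may differ):

  // Hypothetical sketch: render a millisecond duration, e.g. "2 min 13 sec 456 ms".
  public static String getMillisDurationString(long millis) {
    long seconds = millis / 1000;
    return String.format("%d min %d sec %d ms", seconds / 60, seconds % 60, millis % 1000);
  }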