public void
      doShouldRunReadOnlyLdbcWorkloadWithNothingDbWhileIgnoringScheduledStartTimesAndReturnExpectedMetrics(
          int threadCount,
          long operationCount,
          CompletionTimeService completionTimeService,
          ConcurrentErrorReporter errorReporter)
          throws InterruptedException, DbException, WorkloadException, IOException,
              MetricsCollectionException, CompletionTimeException, DriverConfigurationException,
              ExecutionException {
    ControlService controlService = null;
    Db db = null;
    Workload workload = null;
    MetricsService metricsService = null;
    try {
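      // Start from the default SF1 read-only configuration and point the
      // parameter and update directories at the bundled test resources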
      Map<String, String> paramsMap =
          LdbcSnbInteractiveWorkloadConfiguration.defaultReadOnlyConfigSF1();
      paramsMap.put(
          LdbcSnbInteractiveWorkloadConfiguration.PARAMETERS_DIRECTORY,
          TestUtils.getResource("/snb/interactive/").getAbsolutePath());
      paramsMap.put(
          LdbcSnbInteractiveWorkloadConfiguration.UPDATES_DIRECTORY,
          TestUtils.getResource("/snb/interactive/").getAbsolutePath());
      // Driver-specific parameters
      String name = null;
      String dbClassName = DummyLdbcSnbInteractiveDb.class.getName();
      String workloadClassName = LdbcSnbInteractiveWorkload.class.getName();
      int statusDisplayInterval = 1;
      TimeUnit timeUnit = TimeUnit.NANOSECONDS;
      String resultDirPath = temporaryFolder.newFolder().getAbsolutePath();
      double timeCompressionRatio = 1.0;
      Set<String> peerIds = new HashSet<>();
      ConsoleAndFileDriverConfiguration.ConsoleAndFileValidationParamOptions validationParams =
          null;
      String dbValidationFilePath = null;
      boolean calculateWorkloadStatistics = false;
      long spinnerSleepDuration = 0L;
      boolean printHelp = false;
      boolean ignoreScheduledStartTimes = true;
      long warmupCount = 100;

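      // Assemble the full driver configuration from the values above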
      ConsoleAndFileDriverConfiguration configuration =
          new ConsoleAndFileDriverConfiguration(
              paramsMap,
              name,
              dbClassName,
              workloadClassName,
              operationCount,
              threadCount,
              statusDisplayInterval,
              timeUnit,
              resultDirPath,
              timeCompressionRatio,
              peerIds,
              validationParams,
              dbValidationFilePath,
              calculateWorkloadStatistics,
              spinnerSleepDuration,
              printHelp,
              ignoreScheduledStartTimes,
              warmupCount);

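      // Overlay the update stream properties from the test resources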
      configuration =
          (ConsoleAndFileDriverConfiguration)
              configuration.applyArgs(
                  MapUtils.loadPropertiesToMap(
                      TestUtils.getResource("/snb/interactive/updateStream.properties")));

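      // Schedule the workload to start one second from now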
      controlService =
          new LocalControlService(
              timeSource.nowAsMilli() + 1000,
              configuration,
              new Log4jLoggingServiceFactory(false),
              timeSource);
      LoggingService loggingService =
          new Log4jLoggingServiceFactory(false).loggingServiceFor("Test");
      workload = new LdbcSnbInteractiveWorkload();
      workload.init(configuration);
      db = new DummyLdbcSnbInteractiveDb();
      db.init(configuration.asMap(), loggingService, workload.operationTypeToClassMapping());
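      // Merge the workload's per-type streams into one start-time-ordered stream,
      // cap it at operationCount, and remap start times onto the scheduled start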
      GeneratorFactory gf = new GeneratorFactory(new RandomDataGeneratorFactory(42L));
      Iterator<Operation> operations =
          gf.limit(
              WorkloadStreams.mergeSortedByStartTimeExcludingChildOperationGenerators(
                  gf, workload.streams(gf, true)),
              configuration.operationCount());
      Iterator<Operation> timeMappedOperations =
          gf.timeOffsetAndCompress(operations, controlService.workloadStartTimeAsMilli(), 1.0);
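      // Register everything as a single asynchronous stream: no dependency
      // operations and no child operation generator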
      WorkloadStreams workloadStreams = new WorkloadStreams();
      workloadStreams.setAsynchronousStream(
          new HashSet<Class<? extends Operation>>(),
          new HashSet<Class<? extends Operation>>(),
          Collections.<Operation>emptyIterator(),
          timeMappedOperations,
          null);

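      // The metrics service appends one CSV line per completed operation to resultsLog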
      File resultsLog = temporaryFolder.newFile();
      SimpleCsvFileWriter csvResultsLogWriter =
          new SimpleCsvFileWriter(resultsLog, SimpleCsvFileWriter.DEFAULT_COLUMN_SEPARATOR);
      metricsService =
          ThreadedQueuedMetricsService.newInstanceUsingBlockingBoundedQueue(
              timeSource,
              errorReporter,
              configuration.timeUnit(),
              ThreadedQueuedMetricsService.DEFAULT_HIGHEST_EXPECTED_RUNTIME_DURATION_AS_NANO,
              csvResultsLogWriter,
              workload.operationTypeToClassMapping(),
              LOGGING_SERVICE_FACTORY);

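      // Execute the workload; getFuture().get() blocks until every operation has run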
      int boundedQueueSize = DefaultQueues.DEFAULT_BOUND_1000;
      WorkloadRunner runner =
          new WorkloadRunner(
              timeSource,
              db,
              workloadStreams,
              metricsService,
              errorReporter,
              completionTimeService,
              controlService.loggingServiceFactory(),
              controlService.configuration().threadCount(),
              controlService.configuration().statusDisplayIntervalAsSeconds(),
              controlService.configuration().spinnerSleepDurationAsMilli(),
              controlService.configuration().ignoreScheduledStartTimes(),
              boundedQueueSize);

      runner.getFuture().get();

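      // No errors may have been reported, the finish time must not precede the
      // start time, and every operation must be accounted for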
      WorkloadResultsSnapshot workloadResults = metricsService.getWriter().results();
      SimpleDetailedWorkloadMetricsFormatter metricsFormatter =
          new SimpleDetailedWorkloadMetricsFormatter();

      assertThat(
          errorReporter.toString() + "\n" + metricsFormatter.format(workloadResults),
          errorReporter.errorEncountered(),
          is(false));
      assertThat(
          errorReporter.toString() + "\n" + metricsFormatter.format(workloadResults),
          workloadResults.latestFinishTimeAsMilli() >= workloadResults.startTimeAsMilli(),
          is(true));
      assertThat(
          errorReporter.toString() + "\n" + metricsFormatter.format(workloadResults),
          workloadResults.totalOperationCount(),
          is(operationCount));

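      // The results snapshot must survive a JSON round trip unchanged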
      WorkloadResultsSnapshot workloadResultsFromJson =
          WorkloadResultsSnapshot.fromJson(workloadResults.toJson());

      assertThat(errorReporter.toString(), workloadResults, equalTo(workloadResultsFromJson));
      assertThat(
          errorReporter.toString(),
          workloadResults.toJson(),
          equalTo(workloadResultsFromJson.toJson()));

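      // The results log must contain exactly one line per executed operation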
      csvResultsLogWriter.close();
      SimpleCsvFileReader csvResultsLogReader =
          new SimpleCsvFileReader(
              resultsLog, SimpleCsvFileReader.DEFAULT_COLUMN_SEPARATOR_REGEX_STRING);
      assertThat(
          (long) Iterators.size(csvResultsLogReader),
          is(configuration.operationCount())); // no +1: no CSV header row was written
      csvResultsLogReader.close();

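      // Read back the measured operation count and report throughput for manual inspection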
      operationCount = metricsService.getWriter().results().totalOperationCount();
      double operationsPerSecond =
          Math.round(
              ((double) operationCount / workloadResults.totalRunDurationAsNano())
                  * ONE_SECOND_AS_NANO);
      double microSecondPerOperation =
          (double) TimeUnit.NANOSECONDS.toMicros(workloadResults.totalRunDurationAsNano())
              / operationCount;
      System.out.println(
          format(
              "[%s threads] Completed %s operations in %s = %s op/sec = 1 op/%s us",
              threadCount,
              numberFormatter.format(operationCount),
              TEMPORAL_UTIL.nanoDurationToString(workloadResults.totalRunDurationAsNano()),
              doubleNumberFormatter.format(operationsPerSecond),
              doubleNumberFormatter.format(microSecondPerOperation)));
    } finally {
      System.out.println(errorReporter.toString());
      if (null != controlService) {
        controlService.shutdown();
      }
      if (null != db) {
        db.close();
      }
      if (null != workload) {
        workload.close();
      }
      if (null != metricsService) {
        metricsService.shutdown();
      }
      if (null != completionTimeService) {
        completionTimeService.shutdown();
      }
    }
  }

  @Test
  public void shouldRunReadOnlyLdbcWorkloadWithNothingDbAndCrashInSaneManner()
      throws InterruptedException, DbException, WorkloadException, IOException,
          MetricsCollectionException, CompletionTimeException, DriverConfigurationException,
          ExecutionException {
    int threadCount = 4;
    long operationCount = 100000;

    ControlService controlService = null;
    Db db = null;
    Workload workload = null;
    MetricsService metricsService = null;
    CompletionTimeService completionTimeService = null;
    ConcurrentErrorReporter errorReporter = new ConcurrentErrorReporter();
    try {
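      // Same read-only configuration as the happy-path test; the crash is provoked
      // further down via the dummy DB's crash-on-operation setting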
      Map<String, String> paramsMap =
          LdbcSnbInteractiveWorkloadConfiguration.defaultReadOnlyConfigSF1();
      paramsMap.put(
          LdbcSnbInteractiveWorkloadConfiguration.PARAMETERS_DIRECTORY,
          TestUtils.getResource("/snb/interactive/").getAbsolutePath());
      paramsMap.put(
          LdbcSnbInteractiveWorkloadConfiguration.UPDATES_DIRECTORY,
          TestUtils.getResource("/snb/interactive/").getAbsolutePath());
      // Driver-specific parameters
      String name = null;
      String dbClassName = DummyLdbcSnbInteractiveDb.class.getName();
      String workloadClassName = LdbcSnbInteractiveWorkload.class.getName();
      int statusDisplayInterval = 1;
      TimeUnit timeUnit = TimeUnit.NANOSECONDS;
      String resultDirPath = temporaryFolder.newFolder().getAbsolutePath();
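      // Aggressive time compression shrinks the scheduled gaps between operations
      // so the run (and the crash) happens quickly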
      double timeCompressionRatio = 0.0000001;
      Set<String> peerIds = new HashSet<>();
      ConsoleAndFileDriverConfiguration.ConsoleAndFileValidationParamOptions validationParams =
          null;
      String dbValidationFilePath = null;
      boolean calculateWorkloadStatistics = false;
      long spinnerSleepDuration = 0L;
      boolean printHelp = false;
      boolean ignoreScheduledStartTimes = false;
      long warmupCount = 100;

      ConsoleAndFileDriverConfiguration configuration =
          new ConsoleAndFileDriverConfiguration(
              paramsMap,
              name,
              dbClassName,
              workloadClassName,
              operationCount,
              threadCount,
              statusDisplayInterval,
              timeUnit,
              resultDirPath,
              timeCompressionRatio,
              peerIds,
              validationParams,
              dbValidationFilePath,
              calculateWorkloadStatistics,
              spinnerSleepDuration,
              printHelp,
              ignoreScheduledStartTimes,
              warmupCount);

      configuration =
          (ConsoleAndFileDriverConfiguration)
              configuration.applyArgs(
                  MapUtils.loadPropertiesToMap(
                      TestUtils.getResource("/snb/interactive/updateStream.properties")));

      controlService =
          new LocalControlService(
              timeSource.nowAsMilli(),
              configuration,
              new Log4jLoggingServiceFactory(false),
              timeSource);
      LoggingService loggingService =
          new Log4jLoggingServiceFactory(false).loggingServiceFor("Test");

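      // Build warmup and measured streams directly from the workload definition,
      // then compress their schedule onto the control service's start time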
      GeneratorFactory gf = new GeneratorFactory(new RandomDataGeneratorFactory(42L));
      boolean returnStreamsWithDbConnector = true;
      Tuple3<WorkloadStreams, Workload, Long> workloadStreamsAndWorkload =
          WorkloadStreams.createNewWorkloadWithOffsetAndLimitedWorkloadStreams(
              configuration,
              gf,
              returnStreamsWithDbConnector,
              configuration.warmupCount(),
              configuration.operationCount(),
              LOGGING_SERVICE_FACTORY);

      workload = workloadStreamsAndWorkload._2();

      WorkloadStreams workloadStreams =
          WorkloadStreams.timeOffsetAndCompressWorkloadStreams(
              workloadStreamsAndWorkload._1(),
              controlService.workloadStartTimeAsMilli(),
              configuration.timeCompressionRatio(),
              gf);

      File resultsLog = temporaryFolder.newFile();
      SimpleCsvFileWriter csvResultsLogWriter =
          new SimpleCsvFileWriter(resultsLog, SimpleCsvFileWriter.DEFAULT_COLUMN_SEPARATOR);
      metricsService =
          ThreadedQueuedMetricsService.newInstanceUsingBlockingBoundedQueue(
              timeSource,
              errorReporter,
              configuration.timeUnit(),
              ThreadedQueuedMetricsService.DEFAULT_HIGHEST_EXPECTED_RUNTIME_DURATION_AS_NANO,
              csvResultsLogWriter,
              workload.operationTypeToClassMapping(),
              LOGGING_SERVICE_FACTORY);

      completionTimeService =
          completionTimeServiceAssistant.newSynchronizedConcurrentCompletionTimeServiceFromPeerIds(
              controlService.configuration().peerIds());

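      // Configure the dummy DB to fail whenever it executes LdbcQuery4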
      db = new DummyLdbcSnbInteractiveDb();
      db.init(
          configuration
              .applyArg(DummyLdbcSnbInteractiveDb.CRASH_ON_ARG, LdbcQuery4.class.getName())
              .asMap(),
          loggingService,
          workload.operationTypeToClassMapping());

      int boundedQueueSize = DefaultQueues.DEFAULT_BOUND_1000;
      WorkloadRunner runner =
          new WorkloadRunner(
              timeSource,
              db,
              workloadStreams,
              metricsService,
              errorReporter,
              completionTimeService,
              controlService.loggingServiceFactory(),
              controlService.configuration().threadCount(),
              controlService.configuration().statusDisplayIntervalAsSeconds(),
              controlService.configuration().spinnerSleepDurationAsMilli(),
              controlService.configuration().ignoreScheduledStartTimes(),
              boundedQueueSize);

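      // The run is expected to fail once LdbcQuery4 is dispatched; the error is
      // captured by errorReporter and asserted in the finally block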
      runner.getFuture().get();
      csvResultsLogWriter.close();
    } finally {
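      // Best-effort teardown: shut each component down independently and tolerate failures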
      try {
        if (null != controlService) {
          controlService.shutdown();
        }
      } catch (Throwable e) {
        System.out.println(
            format(
                "Unclean %s shutdown -- but it's OK", controlService.getClass().getSimpleName()));
      }
      try {
        if (null != db) {
          db.close();
        }
      } catch (Throwable e) {
        System.out.println(
            format("Unclean %s shutdown -- but it's OK", db.getClass().getSimpleName()));
      }
      try {
        if (null != workload) {
          workload.close();
        }
      } catch (Throwable e) {
        System.out.println(
            format("Unclean %s shutdown -- but it's OK", workload.getClass().getSimpleName()));
      }
      try {
        if (null != metricsService) {
          metricsService.shutdown();
        }
      } catch (Throwable e) {
        System.out.println(
            format(
                "Unclean %s shutdown -- but it's OK", metricsService.getClass().getSimpleName()));
      }
      try {
        if (null != completionTimeService) {
          completionTimeService.shutdown();
        }
      } catch (Throwable e) {
        System.out.println(
            format(
                "Unclean %s shutdown -- but it's OK",
                completionTimeService.getClass().getSimpleName()));
      }
      System.out.println(errorReporter.toString());
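      // The injected crash must have surfaced as a reported error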
      assertTrue(errorReporter.errorEncountered());
    }
  }

Example #3

  @Override
  public WorkloadStreams getStreams(GeneratorFactory gf, boolean hasDbConnected)
      throws WorkloadException {
    long workloadStartTimeAsMilli = 0;

    /*
     * **************************
     * Initial Insert Operation Generator
     * **************************
     */
    // Load Insert Keys
    MinMaxGenerator<Long> insertKeyGenerator = gf.minMaxGenerator(gf.incrementing(0L, 1L), 0L, 0L);

    // Insert Fields: Names & Values
    Iterator<Long> fieldValueLengthGenerator = gf.uniform(1L, 100L);
    Iterator<Iterator<Byte>> randomFieldValueGenerator =
        gf.sizedUniformBytesGenerator(fieldValueLengthGenerator);
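    // Every field gets equal weight, so field selection is uniform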
    List<Tuple3<Double, String, Iterator<Iterator<Byte>>>> valuedFields = new ArrayList<>();
    for (int i = 0; i < NUMBER_OF_FIELDS_IN_RECORD; i++) {
      valuedFields.add(Tuple.tuple3(1d, FIELD_NAME_PREFIX + i, randomFieldValueGenerator));
    }
    Iterator<Map<String, Iterator<Byte>>> insertValuedFieldGenerator =
        gf.weightedDiscreteMap(valuedFields, NUMBER_OF_FIELDS_IN_RECORD);

    Iterator<Operation> initialInsertOperationGenerator =
        gf.limit(
            new InsertOperationGenerator(
                TABLE, gf.prefix(insertKeyGenerator, KEY_NAME_PREFIX), insertValuedFieldGenerator),
            INITIAL_INSERT_COUNT);

    /*
     * **************************
     * Insert Operation Generator
     * **************************
     */
    // Transaction Insert Keys
    InsertOperationGenerator transactionalInsertOperationGenerator =
        new InsertOperationGenerator(
            TABLE, gf.prefix(insertKeyGenerator, KEY_NAME_PREFIX), insertValuedFieldGenerator);

    /*
     * **************************
     * Read Operation Generator
     * **************************
     */
    // Read/Update Keys
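    // dynamicRangeUniform draws keys uniformly from the key range inserted so far,
    // so requests target records that exist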
    Iterator<String> requestKeyGenerator =
        gf.prefix(gf.dynamicRangeUniform(insertKeyGenerator), KEY_NAME_PREFIX);

    // Read Fields: Names
    List<Tuple2<Double, String>> fields = new ArrayList<>();
    for (int i = 0; i < NUMBER_OF_FIELDS_IN_RECORD; i++) {
      fields.add(Tuple.tuple2(1d, FIELD_NAME_PREFIX + i));
    }

    Iterator<List<String>> readFieldsGenerator =
        gf.weightedDiscreteList(fields, NUMBER_OF_FIELDS_TO_READ);

    ReadOperationGenerator readOperationGenerator =
        new ReadOperationGenerator(TABLE, requestKeyGenerator, readFieldsGenerator);

    /*
     * **************************
     * Update Operation Generator
     * **************************
     */
    // Update Fields: Names & Values
    Iterator<Map<String, Iterator<Byte>>> updateValuedFieldsGenerator =
        gf.weightedDiscreteMap(valuedFields, NUMBER_OF_FIELDS_TO_UPDATE);

    UpdateOperationGenerator updateOperationGenerator =
        new UpdateOperationGenerator(TABLE, requestKeyGenerator, updateValuedFieldsGenerator);

    /*
     * **************************
     * Scan Operation Generator
     * **************************
     */
    // Scan Fields: Names & Values
    Iterator<List<String>> scanFieldsGenerator =
        gf.weightedDiscreteList(fields, NUMBER_OF_FIELDS_TO_READ);

    // Scan Length: Number of Records
    Iterator<Integer> scanLengthGenerator = gf.uniform(MIN_SCAN_LENGTH, MAX_SCAN_LENGTH);

    ScanOperationGenerator scanOperationGenerator =
        new ScanOperationGenerator(
            TABLE, requestKeyGenerator, scanLengthGenerator, scanFieldsGenerator);

    /*
     * **************************
     * ReadModifyWrite Operation Generator
     * **************************
     */
    ReadModifyWriteOperationGenerator readModifyWriteOperationGenerator =
        new ReadModifyWriteOperationGenerator(
            TABLE, requestKeyGenerator, readFieldsGenerator, updateValuedFieldsGenerator);

    /*
     * **************************
     * Transactional Workload Operations
     * **************************
     */
    // proportion of transactions reads/update/insert/scan/read-modify-write
    List<Tuple2<Double, Iterator<Operation>>> operations = new ArrayList<>();
    operations.add(Tuple.tuple2(READ_RATIO, (Iterator<Operation>) readOperationGenerator));
    operations.add(Tuple.tuple2(UPDATE_RATIO, (Iterator<Operation>) updateOperationGenerator));
    operations.add(
        Tuple.tuple2(INSERT_RATIO, (Iterator<Operation>) transactionalInsertOperationGenerator));
    operations.add(Tuple.tuple2(SCAN_RATIO, (Iterator<Operation>) scanOperationGenerator));
    operations.add(
        Tuple.tuple2(
            READ_MODIFY_WRITE_RATIO, (Iterator<Operation>) readModifyWriteOperationGenerator));

    Iterator<Operation> transactionalOperationGenerator =
        gf.weightedDiscreteDereferencing(operations);

    // iterates initialInsertOperationGenerator before starting with
    // transactionalInsertOperationGenerator
    Iterator<Operation> workloadOperations =
        Iterators.concat(initialInsertOperationGenerator, transactionalOperationGenerator);

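    // One operation every 100 ms, each depending only on the workload start time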
    Iterator<Long> startTimesAsMilli = gf.incrementing(workloadStartTimeAsMilli + 1, 100L);
    Iterator<Long> dependencyTimesAsMilli = gf.constant(workloadStartTimeAsMilli);

    WorkloadStreams workloadStreams = new WorkloadStreams();
    workloadStreams.setAsynchronousStream(
        Sets.<Class<? extends Operation>>newHashSet(),
        Sets.<Class<? extends Operation>>newHashSet(),
        Collections.<Operation>emptyIterator(),
        gf.assignDependencyTimes(
            dependencyTimesAsMilli, gf.assignStartTimes(startTimesAsMilli, workloadOperations)),
        null);
    return workloadStreams;
  }