Example no. 1
  @Test
  public void testContextAwareTimer() {
    ContextAwareTimer jobTotalDuration = this.context.contextAwareTimer(TOTAL_DURATION);
    Assert.assertEquals(
        this.context
            .getTimers()
            .get(
                MetricRegistry.name(
                    this.context.metricNamePrefix(false), jobTotalDuration.getName())),
        jobTotalDuration);
    Assert.assertEquals(jobTotalDuration.getContext(), this.context);
    Assert.assertEquals(jobTotalDuration.getName(), TOTAL_DURATION);

    Assert.assertTrue(jobTotalDuration.getTags().isEmpty());
    jobTotalDuration.addTag(new Tag<String>(METRIC_GROUP_KEY, INPUT_RECORDS_GROUP));
    Assert.assertEquals(jobTotalDuration.getTags().size(), 1);
    Assert.assertEquals(jobTotalDuration.getTags().get(0).getKey(), METRIC_GROUP_KEY);
    Assert.assertEquals(jobTotalDuration.getTags().get(0).getValue(), INPUT_RECORDS_GROUP);
    Assert.assertEquals(
        jobTotalDuration.getFullyQualifiedName(false),
        MetricRegistry.name(INPUT_RECORDS_GROUP, TOTAL_DURATION));

    jobTotalDuration.update(50, TimeUnit.SECONDS);
    jobTotalDuration.update(100, TimeUnit.SECONDS);
    jobTotalDuration.update(150, TimeUnit.SECONDS);
    Assert.assertEquals(jobTotalDuration.getCount(), 3L);
    Assert.assertEquals(jobTotalDuration.getSnapshot().getMin(), TimeUnit.SECONDS.toNanos(50L));
    Assert.assertEquals(jobTotalDuration.getSnapshot().getMax(), TimeUnit.SECONDS.toNanos(150L));

    Assert.assertTrue(jobTotalDuration.time().stop() >= 0L);
  }
Example no. 2
  @Test(groups = "slow")
  public void testInsertionTiming() {
    int keySpaceSize = 10000;
    int k = 100;
    int maxAdd = 100;
    TopK<Integer> topK = getInstance(keySpaceSize, k);

    LOG.info("Timing add() performance with keySpaceSize = %s, k = %s", keySpaceSize, k);

    Random random = new Random(0);
    long totalTime = 0;
    long count = 0;
    long begin = System.nanoTime();

    while (System.nanoTime() - begin < TEST_TIME_NANOS) {
      long start = System.nanoTime();

      topK.add(random.nextInt(keySpaceSize), random.nextInt(maxAdd));

      if (System.nanoTime() - begin > TimeUnit.SECONDS.toNanos(1)) {
        // discard the first second of measurements
        totalTime += System.nanoTime() - start;
        ++count;
      }
    }

    LOG.info(
        "Processed %s entries in %s ms. Insertion rate = %s entries/s",
        count,
        TimeUnit.NANOSECONDS.toMillis(totalTime),
        count / (totalTime * 1.0 / TimeUnit.SECONDS.toNanos(1)));
  }
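The test above deliberately discards the first second of measurements so that JIT warm-up does not skew the reported rate. Below is a minimal, self-contained sketch of the same pattern, with a placeholder work() standing in for the operation being timed:

import java.util.concurrent.TimeUnit;

public class WarmupDiscardBenchmark {
  private static final long WARMUP_NANOS = TimeUnit.SECONDS.toNanos(1);
  private static final long RUN_NANOS = TimeUnit.SECONDS.toNanos(5);

  public static void main(String[] args) {
    long begin = System.nanoTime();
    long totalTime = 0;
    long count = 0;
    while (System.nanoTime() - begin < RUN_NANOS) {
      long start = System.nanoTime();
      work();
      // discard measurements taken during the first (warm-up) second
      if (System.nanoTime() - begin > WARMUP_NANOS) {
        totalTime += System.nanoTime() - start;
        count++;
      }
    }
    System.out.printf(
        "%d ops in %d ms (%.0f ops/s)%n",
        count,
        TimeUnit.NANOSECONDS.toMillis(totalTime),
        count / (totalTime * 1.0 / TimeUnit.SECONDS.toNanos(1)));
  }

  private static void work() {
    Math.sqrt(Math.random()); // placeholder workload
  }
}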
Example no. 3
  @Test
  public void contextDeadlineShouldNotOverrideSmallerMetadataTimeout() {
    long deadlineNanos = TimeUnit.SECONDS.toNanos(2);
    Context context =
        Context.current()
            .withDeadlineAfter(deadlineNanos, TimeUnit.NANOSECONDS, deadlineCancellationExecutor);
    context.attach();

    CallOptions callOpts = CallOptions.DEFAULT.withDeadlineAfter(1, TimeUnit.SECONDS);
    ClientCallImpl<Void, Void> call =
        new ClientCallImpl<Void, Void>(
            DESCRIPTOR,
            MoreExecutors.directExecutor(),
            callOpts,
            provider,
            deadlineCancellationExecutor);

    Metadata headers = new Metadata();

    call.start(callListener, headers);

    assertTrue(headers.containsKey(GrpcUtil.TIMEOUT_KEY));
    Long timeout = headers.get(GrpcUtil.TIMEOUT_KEY);
    assertNotNull(timeout);

    long callOptsNanos = TimeUnit.SECONDS.toNanos(1);
    long deltaNanos = TimeUnit.MILLISECONDS.toNanos(400);
    assertTimeoutBetween(timeout, callOptsNanos - deltaNanos, callOptsNanos);
  }
Example no. 4
  @Test
  public void testCompareToOverflow() {
    // no overflow, but close
    Delayed d1 =
        BurstFilter.createLogDelay(
            Long.MAX_VALUE - TimeUnit.SECONDS.toNanos(10) - System.nanoTime());

    // Overflow
    Delayed d2 =
        BurstFilter.createLogDelay(
            Long.MAX_VALUE + TimeUnit.SECONDS.toNanos(10) - System.nanoTime());

    assertThat(d2, is(greaterThan(d1)));
  }
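The overflow case above matters because System.nanoTime() has no absolute meaning and may be negative or near Long.MAX_VALUE, so deadlines derived from it must be compared by subtracting timestamps rather than comparing them directly. A minimal sketch of the overflow-safe idiom (Deadline is a hypothetical helper, not part of the code under test):

import java.util.concurrent.TimeUnit;

final class Deadline {
  private final long deadlineNanos; // absolute, in System.nanoTime() terms

  Deadline(long timeout, TimeUnit unit) {
    this.deadlineNanos = System.nanoTime() + unit.toNanos(timeout);
  }

  boolean isExpired() {
    // Subtracting first keeps the comparison correct even if the sum above overflowed.
    return System.nanoTime() - deadlineNanos >= 0;
  }
}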
Example no. 5
  /**
   * Test to make sure we reschedule the task for execution if the new requested execution is
   * earlier than the previous one
   */
  public void testRescheduleForEarlierTime() throws InterruptedException {
    ScheduledExecutorService ex = Executors.newScheduledThreadPool(1);
    MyConflationListener listener = new MyConflationListener();
    OneTaskOnlyExecutor decorator = new OneTaskOnlyExecutor(ex, listener);

    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicInteger counter = new AtomicInteger();

    Runnable increment =
        new Runnable() {

          public void run() {
            counter.incrementAndGet();
          }
        };

    decorator.schedule(increment, 120, TimeUnit.SECONDS);
    decorator.schedule(increment, 10, TimeUnit.MILLISECONDS);

    long start = System.nanoTime();

    ex.shutdown();
    ex.awaitTermination(60, TimeUnit.SECONDS);
    long elapsed = System.nanoTime() - start;
    assertEquals(1, counter.get());
    assertEquals(1, listener.getDropCount());
    assertTrue(elapsed < TimeUnit.SECONDS.toNanos(120));
  }
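The OneTaskOnlyExecutor decorator under test conflates repeated submissions into at most one pending task, rescheduling when a new request asks for an earlier execution. A rough sketch of that reschedule-if-earlier idea over a plain ScheduledExecutorService (EarliestWinsScheduler is a hypothetical name, not the implementation being tested):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

class EarliestWinsScheduler {
  private final ScheduledExecutorService ex = Executors.newScheduledThreadPool(1);
  private ScheduledFuture<?> pending;

  synchronized void schedule(Runnable task, long delay, TimeUnit unit) {
    // If a task is already queued for later than the new request, cancel it.
    if (pending != null && !pending.isDone()
        && pending.getDelay(TimeUnit.NANOSECONDS) > unit.toNanos(delay)) {
      pending.cancel(false);
    }
    // Schedule only if nothing is still pending; an earlier pending task wins
    // and the new submission is conflated away (dropped).
    if (pending == null || pending.isDone()) {
      pending = ex.schedule(task, delay, unit);
    }
  }
}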
Example no. 6
 public static void waitForRehashToComplete(Cache... caches) {
   // give it 1 second to start rehashing
   // TODO Should look at the last committed view instead and check if it contains all the caches
   LockSupport.parkNanos(TimeUnit.SECONDS.toNanos(1));
   int gracetime = 30000; // 30 seconds?
   long giveup = System.currentTimeMillis() + gracetime;
   for (Cache c : caches) {
     CacheViewsManager cacheViewsManager =
         TestingUtil.extractGlobalComponent(c.getCacheManager(), CacheViewsManager.class);
     RpcManager rpcManager = TestingUtil.extractComponent(c, RpcManager.class);
     while (cacheViewsManager.getCommittedView(c.getName()).getMembers().size() != caches.length) {
       if (System.currentTimeMillis() > giveup) {
         String message =
             String.format(
                 "Timed out waiting for rehash to complete on node %s, expected member list is %s, current member list is %s!",
                 rpcManager.getAddress(),
                 Arrays.toString(caches),
                 cacheViewsManager.getCommittedView(c.getName()));
         log.error(message);
         throw new RuntimeException(message);
       }
       LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
     }
     log.trace("Node " + rpcManager.getAddress() + " finished rehash task.");
   }
 }
Example no. 7
public class RandomTypeUtil {

  public static final long NANOSECONDS_PER_SECOND = TimeUnit.SECONDS.toNanos(1);

  public static Timestamp getRandTimestamp(Random r) {
    String optionalNanos = "";
    if (r.nextInt(2) == 1) {
      optionalNanos =
          String.format(".%09d", Integer.valueOf(0 + r.nextInt((int) NANOSECONDS_PER_SECOND)));
    }
    String timestampStr =
        String.format(
            "%04d-%02d-%02d %02d:%02d:%02d%s",
            Integer.valueOf(0 + r.nextInt(10000)), // year
            Integer.valueOf(1 + r.nextInt(12)), // month
            Integer.valueOf(1 + r.nextInt(28)), // day
            Integer.valueOf(0 + r.nextInt(24)), // hour
            Integer.valueOf(0 + r.nextInt(60)), // minute
            Integer.valueOf(0 + r.nextInt(60)), // second
            optionalNanos);
    Timestamp timestampVal;
    try {
      timestampVal = Timestamp.valueOf(timestampStr);
    } catch (Exception e) {
      System.err.println("Timestamp string " + timestampStr + " did not parse");
      throw e;
    }
    return timestampVal;
  }
}
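A short usage sketch for the generator above (Timestamp is java.sql.Timestamp, as the "yyyy-mm-dd hh:mm:ss[.fffffffff]" format accepted by Timestamp.valueOf suggests):

Random r = new Random(42);
for (int i = 0; i < 3; i++) {
  // Roughly half of the generated strings carry a ".%09d" nanosecond suffix.
  System.out.println(RandomTypeUtil.getRandTimestamp(r));
}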
Example no. 8
 public void testNonStopTimer() throws Exception {
   long startTime = System.nanoTime();
   int loopTimes = 4;
   long timeout = 500;
   for (int i = 0; i < loopTimes; i++) {
     System.out.println("executing loop count " + i);
     nonStopManager.begin(timeout);
     try {
       blockUntilAborted();
     } finally {
       Assert.assertTrue(abortableOperationManager.isAborted());
       Thread.currentThread().interrupt();
       nonStopManager.finish();
       // check that aborted status is cleared.
       Assert.assertFalse(abortableOperationManager.isAborted());
       // check that interrupted flag is cleared.
       Assert.assertFalse(Thread.interrupted());
     }
   }
   long timeTaken = System.nanoTime() - startTime;
   System.out.println("time taken to execute operations " + timeTaken);
   Assert.assertTrue(
       (timeTaken >= loopTimes * TimeUnit.MILLISECONDS.toNanos(timeout)
           && timeTaken
               < (loopTimes * TimeUnit.MILLISECONDS.toNanos(timeout)
                   + TimeUnit.SECONDS.toNanos(2))));
 }
Example no. 9
    private boolean sendMessages(
        RingBuffer<byte[]> ringBuffer, long messagesPerSecond, int runtimeSeconds)
        throws InterruptedException {
      LOGGER.info("Rate: " + messagesPerSecond + ", for " + runtimeSeconds + "s");

      long runtimeNanos = TimeUnit.SECONDS.toNanos(runtimeSeconds);

      long t0 = System.nanoTime();
      long delta = 0;
      long sent = 0;

      try {
        do {
          delta = System.nanoTime() - t0;
          long shouldHaveSent = (messagesPerSecond * delta) / TimeUnit.SECONDS.toNanos(1);

          for (; sent < shouldHaveSent; sent++) {
            if (!send(ringBuffer)) {
              return false;
            }
          }

          LockSupport.parkNanos(1);
        } while (delta <= runtimeNanos);

        Thread.sleep(1000);
        return ringBuffer.hasAvailableCapacity(ringBuffer.getBufferSize());

      } finally {
        while (!ringBuffer.hasAvailableCapacity(ringBuffer.getBufferSize())) {
          Thread.sleep(1000);
        }
      }
    }
Example no. 10
  @Test
  public void testContextAwareTimer() {
    ContextAwareTimer jobTotalDuration = this.context.contextAwareTimer(TOTAL_DURATION);
    Assert.assertEquals(
        this.context.getTimers().get(jobTotalDuration.getName()),
        jobTotalDuration.getInnerMetric());
    Assert.assertEquals(jobTotalDuration.getContext(), this.context);
    Assert.assertEquals(jobTotalDuration.getName(), TOTAL_DURATION);

    jobTotalDuration.update(50, TimeUnit.SECONDS);
    jobTotalDuration.update(100, TimeUnit.SECONDS);
    jobTotalDuration.update(150, TimeUnit.SECONDS);
    Assert.assertEquals(jobTotalDuration.getCount(), 3L);
    Assert.assertEquals(jobTotalDuration.getSnapshot().getMin(), TimeUnit.SECONDS.toNanos(50L));
    Assert.assertEquals(jobTotalDuration.getSnapshot().getMax(), TimeUnit.SECONDS.toNanos(150L));

    Assert.assertTrue(jobTotalDuration.time().stop() >= 0L);
  }
Example no. 11
 protected DataWriter createFileDataWriter(Iterable<DataRecorderChannel> channels) {
   // one write every 20 ms: 1,000,000,000 ns / 20,000,000 ns = 50 writes per second
   int writesPerSecond =
       (int) ((double) TimeUnit.SECONDS.toNanos(1) / TimeUnit.MILLISECONDS.toNanos(20));
   return new FileDataWriter(
       channels,
       dataRecorderFilenameGenerator,
       writesPerSecond,
       estimatedRecordDurationInSeconds);
 }
Example no. 12
  private List<Page> waitForPages(Operator operator, int expectedPageCount)
      throws InterruptedException {
    // read expected pages or until 10 seconds has passed
    long endTime = System.nanoTime() + TimeUnit.SECONDS.toNanos(10);
    List<Page> outputPages = new ArrayList<>();

    boolean greaterThanZero = false;
    while (System.nanoTime() < endTime) {
      if (operator.isFinished()) {
        break;
      }

      if (operator.getOperatorContext().getOperatorStats().getSystemMemoryReservation().toBytes()
          > 0) {
        greaterThanZero = true;
        break;
      } else {
        Thread.sleep(10);
      }
    }
    assertTrue(greaterThanZero);

    while (outputPages.size() < expectedPageCount && System.nanoTime() < endTime) {
      assertEquals(operator.needsInput(), false);
      if (operator.isFinished()) {
        break;
      }

      Page outputPage = operator.getOutput();
      if (outputPage != null) {
        outputPages.add(outputPage);
      } else {
        Thread.sleep(10);
      }
    }

    // sleep for a bit to make sure that there aren't extra pages on the way
    Thread.sleep(10);

    // verify state
    assertEquals(operator.needsInput(), false);
    assertNull(operator.getOutput());

    // verify pages
    assertEquals(outputPages.size(), expectedPageCount);
    for (Page page : outputPages) {
      assertPageEquals(operator.getTypes(), page, PAGE);
    }

    assertEquals(
        operator.getOperatorContext().getOperatorStats().getSystemMemoryReservation().toBytes(), 0);

    return outputPages;
  }
Example no. 13
  @Test
  public void testCleanupWithIndexes()
      throws IOException, ExecutionException, InterruptedException {
    Keyspace keyspace = Keyspace.open(KEYSPACE1);
    ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(CF1);

    List<Row> rows;

    // insert data and verify we get it back w/ range query
    fillCF(cfs, LOOPS);
    rows = Util.getRangeSlice(cfs);
    assertEquals(LOOPS, rows.size());

    SecondaryIndex index = cfs.indexManager.getIndexForColumn(COLUMN);
    long start = System.nanoTime();
    while (!index.isIndexBuilt(COLUMN) && System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10))
      Thread.sleep(10);

    // verify we get it back w/ index query too
    IndexExpression expr = new IndexExpression(COLUMN, IndexExpression.Operator.EQ, VALUE);
    List<IndexExpression> clause = Arrays.asList(expr);
    IDiskAtomFilter filter = new IdentityQueryFilter();
    IPartitioner p = StorageService.getPartitioner();
    Range<RowPosition> range = Util.range("", "");
    rows = keyspace.getColumnFamilyStore(CF1).search(range, clause, filter, Integer.MAX_VALUE);
    assertEquals(LOOPS, rows.size());

    // we don't allow cleanup when the local host has no range, to avoid wiping out all data when a
    // node has not joined the ring.
    // So to make sure cleanup erase everything here, we give the localhost the tiniest possible
    // range.
    TokenMetadata tmd = StorageService.instance.getTokenMetadata();
    byte[] tk1 = new byte[1], tk2 = new byte[1];
    tk1[0] = 2;
    tk2[0] = 1;
    tmd.updateNormalToken(new BytesToken(tk1), InetAddress.getByName("127.0.0.1"));
    tmd.updateNormalToken(new BytesToken(tk2), InetAddress.getByName("127.0.0.2"));

    CompactionManager.instance.performCleanup(cfs, new CounterId.OneShotRenewer());

    // row data should be gone
    rows = Util.getRangeSlice(cfs);
    assertEquals(0, rows.size());

    // not only should it be gone but there should be no data on disk, not even tombstones
    assert cfs.getSSTables().isEmpty();

    // 2ary indexes should result in no results, too (although tombstones won't be gone until
    // compacted)
    rows = cfs.search(range, clause, filter, Integer.MAX_VALUE);
    assertEquals(0, rows.size());
  }
Example no. 14
  /**
   * Checks if there have been more requests than allowed through maxConfigCheck in a 10 second
   * period.
   *
   * <p>If this is the case, then true is returned. If the timeframe between two distinct requests
   * is more than 10 seconds, a fresh timeframe starts. This means that 10 calls every second would
   * trigger an update, while one operation, an 11-second sleep, and one more operation would not.
   *
   * @return true if there were more config check requests than maxConfigCheck in the 10 second
   *     period.
   */
  protected boolean pastReconnThreshold() {
    long currentTime = System.nanoTime();

    if (currentTime - thresholdLastCheck >= TimeUnit.SECONDS.toNanos(10)) {
      configThresholdCount.set(0);
    }

    thresholdLastCheck = currentTime;
    if (configThresholdCount.incrementAndGet() >= maxConfigCheck) {
      return true;
    }

    return false;
  }
Example no. 15
 private long getExpectedDelayNanos(RobotsTxt robotsTxt) {
   long delayNanos = millisToNanos(defaultDelay);
   if (isUsingRobotsTxtCrawlDelay(robotsTxt)) {
     delayNanos = TimeUnit.SECONDS.toNanos((long) (robotsTxt.getCrawlDelay()));
   } else {
     for (DelaySchedule schedule : schedules) {
       if (schedule.isCurrentTimeInSchedule()) {
         delayNanos = millisToNanos(schedule.getDelay());
         break;
       }
     }
   }
   return delayNanos;
 }
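The millisToNanos helper is not shown here; presumably it simply delegates to TimeUnit, along the lines of this assumed one-liner:

private long millisToNanos(long millis) {
  return TimeUnit.MILLISECONDS.toNanos(millis);
}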
Example no. 16
 private IndexReader createIndex(int docCount, int facetFields, boolean ram)
     throws CorruptIndexException, LockObtainFailedException, IOException {
   Directory directory;
   if (ram) {
     directory = new RAMDirectory();
   } else {
     File dir = new File("./target/tmp/facet_tmp");
     if (dir.exists()) {
       directory = FSDirectory.open(dir);
       if (DirectoryReader.indexExists(directory)) {
         DirectoryReader reader = DirectoryReader.open(directory);
         if (reader.numDocs() == docCount) {
           return reader;
         }
         reader.close();
         directory.close();
       }
     }
     rmr(dir);
     directory = FSDirectory.open(dir);
   }
   IndexWriterConfig conf = new IndexWriterConfig(LUCENE_VERSION, new KeywordAnalyzer());
   IndexWriter writer = new IndexWriter(directory, conf);
   FieldType fieldType = new FieldType();
   fieldType.setStored(true);
   fieldType.setIndexed(true);
   fieldType.setOmitNorms(true);
   long start = System.nanoTime();
   for (int i = 0; i < docCount; i++) {
     long now = System.nanoTime();
     if (start + TimeUnit.SECONDS.toNanos(5) < now) {
       System.out.println("Indexing doc " + i + " of " + docCount);
       start = System.nanoTime();
     }
     Document document = new Document();
     document.add(new Field("f1", "value", fieldType));
     document.add(new Field("f2", "v" + i, fieldType));
     for (int f = 0; f < facetFields; f++) {
       document.add(new Field("facet" + f, "value", fieldType));
     }
     writer.addDocument(document);
   }
   writer.close();
   return DirectoryReader.open(directory);
 }
Example no. 17
  private void waitForFinished(Operator operator) throws InterruptedException {
    // wait for finished or until 10 seconds has passed
    long endTime = System.nanoTime() + TimeUnit.SECONDS.toNanos(10);
    while (System.nanoTime() < endTime) {
      assertEquals(operator.needsInput(), false);
      assertNull(operator.getOutput());
      if (operator.isFinished()) {
        break;
      }
      Thread.sleep(10);
    }

    // verify final state
    assertEquals(operator.isFinished(), true);
    assertEquals(operator.needsInput(), false);
    assertNull(operator.getOutput());
    assertEquals(
        operator.getOperatorContext().getOperatorStats().getSystemMemoryReservation().toBytes(), 0);
  }
Example no. 18
  public void test(int threadCount) {
    threads = new ReadThread[threadCount];

    for (int k = 0; k < threads.length; k++) {
      threads[k] = new ReadThread(k);
    }

    long startNs = System.nanoTime();

    startAll(threads);
    joinAll(threads);

    long transactionCount = threadCount * readCount;

    long durationNs = System.nanoTime() - startNs;
    double transactionsPerSecond =
        (1.0d * transactionCount * TimeUnit.SECONDS.toNanos(1)) / durationNs;
    System.out.printf("Performance %s transactions/second\n", format(transactionsPerSecond));
  }
Example no. 19
 public Cookie(
     String name,
     String value,
     String domain,
     String path,
     int maxAge,
     boolean secure,
     int version,
     String comment) {
   this.name = name;
   this.value = value;
   this.domain = domain;
   this.path = path;
   this.maxAge = maxAge;
   this.secure = secure;
   this.version = version;
   this.comment = comment;
   this.expirationTime = maxAge < 0 ? -1 : System.nanoTime() + TimeUnit.SECONDS.toNanos(maxAge);
 }
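Since maxAge < 0 is stored as the sentinel -1 (a session cookie with no fixed expiry), the matching expiry check would plausibly look like the following hypothetical method, using subtraction so the nanoTime comparison stays overflow-safe:

public boolean isExpired() {
  // -1 means no expiration time was set (session cookie).
  return expirationTime != -1 && System.nanoTime() - expirationTime >= 0;
}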
Example no. 20
 public static void waitForInitRehashToComplete(Cache... caches) {
   int gracetime = 30000; // 30 seconds?
   long giveup = System.currentTimeMillis() + gracetime;
   for (Cache c : caches) {
     DistributionManagerImpl dmi =
         (DistributionManagerImpl) TestingUtil.extractComponent(c, DistributionManager.class);
     while (!dmi.isJoinComplete()) {
       if (System.currentTimeMillis() > giveup) {
         String message =
             "Timed out waiting for join to complete on node "
                 + dmi.getRpcManager().getAddress()
                 + " !";
         log.error(message);
         throw new RuntimeException(message);
       }
       LockSupport.parkNanos(TimeUnit.SECONDS.toNanos(1));
     }
     log.trace("Node " + dmi.getRpcManager().getAddress() + " finished join task.");
   }
 }
Example no. 21
  @Test
  public void clientPingsServerHttp2() throws Exception {
    peer.setVariantAndClient(HTTP_2, false);

    // write the mocking script
    peer.acceptFrame(); // PING
    peer.sendFrame().ping(true, 1, 5);
    peer.play();

    // play it back
    FramedConnection connection = connection(peer, HTTP_2);
    Ping ping = connection.ping();
    assertTrue(ping.roundTripTime() > 0);
    assertTrue(ping.roundTripTime() < TimeUnit.SECONDS.toNanos(1));

    // verify the peer received what was expected
    MockSpdyPeer.InFrame pingFrame = peer.takeFrame();
    assertEquals(0, pingFrame.streamId);
    assertEquals(1, pingFrame.payload1);
    assertEquals(0x4f4b6f6b, pingFrame.payload2); // connection.ping() sets this.
    assertFalse(pingFrame.ack);
  }
Example no. 22
 public static void waitForRehashToComplete(Cache cache, int groupSize) {
   LockSupport.parkNanos(TimeUnit.SECONDS.toNanos(1));
   int gracetime = 30000; // 30 seconds?
   long giveup = System.currentTimeMillis() + gracetime;
   CacheViewsManager cacheViewsManager =
       TestingUtil.extractGlobalComponent(cache.getCacheManager(), CacheViewsManager.class);
   RpcManager rpcManager = TestingUtil.extractComponent(cache, RpcManager.class);
   while (cacheViewsManager.getCommittedView(cache.getName()).getMembers().size() != groupSize) {
     if (System.currentTimeMillis() > giveup) {
       String message =
           String.format(
               "Timed out waiting for rehash to complete on node %s, expected member count %s, current member count is %s!",
               rpcManager.getAddress(),
               groupSize,
               cacheViewsManager.getCommittedView(cache.getName()));
       log.error(message);
       throw new RuntimeException(message);
     }
     LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
   }
   log.trace("Node " + rpcManager.getAddress() + " finished rehash task.");
 }
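Examples no. 6, 20, and 22 all share the same shape: poll a condition, park briefly between checks, and fail with a descriptive error once a grace period elapses. A generic sketch of that pattern (awaitCondition is a hypothetical helper, not part of the test utilities above):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.LockSupport;
import java.util.function.BooleanSupplier;

final class Polling {
  static void awaitCondition(BooleanSupplier condition, long timeoutMillis, String description) {
    long giveUp = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMillis);
    while (!condition.getAsBoolean()) {
      if (System.nanoTime() - giveUp > 0) {
        throw new RuntimeException("Timed out waiting for " + description);
      }
      LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100)); // back off between checks
    }
  }
}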
Example no. 23
 private void loadObjects(
     NamedCache cache, ObjectGenerator<?, ?> generator, long rangeStart, long rangeFinish) {
   cache = TxLite.ensureWriteable(cache);
   int putSize = 100;
   long blockTs = System.nanoTime();
   long blockStart = rangeStart;
   for (long i = rangeStart; i < rangeFinish; i += putSize) {
     if (i % 10000 == 0) {
       String stats = "";
       if (i > blockStart) {
         long blockSize = i - blockStart;
         long blockTime = System.nanoTime() - blockTs;
         double avg = (((double) blockSize) / blockTime) * TimeUnit.SECONDS.toNanos(1);
         stats =
             " block "
                 + blockSize
                 + " in "
                 + TimeUnit.NANOSECONDS.toMillis(blockTime)
                 + "ms, AVG: "
                 + avg
                 + " put/sec, "
                 + avg / putSize
                 + " tx/sec, batchSize "
                 + putSize;
       }
       //		        println("Done " + (i - rangeStart) + stats);
       println("Done " + cache.size() + stats);
       blockTs = System.nanoTime();
       blockStart = i;
     }
     long j = Math.min(rangeFinish, i + putSize);
     cache.putAll(generator.generate(i, j));
     TxLite.commit(cache);
   }
   //		TxLite.commit(cache);
   TxLite.closeSession(cache);
 }
Example no. 24
  @Test // see SSHD-554
  public void testSetSocketOptions() throws Exception {
    try (SshServer sshd = setupTestServer()) {
      PropertyResolverUtils.updateProperty(sshd, FactoryManager.SOCKET_KEEPALIVE, true);
      PropertyResolverUtils.updateProperty(sshd, FactoryManager.SOCKET_LINGER, 5);
      PropertyResolverUtils.updateProperty(sshd, FactoryManager.SOCKET_RCVBUF, 1024);
      PropertyResolverUtils.updateProperty(sshd, FactoryManager.SOCKET_REUSEADDR, true);
      PropertyResolverUtils.updateProperty(sshd, FactoryManager.SOCKET_SNDBUF, 1024);
      PropertyResolverUtils.updateProperty(sshd, FactoryManager.TCP_NODELAY, true);

      sshd.start();

      int port = sshd.getPort();
      long startTime = System.nanoTime();
      try (Socket s = new Socket(TEST_LOCALHOST, port)) {
        long endTime = System.nanoTime();
        long duration = endTime - startTime;
        assertTrue(
            "Connect duration is too high: " + duration, duration <= TimeUnit.SECONDS.toNanos(15L));
      } finally {
        sshd.stop();
      }
    }
  }
Example no. 25
  public ALM_FS_Tracefile(AL_FixedScheduleLoadGenerator owner) {
    this.settings = owner.getSettings();

    this.experimentStart = owner.getScheduler().now() + TimeUnit.SECONDS.toNanos(2);
    System.out.println("LOAD_GENERATOR: start at " + experimentStart);

    // try to load trace-file:
    System.out.println("TRACE_READER: loading trace file");
    String traceFilePath =
        Paths.getProperty("LG_TRACE_FILE_PATH") + settings.getProperty("AL-TRACE_FILE-NAME");
    resetReader(traceFilePath);

    // create client
    owner.getLoadGenerator().commandLineParameters.gMixTool = ToolName.CLIENT;
    this.client = new AnonNode(owner.getLoadGenerator().commandLineParameters);
    this.scheduleTarget = new ALRR_BasicWriter(this, client.IS_DUPLEX);

    // determine number of clients and lines; create ClientWrapper objects:
    this.clientReferences = new HashMap<Integer, ALRR_ClientWrapper>(1000); // TODO: dynamic
    String line;
    try {
      while ((line = traceReader.readLine()) != null) {
        Integer id = Integer.parseInt(line.split("(,|;)")[1]);
        ALRR_ClientWrapper cw = clientReferences.get(id);
        if (cw == null) clientReferences.put(id, new ALRR_ClientWrapper(id));
      }
    } catch (IOException e) {
      e.printStackTrace();
      throw new RuntimeException("TRACE_READER: error traversing trace file");
    }
    System.out.println(
        "TRACE_READER: found traces for " + clientReferences.size() + " clients in the trace file");
    resetReader(traceFilePath);

    // create array etc.:
    this.clientsArray = new ALRR_ClientWrapper[clientReferences.size()];
    int i = 0;
    for (ALRR_ClientWrapper cw : clientReferences.values()) {
      clientsArray[i++] = cw;
      // cw.requestQueue = new ArrayBlockingQueue<ArrayIterator<ApplicationLevelMessage>>(3);
      // if (settings.getPropertyAsBoolean("GLOBAL_IS_DUPLEX"))
      //	cw.replyQueue = new ConcurrentLinkedQueue<ApplicationLevelMessage> ();
    }

    // generate and connect sockets
    CommunicationMode cm =
        client.IS_DUPLEX ? CommunicationMode.DUPLEX : CommunicationMode.SIMPLEX_SENDER;
    // generate sockets:
    for (ALRR_ClientWrapper cw : clientsArray) {
      cw.socket = client.createStreamSocket(cm, client.ROUTING_MODE != RoutingMode.CASCADE);
    }
    // connect sockets:
    int port = settings.getPropertyAsInt("SERVICE_PORT1");
    System.out.println("LOAD_GENERATOR: connecting clients...");
    for (ALRR_ClientWrapper cw : clientsArray)
      try {
        cw.socket.connect(port);
        cw.outputStream = new BufferedOutputStream(cw.socket.getOutputStream());
        if (client.IS_DUPLEX) cw.inputStream = cw.socket.getInputStream();
      } catch (IOException e) {
        e.printStackTrace();
      }

    if (client.IS_DUPLEX) {
      this.replyReceiver = new ALRR_ReplyReceiver(clientsArray, settings);
      // this.replyReceiver.registerObserver(this);
      this.replyReceiver.start();
    }
  }
Example no. 26
/** Immutable data about an image and the transformations that will be applied to it. */
public final class Request {
  private static final long TOO_LONG_LOG = TimeUnit.SECONDS.toNanos(5);

  /** A unique ID for the request. */
  int id;
  /** The time that the request was first submitted (in nanos). */
  long started;
  /** The {@link NetworkPolicy} to use for this request. */
  int networkPolicy;

  /**
   * The image URI.
   *
   * <p>This is mutually exclusive with {@link #resourceId}.
   */
  public final Uri uri;
  /**
   * The image resource ID.
   *
   * <p>This is mutually exclusive with {@link #uri}.
   */
  public final int resourceId;
  /**
   * Optional stable key for this request to be used instead of the URI or resource ID when caching.
   * Two requests with the same value are considered to be for the same resource.
   */
  public final String stableKey;
  /** List of custom transformations to be applied after the built-in transformations. */
  public final List<Transformation> transformations;
  /** Target image width for resizing. */
  public final int targetWidth;
  /** Target image height for resizing. */
  public final int targetHeight;
  /**
   * True if the final image should use the 'centerCrop' scale technique.
   *
   * <p>This is mutually exclusive with {@link #centerInside}.
   */
  public final boolean centerCrop;
  /**
   * True if the final image should use the 'centerInside' scale technique.
   *
   * <p>This is mutually exclusive with {@link #centerCrop}.
   */
  public final boolean centerInside;

  public final boolean onlyScaleDown;
  /** Amount to rotate the image in degrees. */
  public final float rotationDegrees;
  /** Rotation pivot on the X axis. */
  public final float rotationPivotX;
  /** Rotation pivot on the Y axis. */
  public final float rotationPivotY;
  /** Whether or not {@link #rotationPivotX} and {@link #rotationPivotY} are set. */
  public final boolean hasRotationPivot;
  /** True if image should be decoded with inPurgeable and inInputShareable. */
  public final boolean purgeable;
  /** Target image config for decoding. */
  public final Bitmap.Config config;
  /** The priority of this request. */
  public final Priority priority;

  private Request(
      Uri uri,
      int resourceId,
      String stableKey,
      List<Transformation> transformations,
      int targetWidth,
      int targetHeight,
      boolean centerCrop,
      boolean centerInside,
      boolean onlyScaleDown,
      float rotationDegrees,
      float rotationPivotX,
      float rotationPivotY,
      boolean hasRotationPivot,
      boolean purgeable,
      Bitmap.Config config,
      Priority priority) {
    this.uri = uri;
    this.resourceId = resourceId;
    this.stableKey = stableKey;
    if (transformations == null) {
      this.transformations = null;
    } else {
      this.transformations = unmodifiableList(transformations);
    }
    this.targetWidth = targetWidth;
    this.targetHeight = targetHeight;
    this.centerCrop = centerCrop;
    this.centerInside = centerInside;
    this.onlyScaleDown = onlyScaleDown;
    this.rotationDegrees = rotationDegrees;
    this.rotationPivotX = rotationPivotX;
    this.rotationPivotY = rotationPivotY;
    this.hasRotationPivot = hasRotationPivot;
    this.purgeable = purgeable;
    this.config = config;
    this.priority = priority;
  }

  @Override
  public String toString() {
    final StringBuilder builder = new StringBuilder("Request{");
    if (resourceId > 0) {
      builder.append(resourceId);
    } else {
      builder.append(uri);
    }
    if (transformations != null && !transformations.isEmpty()) {
      for (Transformation transformation : transformations) {
        builder.append(' ').append(transformation.key());
      }
    }
    if (stableKey != null) {
      builder.append(" stableKey(").append(stableKey).append(')');
    }
    if (targetWidth > 0) {
      builder.append(" resize(").append(targetWidth).append(',').append(targetHeight).append(')');
    }
    if (centerCrop) {
      builder.append(" centerCrop");
    }
    if (centerInside) {
      builder.append(" centerInside");
    }
    if (rotationDegrees != 0) {
      builder.append(" rotation(").append(rotationDegrees);
      if (hasRotationPivot) {
        builder.append(" @ ").append(rotationPivotX).append(',').append(rotationPivotY);
      }
      builder.append(')');
    }
    if (purgeable) {
      builder.append(" purgeable");
    }
    if (config != null) {
      builder.append(' ').append(config);
    }
    builder.append('}');

    return builder.toString();
  }

  String logId() {
    long delta = System.nanoTime() - started;
    if (delta > TOO_LONG_LOG) {
      return plainId() + '+' + TimeUnit.NANOSECONDS.toSeconds(delta) + 's';
    }
    return plainId() + '+' + TimeUnit.NANOSECONDS.toMillis(delta) + "ms";
  }

  String plainId() {
    return "[R" + id + ']';
  }

  String getName() {
    if (uri != null) {
      return String.valueOf(uri.getPath());
    }
    return Integer.toHexString(resourceId);
  }

  public boolean hasSize() {
    return targetWidth != 0 || targetHeight != 0;
  }

  boolean needsTransformation() {
    return needsMatrixTransform() || hasCustomTransformations();
  }

  boolean needsMatrixTransform() {
    return hasSize() || rotationDegrees != 0;
  }

  boolean hasCustomTransformations() {
    return transformations != null;
  }

  public Builder buildUpon() {
    return new Builder(this);
  }

  /** Builder for creating {@link Request} instances. */
  public static final class Builder {
    private Uri uri;
    private int resourceId;
    private String stableKey;
    private int targetWidth;
    private int targetHeight;
    private boolean centerCrop;
    private boolean centerInside;
    private boolean onlyScaleDown;
    private float rotationDegrees;
    private float rotationPivotX;
    private float rotationPivotY;
    private boolean hasRotationPivot;
    private boolean purgeable;
    private List<Transformation> transformations;
    private Bitmap.Config config;
    private Priority priority;

    /** Start building a request using the specified {@link Uri}. */
    public Builder(@NonNull Uri uri) {
      setUri(uri);
    }

    /** Start building a request using the specified resource ID. */
    public Builder(@DrawableRes int resourceId) {
      setResourceId(resourceId);
    }

    Builder(Uri uri, int resourceId, Bitmap.Config bitmapConfig) {
      this.uri = uri;
      this.resourceId = resourceId;
      this.config = bitmapConfig;
    }

    private Builder(Request request) {
      uri = request.uri;
      resourceId = request.resourceId;
      stableKey = request.stableKey;
      targetWidth = request.targetWidth;
      targetHeight = request.targetHeight;
      centerCrop = request.centerCrop;
      centerInside = request.centerInside;
      rotationDegrees = request.rotationDegrees;
      rotationPivotX = request.rotationPivotX;
      rotationPivotY = request.rotationPivotY;
      hasRotationPivot = request.hasRotationPivot;
      purgeable = request.purgeable;
      onlyScaleDown = request.onlyScaleDown;
      if (request.transformations != null) {
        transformations = new ArrayList<Transformation>(request.transformations);
      }
      config = request.config;
      priority = request.priority;
    }

    boolean hasImage() {
      return uri != null || resourceId != 0;
    }

    boolean hasSize() {
      return targetWidth != 0 || targetHeight != 0;
    }

    boolean hasPriority() {
      return priority != null;
    }

    /**
     * Set the target image Uri.
     *
     * <p>This will clear an image resource ID if one is set.
     */
    public Builder setUri(@NonNull Uri uri) {
      if (uri == null) {
        throw new IllegalArgumentException("Image URI may not be null.");
      }
      this.uri = uri;
      this.resourceId = 0;
      return this;
    }

    /**
     * Set the target image resource ID.
     *
     * <p>This will clear an image Uri if one is set.
     */
    public Builder setResourceId(@DrawableRes int resourceId) {
      if (resourceId == 0) {
        throw new IllegalArgumentException("Image resource ID may not be 0.");
      }
      this.resourceId = resourceId;
      this.uri = null;
      return this;
    }

    /**
     * Set the stable key to be used instead of the URI or resource ID when caching. Two requests
     * with the same value are considered to be for the same resource.
     */
    public Builder stableKey(@Nullable String stableKey) {
      this.stableKey = stableKey;
      return this;
    }

    /**
     * Resize the image to the specified size in pixels. Use 0 as desired dimension to resize
     * keeping aspect ratio.
     */
    public Builder resize(@Px int targetWidth, @Px int targetHeight) {
      if (targetWidth < 0) {
        throw new IllegalArgumentException("Width must be positive number or 0.");
      }
      if (targetHeight < 0) {
        throw new IllegalArgumentException("Height must be positive number or 0.");
      }
      if (targetHeight == 0 && targetWidth == 0) {
        throw new IllegalArgumentException("At least one dimension has to be positive number.");
      }
      this.targetWidth = targetWidth;
      this.targetHeight = targetHeight;
      return this;
    }

    /** Clear the resize transformation, if any. This will also clear center crop/inside if set. */
    public Builder clearResize() {
      targetWidth = 0;
      targetHeight = 0;
      centerCrop = false;
      centerInside = false;
      return this;
    }

    /**
     * Crops an image inside of the bounds specified by {@link #resize(int, int)} rather than
     * distorting the aspect ratio. This cropping technique scales the image so that it fills the
     * requested bounds and then crops the extra.
     */
    public Builder centerCrop() {
      if (centerInside) {
        throw new IllegalStateException("Center crop can not be used after calling centerInside");
      }
      centerCrop = true;
      return this;
    }

    /** Clear the center crop transformation flag, if set. */
    public Builder clearCenterCrop() {
      centerCrop = false;
      return this;
    }

    /**
     * Centers an image inside of the bounds specified by {@link #resize(int, int)}. This scales the
     * image so that both dimensions are equal to or less than the requested bounds.
     */
    public Builder centerInside() {
      if (centerCrop) {
        throw new IllegalStateException("Center inside can not be used after calling centerCrop");
      }
      centerInside = true;
      return this;
    }

    /** Clear the center inside transformation flag, if set. */
    public Builder clearCenterInside() {
      centerInside = false;
      return this;
    }

    /**
     * Only resize an image if the original image size is bigger than the target size specified by
     * {@link #resize(int, int)}.
     */
    public Builder onlyScaleDown() {
      if (targetHeight == 0 && targetWidth == 0) {
        throw new IllegalStateException("onlyScaleDown can not be applied without resize");
      }
      onlyScaleDown = true;
      return this;
    }

    /** Clear the onlyScaleDown flag, if set. */
    public Builder clearOnlyScaleDown() {
      onlyScaleDown = false;
      return this;
    }

    /** Rotate the image by the specified degrees. */
    public Builder rotate(float degrees) {
      rotationDegrees = degrees;
      return this;
    }

    /** Rotate the image by the specified degrees around a pivot point. */
    public Builder rotate(float degrees, float pivotX, float pivotY) {
      rotationDegrees = degrees;
      rotationPivotX = pivotX;
      rotationPivotY = pivotY;
      hasRotationPivot = true;
      return this;
    }

    /** Clear the rotation transformation, if any. */
    public Builder clearRotation() {
      rotationDegrees = 0;
      rotationPivotX = 0;
      rotationPivotY = 0;
      hasRotationPivot = false;
      return this;
    }

    public Builder purgeable() {
      purgeable = true;
      return this;
    }

    /** Decode the image using the specified config. */
    public Builder config(@NonNull Bitmap.Config config) {
      if (config == null) {
        throw new IllegalArgumentException("config == null");
      }
      this.config = config;
      return this;
    }

    /** Execute request using the specified priority. */
    public Builder priority(@NonNull Priority priority) {
      if (priority == null) {
        throw new IllegalArgumentException("Priority invalid.");
      }
      if (this.priority != null) {
        throw new IllegalStateException("Priority already set.");
      }
      this.priority = priority;
      return this;
    }

    /**
     * Add a custom transformation to be applied to the image.
     *
     * <p>Custom transformations will always be run after the built-in transformations.
     */
    public Builder transform(@NonNull Transformation transformation) {
      if (transformation == null) {
        throw new IllegalArgumentException("Transformation must not be null.");
      }
      if (transformation.key() == null) {
        throw new IllegalArgumentException("Transformation key must not be null.");
      }
      if (transformations == null) {
        transformations = new ArrayList<Transformation>(2);
      }
      transformations.add(transformation);
      return this;
    }

    /**
     * Add a list of custom transformations to be applied to the image.
     *
     * <p>Custom transformations will always be run after the built-in transformations.
     */
    public Builder transform(@NonNull List<? extends Transformation> transformations) {
      if (transformations == null) {
        throw new IllegalArgumentException("Transformation list must not be null.");
      }
      for (int i = 0, size = transformations.size(); i < size; i++) {
        transform(transformations.get(i));
      }
      return this;
    }

    /** Create the immutable {@link Request} object. */
    public Request build() {
      if (centerInside && centerCrop) {
        throw new IllegalStateException("Center crop and center inside can not be used together.");
      }
      if (centerCrop && (targetWidth == 0 && targetHeight == 0)) {
        throw new IllegalStateException(
            "Center crop requires calling resize with positive width and height.");
      }
      if (centerInside && (targetWidth == 0 && targetHeight == 0)) {
        throw new IllegalStateException(
            "Center inside requires calling resize with positive width and height.");
      }
      if (priority == null) {
        priority = Priority.NORMAL;
      }
      return new Request(
          uri,
          resourceId,
          stableKey,
          transformations,
          targetWidth,
          targetHeight,
          centerCrop,
          centerInside,
          onlyScaleDown,
          rotationDegrees,
          rotationPivotX,
          rotationPivotY,
          hasRotationPivot,
          purgeable,
          config,
          priority);
    }
  }
}
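A brief usage sketch for the builder above (the URI, size, and rotation are placeholder choices; Priority.NORMAL is the default applied by build() anyway):

Request request =
    new Request.Builder(Uri.parse("https://example.com/image.png"))
        .resize(200, 200)
        .centerInside()
        .rotate(90f)
        .priority(Priority.NORMAL)
        .build();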
Example no. 27
 @Test
 public void park() {
   LockSupport.parkNanos(this, TimeUnit.SECONDS.toNanos(10));
 }
Example no. 28
    /**
     * Compiles the source code in each supplied CompilationUnitBuilder into a CompilationUnit and
     * reports errors.
     *
     * <p>A compilation unit is considered invalid if any of its dependencies (recursively) isn't
     * being compiled and isn't in allValidClasses, or if it has a signature that doesn't match a
     * dependency. Valid compilation units will be added to cachedUnits and the unit cache, and
     * their types will be added to allValidClasses. Invalid compilation units will be removed.
     *
     * <p>I/O: serializes the AST of each Java type to DiskCache. (This happens even if the
     * compilation unit is later dropped.) If we're using the persistent unit cache, each valid unit
     * will also be serialized to the gwt-unitcache file. (As a result, each AST will be copied
     * there from the DiskCache.) A new persistent unit cache file will be created each time
     * compile() is called (if there's at least one valid unit) and the entire cache will be
     * rewritten to disk every {@link PersistentUnitCache#CACHE_FILE_THRESHOLD} files.
     *
     * <p>This function won't report errors in invalid source files when suppressErrors is true;
     * instead, a summary giving the number of invalid files will be logged.
     *
     * <p>If the JDT compiler aborts, logs an error and throws UnableToCompleteException. (This
     * doesn't happen for normal compile errors.)
     */
    Collection<CompilationUnit> compile(
        TreeLogger logger,
        CompilerContext compilerContext,
        Collection<CompilationUnitBuilder> builders,
        Map<CompilationUnitBuilder, CompilationUnit> cachedUnits,
        EventType eventType)
        throws UnableToCompleteException {
      UnitCache unitCache = compilerContext.getUnitCache();
      // Initialize the set of valid classes to the initially cached units.
      for (CompilationUnit unit : cachedUnits.values()) {
        for (CompiledClass cc : unit.getCompiledClasses()) {
          // Map by source name.
          String sourceName = cc.getSourceName();
          allValidClasses.put(sourceName, cc);
        }
      }

      ArrayList<CompilationUnit> resultUnits = new ArrayList<CompilationUnit>();
      do {
        final TreeLogger branch = logger.branch(TreeLogger.TRACE, "Compiling...");
        // Compile anything that needs to be compiled.
        buildQueue = new LinkedBlockingQueue<CompilationUnitBuilder>();
        final ArrayList<CompilationUnit> newlyBuiltUnits = new ArrayList<CompilationUnit>();
        final CompilationUnitBuilder sentinel = CompilationUnitBuilder.create((GeneratedUnit) null);
        final Throwable[] workerException = new Throwable[1];
        final ProgressLogger progressLogger =
            new ProgressLogger(branch, TreeLogger.TRACE, builders.size(), 10);
        Thread buildThread =
            new Thread() {
              @Override
              public void run() {
                int processedCompilationUnitBuilders = 0;
                try {
                  do {
                    CompilationUnitBuilder builder = buildQueue.take();
                    if (!progressLogger.isTimerStarted()) {
                      // Set start time here, after the first job has arrived, since it can take
                      // a little while for the first job to arrive, and this helps with the
                      // accuracy of the estimated times.
                      progressLogger.startTimer();
                    }
                    if (builder == sentinel) {
                      return;
                    }
                    // Expensive, must serialize GWT AST types to bytes.
                    CompilationUnit unit = builder.build();
                    newlyBuiltUnits.add(unit);

                    processedCompilationUnitBuilders++;
                    progressLogger.updateProgress(processedCompilationUnitBuilders);
                  } while (true);
                } catch (Throwable e) {
                  workerException[0] = e;
                }
              }
            };
        buildThread.setName("CompilationUnitBuilder");
        buildThread.start();
        Event jdtCompilerEvent = SpeedTracerLogger.start(eventType);
        long compilationStartNanos = System.nanoTime();
        try {
          compiler.doCompile(branch, builders);
        } finally {
          jdtCompilerEvent.end();
        }
        buildQueue.add(sentinel);
        try {
          buildThread.join();
          long compilationNanos = System.nanoTime() - compilationStartNanos;
          // Convert nanos to seconds.
          double compilationSeconds = compilationNanos / (double) TimeUnit.SECONDS.toNanos(1);
          branch.log(
              TreeLogger.TRACE,
              String.format("Compilation completed in %.02f seconds", compilationSeconds));
          if (workerException[0] != null) {
            throw workerException[0];
          }
        } catch (RuntimeException e) {
          throw e;
        } catch (Throwable e) {
          throw new RuntimeException("Exception processing units", e);
        } finally {
          buildQueue = null;
        }
        resultUnits.addAll(newlyBuiltUnits);
        builders.clear();

        // Resolve all newly built unit deps against the global classes.
        for (CompilationUnit unit : newlyBuiltUnits) {
          unit.getDependencies().resolve(allValidClasses);
        }

        /*
         * Invalidate any cached units with invalid refs.
         */
        Collection<CompilationUnit> invalidatedUnits = new ArrayList<CompilationUnit>();
        for (Iterator<Entry<CompilationUnitBuilder, CompilationUnit>> it =
                cachedUnits.entrySet().iterator();
            it.hasNext(); ) {
          Entry<CompilationUnitBuilder, CompilationUnit> entry = it.next();
          CompilationUnit unit = entry.getValue();
          boolean isValid = unit.getDependencies().validate(logger, allValidClasses);
          if (isValid && unit.isError()) {
            // See if the unit has classes that can't provide a
            // NameEnvironmentAnswer
            for (CompiledClass cc : unit.getCompiledClasses()) {
              try {
                cc.getNameEnvironmentAnswer();
              } catch (ClassFormatException ex) {
                isValid = false;
                break;
              }
            }
          }
          if (!isValid) {
            if (logger.isLoggable(TreeLogger.TRACE)) {
              logger.log(TreeLogger.TRACE, "Invalid Unit: " + unit.getTypeName());
            }
            invalidatedUnits.add(unit);
            builders.add(entry.getKey());
            it.remove();
          }
        }

        if (invalidatedUnits.size() > 0) {
          if (logger.isLoggable(TreeLogger.TRACE)) {
            logger.log(TreeLogger.TRACE, "Invalid units found: " + invalidatedUnits.size());
          }
        }

        // Any units we invalidated must now be removed from the valid classes.
        for (CompilationUnit unit : invalidatedUnits) {
          for (CompiledClass cc : unit.getCompiledClasses()) {
            allValidClasses.remove(cc.getSourceName());
          }
        }
      } while (builders.size() > 0);

      for (CompilationUnit unit : resultUnits) {
        unitCache.add(unit);
      }

      // Any remaining cached units are valid now.
      resultUnits.addAll(cachedUnits.values());

      // Done with a pass of the build - tell the cache it's OK to clean up
      // stale cache files.
      unitCache.cleanup(logger);

      // Sort, then report all errors (re-report for cached units).
      Collections.sort(resultUnits, CompilationUnit.COMPARATOR);
      logger = logger.branch(TreeLogger.DEBUG, "Validating units:");
      int errorCount = 0;
      for (CompilationUnit unit : resultUnits) {
        if (CompilationProblemReporter.reportErrors(logger, unit, suppressErrors)) {
          errorCount++;
        }
      }
      if (suppressErrors
          && errorCount > 0
          && !logger.isLoggable(TreeLogger.TRACE)
          && logger.isLoggable(TreeLogger.INFO)) {
        logger.log(
            TreeLogger.INFO,
            "Ignored "
                + errorCount
                + " unit"
                + (errorCount > 1 ? "s" : "")
                + " with compilation errors in first pass.\n"
                + "Compile with -strict or with -logLevel set to TRACE or DEBUG to see all errors.");
      }
      return resultUnits;
    }
Example no. 29
public class EmbeddedPingPong {
  private static final int PING_STREAM_ID = SampleConfiguration.PING_STREAM_ID;
  private static final int PONG_STREAM_ID = SampleConfiguration.PONG_STREAM_ID;
  private static final int NUMBER_OF_MESSAGES = SampleConfiguration.NUMBER_OF_MESSAGES;
  private static final int WARMUP_NUMBER_OF_MESSAGES =
      SampleConfiguration.WARMUP_NUMBER_OF_MESSAGES;
  private static final int WARMUP_NUMBER_OF_ITERATIONS =
      SampleConfiguration.WARMUP_NUMBER_OF_ITERATIONS;
  private static final int MESSAGE_LENGTH = SampleConfiguration.MESSAGE_LENGTH;
  private static final int FRAGMENT_COUNT_LIMIT = SampleConfiguration.FRAGMENT_COUNT_LIMIT;
  private static final int FRAME_COUNT_LIMIT = SampleConfiguration.FRAGMENT_COUNT_LIMIT;
  private static final String PING_CHANNEL = SampleConfiguration.PING_CHANNEL;
  private static final String PONG_CHANNEL = SampleConfiguration.PONG_CHANNEL;

  private static final UnsafeBuffer ATOMIC_BUFFER =
      new UnsafeBuffer(BufferUtil.allocateDirectAligned(MESSAGE_LENGTH, BitUtil.CACHE_LINE_LENGTH));
  private static final Histogram HISTOGRAM = new Histogram(TimeUnit.SECONDS.toNanos(10), 3);
  private static final CountDownLatch PONG_IMAGE_LATCH = new CountDownLatch(1);
  private static final BusySpinIdleStrategy PING_HANDLER_IDLE_STRATEGY = new BusySpinIdleStrategy();
  private static final AtomicBoolean RUNNING = new AtomicBoolean(true);

  public static void main(final String[] args) throws Exception {
    MediaDriver.loadPropertiesFiles(args);

    final MediaDriver.Context ctx =
        new MediaDriver.Context()
            .threadingMode(ThreadingMode.DEDICATED)
            .conductorIdleStrategy(new BackoffIdleStrategy(1, 1, 1, 1))
            .receiverIdleStrategy(new NoOpIdleStrategy())
            .senderIdleStrategy(new NoOpIdleStrategy());

    try (final MediaDriver ignored = MediaDriver.launch(ctx)) {
      final Thread pongThread = startPong(ignored.aeronDirectoryName());
      pongThread.start();

      runPing(ignored.aeronDirectoryName());
      RUNNING.set(false);
      pongThread.join();

      System.out.println("Shutdown Driver...");
    }
  }

  private static void runPing(final String embeddedDirName) throws InterruptedException {
    final Aeron.Context ctx =
        new Aeron.Context()
            .availableImageHandler(EmbeddedPingPong::availablePongImageHandler)
            .aeronDirectoryName(embeddedDirName);

    System.out.println("Publishing Ping at " + PING_CHANNEL + " on stream Id " + PING_STREAM_ID);
    System.out.println("Subscribing Pong at " + PONG_CHANNEL + " on stream Id " + PONG_STREAM_ID);
    System.out.println("Message size of " + MESSAGE_LENGTH + " bytes");

    final FragmentAssembler dataHandler = new FragmentAssembler(EmbeddedPingPong::pongHandler);

    try (final Aeron aeron = Aeron.connect(ctx);
        final Publication pingPublication = aeron.addPublication(PING_CHANNEL, PING_STREAM_ID);
        final Subscription pongSubscription = aeron.addSubscription(PONG_CHANNEL, PONG_STREAM_ID)) {
      System.out.println("Waiting for new image from Pong...");

      PONG_IMAGE_LATCH.await();

      System.out.println(
          "Warming up... "
              + WARMUP_NUMBER_OF_ITERATIONS
              + " iterations of "
              + WARMUP_NUMBER_OF_MESSAGES
              + " messages");

      for (int i = 0; i < WARMUP_NUMBER_OF_ITERATIONS; i++) {
        roundTripMessages(
            dataHandler, pingPublication, pongSubscription, WARMUP_NUMBER_OF_MESSAGES);
      }

      Thread.sleep(100);
      final ContinueBarrier barrier = new ContinueBarrier("Execute again?");

      do {
        HISTOGRAM.reset();
        System.out.println("Pinging " + NUMBER_OF_MESSAGES + " messages");

        roundTripMessages(dataHandler, pingPublication, pongSubscription, NUMBER_OF_MESSAGES);

        System.out.println("Histogram of RTT latencies in microseconds.");
        HISTOGRAM.outputPercentileDistribution(System.out, 1000.0);
      } while (barrier.await());
    }
  }

  private static Thread startPong(final String embeddedDirName) {
    return new Thread() {
      public void run() {
        System.out.println(
            "Subscribing Ping at " + PING_CHANNEL + " on stream Id " + PING_STREAM_ID);
        System.out.println(
            "Publishing Pong at " + PONG_CHANNEL + " on stream Id " + PONG_STREAM_ID);

        final Aeron.Context ctx = new Aeron.Context().aeronDirectoryName(embeddedDirName);

        try (final Aeron aeron = Aeron.connect(ctx);
            final Publication pongPublication = aeron.addPublication(PONG_CHANNEL, PONG_STREAM_ID);
            final Subscription pingSubscription =
                aeron.addSubscription(PING_CHANNEL, PING_STREAM_ID)) {
          final FragmentAssembler dataHandler =
              new FragmentAssembler(
                  (buffer, offset, length, header) ->
                      pingHandler(pongPublication, buffer, offset, length));

          while (RUNNING.get()) {
            PING_HANDLER_IDLE_STRATEGY.idle(pingSubscription.poll(dataHandler, FRAME_COUNT_LIMIT));
          }

          System.out.println("Shutting down...");
        }
      }
    };
  }

  private static void roundTripMessages(
      final FragmentHandler fragmentHandler,
      final Publication pingPublication,
      final Subscription pongSubscription,
      final int numMessages) {
    final IdleStrategy idleStrategy = new BusySpinIdleStrategy();

    for (int i = 0; i < numMessages; i++) {
      do {
        ATOMIC_BUFFER.putLong(0, System.nanoTime());
      } while (pingPublication.offer(ATOMIC_BUFFER, 0, MESSAGE_LENGTH) < 0L);

      idleStrategy.reset();
      while (pongSubscription.poll(fragmentHandler, FRAGMENT_COUNT_LIMIT) <= 0) {
        idleStrategy.idle();
      }
    }
  }

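  /** Records the round-trip time: now minus the send timestamp carried in the message. */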
  private static void pongHandler(
      final DirectBuffer buffer, final int offset, final int length, final Header header) {
    final long pingTimestamp = buffer.getLong(offset);
    final long rttNs = System.nanoTime() - pingTimestamp;

    HISTOGRAM.recordValue(rttNs);
  }

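  /**
   * Counts down the latch once the pong image is available, so the ping side does
   * not start publishing before the echo path is connected.
   */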
  private static void availablePongImageHandler(final Image image) {
    final Subscription subscription = image.subscription();
    if (PONG_STREAM_ID == subscription.streamId() && PONG_CHANNEL.equals(subscription.channel())) {
      PONG_IMAGE_LATCH.countDown();
    }
  }

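  /**
   * Echoes a ping straight back on the pong publication: a single fast-path offer
   * first, then spinning with the idle strategy while the publication is back-pressured.
   */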
  public static void pingHandler(
      final Publication pongPublication,
      final DirectBuffer buffer,
      final int offset,
      final int length) {
    // Fast path: the first offer usually succeeds and nothing more is needed.
    if (pongPublication.offer(buffer, offset, length) > 0L) {
      return;
    }

    PING_HANDLER_IDLE_STRATEGY.reset();

    while (pongPublication.offer(buffer, offset, length) < 0L) {
      PING_HANDLER_IDLE_STRATEGY.idle();
    }
  }
}
Example No. 30
0
public class WorkloadRunnerTest {
  private static final TemporalUtil TEMPORAL_UTIL = new TemporalUtil();
  private static final LoggingServiceFactory LOGGING_SERVICE_FACTORY =
      new Log4jLoggingServiceFactory(false);
  DecimalFormat numberFormatter = new DecimalFormat("###,###,###,###");
  DecimalFormat doubleNumberFormatter = new DecimalFormat("###,###,###,##0.00");

  @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder();
  private static final long ONE_SECOND_AS_NANO = TimeUnit.SECONDS.toNanos(1);

  TimeSource timeSource = new SystemTimeSource();
  CompletionTimeServiceAssistant completionTimeServiceAssistant =
      new CompletionTimeServiceAssistant();

  @Test
  public void shouldRunReadOnlyLdbcWorkloadWithNothingDbAndCrashInSaneManner()
      throws InterruptedException, DbException, WorkloadException, IOException,
          MetricsCollectionException, CompletionTimeException, DriverConfigurationException,
          ExecutionException {
    int threadCount = 4;
    long operationCount = 100000;

    ControlService controlService = null;
    Db db = null;
    Workload workload = null;
    MetricsService metricsService = null;
    CompletionTimeService completionTimeService = null;
    ConcurrentErrorReporter errorReporter = new ConcurrentErrorReporter();
    try {
      Map<String, String> paramsMap =
          LdbcSnbInteractiveWorkloadConfiguration.defaultReadOnlyConfigSF1();
      paramsMap.put(
          LdbcSnbInteractiveWorkloadConfiguration.PARAMETERS_DIRECTORY,
          TestUtils.getResource("/snb/interactive/").getAbsolutePath());
      paramsMap.put(
          LdbcSnbInteractiveWorkloadConfiguration.UPDATES_DIRECTORY,
          TestUtils.getResource("/snb/interactive/").getAbsolutePath());
      // Driver-specific parameters
      String name = null;
      String dbClassName = DummyLdbcSnbInteractiveDb.class.getName();
      String workloadClassName = LdbcSnbInteractiveWorkload.class.getName();
      int statusDisplayInterval = 1;
      TimeUnit timeUnit = TimeUnit.NANOSECONDS;
      String resultDirPath = temporaryFolder.newFolder().getAbsolutePath();
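      // Compress the gaps between scheduled operation start times to near zero so
      // the full stream completes quickly in a unit test.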
      double timeCompressionRatio = 0.0000001;
      Set<String> peerIds = new HashSet<>();
      ConsoleAndFileDriverConfiguration.ConsoleAndFileValidationParamOptions validationParams =
          null;
      String dbValidationFilePath = null;
      boolean calculateWorkloadStatistics = false;
      long spinnerSleepDuration = 0L;
      boolean printHelp = false;
      boolean ignoreScheduledStartTimes = false;
      long warmupCount = 100;

      ConsoleAndFileDriverConfiguration configuration =
          new ConsoleAndFileDriverConfiguration(
              paramsMap,
              name,
              dbClassName,
              workloadClassName,
              operationCount,
              threadCount,
              statusDisplayInterval,
              timeUnit,
              resultDirPath,
              timeCompressionRatio,
              peerIds,
              validationParams,
              dbValidationFilePath,
              calculateWorkloadStatistics,
              spinnerSleepDuration,
              printHelp,
              ignoreScheduledStartTimes,
              warmupCount);

      configuration =
          (ConsoleAndFileDriverConfiguration)
              configuration.applyArgs(
                  MapUtils.loadPropertiesToMap(
                      TestUtils.getResource("/snb/interactive/updateStream.properties")));

      controlService =
          new LocalControlService(
              timeSource.nowAsMilli(),
              configuration,
              new Log4jLoggingServiceFactory(false),
              timeSource);
      LoggingService loggingService =
          new Log4jLoggingServiceFactory(false).loggingServiceFor("Test");

      GeneratorFactory gf = new GeneratorFactory(new RandomDataGeneratorFactory(42L));
      boolean returnStreamsWithDbConnector = true;
      Tuple3<WorkloadStreams, Workload, Long> workloadStreamsAndWorkload =
          WorkloadStreams.createNewWorkloadWithOffsetAndLimitedWorkloadStreams(
              configuration,
              gf,
              returnStreamsWithDbConnector,
              configuration.warmupCount(),
              configuration.operationCount(),
              LOGGING_SERVICE_FACTORY);

      workload = workloadStreamsAndWorkload._2();

      WorkloadStreams workloadStreams =
          WorkloadStreams.timeOffsetAndCompressWorkloadStreams(
              workloadStreamsAndWorkload._1(),
              controlService.workloadStartTimeAsMilli(),
              configuration.timeCompressionRatio(),
              gf);

      File resultsLog = temporaryFolder.newFile();
      SimpleCsvFileWriter csvResultsLogWriter =
          new SimpleCsvFileWriter(resultsLog, SimpleCsvFileWriter.DEFAULT_COLUMN_SEPARATOR);
      metricsService =
          ThreadedQueuedMetricsService.newInstanceUsingBlockingBoundedQueue(
              timeSource,
              errorReporter,
              configuration.timeUnit(),
              ThreadedQueuedMetricsService.DEFAULT_HIGHEST_EXPECTED_RUNTIME_DURATION_AS_NANO,
              csvResultsLogWriter,
              workload.operationTypeToClassMapping(),
              LOGGING_SERVICE_FACTORY);

      completionTimeService =
          completionTimeServiceAssistant.newSynchronizedConcurrentCompletionTimeServiceFromPeerIds(
              controlService.configuration().peerIds());

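      // Configure the dummy DB to throw when it executes LdbcQuery4; this is the
      // crash that the error reporter is expected to capture.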
      db = new DummyLdbcSnbInteractiveDb();
      db.init(
          configuration
              .applyArg(DummyLdbcSnbInteractiveDb.CRASH_ON_ARG, LdbcQuery4.class.getName())
              .asMap(),
          loggingService,
          workload.operationTypeToClassMapping());

      int boundedQueueSize = DefaultQueues.DEFAULT_BOUND_1000;
      WorkloadRunner runner =
          new WorkloadRunner(
              timeSource,
              db,
              workloadStreams,
              metricsService,
              errorReporter,
              completionTimeService,
              controlService.loggingServiceFactory(),
              controlService.configuration().threadCount(),
              controlService.configuration().statusDisplayIntervalAsSeconds(),
              controlService.configuration().spinnerSleepDurationAsMilli(),
              controlService.configuration().ignoreScheduledStartTimes(),
              boundedQueueSize);

      runner.getFuture().get();
      csvResultsLogWriter.close();
    } finally {
      try {
        controlService.shutdown();
      } catch (Throwable e) {
        System.out.println(
            format(
                "Unclean %s shutdown -- but it's OK", controlService.getClass().getSimpleName()));
      }
      try {
        db.close();
      } catch (Throwable e) {
        System.out.println(
            format("Unclean %s shutdown -- but it's OK", db.getClass().getSimpleName()));
      }
      try {
        workload.close();
      } catch (Throwable e) {
        System.out.println(
            format("Unclean %s shutdown -- but it's OK", workload.getClass().getSimpleName()));
      }
      try {
        metricsService.shutdown();
      } catch (Throwable e) {
        System.out.println(
            format(
                "Unclean %s shutdown -- but it's OK", metricsService.getClass().getSimpleName()));
      }
      try {
        completionTimeService.shutdown();
      } catch (Throwable e) {
        System.out.println(
            format(
                "Unclean %s shutdown -- but it's OK",
                completionTimeService.getClass().getSimpleName()));
      }
      System.out.println(errorReporter.toString());
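      // The injected LdbcQuery4 crash must surface as a reported error.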
      assertTrue(errorReporter.errorEncountered());
    }
  }

  @Test
  public void shouldRunReadOnlyLdbcWorkloadWithNothingDbAndReturnExpectedMetrics()
      throws InterruptedException, DbException, WorkloadException, IOException,
          MetricsCollectionException, CompletionTimeException, DriverConfigurationException,
          ExecutionException {
    List<Integer> threadCounts = Lists.newArrayList(1, 2, 4, 8);
    long operationCount = 100000;
    for (int threadCount : threadCounts) {
      doShouldRunReadOnlyLdbcWorkloadWithNothingDbAndReturnExpectedMetricsIncludingResultsLog(
          threadCount, operationCount);
    }
  }

  public void
      doShouldRunReadOnlyLdbcWorkloadWithNothingDbAndReturnExpectedMetricsIncludingResultsLog(
          int threadCount, long operationCount)
          throws InterruptedException, DbException, WorkloadException, IOException,
              MetricsCollectionException, CompletionTimeException, DriverConfigurationException,
              ExecutionException {
    ControlService controlService = null;
    Db db = null;
    Workload workload = null;
    MetricsService metricsService = null;
    CompletionTimeService completionTimeService = null;
    ConcurrentErrorReporter errorReporter = new ConcurrentErrorReporter();
    try {
      Map<String, String> paramsMap =
          LdbcSnbInteractiveWorkloadConfiguration.defaultReadOnlyConfigSF1();
      paramsMap.put(
          LdbcSnbInteractiveWorkloadConfiguration.PARAMETERS_DIRECTORY,
          TestUtils.getResource("/snb/interactive/").getAbsolutePath());
      paramsMap.put(
          LdbcSnbInteractiveWorkloadConfiguration.UPDATES_DIRECTORY,
          TestUtils.getResource("/snb/interactive/").getAbsolutePath());
      // Driver-specific parameters
      String name = null;
      String dbClassName = DummyLdbcSnbInteractiveDb.class.getName();
      String workloadClassName = LdbcSnbInteractiveWorkload.class.getName();
      int statusDisplayInterval = 1;
      TimeUnit timeUnit = TimeUnit.NANOSECONDS;
      String resultDirPath = temporaryFolder.newFolder().getAbsolutePath();
      double timeCompressionRatio = 0.0000001;
      Set<String> peerIds = new HashSet<>();
      ConsoleAndFileDriverConfiguration.ConsoleAndFileValidationParamOptions validationParams =
          null;
      String dbValidationFilePath = null;
      boolean calculateWorkloadStatistics = false;
      long spinnerSleepDuration = 0L;
      boolean printHelp = false;
      boolean ignoreScheduledStartTimes = false;
      long warmupCount = 100;

      ConsoleAndFileDriverConfiguration configuration =
          new ConsoleAndFileDriverConfiguration(
              paramsMap,
              name,
              dbClassName,
              workloadClassName,
              operationCount,
              threadCount,
              statusDisplayInterval,
              timeUnit,
              resultDirPath,
              timeCompressionRatio,
              peerIds,
              validationParams,
              dbValidationFilePath,
              calculateWorkloadStatistics,
              spinnerSleepDuration,
              printHelp,
              ignoreScheduledStartTimes,
              warmupCount);

      configuration =
          (ConsoleAndFileDriverConfiguration)
              configuration.applyArgs(
                  MapUtils.loadPropertiesToMap(
                      TestUtils.getResource("/snb/interactive/updateStream.properties")));

      controlService =
          new LocalControlService(
              timeSource.nowAsMilli(),
              configuration,
              new Log4jLoggingServiceFactory(false),
              timeSource);
      LoggingService loggingService =
          new Log4jLoggingServiceFactory(false).loggingServiceFor("Test");

      GeneratorFactory gf = new GeneratorFactory(new RandomDataGeneratorFactory(42L));
      boolean returnStreamsWithDbConnector = true;
      Tuple3<WorkloadStreams, Workload, Long> workloadStreamsAndWorkload =
          WorkloadStreams.createNewWorkloadWithOffsetAndLimitedWorkloadStreams(
              configuration,
              gf,
              returnStreamsWithDbConnector,
              configuration.warmupCount(),
              configuration.operationCount(),
              LOGGING_SERVICE_FACTORY);

      workload = workloadStreamsAndWorkload._2();

      WorkloadStreams workloadStreams =
          WorkloadStreams.timeOffsetAndCompressWorkloadStreams(
              workloadStreamsAndWorkload._1(),
              controlService.workloadStartTimeAsMilli(),
              configuration.timeCompressionRatio(),
              gf);

      File resultsLog = temporaryFolder.newFile();
      SimpleCsvFileWriter csvResultsLogWriter =
          new SimpleCsvFileWriter(resultsLog, SimpleCsvFileWriter.DEFAULT_COLUMN_SEPARATOR);
      metricsService =
          ThreadedQueuedMetricsService.newInstanceUsingBlockingBoundedQueue(
              timeSource,
              errorReporter,
              configuration.timeUnit(),
              ThreadedQueuedMetricsService.DEFAULT_HIGHEST_EXPECTED_RUNTIME_DURATION_AS_NANO,
              csvResultsLogWriter,
              workload.operationTypeToClassMapping(),
              LOGGING_SERVICE_FACTORY);

      completionTimeService =
          completionTimeServiceAssistant.newSynchronizedConcurrentCompletionTimeServiceFromPeerIds(
              controlService.configuration().peerIds());

      db = new DummyLdbcSnbInteractiveDb();
      db.init(configuration.asMap(), loggingService, workload.operationTypeToClassMapping());

      int boundedQueueSize = DefaultQueues.DEFAULT_BOUND_1000;
      WorkloadRunner runner =
          new WorkloadRunner(
              timeSource,
              db,
              workloadStreams,
              metricsService,
              errorReporter,
              completionTimeService,
              controlService.loggingServiceFactory(),
              controlService.configuration().threadCount(),
              controlService.configuration().statusDisplayIntervalAsSeconds(),
              controlService.configuration().spinnerSleepDurationAsMilli(),
              controlService.configuration().ignoreScheduledStartTimes(),
              boundedQueueSize);

      runner.getFuture().get();

      WorkloadResultsSnapshot workloadResults = metricsService.getWriter().results();

      SimpleDetailedWorkloadMetricsFormatter metricsFormatter =
          new SimpleDetailedWorkloadMetricsFormatter();

      assertThat(
          errorReporter.toString() + "\n" + metricsFormatter.format(workloadResults),
          errorReporter.errorEncountered(),
          is(false));
      assertThat(
          errorReporter.toString() + "\n" + metricsFormatter.format(workloadResults),
          workloadResults.startTimeAsMilli() >= controlService.workloadStartTimeAsMilli(),
          is(true));
      assertThat(
          errorReporter.toString() + "\n" + metricsFormatter.format(workloadResults),
          workloadResults.latestFinishTimeAsMilli() >= workloadResults.startTimeAsMilli(),
          is(true));
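      // A tolerance band rather than an exact count: the number of short reads
      // depends on the results of the preceding operations.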
      assertThat(
          errorReporter.toString() + "\n" + metricsFormatter.format(workloadResults),
          workloadResults.totalOperationCount(),
          allOf(
              greaterThanOrEqualTo(percent(operationCount, 0.9)),
              lessThanOrEqualTo(percent(operationCount, 1.1))));

      WorkloadResultsSnapshot workloadResultsFromJson =
          WorkloadResultsSnapshot.fromJson(workloadResults.toJson());

      assertThat(errorReporter.toString(), workloadResults, equalTo(workloadResultsFromJson));
      assertThat(
          errorReporter.toString(),
          workloadResults.toJson(),
          equalTo(workloadResultsFromJson.toJson()));

      csvResultsLogWriter.close();
      SimpleCsvFileReader csvResultsLogReader =
          new SimpleCsvFileReader(
              resultsLog, SimpleCsvFileReader.DEFAULT_COLUMN_SEPARATOR_REGEX_STRING);
      // No "+ 1" because no CSV header row was written.
      // A tolerance band rather than an exact count: the number of short reads
      // depends on the results of the preceding operations.
      assertThat(
          (long) Iterators.size(csvResultsLogReader),
          allOf(
              greaterThanOrEqualTo(percent(configuration.operationCount(), 0.9)),
              lessThanOrEqualTo(percent(configuration.operationCount(), 1.1))));
      csvResultsLogReader.close();

      operationCount = metricsService.getWriter().results().totalOperationCount();
      double operationsPerSecond =
          Math.round(
              ((double) operationCount / workloadResults.totalRunDurationAsNano())
                  * ONE_SECOND_AS_NANO);
      double microSecondPerOperation =
          (double) TimeUnit.NANOSECONDS.toMicros(workloadResults.totalRunDurationAsNano())
              / operationCount;
      System.out.println(
          format(
              "[%s threads] Completed %s operations in %s = %s op/sec = 1 op/%s us",
              threadCount,
              numberFormatter.format(operationCount),
              TEMPORAL_UTIL.nanoDurationToString(workloadResults.totalRunDurationAsNano()),
              doubleNumberFormatter.format(operationsPerSecond),
              doubleNumberFormatter.format(microSecondPerOperation)));
    } finally {
      System.out.println(errorReporter.toString());
      if (null != controlService) {
        controlService.shutdown();
      }
      if (null != db) {
        db.close();
      }
      if (null != workload) {
        workload.close();
      }
      if (null != metricsService) {
        metricsService.shutdown();
      }
      if (null != completionTimeService) {
        completionTimeService.shutdown();
      }
    }
  }

  @Test
  public void shouldRunReadWriteLdbcWorkloadWithNothingDbAndReturnExpectedMetrics()
      throws InterruptedException, DbException, WorkloadException, IOException,
          MetricsCollectionException, CompletionTimeException, DriverConfigurationException,
          ExecutionException {
    List<Integer> threadCounts = Lists.newArrayList(1, 2, 4, 8);
    long operationCount = 10000;
    for (int threadCount : threadCounts) {
      doShouldRunReadWriteLdbcWorkloadWithNothingDbAndReturnExpectedMetricsIncludingResultsLog(
          threadCount, operationCount);
    }
  }

  public void
      doShouldRunReadWriteLdbcWorkloadWithNothingDbAndReturnExpectedMetricsIncludingResultsLog(
          int threadCount, long operationCount)
          throws InterruptedException, DbException, WorkloadException, IOException,
              MetricsCollectionException, CompletionTimeException, DriverConfigurationException,
              ExecutionException {
    ControlService controlService = null;
    Db db = null;
    Workload workload = null;
    MetricsService metricsService = null;
    CompletionTimeService completionTimeService = null;
    ConcurrentErrorReporter errorReporter = new ConcurrentErrorReporter();
    try {
      Map<String, String> paramsMap = LdbcSnbInteractiveWorkloadConfiguration.defaultConfigSF1();
      paramsMap.put(
          LdbcSnbInteractiveWorkloadConfiguration.PARAMETERS_DIRECTORY,
          TestUtils.getResource("/snb/interactive/").getAbsolutePath());
      paramsMap.put(
          LdbcSnbInteractiveWorkloadConfiguration.UPDATES_DIRECTORY,
          TestUtils.getResource("/snb/interactive/").getAbsolutePath());
      // Driver-specific parameters
      String name = null;
      String dbClassName = DummyLdbcSnbInteractiveDb.class.getName();
      String workloadClassName = LdbcSnbInteractiveWorkload.class.getName();
      int statusDisplayInterval = 1;
      TimeUnit timeUnit = TimeUnit.NANOSECONDS;
      String resultDirPath = temporaryFolder.newFolder().getAbsolutePath();
      double timeCompressionRatio = 0.000001;
      Set<String> peerIds = new HashSet<>();
      ConsoleAndFileDriverConfiguration.ConsoleAndFileValidationParamOptions validationParams =
          null;
      String dbValidationFilePath = null;
      boolean calculateWorkloadStatistics = false;
      long spinnerSleepDuration = 0L;
      boolean printHelp = false;
      boolean ignoreScheduledStartTimes = false;
      long warmupCount = 100;

      ConsoleAndFileDriverConfiguration configuration =
          new ConsoleAndFileDriverConfiguration(
              paramsMap,
              name,
              dbClassName,
              workloadClassName,
              operationCount,
              threadCount,
              statusDisplayInterval,
              timeUnit,
              resultDirPath,
              timeCompressionRatio,
              peerIds,
              validationParams,
              dbValidationFilePath,
              calculateWorkloadStatistics,
              spinnerSleepDuration,
              printHelp,
              ignoreScheduledStartTimes,
              warmupCount);

      configuration =
          (ConsoleAndFileDriverConfiguration)
              configuration.applyArgs(
                  MapUtils.loadPropertiesToMap(
                      TestUtils.getResource("/snb/interactive/updateStream.properties")));

      controlService =
          new LocalControlService(
              timeSource.nowAsMilli(),
              configuration,
              new Log4jLoggingServiceFactory(false),
              timeSource);
      LoggingService loggingService =
          new Log4jLoggingServiceFactory(false).loggingServiceFor("Test");

      GeneratorFactory gf = new GeneratorFactory(new RandomDataGeneratorFactory(42L));
      boolean returnStreamsWithDbConnector = true;
      Tuple3<WorkloadStreams, Workload, Long> workloadStreamsAndWorkload =
          WorkloadStreams.createNewWorkloadWithOffsetAndLimitedWorkloadStreams(
              configuration,
              gf,
              returnStreamsWithDbConnector,
              configuration.warmupCount(),
              configuration.operationCount(),
              LOGGING_SERVICE_FACTORY);

      workload = workloadStreamsAndWorkload._2();

      WorkloadStreams workloadStreams =
          WorkloadStreams.timeOffsetAndCompressWorkloadStreams(
              workloadStreamsAndWorkload._1(),
              controlService.workloadStartTimeAsMilli(),
              configuration.timeCompressionRatio(),
              gf);

      File resultsLog = temporaryFolder.newFile();
      SimpleCsvFileWriter csvResultsLogWriter =
          new SimpleCsvFileWriter(resultsLog, SimpleCsvFileWriter.DEFAULT_COLUMN_SEPARATOR);
      metricsService =
          ThreadedQueuedMetricsService.newInstanceUsingBlockingBoundedQueue(
              timeSource,
              errorReporter,
              configuration.timeUnit(),
              ThreadedQueuedMetricsService.DEFAULT_HIGHEST_EXPECTED_RUNTIME_DURATION_AS_NANO,
              csvResultsLogWriter,
              workload.operationTypeToClassMapping(),
              LOGGING_SERVICE_FACTORY);

      db = new DummyLdbcSnbInteractiveDb();
      db.init(configuration.asMap(), loggingService, workload.operationTypeToClassMapping());

      completionTimeService =
          completionTimeServiceAssistant.newSynchronizedConcurrentCompletionTimeServiceFromPeerIds(
              controlService.configuration().peerIds());

      int boundedQueueSize = DefaultQueues.DEFAULT_BOUND_1000;
      WorkloadRunner runner =
          new WorkloadRunner(
              timeSource,
              db,
              workloadStreams,
              metricsService,
              errorReporter,
              completionTimeService,
              controlService.loggingServiceFactory(),
              controlService.configuration().threadCount(),
              controlService.configuration().statusDisplayIntervalAsSeconds(),
              controlService.configuration().spinnerSleepDurationAsMilli(),
              controlService.configuration().ignoreScheduledStartTimes(),
              boundedQueueSize);

      runner.getFuture().get();

      WorkloadResultsSnapshot workloadResults = metricsService.getWriter().results();

      SimpleDetailedWorkloadMetricsFormatter metricsFormatter =
          new SimpleDetailedWorkloadMetricsFormatter();

      assertThat(
          errorReporter.toString() + "\n" + metricsFormatter.format(workloadResults),
          errorReporter.errorEncountered(),
          is(false));
      assertThat(
          errorReporter.toString() + "\n" + metricsFormatter.format(workloadResults),
          workloadResults.startTimeAsMilli() >= controlService.workloadStartTimeAsMilli(),
          is(true));
      assertThat(
          errorReporter.toString() + "\n" + metricsFormatter.format(workloadResults),
          workloadResults.latestFinishTimeAsMilli() >= workloadResults.startTimeAsMilli(),
          is(true));
      // A tolerance band rather than an exact count: the number of short reads
      // depends on the results of the preceding operations.
      assertThat(
          errorReporter.toString() + "\n" + metricsFormatter.format(workloadResults),
          workloadResults.totalOperationCount(),
          allOf(
              greaterThanOrEqualTo(percent(operationCount, 0.9)),
              lessThanOrEqualTo(percent(operationCount, 1.1))));

      WorkloadResultsSnapshot workloadResultsFromJson =
          WorkloadResultsSnapshot.fromJson(workloadResults.toJson());

      assertThat(errorReporter.toString(), workloadResults, equalTo(workloadResultsFromJson));
      assertThat(
          errorReporter.toString(),
          workloadResults.toJson(),
          equalTo(workloadResultsFromJson.toJson()));

      csvResultsLogWriter.close();
      SimpleCsvFileReader csvResultsLogReader =
          new SimpleCsvFileReader(
              resultsLog, SimpleCsvFileReader.DEFAULT_COLUMN_SEPARATOR_REGEX_STRING);
      // No "+ 1" because no CSV header row was written.
      // A tolerance band rather than an exact count: the number of short reads
      // depends on the results of the preceding operations.
      assertThat(
          (long) Iterators.size(csvResultsLogReader),
          allOf(
              greaterThanOrEqualTo(percent(configuration.operationCount(), 0.9)),
              lessThanOrEqualTo(percent(configuration.operationCount(), 1.1))));
      csvResultsLogReader.close();

      operationCount = metricsService.getWriter().results().totalOperationCount();
      double operationsPerSecond =
          Math.round(
              ((double) operationCount / workloadResults.totalRunDurationAsNano())
                  * ONE_SECOND_AS_NANO);
      double microSecondPerOperation =
          (double) TimeUnit.NANOSECONDS.toMicros(workloadResults.totalRunDurationAsNano())
              / operationCount;
      System.out.println(
          format(
              "[%s threads] Completed %s operations in %s = %s op/sec = 1 op/%s us",
              threadCount,
              numberFormatter.format(operationCount),
              TEMPORAL_UTIL.nanoDurationToString(workloadResults.totalRunDurationAsNano()),
              doubleNumberFormatter.format(operationsPerSecond),
              doubleNumberFormatter.format(microSecondPerOperation)));
    } finally {
      System.out.println(errorReporter.toString());
      if (null != controlService) {
        controlService.shutdown();
      }
      if (null != db) {
        db.close();
      }
      if (null != workload) {
        workload.close();
      }
      if (null != metricsService) {
        metricsService.shutdown();
      }
      if (null != completionTimeService) {
        completionTimeService.shutdown();
      }
    }
  }

  @Test
  public void
      shouldRunReadOnlyLdbcWorkloadWithNothingDbWhileIgnoringScheduledStartTimesUsingSynchronizedCompletionTimeServiceAndReturnExpectedMetrics()
          throws InterruptedException, DbException, WorkloadException, IOException,
              MetricsCollectionException, CompletionTimeException, DriverConfigurationException,
              ExecutionException {
    List<Integer> threadCounts = Lists.newArrayList(1, 2, 4);
    long operationCount = 1000000;
    for (int threadCount : threadCounts) {
      ConcurrentErrorReporter errorReporter = new ConcurrentErrorReporter();
      CompletionTimeService completionTimeService =
          completionTimeServiceAssistant.newSynchronizedConcurrentCompletionTimeServiceFromPeerIds(
              new HashSet<String>());
      try {
        doShouldRunReadOnlyLdbcWorkloadWithNothingDbWhileIgnoringScheduledStartTimesAndReturnExpectedMetrics(
            threadCount, operationCount, completionTimeService, errorReporter);
      } finally {
        completionTimeService.shutdown();
      }
    }
  }

  @Test
  public void
      shouldRunReadOnlyLdbcWorkloadWithNothingDbWhileIgnoringScheduledStartTimesUsingThreadedCompletionTimeServiceAndReturnExpectedMetrics()
          throws InterruptedException, DbException, WorkloadException, IOException,
              MetricsCollectionException, CompletionTimeException, DriverConfigurationException,
              ExecutionException {
    List<Integer> threadCounts = Lists.newArrayList(1, 2, 4);
    long operationCount = 1000000;
    for (int threadCount : threadCounts) {
      ConcurrentErrorReporter errorReporter = new ConcurrentErrorReporter();
      CompletionTimeService completionTimeService =
          completionTimeServiceAssistant
              .newThreadedQueuedConcurrentCompletionTimeServiceFromPeerIds(
                  new SystemTimeSource(), new HashSet<String>(), new ConcurrentErrorReporter());
      try {
        doShouldRunReadOnlyLdbcWorkloadWithNothingDbWhileIgnoringScheduledStartTimesAndReturnExpectedMetrics(
            threadCount, operationCount, completionTimeService, errorReporter);
      } finally {
        completionTimeService.shutdown();
      }
    }
  }

  public void
      doShouldRunReadOnlyLdbcWorkloadWithNothingDbWhileIgnoringScheduledStartTimesAndReturnExpectedMetrics(
          int threadCount,
          long operationCount,
          CompletionTimeService completionTimeService,
          ConcurrentErrorReporter errorReporter)
          throws InterruptedException, DbException, WorkloadException, IOException,
              MetricsCollectionException, CompletionTimeException, DriverConfigurationException,
              ExecutionException {
    ControlService controlService = null;
    Db db = null;
    Workload workload = null;
    MetricsService metricsService = null;
    try {
      Map<String, String> paramsMap =
          LdbcSnbInteractiveWorkloadConfiguration.defaultReadOnlyConfigSF1();
      paramsMap.put(
          LdbcSnbInteractiveWorkloadConfiguration.PARAMETERS_DIRECTORY,
          TestUtils.getResource("/snb/interactive/").getAbsolutePath());
      paramsMap.put(
          LdbcSnbInteractiveWorkloadConfiguration.UPDATES_DIRECTORY,
          TestUtils.getResource("/snb/interactive/").getAbsolutePath());
      // Driver-specific parameters
      String name = null;
      String dbClassName = DummyLdbcSnbInteractiveDb.class.getName();
      String workloadClassName = LdbcSnbInteractiveWorkload.class.getName();
      int statusDisplayInterval = 1;
      TimeUnit timeUnit = TimeUnit.NANOSECONDS;
      String resultDirPath = temporaryFolder.newFolder().getAbsolutePath();
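      // Ratio of 1.0 leaves the schedule untouched; it is irrelevant here anyway
      // because scheduled start times are ignored in this test.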
      double timeCompressionRatio = 1.0;
      Set<String> peerIds = new HashSet<>();
      ConsoleAndFileDriverConfiguration.ConsoleAndFileValidationParamOptions validationParams =
          null;
      String dbValidationFilePath = null;
      boolean calculateWorkloadStatistics = false;
      long spinnerSleepDuration = 0L;
      boolean printHelp = false;
      boolean ignoreScheduledStartTimes = true;
      long warmupCount = 100;

      ConsoleAndFileDriverConfiguration configuration =
          new ConsoleAndFileDriverConfiguration(
              paramsMap,
              name,
              dbClassName,
              workloadClassName,
              operationCount,
              threadCount,
              statusDisplayInterval,
              timeUnit,
              resultDirPath,
              timeCompressionRatio,
              peerIds,
              validationParams,
              dbValidationFilePath,
              calculateWorkloadStatistics,
              spinnerSleepDuration,
              printHelp,
              ignoreScheduledStartTimes,
              warmupCount);

      configuration =
          (ConsoleAndFileDriverConfiguration)
              configuration.applyArgs(
                  MapUtils.loadPropertiesToMap(
                      TestUtils.getResource("/snb/interactive/updateStream.properties")));

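      // The workload start time is set one second in the future; since scheduled
      // start times are ignored, the runner should begin immediately rather than wait.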
      controlService =
          new LocalControlService(
              timeSource.nowAsMilli() + 1000,
              configuration,
              new Log4jLoggingServiceFactory(false),
              timeSource);
      LoggingService loggingService =
          new Log4jLoggingServiceFactory(false).loggingServiceFor("Test");
      workload = new LdbcSnbInteractiveWorkload();
      workload.init(configuration);
      db = new DummyLdbcSnbInteractiveDb();
      db.init(configuration.asMap(), loggingService, workload.operationTypeToClassMapping());
      GeneratorFactory gf = new GeneratorFactory(new RandomDataGeneratorFactory(42L));
      Iterator<Operation> operations =
          gf.limit(
              WorkloadStreams.mergeSortedByStartTimeExcludingChildOperationGenerators(
                  gf, workload.streams(gf, true)),
              configuration.operationCount());
      Iterator<Operation> timeMappedOperations =
          gf.timeOffsetAndCompress(operations, controlService.workloadStartTimeAsMilli(), 1.0);
      WorkloadStreams workloadStreams = new WorkloadStreams();
      workloadStreams.setAsynchronousStream(
          new HashSet<Class<? extends Operation>>(),
          new HashSet<Class<? extends Operation>>(),
          Collections.<Operation>emptyIterator(),
          timeMappedOperations,
          null);

      File resultsLog = temporaryFolder.newFile();
      SimpleCsvFileWriter csvResultsLogWriter =
          new SimpleCsvFileWriter(resultsLog, SimpleCsvFileWriter.DEFAULT_COLUMN_SEPARATOR);
      metricsService =
          ThreadedQueuedMetricsService.newInstanceUsingBlockingBoundedQueue(
              timeSource,
              errorReporter,
              configuration.timeUnit(),
              ThreadedQueuedMetricsService.DEFAULT_HIGHEST_EXPECTED_RUNTIME_DURATION_AS_NANO,
              csvResultsLogWriter,
              workload.operationTypeToClassMapping(),
              LOGGING_SERVICE_FACTORY);

      int boundedQueueSize = DefaultQueues.DEFAULT_BOUND_1000;
      WorkloadRunner runner =
          new WorkloadRunner(
              timeSource,
              db,
              workloadStreams,
              metricsService,
              errorReporter,
              completionTimeService,
              controlService.loggingServiceFactory(),
              controlService.configuration().threadCount(),
              controlService.configuration().statusDisplayIntervalAsSeconds(),
              controlService.configuration().spinnerSleepDurationAsMilli(),
              controlService.configuration().ignoreScheduledStartTimes(),
              boundedQueueSize);

      runner.getFuture().get();

      WorkloadResultsSnapshot workloadResults = metricsService.getWriter().results();
      SimpleDetailedWorkloadMetricsFormatter metricsFormatter =
          new SimpleDetailedWorkloadMetricsFormatter();

      assertThat(
          errorReporter.toString() + "\n" + metricsFormatter.format(workloadResults),
          errorReporter.errorEncountered(),
          is(false));
      assertThat(
          errorReporter.toString() + "\n" + metricsFormatter.format(workloadResults),
          workloadResults.latestFinishTimeAsMilli() >= workloadResults.startTimeAsMilli(),
          is(true));
      assertThat(
          errorReporter.toString() + "\n" + metricsFormatter.format(workloadResults),
          workloadResults.totalOperationCount(),
          is(operationCount));

      WorkloadResultsSnapshot workloadResultsFromJson =
          WorkloadResultsSnapshot.fromJson(workloadResults.toJson());

      assertThat(errorReporter.toString(), workloadResults, equalTo(workloadResultsFromJson));
      assertThat(
          errorReporter.toString(),
          workloadResults.toJson(),
          equalTo(workloadResultsFromJson.toJson()));

      csvResultsLogWriter.close();
      SimpleCsvFileReader csvResultsLogReader =
          new SimpleCsvFileReader(
              resultsLog, SimpleCsvFileReader.DEFAULT_COLUMN_SEPARATOR_REGEX_STRING);
      assertThat(
          (long) Iterators.size(csvResultsLogReader),
          is(configuration.operationCount())); // no "+ 1" because no CSV header row was written
      csvResultsLogReader.close();

      operationCount = metricsService.getWriter().results().totalOperationCount();
      double operationsPerSecond =
          Math.round(
              ((double) operationCount / workloadResults.totalRunDurationAsNano())
                  * ONE_SECOND_AS_NANO);
      double microSecondPerOperation =
          (double) TimeUnit.NANOSECONDS.toMicros(workloadResults.totalRunDurationAsNano())
              / operationCount;
      System.out.println(
          format(
              "[%s threads] Completed %s operations in %s = %s op/sec = 1 op/%s us",
              threadCount,
              numberFormatter.format(operationCount),
              TEMPORAL_UTIL.nanoDurationToString(workloadResults.totalRunDurationAsNano()),
              doubleNumberFormatter.format(operationsPerSecond),
              doubleNumberFormatter.format(microSecondPerOperation)));
    } finally {
      System.out.println(errorReporter.toString());
      if (null != controlService) {
        controlService.shutdown();
      }
      if (null != db) {
        db.close();
      }
      if (null != workload) {
        workload.close();
      }
      if (null != metricsService) {
        metricsService.shutdown();
      }
      if (null != completionTimeService) {
        completionTimeService.shutdown();
      }
    }
  }

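  /** Returns {@code value} scaled by the given factor, e.g. {@code percent(100, 0.9) == 90}. */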
  private long percent(long value, double percent) {
    return Math.round(value * percent);
  }
}