Code Example #1
  public void init(FilterConfig filterConfig) throws ServletException {
    final MetricRegistry metricsRegistry = Metrics.metricRegistry();

    this.metersByStatusCode = new ConcurrentHashMap<Integer, Meter>(meterNamesByStatusCode.size());
    for (Map.Entry<Integer, String> entry : meterNamesByStatusCode.entrySet()) {
      metersByStatusCode.put(entry.getKey(), metricsRegistry.meter(name("http", entry.getValue())));
    }
    this.otherMeter = metricsRegistry.meter(name("http", otherMetricName));
    this.activeRequests = metricsRegistry.counter(name("http", "activeRequests"));
    this.requestTimer = metricsRegistry.timer(name("http", "requests"));
  }
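
For context, the doFilter side of such a filter usually brackets the chain call with these metrics. A minimal sketch, assuming a hypothetical StatusCapturingResponse wrapper that records the status code set on the response:

  // Sketch only: StatusCapturingResponse is a hypothetical HttpServletResponse
  // wrapper that remembers the last status code written to it.
  public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
      throws IOException, ServletException {
    final StatusCapturingResponse wrapped =
        new StatusCapturingResponse((HttpServletResponse) response);
    activeRequests.inc();
    final Timer.Context context = requestTimer.time();
    try {
      chain.doFilter(request, wrapped);
    } finally {
      context.stop();
      activeRequests.dec();
      // Mark the meter registered for the observed status code, or the catch-all meter.
      final Meter meter = metersByStatusCode.get(wrapped.getStatus());
      (meter != null ? meter : otherMeter).mark();
    }
  }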
Code Example #2
  @Override
  public void doRun() {
    writtenMessages = metricRegistry.meter(name(InputCacheWorkerThread.class, "writtenMessages"));
    outOfCapacity =
        metricRegistry.meter(name(InputCacheWorkerThread.class, "FailedWritesOutOfCapacity"));

    new Thread(
            new Runnable() {
              @Override
              public void run() {
                work(inputCache, processBuffer);
              }
            },
            "master-cache-worker-input")
        .start();
  }
Code Example #3
  public static void main(String[] args) {

    // create various metrics and update them

    MetricRegistry registry = new MetricRegistry();

    final AtomicInteger gaugeInteger = new AtomicInteger();

    registry.register(
        name("gauge"),
        new Gauge<Integer>() {

          @Override
          public Integer getValue() {
            return gaugeInteger.get();
          }
        });

    final Counter counter = registry.counter(name("counter"));

    final Histogram histogram = registry.histogram(name("histogram"));

    final Meter meter = registry.meter(name("meter"));

    final Timer timer = registry.timer(name("timer"));

    NewRelicReporter reporter =
        new NewRelicReporter(
            registry,
            "new relic reporter",
            MetricFilter.ALL,
            new AllEnabledMetricAttributeFilter(),
            TimeUnit.SECONDS,
            TimeUnit.MILLISECONDS,
            "foo/");
    reporter.start(60, TimeUnit.SECONDS);

    ScheduledExecutorService svc = Executors.newScheduledThreadPool(1);

    final Random random = new Random();

    svc.scheduleAtFixedRate(
        new Runnable() {
          @Override
          public void run() {
            System.out.println("Updating");
            gaugeInteger.incrementAndGet();
            counter.inc();
            histogram.update(random.nextInt(10));
            meter.mark();
            timer.update(random.nextInt(10), TimeUnit.MILLISECONDS);
          }
        },
        0,
        1,
        TimeUnit.SECONDS);
  }
Code Example #4
  @Test
  public void testMeter() {
    System.out.println("******************************* METER *******************************");
    meter = registry.meter("meter");
    try {
      for (int i = 0; i < ITER_COUNT; i++) {
        meter.mark();
        Thread.sleep(SLEEP_MS);
      }

    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt();
    }
  }
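
Once marked, a Meter exposes the raw count along with mean and exponentially weighted rates. A quick way to inspect them after the loop (a sketch reusing the registry and meter above; rates are in events per second):

    final Meter m = registry.meter("meter");
    System.out.println("count         = " + m.getCount());
    System.out.println("mean rate     = " + m.getMeanRate());
    System.out.println("1-minute rate = " + m.getOneMinuteRate());
    System.out.println("5-minute rate = " + m.getFiveMinuteRate());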
Code Example #5
  @Test
  public void testMeter() {
    final Meter meter = registry.meter(name("foo", "bar"));
    meter.mark(10);
    meter.mark(20);
    reportAndRefresh();

    SearchResponse searchResponse =
        client().prepareSearch(indexWithDate).setTypes("meter").execute().actionGet();
    org.assertj.core.api.Assertions.assertThat(searchResponse.getHits().totalHits()).isEqualTo(1L);

    Map<String, Object> hit = searchResponse.getHits().getAt(0).sourceAsMap();
    assertTimestamp(hit);
    assertKey(hit, "name", prefix + ".foo.bar");
    assertKey(hit, "count", 30);
    assertKey(hit, "host", "localhost");
  }
Code Example #6
  public CodaHaleMetricsTracker(
      final String poolName, final PoolStats poolStats, final MetricRegistry registry) {
    this.poolName = poolName;
    this.registry = registry;
    this.connectionObtainTimer = registry.timer(MetricRegistry.name(poolName, "pool", "Wait"));
    this.connectionUsage = registry.histogram(MetricRegistry.name(poolName, "pool", "Usage"));
    this.connectionTimeoutMeter =
        registry.meter(MetricRegistry.name(poolName, "pool", "ConnectionTimeoutRate"));

    registry.register(
        MetricRegistry.name(poolName, "pool", "TotalConnections"),
        new Gauge<Integer>() {
          @Override
          public Integer getValue() {
            return poolStats.getTotalConnections();
          }
        });

    registry.register(
        MetricRegistry.name(poolName, "pool", "IdleConnections"),
        new Gauge<Integer>() {
          @Override
          public Integer getValue() {
            return poolStats.getIdleConnections();
          }
        });

    registry.register(
        MetricRegistry.name(poolName, "pool", "ActiveConnections"),
        new Gauge<Integer>() {
          @Override
          public Integer getValue() {
            return poolStats.getActiveConnections();
          }
        });

    registry.register(
        MetricRegistry.name(poolName, "pool", "PendingConnections"),
        new Gauge<Integer>() {
          @Override
          public Integer getValue() {
            return poolStats.getPendingThreads();
          }
        });
  }
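
To actually surface these pool gauges, meters, and timers, any Dropwizard reporter can be attached to the same registry. A minimal sketch (the pool name is illustrative and poolStats is assumed to exist, as in the constructor above):

  MetricRegistry registry = new MetricRegistry();
  new CodaHaleMetricsTracker("db", poolStats, registry);
  // Print all pool metrics to stdout every 30 seconds.
  ConsoleReporter.forRegistry(registry)
      .convertRatesTo(TimeUnit.SECONDS)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .build()
      .start(30, TimeUnit.SECONDS);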
Code Example #7
 @Override
 public void updateQueuedSpans(int update) {
   registry.meter("tracing.reporter.queued.span").mark();
 }
Code Example #8
 @Override
 public void incrementSpansDropped(int quantity) {
   registry.meter("tracing.reporter.span.dropped").mark(quantity);
 }
Code Example #9
 @Override
 public void incrementMessagesDropped(Throwable cause) {
   registry.meter("tracing.reporter.message.dropped").mark();
 }
Code Example #10
 @Override
 public void incrementMessages() {
   registry.meter("tracing.reporter.message.accepted").mark();
 }
Code Example #11
 public static void markResourceMeter(String... path) {
   metricRegistry.meter(name("resource", path)).mark();
 }
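
Since MetricRegistry.name joins its parts with dots, a call like the following (arguments are illustrative) marks a meter named resource.users.get:

  markResourceMeter("users", "get"); // marks the meter "resource.users.get"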
Code Example #12
File: Metrics.java  Project: pmbauer/quasar
 public static Meter meter(String name) {
   return metrics.meter(name);
 }
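
Callers then go through this facade rather than touching the registry directly, e.g. (the meter name is illustrative):

  Metrics.meter("jobs.completed").mark(); // one event on the "jobs.completed" meter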
Code Example #13
  @Test
  public void writeThousandsSingleSource()
      throws InterruptedException, ExecutionException, MigrationException,
          UnsupportedEncodingException {

    final Id sourceId = IdGenerator.createId("source");
    final String edgeType = "test";

    final EdgeGenerator generator =
        new EdgeGenerator() {

          @Override
          public Edge newEdge() {
            Edge edge = createEdge(sourceId, edgeType, IdGenerator.createId("target"));

            return edge;
          }

          @Override
          public Observable<MarkedEdge> doSearch(final GraphManager manager) {
            return manager.loadEdgesFromSource(
                new SimpleSearchByEdgeType(
                    sourceId,
                    edgeType,
                    Long.MAX_VALUE,
                    SearchByEdgeType.Order.DESCENDING,
                    Optional.<Edge>absent()));
          }
        };

    //        final int numInjectors = 2;
    final int numInjectors = 1;

    /**
     * Create the injectors. This way all the caches are independent of one another. This is the
     * same as multiple nodes.
     */
    final List<Injector> injectors = createInjectors(numInjectors);

    final GraphFig graphFig = getInstance(injectors, GraphFig.class);

    final long shardSize = graphFig.getShardSize();

    // We don't want to starve the Cassandra runtime since it will be on the same box.
    // Only take 50% of processing power for writes.
    final int numProcessors = Runtime.getRuntime().availableProcessors() / 2;

    final int numWorkersPerInjector = numProcessors / numInjectors;

    /** Do 4x shard size so we should have approximately 4 shards */
    final long numberOfEdges = shardSize * 4;

    final long workerWriteLimit = numberOfEdges / numWorkersPerInjector / numInjectors;

    final long expectedShardCount = numberOfEdges / shardSize;

    createExecutor(numWorkersPerInjector);

    final AtomicLong writeCounter = new AtomicLong();

    // min stop time is the min delta + 1 cache cycle timeout
    final long minExecutionTime = graphFig.getShardMinDelta() + graphFig.getShardCacheTimeout();

    logger.info(
        "Writing {} edges per worker on {} workers in {} injectors",
        workerWriteLimit,
        numWorkersPerInjector,
        numInjectors);

    final List<Future<Boolean>> futures = new ArrayList<>();

    for (Injector injector : injectors) {
      final GraphManagerFactory gmf = injector.getInstance(GraphManagerFactory.class);

      for (int i = 0; i < numWorkersPerInjector; i++) {
        Future<Boolean> future =
            executor.submit(
                new Worker(gmf, generator, workerWriteLimit, minExecutionTime, writeCounter));

        futures.add(future);
      }
    }

    /** Wait for all writes to complete */
    for (Future<Boolean> future : futures) {
      future.get();
    }

    // now get all our shards
    final NodeShardCache cache = getInstance(injectors, NodeShardCache.class);

    final DirectedEdgeMeta directedEdgeMeta = DirectedEdgeMeta.fromSourceNode(sourceId, edgeType);

    // now submit the readers.
    final GraphManagerFactory gmf = getInstance(injectors, GraphManagerFactory.class);

    final long writeCount = writeCounter.get();
    final Meter readMeter = registry.meter("readThroughput");

    final List<Throwable> failures = new ArrayList<>();

    for (int i = 0; i < 2; i++) {

      /** Start reading continuously while we migrate data to ensure our view is always correct */
      final ListenableFuture<Long> future =
          executor.submit(new ReadWorker(gmf, generator, writeCount, readMeter));

      // add the future
      Futures.addCallback(
          future,
          new FutureCallback<Long>() {

            @Override
            public void onSuccess(@Nullable final Long result) {
              logger.info("Successfully ran the read, re-running");
              executor.submit(new ReadWorker(gmf, generator, writeCount, readMeter));
            }

            @Override
            public void onFailure(final Throwable t) {
              failures.add(t);
              logger.error("Failed test!", t);
            }
          });
    }

    int compactedCount;

    // now start our readers

    while (true) {

      if (!failures.isEmpty()) {

        StringBuilder builder = new StringBuilder();

        builder.append("Read runner failed!\n");

        for (Throwable t : failures) {
          builder.append("Exception is: ");
          ByteArrayOutputStream output = new ByteArrayOutputStream();

          // PrintWriter buffers its output, so flush before reading the bytes back.
          PrintWriter printWriter = new PrintWriter(output);
          t.printStackTrace(printWriter);
          printWriter.flush();

          builder.append(output.toString("UTF-8"));
          builder.append("\n\n");
        }

        fail(builder.toString());
      }

      // reset our count.  Ultimately we'll have 4 groups once our compaction completes
      compactedCount = 0;

      // we have to get it from the cache, because this will trigger the compaction process
      final Iterator<ShardEntryGroup> groups =
          cache.getReadShardGroup(scope, Long.MAX_VALUE, directedEdgeMeta);
      final Set<ShardEntryGroup> shardEntryGroups = new HashSet<>();

      while (groups.hasNext()) {

        final ShardEntryGroup group = groups.next();
        shardEntryGroups.add(group);

        logger.info(
            "Compaction pending status for group {} is {}", group, group.isCompactionPending());

        if (!group.isCompactionPending()) {
          compactedCount++;
        }
      }

      // we're done
      if (compactedCount >= expectedShardCount) {
        logger.info("All compactions complete, sleeping");

        //                final Object mutex = new Object();
        //
        //                synchronized ( mutex ){
        //
        //                    mutex.wait();
        //                }

        break;
      }

      Thread.sleep(2000);
    }

    // now continue reading everything for 30 seconds

    Thread.sleep(30000);

    executor.shutdownNow();
  }
Code Example #14
/** @author Kasper Nielsen */
public abstract class CassandraBatchedStagedWriter<T> extends AbstractBatchedStage<T> {

  /** The logger. */
  private static final Logger LOG = LoggerFactory.getLogger(CassandraBatchedStagedWriter.class);

  /** The connection to Cassandra. */
  private final CassandraConnection connection;

  final MetricRegistry metrics = new MetricRegistry();

  final Meter persistedCount =
      metrics.meter(
          MetricRegistry.name("aistore", "cassandra", "Number of persisted AIS messages"));

  /** greater than 0 if the last batch was slow. */
  private int lastSlowBatch;

  /**
   * @param connection the Cassandra connection to write to
   * @param batchSize the maximum batch size
   */
  public CassandraBatchedStagedWriter(CassandraConnection connection, int batchSize) {
    super(Math.min(100000, batchSize * 100), batchSize);
    this.connection = requireNonNull(connection);
    final JmxReporter reporter = JmxReporter.forRegistry(metrics).inDomain("fooo.erer.er").build();
    reporter.start();
  }

  /** {@inheritDoc} */
  @Override
  protected final void handleMessages(List<T> messages) {
    long start = System.nanoTime();
    // Create a batch of messages that we want to write.
    List<RegularStatement> statements = new ArrayList<>();
    for (T t : messages) {
      try {
        handleMessage(statements, t);
      } catch (RuntimeException e) {
        LOG.warn("Failed to write message: " + t, e); // Just in case we cannot process a message
      }
    }

    // Try writing the batch
    try {
      Batch batch = QueryBuilder.batch(statements.toArray(new RegularStatement[statements.size()]));

      long beforeSend = System.nanoTime();

      ResultSetFuture f = connection.getSession().executeAsync(batch);
      f.getUninterruptibly(); // throws QueryValidationException etc

      long total = System.nanoTime();
      // Is this an abnormally slow batch?
      boolean isSlow =
          TimeUnit.MILLISECONDS.convert(total - start, TimeUnit.NANOSECONDS) > 200
              || messages.size() >= getBatchSize();
      if (isSlow || lastSlowBatch > 0) {
        LOG.info(
            "Total time: "
                + DurationFormatter.DEFAULT.formatNanos(total - start)
                + ", prepping="
                + DurationFormatter.DEFAULT.formatNanos(beforeSend - start)
                + ", sending="
                + DurationFormatter.DEFAULT.formatNanos(total - beforeSend)
                + ", size="
                + messages.size());
        // makes sure we write 10 info statements after the last slow batch we insert
        lastSlowBatch = isSlow ? 10 : lastSlowBatch - 1;
      }
      persistedCount.mark(messages.size());
      // sink.onSucces(messages);
    } catch (QueryValidationException e) {
      LOG.error("Could not execute query, this is an internal error", e);
    } catch (Exception e) {
      onFailure(messages, e);
      try {
        sleepUntilShutdown(2, TimeUnit.SECONDS);
      } catch (InterruptedException ignore) {
        Thread.currentThread().interrupt(); // restore the interrupt flag
      }
    }
  }

  protected abstract void handleMessage(List<RegularStatement> statements, T message);

  public abstract void onFailure(List<T> messages, Throwable cause);
}
Code Example #15
 /**
  * Constructor passing in metric registry.
  *
  * @param registry the metric registry
  */
 public Metrics(final MetricRegistry registry) {
   this.registry = registry;
   this.httpConnectionAttempt = registry.meter("http-connection-attempt");
   this.httpConnectionSucceeded = registry.meter("http-connection-succeeded");
   this.httpConnectionFailed = registry.meter("http-connection-failed");
   this.httpConnectionClosed = registry.meter("http-connection-closed");
   this.sendAttempt = registry.meter("sent-attempt");
   this.sendSuccess = registry.meter("send-success");
   this.sendError = registry.meter("send-error");
   this.sendRateLimit = registry.meter("send-rate-limit");
   this.sendException = registry.meter("send-exception");
   this.sendLinearBackoff = registry.meter("send-backoff-linear");
   this.sendExponentialBackoff = registry.meter("send-backoff-exponential");
   this.readKafkaItem = registry.meter("read-kafka-item");
   this.passedOnKafkaItem = registry.meter("passed-on-kafka-item");
   this.readKafkaItemFromConsumer = registry.meter("read-kafka-item-from-consumer");
   this.readKafkaItemsFromConsumer = registry.meter("read-kafka-items-from-consumer");
   this.sendHeartbeatAttempt = registry.meter("send-http-heartbeat");
   this.sendHeartbeatSuccess = registry.meter("send-http-heartbeat-success");
   this.sendHeartbeatFailure = registry.meter("send-http-heartbeat-failure");
   this.interruptedService = registry.meter("interrupt-service");
   this.interruptedHTTPSending = registry.meter("interrupt-http-send");
   this.interruptedShutdown = registry.meter("interrupt-shutdown");
   this.shutdown = registry.meter("shutdown");
   this.bulkPostTime = registry.timer("bulk-post-time");
 }
Code Example #16
public class EventHandler extends AbstractHandler {
  private static final Logger log = LoggerFactory.getLogger(EventHandler.class);

  private AtomicLong numOps = new AtomicLong(0);
  private AtomicLong numTxs = new AtomicLong(0);

  private ConfigurationFactory configurationFactory = new TypesafeConfigFactory();
  private Conf configuration;
  private MessageEncoder messageEncoder;
  private MessageProducer messageProducer;

  private MetricRegistry metrics = new MetricRegistry();
  private Timer operationProcessingTimer = metrics.timer("operationProcessingTime");
  private Timer messageEncodingTimer = metrics.timer("encodingTime");
  private Timer messageSendingTimer = metrics.timer("sendingTime");
  private Meter operationProcessingErrorMeter = metrics.meter("processingErrors");
  private TxFactory txFactory;
  private ScheduledReporter metricsReporter;

  private String configurationPath;

  public EventHandler() {
    super(TxOpMode.op);
    log.info("created handler - default mode: " + getMode());
  }

  @VisibleForTesting
  public EventHandler(
      Conf configuration,
      MessageEncoder messageEncoder,
      MessageProducer messageProducer,
      TxFactory txFactory) {
    this.configuration = configuration;
    this.messageEncoder = messageEncoder;
    this.messageProducer = messageProducer;
    this.txFactory = txFactory;
  }

  @Override
  public void init(DsConfiguration conf, DsMetaData metaData) {
    super.init(conf, metaData);
    log.info("Initializing handler: Mode =" + getMode());
    configuration = configurationFactory.load(configurationPath);
    messageProducer = KafkaProducerFactory.create(configuration.kafka());
    messageEncoder = MessageEncoderFactory.create(configuration);
    txFactory = new TxFactory();

    if (configuration.metrics().isEnabled()) {
      metricsReporter = MetricsReporterFactory.createReporter(configuration.metrics(), metrics);
    }
  }

  @Override
  public Status transactionBegin(DsEvent e, DsTransaction transaction) {
    super.transactionBegin(e, transaction);

    if (log.isDebugEnabled()) {
      log.debug(
          "Received begin tx event, numTx="
              + numTxs.get()
              + " : position="
              + transaction.getTranID()
              + ", totalOps="
              + transaction.getTotalOps());
    }

    return Status.OK;
  }

  @Override
  public Status operationAdded(DsEvent e, DsTransaction transaction, DsOperation operation) {
    Status overallStatus = Status.OK;
    super.operationAdded(e, transaction, operation);
    numOps.incrementAndGet();

    final Tx tx = new Tx(transaction, getMetaData(), getConfig());
    final TableMetaData tMeta = getMetaData().getTableMetaData(operation.getTableName());
    final Op op = new Op(operation, tMeta, getConfig());

    operation.getTokens();
    if (isOperationMode()) {

      if (log.isDebugEnabled()) {
        log.debug(
            " Received operation: table='"
                + op.getTableName()
                + "'"
                + ", pos="
                + op.getPosition()
                + " (total_ops= "
                + tx.getTotalOps()
                + ", buffered="
                + tx.getSize()
                + ")"
                + ", ts="
                + op.getTimestamp());
      }

      Status operationStatus = processOperation(tx, op);

      if (Status.ABEND.equals(operationStatus)) {
        overallStatus = Status.ABEND;
      }
    }
    return overallStatus;
  }

  @Override
  public Status transactionCommit(DsEvent e, DsTransaction transaction) {
    Status overallStatus = Status.OK;
    super.transactionCommit(e, transaction);

    Tx tx = txFactory.createAdapterTx(transaction, getMetaData(), getConfig());
    numTxs.incrementAndGet();

    if (log.isDebugEnabled()) {
      log.debug(
          "transactionCommit event, tx #"
              + numTxs.get()
              + ":"
              + ", pos="
              + tx.getTranID()
              + " (total_ops= "
              + tx.getTotalOps()
              + ", buffered="
              + tx.getSize()
              + ")"
              + ", ts="
              + tx.getTimestamp()
              + ")");
    }

    if (!isOperationMode()) {
      for (Op op : tx) {
        Status operationStatus = processOperation(tx, op);

        if (Status.ABEND.equals(operationStatus)) {
          overallStatus = Status.ABEND;
        }
      }
    }

    return overallStatus;
  }

  private Status processOperation(Tx tx, Op op) {
    Timer.Context timer = operationProcessingTimer.time();
    Status status = Status.OK;

    try {
      encodeAndSend(tx, op);
    } catch (RuntimeException re) {
      operationProcessingErrorMeter.mark();
      log.error("Error processing operation: " + op.toString(), re);
      status = Status.ABEND;
    }

    timer.stop();
    return status;
  }

  @Override
  public Status metaDataChanged(DsEvent e, DsMetaData meta) {
    log.debug("Received metadata event: " + e + "; current tables: " + meta.getTableNames().size());
    return super.metaDataChanged(e, meta);
  }

  @Override
  public String reportStatus() {
    String s =
        "Status report: "
            + ", mode="
            + getMode()
            + ", transactions="
            + numTxs.get()
            + ", operations="
            + numOps.get();
    return s;
  }

  @Override
  public void destroy() {
    log.debug("destroy()... " + reportStatus());
    if (configuration.metricsEnabled()) {
      metricsReporter.stop();
    }

    messageProducer.terminate();
    super.destroy();
  }

  private void encodeAndSend(Tx tx, Op op) {
    if (log.isDebugEnabled()) {
      log.debug("Processing of transaction " + tx + " and operation " + op);
    }

    byte[] encodedMessage = encodeMessage(tx, op);
    sendMessage(encodedMessage);

    if (log.isDebugEnabled()) {
      log.debug("Completed processing of transaction " + tx + " and operation " + op);
    }
  }

  private void sendMessage(byte[] encodedMessage) {
    Timer.Context sendMessageTimer = messageSendingTimer.time();
    messageProducer.produce(encodedMessage);
    sendMessageTimer.stop();

    if (log.isDebugEnabled()) {
      log.debug("Completed send of message: " + new String(encodedMessage));
    }
  }

  private byte[] encodeMessage(Tx tx, Op op) {
    Timer.Context encodingTimer = messageEncodingTimer.time();
    byte[] encodedMessage = messageEncoder.encode(tx, op);
    encodingTimer.stop();

    if (log.isTraceEnabled()) {
      log.trace("Result of message encoding is = " + new String(encodedMessage));
    }

    return encodedMessage;
  }

  public void setConfigurationPath(String configurationPath) {
    this.configurationPath = configurationPath;
  }
}
Code Example #17
File: PlogStress.java  Project: hardiku/plog
  @SuppressWarnings("OverlyLongMethod")
  private void run(Config config) {
    System.err.println(
        "      _\n"
            + " _ __| |___  __ _\n"
            + "| '_ \\ / _ \\/ _` |\n"
            + "| .__/_\\___/\\__, |\n"
            + "|_|         |___/ stress");

    final Config stressConfig = config.getConfig("plog.stress");

    final int threadCount = stressConfig.getInt("threads");
    log.info("Using {} threads", threadCount);

    final int rate = stressConfig.getInt("rate");
    final RateLimiter rateLimiter = RateLimiter.create(rate);

    final int socketRenewRate = stressConfig.getInt("renew_rate");
    final int minSize = stressConfig.getInt("min_size");
    final int maxSize = stressConfig.getInt("max_size");
    final int sizeIncrements = stressConfig.getInt("size_increments");
    final double sizeExponent = stressConfig.getDouble("size_exponent");

    final int sizeDelta = maxSize - minSize;
    final int differentSizes = sizeDelta / sizeIncrements;
    if (differentSizes == 0) {
      throw new RuntimeException("No sizes! Decrease plog.stress.size_increments");
    }

    final int stopAfter = stressConfig.getInt("stop_after");

    final int packetSize = stressConfig.getInt("udp.size");
    final int bufferSize = stressConfig.getInt("udp.SO_SNDBUF");

    final Fragmenter fragmenter = new Fragmenter(packetSize);

    final Random random = new Random(stressConfig.getLong("seed"));
    final byte[] randomBytes = new byte[maxSize];
    random.nextBytes(randomBytes);
    final ByteBuf randomMessage = Unpooled.wrappedBuffer(randomBytes);

    log.info("Generating {} different hashes", differentSizes);
    final int[] precomputedHashes = new int[differentSizes];
    for (int i = 0; i < differentSizes; i++) {
      precomputedHashes[i] = Murmur3.hash32(randomMessage, 0, minSize + sizeIncrements * i, 0);
    }

    final ByteBufAllocator allocator = new PooledByteBufAllocator();

    final double packetLoss = stressConfig.getDouble("udp.loss");

    final Meter socketMeter = registry.meter("Sockets used");
    final Meter messageMeter = registry.meter("Messages sent");
    final Meter packetMeter = registry.meter("Packets sent");
    final Meter sendFailureMeter = registry.meter("Send failures");
    final Meter lossMeter = registry.meter("Packets dropped");
    final Histogram messageSizeHistogram = registry.histogram("Message size");
    final Histogram packetSizeHistogram = registry.histogram("Packet size");

    final InetSocketAddress target =
        new InetSocketAddress(stressConfig.getString("host"), stressConfig.getInt("port"));

    log.info("Starting with config {}", config);

    final long consoleRate = stressConfig.getDuration("console.interval", TimeUnit.MILLISECONDS);
    ConsoleReporter.forRegistry(registry).build().start(consoleRate, TimeUnit.MILLISECONDS);

    for (int i = 0; i < threadCount; i++) {
      new Thread("stress_" + i) {
        private DatagramChannel channel = null;

        @Override
        public void run() {
          try {
            for (int sent = 0; sent < stopAfter; sent++, messageMeter.mark()) {
              if (sent % socketRenewRate == 0) {
                if (channel != null) {
                  channel.close();
                }
                channel = DatagramChannel.open();
                channel.socket().setSendBufferSize(bufferSize);
                socketMeter.mark();
              }

              // global rate limiting
              rateLimiter.acquire();

              final int sizeIndex =
                  (int) (Math.pow(random.nextDouble(), sizeExponent) * differentSizes);
              final int messageSize = minSize + sizeIncrements * sizeIndex;
              final int hash = precomputedHashes[sizeIndex];

              messageSizeHistogram.update(messageSize);

              final ByteBuf[] fragments =
                  fragmenter.fragment(allocator, randomMessage, null, sent, messageSize, hash);

              for (ByteBuf fragment : fragments) {
                if (random.nextDouble() < packetLoss) {
                  lossMeter.mark();
                } else {
                  final int packetSize = fragment.readableBytes();
                  final ByteBuffer buffer = fragment.nioBuffer();

                  try {
                    channel.send(buffer, target);
                    packetSizeHistogram.update(packetSize);
                    packetMeter.mark();
                  } catch (SocketException e) {
                    sendFailureMeter.mark();
                  }
                }
                fragment.release();
              }
            }
          } catch (Throwable t) {
            t.printStackTrace();
            System.exit(1);
          }
        }
      }.start();
    }
  }
Code Example #18
  @Test(timeout = 120000)
  @Category(StressTest.class)
  public void writeThousandsDelete()
      throws InterruptedException, ExecutionException, MigrationException,
          UnsupportedEncodingException {

    final Id sourceId = IdGenerator.createId("source");
    final String edgeType = "test";

    final EdgeGenerator generator =
        new EdgeGenerator() {

          @Override
          public Edge newEdge() {
            Edge edge = createEdge(sourceId, edgeType, IdGenerator.createId("target"));

            return edge;
          }

          @Override
          public Observable<MarkedEdge> doSearch(final GraphManager manager) {
            return manager.loadEdgesFromSource(
                new SimpleSearchByEdgeType(
                    sourceId,
                    edgeType,
                    Long.MAX_VALUE,
                    SearchByEdgeType.Order.DESCENDING,
                    Optional.<Edge>absent(),
                    false));
          }
        };

    //        final int numInjectors = 2;
    final int numInjectors = 1;

    /**
     * Create the injectors. This way all the caches are independent of one another. This is the
     * same as multiple nodes.
     */
    final List<Injector> injectors = createInjectors(numInjectors);

    final GraphFig graphFig = getInstance(injectors, GraphFig.class);

    final long shardSize = graphFig.getShardSize();

    // We don't want to starve the Cassandra runtime since it will be on the same box.
    // Only take 50% of processing power for writes.
    final int numProcessors = Runtime.getRuntime().availableProcessors() / 2;

    final int numWorkersPerInjector = numProcessors / numInjectors;

    /** Do 4x shard size so we should have approximately 4 shards */
    final long numberOfEdges = shardSize * 4;

    final long workerWriteLimit = numberOfEdges / numWorkersPerInjector / numInjectors;

    createExecutor(numWorkersPerInjector);

    final AtomicLong writeCounter = new AtomicLong();

    // min stop time is the min delta + 1 cache cycle timeout
    final long minExecutionTime = graphFig.getShardMinDelta() + graphFig.getShardCacheTimeout();

    logger.info(
        "Writing {} edges per worker on {} workers in {} injectors",
        workerWriteLimit,
        numWorkersPerInjector,
        numInjectors);

    final List<Future<Boolean>> futures = new ArrayList<>();

    for (Injector injector : injectors) {
      final GraphManagerFactory gmf = injector.getInstance(GraphManagerFactory.class);

      for (int i = 0; i < numWorkersPerInjector; i++) {
        Future<Boolean> future =
            executor.submit(
                new Worker(gmf, generator, workerWriteLimit, minExecutionTime, writeCounter));

        futures.add(future);
      }
    }

    /** Wait for all writes to complete */
    for (Future<Boolean> future : futures) {
      future.get();
    }

    // now get all our shards
    final NodeShardCache cache = getInstance(injectors, NodeShardCache.class);

    final DirectedEdgeMeta directedEdgeMeta = DirectedEdgeMeta.fromSourceNode(sourceId, edgeType);

    // now submit the readers.
    final GraphManagerFactory gmf = getInstance(injectors, GraphManagerFactory.class);

    final long writeCount = writeCounter.get();
    final Meter readMeter = registry.meter("readThroughput");

    // check our shard state

    final Iterator<ShardEntryGroup> existingShardGroups =
        cache.getReadShardGroup(scope, Long.MAX_VALUE, directedEdgeMeta);
    int shardCount = 0;

    while (existingShardGroups.hasNext()) {
      final ShardEntryGroup group = existingShardGroups.next();

      shardCount++;

      logger.info(
          "Compaction pending status for group {} is {}", group, group.isCompactionPending());
    }

    logger.info("found {} shard groups", shardCount);

    // now mark and delete all the edges

    final GraphManager manager = gmf.createEdgeManager(scope);

    // sleep occasionally to stop pushing cassandra over

    long count = Long.MAX_VALUE;

    while (count != 0) {
      // take 1000 then sleep
      count =
          generator
              .doSearch(manager)
              .onBackpressureBlock()
              .take(1000)
              .flatMap(edge -> manager.markEdge(edge))
              .flatMap(edge -> manager.deleteEdge(edge))
              .countLong()
              .toBlocking()
              .last();

      Thread.sleep(500);
    }

    // now loop with a reader until our shards are gone

    /** Start reading continuously while we migrate data to ensure our view is always correct */
    final ListenableFuture<Long> future =
        executor.submit(new ReadWorker(gmf, generator, 0, readMeter));

    final List<Throwable> failures = new ArrayList<>();

    // add the future
    Futures.addCallback(
        future,
        new FutureCallback<Long>() {

          @Override
          public void onSuccess(@Nullable final Long result) {
            logger.info("Successfully ran the read, re-running");
            executor.submit(new ReadWorker(gmf, generator, writeCount, readMeter));
          }

          @Override
          public void onFailure(final Throwable t) {
            failures.add(t);
            logger.error("Failed test!", t);
          }
        });

    // now start our readers

    while (true) {

      if (!failures.isEmpty()) {

        StringBuilder builder = new StringBuilder();

        builder.append("Read runner failed!\n");

        for (Throwable t : failures) {
          builder.append("Exception is: ");
          ByteArrayOutputStream output = new ByteArrayOutputStream();

          // PrintWriter buffers its output, so flush before reading the bytes back.
          PrintWriter printWriter = new PrintWriter(output);
          t.printStackTrace(printWriter);
          printWriter.flush();

          builder.append(output.toString("UTF-8"));
          builder.append("\n\n");
        }

        fail(builder.toString());
      }

      // reset our count.  Ultimately we'll have 4 groups once our compaction completes
      shardCount = 0;

      // we have to get it from the cache, because this will trigger the compaction process
      final Iterator<ShardEntryGroup> groups =
          cache.getReadShardGroup(scope, Long.MAX_VALUE, directedEdgeMeta);

      ShardEntryGroup group = null;

      while (groups.hasNext()) {

        group = groups.next();

        logger.info("Shard size for group is {}", group.getReadShards());

        shardCount += group.getReadShards().size();
      }

      // we're done, 1 shard remains, we have a group, and it's our default shard
      if (shardCount == 1
          && group != null
          && group.getMinShard().getShardIndex() == Shard.MIN_SHARD.getShardIndex()) {
        logger.info("All compactions complete,");

        break;
      }

      Thread.sleep(2000);
    }

    // now that we have finished expanding shards

    executor.shutdownNow();
  }
Code Example #19
  @Override
  protected void doStart() throws Exception {
    super.doStart();

    final String prefix = name(getHandler().getClass(), name);

    this.requests = timer(name(prefix, "requests"));
    this.dispatches = timer(name(prefix, "dispatches"));

    this.activeRequests = metricRegistry.counter(name(prefix, "active-requests"));
    this.activeDispatches = metricRegistry.counter(name(prefix, "active-dispatches"));
    this.activeSuspended = metricRegistry.counter(name(prefix, "active-suspended"));

    this.asyncDispatches = metricRegistry.meter(name(prefix, "async-dispatches"));
    this.asyncTimeouts = metricRegistry.meter(name(prefix, "async-timeouts"));

    this.responses =
        new Meter[] {
          metricRegistry.meter(name(prefix, "1xx-responses")), // 1xx
          metricRegistry.meter(name(prefix, "2xx-responses")), // 2xx
          metricRegistry.meter(name(prefix, "3xx-responses")), // 3xx
          metricRegistry.meter(name(prefix, "4xx-responses")), // 4xx
          metricRegistry.meter(name(prefix, "5xx-responses")) // 5xx
        };

    this.getRequests = timer(name(prefix, "get-requests"));
    this.postRequests = timer(name(prefix, "post-requests"));
    this.headRequests = timer(name(prefix, "head-requests"));
    this.putRequests = timer(name(prefix, "put-requests"));
    this.deleteRequests = timer(name(prefix, "delete-requests"));
    this.optionsRequests = timer(name(prefix, "options-requests"));
    this.traceRequests = timer(name(prefix, "trace-requests"));
    this.connectRequests = timer(name(prefix, "connect-requests"));
    this.moveRequests = timer(name(prefix, "move-requests"));
    this.otherRequests = timer(name(prefix, "other-requests"));

    this.listener =
        new AsyncListener() {
          @Override
          public void onTimeout(AsyncEvent event) throws IOException {
            asyncTimeouts.mark();
          }

          @Override
          public void onStartAsync(AsyncEvent event) throws IOException {
            event.getAsyncContext().addListener(this);
          }

          @Override
          public void onError(AsyncEvent event) throws IOException {}

          @Override
          public void onComplete(AsyncEvent event) throws IOException {
            final AsyncContextState state = (AsyncContextState) event.getAsyncContext();
            final Request request = (Request) state.getRequest();
            updateResponses(request);
            if (!(state.getHttpChannelState().getState() == State.DISPATCHED)) {
              activeSuspended.dec();
            }
          }
        };
  }
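
The updateResponses helper invoked from onComplete is not shown. A plausible sketch, assuming the Jetty Request exposes its Response, that maps the status code to its class and marks the matching meter from the responses array above:

  // Sketch: responses[0] holds 1xx, ..., responses[4] holds 5xx.
  private void updateResponses(Request request) {
    final int statusClass = request.getResponse().getStatus() / 100;
    if (statusClass >= 1 && statusClass <= 5) {
      responses[statusClass - 1].mark();
    }
  }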
Code Example #20
public class GraphManagerShardConsistencyIT {
  private static final Logger logger =
      LoggerFactory.getLogger(GraphManagerShardConsistencyIT.class);

  private static final MetricRegistry registry = new MetricRegistry();

  private static final Meter writeMeter = registry.meter("writeThroughput");

  private Slf4jReporter reporter;

  protected ApplicationScope scope;

  protected Object originalShardSize;

  protected Object originalShardTimeout;

  protected Object originalShardDelta;

  protected ListeningExecutorService executor;

  @Before
  public void setupOrg() {

    originalShardSize = ConfigurationManager.getConfigInstance().getProperty(GraphFig.SHARD_SIZE);

    originalShardTimeout =
        ConfigurationManager.getConfigInstance().getProperty(GraphFig.SHARD_CACHE_TIMEOUT);

    originalShardDelta =
        ConfigurationManager.getConfigInstance().getProperty(GraphFig.SHARD_MIN_DELTA);

    ConfigurationManager.getConfigInstance().setProperty(GraphFig.SHARD_SIZE, 500);

    final long cacheTimeout = 2000;
    // set our cache timeout to the above value
    ConfigurationManager.getConfigInstance()
        .setProperty(GraphFig.SHARD_CACHE_TIMEOUT, cacheTimeout);

    final long minDelta = (long) (cacheTimeout * 2.5);

    ConfigurationManager.getConfigInstance().setProperty(GraphFig.SHARD_MIN_DELTA, minDelta);

    // get the system property of the UUID to use. If one is not set, use the default
    String uuidString = System.getProperty("org.id", "80a42760-b699-11e3-a5e2-0800200c9a66");

    scope = new ApplicationScopeImpl(IdGenerator.createId(UUID.fromString(uuidString), "test"));

    reporter =
        Slf4jReporter.forRegistry(registry)
            .outputTo(logger)
            .convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS)
            .build();

    reporter.start(10, TimeUnit.SECONDS);
  }

  @After
  public void tearDown() {
    reporter.stop();
    reporter.report();

    executor.shutdownNow();
  }

  private void createExecutor(final int size) {
    executor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(size));
  }

  @Test
  public void writeThousandsSingleSource()
      throws InterruptedException, ExecutionException, MigrationException,
          UnsupportedEncodingException {

    final Id sourceId = IdGenerator.createId("source");
    final String edgeType = "test";

    final EdgeGenerator generator =
        new EdgeGenerator() {

          @Override
          public Edge newEdge() {
            Edge edge = createEdge(sourceId, edgeType, IdGenerator.createId("target"));

            return edge;
          }

          @Override
          public Observable<MarkedEdge> doSearch(final GraphManager manager) {
            return manager.loadEdgesFromSource(
                new SimpleSearchByEdgeType(
                    sourceId,
                    edgeType,
                    Long.MAX_VALUE,
                    SearchByEdgeType.Order.DESCENDING,
                    Optional.<Edge>absent()));
          }
        };

    //        final int numInjectors = 2;
    final int numInjectors = 1;

    /**
     * Create the injectors. This way all the caches are independent of one another. This is the
     * same as multiple nodes.
     */
    final List<Injector> injectors = createInjectors(numInjectors);

    final GraphFig graphFig = getInstance(injectors, GraphFig.class);

    final long shardSize = graphFig.getShardSize();

    // We don't want to starve the Cassandra runtime since it will be on the same box.
    // Only take 50% of processing power for writes.
    final int numProcessors = Runtime.getRuntime().availableProcessors() / 2;

    final int numWorkersPerInjector = numProcessors / numInjectors;

    /** Do 4x shard size so we should have approximately 4 shards */
    final long numberOfEdges = shardSize * 4;

    final long workerWriteLimit = numberOfEdges / numWorkersPerInjector / numInjectors;

    final long expectedShardCount = numberOfEdges / shardSize;

    createExecutor(numWorkersPerInjector);

    final AtomicLong writeCounter = new AtomicLong();

    // min stop time is the min delta + 1 cache cycle timeout
    final long minExecutionTime = graphFig.getShardMinDelta() + graphFig.getShardCacheTimeout();

    logger.info(
        "Writing {} edges per worker on {} workers in {} injectors",
        workerWriteLimit,
        numWorkersPerInjector,
        numInjectors);

    final List<Future<Boolean>> futures = new ArrayList<>();

    for (Injector injector : injectors) {
      final GraphManagerFactory gmf = injector.getInstance(GraphManagerFactory.class);

      for (int i = 0; i < numWorkersPerInjector; i++) {
        Future<Boolean> future =
            executor.submit(
                new Worker(gmf, generator, workerWriteLimit, minExecutionTime, writeCounter));

        futures.add(future);
      }
    }

    /** Wait for all writes to complete */
    for (Future<Boolean> future : futures) {
      future.get();
    }

    // now get all our shards
    final NodeShardCache cache = getInstance(injectors, NodeShardCache.class);

    final DirectedEdgeMeta directedEdgeMeta = DirectedEdgeMeta.fromSourceNode(sourceId, edgeType);

    // now submit the readers.
    final GraphManagerFactory gmf = getInstance(injectors, GraphManagerFactory.class);

    final long writeCount = writeCounter.get();
    final Meter readMeter = registry.meter("readThroughput");

    final List<Throwable> failures = new ArrayList<>();

    for (int i = 0; i < 2; i++) {

      /** Start reading continuously while we migrate data to ensure our view is always correct */
      final ListenableFuture<Long> future =
          executor.submit(new ReadWorker(gmf, generator, writeCount, readMeter));

      // add the future
      Futures.addCallback(
          future,
          new FutureCallback<Long>() {

            @Override
            public void onSuccess(@Nullable final Long result) {
              logger.info("Successfully ran the read, re-running");
              executor.submit(new ReadWorker(gmf, generator, writeCount, readMeter));
            }

            @Override
            public void onFailure(final Throwable t) {
              failures.add(t);
              logger.error("Failed test!", t);
            }
          });
    }

    int compactedCount;

    // now start our readers

    while (true) {

      if (!failures.isEmpty()) {

        StringBuilder builder = new StringBuilder();

        builder.append("Read runner failed!\n");

        for (Throwable t : failures) {
          builder.append("Exception is: ");
          ByteArrayOutputStream output = new ByteArrayOutputStream();

          // PrintWriter buffers its output, so flush before reading the bytes back.
          PrintWriter printWriter = new PrintWriter(output);
          t.printStackTrace(printWriter);
          printWriter.flush();

          builder.append(output.toString("UTF-8"));
          builder.append("\n\n");
        }

        fail(builder.toString());
      }

      // reset our count.  Ultimately we'll have 4 groups once our compaction completes
      compactedCount = 0;

      // we have to get it from the cache, because this will trigger the compaction process
      final Iterator<ShardEntryGroup> groups =
          cache.getReadShardGroup(scope, Long.MAX_VALUE, directedEdgeMeta);
      final Set<ShardEntryGroup> shardEntryGroups = new HashSet<>();

      while (groups.hasNext()) {

        final ShardEntryGroup group = groups.next();
        shardEntryGroups.add(group);

        logger.info(
            "Compaction pending status for group {} is {}", group, group.isCompactionPending());

        if (!group.isCompactionPending()) {
          compactedCount++;
        }
      }

      // we're done
      if (compactedCount >= expectedShardCount) {
        logger.info("All compactions complete, sleeping");

        //                final Object mutex = new Object();
        //
        //                synchronized ( mutex ){
        //
        //                    mutex.wait();
        //                }

        break;
      }

      Thread.sleep(2000);
    }

    // now continue reading everything for 30 seconds

    Thread.sleep(30000);

    executor.shutdownNow();
  }

  private <T> T getInstance(final List<Injector> injectors, Class<T> clazz) {
    return injectors.get(0).getInstance(clazz);
  }

  /** Create new Guice injector environments and return them */
  private List<Injector> createInjectors(int count) throws MigrationException {

    final List<Injector> injectors = new ArrayList<>(count);

    for (int i = 0; i < count; i++) {
      final Injector injector = Guice.createInjector(new TestGraphModule());
      injectors.add(injector);
    }

    final MigrationManager migrationManager = getInstance(injectors, MigrationManager.class);

    migrationManager.migrate();

    return injectors;
  }

  @Test(timeout = 120000)
  @Category(StressTest.class)
  public void writeThousandsDelete()
      throws InterruptedException, ExecutionException, MigrationException,
          UnsupportedEncodingException {

    final Id sourceId = IdGenerator.createId("source");
    final String edgeType = "test";

    final EdgeGenerator generator =
        new EdgeGenerator() {

          @Override
          public Edge newEdge() {
            Edge edge = createEdge(sourceId, edgeType, IdGenerator.createId("target"));

            return edge;
          }

          @Override
          public Observable<MarkedEdge> doSearch(final GraphManager manager) {
            return manager.loadEdgesFromSource(
                new SimpleSearchByEdgeType(
                    sourceId,
                    edgeType,
                    Long.MAX_VALUE,
                    SearchByEdgeType.Order.DESCENDING,
                    Optional.<Edge>absent(),
                    false));
          }
        };

    //        final int numInjectors = 2;
    final int numInjectors = 1;

    /**
     * Create the injectors. This way all the caches are independent of one another. This is the
     * same as multiple nodes.
     */
    final List<Injector> injectors = createInjectors(numInjectors);

    final GraphFig graphFig = getInstance(injectors, GraphFig.class);

    final long shardSize = graphFig.getShardSize();

    // We don't want to starve the Cassandra runtime since it will be on the same box.
    // Only take 50% of processing power for writes.
    final int numProcessors = Runtime.getRuntime().availableProcessors() / 2;

    final int numWorkersPerInjector = numProcessors / numInjectors;

    /** Do 4x shard size so we should have approximately 4 shards */
    final long numberOfEdges = shardSize * 4;

    final long workerWriteLimit = numberOfEdges / numWorkersPerInjector / numInjectors;

    createExecutor(numWorkersPerInjector);

    final AtomicLong writeCounter = new AtomicLong();

    // min stop time is the min delta + 1 cache cycle timeout
    final long minExecutionTime = graphFig.getShardMinDelta() + graphFig.getShardCacheTimeout();

    logger.info(
        "Writing {} edges per worker on {} workers in {} injectors",
        workerWriteLimit,
        numWorkersPerInjector,
        numInjectors);

    final List<Future<Boolean>> futures = new ArrayList<>();

    for (Injector injector : injectors) {
      final GraphManagerFactory gmf = injector.getInstance(GraphManagerFactory.class);

      for (int i = 0; i < numWorkersPerInjector; i++) {
        Future<Boolean> future =
            executor.submit(
                new Worker(gmf, generator, workerWriteLimit, minExecutionTime, writeCounter));

        futures.add(future);
      }
    }

    /** Wait for all writes to complete */
    for (Future<Boolean> future : futures) {
      future.get();
    }

    // now get all our shards
    final NodeShardCache cache = getInstance(injectors, NodeShardCache.class);

    final DirectedEdgeMeta directedEdgeMeta = DirectedEdgeMeta.fromSourceNode(sourceId, edgeType);

    // now submit the readers.
    final GraphManagerFactory gmf = getInstance(injectors, GraphManagerFactory.class);

    final long writeCount = writeCounter.get();
    final Meter readMeter = registry.meter("readThroughput");

    // check our shard state

    final Iterator<ShardEntryGroup> existingShardGroups =
        cache.getReadShardGroup(scope, Long.MAX_VALUE, directedEdgeMeta);
    int shardCount = 0;

    while (existingShardGroups.hasNext()) {
      final ShardEntryGroup group = existingShardGroups.next();

      shardCount++;

      logger.info(
          "Compaction pending status for group {} is {}", group, group.isCompactionPending());
    }

    logger.info("found {} shard groups", shardCount);

    // now mark and delete all the edges

    final GraphManager manager = gmf.createEdgeManager(scope);

    // sleep occasionally to stop pushing cassandra over

    long count = Long.MAX_VALUE;

    while (count != 0) {
      // take 1000 then sleep
      count =
          generator
              .doSearch(manager)
              .onBackpressureBlock()
              .take(1000)
              .flatMap(edge -> manager.markEdge(edge))
              .flatMap(edge -> manager.deleteEdge(edge))
              .countLong()
              .toBlocking()
              .last();

      Thread.sleep(500);
    }

    // now loop with a reader until our shards are gone

    /** Start reading continuously while we migrate data to ensure our view is always correct */
    final ListenableFuture<Long> future =
        executor.submit(new ReadWorker(gmf, generator, 0, readMeter));

    final List<Throwable> failures = new ArrayList<>();

    // add the future
    Futures.addCallback(
        future,
        new FutureCallback<Long>() {

          @Override
          public void onSuccess(@Nullable final Long result) {
            logger.info("Successfully ran the read, re-running");
            executor.submit(new ReadWorker(gmf, generator, writeCount, readMeter));
          }

          @Override
          public void onFailure(final Throwable t) {
            failures.add(t);
            logger.error("Failed test!", t);
          }
        });

    // now start our readers

    while (true) {

      if (!failures.isEmpty()) {

        StringBuilder builder = new StringBuilder();

        builder.append("Read runner failed!\n");

        for (Throwable t : failures) {
          builder.append("Exception is: ");
          ByteArrayOutputStream output = new ByteArrayOutputStream();

          // PrintWriter buffers its output, so flush before reading the bytes back.
          PrintWriter printWriter = new PrintWriter(output);
          t.printStackTrace(printWriter);
          printWriter.flush();

          builder.append(output.toString("UTF-8"));
          builder.append("\n\n");
        }

        fail(builder.toString());
      }

      // reset our count.  Ultimately we'll have 4 groups once our compaction completes
      shardCount = 0;

      // we have to get it from the cache, because this will trigger the compaction process
      final Iterator<ShardEntryGroup> groups =
          cache.getReadShardGroup(scope, Long.MAX_VALUE, directedEdgeMeta);

      ShardEntryGroup group = null;

      while (groups.hasNext()) {

        group = groups.next();

        logger.info("Shard size for group is {}", group.getReadShards());

        shardCount += group.getReadShards().size();
      }

      // we're done, 1 shard remains, we have a group, and it's our default shard
      if (shardCount == 1
          && group != null
          && group.getMinShard().getShardIndex() == Shard.MIN_SHARD.getShardIndex()) {
        logger.info("All compactions complete,");

        break;
      }

      Thread.sleep(2000);
    }

    // now that we have finished expanding shards

    executor.shutdownNow();
  }

  private class Worker implements Callable<Boolean> {
    private final GraphManagerFactory factory;
    private final EdgeGenerator generator;
    private final long writeLimit;
    private final long minExecutionTime;
    private final AtomicLong writeCounter;

    private Worker(
        final GraphManagerFactory factory,
        final EdgeGenerator generator,
        final long writeLimit,
        final long minExecutionTime,
        final AtomicLong writeCounter) {
      this.factory = factory;
      this.generator = generator;
      this.writeLimit = writeLimit;
      this.minExecutionTime = minExecutionTime;
      this.writeCounter = writeCounter;
    }

    @Override
    public Boolean call() throws Exception {
      GraphManager manager = factory.createEdgeManager(scope);

      final long startTime = System.currentTimeMillis();

      for (long i = 0;
          i < writeLimit || System.currentTimeMillis() - startTime < minExecutionTime;
          i++) {

        Edge edge = generator.newEdge();

        Edge returned = manager.writeEdge(edge).toBlocking().last();

        assertNotNull("Returned has a version", returned.getTimestamp());

        writeMeter.mark();

        writeCounter.incrementAndGet();

        if (i % 1000 == 0) {
          logger.info("   Wrote: " + i);
        }
      }

      return true;
    }
  }

  private class ReadWorker implements Callable<Long> {
    private final GraphManagerFactory factory;
    private final EdgeGenerator generator;
    private final long writeCount;
    private final Meter readMeter;

    private ReadWorker(
        final GraphManagerFactory factory,
        final EdgeGenerator generator,
        final long writeCount,
        final Meter readMeter) {
      this.factory = factory;
      this.generator = generator;
      this.writeCount = writeCount;
      this.readMeter = readMeter;
    }

    @Override
    public Long call() throws Exception {

      GraphManager gm = factory.createEdgeManager(scope);

      while (true) {

        // do a read to eventually trigger our group compaction. Take 2 pages of columns
        final long returnedEdgeCount =
            generator
                .doSearch(gm)
                .doOnNext(edge -> readMeter.mark())
                .countLong()
                .toBlocking()
                .last();

        logger.info("Completed reading {} edges", returnedEdgeCount);

        if (writeCount != returnedEdgeCount) {
          logger.warn(
              "Unexpected edge count returned!!!  Expected {} but was {}",
              writeCount,
              returnedEdgeCount);
        }

        assertEquals("Expected to read same edge count", writeCount, returnedEdgeCount);
      }
    }
  }

  private interface EdgeGenerator {

    /** Create a new edge to persist */
    public Edge newEdge();

    /** Perform the search returning an observable edge */
    public Observable<MarkedEdge> doSearch(final GraphManager manager);
  }
}