@Override
  public Iterator<MarkedEdge> getEdgesToTargetBySourceType(
      final ApplicationScope scope, final SearchByIdType edgeType) {

    ValidationUtils.validateApplicationScope(scope);
    GraphValidation.validateSearchByIdType(edgeType);

    final Id targetId = edgeType.getNode();
    final String sourceType = edgeType.getIdType();
    final String type = edgeType.getType();
    final long maxTimestamp = edgeType.getMaxTimestamp();

    final DirectedEdgeMeta directedEdgeMeta =
        DirectedEdgeMeta.fromTargetNodeSourceType(targetId, type, sourceType);

    final Iterator<ShardEntryGroup> readShards =
        edgeShardStrategy.getReadShards(scope, maxTimestamp, directedEdgeMeta);

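    // lazily iterate each shard group's columns; the full-range variant below is used when a
    // group must be scanned across its entire timestamp range (e.g. when deciding whether the
    // group can be deleted)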
    return new ShardGroupColumnIterator(scope, directedEdgeMeta, shardGroupDeletion, readShards) {
      @Override
      protected Iterator<MarkedEdge> getIterator(final Collection<Shard> readShards) {
        return shardedEdgeSerialization.getEdgesToTargetBySourceType(
            edgeColumnFamilies, scope, edgeType, readShards);
      }

      @Override
      protected Iterator<MarkedEdge> getIteratorFullRange(final Collection<Shard> readShards) {

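        // rebuild the search so it spans the full timestamp range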
        final SearchByIdType edgeTypeFullRange =
            new SimpleSearchByIdType(
                edgeType.getNode(),
                edgeType.getType(),
                Long.MAX_VALUE,
                SearchByEdgeType.Order.DESCENDING,
                edgeType.getIdType(),
                Optional.absent(),
                false);

        return shardedEdgeSerialization.getEdgesToTargetBySourceType(
            edgeColumnFamilies, scope, edgeTypeFullRange, readShards);
      }
    };
  }
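
  /**
   * Returns every stored version of the edge described by {@code search}.
   *
   * <p>A minimal usage sketch (hypothetical {@code serialization}, {@code sourceId}, and
   * {@code targetId}; the constructor arguments mirror the full-range search built below):
   *
   * <pre>{@code
   * SearchByEdge search =
   *     new SimpleSearchByEdge(
   *         sourceId, "test", targetId, Long.MAX_VALUE,
   *         SearchByEdgeType.Order.DESCENDING, Optional.absent());
   *
   * Iterator<MarkedEdge> versions = serialization.getEdgeVersions(scope, search);
   * }</pre>
   */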
  @Override
  public Iterator<MarkedEdge> getEdgeVersions(
      final ApplicationScope scope, final SearchByEdge search) {
    ValidationUtils.validateApplicationScope(scope);
    GraphValidation.validateSearchByEdge(search);

    final Id targetId = search.targetNode();
    final Id sourceId = search.sourceNode();
    final String type = search.getType();
    final long maxTimestamp = search.getMaxTimestamp();

    final DirectedEdgeMeta versionMetaData = DirectedEdgeMeta.fromEdge(sourceId, targetId, type);

    final Iterator<ShardEntryGroup> readShards =
        edgeShardStrategy.getReadShards(scope, maxTimestamp, versionMetaData);

    // now create a result iterator with our iterator of read shards

    return new ShardGroupColumnIterator(scope, versionMetaData, shardGroupDeletion, readShards) {
      @Override
      protected Iterator<MarkedEdge> getIterator(final Collection<Shard> readShards) {
        return shardedEdgeSerialization.getEdgeVersions(
            edgeColumnFamilies, scope, search, readShards);
      }

      @Override
      protected Iterator<MarkedEdge> getIteratorFullRange(final Collection<Shard> readShards) {

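        // same search, but spanning the full timestamp range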
        final SearchByEdge searchFullRange =
            new SimpleSearchByEdge(
                search.sourceNode(),
                search.getType(),
                search.targetNode(),
                Long.MAX_VALUE,
                SearchByEdgeType.Order.DESCENDING,
                Optional.absent());

        return shardedEdgeSerialization.getEdgeVersions(
            edgeColumnFamilies, scope, searchFullRange, readShards);
      }
    };
  }
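
  /**
   * Writes a marked edge to all five of its read views: from-source, from-source-with-target-type,
   * to-target, to-target-with-source-type, and the edge-versions view. All five mutations are
   * merged into a single batch; executing it is left to the caller.
   *
   * <p>A minimal sketch (hypothetical names; {@code writeTime} is any time-based UUID):
   *
   * <pre>{@code
   * MutationBatch batch = serialization.writeEdge(scope, markedEdge, writeTime);
   * batch.execute();
   * }</pre>
   */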
  @Override
  public MutationBatch writeEdge(
      final ApplicationScope scope, final MarkedEdge markedEdge, final UUID timestamp) {

    ValidationUtils.validateApplicationScope(scope);
    GraphValidation.validateEdge(markedEdge);
    ValidationUtils.verifyTimeUuid(timestamp, "timestamp");

    final long now = timeService.getCurrentTime();
    final Id sourceNode = markedEdge.getSourceNode();
    final Id targetNode = markedEdge.getTargetNode();
    final String edgeType = markedEdge.getType();
    final long edgeTimestamp = markedEdge.getTimestamp();

    /** Source write */
    final DirectedEdgeMeta sourceEdgeMeta = DirectedEdgeMeta.fromSourceNode(sourceNode, edgeType);

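    // the strategy returns a ShardEntryGroup; getWriteShards(now) resolves the concrete shards
    // currently accepting writes for that group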
    final Collection<Shard> sourceWriteShards =
        edgeShardStrategy.getWriteShards(scope, edgeTimestamp, sourceEdgeMeta).getWriteShards(now);

    final MutationBatch batch =
        shardedEdgeSerialization.writeEdgeFromSource(
            edgeColumnFamilies, scope, markedEdge, sourceWriteShards, sourceEdgeMeta, timestamp);

    /** Source with target type write */
    final DirectedEdgeMeta sourceTargetTypeEdgeMeta =
        DirectedEdgeMeta.fromSourceNodeTargetType(sourceNode, edgeType, targetNode.getType());

    final Collection<Shard> sourceTargetTypeWriteShards =
        edgeShardStrategy
            .getWriteShards(scope, edgeTimestamp, sourceTargetTypeEdgeMeta)
            .getWriteShards(now);

    batch.mergeShallow(
        shardedEdgeSerialization.writeEdgeFromSourceWithTargetType(
            edgeColumnFamilies,
            scope,
            markedEdge,
            sourceTargetTypeWriteShards,
            sourceTargetTypeEdgeMeta,
            timestamp));

    /** Target write */
    final DirectedEdgeMeta targetEdgeMeta = DirectedEdgeMeta.fromTargetNode(targetNode, edgeType);

    final Collection<Shard> targetWriteShards =
        edgeShardStrategy.getWriteShards(scope, edgeTimestamp, targetEdgeMeta).getWriteShards(now);

    batch.mergeShallow(
        shardedEdgeSerialization.writeEdgeToTarget(
            edgeColumnFamilies, scope, markedEdge, targetWriteShards, targetEdgeMeta, timestamp));

    /** Target with source type write */
    final DirectedEdgeMeta targetSourceTypeEdgeMeta =
        DirectedEdgeMeta.fromTargetNodeSourceType(targetNode, edgeType, sourceNode.getType());

    final Collection<Shard> targetSourceTypeWriteShards =
        edgeShardStrategy
            .getWriteShards(scope, edgeTimestamp, targetSourceTypeEdgeMeta)
            .getWriteShards(now);

    batch.mergeShallow(
        shardedEdgeSerialization.writeEdgeToTargetWithSourceType(
            edgeColumnFamilies,
            scope,
            markedEdge,
            targetSourceTypeWriteShards,
            targetSourceTypeEdgeMeta,
            timestamp));

    /** Version write */
    final DirectedEdgeMeta edgeVersionsMeta =
        DirectedEdgeMeta.fromEdge(sourceNode, targetNode, edgeType);

    final Collection<Shard> edgeVersionsShards =
        edgeShardStrategy
            .getWriteShards(scope, edgeTimestamp, edgeVersionsMeta)
            .getWriteShards(now);

    batch.mergeShallow(
        shardedEdgeSerialization.writeEdgeVersions(
            edgeColumnFamilies,
            scope,
            markedEdge,
            edgeVersionsShards,
            edgeVersionsMeta,
            timestamp));

    return batch;
  }
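
  /**
   * Stress test: write roughly four shards worth of edges from a single source node, mark and
   * delete them all, then poll the shard cache until compaction collapses everything back to the
   * single default (minimum) shard.
   */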
  @Test(timeout = 120000)
  @Category(StressTest.class)
  public void writeThousandsDelete()
      throws InterruptedException, ExecutionException, MigrationException,
          UnsupportedEncodingException {

    final Id sourceId = IdGenerator.createId("source");
    final String edgeType = "test";

    final EdgeGenerator generator =
        new EdgeGenerator() {

          @Override
          public Edge newEdge() {
            return createEdge(sourceId, edgeType, IdGenerator.createId("target"));
          }

          @Override
          public Observable<MarkedEdge> doSearch(final GraphManager manager) {
            return manager.loadEdgesFromSource(
                new SimpleSearchByEdgeType(
                    sourceId,
                    edgeType,
                    Long.MAX_VALUE,
                    SearchByEdgeType.Order.DESCENDING,
                    Optional.<Edge>absent(),
                    false));
          }
        };

    final int numInjectors = 1;

    /**
     * Create numInjectors injectors. This way all the caches are independent of one another, the
     * same as multiple nodes.
     */
    final List<Injector> injectors = createInjectors(numInjectors);

    final GraphFig graphFig = getInstance(injectors, GraphFig.class);

    final long shardSize = graphFig.getShardSize();

    // we don't want to starve the cass runtime since it will be on the same box; only take 50% of
    // the processors for writes
    final int numProcessors = Runtime.getRuntime().availableProcessors() / 2;

    final int numWorkersPerInjector = numProcessors / numInjectors;

    /** Do 4x shard size so we should have approximately 4 shards */
    final long numberOfEdges = shardSize * 4;

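    // split the total edge count evenly across every worker in every injector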
    final long workerWriteLimit = numberOfEdges / numWorkersPerInjector / numInjectors;

    createExecutor(numWorkersPerInjector);

    final AtomicLong writeCounter = new AtomicLong();

    // minimum execution time: the min shard delta plus one cache cycle timeout
    final long minExecutionTime = graphFig.getShardMinDelta() + graphFig.getShardCacheTimeout();

    logger.info(
        "Writing {} edges per worker on {} workers in {} injectors",
        workerWriteLimit,
        numWorkersPerInjector,
        numInjectors);

    final List<Future<Boolean>> futures = new ArrayList<>();

    for (Injector injector : injectors) {
      final GraphManagerFactory gmf = injector.getInstance(GraphManagerFactory.class);

      for (int i = 0; i < numWorkersPerInjector; i++) {
        Future<Boolean> future =
            executor.submit(
                new Worker(gmf, generator, workerWriteLimit, minExecutionTime, writeCounter));

        futures.add(future);
      }
    }

    /** Wait for all writes to complete */
    for (Future<Boolean> future : futures) {
      future.get();
    }

    // now get all our shards
    final NodeShardCache cache = getInstance(injectors, NodeShardCache.class);

    final DirectedEdgeMeta directedEdgeMeta = DirectedEdgeMeta.fromSourceNode(sourceId, edgeType);

    // now submit the readers.
    final GraphManagerFactory gmf = getInstance(injectors, GraphManagerFactory.class);

    final long writeCount = writeCounter.get();
    final Meter readMeter = registry.meter("readThroughput");

    // check our shard state

    final Iterator<ShardEntryGroup> existingShardGroups =
        cache.getReadShardGroup(scope, Long.MAX_VALUE, directedEdgeMeta);
    int shardCount = 0;

    while (existingShardGroups.hasNext()) {
      final ShardEntryGroup group = existingShardGroups.next();

      shardCount++;

      logger.info(
          "Compaction pending status for group {} is {}", group, group.isCompactionPending());
    }

    logger.info("found {} shard groups", shardCount);

    // now mark and delete all the edges

    final GraphManager manager = gmf.createEdgeManager(scope);

    // sleep occasionally so we don't overwhelm cassandra

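    // mark (tombstone) each edge, then delete it; deletion operates on marked edges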
    long count = Long.MAX_VALUE;

    while (count != 0) {
      // take 1000 then sleep
      count =
          generator
              .doSearch(manager)
              .onBackpressureBlock()
              .take(1000)
              .flatMap(edge -> manager.markEdge(edge))
              .flatMap(edge -> manager.deleteEdge(edge))
              .countLong()
              .toBlocking()
              .last();

      Thread.sleep(500);
    }

    // now loop with a reader until our shards are gone

    /** Start reading continuously while we migrate data to ensure our view is always correct */
    final ListenableFuture<Long> future =
        executor.submit(new ReadWorker(gmf, generator, 0, readMeter));

    final List<Throwable> failures = new ArrayList<>();

    // add the future
    Futures.addCallback(
        future,
        new FutureCallback<Long>() {

          @Override
          public void onSuccess(@Nullable final Long result) {
            logger.info("Successfully ran the read, re-running");
            executor.submit(new ReadWorker(gmf, generator, writeCount, readMeter));
          }

          @Override
          public void onFailure(final Throwable t) {
            failures.add(t);
            logger.error("Failed test!", t);
          }
        });

    // now poll the shard state until compaction completes

    while (true) {

      if (!failures.isEmpty()) {

        StringBuilder builder = new StringBuilder();

        builder.append("Read runner failed!\n");

        for (Throwable t : failures) {
          builder.append("Exception is: ");
          ByteArrayOutputStream output = new ByteArrayOutputStream();
          PrintWriter printWriter = new PrintWriter(output);

          t.printStackTrace(printWriter);
          printWriter.flush();

          builder.append(output.toString("UTF-8"));
          builder.append("\n\n");
        }

        fail(builder.toString());
      }

      // reset our count; we're done once compaction has collapsed everything to a single shard
      shardCount = 0;

      // we have to get it from the cache, because this will trigger the compaction process
      final Iterator<ShardEntryGroup> groups =
          cache.getReadShardGroup(scope, Long.MAX_VALUE, directedEdgeMeta);

      ShardEntryGroup group = null;

      while (groups.hasNext()) {

        group = groups.next();

        logger.info("Shard size for group is {}", group.getReadShards());

        shardCount += group.getReadShards().size();
      }

      // we're done, 1 shard remains, we have a group, and it's our default shard
      if (shardCount == 1
          && group != null
          && group.getMinShard().getShardIndex() == Shard.MIN_SHARD.getShardIndex()) {
        logger.info("All compactions complete,");

        break;
      }

      Thread.sleep(2000);
    }

    // test complete; shut down the executor

    executor.shutdownNow();
  }
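
  /**
   * Write roughly four shards worth of edges from a single source node while readers continuously
   * verify the expected edge count, then wait until every expected shard group reports that its
   * compaction has completed.
   */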
  @Test
  public void writeThousandsSingleSource()
      throws InterruptedException, ExecutionException, MigrationException,
          UnsupportedEncodingException {

    final Id sourceId = IdGenerator.createId("source");
    final String edgeType = "test";

    final EdgeGenerator generator =
        new EdgeGenerator() {

          @Override
          public Edge newEdge() {
            return createEdge(sourceId, edgeType, IdGenerator.createId("target"));
          }

          @Override
          public Observable<MarkedEdge> doSearch(final GraphManager manager) {
            return manager.loadEdgesFromSource(
                new SimpleSearchByEdgeType(
                    sourceId,
                    edgeType,
                    Long.MAX_VALUE,
                    SearchByEdgeType.Order.DESCENDING,
                    Optional.<Edge>absent()));
          }
        };

    final int numInjectors = 1;

    /**
     * Create numInjectors injectors. This way all the caches are independent of one another, the
     * same as multiple nodes.
     */
    final List<Injector> injectors = createInjectors(numInjectors);

    final GraphFig graphFig = getInstance(injectors, GraphFig.class);

    final long shardSize = graphFig.getShardSize();

    // we don't want to starve the cass runtime since it will be on the same box; only take 50% of
    // the processors for writes
    final int numProcessors = Runtime.getRuntime().availableProcessors() / 2;

    final int numWorkersPerInjector = numProcessors / numInjectors;

    /** Do 4x shard size so we should have approximately 4 shards */
    final long numberOfEdges = shardSize * 4;

    final long workerWriteLimit = numberOfEdges / numWorkersPerInjector / numInjectors;

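    // with numberOfEdges = shardSize * 4, we expect 4 compacted shard groups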
    final long expectedShardCount = numberOfEdges / shardSize;

    createExecutor(numWorkersPerInjector);

    final AtomicLong writeCounter = new AtomicLong();

    // minimum execution time: the min shard delta plus one cache cycle timeout
    final long minExecutionTime = graphFig.getShardMinDelta() + graphFig.getShardCacheTimeout();

    logger.info(
        "Writing {} edges per worker on {} workers in {} injectors",
        workerWriteLimit,
        numWorkersPerInjector,
        numInjectors);

    final List<Future<Boolean>> futures = new ArrayList<>();

    for (Injector injector : injectors) {
      final GraphManagerFactory gmf = injector.getInstance(GraphManagerFactory.class);

      for (int i = 0; i < numWorkersPerInjector; i++) {
        Future<Boolean> future =
            executor.submit(
                new Worker(gmf, generator, workerWriteLimit, minExecutionTime, writeCounter));

        futures.add(future);
      }
    }

    /** Wait for all writes to complete */
    for (Future<Boolean> future : futures) {
      future.get();
    }

    // now get all our shards
    final NodeShardCache cache = getInstance(injectors, NodeShardCache.class);

    final DirectedEdgeMeta directedEdgeMeta = DirectedEdgeMeta.fromSourceNode(sourceId, edgeType);

    // now submit the readers.
    final GraphManagerFactory gmf = getInstance(injectors, GraphManagerFactory.class);

    final long writeCount = writeCounter.get();
    final Meter readMeter = registry.meter("readThroughput");

    final List<Throwable> failures = new ArrayList<>();

    for (int i = 0; i < 2; i++) {

      /** Start reading continuously while we migrate data to ensure our view is always correct */
      final ListenableFuture<Long> future =
          executor.submit(new ReadWorker(gmf, generator, writeCount, readMeter));

      // add the future
      Futures.addCallback(
          future,
          new FutureCallback<Long>() {

            @Override
            public void onSuccess(@Nullable final Long result) {
              logger.info("Successfully ran the read, re-running");
              executor.submit(new ReadWorker(gmf, generator, writeCount, readMeter));
            }

            @Override
            public void onFailure(final Throwable t) {
              failures.add(t);
              logger.error("Failed test!", t);
            }
          });
    }

    int compactedCount;

    // now poll the shard state until compaction completes

    while (true) {

      if (!failures.isEmpty()) {

        StringBuilder builder = new StringBuilder();

        builder.append("Read runner failed!\n");

        for (Throwable t : failures) {
          builder.append("Exception is: ");
          ByteArrayOutputStream output = new ByteArrayOutputStream();
          PrintWriter printWriter = new PrintWriter(output);

          t.printStackTrace(printWriter);
          printWriter.flush();

          builder.append(output.toString("UTF-8"));
          builder.append("\n\n");
        }

        fail(builder.toString());
      }

      // reset our count; we're done once we've seen expectedShardCount compacted groups
      compactedCount = 0;

      // we have to get it from the cache, because this will trigger the compaction process
      final Iterator<ShardEntryGroup> groups =
          cache.getReadShardGroup(scope, Long.MAX_VALUE, directedEdgeMeta);
      final Set<ShardEntryGroup> shardEntryGroups = new HashSet<>();

      while (groups.hasNext()) {

        final ShardEntryGroup group = groups.next();
        shardEntryGroups.add(group);

        logger.info(
            "Compaction pending status for group {} is {}", group, group.isCompactionPending());

        if (!group.isCompactionPending()) {
          compactedCount++;
        }
      }

      // we're done
      if (compactedCount >= expectedShardCount) {
        logger.info("All compactions complete, sleeping");

        break;
      }

      Thread.sleep(2000);
    }

    // now continue reading everything for 30 seconds

    Thread.sleep(30000);

    executor.shutdownNow();
  }