@Override
public Boolean call() throws Exception {
    GraphManager manager = factory.createEdgeManager(scope);

    final long startTime = System.currentTimeMillis();

    // write at least writeLimit edges, and keep writing until the minimum execution time has elapsed
    for (long i = 0; i < writeLimit || System.currentTimeMillis() - startTime < minExecutionTime; i++) {

        Edge edge = generator.newEdge();

        Edge returned = manager.writeEdge(edge).toBlocking().last();

        assertNotNull("Returned has a version", returned.getTimestamp());

        writeMeter.mark();

        writeCounter.incrementAndGet();

        if (i % 1000 == 0) {
            logger.info(" Wrote: " + i);
        }
    }

    return true;
}
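// Sketch only (not part of the original excerpt): the Worker wiring the call() body above
// appears to assume. Field names are inferred from usage; writeMeter is assumed to be a
// shared com.codahale.metrics.Meter owned by the enclosing test rather than the worker itself.
private class Worker implements Callable<Boolean> {

    private final GraphManagerFactory factory;
    private final EdgeGenerator generator;
    private final long writeLimit;
    private final long minExecutionTime;
    private final AtomicLong writeCounter;

    private Worker(final GraphManagerFactory factory, final EdgeGenerator generator, final long writeLimit,
                   final long minExecutionTime, final AtomicLong writeCounter) {
        this.factory = factory;
        this.generator = generator;
        this.writeLimit = writeLimit;
        this.minExecutionTime = minExecutionTime;
        this.writeCounter = writeCounter;
    }

    // call() as shown above
}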
@Override
public Long call() throws Exception {
    GraphManager gm = factory.createEdgeManager(scope);

    while (true) {

        // do a read to eventually trigger our group compaction. Take 2 pages of columns
        final long returnedEdgeCount = generator.doSearch(gm)
            .doOnNext(edge -> readMeter.mark())
            .countLong()
            .toBlocking()
            .last();

        logger.info("Completed reading {} edges", returnedEdgeCount);

        if (writeCount != returnedEdgeCount) {
            logger.warn("Unexpected edge count returned!!! Expected {} but was {}", writeCount, returnedEdgeCount);
        }

        assertEquals("Expected to read same edge count", writeCount, returnedEdgeCount);
    }
}
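// Sketch only (not part of the original excerpt): the ReadWorker wiring the call() body above
// appears to assume, matching the four-argument constructions used later in the test.
// Field names are inferred from usage.
private class ReadWorker implements Callable<Long> {

    private final GraphManagerFactory factory;
    private final EdgeGenerator generator;
    private final long writeCount;
    private final Meter readMeter;

    private ReadWorker(final GraphManagerFactory factory, final EdgeGenerator generator, final long writeCount,
                       final Meter readMeter) {
        this.factory = factory;
        this.generator = generator;
        this.writeCount = writeCount;
        this.readMeter = readMeter;
    }

    // call() as shown above
}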
@Test(timeout = 120000)
@Category(StressTest.class)
public void writeThousandsDelete()
    throws InterruptedException, ExecutionException, MigrationException, UnsupportedEncodingException {

    final Id sourceId = IdGenerator.createId("source");
    final String edgeType = "test";

    final EdgeGenerator generator = new EdgeGenerator() {

        @Override
        public Edge newEdge() {
            Edge edge = createEdge(sourceId, edgeType, IdGenerator.createId("target"));
            return edge;
        }

        @Override
        public Observable<MarkedEdge> doSearch(final GraphManager manager) {
            return manager.loadEdgesFromSource(
                new SimpleSearchByEdgeType(sourceId, edgeType, Long.MAX_VALUE,
                    SearchByEdgeType.Order.DESCENDING, Optional.<Edge>absent(), false));
        }
    };

    //        final int numInjectors = 2;
    final int numInjectors = 1;

    /**
     * Create the injectors. Each injector gets its own caches, independent of the others,
     * so multiple injectors simulate multiple nodes.
     */
    final List<Injector> injectors = createInjectors(numInjectors);

    final GraphFig graphFig = getInstance(injectors, GraphFig.class);

    final long shardSize = graphFig.getShardSize();

    // we don't want to starve the Cassandra runtime since it will be on the same box.
    // Only take 50% of the available processing power for writes
    final int numProcessors = Runtime.getRuntime().availableProcessors() / 2;

    final int numWorkersPerInjector = numProcessors / numInjectors;

    /** Write 4x the shard size so we should end up with approximately 4 shards */
    final long numberOfEdges = shardSize * 4;

    final long workerWriteLimit = numberOfEdges / numWorkersPerInjector / numInjectors;

    createExecutor(numWorkersPerInjector);

    final AtomicLong writeCounter = new AtomicLong();

    // minimum stop time: the minimum shard delta plus one cache cycle timeout
    final long minExecutionTime = graphFig.getShardMinDelta() + graphFig.getShardCacheTimeout();

    logger.info("Writing {} edges per worker on {} workers in {} injectors",
        workerWriteLimit, numWorkersPerInjector, numInjectors);

    final List<Future<Boolean>> futures = new ArrayList<>();

    for (Injector injector : injectors) {
        final GraphManagerFactory gmf = injector.getInstance(GraphManagerFactory.class);

        for (int i = 0; i < numWorkersPerInjector; i++) {
            Future<Boolean> future = executor.submit(
                new Worker(gmf, generator, workerWriteLimit, minExecutionTime, writeCounter));
            futures.add(future);
        }
    }

    /** Wait for all writes to complete */
    for (Future<Boolean> future : futures) {
        future.get();
    }

    // now get all our shards
    final NodeShardCache cache = getInstance(injectors, NodeShardCache.class);

    final DirectedEdgeMeta directedEdgeMeta = DirectedEdgeMeta.fromSourceNode(sourceId, edgeType);

    // now submit the readers.
    final GraphManagerFactory gmf = getInstance(injectors, GraphManagerFactory.class);

    final long writeCount = writeCounter.get();
    final Meter readMeter = registry.meter("readThroughput");

    // check our shard state
    final Iterator<ShardEntryGroup> existingShardGroups =
        cache.getReadShardGroup(scope, Long.MAX_VALUE, directedEdgeMeta);
    int shardCount = 0;

    while (existingShardGroups.hasNext()) {
        final ShardEntryGroup group = existingShardGroups.next();
        shardCount++;
        logger.info("Compaction pending status for group {} is {}", group, group.isCompactionPending());
    }

    logger.info("found {} shard groups", shardCount);

    // now mark and delete all the edges
    final GraphManager manager = gmf.createEdgeManager(scope);

    // sleep occasionally to stop pushing cassandra over
    long count = Long.MAX_VALUE;
    while (count != 0) {
        // take 1000 edges, then sleep
        count = generator.doSearch(manager)
            .onBackpressureBlock()
            .take(1000)
            .flatMap(edge -> manager.markEdge(edge))
            .flatMap(edge -> manager.deleteEdge(edge))
            .countLong()
            .toBlocking()
            .last();

        Thread.sleep(500);
    }

    // now loop with a reader until our shards are gone

    /** Start reading continuously while we migrate data to ensure our view is always correct */
    final ListenableFuture<Long> future = executor.submit(new ReadWorker(gmf, generator, 0, readMeter));

    final List<Throwable> failures = new ArrayList<>();

    // add a callback to the future so failures are collected and successful reads are re-run
    Futures.addCallback(future, new FutureCallback<Long>() {

        @Override
        public void onSuccess(@Nullable final Long result) {
            logger.info("Successfully ran the read, re-running");
            executor.submit(new ReadWorker(gmf, generator, writeCount, readMeter));
        }

        @Override
        public void onFailure(final Throwable t) {
            failures.add(t);
            logger.error("Failed test!", t);
        }
    });

    // now start our readers
    while (true) {

        if (!failures.isEmpty()) {

            StringBuilder builder = new StringBuilder();
            builder.append("Read runner failed!\n");

            for (Throwable t : failures) {
                builder.append("Exception is: ");
                ByteArrayOutputStream output = new ByteArrayOutputStream();

                // flush the PrintWriter so the stack trace actually reaches the underlying stream
                PrintWriter writer = new PrintWriter(output);
                t.printStackTrace(writer);
                writer.flush();

                builder.append(output.toString("UTF-8"));
                builder.append("\n\n");
            }

            fail(builder.toString());
        }

        // reset our count. Ultimately we should be left with just the default shard once compaction completes
        shardCount = 0;

        // we have to get it from the cache, because this will trigger the compaction process
        final Iterator<ShardEntryGroup> groups =
            cache.getReadShardGroup(scope, Long.MAX_VALUE, directedEdgeMeta);

        ShardEntryGroup group = null;

        while (groups.hasNext()) {
            group = groups.next();
            logger.info("Shard size for group is {}", group.getReadShards());
            shardCount += group.getReadShards().size();
        }

        // we're done when a single shard remains, we have a group, and it's our default shard
        if (shardCount == 1 && group != null
            && group.getMinShard().getShardIndex() == Shard.MIN_SHARD.getShardIndex()) {
            logger.info("All compactions complete");
            break;
        }

        Thread.sleep(2000);
    }

    // now that we're done, shut down the executor
    executor.shutdownNow();
}
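// Sketch only (assumptions, not the original helpers): the executor setup and the EdgeGenerator
// contract this test relies on. executor must be a Guava ListeningExecutorService so that
// submit() returns the ListenableFuture consumed above; the pool size and decorator here are
// assumptions, and the EdgeGenerator shape is inferred from the anonymous implementation in the test.
private ListeningExecutorService executor;

private void createExecutor(final int workers) {
    // wrap a fixed-size pool so submitted tasks come back as ListenableFutures
    executor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(workers));
}

private interface EdgeGenerator {

    /** Create a new edge to be written by a Worker. */
    Edge newEdge();

    /** Stream back the edges previously written so they can be verified or deleted. */
    Observable<MarkedEdge> doSearch(final GraphManager manager);
}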