@Before
public void setupOrg() {

  originalShardSize = ConfigurationManager.getConfigInstance().getProperty(GraphFig.SHARD_SIZE);

  originalShardTimeout =
      ConfigurationManager.getConfigInstance().getProperty(GraphFig.SHARD_CACHE_TIMEOUT);

  originalShardDelta =
      ConfigurationManager.getConfigInstance().getProperty(GraphFig.SHARD_MIN_DELTA);

  ConfigurationManager.getConfigInstance().setProperty(GraphFig.SHARD_SIZE, 500);

  final long cacheTimeout = 2000;

  // set our cache timeout to the above value
  ConfigurationManager.getConfigInstance().setProperty(GraphFig.SHARD_CACHE_TIMEOUT, cacheTimeout);

  final long minDelta = (long) (cacheTimeout * 2.5);

  ConfigurationManager.getConfigInstance().setProperty(GraphFig.SHARD_MIN_DELTA, minDelta);

  // get the system property of the UUID to use. If one is not set, use the default
  String uuidString = System.getProperty("org.id", "80a42760-b699-11e3-a5e2-0800200c9a66");

  scope = new ApplicationScopeImpl(IdGenerator.createId(UUID.fromString(uuidString), "test"));

  reporter =
      Slf4jReporter.forRegistry(registry)
          .outputTo(logger)
          .convertRatesTo(TimeUnit.SECONDS)
          .convertDurationsTo(TimeUnit.MILLISECONDS)
          .build();

  reporter.start(10, TimeUnit.SECONDS);
}
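// setupOrg() saves the original shard settings, so a matching @After hook presumably restores
// them and stops the metrics reporter. A minimal sketch, assuming commons-configuration
// setProperty semantics; the method name and exact cleanup are illustrative, not the original:
@After
public void restoreConfig() {

  // put back whatever values were configured before the test mutated them
  ConfigurationManager.getConfigInstance().setProperty(GraphFig.SHARD_SIZE, originalShardSize);
  ConfigurationManager.getConfigInstance()
      .setProperty(GraphFig.SHARD_CACHE_TIMEOUT, originalShardTimeout);
  ConfigurationManager.getConfigInstance().setProperty(GraphFig.SHARD_MIN_DELTA, originalShardDelta);

  // emit one final metrics report, then stop the scheduled reporter
  reporter.report();
  reporter.stop();
}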
@Test(timeout = 120000)
@Category(StressTest.class)
public void writeThousandsDelete()
    throws InterruptedException, ExecutionException, MigrationException,
        UnsupportedEncodingException {

  final Id sourceId = IdGenerator.createId("source");
  final String edgeType = "test";

  final EdgeGenerator generator =
      new EdgeGenerator() {

        @Override
        public Edge newEdge() {
          Edge edge = createEdge(sourceId, edgeType, IdGenerator.createId("target"));
          return edge;
        }

        @Override
        public Observable<MarkedEdge> doSearch(final GraphManager manager) {
          return manager.loadEdgesFromSource(
              new SimpleSearchByEdgeType(
                  sourceId,
                  edgeType,
                  Long.MAX_VALUE,
                  SearchByEdgeType.Order.DESCENDING,
                  Optional.<Edge>absent(),
                  false));
        }
      };

  // final int numInjectors = 2;
  final int numInjectors = 1;

  /**
   * Create numInjectors injectors. This way all the caches are independent of one another,
   * which is the same as running multiple nodes.
   */
  final List<Injector> injectors = createInjectors(numInjectors);

  final GraphFig graphFig = getInstance(injectors, GraphFig.class);

  final long shardSize = graphFig.getShardSize();

  // We don't want to starve the cassandra runtime since it will be on the same box.
  // Only take 50% of processing power for writes.
  final int numProcessors = Runtime.getRuntime().availableProcessors() / 2;

  final int numWorkersPerInjector = numProcessors / numInjectors;

  /** Do 4x shard size so we should have approximately 4 shards */
  final long numberOfEdges = shardSize * 4;

  final long workerWriteLimit = numberOfEdges / numWorkersPerInjector / numInjectors;

  createExecutor(numWorkersPerInjector);

  final AtomicLong writeCounter = new AtomicLong();

  // min stop time: the min delta + 1 cache cycle timeout
  final long minExecutionTime = graphFig.getShardMinDelta() + graphFig.getShardCacheTimeout();

  logger.info(
      "Writing {} edges per worker on {} workers in {} injectors",
      workerWriteLimit,
      numWorkersPerInjector,
      numInjectors);

  final List<Future<Boolean>> futures = new ArrayList<>();

  for (Injector injector : injectors) {
    final GraphManagerFactory gmf = injector.getInstance(GraphManagerFactory.class);

    for (int i = 0; i < numWorkersPerInjector; i++) {
      Future<Boolean> future =
          executor.submit(
              new Worker(gmf, generator, workerWriteLimit, minExecutionTime, writeCounter));
      futures.add(future);
    }
  }

  /** Wait for all writes to complete */
  for (Future<Boolean> future : futures) {
    future.get();
  }

  // now get all our shards
  final NodeShardCache cache = getInstance(injectors, NodeShardCache.class);

  final DirectedEdgeMeta directedEdgeMeta = DirectedEdgeMeta.fromSourceNode(sourceId, edgeType);

  // now submit the readers.
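  // What follows: read back the current shard state, mark and delete every edge in batches
  // (sleeping between batches so cassandra isn't overwhelmed), then poll the shard cache,
  // which drives compaction, until the groups collapse back to the single minimum shard.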
  final GraphManagerFactory gmf = getInstance(injectors, GraphManagerFactory.class);

  final long writeCount = writeCounter.get();
  final Meter readMeter = registry.meter("readThroughput");

  // check our shard state
  final Iterator<ShardEntryGroup> existingShardGroups =
      cache.getReadShardGroup(scope, Long.MAX_VALUE, directedEdgeMeta);
  int shardCount = 0;

  while (existingShardGroups.hasNext()) {
    final ShardEntryGroup group = existingShardGroups.next();
    shardCount++;
    logger.info(
        "Compaction pending status for group {} is {}", group, group.isCompactionPending());
  }

  logger.info("found {} shard groups", shardCount);

  // now mark and delete all the edges
  final GraphManager manager = gmf.createEdgeManager(scope);

  // sleep occasionally to stop pushing cassandra over
  long count = Long.MAX_VALUE;

  while (count != 0) {
    // take 1000 then sleep
    count =
        generator
            .doSearch(manager)
            .onBackpressureBlock()
            .take(1000)
            .flatMap(edge -> manager.markEdge(edge))
            .flatMap(edge -> manager.deleteEdge(edge))
            .countLong()
            .toBlocking()
            .last();

    Thread.sleep(500);
  }

  // now loop with a reader until our shards are gone

  /** Start reading continuously while we migrate data to ensure our view is always correct */
  final ListenableFuture<Long> future =
      executor.submit(new ReadWorker(gmf, generator, 0, readMeter));

  final List<Throwable> failures = new ArrayList<>();

  // add the future
  Futures.addCallback(
      future,
      new FutureCallback<Long>() {

        @Override
        public void onSuccess(@Nullable final Long result) {
          logger.info("Successfully ran the read, re-running");
          executor.submit(new ReadWorker(gmf, generator, writeCount, readMeter));
        }

        @Override
        public void onFailure(final Throwable t) {
          failures.add(t);
          logger.error("Failed test!", t);
        }
      });

  // now start our readers
  while (true) {

    if (!failures.isEmpty()) {

      StringBuilder builder = new StringBuilder();
      builder.append("Read runner failed!\n");

      for (Throwable t : failures) {
        builder.append("Exception is: ");
        ByteArrayOutputStream output = new ByteArrayOutputStream();

        // flush the PrintWriter, otherwise the buffered stack trace may never reach the stream
        PrintWriter writer = new PrintWriter(output);
        t.printStackTrace(writer);
        writer.flush();

        builder.append(output.toString("UTF-8"));
        builder.append("\n\n");
      }

      fail(builder.toString());
    }

    // reset our count. Ultimately we should collapse to a single shard once compaction completes
    shardCount = 0;

    // we have to get it from the cache, because this will trigger the compaction process
    final Iterator<ShardEntryGroup> groups =
        cache.getReadShardGroup(scope, Long.MAX_VALUE, directedEdgeMeta);

    ShardEntryGroup group = null;

    while (groups.hasNext()) {
      group = groups.next();
      logger.info("Shard size for group is {}", group.getReadShards());
      shardCount += group.getReadShards().size();
    }

    // we're done when 1 shard remains, we have a group, and it's our default shard
    if (shardCount == 1
        && group != null
        && group.getMinShard().getShardIndex() == Shard.MIN_SHARD.getShardIndex()) {
      logger.info("All compactions complete");
      break;
    }

    Thread.sleep(2000);
  }

  // now that we have finished, shut down the executor
  executor.shutdownNow();
}
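// For reference, the Worker submitted by both tests is a Callable<Boolean> that writes edges
// through a GraphManager until it has hit its write limit AND the minimum execution time has
// elapsed. A minimal sketch under those assumptions; the class name, fields, and the
// writeEdge() call are illustrative stand-ins, not the original implementation:
private class WorkerSketch implements Callable<Boolean> {

  private final GraphManagerFactory factory;
  private final EdgeGenerator generator;
  private final long writeLimit;
  private final long minExecutionTime;
  private final AtomicLong writeCounter;

  private WorkerSketch(
      final GraphManagerFactory factory,
      final EdgeGenerator generator,
      final long writeLimit,
      final long minExecutionTime,
      final AtomicLong writeCounter) {
    this.factory = factory;
    this.generator = generator;
    this.writeLimit = writeLimit;
    this.minExecutionTime = minExecutionTime;
    this.writeCounter = writeCounter;
  }

  @Override
  public Boolean call() throws Exception {
    final GraphManager manager = factory.createEdgeManager(scope);
    final long startTime = System.currentTimeMillis();

    // keep writing until both the write limit and the minimum runtime are satisfied
    for (long i = 0;
        i < writeLimit || System.currentTimeMillis() - startTime < minExecutionTime;
        i++) {
      final Edge edge = generator.newEdge();
      manager.writeEdge(edge).toBlocking().last();
      writeCounter.incrementAndGet();
    }

    return true;
  }
}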
@Test
public void writeThousandsSingleSource()
    throws InterruptedException, ExecutionException, MigrationException,
        UnsupportedEncodingException {

  final Id sourceId = IdGenerator.createId("source");
  final String edgeType = "test";

  final EdgeGenerator generator =
      new EdgeGenerator() {

        @Override
        public Edge newEdge() {
          Edge edge = createEdge(sourceId, edgeType, IdGenerator.createId("target"));
          return edge;
        }

        @Override
        public Observable<MarkedEdge> doSearch(final GraphManager manager) {
          return manager.loadEdgesFromSource(
              new SimpleSearchByEdgeType(
                  sourceId,
                  edgeType,
                  Long.MAX_VALUE,
                  SearchByEdgeType.Order.DESCENDING,
                  Optional.<Edge>absent()));
        }
      };

  // final int numInjectors = 2;
  final int numInjectors = 1;

  /**
   * Create numInjectors injectors. This way all the caches are independent of one another,
   * which is the same as running multiple nodes.
   */
  final List<Injector> injectors = createInjectors(numInjectors);

  final GraphFig graphFig = getInstance(injectors, GraphFig.class);

  final long shardSize = graphFig.getShardSize();

  // We don't want to starve the cassandra runtime since it will be on the same box.
  // Only take 50% of processing power for writes.
  final int numProcessors = Runtime.getRuntime().availableProcessors() / 2;

  final int numWorkersPerInjector = numProcessors / numInjectors;

  /** Do 4x shard size so we should have approximately 4 shards */
  final long numberOfEdges = shardSize * 4;

  final long workerWriteLimit = numberOfEdges / numWorkersPerInjector / numInjectors;

  final long expectedShardCount = numberOfEdges / shardSize;

  createExecutor(numWorkersPerInjector);

  final AtomicLong writeCounter = new AtomicLong();

  // min stop time: the min delta + 1 cache cycle timeout
  final long minExecutionTime = graphFig.getShardMinDelta() + graphFig.getShardCacheTimeout();

  logger.info(
      "Writing {} edges per worker on {} workers in {} injectors",
      workerWriteLimit,
      numWorkersPerInjector,
      numInjectors);

  final List<Future<Boolean>> futures = new ArrayList<>();

  for (Injector injector : injectors) {
    final GraphManagerFactory gmf = injector.getInstance(GraphManagerFactory.class);

    for (int i = 0; i < numWorkersPerInjector; i++) {
      Future<Boolean> future =
          executor.submit(
              new Worker(gmf, generator, workerWriteLimit, minExecutionTime, writeCounter));
      futures.add(future);
    }
  }

  /** Wait for all writes to complete */
  for (Future<Boolean> future : futures) {
    future.get();
  }

  // now get all our shards
  final NodeShardCache cache = getInstance(injectors, NodeShardCache.class);

  final DirectedEdgeMeta directedEdgeMeta = DirectedEdgeMeta.fromSourceNode(sourceId, edgeType);

  // now submit the readers.
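  // What follows: run two continuously re-submitted readers to verify the view stays correct
  // while polling the shard cache (which itself triggers compaction) until the expected
  // number of groups report compaction complete.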
  final GraphManagerFactory gmf = getInstance(injectors, GraphManagerFactory.class);

  final long writeCount = writeCounter.get();
  final Meter readMeter = registry.meter("readThroughput");

  final List<Throwable> failures = new ArrayList<>();

  for (int i = 0; i < 2; i++) {

    /** Start reading continuously while we migrate data to ensure our view is always correct */
    final ListenableFuture<Long> future =
        executor.submit(new ReadWorker(gmf, generator, writeCount, readMeter));

    // add the future
    Futures.addCallback(
        future,
        new FutureCallback<Long>() {

          @Override
          public void onSuccess(@Nullable final Long result) {
            logger.info("Successfully ran the read, re-running");
            executor.submit(new ReadWorker(gmf, generator, writeCount, readMeter));
          }

          @Override
          public void onFailure(final Throwable t) {
            failures.add(t);
            logger.error("Failed test!", t);
          }
        });
  }

  int compactedCount;

  // now start our readers
  while (true) {

    if (!failures.isEmpty()) {

      StringBuilder builder = new StringBuilder();
      builder.append("Read runner failed!\n");

      for (Throwable t : failures) {
        builder.append("Exception is: ");
        ByteArrayOutputStream output = new ByteArrayOutputStream();

        // flush the PrintWriter, otherwise the buffered stack trace may never reach the stream
        PrintWriter writer = new PrintWriter(output);
        t.printStackTrace(writer);
        writer.flush();

        builder.append(output.toString("UTF-8"));
        builder.append("\n\n");
      }

      fail(builder.toString());
    }

    // reset our count. Ultimately we'll have 4 groups once our compaction completes
    compactedCount = 0;

    // we have to get it from the cache, because this will trigger the compaction process
    final Iterator<ShardEntryGroup> groups =
        cache.getReadShardGroup(scope, Long.MAX_VALUE, directedEdgeMeta);

    final Set<ShardEntryGroup> shardEntryGroups = new HashSet<>();

    while (groups.hasNext()) {
      final ShardEntryGroup group = groups.next();
      shardEntryGroups.add(group);

      logger.info(
          "Compaction pending status for group {} is {}", group, group.isCompactionPending());

      if (!group.isCompactionPending()) {
        compactedCount++;
      }
    }

    // we're done
    if (compactedCount >= expectedShardCount) {
      logger.info("All compactions complete, sleeping");
      break;
    }

    Thread.sleep(2000);
  }

  // now continue reading everything for 30 seconds
  Thread.sleep(30000);

  executor.shutdownNow();
}
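// ReadWorker (used in both tests) is a Callable<Long> that replays the generator's search,
// ticks the read meter for every edge seen, and verifies that at least the expected number of
// edges comes back. A minimal, illustrative sketch; the class name, fields, and the failure
// check are assumptions, not the original implementation:
private class ReadWorkerSketch implements Callable<Long> {

  private final GraphManagerFactory factory;
  private final EdgeGenerator generator;
  private final long expectedCount;
  private final Meter readMeter;

  private ReadWorkerSketch(
      final GraphManagerFactory factory,
      final EdgeGenerator generator,
      final long expectedCount,
      final Meter readMeter) {
    this.factory = factory;
    this.generator = generator;
    this.expectedCount = expectedCount;
    this.readMeter = readMeter;
  }

  @Override
  public Long call() throws Exception {
    final GraphManager manager = factory.createEdgeManager(scope);

    // replay the search, marking throughput as each edge is observed
    final long returnedEdgeCount =
        generator
            .doSearch(manager)
            .doOnNext(edge -> readMeter.mark())
            .countLong()
            .toBlocking()
            .last();

    logger.info("Completed reading {} edges", returnedEdgeCount);

    // surface a failure to the FutureCallback if fewer edges than expected were read
    if (returnedEdgeCount < expectedCount) {
      throw new IllegalStateException(
          "Expected at least " + expectedCount + " edges, read " + returnedEdgeCount);
    }

    return returnedEdgeCount;
  }
}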