@Before
public void setup() throws Throwable {
    // setup a cluster with some data and entries in log files, in fully functional and shutdown state
    clusterManager.start();
    cluster = clusterManager.getDefaultCluster();
    try {
        cluster.await(allSeesAllAsAvailable());
        master = cluster.getMaster();
        try (Transaction tx = master.beginTx()) {
            master.createNode();
            tx.success();
        }
        cluster.sync();
        slave1 = cluster.getAnySlave();
        slave2 = cluster.getAnySlave(slave1);
    } finally {
        clusterManager.shutdown();
    }
    assertAllStoreConsistent();
}
@Before
public void before() throws Throwable {
    // Start a cluster of two members plus one additional client and wait until the master sees all three members
    clusterManager = new ClusterManager(
            clusterWithAdditionalClients(2, 1),
            dir.directory("dbs", true),
            stringMap());
    clusterManager.start();
    cluster = clusterManager.getDefaultCluster();
    cluster.await(masterSeesMembers(3));
}
@Before
public void setup() throws Throwable {
    // Start a three-instance cluster, wait until every member sees all others as available,
    // then grab references to the master and the two slaves
    clusterManager = new ClusterManager(clusterOfSize(3), dir.graphDbDir(), stringMap());
    clusterManager.start();
    cluster = clusterManager.getDefaultCluster();
    cluster.await(ClusterManager.allSeesAllAsAvailable());
    master = cluster.getMaster();
    slave1 = cluster.getAnySlave();
    slave2 = cluster.getAnySlave(slave1);
}
private ClusterManager.ManagedCluster startCluster(ClusterManager.Provider provider,
        HighlyAvailableGraphDatabaseFactory dbFactory) throws Throwable {
    // Start a cluster from the given provider and database factory, using a short default
    // timeout and no transaction pushing, and wait for all members to see each other
    clusterManager = new ClusterManager(provider, storeDir,
            stringMap(default_timeout.name(), "1s", tx_push_factor.name(), "0"),
            new HashMap<Integer, Map<String, String>>(), dbFactory);
    clusterManager.start();
    ClusterManager.ManagedCluster cluster = clusterManager.getDefaultCluster();
    cluster.await(allSeesAllAsAvailable());
    return cluster;
}
@Test
public void aSlaveWithoutAnyGraphDBFilesShouldBeAbleToJoinACluster() throws Throwable {
    // GIVEN a cluster with some data and entries in the log files
    // WHEN removing all the files in graphdb on the slave and restarting the cluster
    deleteAllFilesOn(slave1);
    clusterManager.start();

    // THEN the cluster should work
    cluster = clusterManager.getDefaultCluster();
    try {
        cluster.await(allSeesAllAsAvailable());
    } finally {
        clusterManager.shutdown();
    }
    assertAllStoreConsistent();
}
@Test
public void givenClusterWithCreatedIndexWhenDeleteIndexOnMasterThenIndexIsDeletedOnSlave() throws Throwable {
    ClusterManager clusterManager = new ClusterManager(
            fromXml(getClass().getResource("/threeinstances.xml").toURI()),
            TargetDirectory.forTest(getClass()).cleanDirectory("testCluster"),
            MapUtil.stringMap(HaSettings.ha_server.name(), ":6001-6005",
                    HaSettings.tx_push_factor.name(), "2"));
    try {
        // Given
        clusterManager.start();
        clusterManager.getDefaultCluster().await(ClusterManager.allSeesAllAsAvailable());

        GraphDatabaseAPI master = clusterManager.getDefaultCluster().getMaster();
        try (Transaction tx = master.beginTx()) {
            master.index().forNodes("Test");
            tx.success();
        }

        HighlyAvailableGraphDatabase aSlave = clusterManager.getDefaultCluster().getAnySlave();
        try (Transaction tx = aSlave.beginTx()) {
            assertThat(aSlave.index().existsForNodes("Test"), equalTo(true));
            tx.success();
        }

        // When
        try (Transaction tx = master.beginTx()) {
            master.index().forNodes("Test").delete();
            tx.success();
        }

        // Then
        HighlyAvailableGraphDatabase anotherSlave = clusterManager.getDefaultCluster().getAnySlave();
        try (Transaction tx = anotherSlave.beginTx()) {
            assertThat(anotherSlave.index().existsForNodes("Test"), equalTo(false));
            tx.success();
        }
    } finally {
        clusterManager.stop();
    }
}
@Before
public void startCluster() throws Throwable {
    FileUtils.deleteDirectory(PATH);
    FileUtils.deleteDirectory(BACKUP_PATH);

    clusterManager = new ClusterManager(
            fromXml(getClass().getResource("/threeinstances.xml").toURI()), PATH,
            MapUtil.stringMap(OnlineBackupSettings.online_backup_enabled.name(), Settings.TRUE)) {
        @Override
        protected void config(GraphDatabaseBuilder builder, String clusterName, int serverId) {
            builder.setConfig(OnlineBackupSettings.online_backup_server, ":" + (4444 + serverId));
        }
    };
    clusterManager.start();
    cluster = clusterManager.getDefaultCluster();

    // Really doesn't matter which instance
    representation = createSomeData(cluster.getMaster());
}
@Test
public void transactionsGetsPushedToSlaves() throws Throwable {
    // given
    clusterManager = new ClusterManager(clusterOfSize(3), DIR.directory("dbs", true),
            stringMap(tx_push_factor.name(), "2"));
    clusterManager.start();
    ManagedCluster cluster = clusterManager.getDefaultCluster();

    // when
    String name = "a node";
    long node = createNode(cluster.getMaster(), name);

    // then
    for (HighlyAvailableGraphDatabase db : cluster.getAllMembers()) {
        Transaction transaction = db.beginTx();
        try {
            assertEquals(node, getNodeByName(db, name));
        } finally {
            transaction.finish();
        }
    }
}
@Test
public void aClusterShouldStartAndRunWhenSeededWithAStoreHavingNoLogicalLogFiles() throws Throwable {
    // GIVEN a cluster with some data and entries in the log files
    // WHEN removing all logical log files in graphdb on the slave and restarting a new cluster
    File seedDir = deleteAllLogsOn(slave1);
    File newDir = new File(dir.directory(), "new");
    FileUtils.deleteRecursively(newDir);
    ClusterManager newClusterManager = new ClusterManager(
            new ClusterManager.Builder(newDir).withProvider(clusterOfSize(3)).withSeedDir(seedDir));
    newClusterManager.start();

    // THEN the new cluster should work
    ClusterManager.ManagedCluster newCluster = newClusterManager.getDefaultCluster();
    HighlyAvailableGraphDatabase newMaster;
    HighlyAvailableGraphDatabase newSlave1;
    HighlyAvailableGraphDatabase newSlave2;
    try {
        newCluster.await(allSeesAllAsAvailable());
        newMaster = newCluster.getMaster();
        newSlave1 = newCluster.getAnySlave();
        newSlave2 = newCluster.getAnySlave(newSlave1);
    } finally {
        newClusterManager.shutdown();
    }
    assertAllStoreConsistent(newMaster, newSlave1, newSlave2);
    assertConsistentStore(new File(newMaster.getStoreDir()));
    assertConsistentStore(new File(newSlave1.getStoreDir()));
    assertConsistentStore(new File(newSlave2.getStoreDir()));
}
@Test
public void testPullStorm() throws Throwable {
    // given
    ClusterManager clusterManager = new ClusterManager(
            ClusterManager.clusterWithAdditionalArbiters(2, 1), testDirectory.directory(),
            stringMap(HaSettings.pull_interval.name(), "0",
                    HaSettings.tx_push_factor.name(), "1"));
    clusterManager.start();
    try {
        ClusterManager.ManagedCluster cluster = clusterManager.getDefaultCluster();
        cluster.await(ClusterManager.masterAvailable());
        cluster.await(ClusterManager.masterSeesSlavesAsAvailable(1));

        // Create data
        final HighlyAvailableGraphDatabase master = cluster.getMaster();
        {
            Transaction tx = master.beginTx();
            for (int i = 0; i < 1000; i++) {
                master.createNode().setProperty("foo", "bar");
            }
            tx.success();
            tx.finish();
        }

        // Slave goes down
        HighlyAvailableGraphDatabase slave = cluster.getAnySlave();
        ClusterManager.RepairKit repairKit = cluster.fail(slave);

        // Create more data
        for (int i = 0; i < 1000; i++) {
            {
                Transaction tx = master.beginTx();
                for (int j = 0; j < 1000; j++) {
                    master.createNode().setProperty("foo", "bar");
                    master.createNode().setProperty("foo", "bar");
                }
                tx.success();
                tx.finish();
            }
        }

        // Slave comes back online
        repairKit.repair();
        cluster.await(ClusterManager.masterSeesSlavesAsAvailable(1));

        // when
        // Create 20 concurrent transactions
        System.out.println("Pull storm");
        ExecutorService executor = Executors.newFixedThreadPool(20);
        for (int i = 0; i < 20; i++) {
            executor.submit(new Runnable() {
                @Override
                public void run() {
                    Transaction tx = master.beginTx();
                    master.createNode().setProperty("foo", "bar");
                    tx.success();
                    tx.finish();
                    // This should cause lots of concurrent calls to pullUpdate()
                }
            });
        }
        executor.shutdown();
        executor.awaitTermination(1, TimeUnit.MINUTES);
        System.out.println("Pull storm done");

        // then
        long masterLastCommittedTxId = lastCommittedTxId(master);
        for (HighlyAvailableGraphDatabase member : cluster.getAllMembers()) {
            assertEquals(masterLastCommittedTxId, lastCommittedTxId(member));
        }
    } finally {
        System.err.println("Shutting down");
        clusterManager.shutdown();
        System.err.println("Shut down");
    }
}