@Test
public void dataReplicatedToCorrectTableWithoutDrain() throws Exception {
  MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(
      createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
      ROOT_PASSWORD);
  peerCfg.setNumTservers(1);
  peerCfg.setInstanceName("peer");
  updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
  peerCfg.setProperty(Property.REPLICATION_NAME, "peer");

  MiniAccumuloClusterImpl peer1Cluster = new MiniAccumuloClusterImpl(peerCfg);

  peer1Cluster.start();

  try {
    Connector connMaster = getConnector();
    Connector connPeer = peer1Cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));

    String peerClusterName = "peer";
    String peerUserName = "******";
    String peerPassword = "******";

    // Create a user on the peer for replication to use
    connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));

    // Configure the credentials we should use to authenticate ourselves to the peer for
    // replication
    connMaster.instanceOperations()
        .setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
    connMaster.instanceOperations()
        .setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);

    // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
    connMaster.instanceOperations().setProperty(
        Property.REPLICATION_PEERS.getKey() + peerClusterName,
        ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
            AccumuloReplicaSystem.buildConfiguration(peer1Cluster.getInstanceName(),
                peer1Cluster.getZooKeepers())));

    String masterTable1 = "master1", peerTable1 = "peer1", masterTable2 = "master2",
        peerTable2 = "peer2";

    connMaster.tableOperations().create(masterTable1);
    String masterTableId1 = connMaster.tableOperations().tableIdMap().get(masterTable1);
    Assert.assertNotNull(masterTableId1);

    connMaster.tableOperations().create(masterTable2);
    String masterTableId2 = connMaster.tableOperations().tableIdMap().get(masterTable2);
    Assert.assertNotNull(masterTableId2);

    connPeer.tableOperations().create(peerTable1);
    String peerTableId1 = connPeer.tableOperations().tableIdMap().get(peerTable1);
    Assert.assertNotNull(peerTableId1);

    connPeer.tableOperations().create(peerTable2);
    String peerTableId2 = connPeer.tableOperations().tableIdMap().get(peerTable2);
    Assert.assertNotNull(peerTableId2);

    // Give our replication user the ability to write to the tables
    connPeer.securityOperations().grantTablePermission(peerUserName, peerTable1,
        TablePermission.WRITE);
    connPeer.securityOperations().grantTablePermission(peerUserName, peerTable2,
        TablePermission.WRITE);

    // Replicate each master table to the peer cluster, targeting the corresponding peer table id
    connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION.getKey(),
        "true");
    connMaster.tableOperations().setProperty(masterTable1,
        Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId1);

    connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION.getKey(),
        "true");
    connMaster.tableOperations().setProperty(masterTable2,
        Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId2);

    // Wait for zookeeper updates (configuration) to propagate
    sleepUninterruptibly(3, TimeUnit.SECONDS);

    // Write some data to table1
    BatchWriter bw = connMaster.createBatchWriter(masterTable1, new BatchWriterConfig());
    for (int rows = 0; rows < 2500; rows++) {
      Mutation m = new Mutation(masterTable1 + rows);
      for (int cols = 0; cols < 100; cols++) {
        String value = Integer.toString(cols);
        m.put(value, "", value);
      }
      bw.addMutation(m);
    }

    bw.close();

    // Write some data to table2
    bw = connMaster.createBatchWriter(masterTable2, new BatchWriterConfig());
    for (int rows = 0; rows < 2500; rows++) {
      Mutation m = new Mutation(masterTable2 + rows);
      for (int cols = 0; cols < 100; cols++) {
        String value = Integer.toString(cols);
        m.put(value, "", value);
      }
      bw.addMutation(m);
    }

    bw.close();

    log.info("Wrote all data to master cluster");

    while (!ReplicationTable.isOnline(connMaster)) {
      Thread.sleep(500);
    }

    // Restart the tablet servers to close the current write-ahead logs so they become
    // candidates for replication
    for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
      cluster.killProcess(ServerType.TABLET_SERVER, proc);
    }

    cluster.exec(TabletServer.class);

    // Wait until we fully replicated something
    boolean fullyReplicated = false;
    for (int i = 0; i < 10 && !fullyReplicated; i++) {
      sleepUninterruptibly(timeoutFactor * 2, TimeUnit.SECONDS);

      Scanner s = ReplicationTable.getScanner(connMaster);
      WorkSection.limit(s);
      for (Entry<Key, Value> entry : s) {
        Status status = Status.parseFrom(entry.getValue().get());
        if (StatusUtil.isFullyReplicated(status)) {
          fullyReplicated = true;
        }
      }
    }

    Assert.assertTrue("Did not find any fully replicated work records", fullyReplicated);

    long countTable = 0L;

    // Check a few times
    for (int i = 0; i < 10; i++) {
      countTable = 0L;
      for (Entry<Key, Value> entry : connPeer.createScanner(peerTable1, Authorizations.EMPTY)) {
        countTable++;
        Assert.assertTrue(
            "Found unexpected key-value " + entry.getKey().toStringNoTruncate() + " "
                + entry.getValue(),
            entry.getKey().getRow().toString().startsWith(masterTable1));
      }
      log.info("Found {} records in {}", countTable, peerTable1);
      if (0 < countTable) {
        break;
      }
      Thread.sleep(2000);
    }

    Assert.assertTrue("Did not find any records in " + peerTable1 + " on peer", countTable > 0);

    for (int i = 0; i < 10; i++) {
      countTable = 0L;
      for (Entry<Key, Value> entry : connPeer.createScanner(peerTable2, Authorizations.EMPTY)) {
        countTable++;
        Assert.assertTrue(
            "Found unexpected key-value " + entry.getKey().toStringNoTruncate() + " "
                + entry.getValue(),
            entry.getKey().getRow().toString().startsWith(masterTable2));
      }
      log.info("Found {} records in {}", countTable, peerTable2);
      if (0 < countTable) {
        break;
      }
      Thread.sleep(2000);
    }

    Assert.assertTrue("Did not find any records in " + peerTable2 + " on peer", countTable > 0);

  } finally {
    peer1Cluster.stop();
  }
}
@Test
public void dataWasReplicatedToThePeer() throws Exception {
  MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(
      createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
      ROOT_PASSWORD);
  peerCfg.setNumTservers(1);
  peerCfg.setInstanceName("peer");
  updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
  peerCfg.setProperty(Property.REPLICATION_NAME, "peer");

  MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg);

  peerCluster.start();

  try {
    final Connector connMaster = getConnector();
    final Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));

    ReplicationTable.setOnline(connMaster);

    String peerUserName = "******", peerPassword = "******";

    String peerClusterName = "peer";

    // Create a user on the peer for replication to use
    connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));

    // Configure the credentials we should use to authenticate ourselves to the peer for
    // replication
    connMaster.instanceOperations()
        .setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
    connMaster.instanceOperations()
        .setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);

    // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
    connMaster.instanceOperations().setProperty(
        Property.REPLICATION_PEERS.getKey() + peerClusterName,
        ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
            AccumuloReplicaSystem.buildConfiguration(peerCluster.getInstanceName(),
                peerCluster.getZooKeepers())));

    final String masterTable = "master", peerTable = "peer";

    connMaster.tableOperations().create(masterTable);
    String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable);
    Assert.assertNotNull(masterTableId);

    connPeer.tableOperations().create(peerTable);
    String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable);
    Assert.assertNotNull(peerTableId);

    // Give our replication user the ability to write to the table
    connPeer.securityOperations().grantTablePermission(peerUserName, peerTable,
        TablePermission.WRITE);

    // Replicate this table to the peerClusterName in a table with the peerTableId table id
    connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION.getKey(),
        "true");
    connMaster.tableOperations().setProperty(masterTable,
        Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId);

    // Wait for zookeeper updates (configuration) to propagate
    sleepUninterruptibly(3, TimeUnit.SECONDS);

    // Write some data to the master table
    BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig());
    for (int rows = 0; rows < 5000; rows++) {
      Mutation m = new Mutation(Integer.toString(rows));
      for (int cols = 0; cols < 100; cols++) {
        String value = Integer.toString(cols);
        m.put(value, "", value);
      }
      bw.addMutation(m);
    }

    bw.close();

    log.info("Wrote all data to master cluster");

    final Set<String> filesNeedingReplication =
        connMaster.replicationOperations().referencedFiles(masterTable);

    // Restart the tablet servers to close the current write-ahead logs so they become
    // candidates for replication
    for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
      cluster.killProcess(ServerType.TABLET_SERVER, proc);
    }
    cluster.exec(TabletServer.class);
    log.info("TabletServer restarted");
    Iterators.size(ReplicationTable.getScanner(connMaster).iterator());
    log.info("TabletServer is online");

    log.info("");
    log.info("Fetching metadata records:");
    for (Entry<Key, Value> kv : connMaster.createScanner(MetadataTable.NAME,
        Authorizations.EMPTY)) {
      if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
        log.info(kv.getKey().toStringNoTruncate() + " "
            + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
      } else {
        log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue());
      }
    }

    log.info("");
    log.info("Fetching replication records:");
    for (Entry<Key, Value> kv : ReplicationTable.getScanner(connMaster)) {
      log.info(kv.getKey().toStringNoTruncate() + " "
          + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
    }

    Future<Boolean> future = executor.submit(new Callable<Boolean>() {

      @Override
      public Boolean call() throws Exception {
        connMaster.replicationOperations().drain(masterTable, filesNeedingReplication);
        log.info("Drain completed");
        return true;
      }

    });

    long timeoutSeconds = timeoutFactor * 30;
    try {
      future.get(timeoutSeconds, TimeUnit.SECONDS);
    } catch (TimeoutException e) {
      future.cancel(true);
      Assert.fail("Drain did not finish within " + timeoutSeconds + " seconds");
    }

    log.info("drain completed");

    log.info("");
    log.info("Fetching metadata records:");
    for (Entry<Key, Value> kv : connMaster.createScanner(MetadataTable.NAME,
        Authorizations.EMPTY)) {
      if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
        log.info(kv.getKey().toStringNoTruncate() + " "
            + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
      } else {
        log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue());
      }
    }

    log.info("");
    log.info("Fetching replication records:");
    for (Entry<Key, Value> kv : ReplicationTable.getScanner(connMaster)) {
      log.info(kv.getKey().toStringNoTruncate() + " "
          + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
    }

    // Verify the peer now holds exactly the same key-value pairs as the master table
    Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY),
        peer = connPeer.createScanner(peerTable, Authorizations.EMPTY);
    Iterator<Entry<Key, Value>> masterIter = master.iterator(), peerIter = peer.iterator();
    Entry<Key, Value> masterEntry = null, peerEntry = null;
    while (masterIter.hasNext() && peerIter.hasNext()) {
      masterEntry = masterIter.next();
      peerEntry = peerIter.next();
      Assert.assertEquals(masterEntry.getKey() + " was not equal to " + peerEntry.getKey(), 0,
          masterEntry.getKey().compareTo(peerEntry.getKey(),
              PartialKey.ROW_COLFAM_COLQUAL_COLVIS));
      Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
    }

    log.info("Last master entry: " + masterEntry);
    log.info("Last peer entry: " + peerEntry);

    Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
    Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());
  } finally {
    peerCluster.stop();
  }
}