@Override public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s"); cfg.setProperty(Property.MASTER_RECOVERY_DELAY, "5s"); // use raw local file system so walogs sync and flush will work hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName()); }
@Override public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { Map<String, String> siteConfig = cfg.getSiteConfig(); cfg.setNumTservers(1); siteConfig.put(Property.TSERV_SESSION_MAXIDLE.getKey(), getMaxIdleTimeString()); siteConfig.put(Property.TSERV_READ_AHEAD_MAXCONCURRENT.getKey(), "11"); cfg.setSiteConfig(siteConfig); }
@Override public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { Map<String, String> props = new HashMap<String, String>(); props.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "3s"); cfg.setSiteConfig(props); cfg.setNumTservers(1); cfg.useMiniDFS(true); }
public MiniAccumuloClusterImpl create( String testClassName, String testMethodName, AuthenticationToken token, MiniClusterConfigurationCallback configCallback, TestingKdc kdc) throws Exception { requireNonNull(token); checkArgument( token instanceof PasswordToken || token instanceof KerberosToken, "A PasswordToken or KerberosToken is required"); String rootPasswd; if (token instanceof PasswordToken) { rootPasswd = new String(((PasswordToken) token).getPassword(), UTF_8); } else { rootPasswd = UUID.randomUUID().toString(); } File baseDir = AccumuloClusterHarness.createTestDir(testClassName + "_" + testMethodName); MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(baseDir, rootPasswd); // Enable native maps by default cfg.setNativeLibPaths(NativeMapIT.nativeMapLocation().getAbsolutePath()); cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString()); Configuration coreSite = new Configuration(false); // Set up SSL and credential providers if the system properties request them configureForEnvironment( cfg, getClass(), AccumuloClusterHarness.getSslDir(baseDir), coreSite, kdc); // Invoke the callback for tests to configure MAC before it starts configCallback.configureMiniCluster(cfg, coreSite); MiniAccumuloClusterImpl miniCluster = new MiniAccumuloClusterImpl(cfg); // Write out any configuration items to a file so HDFS will pick them up automatically (from the classpath) if (coreSite.size() > 0) { File csFile = new File(miniCluster.getConfig().getConfDir(), "core-site.xml"); if (csFile.exists()) { throw new RuntimeException(csFile + " already exists"); } try (OutputStream out = new BufferedOutputStream(new FileOutputStream(csFile))) { coreSite.writeXml(out); } } return miniCluster; }
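For orientation, here is a minimal usage sketch of the factory above; the test and method names, the inline callback body, and the surrounding test class are illustrative assumptions, while create(), start(), getConnector(), and stop() are the calls that appear elsewhere in this section.

// Hypothetical caller, assumed to live in the same harness/test class that defines create().
MiniAccumuloClusterImpl cluster =
    create("ExampleIT", "exampleMethod", new PasswordToken("example-root-passwd"),
        new MiniClusterConfigurationCallback() {
          @Override
          public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
            cfg.setNumTservers(1); // keep the example cluster small
          }
        }, null); // null KDC: only needed when Kerberos is requested for the run
cluster.start();
try {
  Connector conn = cluster.getConnector("root", new PasswordToken("example-root-passwd"));
  // ... exercise the cluster ...
} finally {
  cluster.stop();
}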
@Override public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { Map<String, String> siteConfig = new HashMap<String, String>(); siteConfig.put(Property.TSERV_MAXMEM.getKey(), "5K"); siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "100ms"); cfg.setSiteConfig(siteConfig); }
protected void configureForEnvironment( MiniAccumuloConfigImpl cfg, Class<?> testClass, File folder, Configuration coreSite, TestingKdc kdc) { if (TRUE.equals(System.getProperty(USE_SSL_FOR_IT_OPTION))) { configureForSsl(cfg, folder); } if (TRUE.equals(System.getProperty(USE_CRED_PROVIDER_FOR_IT_OPTION))) { cfg.setUseCredentialProvider(true); } if (TRUE.equals(System.getProperty(USE_KERBEROS_FOR_IT_OPTION))) { if (TRUE.equals(System.getProperty(USE_SSL_FOR_IT_OPTION))) { throw new RuntimeException("Cannot use both SSL and Kerberos"); } try { configureForKerberos(cfg, folder, coreSite, kdc); } catch (Exception e) { throw new RuntimeException("Failed to initialize KDC", e); } } }
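For reference, a hedged sketch of how a run opts into one of these modes; in practice the flags are passed as -D options to the JVM or Maven rather than set in code, and the snippet below only illustrates which property the harness reads (USE_SSL_FOR_IT_OPTION and TRUE are the same constants checked above, assumed here to be plain strings).

// Illustration only: enable the SSL branch of configureForEnvironment() for this JVM.
// Normally supplied on the command line, e.g. mvn verify -D<ssl option>=true.
System.setProperty(USE_SSL_FOR_IT_OPTION, TRUE);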
@Override public void configure(MiniAccumuloConfigImpl cfg) { Map<String, String> siteConfig = new HashMap<String, String>(); siteConfig.put(Property.TSERV_MAXMEM.getKey(), "50K"); siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "0"); cfg.setSiteConfig(siteConfig); }
@Override public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { cfg.setNumTservers(1); cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "15s"); cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "2M"); cfg.setProperty(Property.GC_CYCLE_START, "1s"); cfg.setProperty(Property.GC_CYCLE_DELAY, "5s"); cfg.setProperty(Property.REPLICATION_WORK_ASSIGNMENT_SLEEP, "1s"); cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "1s"); cfg.setProperty(Property.REPLICATION_MAX_UNIT_SIZE, "8M"); cfg.setProperty(Property.REPLICATION_NAME, "master"); cfg.setProperty(Property.REPLICATION_WORK_ASSIGNER, UnorderedWorkAssigner.class.getName()); cfg.setProperty(Property.TSERV_TOTAL_MUTATION_QUEUE_MAX, "1M"); hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName()); }
/** * Use the same SSL and credential provider configuration that is set up by AbstractMacIT for the * other MAC used for replication */ private void updatePeerConfigFromPrimary( MiniAccumuloConfigImpl primaryCfg, MiniAccumuloConfigImpl peerCfg) { // Set the same SSL information from the primary when present Map<String, String> primarySiteConfig = primaryCfg.getSiteConfig(); if ("true".equals(primarySiteConfig.get(Property.INSTANCE_RPC_SSL_ENABLED.getKey()))) { Map<String, String> peerSiteConfig = new HashMap<String, String>(); peerSiteConfig.put(Property.INSTANCE_RPC_SSL_ENABLED.getKey(), "true"); String keystorePath = primarySiteConfig.get(Property.RPC_SSL_KEYSTORE_PATH.getKey()); Assert.assertNotNull("Keystore Path was null", keystorePath); peerSiteConfig.put(Property.RPC_SSL_KEYSTORE_PATH.getKey(), keystorePath); String truststorePath = primarySiteConfig.get(Property.RPC_SSL_TRUSTSTORE_PATH.getKey()); Assert.assertNotNull("Truststore Path was null", truststorePath); peerSiteConfig.put(Property.RPC_SSL_TRUSTSTORE_PATH.getKey(), truststorePath); // Passwords might be stored in CredentialProvider String keystorePassword = primarySiteConfig.get(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey()); if (null != keystorePassword) { peerSiteConfig.put(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey(), keystorePassword); } String truststorePassword = primarySiteConfig.get(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey()); if (null != truststorePassword) { peerSiteConfig.put(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey(), truststorePassword); } System.out.println("Setting site configuration for peer " + peerSiteConfig); peerCfg.setSiteConfig(peerSiteConfig); } // Use the CredentialProvider if the primary also uses one String credProvider = primarySiteConfig.get(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey()); if (null != credProvider) { Map<String, String> peerSiteConfig = peerCfg.getSiteConfig(); peerSiteConfig.put( Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey(), credProvider); peerCfg.setSiteConfig(peerSiteConfig); } }
protected void configureForSsl(MiniAccumuloConfigImpl cfg, File folder) { Map<String, String> siteConfig = cfg.getSiteConfig(); if (TRUE.equals(siteConfig.get(Property.INSTANCE_RPC_SSL_ENABLED.getKey()))) { // already enabled; don't mess with it return; } File sslDir = new File(folder, "ssl"); assertTrue(sslDir.mkdirs() || sslDir.isDirectory()); File rootKeystoreFile = new File(sslDir, "root-" + cfg.getInstanceName() + ".jks"); File localKeystoreFile = new File(sslDir, "local-" + cfg.getInstanceName() + ".jks"); File publicTruststoreFile = new File(sslDir, "public-" + cfg.getInstanceName() + ".jks"); final String rootKeystorePassword = "******", truststorePassword = "******"; try { new CertUtils( Property.RPC_SSL_KEYSTORE_TYPE.getDefaultValue(), "o=Apache Accumulo,cn=MiniAccumuloCluster", "RSA", 2048, "sha1WithRSAEncryption") .createAll( rootKeystoreFile, localKeystoreFile, publicTruststoreFile, cfg.getInstanceName(), rootKeystorePassword, cfg.getRootPassword(), truststorePassword); } catch (Exception e) { throw new RuntimeException("error creating MAC keystore", e); } siteConfig.put(Property.INSTANCE_RPC_SSL_ENABLED.getKey(), "true"); siteConfig.put(Property.RPC_SSL_KEYSTORE_PATH.getKey(), localKeystoreFile.getAbsolutePath()); siteConfig.put(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey(), cfg.getRootPassword()); siteConfig.put( Property.RPC_SSL_TRUSTSTORE_PATH.getKey(), publicTruststoreFile.getAbsolutePath()); siteConfig.put(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey(), truststorePassword); cfg.setSiteConfig(siteConfig); }
@Override public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { cfg.setProperty(Property.TSERV_MAJC_DELAY, "1s"); }
@Override public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) { cfg.setNumTservers(1); cfg.setProperty(Property.TSERV_ASSIGNMENT_MAXCONCURRENT, "1"); }
protected void configureForKerberos( MiniAccumuloConfigImpl cfg, File folder, Configuration coreSite, TestingKdc kdc) throws Exception { Map<String, String> siteConfig = cfg.getSiteConfig(); if (TRUE.equals(siteConfig.get(Property.INSTANCE_RPC_SSL_ENABLED.getKey()))) { throw new RuntimeException("Cannot use both SSL and SASL/Kerberos"); } if (TRUE.equals(siteConfig.get(Property.INSTANCE_RPC_SASL_ENABLED.getKey()))) { // already enabled return; } if (null == kdc) { throw new IllegalStateException("MiniClusterKdc was null"); } log.info("Enabling Kerberos/SASL for minicluster"); // Turn on SASL and set the keytab/principal information cfg.setProperty(Property.INSTANCE_RPC_SASL_ENABLED, "true"); ClusterUser serverUser = kdc.getAccumuloServerUser(); cfg.setProperty(Property.GENERAL_KERBEROS_KEYTAB, serverUser.getKeytab().getAbsolutePath()); cfg.setProperty(Property.GENERAL_KERBEROS_PRINCIPAL, serverUser.getPrincipal()); cfg.setProperty( Property.INSTANCE_SECURITY_AUTHENTICATOR, KerberosAuthenticator.class.getName()); cfg.setProperty(Property.INSTANCE_SECURITY_AUTHORIZOR, KerberosAuthorizor.class.getName()); cfg.setProperty( Property.INSTANCE_SECURITY_PERMISSION_HANDLER, KerberosPermissionHandler.class.getName()); // Piggy-back on the "system user" credential, but use it as a normal KerberosToken, not the // SystemToken. cfg.setProperty(Property.TRACE_USER, serverUser.getPrincipal()); cfg.setProperty(Property.TRACE_TOKEN_TYPE, KerberosToken.CLASS_NAME); // Pass down some KRB5 debug properties Map<String, String> systemProperties = cfg.getSystemProperties(); systemProperties.put(JAVA_SECURITY_KRB5_CONF, System.getProperty(JAVA_SECURITY_KRB5_CONF, "")); systemProperties.put( SUN_SECURITY_KRB5_DEBUG, System.getProperty(SUN_SECURITY_KRB5_DEBUG, "false")); cfg.setSystemProperties(systemProperties); // Make sure UserGroupInformation will do the correct login coreSite.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); cfg.setRootUserName(kdc.getRootUser().getPrincipal()); }
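As a counterpart to the server-side setup above, a hedged sketch of the client-side login a test might perform once SASL is enabled; KerberosToken(String principal) and UserGroupInformation.loginUserFromKeytab() are the assumed APIs here, "cluster" stands for the MAC built from cfg, and the root user comes from the same TestingKdc.

// Hedged sketch: log in as the KDC's root principal and connect with a KerberosToken.
ClusterUser rootUser = kdc.getRootUser();
UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(),
    rootUser.getKeytab().getAbsolutePath());
KerberosToken token = new KerberosToken(rootUser.getPrincipal()); // assumed constructor
Connector conn = cluster.getConnector(rootUser.getPrincipal(), token); // cluster: the running MAC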
@Test public void dataReplicatedToCorrectTableWithoutDrain() throws Exception { MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl( createTestDir( this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"), ROOT_PASSWORD); peerCfg.setNumTservers(1); peerCfg.setInstanceName("peer"); updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg); peerCfg.setProperty(Property.REPLICATION_NAME, "peer"); MiniAccumuloClusterImpl peer1Cluster = new MiniAccumuloClusterImpl(peerCfg); peer1Cluster.start(); try { Connector connMaster = getConnector(); Connector connPeer = peer1Cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD)); String peerClusterName = "peer"; String peerUserName = "******"; String peerPassword = "******"; // Create a user on the peer for replication to use connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword)); // Configure the credentials we should use to authenticate ourselves to the peer for // replication connMaster .instanceOperations() .setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName); connMaster .instanceOperations() .setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword); // ...peer = AccumuloReplicaSystem,instanceName,zookeepers connMaster .instanceOperations() .setProperty( Property.REPLICATION_PEERS.getKey() + peerClusterName, ReplicaSystemFactory.getPeerConfigurationValue( AccumuloReplicaSystem.class, AccumuloReplicaSystem.buildConfiguration( peer1Cluster.getInstanceName(), peer1Cluster.getZooKeepers()))); String masterTable1 = "master1", peerTable1 = "peer1", masterTable2 = "master2", peerTable2 = "peer2"; connMaster.tableOperations().create(masterTable1); String masterTableId1 = connMaster.tableOperations().tableIdMap().get(masterTable1); Assert.assertNotNull(masterTableId1); connMaster.tableOperations().create(masterTable2); String masterTableId2 = connMaster.tableOperations().tableIdMap().get(masterTable2); Assert.assertNotNull(masterTableId2); connPeer.tableOperations().create(peerTable1); String peerTableId1 = connPeer.tableOperations().tableIdMap().get(peerTable1); Assert.assertNotNull(peerTableId1); connPeer.tableOperations().create(peerTable2); String peerTableId2 = connPeer.tableOperations().tableIdMap().get(peerTable2); Assert.assertNotNull(peerTableId2); // Give our replication user the ability to write to the tables connPeer .securityOperations() .grantTablePermission(peerUserName, peerTable1, TablePermission.WRITE); connPeer .securityOperations() .grantTablePermission(peerUserName, peerTable2, TablePermission.WRITE); // Replicate this table to the peerClusterName in a table with the peerTableId table id connMaster .tableOperations() .setProperty(masterTable1, Property.TABLE_REPLICATION.getKey(), "true"); connMaster .tableOperations() .setProperty( masterTable1, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId1); connMaster .tableOperations() .setProperty(masterTable2, Property.TABLE_REPLICATION.getKey(), "true"); connMaster .tableOperations() .setProperty( masterTable2, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId2); // Wait for zookeeper updates (configuration) to propagate sleepUninterruptibly(3, TimeUnit.SECONDS); // Write some data to table1 BatchWriter bw = connMaster.createBatchWriter(masterTable1, new BatchWriterConfig()); for (int rows = 0; rows < 2500; rows++) { Mutation m = new Mutation(masterTable1 + rows); for (int cols = 0; cols < 100; cols++) { 
String value = Integer.toString(cols); m.put(value, "", value); } bw.addMutation(m); } bw.close(); // Write some data to table2 bw = connMaster.createBatchWriter(masterTable2, new BatchWriterConfig()); for (int rows = 0; rows < 2500; rows++) { Mutation m = new Mutation(masterTable2 + rows); for (int cols = 0; cols < 100; cols++) { String value = Integer.toString(cols); m.put(value, "", value); } bw.addMutation(m); } bw.close(); log.info("Wrote all data to master cluster"); while (!ReplicationTable.isOnline(connMaster)) { Thread.sleep(500); } for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) { cluster.killProcess(ServerType.TABLET_SERVER, proc); } cluster.exec(TabletServer.class); // Wait until we have fully replicated something boolean fullyReplicated = false; for (int i = 0; i < 10 && !fullyReplicated; i++) { sleepUninterruptibly(timeoutFactor * 2, TimeUnit.SECONDS); Scanner s = ReplicationTable.getScanner(connMaster); WorkSection.limit(s); for (Entry<Key, Value> entry : s) { Status status = Status.parseFrom(entry.getValue().get()); if (StatusUtil.isFullyReplicated(status)) { fullyReplicated = true; } } } Assert.assertTrue("Did not find any fully replicated files", fullyReplicated); long countTable = 0L; // Check a few times for (int i = 0; i < 10; i++) { countTable = 0L; for (Entry<Key, Value> entry : connPeer.createScanner(peerTable1, Authorizations.EMPTY)) { countTable++; Assert.assertTrue( "Found unexpected key-value " + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString().startsWith(masterTable1)); } log.info("Found {} records in {}", countTable, peerTable1); if (0 < countTable) { break; } Thread.sleep(2000); } Assert.assertTrue("Did not find any records in " + peerTable1 + " on peer", countTable > 0); for (int i = 0; i < 10; i++) { countTable = 0L; for (Entry<Key, Value> entry : connPeer.createScanner(peerTable2, Authorizations.EMPTY)) { countTable++; Assert.assertTrue( "Found unexpected key-value " + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString().startsWith(masterTable2)); } log.info("Found {} records in {}", countTable, peerTable2); if (0 < countTable) { break; } Thread.sleep(2000); } Assert.assertTrue("Did not find any records in " + peerTable2 + " on peer", countTable > 0); } finally { peer1Cluster.stop(); } }
@Test public void dataWasReplicatedToThePeerWithoutDrain() throws Exception { MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl( createTestDir( this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"), ROOT_PASSWORD); peerCfg.setNumTservers(1); peerCfg.setInstanceName("peer"); updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg); peerCfg.setProperty(Property.REPLICATION_NAME, "peer"); MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg); peerCluster.start(); Connector connMaster = getConnector(); Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD)); String peerUserName = "******"; String peerPassword = "******"; // Create a user on the peer for replication to use connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword)); String peerClusterName = "peer"; // ...peer = AccumuloReplicaSystem,instanceName,zookeepers connMaster .instanceOperations() .setProperty( Property.REPLICATION_PEERS.getKey() + peerClusterName, ReplicaSystemFactory.getPeerConfigurationValue( AccumuloReplicaSystem.class, AccumuloReplicaSystem.buildConfiguration( peerCluster.getInstanceName(), peerCluster.getZooKeepers()))); // Configure the credentials we should use to authenticate ourselves to the peer for replication connMaster .instanceOperations() .setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName); connMaster .instanceOperations() .setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword); String masterTable = "master", peerTable = "peer"; connMaster.tableOperations().create(masterTable); String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable); Assert.assertNotNull(masterTableId); connPeer.tableOperations().create(peerTable); String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable); Assert.assertNotNull(peerTableId); // Give our replication user the ability to write to the table connPeer .securityOperations() .grantTablePermission(peerUserName, peerTable, TablePermission.WRITE); // Replicate this table to the peerClusterName in a table with the peerTableId table id connMaster .tableOperations() .setProperty(masterTable, Property.TABLE_REPLICATION.getKey(), "true"); connMaster .tableOperations() .setProperty( masterTable, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId); // Write some data to table1 BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig()); for (int rows = 0; rows < 5000; rows++) { Mutation m = new Mutation(Integer.toString(rows)); for (int cols = 0; cols < 100; cols++) { String value = Integer.toString(cols); m.put(value, "", value); } bw.addMutation(m); } bw.close(); log.info("Wrote all data to master cluster"); Set<String> files = connMaster.replicationOperations().referencedFiles(masterTable); for (String s : files) { log.info("Found referenced file for " + masterTable + ": " + s); } for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) { cluster.killProcess(ServerType.TABLET_SERVER, proc); } cluster.exec(TabletServer.class); Iterators.size(connMaster.createScanner(masterTable, Authorizations.EMPTY).iterator()); for (Entry<Key, Value> kv : connMaster.createScanner(ReplicationTable.NAME, Authorizations.EMPTY)) { log.debug( kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get()))); } connMaster.replicationOperations().drain(masterTable, 
files); Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY), peer = connPeer.createScanner(peerTable, Authorizations.EMPTY); Iterator<Entry<Key, Value>> masterIter = master.iterator(), peerIter = peer.iterator(); Assert.assertTrue("No data in master table", masterIter.hasNext()); Assert.assertTrue("No data in peer table", peerIter.hasNext()); while (masterIter.hasNext() && peerIter.hasNext()) { Entry<Key, Value> masterEntry = masterIter.next(), peerEntry = peerIter.next(); Assert.assertEquals( masterEntry.getKey() + " was not equal to " + peerEntry.getKey(), 0, masterEntry.getKey().compareTo(peerEntry.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS)); Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue()); } Assert.assertFalse("Had more data to read from the master", masterIter.hasNext()); Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext()); peerCluster.stop(); }
@Test public void dataReplicatedToCorrectTable() throws Exception { MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl( createTestDir( this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"), ROOT_PASSWORD); peerCfg.setNumTservers(1); peerCfg.setInstanceName("peer"); updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg); peerCfg.setProperty(Property.REPLICATION_NAME, "peer"); MiniAccumuloClusterImpl peer1Cluster = new MiniAccumuloClusterImpl(peerCfg); peer1Cluster.start(); try { Connector connMaster = getConnector(); Connector connPeer = peer1Cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD)); String peerClusterName = "peer"; String peerUserName = "******", peerPassword = "******"; // Create local user connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword)); connMaster .instanceOperations() .setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName); connMaster .instanceOperations() .setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword); // ...peer = AccumuloReplicaSystem,instanceName,zookeepers connMaster .instanceOperations() .setProperty( Property.REPLICATION_PEERS.getKey() + peerClusterName, ReplicaSystemFactory.getPeerConfigurationValue( AccumuloReplicaSystem.class, AccumuloReplicaSystem.buildConfiguration( peer1Cluster.getInstanceName(), peer1Cluster.getZooKeepers()))); String masterTable1 = "master1", peerTable1 = "peer1", masterTable2 = "master2", peerTable2 = "peer2"; // Create tables connMaster.tableOperations().create(masterTable1); String masterTableId1 = connMaster.tableOperations().tableIdMap().get(masterTable1); Assert.assertNotNull(masterTableId1); connMaster.tableOperations().create(masterTable2); String masterTableId2 = connMaster.tableOperations().tableIdMap().get(masterTable2); Assert.assertNotNull(masterTableId2); connPeer.tableOperations().create(peerTable1); String peerTableId1 = connPeer.tableOperations().tableIdMap().get(peerTable1); Assert.assertNotNull(peerTableId1); connPeer.tableOperations().create(peerTable2); String peerTableId2 = connPeer.tableOperations().tableIdMap().get(peerTable2); Assert.assertNotNull(peerTableId2); // Grant write permission connPeer .securityOperations() .grantTablePermission(peerUserName, peerTable1, TablePermission.WRITE); connPeer .securityOperations() .grantTablePermission(peerUserName, peerTable2, TablePermission.WRITE); // Replicate this table to the peerClusterName in a table with the peerTableId table id connMaster .tableOperations() .setProperty(masterTable1, Property.TABLE_REPLICATION.getKey(), "true"); connMaster .tableOperations() .setProperty( masterTable1, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId1); connMaster .tableOperations() .setProperty(masterTable2, Property.TABLE_REPLICATION.getKey(), "true"); connMaster .tableOperations() .setProperty( masterTable2, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId2); // Wait for zookeeper updates (configuration) to propagate sleepUninterruptibly(3, TimeUnit.SECONDS); // Write some data to table1 BatchWriter bw = connMaster.createBatchWriter(masterTable1, new BatchWriterConfig()); long masterTable1Records = 0L; for (int rows = 0; rows < 2500; rows++) { Mutation m = new Mutation(masterTable1 + rows); for (int cols = 0; cols < 100; cols++) { String value = Integer.toString(cols); m.put(value, "", value); masterTable1Records++; } bw.addMutation(m); } bw.close(); // Write some data to table2
bw = connMaster.createBatchWriter(masterTable2, new BatchWriterConfig()); long masterTable2Records = 0L; for (int rows = 0; rows < 2500; rows++) { Mutation m = new Mutation(masterTable2 + rows); for (int cols = 0; cols < 100; cols++) { String value = Integer.toString(cols); m.put(value, "", value); masterTable2Records++; } bw.addMutation(m); } bw.close(); log.info("Wrote all data to master cluster"); Set<String> filesFor1 = connMaster.replicationOperations().referencedFiles(masterTable1), filesFor2 = connMaster.replicationOperations().referencedFiles(masterTable2); while (!ReplicationTable.isOnline(connMaster)) { Thread.sleep(500); } // Restart the tserver to force a close on the WAL for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) { cluster.killProcess(ServerType.TABLET_SERVER, proc); } cluster.exec(TabletServer.class); log.info("Restarted the tserver"); // Read the data -- the tserver is back up and running Iterators.size(connMaster.createScanner(masterTable1, Authorizations.EMPTY).iterator()); // Wait for both tables to be replicated log.info("Waiting for {} for {}", filesFor1, masterTable1); connMaster.replicationOperations().drain(masterTable1, filesFor1); log.info("Waiting for {} for {}", filesFor2, masterTable2); connMaster.replicationOperations().drain(masterTable2, filesFor2); long countTable = 0L; for (int i = 0; i < 5; i++) { countTable = 0L; for (Entry<Key, Value> entry : connPeer.createScanner(peerTable1, Authorizations.EMPTY)) { countTable++; Assert.assertTrue( "Found unexpected key-value " + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString().startsWith(masterTable1)); } log.info("Found {} records in {}", countTable, peerTable1); if (masterTable1Records != countTable) { log.warn( "Did not find {} expected records in {}, only found {}", masterTable1Records, peerTable1, countTable); } } Assert.assertEquals(masterTable1Records, countTable); for (int i = 0; i < 5; i++) { countTable = 0L; for (Entry<Key, Value> entry : connPeer.createScanner(peerTable2, Authorizations.EMPTY)) { countTable++; Assert.assertTrue( "Found unexpected key-value " + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString().startsWith(masterTable2)); } log.info("Found {} records in {}", countTable, peerTable2); if (masterTable2Records != countTable) { log.warn( "Did not find {} expected records in {}, only found {}", masterTable2Records, peerTable2, countTable); } } Assert.assertEquals(masterTable2Records, countTable); } finally { peer1Cluster.stop(); } }
@Test public void dataWasReplicatedToThePeer() throws Exception { MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl( createTestDir( this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"), ROOT_PASSWORD); peerCfg.setNumTservers(1); peerCfg.setInstanceName("peer"); updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg); peerCfg.setProperty(Property.REPLICATION_NAME, "peer"); MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg); peerCluster.start(); try { final Connector connMaster = getConnector(); final Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD)); ReplicationTable.setOnline(connMaster); String peerUserName = "******", peerPassword = "******"; String peerClusterName = "peer"; connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword)); connMaster .instanceOperations() .setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName); connMaster .instanceOperations() .setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword); // ...peer = AccumuloReplicaSystem,instanceName,zookeepers connMaster .instanceOperations() .setProperty( Property.REPLICATION_PEERS.getKey() + peerClusterName, ReplicaSystemFactory.getPeerConfigurationValue( AccumuloReplicaSystem.class, AccumuloReplicaSystem.buildConfiguration( peerCluster.getInstanceName(), peerCluster.getZooKeepers()))); final String masterTable = "master", peerTable = "peer"; connMaster.tableOperations().create(masterTable); String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable); Assert.assertNotNull(masterTableId); connPeer.tableOperations().create(peerTable); String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable); Assert.assertNotNull(peerTableId); connPeer .securityOperations() .grantTablePermission(peerUserName, peerTable, TablePermission.WRITE); // Replicate this table to the peerClusterName in a table with the peerTableId table id connMaster .tableOperations() .setProperty(masterTable, Property.TABLE_REPLICATION.getKey(), "true"); connMaster .tableOperations() .setProperty( masterTable, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId); // Wait for zookeeper updates (configuration) to propagate sleepUninterruptibly(3, TimeUnit.SECONDS); // Write some data to table1 BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig()); for (int rows = 0; rows < 5000; rows++) { Mutation m = new Mutation(Integer.toString(rows)); for (int cols = 0; cols < 100; cols++) { String value = Integer.toString(cols); m.put(value, "", value); } bw.addMutation(m); } bw.close(); log.info("Wrote all data to master cluster"); final Set<String> filesNeedingReplication = connMaster.replicationOperations().referencedFiles(masterTable); for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) { cluster.killProcess(ServerType.TABLET_SERVER, proc); } cluster.exec(TabletServer.class); log.info("TabletServer restarted"); Iterators.size(ReplicationTable.getScanner(connMaster).iterator()); log.info("TabletServer is online"); log.info(""); log.info("Fetching metadata records:"); for (Entry<Key, Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) { if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) { log.info( kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get()))); } else { 
log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue()); } } log.info(""); log.info("Fetching replication records:"); for (Entry<Key, Value> kv : ReplicationTable.getScanner(connMaster)) { log.info( kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get()))); } Future<Boolean> future = executor.submit( new Callable<Boolean>() { @Override public Boolean call() throws Exception { connMaster.replicationOperations().drain(masterTable, filesNeedingReplication); log.info("Drain completed"); return true; } }); long timeoutSeconds = timeoutFactor * 30; try { future.get(timeoutSeconds, TimeUnit.SECONDS); } catch (TimeoutException e) { future.cancel(true); Assert.fail("Drain did not finish within " + timeoutSeconds + " seconds"); } log.info("drain completed"); log.info(""); log.info("Fetching metadata records:"); for (Entry<Key, Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) { if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) { log.info( kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get()))); } else { log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue()); } } log.info(""); log.info("Fetching replication records:"); for (Entry<Key, Value> kv : ReplicationTable.getScanner(connMaster)) { log.info( kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get()))); } Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY), peer = connPeer.createScanner(peerTable, Authorizations.EMPTY); Iterator<Entry<Key, Value>> masterIter = master.iterator(), peerIter = peer.iterator(); Entry<Key, Value> masterEntry = null, peerEntry = null; while (masterIter.hasNext() && peerIter.hasNext()) { masterEntry = masterIter.next(); peerEntry = peerIter.next(); Assert.assertEquals( masterEntry.getKey() + " was not equal to " + peerEntry.getKey(), 0, masterEntry .getKey() .compareTo(peerEntry.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS)); Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue()); } log.info("Last master entry: " + masterEntry); log.info("Last peer entry: " + peerEntry); Assert.assertFalse("Had more data to read from the master", masterIter.hasNext()); Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext()); } finally { peerCluster.stop(); } }