Example #1
  @Test
  public void testGetTableCfsStr() {
    // opposite of TestPerTableCFReplication#testParseTableCFsFromConfig()

    Map<TableName, List<String>> tabCFsMap = null;

    // 1. a null map, the result should be null
    assertEquals(null, ReplicationAdmin.getTableCfsStr(tabCFsMap));

    // 2. single table: "tab1" / "tab2:cf1" / "tab3:cf1,cf3"
    tabCFsMap = new TreeMap<TableName, List<String>>();
    tabCFsMap.put(TableName.valueOf("tab1"), null); // its table name is "tab1"
    assertEquals("tab1", ReplicationAdmin.getTableCfsStr(tabCFsMap));

    tabCFsMap = new TreeMap<TableName, List<String>>();
    tabCFsMap.put(TableName.valueOf("tab1"), Lists.newArrayList("cf1"));
    assertEquals("tab1:cf1", ReplicationAdmin.getTableCfsStr(tabCFsMap));

    tabCFsMap = new TreeMap<TableName, List<String>>();
    tabCFsMap.put(TableName.valueOf("tab1"), Lists.newArrayList("cf1", "cf3"));
    assertEquals("tab1:cf1,cf3", ReplicationAdmin.getTableCfsStr(tabCFsMap));

    // 3. multiple tables: "tab1 ; tab2:cf1 ; tab3:cf1,cf3"
    tabCFsMap = new TreeMap<TableName, List<String>>();
    tabCFsMap.put(TableName.valueOf("tab1"), null);
    tabCFsMap.put(TableName.valueOf("tab2"), Lists.newArrayList("cf1"));
    tabCFsMap.put(TableName.valueOf("tab3"), Lists.newArrayList("cf1", "cf3"));
    assertEquals("tab1;tab2:cf1;tab3:cf1,cf3", ReplicationAdmin.getTableCfsStr(tabCFsMap));
  }
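For reference, here is a minimal sketch of the serialization these assertions exercise, inferred purely from the expected strings above ("tab1", "tab1:cf1", "tab1;tab2:cf1;tab3:cf1,cf3"); the real ReplicationAdmin.getTableCfsStr may be implemented differently.

  // Hypothetical re-implementation, for illustration only (assumes java.util.List,
  // java.util.Map and org.apache.hadoop.hbase.TableName are imported).
  static String tableCfsToString(Map<TableName, List<String>> tableCfs) {
    if (tableCfs == null) {
      return null;                          // a null map serializes to null
    }
    StringBuilder sb = new StringBuilder();
    for (Map.Entry<TableName, List<String>> entry : tableCfs.entrySet()) {
      if (sb.length() > 0) {
        sb.append(";");                     // ';' separates tables
      }
      sb.append(entry.getKey().getNameAsString());
      List<String> cfs = entry.getValue();
      if (cfs != null && !cfs.isEmpty()) {
        sb.append(":");                     // ':' separates a table from its CFs
        for (int i = 0; i < cfs.size(); i++) {
          if (i > 0) {
            sb.append(",");                 // ',' separates CFs
          }
          sb.append(cfs.get(i));
        }
      }
    }
    return sb.toString();
  }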
Example #2
 private void enablePeer(String id, int masterClusterNumber) throws Exception {
   ReplicationAdmin replicationAdmin = null;
   try {
     replicationAdmin = new ReplicationAdmin(configurations[masterClusterNumber]);
     replicationAdmin.enablePeer(id);
   } finally {
     close(replicationAdmin);
   }
 }
Example #3
 private void addPeer(String id, int masterClusterNumber, int slaveClusterNumber)
     throws Exception {
   ReplicationAdmin replicationAdmin = null;
   try {
     replicationAdmin = new ReplicationAdmin(configurations[masterClusterNumber]);
     replicationAdmin.addPeer(id, utilities[slaveClusterNumber].getClusterKey());
   } finally {
     close(replicationAdmin);
   }
 }
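Both helpers above delegate cleanup to a close(...) method that is not shown in these snippets. A minimal null-safe sketch follows, assuming ReplicationAdmin's close() is reachable through java.io.Closeable; the actual test class may log or propagate exceptions differently.

 // Hypothetical cleanup helper; name and signature are inferred from the calls above
 // (assumes java.io.Closeable / java.io.IOException imports and the class's LOG).
 private void close(Closeable closeable) {
   try {
     if (closeable != null) {
       closeable.close();
     }
   } catch (IOException e) {
     LOG.warn("Exception occurred while closing", e);
   }
 }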
Example #4
  @Test
  public void testAppendPeerTableCFs() throws Exception {
    // Add a valid peer
    admin.addPeer(ID_ONE, KEY_ONE);

    admin.appendPeerTableCFs(ID_ONE, "t1");
    assertEquals("t1", admin.getPeerTableCFs(ID_ONE));

    // append table t2 to replication
    admin.appendPeerTableCFs(ID_ONE, "t2");
    String peerTablesOne = admin.getPeerTableCFs(ID_ONE);

    // Different JDKs return different sort orders for the tables (the exact reason is unclear),
    // so instead of asserting on the exact string we assert that it contains
    // all tables and the needed separator.
    assertTrue("Should contain t1", peerTablesOne.contains("t1"));
    assertTrue("Should contain t2", peerTablesOne.contains("t2"));
    assertTrue("Should contain ; as the separator", peerTablesOne.contains(";"));

    // append table column family: f1 of t3 to replication
    admin.appendPeerTableCFs(ID_ONE, "t3:f1");
    String peerTablesTwo = admin.getPeerTableCFs(ID_ONE);
    assertTrue("Should contain t1", peerTablesTwo.contains("t1"));
    assertTrue("Should contain t2", peerTablesTwo.contains("t2"));
    assertTrue("Should contain t3:f1", peerTablesTwo.contains("t3:f1"));
    assertTrue("Should contain ; as the seperator", peerTablesTwo.contains(";"));
    admin.removePeer(ID_ONE);
  }
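Because the serialized order is JDK-dependent, an alternative to the contains() checks would be to parse the string back into a map and compare maps. A hypothetical helper (the name parseTableCfs and its placement are assumptions, not part of the test class):

  // Parses "tab1;tab2:cf1" back into a map so assertions can be order-insensitive
  // (assumes java.util.* is imported). A null CF list means "all CFs of the table".
  static Map<String, List<String>> parseTableCfs(String tableCfsStr) {
    Map<String, List<String>> result = new TreeMap<String, List<String>>();
    if (tableCfsStr == null || tableCfsStr.trim().isEmpty()) {
      return result;
    }
    for (String tableEntry : tableCfsStr.split(";")) {
      String[] parts = tableEntry.trim().split(":");
      result.put(parts[0], parts.length > 1 ? Arrays.asList(parts[1].split(",")) : null);
    }
    return result;
  }

With such a helper, the test could assert equality against an expected map instead of checking substrings.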
Example #5
 @AfterClass
 public static void tearDownAfterClass() throws Exception {
   if (admin != null) {
     admin.close();
   }
   TEST_UTIL.shutdownMiniZKCluster();
 }
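Example #6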
  /**
   * Add a row to a table in each cluster, check that it's replicated, delete it, and check that
   * it's gone. Also check that the puts and deletes are not replicated back to the originating
   * cluster.
   */
  @Test(timeout = 300000)
  public void testSimplePutDelete() throws Exception {
    LOG.info("testSimplePutDelete");
    utility1.startMiniCluster();
    utility2.startMiniCluster();

    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
    ReplicationAdmin admin2 = new ReplicationAdmin(conf2);

    new HBaseAdmin(conf1).createTable(table);
    new HBaseAdmin(conf2).createTable(table);
    HTable htable1 = new HTable(conf1, tableName);
    htable1.setWriteBufferSize(1024);
    HTable htable2 = new HTable(conf2, tableName);
    htable2.setWriteBufferSize(1024);

    // set M-M
    admin1.addPeer("1", clusterKey2);
    admin2.addPeer("1", clusterKey1);

    // add rows to both clusters and
    // make sure they are both replicated
    putAndWait(row, famName, htable1, htable2);
    putAndWait(row1, famName, htable2, htable1);

    // make sure "row" did not get replicated back.
    assertEquals("Puts were replicated back ", 2, getCount(htable1, put));

    // delete "row" and wait
    deleteAndWait(row, htable1, htable2);

    // make sure "row1" did not get replicated back to the 2nd cluster
    assertEquals("Puts were replicated back ", 2, getCount(htable2, put));

    deleteAndWait(row1, htable2, htable1);

    assertEquals("Deletes were replicated back ", 2, getCount(htable1, delete));
    utility2.shutdownMiniCluster();
    utility1.shutdownMiniCluster();
  }
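putAndWait, deleteAndWait, and getCount are helpers of the test class and are not included in these snippets. A rough sketch of the putAndWait pattern follows (the timeout, sleep interval, and failure message are assumptions; deleteAndWait is analogous, polling until the row disappears):

  // Sketch only: write to the source cluster, then poll the target cluster
  // until the edit shows up or we give up.
  private void putAndWait(byte[] row, byte[] fam, Table source, Table target)
      throws Exception {
    Put put = new Put(row);
    put.add(fam, row, row);                 // era-appropriate (deprecated) API
    source.put(put);

    Get get = new Get(row);
    for (int i = 0; i < 30; i++) {          // up to ~30s with 1s sleeps
      if (!target.get(get).isEmpty()) {
        return;                             // the edit was replicated
      }
      Thread.sleep(1000);
    }
    fail("Waited too long for replication of " + Bytes.toString(row));
  }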
Example #7
  /**
   * Basic checks that when we add a peer it is enabled, and that we can then disable it.
   * @throws Exception
   */
  @Test
  public void testEnableDisable() throws Exception {
    admin.addPeer(ID_ONE, KEY_ONE);
    assertEquals(1, admin.getPeersCount());
    assertTrue(admin.getPeerState(ID_ONE));
    admin.disablePeer(ID_ONE);

    assertFalse(admin.getPeerState(ID_ONE));
    try {
      admin.getPeerState(ID_SECOND);
      fail("getPeerState on a non-existent peer should throw");
    } catch (IllegalArgumentException iae) {
      // OK!
    }
    admin.removePeer(ID_ONE);
  }
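Example #8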
  @Test(timeout = 300000)
  public void testCyclicReplication() throws Exception {
    LOG.info("testCyclicReplication");
    utility1.startMiniCluster();
    utility2.startMiniCluster();
    utility3.startMiniCluster();
    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
    ReplicationAdmin admin2 = new ReplicationAdmin(conf2);
    ReplicationAdmin admin3 = new ReplicationAdmin(conf3);

    new HBaseAdmin(conf1).createTable(table);
    new HBaseAdmin(conf2).createTable(table);
    new HBaseAdmin(conf3).createTable(table);
    HTable htable1 = new HTable(conf1, tableName);
    htable1.setWriteBufferSize(1024);
    HTable htable2 = new HTable(conf2, tableName);
    htable2.setWriteBufferSize(1024);
    HTable htable3 = new HTable(conf3, tableName);
    htable3.setWriteBufferSize(1024);

    admin1.addPeer("1", clusterKey2);
    admin2.addPeer("1", clusterKey3);
    admin3.addPeer("1", clusterKey1);

    // put "row" and wait 'til it got around
    putAndWait(row, famName, htable1, htable3);
    // it should have passed through table2
    check(row, famName, htable2);

    // likewise, each put should pass through the remaining intermediate cluster
    putAndWait(row1, famName, htable2, htable1);
    check(row1, famName, htable3);
    putAndWait(row2, famName, htable3, htable2);
    check(row2, famName, htable1);

    deleteAndWait(row, htable1, htable3);
    deleteAndWait(row1, htable2, htable1);
    deleteAndWait(row2, htable3, htable2);

    assertEquals("Puts were replicated back ", 3, getCount(htable1, put));
    assertEquals("Puts were replicated back ", 3, getCount(htable2, put));
    assertEquals("Puts were replicated back ", 3, getCount(htable3, put));
    assertEquals("Deletes were replicated back ", 3, getCount(htable1, delete));
    assertEquals("Deletes were replicated back ", 3, getCount(htable2, delete));
    assertEquals("Deletes were replicated back ", 3, getCount(htable3, delete));
    utility3.shutdownMiniCluster();
    utility2.shutdownMiniCluster();
    utility1.shutdownMiniCluster();
  }
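check(...) verifies that a row has already passed through an intermediate cluster; a plausible minimal shape is below (the real helper may also retry, like putAndWait does):

  // Sketch: assert the row is present right now, without polling.
  private void check(byte[] row, byte[] fam, Table table) throws IOException {
    Get get = new Get(row);
    get.addFamily(fam);                     // restrict to the replicated family
    Result res = table.get(get);
    assertFalse("Row " + Bytes.toString(row) + " never made it through", res.isEmpty());
  }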
Example #9
 /**
  * Simple testing of adding and removing peers; basically shows that all interactions with ZK
  * work.
  * @throws Exception
  */
 @Test
 public void testAddRemovePeer() throws Exception {
   // Add a valid peer
   admin.addPeer(ID_ONE, KEY_ONE);
   // try adding the same peer again (should fail)
   try {
     admin.addPeer(ID_ONE, KEY_ONE);
     fail("Adding a duplicate peer should throw");
   } catch (IllegalArgumentException iae) {
     // OK!
   }
   assertEquals(1, admin.getPeersCount());
   // Try to remove a nonexistent peer
   try {
     admin.removePeer(ID_SECOND);
     fail();
   } catch (IllegalArgumentException iae) {
     // OK!
   }
   assertEquals(1, admin.getPeersCount());
   // Add a second peer since multi-slave is supported
   try {
     admin.addPeer(ID_SECOND, KEY_SECOND);
   } catch (IllegalStateException ise) {
     fail();
   }
   assertEquals(2, admin.getPeersCount());
   // Remove the first peer we added
   admin.removePeer(ID_ONE);
   assertEquals(1, admin.getPeersCount());
   admin.removePeer(ID_SECOND);
   assertEquals(0, admin.getPeersCount());
 }
Example #10
  @Test
  public void testRemovePeerTableCFs() throws Exception {
    // Add a valid peer
    admin.addPeer(ID_ONE, KEY_ONE);
    try {
      admin.removePeerTableCFs(ID_ONE, "t3");
      fail("Removing table-CFs that were never set should throw");
    } catch (ReplicationException e) {
      // expected
    }
    assertEquals("", admin.getPeerTableCFs(ID_ONE));

    admin.setPeerTableCFs(ID_ONE, "t1;t2:cf1");
    try {
      admin.removePeerTableCFs(ID_ONE, "t3");
      fail("Removing a table that is not in the config should throw");
    } catch (ReplicationException e) {
      // expected
    }
    assertEquals("t1;t2:cf1", admin.getPeerTableCFs(ID_ONE));

    try {
      admin.removePeerTableCFs(ID_ONE, "t1:f1");
      fail("Removing a CF from a table configured without explicit CFs should throw");
    } catch (ReplicationException e) {
      // expected
    }
    admin.removePeerTableCFs(ID_ONE, "t1");
    assertEquals("t2:cf1", admin.getPeerTableCFs(ID_ONE));

    try {
      admin.removePeerTableCFs(ID_ONE, "t2");
      fail("Removing a whole table when only specific CFs are configured should throw");
    } catch (ReplicationException e) {
      // expected
    }
    admin.removePeerTableCFs(ID_ONE, "t2:cf1");
    assertEquals("", admin.getPeerTableCFs(ID_ONE));
    admin.removePeer(ID_ONE);
  }
Example #11
  @SuppressWarnings("deprecation")
  @Test(timeout = 300000)
  public void testReplicaAndReplication() throws Exception {
    HTableDescriptor hdt = HTU.createTableDescriptor("testReplicaAndReplication");
    hdt.setRegionReplication(NB_SERVERS);

    HColumnDescriptor fam = new HColumnDescriptor(row);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    hdt.addFamily(fam);

    hdt.addCoprocessor(SlowMeCopro.class.getName());
    HTU.getHBaseAdmin().createTable(hdt, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);

    Configuration conf2 = HBaseConfiguration.create(HTU.getConfiguration());
    conf2.set(HConstants.HBASE_CLIENT_INSTANCE_ID, String.valueOf(-1));
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    MiniZooKeeperCluster miniZK = HTU.getZkCluster();

    HTU2 = new HBaseTestingUtility(conf2);
    HTU2.setZkCluster(miniZK);
    HTU2.startMiniCluster(NB_SERVERS);
    LOG.info("Setup second Zk");
    HTU2.getHBaseAdmin().createTable(hdt, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);

    ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
    admin.addPeer("2", HTU2.getClusterKey());
    admin.close();

    Put p = new Put(row);
    p.add(row, row, row);
    final Table table = HTU.getConnection().getTable(hdt.getTableName());
    table.put(p);

    HTU.getHBaseAdmin().flush(table.getName());
    LOG.info("Put & flush done on the first cluster. Now doing a get on the same cluster.");

    Waiter.waitFor(
        HTU.getConfiguration(),
        1000,
        new Waiter.Predicate<Exception>() {
          @Override
          public boolean evaluate() throws Exception {
            try {
              SlowMeCopro.cdl.set(new CountDownLatch(1));
              Get g = new Get(row);
              g.setConsistency(Consistency.TIMELINE);
              Result r = table.get(g);
              Assert.assertTrue(r.isStale());
              return !r.isEmpty();
            } finally {
              SlowMeCopro.cdl.get().countDown();
              SlowMeCopro.sleepTime.set(0);
            }
          }
        });
    table.close();
    LOG.info("stale get on the first cluster done. Now for the second.");

    final Table table2 = HTU2.getConnection().getTable(hdt.getTableName()); // the second cluster
    Waiter.waitFor(
        HTU.getConfiguration(),
        1000,
        new Waiter.Predicate<Exception>() {
          @Override
          public boolean evaluate() throws Exception {
            try {
              SlowMeCopro.cdl.set(new CountDownLatch(1));
              Get g = new Get(row);
              g.setConsistency(Consistency.TIMELINE);
              Result r = table2.get(g);
              Assert.assertTrue(r.isStale());
              return !r.isEmpty();
            } finally {
              SlowMeCopro.cdl.get().countDown();
              SlowMeCopro.sleepTime.set(0);
            }
          }
        });
    table2.close();

    HTU.getHBaseAdmin().disableTable(hdt.getTableName());
    HTU.deleteTable(hdt.getTableName());

    HTU2.getHBaseAdmin().disableTable(hdt.getTableName());
    HTU2.deleteTable(hdt.getTableName());

    // We shut down the HTU2 minicluster later, in afterClass(), as shutting down
    // the minicluster has the negative side effect of deleting all HConnections in the JVM.
  }
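Example #12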
  @Test(timeout = 300000)
  public void testMultiSlaveReplication() throws Exception {
    LOG.info("testCyclicReplication");
    MiniHBaseCluster master = utility1.startMiniCluster();
    utility2.startMiniCluster();
    utility3.startMiniCluster();
    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);

    new HBaseAdmin(conf1).createTable(table);
    new HBaseAdmin(conf2).createTable(table);
    new HBaseAdmin(conf3).createTable(table);
    HTable htable1 = new HTable(conf1, tableName);
    htable1.setWriteBufferSize(1024);
    HTable htable2 = new HTable(conf2, tableName);
    htable2.setWriteBufferSize(1024);
    HTable htable3 = new HTable(conf3, tableName);
    htable3.setWriteBufferSize(1024);

    admin1.addPeer("1", utility2.getClusterKey());

    // put "row" and wait 'til it got around, then delete
    putAndWait(row, famName, htable1, htable2);
    deleteAndWait(row, htable1, htable2);
    // check it wasn't replicated to cluster 3
    checkRow(row, 0, htable3);

    putAndWait(row2, famName, htable1, htable2);

    // now roll the region server's logs
    new HBaseAdmin(conf1).rollHLogWriter(master.getRegionServer(0).getServerName().toString());
    // after the log was rolled put a new row
    putAndWait(row3, famName, htable1, htable2);

    admin1.addPeer("2", utility3.getClusterKey());

    // put a row, check it was replicated to all clusters
    putAndWait(row1, famName, htable1, htable2, htable3);
    // delete and verify
    deleteAndWait(row1, htable1, htable2, htable3);

    // make sure row2 did not get replicated after
    // cluster 3 was added
    checkRow(row2, 0, htable3);

    // row3 will get replicated, because it was in the
    // latest log
    checkRow(row3, 1, htable3);

    Put p = new Put(row);
    p.add(famName, row, row);
    htable1.put(p);
    // now roll the logs again
    new HBaseAdmin(conf1).rollHLogWriter(master.getRegionServer(0).getServerName().toString());

    // cleanup "row2", also conveniently use this to wait replication
    // to finish
    deleteAndWait(row2, htable1, htable2, htable3);
    // Even if the log was rolled in the middle of the replication,
    // "row" is still replicated.
    checkRow(row, 1, htable2, htable3);

    // cleanup the rest
    deleteAndWait(row, htable1, htable2, htable3);
    deleteAndWait(row3, htable1, htable2, htable3);

    utility3.shutdownMiniCluster();
    utility2.shutdownMiniCluster();
    utility1.shutdownMiniCluster();
  }
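checkRow(row, count, tables...) asserts how many cells a row has on each of the given clusters, with 0 meaning the row must never have been replicated there. A hedged sketch of that contract:

  // Sketch: the real helper may be implemented differently, but the contract
  // the test relies on is "exactly `count` cells for `row` in each table".
  private void checkRow(byte[] row, int count, Table... tables) throws IOException {
    Get get = new Get(row);
    for (Table table : tables) {
      Result res = table.get(get);
      assertEquals("Unexpected cell count for " + Bytes.toString(row), count, res.size());
    }
  }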
Example #13
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    conf1.setInt("hfile.format.version", 3);
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    conf1.setInt("replication.source.size.capacity", 10240);
    conf1.setLong("replication.source.sleepforretries", 100);
    conf1.setInt("hbase.regionserver.maxlogs", 10);
    conf1.setLong("hbase.master.logcleaner.ttl", 10);
    conf1.setInt("zookeeper.recovery.retry", 1);
    conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
    conf1.setBoolean("dfs.support.append", true);
    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    conf1.setInt("replication.stats.thread.period.seconds", 5);
    conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
    conf1.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
    conf1.setStrings(
        CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
        TestCoprocessorForTagsAtSource.class.getName());

    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
    // Have to re-get conf1 in case the zk cluster location is different
    // from the default
    conf1 = utility1.getConfiguration();
    replicationAdmin = new ReplicationAdmin(conf1);
    LOG.info("Setup first Zk");

    // Base conf2 on conf1 so it gets the right zk cluster.
    conf2 = HBaseConfiguration.create(conf1);
    conf2.setInt("hfile.format.version", 3);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
    conf2.setBoolean("dfs.support.append", true);
    conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
    conf2.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
    conf2.setStrings(
        CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
        TestCoprocessorForTagsAtSink.class.getName());

    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK);

    replicationAdmin.addPeer("2", utility2.getClusterKey());

    LOG.info("Setup second Zk");
    utility1.startMiniCluster(2);
    utility2.startMiniCluster(2);

    HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
    HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
    fam.setMaxVersions(3);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    table.addFamily(fam);
    try (Connection conn = ConnectionFactory.createConnection(conf1);
        Admin admin = conn.getAdmin()) {
      admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    }
    try (Connection conn = ConnectionFactory.createConnection(conf2);
        Admin admin = conn.getAdmin()) {
      admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    }
    htable1 = utility1.getConnection().getTable(TABLE_NAME);
    htable2 = utility2.getConnection().getTable(TABLE_NAME);
  }
Example #14
  /** @throws java.lang.Exception */
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    // smaller log roll size to trigger more events
    conf1.setFloat("hbase.regionserver.logroll.multiplier", 0.0003f);
    conf1.setInt("replication.source.size.capacity", 10240);
    conf1.setLong("replication.source.sleepforretries", 100);
    conf1.setInt("hbase.regionserver.maxlogs", 10);
    conf1.setLong("hbase.master.logcleaner.ttl", 10);
    conf1.setInt("zookeeper.recovery.retry", 1);
    conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
    conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
    conf1.setBoolean("dfs.support.append", true);
    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    conf1.setInt("replication.stats.thread.period.seconds", 5);
    conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);

    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
    // Have to re-get conf1 in case the zk cluster location is different
    // from the default
    conf1 = utility1.getConfiguration();
    zkw1 = new ZooKeeperWatcher(conf1, "cluster1", null, true);
    admin = new ReplicationAdmin(conf1);
    LOG.info("Setup first Zk");

    // Base conf2 on conf1 so it gets the right zk cluster.
    conf2 = HBaseConfiguration.create(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
    conf2.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
    conf2.setBoolean("dfs.support.append", true);
    conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);

    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK);
    zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true);

    admin.addPeer("2", utility2.getClusterKey());

    LOG.info("Setup second Zk");
    CONF_WITH_LOCALFS = HBaseConfiguration.create(conf1);
    utility1.startMiniCluster(2);
    utility2.startMiniCluster(2);

    HTableDescriptor table = new HTableDescriptor(TableName.valueOf(tableName));
    HColumnDescriptor fam = new HColumnDescriptor(famName);
    fam.setMaxVersions(3);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    table.addFamily(fam);
    fam = new HColumnDescriptor(noRepfamName);
    table.addFamily(fam);
    HBaseAdmin admin1 = new HBaseAdmin(conf1);
    HBaseAdmin admin2 = new HBaseAdmin(conf2);
    admin1.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    admin2.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    htable1 = new HTable(conf1, tableName);
    htable1.setWriteBufferSize(1024);
    htable2 = new HTable(conf2, tableName);
  }