/**
 * Puts a single cell into the source table and waits until it has been
 * replicated to every target table, failing the test after NB_RETRIES attempts.
 */
private void putAndWait(byte[] row, byte[] fam, HTable source, HTable... targets)
    throws Exception {
  Put put = new Put(row);
  put.add(fam, row, row);
  source.put(put);

  Get get = new Get(row);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for put replication");
    }
    boolean replicatedToAll = true;
    for (HTable target : targets) {
      Result res = target.get(get);
      if (res.size() == 0) {
        LOG.info("Row not available");
        replicatedToAll = false;
        break;
      } else {
        assertArrayEquals(res.value(), row);
      }
    }
    if (replicatedToAll) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
/**
 * Deletes a row from the source table and waits until the delete has been
 * replicated to every target table, failing the test after NB_RETRIES attempts.
 */
private void deleteAndWait(byte[] row, HTable source, HTable... targets) throws Exception {
  Delete del = new Delete(row);
  source.delete(del);

  Get get = new Get(row);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for del replication");
    }
    boolean removedFromAll = true;
    for (HTable target : targets) {
      Result res = target.get(get);
      if (res.size() >= 1) {
        LOG.info("Row not deleted");
        removedFromAll = false;
        break;
      }
    }
    if (removedFromAll) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
/**
 * Waits until the given table no longer exists, failing the test after
 * MAX_WAIT_ITERATION attempts.
 */
protected void waitForDelete(String tableName) throws Exception {
  for (int i = 0; i < MAX_WAIT_ITERATION; i++) {
    if (!admin.tableExists(tableName)) {
      return;
    }
    Thread.sleep(WAIT_INTERVAL);
  }
  Assert.fail(getMethodName() + " failed");
}
@Test
public void testHTableDescriptors() throws IOException, InterruptedException {
  final String name = "testHTableDescriptors";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  // Cleanup old tests if any debris is lying around.
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  final int count = 10;
  // Write out table infos.
  for (int i = 0; i < count; i++) {
    HTableDescriptor htd = new HTableDescriptor(name + i);
    createHTDInFS(fs, rootdir, htd);
  }

  FSTableDescriptors htds = new FSTableDescriptors(fs, rootdir) {
    @Override
    public HTableDescriptor get(byte[] tablename)
        throws TableExistsException, FileNotFoundException, IOException {
      LOG.info(Bytes.toString(tablename) + ", cachehits=" + this.cachehits);
      return super.get(tablename);
    }
  };

  // Read every descriptor twice; the second pass should be served from cache.
  for (int i = 0; i < count; i++) {
    assertTrue(htds.get(Bytes.toBytes(name + i)) != null);
  }
  for (int i = 0; i < count; i++) {
    assertTrue(htds.get(Bytes.toBytes(name + i)) != null);
  }

  // Update the table infos.
  for (int i = 0; i < count; i++) {
    HTableDescriptor htd = new HTableDescriptor(name + i);
    htd.addFamily(new HColumnDescriptor("" + i));
    FSTableDescriptors.updateHTableDescriptor(fs, rootdir, htd);
  }
  // Wait a while so the mod time we write is for sure different.
  Thread.sleep(100);

  // Read the updated descriptors twice more.
  for (int i = 0; i < count; i++) {
    assertTrue(htds.get(Bytes.toBytes(name + i)) != null);
  }
  for (int i = 0; i < count; i++) {
    assertTrue(htds.get(Bytes.toBytes(name + i)) != null);
  }

  assertEquals(count * 4, htds.invocations);
  assertTrue("expected=" + (count * 2) + ", actual=" + htds.cachehits,
      htds.cachehits >= (count * 2));

  assertTrue(htds.get(HConstants.ROOT_TABLE_NAME) != null);
  assertEquals(htds.invocations, count * 4 + 1);
  assertTrue("expected=" + ((count * 2) + 1) + ", actual=" + htds.cachehits,
      htds.cachehits >= ((count * 2) + 1));
}
/**
 * Acquires a ZooKeeper-based lock by creating an ephemeral sequential node
 * under /lock and spinning until no lower-sequenced sibling for the same lock
 * name exists. Returns the real path of the created lock node, or null on error.
 */
private static String lock(String lock) {
  String realPath = "";
  String parent = "/lock";
  String lockName = parent + "/" + lock;
  logger.debug("Getting lock " + lockName);

  try {
    if (zkInstance.exists(parent, false) == null) {
      zkInstance.create(parent, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE,
          CreateMode.fromFlag(0));
    }
  } catch (Exception E) {
    logger.error("Error creating lock node: " + E.toString());
    return null;
  }

  List<String> children = new LinkedList<String>();
  try {
    // List<ACL> ACLList = zkInstance.getACL(lockName, zkInstance.exists(lock, false));
    realPath = zkInstance.create(lockName, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE,
        CreateMode.EPHEMERAL_SEQUENTIAL);
    // children = zkInstance.getChildren(realPath, false);

    checkLock:
    while (true) {
      children = zkInstance.getChildren(parent, false);
      for (String curChild : children) {
        String child = parent + "/" + curChild;
        // System.out.println(child + " " + realPath + " "
        //     + Integer.toString(child.compareTo(realPath)));
        if (child.compareTo(realPath) < 0 && child.length() == realPath.length()
            && curChild.startsWith(lock)) {
          // System.out.println(child + " cmp to " + realPath);
          Thread.sleep(300);
          continue checkLock;
        }
      }
      logger.info("Got lock " + lockName);
      return realPath;
    }
  } catch (Exception E) {
    logger.error("Exception while trying to get lock " + lockName + " :" + E.toString());
    E.printStackTrace();
    return null;
  }
}
/**
 * Repeatedly asks the master to move the region to the given server and waits
 * until the region shows up in that server's region load, dumping the load and
 * failing the test after MAX_WAIT_ITERATION attempts.
 */
protected void waitForMoving(HRegionInfo hRegionInfo, ServerName serverName) throws Exception {
  Map<byte[], HServerLoad.RegionLoad> regionsLoad = null;
  for (int i = 0; i < MAX_WAIT_ITERATION; i++) {
    HServerLoad load = admin.getClusterStatus().getLoad(serverName);
    regionsLoad = load.getRegionsLoad();
    for (byte[] regionName : regionsLoad.keySet()) {
      if (Arrays.equals(regionName, hRegionInfo.getRegionName())) {
        return;
      }
    }
    admin.move(hRegionInfo.getEncodedNameAsBytes(), serverName.getServerName().getBytes());
    Thread.sleep(WAIT_INTERVAL);
  }

  System.out.println("hRegionInfo = " + Bytes.toString(hRegionInfo.getRegionName()));
  for (Map.Entry<byte[], HServerLoad.RegionLoad> entry : regionsLoad.entrySet()) {
    System.out.println(
        "regionsLoad = " + Bytes.toString(entry.getKey()) + " - " + entry.getValue());
  }
  Assert.fail(Util.getMethodName() + " failed");
}
@Test
public void testRPCException() throws Exception {
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.startMiniZKCluster();
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(HConstants.MASTER_PORT, "0");

  HMaster hm = new HMaster(conf);
  ServerName sm = hm.getServerName();
  InetSocketAddress isa = new InetSocketAddress(sm.getHostname(), sm.getPort());
  int i = 0;
  // Retry the RPC a few times; we have seen SocketTimeoutExceptions if we
  // try to connect too soon. Retry on SocketTimeoutException.
  while (i < 20) {
    try {
      MasterMonitorProtocol inf = (MasterMonitorProtocol) HBaseClientRPC.getProxy(
          MasterMonitorProtocol.class, isa, conf, 100 * 10);
      inf.isMasterRunning(null, IsMasterRunningRequest.getDefaultInstance());
      fail();
    } catch (ServiceException ex) {
      IOException ie = ProtobufUtil.getRemoteException(ex);
      if (!(ie instanceof SocketTimeoutException)) {
        if (ie.getMessage().startsWith(
            "org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet")) {
          return;
        }
      } else {
        System.err.println("Got SocketTimeoutException. Will retry.");
      }
    } catch (Throwable t) {
      fail("Unexpected throwable: " + t);
    }
    Thread.sleep(100);
    i++;
  }
  fail();
}
/**
 * Waits until the table is online with the expected number of regions,
 * asserting on the region count if the wait times out.
 */
protected void waitForSplitting(String tableName, int regionCount)
    throws IOException, InterruptedException {
  int regionCountActual = 0;
  for (int i = 0; i < MAX_WAIT_ITERATION; i++) {
    try (HTable table = new HTable(conf, tableName)) {
      regionCountActual = 0;
      NavigableMap<HRegionInfo, ServerName> regionLocations = table.getRegionLocations();
      for (Map.Entry<HRegionInfo, ServerName> entry : regionLocations.entrySet()) {
        HServerLoad serverLoad = admin.getClusterStatus().getLoad(entry.getValue());
        for (HServerLoad.RegionLoad regionLoad : serverLoad.getRegionsLoad().values()) {
          if (Arrays.equals(entry.getKey().getRegionName(), regionLoad.getName())) {
            regionCountActual++;
          }
        }
      }
      if (regionCountActual == regionCount) {
        return;
      }
    }
    Thread.sleep(WAIT_INTERVAL);
  }
  Assert.assertEquals(getMethodName() + " failed - ", regionCount, regionCountActual);
}
@Test
public void testLogCleaning() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  // Set the TTL used by TimeToLiveLogCleaner.
  long ttl = 10000;
  conf.setLong("hbase.master.logcleaner.ttl", ttl);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
  Replication.decorateMasterConfiguration(conf);
  Server server = new DummyServer();
  ReplicationQueues repQueues =
      ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server);
  repQueues.init(server.getServerName().toString());
  final Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(), HConstants.HREGION_OLDLOGDIR_NAME);
  String fakeMachineName = URLEncoder.encode(server.getServerName().toString(), "UTF8");

  final FileSystem fs = FileSystem.get(conf);

  // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files.
  long now = System.currentTimeMillis();
  fs.delete(oldLogDir, true);
  fs.mkdirs(oldLogDir);
  // Case 1: 2 invalid files, which would be deleted directly.
  fs.createNewFile(new Path(oldLogDir, "a"));
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
  // Case 2: 1 "recent" file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain.
  System.out.println("Now is: " + now);
  for (int i = 1; i < 31; i++) {
    // Case 3: old files which would be deletable for the first log cleaner
    // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner).
    Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i));
    fs.createNewFile(fileName);
    // Case 4: put 3 old log files in ZK indicating that they are scheduled
    // for replication, so these files would pass the first log cleaner
    // (TimeToLiveLogCleaner) but would be rejected by the second
    // (ReplicationLogCleaner).
    if (i % (30 / 3) == 1) {
      repQueues.addLog(fakeMachineName, fileName.getName());
      System.out.println("Replication log file: " + fileName);
    }
  }

  // Sleep for some time to get a newer modification time.
  Thread.sleep(ttl);
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

  // Case 2: 1 newer file, not even deletable for the first log cleaner
  // (TimeToLiveLogCleaner), so we are not going down the chain.
  fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000)));

  for (FileStatus stat : fs.listStatus(oldLogDir)) {
    System.out.println(stat.getPath().toString());
  }

  assertEquals(34, fs.listStatus(oldLogDir).length);

  LogCleaner cleaner = new LogCleaner(1000, server, conf, fs, oldLogDir);
  cleaner.chore();

  // We end up with the current log file, a newer one and the 3 old log
  // files which are scheduled for replication.
  TEST_UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return 5 == fs.listStatus(oldLogDir).length;
    }
  });

  for (FileStatus file : fs.listStatus(oldLogDir)) {
    System.out.println("Kept log files: " + file.getPath().getName());
  }
}