/** Test that a close issued while the region is opening cancels the open. */
@Test(timeout = 60000)
public void testCancelOpeningWithoutZK() throws Exception {
  // We close
  closeRegionNoZK();
  checkRegionIsClosed(HTU, getRS(), hri);

  // Let's do the initial steps, without having a handler
  getRS().getRegionsInTransitionInRS().put(hri.getEncodedNameAsBytes(), Boolean.TRUE);

  // That's a close without ZK.
  AdminProtos.CloseRegionRequest crr =
      RequestConverter.buildCloseRegionRequest(getRS().getServerName(), regionName);
  try {
    getRS().rpcServices.closeRegion(null, crr);
    Assert.fail("closeRegion should have thrown a ServiceException");
  } catch (ServiceException expected) {
  }

  // The state in RIT should have changed to close
  Assert.assertEquals(
      Boolean.FALSE, getRS().getRegionsInTransitionInRS().get(hri.getEncodedNameAsBytes()));

  // Let's start the open handler
  HTableDescriptor htd = getRS().tableDescriptors.get(hri.getTable());
  getRS().service.submit(new OpenRegionHandler(getRS(), getRS(), hri, htd));

  // The open handler should have removed the region from RIT but kept the region closed
  checkRegionIsClosed(HTU, getRS(), hri);

  openRegion(HTU, getRS(), hri);
}
private PairOfSameType<HRegionInfo> requestMergeRegion(
    HMaster master, TableName tablename, int regionAnum, int regionBnum) throws Exception {
  List<Pair<HRegionInfo, ServerName>> tableRegions =
      MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tablename);
  HRegionInfo regionA = tableRegions.get(regionAnum).getFirst();
  HRegionInfo regionB = tableRegions.get(regionBnum).getFirst();
  TEST_UTIL
      .getHBaseAdmin()
      .mergeRegions(regionA.getEncodedNameAsBytes(), regionB.getEncodedNameAsBytes(), false);
  return new PairOfSameType<HRegionInfo>(regionA, regionB);
}
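// Hypothetical caller sketch, not part of the original test: request a merge of the first two
// regions via requestMergeRegion() above and poll hbase:meta until the table's region count
// drops. Only calls already shown in this file are used; the method name and the 100 ms poll
// interval are assumptions for illustration.
private void requestMergeAndWait(HMaster master, TableName tablename) throws Exception {
  int regionCountBeforeMerge =
      MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tablename).size();
  requestMergeRegion(master, tablename, 0, 1);
  while (MetaTableAccessor.getTableRegionsAndLocations(master.getConnection(), tablename).size()
      >= regionCountBeforeMerge) {
    Thread.sleep(100); // the merge is asynchronous; wait until meta reflects it
  }
}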
/**
 * This test covers three cases: 1) merging a region that is not online; 2) merging a region with
 * itself; 3) merging unknown regions. They share one test case so that we don't have to create
 * many tables, and each check is simple.
 */
@Test
public void testMerge() throws Exception {
  LOG.info("Starting testMerge");
  final TableName tableName = TableName.valueOf("testMerge");

  try {
    // Create table and load data.
    Table table = createTableAndLoadData(master, tableName);
    RegionStates regionStates = master.getAssignmentManager().getRegionStates();
    List<HRegionInfo> regions = regionStates.getRegionsOfTable(tableName);
    // Fake offline one region
    HRegionInfo a = regions.get(0);
    HRegionInfo b = regions.get(1);
    regionStates.regionOffline(a);
    try {
      // Merge an offline region. Region a is offline here.
      admin.mergeRegions(a.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), false);
      fail("Offline regions should not be able to merge");
    } catch (IOException ie) {
      System.out.println(ie);
      assertTrue(
          "Exception should mention regions not online",
          StringUtils.stringifyException(ie).contains("regions not online")
              && ie instanceof MergeRegionException);
    }

    try {
      // Merge the same region: b and b.
      admin.mergeRegions(b.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), true);
      fail("A region should not be able to merge with itself, even forcefully");
    } catch (IOException ie) {
      assertTrue(
          "Exception should mention merging a region with itself",
          StringUtils.stringifyException(ie).contains("region to itself")
              && ie instanceof MergeRegionException);
    }

    try {
      // Merge unknown regions
      admin.mergeRegions(Bytes.toBytes("-f1"), Bytes.toBytes("-f2"), true);
      fail("Unknown regions should not be mergeable");
    } catch (IOException ie) {
      assertTrue("UnknownRegionException should be thrown", ie instanceof UnknownRegionException);
    }
    table.close();
  } finally {
    TEST_UTIL.deleteTable(tableName);
  }
}
public RegionReplicaReplayCallable(
    ClusterConnection connection,
    RpcControllerFactory rpcControllerFactory,
    TableName tableName,
    HRegionLocation location,
    HRegionInfo regionInfo,
    byte[] row,
    List<Entry> entries,
    AtomicLong skippedEntries) {
  super(connection, rpcControllerFactory, location, tableName, row, regionInfo.getReplicaId());
  this.entries = entries;
  this.skippedEntries = skippedEntries;
  this.initialEncodedRegionName = regionInfo.getEncodedNameAsBytes();
}
/**
 * Test adding a server to the draining servers list and then moving regions off it. Make sure
 * that no regions are moved back to the draining server.
 *
 * @throws IOException
 * @throws KeeperException
 */
@Test // (timeout=30000)
public void testDrainingServerOffloading() throws Exception {
  // I need master in the below.
  HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
  HRegionInfo hriToMoveBack = null;
  // Set first server as draining server.
  HRegionServer drainingServer =
      setDrainingServer(TEST_UTIL.getMiniHBaseCluster().getRegionServer(0));
  try {
    final int regionsOnDrainingServer = drainingServer.getNumberOfOnlineRegions();
    Assert.assertTrue(regionsOnDrainingServer > 0);
    List<HRegionInfo> hris = ProtobufUtil.getOnlineRegions(drainingServer);
    for (HRegionInfo hri : hris) {
      // Pass null and AssignmentManager will choose a random server BUT it
      // should exclude draining servers.
      master.moveRegion(
          null, RequestConverter.buildMoveRegionRequest(hri.getEncodedNameAsBytes(), null));
      // Save off region to move back.
      hriToMoveBack = hri;
    }
    // Wait for regions to come back online again.
    waitForAllRegionsOnline();
    Assert.assertEquals(0, drainingServer.getNumberOfOnlineRegions());
  } finally {
    unsetDrainingServer(drainingServer);
  }
  // Now that we've unset the draining server, we should be able to move a region
  // to what was the draining server.
  master.moveRegion(
      null,
      RequestConverter.buildMoveRegionRequest(
          hriToMoveBack.getEncodedNameAsBytes(),
          Bytes.toBytes(drainingServer.getServerName().toString())));
  // Wait for regions to come back online again.
  waitForAllRegionsOnline();
  Assert.assertEquals(1, drainingServer.getNumberOfOnlineRegions());
}
@Test
public void testMoveRegionWhenNotInitialized() {
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  HMaster m = cluster.getMaster();
  try {
    m.setInitialized(false); // fake it, set back later
    HRegionInfo meta = HRegionInfo.FIRST_META_REGIONINFO;
    m.move(meta.getEncodedNameAsBytes(), null);
    fail("Region should not be moved since master is not initialized");
  } catch (IOException ioe) {
    assertTrue(ioe instanceof PleaseHoldException);
  } finally {
    m.setInitialized(true);
  }
}
private HRegion openRegion(
    final FileSystem fs,
    final Path dir,
    final HTableDescriptor htd,
    final WALFactory wals,
    final long whenToRoll,
    final LogRoller roller)
    throws IOException {
  // Initialize HRegion
  HRegionInfo regionInfo = new HRegionInfo(htd.getTableName());
  // Initialize WAL
  final WAL wal = wals.getWAL(regionInfo.getEncodedNameAsBytes());
  // If we haven't already, attach a listener to this wal to handle rolls and metrics.
  if (walsListenedTo.add(wal)) {
    roller.addWAL(wal);
    wal.registerWALActionsListener(
        new WALActionsListener.Base() {
          private int appends = 0;

          @Override
          public void visitLogEntryBeforeWrite(
              HTableDescriptor htd, WALKey logKey, WALEdit logEdit) {
            this.appends++;
            if (this.appends % whenToRoll == 0) {
              LOG.info("Rolling after " + appends + " edits");
              // We used to do an explicit call to rollWriter but changed it to a request
              // to avoid deadlock (there are fewer threads going on in this class than
              // in the regionserver -- the regionserver does not have the issue).
              DefaultWALProvider.requestLogRoll(wal);
            }
          }

          @Override
          public void postSync(final long timeInNanos, final int handlerSyncs) {
            syncMeter.mark();
            syncHistogram.update(timeInNanos);
            syncCountHistogram.update(handlerSyncs);
          }

          @Override
          public void postAppend(final long size, final long elapsedTime) {
            appendMeter.mark(size);
          }
        });
  }

  return HRegion.createHRegion(regionInfo, dir, getConf(), htd, wal);
}
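// Hypothetical driver sketch, not part of the original tool: open a region whose WAL the
// listener above rolls every 1000 appends, run some write workload, then close the region.
// 'runRollingBenchmark', 'benchmarkDir' and 'writeSomeEdits' are names assumed for
// illustration only.
private void runRollingBenchmark(
    final FileSystem fs,
    final Path benchmarkDir,
    final HTableDescriptor htd,
    final WALFactory wals,
    final LogRoller roller)
    throws IOException {
  HRegion region = openRegion(fs, benchmarkDir, htd, wals, 1000, roller);
  try {
    writeSomeEdits(region); // hypothetical workload; each append ticks the listener's counter
  } finally {
    region.close();
  }
}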
private void OpenRegion(
    Server server, RegionServerServices rss, HTableDescriptor htd, HRegionInfo hri)
    throws IOException, NodeExistsException, KeeperException, DeserializationException {
  // Create its OFFLINE node, which is what the Master sets before sending the OPEN RPC
  ZKAssign.createNodeOffline(server.getZooKeeper(), hri, server.getServerName());
  OpenRegionHandler openHandler = new OpenRegionHandler(server, rss, hri, htd);
  rss.getRegionsInTransitionInRS().put(hri.getEncodedNameAsBytes(), Boolean.TRUE);
  openHandler.process();
  // This parse is not used?
  RegionTransition.parseFrom(ZKAssign.getData(server.getZooKeeper(), hri.getEncodedName()));
  // Delete the node, which is what the Master does after the region is opened
  ZKAssign.deleteNode(
      server.getZooKeeper(),
      hri.getEncodedName(),
      EventType.RS_ZK_REGION_OPENED,
      server.getServerName());
}
HRegion createRegion(final Path testdir, final WALFactory wals) throws IOException {
  // Make a region with start and end keys. Use 'aaa', to 'AAA'. The load
  // region utility will add rows between 'aaa' and 'zzz'.
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("table"));
  HColumnDescriptor hcd = new HColumnDescriptor(CF);
  htd.addFamily(hcd);
  HRegionInfo hri = new HRegionInfo(htd.getTableName(), STARTROW, ENDROW);
  HRegion r =
      HBaseTestingUtility.createRegionAndWAL(hri, testdir, TEST_UTIL.getConfiguration(), htd);
  HBaseTestingUtility.closeRegionAndWAL(r);
  return HRegion.openHRegion(
      testdir,
      hri,
      htd,
      wals.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace()),
      TEST_UTIL.getConfiguration());
}
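// Hypothetical caller sketch, not part of the original test: open the region via createRegion()
// above, fill it with the standard 'aaa'..'zzz' rows the comment refers to (assuming this
// HBaseTestingUtility version provides loadRegion(HRegion, byte[])), and close just the region,
// leaving the WAL to its factory. The method name is an illustration only.
void loadAndCloseRegion(final Path testdir, final WALFactory wals) throws IOException {
  HRegion region = createRegion(testdir, wals);
  try {
    TEST_UTIL.loadRegion(region, CF); // adds rows between 'aaa' and 'zzz'
  } finally {
    region.close();
  }
}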
/**
 * This method does an RPC to hbase:meta. Do not call this method with a lock/synchronize held.
 *
 * @param hris The hris to check if empty in hbase:meta and if so, clean them up.
 */
private void cleanIfNoMetaEntry(Set<HRegionInfo> hris) {
  if (hris.isEmpty()) return;
  for (HRegionInfo hri : hris) {
    try {
      // This is an RPC to the meta table. It is done while we have a synchronize on
      // regionstates. No progress will be made if meta is not available at this time.
      // This is a cleanup task. Not critical.
      if (MetaTableAccessor.getRegion(server.getConnection(), hri.getEncodedNameAsBytes())
          == null) {
        regionOffline(hri);
        FSUtils.deleteRegionDir(server.getConfiguration(), hri);
      }
    } catch (IOException e) {
      LOG.warn("Got exception while deleting " + hri + " directories from file system.", e);
    }
  }
}
@Test
public void testMoveThrowsUnknownRegionException() throws IOException {
  TableName tableName = TableName.valueOf("testMoveThrowsUnknownRegionException");
  HTableDescriptor htd = new HTableDescriptor(tableName);
  HColumnDescriptor hcd = new HColumnDescriptor("value");
  htd.addFamily(hcd);

  admin.createTable(htd, null);
  try {
    HRegionInfo hri = new HRegionInfo(tableName, Bytes.toBytes("A"), Bytes.toBytes("Z"));
    admin.move(hri.getEncodedNameAsBytes(), null);
    fail("Region should not be moved since it is fake");
  } catch (IOException ioe) {
    assertTrue(ioe instanceof UnknownRegionException);
  } finally {
    TEST_UTIL.deleteTable(tableName);
  }
}
@Override
public void run() {
  this.log.info(getName() + " started");
  final AtomicLong sequenceId = new AtomicLong(1);
  try {
    for (int i = 0; i < this.count; i++) {
      long now = System.currentTimeMillis();
      // Roll every ten edits
      if (i % 10 == 0) {
        this.wal.rollWriter();
      }
      WALEdit edit = new WALEdit();
      byte[] bytes = Bytes.toBytes(i);
      edit.add(new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY));
      final HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
      final FSTableDescriptors fts = new FSTableDescriptors(TEST_UTIL.getConfiguration());
      final HTableDescriptor htd = fts.get(TableName.META_TABLE_NAME);
      final long txid =
          wal.append(
              htd,
              hri,
              new WALKey(hri.getEncodedNameAsBytes(), TableName.META_TABLE_NAME, now),
              edit,
              sequenceId,
              true,
              null);
      wal.sync(txid);
    }
    String msg = getName() + " finished";
    if (isException()) this.log.info(msg, getException());
    else this.log.info(msg);
  } catch (Exception e) {
    this.e = e;
    log.info("Caught exception from Appender:" + getName(), e);
  } finally {
    // Call sync on our log, else threads just hang out.
    try {
      this.wal.sync();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
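// Hypothetical harness sketch, assuming the Appender whose run() is shown above extends Thread
// and exposes the isException()/getException() accessors it uses internally. The
// (wal, index, edit count) constructor signature and the method name are assumptions for
// illustration only.
private void runAppenders(final WAL wal) throws Exception {
  Appender[] appenders = new Appender[3];
  for (int i = 0; i < appenders.length; i++) {
    appenders[i] = new Appender(wal, i, 100); // assumed constructor: (wal, index, edit count)
    appenders[i].start();
  }
  for (Appender appender : appenders) {
    appender.join();
    Assert.assertFalse("Appender " + appender.getName() + " failed", appender.isException());
  }
}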
protected void waitForMoving(HRegionInfo hRegionInfo, ServerName serverName) throws Exception {
  Map<byte[], HServerLoad.RegionLoad> regionsLoad = null;
  for (int i = 0; i < MAX_WAIT_ITERATION; i++) {
    HServerLoad load = admin.getClusterStatus().getLoad(serverName);
    regionsLoad = load.getRegionsLoad();
    for (byte[] regionName : regionsLoad.keySet()) {
      if (Arrays.equals(regionName, hRegionInfo.getRegionName())) return;
    }
    admin.move(hRegionInfo.getEncodedNameAsBytes(), serverName.getServerName().getBytes());
    Thread.sleep(WAIT_INTERVAL);
  }

  System.out.println("hRegionInfo = " + Bytes.toString(hRegionInfo.getRegionName()));
  for (Map.Entry<byte[], HServerLoad.RegionLoad> entry : regionsLoad.entrySet()) {
    System.out.println(
        "regionsLoad = " + Bytes.toString(entry.getKey()) + " - " + entry.getValue());
  }

  Assert.fail(Util.getMethodName() + " failed");
}
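// Hypothetical caller sketch, not part of the original test: pick the first server reported by
// the cluster status (an arbitrary choice for illustration; a real test would pick a server
// other than the region's current host) and let waitForMoving() above drive the move, since it
// re-issues admin.move() and polls the destination's load on every iteration.
protected void moveToSomeServer(HRegionInfo hRegionInfo) throws Exception {
  ServerName destination = admin.getClusterStatus().getServers().iterator().next();
  waitForMoving(hRegionInfo, destination);
}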
@Override
public void run() {
  byte[] key = new byte[keySize];
  byte[] value = new byte[valueSize];
  Random rand = new Random(Thread.currentThread().getId());
  WAL wal = region.getWAL();

  TraceScope threadScope = Trace.startSpan("WALPerfEval." + Thread.currentThread().getName());
  try {
    long startTime = System.currentTimeMillis();
    int lastSync = 0;
    for (int i = 0; i < numIterations; ++i) {
      assert Trace.currentSpan() == threadScope.getSpan() : "Span leak detected.";
      TraceScope loopScope = Trace.startSpan("runLoopIter" + i, loopSampler);
      try {
        long now = System.nanoTime();
        Put put = setupPut(rand, key, value, numFamilies);
        WALEdit walEdit = new WALEdit();
        addFamilyMapToWALEdit(put.getFamilyCellMap(), walEdit);
        HRegionInfo hri = region.getRegionInfo();
        final WALKey logkey = new WALKey(hri.getEncodedNameAsBytes(), hri.getTable(), now);
        wal.append(htd, hri, logkey, walEdit, region.getSequenceId(), true, null);
        if (!this.noSync) {
          if (++lastSync >= this.syncInterval) {
            wal.sync();
            lastSync = 0;
          }
        }
        latencyHistogram.update(System.nanoTime() - now);
      } finally {
        loopScope.close();
      }
    }
    long totalTime = (System.currentTimeMillis() - startTime);
    logBenchmarkResult(Thread.currentThread().getName(), numIterations, totalTime);
  } catch (Exception e) {
    LOG.error(getClass().getSimpleName() + " Thread failed", e);
  } finally {
    threadScope.close();
  }
}
private Region initHRegion(HTableDescriptor htd, byte[] startKey, byte[] stopKey, int replicaId)
    throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  Path tableDir = FSUtils.getTableDir(testDir, htd.getTableName());

  HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false, 0, replicaId);

  HRegionFileSystem fs =
      new FailingHRegionFileSystem(conf, tableDir.getFileSystem(conf), tableDir, info);
  final Configuration walConf = new Configuration(conf);
  FSUtils.setRootDir(walConf, tableDir);
  final WALFactory wals = new WALFactory(walConf, null, "log_" + replicaId);
  HRegion region =
      new HRegion(
          fs,
          wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace()),
          conf,
          htd,
          null);

  region.initialize();

  return region;
}
/**
 * Inserts three waledits in the wal file and reads them back. The first edit is for a regular
 * table, the second waledit is for the ROOT table (it will be ignored while reading), and the
 * last waledit is for the hbase:meta table, which will be linked to the new system:meta table.
 *
 * @throws IOException
 */
@Test
public void testReadOldRootAndMetaEdits() throws IOException {
  LOG.debug("testReadOldRootAndMetaEdits");
  Configuration conf = HBaseConfiguration.create();
  conf.setClass(
      "hbase.regionserver.hlog.writer.impl", SequenceFileLogWriter.class, HLog.Writer.class);
  // kv list to be used for all WALEdits.
  byte[] row = Bytes.toBytes("row");
  KeyValue kv = new KeyValue(row, row, row, row);
  List<KeyValue> kvs = new ArrayList<KeyValue>();
  kvs.add(kv);

  HLog.Writer writer = null;
  HLog.Reader reader = null;
  // a regular table
  TableName t = TableName.valueOf("t");
  HRegionInfo tRegionInfo = null;
  int logCount = 0;
  long timestamp = System.currentTimeMillis();
  Path path = new Path(dir, "t");
  try {
    tRegionInfo = new HRegionInfo(t, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    HLog.Entry tEntry =
        createAEntry(
            new HLogKey(
                tRegionInfo.getEncodedNameAsBytes(),
                t,
                ++logCount,
                timestamp,
                HConstants.DEFAULT_CLUSTER_ID),
            kvs);

    // create an old root edit (-ROOT-).
    HLog.Entry rootEntry =
        createAEntry(
            new HLogKey(
                Bytes.toBytes(TableName.OLD_ROOT_STR),
                TableName.OLD_ROOT_TABLE_NAME,
                ++logCount,
                timestamp,
                HConstants.DEFAULT_CLUSTER_ID),
            kvs);

    // create an old meta edit (hbase:meta).
    HLog.Entry oldMetaEntry =
        createAEntry(
            new HLogKey(
                Bytes.toBytes(TableName.OLD_META_STR),
                TableName.OLD_META_TABLE_NAME,
                ++logCount,
                timestamp,
                HConstants.DEFAULT_CLUSTER_ID),
            kvs);

    // write above entries
    writer = HLogFactory.createWALWriter(fs, path, conf);
    writer.append(tEntry);
    writer.append(rootEntry);
    writer.append(oldMetaEntry);

    // sync/close the writer
    writer.sync();
    writer.close();

    // read the log and see things are okay.
    reader = HLogFactory.createReader(fs, path, conf);
    HLog.Entry entry = reader.next();
    assertNotNull(entry);
    assertTrue(entry.getKey().getTablename().equals(t));
    assertEquals(
        Bytes.toString(entry.getKey().getEncodedRegionName()),
        Bytes.toString(tRegionInfo.getEncodedNameAsBytes()));

    // read the ROOT waledit, but that will be ignored, and the hbase:meta waledit will be read
    // instead.
    entry = reader.next();
    assertEquals(entry.getKey().getTablename(), TableName.META_TABLE_NAME);
    // should reach end of log
    assertNull(reader.next());
  } finally {
    if (writer != null) {
      writer.close();
    }
    if (reader != null) {
      reader.close();
    }
  }
}
@Test(timeout = 300000)
public void testDataCorrectnessReplayingRecoveredEdits() throws Exception {
  final int NUM_MASTERS = 1;
  final int NUM_RS = 3;
  TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);

  try {
    final byte[] TABLENAME = Bytes.toBytes("testDataCorrectnessReplayingRecoveredEdits");
    final byte[] FAMILY = Bytes.toBytes("family");
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();

    // Create table
    HTableDescriptor desc = new HTableDescriptor(TABLENAME);
    desc.addFamily(new HColumnDescriptor(FAMILY));
    HBaseAdmin hbaseAdmin = TEST_UTIL.getHBaseAdmin();
    hbaseAdmin.createTable(desc);
    assertTrue(hbaseAdmin.isTableAvailable(TABLENAME));

    // Put data: r1->v1
    Log.info("Loading r1 to v1 into " + Bytes.toString(TABLENAME));
    HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
    putDataAndVerify(table, "r1", FAMILY, "v1", 1);

    // Move region to target server
    HRegionInfo regionInfo = table.getRegionLocation("r1").getRegionInfo();
    int originServerNum = cluster.getServerWith(regionInfo.getRegionName());
    HRegionServer originServer = cluster.getRegionServer(originServerNum);
    int targetServerNum = (originServerNum + 1) % NUM_RS;
    HRegionServer targetServer = cluster.getRegionServer(targetServerNum);
    assertFalse(originServer.equals(targetServer));
    Log.info("Moving " + regionInfo.getEncodedName() + " to " + targetServer.getServerName());
    hbaseAdmin.move(
        regionInfo.getEncodedNameAsBytes(),
        Bytes.toBytes(targetServer.getServerName().getServerName()));
    do {
      Thread.sleep(1);
    } while (cluster.getServerWith(regionInfo.getRegionName()) == originServerNum);

    // Put data: r2->v2
    Log.info("Loading r2 to v2 into " + Bytes.toString(TABLENAME));
    putDataAndVerify(table, "r2", FAMILY, "v2", 2);

    // Move region back to origin server
    Log.info("Moving " + regionInfo.getEncodedName() + " to " + originServer.getServerName());
    hbaseAdmin.move(
        regionInfo.getEncodedNameAsBytes(),
        Bytes.toBytes(originServer.getServerName().getServerName()));
    do {
      Thread.sleep(1);
    } while (cluster.getServerWith(regionInfo.getRegionName()) == targetServerNum);

    // Put data: r3->v3
    Log.info("Loading r3 to v3 into " + Bytes.toString(TABLENAME));
    putDataAndVerify(table, "r3", FAMILY, "v3", 3);

    // Kill target server
    Log.info("Killing target server " + targetServer.getServerName());
    targetServer.kill();
    cluster.getRegionServerThreads().get(targetServerNum).join();
    // Wait until processing of the shutdown has finished
    while (master.getServerManager().areDeadServersInProgress()) {
      Thread.sleep(5);
    }

    // Kill origin server
    Log.info("Killing origin server " + originServer.getServerName());
    originServer.kill();
    cluster.getRegionServerThreads().get(originServerNum).join();

    // Put data: r4->v4
    Log.info("Loading r4 to v4 into " + Bytes.toString(TABLENAME));
    putDataAndVerify(table, "r4", FAMILY, "v4", 4);
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}