Code example #1
  private static void waitForAllRegionsOnline() throws Exception {
    // Wait for regions to come back online.

    boolean done = false;
    while (!done) {
      Thread.sleep(1);

      // Nothing in ZK RIT for a start
      ZKAssign.blockUntilNoRIT(TEST_UTIL.getZooKeeperWatcher());

      // Then we want all the regions to be marked as available...
      if (!isAllRegionsOnline()) continue;

      // And without any work in progress on the master side
      if (TEST_UTIL
          .getMiniHBaseCluster()
          .getMaster()
          .getAssignmentManager()
          .getRegionStates()
          .isRegionsInTransition()) continue;

      // nor on the region server side
      done = true;
      for (JVMClusterUtil.RegionServerThread rs :
          TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads()) {
        if (!rs.getRegionServer().getRegionsInTransitionInRS().isEmpty()) {
          done = false;
        }
      }
    }
  }
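For reference, the same condition can also be polled with the Waiter utility that code example #7 uses. The sketch below is not part of the original test: it assumes the same TEST_UTIL field and only checks the served-region count and master-side transitions, not the ZK RIT node or the region-server-side state that the loop above also verifies.

  // Sketch only (not in the original test): a simplified version of the wait,
  // expressed with org.apache.hadoop.hbase.Waiter as in code example #7.
  private static void waitForAllRegionsOnlineViaWaiter() throws Exception {
    Waiter.waitFor(
        TEST_UTIL.getConfiguration(),
        60000,
        new Waiter.Predicate<Exception>() {
          @Override
          public boolean evaluate() throws Exception {
            return isAllRegionsOnline()
                && !TEST_UTIL
                    .getMiniHBaseCluster()
                    .getMaster()
                    .getAssignmentManager()
                    .getRegionStates()
                    .isRegionsInTransition();
          }
        });
  }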
Code example #2
 /**
  * Test adding a server to the draining servers list and then moving regions off it. Make sure
  * that no regions are moved back to the draining server.
  *
  * @throws IOException
  * @throws KeeperException
  */
 @Test // (timeout=30000)
 public void testDrainingServerOffloading() throws Exception {
   // We need the master below.
   HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
   HRegionInfo hriToMoveBack = null;
   // Set first server as draining server.
   HRegionServer drainingServer =
       setDrainingServer(TEST_UTIL.getMiniHBaseCluster().getRegionServer(0));
   try {
     final int regionsOnDrainingServer = drainingServer.getNumberOfOnlineRegions();
     Assert.assertTrue(regionsOnDrainingServer > 0);
     List<HRegionInfo> hris = ProtobufUtil.getOnlineRegions(drainingServer);
     for (HRegionInfo hri : hris) {
       // Pass null and the AssignmentManager will choose a random server, but it
       // should exclude draining servers.
       master.moveRegion(
           null, RequestConverter.buildMoveRegionRequest(hri.getEncodedNameAsBytes(), null));
       // Save off region to move back.
       hriToMoveBack = hri;
     }
     // Wait for regions to come back online.
     waitForAllRegionsOnline();
     Assert.assertEquals(0, drainingServer.getNumberOfOnlineRegions());
   } finally {
     unsetDrainingServer(drainingServer);
   }
   // Now that we've unset the draining server, we should be able to move a region
   // back to what was the draining server.
   master.moveRegion(
       null,
       RequestConverter.buildMoveRegionRequest(
           hriToMoveBack.getEncodedNameAsBytes(),
           Bytes.toBytes(drainingServer.getServerName().toString())));
   // Wait for regions to come back online.
   waitForAllRegionsOnline();
   Assert.assertEquals(1, drainingServer.getNumberOfOnlineRegions());
 }
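The setDrainingServer and unsetDrainingServer helpers used above are not part of this excerpt. A minimal sketch of what they might look like, assuming the draining list is kept under the ZooKeeper watcher's draining znode; the helper bodies below are an assumption, not the original code:

 // Hypothetical sketch of the helpers used above (not shown in the original excerpt).
 // Assumes the draining list lives under zkw.drainingZNode.
 private static HRegionServer setDrainingServer(final HRegionServer hrs) throws KeeperException {
   ZooKeeperWatcher zkw = hrs.getZooKeeper();
   String znode = ZKUtil.joinZNode(zkw.drainingZNode, hrs.getServerName().toString());
   ZKUtil.createWithParents(zkw, znode);
   return hrs;
 }

 private static HRegionServer unsetDrainingServer(final HRegionServer hrs) throws KeeperException {
   ZooKeeperWatcher zkw = hrs.getZooKeeper();
   String znode = ZKUtil.joinZNode(zkw.drainingZNode, hrs.getServerName().toString());
   ZKUtil.deleteNode(zkw, znode);
   return hrs;
 }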
Code example #3
  /** Spin up a cluster with a bunch of regions on it. */
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniCluster(NB_SLAVES);
    TEST_UTIL.getHBaseCluster().waitForActiveAndReadyMaster();
    TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);

    final List<String> families = new ArrayList<String>(1);
    families.add("family");
    TEST_UTIL.createRandomTable("table", families, 1, 0, 0, COUNT_OF_REGIONS, 0);

    // Ensure a stable env
    TEST_UTIL.getHBaseAdmin().setBalancerRunning(false, false);

    boolean ready = false;
    while (!ready) {
      waitForAllRegionsOnline();

      // Assert that every regionserver has some regions on it.
      int i = 0;
      ready = true;
      while (i < NB_SLAVES && ready) {
        HRegionServer hrs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i);
        if (ProtobufUtil.getOnlineRegions(hrs).isEmpty()) {
          ready = false;
        }
        i++;
      }

      if (!ready) {
        TEST_UTIL.getHBaseAdmin().setBalancerRunning(true, true);
        Assert.assertTrue("Can't start a balance!", TEST_UTIL.getHBaseAdmin().balancer());
        TEST_UTIL.getHBaseAdmin().setBalancerRunning(false, false);
        Thread.sleep(100);
      }
    }
  }
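The setup above relies on a few static members of the test class that are not shown here. A hypothetical set of companion declarations, with field names taken from their usage in these excerpts; the concrete values and the teardown method are assumptions:

  // Hypothetical companion declarations (names follow the usage above; values are assumptions).
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static final int NB_SLAVES = 5;
  private static final int COUNT_OF_REGIONS = 30;

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }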
Code example #4
 private static boolean isAllRegionsOnline() {
   return TEST_UTIL.getMiniHBaseCluster().countServedRegions()
       == (COUNT_OF_REGIONS + 2 /*catalog regions*/);
 }
Code example #5
  /**
   * Test that draining servers are ignored even after killing regionserver(s). Verify that the
   * draining server is not given any of the dead servers' regions.
   *
   * @throws KeeperException
   * @throws IOException
   */
  @Test(timeout = 30000)
  public void testDrainingServerWithAbort() throws KeeperException, Exception {
    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();

    waitForAllRegionsOnline();

    final long regionCount = TEST_UTIL.getMiniHBaseCluster().countServedRegions();

    // Let's get a copy of the regions today.
    Collection<HRegion> regions = new ArrayList<HRegion>();
    for (int i = 0; i < NB_SLAVES; i++) {
      HRegionServer hrs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i);
      regions.addAll(hrs.getCopyOfOnlineRegionsSortedBySize().values());
    }

    // Choose the draining server
    HRegionServer drainingServer = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
    final int regionsOnDrainingServer = drainingServer.getNumberOfOnlineRegions();
    Assert.assertTrue(regionsOnDrainingServer > 0);

    ServerManager sm = master.getServerManager();

    Collection<HRegion> regionsBefore =
        drainingServer.getCopyOfOnlineRegionsSortedBySize().values();
    LOG.info("Regions of drained server are: " + regionsBefore);

    try {
      // Add first server to draining servers up in zk.
      setDrainingServer(drainingServer);

      // wait for the master to receive and manage the event
      while (sm.createDestinationServersList().contains(drainingServer.getServerName())) {
        Thread.sleep(1);
      }

      LOG.info("The available servers are: " + sm.createDestinationServersList());

      Assert.assertEquals(
          "Nothing should have happened here.",
          regionsOnDrainingServer,
          drainingServer.getNumberOfOnlineRegions());
      Assert.assertFalse(
          "We should not have regions in transition here. List is: "
              + master.getAssignmentManager().getRegionStates().getRegionsInTransition(),
          master.getAssignmentManager().getRegionStates().isRegionsInTransition());

      // Kill a few regionservers.
      for (int aborted = 0; aborted <= 2; aborted++) {
        HRegionServer hrs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(aborted + 1);
        hrs.abort("Aborting");
      }

      // Wait for regions to come back online again.
      waitForAllRegionsOnline();

      Collection<HRegion> regionsAfter =
          drainingServer.getCopyOfOnlineRegionsSortedBySize().values();
      LOG.info("Regions of drained server are: " + regionsAfter);

      Assert.assertEquals(
          "Test conditions are not met: regions were" + " created/deleted during the test. ",
          regionCount,
          TEST_UTIL.getMiniHBaseCluster().countServedRegions());

      // Assert the draining server still has the same regions.
      StringBuilder result = new StringBuilder();
      for (HRegion r : regionsAfter) {
        if (!regionsBefore.contains(r)) {
          result.append(r).append(" was added after the drain");
          if (regions.contains(r)) {
            result.append(" (existing region)");
          } else {
            result.append(" (new region)");
          }
          result.append("; ");
        }
      }
      for (HRegion r : regionsBefore) {
        if (!regionsAfter.contains(r)) {
          result.append(r).append(" was removed after the drain; ");
        }
      }
      Assert.assertTrue("Errors are: " + result.toString(), result.length() == 0);

    } finally {
      unsetDrainingServer(drainingServer);
    }
  }
Code example #6
  @Test
  public void testHostRank() throws Exception {

    if (System.getProperty("prop.mapred.job.tracker") != null) {
      if (LOG.isInfoEnabled())
        LOG.info("testHostRank: Skipping this test; it only runs in local mode.");
      return;
    }

    File jarTest = new File(System.getProperty("prop.jarLocation"));
    if (!jarTest.exists()) {
      fail(
          "Could not find Giraph jar at "
              + "location specified by 'prop.jarLocation'. "
              + "Make sure you built the main Giraph artifact.");
    }

    MiniHBaseCluster cluster = null;
    MiniZooKeeperCluster zkCluster = null;
    FileSystem fs = null;

    try {
      // using the restart method allows us to avoid having the hbase
      // root directory overwritten by /home/$username
      zkCluster = testUtil.startMiniZKCluster();
      testUtil.restartHBaseCluster(2);
      cluster = testUtil.getMiniHBaseCluster();

      final byte[] OL_BYTES = Bytes.toBytes("ol");
      final byte[] S_BYTES = Bytes.toBytes("s");
      final byte[] METADATA_BYTES = Bytes.toBytes("mtdt");
      final byte[] HR_BYTES = Bytes.toBytes("_hr_");
      final byte[] TAB = Bytes.toBytes(TABLE_NAME);

      Configuration conf = cluster.getConfiguration();
      HTableDescriptor desc = new HTableDescriptor(TAB);
      desc.addFamily(new HColumnDescriptor(OL_BYTES));
      desc.addFamily(new HColumnDescriptor(S_BYTES));
      desc.addFamily(new HColumnDescriptor(METADATA_BYTES));
      HBaseAdmin hbaseAdmin = new HBaseAdmin(conf);
      if (hbaseAdmin.isTableAvailable(TABLE_NAME)) {
        hbaseAdmin.disableTable(TABLE_NAME);
        hbaseAdmin.deleteTable(TABLE_NAME);
      }
      hbaseAdmin.createTable(desc);

      /*
       * Enter the initial data: outlinks (a,b), (b,c), (a,c), with initial scores
       * a = 1.0 (google), b = 1.0 (yahoo), c = 1.0 (bing).
       */
      HTable table = new HTable(conf, TABLE_NAME);

      Put p1 = new Put(Bytes.toBytes("com.google.www"));
      p1.add(OL_BYTES, Bytes.toBytes("www.yahoo.com"), Bytes.toBytes("ab"));

      Put p2 = new Put(Bytes.toBytes("com.google.www"));
      p2.add(OL_BYTES, Bytes.toBytes("www.bing.com"), Bytes.toBytes("ac"));
      p2.add(OL_BYTES, Bytes.toBytes("www.bing.com"), Bytes.toBytes("invalid1"));
      p2.add(OL_BYTES, Bytes.toBytes("www.google.com"), Bytes.toBytes("invalid2"));

      Put p3 = new Put(Bytes.toBytes("com.yahoo.www"));
      p3.add(OL_BYTES, Bytes.toBytes("www.bing.com"), Bytes.toBytes("bc"));
      // p3.add(OL_BYTES, Bytes.toBytes(""), Bytes.toBytes("invalid4"));

      Put p4 = new Put(Bytes.toBytes("com.bing.www"));
      // TODO: Handle the case below; use the Apache isValid method.
      p4.add(OL_BYTES, Bytes.toBytes("http://invalidurl"), Bytes.toBytes("invalid5"));
      p4.add(S_BYTES, S_BYTES, Bytes.toBytes(10.0d));

      Put p5 = new Put(Bytes.toBytes("dummy"));
      p5.add(S_BYTES, S_BYTES, Bytes.toBytes(10.0d));

      table.put(p1);
      table.put(p2);
      table.put(p3);
      table.put(p4);
      table.put(p5);

      // Set Giraph configuration
      // now operate over HBase using Vertex I/O formats
      conf.set(TableInputFormat.INPUT_TABLE, TABLE_NAME);
      conf.set(TableOutputFormat.OUTPUT_TABLE, TABLE_NAME);

      // Start the giraph job
      GiraphJob giraphJob = new GiraphJob(conf, BspCase.getCallingMethodName());
      GiraphConfiguration giraphConf = giraphJob.getConfiguration();
      giraphConf.setZooKeeperConfiguration(cluster.getMaster().getZooKeeper().getQuorum());
      setupConfiguration(giraphJob);
      giraphConf.setComputationClass(LinkRankComputation.class);
      giraphConf.setMasterComputeClass(LinkRankVertexMasterCompute.class);
      giraphConf.setOutEdgesClass(ByteArrayEdges.class);
      giraphConf.setVertexInputFormatClass(Nutch2HostInputFormat.class);
      giraphConf.setVertexOutputFormatClass(Nutch2HostOutputFormat.class);
      giraphConf.setInt("giraph.linkRank.superstepCount", 10);
      giraphConf.setInt("giraph.linkRank.scale", 10);
      giraphConf.set("giraph.linkRank.family", "mtdt");
      giraphConf.set("giraph.linkRank.qualifier", "_hr_");
      giraphConf.setVertexInputFilterClass(HostRankVertexFilter.class);
      assertTrue(giraphJob.run(true));

      if (LOG.isInfoEnabled()) LOG.info("Giraph job successful. Checking output qualifier.");

      // Check the results
      HashMap<String, Double> expectedValues = new HashMap<String, Double>();
      expectedValues.put("com.google.www", 1.3515060339386287d);
      expectedValues.put("com.yahoo.www", 4.144902009567587d);
      expectedValues.put("com.bing.www", 9.063893290511482d);

      for (String key : expectedValues.keySet()) {
        Result result = table.get(new Get(Bytes.toBytes(key)));
        byte[] calculatedScoreByte = result.getValue(METADATA_BYTES, HR_BYTES);
        assertNotNull(calculatedScoreByte);
        assertTrue(calculatedScoreByte.length > 0);
        Assert.assertEquals(
            "Scores are not the same",
            expectedValues.get(key),
            Bytes.toDouble(calculatedScoreByte),
            DELTA);
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
      if (zkCluster != null) {
        zkCluster.shutdown();
      }
      // clean test files
      if (fs != null) {
        fs.delete(hbaseRootdir);
      }
    }
  }
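After the job succeeds, the computed scores live in the mtdt:_hr_ column of each row. A small sketch, not part of the original test, of how those scores could be scanned back for inspection; it would belong inside the try block above, where the table handle and the column constants are in scope:

      // Sketch only: scan the mtdt:_hr_ column to inspect the computed scores.
      Scan scan = new Scan();
      scan.addColumn(METADATA_BYTES, HR_BYTES);
      ResultScanner scanner = table.getScanner(scan);
      try {
        for (Result row : scanner) {
          byte[] score = row.getValue(METADATA_BYTES, HR_BYTES);
          if (score != null) {
            LOG.info(Bytes.toString(row.getRow()) + " -> " + Bytes.toDouble(score));
          }
        }
      } finally {
        scanner.close();
      }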
Code example #7
File: TestIOFencing.java  Project: grokcoder/pbase
  public void doTest(Class<?> regionClass, boolean distributedLogReplay) throws Exception {
    Configuration c = TEST_UTIL.getConfiguration();
    c.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, distributedLogReplay);
    // Insert our custom region
    c.setClass(HConstants.REGION_IMPL, regionClass, HRegion.class);
    c.setBoolean("dfs.support.append", true);
    // Encourage plenty of flushes
    c.setLong("hbase.hregion.memstore.flush.size", 200000);
    c.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, ConstantSizeRegionSplitPolicy.class.getName());
    // Only run compaction when we tell it to
    c.setInt("hbase.hstore.compactionThreshold", 1000);
    c.setLong("hbase.hstore.blockingStoreFiles", 1000);
    // Compact quickly after we tell it to!
    c.setInt("hbase.regionserver.thread.splitcompactcheckfrequency", 1000);
    LOG.info("Starting mini cluster");
    TEST_UTIL.startMiniCluster(1);
    CompactionBlockerRegion compactingRegion = null;
    Admin admin = null;
    try {
      LOG.info("Creating admin");
      admin = TEST_UTIL.getConnection().getAdmin();
      LOG.info("Creating table");
      TEST_UTIL.createTable(TABLE_NAME, FAMILY);
      Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME);
      LOG.info("Loading test table");
      // Find the region
      List<HRegion> testRegions = TEST_UTIL.getMiniHBaseCluster().findRegionsForTable(TABLE_NAME);
      assertEquals(1, testRegions.size());
      compactingRegion = (CompactionBlockerRegion) testRegions.get(0);
      LOG.info("Blocking compactions");
      compactingRegion.stopCompactions();
      long lastFlushTime = compactingRegion.getLastFlushTime();
      // Load some rows
      TEST_UTIL.loadNumericRows(table, FAMILY, 0, FIRST_BATCH_COUNT);

      // add a compaction from an older (non-existing) region to see whether we successfully skip
      // those entries
      HRegionInfo oldHri =
          new HRegionInfo(table.getName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      CompactionDescriptor compactionDescriptor =
          ProtobufUtil.toCompactionDescriptor(
              oldHri,
              FAMILY,
              Lists.newArrayList(new Path("/a")),
              Lists.newArrayList(new Path("/b")),
              new Path("store_dir"));
      WALUtil.writeCompactionMarker(
          compactingRegion.getWAL(),
          table.getTableDescriptor(),
          oldHri,
          compactionDescriptor,
          new AtomicLong(Long.MAX_VALUE - 100));

      // Wait till flush has happened, otherwise there won't be multiple store files
      long startWaitTime = System.currentTimeMillis();
      while (compactingRegion.getLastFlushTime() <= lastFlushTime
          || compactingRegion.countStoreFiles() <= 1) {
        LOG.info("Waiting for the region to flush " + compactingRegion.getRegionNameAsString());
        Thread.sleep(1000);
        assertTrue(
            "Timed out waiting for the region to flush",
            System.currentTimeMillis() - startWaitTime < 30000);
      }
      assertTrue(compactingRegion.countStoreFiles() > 1);
      final byte[] REGION_NAME = compactingRegion.getRegionName();
      LOG.info("Asking for compaction");
      ((HBaseAdmin) admin).majorCompact(TABLE_NAME.getName());
      LOG.info("Waiting for compaction to be about to start");
      compactingRegion.waitForCompactionToBlock();
      LOG.info("Starting a new server");
      RegionServerThread newServerThread = TEST_UTIL.getMiniHBaseCluster().startRegionServer();
      final HRegionServer newServer = newServerThread.getRegionServer();
      LOG.info("Killing region server ZK lease");
      TEST_UTIL.expireRegionServerSession(0);
      CompactionBlockerRegion newRegion = null;
      startWaitTime = System.currentTimeMillis();
      LOG.info("Waiting for the new server to pick up the region " + Bytes.toString(REGION_NAME));

      // wait for region to be assigned and to go out of log replay if applicable
      Waiter.waitFor(
          c,
          60000,
          new Waiter.Predicate<Exception>() {
            @Override
            public boolean evaluate() throws Exception {
              HRegion newRegion = newServer.getOnlineRegion(REGION_NAME);
              return newRegion != null && !newRegion.isRecovering();
            }
          });

      newRegion = (CompactionBlockerRegion) newServer.getOnlineRegion(REGION_NAME);

      LOG.info("Allowing compaction to proceed");
      compactingRegion.allowCompactions();
      while (compactingRegion.compactCount == 0) {
        Thread.sleep(1000);
      }
      // The server we killed stays up until the compaction that was started before it was killed
      // completes. In the logs you should see the old regionserver now going down.
      LOG.info("Compaction finished");
      // After compaction of old region finishes on the server that was going down, make sure that
      // all the files we expect are still working when region is up in new location.
      FileSystem fs = newRegion.getFilesystem();
      for (String f : newRegion.getStoreFileList(new byte[][] {FAMILY})) {
        assertTrue("After compaction, does not exist: " + f, fs.exists(new Path(f)));
      }
      // If we survive the split keep going...
      // Now we make sure that the region isn't totally confused.  Load up more rows.
      TEST_UTIL.loadNumericRows(
          table, FAMILY, FIRST_BATCH_COUNT, FIRST_BATCH_COUNT + SECOND_BATCH_COUNT);
      ((HBaseAdmin) admin).majorCompact(TABLE_NAME.getName());
      startWaitTime = System.currentTimeMillis();
      while (newRegion.compactCount == 0) {
        Thread.sleep(1000);
        assertTrue(
            "New region never compacted", System.currentTimeMillis() - startWaitTime < 180000);
      }
      assertEquals(FIRST_BATCH_COUNT + SECOND_BATCH_COUNT, TEST_UTIL.countRows(table));
    } finally {
      if (compactingRegion != null) {
        compactingRegion.allowCompactions();
      }
      if (admin != null) {
        admin.close();
      }
      TEST_UTIL.shutdownMiniCluster();
    }
  }
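doTest is parameterized by the region implementation and the recovery mode, so the surrounding test class presumably drives it from small @Test methods. A hypothetical pair of callers is sketched below; the region subclass name is an assumption, since this excerpt only shows the CompactionBlockerRegion cast:

  // Hypothetical callers of doTest; the concrete CompactionBlockerRegion
  // subclass is not shown in this excerpt.
  @Test
  public void testFencingWithLogSplitting() throws Exception {
    doTest(BlockingCompactionRegion.class, false); // recover via log splitting
  }

  @Test
  public void testFencingWithDistributedLogReplay() throws Exception {
    doTest(BlockingCompactionRegion.class, true); // recover via distributed log replay
  }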