@Test(timeout = 60000)
 public void testExceptionDuringInitialization() throws Exception {
   Configuration conf = TEST_UTIL.getConfiguration();
   conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); // Let's fail fast.
   conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true);
   conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
   TEST_UTIL.startMiniCluster(2);
   try {
     MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
     // Trigger one regionserver to fail as if it came up with a coprocessor
     // that fails during initialization
     final HRegionServer regionServer = cluster.getRegionServer(0);
     conf.set(
         CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
         FailedInitializationObserver.class.getName());
     regionServer
         .getRegionServerCoprocessorHost()
         .loadSystemCoprocessors(conf, CoprocessorHost.REGION_COPROCESSOR_CONF_KEY);
     TEST_UTIL.waitFor(
         10000,
         1000,
         new Predicate<Exception>() {
           @Override
           public boolean evaluate() throws Exception {
             return regionServer.isAborted();
           }
         });
   } finally {
     TEST_UTIL.shutdownMiniCluster();
   }
 }
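 // A minimal sketch of the FailedInitializationObserver the test loads, assuming all it
 // needs to do is throw from start() so that coprocessor initialization fails and, with
 // ABORT_ON_ERROR_KEY set, the region server aborts. The base class and message are
 // illustrative, not necessarily the test's exact implementation.
 public static class FailedInitializationObserver extends BaseRegionServerObserver {
   @Override
   public void start(CoprocessorEnvironment e) throws IOException {
     throw new IOException("Simulating a coprocessor that fails during initialization");
   }
 }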
  @Test
  public void testPreWALRestoreSkip() throws Exception {
    LOG.info(TestRegionObserverInterface.class.getName() + ".testPreWALRestoreSkip");
    TableName tableName = TableName.valueOf(SimpleRegionObserver.TABLE_SKIPPED);
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});

    JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
    ServerName sn2 = rs1.getRegionServer().getServerName();
    String regEN = table.getRegionLocations().firstEntry().getKey().getEncodedName();

    util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
    while (!sn2.equals(table.getRegionLocations().firstEntry().getValue())) {
      Thread.sleep(100);
    }

    Put put = new Put(ROW);
    put.add(A, A, A);
    put.add(B, B, B);
    put.add(C, C, C);
    table.put(put);
    table.flushCommits();

    cluster.killRegionServer(rs1.getRegionServer().getServerName());
    Threads.sleep(20000); // just to be sure that the kill has fully started.
    util.waitUntilAllRegionsAssigned(tableName);

    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"getCtPreWALRestore", "getCtPostWALRestore"},
        tableName,
        new Integer[] {0, 0});

    util.deleteTable(tableName);
    table.close();
  }
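  // A hedged sketch of the counting pattern behind the getCtPreWALRestore and
  // getCtPostWALRestore accessors verified above: SimpleRegionObserver presumably bumps a
  // counter inside each WAL-restore hook (the post-side counter is analogous). The field
  // name and hook signature are illustrative and vary across HBase versions.
  private final AtomicInteger ctPreWALRestore = new AtomicInteger(0);

  @Override
  public void preWALRestore(ObserverContext<RegionCoprocessorEnvironment> env,
      HRegionInfo info, HLogKey logKey, WALEdit logEdit) throws IOException {
    ctPreWALRestore.incrementAndGet();
  }

  public int getCtPreWALRestore() {
    return ctPreWALRestore.get();
  }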
 @BeforeClass
 public static void beforeAllTests() throws Exception {
   // Start a cluster
   TEST_UTIL.startMiniCluster(1, NB_SERVERS, null, MyMaster.class, null);
   MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
   master = cluster.getMaster();
   master.balanceSwitch(false);
   admin = TEST_UTIL.getHBaseAdmin();
 }
  @Test
  public void testRecovery() throws Exception {
    LOG.info(TestRegionObserverInterface.class.getName() + ".testRecovery");
    TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRecovery");
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});
    try {
      JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
      ServerName sn2 = rs1.getRegionServer().getServerName();
      String regEN = table.getRegionLocations().firstEntry().getKey().getEncodedName();

      util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
      while (!sn2.equals(table.getRegionLocations().firstEntry().getValue())) {
        Thread.sleep(100);
      }

      Put put = new Put(ROW);
      put.add(A, A, A);
      put.add(B, B, B);
      put.add(C, C, C);
      table.put(put);

      verifyMethodResult(
          SimpleRegionObserver.class,
          new String[] {
            "hadPreGet",
            "hadPostGet",
            "hadPrePut",
            "hadPostPut",
            "hadPreBatchMutate",
            "hadPostBatchMutate",
            "hadDelete"
          },
          tableName,
          new Boolean[] {false, false, true, true, true, true, false});

      verifyMethodResult(
          SimpleRegionObserver.class,
          new String[] {"getCtPreWALRestore", "getCtPostWALRestore", "getCtPrePut", "getCtPostPut"},
          tableName,
          new Integer[] {0, 0, 1, 1});

      cluster.killRegionServer(rs1.getRegionServer().getServerName());
      Threads.sleep(1000); // Let the kill soak in.
      util.waitUntilAllRegionsAssigned(tableName);
      LOG.info("All regions assigned");

      verifyMethodResult(
          SimpleRegionObserver.class,
          new String[] {"getCtPrePut", "getCtPostPut"},
          tableName,
          new Integer[] {0, 0});
    } finally {
      util.deleteTable(tableName);
      table.close();
    }
  }
 @BeforeClass
 public static void startCluster() throws Exception {
   LOG.info("Starting cluster");
   TEST_UTIL = new HBaseTestingUtility();
   TEST_UTIL.startMiniCluster(1, 1, 1, null, MyMaster.class, null);
   cluster = TEST_UTIL.getHBaseCluster();
   LOG.info("Waiting for active/ready master");
   cluster.waitForActiveAndReadyMaster();
   master = cluster.getMaster();
 }
 @Test
 public void testMoveRegionWhenNotInitialized() {
   MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
   HMaster m = cluster.getMaster();
   try {
     m.setInitialized(false); // fake it, set back later
     HRegionInfo meta = HRegionInfo.FIRST_META_REGIONINFO;
     m.move(meta.getEncodedNameAsBytes(), null);
     fail("Region should not be moved since master is not initialized");
   } catch (IOException ioe) {
     assertTrue(ioe instanceof PleaseHoldException);
   } finally {
     m.setInitialized(true);
   }
 }
  @Test(timeout = 300000)
  public void testClusterRequests() throws Exception {

    // Send a fake report to the master and check how the metric value changes.
    RegionServerStatusProtos.RegionServerReportRequest.Builder request =
        RegionServerStatusProtos.RegionServerReportRequest.newBuilder();
    ServerName serverName = cluster.getMaster(0).getServerName();
    request.setServer(ProtobufUtil.toServerName(serverName));

    MetricsMasterSource masterSource = master.getMasterMetrics().getMetricsSource();
    ClusterStatusProtos.ServerLoad sl =
        ClusterStatusProtos.ServerLoad.newBuilder().setTotalNumberOfRequests(10000).build();
    masterSource.init();
    request.setLoad(sl);
    master.getMasterRpcServices().regionServerReport(null, request.build());

    metricsHelper.assertCounter("cluster_requests", 10000, masterSource);

    sl = ClusterStatusProtos.ServerLoad.newBuilder().setTotalNumberOfRequests(15000).build();
    request.setLoad(sl);
    master.getMasterRpcServices().regionServerReport(null, request.build());

    metricsHelper.assertCounter("cluster_requests", 15000, masterSource);

    master.getMasterRpcServices().regionServerReport(null, request.build());

    metricsHelper.assertCounter("cluster_requests", 15000, masterSource);
    master.stopMaster();
  }
 @Test
 public void test() throws IOException, InterruptedException {
   testUtil
       .getHBaseAdmin()
       .createNamespace(NamespaceDescriptor.create(tableName.getNamespaceAsString()).build());
   Table table = testUtil.createTable(tableName, families);
   table.put(
       new Put(Bytes.toBytes("k")).addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("v")));
   MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster();
   List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
   Region region = null;
   for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
     HRegionServer hrs = rsts.get(i).getRegionServer();
     for (Region r : hrs.getOnlineRegions(tableName)) {
       region = r;
       break;
     }
   }
   assertNotNull(region);
   Thread.sleep(2000);
   RegionStoreSequenceIds ids =
       testUtil
           .getHBaseCluster()
           .getMaster()
           .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
   assertEquals(HConstants.NO_SEQNUM, ids.getLastFlushedSequenceId());
   // This will be the sequenceid just before that of the earliest edit in memstore.
   long storeSequenceId = ids.getStoreSequenceId(0).getSequenceId();
   assertTrue(storeSequenceId > 0);
   testUtil.getHBaseAdmin().flush(tableName);
   Thread.sleep(2000);
   ids =
       testUtil
           .getHBaseCluster()
           .getMaster()
           .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
   assertTrue(
       ids.getLastFlushedSequenceId() + " > " + storeSequenceId,
       ids.getLastFlushedSequenceId() > storeSequenceId);
   assertEquals(ids.getLastFlushedSequenceId(), ids.getStoreSequenceId(0).getSequenceId());
   table.close();
 }
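  // A hedged alternative to the fixed Thread.sleep(2000) calls above: poll the master with
  // the Waiter predicate pattern used elsewhere in these tests until the flushed sequence
  // id has advanced past storeSequenceId. Sketch only; region and storeSequenceId would
  // need to be declared final to be captured by the anonymous class.
  testUtil.waitFor(10000, 100, new Waiter.Predicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      RegionStoreSequenceIds current =
          testUtil
              .getHBaseCluster()
              .getMaster()
              .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
      return current.getLastFlushedSequenceId() > storeSequenceId;
    }
  });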
  @Test
  public void testWholesomeMerge() throws Exception {
    LOG.info("Starting testWholesomeMerge");
    final TableName tableName = TableName.valueOf("testWholesomeMerge");

    // Create table and load data.
    Table table = createTableAndLoadData(master, tableName);
    // Merge 1st and 2nd region
    mergeRegionsAndVerifyRegionNum(master, tableName, 0, 1, INITIAL_REGION_NUM - 1);

    // Merge 2nd and 3rd region
    PairOfSameType<HRegionInfo> mergedRegions =
        mergeRegionsAndVerifyRegionNum(master, tableName, 1, 2, INITIAL_REGION_NUM - 2);

    verifyRowCount(table, ROWSIZE);

    // Randomly choose one of the two merged regions
    HRegionInfo hri =
        RandomUtils.nextBoolean() ? mergedRegions.getFirst() : mergedRegions.getSecond();
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    AssignmentManager am = cluster.getMaster().getAssignmentManager();
    RegionStates regionStates = am.getRegionStates();
    long start = EnvironmentEdgeManager.currentTime();
    while (!regionStates.isRegionInState(hri, State.MERGED)) {
      assertFalse(
          "Timed out in waiting one merged region to be in state MERGED",
          EnvironmentEdgeManager.currentTime() - start > 60000);
      Thread.sleep(500);
    }

    // We should not be able to assign it again
    am.assign(hri, true);
    assertFalse("Merged region can't be assigned", regionStates.isRegionInTransition(hri));
    assertTrue(regionStates.isRegionInState(hri, State.MERGED));

    // We should not be able to unassign it either
    am.unassign(hri, null);
    assertFalse("Merged region can't be unassigned", regionStates.isRegionInTransition(hri));
    assertTrue(regionStates.isRegionInState(hri, State.MERGED));

    table.close();
  }
  @Test
  @SuppressWarnings("deprecation")
  public void testMasterOpsWhileSplitting() throws Exception {
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HMaster m = cluster.getMaster();

    try (Table ht = TEST_UTIL.createTable(TABLENAME, FAMILYNAME)) {
      assertTrue(m.getTableStateManager().isTableState(TABLENAME, TableState.State.ENABLED));
      TEST_UTIL.loadTable(ht, FAMILYNAME, false);
    }

    List<Pair<HRegionInfo, ServerName>> tableRegions =
        MetaTableAccessor.getTableRegionsAndLocations(m.getConnection(), TABLENAME);
    LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));
    assertEquals(1, tableRegions.size());
    assertArrayEquals(HConstants.EMPTY_START_ROW, tableRegions.get(0).getFirst().getStartKey());
    assertArrayEquals(HConstants.EMPTY_END_ROW, tableRegions.get(0).getFirst().getEndKey());

    // Now trigger a split and stop when the split is in progress
    LOG.info("Splitting table");
    TEST_UTIL.getAdmin().split(TABLENAME);
    LOG.info("Waiting for split result to be about to open");
    RegionStates regionStates = m.getAssignmentManager().getRegionStates();
    while (regionStates.getRegionsOfTable(TABLENAME).size() <= 1) {
      Thread.sleep(100);
    }
    LOG.info("Making sure we can call getTableRegions while opening");
    tableRegions =
        MetaTableAccessor.getTableRegionsAndLocations(m.getConnection(), TABLENAME, false);

    LOG.info("Regions: " + Joiner.on(',').join(tableRegions));
    // We have three regions because one is split-in-progress
    assertEquals(3, tableRegions.size());
    LOG.info("Making sure we can call getTableRegionClosest while opening");
    Pair<HRegionInfo, ServerName> pair = m.getTableRegionForRow(TABLENAME, Bytes.toBytes("cde"));
    LOG.info("Result is: " + pair);
    Pair<HRegionInfo, ServerName> tableRegionFromName =
        MetaTableAccessor.getRegion(m.getConnection(), pair.getFirst().getRegionName());
    assertEquals(tableRegionFromName.getFirst(), pair.getFirst());
  }
  // Check each region of the table to verify whether the coprocessor upcalls were invoked.
  private void verifyMethodResult(
      Class<?> c, String[] methodName, TableName tableName, Object[] value) throws IOException {
    try {
      for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) {
        if (!t.isAlive() || t.getRegionServer().isAborted() || t.getRegionServer().isStopping()) {
          continue;
        }
        for (HRegionInfo r : ProtobufUtil.getOnlineRegions(t.getRegionServer())) {
          if (!r.getTable().equals(tableName)) {
            continue;
          }
          RegionCoprocessorHost cph =
              t.getRegionServer().getOnlineRegion(r.getRegionName()).getCoprocessorHost();

          Coprocessor cp = cph.findCoprocessor(c.getName());
          assertNotNull(cp);
          for (int i = 0; i < methodName.length; ++i) {
            Method m = c.getMethod(methodName[i]);
            Object o = m.invoke(cp);
            assertTrue(
                "Result of "
                    + c.getName()
                    + "."
                    + methodName[i]
                    + " is expected to be "
                    + value[i].toString()
                    + ", while we get "
                    + o.toString(),
                o.equals(value[i]));
          }
        }
      }
    } catch (Exception e) {
      // Wrap with the original exception as the cause so the stack trace is preserved.
      throw new IOException(e);
    }
  }
  @Test(timeout = 60000)
  public void testServerListener() throws IOException, InterruptedException {
    ServerManager serverManager = TEST_UTIL.getHBaseCluster().getMaster().getServerManager();

    DummyServerListener listener = new DummyServerListener();
    serverManager.registerListener(listener);
    try {
      MiniHBaseCluster miniCluster = TEST_UTIL.getMiniHBaseCluster();

      // Start a new Region Server
      miniCluster.startRegionServer();
      listener.awaitModifications(1);
      assertEquals(1, listener.getAddedCount());
      assertEquals(0, listener.getRemovedCount());

      // Start another Region Server
      listener.reset();
      miniCluster.startRegionServer();
      listener.awaitModifications(1);
      assertEquals(1, listener.getAddedCount());
      assertEquals(0, listener.getRemovedCount());

      int nrs = miniCluster.getRegionServerThreads().size();

      // Stop a Region Server
      listener.reset();
      miniCluster.stopRegionServer(nrs - 1);
      listener.awaitModifications(1);
      assertEquals(0, listener.getAddedCount());
      assertEquals(1, listener.getRemovedCount());

      // Stop another Region Server
      listener.reset();
      miniCluster.stopRegionServer(nrs - 2);
      listener.awaitModifications(1);
      assertEquals(0, listener.getAddedCount());
      assertEquals(1, listener.getRemovedCount());
    } finally {
      serverManager.unregisterListener(listener);
    }
  }
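  // A hedged sketch of the DummyServerListener helper used above, assuming it implements
  // ServerListener and counts callbacks, with a semaphore so awaitModifications(n) can
  // block until n server changes have been observed. The real helper may differ in detail.
  private static class DummyServerListener implements ServerListener {
    private final AtomicInteger addedCount = new AtomicInteger();
    private final AtomicInteger removedCount = new AtomicInteger();
    private final Semaphore semaphore = new Semaphore(0);

    @Override
    public void serverAdded(ServerName serverName) {
      addedCount.incrementAndGet();
      semaphore.release();
    }

    @Override
    public void serverRemoved(ServerName serverName) {
      removedCount.incrementAndGet();
      semaphore.release();
    }

    public void awaitModifications(int count) throws InterruptedException {
      semaphore.acquire(count);
    }

    public void reset() {
      addedCount.set(0);
      removedCount.set(0);
      semaphore.drainPermits();
    }

    public int getAddedCount() {
      return addedCount.get();
    }

    public int getRemovedCount() {
      return removedCount.get();
    }
  }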
  /** This tests retaining assignments on a cluster restart */
  @Test(timeout = 300000)
  public void testRetainAssignmentOnRestart() throws Exception {
    UTIL.startMiniCluster(2);
    while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) {
      Threads.sleep(1);
    }
    // Turn off balancer
    UTIL.getMiniHBaseCluster().getMaster().getMasterRpcServices().synchronousBalanceSwitch(false);
    LOG.info("\n\nCreating tables");
    for (byte[] TABLE : TABLES) {
      UTIL.createTable(TABLE, FAMILY);
    }
    for (byte[] TABLE : TABLES) {
      UTIL.waitTableEnabled(TABLE);
    }

    HMaster master = UTIL.getMiniHBaseCluster().getMaster();
    UTIL.waitUntilNoRegionsInTransition(120000);

    // We don't have to use SnapshotOfRegionAssignmentFromMeta.
    // We use it here because AM used to use it to load all user region placements
    SnapshotOfRegionAssignmentFromMeta snapshot =
        new SnapshotOfRegionAssignmentFromMeta(master.getShortCircuitConnection());
    snapshot.initialize();
    Map<HRegionInfo, ServerName> regionToRegionServerMap = snapshot.getRegionToRegionServerMap();

    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    List<JVMClusterUtil.RegionServerThread> threads = cluster.getLiveRegionServerThreads();
    assertEquals(2, threads.size());
    int[] rsPorts = new int[3];
    for (int i = 0; i < 2; i++) {
      rsPorts[i] = threads.get(i).getRegionServer().getServerName().getPort();
    }
    rsPorts[2] = cluster.getMaster().getServerName().getPort();
    for (ServerName serverName : regionToRegionServerMap.values()) {
      boolean found = false; // Test only, no need to optimize
      for (int k = 0; k < 3 && !found; k++) {
        found = serverName.getPort() == rsPorts[k];
      }
      assertTrue(found);
    }

    LOG.info("\n\nShutting down HBase cluster");
    cluster.shutdown();
    cluster.waitUntilShutDown();

    LOG.info("\n\nSleeping a bit");
    Thread.sleep(2000);

    LOG.info("\n\nStarting cluster the second time with the same ports");
    try {
      cluster.getConf().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 4);
      master = cluster.startMaster().getMaster();
      for (int i = 0; i < 3; i++) {
        cluster.getConf().setInt(HConstants.REGIONSERVER_PORT, rsPorts[i]);
        cluster.startRegionServer();
      }
    } finally {
      // Reset region server port so as not to conflict with other tests
      cluster.getConf().setInt(HConstants.REGIONSERVER_PORT, 0);
      cluster.getConf().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 2);
    }

    // Make sure live regionservers are on the same host/port
    List<ServerName> localServers = master.getServerManager().getOnlineServersList();
    assertEquals(4, localServers.size());
    for (int i = 0; i < 3; i++) {
      boolean found = false;
      for (ServerName serverName : localServers) {
        if (serverName.getPort() == rsPorts[i]) {
          found = true;
          break;
        }
      }
      assertTrue(found);
    }

    // Wait till master is initialized and all regions are assigned
    RegionStates regionStates = master.getAssignmentManager().getRegionStates();
    int expectedRegions = regionToRegionServerMap.size() + 1;
    while (!master.isInitialized()
        || regionStates.getRegionAssignments().size() != expectedRegions) {
      Threads.sleep(100);
    }

    snapshot = new SnapshotOfRegionAssignmentFromMeta(master.getShortCircuitConnection());
    snapshot.initialize();
    Map<HRegionInfo, ServerName> newRegionToRegionServerMap = snapshot.getRegionToRegionServerMap();
    assertEquals(regionToRegionServerMap.size(), newRegionToRegionServerMap.size());
    for (Map.Entry<HRegionInfo, ServerName> entry : newRegionToRegionServerMap.entrySet()) {
      if (TableName.NAMESPACE_TABLE_NAME.equals(entry.getKey().getTable())) continue;
      ServerName oldServer = regionToRegionServerMap.get(entry.getKey());
      ServerName currentServer = entry.getValue();
      assertEquals(oldServer.getHostAndPort(), currentServer.getHostAndPort());
      assertNotEquals(oldServer.getStartcode(), currentServer.getStartcode());
    }
  }
  @Test
  public void testRegionMerge() throws Exception {
    String nsp1 = prefix + "_regiontest";
    NamespaceDescriptor nspDesc =
        NamespaceDescriptor.create(nsp1)
            .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "3")
            .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2")
            .build();
    ADMIN.createNamespace(nspDesc);
    final TableName tableTwo = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2");
    byte[] columnFamily = Bytes.toBytes("info");
    HTableDescriptor tableDescOne = new HTableDescriptor(tableTwo);
    tableDescOne.addFamily(new HColumnDescriptor(columnFamily));
    final int initialRegions = 3;
    ADMIN.createTable(tableDescOne, Bytes.toBytes("1"), Bytes.toBytes("2000"), initialRegions);
    Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
    try (Table table = connection.getTable(tableTwo)) {
      UTIL.loadNumericRows(table, Bytes.toBytes("info"), 1000, 1999);
    }
    ADMIN.flush(tableTwo);
    List<HRegionInfo> hris = ADMIN.getTableRegions(tableTwo);
    Collections.sort(hris);
    // merge the two regions
    final Set<String> encodedRegionNamesToMerge =
        Sets.newHashSet(hris.get(0).getEncodedName(), hris.get(1).getEncodedName());
    ADMIN.mergeRegions(
        hris.get(0).getEncodedNameAsBytes(), hris.get(1).getEncodedNameAsBytes(), false);
    UTIL.waitFor(
        10000,
        100,
        new Waiter.ExplainingPredicate<Exception>() {

          @Override
          public boolean evaluate() throws Exception {
            RegionStates regionStates =
                UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
              if (encodedRegionNamesToMerge.contains(hri.getEncodedName())) {
                return false;
              }
              if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                return false;
              }
            }
            return true;
          }

          @Override
          public String explainFailure() throws Exception {
            RegionStates regionStates =
                UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
              if (encodedRegionNamesToMerge.contains(hri.getEncodedName())) {
                return hri + " which is expected to be merged is still online";
              }
              if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                return hri + " is still in not opened";
              }
            }
            return "Unknown";
          }
        });
    hris = ADMIN.getTableRegions(tableTwo);
    assertEquals(initialRegions - 1, hris.size());
    Collections.sort(hris);

    final HRegionInfo hriToSplit = hris.get(1);
    ADMIN.split(tableTwo, Bytes.toBytes("500"));

    UTIL.waitFor(
        10000,
        100,
        new Waiter.ExplainingPredicate<Exception>() {

          @Override
          public boolean evaluate() throws Exception {
            RegionStates regionStates =
                UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
              if (hri.getEncodedName().equals(hriToSplit.getEncodedName())) {
                return false;
              }
              if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                return false;
              }
            }
            return true;
          }

          @Override
          public String explainFailure() throws Exception {
            RegionStates regionStates =
                UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
            for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) {
              if (hri.getEncodedName().equals(hriToSplit.getEncodedName())) {
                return hriToSplit + " which is expected to be split is still online";
              }
              if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) {
                return hri + " is still in not opened";
              }
            }
            return "Unknown";
          }
        });
    hris = ADMIN.getTableRegions(tableTwo);
    assertEquals(initialRegions, hris.size());
    Collections.sort(hris);

    // fail region merge through Coprocessor hook
    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    HRegionServer regionServer = cluster.getRegionServer(0);
    RegionServerCoprocessorHost cpHost = regionServer.getRegionServerCoprocessorHost();
    Coprocessor coprocessor = cpHost.findCoprocessor(CPRegionServerObserver.class.getName());
    CPRegionServerObserver regionServerObserver = (CPRegionServerObserver) coprocessor;
    regionServerObserver.failMerge(true);
    regionServerObserver.triggered = false;

    ADMIN.mergeRegions(
        hris.get(1).getEncodedNameAsBytes(), hris.get(2).getEncodedNameAsBytes(), false);
    regionServerObserver.waitUtilTriggered();
    hris = ADMIN.getTableRegions(tableTwo);
    assertEquals(initialRegions, hris.size());
    Collections.sort(hris);
    // verify that we cannot split
    HRegionInfo hriToSplit2 = hris.get(1);
    ADMIN.split(
        tableTwo,
        TableInputFormatBase.getSplitKey(hriToSplit2.getStartKey(), hriToSplit2.getEndKey(), true));
    Thread.sleep(2000);
    assertEquals(initialRegions, ADMIN.getTableRegions(tableTwo).size());
  }
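  // A hedged reconstruction of the CPRegionServerObserver used above, inferred from its
  // call sites: failMerge(true) arms the hook, preMerge records that it fired (the
  // triggered flag the test resets) and then throws to veto the merge, and
  // waitUtilTriggered() blocks until the hook has run. The hook signature varies by version.
  public static class CPRegionServerObserver extends BaseRegionServerObserver {
    private volatile boolean shouldFailMerge = false;
    volatile boolean triggered = false;

    public void failMerge(boolean fail) {
      this.shouldFailMerge = fail;
    }

    public synchronized void waitUtilTriggered() throws InterruptedException {
      while (!triggered) {
        wait();
      }
    }

    @Override
    public synchronized void preMerge(
        ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA, Region regionB)
        throws IOException {
      triggered = true;
      notifyAll();
      if (shouldFailMerge) {
        throw new IOException("Failing merge from coprocessor hook");
      }
    }
  }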
  @Test
  public void testReducerNumEstimation() throws Exception {
    // Skip the test for Tez. Tez uses a different mechanism.
    // The equivalent test is in TestTezAutoParallelism.
    Assume.assumeTrue("Skip this test for TEZ", Util.isMapredExecType(cluster.getExecType()));
    // use the estimation
    Configuration conf = HBaseConfiguration.create(new Configuration());
    HBaseTestingUtility util = new HBaseTestingUtility(conf);
    int clientPort = util.startMiniZKCluster().getClientPort();
    util.startMiniHBaseCluster(1, 1);

    String query = "a = load '/passwd';" + "b = group a by $0;" + "store b into 'output';";
    PigServer ps = new PigServer(cluster.getExecType(), cluster.getProperties());
    PhysicalPlan pp = Util.buildPp(ps, query);
    MROperPlan mrPlan = Util.buildMRPlan(pp, pc);

    pc.getConf().setProperty("pig.exec.reducers.bytes.per.reducer", "100");
    pc.getConf().setProperty("pig.exec.reducers.max", "10");
    pc.getConf().setProperty(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(clientPort));
    ConfigurationValidator.validatePigProperties(pc.getProperties());
    conf = ConfigurationUtil.toConfiguration(pc.getProperties());
    JobControlCompiler jcc = new JobControlCompiler(pc, conf);
    JobControl jc = jcc.compile(mrPlan, "Test");
    Job job = jc.getWaitingJobs().get(0);
    long reducer =
        Math.min(
            (long) Math.ceil(new File("test/org/apache/pig/test/data/passwd").length() / 100.0),
            10);

    Util.assertParallelValues(-1, -1, reducer, reducer, job.getJobConf());

    // use the PARALLEL keyword; it overrides the estimated reducer number
    query = "a = load '/passwd';" + "b = group a by $0 PARALLEL 2;" + "store b into 'output';";
    pp = Util.buildPp(ps, query);
    mrPlan = Util.buildMRPlan(pp, pc);

    pc.getConf().setProperty("pig.exec.reducers.bytes.per.reducer", "100");
    pc.getConf().setProperty("pig.exec.reducers.max", "10");
    ConfigurationValidator.validatePigProperties(pc.getProperties());
    conf = ConfigurationUtil.toConfiguration(pc.getProperties());
    jcc = new JobControlCompiler(pc, conf);
    jc = jcc.compile(mrPlan, "Test");
    job = jc.getWaitingJobs().get(0);

    Util.assertParallelValues(-1, 2, -1, 2, job.getJobConf());

    final byte[] COLUMNFAMILY = Bytes.toBytes("pig");
    util.createTable(Bytes.toBytesBinary("test_table"), COLUMNFAMILY);

    // the estimation won't take effect when the input is not on DFS or the files don't
    // exist, as is the case with HBase
    query =
        "a = load 'hbase://test_table' using org.apache.pig.backend.hadoop.hbase.HBaseStorage('c:f1 c:f2');"
            + "b = group a by $0 ;"
            + "store b into 'output';";
    pp = Util.buildPp(ps, query);
    mrPlan = Util.buildMRPlan(pp, pc);

    pc.getConf().setProperty("pig.exec.reducers.bytes.per.reducer", "100");
    pc.getConf().setProperty("pig.exec.reducers.max", "10");

    ConfigurationValidator.validatePigProperties(pc.getProperties());
    conf = ConfigurationUtil.toConfiguration(pc.getProperties());
    jcc = new JobControlCompiler(pc, conf);
    jc = jcc.compile(mrPlan, "Test");
    job = jc.getWaitingJobs().get(0);

    Util.assertParallelValues(-1, -1, 1, 1, job.getJobConf());

    util.deleteTable(Bytes.toBytesBinary("test_table"));
    // In HBase 0.90.1 and above we can use util.shutdownMiniHBaseCluster()
    // here instead.
    MiniHBaseCluster hbc = util.getHBaseCluster();
    if (hbc != null) {
      hbc.shutdown();
      hbc.join();
    }
    util.shutdownMiniZKCluster();
  }
  @Test(timeout = 60000)
  public void testAssignmentListener() throws IOException, InterruptedException {
    AssignmentManager am = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
    Admin admin = TEST_UTIL.getHBaseAdmin();

    DummyAssignmentListener listener = new DummyAssignmentListener();
    am.registerListener(listener);
    try {
      final String TABLE_NAME_STR = "testtb";
      final TableName TABLE_NAME = TableName.valueOf(TABLE_NAME_STR);
      final byte[] FAMILY = Bytes.toBytes("cf");

      // Create a new table, with a single region
      LOG.info("Create Table");
      TEST_UTIL.createTable(TABLE_NAME, FAMILY);
      listener.awaitModifications(1);
      assertEquals(1, listener.getLoadCount());
      assertEquals(0, listener.getCloseCount());

      // Add some data
      HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE_NAME);
      try {
        for (int i = 0; i < 10; ++i) {
          byte[] key = Bytes.toBytes("row-" + i);
          Put put = new Put(key);
          put.add(FAMILY, null, key);
          table.put(put);
        }
      } finally {
        table.close();
      }

      // Split the table in two
      LOG.info("Split Table");
      listener.reset();
      admin.split(TABLE_NAME_STR, "row-3");
      listener.awaitModifications(3);
      assertEquals(2, listener.getLoadCount()); // daughters added
      assertEquals(1, listener.getCloseCount()); // parent removed

      // Wait for the Regions to be mergeable
      MiniHBaseCluster miniCluster = TEST_UTIL.getMiniHBaseCluster();
      int mergeable = 0;
      while (mergeable < 2) {
        Thread.sleep(100);
        admin.majorCompact(TABLE_NAME_STR);
        mergeable = 0;
        for (JVMClusterUtil.RegionServerThread regionThread :
            miniCluster.getRegionServerThreads()) {
          for (HRegion region : regionThread.getRegionServer().getOnlineRegions(TABLE_NAME)) {
            mergeable += region.isMergeable() ? 1 : 0;
          }
        }
      }

      // Merge the two regions
      LOG.info("Merge Regions");
      listener.reset();
      List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME);
      assertEquals(2, regions.size());
      admin.mergeRegions(
          regions.get(0).getEncodedNameAsBytes(), regions.get(1).getEncodedNameAsBytes(), true);
      listener.awaitModifications(3);
      assertEquals(1, admin.getTableRegions(TABLE_NAME).size());
      assertEquals(1, listener.getLoadCount()); // new merged region added
      assertEquals(2, listener.getCloseCount()); // daughters removed

      // Delete the table
      LOG.info("Drop Table");
      listener.reset();
      TEST_UTIL.deleteTable(TABLE_NAME);
      listener.awaitModifications(1);
      assertEquals(0, listener.getLoadCount());
      assertEquals(1, listener.getCloseCount());
    } finally {
      am.unregisterListener(listener);
    }
  }
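  // A hedged sketch of the DummyAssignmentListener used above, assuming it counts the
  // AssignmentListener callbacks (regionOpened as "load", regionClosed as "close") with a
  // semaphore-based awaitModifications, analogous to DummyServerListener.
  private static class DummyAssignmentListener implements AssignmentListener {
    private final AtomicInteger loadCount = new AtomicInteger();
    private final AtomicInteger closeCount = new AtomicInteger();
    private final Semaphore semaphore = new Semaphore(0);

    @Override
    public void regionOpened(HRegionInfo regionInfo, ServerName serverName) {
      loadCount.incrementAndGet();
      semaphore.release();
    }

    @Override
    public void regionClosed(HRegionInfo regionInfo) {
      closeCount.incrementAndGet();
      semaphore.release();
    }

    public void awaitModifications(int count) throws InterruptedException {
      semaphore.acquire(count);
    }

    public void reset() {
      loadCount.set(0);
      closeCount.set(0);
      semaphore.drainPermits();
    }

    public int getLoadCount() {
      return loadCount.get();
    }

    public int getCloseCount() {
      return closeCount.get();
    }
  }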
  @Test(timeout = 300000)
  public void testDataCorrectnessReplayingRecoveredEdits() throws Exception {
    final int NUM_MASTERS = 1;
    final int NUM_RS = 3;
    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);

    try {
      final byte[] TABLENAME = Bytes.toBytes("testDataCorrectnessReplayingRecoveredEdits");
      final byte[] FAMILY = Bytes.toBytes("family");
      MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
      HMaster master = cluster.getMaster();

      // Create table
      HTableDescriptor desc = new HTableDescriptor(TABLENAME);
      desc.addFamily(new HColumnDescriptor(FAMILY));
      HBaseAdmin hbaseAdmin = TEST_UTIL.getHBaseAdmin();
      hbaseAdmin.createTable(desc);

      assertTrue(hbaseAdmin.isTableAvailable(TABLENAME));

      // Put data: r1->v1
      Log.info("Loading r1 to v1 into " + Bytes.toString(TABLENAME));
      HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
      putDataAndVerify(table, "r1", FAMILY, "v1", 1);

      // Move region to target server
      HRegionInfo regionInfo = table.getRegionLocation("r1").getRegionInfo();
      int originServerNum = cluster.getServerWith(regionInfo.getRegionName());
      HRegionServer originServer = cluster.getRegionServer(originServerNum);
      int targetServerNum = (originServerNum + 1) % NUM_RS;
      HRegionServer targetServer = cluster.getRegionServer(targetServerNum);
      assertFalse(originServer.equals(targetServer));
      Log.info("Moving " + regionInfo.getEncodedName() + " to " + targetServer.getServerName());
      hbaseAdmin.move(
          regionInfo.getEncodedNameAsBytes(),
          Bytes.toBytes(targetServer.getServerName().getServerName()));
      do {
        Thread.sleep(1);
      } while (cluster.getServerWith(regionInfo.getRegionName()) == originServerNum);

      // Put data: r2->v2
      Log.info("Loading r2 to v2 into " + Bytes.toString(TABLENAME));
      putDataAndVerify(table, "r2", FAMILY, "v2", 2);

      // Move region to origin server
      Log.info("Moving " + regionInfo.getEncodedName() + " to " + originServer.getServerName());
      hbaseAdmin.move(
          regionInfo.getEncodedNameAsBytes(),
          Bytes.toBytes(originServer.getServerName().getServerName()));
      do {
        Thread.sleep(1);
      } while (cluster.getServerWith(regionInfo.getRegionName()) == targetServerNum);

      // Put data: r3->v3
      Log.info("Loading r3 to v3 into " + Bytes.toString(TABLENAME));
      putDataAndVerify(table, "r3", FAMILY, "v3", 3);

      // Kill target server
      Log.info("Killing target server " + targetServer.getServerName());
      targetServer.kill();
      cluster.getRegionServerThreads().get(targetServerNum).join();
      // Wait until the shutdown of the target server has been fully processed
      while (master.getServerManager().areDeadServersInProgress()) {
        Thread.sleep(5);
      }
      // Kill origin server
      Log.info("Killing origin server " + targetServer.getServerName());
      originServer.kill();
      cluster.getRegionServerThreads().get(originServerNum).join();

      // Put data: r4->v4
      Log.info("Loading r4 to v4 into " + Bytes.toString(TABLENAME));
      putDataAndVerify(table, "r4", FAMILY, "v4", 4);

    } finally {
      TEST_UTIL.shutdownMiniCluster();
    }
  }
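  // A hedged sketch of the putDataAndVerify helper, inferred from its call sites above:
  // write row -> value into the family, then scan and assert the expected total row count.
  // The qualifier used here is illustrative.
  private void putDataAndVerify(HTable table, String row, byte[] family, String value,
      int verifyNum) throws IOException {
    Put put = new Put(Bytes.toBytes(row));
    put.add(family, Bytes.toBytes("q1"), Bytes.toBytes(value));
    table.put(put);
    ResultScanner scanner = table.getScanner(new Scan());
    try {
      int count = 0;
      while (scanner.next() != null) {
        count++;
      }
      assertEquals(verifyNum, count);
    } finally {
      scanner.close();
    }
  }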
  @Test(timeout = 300000)
  public void testMultiSlaveReplication() throws Exception {
    LOG.info("testCyclicReplication");
    MiniHBaseCluster master = utility1.startMiniCluster();
    utility2.startMiniCluster();
    utility3.startMiniCluster();
    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);

    new HBaseAdmin(conf1).createTable(table);
    new HBaseAdmin(conf2).createTable(table);
    new HBaseAdmin(conf3).createTable(table);
    HTable htable1 = new HTable(conf1, tableName);
    htable1.setWriteBufferSize(1024);
    HTable htable2 = new HTable(conf2, tableName);
    htable2.setWriteBufferSize(1024);
    HTable htable3 = new HTable(conf3, tableName);
    htable3.setWriteBufferSize(1024);

    admin1.addPeer("1", utility2.getClusterKey());

    // put "row" and wait 'til it got around, then delete
    putAndWait(row, famName, htable1, htable2);
    deleteAndWait(row, htable1, htable2);
    // check it wasn't replicated to cluster 3
    checkRow(row, 0, htable3);

    putAndWait(row2, famName, htable1, htable2);

    // now roll the region server's logs
    new HBaseAdmin(conf1).rollHLogWriter(master.getRegionServer(0).getServerName().toString());
    // after the log was rolled put a new row
    putAndWait(row3, famName, htable1, htable2);

    admin1.addPeer("2", utility3.getClusterKey());

    // put a row, check it was replicated to all clusters
    putAndWait(row1, famName, htable1, htable2, htable3);
    // delete and verify
    deleteAndWait(row1, htable1, htable2, htable3);

    // make sure row2 did not get replicated after
    // cluster 3 was added
    checkRow(row2, 0, htable3);

    // row3 will get replicated, because it was in the
    // latest log
    checkRow(row3, 1, htable3);

    Put p = new Put(row);
    p.add(famName, row, row);
    htable1.put(p);
    // now roll the logs again
    new HBaseAdmin(conf1).rollHLogWriter(master.getRegionServer(0).getServerName().toString());

    // cleanup "row2", also conveniently use this to wait replication
    // to finish
    deleteAndWait(row2, htable1, htable2, htable3);
    // Even if the log was rolled in the middle of the replication,
    // "row" is still replicated.
    checkRow(row, 1, htable2, htable3);

    // cleanup the rest
    deleteAndWait(row, htable1, htable2, htable3);
    deleteAndWait(row3, htable1, htable2, htable3);

    utility3.shutdownMiniCluster();
    utility2.shutdownMiniCluster();
    utility1.shutdownMiniCluster();
  }
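  // A hedged sketch of the putAndWait helper used above, inferred from its call sites:
  // write a row on the source table, then poll each sink table until the row has been
  // replicated, failing after roughly 15 seconds. deleteAndWait would mirror this with a
  // Delete and the opposite emptiness check.
  private void putAndWait(byte[] row, byte[] fam, HTable source, HTable... targets)
      throws Exception {
    Put put = new Put(row);
    put.add(fam, row, row);
    source.put(put);
    for (HTable target : targets) {
      for (int i = 0; ; i++) {
        if (i == 30) {
          fail("Waited too long for put replication");
        }
        Result res = target.get(new Get(row));
        if (res.isEmpty()) {
          Thread.sleep(500);
        } else {
          break;
        }
      }
    }
  }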
  /** Tests overriding compaction handling via coprocessor hooks. */
  @Test
  public void testCompactionOverride() throws Exception {
    byte[] compactTable = Bytes.toBytes("TestCompactionOverride");
    HBaseAdmin admin = util.getHBaseAdmin();
    if (admin.tableExists(compactTable)) {
      admin.disableTable(compactTable);
      admin.deleteTable(compactTable);
    }

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(compactTable));
    htd.addFamily(new HColumnDescriptor(A));
    htd.addCoprocessor(EvenOnlyCompactor.class.getName());
    admin.createTable(htd);

    HTable table = new HTable(util.getConfiguration(), compactTable);
    for (long i = 1; i <= 10; i++) {
      byte[] iBytes = Bytes.toBytes(i);
      Put put = new Put(iBytes);
      put.setDurability(Durability.SKIP_WAL);
      put.add(A, A, iBytes);
      table.put(put);
    }

    HRegion firstRegion = cluster.getRegions(compactTable).get(0);
    Coprocessor cp =
        firstRegion.getCoprocessorHost().findCoprocessor(EvenOnlyCompactor.class.getName());
    assertNotNull("EvenOnlyCompactor coprocessor should be loaded", cp);
    EvenOnlyCompactor compactor = (EvenOnlyCompactor) cp;

    // force a compaction
    long ts = System.currentTimeMillis();
    admin.flush(compactTable);
    // wait for flush
    for (int i = 0; i < 10; i++) {
      if (compactor.lastFlush >= ts) {
        break;
      }
      Thread.sleep(1000);
    }
    assertTrue("Flush didn't complete", compactor.lastFlush >= ts);
    LOG.debug("Flush complete");

    ts = compactor.lastFlush;
    admin.majorCompact(compactTable);
    // wait for compaction
    for (int i = 0; i < 30; i++) {
      if (compactor.lastCompaction >= ts) {
        break;
      }
      Thread.sleep(1000);
    }
    LOG.debug("Last compaction was at " + compactor.lastCompaction);
    assertTrue("Compaction didn't complete", compactor.lastCompaction >= ts);

    // only even rows should remain
    ResultScanner scanner = table.getScanner(new Scan());
    try {
      for (long i = 2; i <= 10; i += 2) {
        Result r = scanner.next();
        assertNotNull(r);
        assertFalse(r.isEmpty());
        byte[] iBytes = Bytes.toBytes(i);
        assertArrayEquals("Row should be " + i, r.getRow(), iBytes);
        assertArrayEquals("Value should be " + i, r.getValue(A, A), iBytes);
      }
    } finally {
      scanner.close();
    }
    table.close();
  }
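  // A hedged sketch of the bookkeeping half of EvenOnlyCompactor, assuming it records
  // flush and compaction completion times in the lastFlush / lastCompaction fields polled
  // above. The part that actually drops odd rows (wrapping the compaction scanner in
  // preCompact) is omitted here.
  public static class EvenOnlyCompactor extends BaseRegionObserver {
    volatile long lastCompaction;
    volatile long lastFlush;

    @Override
    public void postCompact(
        ObserverContext<RegionCoprocessorEnvironment> e, Store store, StoreFile resultFile) {
      lastCompaction = EnvironmentEdgeManager.currentTime();
    }

    @Override
    public void postFlush(ObserverContext<RegionCoprocessorEnvironment> e) {
      lastFlush = EnvironmentEdgeManager.currentTime();
    }
  }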
  @Test
  public void testHostRank() throws Exception {

    if (System.getProperty("prop.mapred.job.tracker") != null) {
      if (LOG.isInfoEnabled())
        LOG.info("testHostRank: skipping; this test runs only in local mode.");
      return;
    }

    File jarTest = new File(System.getProperty("prop.jarLocation"));
    if (!jarTest.exists()) {
      fail(
          "Could not find Giraph jar at "
              + "location specified by 'prop.jarLocation'. "
              + "Make sure you built the main Giraph artifact?.");
    }

    MiniHBaseCluster cluster = null;
    MiniZooKeeperCluster zkCluster = null;
    FileSystem fs = null;

    try {
      // using the restart method allows us to avoid having the hbase
      // root directory overwritten by /home/$username
      zkCluster = testUtil.startMiniZKCluster();
      testUtil.restartHBaseCluster(2);
      cluster = testUtil.getMiniHBaseCluster();

      final byte[] OL_BYTES = Bytes.toBytes("ol");
      final byte[] S_BYTES = Bytes.toBytes("s");
      final byte[] METADATA_BYTES = Bytes.toBytes("mtdt");
      final byte[] HR_BYTES = Bytes.toBytes("_hr_");
      final byte[] TAB = Bytes.toBytes(TABLE_NAME);

      Configuration conf = cluster.getConfiguration();
      HTableDescriptor desc = new HTableDescriptor(TAB);
      desc.addFamily(new HColumnDescriptor(OL_BYTES));
      desc.addFamily(new HColumnDescriptor(S_BYTES));
      desc.addFamily(new HColumnDescriptor(METADATA_BYTES));
      HBaseAdmin hbaseAdmin = new HBaseAdmin(conf);
      if (hbaseAdmin.isTableAvailable(TABLE_NAME)) {
        hbaseAdmin.disableTable(TABLE_NAME);
        hbaseAdmin.deleteTable(TABLE_NAME);
      }
      hbaseAdmin.createTable(desc);

      /*
       * Enter the initial data: edges (a,b), (b,c), (a,c), with initial scores
       * a = 1.0 (google), b = 1.0 (yahoo), c = 1.0 (bing).
       */
      HTable table = new HTable(conf, TABLE_NAME);

      Put p1 = new Put(Bytes.toBytes("com.google.www"));
      p1.add(OL_BYTES, Bytes.toBytes("www.yahoo.com"), Bytes.toBytes("ab"));

      Put p2 = new Put(Bytes.toBytes("com.google.www"));
      p2.add(OL_BYTES, Bytes.toBytes("www.bing.com"), Bytes.toBytes("ac"));
      p2.add(OL_BYTES, Bytes.toBytes("www.bing.com"), Bytes.toBytes("invalid1"));
      p2.add(OL_BYTES, Bytes.toBytes("www.google.com"), Bytes.toBytes("invalid2"));

      Put p3 = new Put(Bytes.toBytes("com.yahoo.www"));
      p3.add(OL_BYTES, Bytes.toBytes("www.bing.com"), Bytes.toBytes("bc"));
      // p3.add(OL_BYTES, Bytes.toBytes(""), Bytes.toBytes("invalid4"));

      Put p4 = new Put(Bytes.toBytes("com.bing.www"));
      // TODO: Handle the case below; use the Apache isValid method.
      p4.add(OL_BYTES, Bytes.toBytes("http://invalidurl"), Bytes.toBytes("invalid5"));
      p4.add(S_BYTES, S_BYTES, Bytes.toBytes(10.0d));

      Put p5 = new Put(Bytes.toBytes("dummy"));
      p5.add(S_BYTES, S_BYTES, Bytes.toBytes(10.0d));

      table.put(p1);
      table.put(p2);
      table.put(p3);
      table.put(p4);
      table.put(p5);

      // Set Giraph configuration
      // now operate over HBase using Vertex I/O formats
      conf.set(TableInputFormat.INPUT_TABLE, TABLE_NAME);
      conf.set(TableOutputFormat.OUTPUT_TABLE, TABLE_NAME);

      // Start the giraph job
      GiraphJob giraphJob = new GiraphJob(conf, BspCase.getCallingMethodName());
      GiraphConfiguration giraphConf = giraphJob.getConfiguration();
      giraphConf.setZooKeeperConfiguration(cluster.getMaster().getZooKeeper().getQuorum());
      setupConfiguration(giraphJob);
      giraphConf.setComputationClass(LinkRankComputation.class);
      giraphConf.setMasterComputeClass(LinkRankVertexMasterCompute.class);
      giraphConf.setOutEdgesClass(ByteArrayEdges.class);
      giraphConf.setVertexInputFormatClass(Nutch2HostInputFormat.class);
      giraphConf.setVertexOutputFormatClass(Nutch2HostOutputFormat.class);
      giraphConf.setInt("giraph.linkRank.superstepCount", 10);
      giraphConf.setInt("giraph.linkRank.scale", 10);
      giraphConf.set("giraph.linkRank.family", "mtdt");
      giraphConf.set("giraph.linkRank.qualifier", "_hr_");
      giraphConf.setVertexInputFilterClass(HostRankVertexFilter.class);
      assertTrue(giraphJob.run(true));

      if (LOG.isInfoEnabled()) LOG.info("Giraph job successful. Checking output qualifier.");

      // Check the results
      Map<String, Double> expectedValues = new HashMap<String, Double>();
      expectedValues.put("com.google.www", 1.3515060339386287d);
      expectedValues.put("com.yahoo.www", 4.144902009567587d);
      expectedValues.put("com.bing.www", 9.063893290511482d);

      for (Map.Entry<String, Double> entry : expectedValues.entrySet()) {
        String key = entry.getKey();
        Result result = table.get(new Get(Bytes.toBytes(key)));
        byte[] calculatedScoreByte = result.getValue(METADATA_BYTES, HR_BYTES);
        assertNotNull(calculatedScoreByte);
        assertTrue(calculatedScoreByte.length > 0);
        Assert.assertEquals(
            "Scores are not the same",
            entry.getValue(),
            Bytes.toDouble(calculatedScoreByte),
            DELTA);
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
      if (zkCluster != null) {
        zkCluster.shutdown();
      }
      // clean test files
      if (fs != null) {
        fs.delete(hbaseRootdir);
      }
    }
  }