public static void checkRegionIsOpened(HBaseTestingUtility HTU, HRegionServer rs, HRegionInfo hri)
      throws Exception {
    while (!rs.getRegionsInTransitionInRS().isEmpty()) {
      Thread.sleep(1);
    }

    Assert.assertTrue(rs.getRegion(hri.getRegionName()).isAvailable());
  }
  public static void checkRegionIsClosed(HBaseTestingUtility HTU, HRegionServer rs, HRegionInfo hri)
      throws Exception {
    while (!rs.getRegionsInTransitionInRS().isEmpty()) {
      Thread.sleep(1);
    }

    try {
      Assert.assertFalse(rs.getRegion(hri.getRegionName()).isAvailable());
    } catch (NotServingRegionException expected) {
      // That's how it works: if the region is closed, getRegion throws NotServingRegionException.
    }
  }
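
// The two helpers above spin on getRegionsInTransitionInRS() with no upper bound, so a
// transition that never completes hangs the test forever.  A minimal standalone sketch of a
// bounded variant of that polling pattern; the class and method names are hypothetical and
// not taken from the snippets in this listing.
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

public final class WaitUtil {
  private WaitUtil() {}

  /** Polls the condition until it holds or the timeout elapses; returns whether it held. */
  public static boolean waitFor(long timeoutMs, long pollMs, BooleanSupplier condition)
      throws InterruptedException {
    long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
    while (!condition.getAsBoolean()) {
      if (System.nanoTime() >= deadline) {
        return false;
      }
      Thread.sleep(pollMs);
    }
    return true;
  }
}
// With such a helper, the wait in checkRegionIsOpened could become
//   Assert.assertTrue(WaitUtil.waitFor(60000, 1, () -> rs.getRegionsInTransitionInRS().isEmpty()));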
  private int start() throws Exception {
    Configuration conf = getConf();

    // If 'local', don't start a region server here. Defer to
    // LocalHBaseCluster. It manages 'local' clusters.
    if (LocalHBaseCluster.isLocal(conf)) {
      LOG.warn(
          "Not starting a distinct region server because "
              + HConstants.CLUSTER_DISTRIBUTED
              + " is false");
    } else {
      logJVMInfo();
      HRegionServer hrs = HRegionServer.constructRegionServer(regionServerClass, conf);
      HRegionServer.startRegionServer(hrs);
    }
    return 0;
  }
 public static void closeRegion(HBaseTestingUtility HTU, HRegionServer rs, HRegionInfo hri)
     throws Exception {
   AdminProtos.CloseRegionRequest crr =
       RequestConverter.buildCloseRegionRequest(rs.getServerName(), hri.getEncodedName());
   AdminProtos.CloseRegionResponse responseClose = rs.rpcServices.closeRegion(null, crr);
   Assert.assertTrue(responseClose.getClosed());
   checkRegionIsClosed(HTU, rs, hri);
 }
Example #5
 public boolean isMetaRegion(byte[] regionName) {
   HRegion region;
   try {
     region = hRegionServer.getRegion(regionName);
   } catch (NotServingRegionException ignored) {
     return false;
   }
   return region.getRegionInfo().isMetaTable();
 }
  public static void openRegion(HBaseTestingUtility HTU, HRegionServer rs, HRegionInfo hri)
      throws Exception {
    AdminProtos.OpenRegionRequest orr =
        RequestConverter.buildOpenRegionRequest(rs.getServerName(), hri, null, null);
    AdminProtos.OpenRegionResponse responseOpen = rs.rpcServices.openRegion(null, orr);

    Assert.assertEquals(1, responseOpen.getOpeningStateCount());
    Assert.assertEquals(
        AdminProtos.OpenRegionResponse.RegionOpeningState.OPENED, responseOpen.getOpeningState(0));

    checkRegionIsOpened(HTU, rs, hri);
  }
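
// A hypothetical end-to-end use of the helpers above (closeRegion, openRegion,
// checkRegionIsClosed, checkRegionIsOpened).  It assumes HTU is a field holding a running
// HBaseTestingUtility mini-cluster, and that the table named here was created beforehand and
// has a region hosted on region server 0; neither assumption comes from the snippets themselves.
@Test
public void testCloseThenReopenRegion() throws Exception {
  HRegionServer rs = HTU.getHBaseCluster().getRegionServer(0);
  HRegionInfo hri =
      rs.getOnlineRegions(TableName.valueOf("testCloseThenReopenRegion")).get(0).getRegionInfo();

  // Close the region through the region server's RPC services; closeRegion itself waits for the
  // region to go offline via checkRegionIsClosed.
  closeRegion(HTU, rs, hri);
  // Reopen it; openRegion waits until the region is serving again via checkRegionIsOpened.
  openRegion(HTU, rs, hri);
}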
  public static void stopMasterAndAssignMeta(HBaseTestingUtility HTU)
      throws IOException, InterruptedException {
    // Stop master
    HMaster master = HTU.getHBaseCluster().getMaster();
    ServerName masterAddr = master.getServerName();
    master.stopMaster();

    Log.info("Waiting until master thread exits");
    while (HTU.getHBaseCluster().getMasterThread() != null
        && HTU.getHBaseCluster().getMasterThread().isAlive()) {
      Threads.sleep(100);
    }

    HRegionServer.TEST_SKIP_REPORTING_TRANSITION = true;
    // Master is down, so is the meta. We need to assign it somewhere
    // so that regions can be assigned during the mocking phase.
    HRegionServer hrs = HTU.getHBaseCluster().getLiveRegionServerThreads().get(0).getRegionServer();
    ZooKeeperWatcher zkw = hrs.getZooKeeper();
    MetaTableLocator mtl = new MetaTableLocator();
    ServerName sn = mtl.getMetaRegionLocation(zkw);
    if (sn != null && !masterAddr.equals(sn)) {
      return;
    }

    ProtobufUtil.openRegion(
        hrs.getRSRpcServices(), hrs.getServerName(), HRegionInfo.FIRST_META_REGIONINFO);
    while (true) {
      sn = mtl.getMetaRegionLocation(zkw);
      if (sn != null
          && sn.equals(hrs.getServerName())
          && hrs.onlineRegions.containsKey(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) {
        break;
      }
      Thread.sleep(100);
    }
  }
Example #8
  @Override
  public Integer apply(Pair<RequestHeader, Message> headerAndParam) {
    RequestHeader header = headerAndParam.getFirst();
    String methodName = header.getMethodName();
    Integer priorityByAnnotation = annotatedQos.get(methodName);
    if (priorityByAnnotation != null) {
      return priorityByAnnotation;
    }

    Message param = headerAndParam.getSecond();
    if (param == null) {
      return HConstants.NORMAL_QOS;
    }
    if (methodName.equalsIgnoreCase("multi") && param instanceof MultiRequest) {
      // The multi call has its priority set in the header.  All calls should work this way but
      // only this one has been converted so far.  No priority == NORMAL_QOS.
      return header.hasPriority() ? header.getPriority() : HConstants.NORMAL_QOS;
    }
    String cls = param.getClass().getName();
    Class<? extends Message> rpcArgClass = argumentToClassMap.get(cls);
    RegionSpecifier regionSpecifier = null;
    // Check whether the request references the meta region or not.
    try {
      // Check if the param has a region specifier; the pb methods are hasRegion and getRegion if
      // hasRegion returns true.  Not all listed methods have a region specifier each time.  For
      // example, the ScanRequest has it on setup but thereafter relies on the scanner id rather
      // than sending the region over every time.
      Method hasRegion = methodMap.get("hasRegion").get(rpcArgClass);
      if (hasRegion != null && (Boolean) hasRegion.invoke(param, (Object[]) null)) {
        Method getRegion = methodMap.get("getRegion").get(rpcArgClass);
        regionSpecifier = (RegionSpecifier) getRegion.invoke(param, (Object[]) null);
        HRegion region = hRegionServer.getRegion(regionSpecifier);
        if (region.getRegionInfo().isMetaTable()) {
          if (LOG.isTraceEnabled()) {
            LOG.trace("High priority because region=" + region.getRegionNameAsString());
          }
          return HConstants.HIGH_QOS;
        }
      }
    } catch (Exception ex) {
      // Don't throw an exception out of here; it would be a runtime exception anyway.  Let the
      // query go into the server and have it throw the exception if it is still an issue.  Just
      // mark it normal priority.
      if (LOG.isTraceEnabled()) LOG.trace("Marking normal priority after getting exception=" + ex);
      return HConstants.NORMAL_QOS;
    }

    if (methodName.equals("scan")) { // scanner methods...
      ScanRequest request = (ScanRequest) param;
      if (!request.hasScannerId()) {
        return HConstants.NORMAL_QOS;
      }
      RegionScanner scanner = hRegionServer.getScanner(request.getScannerId());
      if (scanner != null && scanner.getRegionInfo().isMetaRegion()) {
        if (LOG.isTraceEnabled()) {
          // Scanner requests are small in size so TextFormat version should not overwhelm log.
          LOG.trace("High priority scanner request " + TextFormat.shortDebugString(request));
        }
        return HConstants.HIGH_QOS;
      }
    }
    return HConstants.NORMAL_QOS;
  }
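
// The dispatch above consults a map built from method annotations first and only falls back to
// inspecting the request body when that lookup misses.  A minimal standalone sketch of that
// first step; the class name, map contents, and QOS values below are illustrative assumptions,
// not HBase's actual constants.
import java.util.HashMap;
import java.util.Map;

public final class MethodNamePriority {
  private static final int NORMAL_QOS = 0;  // placeholder value
  private static final int HIGH_QOS = 100;  // placeholder value

  private final Map<String, Integer> annotatedQos = new HashMap<String, Integer>();

  public MethodNamePriority() {
    // In the real function this map is derived from priority annotations on the RPC service
    // methods; here it is filled in by hand purely for illustration.
    annotatedQos.put("GetRegionInfo", HIGH_QOS);
  }

  /** Returns the annotated priority for the method, or NORMAL_QOS if none is registered. */
  public int priorityOf(String methodName) {
    Integer p = annotatedQos.get(methodName);
    return p != null ? p : NORMAL_QOS;
  }
}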
  @Test
  public void testIsStale() throws IOException {
    int period = 0;
    byte[][] families = new byte[][] {Bytes.toBytes("cf")};
    byte[] qf = Bytes.toBytes("cq");

    HRegionServer regionServer = mock(HRegionServer.class);
    List<Region> regions = new ArrayList<Region>();
    when(regionServer.getOnlineRegionsLocalContext()).thenReturn(regions);
    when(regionServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());

    HTableDescriptor htd = getTableDesc(TableName.valueOf("testIsStale"), families);
    Region primary = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0);
    Region replica1 = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 1);
    regions.add(primary);
    regions.add(replica1);

    StaleStorefileRefresherChore chore =
        new StaleStorefileRefresherChore(period, regionServer, new StoppableImplementation());

    // write some data to primary and flush
    putData(primary, 0, 100, qf, families);
    primary.flush(true);
    verifyData(primary, 0, 100, qf, families);

    try {
      verifyData(replica1, 0, 100, qf, families);
      Assert.fail("should have failed");
    } catch (AssertionError ex) {
      // expected
    }
    chore.chore();
    verifyData(replica1, 0, 100, qf, families);

    // simulate an fs failure where we cannot refresh the store files for the replica
    ((FailingHRegionFileSystem) ((HRegion) replica1).getRegionFileSystem()).fail = true;

    // write some more data to primary and flush
    putData(primary, 100, 100, qf, families);
    primary.flush(true);
    verifyData(primary, 0, 200, qf, families);

    chore.chore(); // should not throw ex, but we cannot refresh the store files

    verifyData(replica1, 0, 100, qf, families);
    try {
      verifyData(replica1, 100, 100, qf, families);
      Assert.fail("should have failed");
    } catch (AssertionError ex) {
      // expected
    }

    chore.isStale = true;
    chore.chore(); // now after this, we cannot read back any value
    try {
      verifyData(replica1, 0, 100, qf, families);
      Assert.fail("should have failed with IOException");
    } catch (IOException ex) {
      // expected
    }
  }
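
// testIsStale above repeats a try/fail/catch block to assert that verifyData(...) throws.  A
// small standalone helper capturing that pattern; the names are hypothetical (JUnit 4.13+ ships
// an equivalent Assert.assertThrows).
public final class Expect {
  private Expect() {}

  /** Runs the body and fails unless it throws the expected throwable type. */
  public static <T extends Throwable> T expectThrows(Class<T> expected, ThrowingRunnable body) {
    try {
      body.run();
    } catch (Throwable t) {
      if (expected.isInstance(t)) {
        return expected.cast(t);
      }
      throw new AssertionError("Expected " + expected.getSimpleName() + " but caught " + t, t);
    }
    throw new AssertionError("Expected " + expected.getSimpleName() + " but nothing was thrown");
  }

  /** Like Runnable, but permitted to throw checked exceptions. */
  public interface ThrowingRunnable {
    void run() throws Throwable;
  }
}
// Usage, replacing one of the try/catch blocks above:
//   Expect.expectThrows(AssertionError.class, () -> verifyData(replica1, 0, 100, qf, families));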
Example #10
  @Test(timeout = 300000)
  public void testDataCorrectnessReplayingRecoveredEdits() throws Exception {
    final int NUM_MASTERS = 1;
    final int NUM_RS = 3;
    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);

    try {
      final byte[] TABLENAME = Bytes.toBytes("testDataCorrectnessReplayingRecoveredEdits");
      final byte[] FAMILY = Bytes.toBytes("family");
      MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
      HMaster master = cluster.getMaster();

      // Create table
      HTableDescriptor desc = new HTableDescriptor(TABLENAME);
      desc.addFamily(new HColumnDescriptor(FAMILY));
      HBaseAdmin hbaseAdmin = TEST_UTIL.getHBaseAdmin();
      hbaseAdmin.createTable(desc);

      assertTrue(hbaseAdmin.isTableAvailable(TABLENAME));

      // Put data: r1->v1
      Log.info("Loading r1 to v1 into " + Bytes.toString(TABLENAME));
      HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
      putDataAndVerify(table, "r1", FAMILY, "v1", 1);

      // Move region to target server
      HRegionInfo regionInfo = table.getRegionLocation("r1").getRegionInfo();
      int originServerNum = cluster.getServerWith(regionInfo.getRegionName());
      HRegionServer originServer = cluster.getRegionServer(originServerNum);
      int targetServerNum = (originServerNum + 1) % NUM_RS;
      HRegionServer targetServer = cluster.getRegionServer(targetServerNum);
      assertFalse(originServer.equals(targetServer));
      Log.info("Moving " + regionInfo.getEncodedName() + " to " + targetServer.getServerName());
      hbaseAdmin.move(
          regionInfo.getEncodedNameAsBytes(),
          Bytes.toBytes(targetServer.getServerName().getServerName()));
      do {
        Thread.sleep(1);
      } while (cluster.getServerWith(regionInfo.getRegionName()) == originServerNum);

      // Put data: r2->v2
      Log.info("Loading r2 to v2 into " + Bytes.toString(TABLENAME));
      putDataAndVerify(table, "r2", FAMILY, "v2", 2);

      // Move region to origin server
      Log.info("Moving " + regionInfo.getEncodedName() + " to " + originServer.getServerName());
      hbaseAdmin.move(
          regionInfo.getEncodedNameAsBytes(),
          Bytes.toBytes(originServer.getServerName().getServerName()));
      do {
        Thread.sleep(1);
      } while (cluster.getServerWith(regionInfo.getRegionName()) == targetServerNum);

      // Put data: r3->v3
      Log.info("Loading r3 to v3 into " + Bytes.toString(TABLENAME));
      putDataAndVerify(table, "r3", FAMILY, "v3", 3);

      // Kill target server
      Log.info("Killing target server " + targetServer.getServerName());
      targetServer.kill();
      cluster.getRegionServerThreads().get(targetServerNum).join();
      // Wait until finish processing of shutdown
      while (master.getServerManager().areDeadServersInProgress()) {
        Thread.sleep(5);
      }
      // Kill origin server
      Log.info("Killing origin server " + targetServer.getServerName());
      originServer.kill();
      cluster.getRegionServerThreads().get(originServerNum).join();

      // Put data: r4->v4
      Log.info("Loading r4 to v4 into " + Bytes.toString(TABLENAME));
      putDataAndVerify(table, "r4", FAMILY, "v4", 4);

    } finally {
      TEST_UTIL.shutdownMiniCluster();
    }
  }
 /**
  * Load data to a table, flush it to disk, trigger compaction, confirm the compaction state is
  * right, and wait till it is done.
  *
  * @param tableName name of the table to create, load, and compact
  * @param flushes number of flushes to perform while loading data
  * @param expectedState the compaction state expected to be observed (MINOR or MAJOR)
  * @param singleFamily if true, compact only a single column family; otherwise, compact all of them
  * @throws IOException
  * @throws InterruptedException
  */
 private void compaction(
     final String tableName,
     final int flushes,
     final CompactionState expectedState,
     boolean singleFamily)
     throws IOException, InterruptedException {
   // Create a table with regions
   TableName table = TableName.valueOf(tableName);
   byte[] family = Bytes.toBytes("family");
   byte[][] families = {
     family, Bytes.add(family, Bytes.toBytes("2")), Bytes.add(family, Bytes.toBytes("3"))
   };
   Table ht = null;
   try {
     ht = TEST_UTIL.createTable(table, families);
     loadData(ht, families, 3000, flushes);
     HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
     List<HRegion> regions = rs.getOnlineRegions(table);
     int countBefore = countStoreFilesInFamilies(regions, families);
     int countBeforeSingleFamily = countStoreFilesInFamily(regions, family);
     assertTrue(countBefore > 0); // there should be some data files
     HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
     if (expectedState == CompactionState.MINOR) {
       if (singleFamily) {
         admin.compact(table.getName(), family);
       } else {
         admin.compact(table.getName());
       }
     } else {
       if (singleFamily) {
         admin.majorCompact(table.getName(), family);
       } else {
         admin.majorCompact(table.getName());
       }
     }
     long curt = System.currentTimeMillis();
     long waitTime = 5000;
     long endt = curt + waitTime;
     CompactionState state = admin.getCompactionState(table.getName());
     while (state == CompactionState.NONE && curt < endt) {
       Thread.sleep(10);
       state = admin.getCompactionState(table.getName());
       curt = System.currentTimeMillis();
     }
     // Now, should have the right compaction state,
     // otherwise, the compaction should have already been done
     if (expectedState != state) {
       for (HRegion region : regions) {
         state = region.getCompactionState();
         assertEquals(CompactionState.NONE, state);
       }
     } else {
       // Wait until the compaction is done
       state = admin.getCompactionState(table.getName());
       while (state != CompactionState.NONE && curt < endt) {
         Thread.sleep(10);
         state = admin.getCompactionState(table.getName());
         curt = System.currentTimeMillis();
       }
       // Now, compaction should be done.
       assertEquals(CompactionState.NONE, state);
     }
     int countAfter = countStoreFilesInFamilies(regions, families);
     int countAfterSingleFamily = countStoreFilesInFamily(regions, family);
     assertTrue(countAfter < countBefore);
     if (!singleFamily) {
       if (expectedState == CompactionState.MAJOR) {
         assertEquals(families.length, countAfter);
       } else {
         assertTrue(families.length < countAfter);
       }
     } else {
       int singleFamDiff = countBeforeSingleFamily - countAfterSingleFamily;
       // assert only change was to single column family
       assertEquals(countBefore - countAfter, singleFamDiff);
       if (expectedState == CompactionState.MAJOR) {
         assertEquals(1, countAfterSingleFamily);
       } else {
         assertTrue(1 < countAfterSingleFamily);
       }
     }
   } finally {
     if (ht != null) {
       TEST_UTIL.deleteTable(table);
     }
   }
 }
Example #12
  /** @param server the region server that owns this compaction/split thread */
  CompactSplitThread(HRegionServer server) {
    super();
    this.server = server;
    this.conf = server.getConfiguration();
    this.regionSplitLimit = conf.getInt("hbase.regionserver.regionSplitLimit", Integer.MAX_VALUE);

    int largeThreads =
        Math.max(1, conf.getInt(LARGE_COMPACTION_THREADS, LARGE_COMPACTION_THREADS_DEFAULT));
    int smallThreads = conf.getInt(SMALL_COMPACTION_THREADS, SMALL_COMPACTION_THREADS_DEFAULT);

    int splitThreads = conf.getInt(SPLIT_THREADS, SPLIT_THREADS_DEFAULT);

    // Sanity check: both compaction pools must be configured with at least one thread.
    Preconditions.checkArgument(largeThreads > 0 && smallThreads > 0);

    final String n = Thread.currentThread().getName();

    this.longCompactions =
        new ThreadPoolExecutor(
            largeThreads,
            largeThreads,
            60,
            TimeUnit.SECONDS,
            new PriorityBlockingQueue<Runnable>(),
            new ThreadFactory() {
              @Override
              public Thread newThread(Runnable r) {
                Thread t = new Thread(r);
                t.setName(n + "-longCompactions-" + System.currentTimeMillis());
                return t;
              }
            });
    this.longCompactions.setRejectedExecutionHandler(new Rejection());
    this.shortCompactions =
        new ThreadPoolExecutor(
            smallThreads,
            smallThreads,
            60,
            TimeUnit.SECONDS,
            new PriorityBlockingQueue<Runnable>(),
            new ThreadFactory() {
              @Override
              public Thread newThread(Runnable r) {
                Thread t = new Thread(r);
                t.setName(n + "-shortCompactions-" + System.currentTimeMillis());
                return t;
              }
            });
    this.shortCompactions.setRejectedExecutionHandler(new Rejection());
    this.splits =
        (ThreadPoolExecutor)
            Executors.newFixedThreadPool(
                splitThreads,
                new ThreadFactory() {
                  @Override
                  public Thread newThread(Runnable r) {
                    Thread t = new Thread(r);
                    t.setName(n + "-splits-" + System.currentTimeMillis());
                    return t;
                  }
                });
    int mergeThreads = conf.getInt(MERGE_THREADS, MERGE_THREADS_DEFAULT);
    this.mergePool =
        (ThreadPoolExecutor)
            Executors.newFixedThreadPool(
                mergeThreads,
                new ThreadFactory() {
                  @Override
                  public Thread newThread(Runnable r) {
                    Thread t = new Thread(r);
                    t.setName(n + "-merges-" + System.currentTimeMillis());
                    return t;
                  }
                });
  }
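
// The constructor above repeats one pattern four times: a fixed-size pool whose threads carry a
// descriptive name.  A minimal standalone sketch of that pattern using only java.util.concurrent;
// the class and method names here are hypothetical and not part of CompactSplitThread.  Note that
// the two compaction pools additionally use a PriorityBlockingQueue<Runnable>, which requires the
// submitted tasks to be mutually Comparable (or a Comparator to be supplied).
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicInteger;

public final class NamedPools {
  private NamedPools() {}

  /** Returns a ThreadFactory whose threads are named prefix-0, prefix-1, ... */
  public static ThreadFactory namedFactory(final String prefix) {
    final AtomicInteger counter = new AtomicInteger();
    return new ThreadFactory() {
      @Override
      public Thread newThread(Runnable r) {
        Thread t = new Thread(r);
        t.setName(prefix + "-" + counter.getAndIncrement());
        return t;
      }
    };
  }

  /** A fixed-size pool with named threads, comparable to the split and merge pools above. */
  public static ThreadPoolExecutor fixedPool(int threads, String prefix) {
    return (ThreadPoolExecutor) Executors.newFixedThreadPool(threads, namedFactory(prefix));
  }
}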
Example #13
 private boolean shouldSplitRegion() {
   return (regionSplitLimit > server.getNumberOfOnlineRegions());
 }