Code example #1
  /**
   * A setup method to start the test cluster. The AggregateImplementation coprocessor is
   * registered and will be loaded during region startup.
   *
   * @throws Exception
   */
  @BeforeClass
  public static void setupBeforeClass() throws Exception {

    conf.set(
        CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        "org.apache.hadoop.hbase.coprocessor.AggregateImplementation");

    util.startMiniCluster(2);
    HTable table = util.createTable(TEST_TABLE, TEST_FAMILY);
    util.createMultiRegions(
        util.getConfiguration(),
        table,
        TEST_FAMILY,
        new byte[][] {HConstants.EMPTY_BYTE_ARRAY, ROWS[rowSeperator1], ROWS[rowSeperator2]});
    /*
     * The test table has one CQ which is always populated and one variable CQ per row:
     *   rowKey1: CF:CQ, CF:CQ1
     *   rowKey2: CF:CQ, CF:CQ2
     */
    for (int i = 0; i < ROWSIZE; i++) {
      Put put = new Put(ROWS[i]);
      put.setWriteToWAL(false);
      Long l = Long.valueOf(i);
      put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(l));
      table.put(put);
      Put p2 = new Put(ROWS[i]);
      p2.setWriteToWAL(false);
      p2.add(TEST_FAMILY, Bytes.add(TEST_MULTI_CQ, Bytes.toBytes(l)), Bytes.toBytes(l * 10));
      table.put(p2);
    }
    table.close();
  }
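The setup above registers AggregateImplementation but never shows a client-side call. The sketch below is a hedged illustration only, not part of the original test: it assumes the 0.94-era org.apache.hadoop.hbase.client.coprocessor.AggregationClient and LongColumnInterpreter classes (matching the HTable/createMultiRegions API used above) and reuses the conf, TEST_TABLE, TEST_FAMILY, TEST_QUALIFIER and ROWSIZE fields; the test method name is made up.

  // Hedged sketch (not in the original example): query the coprocessor registered in
  // setupBeforeClass(). Assumes the 0.94-era API in which AggregationClient's aggregate
  // methods take a byte[] table name and declare "throws Throwable".
  @Test
  public void sketchRowCountAndSum() throws Throwable {
    AggregationClient aggregationClient = new AggregationClient(conf);
    Scan scan = new Scan();
    scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
    // Count and sum the always-populated CF:CQ column, interpreting its values as longs.
    long rowCount = aggregationClient.rowCount(TEST_TABLE, new LongColumnInterpreter(), scan);
    long sum = aggregationClient.sum(TEST_TABLE, new LongColumnInterpreter(), scan);
    // Every row written in setupBeforeClass() populates CF:CQ, so the count equals ROWSIZE.
    assertEquals(ROWSIZE, rowCount);
  }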
Code example #2
File: TestNamespace.java  Project: kairsas/hbase
  @Test
  public void createDoubleTest() throws IOException, InterruptedException {
    String testName = "createDoubleTest";
    String nsName = prefix + "_" + testName;
    LOG.info(testName);

    TableName tableName = TableName.valueOf("my_table");
    TableName tableNameFoo = TableName.valueOf(nsName + ":my_table");
    // create namespace and verify
    admin.createNamespace(NamespaceDescriptor.create(nsName).build());
    TEST_UTIL.createTable(tableName, Bytes.toBytes(nsName));
    TEST_UTIL.createTable(tableNameFoo, Bytes.toBytes(nsName));
    assertEquals(2, admin.listTables().length);
    assertNotNull(admin.getTableDescriptor(tableName));
    assertNotNull(admin.getTableDescriptor(tableNameFoo));
    // delete the table in the default namespace and verify that only the namespaced table remains
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
    assertEquals(1, admin.listTables().length);
  }
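createDoubleTest only drops the table in the default namespace, so the namespaced table and the namespace itself outlive the test. A hedged cleanup sketch, not part of the original test and assuming the same admin field and the tableNameFoo/nsName values from the method above, could look like this (a namespace can only be deleted once it is empty):

    // Hedged cleanup sketch: drop the namespaced table first, then remove the now-empty namespace.
    admin.disableTable(tableNameFoo);
    admin.deleteTable(tableNameFoo);
    admin.deleteNamespace(nsName);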
Code example #3
 /**
  * Start up a mini cluster and put a small table of many empty regions into it.
  *
  * @throws Exception
  */
 @BeforeClass
 public static void beforeAllTests() throws Exception {
   TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
   TEST_UTIL.startMiniCluster(2);
   // Create a table of three families.  This will assign a region.
   TEST_UTIL.createTable(Bytes.toBytes(TABLENAME), FAMILIES);
   HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
   int countOfRegions = TEST_UTIL.createMultiRegions(t, getTestFamily());
   TEST_UTIL.waitUntilAllRegionsAssigned(countOfRegions);
   addToEachStartKey(countOfRegions);
   t.close();
 }
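A class-level setup like beforeAllTests is normally paired with a class-level teardown. The sketch below is a minimal, hedged companion assuming the same TEST_UTIL field; the method name afterAllTests is an assumption.

 /** Hedged sketch (not in the original example): shut the mini cluster down after all tests. */
 @AfterClass
 public static void afterAllTests() throws Exception {
   TEST_UTIL.shutdownMiniCluster();
 }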
Code example #4
File: TestIOFencing.java  Project: grokcoder/pbase
  public void doTest(Class<?> regionClass, boolean distributedLogReplay) throws Exception {
    Configuration c = TEST_UTIL.getConfiguration();
    c.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, distributedLogReplay);
    // Insert our custom region
    c.setClass(HConstants.REGION_IMPL, regionClass, HRegion.class);
    c.setBoolean("dfs.support.append", true);
    // Encourage plenty of flushes
    c.setLong("hbase.hregion.memstore.flush.size", 200000);
    c.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, ConstantSizeRegionSplitPolicy.class.getName());
    // Only run compaction when we tell it to
    c.setInt("hbase.hstore.compactionThreshold", 1000);
    c.setLong("hbase.hstore.blockingStoreFiles", 1000);
    // Compact quickly after we tell it to!
    c.setInt("hbase.regionserver.thread.splitcompactcheckfrequency", 1000);
    LOG.info("Starting mini cluster");
    TEST_UTIL.startMiniCluster(1);
    CompactionBlockerRegion compactingRegion = null;
    Admin admin = null;
    try {
      LOG.info("Creating admin");
      admin = TEST_UTIL.getConnection().getAdmin();
      LOG.info("Creating table");
      TEST_UTIL.createTable(TABLE_NAME, FAMILY);
      Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME);
      LOG.info("Loading test table");
      // Find the region
      List<HRegion> testRegions = TEST_UTIL.getMiniHBaseCluster().findRegionsForTable(TABLE_NAME);
      assertEquals(1, testRegions.size());
      compactingRegion = (CompactionBlockerRegion) testRegions.get(0);
      LOG.info("Blocking compactions");
      compactingRegion.stopCompactions();
      long lastFlushTime = compactingRegion.getLastFlushTime();
      // Load some rows
      TEST_UTIL.loadNumericRows(table, FAMILY, 0, FIRST_BATCH_COUNT);

      // add a compaction from an older (non-existing) region to see whether we successfully skip
      // those entries
      HRegionInfo oldHri =
          new HRegionInfo(table.getName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      CompactionDescriptor compactionDescriptor =
          ProtobufUtil.toCompactionDescriptor(
              oldHri,
              FAMILY,
              Lists.newArrayList(new Path("/a")),
              Lists.newArrayList(new Path("/b")),
              new Path("store_dir"));
      WALUtil.writeCompactionMarker(
          compactingRegion.getWAL(),
          table.getTableDescriptor(),
          oldHri,
          compactionDescriptor,
          new AtomicLong(Long.MAX_VALUE - 100));

      // Wait till flush has happened, otherwise there won't be multiple store files
      long startWaitTime = System.currentTimeMillis();
      while (compactingRegion.getLastFlushTime() <= lastFlushTime
          || compactingRegion.countStoreFiles() <= 1) {
        LOG.info("Waiting for the region to flush " + compactingRegion.getRegionNameAsString());
        Thread.sleep(1000);
        assertTrue(
            "Timed out waiting for the region to flush",
            System.currentTimeMillis() - startWaitTime < 30000);
      }
      assertTrue(compactingRegion.countStoreFiles() > 1);
      final byte[] REGION_NAME = compactingRegion.getRegionName();
      LOG.info("Asking for compaction");
      ((HBaseAdmin) admin).majorCompact(TABLE_NAME.getName());
      LOG.info("Waiting for compaction to be about to start");
      compactingRegion.waitForCompactionToBlock();
      LOG.info("Starting a new server");
      RegionServerThread newServerThread = TEST_UTIL.getMiniHBaseCluster().startRegionServer();
      final HRegionServer newServer = newServerThread.getRegionServer();
      LOG.info("Killing region server ZK lease");
      TEST_UTIL.expireRegionServerSession(0);
      CompactionBlockerRegion newRegion = null;
      startWaitTime = System.currentTimeMillis();
      LOG.info("Waiting for the new server to pick up the region " + Bytes.toString(REGION_NAME));

      // wait for region to be assigned and to go out of log replay if applicable
      Waiter.waitFor(
          c,
          60000,
          new Waiter.Predicate<Exception>() {
            @Override
            public boolean evaluate() throws Exception {
              HRegion newRegion = newServer.getOnlineRegion(REGION_NAME);
              return newRegion != null && !newRegion.isRecovering();
            }
          });

      newRegion = (CompactionBlockerRegion) newServer.getOnlineRegion(REGION_NAME);

      LOG.info("Allowing compaction to proceed");
      compactingRegion.allowCompactions();
      while (compactingRegion.compactCount == 0) {
        Thread.sleep(1000);
      }
      // The server we killed stays up until the compaction that was started before it was killed
      // completes. In the logs you should see the old regionserver going down.
      LOG.info("Compaction finished");
      // After the compaction of the old region finishes on the server that was going down, make
      // sure that all the files we expect still exist when the region comes up in its new location.
      FileSystem fs = newRegion.getFilesystem();
      for (String f : newRegion.getStoreFileList(new byte[][] {FAMILY})) {
        assertTrue("After compaction, does not exist: " + f, fs.exists(new Path(f)));
      }
      // If we survive the split keep going...
      // Now we make sure that the region isn't totally confused.  Load up more rows.
      TEST_UTIL.loadNumericRows(
          table, FAMILY, FIRST_BATCH_COUNT, FIRST_BATCH_COUNT + SECOND_BATCH_COUNT);
      ((HBaseAdmin) admin).majorCompact(TABLE_NAME.getName());
      startWaitTime = System.currentTimeMillis();
      while (newRegion.compactCount == 0) {
        Thread.sleep(1000);
        assertTrue(
            "New region never compacted", System.currentTimeMillis() - startWaitTime < 180000);
      }
      assertEquals(FIRST_BATCH_COUNT + SECOND_BATCH_COUNT, TEST_UTIL.countRows(table));
    } finally {
      if (compactingRegion != null) {
        compactingRegion.allowCompactions();
      }
      if (admin != null) {
        admin.close();
      }
      TEST_UTIL.shutdownMiniCluster();
    }
  }
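doTest is parameterized by the HRegion subclass that blocks compactions and by the distributed-log-replay flag. The sketch below is a hedged illustration of how a test method might drive it; BlockCompactionsInPrepRegion stands in for whatever CompactionBlockerRegion subclass the surrounding test class actually defines.

  // Hedged invocation sketch (not part of the original method). The region class name is an
  // assumption; any CompactionBlockerRegion subclass wired in via HConstants.REGION_IMPL works.
  @Test
  public void testFencingAroundCompaction() throws Exception {
    doTest(BlockCompactionsInPrepRegion.class, false);
  }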