Code Example #1
File: TestBase.java Project: kakao/hbase-tools
  @BeforeClass
  public static void setUpOnce() throws Exception {
    miniCluster = System.getProperty("cluster.type").equals("mini");
    securedCluster = System.getProperty("cluster.secured").equals("true");
    System.out.println("realCluster - " + !miniCluster);
    System.out.println("securedCluster - " + securedCluster);

    Util.setLoggingThreshold("ERROR");

    if (miniCluster) {
      if (hbase == null) {
        hbase = new HBaseTestingUtility();
        conf = hbase.getConfiguration();
        conf.set("zookeeper.session.timeout", "3600000");
        conf.set("dfs.client.socket-timeout", "3600000");

        if (securedCluster) {
          hbase.startMiniCluster(RS_COUNT);
          hbase.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME, 30000L);
          admin = new HBaseAdminWrapper(conf);
        } else {
          hbase.startMiniCluster(RS_COUNT);
          admin = hbase.getHBaseAdmin();
        }
      }
    } else {
      if (admin == null) {
        final String argsFileName =
            securedCluster
                ? "../../testClusterRealSecured.args"
                : "../../testClusterRealNonSecured.args";
        if (!Util.isFile(argsFileName)) {
          throw new IllegalStateException(
              "You have to define args file " + argsFileName + " for tests.");
        }

        String[] testArgs = {argsFileName};
        Args args = new TestArgs(testArgs);
        admin = HBaseClient.getAdmin(args);
        conf = admin.getConfiguration();
        RS_COUNT = getServerNameList().size();
      }
    }
    previousBalancerRunning = admin.setBalancerRunning(false, true);
    hConnection = HConnectionManager.createConnection(conf);

    USER_RW = User.createUserForTesting(conf, "rwuser", new String[0]);
  }
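The snippet above does not show its teardown. A minimal sketch (method name hypothetical, using only the fields already present in setUpOnce()) that undoes the setup could look like this:

  @AfterClass
  public static void tearDownOnce() throws Exception {
    // Restore the balancer to whatever state it was in before the tests ran.
    if (admin != null) {
      admin.setBalancerRunning(previousBalancerRunning, true);
    }
    // Release the shared connection created in setUpOnce().
    if (hConnection != null) {
      hConnection.close();
    }
    // If this suite owns the mini cluster, it can be shut down here as well.
    if (miniCluster && hbase != null) {
      hbase.shutdownMiniCluster();
    }
  }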
Code Example #2
  /**
   * A setup method to start the test cluster. The AggregateImplementation coprocessor is
   * registered and will be loaded during region startup.
   *
   * @throws Exception
   */
  @BeforeClass
  public static void setupBeforeClass() throws Exception {

    conf.set(
        CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        "org.apache.hadoop.hbase.coprocessor.AggregateImplementation");

    util.startMiniCluster(2);
    HTable table = util.createTable(TEST_TABLE, TEST_FAMILY);
    util.createMultiRegions(
        util.getConfiguration(),
        table,
        TEST_FAMILY,
        new byte[][] {HConstants.EMPTY_BYTE_ARRAY, ROWS[rowSeperator1], ROWS[rowSeperator2]});
    /**
     * The test table has one CQ which is always populated and one variable CQ per row:
     *   rowKey1: CF:CQ CF:CQ1
     *   rowKey2: CF:CQ CF:CQ2
     */
    for (int i = 0; i < ROWSIZE; i++) {
      Put put = new Put(ROWS[i]);
      put.setWriteToWAL(false);
      long l = i;
      put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(l));
      table.put(put);
      Put p2 = new Put(ROWS[i]);
      p2.setWriteToWAL(false); // was put.setWriteToWAL(false), which left p2 writing to the WAL
      p2.add(TEST_FAMILY, Bytes.add(TEST_MULTI_CQ, Bytes.toBytes(l)), Bytes.toBytes(l * 10));
      table.put(p2);
    }
    table.close();
  }
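A test built on this setup typically issues coprocessor-backed aggregate calls against the table. A minimal sketch, assuming the 0.94-era AggregationClient and LongColumnInterpreter API (the method name and assertion are illustrative only):

  @Test
  public void testRowCountSketch() throws Throwable {
    // Assumed classes: org.apache.hadoop.hbase.client.coprocessor.AggregationClient,
    // org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter.
    AggregationClient aggregationClient = new AggregationClient(conf);
    Scan scan = new Scan();
    scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
    // rowCount() runs server-side in the AggregateImplementation coprocessor
    // registered in setupBeforeClass().
    long rowCount = aggregationClient.rowCount(TEST_TABLE, new LongColumnInterpreter(), scan);
    assertEquals(ROWSIZE, rowCount);
  }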
Code Example #3
File: TestNamespace.java Project: kairsas/hbase
 @BeforeClass
 public static void setUp() throws Exception {
   TEST_UTIL = new HBaseTestingUtility();
   TEST_UTIL.startMiniCluster(NUM_SLAVES_BASE);
   admin = TEST_UTIL.getHBaseAdmin();
   cluster = TEST_UTIL.getHBaseCluster();
   master = ((MiniHBaseCluster) cluster).getMaster();
   zkNamespaceManager = new ZKNamespaceManager(master.getZooKeeper());
   zkNamespaceManager.start();
   LOG.info("Done initializing cluster");
 }
Code Example #4
 /**
  * Start up a mini cluster and put a small table of many empty regions into it.
  *
  * @throws Exception
  */
 @BeforeClass
 public static void beforeAllTests() throws Exception {
   TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
   TEST_UTIL.startMiniCluster(2);
   // Create a table of three families.  This will assign a region.
   TEST_UTIL.createTable(Bytes.toBytes(TABLENAME), FAMILIES);
   HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
   int countOfRegions = TEST_UTIL.createMultiRegions(t, getTestFamily());
   TEST_UTIL.waitUntilAllRegionsAssigned(countOfRegions);
   addToEachStartKey(countOfRegions);
   t.close();
 }
Code Example #5
File: TestScannerResource.java Project: kfive/hbase
 @BeforeClass
 public static void setUpBeforeClass() throws Exception {
   conf = TEST_UTIL.getConfiguration();
   TEST_UTIL.startMiniCluster();
   REST_TEST_UTIL.startServletContainer(conf);
   client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
   context =
       JAXBContext.newInstance(
           CellModel.class, CellSetModel.class, RowModel.class, ScannerModel.class);
   marshaller = context.createMarshaller();
   unmarshaller = context.createUnmarshaller();
   Admin admin = TEST_UTIL.getHBaseAdmin();
   if (admin.tableExists(TABLE)) {
     return;
   }
   HTableDescriptor htd = new HTableDescriptor(TABLE);
   htd.addFamily(new HColumnDescriptor(CFA));
   htd.addFamily(new HColumnDescriptor(CFB));
   admin.createTable(htd);
   expectedRows1 = insertData(TEST_UTIL.getConfiguration(), TABLE, COLUMN_1, 1.0);
   expectedRows2 = insertData(TEST_UTIL.getConfiguration(), TABLE, COLUMN_2, 0.5);
 }
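The tests that follow this setup drive the scanner resource over HTTP. A minimal sketch of creating and reading a scanner through the REST client configured above, assuming the rest.client Response and rest.Constants classes from the same era (method name hypothetical):

 @Test
 public void testScannerCreateSketch() throws Exception {
   // Assumed classes: org.apache.hadoop.hbase.rest.client.Response,
   // org.apache.hadoop.hbase.rest.Constants, java.io.StringWriter.
   ScannerModel model = new ScannerModel();
   model.setBatch(10);
   model.addColumn(Bytes.toBytes(COLUMN_1));
   StringWriter writer = new StringWriter();
   marshaller.marshal(model, writer);
   // Creating a scanner is a PUT (or POST) of the marshalled model to /<table>/scanner.
   Response response =
       client.put("/" + TABLE + "/scanner", Constants.MIMETYPE_XML, Bytes.toBytes(writer.toString()));
   assertEquals(201, response.getCode());
   // The Location header points at the newly created scanner instance; GET it to fetch cells.
   String scannerURI = response.getLocation();
   response = client.get(scannerURI, Constants.MIMETYPE_XML);
   assertEquals(200, response.getCode());
 }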
Code Example #6
  /** Spin up a cluster with a bunch of regions on it. */
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL.startMiniCluster(NB_SLAVES);
    TEST_UTIL.getHBaseCluster().waitForActiveAndReadyMaster();
    TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);

    final List<String> families = new ArrayList<String>(1);
    families.add("family");
    TEST_UTIL.createRandomTable("table", families, 1, 0, 0, COUNT_OF_REGIONS, 0);

    // Ensure a stable env
    TEST_UTIL.getHBaseAdmin().setBalancerRunning(false, false);

    boolean ready = false;
    while (!ready) {
      waitForAllRegionsOnline();

      // Assert that every regionserver has some regions on it.
      int i = 0;
      ready = true;
      while (i < NB_SLAVES && ready) {
        HRegionServer hrs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(i);
        if (ProtobufUtil.getOnlineRegions(hrs).isEmpty()) {
          ready = false;
        }
        i++;
      }

      if (!ready) {
        TEST_UTIL.getHBaseAdmin().setBalancerRunning(true, true);
        Assert.assertTrue("Can't start a balance!", TEST_UTIL.getHBaseAdmin().balancer());
        TEST_UTIL.getHBaseAdmin().setBalancerRunning(false, false);
        Thread.sleep(100);
      }
    }
  }
Code Example #7
  @Test(timeout = 300000)
  public void testMultiSlaveReplication() throws Exception {
    LOG.info("testCyclicReplication");
    MiniHBaseCluster master = utility1.startMiniCluster();
    utility2.startMiniCluster();
    utility3.startMiniCluster();
    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);

    new HBaseAdmin(conf1).createTable(table);
    new HBaseAdmin(conf2).createTable(table);
    new HBaseAdmin(conf3).createTable(table);
    HTable htable1 = new HTable(conf1, tableName);
    htable1.setWriteBufferSize(1024);
    HTable htable2 = new HTable(conf2, tableName);
    htable2.setWriteBufferSize(1024);
    HTable htable3 = new HTable(conf3, tableName);
    htable3.setWriteBufferSize(1024);

    admin1.addPeer("1", utility2.getClusterKey());

    // put "row" and wait 'til it got around, then delete
    putAndWait(row, famName, htable1, htable2);
    deleteAndWait(row, htable1, htable2);
    // check it wasn't replicated to cluster 3
    checkRow(row, 0, htable3);

    putAndWait(row2, famName, htable1, htable2);

    // now roll the region server's logs
    new HBaseAdmin(conf1).rollHLogWriter(master.getRegionServer(0).getServerName().toString());
    // after the log was rolled put a new row
    putAndWait(row3, famName, htable1, htable2);

    admin1.addPeer("2", utility3.getClusterKey());

    // put a row, check it was replicated to all clusters
    putAndWait(row1, famName, htable1, htable2, htable3);
    // delete and verify
    deleteAndWait(row1, htable1, htable2, htable3);

    // make sure row2 did not get replicated after
    // cluster 3 was added
    checkRow(row2, 0, htable3);

    // row3 will get replicated, because it was in the
    // latest log
    checkRow(row3, 1, htable3);

    Put p = new Put(row);
    p.add(famName, row, row);
    htable1.put(p);
    // now roll the logs again
    new HBaseAdmin(conf1).rollHLogWriter(master.getRegionServer(0).getServerName().toString());

    // cleanup "row2", also conveniently use this to wait replication
    // to finish
    deleteAndWait(row2, htable1, htable2, htable3);
    // Even if the log was rolled in the middle of the replication,
    // "row" is still replicated.
    checkRow(row, 1, htable2, htable3);

    // cleanup the rest
    deleteAndWait(row, htable1, htable2, htable3);
    deleteAndWait(row3, htable1, htable2, htable3);

    utility3.shutdownMiniCluster();
    utility2.shutdownMiniCluster();
    utility1.shutdownMiniCluster();
  }
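This test assumes conf1/conf2/conf3 and utility1/utility2/utility3 were wired up beforehand. A minimal sketch of that wiring, assuming 0.94-era HBaseTestingUtility and HConstants names (the method name is hypothetical); distinct znode parents keep the three clusters apart on a shared mini ZooKeeper:

  @BeforeClass
  public static void setUpBeforeClassSketch() throws Exception {
    conf1 = HBaseConfiguration.create();
    conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true); // "hbase.replication"
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();

    conf2 = HBaseConfiguration.create(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK); // all three clusters share the same mini ZooKeeper

    conf3 = HBaseConfiguration.create(conf1);
    conf3.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/3");
    utility3 = new HBaseTestingUtility(conf3);
    utility3.setZkCluster(miniZK);
  }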
Code Example #8
File: TestIOFencing.java Project: grokcoder/pbase
  public void doTest(Class<?> regionClass, boolean distributedLogReplay) throws Exception {
    Configuration c = TEST_UTIL.getConfiguration();
    c.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, distributedLogReplay);
    // Insert our custom region
    c.setClass(HConstants.REGION_IMPL, regionClass, HRegion.class);
    c.setBoolean("dfs.support.append", true);
    // Encourage plenty of flushes
    c.setLong("hbase.hregion.memstore.flush.size", 200000);
    c.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, ConstantSizeRegionSplitPolicy.class.getName());
    // Only run compaction when we tell it to
    c.setInt("hbase.hstore.compactionThreshold", 1000);
    c.setLong("hbase.hstore.blockingStoreFiles", 1000);
    // Compact quickly after we tell it to!
    c.setInt("hbase.regionserver.thread.splitcompactcheckfrequency", 1000);
    LOG.info("Starting mini cluster");
    TEST_UTIL.startMiniCluster(1);
    CompactionBlockerRegion compactingRegion = null;
    Admin admin = null;
    try {
      LOG.info("Creating admin");
      admin = TEST_UTIL.getConnection().getAdmin();
      LOG.info("Creating table");
      TEST_UTIL.createTable(TABLE_NAME, FAMILY);
      Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME);
      LOG.info("Loading test table");
      // Find the region
      List<HRegion> testRegions = TEST_UTIL.getMiniHBaseCluster().findRegionsForTable(TABLE_NAME);
      assertEquals(1, testRegions.size());
      compactingRegion = (CompactionBlockerRegion) testRegions.get(0);
      LOG.info("Blocking compactions");
      compactingRegion.stopCompactions();
      long lastFlushTime = compactingRegion.getLastFlushTime();
      // Load some rows
      TEST_UTIL.loadNumericRows(table, FAMILY, 0, FIRST_BATCH_COUNT);

      // add a compaction from an older (non-existing) region to see whether we successfully skip
      // those entries
      HRegionInfo oldHri =
          new HRegionInfo(table.getName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      CompactionDescriptor compactionDescriptor =
          ProtobufUtil.toCompactionDescriptor(
              oldHri,
              FAMILY,
              Lists.newArrayList(new Path("/a")),
              Lists.newArrayList(new Path("/b")),
              new Path("store_dir"));
      WALUtil.writeCompactionMarker(
          compactingRegion.getWAL(),
          table.getTableDescriptor(),
          oldHri,
          compactionDescriptor,
          new AtomicLong(Long.MAX_VALUE - 100));

      // Wait till flush has happened, otherwise there won't be multiple store files
      long startWaitTime = System.currentTimeMillis();
      while (compactingRegion.getLastFlushTime() <= lastFlushTime
          || compactingRegion.countStoreFiles() <= 1) {
        LOG.info("Waiting for the region to flush " + compactingRegion.getRegionNameAsString());
        Thread.sleep(1000);
        assertTrue(
            "Timed out waiting for the region to flush",
            System.currentTimeMillis() - startWaitTime < 30000);
      }
      assertTrue(compactingRegion.countStoreFiles() > 1);
      final byte REGION_NAME[] = compactingRegion.getRegionName();
      LOG.info("Asking for compaction");
      ((HBaseAdmin) admin).majorCompact(TABLE_NAME.getName());
      LOG.info("Waiting for compaction to be about to start");
      compactingRegion.waitForCompactionToBlock();
      LOG.info("Starting a new server");
      RegionServerThread newServerThread = TEST_UTIL.getMiniHBaseCluster().startRegionServer();
      final HRegionServer newServer = newServerThread.getRegionServer();
      LOG.info("Killing region server ZK lease");
      TEST_UTIL.expireRegionServerSession(0);
      CompactionBlockerRegion newRegion = null;
      startWaitTime = System.currentTimeMillis();
      LOG.info("Waiting for the new server to pick up the region " + Bytes.toString(REGION_NAME));

      // wait for region to be assigned and to go out of log replay if applicable
      Waiter.waitFor(
          c,
          60000,
          new Waiter.Predicate<Exception>() {
            @Override
            public boolean evaluate() throws Exception {
              HRegion newRegion = newServer.getOnlineRegion(REGION_NAME);
              return newRegion != null && !newRegion.isRecovering();
            }
          });

      newRegion = (CompactionBlockerRegion) newServer.getOnlineRegion(REGION_NAME);

      LOG.info("Allowing compaction to proceed");
      compactingRegion.allowCompactions();
      while (compactingRegion.compactCount == 0) {
        Thread.sleep(1000);
      }
      // The server we killed stays up until the compaction that was started before it was killed
      // completes.  In the logs you should see the old regionserver now going down.
      LOG.info("Compaction finished");
      // After compaction of old region finishes on the server that was going down, make sure that
      // all the files we expect are still working when region is up in new location.
      FileSystem fs = newRegion.getFilesystem();
      for (String f : newRegion.getStoreFileList(new byte[][] {FAMILY})) {
        assertTrue("After compaction, does not exist: " + f, fs.exists(new Path(f)));
      }
      // If we survive the split keep going...
      // Now we make sure that the region isn't totally confused.  Load up more rows.
      TEST_UTIL.loadNumericRows(
          table, FAMILY, FIRST_BATCH_COUNT, FIRST_BATCH_COUNT + SECOND_BATCH_COUNT);
      ((HBaseAdmin) admin).majorCompact(TABLE_NAME.getName());
      startWaitTime = System.currentTimeMillis();
      while (newRegion.compactCount == 0) {
        Thread.sleep(1000);
        assertTrue(
            "New region never compacted", System.currentTimeMillis() - startWaitTime < 180000);
      }
      assertEquals(FIRST_BATCH_COUNT + SECOND_BATCH_COUNT, TEST_UTIL.countRows(table));
    } finally {
      if (compactingRegion != null) {
        compactingRegion.allowCompactions();
      }
      if (admin != null) {
        admin.close();
      }
      TEST_UTIL.shutdownMiniCluster();
    }
  }
Code Example #9
  /**
   * Test that the global memstore size in the region server is equal to the sum of each region's
   * memstore size.
   *
   * @throws Exception
   */
  @Test
  public void testGlobalMemStore() throws Exception {
    // Start the cluster
    LOG.info("Starting cluster");
    Configuration conf = HBaseConfiguration.create();
    TEST_UTIL = new HBaseTestingUtility(conf);
    TEST_UTIL.startMiniCluster(1, regionServerNum);
    cluster = TEST_UTIL.getHBaseCluster();
    LOG.info("Waiting for active/ready master");
    cluster.waitForActiveAndReadyMaster();

    // Create a table with regions
    TableName table = TableName.valueOf("TestGlobalMemStoreSize");
    byte[] family = Bytes.toBytes("family");
    LOG.info("Creating table with " + regionNum + " regions");
    Table ht = TEST_UTIL.createMultiRegionTable(table, family, regionNum);
    int numRegions = -1;
    try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(table)) {
      numRegions = r.getStartKeys().length;
    }
    assertEquals(regionNum, numRegions);
    waitForAllRegionsAssigned();

    for (HRegionServer server : getOnlineRegionServers()) {
      long globalMemStoreSize = 0;
      for (HRegionInfo regionInfo : ProtobufUtil.getOnlineRegions(server.getRSRpcServices())) {
        globalMemStoreSize +=
            server.getFromOnlineRegions(regionInfo.getEncodedName()).getMemstoreSize();
      }
      assertEquals(server.getRegionServerAccounting().getGlobalMemstoreSize(), globalMemStoreSize);
    }

    // check the global memstore size after flush
    int i = 0;
    for (HRegionServer server : getOnlineRegionServers()) {
      LOG.info(
          "Starting flushes on "
              + server.getServerName()
              + ", size="
              + server.getRegionServerAccounting().getGlobalMemstoreSize());

      for (HRegionInfo regionInfo : ProtobufUtil.getOnlineRegions(server.getRSRpcServices())) {
        Region r = server.getFromOnlineRegions(regionInfo.getEncodedName());
        flush(r, server);
      }
      LOG.info("Post flush on " + server.getServerName());
      long now = System.currentTimeMillis();
      long timeout = now + 1000;
      // Wait up to ~1s for the flushes above to drain the global memstore.
      while (server.getRegionServerAccounting().getGlobalMemstoreSize() != 0
          && System.currentTimeMillis() < timeout) {
        Threads.sleep(10);
      }
      long size = server.getRegionServerAccounting().getGlobalMemstoreSize();
      if (size > 0) {
        // If size > 0, see if its because the meta region got edits while
        // our test was running....
        for (HRegionInfo regionInfo : ProtobufUtil.getOnlineRegions(server.getRSRpcServices())) {
          Region r = server.getFromOnlineRegions(regionInfo.getEncodedName());
          long l = r.getMemstoreSize();
          if (l > 0) {
            // Only meta could have edits at this stage.  Give it another flush
            // to clear them.
            assertTrue(regionInfo.isMetaRegion());
            LOG.info(r.toString() + " " + l + ", reflushing");
            r.flush(true);
          }
        }
      }
      size = server.getRegionServerAccounting().getGlobalMemstoreSize();
      assertEquals("Server=" + server.getServerName() + ", i=" + i++, 0, size);
    }

    ht.close();
    TEST_UTIL.shutdownMiniCluster();
  }