@Test
  public void testSnapshotStateAfterMerge() throws Exception {
    int numRows = DEFAULT_NUM_ROWS;
    // make sure we don't fail on listing snapshots
    SnapshotTestingUtils.assertNoSnapshots(admin);
    // load the table so we have some data
    SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, numRows, TEST_FAM);

    // Take a snapshot
    String snapshotBeforeMergeName = "snapshotBeforeMerge";
    admin.snapshot(snapshotBeforeMergeName, TABLE_NAME, SnapshotType.FLUSH);

    // Clone the table
    TableName cloneBeforeMergeName = TableName.valueOf("cloneBeforeMerge");
    admin.cloneSnapshot(snapshotBeforeMergeName, cloneBeforeMergeName);
    SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneBeforeMergeName);

    // Merge two regions
    List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME);
    Collections.sort(
        regions,
        new Comparator<HRegionInfo>() {
          public int compare(HRegionInfo r1, HRegionInfo r2) {
            return Bytes.compareTo(r1.getStartKey(), r2.getStartKey());
          }
        });

    int numRegions = admin.getTableRegions(TABLE_NAME).size();
    int numRegionsAfterMerge = numRegions - 2;
    admin.mergeRegionsAsync(
        regions.get(1).getEncodedNameAsBytes(), regions.get(2).getEncodedNameAsBytes(), true);
    admin.mergeRegionsAsync(
        regions.get(4).getEncodedNameAsBytes(), regions.get(5).getEncodedNameAsBytes(), true);

    // Verify that there are two fewer regions (two merges completed above)
    waitRegionsAfterMerge(numRegionsAfterMerge);
    assertEquals(numRegionsAfterMerge, admin.getTableRegions(TABLE_NAME).size());

    // Clone the table
    TableName cloneAfterMergeName = TableName.valueOf("cloneAfterMerge");
    admin.cloneSnapshot(snapshotBeforeMergeName, cloneAfterMergeName);
    SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneAfterMergeName);

    verifyRowCount(UTIL, TABLE_NAME, numRows);
    verifyRowCount(UTIL, cloneBeforeMergeName, numRows);
    verifyRowCount(UTIL, cloneAfterMergeName, numRows);

    // clean up the cloned tables (snapshots themselves are removed in tearDown)
    UTIL.deleteTable(cloneAfterMergeName);
    UTIL.deleteTable(cloneBeforeMergeName);
  }
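  // NOTE: waitRegionsAfterMerge, called above, is not shown in this section. A minimal
  // sketch under stated assumptions (the enclosing class's `admin` and `TABLE_NAME`
  // fields, and an arbitrary 60s budget): poll until the region count drops to the
  // expected value or the timeout expires.
  private void waitRegionsAfterMerge(final long numRegionsAfterMerge) throws Exception {
    long deadline = System.currentTimeMillis() + 60000;
    while (admin.getTableRegions(TABLE_NAME).size() > numRegionsAfterMerge) {
      if (System.currentTimeMillis() > deadline) {
        fail("Timed out waiting for merges to complete; regions="
            + admin.getTableRegions(TABLE_NAME).size());
      }
      Thread.sleep(100);
    }
  }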
  @Test
  public void testIncrementHook() throws IOException {
    TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testIncrementHook");
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});
    try {
      Increment inc = new Increment(Bytes.toBytes(0));
      inc.addColumn(A, A, 1);

      verifyMethodResult(
          SimpleRegionObserver.class,
          new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
          tableName,
          new Boolean[] {false, false, false});

      table.increment(inc);

      verifyMethodResult(
          SimpleRegionObserver.class,
          new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
          tableName,
          new Boolean[] {true, true, true});
    } finally {
      util.deleteTable(tableName);
      table.close();
    }
  }
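  // NOTE: verifyMethodResult, used throughout these tests, is not shown in this section.
  // A minimal sketch under stated assumptions (the enclosing class exposes the mini
  // cluster as `cluster`): it reflectively calls the named no-arg accessors on the
  // SimpleRegionObserver instance loaded on every online region of the table and
  // compares each result with the expected value.
  private void verifyMethodResult(
      Class<?> coprocessor, String[] methodName, TableName tableName, Object[] value)
      throws IOException {
    try {
      for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) {
        for (Region region : t.getRegionServer().getOnlineRegions(tableName)) {
          Coprocessor cp = region.getCoprocessorHost().findCoprocessor(coprocessor.getName());
          assertNotNull("Coprocessor not loaded on " + region, cp);
          for (int i = 0; i < methodName.length; ++i) {
            Object o = coprocessor.getMethod(methodName[i]).invoke(cp);
            assertEquals(methodName[i], value[i], o);
          }
        }
      }
    } catch (Exception e) {
      throw new IOException(e);
    }
  }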
 @Test
 public void testCheckAndDeleteHooks() throws IOException {
   TableName tableName =
       TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndDeleteHooks");
   HTable table = util.createTable(tableName, new byte[][] {A, B, C});
   try {
     Put p = new Put(Bytes.toBytes(0));
     p.add(A, A, A);
     table.put(p);
     table.flushCommits();
     Delete d = new Delete(Bytes.toBytes(0));
     table.delete(d);
     verifyMethodResult(
         SimpleRegionObserver.class,
         new String[] {
           "hadPreCheckAndDelete", "hadPreCheckAndDeleteAfterRowLock", "hadPostCheckAndDelete"
         },
         tableName,
         new Boolean[] {false, false, false});
     table.checkAndDelete(Bytes.toBytes(0), A, A, A, d);
     verifyMethodResult(
         SimpleRegionObserver.class,
         new String[] {
           "hadPreCheckAndDelete", "hadPreCheckAndDeleteAfterRowLock", "hadPostCheckAndDelete"
         },
         tableName,
         new Boolean[] {true, true, true});
   } finally {
     util.deleteTable(tableName);
     table.close();
   }
 }
  @Test
  public void bulkLoadHFileTest() throws Exception {
    String testName = TestRegionObserverInterface.class.getName() + ".bulkLoadHFileTest";
    TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".bulkLoadHFileTest");
    Configuration conf = util.getConfiguration();
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});
    try {
      verifyMethodResult(
          SimpleRegionObserver.class,
          new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"},
          tableName,
          new Boolean[] {false, false});

      FileSystem fs = util.getTestFileSystem();
      final Path dir = util.getDataTestDirOnTestFS(testName).makeQualified(fs);
      Path familyDir = new Path(dir, Bytes.toString(A));

      createHFile(util.getConfiguration(), fs, new Path(familyDir, Bytes.toString(A)), A, A);

      // Bulk load
      new LoadIncrementalHFiles(conf).doBulkLoad(dir, new HTable(conf, tableName));

      verifyMethodResult(
          SimpleRegionObserver.class,
          new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"},
          tableName,
          new Boolean[] {true, true});
    } finally {
      util.deleteTable(tableName);
      table.close();
    }
  }
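  // NOTE: a minimal sketch of the createHFile helper used above (hypothetical; the real
  // helper may write different rows). It produces a single HFile that
  // LoadIncrementalHFiles can bulk load into the family directory.
  private static void createHFile(
      Configuration conf, FileSystem fs, Path path, byte[] family, byte[] qualifier)
      throws IOException {
    HFileContext context = new HFileContextBuilder().build();
    HFile.Writer writer =
        HFile.getWriterFactory(conf, new CacheConfig(conf))
            .withPath(fs, path)
            .withFileContext(context)
            .create();
    try {
      long now = System.currentTimeMillis();
      // keys must be appended in sorted order
      for (int i = 1; i <= 9; i++) {
        KeyValue kv =
            new KeyValue(Bytes.toBytes("row" + i), family, qualifier, now, Bytes.toBytes("value" + i));
        writer.append(kv);
      }
    } finally {
      writer.close();
    }
  }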
  @Test(timeout = 30000)
  public void testCreateDeleteTable() throws IOException {
    // Create table then get the single region for our new table.
    HTableDescriptor hdt = HTU.createTableDescriptor("testCreateDeleteTable");
    hdt.setRegionReplication(NB_SERVERS);
    hdt.addCoprocessor(SlowMeCopro.class.getName());
    Table table = HTU.createTable(hdt, new byte[][] {f}, HTU.getConfiguration());

    Put p = new Put(row);
    p.add(f, row, row);
    table.put(p);

    Get g = new Get(row);
    Result r = table.get(g);
    Assert.assertFalse(r.isStale());

    try {
      // But if we ask for stale we will get it
      SlowMeCopro.cdl.set(new CountDownLatch(1));
      g = new Get(row);
      g.setConsistency(Consistency.TIMELINE);
      r = table.get(g);
      Assert.assertTrue(r.isStale());
      SlowMeCopro.cdl.get().countDown();
    } finally {
      SlowMeCopro.cdl.get().countDown();
      SlowMeCopro.sleepTime.set(0);
    }

    HTU.getHBaseAdmin().disableTable(hdt.getTableName());
    HTU.deleteTable(hdt.getTableName());
  }
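  // NOTE: SlowMeCopro, referenced above, lives in the HBase test sources. This is a
  // rough sketch of its mechanism, not the exact implementation: a RegionObserver that
  // stalls preGetOp on the primary replica via a CountDownLatch (and an optional sleep),
  // so a TIMELINE-consistency Get falls back to a secondary replica and returns stale data.
  public static class SlowMeCopro extends BaseRegionObserver {
    static final AtomicLong sleepTime = new AtomicLong(0);
    static final AtomicReference<CountDownLatch> cdl =
        new AtomicReference<CountDownLatch>(new CountDownLatch(0));

    @Override
    public void preGetOp(
        ObserverContext<RegionCoprocessorEnvironment> e, Get get, List<Cell> results)
        throws IOException {
      // only delay the primary replica (replicaId 0)
      if (e.getEnvironment().getRegion().getRegionInfo().getReplicaId() == 0) {
        try {
          if (sleepTime.get() > 0) {
            Thread.sleep(sleepTime.get());
          }
          cdl.get().await(2, TimeUnit.MINUTES); // bounded so tests can finish
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
        }
      }
    }
  }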
  @Test
  public void testPreWALRestoreSkip() throws Exception {
    LOG.info(TestRegionObserverInterface.class.getName() + ".testPreWALRestoreSkip");
    TableName tableName = TableName.valueOf(SimpleRegionObserver.TABLE_SKIPPED);
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});

    JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
    ServerName sn2 = rs1.getRegionServer().getServerName();
    String regEN = table.getRegionLocations().firstEntry().getKey().getEncodedName();

    util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
    while (!sn2.equals(table.getRegionLocations().firstEntry().getValue())) {
      Thread.sleep(100);
    }

    Put put = new Put(ROW);
    put.add(A, A, A);
    put.add(B, B, B);
    put.add(C, C, C);
    table.put(put);
    table.flushCommits();

    cluster.killRegionServer(rs1.getRegionServer().getServerName());
    Threads.sleep(20000); // just to be sure that the kill has fully started.
    util.waitUntilAllRegionsAssigned(tableName);

    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"getCtPreWALRestore", "getCtPostWALRestore"},
        tableName,
        new Integer[] {0, 0});

    util.deleteTable(tableName);
    table.close();
  }
 @Test
 public void testInvalidColumnFamily() throws IOException, InterruptedException {
   byte[] table = Bytes.toBytes("testInvalidColumnFamily");
   byte[] family = Bytes.toBytes("family");
   byte[] fakecf = Bytes.toBytes("fakecf");
   boolean caughtMinorCompact = false;
   boolean caughtMajorCompact = false;
   Table ht = null;
   try {
     ht = TEST_UTIL.createTable(table, family);
     HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
     try {
       admin.compact(table, fakecf);
     } catch (IOException ioe) {
       caughtMinorCompact = true;
     }
     try {
       admin.majorCompact(table, fakecf);
     } catch (IOException ioe) {
       caughtMajorCompact = true;
     }
   } finally {
     if (ht != null) {
       TEST_UTIL.deleteTable(table);
     }
     assertTrue(caughtMinorCompact);
     assertTrue(caughtMajorCompact);
   }
 }
 @After
 public void tearDown() throws Exception {
   UTIL.deleteTable(TABLE_NAME);
   SnapshotTestingUtils.deleteAllSnapshots(this.admin);
   this.admin.close();
   SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
 }
  @Test
  public void testRowMutation() throws IOException {
    TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRowMutation");
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});
    try {
      verifyMethodResult(
          SimpleRegionObserver.class,
          new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDeleted"},
          tableName,
          new Boolean[] {false, false, false, false, false});
      Put put = new Put(ROW);
      put.add(A, A, A);
      put.add(B, B, B);
      put.add(C, C, C);

      Delete delete = new Delete(ROW);
      delete.deleteColumn(A, A);
      delete.deleteColumn(B, B);
      delete.deleteColumn(C, C);

      RowMutations arm = new RowMutations(ROW);
      arm.add(put);
      arm.add(delete);
      table.mutateRow(arm);

      verifyMethodResult(
          SimpleRegionObserver.class,
          new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", "hadDeleted"},
          tableName,
          new Boolean[] {false, false, true, true, true});
    } finally {
      util.deleteTable(tableName);
      table.close();
    }
  }
  @Test
  public void testAppendHook() throws IOException {
    TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testAppendHook");
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});
    try {
      Append app = new Append(Bytes.toBytes(0));
      app.add(A, A, A);

      verifyMethodResult(
          SimpleRegionObserver.class,
          new String[] {"hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock"},
          tableName,
          new Boolean[] {false, false, false});

      table.append(app);

      verifyMethodResult(
          SimpleRegionObserver.class,
          new String[] {"hadPreAppend", "hadPostAppend", "hadPreAppendAfterRowLock"},
          tableName,
          new Boolean[] {true, true, true});
    } finally {
      util.deleteTable(tableName);
      table.close();
    }
  }
 @After
 public void tearDown() throws Exception {
   ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false);
   for (HTableDescriptor htd : UTIL.getHBaseAdmin().listTables()) {
     LOG.info("Tear down, remove table=" + htd.getTableName());
     UTIL.deleteTable(htd.getTableName());
   }
 }
  private void runTest(
      String testName,
      HTableDescriptor htd,
      BloomType bloomType,
      boolean preCreateTable,
      byte[][] tableSplitKeys,
      byte[][][] hfileRanges)
      throws Exception {
    Path dir = util.getDataTestDirOnTestFS(testName);
    FileSystem fs = util.getTestFileSystem();
    dir = dir.makeQualified(fs);
    Path familyDir = new Path(dir, Bytes.toString(FAMILY));

    int hfileIdx = 0;
    for (byte[][] range : hfileRanges) {
      byte[] from = range[0];
      byte[] to = range[1];
      HFileTestUtil.createHFile(
          util.getConfiguration(),
          fs,
          new Path(familyDir, "hfile_" + hfileIdx++),
          FAMILY,
          QUALIFIER,
          from,
          to,
          1000);
    }
    int expectedRows = hfileIdx * 1000;

    if (preCreateTable) {
      util.getHBaseAdmin().createTable(htd, tableSplitKeys);
    }

    final TableName tableName = htd.getTableName();
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
    String[] args = {dir.toString(), tableName.toString()};
    loader.run(args);

    Table table = new HTable(util.getConfiguration(), tableName);
    try {
      assertEquals(expectedRows, util.countRows(table));
    } finally {
      table.close();
    }

    // verify staging folder has been cleaned up
    Path stagingBasePath = SecureBulkLoadUtil.getBaseStagingDir(util.getConfiguration());
    if (fs.exists(stagingBasePath)) {
      FileStatus[] files = fs.listStatus(stagingBasePath);
      for (FileStatus file : files) {
        assertTrue(
            "Folder=" + file.getPath() + " is not cleaned up.",
            !file.getPath().getName().equals("DONOTERASE"));
      }
    }

    util.deleteTable(tableName);
  }
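  // NOTE: a hypothetical invocation of runTest above, assuming the enclosing class's
  // FAMILY constant; the split keys and HFile key ranges are illustrative only.
  @Test
  public void testSimpleLoad() throws Exception {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testSimpleLoad"));
    htd.addFamily(new HColumnDescriptor(FAMILY));
    byte[][] splitKeys = new byte[][] {Bytes.toBytes("ddd"), Bytes.toBytes("ppp")};
    byte[][][] hfileRanges =
        new byte[][][] {
          new byte[][] {Bytes.toBytes("aaaa"), Bytes.toBytes("cccc")},
          new byte[][] {Bytes.toBytes("ddd"), Bytes.toBytes("ooo")},
        };
    runTest("testSimpleLoad", htd, BloomType.NONE, true, splitKeys, hfileRanges);
  }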
  @Test
  public void testTTL() throws Exception {
    TableName tableName = TableName.valueOf("testTTL");
    if (TEST_UTIL.getHBaseAdmin().tableExists(tableName)) {
      TEST_UTIL.deleteTable(tableName);
    }
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(F).setMaxVersions(10).setTimeToLive(1);
    desc.addFamily(hcd);
    TEST_UTIL.getHBaseAdmin().createTable(desc);
    Table t = new HTable(new Configuration(TEST_UTIL.getConfiguration()), tableName);
    long now = EnvironmentEdgeManager.currentTime();
    ManualEnvironmentEdge me = new ManualEnvironmentEdge();
    me.setValue(now);
    EnvironmentEdgeManagerTestHelper.injectEdge(me);
    // 2s in the past
    long ts = now - 2000;
    // Set the TTL override to 3s. The empty "ttl" attribute flags this Put as a
    // configuration update for the scan-policy coprocessor under test, not as data.
    Put p = new Put(R);
    p.setAttribute("ttl", new byte[] {});
    p.add(F, tableName.getName(), Bytes.toBytes(3000L));
    t.put(p);

    p = new Put(R);
    p.add(F, Q, ts, Q);
    t.put(p);
    p = new Put(R);
    p.add(F, Q, ts + 1, Q);
    t.put(p);

    // these two should be expired but for the override
    // (their ts was 2s in the past)
    Get g = new Get(R);
    g.setMaxVersions(10);
    Result r = t.get(g);
    // still there?
    assertEquals(2, r.size());

    TEST_UTIL.flush(tableName);
    TEST_UTIL.compact(tableName, true);

    g = new Get(R);
    g.setMaxVersions(10);
    r = t.get(g);
    // still there?
    assertEquals(2, r.size());

    // roll time forward 2s.
    me.setValue(now + 2000);
    // now verify that data eventually does expire
    g = new Get(R);
    g.setMaxVersions(10);
    r = t.get(g);
    // should be gone now
    assertEquals(0, r.size());
    t.close();
  }
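  // NOTE: not shown above: tests that inject a ManualEnvironmentEdge normally restore
  // the default clock afterwards so later tests see real time, e.g. (sketch):
  @After
  public void resetClock() {
    EnvironmentEdgeManagerTestHelper.reset();
  }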
 @After
 public void tearDown() throws Exception {
   // Clean the _acl_ table
   TEST_UTIL.deleteTable(tableName);
   TEST_UTIL.getHBaseAdmin().deleteNamespace(namespace);
   // Verify all table/namespace permissions are erased
   assertEquals(0, AccessControlLists.getTablePermissions(conf, tableName).size());
   assertEquals(0, AccessControlLists.getNamespacePermissions(conf, namespace).size());
 }
  @Test
  public void testStartStopRow() throws Exception {
    final TableName TABLENAME1 = TableName.valueOf("testStartStopRow1");
    final TableName TABLENAME2 = TableName.valueOf("testStartStopRow2");
    final byte[] FAMILY = Bytes.toBytes("family");
    final byte[] COLUMN1 = Bytes.toBytes("c1");
    final byte[] ROW0 = Bytes.toBytesBinary("\\x01row0");
    final byte[] ROW1 = Bytes.toBytesBinary("\\x01row1");
    final byte[] ROW2 = Bytes.toBytesBinary("\\x01row2");

    Table t1 = TEST_UTIL.createTable(TABLENAME1, FAMILY);
    Table t2 = TEST_UTIL.createTable(TABLENAME2, FAMILY);

    // put rows into the first table
    Put p = new Put(ROW0);
    p.addColumn(FAMILY, COLUMN1, COLUMN1);
    t1.put(p);
    p = new Put(ROW1);
    p.addColumn(FAMILY, COLUMN1, COLUMN1);
    t1.put(p);
    p = new Put(ROW2);
    p.addColumn(FAMILY, COLUMN1, COLUMN1);
    t1.put(p);

    CopyTable copy = new CopyTable();
    assertEquals(
        0,
        ToolRunner.run(
            new Configuration(TEST_UTIL.getConfiguration()),
            copy,
            new String[] {
              "--new.name=" + TABLENAME2,
              "--startrow=\\x01row1",
              "--stoprow=\\x01row2",
              TABLENAME1.getNameAsString()
            }));

    // verify the data was copied into table 2
    // row1 exists; row0 and row2 do not
    Get g = new Get(ROW1);
    Result r = t2.get(g);
    assertEquals(1, r.size());
    assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN1));

    g = new Get(ROW0);
    r = t2.get(g);
    assertEquals(0, r.size());

    g = new Get(ROW2);
    r = t2.get(g);
    assertEquals(0, r.size());

    t1.close();
    t2.close();
    TEST_UTIL.deleteTable(TABLENAME1);
    TEST_UTIL.deleteTable(TABLENAME2);
  }
  @Test
  public void testRecovery() throws Exception {
    LOG.info(TestRegionObserverInterface.class.getName() + ".testRecovery");
    TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRecovery");
    HTable table = util.createTable(tableName, new byte[][] {A, B, C});
    try {
      JVMClusterUtil.RegionServerThread rs1 = cluster.startRegionServer();
      ServerName sn2 = rs1.getRegionServer().getServerName();
      String regEN = table.getRegionLocations().firstEntry().getKey().getEncodedName();

      util.getHBaseAdmin().move(regEN.getBytes(), sn2.getServerName().getBytes());
      while (!sn2.equals(table.getRegionLocations().firstEntry().getValue())) {
        Thread.sleep(100);
      }

      Put put = new Put(ROW);
      put.add(A, A, A);
      put.add(B, B, B);
      put.add(C, C, C);
      table.put(put);

      verifyMethodResult(
          SimpleRegionObserver.class,
          new String[] {
            "hadPreGet",
            "hadPostGet",
            "hadPrePut",
            "hadPostPut",
            "hadPreBatchMutate",
            "hadPostBatchMutate",
            "hadDelete"
          },
          tableName,
          new Boolean[] {false, false, true, true, true, true, false});

      verifyMethodResult(
          SimpleRegionObserver.class,
          new String[] {"getCtPreWALRestore", "getCtPostWALRestore", "getCtPrePut", "getCtPostPut"},
          tableName,
          new Integer[] {0, 0, 1, 1});

      cluster.killRegionServer(rs1.getRegionServer().getServerName());
      Threads.sleep(1000); // Let the kill soak in.
      util.waitUntilAllRegionsAssigned(tableName);
      LOG.info("All regions assigned");

      verifyMethodResult(
          SimpleRegionObserver.class,
          new String[] {"getCtPrePut", "getCtPostPut"},
          tableName,
          new Integer[] {0, 0});
    } finally {
      util.deleteTable(tableName);
      table.close();
    }
  }
 @After
 public void tearDown() {
   try {
     htu.deleteTable(Bytes.toBytes(tableName));
     htu.shutdownMiniHBaseCluster();
     htu.shutdownMiniZKCluster();
     htu.cleanupTestDir();
     Log.info("Minicluster Shutdown complete");
   } catch (Exception e) {
     throw new RuntimeException(e);
   }
 }
  private void doCopyTableTest(boolean bulkload) throws Exception {
    final TableName TABLENAME1 = TableName.valueOf("testCopyTable1");
    final TableName TABLENAME2 = TableName.valueOf("testCopyTable2");
    final byte[] FAMILY = Bytes.toBytes("family");
    final byte[] COLUMN1 = Bytes.toBytes("c1");

    try (Table t1 = TEST_UTIL.createTable(TABLENAME1, FAMILY);
        Table t2 = TEST_UTIL.createTable(TABLENAME2, FAMILY)) {
      // put rows into the first table
      for (int i = 0; i < 10; i++) {
        Put p = new Put(Bytes.toBytes("row" + i));
        p.addColumn(FAMILY, COLUMN1, COLUMN1);
        t1.put(p);
      }

      CopyTable copy = new CopyTable();

      int code;
      if (bulkload) {
        code =
            ToolRunner.run(
                new Configuration(TEST_UTIL.getConfiguration()),
                copy,
                new String[] {
                  "--new.name=" + TABLENAME2.getNameAsString(),
                  "--bulkload",
                  TABLENAME1.getNameAsString()
                });
      } else {
        code =
            ToolRunner.run(
                new Configuration(TEST_UTIL.getConfiguration()),
                copy,
                new String[] {
                  "--new.name=" + TABLENAME2.getNameAsString(), TABLENAME1.getNameAsString()
                });
      }
      assertEquals("copy job failed", 0, code);

      // verify the data was copied into table 2
      for (int i = 0; i < 10; i++) {
        Get g = new Get(Bytes.toBytes("row" + i));
        Result r = t2.get(g);
        assertEquals(1, r.size());
        assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN1));
      }
    } finally {
      TEST_UTIL.deleteTable(TABLENAME1);
      TEST_UTIL.deleteTable(TABLENAME2);
    }
  }
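  // NOTE: hypothetical callers of doCopyTableTest above; the real test class drives
  // the plain and bulkload code paths from separate @Test methods.
  @Test
  public void testCopyTable() throws Exception {
    doCopyTableTest(false);
  }

  @Test
  public void testCopyTableWithBulkload() throws Exception {
    doCopyTableTest(true);
  }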
  @Test
  public void testBaseCases() throws Exception {
    TableName tableName = TableName.valueOf("baseCases");
    if (TEST_UTIL.getHBaseAdmin().tableExists(tableName)) {
      TEST_UTIL.deleteTable(tableName);
    }
    Table t = TEST_UTIL.createTable(tableName, F, 1);
    // set the version override to 2
    Put p = new Put(R);
    p.setAttribute("versions", new byte[] {});
    p.add(F, tableName.getName(), Bytes.toBytes(2));
    t.put(p);

    long now = EnvironmentEdgeManager.currentTime();

    // insert 2 versions
    p = new Put(R);
    p.add(F, Q, now, Q);
    t.put(p);
    p = new Put(R);
    p.add(F, Q, now + 1, Q);
    t.put(p);
    Get g = new Get(R);
    g.setMaxVersions(10);
    Result r = t.get(g);
    assertEquals(2, r.size());

    TEST_UTIL.flush(tableName);
    TEST_UTIL.compact(tableName, true);

    // both versions are still visible even after a flush/compaction
    g = new Get(R);
    g.setMaxVersions(10);
    r = t.get(g);
    assertEquals(2, r.size());

    // insert a 3rd version
    p = new Put(R);
    p.add(F, Q, now + 2, Q);
    t.put(p);
    g = new Get(R);
    g.setMaxVersions(10);
    r = t.get(g);
    // still only two versions visible
    assertEquals(2, r.size());

    t.close();
  }
  /**
   * Tests three merge failure modes: 1) merging a region that is not online; 2) merging a region
   * with itself; 3) merging unknown regions. They share one test case so that we don't have to
   * create many tables, and each check is simple.
   */
  @Test
  public void testMerge() throws Exception {
    LOG.info("Starting testMerge");
    final TableName tableName = TableName.valueOf("testMerge");

    try {
      // Create table and load data.
      Table table = createTableAndLoadData(master, tableName);
      RegionStates regionStates = master.getAssignmentManager().getRegionStates();
      List<HRegionInfo> regions = regionStates.getRegionsOfTable(tableName);
      // Fake offline one region
      HRegionInfo a = regions.get(0);
      HRegionInfo b = regions.get(1);
      regionStates.regionOffline(a);
      try {
        // Merge offline region. Region a is offline here
        admin.mergeRegions(a.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), false);
        fail("Offline regions should not be able to merge");
      } catch (IOException ie) {
        System.out.println(ie);
        assertTrue(
            "Exception should mention regions not online",
            StringUtils.stringifyException(ie).contains("regions not online")
                && ie instanceof MergeRegionException);
      }
      try {
        // Merge the same region: b and b.
        admin.mergeRegions(b.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), true);
        fail("A region should not be able to merge with itself, even forcifully");
      } catch (IOException ie) {
        assertTrue(
            "Exception should mention merging a region with itself",
            StringUtils.stringifyException(ie).contains("region to itself")
                && ie instanceof MergeRegionException);
      }
      try {
        // Merge unknown regions
        admin.mergeRegions(Bytes.toBytes("-f1"), Bytes.toBytes("-f2"), true);
        fail("Unknown region could not be merged");
      } catch (IOException ie) {
        assertTrue("UnknownRegionException should be thrown", ie instanceof UnknownRegionException);
      }
      table.close();
    } finally {
      TEST_UTIL.deleteTable(tableName);
    }
  }
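  // NOTE: a minimal sketch of the createTableAndLoadData helper used above
  // (hypothetical; the real helper lives in the enclosing test class). A merge test
  // needs at least two regions, so the table is pre-split before loading; the family
  // name and split key below are assumptions. The master parameter is kept only to
  // match the call site.
  private Table createTableAndLoadData(HMaster master, TableName tableName) throws Exception {
    byte[] family = Bytes.toBytes("fam"); // assumed family name
    byte[][] splitKeys = new byte[][] {Bytes.toBytes("m")}; // yields two regions
    Table table = TEST_UTIL.createTable(tableName, new byte[][] {family}, splitKeys);
    TEST_UTIL.loadTable(table, family);
    TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
    return table;
  }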
  @Test
  // HBase-3583
  public void testHBase3583() throws IOException {
    TableName tableName = TableName.valueOf("testHBase3583");
    util.createTable(tableName, new byte[][] {A, B, C});
    util.waitUntilAllRegionsAssigned(tableName);

    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "wasScannerNextCalled", "wasScannerCloseCalled"},
        tableName,
        new Boolean[] {false, false, false, false});

    HTable table = new HTable(util.getConfiguration(), tableName);
    Put put = new Put(ROW);
    put.add(A, A, A);
    table.put(put);

    Get get = new Get(ROW);
    get.addColumn(A, A);
    table.get(get);

    // verify that scannerNext and scannerClose upcalls won't be invoked
    // when we perform get().
    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "wasScannerNextCalled", "wasScannerCloseCalled"},
        tableName,
        new Boolean[] {true, true, false, false});

    Scan s = new Scan();
    ResultScanner scanner = table.getScanner(s);
    try {
      for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {}
    } finally {
      scanner.close();
    }

    // now scanner hooks should be invoked.
    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"wasScannerNextCalled", "wasScannerCloseCalled"},
        tableName,
        new Boolean[] {true, true});
    util.deleteTable(tableName);
    table.close();
  }
  @Test
  public void testMoveThrowsUnknownRegionException() throws IOException {
    TableName tableName = TableName.valueOf("testMoveThrowsUnknownRegionException");
    HTableDescriptor htd = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor("value");
    htd.addFamily(hcd);

    admin.createTable(htd, null);
    try {
      HRegionInfo hri = new HRegionInfo(tableName, Bytes.toBytes("A"), Bytes.toBytes("Z"));
      admin.move(hri.getEncodedNameAsBytes(), null);
      fail("Region should not be moved since it is fake");
    } catch (IOException ioe) {
      assertTrue(ioe instanceof UnknownRegionException);
    } finally {
      TEST_UTIL.deleteTable(tableName);
    }
  }
  @Test
  public void testMROnTableWithInvalidOperationAttr() throws Exception {
    TableName tableName = TableName.valueOf("test-" + UUID.randomUUID());

    // Prepare the arguments required for the test.
    String[] args =
        new String[] {
          "-D"
              + ImportTsv.MAPPER_CONF_KEY
              + "=org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapperForOprAttr",
          "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_ATTRIBUTES_KEY",
          "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b",
          tableName.getNameAsString()
        };
    String data = "KEY\u001bVALUE1\u001bVALUE2\u001btest1=>myvalue\n";
    util.createTable(tableName, FAMILY);
    doMROnTableTest(util, FAMILY, data, args, 1, false);
    util.deleteTable(tableName);
  }
  @Test
  // HBase-3758
  public void testHBase3758() throws IOException {
    TableName tableName = TableName.valueOf("testHBase3758");
    util.createTable(tableName, new byte[][] {A, B, C});

    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"hadDeleted", "wasScannerOpenCalled"},
        tableName,
        new Boolean[] {false, false});

    HTable table = new HTable(util.getConfiguration(), tableName);
    Put put = new Put(ROW);
    put.add(A, A, A);
    table.put(put);

    Delete delete = new Delete(ROW);
    table.delete(delete);

    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"hadDeleted", "wasScannerOpenCalled"},
        tableName,
        new Boolean[] {true, false});

    Scan s = new Scan();
    ResultScanner scanner = table.getScanner(s);
    try {
      for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {}
    } finally {
      scanner.close();
    }

    // now scanner hooks should be invoked.
    verifyMethodResult(
        SimpleRegionObserver.class,
        new String[] {"wasScannerOpenCalled"},
        tableName,
        new Boolean[] {true});
    util.deleteTable(tableName);
    table.close();
  }
  @Test
  public void testMoveThrowsPleaseHoldException() throws IOException {
    TableName tableName = TableName.valueOf("testMoveThrowsPleaseHoldException");
    HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
    HTableDescriptor htd = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor("value");
    htd.addFamily(hcd);

    admin.createTable(htd, null);
    try {
      List<HRegionInfo> tableRegions = admin.getTableRegions(tableName);

      master.setInitialized(false); // fake it, set back later
      admin.move(tableRegions.get(0).getEncodedNameAsBytes(), null);
      fail("Region should not be moved since master is not initialized");
    } catch (IOException ioe) {
      assertTrue(StringUtils.stringifyException(ioe).contains("PleaseHoldException"));
    } finally {
      master.setInitialized(true);
      TEST_UTIL.deleteTable(tableName);
    }
  }
  @Before
  public void setUp() throws Exception {

    File tempDir = Files.createTempDir();
    tempDir.deleteOnExit();

    htu = HBaseTestingUtility.createLocalHTU();
    try {
      htu.cleanupTestDir();

      htu.startMiniZKCluster();
      htu.startMiniHBaseCluster(1, 1);

      try {
        htu.deleteTable(Bytes.toBytes(tableName));
      } catch (Exception e) {
        Log.info(" - no table " + tableName + " found");
      }
      htu.createTable(Bytes.toBytes(tableName), colFam);

      dao =
          new Hbase1OffsetStore.Builder()
              .setHbaseConfiguration(htu.getConfiguration())
              .setOffsetTable(tableName)
              .build();

    } catch (Exception e1) {
      throw new RuntimeException(e1);
    }

    KOM =
        new KafkaOffsetManager.Builder()
            .setOffsetManager(dao)
            .setKafkaBrokerList("localhost:" + kafkaRule.kafkaBrokerPort())
            .setGroupID(testGroupID)
            .setTopic(testTopicName)
            .build();
  }
  private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality)
      throws Exception {
    util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();
    conf.setBoolean(HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY, shouldKeepLocality);
    int hostCount = 1;
    int regionNum = 5;
    if (shouldKeepLocality) {
      // Use a host count higher than the HDFS replica count; MiniHBaseCluster supports an
      // explicit hostnames parameter just like MiniDFSCluster does.
      hostCount = 3;
      regionNum = 20;
    }

    byte[][] splitKeys = generateRandomSplitKeys(regionNum - 1);
    String[] hostnames = new String[hostCount];
    for (int i = 0; i < hostCount; ++i) {
      hostnames[i] = "datanode_" + i;
    }
    util.startMiniCluster(1, hostCount, hostnames);

    Table table = util.createTable(TABLE_NAME, FAMILIES, splitKeys);
    Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
    try (RegionLocator r = util.getConnection().getRegionLocator(TABLE_NAME);
        Admin admin = util.getConnection().getAdmin()) {
      assertEquals("Should start with empty table", 0, util.countRows(table));
      int numRegions = r.getStartKeys().length;
      assertEquals("Should make " + regionNum + " regions", numRegions, regionNum);

      // Generate the bulk load files
      runIncrementalPELoad(conf, table.getTableDescriptor(), r, testDir);
      // This doesn't write into the table, just makes files
      assertEquals("HFOF should not touch actual table", 0, util.countRows(table));

      // Make sure that a directory was created for every CF
      int dir = 0;
      for (FileStatus f : testDir.getFileSystem(conf).listStatus(testDir)) {
        for (byte[] family : FAMILIES) {
          if (Bytes.toString(family).equals(f.getPath().getName())) {
            ++dir;
          }
        }
      }
      assertEquals("Column family not found in FS.", FAMILIES.length, dir);

      // handle the split case
      if (shouldChangeRegions) {
        LOG.info("Changing regions in table");
        admin.disableTable(table.getName());
        while (util.getMiniHBaseCluster()
            .getMaster()
            .getAssignmentManager()
            .getRegionStates()
            .isRegionsInTransition()) {
          Threads.sleep(200);
          LOG.info("Waiting on table to finish disabling");
        }
        util.deleteTable(table.getName());
        byte[][] newSplitKeys = generateRandomSplitKeys(14);
        table = util.createTable(TABLE_NAME, FAMILIES, newSplitKeys);

        while (util.getConnection().getRegionLocator(TABLE_NAME).getAllRegionLocations().size()
                != 15
            || !admin.isTableAvailable(table.getName())) {
          Thread.sleep(200);
          LOG.info("Waiting for new region assignment to happen");
        }
      }

      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, r);

      // Ensure data shows up
      int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals(
          "LoadIncrementalHFiles should put expected data in table",
          expectedRows,
          util.countRows(table));
      Scan scan = new Scan();
      ResultScanner results = table.getScanner(scan);
      for (Result res : results) {
        assertEquals(FAMILIES.length, res.rawCells().length);
        Cell first = res.rawCells()[0];
        for (Cell kv : res.rawCells()) {
          assertTrue(CellUtil.matchingRow(first, kv));
          assertTrue(Bytes.equals(CellUtil.cloneValue(first), CellUtil.cloneValue(kv)));
        }
      }
      results.close();
      String tableDigestBefore = util.checksumRows(table);

      // Check region locality
      HDFSBlocksDistribution hbd = new HDFSBlocksDistribution();
      for (HRegion region : util.getHBaseCluster().getRegions(TABLE_NAME)) {
        hbd.add(region.getHDFSBlocksDistribution());
      }
      for (String hostname : hostnames) {
        float locality = hbd.getBlockLocalityIndex(hostname);
        LOG.info("locality of [" + hostname + "]: " + locality);
        assertEquals(100, (int) (locality * 100));
      }

      // Cause regions to reopen
      admin.disableTable(TABLE_NAME);
      while (!admin.isTableDisabled(TABLE_NAME)) {
        Thread.sleep(200);
        LOG.info("Waiting for table to disable");
      }
      admin.enableTable(TABLE_NAME);
      util.waitTableAvailable(TABLE_NAME);
      assertEquals(
          "Data should remain after reopening of regions",
          tableDigestBefore,
          util.checksumRows(table));
    } finally {
      testDir.getFileSystem(conf).delete(testDir, true);
      util.deleteTable(TABLE_NAME);
      util.shutdownMiniCluster();
    }
  }
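  // NOTE: hypothetical entry points for doIncrementalLoadTest above; the real test
  // class exercises the region-change and locality variants from separate @Test methods.
  @Test
  public void testLocalMRIncrementalLoad() throws Exception {
    doIncrementalLoadTest(false, false);
  }

  @Test
  public void testLocalMRIncrementalLoadWithLocality() throws Exception {
    doIncrementalLoadTest(false, true);
  }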
  @Test
  public void testReducerNumEstimation() throws Exception {
    // Skip the test for Tez. Tez use a different mechanism.
    // Equivalent test is in TestTezAutoParallelism
    Assume.assumeTrue("Skip this test for TEZ", Util.isMapredExecType(cluster.getExecType()));
    // use the estimation
    Configuration conf = HBaseConfiguration.create(new Configuration());
    HBaseTestingUtility util = new HBaseTestingUtility(conf);
    int clientPort = util.startMiniZKCluster().getClientPort();
    util.startMiniHBaseCluster(1, 1);

    String query = "a = load '/passwd';" + "b = group a by $0;" + "store b into 'output';";
    PigServer ps = new PigServer(cluster.getExecType(), cluster.getProperties());
    PhysicalPlan pp = Util.buildPp(ps, query);
    MROperPlan mrPlan = Util.buildMRPlan(pp, pc);

    pc.getConf().setProperty("pig.exec.reducers.bytes.per.reducer", "100");
    pc.getConf().setProperty("pig.exec.reducers.max", "10");
    pc.getConf().setProperty(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(clientPort));
    ConfigurationValidator.validatePigProperties(pc.getProperties());
    conf = ConfigurationUtil.toConfiguration(pc.getProperties());
    JobControlCompiler jcc = new JobControlCompiler(pc, conf);
    JobControl jc = jcc.compile(mrPlan, "Test");
    Job job = jc.getWaitingJobs().get(0);
    long reducer =
        Math.min(
            (long) Math.ceil(new File("test/org/apache/pig/test/data/passwd").length() / 100.0),
            10);

    Util.assertParallelValues(-1, -1, reducer, reducer, job.getJobConf());

    // use the PARALLEL key word, it will override the estimated reducer number
    query = "a = load '/passwd';" + "b = group a by $0 PARALLEL 2;" + "store b into 'output';";
    pp = Util.buildPp(ps, query);
    mrPlan = Util.buildMRPlan(pp, pc);

    pc.getConf().setProperty("pig.exec.reducers.bytes.per.reducer", "100");
    pc.getConf().setProperty("pig.exec.reducers.max", "10");
    ConfigurationValidator.validatePigProperties(pc.getProperties());
    conf = ConfigurationUtil.toConfiguration(pc.getProperties());
    jcc = new JobControlCompiler(pc, conf);
    jc = jcc.compile(mrPlan, "Test");
    job = jc.getWaitingJobs().get(0);

    Util.assertParallelValues(-1, 2, -1, 2, job.getJobConf());

    final byte[] COLUMNFAMILY = Bytes.toBytes("pig");
    util.createTable(Bytes.toBytesBinary("test_table"), COLUMNFAMILY);

    // the estimation won't take effect when it applies to non-DFS inputs or to files that
    // don't exist, such as HBase tables
    query =
        "a = load 'hbase://test_table' using org.apache.pig.backend.hadoop.hbase.HBaseStorage('c:f1 c:f2');"
            + "b = group a by $0 ;"
            + "store b into 'output';";
    pp = Util.buildPp(ps, query);
    mrPlan = Util.buildMRPlan(pp, pc);

    pc.getConf().setProperty("pig.exec.reducers.bytes.per.reducer", "100");
    pc.getConf().setProperty("pig.exec.reducers.max", "10");

    ConfigurationValidator.validatePigProperties(pc.getProperties());
    conf = ConfigurationUtil.toConfiguration(pc.getProperties());
    jcc = new JobControlCompiler(pc, conf);
    jc = jcc.compile(mrPlan, "Test");
    job = jc.getWaitingJobs().get(0);

    Util.assertParallelValues(-1, -1, 1, 1, job.getJobConf());

    util.deleteTable(Bytes.toBytesBinary("test_table"));
    // In HBase 0.90.1 and above we can use util.shutdownMiniHBaseCluster()
    // here instead.
    MiniHBaseCluster hbc = util.getHBaseCluster();
    if (hbc != null) {
      hbc.shutdown();
      hbc.join();
    }
    util.shutdownMiniZKCluster();
  }
  @Test(timeout = 60000)
  public void testAssignmentListener() throws IOException, InterruptedException {
    AssignmentManager am = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
    Admin admin = TEST_UTIL.getHBaseAdmin();

    DummyAssignmentListener listener = new DummyAssignmentListener();
    am.registerListener(listener);
    try {
      final String TABLE_NAME_STR = "testtb";
      final TableName TABLE_NAME = TableName.valueOf(TABLE_NAME_STR);
      final byte[] FAMILY = Bytes.toBytes("cf");

      // Create a new table, with a single region
      LOG.info("Create Table");
      TEST_UTIL.createTable(TABLE_NAME, FAMILY);
      listener.awaitModifications(1);
      assertEquals(1, listener.getLoadCount());
      assertEquals(0, listener.getCloseCount());

      // Add some data
      HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE_NAME);
      try {
        for (int i = 0; i < 10; ++i) {
          byte[] key = Bytes.toBytes("row-" + i);
          Put put = new Put(key);
          put.add(FAMILY, null, key);
          table.put(put);
        }
      } finally {
        table.close();
      }

      // Split the table in two
      LOG.info("Split Table");
      listener.reset();
      admin.split(TABLE_NAME_STR, "row-3");
      listener.awaitModifications(3);
      assertEquals(2, listener.getLoadCount()); // daughters added
      assertEquals(1, listener.getCloseCount()); // parent removed

      // Wait for the Regions to be mergeable
      MiniHBaseCluster miniCluster = TEST_UTIL.getMiniHBaseCluster();
      int mergeable = 0;
      while (mergeable < 2) {
        Thread.sleep(100);
        admin.majorCompact(TABLE_NAME_STR);
        mergeable = 0;
        for (JVMClusterUtil.RegionServerThread regionThread :
            miniCluster.getRegionServerThreads()) {
          for (HRegion region : regionThread.getRegionServer().getOnlineRegions(TABLE_NAME)) {
            mergeable += region.isMergeable() ? 1 : 0;
          }
        }
      }

      // Merge the two regions
      LOG.info("Merge Regions");
      listener.reset();
      List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME);
      assertEquals(2, regions.size());
      admin.mergeRegions(
          regions.get(0).getEncodedNameAsBytes(), regions.get(1).getEncodedNameAsBytes(), true);
      listener.awaitModifications(3);
      assertEquals(1, admin.getTableRegions(TABLE_NAME).size());
      assertEquals(1, listener.getLoadCount()); // new merged region added
      assertEquals(2, listener.getCloseCount()); // daughters removed

      // Delete the table
      LOG.info("Drop Table");
      listener.reset();
      TEST_UTIL.deleteTable(TABLE_NAME);
      listener.awaitModifications(1);
      assertEquals(0, listener.getLoadCount());
      assertEquals(1, listener.getCloseCount());
    } finally {
      am.unregisterListener(listener);
    }
  }
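  // NOTE: a minimal sketch of the DummyAssignmentListener used above (hypothetical; the
  // real listener lives in the enclosing test class). It counts open/close notifications
  // from the AssignmentManager and lets the test block until enough have arrived.
  static class DummyAssignmentListener implements AssignmentListener {
    private final AtomicInteger modified = new AtomicInteger(0);
    private final AtomicInteger loadCount = new AtomicInteger(0);
    private final AtomicInteger closeCount = new AtomicInteger(0);

    @Override
    public void regionOpened(HRegionInfo regionInfo, ServerName serverName) {
      loadCount.incrementAndGet();
      modified.incrementAndGet();
    }

    @Override
    public void regionClosed(HRegionInfo regionInfo) {
      closeCount.incrementAndGet();
      modified.incrementAndGet();
    }

    public void reset() {
      modified.set(0);
      loadCount.set(0);
      closeCount.set(0);
    }

    public void awaitModifications(int count) throws InterruptedException {
      while (modified.get() < count) {
        Thread.sleep(100);
      }
    }

    public int getLoadCount() {
      return loadCount.get();
    }

    public int getCloseCount() {
      return closeCount.get();
    }
  }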
  /**
   * Demonstrate that we reject snapshot requests if a snapshot is already running on the same
   * table, and that concurrent snapshots on different tables can both succeed.
   */
  @Test
  public void testConcurrentSnapshottingAttempts() throws IOException, InterruptedException {
    final TableName TABLE2_NAME = TableName.valueOf(TABLE_NAME + "2");

    int ssNum = 20;
    // make sure we don't fail on listing snapshots
    SnapshotTestingUtils.assertNoSnapshots(admin);
    // create second testing table
    SnapshotTestingUtils.createTable(UTIL, TABLE2_NAME, TEST_FAM);
    // load the table so we have some data
    SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
    SnapshotTestingUtils.loadData(UTIL, TABLE2_NAME, DEFAULT_NUM_ROWS, TEST_FAM);

    final CountDownLatch toBeSubmitted = new CountDownLatch(ssNum);
    // We'll have one of these per thread
    class SSRunnable implements Runnable {
      SnapshotDescription ss;

      SSRunnable(SnapshotDescription ss) {
        this.ss = ss;
      }

      @Override
      public void run() {
        try {
          LOG.info(
              "Submitting snapshot request: "
                  + ClientSnapshotDescriptionUtils.toString(
                      ProtobufUtil.createHBaseProtosSnapshotDesc(ss)));
          admin.takeSnapshotAsync(ss);
        } catch (Exception e) {
          LOG.info(
              "Exception during snapshot request: "
                  + ClientSnapshotDescriptionUtils.toString(
                      ProtobufUtil.createHBaseProtosSnapshotDesc(ss))
                  + ".  This is ok, we expect some",
              e);
        }
        LOG.info(
            "Submitted snapshot request: "
                + ClientSnapshotDescriptionUtils.toString(
                    ProtobufUtil.createHBaseProtosSnapshotDesc(ss)));
        toBeSubmitted.countDown();
      }
    }

    // build descriptions
    SnapshotDescription[] descs = new SnapshotDescription[ssNum];
    for (int i = 0; i < ssNum; i++) {
      if (i % 2 == 0) {
        descs[i] = new SnapshotDescription("ss" + i, TABLE_NAME, SnapshotType.FLUSH);
      } else {
        descs[i] = new SnapshotDescription("ss" + i, TABLE2_NAME, SnapshotType.FLUSH);
      }
    }

    // kick each off its own thread
    for (int i = 0; i < ssNum; i++) {
      new Thread(new SSRunnable(descs[i])).start();
    }

    // wait until all have been submitted
    toBeSubmitted.await();

    // loop until all are done.
    while (true) {
      int doneCount = 0;
      for (SnapshotDescription ss : descs) {
        try {
          if (admin.isSnapshotFinished(ss)) {
            doneCount++;
          }
        } catch (Exception e) {
          LOG.warn("Got an exception when checking for snapshot " + ss.getName(), e);
          doneCount++;
        }
      }
      if (doneCount == descs.length) {
        break;
      }
      Thread.sleep(100);
    }

    // dump for debugging
    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);

    List<SnapshotDescription> taken = admin.listSnapshots();
    int takenSize = taken.size();
    LOG.info("Taken " + takenSize + " snapshots:  " + taken);
    assertTrue(
        "We expect at least 1 request to be rejected because we concurrently"
            + " issued many requests",
        takenSize < ssNum && takenSize > 0);

    // Verify that there's at least one snapshot per table
    int t1SnapshotsCount = 0;
    int t2SnapshotsCount = 0;
    for (SnapshotDescription ss : taken) {
      if (ss.getTableName().equals(TABLE_NAME)) {
        t1SnapshotsCount++;
      } else if (ss.getTableName().equals(TABLE2_NAME)) {
        t2SnapshotsCount++;
      }
    }
    assertTrue("We expect at least 1 snapshot of table1 ", t1SnapshotsCount > 0);
    assertTrue("We expect at least 1 snapshot of table2 ", t2SnapshotsCount > 0);

    UTIL.deleteTable(TABLE2_NAME);
  }