/*
   * Take a snapshot of a table, do a split, and verify that this only affects one table
   * @param online - Whether the table is online or not during the snapshot
   */
  private void runTestRegionOperationsIndependent(boolean online) throws Exception {
    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();

    // Create a table
    Admin admin = UTIL.getHBaseAdmin();
    final long startTime = System.currentTimeMillis();
    final TableName localTableName = TableName.valueOf(STRING_TABLE_NAME + startTime);
    Table original = UTIL.createTable(localTableName, TEST_FAM);
    UTIL.loadTable(original, TEST_FAM);
    final int loadedTableCount = UTIL.countRows(original);
    System.out.println("Original table has: " + loadedTableCount + " rows");

    final String snapshotNameAsString = "snapshot_" + localTableName;

    // Create a snapshot
    SnapshotTestingUtils.createSnapshotAndValidate(
        admin, localTableName, TEST_FAM_STR, snapshotNameAsString, rootDir, fs, online);

    if (!online) {
      admin.enableTable(localTableName);
    }

    TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);

    // Clone the snapshot
    byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
    admin.cloneSnapshot(snapshotName, cloneTableName);

    // Verify that region information is the same pre-split
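    // Clear cached region locations so the region lists below reflect the live cluster state.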
    ((ClusterConnection) UTIL.getConnection()).clearRegionCache();
    List<HRegionInfo> originalTableHRegions = admin.getTableRegions(localTableName);

    final int originalRegionCount = originalTableHRegions.size();
    final int cloneTableRegionCount = admin.getTableRegions(cloneTableName).size();
    Assert.assertEquals(
        "The number of regions in the cloned table is different than in the original table.",
        originalRegionCount,
        cloneTableRegionCount);

    // Split a region on the parent table
    admin.splitRegion(originalTableHRegions.get(0).getRegionName());
    waitOnSplit(UTIL.getConnection(), original, originalRegionCount);

    // Verify that the cloned table region is not split
    final int cloneTableRegionCount2 = admin.getTableRegions(cloneTableName).size();
    Assert.assertEquals(
        "The number of regions in the cloned table changed though none of its regions were split.",
        cloneTableRegionCount,
        cloneTableRegionCount2);
  }
 private void verify(TableName tableName) throws IOException {
   Table table = UTIL.getConnection().getTable(tableName);
   boolean verified = false;
   long pause = UTIL.getConfiguration().getLong("hbase.client.pause", 5 * 1000);
   int numRetries = UTIL.getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5);
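   // Retry the verification: concurrent writers can race the scanner, so back off and rescan.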
   for (int i = 0; i < numRetries; i++) {
     try {
       LOG.info("Verification attempt #" + i);
       verifyAttempt(table);
       verified = true;
       break;
     } catch (NullPointerException e) {
       // If here, a cell was empty. Presume it's because updates came in
       // after the scanner had been opened.  Wait a while and retry.
       LOG.debug("Verification attempt failed: " + e.getMessage());
     }
     try {
       Thread.sleep(pause);
     } catch (InterruptedException e) {
       // continue
     }
   }
   table.close();
   assertTrue(verified);
 }
  @Test(timeout = 300000)
  public void testReplicationWithCellTags() throws Exception {
    LOG.info("testSimplePutDelete");
    Put put = new Put(ROW);
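    // The 'visibility' attribute is presumably picked up by TestCoprocessorForTagsAtSource
    // (registered in setUpBeforeClass) and carried across replication as a cell tag, which
    // TestCoprocessorForTagsAtSink then collects on the other cluster.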
    put.setAttribute("visibility", Bytes.toBytes("myTag3"));
    put.add(FAMILY, ROW, ROW);

    htable1 = utility1.getConnection().getTable(TABLE_NAME);
    htable1.put(put);

    Get get = new Get(ROW);
    try {
      for (int i = 0; i < NB_RETRIES; i++) {
        if (i == NB_RETRIES - 1) {
          fail("Waited too much time for put replication");
        }
        Result res = htable2.get(get);
        if (res.size() == 0) {
          LOG.info("Row not available");
          Thread.sleep(SLEEP_TIME);
        } else {
          assertArrayEquals(ROW, res.value());
          assertEquals(1, TestCoprocessorForTagsAtSink.tags.size());
          Tag tag = TestCoprocessorForTagsAtSink.tags.get(0);
          assertEquals(TAG_TYPE, tag.getType());
          break;
        }
      }
    } finally {
      TestCoprocessorForTagsAtSink.tags = null;
    }
  }
  /**
   * Test snapshotting a table that is online without flushing
   *
   * @throws Exception
   */
  @Test
  public void testSkipFlushTableSnapshot() throws Exception {
    // make sure we don't fail on listing snapshots
    SnapshotTestingUtils.assertNoSnapshots(admin);

    // put some stuff in the table
    Table table = UTIL.getConnection().getTable(TABLE_NAME);
    UTIL.loadTable(table, TEST_FAM);
    UTIL.flush(TABLE_NAME);
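    // The explicit flush above matters: the SKIPFLUSH snapshot below will not flush for us,
    // so only data already persisted to HFiles is captured.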

    LOG.debug("FS state before snapshot:");
    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);

    // take a snapshot of the enabled table
    String snapshotString = "skipFlushTableSnapshot";
    byte[] snapshot = Bytes.toBytes(snapshotString);
    admin.snapshot(snapshotString, TABLE_NAME, SnapshotType.SKIPFLUSH);
    LOG.debug("Snapshot completed.");

    // make sure we have the snapshot
    List<SnapshotDescription> snapshots =
        SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME);

    // make sure it's a valid snapshot
    LOG.debug("FS state after snapshot:");
    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);

    SnapshotTestingUtils.confirmSnapshotValid(
        UTIL, ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)), TABLE_NAME, TEST_FAM);

    admin.deleteSnapshot(snapshot);
    snapshots = admin.listSnapshots();
    SnapshotTestingUtils.assertNoSnapshots(admin);
  }
 public AtomicScanReader(TableName TABLE_NAME, TestContext ctx, byte targetFamilies[][])
     throws IOException {
   super(ctx);
   this.TABLE_NAME = TABLE_NAME;
   this.targetFamilies = targetFamilies;
   table = UTIL.getConnection().getTable(TABLE_NAME);
 }
  /*
   * Take a snapshot of a table, add data, and verify that deleting the snapshot does not affect
   * either table.
   * @param online - Whether the table is online or not during the snapshot
   */
  private void runTestSnapshotDeleteIndependent(boolean online) throws Exception {
    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();

    final Admin admin = UTIL.getHBaseAdmin();
    final long startTime = System.currentTimeMillis();
    final TableName localTableName = TableName.valueOf(STRING_TABLE_NAME + startTime);

    try (Table original = UTIL.createTable(localTableName, TEST_FAM)) {
      UTIL.loadTable(original, TEST_FAM);
    }

    // Take a snapshot
    final String snapshotNameAsString = "snapshot_" + localTableName;
    byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);

    SnapshotTestingUtils.createSnapshotAndValidate(
        admin, localTableName, TEST_FAM_STR, snapshotNameAsString, rootDir, fs, online);

    if (!online) {
      admin.enableTable(localTableName);
    }
    TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);
    admin.cloneSnapshot(snapshotName, cloneTableName);

    // Ensure the original table does not reference the HFiles anymore
    admin.majorCompact(localTableName);

    // Deleting the snapshot used to break the cloned table by deleting in-use HFiles
    admin.deleteSnapshot(snapshotName);

    // Wait for cleaner run and DFS heartbeats so that anything that is deletable is fully deleted
    Thread.sleep(10000);

    try (Table original = UTIL.getConnection().getTable(localTableName)) {
      try (Table clonedTable = UTIL.getConnection().getTable(cloneTableName)) {
        // Verify that all regions of both tables are readable
        final int origTableRowCount = UTIL.countRows(original);
        final int clonedTableRowCount = UTIL.countRows(clonedTable);
        Assert.assertEquals(origTableRowCount, clonedTableRowCount);
      }
    }
  }
  /**
   * Returns the number of rows in a given table. HBase must be up and the table must be present;
   * otherwise the scan will keep retrying until the configured client timeout is exhausted.
   *
   * @return # of rows in the specified table
   */
  protected int tableRowCount(Configuration conf, TableName table) throws IOException {
    // Use try-with-resources so the table and scanner are closed even if the scan fails.
    try (Table t = TEST_UTIL.getConnection().getTable(table);
        ResultScanner rst = t.getScanner(new Scan())) {
      int count = 0;
      for (@SuppressWarnings("unused") Result rt : rst) {
        count++;
      }
      return count;
    }
  }
 public void manualTest(String args[]) throws Exception {
   Configuration conf = HBaseConfiguration.create();
   util = new HBaseTestingUtility(conf);
   if ("newtable".equals(args[0])) {
     TableName tname = TableName.valueOf(args[1]);
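     // 4 split keys yield a 5-region table.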
     byte[][] splitKeys = generateRandomSplitKeys(4);
     HTable table = util.createTable(tname, FAMILIES, splitKeys);
   } else if ("incremental".equals(args[0])) {
     TableName tname = TableName.valueOf(args[1]);
     HTable table = (HTable) util.getConnection().getTable(tname);
     Path outDir = new Path("incremental-out");
     runIncrementalPELoad(conf, table, outDir);
   } else {
     throw new RuntimeException("usage: TestHFileOutputFormat newtable | incremental");
   }
 }
  /**
   * Write a random data file and a non-file into a directory with a valid family name that is not
   * one of the table's families. We should be able to bulk load without getting the unmatched
   * family exception.
   * HBASE-13037/HBASE-13227
   */
  private void testNonHfileFolder(String tableName, boolean preCreateTable) throws Exception {
    Path dir = util.getDataTestDirOnTestFS(tableName);
    FileSystem fs = util.getTestFileSystem();
    dir = dir.makeQualified(fs);

    Path familyDir = new Path(dir, Bytes.toString(FAMILY));
    HFileTestUtil.createHFile(
        util.getConfiguration(),
        fs,
        new Path(familyDir, "hfile_0"),
        FAMILY,
        QUALIFIER,
        Bytes.toBytes("begin"),
        Bytes.toBytes("end"),
        500);
    createRandomDataFile(fs, new Path(familyDir, "012356789"), 16 * 1024);

    final String NON_FAMILY_FOLDER = "_logs";
    Path nonFamilyDir = new Path(dir, NON_FAMILY_FOLDER);
    fs.mkdirs(nonFamilyDir);
    fs.mkdirs(new Path(nonFamilyDir, "non-file"));
    createRandomDataFile(fs, new Path(nonFamilyDir, "012356789"), 16 * 1024);

    Table table = null;
    try {
      if (preCreateTable) {
        table = util.createTable(TableName.valueOf(tableName), FAMILY);
      } else {
        table = util.getConnection().getTable(TableName.valueOf(tableName));
      }

      final String[] args = {dir.toString(), tableName};
      new LoadIncrementalHFiles(util.getConfiguration()).run(args);
      assertEquals(500, util.countRows(table));
    } finally {
      if (table != null) {
        table.close();
      }
      fs.delete(dir, true);
    }
  }
  protected HRegionInfo createRegion(
      Configuration conf, final Table htbl, byte[] startKey, byte[] endKey) throws IOException {
    Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
    HTableDescriptor htd = htbl.getTableDescriptor();
    HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);

    LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()), hri.getEncodedName());
    fs.mkdirs(p);
    Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
    try (FSDataOutputStream out = fs.create(riPath)) {
      out.write(hri.toDelimitedByteArray());
    }

    // add to meta.
    MetaTableAccessor.addRegionToMeta(meta, hri);
    meta.close();
    return hri;
  }
 protected void wipeOutMeta() throws IOException {
   // Mess it up by blowing up meta.
   Admin admin = TEST_UTIL.getHBaseAdmin();
   Scan s = new Scan();
   Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
   ResultScanner scanner = meta.getScanner(s);
   List<Delete> dels = new ArrayList<Delete>();
   for (Result r : scanner) {
     HRegionInfo info = HRegionInfo.getHRegionInfo(r);
     if (info != null
         && !info.getTable()
             .getNamespaceAsString()
             .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
       Delete d = new Delete(r.getRow());
       dels.add(d);
       admin.unassign(r.getRow(), true);
     }
   }
   meta.delete(dels);
   scanner.close();
   meta.close();
 }
  @Test
  public void testAclTableEntries() throws Exception {
    String userTestNamespace = "userTestNsp";
    Table acl = UTIL.getConnection().getTable(AccessControlLists.ACL_TABLE_NAME);
    try {
      ListMultimap<String, TablePermission> perms =
          AccessControlLists.getNamespacePermissions(conf, TEST_NAMESPACE);

      perms = AccessControlLists.getNamespacePermissions(conf, TEST_NAMESPACE);
      for (Map.Entry<String, TablePermission> entry : perms.entries()) {
        LOG.debug(entry);
      }
      assertEquals(6, perms.size());

      // Grant and check state in ACL table
      grantOnNamespace(UTIL, userTestNamespace, TEST_NAMESPACE, Permission.Action.WRITE);

      Result result = acl.get(new Get(Bytes.toBytes(userTestNamespace)));
      assertNotNull(result);
      perms = AccessControlLists.getNamespacePermissions(conf, TEST_NAMESPACE);
      assertEquals(7, perms.size());
      List<TablePermission> namespacePerms = perms.get(userTestNamespace);
      assertTrue(perms.containsKey(userTestNamespace));
      assertEquals(1, namespacePerms.size());
      assertEquals(TEST_NAMESPACE, namespacePerms.get(0).getNamespace());
      assertNull(namespacePerms.get(0).getFamily());
      assertNull(namespacePerms.get(0).getQualifier());
      assertEquals(1, namespacePerms.get(0).getActions().length);
      assertEquals(Permission.Action.WRITE, namespacePerms.get(0).getActions()[0]);

      // Revoke and check state in ACL table
      revokeFromNamespace(UTIL, userTestNamespace, TEST_NAMESPACE, Permission.Action.WRITE);

      perms = AccessControlLists.getNamespacePermissions(conf, TEST_NAMESPACE);
      assertEquals(6, perms.size());
    } finally {
      acl.close();
    }
  }
  @Test
  public void testInterrupt50Percent() throws IOException, InterruptedException {
    final AtomicInteger noEx = new AtomicInteger(0);
    final AtomicInteger badEx = new AtomicInteger(0);
    final AtomicInteger noInt = new AtomicInteger(0);
    final AtomicInteger done = new AtomicInteger(0);
    List<Thread> threads = new ArrayList<Thread>();

    final int nbThread = 100;

    for (int i = 0; i < nbThread; i++) {
      Thread t =
          new Thread() {
            @Override
            public void run() {
              try {
                Table ht = util.getConnection().getTable(tableName);
                Result r = ht.get(new Get(row1));
                noEx.incrementAndGet();
              } catch (IOException e) {
                LOG.info("exception", e);
                if (!(e instanceof InterruptedIOException)
                    || (e instanceof SocketTimeoutException)) {
                  badEx.incrementAndGet();
                } else {
                  if (Thread.currentThread().isInterrupted()) {
                    noInt.incrementAndGet();
                    LOG.info("The thread should NOT be with the 'interrupt' status.");
                  }
                }
              } finally {
                done.incrementAndGet();
              }
            }
          };
      t.setName("TestClientOperationInterrupt #" + i);
      threads.add(t);
      t.start();
    }

    for (int i = 0; i < nbThread / 2; i++) {
      threads.get(i).interrupt();
    }

    boolean stillAlive = true;
    while (stillAlive) {
      stillAlive = false;
      for (Thread t : threads) {
        if (t.isAlive()) {
          stillAlive = true;
        }
      }
      Threads.sleep(10);
    }

    Assert.assertFalse(Thread.currentThread().isInterrupted());

    Assert.assertTrue(
        " noEx: " + noEx.get() + ", badEx=" + badEx.get() + ", noInt=" + noInt.get(),
        noEx.get() == nbThread / 2 && badEx.get() == 0);

    // The problem here is that we need the server to free its handlers to handle all operations
    while (done.get() != nbThread) {
      Thread.sleep(1);
    }

    Table ht = util.getConnection().getTable(tableName);
    Result r = ht.get(new Get(row1));
    Assert.assertFalse(r.isEmpty());
  }
 @Before
 public void setup() throws Exception {
   createTable();
   this.admin = UTIL.getConnection().getAdmin();
 }
  private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality)
      throws Exception {
    util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();
    conf.setBoolean(HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY, shouldKeepLocality);
    int hostCount = 1;
    int regionNum = 5;
    if (shouldKeepLocality) {
      // Raise the host count above the HDFS replica count once MiniHBaseCluster supports an
      // explicit hostnames parameter, as MiniDFSCluster already does.
      hostCount = 3;
      regionNum = 20;
    }

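    // regionNum - 1 split keys produce exactly regionNum regions.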
    byte[][] splitKeys = generateRandomSplitKeys(regionNum - 1);
    String[] hostnames = new String[hostCount];
    for (int i = 0; i < hostCount; ++i) {
      hostnames[i] = "datanode_" + i;
    }
    util.startMiniCluster(1, hostCount, hostnames);

    Table table = util.createTable(TABLE_NAME, FAMILIES, splitKeys);
    Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
    try (RegionLocator r = util.getConnection().getRegionLocator(TABLE_NAME);
        Admin admin = util.getConnection().getAdmin()) {
      assertEquals("Should start with empty table", 0, util.countRows(table));
      int numRegions = r.getStartKeys().length;
      assertEquals("Should make " + regionNum + " regions", numRegions, regionNum);

      // Generate the bulk load files
      runIncrementalPELoad(conf, table.getTableDescriptor(), r, testDir);
      // This doesn't write into the table, just makes files
      assertEquals("HFOF should not touch actual table", 0, util.countRows(table));

      // Make sure that a directory was created for every CF
      int dir = 0;
      for (FileStatus f : testDir.getFileSystem(conf).listStatus(testDir)) {
        for (byte[] family : FAMILIES) {
          if (Bytes.toString(family).equals(f.getPath().getName())) {
            ++dir;
          }
        }
      }
      assertEquals("Column family not found in FS.", FAMILIES.length, dir);

      // handle the split case
      if (shouldChangeRegions) {
        LOG.info("Changing regions in table");
        admin.disableTable(table.getName());
        while (util.getMiniHBaseCluster()
            .getMaster()
            .getAssignmentManager()
            .getRegionStates()
            .isRegionsInTransition()) {
          Threads.sleep(200);
          LOG.info("Waiting on table to finish disabling");
        }
        util.deleteTable(table.getName());
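        // 14 split keys -> 15 regions, which the availability wait below checks for.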
        byte[][] newSplitKeys = generateRandomSplitKeys(14);
        table = util.createTable(TABLE_NAME, FAMILIES, newSplitKeys);

        while (util.getConnection().getRegionLocator(TABLE_NAME).getAllRegionLocations().size()
                != 15
            || !admin.isTableAvailable(table.getName())) {
          Thread.sleep(200);
          LOG.info("Waiting for new region assignment to happen");
        }
      }

      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, r);

      // Ensure data shows up
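      // Each map task writes ROWSPERSPLIT rows, so the table should hold numMapTasks * ROWSPERSPLIT.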
      int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals(
          "LoadIncrementalHFiles should put expected data in table",
          expectedRows,
          util.countRows(table));
      Scan scan = new Scan();
      ResultScanner results = table.getScanner(scan);
      for (Result res : results) {
        assertEquals(FAMILIES.length, res.rawCells().length);
        Cell first = res.rawCells()[0];
        for (Cell kv : res.rawCells()) {
          assertTrue(CellUtil.matchingRow(first, kv));
          assertTrue(Bytes.equals(CellUtil.cloneValue(first), CellUtil.cloneValue(kv)));
        }
      }
      results.close();
      String tableDigestBefore = util.checksumRows(table);

      // Check region locality
      HDFSBlocksDistribution hbd = new HDFSBlocksDistribution();
      for (HRegion region : util.getHBaseCluster().getRegions(TABLE_NAME)) {
        hbd.add(region.getHDFSBlocksDistribution());
      }
      for (String hostname : hostnames) {
        float locality = hbd.getBlockLocalityIndex(hostname);
        LOG.info("locality of [" + hostname + "]: " + locality);
        assertEquals(100, (int) (locality * 100));
      }

      // Cause regions to reopen
      admin.disableTable(TABLE_NAME);
      while (!admin.isTableDisabled(TABLE_NAME)) {
        Thread.sleep(200);
        LOG.info("Waiting for table to disable");
      }
      admin.enableTable(TABLE_NAME);
      util.waitTableAvailable(TABLE_NAME);
      assertEquals(
          "Data should remain after reopening of regions",
          tableDigestBefore,
          util.checksumRows(table));
    } finally {
      testDir.getFileSystem(conf).delete(testDir, true);
      util.deleteTable(TABLE_NAME);
      util.shutdownMiniCluster();
    }
  }
 /**
  * Dumps hbase:meta table info
  *
  * @return # of entries in meta.
  */
 protected int scanMeta() throws IOException {
   LOG.info("Scanning META");
   MetaTableAccessor.fullScanMetaAndPrint(TEST_UTIL.getConnection());
   return MetaTableAccessor.fullScanRegions(TEST_UTIL.getConnection()).size();
 }
 /**
  * Test MultithreadedTableMapper map/reduce against a multi-region table
  *
  * @throws IOException
  * @throws ClassNotFoundException
  * @throws InterruptedException
  */
 @Test
 public void testMultithreadedTableMapper()
     throws IOException, InterruptedException, ClassNotFoundException {
   runTestOnTable(UTIL.getConnection().getTable(MULTI_REGION_TABLE_NAME));
 }
  @SuppressWarnings("deprecation")
  @Test(timeout = 300000)
  public void testReplicaAndReplication() throws Exception {
    HTableDescriptor hdt = HTU.createTableDescriptor("testReplicaAndReplication");
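    // NB_SERVERS replicas per region: one primary plus NB_SERVERS - 1 read replicas.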
    hdt.setRegionReplication(NB_SERVERS);

    HColumnDescriptor fam = new HColumnDescriptor(row);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    hdt.addFamily(fam);

    hdt.addCoprocessor(SlowMeCopro.class.getName());
    HTU.getHBaseAdmin().createTable(hdt, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);

    Configuration conf2 = HBaseConfiguration.create(HTU.getConfiguration());
    conf2.set(HConstants.HBASE_CLIENT_INSTANCE_ID, String.valueOf(-1));
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    MiniZooKeeperCluster miniZK = HTU.getZkCluster();

    HTU2 = new HBaseTestingUtility(conf2);
    HTU2.setZkCluster(miniZK);
    HTU2.startMiniCluster(NB_SERVERS);
    LOG.info("Setup second Zk");
    HTU2.getHBaseAdmin().createTable(hdt, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);

    ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
    admin.addPeer("2", HTU2.getClusterKey());
    admin.close();

    Put p = new Put(row);
    p.add(row, row, row);
    final Table table = HTU.getConnection().getTable(hdt.getTableName());
    table.put(p);

    HTU.getHBaseAdmin().flush(table.getName());
    LOG.info("Put & flush done on the first cluster. Now doing a get on the same cluster.");

    Waiter.waitFor(
        HTU.getConfiguration(),
        1000,
        new Waiter.Predicate<Exception>() {
          @Override
          public boolean evaluate() throws Exception {
            try {
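              // Arm the SlowMeCopro latch so the primary replica blocks; the TIMELINE get can
              // then be answered, stale, by a secondary replica.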
              SlowMeCopro.cdl.set(new CountDownLatch(1));
              Get g = new Get(row);
              g.setConsistency(Consistency.TIMELINE);
              Result r = table.get(g);
              Assert.assertTrue(r.isStale());
              return !r.isEmpty();
            } finally {
              SlowMeCopro.cdl.get().countDown();
              SlowMeCopro.sleepTime.set(0);
            }
          }
        });
    table.close();
    LOG.info("stale get on the first cluster done. Now for the second.");

    final Table table2 = HTU.getConnection().getTable(hdt.getTableName());
    Waiter.waitFor(
        HTU.getConfiguration(),
        1000,
        new Waiter.Predicate<Exception>() {
          @Override
          public boolean evaluate() throws Exception {
            try {
              SlowMeCopro.cdl.set(new CountDownLatch(1));
              Get g = new Get(row);
              g.setConsistency(Consistency.TIMELINE);
              Result r = table2.get(g);
              Assert.assertTrue(r.isStale());
              return !r.isEmpty();
            } finally {
              SlowMeCopro.cdl.get().countDown();
              SlowMeCopro.sleepTime.set(0);
            }
          }
        });
    table2.close();

    HTU.getHBaseAdmin().disableTable(hdt.getTableName());
    HTU.deleteTable(hdt.getTableName());

    HTU2.getHBaseAdmin().disableTable(hdt.getTableName());
    HTU2.deleteTable(hdt.getTableName());

    // We shut down the HTU2 minicluster later, in afterClass(), because shutting it down here
    // would have the side effect of closing all HConnections in the JVM.
  }
  /**
   * Unfortunately, the easiest way to test this is to spin up a mini-cluster since we want to do
   * the usual compaction mechanism on the region, rather than going through the backdoor to the
   * region
   */
  @Test
  public void testRegionObserverCompactionTimeStacking() throws Exception {
    // setup a mini cluster so we can do a real compaction on a region
    Configuration conf = UTIL.getConfiguration();
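    // Use a region subclass that exposes a latch signalling when compaction has completed.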
    conf.setClass(HConstants.REGION_IMPL, CompactionCompletionNotifyingRegion.class, HRegion.class);
    conf.setInt("hbase.hstore.compaction.min", 2);
    UTIL.startMiniCluster();
    String tableName = "testRegionObserverCompactionTimeStacking";
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] A = Bytes.toBytes("A");
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    desc.addFamily(new HColumnDescriptor(A));
    desc.addCoprocessor(EmptyRegionObsever.class.getName(), null, Coprocessor.PRIORITY_USER, null);
    desc.addCoprocessor(
        NoDataFromCompaction.class.getName(), null, Coprocessor.PRIORITY_HIGHEST, null);

    Admin admin = UTIL.getHBaseAdmin();
    admin.createTable(desc);

    Table table = UTIL.getConnection().getTable(desc.getTableName());

    // put a row and flush it to disk
    Put put = new Put(ROW);
    put.add(A, A, A);
    table.put(put);

    HRegionServer rs = UTIL.getRSForFirstRegionInTable(desc.getTableName());
    List<HRegion> regions = rs.getOnlineRegions(desc.getTableName());
    assertEquals("More than 1 region serving test table with 1 row", 1, regions.size());
    HRegion region = regions.get(0);
    admin.flushRegion(region.getRegionName());
    CountDownLatch latch =
        ((CompactionCompletionNotifyingRegion) region).getCompactionStateChangeLatch();

    // put another row and flush that too
    put = new Put(Bytes.toBytes("anotherrow"));
    put.add(A, A, A);
    table.put(put);
    admin.flushRegion(region.getRegionName());

    // run a compaction, which normally would get rid of the data
    // wait for the compaction checker to complete
    latch.await();
    // check both rows to ensure that they aren't there
    Get get = new Get(ROW);
    Result r = table.get(get);
    assertNull(
        "Got an unexpected number of rows - no data should be returned with the NoDataFromCompaction coprocessor. Found: "
            + r,
        r.listCells());

    get = new Get(Bytes.toBytes("anotherrow"));
    r = table.get(get);
    assertNull(
        "Got an unexpected number of rows - no data should be returned with the NoDataFromCompaction coprocessor. Found: "
            + r,
        r.listCells());

    table.close();
    UTIL.shutdownMiniCluster();
  }
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
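    // HFile format v3 is required for cells to carry tags.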
    conf1.setInt("hfile.format.version", 3);
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    conf1.setInt("replication.source.size.capacity", 10240);
    conf1.setLong("replication.source.sleepforretries", 100);
    conf1.setInt("hbase.regionserver.maxlogs", 10);
    conf1.setLong("hbase.master.logcleaner.ttl", 10);
    conf1.setInt("zookeeper.recovery.retry", 1);
    conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
    conf1.setBoolean("dfs.support.append", true);
    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    conf1.setInt("replication.stats.thread.period.seconds", 5);
    conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
    conf1.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
    conf1.setStrings(
        CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
        TestCoprocessorForTagsAtSource.class.getName());

    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
    // Have to re-get conf1 in case the zk cluster location differs from the default
    conf1 = utility1.getConfiguration();
    replicationAdmin = new ReplicationAdmin(conf1);
    LOG.info("Setup first Zk");

    // Base conf2 on conf1 so it gets the right zk cluster.
    conf2 = HBaseConfiguration.create(conf1);
    conf2.setInt("hfile.format.version", 3);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
    conf2.setBoolean("dfs.support.append", true);
    conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
    conf2.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
    conf2.setStrings(
        CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
        TestCoprocessorForTagsAtSink.class.getName());

    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK);

    replicationAdmin.addPeer("2", utility2.getClusterKey());

    LOG.info("Setup second Zk");
    utility1.startMiniCluster(2);
    utility2.startMiniCluster(2);

    HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
    HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
    fam.setMaxVersions(3);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    table.addFamily(fam);
    try (Connection conn = ConnectionFactory.createConnection(conf1);
        Admin admin = conn.getAdmin()) {
      admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    }
    try (Connection conn = ConnectionFactory.createConnection(conf2);
        Admin admin = conn.getAdmin()) {
      admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
    }
    htable1 = utility1.getConnection().getTable(TABLE_NAME);
    htable2 = utility2.getConnection().getTable(TABLE_NAME);
  }
  /*
   * Take a snapshot of a table, add data, and verify that this only
   * affects one table
   * @param online - Whether the table is online or not during the snapshot
   */
  private void runTestSnapshotAppendIndependent(boolean online) throws Exception {
    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();

    Admin admin = UTIL.getHBaseAdmin();
    final long startTime = System.currentTimeMillis();
    final TableName localTableName = TableName.valueOf(STRING_TABLE_NAME + startTime);

    try (Table original = UTIL.createTable(localTableName, TEST_FAM)) {
      UTIL.loadTable(original, TEST_FAM);
      final int origTableRowCount = UTIL.countRows(original);

      // Take a snapshot
      final String snapshotNameAsString = "snapshot_" + localTableName;
      byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);

      SnapshotTestingUtils.createSnapshotAndValidate(
          admin, localTableName, TEST_FAM_STR, snapshotNameAsString, rootDir, fs, online);

      if (!online) {
        admin.enableTable(localTableName);
      }
      TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);
      admin.cloneSnapshot(snapshotName, cloneTableName);

      try (Table clonedTable = UTIL.getConnection().getTable(cloneTableName)) {
        final int clonedTableRowCount = UTIL.countRows(clonedTable);

        Assert.assertEquals(
            "The line counts of original and cloned tables do not match after clone. ",
            origTableRowCount,
            clonedTableRowCount);

        // Attempt to add data to the original table
        final String rowKey = "new-row-" + System.currentTimeMillis();

        Put p = new Put(Bytes.toBytes(rowKey));
        p.add(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString"));
        original.put(p);

        // Verify that the put took effect in the original table but not in the clone
        Assert.assertEquals(
            "The row count of the original table was not modified by the put",
            origTableRowCount + 1,
            UTIL.countRows(original));
        Assert.assertEquals(
            "The row count of the cloned table changed as a result of addition to the original",
            clonedTableRowCount,
            UTIL.countRows(clonedTable));

        p = new Put(Bytes.toBytes(rowKey));
        p.add(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString"));
        clonedTable.put(p);

        // Verify that the put to the clone affected only the clone
        Assert.assertEquals(
            "The row count of the original table was modified by the put to the clone",
            origTableRowCount + 1,
            UTIL.countRows(original));
        Assert.assertEquals(
            "The row count of the cloned table was not modified by the put",
            clonedTableRowCount + 1,
            UTIL.countRows(clonedTable));
      }
    }
  }
 public void setUp() throws Exception {
   TEST_UTIL.startMiniCluster(1);
   this.connection = TEST_UTIL.getConnection();
 }