Example 1
  /**
   * Assert that getSplitEditFilesSorted returns files in expected order and that it skips
   * moved-aside files.
   *
   * @throws IOException
   */
  @Test
  public void testGetSplitEditFilesSorted() throws IOException {
    FileSystem fs = FileSystem.get(util.getConfiguration());
    Path regiondir = util.getDataTestDir("regiondir");
    fs.delete(regiondir, true);
    fs.mkdirs(regiondir);
    Path recoverededits = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
    String first = WALSplitter.formatRecoveredEditsFileName(-1);
    createFile(fs, recoverededits, first);
    createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(0));
    createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(1));
    createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(11));
    createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(2));
    createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(50));
    String last = WALSplitter.formatRecoveredEditsFileName(Long.MAX_VALUE);
    createFile(fs, recoverededits, last);
    createFile(
        fs, recoverededits, Long.toString(Long.MAX_VALUE) + "." + System.currentTimeMillis());

    final Configuration walConf = new Configuration(util.getConfiguration());
    FSUtils.setRootDir(walConf, regiondir);
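    // Also stand up a WAL rooted at regiondir; nothing it writes should leak
    // into the recovered-edits listing checked below.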
    (new WALFactory(walConf, null, "dummyLogName")).getWAL(new byte[] {}, null);

    NavigableSet<Path> files = WALSplitter.getSplitEditFilesSorted(fs, regiondir);
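    // Seven edits files were created above; the moved-aside
    // <Long.MAX_VALUE>.<timestamp> file must be skipped.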
    assertEquals(7, files.size());
    assertEquals(first, files.pollFirst().getName());
    assertEquals(last, files.pollLast().getName());
    assertEquals(WALSplitter.formatRecoveredEditsFileName(0), files.pollFirst().getName());
    assertEquals(WALSplitter.formatRecoveredEditsFileName(1), files.pollFirst().getName());
    assertEquals(WALSplitter.formatRecoveredEditsFileName(2), files.pollFirst().getName());
    assertEquals(WALSplitter.formatRecoveredEditsFileName(11), files.pollFirst().getName());
  }
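
  // The createFile helper used above is not part of this excerpt. A minimal
  // sketch, assuming all the tests need is an empty file with the given name
  // (this body is a guess, not the original helper):
  private void createFile(final FileSystem fs, final Path testdir, final String name)
      throws IOException {
    fs.createNewFile(new Path(testdir, name));
  }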
Example 2
  /**
   * Assert that getSplitEditFilesSorted returns files in expected order and that it skips
   * moved-aside files.
   *
   * @throws IOException
   */
  @Test
  public void testGetSplitEditFilesSorted() throws IOException {
    FileSystem fs = FileSystem.get(util.getConfiguration());
    Path regiondir = util.getDataTestDir("regiondir");
    fs.delete(regiondir, true);
    fs.mkdirs(regiondir);
    Path recoverededits = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
    String first = HLogSplitter.formatRecoveredEditsFileName(-1);
    createFile(fs, recoverededits, first);
    createFile(fs, recoverededits, HLogSplitter.formatRecoveredEditsFileName(0));
    createFile(fs, recoverededits, HLogSplitter.formatRecoveredEditsFileName(1));
    createFile(fs, recoverededits, HLogSplitter.formatRecoveredEditsFileName(11));
    createFile(fs, recoverededits, HLogSplitter.formatRecoveredEditsFileName(2));
    createFile(fs, recoverededits, HLogSplitter.formatRecoveredEditsFileName(50));
    String last = HLogSplitter.formatRecoveredEditsFileName(Long.MAX_VALUE);
    createFile(fs, recoverededits, last);
    createFile(
        fs, recoverededits, Long.toString(Long.MAX_VALUE) + "." + System.currentTimeMillis());

    HLogFactory.createHLog(fs, regiondir, "dummyLogName", util.getConfiguration());
    NavigableSet<Path> files = HLogUtil.getSplitEditFilesSorted(fs, regiondir);
    assertEquals(7, files.size());
    assertEquals(first, files.pollFirst().getName());
    assertEquals(last, files.pollLast().getName());
    assertEquals(HLogSplitter.formatRecoveredEditsFileName(0), files.pollFirst().getName());
    assertEquals(HLogSplitter.formatRecoveredEditsFileName(1), files.pollFirst().getName());
    assertEquals(HLogSplitter.formatRecoveredEditsFileName(2), files.pollFirst().getName());
    assertEquals(HLogSplitter.formatRecoveredEditsFileName(11), files.pollFirst().getName());
  }
 @Test
 public void testNoSuchTable() throws IOException {
   final String name = "testNoSuchTable";
   FileSystem fs = FileSystem.get(UTIL.getConfiguration());
   // Clean up old test data if any detritus is lying around.
   Path rootdir = new Path(UTIL.getDataTestDir(), name);
   TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
   assertNull("There shouldn't be any HTD for this table", htds.get("NoSuchTable"));
 }
 @Test
 public void testReadingHTDFromFS() throws IOException {
   final String name = "testReadingHTDFromFS";
   FileSystem fs = FileSystem.get(UTIL.getConfiguration());
   HTableDescriptor htd = new HTableDescriptor(name);
   Path rootdir = UTIL.getDataTestDir(name);
   createHTDInFS(fs, rootdir, htd);
   HTableDescriptor htd2 =
       FSTableDescriptors.getTableDescriptor(fs, rootdir, htd.getNameAsString());
   assertEquals(htd, htd2);
 }
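
 // The createHTDInFS helper called above is not shown in this excerpt. A
 // minimal sketch, assuming it only needs to persist the descriptor under
 // rootdir (this body is a guess, not the original helper):
 private void createHTDInFS(final FileSystem fs, Path rootdir, final HTableDescriptor htd)
     throws IOException {
   FSTableDescriptors.createTableDescriptor(fs, rootdir, htd);
 }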
 @Test
 public void testUpdates() throws IOException {
   final String name = "testUpdates";
   FileSystem fs = FileSystem.get(UTIL.getConfiguration());
   // Clean up old test data if any detritus is lying around.
   Path rootdir = new Path(UTIL.getDataTestDir(), name);
   TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
   HTableDescriptor htd = new HTableDescriptor(name);
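   // Repeated add() calls for the same descriptor must simply succeed; the
   // test passes as long as none of them throws.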
   htds.add(htd);
   htds.add(htd);
   htds.add(htd);
 }
 @Test
 public void testHTableDescriptors() throws IOException, InterruptedException {
   final String name = "testHTableDescriptors";
   FileSystem fs = FileSystem.get(UTIL.getConfiguration());
   // Clean up old test data if any debris is lying around.
   Path rootdir = new Path(UTIL.getDataTestDir(), name);
   final int count = 10;
   // Write out table infos.
   for (int i = 0; i < count; i++) {
     HTableDescriptor htd = new HTableDescriptor(name + i);
     createHTDInFS(fs, rootdir, htd);
   }
   FSTableDescriptors htds =
       new FSTableDescriptors(fs, rootdir) {
         @Override
         public HTableDescriptor get(byte[] tablename)
             throws TableExistsException, FileNotFoundException, IOException {
           LOG.info(Bytes.toString(tablename) + ", cachehits=" + this.cachehits);
           return super.get(tablename);
         }
       };
   // First round of reads: nothing is cached yet, so these come off the filesystem.
   for (int i = 0; i < count; i++) {
     assertNotNull(htds.get(Bytes.toBytes(name + i)));
   }
   // Second round of reads: these should now be served from the cache.
   for (int i = 0; i < count; i++) {
     assertNotNull(htds.get(Bytes.toBytes(name + i)));
   }
   // Update the table infos
   for (int i = 0; i < count; i++) {
     HTableDescriptor htd = new HTableDescriptor(name + i);
     htd.addFamily(new HColumnDescriptor("" + i));
     FSTableDescriptors.updateHTableDescriptor(fs, rootdir, htd);
   }
   // Wait a while so the mod time we wrote above is for sure different.
   Thread.sleep(100);
   // First round after the update: the newer mod time forces a re-read.
   for (int i = 0; i < count; i++) {
     assertNotNull(htds.get(Bytes.toBytes(name + i)));
   }
   // Second round after the update: served from the cache again.
   for (int i = 0; i < count; i++) {
     assertNotNull(htds.get(Bytes.toBytes(name + i)));
   }
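   // Four loops of count reads each ran above: count * 4 invocations in all.
   // The second loop of each pair should have been answered from the cache,
   // hence at least count * 2 cache hits.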
   assertEquals(count * 4, htds.invocations);
   assertTrue(
       "expected=" + (count * 2) + ", actual=" + htds.cachehits, htds.cachehits >= (count * 2));
   assertNotNull(htds.get(HConstants.ROOT_TABLE_NAME));
   assertEquals(count * 4 + 1, htds.invocations);
   assertTrue(
       "expected=" + ((count * 2) + 1) + ", actual=" + htds.cachehits,
       htds.cachehits >= ((count * 2) + 1));
 }
 @Test
 public void testSequenceidAdvancesOnTableInfo() throws IOException {
   Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo");
   HTableDescriptor htd = new HTableDescriptor("testSequenceidAdvancesOnTableInfo");
   FileSystem fs = FileSystem.get(UTIL.getConfiguration());
   Path p0 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
   int i0 = FSTableDescriptors.getTableInfoSequenceid(p0);
   Path p1 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
   // Assert we cleaned up the old file.
   assertFalse(fs.exists(p0));
   int i1 = FSTableDescriptors.getTableInfoSequenceid(p1);
   assertEquals(i0 + 1, i1);
   Path p2 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
   // Assert we cleaned up the old file.
   assertFalse(fs.exists(p1));
   int i2 = FSTableDescriptors.getTableInfoSequenceid(p2);
   assertEquals(i1 + 1, i2);
 }
 @Test
 public void testCreateAndUpdate() throws IOException {
   Path testdir = UTIL.getDataTestDir("testCreateAndUpdate");
   HTableDescriptor htd = new HTableDescriptor("testCreate");
   FileSystem fs = FileSystem.get(UTIL.getConfiguration());
   assertTrue(FSTableDescriptors.createTableDescriptor(fs, testdir, htd));
   assertFalse(FSTableDescriptors.createTableDescriptor(fs, testdir, htd));
   FileStatus[] statuses = fs.listStatus(testdir);
   assertTrue("statuses.length=" + statuses.length, statuses.length == 1);
   for (int i = 0; i < 10; i++) {
     FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd);
   }
   statuses = fs.listStatus(testdir);
   assertEquals(1, statuses.length);
   Path tmpTableDir = new Path(FSUtils.getTablePath(testdir, htd.getName()), ".tmp");
   statuses = fs.listStatus(tmpTableDir);
   assertEquals(0, statuses.length);
 }
Example 9
  @Test
  public void testLogCleaning() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    // set TTL
    long ttl = 10000;
    conf.setLong("hbase.master.logcleaner.ttl", ttl);
    conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
    Replication.decorateMasterConfiguration(conf);
    Server server = new DummyServer();
    ReplicationQueues repQueues =
        ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server);
    repQueues.init(server.getServerName().toString());
    final Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(), HConstants.HREGION_OLDLOGDIR_NAME);
    String fakeMachineName = URLEncoder.encode(server.getServerName().toString(), "UTF8");

    final FileSystem fs = FileSystem.get(conf);

    // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
    long now = System.currentTimeMillis();
    fs.delete(oldLogDir, true);
    fs.mkdirs(oldLogDir);
    // Case 1: 2 invalid files, which would be deleted directly
    fs.createNewFile(new Path(oldLogDir, "a"));
    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
    // Case 2: 1 "recent" file, not even deletable for the first log cleaner
    // (TimeToLiveLogCleaner), so we are not going down the chain
    System.out.println("Now is: " + now);
    for (int i = 1; i < 31; i++) {
      // Case 3: old files which would be deletable for the first log cleaner
      // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
      Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i));
      fs.createNewFile(fileName);
      // Case 4: put 3 old log files in ZK indicating that they are scheduled
      // for replication so these files would pass the first log cleaner
      // (TimeToLiveLogCleaner) but would be rejected by the second
      // (ReplicationLogCleaner)
      if (i % (30 / 3) == 1) {
        repQueues.addLog(fakeMachineName, fileName.getName());
        System.out.println("Replication log file: " + fileName);
      }
    }

    // Case 2: 1 "recent" file. Sleep first so its modification time is fresh
    // enough that the first log cleaner (TimeToLiveLogCleaner) will keep it.
    Thread.sleep(ttl);
    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));

    // Also case 2: 1 even newer file, likewise not deletable by the first log
    // cleaner (TimeToLiveLogCleaner), so the chain never reaches the second one
    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000)));

    for (FileStatus stat : fs.listStatus(oldLogDir)) {
      System.out.println(stat.getPath().toString());
    }

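    // 2 invalid files + 30 old files + 1 recent + 1 even newer = 34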
    assertEquals(34, fs.listStatus(oldLogDir).length);

    LogCleaner cleaner = new LogCleaner(1000, server, conf, fs, oldLogDir);
    cleaner.chore();

    // We end up with the current log file, a newer one and the 3 old log
    // files which are scheduled for replication
    TEST_UTIL.waitFor(
        1000,
        new Waiter.Predicate<Exception>() {
          @Override
          public boolean evaluate() throws Exception {
            return 5 == fs.listStatus(oldLogDir).length;
          }
        });

    for (FileStatus file : fs.listStatus(oldLogDir)) {
      System.out.println("Kept log files: " + file.getPath().getName());
    }
  }
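The DummyServer instantiated in Example 9 is not part of the excerpt. Below is a minimal sketch of such a stub, assuming the Server interface of this HBase era (getConfiguration, getZooKeeper, getCatalogTracker and getServerName, plus the inherited Abortable/Stoppable methods); every method body is illustrative, not the original:

  static class DummyServer implements Server {

    @Override
    public Configuration getConfiguration() {
      return TEST_UTIL.getConfiguration();
    }

    @Override
    public ZooKeeperWatcher getZooKeeper() {
      try {
        // The identifier string and the error handling are placeholders.
        return new ZooKeeperWatcher(getConfiguration(), "dummy server", this);
      } catch (IOException e) {
        e.printStackTrace();
      }
      return null;
    }

    @Override
    public CatalogTracker getCatalogTracker() {
      return null;
    }

    @Override
    public ServerName getServerName() {
      // Any well-formed "host,port,startcode" name works for this test.
      return new ServerName("regionserver,60020,000000");
    }

    @Override
    public void abort(String why, Throwable e) {}

    @Override
    public boolean isAborted() {
      return false;
    }

    @Override
    public void stop(String why) {}

    @Override
    public boolean isStopped() {
      return false;
    }
  }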
Example 10
/** Testing of multiPut in parallel. */
@Category(MediumTests.class)
public class TestParallelPut extends HBaseTestCase {
  static final Log LOG = LogFactory.getLog(TestParallelPut.class);

  private static HRegion region = null;
  private static HBaseTestingUtility hbtu = new HBaseTestingUtility();
  private static final String DIR = hbtu.getDataTestDir() + "/TestParallelPut/";

  // Table, qualifier, row and value names used by the tests
  static final byte[] tableName = Bytes.toBytes("testtable");
  static final byte[] qual1 = Bytes.toBytes("qual1");
  static final byte[] qual2 = Bytes.toBytes("qual2");
  static final byte[] qual3 = Bytes.toBytes("qual3");
  static final byte[] value1 = Bytes.toBytes("value1");
  static final byte[] value2 = Bytes.toBytes("value2");
  static final byte[] row = Bytes.toBytes("rowA");
  static final byte[] row2 = Bytes.toBytes("rowB");
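
  // fam1 and the fs FileSystem handle used below are inherited from HBaseTestCase.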

  /** @see org.apache.hadoop.hbase.HBaseTestCase#setUp() */
  @Override
  protected void setUp() throws Exception {
    super.setUp();
  }

  @Override
  protected void tearDown() throws Exception {
    super.tearDown();
    EnvironmentEdgeManagerTestHelper.reset();
  }

  //////////////////////////////////////////////////////////////////////////////
  // New tests that don't spin up a mini cluster but rather just test the
  // individual code pieces in the HRegion.
  //////////////////////////////////////////////////////////////////////////////

  /** Test one put command. */
  public void testPut() throws IOException {
    LOG.info("Starting testPut");
    initHRegion(tableName, getName(), fam1);

    long value = 1L;

    Put put = new Put(row);
    put.add(fam1, qual1, Bytes.toBytes(value));
    region.put(put);

    assertGet(row, fam1, qual1, Bytes.toBytes(value));
  }

  /** Test multi-threaded Puts. */
  public void testParallelPuts() throws IOException {

    LOG.info("Starting testParallelPuts");
    initHRegion(tableName, getName(), fam1);
    int numOps = 1000; // number of operations per thread

    // create 100 threads, each will do its own puts
    int numThreads = 100;
    Putter[] all = new Putter[numThreads];

    // create all threads
    for (int i = 0; i < numThreads; i++) {
      all[i] = new Putter(region, i, numOps);
    }

    // run all threads
    for (int i = 0; i < numThreads; i++) {
      all[i].start();
    }

    // wait for all threads to finish
    for (int i = 0; i < numThreads; i++) {
      try {
        all[i].join();
      } catch (InterruptedException e) {
        LOG.warn("testParallelPuts encountered InterruptedException." + " Ignoring....", e);
      }
    }
    LOG.info(
        "testParallelPuts successfully verified " + (numOps * numThreads) + " put operations.");
  }

  private static void assertGet(byte[] row, byte[] family, byte[] qualifier, byte[] value)
      throws IOException {
    // run a get and see if the value matches
    Get get = new Get(row);
    get.addColumn(family, qualifier);
    Result result = region.get(get, null);
    assertEquals(1, result.size());

    KeyValue kv = result.raw()[0];
    byte[] r = kv.getValue();
    assertEquals(0, Bytes.compareTo(r, value));
  }

  private void initHRegion(byte[] tableName, String callingMethod, byte[]... families)
      throws IOException {
    initHRegion(tableName, callingMethod, HBaseConfiguration.create(), families);
  }

  private void initHRegion(
      byte[] tableName, String callingMethod, Configuration conf, byte[]... families)
      throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      htd.addFamily(new HColumnDescriptor(family));
    }
    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
    Path path = new Path(DIR + callingMethod);
    if (fs.exists(path)) {
      if (!fs.delete(path, true)) {
        throw new IOException("Failed delete of " + path);
      }
    }
    region = HRegion.createHRegion(info, path, conf, htd);
  }

  /** A thread that makes a few put calls */
  public static class Putter extends Thread {

    private final HRegion region;
    private final int threadNumber;
    private final int numOps;
    private final Random rand = new Random();
    byte[] rowkey = null;

    public Putter(HRegion region, int threadNumber, int numOps) {
      this.region = region;
      this.threadNumber = threadNumber;
      this.numOps = numOps;
      this.rowkey = Bytes.toBytes((long) threadNumber); // unique rowid per thread
      setDaemon(true);
    }

    @Override
    public void run() {
      byte[] value = new byte[100];
      Put[] in = new Put[1];

      // iterate for the specified number of operations
      for (int i = 0; i < numOps; i++) {
        // generate random bytes
        rand.nextBytes(value);

        // put the random bytes and verify that we can read them back. This is
        // one way of ensuring that the rwcc manipulation in HRegion.put() is fine.
        Put put = new Put(rowkey);
        put.add(fam1, qual1, value);
        in[0] = put;
        try {
          OperationStatus[] ret = region.put(in);
          assertEquals(1, ret.length);
          assertEquals(OperationStatusCode.SUCCESS, ret[0].getOperationStatusCode());
          assertGet(rowkey, fam1, qual1, value);
        } catch (IOException e) {
          assertTrue("Thread id " + threadNumber + " operation " + i + " failed.", false);
        }
      }
    }
  }

  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
      new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}