Example #1
 /**
  * Gets the directory in which to archive a store directory
  *
  * @param conf {@link Configuration} to read for the archive directory name.
  * @param region parent region information under which the store currently lives
  * @param tabledir directory for the table under which the store currently lives
  * @param family name of the family in the store
  * @return {@link Path} to the directory to archive the given store or <tt>null</tt> if it should
  *     not be archived
  */
 public static Path getStoreArchivePath(
     Configuration conf, HRegionInfo region, Path tabledir, byte[] family) throws IOException {
   TableName tableName = FSUtils.getTableName(tabledir);
   Path rootDir = FSUtils.getRootDir(conf);
   Path tableArchiveDir = getTableArchivePath(rootDir, tableName);
   return HStore.getStoreHomedir(tableArchiveDir, region, family);
 }
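
A minimal caller sketch follows, assuming the method above lives in a utility class (referred to here as HFileArchiveUtil, which is not shown in the snippet) and that the column family name is already known; the "info" family is only a placeholder.

 import java.io.IOException;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;

 public class StoreArchivePathExample {
   // Resolve where HFiles of the given region's "info" family would be archived.
   // HFileArchiveUtil is assumed to be the class declaring getStoreArchivePath above.
   static Path archiveDirFor(Configuration conf, HRegionInfo region) throws IOException {
     Path tableDir = FSUtils.getTableDir(FSUtils.getRootDir(conf), region.getTable());
     return HFileArchiveUtil.getStoreArchivePath(conf, region, tableDir, Bytes.toBytes("info"));
   }
 }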
Example #2
  /**
   * Performs the compaction.
   *
   * @param scanner Where to read from.
   * @param writer Where to write to.
   * @param smallestReadPoint Smallest read point.
    * @param cleanSeqId When true, remove the seqId (formerly mvcc) value if it is <= smallestReadPoint
   * @return Whether compaction ended; false if it was interrupted for some reason.
   */
  protected boolean performCompaction(
      InternalScanner scanner, CellSink writer, long smallestReadPoint, boolean cleanSeqId)
      throws IOException {
    int bytesWritten = 0;
    // Since scanner.next() can return 'false' but still be delivering data,
    // we have to use a do/while loop.
    List<Cell> kvs = new ArrayList<Cell>();
    int closeCheckInterval = HStore.getCloseCheckInterval();
    long lastMillis;
    if (LOG.isDebugEnabled()) {
      lastMillis = System.currentTimeMillis();
    } else {
      lastMillis = 0;
    }
    boolean hasMore;
    do {
      hasMore = scanner.next(kvs, compactionKVMax);
      // output to writer:
      for (Cell c : kvs) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(c);
        if (cleanSeqId && kv.getSequenceId() <= smallestReadPoint) {
          kv.setSequenceId(0);
        }
        writer.append(kv);
        ++progress.currentCompactedKVs;
        progress.totalCompactedSize += kv.getLength();

        // check periodically to see if a system stop is requested
        if (closeCheckInterval > 0) {
          bytesWritten += kv.getLength();
          if (bytesWritten > closeCheckInterval) {
            // Log the progress of long running compactions every minute if
            // logging at DEBUG level
            if (LOG.isDebugEnabled()) {
              long now = System.currentTimeMillis();
              if ((now - lastMillis) >= 60 * 1000) {
                LOG.debug(
                    "Compaction progress: "
                        + progress
                        + String.format(
                            ", rate=%.2f kB/sec",
                            (bytesWritten / 1024.0) / ((now - lastMillis) / 1000.0)));
                lastMillis = now;
              }
            }
            bytesWritten = 0;
            if (!store.areWritesEnabled()) {
              progress.cancel();
              return false;
            }
          }
        }
      }
      kvs.clear();
    } while (hasMore);
    progress.complete();
    return true;
  }
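
The do/while shape above is the essential part of this method: scanner.next() may return false on the very call that delivers the last batch of cells, so each batch must be processed before the flag is checked. A stripped-down sketch of that read loop, with the writer and progress bookkeeping replaced by a simple cell count and an illustrative batch size standing in for compactionKVMax:

 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;

 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;

 public class ScannerDrainExample {
   // Drain every cell from the scanner, including those delivered on the final
   // next() call that returns false.
   static long drain(InternalScanner scanner) throws IOException {
     List<Cell> batch = new ArrayList<Cell>();
     long cells = 0;
     boolean hasMore;
     do {
       hasMore = scanner.next(batch, 1000); // 1000 stands in for compactionKVMax
       cells += batch.size();               // process the batch before checking hasMore
       batch.clear();
     } while (hasMore);
     return cells;
   }
 }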
Example #3
 /**
  * Gets the directory in which to archive a store directory
  *
  * @param conf {@link Configuration} to read for the archive directory name
  * @param tableName table name under which the store currently lives
  * @param regionName region encoded name under which the store currently lives
  * @param familyName name of the family in the store
  * @return {@link Path} to the directory to archive the given store or <tt>null</tt> if it should
  *     not be archived
  */
 public static Path getStoreArchivePath(
     final Configuration conf,
     final TableName tableName,
     final String regionName,
     final String familyName)
     throws IOException {
   Path tableArchiveDir = getTableArchivePath(conf, tableName);
   return HStore.getStoreHomedir(tableArchiveDir, regionName, Bytes.toBytes(familyName));
 }
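
A usage sketch for this name-based overload, again assuming the enclosing utility class is HFileArchiveUtil; the table name and family name below are placeholders, not values from the snippet.

 import java.io.IOException;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;

 public class StoreArchivePathByNameExample {
   // Resolve the archive directory for a store identified only by table name,
   // encoded region name, and family name.
   static Path archiveDirFor(Configuration conf, String encodedRegionName) throws IOException {
     return HFileArchiveUtil.getStoreArchivePath(
         conf, TableName.valueOf("myTable"), encodedRegionName, "info");
   }
 }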
Example #4
 @Override
 protected void completeCompaction(Collection<StoreFile> compactedFiles) throws IOException {
   try {
     r.compactionsWaiting.countDown();
     r.compactionsBlocked.await();
   } catch (InterruptedException ex) {
     throw new IOException(ex);
   }
   super.completeCompaction(compactedFiles);
 }
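
The override above parks the compaction thread between two latches: it signals compactionsWaiting and then blocks on compactionsBlocked until the test releases it. The coordinating side of that handshake might look roughly like the sketch below; only the two field names come from the snippet above, everything else is illustrative.

 import java.util.concurrent.CountDownLatch;

 public class CompactionHandshakeSketch {
   final CountDownLatch compactionsWaiting = new CountDownLatch(1);
   final CountDownLatch compactionsBlocked = new CountDownLatch(1);

   void inspectWhileCompactionIsParked() throws InterruptedException {
     compactionsWaiting.await();     // wait until completeCompaction() has been reached
     // ... run assertions while the compaction is held here ...
     compactionsBlocked.countDown(); // let completeCompaction() continue
   }
 }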
Example #5
  @Test
  public void testExcludeMinorCompaction() throws Exception {
    Configuration conf = util.getConfiguration();
    conf.setInt("hbase.hstore.compaction.min", 2);
    generateRandomStartKeys(5);

    try {
      util.startMiniCluster();
      Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
      final FileSystem fs = util.getDFSCluster().getFileSystem();
      Admin admin = util.getHBaseAdmin();
      HTable table = util.createTable(TABLE_NAME, FAMILIES);
      assertEquals("Should start with empty table", 0, util.countRows(table));

      // deep inspection: get the StoreFile dir
      final Path storePath =
          HStore.getStoreHomedir(
              FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
              admin.getTableRegions(TABLE_NAME).get(0),
              FAMILIES[0]);
      assertEquals(0, fs.listStatus(storePath).length);

      // put some data in it and flush to create a storefile
      Put p = new Put(Bytes.toBytes("test"));
      p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
      table.put(p);
      admin.flush(TABLE_NAME);
      assertEquals(1, util.countRows(table));
      quickPoll(
          new Callable<Boolean>() {
            public Boolean call() throws Exception {
              return fs.listStatus(storePath).length == 1;
            }
          },
          5000);

      // Generate a bulk load file with more rows
      conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true);
      util.startMiniMapReduceCluster();
      runIncrementalPELoad(conf, table, testDir);

      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

      // Ensure data shows up
      int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
      assertEquals(
          "LoadIncrementalHFiles should put expected data in table",
          expectedRows + 1,
          util.countRows(table));

      // should have a second StoreFile now
      assertEquals(2, fs.listStatus(storePath).length);

      // minor compactions shouldn't get rid of the file
      admin.compact(TABLE_NAME);
      try {
        quickPoll(
            new Callable<Boolean>() {
              public Boolean call() throws Exception {
                return fs.listStatus(storePath).length == 1;
              }
            },
            5000);
        throw new IOException("SF# = " + fs.listStatus(storePath).length);
      } catch (AssertionError ae) {
        // this is expected behavior
      }

      // a major compaction should work though
      admin.majorCompact(TABLE_NAME);
      quickPoll(
          new Callable<Boolean>() {
            public Boolean call() throws Exception {
              return fs.listStatus(storePath).length == 1;
            }
          },
          5000);

    } finally {
      util.shutdownMiniMapReduceCluster();
      util.shutdownMiniCluster();
    }
  }
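
quickPoll is a helper of the test class and is not shown above. A plausible reconstruction, assuming it simply re-evaluates the condition until it holds or the timeout expires, and otherwise fails with an AssertionError (which the minor-compaction branch of the test deliberately catches):

 import java.util.concurrent.Callable;

 import static org.junit.Assert.fail;

 public class QuickPollSketch {
   // Hypothetical reconstruction of quickPoll(Callable<Boolean>, int): poll the
   // condition every 100 ms until it returns true or timeoutMs elapses.
   static void quickPoll(Callable<Boolean> condition, int timeoutMs) throws Exception {
     long deadline = System.currentTimeMillis() + timeoutMs;
     while (System.currentTimeMillis() < deadline) {
       if (condition.call()) {
         return;
       }
       Thread.sleep(100);
     }
     fail("Condition not met within " + timeoutMs + " ms");
   }
 }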