    @Override
    public void run() {
      byte[] key = new byte[keySize];
      byte[] value = new byte[valueSize];
      Random rand = new Random(Thread.currentThread().getId());
      HLog hlog = region.getLog();

      try {
        long startTime = System.currentTimeMillis();
        for (int i = 0; i < numIterations; ++i) {
          Put put = setupPut(rand, key, value, numFamilies);
          long now = System.currentTimeMillis();
          WALEdit walEdit = new WALEdit();
          addFamilyMapToWALEdit(put.getFamilyCellMap(), walEdit);
          HRegionInfo hri = region.getRegionInfo();
          if (this.noSync) {
            hlog.appendNoSync(hri, hri.getTable(), walEdit, new ArrayList<UUID>(), now, htd);
          } else {
            hlog.append(hri, hri.getTable(), walEdit, now, htd);
          }
        }
        long totalTime = (System.currentTimeMillis() - startTime);
        logBenchmarkResult(Thread.currentThread().getName(), numIterations, totalTime);
      } catch (Exception e) {
        LOG.error(getClass().getSimpleName() + " Thread failed", e);
      }
    }
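The worker above reports per-thread throughput, so the enclosing benchmark needs a driver that fans it out across threads and joins them. A minimal driver sketch follows; it is not part of the original example, and the method name is illustrative.

    // Sketch of a driver for the worker above (hypothetical; names are illustrative).
    private void runBenchmark(Runnable[] workers) throws InterruptedException {
      Thread[] threads = new Thread[workers.length];
      for (int i = 0; i < workers.length; i++) {
        threads[i] = new Thread(workers[i], "wal-bench-" + i);
        threads[i].start();
      }
      for (Thread t : threads) {
        t.join(); // each worker logs its own result via logBenchmarkResult()
      }
    }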
Example #2
 @SuppressWarnings("deprecation")
 @Override
 public DResult get(Get get, long startId) throws IOException {
   if (get.hasFamilies()) get.addFamily(DominoConst.INNER_FAMILY);
   get.setTimeRange(0, startId + 1); // [x, y)
   get.setMaxVersions();
   Result preRead = region.get(get);
   List<KeyValue> status = preRead.getColumn(DominoConst.INNER_FAMILY, DominoConst.STATUS_COL);
   if (status == null || status.size() == 0) {
     Result ret = MVCC.handleResult(this, getTrxMetaTable(), preRead, startId, null);
     return new DResult(ret, null);
   }
   Integer lockId = region.getLock(null, get.getRow(), true);
   try {
     Result r =
         MVCC.handleResult(this, getTrxMetaTable(), region.get(get, lockId), startId, lockId);
     return new DResult(r, null);
   } catch (TransactionOutOfDateException oode) {
     return new DResult(null, oode.getMessage());
   } catch (InvalidRowStatusException e) {
     return new DResult(null, e.getMessage());
   } finally {
     region.releaseRowLock(lockId);
   }
 }
 private void doPuts(HRegion region) throws IOException {
   LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(MIN_VALUE_SIZE, MAX_VALUE_SIZE);
   for (int i = 0; i < NUM_ROWS; ++i) {
     byte[] key = LoadTestKVGenerator.md5PrefixedKey(i).getBytes();
     for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
       Put put = new Put(key);
       byte[] col = Bytes.toBytes(String.valueOf(j));
       byte[] value = dataGenerator.generateRandomSizeValue(key, col);
       if (includeTags) {
         Tag[] tag = new Tag[1];
         tag[0] = new Tag((byte) 1, "Visibility");
         KeyValue kv = new KeyValue(key, CF_BYTES, col, HConstants.LATEST_TIMESTAMP, value, tag);
         put.add(kv);
       } else {
         put.add(CF_BYTES, col, value);
       }
       if (VERBOSE) {
         KeyValue kvPut = new KeyValue(key, CF_BYTES, col, value);
         System.err.println(Strings.padFront(i + "", ' ', 4) + " " + kvPut);
       }
       region.put(put);
     }
     if (i % NUM_ROWS_PER_FLUSH == 0) {
       region.flushcache();
     }
   }
 }
 private void closeRegion(final HRegion region) throws IOException {
   if (region != null) {
     region.close();
     HLog wal = region.getLog();
     if (wal != null) wal.close();
   }
 }
 /**
  * Example log headers, as generated in debug mode (full region name) and
  * non-debug mode (table name plus region id):
  *
  * <pre>
  * Region-unittest,\x82\xB4\x85\xC2\x7F\xFF\xFE\xB6\xC9jNG\xEE!\x5C3\xBB\xAE\xA1:\x05\xA5\xA9x\xB0\xA1"8\x05\xFB(\xD2VY\xDB\x9A\x06\x09\xA9\x98\xC2\xE3\x8D=,1413960230654.aaf2a6c9f2c87c196f43497243bb2424.
  * RegionID-unittest,1413960230654
  * </pre>
  */
 protected String getLogHeader() {
   HRegion region = this.getCurrentRegion();
   return LOG.isDebugEnabled()
       ? String.format("Region-%s", region.getRegionNameAsString())
       : String.format(
           "Region-%s,%d", region.getTableDesc().getNameAsString(), region.getRegionId());
 }
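The header is meant to prefix region-scoped log output; a one-line illustrative use:

    LOG.info(getLogHeader() + " compaction started");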
Example #6
 /**
  * @param r Region we're working on.
  * @return whether znode is successfully transitioned to OPENED state.
  * @throws IOException
  */
 private boolean transitionToOpened(final HRegion r) throws IOException {
   boolean result = false;
   HRegionInfo hri = r.getRegionInfo();
   final String name = hri.getRegionNameAsString();
   // Finally, Transition ZK node to OPENED
   try {
     if (ZKAssign.transitionNodeOpened(
             this.server.getZooKeeper(), hri, this.server.getServerName(), this.version)
         == -1) {
       LOG.warn(
           "Completed the OPEN of region "
               + name
               + " but when transitioning from "
               + " OPENING to OPENED got a version mismatch, someone else clashed "
               + "so now unassigning -- closing region on server: "
               + this.server.getServerName());
     } else {
       LOG.debug(
           "region transitioned to opened in zookeeper: "
               + r.getRegionInfo()
               + ", server: "
               + this.server.getServerName());
       result = true;
     }
   } catch (KeeperException e) {
     LOG.error(
         "Failed transitioning node " + name + " from OPENING to OPENED -- closing region", e);
   }
   return result;
 }
  @Test
  public void testRegionObserverFlushTimeStacking() throws Exception {
    byte[] ROW = Bytes.toBytes("testRow");
    byte[] TABLE = Bytes.toBytes(getClass().getName());
    byte[] A = Bytes.toBytes("A");
    byte[][] FAMILIES = new byte[][] {A};

    Configuration conf = HBaseConfiguration.create();
    HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
    RegionCoprocessorHost h = region.getCoprocessorHost();
    h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
    h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);

    // put a row and flush it to disk
    Put put = new Put(ROW);
    put.add(A, A, A);
    region.put(put);
    region.flushcache();
    Get get = new Get(ROW);
    Result r = region.get(get);
    assertNull(
        "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
            + r,
        r.listCells());
  }
Example #8
    @Override
    protected void updateMeta(final byte[] oldRegion1, final byte[] oldRegion2, HRegion newRegion)
        throws IOException {
      byte[][] regionsToDelete = {oldRegion1, oldRegion2};
      for (int r = 0; r < regionsToDelete.length; r++) {
        if (latestRegion != null
            && Bytes.equals(regionsToDelete[r], latestRegion.getRegionName())) {
          latestRegion = null;
        }
        Delete delete = new Delete(regionsToDelete[r]);
        table.delete(delete);
        if (LOG.isDebugEnabled()) {
          LOG.debug("updated columns in row: " + Bytes.toStringBinary(regionsToDelete[r]));
        }
      }
      newRegion.getRegionInfo().setOffline(true);

      Put put = new Put(newRegion.getRegionName());
      put.add(
          HConstants.CATALOG_FAMILY,
          HConstants.REGIONINFO_QUALIFIER,
          Writables.getBytes(newRegion.getRegionInfo()));
      table.put(put);

      if (LOG.isDebugEnabled()) {
        LOG.debug("updated columns in row: " + Bytes.toStringBinary(newRegion.getRegionName()));
      }
    }
Example #9
  /**
   * Creates a 'manifest' for the specified region, by reading directly from the HRegion object.
   * This is used by the "online snapshot" when the table is enabled.
   */
  public void addRegion(final HRegion region) throws IOException {
    // 0. Get the ManifestBuilder/RegionVisitor
    RegionVisitor visitor = createRegionVisitor(desc);

    // 1. dump region meta info into the snapshot directory
    LOG.debug("Storing '" + region + "' region-info for snapshot.");
    Object regionData = visitor.regionOpen(region.getRegionInfo());
    monitor.rethrowException();

    // 2. iterate through all the stores in the region
    LOG.debug("Creating references for hfiles");

    for (Store store : region.getStores()) {
      // 2.1. build the snapshot reference for the store
      Object familyData = visitor.familyOpen(regionData, store.getFamily().getName());
      monitor.rethrowException();

      List<StoreFile> storeFiles = new ArrayList<StoreFile>(store.getStorefiles());
      if (LOG.isDebugEnabled()) {
        LOG.debug("Adding snapshot references for " + storeFiles + " hfiles");
      }

      // 2.2. iterate through all the store's files and create "references".
      for (int i = 0, sz = storeFiles.size(); i < sz; i++) {
        StoreFile storeFile = storeFiles.get(i);
        monitor.rethrowException();

        // create "reference" to this store file.
        LOG.debug("Adding reference for file (" + (i + 1) + "/" + sz + "): " + storeFile.getPath());
        visitor.storeFile(regionData, familyData, storeFile.getFileInfo());
      }
      visitor.familyClose(regionData, familyData);
    }
    visitor.regionClose(regionData);
  }
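The regionOpen/familyOpen/storeFile/familyClose/regionClose sequence above is the entire visitor contract. As a sketch only, with signatures inferred from the calls in addRegion() rather than copied from the real RegionVisitor interface, a trivial visitor that just logs the traversal could look like:

  // Illustrative no-op visitor; signatures inferred from addRegion() above.
  static class LoggingRegionVisitor implements RegionVisitor {
    public Object regionOpen(HRegionInfo regionInfo) {
      LOG.debug("visiting region " + regionInfo.getEncodedName());
      return regionInfo; // opaque regionData handle threaded through later calls
    }
    public Object familyOpen(Object regionData, byte[] familyName) {
      LOG.debug("visiting family " + Bytes.toString(familyName));
      return familyName;
    }
    public void storeFile(Object regionData, Object familyData, StoreFileInfo fileInfo) {
      LOG.debug("visiting store file " + fileInfo);
    }
    public void familyClose(Object regionData, Object familyData) {}
    public void regionClose(Object regionData) {}
  }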
 /**
  * Test that if we fail a flush, abort gets set on close.
  *
  * @see <a href="https://issues.apache.org/jira/browse/HBASE-4270">HBASE-4270</a>
  * @throws IOException
  * @throws NodeExistsException
  * @throws KeeperException
  */
 @Test
 public void testFailedFlushAborts() throws IOException, NodeExistsException, KeeperException {
   final Server server = new MockServer(HTU, false);
   final RegionServerServices rss = HTU.createMockRegionServerService();
   HTableDescriptor htd = TEST_HTD;
   final HRegionInfo hri =
       new HRegionInfo(htd.getTableName(), HConstants.EMPTY_END_ROW, HConstants.EMPTY_END_ROW);
   HRegion region = HTU.createLocalHRegion(hri, htd);
   try {
     assertNotNull(region);
     // Spy on the region so can throw exception when close is called.
     HRegion spy = Mockito.spy(region);
     final boolean abort = false;
     Mockito.when(spy.close(abort)).thenThrow(new RuntimeException("Mocked failed close!"));
     // The CloseRegionHandler will try to get an HRegion that corresponds
     // to the passed hri -- so insert the region into the online region Set.
     rss.addToOnlineRegions(spy);
     // Assert the Server is NOT stopped before we call close region.
     assertFalse(server.isStopped());
     CloseRegionHandler handler = new CloseRegionHandler(server, rss, hri, false, false, -1);
     boolean throwable = false;
     try {
       handler.process();
     } catch (Throwable t) {
       throwable = true;
     } finally {
       assertTrue(throwable);
       // Abort calls stop so stopped flag should be set.
       assertTrue(server.isStopped());
     }
   } finally {
     HRegion.closeHRegion(region);
   }
 }
Example #11
    @Override
    protected void updateMeta(final byte[] oldRegion1, final byte[] oldRegion2, HRegion newRegion)
        throws IOException {
      byte[][] regionsToDelete = {oldRegion1, oldRegion2};
      for (int r = 0; r < regionsToDelete.length; r++) {
        Delete delete = new Delete(regionsToDelete[r]);
        delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
        delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
        delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
        delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
        delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
        root.delete(delete, null, true);

        if (LOG.isDebugEnabled()) {
          LOG.debug("updated columns in row: " + Bytes.toStringBinary(regionsToDelete[r]));
        }
      }
      HRegionInfo newInfo = newRegion.getRegionInfo();
      newInfo.setOffline(true);
      Put put = new Put(newRegion.getRegionName());
      put.add(
          HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(newInfo));
      root.put(put);
      if (LOG.isDebugEnabled()) {
        LOG.debug("updated columns in row: " + Bytes.toStringBinary(newRegion.getRegionName()));
      }
    }
 private void closeRegion(final HRegion region) throws IOException {
   if (region != null) {
     region.close();
     WAL wal = region.getWAL();
     if (wal != null) {
       wal.shutdown();
     }
   }
 }
Example #13
 @Override
 public void rollbackRow(byte[] row, long startId) throws IOException {
   Integer lockId = region.getLock(null, row, true);
   try {
     this.rollbackRow(row, startId, lockId);
   } finally {
     region.releaseRowLock(lockId);
   }
 }
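Examples #13, #15, #18 and #19 all repeat the same getLock/try/finally releaseRowLock shape. A hedged sketch of how that boilerplate could be factored out, using only the two HRegion lock calls already shown above (the helper and its callback interface are hypothetical):

  // Hypothetical helper, not in the original source.
  interface RowAction<T> {
    T run(Integer lockId) throws IOException;
  }

  private <T> T withRowLock(byte[] row, RowAction<T> action) throws IOException {
    Integer lockId = region.getLock(null, row, true); // blocking acquire
    try {
      return action.run(lockId);
    } finally {
      region.releaseRowLock(lockId); // always release, even on error paths
    }
  }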
Example #14
  /** Creates, flushes, and closes a new region. */
  public static HRegion createHDFSRegionDir(
      Configuration conf, HRegionInfo hri, HTableDescriptor htd) throws IOException {
    // Create HRegion
    Path root = FSUtils.getRootDir(conf);
    HRegion region = HRegion.createHRegion(hri, root, conf, htd, null);

    // Close the new region to flush to disk. Close log file too.
     region.close();
     region.getLog().closeAndDelete();
    return region;
  }
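A brief usage sketch; the descriptor and region info are built the same way other examples on this page build them, and the table name is illustrative:

    // Illustrative usage: materialize the on-disk directory for a new region.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example"));
    htd.addFamily(new HColumnDescriptor("f"));
    HRegionInfo hri = new HRegionInfo(htd.getTableName(), null, null);
    HRegion closed = createHDFSRegionDir(conf, hri, htd); // returned region is already closed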
Example #15
 @Override
 public void commitRow(byte[] row, long startId, long commitId, boolean isDelete)
     throws IOException {
   Integer lockId = region.getLock(null, row, true);
   try {
     commitRow(row, startId, commitId, isDelete, lockId);
   } finally {
     region.releaseRowLock(lockId);
   }
 }
Example #16
  @Test
  public void testIncrement() throws Exception {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] col1 = Bytes.toBytes("col1");
    byte[] col2 = Bytes.toBytes("col2");
    byte[] col3 = Bytes.toBytes("col3");

    // Setting up region
    final WALFactory wals = new WALFactory(CONF, null, "TestIncrement");
    byte[] tableName = Bytes.toBytes("TestIncrement");
    final WAL wal = wals.getWAL(tableName);
    HRegion region = createHRegion(tableName, "increment", wal, Durability.USE_DEFAULT);

    // col1: amount = 1, 1 write back to WAL
    Increment inc1 = new Increment(row1);
    inc1.addColumn(FAMILY, col1, 1);
    Result res = region.increment(inc1);
    assertEquals(1, res.size());
    assertEquals(1, Bytes.toLong(res.getValue(FAMILY, col1)));
    verifyWALCount(wals, wal, 1);

    // col1: amount = 0, 0 writes back to WAL
    inc1 = new Increment(row1);
    inc1.addColumn(FAMILY, col1, 0);
    res = region.increment(inc1);
    assertEquals(1, res.size());
    assertEquals(1, Bytes.toLong(res.getValue(FAMILY, col1)));
    verifyWALCount(wals, wal, 1);

    // col1: amount = 0, col2: amount = 0, col3: amount = 0
    // 0 writes back to WAL
    inc1 = new Increment(row1);
    inc1.addColumn(FAMILY, col1, 0);
    inc1.addColumn(FAMILY, col2, 0);
    inc1.addColumn(FAMILY, col3, 0);
    res = region.increment(inc1);
    assertEquals(3, res.size());
    assertEquals(1, Bytes.toLong(res.getValue(FAMILY, col1)));
    assertEquals(0, Bytes.toLong(res.getValue(FAMILY, col2)));
    assertEquals(0, Bytes.toLong(res.getValue(FAMILY, col3)));
    verifyWALCount(wals, wal, 1);

    // col1: amount = 5, col2: amount = 4, col3: amount = 3
    // 1 write back to WAL
    inc1 = new Increment(row1);
    inc1.addColumn(FAMILY, col1, 5);
    inc1.addColumn(FAMILY, col2, 4);
    inc1.addColumn(FAMILY, col3, 3);
    res = region.increment(inc1);
    assertEquals(3, res.size());
    assertEquals(6, Bytes.toLong(res.getValue(FAMILY, col1)));
    assertEquals(4, Bytes.toLong(res.getValue(FAMILY, col2)));
    assertEquals(3, Bytes.toLong(res.getValue(FAMILY, col3)));
    verifyWALCount(wals, wal, 2);
  }
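The verifyWALCount helper is not shown in this example. A hedged reconstruction of what it plausibly does against the same WALFactory API (the FS field and the reader usage are assumptions):

  // Hedged reconstruction, not the original helper: count the entries in the
  // WAL's current file and compare with the expected number of appends.
  private void verifyWALCount(WALFactory wals, WAL log, int expected) throws Exception {
    Path walPath = DefaultWALProvider.getCurrentFileName(log);
    WAL.Reader reader = wals.createReader(FS, walPath);
    int count = 0;
    WAL.Entry entry = new WAL.Entry();
    while (reader.next(entry) != null) {
      count++;
    }
    reader.close();
    assertEquals(expected, count);
  }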
 private static void bootstrap(final Path rd, final Configuration c) throws IOException {
   LOG.info("BOOTSTRAP: creating ROOT and first META regions");
   try {
      // Bootstrapping, make sure blockcache is off.  Else, one will be
      // created here in bootstrap and it'll need to be cleaned up.  Better to
      // not make it in the first place.  Turn off block caching for bootstrap.
      // Enable after.
     HRegionInfo rootHRI = new HRegionInfo(HRegionInfo.ROOT_REGIONINFO);
     setInfoFamilyCachingForRoot(false);
     HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
     setInfoFamilyCachingForMeta(false);
     HRegion root = HRegion.createHRegion(rootHRI, rd, c,
         HTableDescriptor.ROOT_TABLEDESC);
     HRegion meta = HRegion.createHRegion(metaHRI, rd, c,
         HTableDescriptor.META_TABLEDESC);
     setInfoFamilyCachingForRoot(true);
     setInfoFamilyCachingForMeta(true);
     // Add first region from the META table to the ROOT region.
     HRegion.addRegionToMETA(root, meta);
     root.close();
     root.getLog().closeAndDelete();
     meta.close();
     meta.getLog().closeAndDelete();
   } catch (IOException e) {
     e = RemoteExceptionHandler.checkIOException(e);
     LOG.error("bootstrap", e);
     throw e;
   }
 }
Example #18
 @Override
 public DResult put(Put put, long startId, boolean locking) throws IOException {
   Integer lockId = region.getLock(null, put.getRow(), true);
   try {
     byte[] columnsWritten =
         MVCC.writeCheckRowStatus(this, getTrxMetaTable(), put.getRow(), locking, lockId, startId);
     Put innerPut = clonePut(put, startId, locking, columnsWritten);
     mutateRow(innerPut, lockId);
     return null;
   } catch (InvalidRowStatusException e) {
     return new DResult(null, e.getMessage());
   } finally {
     region.releaseRowLock(lockId);
   }
 }
Example #19
 @Override
 public DResult delete(byte[] row, long startId) throws IOException {
   Integer lockId = region.getLock(null, row, true);
   try {
     byte[] columnsWritten =
         MVCC.writeCheckRowStatus(this, getTrxMetaTable(), row, false, lockId, startId);
     Put deletePut = deletePut(row, startId, columnsWritten);
     mutateRow(deletePut, lockId);
     return null;
   } catch (InvalidRowStatusException e) {
     return new DResult(null, e.getMessage());
   } finally {
     region.releaseRowLock(lockId);
   }
 }
 /**
  * openHRegion. Placeholder for any TransactionalRegion-specific logic.
  *
  * @param reporter progress reporter used while opening the region
  */
 @Override
 protected HRegion openHRegion(final CancelableProgressable reporter) throws IOException {
   LOG.trace("openHRegion -- ENTRY");
   super.openHRegion(reporter);
   LOG.trace("openHRegion -- EXIT");
   return this;
 }
 private HRegion openRegion(
     final FileSystem fs, final Path dir, final HTableDescriptor htd, final HLog hlog)
     throws IOException {
   // Initialize HRegion
   HRegionInfo regionInfo = new HRegionInfo(htd.getTableName());
   return HRegion.createHRegion(regionInfo, dir, getConf(), htd, hlog);
 }
Example #22
 private void mutateRow(Mutation mut, Integer lockId) throws IOException {
   @SuppressWarnings("unchecked")
    Pair<Mutation, Integer>[] pair = new Pair[1];
   mut.setWriteToWAL(true);
   pair[0] = new Pair<Mutation, Integer>(mut, lockId);
   region.batchMutate(pair);
 }
 @Override
 public Void call() throws Exception {
   // Taking the region read lock prevents the individual region from being closed while a
   // snapshot is in progress.  This is helpful but not sufficient for preventing races with
   // snapshots that involve multiple regions and regionservers.  It is still possible to have
   // an interleaving such that globally regions are missing, so we still need the verification
   // step.
   LOG.debug("Starting region operation on " + region);
   region.startRegionOperation();
   try {
     if (snapshotSkipFlush) {
        /*
         * This takes an online snapshot without forcing a coordinated flush, to prevent pauses.
         * The snapshot type is defined inside the snapshot description. FlushSnapshotSubprocedure
         * should be renamed to distributedSnapshotSubprocedure, and the flush() behavior can be
         * turned on/off based on the flush type.
         * To minimize the code change, the class name is not changed.
         */
       LOG.debug("take snapshot without flush memstore first");
     } else {
       LOG.debug("Flush Snapshotting region " + region.toString() + " started...");
       region.flush(true);
     }
     ((HRegion) region).addRegionToSnapshot(snapshot, monitor);
     if (snapshotSkipFlush) {
       LOG.debug("... SkipFlush Snapshotting region " + region.toString() + " completed.");
     } else {
       LOG.debug("... Flush Snapshotting region " + region.toString() + " completed.");
     }
   } finally {
     LOG.debug("Closing region operation on " + region);
     region.closeRegionOperation();
   }
   return null;
 }
Example #24
    OfflineMerger(Configuration conf, FileSystem fs) throws IOException {
      super(conf, fs, HConstants.META_TABLE_NAME);

      Path rootTableDir =
          HTableDescriptor.getTableDir(
              fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR))),
              HConstants.ROOT_TABLE_NAME);

      // Scan root region to find all the meta regions

      root =
          HRegion.newHRegion(
              rootTableDir,
              hlog,
              fs,
              conf,
              HRegionInfo.ROOT_REGIONINFO,
              HTableDescriptor.ROOT_TABLEDESC,
              null);
      root.initialize();

      Scan scan = new Scan();
      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
      InternalScanner rootScanner = root.getScanner(scan);

      try {
        List<KeyValue> results = new ArrayList<KeyValue>();
        boolean hasMore;
        do {
          hasMore = rootScanner.next(results);
          for (KeyValue kv : results) {
            HRegionInfo info = Writables.getHRegionInfoOrNull(kv.getValue());
            if (info != null) {
              metaRegions.add(info);
            }
          }
          results.clear(); // reset between batches so entries are not re-added
        } while (hasMore);
      } finally {
        rootScanner.close();
        try {
          root.close();
        } catch (IOException e) {
          LOG.error(e);
        }
      }
    }
  /**
   * Restore the on-disk table to a specified snapshot state.
   *
   * @return the metadata changes for the regions touched by the restore operation, or null if
   *     the snapshot has no regions
   */
  public RestoreMetaChanges restoreHdfsRegions() throws IOException {
    LOG.debug("starting restore");
    Set<String> snapshotRegionNames = SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir);
    if (snapshotRegionNames == null) {
      LOG.warn("Nothing to restore. Snapshot " + snapshotDesc + " looks empty");
      return null;
    }

    RestoreMetaChanges metaChanges = new RestoreMetaChanges();

    // Identify which regions are still available and which are not.
    // NOTE: we rely upon the region name as: "table name, start key, end key"
    List<HRegionInfo> tableRegions = getTableRegions();
    if (tableRegions != null) {
      monitor.rethrowException();
      for (HRegionInfo regionInfo : tableRegions) {
        String regionName = regionInfo.getEncodedName();
        if (snapshotRegionNames.contains(regionName)) {
          LOG.info("region to restore: " + regionName);
          snapshotRegionNames.remove(regionName);
          metaChanges.addRegionToRestore(regionInfo);
        } else {
          LOG.info("region to remove: " + regionName);
          metaChanges.addRegionToRemove(regionInfo);
        }
      }

      // Restore regions using the snapshot data
      monitor.rethrowException();
      restoreHdfsRegions(metaChanges.getRegionsToRestore());

      // Remove regions from the current table
      monitor.rethrowException();
      removeHdfsRegions(metaChanges.getRegionsToRemove());
    }

    // Regions to Add: present in the snapshot but not in the current table
    if (snapshotRegionNames.size() > 0) {
      List<HRegionInfo> regionsToAdd = new LinkedList<HRegionInfo>();

      monitor.rethrowException();
      for (String regionName : snapshotRegionNames) {
        LOG.info("region to add: " + regionName);
        Path regionDir = new Path(snapshotDir, regionName);
        regionsToAdd.add(HRegion.loadDotRegionInfoFileContent(fs, regionDir));
      }

      // Create new regions cloning from the snapshot
      monitor.rethrowException();
      HRegionInfo[] clonedRegions = cloneHdfsRegions(regionsToAdd);
      metaChanges.setNewRegions(clonedRegions);
    }

    // Restore WALs
    monitor.rethrowException();
    restoreWALs();

    return metaChanges;
  }
Example #26
  /**
   * Get the archive directory for a given region under the specified table
   *
   * @param rootDir the root directory where HBase files are stored; used to build the archive path
   * @param tableName the table name. Cannot be null.
   * @param regiondir the path to the region directory. Cannot be null.
   * @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
   *     should not be archived
   */
  public static Path getRegionArchiveDir(Path rootDir, TableName tableName, Path regiondir) {
    // get the archive directory for a table
    Path archiveDir = getTableArchivePath(rootDir, tableName);

    // then add on the region path under the archive
    String encodedRegionName = regiondir.getName();
    return HRegion.getRegionDir(archiveDir, encodedRegionName);
  }
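A short usage sketch with illustrative values (the encodedRegionName variable is assumed):

    // Illustrative usage: compute the archive location for one region's files.
    Path rootDir = FSUtils.getRootDir(conf);
    TableName table = TableName.valueOf("mytable");
    Path regionDir = new Path(FSUtils.getTableDir(rootDir, table), encodedRegionName);
    Path archiveDir = getRegionArchiveDir(rootDir, table, regionDir);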
 /**
  * Open HRegion. Calls initialize and sets sequenceid to both regular WAL and trx WAL.
  *
  * @param reporter
  * @return Returns <code>this</code>
  * @throws IOException
  */
 @Override
 protected HRegion openHRegion(final CancelableProgressable reporter) throws IOException {
   super.openHRegion(reporter);
   if (this.transactionLog != null) {
     this.transactionLog.setSequenceNumber(super.getLog().getSequenceNumber());
   }
   return this;
 }
Example #28
  @Test
  public void testEncodedSeeker() throws IOException {
    System.err.println(
        "Testing encoded seekers for encoding : "
            + encoding
            + ", includeTags : "
            + includeTags
            + ", compressTags : "
            + compressTags);
    if (includeTags) {
      testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3);
    }
    LruBlockCache cache =
        (LruBlockCache) new CacheConfig(testUtil.getConfiguration()).getBlockCache();
    cache.clearCache();
    // Need to disable default row bloom filter for this test to pass.
    HColumnDescriptor hcd =
        (new HColumnDescriptor(CF_NAME))
            .setMaxVersions(MAX_VERSIONS)
            .setDataBlockEncoding(encoding)
            .setBlocksize(BLOCK_SIZE)
            .setBloomFilterType(BloomType.NONE)
            .setCompressTags(compressTags);
    HRegion region = testUtil.createTestRegion(TABLE_NAME, hcd);

    // write the data, but leave some in the memstore
    doPuts(region);

    // verify correctness when memstore contains data
    doGets(region);

    // verify correctness again after compacting
    region.compactStores();
    doGets(region);

    Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();

    // Ensure that compactions don't pollute the cache with unencoded blocks
    // in case of in-cache-only encoding.
    System.err.println("encodingCounts=" + encodingCounts);
    assertEquals(1, encodingCounts.size());
    DataBlockEncoding encodingInCache = encodingCounts.keySet().iterator().next();
    assertEquals(encoding, encodingInCache);
    assertTrue(encodingCounts.get(encodingInCache) > 0);
  }
 HRegion initHRegion(
     byte[] tableName, String callingMethod, Configuration conf, byte[]... families)
     throws IOException {
   HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
   for (byte[] family : families) {
     htd.addFamily(new HColumnDescriptor(family));
   }
   HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
   Path path = new Path(DIR + callingMethod);
   HRegion r = HRegion.createHRegion(info, path, conf, htd);
    // The following piece is a hack: currently a coprocessorHost
    // is secretly loaded at OpenRegionHandler. We don't really
    // start a region server here, so just manually create the cphost
    // and set it on the region.
   RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
   r.setCoprocessorHost(host);
   return r;
 }
  @Override
  public Result getCurrentRowState(Mutation m, Collection<? extends ColumnReference> columns)
      throws IOException {
    byte[] row = m.getRow();
    // need to use a scan here so we can get raw state, which Get doesn't provide.
    Scan s = IndexManagementUtil.newLocalStateScan(Collections.singletonList(columns));
    s.setStartRow(row);
    s.setStopRow(row);
    HRegion region = this.env.getRegion();
    RegionScanner scanner = region.getScanner(s);
    List<KeyValue> kvs = new ArrayList<KeyValue>(1);
    boolean more = scanner.next(kvs);
    assert !more : "Got more than one result when scanning" + " a single row in the primary table!";

    Result r = new Result(kvs);
    scanner.close();
    return r;
  }