Example no. 1
0
  /**
   * Creates a 'manifest' for the specified region by reading directly from the HRegion object.
   * This is used by the "online snapshot" when the table is enabled.
   */
  public void addRegion(final HRegion region) throws IOException {
    // 0. Get the ManifestBuilder/RegionVisitor
    RegionVisitor visitor = createRegionVisitor(desc);

    // 1. dump region meta info into the snapshot directory
    LOG.debug("Storing '" + region + "' region-info for snapshot.");
    Object regionData = visitor.regionOpen(region.getRegionInfo());
    monitor.rethrowException();

    // 2. iterate through all the stores in the region
    LOG.debug("Creating references for hfiles");

    for (Store store : region.getStores()) {
      // 2.1. build the snapshot reference for the store
      Object familyData = visitor.familyOpen(regionData, store.getFamily().getName());
      monitor.rethrowException();

      List<StoreFile> storeFiles = new ArrayList<StoreFile>(store.getStorefiles());
      if (LOG.isDebugEnabled()) {
        LOG.debug("Adding snapshot references for " + storeFiles + " hfiles");
      }

      // 2.2. iterate through all the store's files and create "references".
      for (int i = 0, sz = storeFiles.size(); i < sz; i++) {
        StoreFile storeFile = storeFiles.get(i);
        monitor.rethrowException();

        // create "reference" to this store file.
        LOG.debug("Adding reference for file (" + (i + 1) + "/" + sz + "): " + storeFile.getPath());
        visitor.storeFile(regionData, familyData, storeFile.getFileInfo());
      }
      visitor.familyClose(regionData, familyData);
    }
    visitor.regionClose(regionData);
  }
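
The RegionVisitor implementation is not shown in this excerpt; a minimal sketch of the contract implied by the calls in addRegion (argument types assumed from getRegionInfo(), getFamily().getName(), and getFileInfo(); the real interface may be generic) could look like this:

  interface RegionVisitor {
    // Step 1: open the region with its metadata and return an opaque handle.
    Object regionOpen(HRegionInfo regionInfo) throws IOException;
    // Step 2.1: open a column family within the region and return an opaque handle.
    Object familyOpen(Object regionData, byte[] familyName) throws IOException;
    // Step 2.2: record a snapshot "reference" for one store file of the family.
    void storeFile(Object regionData, Object familyData, StoreFileInfo storeFileInfo)
        throws IOException;
    // Close the family and the region once all of their files have been visited.
    void familyClose(Object regionData, Object familyData) throws IOException;
    void regionClose(Object regionData) throws IOException;
  }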
 @Override
 public KeyValueScanner preStoreScannerOpen(
     final ObserverContext<RegionCoprocessorEnvironment> c,
     Store store,
     final Scan scan,
     final NavigableSet<byte[]> targetCols,
     KeyValueScanner s)
     throws IOException {
   TableName tn = store.getTableName();
   if (!tn.isSystemTable()) {
     Long newTtl = ttls.get(tn);
     Integer newVersions = versions.get(tn);
     ScanInfo oldSI = store.getScanInfo();
     HColumnDescriptor family = store.getFamily();
     ScanInfo scanInfo =
         new ScanInfo(
             family.getName(),
             family.getMinVersions(),
             newVersions == null ? family.getMaxVersions() : newVersions,
             newTtl == null ? oldSI.getTtl() : newTtl,
             family.getKeepDeletedCells(),
             oldSI.getTimeToPurgeDeletes(),
             oldSI.getComparator());
     return new StoreScanner(
         store,
         scanInfo,
         scan,
         targetCols,
         ((HStore) store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED));
   } else {
     return s;
   }
 }
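
The ttls and versions maps consulted by these hooks are fields of the enclosing observer and are not shown in the source; a plausible declaration, keyed by TableName as the lookups above suggest (the map implementation is an assumption), is:

 // Assumed fields: per-table TTL and max-versions overrides read by the
 // pre*ScannerOpen hooks; ConcurrentHashMap is an assumption, not from the source.
 private final Map<TableName, Long> ttls = new ConcurrentHashMap<TableName, Long>();
 private final Map<TableName, Integer> versions = new ConcurrentHashMap<TableName, Integer>();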
 @Override
 public InternalScanner preCompactScannerOpen(
     final ObserverContext<RegionCoprocessorEnvironment> c,
     Store store,
     List<? extends KeyValueScanner> scanners,
     ScanType scanType,
     long earliestPutTs,
     InternalScanner s)
     throws IOException {
   Long newTtl = ttls.get(store.getTableName());
   Integer newVersions = versions.get(store.getTableName());
   ScanInfo oldSI = store.getScanInfo();
   HColumnDescriptor family = store.getFamily();
   ScanInfo scanInfo =
       new ScanInfo(
           family.getName(),
           family.getMinVersions(),
           newVersions == null ? family.getMaxVersions() : newVersions,
           newTtl == null ? oldSI.getTtl() : newTtl,
           family.getKeepDeletedCells(),
           oldSI.getTimeToPurgeDeletes(),
           oldSI.getComparator());
   Scan scan = new Scan();
   scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
   return new StoreScanner(
       store, scanInfo, scan, scanners, scanType, store.getSmallestReadPoint(), earliestPutTs);
 }
 @Override
 public InternalScanner preFlushScannerOpen(
     final ObserverContext<RegionCoprocessorEnvironment> c,
     Store store,
     KeyValueScanner memstoreScanner,
     InternalScanner s)
     throws IOException {
   Long newTtl = ttls.get(store.getTableName());
   if (newTtl != null) {
     System.out.println("PreFlush:" + newTtl);
   }
   Integer newVersions = versions.get(store.getTableName());
   ScanInfo oldSI = store.getScanInfo();
   HColumnDescriptor family = store.getFamily();
   ScanInfo scanInfo =
       new ScanInfo(
           family.getName(),
           family.getMinVersions(),
           newVersions == null ? family.getMaxVersions() : newVersions,
           newTtl == null ? oldSI.getTtl() : newTtl,
           family.getKeepDeletedCells(),
           oldSI.getTimeToPurgeDeletes(),
           oldSI.getComparator());
   Scan scan = new Scan();
   scan.setMaxVersions(newVersions == null ? oldSI.getMaxVersions() : newVersions);
   return new StoreScanner(
       store,
       scanInfo,
       scan,
       Collections.singletonList(memstoreScanner),
       ScanType.COMPACT_RETAIN_DELETES,
       store.getSmallestReadPoint(),
       HConstants.OLDEST_TIMESTAMP);
 }
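
For context, a hedged usage sketch of attaching such an observer to a table so that its flush, compaction, and scan hooks run for that table; ScanPolicyObserver and the admin handle are placeholders, not names from the source:

HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo_table"));
htd.addFamily(new HColumnDescriptor("f"));
// Register the coprocessor by class name; ScanPolicyObserver stands in for the
// enclosing observer class shown above.
htd.addCoprocessor(ScanPolicyObserver.class.getName());
admin.createTable(htd);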
Example no. 5
0
 /**
  * @param store The store.
  * @param scanners Store file scanners.
  * @param scanType Scan type.
  * @param smallestReadPoint Smallest MVCC read point.
  * @param earliestPutTs Earliest put across all files.
  * @return A compaction scanner.
  */
 protected InternalScanner createScanner(
     Store store,
     List<StoreFileScanner> scanners,
     ScanType scanType,
     long smallestReadPoint,
     long earliestPutTs)
     throws IOException {
   Scan scan = new Scan();
   scan.setMaxVersions(store.getFamily().getMaxVersions());
   return new StoreScanner(
       store, store.getScanInfo(), scan, scanners, scanType, smallestReadPoint, earliestPutTs);
 }
Example no. 6
0
 /**
  * @param store The store.
  * @param scanners Store file scanners.
  * @param smallestReadPoint Smallest MVCC read point.
  * @param earliestPutTs Earliest put across all files.
  * @param dropDeletesFromRow Drop deletes starting with this row, inclusive. Can be null.
  * @param dropDeletesToRow Drop deletes ending with this row, exclusive. Can be null.
  * @return A compaction scanner.
  */
 protected InternalScanner createScanner(
     Store store,
     List<StoreFileScanner> scanners,
     long smallestReadPoint,
     long earliestPutTs,
     byte[] dropDeletesFromRow,
     byte[] dropDeletesToRow)
     throws IOException {
   Scan scan = new Scan();
   scan.setMaxVersions(store.getFamily().getMaxVersions());
   return new StoreScanner(
       store,
       store.getScanInfo(),
       scan,
       scanners,
       smallestReadPoint,
       earliestPutTs,
       dropDeletesFromRow,
       dropDeletesToRow);
 }
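
Both createScanner overloads are protected hooks, so a compactor subclass can intercept the scanner used for compaction; a minimal sketch under that assumption (the delegating subclass and the log line are illustrative, not from the source):

 @Override
 protected InternalScanner createScanner(
     Store store,
     List<StoreFileScanner> scanners,
     ScanType scanType,
     long smallestReadPoint,
     long earliestPutTs)
     throws IOException {
   // Delegate to the default compaction scanner; a real subclass could wrap or
   // replace the returned scanner here.
   InternalScanner scanner =
       super.createScanner(store, scanners, scanType, smallestReadPoint, earliestPutTs);
   LOG.debug("Opened compaction scanner for " + store + ", scanType=" + scanType);
   return scanner;
 }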
Example no. 7
0
  /**
   * Extracts some details about the files to compact that are commonly needed by compactors.
   *
   * @param filesToCompact Files.
   * @param allFiles Whether all files are included for compaction
   * @return The result.
   */
  protected FileDetails getFileDetails(Collection<StoreFile> filesToCompact, boolean allFiles)
      throws IOException {
    FileDetails fd = new FileDetails();
    long oldestHFileTimeStampToKeepMVCC =
        System.currentTimeMillis() - (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod);

    for (StoreFile file : filesToCompact) {
      if (allFiles && (file.getModificationTimeStamp() < oldestHFileTimeStampToKeepMVCC)) {
        // when allFiles is true, all files are compacted so we can calculate the smallest
        // MVCC value to keep
        if (fd.minSeqIdToKeep < file.getMaxMemstoreTS()) {
          fd.minSeqIdToKeep = file.getMaxMemstoreTS();
        }
      }
      long seqNum = file.getMaxSequenceId();
      fd.maxSeqId = Math.max(fd.maxSeqId, seqNum);
      StoreFile.Reader r = file.getReader();
      if (r == null) {
        LOG.warn("Null reader for " + file.getPath());
        continue;
      }
      // NOTE: getFilterEntries could cause under-sized blooms if the user
      // switches bloom type (e.g. from ROW to ROWCOL)
      long keyCount =
          (r.getBloomFilterType() == store.getFamily().getBloomFilterType())
              ? r.getFilterEntries()
              : r.getEntries();
      fd.maxKeyCount += keyCount;
      // calculate the latest MVCC readpoint in any of the involved store files
      Map<byte[], byte[]> fileInfo = r.loadFileInfo();
      byte[] tmp = fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY);
      if (tmp != null) {
        fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp));
      }
      tmp = fileInfo.get(FileInfo.MAX_TAGS_LEN);
      if (tmp != null) {
        fd.maxTagsLength = Math.max(fd.maxTagsLength, Bytes.toInt(tmp));
      }
      // If required, calculate the earliest put timestamp of all involved storefiles.
      // This is used to remove family delete marker during compaction.
      long earliestPutTs = 0;
      if (allFiles) {
        tmp = fileInfo.get(StoreFile.EARLIEST_PUT_TS);
        if (tmp == null) {
          // There's a file with no information; it must be an old one,
          // so assume it contains very old puts
          fd.earliestPutTs = earliestPutTs = HConstants.OLDEST_TIMESTAMP;
        } else {
          earliestPutTs = Bytes.toLong(tmp);
          fd.earliestPutTs = Math.min(fd.earliestPutTs, earliestPutTs);
        }
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug(
            "Compacting "
                + file
                + ", keycount="
                + keyCount
                + ", bloomtype="
                + r.getBloomFilterType().toString()
                + ", size="
                + StringUtils.humanReadableInt(r.length())
                + ", encoding="
                + r.getHFileReader().getDataBlockEncoding()
                + ", seqNum="
                + seqNum
                + (allFiles ? ", earliestPutTs=" + earliestPutTs : ""));
      }
    }
    return fd;
  }
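
The FileDetails accumulator populated above is not shown in this excerpt; a sketch of the fields it must expose, inferred from the assignments in getFileDetails (types and default values are assumptions), is:

  protected static class FileDetails {
    // Maximum key count across the compacted files (summed per file above).
    public long maxKeyCount = 0;
    // Earliest put timestamp across the files; only tracked when allFiles is true.
    public long earliestPutTs = HConstants.LATEST_TIMESTAMP; // assumed default
    // Highest sequence id among the files.
    public long maxSeqId = 0;
    // Latest MVCC readpoint found in any of the involved store files.
    public long maxMVCCReadpoint = 0;
    // Largest max-tags-length recorded in the files' metadata.
    public int maxTagsLength = 0;
    // Smallest MVCC value to keep when all files are compacted.
    public long minSeqIdToKeep = 0;
  }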