/**
 * Creates a 'manifest' for the specified region, by reading directly from the HRegion object.
 * This is used by the "online snapshot" when the table is enabled.
 */
public void addRegion(final HRegion region) throws IOException {
  // 0. Get the ManifestBuilder/RegionVisitor
  RegionVisitor visitor = createRegionVisitor(desc);

  // 1. dump region meta info into the snapshot directory
  LOG.debug("Storing '" + region + "' region-info for snapshot.");
  Object regionData = visitor.regionOpen(region.getRegionInfo());
  monitor.rethrowException();

  // 2. iterate through all the stores in the region
  LOG.debug("Creating references for hfiles");

  for (Store store : region.getStores()) {
    // 2.1. build the snapshot reference for the store
    Object familyData = visitor.familyOpen(regionData, store.getFamily().getName());
    monitor.rethrowException();

    List<StoreFile> storeFiles = new ArrayList<StoreFile>(store.getStorefiles());
    if (LOG.isDebugEnabled()) {
      LOG.debug("Adding snapshot references for " + storeFiles + " hfiles");
    }

    // 2.2. iterate through all the store's files and create "references".
    for (int i = 0, sz = storeFiles.size(); i < sz; i++) {
      StoreFile storeFile = storeFiles.get(i);
      monitor.rethrowException();

      // create "reference" to this store file.
      LOG.debug("Adding reference for file (" + (i + 1) + "/" + sz + "): " + storeFile.getPath());
      visitor.storeFile(regionData, familyData, storeFile.getFileInfo());
    }
    visitor.familyClose(regionData, familyData);
  }
  visitor.regionClose(regionData);
}
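The method above is driven entirely through the visitor returned by createRegionVisitor(desc), which lets the same region traversal feed different manifest writers. As a sketch of that contract, here is an interface reconstructed purely from the call sites (the real SnapshotManifest.RegionVisitor in HBase is generic over the region and family handle types rather than using Object), together with a hypothetical tracing implementation:

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.util.Bytes;

// Reconstructed from the call sites in addRegion(); not the actual
// SnapshotManifest.RegionVisitor declaration.
interface RegionVisitorSketch {
  Object regionOpen(HRegionInfo regionInfo) throws IOException;
  Object familyOpen(Object regionData, byte[] familyName) throws IOException;
  void storeFile(Object regionData, Object familyData, StoreFileInfo storeFile) throws IOException;
  void familyClose(Object regionData, Object familyData) throws IOException;
  void regionClose(Object regionData) throws IOException;
}

// Hypothetical implementation that only traces the traversal instead of
// writing manifest data; handle objects are plain Strings here.
class TracingRegionVisitor implements RegionVisitorSketch {
  @Override
  public Object regionOpen(HRegionInfo regionInfo) {
    System.out.println("region open: " + regionInfo.getEncodedName());
    return regionInfo.getEncodedName();
  }

  @Override
  public Object familyOpen(Object regionData, byte[] familyName) {
    String family = Bytes.toString(familyName);
    System.out.println("  family open: " + family);
    return family;
  }

  @Override
  public void storeFile(Object regionData, Object familyData, StoreFileInfo storeFile) {
    System.out.println("    store file: " + storeFile.getPath());
  }

  @Override
  public void familyClose(Object regionData, Object familyData) {
    System.out.println("  family close: " + familyData);
  }

  @Override
  public void regionClose(Object regionData) {
    System.out.println("region close: " + regionData);
  }
}

A visitor like this makes the traversal order explicit: one regionOpen, then for each column family a familyOpen/familyClose pair wrapping one storeFile call per HFile, and finally regionClose.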
@Override
public void postFlush(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
    StoreFile resultFile) throws IOException {
  // Register HFiles for incremental backup
  SpliceLogUtils.info(LOG, "Flushing region %s.%s", tableName, regionName);
  try {
    // Only tables in the "splice" namespace participate in incremental backup.
    if (!namespace.equals("splice")) {
      return;
    }
    BackupUtils.captureIncrementalChanges(conf, region, path, fs, rootDir, backupDir,
        tableName, resultFile.getPath().getName(), preparing);
  } catch (Exception ex) {
    throw new IOException(ex);
  }
}
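This postFlush hook only fires if the observer is attached to the region. A minimal sketch of one way to attach it, using the HBase 1.x HTableDescriptor.addCoprocessor API at table-creation time (the class name com.example.BackupObserver is a placeholder for the class implementing the hook above, and the 'splice' namespace is assumed to already exist):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RegisterBackupObserver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Table in the "splice" namespace, so the namespace check in
      // postFlush passes; "EXAMPLE" and family "V" are illustrative.
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("splice", "EXAMPLE"));
      desc.addFamily(new HColumnDescriptor("V"));
      // Placeholder class name for the RegionObserver shown above.
      desc.addCoprocessor("com.example.BackupObserver");
      admin.createTable(desc);
    }
  }
}

Alternatively, a region observer can be loaded on every table via the hbase.coprocessor.region.classes property in hbase-site.xml, which is why the hook guards on the namespace before doing any work.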
/**
 * Extracts some details about the files to compact that are commonly needed by compactors.
 *
 * @param filesToCompact Files.
 * @param allFiles Whether all files are included for compaction
 * @return The result.
 */
protected FileDetails getFileDetails(Collection<StoreFile> filesToCompact, boolean allFiles)
    throws IOException {
  FileDetails fd = new FileDetails();
  long oldestHFileTimeStampToKeepMVCC =
      System.currentTimeMillis() - (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod);

  for (StoreFile file : filesToCompact) {
    if (allFiles && (file.getModificationTimeStamp() < oldestHFileTimeStampToKeepMVCC)) {
      // when isAllFiles is true, all files are compacted so we can calculate the smallest
      // MVCC value to keep
      if (fd.minSeqIdToKeep < file.getMaxMemstoreTS()) {
        fd.minSeqIdToKeep = file.getMaxMemstoreTS();
      }
    }
    long seqNum = file.getMaxSequenceId();
    fd.maxSeqId = Math.max(fd.maxSeqId, seqNum);
    StoreFile.Reader r = file.getReader();
    if (r == null) {
      LOG.warn("Null reader for " + file.getPath());
      continue;
    }
    // NOTE: getFilterEntries could cause under-sized blooms if the user
    // switches bloom type (e.g. from ROW to ROWCOL)
    long keyCount = (r.getBloomFilterType() == store.getFamily().getBloomFilterType())
        ? r.getFilterEntries() : r.getEntries();
    fd.maxKeyCount += keyCount;
    // calculate the latest MVCC readpoint in any of the involved store files
    Map<byte[], byte[]> fileInfo = r.loadFileInfo();
    byte[] tmp = fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY);
    if (tmp != null) {
      fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp));
    }
    tmp = fileInfo.get(FileInfo.MAX_TAGS_LEN);
    if (tmp != null) {
      fd.maxTagsLength = Math.max(fd.maxTagsLength, Bytes.toInt(tmp));
    }
    // If required, calculate the earliest put timestamp of all involved storefiles.
    // This is used to remove family delete marker during compaction.
    long earliestPutTs = 0;
    if (allFiles) {
      tmp = fileInfo.get(StoreFile.EARLIEST_PUT_TS);
      if (tmp == null) {
        // There's a file with no information, must be an old one
        // assume we have very old puts
        fd.earliestPutTs = earliestPutTs = HConstants.OLDEST_TIMESTAMP;
      } else {
        earliestPutTs = Bytes.toLong(tmp);
        fd.earliestPutTs = Math.min(fd.earliestPutTs, earliestPutTs);
      }
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Compacting " + file
          + ", keycount=" + keyCount
          + ", bloomtype=" + r.getBloomFilterType().toString()
          + ", size=" + StringUtils.humanReadableInt(r.length())
          + ", encoding=" + r.getHFileReader().getDataBlockEncoding()
          + ", seqNum=" + seqNum
          + (allFiles ? ", earliestPutTs=" + earliestPutTs : ""));
    }
  }
  return fd;
}
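Two aggregation rules in getFileDetails are easy to miss: sequence ids and MVCC readpoints merge with max (newest wins), while the earliest put timestamp merges with min (oldest wins), falling back to HConstants.OLDEST_TIMESTAMP when a file carries no EARLIEST_PUT_TS entry. A small illustrative sketch of those rules, with the day-based cutoff rewritten via TimeUnit (names here are illustrative, not HBase API):

import java.util.concurrent.TimeUnit;

// Illustrative only: mirrors the aggregation rules used by getFileDetails().
final class FileDetailsRules {
  // Same cutoff as (1000L * 60 * 60 * 24 * keepSeqIdPeriod), written with
  // TimeUnit for readability: files older than this keep no MVCC info.
  static long mvccCutoff(long nowMillis, int keepSeqIdPeriodDays) {
    return nowMillis - TimeUnit.DAYS.toMillis(keepSeqIdPeriodDays);
  }

  long maxSeqId = 0;
  long maxMVCCReadpoint = 0;
  long earliestPutTs = Long.MAX_VALUE;            // like HConstants.LATEST_TIMESTAMP
  static final long OLDEST_TIMESTAMP = Long.MIN_VALUE; // mirrors HConstants.OLDEST_TIMESTAMP

  // fileEarliestPutTs == null models a file missing the EARLIEST_PUT_TS entry.
  void accumulate(long fileMaxSeqId, long fileMvccReadpoint, Long fileEarliestPutTs) {
    maxSeqId = Math.max(maxSeqId, fileMaxSeqId);                     // newest wins
    maxMVCCReadpoint = Math.max(maxMVCCReadpoint, fileMvccReadpoint);
    earliestPutTs = (fileEarliestPutTs == null)
        ? OLDEST_TIMESTAMP                                           // conservative fallback
        : Math.min(earliestPutTs, fileEarliestPutTs);                // oldest wins
  }
}

The conservative fallback matters because earliestPutTs is used to decide when family delete markers can be dropped; assuming "very old puts" for an uninformative file means markers are never removed too eagerly.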