protected void finalizeWriter(
    StoreFile.Writer writer, long cacheFlushSeqNum, MonitoredTask status) throws IOException {
  // Write out the log sequence number that corresponds to this output
  // hfile. Also write current time in metadata as minFlushTime.
  // The hfile is current up to and including cacheFlushSeqNum.
  status.setStatus("Flushing " + store + ": appending metadata");
  writer.appendMetadata(cacheFlushSeqNum, false);
  status.setStatus("Flushing " + store + ": closing flushed file");
  writer.close();
}
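/*
 * A minimal usage sketch (an assumption, not part of the original class): a flush
 * path would typically write the memstore snapshot into a temporary store file and
 * then hand the writer to finalizeWriter() so the flush sequence id is recorded as
 * metadata before the file is closed. The helper name, the Iterable<KeyValue>
 * snapshot parameter, and the createWriterInTmp() flag values are illustrative
 * guesses; the call shape mirrors the one used in compact() below.
 */
private void exampleFlushSnapshot(
    Iterable<KeyValue> snapshot, int snapshotSize, long cacheFlushSeqNum, MonitoredTask status)
    throws IOException {
  // Create the writer in the store's tmp directory; the last two flags
  // (isCompaction, includeMVCCReadpoint) are assumptions for this sketch.
  StoreFile.Writer writer =
      store.createWriterInTmp(snapshotSize, store.getFamily().getCompression(), false, true);
  for (KeyValue kv : snapshot) {
    writer.append(kv); // write every snapshot cell into the new hfile
  }
  // Record the flush sequence id in the file metadata and close it.
  finalizeWriter(writer, cacheFlushSeqNum, status);
}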
/**
 * Do a minor/major compaction on an explicit set of storefiles from a Store.
 *
 * @param request the requested compaction that contains all necessary information to complete
 *     the compaction (i.e. the store, the files, etc.)
 * @return Product of compaction or null if all cells expired or deleted and nothing made it
 *     through the compaction.
 * @throws IOException
 */
StoreFile.Writer compact(CompactionRequest request, long maxId) throws IOException {
  // Calculate maximum key count after compaction (for blooms)
  // Also calculate earliest put timestamp if major compaction
  int maxKeyCount = 0;
  long earliestPutTs = HConstants.LATEST_TIMESTAMP;
  long maxMVCCReadpoint = 0;

  // pull out the interesting things from the CR for ease later
  final Store store = request.getStore();
  final boolean majorCompaction = request.isMajor();
  final List<StoreFile> filesToCompact = request.getFiles();

  for (StoreFile file : filesToCompact) {
    StoreFile.Reader r = file.getReader();
    if (r == null) {
      LOG.warn("Null reader for " + file.getPath());
      continue;
    }
    // NOTE: getFilterEntries could cause under-sized blooms if the user
    // switches bloom type (e.g. from ROW to ROWCOL)
    long keyCount =
        (r.getBloomFilterType() == store.getFamily().getBloomFilterType())
            ? r.getFilterEntries()
            : r.getEntries();
    maxKeyCount += keyCount;

    // Calculate the maximum MVCC readpoint used in any of the involved files
    Map<byte[], byte[]> fileInfo = r.loadFileInfo();
    byte[] tmp = fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY);
    if (tmp != null) {
      maxMVCCReadpoint = Math.max(maxMVCCReadpoint, Bytes.toLong(tmp));
    }

    // For major compactions calculate the earliest put timestamp
    // of all involved storefiles. This is used to remove
    // family delete marker during the compaction.
    if (majorCompaction) {
      tmp = fileInfo.get(StoreFile.EARLIEST_PUT_TS);
      if (tmp == null) {
        // There's a file with no information, must be an old one
        // assume we have very old puts
        earliestPutTs = HConstants.OLDEST_TIMESTAMP;
      } else {
        earliestPutTs = Math.min(earliestPutTs, Bytes.toLong(tmp));
      }
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug(
          "Compacting " + file
              + ", keycount=" + keyCount
              + ", bloomtype=" + r.getBloomFilterType().toString()
              + ", size=" + StringUtils.humanReadableInt(r.length())
              + ", encoding=" + r.getHFileReader().getEncodingOnDisk()
              + (majorCompaction ? ", earliestPutTs=" + earliestPutTs : ""));
    }
  }

  // keep track of compaction progress
  this.progress = new CompactionProgress(maxKeyCount);

  // Get some configs
  int compactionKVMax = getConf().getInt("hbase.hstore.compaction.kv.max", 10);
  Compression.Algorithm compression = store.getFamily().getCompression();
  // Avoid overriding compression setting for major compactions if the user
  // has not specified it separately
  Compression.Algorithm compactionCompression =
      (store.getFamily().getCompactionCompression() != Compression.Algorithm.NONE)
          ? store.getFamily().getCompactionCompression()
          : compression;

  // For each file, obtain a scanner:
  List<StoreFileScanner> scanners =
      StoreFileScanner.getScannersForStoreFiles(filesToCompact, false, false, true);

  // Make the instantiation lazy in case compaction produces no product; i.e.
  // where all source cells are expired or deleted.
  StoreFile.Writer writer = null;

  // Find the smallest read point across all the Scanners.
  long smallestReadPoint = store.getHRegion().getSmallestReadPoint();
  MultiVersionConsistencyControl.setThreadReadPoint(smallestReadPoint);
  try {
    InternalScanner scanner = null;
    try {
      if (store.getHRegion().getCoprocessorHost() != null) {
        scanner =
            store.getHRegion().getCoprocessorHost().preCompactScannerOpen(
                store,
                scanners,
                majorCompaction ? ScanType.MAJOR_COMPACT : ScanType.MINOR_COMPACT,
                earliestPutTs,
                request);
      }
      if (scanner == null) {
        Scan scan = new Scan();
        scan.setMaxVersions(store.getFamily().getMaxVersions());
        /* Include deletes, unless we are doing a major compaction */
        scanner =
            new StoreScanner(
                store,
                store.getScanInfo(),
                scan,
                scanners,
                majorCompaction ? ScanType.MAJOR_COMPACT : ScanType.MINOR_COMPACT,
                smallestReadPoint,
                earliestPutTs);
      }
      if (store.getHRegion().getCoprocessorHost() != null) {
        InternalScanner cpScanner =
            store.getHRegion().getCoprocessorHost().preCompact(store, scanner, request);
        // NULL scanner returned from coprocessor hooks means skip normal processing
        if (cpScanner == null) {
          return null;
        }
        scanner = cpScanner;
      }

      int bytesWritten = 0;
      // since scanner.next() can return 'false' but still be delivering data,
      // we have to use a do/while loop.
      List<KeyValue> kvs = new ArrayList<KeyValue>();
      // Limit to "hbase.hstore.compaction.kv.max" (default 10) to avoid OOME
      boolean hasMore;
      do {
        hasMore = scanner.next(kvs, compactionKVMax);
        if (writer == null && !kvs.isEmpty()) {
          writer =
              store.createWriterInTmp(
                  maxKeyCount,
                  compactionCompression,
                  true,
                  maxMVCCReadpoint >= smallestReadPoint);
        }
        if (writer != null) {
          // output to writer:
          for (KeyValue kv : kvs) {
            if (kv.getMemstoreTS() <= smallestReadPoint) {
              kv.setMemstoreTS(0);
            }
            writer.append(kv);
            // update progress per key
            ++progress.currentCompactedKVs;

            // check periodically to see if a system stop is requested
            if (Store.closeCheckInterval > 0) {
              bytesWritten += kv.getLength();
              if (bytesWritten > Store.closeCheckInterval) {
                bytesWritten = 0;
                isInterrupted(store, writer);
              }
            }
          }
        }
        kvs.clear();
      } while (hasMore);
    } finally {
      if (scanner != null) {
        scanner.close();
      }
    }
  } finally {
    if (writer != null) {
      writer.appendMetadata(maxId, majorCompaction);
      writer.close();
    }
  }
  return writer;
}
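/*
 * A minimal caller sketch (an assumption, not part of the original class): a
 * compaction runner is expected to resolve the highest sequence id among the
 * selected files, invoke compact(), and only install the result when a writer
 * was produced, since compact() returns null when every cell was expired or
 * deleted or when a coprocessor short-circuits the compaction. The helper name
 * and the getMaxSequenceIdInList() call are illustrative guesses.
 */
private void exampleRunCompaction(CompactionRequest request) throws IOException {
  // Highest sequence id among the files being compacted; compact() writes it
  // into the new file's metadata via writer.appendMetadata(maxId, majorCompaction).
  long maxId = StoreFile.getMaxSequenceIdInList(request.getFiles()); // assumed helper
  StoreFile.Writer writer = compact(request, maxId);
  if (writer == null) {
    return; // nothing survived the compaction; no new store file to install
  }
  // A real caller would now move the tmp file into the store directory and
  // swap it in for the compacted inputs (a completeCompaction-style step).
}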