@Override
public void run() {
  // Scratch buffers reused across every iteration; setupPut refills them per edit.
  final byte[] keyBuf = new byte[keySize];
  final byte[] valueBuf = new byte[valueSize];
  // Seed per-thread so concurrent benchmark threads generate distinct data.
  final Random rng = new Random(Thread.currentThread().getId());
  final HLog wal = region.getLog();
  try {
    final long begin = System.currentTimeMillis();
    for (int iteration = 0; iteration < numIterations; iteration++) {
      Put put = setupPut(rng, keyBuf, valueBuf, numFamilies);
      long timestamp = System.currentTimeMillis();
      WALEdit edit = new WALEdit();
      addFamilyMapToWALEdit(put.getFamilyCellMap(), edit);
      HRegionInfo regionInfo = region.getRegionInfo();
      if (!this.noSync) {
        wal.append(regionInfo, regionInfo.getTable(), edit, timestamp, htd);
      } else {
        wal.appendNoSync(regionInfo, regionInfo.getTable(), edit,
            new ArrayList<UUID>(), timestamp, htd);
      }
    }
    long elapsed = System.currentTimeMillis() - begin;
    logBenchmarkResult(Thread.currentThread().getName(), numIterations, elapsed);
  } catch (Exception e) {
    LOG.error(getClass().getSimpleName() + " Thread failed", e);
  }
}
/**
 * Closes the given region and then its WAL, if either exists.
 * A {@code null} region is a no-op.
 */
private void closeRegion(final HRegion region) throws IOException {
  if (region == null) {
    return;
  }
  region.close();
  HLog wal = region.getLog();
  if (wal != null) {
    wal.close();
  }
}
/**
 * Returns the next WAL entry, injecting a fake IOException at the position
 * selected by {@link #getFailureType()} (BEGINNING, MIDDLE or END of the file).
 *
 * On the first call the whole file is drained into {@code nextQueue}; later
 * calls just poll from that queue so the failure point can be computed from
 * the remaining queue size.
 */
@Override public HLog.Entry next(HLog.Entry reuse) throws IOException {
  // Record where this entry starts so failures can be tied to a file position.
  this.entryStart = this.reader.getPosition(); boolean b = true;
  if (nextQueue.isEmpty()) { // Read the whole thing at once and fake reading
    // NOTE: the final loop pass (when reader.next returns false) still offers
    // an entry with an EMPTY edit and bumps numberOfFileEntries — that trailing
    // empty entry is what the isEmpty() check at the bottom turns into the
    // end-of-log null. The failure-type size comparisons below rely on this
    // exact accounting; do not "fix" the off-by-one.
    while (b == true) {
      HLogKey key = HLog.newKey(conf); WALEdit val = new WALEdit();
      HLog.Entry e = new HLog.Entry(key, val);
      codec.setCompression(compressionContext);
      e.getEdit().setCodec(codec);
      if (compressionContext != null) { e.getKey().setCompressionContext(compressionContext); }
      b = this.reader.next(e.getKey(), e.getEdit());
      nextQueue.offer(e);
      numberOfFileEntries++;
    }
  }
  // Fault injection: full queue -> first read; half -> middle; one left -> last.
  if (nextQueue.size() == this.numberOfFileEntries && getFailureType() == FailureType.BEGINNING) {
    throw this.addFileInfoToException(new IOException("fake Exception"));
  } else if (nextQueue.size() == this.numberOfFileEntries / 2 && getFailureType() == FailureType.MIDDLE) {
    throw this.addFileInfoToException(new IOException("fake Exception"));
  } else if (nextQueue.size() == 1 && getFailureType() == FailureType.END) {
    throw this.addFileInfoToException(new IOException("fake Exception"));
  }
  // Count only real (non-exhausted) reads toward the edit counter.
  if (nextQueue.peek() != null) { edit++; }
  Entry e = nextQueue.poll();
  // The sentinel empty entry offered after the reader was exhausted marks EOF.
  if (e.getEdit().isEmpty()) { return null; }
  return e;
}
@Override public int run(String[] args) throws Exception { Path rootRegionDir = null; int numThreads = 1; long numIterations = 10000; int numFamilies = 1; boolean noSync = false; boolean verify = false; boolean verbose = false; boolean cleanup = true; boolean noclosefs = false; long roll = Long.MAX_VALUE; // Process command line args for (int i = 0; i < args.length; i++) { String cmd = args[i]; try { if (cmd.equals("-threads")) { numThreads = Integer.parseInt(args[++i]); } else if (cmd.equals("-iterations")) { numIterations = Long.parseLong(args[++i]); } else if (cmd.equals("-path")) { rootRegionDir = new Path(args[++i]); } else if (cmd.equals("-families")) { numFamilies = Integer.parseInt(args[++i]); } else if (cmd.equals("-qualifiers")) { numQualifiers = Integer.parseInt(args[++i]); } else if (cmd.equals("-keySize")) { keySize = Integer.parseInt(args[++i]); } else if (cmd.equals("-valueSize")) { valueSize = Integer.parseInt(args[++i]); } else if (cmd.equals("-nosync")) { noSync = true; } else if (cmd.equals("-verify")) { verify = true; } else if (cmd.equals("-verbose")) { verbose = true; } else if (cmd.equals("-nocleanup")) { cleanup = false; } else if (cmd.equals("-noclosefs")) { noclosefs = true; } else if (cmd.equals("-roll")) { roll = Long.parseLong(args[++i]); } else if (cmd.equals("-h")) { printUsageAndExit(); } else if (cmd.equals("--help")) { printUsageAndExit(); } else { System.err.println("UNEXPECTED: " + cmd); printUsageAndExit(); } } catch (Exception e) { printUsageAndExit(); } } // Run HLog Performance Evaluation // First set the fs from configs. 
In case we are on hadoop1 FSUtils.setFsDefault(getConf(), FSUtils.getRootDir(getConf())); FileSystem fs = FileSystem.get(getConf()); LOG.info("FileSystem: " + fs); try { if (rootRegionDir == null) { rootRegionDir = TEST_UTIL.getDataTestDirOnTestFS("HLogPerformanceEvaluation"); } rootRegionDir = rootRegionDir.makeQualified(fs); cleanRegionRootDir(fs, rootRegionDir); // Initialize Table Descriptor HTableDescriptor htd = createHTableDescriptor(numFamilies); final long whenToRoll = roll; HLog hlog = new FSHLog(fs, rootRegionDir, "wals", getConf()) { int appends = 0; @Override protected void doWrite( HRegionInfo info, HLogKey logKey, WALEdit logEdit, HTableDescriptor htd) throws IOException { this.appends++; if (this.appends % whenToRoll == 0) { LOG.info("Rolling after " + appends + " edits"); rollWriter(); } super.doWrite(info, logKey, logEdit, htd); }; }; hlog.rollWriter(); HRegion region = null; try { region = openRegion(fs, rootRegionDir, htd, hlog); long putTime = runBenchmark(new HLogPutBenchmark(region, htd, numIterations, noSync), numThreads); logBenchmarkResult( "Summary: threads=" + numThreads + ", iterations=" + numIterations, numIterations * numThreads, putTime); if (region != null) { closeRegion(region); region = null; } if (verify) { Path dir = ((FSHLog) hlog).getDir(); long editCount = 0; FileStatus[] fsss = fs.listStatus(dir); if (fsss.length == 0) throw new IllegalStateException("No WAL found"); for (FileStatus fss : fsss) { Path p = fss.getPath(); if (!fs.exists(p)) throw new IllegalStateException(p.toString()); editCount += verify(p, verbose); } long expected = numIterations * numThreads; if (editCount != expected) { throw new IllegalStateException("Counted=" + editCount + ", expected=" + expected); } } } finally { if (region != null) closeRegion(region); // Remove the root dir for this test region if (cleanup) cleanRegionRootDir(fs, rootRegionDir); } } finally { // We may be called inside a test that wants to keep on using the fs. 
if (!noclosefs) fs.close(); } return (0); }