public static void main(String[] args) throws Exception {
  ConsoleReporter.enable(2, TimeUnit.SECONDS);
  Random random = new Random();
  while (true) {
    TimerContext context = timer.time();
    Thread.sleep(random.nextInt(1000));
    context.stop();
  }
}
@Override
public String report() {
  arrayStream.reset();
  reporter.run();
  return arrayStream.toString();
}
@Override
public boolean enable() {
  try {
    PrintStream stream = createPrintStream();
    // static enable() methods omit the option of specifying a predicate.
    // Calling the constructor and starting manually instead.
    final ConsoleReporter reporter = new ConsoleReporter(
        Metrics.defaultRegistry(),
        stream,
        MetricPredicateTransformer.generatePredicate(getPredicate()));
    reporter.start(getPeriod(), getRealTimeunit());
  } catch (Exception e) {
    log.error("Failure while enabling console reporter", e);
    return false;
  }
  return true;
}
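A minimal sketch (not from the original source) of the predicate the comment above refers to, assuming the Yammer Metrics 2.x MetricPredicate interface; the group name used is hypothetical:

// Illustrative sketch: a custom MetricPredicate that restricts console output
// to metrics registered under a single (hypothetical) group, "com.example.app".
final MetricPredicate onlyMyGroup = new MetricPredicate() {
  @Override
  public boolean matches(MetricName name, Metric metric) {
    return "com.example.app".equals(name.getGroup());
  }
};
// Same constructor-plus-start pattern as above, but with the custom predicate.
final ConsoleReporter filtered = new ConsoleReporter(
    Metrics.defaultRegistry(), System.out, onlyMyGroup);
filtered.start(10, TimeUnit.SECONDS);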
public static void main(String[] args) throws Exception {
  Config config = new Config();
  Cache cache = config.getCache();

  // cache data generator sequence
  FixedSizeElementSequence elementSequence =
      new FixedSizeElementSequence(Config.NUMBER_OF_ENTRIES, Config.SIZE_OF_ENTRY);

  cache.removeAll();
  LOG.info("cache size should be zero. size=" + cache.getSize() + "\n starting load");
  Util.sleepFor(3);

  cache.setNodeBulkLoadEnabled(true);
  long start = System.currentTimeMillis();

  // single threaded for now; start putting data into the cache
  PutWorker putWorker = new PutWorker(cache, elementSequence);
  // TODO: use an executor to make this multi-threaded
  putWorker.start();
  // for now, wait for the put to complete
  putWorker.join();

  // now turn bulk mode back off
  long endOfWorkerThread = System.currentTimeMillis();
  cache.setNodeBulkLoadEnabled(false);
  Config.CALL_TIMER.stop(); // stop the timer
  long endOfBulkLoadReset = System.currentTimeMillis();

  int endSize = cache.getSize();
  LOG.info("complete. total time for " + Config.NUMBER_OF_ENTRIES + " puts to complete is "
      + (endOfWorkerThread - start) + "ms, and time it took to reset bulk mode is "
      + (endOfBulkLoadReset - endOfWorkerThread) + "ms \n End size=" + endSize);

  // for now, wait for user input to close the program. This gives us time to look
  // at the metrics on the JMX console or TMC :)
  Util.waitForInput();

  ConsoleReporter.enable(1, TimeUnit.SECONDS);
  Util.sleepFor(2);
}
@Override
public void onStart(Application app) {
  Logger.info("Creating injector with " + modules.size() + " modules.");
  // log to the console every minute in DEV
  if (Play.isDev()) {
    ConsoleReporter.enable(1, TimeUnit.MINUTES);
  }
  Injector injector = Guice.createInjector(Stage.PRODUCTION, modules);
  for (OnStartListener listener : onStartListeners) {
    listener.onApplicationStart(app, injector);
  }
}
/**
 * Enables the console reporter for the given metrics registry, and causes it to print to STDOUT
 * with the specified period and unrestricted output.
 *
 * @param metricsRegistry the metrics registry
 * @param period          the period between successive outputs
 * @param unit            the time unit of {@code period}
 */
public static void enable(MetricsRegistry metricsRegistry, long period, TimeUnit unit) {
  final ConsoleReporter reporter =
      new ConsoleReporter(metricsRegistry, System.out, MetricPredicate.ALL);
  reporter.start(period, unit);
}
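A minimal usage sketch (not from the original sources) of the overload documented above, assuming the Yammer Metrics 2.x API (com.yammer.metrics.core.*, com.yammer.metrics.reporting.ConsoleReporter); the class, registry, and timer names are hypothetical:

// Hypothetical example: create a registry, register a timer, and report all
// metrics in the registry to STDOUT every 5 seconds via enable(...) above.
public class ConsoleReporterExample {
  public static void main(String[] args) throws Exception {
    MetricsRegistry registry = new MetricsRegistry();
    Timer requests = registry.newTimer(ConsoleReporterExample.class, "requests",
        TimeUnit.MILLISECONDS, TimeUnit.SECONDS);
    ConsoleReporter.enable(registry, 5, TimeUnit.SECONDS);

    TimerContext context = requests.time();
    Thread.sleep(100); // stand-in for the work being timed
    context.stop();
  }
}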
@Override
public int run(String[] args) throws Exception {
  Path rootRegionDir = null;
  int numThreads = 1;
  long numIterations = 1000000;
  int numFamilies = 1;
  int syncInterval = 0;
  boolean noSync = false;
  boolean verify = false;
  boolean verbose = false;
  boolean cleanup = true;
  boolean noclosefs = false;
  long roll = Long.MAX_VALUE;
  boolean compress = false;
  String cipher = null;
  int numRegions = 1;
  String spanReceivers = getConf().get("hbase.trace.spanreceiver.classes");
  boolean trace = spanReceivers != null && !spanReceivers.isEmpty();
  double traceFreq = 1.0;

  // Process command line args
  for (int i = 0; i < args.length; i++) {
    String cmd = args[i];
    try {
      if (cmd.equals("-threads")) {
        numThreads = Integer.parseInt(args[++i]);
      } else if (cmd.equals("-iterations")) {
        numIterations = Long.parseLong(args[++i]);
      } else if (cmd.equals("-path")) {
        rootRegionDir = new Path(args[++i]);
      } else if (cmd.equals("-families")) {
        numFamilies = Integer.parseInt(args[++i]);
      } else if (cmd.equals("-qualifiers")) {
        numQualifiers = Integer.parseInt(args[++i]);
      } else if (cmd.equals("-keySize")) {
        keySize = Integer.parseInt(args[++i]);
      } else if (cmd.equals("-valueSize")) {
        valueSize = Integer.parseInt(args[++i]);
      } else if (cmd.equals("-syncInterval")) {
        syncInterval = Integer.parseInt(args[++i]);
      } else if (cmd.equals("-nosync")) {
        noSync = true;
      } else if (cmd.equals("-verify")) {
        verify = true;
      } else if (cmd.equals("-verbose")) {
        verbose = true;
      } else if (cmd.equals("-nocleanup")) {
        cleanup = false;
      } else if (cmd.equals("-noclosefs")) {
        noclosefs = true;
      } else if (cmd.equals("-roll")) {
        roll = Long.parseLong(args[++i]);
      } else if (cmd.equals("-compress")) {
        compress = true;
      } else if (cmd.equals("-encryption")) {
        cipher = args[++i];
      } else if (cmd.equals("-regions")) {
        numRegions = Integer.parseInt(args[++i]);
      } else if (cmd.equals("-traceFreq")) {
        traceFreq = Double.parseDouble(args[++i]);
      } else if (cmd.equals("-h") || cmd.equals("--help")) {
        printUsageAndExit();
      } else {
        System.err.println("UNEXPECTED: " + cmd);
        printUsageAndExit();
      }
    } catch (Exception e) {
      printUsageAndExit();
    }
  }

  if (compress) {
    Configuration conf = getConf();
    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
  }

  if (cipher != null) {
    // Set up WAL for encryption
    Configuration conf = getConf();
    conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
    conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
    conf.setClass("hbase.regionserver.hlog.reader.impl", SecureProtobufLogReader.class,
        WAL.Reader.class);
    conf.setClass("hbase.regionserver.hlog.writer.impl", SecureProtobufLogWriter.class,
        Writer.class);
    conf.setBoolean(HConstants.ENABLE_WAL_ENCRYPTION, true);
    conf.set(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, cipher);
  }

  if (numThreads < numRegions) {
    LOG.warn("Number of threads is less than the number of regions; some regions will sit idle.");
  }

  // Internal config. Goes off number of threads; if more threads than handlers, stuff breaks.
  // In the regionserver, number of handlers == number of threads.
  getConf().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, numThreads);

  // Run WAL Performance Evaluation
  // First set the fs from configs, in case we are on hadoop1
  FSUtils.setFsDefault(getConf(), FSUtils.getRootDir(getConf()));
  FileSystem fs = FileSystem.get(getConf());
  LOG.info("FileSystem: " + fs);

  SpanReceiverHost receiverHost = trace ? SpanReceiverHost.getInstance(getConf()) : null;
  TraceScope scope = Trace.startSpan("WALPerfEval", trace ? Sampler.ALWAYS : Sampler.NEVER);

  try {
    if (rootRegionDir == null) {
      rootRegionDir = TEST_UTIL.getDataTestDirOnTestFS("WALPerformanceEvaluation");
    }
    rootRegionDir = rootRegionDir.makeQualified(fs);
    cleanRegionRootDir(fs, rootRegionDir);
    FSUtils.setRootDir(getConf(), rootRegionDir);
    final WALFactory wals = new WALFactory(getConf(), null, "wals");
    final HRegion[] regions = new HRegion[numRegions];
    final Runnable[] benchmarks = new Runnable[numRegions];
    final MockRegionServerServices mockServices = new MockRegionServerServices(getConf());
    final LogRoller roller = new LogRoller(mockServices, mockServices);
    Threads.setDaemonThreadRunning(roller.getThread(), "WALPerfEval.logRoller");

    try {
      for (int i = 0; i < numRegions; i++) {
        // Initialize Table Descriptor
        // a table per desired region means we can avoid carving up the key space
        final HTableDescriptor htd = createHTableDescriptor(i, numFamilies);
        regions[i] = openRegion(fs, rootRegionDir, htd, wals, roll, roller);
        benchmarks[i] = Trace.wrap(
            new WALPutBenchmark(regions[i], htd, numIterations, noSync, syncInterval, traceFreq));
      }
      ConsoleReporter.enable(this.metrics, 30, TimeUnit.SECONDS);
      long putTime = runBenchmark(benchmarks, numThreads);
      logBenchmarkResult(
          "Summary: threads=" + numThreads + ", iterations=" + numIterations
              + ", syncInterval=" + syncInterval,
          numIterations * numThreads, putTime);

      for (int i = 0; i < numRegions; i++) {
        if (regions[i] != null) {
          closeRegion(regions[i]);
          regions[i] = null;
        }
      }
      if (verify) {
        LOG.info("verifying written log entries.");
        Path dir = new Path(FSUtils.getRootDir(getConf()),
            DefaultWALProvider.getWALDirectoryName("wals"));
        long editCount = 0;
        FileStatus[] fsss = fs.listStatus(dir);
        if (fsss.length == 0) throw new IllegalStateException("No WAL found");
        for (FileStatus fss : fsss) {
          Path p = fss.getPath();
          if (!fs.exists(p)) throw new IllegalStateException(p.toString());
          editCount += verify(wals, p, verbose);
        }
        long expected = numIterations * numThreads;
        if (editCount != expected) {
          throw new IllegalStateException("Counted=" + editCount + ", expected=" + expected);
        }
      }
    } finally {
      mockServices.stop("test clean up.");
      for (int i = 0; i < numRegions; i++) {
        if (regions[i] != null) {
          closeRegion(regions[i]);
        }
      }
      if (null != roller) {
        LOG.info("shutting down log roller.");
        Threads.shutdown(roller.getThread());
      }
      wals.shutdown();
      // Remove the root dir for this test region
      if (cleanup) cleanRegionRootDir(fs, rootRegionDir);
    }
  } finally {
    // We may be called inside a test that wants to keep on using the fs.
    if (!noclosefs) fs.close();
    scope.close();
    if (receiverHost != null) receiverHost.closeReceivers();
  }

  return 0;
}