@Override
public ConsoleReporter get() {
    ConsoleReporter reporter = ConsoleReporter.forRegistry(metricRegistry).build();
    String interval = launchConfig.getOther("metrics.scheduledreporter.interval", "30");
    reporter.start(Long.parseLong(interval), TimeUnit.SECONDS);
    return reporter;
}
/**
 * Initializes the console reporter based on the current configuration.
 *
 * @param config the console reporter configuration
 */
private void configureConsoleReporter(ConsoleReporterConfig config) {
    if (config.isEnabled()) {
        consoleReporter = ConsoleReporter.forRegistry(metricRegistry)
                .convertRatesTo(config.getConvertRates())
                .convertDurationsTo(config.getConvertDurations())
                .build();
        consoleReporter.start(config.getFrequency(), config.getFrequencyUnit());
    }
}
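The ConsoleReporterConfig type used above is not part of Dropwizard Metrics and is not shown in the snippet. A minimal sketch of what such a holder could look like, assuming plain getters for the fields the method reads (the class and field names here are illustrative, not the original author's):

import java.util.concurrent.TimeUnit;

/** Hypothetical configuration holder backing configureConsoleReporter (names assumed). */
public class ConsoleReporterConfig {
    private boolean enabled = false;
    private TimeUnit convertRates = TimeUnit.SECONDS;
    private TimeUnit convertDurations = TimeUnit.MILLISECONDS;
    private long frequency = 30;
    private TimeUnit frequencyUnit = TimeUnit.SECONDS;

    public boolean isEnabled() { return enabled; }
    public TimeUnit getConvertRates() { return convertRates; }
    public TimeUnit getConvertDurations() { return convertDurations; }
    public long getFrequency() { return frequency; }
    public TimeUnit getFrequencyUnit() { return frequencyUnit; }

    // Setters omitted; in practice these fields would be bound from application configuration.
}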
/** Stop the reporter threads. */
@PreDestroy
public void stopReporters() {
    if (consoleReporter != null) {
        consoleReporter.stop();
    }
    if (graphiteReporter != null) {
        graphiteReporter.stop();
    }
}
/** @author @obazoud (Olivier Bazoud) */
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration
public class ChunkMetricsTest {

    @Autowired
    private JobLauncherTestUtils jobLauncherTestUtils;

    @Autowired
    private MetricRegistry metricRegistry;

    private ConsoleReporter reporter;

    @Before
    public void before_reporter() {
        // Build the reporter here rather than in a field initializer so that the
        // autowired metricRegistry has already been injected.
        reporter = ConsoleReporter.forRegistry(metricRegistry).build();
        reporter.start(1, TimeUnit.SECONDS);
    }

    @After
    public void after_reporter() {
        reporter.stop();
    }

    @Test
    public void test() throws Exception {
        Map<String, JobParameter> parameters = new HashMap<String, JobParameter>();
        JobExecution jobExecution = jobLauncherTestUtils.launchJob(new JobParameters(parameters));
        Assert.assertEquals(jobExecution.getExitStatus().getExitDescription(),
                BatchStatus.COMPLETED, jobExecution.getStatus());

        Map<String, Meter> meters = metricRegistry.getMeters();
        assertThat(meters).hasSize(2);
        assertThat(meters)
                .containsKey("batch.sampleJob.job.metered")
                .containsKey("batch.sampleJob.step.chunkStep.step.metered");
        assertThat(extractProperty("count", Number.class).from(meters.values()))
                .contains(1L)
                .doesNotContain(0L);

        Map<String, Timer> timers = metricRegistry.getTimers();
        assertThat(timers).hasSize(6);
        assertThat(timers)
                .containsKey("batch.sampleJob.job.timed")
                .containsKey("batch.sampleJob.step.chunkStep.chunk.timed")
                .containsKey("batch.sampleJob.step.chunkStep.step.timed")
                .containsKey("batch.sampleJob.step.chunkStep.read.timed")
                .containsKey("batch.sampleJob.step.chunkStep.process.timed")
                .containsKey("batch.sampleJob.step.chunkStep.write.timed");
        assertThat(extractProperty("count", Number.class).from(timers.values()))
                .contains(1L, 3L, 4L)
                .doesNotContain(0L);

        Map<String, Gauge> gauges = metricRegistry.getGauges();
        assertThat(gauges).hasSize(0);
    }
}
@SuppressWarnings("OverlyLongMethod") private void run(Config config) { System.err.println( " _\n" + " _ __| |___ __ _\n" + "| '_ \\ / _ \\/ _` |\n" + "| .__/_\\___/\\__, |\n" + "|_| |___/ stress"); final Config stressConfig = config.getConfig("plog.stress"); final int threadCount = stressConfig.getInt("threads"); log.info("Using {} threads", threadCount); final int rate = stressConfig.getInt("rate"); final RateLimiter rateLimiter = RateLimiter.create(rate); final int socketRenewRate = stressConfig.getInt("renew_rate"); final int minSize = stressConfig.getInt("min_size"); final int maxSize = stressConfig.getInt("max_size"); final int sizeIncrements = stressConfig.getInt("size_increments"); final double sizeExponent = stressConfig.getDouble("size_exponent"); final int sizeDelta = maxSize - minSize; final int differentSizes = sizeDelta / sizeIncrements; if (differentSizes == 0) { throw new RuntimeException("No sizes! Decrease plog.stress.size_increments"); } final int stopAfter = stressConfig.getInt("stop_after"); final int packetSize = stressConfig.getInt("udp.size"); final int bufferSize = stressConfig.getInt("udp.SO_SNDBUF"); final Fragmenter fragmenter = new Fragmenter(packetSize); final Random random = new Random(stressConfig.getLong("seed")); final byte[] randomBytes = new byte[maxSize]; random.nextBytes(randomBytes); final ByteBuf randomMessage = Unpooled.wrappedBuffer(randomBytes); log.info("Generating {} different hashes", differentSizes); final int[] precomputedHashes = new int[differentSizes]; for (int i = 0; i < differentSizes; i++) { precomputedHashes[i] = Murmur3.hash32(randomMessage, 0, minSize + sizeIncrements * i, 0); } final ByteBufAllocator allocator = new PooledByteBufAllocator(); final double packetLoss = stressConfig.getDouble("udp.loss"); final Meter socketMeter = registry.meter("Sockets used"); final Meter messageMeter = registry.meter("Messages sent"); final Meter packetMeter = registry.meter("Packets sent"); final Meter sendFailureMeter = registry.meter("Send failures"); final Meter lossMeter = registry.meter("Packets dropped"); final Histogram messageSizeHistogram = registry.histogram("Message size"); final Histogram packetSizeHistogram = registry.histogram("Packet size"); final InetSocketAddress target = new InetSocketAddress(stressConfig.getString("host"), stressConfig.getInt("port")); log.info("Starting with config {}", config); final long consoleRate = stressConfig.getDuration("console.interval", TimeUnit.MILLISECONDS); ConsoleReporter.forRegistry(registry).build().start(consoleRate, TimeUnit.MILLISECONDS); for (int i = 0; i < threadCount; i++) { new Thread("stress_" + i) { private DatagramChannel channel = null; @Override public void run() { try { for (int sent = 0; sent < stopAfter; sent++, messageMeter.mark()) { if (sent % socketRenewRate == 0) { if (channel != null) { channel.close(); } channel = DatagramChannel.open(); channel.socket().setSendBufferSize(bufferSize); socketMeter.mark(); } // global rate limiting rateLimiter.acquire(); final int sizeIndex = (int) (Math.pow(random.nextDouble(), sizeExponent) * differentSizes); final int messageSize = minSize + sizeIncrements * sizeIndex; final int hash = precomputedHashes[sizeIndex]; messageSizeHistogram.update(messageSize); final ByteBuf[] fragments = fragmenter.fragment(allocator, randomMessage, null, sent, messageSize, hash); for (ByteBuf fragment : fragments) { if (random.nextDouble() < packetLoss) { lossMeter.mark(); } else { final int packetSize = fragment.readableBytes(); final ByteBuffer buffer = 
fragment.nioBuffer(); try { channel.send(buffer, target); packetSizeHistogram.update(packetSize); packetMeter.mark(); } catch (SocketException e) { sendFailureMeter.mark(); } } fragment.release(); } } } catch (Throwable t) { t.printStackTrace(); System.exit(1); } } }.start(); } }
@Override
public void configureReporters(MetricRegistry metricRegistry) {
    registerReporter(ConsoleReporter.forRegistry(metricRegistry).build())
            .start(1, TimeUnit.SECONDS);
}
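Taken together, the snippets above all follow the same lifecycle: build a ConsoleReporter from a MetricRegistry, start it with a reporting period, and stop it when the owning component shuts down. A minimal standalone sketch of that lifecycle, assuming the com.codahale.metrics package of Dropwizard Metrics 3.x/4.x (class and metric names here are illustrative):

import com.codahale.metrics.ConsoleReporter;
import com.codahale.metrics.MetricRegistry;

import java.util.concurrent.TimeUnit;

public class ConsoleReporterExample {
    public static void main(String[] args) throws InterruptedException {
        MetricRegistry registry = new MetricRegistry();

        // Build a reporter that prints rates per second and durations in milliseconds.
        ConsoleReporter reporter = ConsoleReporter.forRegistry(registry)
                .convertRatesTo(TimeUnit.SECONDS)
                .convertDurationsTo(TimeUnit.MILLISECONDS)
                .build();

        // Report to stdout every second.
        reporter.start(1, TimeUnit.SECONDS);

        // Record some sample activity so there is something to report.
        registry.meter("example.requests").mark();

        Thread.sleep(3000);

        // Always stop the reporter so its scheduler thread is shut down.
        reporter.stop();
    }
}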