protected void handle(CQLStatement statement) {
  final Timer asyncExecTimer =
      Metrics.defaultRegistry().newTimer(StatementIteratorConsumer.class, "asyncExec");
  final TimerContext asyncExecTimerContext = asyncExecTimer.time();
  final long startTime = System.nanoTime();
  ResultSetFuture future = this.cqlExecutor.executeAsync(statement);
  futures.add(future);
  Futures.addCallback(
      future,
      new FutureCallback<ResultSet>() {
        @Override
        public void onSuccess(final ResultSet result) {
          Host queriedHost = result.getExecutionInfo().getQueriedHost();
          Metrics.defaultRegistry()
              .newMeter(
                  StatementIteratorConsumer.class,
                  "queriedhost." + queriedHost.getDatacenter(),
                  queriedHost.getDatacenter(),
                  TimeUnit.SECONDS)
              .mark();
          asyncExecTimerContext.stop();
          logger.debug("Async exec time {}us", (System.nanoTime() - startTime) / 1000);
          shutdownLatch.countDown();
        }

        @Override
        public void onFailure(final Throwable t) {
          asyncExecTimerContext.stop();
          logger.debug("Async failure time {}us", (System.nanoTime() - startTime) / 1000);
          executionExceptions.add(t);
          shutdownLatch.countDown();
        }
      },
      executorService);
}
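// Hedged caller-side sketch (an assumption, not from the source): how a batch built on
// handle(...) above might wait for its callbacks. It presumes shutdownLatch was sized to
// the number of statements and that executionExceptions is a thread-safe collection; the
// method name and the 30-second timeout are hypothetical.
void awaitCompletion() throws InterruptedException {
  // Each statement's onSuccess/onFailure callback counts the latch down exactly once.
  if (!shutdownLatch.await(30, TimeUnit.SECONDS)) {
    logger.warn("Timed out with {} statements still outstanding", shutdownLatch.getCount());
  }
  if (!executionExceptions.isEmpty()) {
    // Surface the first recorded async failure.
    throw new RuntimeException(
        "Async statement execution failed", executionExceptions.iterator().next());
  }
}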
@Test
public void timedMethodsAreTimed() {
  assertThat(resource().path("timed").get(String.class), is("yay"));

  final Timer timer = Metrics.newTimer(InstrumentedResource.class, "timed");
  assertThat(timer.count(), is(1L));
}
/**
 * Executes the HTTP request.
 *
 * <p>If HttpClient throws an exception, this method releases the connection. Otherwise it is
 * the caller's responsibility to release it, or to fully process the input stream.
 *
 * @param repository the repository to execute the HTTP method for
 * @param request resource store request that triggered the HTTP request
 * @param httpRequest HTTP request to be executed
 * @param baseUrl the base URL used to construct the final httpRequest
 * @param contentRequest whether this is a content request
 * @return the response of making the request
 * @throws RemoteStorageException if an error occurred during execution of the HTTP request
 */
@VisibleForTesting
HttpResponse executeRequest(
    final ProxyRepository repository,
    final ResourceStoreRequest request,
    final HttpUriRequest httpRequest,
    final String baseUrl,
    final boolean contentRequest)
    throws RemoteStorageException {
  final Timer timer = timer(repository, httpRequest, baseUrl);
  final TimerContext timerContext = timer.time();
  Stopwatch stopwatch = null;
  if (outboundRequestLog.isDebugEnabled()) {
    stopwatch = new Stopwatch().start();
  }
  try {
    return doExecuteRequest(repository, request, httpRequest, contentRequest);
  } finally {
    timerContext.stop();
    if (stopwatch != null) {
      outboundRequestLog.debug(
          "[{}] {} {} - {}",
          repository.getId(),
          httpRequest.getMethod(),
          httpRequest.getURI(),
          stopwatch);
    }
  }
}
private final Res handleRequest(
    final Req senseiReq,
    final IndexReaderFactory<ZoieIndexReader<BoboIndexReader>> readerFactory,
    final SenseiQueryBuilderFactory queryBuilderFactory)
    throws Exception {
  List<ZoieIndexReader<BoboIndexReader>> readerList = null;
  try {
    readerList =
        GetReaderTimer.time(
            new Callable<List<ZoieIndexReader<BoboIndexReader>>>() {
              public List<ZoieIndexReader<BoboIndexReader>> call() throws Exception {
                if (readerFactory == null) {
                  return Collections.emptyList();
                }
                return readerFactory.getIndexReaders();
              }
            });
    if (logger.isDebugEnabled()) {
      // The ternary must be parenthesized: without the parentheses the string
      // concatenation binds first and the expression always logs "false".
      logger.debug(
          "obtained readerList of size: " + (readerList == null ? 0 : readerList.size()));
    }
    final List<BoboIndexReader> boboReaders = ZoieIndexReader.extractDecoratedReaders(readerList);
    return SearchTimer.time(
        new Callable<Res>() {
          public Res call() throws Exception {
            return handlePartitionedRequest(senseiReq, boboReaders, queryBuilderFactory);
          }
        });
  } finally {
    if (readerFactory != null && readerList != null) {
      readerFactory.returnIndexReaders(readerList);
    }
  }
}
@Override
public void reset() {
  final Map<MetricName, Metric> metricMap = registry.allMetrics();
  final Set<Entry<MetricName, Metric>> entrySet = metricMap.entrySet();
  for (final Entry<MetricName, Metric> entry : entrySet) {
    final Metric metric = entry.getValue();
    if (metric instanceof Counter) {
      ((Counter) metric).clear();
    }
    if (metric instanceof Timer) {
      ((Timer) metric).clear();
    }
    if (metric instanceof Histogram) {
      ((Histogram) metric).clear();
    }
    if (metric instanceof Clearable) {
      ((Clearable) metric).clear();
    }
  }
}
/**
 * Main entry point.
 *
 * @param args command line arguments
 * @throws TTransportException thrift errors
 * @throws IOException I/O errors
 * @throws InterruptedException thread errors
 */
public static void main(String[] args) throws Exception {
  HadoopNative.requireHadoopNative();

  Optional<BenchmarkArgs> parsedArgs = handleCommandLine(args);
  if (!parsedArgs.isPresent()) {
    return;
  }

  Timer allTime =
      Metrics.newTimer(
          InputBenchmark.class, "all-time", TimeUnit.MILLISECONDS, TimeUnit.MILLISECONDS);
  TimerContext allTimerContext = allTime.time();

  run(parsedArgs.get());

  allTimerContext.stop();
  new ConsoleReporter(System.err).run();
}
public void decorateCompositeActivityValues(
    final CompositeActivityValues activityValues, final Metadata metadata) {
  try {
    timer.time(
        new Callable<CompositeActivityValues>() {
          @Override
          public CompositeActivityValues call() throws Exception {
            Assert.state(storedFile != null, "The FileStorage is not initialized");
            activityValues.activityStorage = CompositeActivityStorage.this;
            try {
              if (metadata.count == 0) {
                activityValues.init();
                return activityValues;
              }
              activityValues.init(
                  (int) (metadata.count * ActivityPrimitivesStorage.INIT_GROWTH_RATIO));
              synchronized (activityValues.deletedIndexes) {
                if (metadata.count * BYTES_IN_LONG > fileLength) {
                  logger.warn(
                      "The composite activityIndex is corrupted. The file contains "
                          + (fileLength / BYTES_IN_LONG)
                          + " records, while the metadata reports a bigger number: "
                          + metadata.count);
                  logger.warn("Trimming the metadata");
                  int newCount = (int) (fileLength / BYTES_IN_LONG);
                  metadata.update(metadata.version, newCount);
                }
                for (int i = 0; i < metadata.count; i++) {
                  long value;
                  if (activateMemoryMappedBuffers) {
                    value = buffer.getLong(i * BYTES_IN_LONG);
                  } else {
                    storedFile.seek(i * BYTES_IN_LONG);
                    value = storedFile.readLong();
                  }
                  if (value != Long.MIN_VALUE) {
                    activityValues.uidToArrayIndex.put(value, i);
                  } else {
                    activityValues.deletedIndexes.add(i);
                  }
                }
              }
              activityValues.indexSize =
                  new AtomicInteger(
                      activityValues.uidToArrayIndex.size()
                          + activityValues.deletedIndexes.size());
            } catch (Exception e) {
              throw new RuntimeException(e);
            }
            return activityValues;
          }
        });
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
public static void main(String[] args) throws Exception {
  ConsoleReporter.enable(2, TimeUnit.SECONDS);

  Random random = new Random();
  while (true) {
    TimerContext context = timer.time();
    Thread.sleep(random.nextInt(1000));
    context.stop();
  }
}
@Override
public void dispatch(Object resource, HttpContext context) {
  final TimerContext time = timer.time();
  try {
    wrappedDispatcher.dispatch(resource, context);
  } finally {
    time.stop();
  }
}
@Override
public void map(Result result, SolrUpdateWriter solrUpdateWriter) {
  TimerContext timerContext = mappingTimer.time();
  try {
    SolrInputDocument solrInputDocument = new SolrInputDocument();
    for (SolrDocumentExtractor documentExtractor : resultDocumentExtractors) {
      documentExtractor.extractDocument(result, solrInputDocument);
    }
    solrUpdateWriter.add(solrInputDocument);
  } finally {
    timerContext.stop();
  }
}
public void commitPendingEvents() {
  log.info("Flushing pending kafka events to the persistent cache");
  long time = System.currentTimeMillis();
  int numberOfBatches = 0;
  for (PersistentCache persistentCache : persistentCaches.values()) {
    persistentCache.commitPendingEvents();
    numberOfBatches += persistentCache.numberOfAvailableBatches();
  }
  numberOfBatchesCounter.clear();
  numberOfBatchesCounter.inc(numberOfBatches);
  timer.update(System.currentTimeMillis() - time, TimeUnit.MILLISECONDS);
}
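// Hedged equivalence sketch (an assumption, not from the source): unlike the TimerContext
// pattern used elsewhere in this section, the method above measures wall-clock time itself
// and records it via timer.update(duration, unit), the Metrics 2.x API for externally
// measured durations. The same measurement could be taken with a context; flushAll() is a
// hypothetical stand-in for the commit loop.
final TimerContext context = timer.time();
try {
  flushAll(); // hypothetical
} finally {
  context.stop(); // internally calls timer.update(elapsed, TimeUnit.NANOSECONDS)
}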
public SnapshotAllResult snapshotAll() throws Exception {
  final SnapshotAllResult result = new SnapshotAllResult();
  result.beginTimestamp = System.currentTimeMillis();
  final TimerContext timerContext = snapshotAllTimer.time();
  try {
    snapshotAllAttemptedCounter.incrementAndGet();
    doSnapshotAll(result);
    // if we get to here then the snapshotAll call worked
    snapshotAllCompletedCounter.incrementAndGet();
  } catch (Exception e) {
    // save exception into result
    result.exception = e;
    throw e;
  } finally {
    result.completeTimestamp = System.currentTimeMillis();
    timerContext.stop();
    // always save reference to result
    this.lastSnapshotAllResult.set(result);
  }
  return result;
}
@Override
public List<SenseiResult> doQuery(final SenseiRequest senseiRequest) {
  final List<SenseiResult> resultList = new ArrayList<SenseiResult>();
  try {
    resultList.addAll(
        scatterTimer.time(
            new Callable<List<SenseiResult>>() {
              @Override
              public List<SenseiResult> call() throws Exception {
                return doCall(senseiRequest);
              }
            }));
  } catch (Exception e) {
    ErrorMeter.mark();
    SenseiResult emptyResult = getEmptyResultInstance();
    logger.error("Error running scatter/gather", e);
    emptyResult.addError(
        new SenseiError(
            "Error gathering the results: " + e.getMessage(), ErrorType.BrokerGatherError));
    return Arrays.asList(emptyResult);
  }
  return resultList;
}
@Override
public void processTimer(MetricName name, Timer timer, PrintStream stream) {
  processMeter(name, timer, stream);
  final String durationUnit = abbrev(timer.getDurationUnit());
  final Snapshot snapshot = timer.getSnapshot();
  stream.printf(locale, "               min = %2.2f%s\n", timer.getMin(), durationUnit);
  stream.printf(locale, "               max = %2.2f%s\n", timer.getMax(), durationUnit);
  stream.printf(locale, "              mean = %2.2f%s\n", timer.getMean(), durationUnit);
  stream.printf(locale, "            stddev = %2.2f%s\n", timer.getStdDev(), durationUnit);
  stream.printf(locale, "            median = %2.2f%s\n", snapshot.getMedian(), durationUnit);
  stream.printf(locale, "              75%% <= %2.2f%s\n", snapshot.get75thPercentile(), durationUnit);
  stream.printf(locale, "              95%% <= %2.2f%s\n", snapshot.get95thPercentile(), durationUnit);
  stream.printf(locale, "              98%% <= %2.2f%s\n", snapshot.get98thPercentile(), durationUnit);
  stream.printf(locale, "              99%% <= %2.2f%s\n", snapshot.get99thPercentile(), durationUnit);
  stream.printf(locale, "            99.9%% <= %2.2f%s\n", snapshot.get999thPercentile(), durationUnit);
}
/**
 * Starts the request timer.
 *
 * @return the timer context
 */
public TimerContext startTimer() {
  return requestsTimer.time();
}
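// Hedged usage sketch (an assumption, not from the source): the caller is expected to hold
// the returned TimerContext and stop it when the request finishes; try/finally guarantees
// the stop even when handling fails. serveRequest and handleRequest are hypothetical names.
public void serveRequest(Object request) {
  final TimerContext context = startTimer();
  try {
    handleRequest(request); // hypothetical request handling
  } finally {
    context.stop(); // records the elapsed time into requestsTimer
  }
}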
/** Starts the sink timer. */
public void startSinkTimer() {
  sinkTimerContext = sinkTimer.time();
}
@Override
public double getEvictionDuration() {
  return evictionInvokeDuration.meanRate();
}
@Override
public void preInstantiationStarted() {
  preInstantiationDurationContext = preInstantiationDuration.time();
}
@Override
public void outputInvokeStarted() {
  outputInvokeDurationContext = outputInvokeDuration.time();
}
@Override
public double getOutputInvokeDuration() {
  return outputInvokeDuration.meanRate();
}
public final Res execute(final Req senseiReq) {
  SearchCounter.mark();
  Set<Integer> partitions = senseiReq == null ? null : senseiReq.getPartitions();
  if (partitions == null) {
    partitions = new HashSet<Integer>();
    int[] containsPart = _core.getPartitions();
    if (containsPart != null) {
      for (int part : containsPart) {
        partitions.add(part);
      }
    }
  }
  Res finalResult;
  if (partitions != null && partitions.size() > 0) {
    if (logger.isDebugEnabled()) {
      logger.debug("serving partitions: " + partitions.toString());
    }
    final ArrayList<Res> resultList = new ArrayList<Res>(partitions.size());
    // All but the last partition are searched on the executor; the last one reuses
    // the current thread, hence the size() - 1 futures.
    Future<Res>[] futures = new Future[partitions.size() - 1];
    int i = 0;
    for (final int partition : partitions) {
      final long start = System.currentTimeMillis();
      final IndexReaderFactory<ZoieIndexReader<BoboIndexReader>> readerFactory =
          _core.getIndexReaderFactory(partition);
      if (i < partitions.size() - 1) { // Search simultaneously.
        try {
          futures[i] =
              (Future<Res>)
                  _executorService.submit(
                      new Callable<Res>() {
                        public Res call() throws Exception {
                          Timer timer = getTimer(partition);
                          Res res =
                              timer.time(
                                  new Callable<Res>() {
                                    @Override
                                    public Res call() throws Exception {
                                      return handleRequest(
                                          senseiReq, readerFactory, _core.getQueryBuilderFactory());
                                    }
                                  });
                          long end = System.currentTimeMillis();
                          res.setTime(end - start);
                          logger.info(
                              "searching partition: " + partition + " browse took: " + res.getTime());
                          return res;
                        }
                      });
        } catch (Exception e) {
          senseiReq.addError(new SenseiError(e.getMessage(), ErrorType.BoboExecutionError));
          logger.error(e.getMessage(), e);
        }
      } else { // Reuse the current thread.
        try {
          Timer timer = getTimer(partition);
          Res res =
              timer.time(
                  new Callable<Res>() {
                    @Override
                    public Res call() throws Exception {
                      return handleRequest(senseiReq, readerFactory, _core.getQueryBuilderFactory());
                    }
                  });
          resultList.add(res);
          long end = System.currentTimeMillis();
          res.setTime(end - start);
          logger.info("searching partition: " + partition + " browse took: " + res.getTime());
        } catch (Exception e) {
          logger.error(e.getMessage(), e);
          senseiReq.addError(new SenseiError(e.getMessage(), ErrorType.BoboExecutionError));
          resultList.add(getEmptyResultInstance(e));
        }
      }
      ++i;
    }
    for (i = 0; i < futures.length; ++i) {
      try {
        Res res = futures[i].get(_timeout, TimeUnit.MILLISECONDS);
        resultList.add(res);
      } catch (Exception e) {
        logger.error(e.getMessage(), e);
        if (e instanceof TimeoutException) {
          senseiReq.addError(new SenseiError(e.getMessage(), ErrorType.ExecutionTimeout));
        } else {
          senseiReq.addError(new SenseiError(e.getMessage(), ErrorType.BoboExecutionError));
        }
        resultList.add(getEmptyResultInstance(e));
      }
    }
    try {
      finalResult =
          MergeTimer.time(
              new Callable<Res>() {
                public Res call() throws Exception {
                  return mergePartitionedResults(senseiReq, resultList);
                }
              });
    } catch (Exception e) {
      logger.error(e.getMessage(), e);
      finalResult = getEmptyResultInstance(null);
      finalResult.addError(new SenseiError(e.getMessage(), ErrorType.MergePartitionError));
    }
  } else {
    if (logger.isInfoEnabled()) {
      logger.info("no partitions specified");
    }
    finalResult = getEmptyResultInstance(null);
    finalResult.addError(new SenseiError("no partitions specified", ErrorType.PartitionCallError));
  }
  if (logger.isInfoEnabled()) {
    logger.info(
        "searching partitions " + String.valueOf(partitions) + " took: " + finalResult.getTime());
  }
  return finalResult;
}
@Override
public double getPreInstantiationDuration() {
  return preInstantiationDuration.meanRate();
}
@Override
public void evictionPassStarted() {
  evictionInvokeDurationContext = evictionInvokeDuration.time();
}
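// Hedged, self-contained sketch (an assumption, not from the source): the minimal Metrics 2.x
// Timer lifecycle that the started/duration pairs above appear to follow. The class and method
// names (MonitorSketch, passStarted, passEnded, getPassRate) are hypothetical.
import com.yammer.metrics.Metrics;
import com.yammer.metrics.core.Timer;
import com.yammer.metrics.core.TimerContext;
import java.util.concurrent.TimeUnit;

public class MonitorSketch {
  private final Timer passDuration =
      Metrics.newTimer(MonitorSketch.class, "pass-duration", TimeUnit.MILLISECONDS, TimeUnit.SECONDS);
  private TimerContext passDurationContext;

  public void passStarted() {
    // Open a context; the timer records the elapsed time when the context is stopped.
    passDurationContext = passDuration.time();
  }

  public void passEnded() {
    if (passDurationContext != null) {
      passDurationContext.stop();
      passDurationContext = null;
    }
  }

  public double getPassRate() {
    // meanRate() is the mean number of timed events per rate unit (per second here),
    // not a duration; passDuration.mean() would give the mean duration instead.
    return passDuration.meanRate();
  }
}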