public void cleanupCompletedTransactions() {
  if (!completedTransactions.isEmpty()) {
    try {
      log.tracef("About to cleanup completed transaction. Initial size is %d",
          completedTransactions.size());
      // this iterator is weakly consistent and will never throw ConcurrentModificationException
      Iterator<Map.Entry<GlobalTransaction, Long>> iterator =
          completedTransactions.entrySet().iterator();
      long timeout = configuration.transaction().completedTxTimeout();
      int removedEntries = 0;
      long beginning = System.nanoTime();
      while (iterator.hasNext()) {
        Map.Entry<GlobalTransaction, Long> e = iterator.next();
        long ageNanos = System.nanoTime() - e.getValue();
        if (TimeUnit.NANOSECONDS.toMillis(ageNanos) >= timeout) {
          iterator.remove();
          removedEntries++;
        }
      }
      long duration = System.nanoTime() - beginning;
      log.tracef("Finished cleaning up completed transactions. %d transactions were removed, "
              + "total duration was %d millis, current number of completed transactions is %d",
          removedEntries, TimeUnit.NANOSECONDS.toMillis(duration), completedTransactions.size());
    } catch (Exception e) {
      log.errorf(e, "Failed to cleanup completed transactions: %s", e.getMessage());
    }
  }
}
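// All of these snippets share the same idiom: System.nanoTime() values are only meaningful
// as differences, so elapsed time is always computed as a delta and converted with
// TimeUnit.NANOSECONDS.toMillis only at the reporting boundary. A minimal, self-contained
// sketch of that idiom (the Thread.sleep is a stand-in for the work being timed):
import java.util.concurrent.TimeUnit;

public class ElapsedMillis {
  public static void main(String[] args) throws InterruptedException {
    long start = System.nanoTime(); // monotonic clock; absolute values are meaningless
    Thread.sleep(250);              // placeholder for the work being timed
    long elapsedNanos = System.nanoTime() - start;
    // Convert at the end so intermediate arithmetic keeps full nanosecond precision.
    System.out.println("took " + TimeUnit.NANOSECONDS.toMillis(elapsedNanos) + " ms");
  }
}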
public SearchStats.Stats stats() {
  return new SearchStats.Stats(
      queryMetric.count(),
      TimeUnit.NANOSECONDS.toMillis(queryMetric.sum()),
      queryCurrent.count(),
      fetchMetric.count(),
      TimeUnit.NANOSECONDS.toMillis(fetchMetric.sum()),
      fetchCurrent.count());
}
public void handleStreamEvent(StreamEvent event) {
  if (event.eventType == StreamEvent.Type.STREAM_PREPARED) {
    SessionInfo session = ((StreamEvent.SessionPreparedEvent) event).session;
    sessionsByHost.put(session.peer, session);
  } else if (event.eventType == StreamEvent.Type.FILE_PROGRESS) {
    ProgressInfo progressInfo = ((StreamEvent.ProgressEvent) event).progress;

    // update progress
    Set<ProgressInfo> progresses = progressByHost.get(progressInfo.peer);
    if (progresses == null) {
      progresses = Sets.newSetFromMap(new ConcurrentHashMap<ProgressInfo, Boolean>());
      progressByHost.put(progressInfo.peer, progresses);
    }
    if (progresses.contains(progressInfo))
      progresses.remove(progressInfo);
    progresses.add(progressInfo);

    StringBuilder sb = new StringBuilder();
    sb.append("\rprogress: ");

    long totalProgress = 0;
    long totalSize = 0;
    for (Map.Entry<InetAddress, Set<ProgressInfo>> entry : progressByHost.entrySet()) {
      SessionInfo session = sessionsByHost.get(entry.getKey());
      long size = session.getTotalSizeToSend();
      long current = 0;
      int completed = 0;
      for (ProgressInfo progress : entry.getValue()) {
        if (progress.currentBytes == progress.totalBytes)
          completed++;
        current += progress.currentBytes;
      }
      totalProgress += current;
      totalSize += size;
      sb.append("[").append(entry.getKey());
      sb.append(" ").append(completed).append("/").append(session.getTotalFilesToSend());
      sb.append(" (").append(size == 0 ? 100L : current * 100L / size).append("%)] ");
    }
    long time = System.nanoTime();
    long deltaTime = Math.max(1L, TimeUnit.NANOSECONDS.toMillis(time - lastTime));
    lastTime = time;
    long deltaProgress = totalProgress - lastProgress;
    lastProgress = totalProgress;

    sb.append("[total: ")
        .append(totalSize == 0 ? 100L : totalProgress * 100L / totalSize)
        .append("% - ");
    sb.append(mbPerSec(deltaProgress, deltaTime)).append("MB/s");
    sb.append(" (avg: ")
        .append(mbPerSec(totalProgress, TimeUnit.NANOSECONDS.toMillis(time - start)))
        .append("MB/s)]");
    System.out.print(sb.toString());
  }
}
IndexingStats.Stats stats(boolean isThrottled, long currentThrottleMillis) {
  return new IndexingStats.Stats(
      indexMetric.count(),
      TimeUnit.NANOSECONDS.toMillis(indexMetric.sum()),
      indexCurrent.count(),
      indexFailed.count(),
      deleteMetric.count(),
      TimeUnit.NANOSECONDS.toMillis(deleteMetric.sum()),
      deleteCurrent.count(),
      noopUpdates.count(),
      isThrottled,
      // already in milliseconds; wrapping it in TimeUnit.MILLISECONDS.toMillis was a no-op
      currentThrottleMillis);
}
@Override
public void run(String... strings) throws Exception {
  String logFormat = "%s call took %d millis with result: %s";

  long start1 = nanoTime();
  String city = dummy.getCity();
  long end1 = nanoTime();
  out.println(format(logFormat, "First", TimeUnit.NANOSECONDS.toMillis(end1 - start1), city));

  long start2 = nanoTime();
  city = dummy.getCity();
  long end2 = nanoTime();
  out.println(format(logFormat, "Second", TimeUnit.NANOSECONDS.toMillis(end2 - start2), city));
}
public static void main(final String[] args) throws Exception {
  System.out.println("FalseSharingAtomicLong:");

  final long start1 = System.nanoTime();
  runAtomicLongTest();
  final long duration1 = System.nanoTime() - start1;
  System.out.println(
      "w/o padding = " + TimeUnit.NANOSECONDS.toMillis(duration1) + " ms [" + duration1 + " ns]");

  final long start2 = System.nanoTime();
  runPaddedAtomicLongTest();
  final long duration2 = System.nanoTime() - start2;
  System.out.println(
      "w/ padding = " + TimeUnit.NANOSECONDS.toMillis(duration2) + " ms [" + duration2 + " ns]");
}
@Test
public void testWaitFor() throws Exception {
  final BlockingArrayQueue<String> results = new BlockingArrayQueue<>();

  String channelName = "/chat/msg";
  MarkedReference<ServerChannel> channel = bayeux.createChannelIfAbsent(channelName);
  channel.getReference().addListener(new ServerChannel.MessageListener() {
    public boolean onMessage(ServerSession from, ServerChannel channel, Mutable message) {
      results.add(from.getId());
      results.add(channel.getId());
      results.add(String.valueOf(message.getData()));
      return true;
    }
  });

  BayeuxClient client = newBayeuxClient();
  long wait = 1000L;
  long start = System.nanoTime();
  client.handshake(wait);
  long stop = System.nanoTime();
  Assert.assertTrue(TimeUnit.NANOSECONDS.toMillis(stop - start) < wait);
  Assert.assertNotNull(client.getId());

  String data = "Hello World";
  client.getChannel(channelName).publish(data);
  Assert.assertEquals(client.getId(), results.poll(1, TimeUnit.SECONDS));
  Assert.assertEquals(channelName, results.poll(1, TimeUnit.SECONDS));
  Assert.assertEquals(data, results.poll(1, TimeUnit.SECONDS));

  disconnectBayeuxClient(client);
}
@Override
public void postCall(HttpRequest request, HttpResponseStatus status, HandlerInfo handlerInfo) {
  HTTPMonitoringEvent httpMonitoringEvent =
      (HTTPMonitoringEvent) handlerInfo.getAttribute(MONITORING_EVENT);
  httpMonitoringEvent.setResponseTime(
      TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - httpMonitoringEvent.getStartNanoTime()));
  httpMonitoringEvent.setResponseHttpStatusCode(status.code());

  Object[] meta = new Object[] {
      httpMonitoringEvent.getTimestamp(), SERVER_HOST_ADDRESS, SERVER_HOSTNAME, MICROSERVICE
  };
  Object[] payload = new Object[11];
  payload[0] = httpMonitoringEvent.getServiceClass();
  payload[1] = httpMonitoringEvent.getServiceName();
  payload[2] = httpMonitoringEvent.getServiceMethod();
  payload[3] = httpMonitoringEvent.getRequestUri();
  payload[4] = httpMonitoringEvent.getServiceContext();
  payload[5] = httpMonitoringEvent.getHttpMethod();
  payload[6] = httpMonitoringEvent.getContentType();
  payload[7] = httpMonitoringEvent.getRequestSizeBytes();
  payload[8] = httpMonitoringEvent.getReferrer();
  payload[9] = httpMonitoringEvent.getResponseHttpStatusCode();
  payload[10] = httpMonitoringEvent.getResponseTime();

  Event event = new Event(
      HTTP_MONITORING_STREAM_ID, httpMonitoringEvent.getTimestamp(), meta, null, payload);
  dataPublisher.publish(event);
}
@Nullable
private static AnalysisResult analyze(
    @NotNull final KotlinCoreEnvironment environment, @Nullable String targetDescription) {
  MessageCollector collector =
      environment.getConfiguration().get(CLIConfigurationKeys.MESSAGE_COLLECTOR_KEY);
  assert collector != null;

  long analysisStart = PerformanceCounter.Companion.currentTime();
  AnalyzerWithCompilerReport analyzerWithCompilerReport = new AnalyzerWithCompilerReport(collector);
  analyzerWithCompilerReport.analyzeAndReport(
      environment.getSourceFiles(),
      new Function0<AnalysisResult>() {
        @NotNull
        @Override
        public AnalysisResult invoke() {
          BindingTrace sharedTrace =
              new CliLightClassGenerationSupport.NoScopeRecordCliBindingTrace();
          ModuleContext moduleContext =
              TopDownAnalyzerFacadeForJVM.createContextWithSealedModule(
                  environment.getProject(), ModuleNameKt.getModuleName(environment));
          return TopDownAnalyzerFacadeForJVM.analyzeFilesWithJavaIntegrationWithCustomContext(
              moduleContext,
              environment.getSourceFiles(),
              sharedTrace,
              environment.getConfiguration().get(JVMConfigurationKeys.MODULES),
              environment.getConfiguration()
                  .get(JVMConfigurationKeys.INCREMENTAL_COMPILATION_COMPONENTS),
              new JvmPackagePartProvider(environment));
        }
      });

  long analysisNanos = PerformanceCounter.Companion.currentTime() - analysisStart;
  String message = "ANALYZE: " + environment.getSourceFiles().size() + " files ("
      + environment.getSourceLinesOfCode() + " lines) "
      + (targetDescription != null ? targetDescription : "")
      + "in " + TimeUnit.NANOSECONDS.toMillis(analysisNanos) + " ms";
  K2JVMCompiler.Companion.reportPerf(environment.getConfiguration(), message);

  AnalysisResult result = analyzerWithCompilerReport.getAnalysisResult();
  assert result != null
      : "AnalysisResult should be non-null, compiling: " + environment.getSourceFiles();

  CompilerPluginContext context = new CompilerPluginContext(
      environment.getProject(), result.getBindingContext(), environment.getSourceFiles());
  for (CompilerPlugin plugin :
      environment.getConfiguration().getList(CLIConfigurationKeys.COMPILER_PLUGINS)) {
    plugin.processFiles(context);
  }

  return analyzerWithCompilerReport.hasErrors() ? null : result;
}
public static void write(BufferedImage image, String name, OutputStream out) throws IOException {
  System.out.println("Writing " + name + ". W:" + image.getWidth() + " H:" + image.getHeight());
  final long start = System.nanoTime();
  ImageIO.write(image, getType(name), out);
  final long duration = System.nanoTime() - start;
  System.out.println("Saved " + name + " in " + TimeUnit.NANOSECONDS.toMillis(duration) + " ms.");
}
/**
 * Drain the queue of pending counts into the provided buffer and write those counts to DynamoDB.
 * This blocks until data is available in the queue.
 *
 * @param buffer A reusable buffer with sufficient space to drain the entire queue if necessary.
 *     This is provided as an optimization to avoid allocating a new buffer every interval.
 * @throws InterruptedException Thread interrupted while waiting for new data to arrive in the
 *     queue.
 */
protected void sendQueueToDynamoDB(List<HttpReferrerPairsCount> buffer)
    throws InterruptedException {
  // Block while waiting for data
  buffer.add(counts.take());
  // Drain as much of the queue as we can.
  // DynamoDBMapper will handle splitting the batch sizes for us.
  counts.drainTo(buffer);
  try {
    long start = System.nanoTime();
    // Write the contents of the buffer as items to our table
    List<FailedBatch> failures = mapper.batchWrite(buffer, Collections.emptyList());
    long end = System.nanoTime();
    LOG.info(String.format("%d new counts sent to DynamoDB in %dms",
        buffer.size(), TimeUnit.NANOSECONDS.toMillis(end - start)));
    for (FailedBatch failure : failures) {
      LOG.warn("Error sending count batch to DynamoDB. This will not be retried!",
          failure.getException());
    }
  } catch (Exception ex) {
    LOG.error("Error sending new counts to DynamoDB. Some counts may not be persisted.", ex);
  }
}
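// The take-then-drainTo sequence above is a common batching idiom for BlockingQueue: block
// until the first element arrives, then opportunistically drain whatever else is already
// queued, without blocking again. A minimal sketch of just that idiom (types and the element
// payload are illustrative, not from the original class):
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class QueueBatcher {
  private final BlockingQueue<String> queue = new LinkedBlockingQueue<>();

  // Blocks for at least one element, then drains the remainder without blocking.
  List<String> nextBatch() throws InterruptedException {
    List<String> batch = new ArrayList<>();
    batch.add(queue.take()); // wait for the first element
    queue.drainTo(batch);    // grab everything else already queued
    return batch;
  }
}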
public void run() {
  // TODO get rid of current and use the marker file instead?
  directoryProviderLock.lock();
  try {
    long start = System.nanoTime(); // keep time after lock is acquired for correct measure
    int oldIndex = current;
    int index = oldIndex == 1 ? 2 : 1;
    File destinationFile = new File(destination, Integer.valueOf(index).toString());
    try {
      log.tracef("Copying %s into %s", source, destinationFile);
      FileHelper.synchronize(source, destinationFile, true, copyChunkSize);
      current = index;
    } catch (IOException e) {
      // don't change current
      log.unableToSynchronizeSource(indexName, e);
      return;
    }
    if (!new File(destination, CURRENT_DIR_NAME[oldIndex]).delete()) {
      log.unableToRemovePreviousMarker(indexName);
    }
    try {
      new File(destination, CURRENT_DIR_NAME[index]).createNewFile();
    } catch (IOException e) {
      log.unableToCreateCurrentMarker(indexName, e);
    }
    log.tracef("Copy for %s took %d ms",
        indexName, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
  } finally {
    directoryProviderLock.unlock();
    inProgress.set(false);
  }
}
String logId() {
  long delta = System.nanoTime() - started;
  if (delta > TOO_LONG_LOG) {
    return plainId() + '+' + TimeUnit.NANOSECONDS.toSeconds(delta) + 's';
  }
  return plainId() + '+' + TimeUnit.NANOSECONDS.toMillis(delta) + "ms";
}
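// The threshold trick above (switch to a coarser unit once the delta crosses a cutoff)
// generalizes to a small human-readable duration formatter. A sketch under assumed,
// illustrative thresholds; the class and constants are not from the original source:
import java.util.concurrent.TimeUnit;

public final class Durations {
  private Durations() {}

  // Render a nanosecond delta in the coarsest unit that keeps the number readable.
  public static String humanReadable(long nanos) {
    if (nanos >= TimeUnit.MINUTES.toNanos(1)) {
      return TimeUnit.NANOSECONDS.toMinutes(nanos) + "min";
    }
    if (nanos >= TimeUnit.SECONDS.toNanos(10)) { // cutoff is a judgment call
      return TimeUnit.NANOSECONDS.toSeconds(nanos) + "s";
    }
    return TimeUnit.NANOSECONDS.toMillis(nanos) + "ms";
  }
}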
/*
 * This method handles two different scenarios:
 *
 * a) we're handling the initial read, of data from the closest replica + digests
 *    from the rest. In this case we check the digests against each other,
 *    throw an exception if there is a mismatch, otherwise return the data row.
 *
 * b) we're checking additional digests that arrived after the minimum to handle
 *    the requested ConsistencyLevel, i.e. asynchronous read repair check
 */
public Row resolve() throws DigestMismatchException {
  if (logger.isDebugEnabled())
    logger.debug("resolving " + replies.size() + " responses");

  long start = System.nanoTime();

  // validate digests against each other; throw immediately on mismatch.
  // also extract the data reply, if any.
  ColumnFamily data = null;
  ByteBuffer digest = null;
  for (MessageIn<ReadResponse> message : replies) {
    ReadResponse response = message.payload;

    ByteBuffer newDigest;
    if (response.isDigestQuery()) {
      newDigest = response.digest();
    } else {
      // note that this allows for multiple data replies, post-CASSANDRA-5932
      data = response.row().cf;
      newDigest = ColumnFamily.digest(data);
    }

    if (digest == null)
      digest = newDigest;
    else if (!digest.equals(newDigest))
      throw new DigestMismatchException(key, digest, newDigest);
  }

  if (logger.isDebugEnabled())
    logger.debug("resolve: {} ms.", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
  return new Row(key, data);
}
@Test(groups = "slow")
public void testInsertionTiming() {
  int keySpaceSize = 10000;
  int k = 100;
  int maxAdd = 100;

  TopK<Integer> topK = getInstance(keySpaceSize, k);
  LOG.info("Timing add() performance with keySpaceSize = %s, k = %s", keySpaceSize, k);

  Random random = new Random(0);
  long totalTime = 0;
  long count = 0;
  long begin = System.nanoTime();
  while (System.nanoTime() - begin < TEST_TIME_NANOS) {
    long start = System.nanoTime();
    topK.add(random.nextInt(keySpaceSize), random.nextInt(maxAdd));
    if (System.nanoTime() - begin > TimeUnit.SECONDS.toNanos(1)) {
      // discard the first second of measurements
      totalTime += System.nanoTime() - start;
      ++count;
    }
  }
  LOG.info("Processed %s entries in %s ms. Insertion rate = %s entries/s",
      count,
      TimeUnit.NANOSECONDS.toMillis(totalTime),
      count / (totalTime * 1.0 / TimeUnit.SECONDS.toNanos(1)));
}
protected void printStatusMessage(long startTime, long totalTodoCount, long doneCount) {
  long elapsedMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
  log.indexingDocumentsCompleted(doneCount, elapsedMs);
  float estimateSpeed = doneCount * 1000f / elapsedMs;
  float estimatePercentileComplete = doneCount * 100f / totalTodoCount;
  log.indexingSpeed(estimateSpeed, estimatePercentileComplete);
}
public String pendingFramesTimeStamps() {
  threadChecker.checkIsOnValidThread();
  List<Long> timeStampsMs = new ArrayList<Long>();
  for (long ts : timeStampsNs) {
    timeStampsMs.add(TimeUnit.NANOSECONDS.toMillis(ts));
  }
  return timeStampsMs.toString();
}
@Override
public List<Long> getMillisecondsList(String path) {
  List<Long> nanos = getNanosecondsList(path);
  List<Long> l = new ArrayList<Long>();
  for (Long n : nanos) {
    l.add(TimeUnit.NANOSECONDS.toMillis(n));
  }
  return l;
}
/**
 * Returns the recorded response triggered by a request to {@code url}. Throws if the response
 * isn't enqueued before the timeout.
 */
public synchronized RecordedResponse await(URL url) throws Exception {
  long timeoutMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()) + TIMEOUT_MILLIS;
  while (true) {
    for (Iterator<RecordedResponse> i = responses.iterator(); i.hasNext(); ) {
      RecordedResponse recordedResponse = i.next();
      if (recordedResponse.request.url().equals(url)) {
        i.remove();
        return recordedResponse;
      }
    }

    long nowMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
    if (nowMillis >= timeoutMillis) break;
    wait(timeoutMillis - nowMillis);
  }
  throw new AssertionError("Timed out waiting for response to " + url);
}
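// Note the deadline above is derived from System.nanoTime() converted to milliseconds rather
// than System.currentTimeMillis(), so it cannot jump with wall-clock adjustments, and the
// condition is re-checked after every wait() because Object.wait can wake spuriously. A
// minimal sketch of the same deadline loop (class and field names are illustrative):
import java.util.concurrent.TimeUnit;

public class DeadlineWait {
  private boolean ready; // guarded by this

  // Waits up to timeoutMillis for ready to become true; returns whether it did.
  public synchronized boolean awaitReady(long timeoutMillis) throws InterruptedException {
    long deadline = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()) + timeoutMillis;
    while (!ready) {
      long remaining = deadline - TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
      if (remaining <= 0) return false;
      wait(remaining); // may wake spuriously; the loop re-checks the condition
    }
    return true;
  }

  public synchronized void markReady() {
    ready = true;
    notifyAll();
  }
}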
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
  FlowFile flowFile = session.get();
  if (flowFile == null) {
    return;
  }

  final long startNanos = System.nanoTime();
  final AmazonSQSClient client = getClient();
  final SendMessageBatchRequest request = new SendMessageBatchRequest();
  final String queueUrl =
      context.getProperty(QUEUE_URL).evaluateAttributeExpressions(flowFile).getValue();
  request.setQueueUrl(queueUrl);

  final Set<SendMessageBatchRequestEntry> entries = new HashSet<>();
  final SendMessageBatchRequestEntry entry = new SendMessageBatchRequestEntry();
  entry.setId(flowFile.getAttribute("uuid"));

  final ByteArrayOutputStream baos = new ByteArrayOutputStream();
  session.exportTo(flowFile, baos);
  final String flowFileContent = baos.toString();
  entry.setMessageBody(flowFileContent);

  final Map<String, MessageAttributeValue> messageAttributes = new HashMap<>();
  for (final PropertyDescriptor descriptor : userDefinedProperties) {
    final MessageAttributeValue mav = new MessageAttributeValue();
    mav.setDataType("String");
    mav.setStringValue(
        context.getProperty(descriptor).evaluateAttributeExpressions(flowFile).getValue());
    messageAttributes.put(descriptor.getName(), mav);
  }
  entry.setMessageAttributes(messageAttributes);
  entry.setDelaySeconds(context.getProperty(DELAY).asTimePeriod(TimeUnit.SECONDS).intValue());

  entries.add(entry);
  request.setEntries(entries);

  try {
    client.sendMessageBatch(request);
  } catch (final Exception e) {
    getLogger().error(
        "Failed to send messages to Amazon SQS due to {}; routing to failure",
        new Object[] {e});
    flowFile = session.penalize(flowFile);
    session.transfer(flowFile, REL_FAILURE);
    return;
  }

  getLogger().info("Successfully published message to Amazon SQS for {}", new Object[] {flowFile});
  session.transfer(flowFile, REL_SUCCESS);
  final long transmissionMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
  session.getProvenanceReporter().send(flowFile, queueUrl, transmissionMillis);
}
private final boolean private_callProcedure(
    ProcedureCallback callback,
    int expectedSerializedSize,
    ProcedureInvocation invocation,
    long timeoutNanos)
    throws IOException, NoConnectionsException {
  if (m_isShutdown) {
    return false;
  }

  if (callback == null) {
    callback = new NullCallback();
  }

  final long nowNanos = System.nanoTime();

  // Blessed threads (the ones that invoke callbacks) are not subject to backpressure
  boolean isBlessed = m_blessedThreadIds.contains(Thread.currentThread().getId());
  if (m_blockingQueue) {
    while (!m_distributer.queue(invocation, callback, isBlessed, nowNanos, timeoutNanos)) {
      /*
       * Wait on backpressure honoring the timeout settings
       */
      final long delta = Math.max(1, System.nanoTime() - nowNanos);
      final long timeout =
          timeoutNanos == Distributer.USE_DEFAULT_CLIENT_TIMEOUT
              ? m_distributer.getProcedureTimeoutNanos()
              : timeoutNanos;
      try {
        if (backpressureBarrier(nowNanos, timeout - delta)) {
          final ClientResponseImpl r = new ClientResponseImpl(
              ClientResponse.CONNECTION_TIMEOUT,
              ClientResponse.UNINITIALIZED_APP_STATUS_CODE,
              "",
              new VoltTable[0],
              String.format(
                  "No response received in the allotted time (set to %d ms).",
                  TimeUnit.NANOSECONDS.toMillis(timeoutNanos)));
          try {
            callback.clientCallback(r);
          } catch (Throwable t) {
            m_distributer.uncaughtException(callback, r, t);
          }
        }
      } catch (InterruptedException e) {
        throw new java.io.InterruptedIOException(
            "Interrupted while invoking procedure asynchronously");
      }
    }
    return true;
  } else {
    return m_distributer.queue(invocation, callback, isBlessed, nowNanos, timeoutNanos);
  }
}
@Override
public synchronized void send(final HeartbeatMessage heartbeatMessage) throws IOException {
  final long sendStart = System.nanoTime();
  final String heartbeatAddress = getHeartbeatAddress();
  final HeartbeatResponseMessage responseMessage =
      protocolSender.heartbeat(heartbeatMessage, heartbeatAddress);

  final byte[] payloadBytes = heartbeatMessage.getHeartbeat().getPayload();
  final HeartbeatPayload payload = HeartbeatPayload.unmarshal(payloadBytes);
  final List<NodeConnectionStatus> nodeStatusList = payload.getClusterStatus();
  final Map<NodeIdentifier, Long> updateIdMap = nodeStatusList.stream()
      .collect(Collectors.toMap(
          status -> status.getNodeIdentifier(), status -> status.getUpdateIdentifier()));

  final List<NodeConnectionStatus> updatedStatuses = responseMessage.getUpdatedNodeStatuses();
  if (updatedStatuses != null) {
    for (final NodeConnectionStatus updatedStatus : updatedStatuses) {
      final NodeIdentifier nodeId = updatedStatus.getNodeIdentifier();
      final Long updateId = updateIdMap.get(nodeId);

      final boolean updated =
          clusterCoordinator.resetNodeStatus(updatedStatus, updateId == null ? -1L : updateId);
      if (updated) {
        logger.info(
            "After receiving heartbeat response, updated status of {} to {}",
            updatedStatus.getNodeIdentifier(), updatedStatus);
      } else {
        logger.debug(
            "After receiving heartbeat response, did not update status of {} to {} because the update is out-of-date",
            updatedStatus.getNodeIdentifier(), updatedStatus);
      }
    }
  }

  final long sendNanos = System.nanoTime() - sendStart;
  final long sendMillis = TimeUnit.NANOSECONDS.toMillis(sendNanos);

  final DateFormat dateFormatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS", Locale.US);
  final String flowElectionMessage = responseMessage.getFlowElectionMessage();
  final String formattedElectionMessage =
      flowElectionMessage == null ? "" : "; " + flowElectionMessage;

  logger.info(
      "Heartbeat created at {} and sent to {} at {}; send took {} millis{}",
      dateFormatter.format(new Date(heartbeatMessage.getHeartbeat().getCreatedTimestamp())),
      heartbeatAddress,
      dateFormatter.format(new Date()),
      sendMillis,
      formattedElectionMessage);
}
/** Report the parser rate in triples per second. */
public long triplesPerSecond() {
  long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(getElapsedNanos());
  if (elapsedMillis == 0) {
    // Note: Avoid divide by zero error.
    elapsedMillis = 1;
  }
  return ((long) (((double) nparsed) / ((double) elapsedMillis) * 1000d));
}
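// The guard above generalizes to any throughput calculation: clamp the elapsed time to a
// minimum of one unit before dividing. A minimal sketch of a reusable helper; the class and
// method names are illustrative, not from the original source:
import java.util.concurrent.TimeUnit;

public final class Rates {
  private Rates() {}

  // Events per second, clamping elapsed time to at least 1 ms to avoid division by zero.
  public static double perSecond(long events, long elapsedNanos) {
    long elapsedMillis = Math.max(1, TimeUnit.NANOSECONDS.toMillis(elapsedNanos));
    return events * 1000.0 / elapsedMillis;
  }
}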
public void returnBuffer(long timeStamp) {
  checkIsOnValidThread();
  final ByteBuffer returnedFrame = pendingBuffers.remove(timeStamp);
  if (returnedFrame == null) {
    throw new RuntimeException(
        "unknown data buffer with time stamp " + timeStamp + " returned?!?");
  }

  if (camera != null && returnedFrame.capacity() == frameSize) {
    camera.addCallbackBuffer(returnedFrame.array());
    if (queuedBuffers.isEmpty()) {
      Logging.d(TAG, "Frame returned when camera is running out of capture"
          + " buffers for TS " + TimeUnit.NANOSECONDS.toMillis(timeStamp));
    }
    queuedBuffers.put(returnedFrame.array(), returnedFrame);
    return;
  }

  if (returnedFrame.capacity() != frameSize) {
    Logging.d(TAG, "returnBuffer with time stamp " + TimeUnit.NANOSECONDS.toMillis(timeStamp)
        + " called with old frame size, " + returnedFrame.capacity() + ".");
    // Since this frame has the wrong size, don't requeue it. Frames with the correct size are
    // created in queueCameraBuffers so this must be an old buffer.
    return;
  }

  Logging.d(TAG, "returnBuffer with time stamp " + TimeUnit.NANOSECONDS.toMillis(timeStamp)
      + " called after camera has been stopped.");
}
@Override
public String toString() {
  StringWriter sw = new StringWriter();
  Locale locale = Locale.ROOT;
  try (PrintWriter output = new PrintWriter(sw)) {
    final Snapshot snapshot = timer.getSnapshot();
    output.printf(locale, "Benchmark Results%n");
    output.printf(locale, " count = %d%n", timer.getCount());
    output.printf(locale, " mean rate = %2.2f calls/%s%n", timer.getMeanRate(), "s");
    output.printf(locale, " min = %d %s%n",
        TimeUnit.NANOSECONDS.toMillis(snapshot.getMin()), "ms");
    output.printf(locale, " max = %d %s%n",
        TimeUnit.NANOSECONDS.toMillis(snapshot.getMax()), "ms");
    // Snapshot statistics are in nanoseconds; dividing by 1e6 yields fractional milliseconds.
    output.printf(locale, " mean = %2.2f %s%n", snapshot.getMean() / 1000000, "ms");
    output.printf(locale, " stddev = %2.2f %s%n", snapshot.getStdDev() / 1000000, "ms");
    output.printf(locale, " median = %2.2f %s%n", snapshot.getMedian() / 1000000, "ms");
    output.printf(locale, " 75%% <= %2.2f %s%n", snapshot.get75thPercentile() / 1000000, "ms");
    output.printf(locale, " 95%% <= %2.2f %s%n", snapshot.get95thPercentile() / 1000000, "ms");
    output.printf(locale, " 99.9%% <= %2.2f %s%n", snapshot.get999thPercentile() / 1000000, "ms");
  }
  return sw.toString();
}
@Override
public void artifactDownloaded(RepositoryEvent event) {
  super.artifactDownloaded(event);
  Artifact artifact = event.getArtifact();
  String key = artifactAsString(artifact);
  long downloadTimeNanos = System.nanoTime() - startTimes.remove(key);
  double downloadTimeMs = TimeUnit.NANOSECONDS.toMillis(downloadTimeNanos);
  // Use a fractional second count; TimeUnit.toSeconds truncates toward zero, which would
  // make the rate infinite for sub-second downloads.
  double downloadTimeSec = downloadTimeNanos / 1_000_000_000d;
  long size = artifact.getFile().length();
  double sizeK = (1 / 1024D) * size;
  double downloadRateKBytesPerSecond = sizeK / downloadTimeSec;
  info("Downloaded %s (%d bytes) in %gms (%g kbytes/sec).",
      key, size, downloadTimeMs, downloadRateKBytesPerSecond);
}
public boolean addResource(String url, String origin, String referrer) {
  // We start the push period here and not when initializing the main resource, because a
  // browser with a prefilled cache won't request the subresources. If the browser with warmed
  // up cache now hits the main resource after a server restart, the push period shouldn't
  // start until the first subresource is being requested.
  firstResourceAdded.compareAndSet(-1, System.nanoTime());

  long delay = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - firstResourceAdded.get());
  if (!referrer.startsWith(origin) && !isPushOriginAllowed(origin)) {
    if (LOG.isDebugEnabled())
      LOG.debug(
          "Skipped store of push metadata {} for {}: Origin: {} doesn't match or origin not allowed",
          url, name, origin);
    return false;
  }

  // This check is not strictly concurrent-safe, but limiting
  // the number of associated resources is achieved anyway
  // although in rare cases few more resources will be stored
  if (resources.size() >= maxAssociatedResources) {
    if (LOG.isDebugEnabled())
      LOG.debug(
          "Skipped store of push metadata {} for {}: max associated resources ({}) reached",
          url, name, maxAssociatedResources);
    return false;
  }
  if (delay > referrerPushPeriod) {
    if (LOG.isDebugEnabled())
      LOG.debug(
          "Delay: {}ms longer than referrerPushPeriod ({}ms). Not adding resource: {} for: {}",
          delay, referrerPushPeriod, url, name);
    return false;
  }

  if (LOG.isDebugEnabled())
    LOG.debug("Adding: {} to: {} with delay: {}ms.", url, this, delay);
  resources.add(url);
  return true;
}
@Override
public boolean stopAndWait(long waitTime) {
  logger.info("Garbage collection is stopping, clearing out remaining contexts.");
  stopRequested = true;
  // Purge the sink of any scheduled gc's, this needs to be equivalent to stopping the garbage
  // collector thread.
  gcSink.clear();
  long start = System.nanoTime();
  while (gcRunning) {
    ThreadUtil.reallySleep(1000);
    if (TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start) > waitTime) {
      return false;
    }
  }
  return true;
}
public static void captureAndSaveImage(OutputStream output)
    throws InterruptedException, ExecutionException, IOException {
  final ExecutorService service = Executors.newFixedThreadPool(2);
  final long start = System.nanoTime();
  try {
    final Future<BufferedImage> leftFuture = capture(service, "left", DEVICE_PATH + LEFT, 0);
    final Future<BufferedImage> rightFuture =
        capture(service, "right", DEVICE_PATH + RIGHT, VERTICAL_OFFSET);
    Image.writeStereoImage(leftFuture.get(), rightFuture.get(), output);
  } finally {
    final long duration = System.nanoTime() - start;
    System.out.println("total: " + TimeUnit.NANOSECONDS.toMillis(duration) + " ms.");
    service.shutdown();
  }
}
private static int calculatePriorityLevel(long threadUsageNanos) {
  long millis = TimeUnit.NANOSECONDS.toMillis(threadUsageNanos);
  int priorityLevel;
  if (millis < 1000) {
    priorityLevel = 0;
  } else if (millis < 10_000) {
    priorityLevel = 1;
  } else if (millis < 60_000) {
    priorityLevel = 2;
  } else if (millis < 300_000) {
    priorityLevel = 3;
  } else {
    priorityLevel = 4;
  }
  return priorityLevel;
}
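// A quick check of the bucket boundaries above (<1s -> 0, <10s -> 1, <1min -> 2, <5min -> 3,
// else 4). This harness is hypothetical and duplicates the bucketing so the demo is
// self-contained; it is not part of the original class:
import java.util.concurrent.TimeUnit;

public class PriorityLevelDemo {
  public static void main(String[] args) {
    long[] samples = {
      TimeUnit.MILLISECONDS.toNanos(500), // expect level 0
      TimeUnit.SECONDS.toNanos(5),        // expect level 1
      TimeUnit.SECONDS.toNanos(30),       // expect level 2
      TimeUnit.MINUTES.toNanos(2),        // expect level 3
      TimeUnit.MINUTES.toNanos(10)        // expect level 4
    };
    for (long nanos : samples) {
      System.out.println(nanos + " ns -> level " + calculatePriorityLevel(nanos));
    }
  }

  private static int calculatePriorityLevel(long threadUsageNanos) {
    long millis = TimeUnit.NANOSECONDS.toMillis(threadUsageNanos);
    if (millis < 1000) return 0;
    if (millis < 10_000) return 1;
    if (millis < 60_000) return 2;
    if (millis < 300_000) return 3;
    return 4;
  }
}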