Code Example #1
File: Request.java Project: Ciscen/picasso
 String logId() {
   long delta = System.nanoTime() - started;
   if (delta > TOO_LONG_LOG) {
     return plainId() + '+' + TimeUnit.NANOSECONDS.toSeconds(delta) + 's';
   }
   return plainId() + '+' + TimeUnit.NANOSECONDS.toMillis(delta) + "ms";
 }
Code Example #2
File: CODataBufferTest.java Project: BikiniLuxe/mct
  @Test
  public void putDataTimeRangeTest() throws Exception {
    long time = System.currentTimeMillis();

    Map<String, String> value = new HashMap<String, String>();
    value.put("value", "1.3");
    value.put("status", "ok");
    Map<Long, Map<String, String>> feedData1 = new HashMap<Long, Map<String, String>>();
    feedData1.put(time, value);

    Map<String, String> value2 = new HashMap<String, String>();
    value2.put("value", "1.4");
    value2.put("status", "ok");
    Map<Long, Map<String, String>> feedData2 = new HashMap<Long, Map<String, String>>();
    feedData2.put(time + 100, value2);

    Map<String, Map<Long, Map<String, String>>> data =
        new HashMap<String, Map<Long, Map<String, String>>>();
    data.put(testFeedID1, feedData1);
    data.put(testFeedID2, feedData2);
    codataBuffer.putData(data, TimeUnit.MILLISECONDS, null);

    long nanotime = TimeUnit.NANOSECONDS.convert(time, TimeUnit.MILLISECONDS);
    long nanotime2 = TimeUnit.NANOSECONDS.convert(time + 100, TimeUnit.MILLISECONDS);
    Assert.assertEquals(codataBuffer.metaDataBuffer.getStartTimestamp(0, testFeedID1), nanotime);
    Assert.assertEquals(codataBuffer.metaDataBuffer.getEndTimestamp(0, testFeedID1), nanotime);
    Assert.assertEquals(codataBuffer.metaDataBuffer.getStartTimestamp(0, testFeedID2), nanotime2);
    Assert.assertEquals(codataBuffer.metaDataBuffer.getEndTimestamp(0, testFeedID2), nanotime2);
  }
Code Example #3
  public void cleanupCompletedTransactions() {
    if (!completedTransactions.isEmpty()) {
      try {
        log.tracef(
            "About to clean up completed transactions. Initial size is %d",
            completedTransactions.size());
        // this iterator is weakly consistent and will never throw ConcurrentModificationException
        Iterator<Map.Entry<GlobalTransaction, Long>> iterator =
            completedTransactions.entrySet().iterator();
        long timeout = configuration.transaction().completedTxTimeout();

        int removedEntries = 0;
        long beginning = System.nanoTime();
        while (iterator.hasNext()) {
          Map.Entry<GlobalTransaction, Long> e = iterator.next();
          long ageNanos = System.nanoTime() - e.getValue();
          if (TimeUnit.NANOSECONDS.toMillis(ageNanos) >= timeout) {
            iterator.remove();
            removedEntries++;
          }
        }
        long duration = System.nanoTime() - beginning;

        log.tracef(
            "Finished cleaning up completed transactions. %d transactions were removed, total duration was %d millis, "
                + "current number of completed transactions is %d",
            removedEntries, TimeUnit.NANOSECONDS.toMillis(duration), completedTransactions.size());
      } catch (Exception e) {
        log.errorf(e, "Failed to cleanup completed transactions: %s", e.getMessage());
      }
    }
  }
Code Example #4
    @Override
    public <R> OperationResult<R> execute(Operation<Cassandra.Client, R> op)
        throws ConnectionException {
      long startTime = System.nanoTime();
      long latency = 0;
      setTimeout(cpConfig.getSocketTimeout()); // In case the configuration changed
      operationCounter.incrementAndGet();

      // Set a new keyspace, if it changed
      lastException = null;
      if (op.getKeyspace() != null
          && (keyspaceName == null || !op.getKeyspace().equals(keyspaceName))) {
        CassandraOperationTracer tracer =
            tracerFactory.newTracer(CassandraOperationType.SET_KEYSPACE).start();
        try {
          cassandraClient.set_keyspace(op.getKeyspace());
          if (asConfig.getCqlVersion() != null)
            cassandraClient.set_cql_version(asConfig.getCqlVersion());
          keyspaceName = op.getKeyspace();
          long now = System.nanoTime();
          latency = now - startTime;
          pool.addLatencySample(latency, now);
          tracer.success();
        } catch (Exception e) {
          long now = System.nanoTime();
          latency = now - startTime;
          lastException = ThriftConverter.ToConnectionPoolException(e).setLatency(latency);
          if (e instanceof IsTimeoutException) {
            pool.addLatencySample(
                TimeUnit.NANOSECONDS.convert(cpConfig.getSocketTimeout(), TimeUnit.MILLISECONDS),
                now);
          }
          tracer.failure(lastException);
          throw lastException;
        }
        startTime = System.nanoTime(); // We don't want to include
        // the set_keyspace in our
        // latency calculation
      }

      // Execute the operation
      try {
        R result = op.execute(cassandraClient, this);
        long now = System.nanoTime();
        latency = now - startTime;
        pool.addLatencySample(latency, now);
        return new OperationResultImpl<R>(getHost(), result, latency);
      } catch (Exception e) {
        long now = System.nanoTime();
        latency = now - startTime;
        lastException = ThriftConverter.ToConnectionPoolException(e).setLatency(latency);
        if (e instanceof IsTimeoutException) {
          pool.addLatencySample(
              TimeUnit.NANOSECONDS.convert(cpConfig.getSocketTimeout(), TimeUnit.MILLISECONDS),
              now);
        }
        throw lastException;
      }
    }
Code Example #5
 public SearchStats.Stats stats() {
   return new SearchStats.Stats(
       queryMetric.count(),
       TimeUnit.NANOSECONDS.toMillis(queryMetric.sum()),
       queryCurrent.count(),
       fetchMetric.count(),
       TimeUnit.NANOSECONDS.toMillis(fetchMetric.sum()),
       fetchCurrent.count());
 }
Code Example #6
    public void handleStreamEvent(StreamEvent event) {
      if (event.eventType == StreamEvent.Type.STREAM_PREPARED) {
        SessionInfo session = ((StreamEvent.SessionPreparedEvent) event).session;
        sessionsByHost.put(session.peer, session);
      } else if (event.eventType == StreamEvent.Type.FILE_PROGRESS) {
        ProgressInfo progressInfo = ((StreamEvent.ProgressEvent) event).progress;

        // update progress
        Set<ProgressInfo> progresses = progressByHost.get(progressInfo.peer);
        if (progresses == null) {
          progresses = Sets.newSetFromMap(new ConcurrentHashMap<ProgressInfo, Boolean>());
          progressByHost.put(progressInfo.peer, progresses);
        }
        if (progresses.contains(progressInfo)) progresses.remove(progressInfo);
        progresses.add(progressInfo);

        StringBuilder sb = new StringBuilder();
        sb.append("\rprogress: ");

        long totalProgress = 0;
        long totalSize = 0;
        for (Map.Entry<InetAddress, Set<ProgressInfo>> entry : progressByHost.entrySet()) {
          SessionInfo session = sessionsByHost.get(entry.getKey());

          long size = session.getTotalSizeToSend();
          long current = 0;
          int completed = 0;
          for (ProgressInfo progress : entry.getValue()) {
            if (progress.currentBytes == progress.totalBytes) completed++;
            current += progress.currentBytes;
          }
          totalProgress += current;
          totalSize += size;
          sb.append("[").append(entry.getKey());
          sb.append(" ").append(completed).append("/").append(session.getTotalFilesToSend());
          sb.append(" (").append(size == 0 ? 100L : current * 100L / size).append("%)] ");
        }
        long time = System.nanoTime();
        long deltaTime = Math.max(1L, TimeUnit.NANOSECONDS.toMillis(time - lastTime));
        lastTime = time;
        long deltaProgress = totalProgress - lastProgress;
        lastProgress = totalProgress;

        sb.append("[total: ")
            .append(totalSize == 0 ? 100L : totalProgress * 100L / totalSize)
            .append("% - ");
        sb.append(mbPerSec(deltaProgress, deltaTime)).append("MB/s");
        sb.append(" (avg: ")
            .append(mbPerSec(totalProgress, TimeUnit.NANOSECONDS.toMillis(time - start)))
            .append("MB/s)]");

        System.out.print(sb.toString());
      }
    }
Code Example #7
  /**
   * Causes the current thread to wait until this instance acquires leadership unless the thread is
   * {@linkplain Thread#interrupt interrupted}, the specified waiting time elapses or the instance
   * is {@linkplain #close() closed}.
   *
   * <p>If this instance already is the leader then this method returns immediately with the value
   * {@code true}.
   *
   * <p>Otherwise the current thread becomes disabled for thread scheduling purposes and lies
   * dormant until one of four things happens:
   *
   * <ul>
   *   <li>This instance becomes the leader
   *   <li>Some other thread {@linkplain Thread#interrupt interrupts} the current thread
   *   <li>The specified waiting time elapses
   *   <li>The instance is {@linkplain #close() closed}
   * </ul>
   *
   * <p>If the current thread:
   *
   * <ul>
   *   <li>has its interrupted status set on entry to this method; or
   *   <li>is {@linkplain Thread#interrupt interrupted} while waiting,
   * </ul>
   *
   * then {@link InterruptedException} is thrown and the current thread's interrupted status is
   * cleared.
   *
   * <p>If the specified waiting time elapses or the instance is {@linkplain #close() closed} then
   * the value {@code false} is returned. If the time is less than or equal to zero, the method will
   * not wait at all.
   *
   * @param timeout the maximum time to wait
   * @param unit the time unit of the {@code timeout} argument
   * @return {@code true} if leadership was acquired and {@code false} if the waiting time elapsed
   *     or the instance was closed before leadership was acquired
   * @throws InterruptedException if the current thread is interrupted while waiting
   */
  public boolean await(long timeout, TimeUnit unit) throws InterruptedException {
    long waitNanos = TimeUnit.NANOSECONDS.convert(timeout, unit);

    synchronized (this) {
      while ((waitNanos > 0) && (state.get() == State.STARTED) && !hasLeadership.get()) {
        long startNanos = System.nanoTime();
        TimeUnit.NANOSECONDS.timedWait(this, waitNanos);
        long elapsed = System.nanoTime() - startNanos;
        waitNanos -= elapsed;
      }
    }
    return hasLeadership();
  }
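The Javadoc above spells out the await contract; the fragment below is a minimal caller-side sketch of it, assuming `leaderLatch` is an instance of the enclosing class (not shown in this snippet) and that `java.util.concurrent.TimeUnit` is imported. The 30-second timeout and the names introduced here are illustrative only.

  void runIfLeader() {
    try {
      // Wait up to 30 seconds for this instance to acquire leadership.
      if (leaderLatch.await(30, TimeUnit.SECONDS)) {
        // true: this instance is, or already was, the leader.
      } else {
        // false: the waiting time elapsed or the instance was closed first.
      }
    } catch (InterruptedException e) {
      // Per the contract above, the interrupted status was cleared before the
      // exception was thrown, so restore it for callers further up the stack.
      Thread.currentThread().interrupt();
    }
  }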
Code Example #8
 IndexingStats.Stats stats(boolean isThrottled, long currentThrottleMillis) {
   return new IndexingStats.Stats(
       indexMetric.count(),
       TimeUnit.NANOSECONDS.toMillis(indexMetric.sum()),
       indexCurrent.count(),
       indexFailed.count(),
       deleteMetric.count(),
       TimeUnit.NANOSECONDS.toMillis(deleteMetric.sum()),
       deleteCurrent.count(),
       noopUpdates.count(),
       isThrottled,
       currentThrottleMillis);
 }
Code Example #9
    @Override
    public void run(String... strings) throws Exception {

      String logFormat = "%s call took %d millis with result: %s";
      long start1 = nanoTime();
      String city = dummy.getCity();
      long end1 = nanoTime();
      out.println(format(logFormat, "First", TimeUnit.NANOSECONDS.toMillis(end1 - start1), city));

      long start2 = nanoTime();
      city = dummy.getCity();
      long end2 = nanoTime();
      out.println(format(logFormat, "Second", TimeUnit.NANOSECONDS.toMillis(end2 - start2), city));
    }
Code Example #10
 @Override
 public void artifactDownloaded(RepositoryEvent event) {
   super.artifactDownloaded(event);
   Artifact artifact = event.getArtifact();
   String key = artifactAsString(artifact);
   long downloadTimeNanos = System.nanoTime() - startTimes.remove(key);
   double downloadTimeMs = TimeUnit.NANOSECONDS.toMillis(downloadTimeNanos);
   // Derive seconds in floating point so sub-second downloads don't truncate to 0 s and yield an infinite rate.
   double downloadTimeSec = downloadTimeMs / 1000d;
   long size = artifact.getFile().length();
   double sizeK = (1 / 1024D) * size;
   double downloadRateKBytesPerSecond = sizeK / downloadTimeSec;
   info(
       "Downloaded %s (%d bytes) in %gms (%g kbytes/sec).",
       key, size, downloadTimeMs, downloadRateKBytesPerSecond);
 }
Code Example #11
  public static void main(final String[] args) throws Exception {
    System.out.println("FalseSharingAtomicLong:");

    final long start1 = System.nanoTime();
    runAtomicLongTest();
    final long duration1 = System.nanoTime() - start1;
    System.out.println(
        "w/o padding = " + TimeUnit.NANOSECONDS.toMillis(duration1) + " ms [" + duration1 + " ns]");

    final long start2 = System.nanoTime();
    runPaddedAtomicLongTest();
    final long duration2 = System.nanoTime() - start2;
    System.out.println(
        "w/ padding  = " + TimeUnit.NANOSECONDS.toMillis(duration2) + " ms [" + duration2 + " ns]");
  }
Code Example #12
  public void start(long transactionCount) {
    int[] processors = generateProcessorRange();

    System.out.printf("Multiverse> Uncontended update lean-transaction benchmark\n");
    System.out.printf("Multiverse> 1 GammaTxnRef per transaction\n");
    System.out.printf("Multiverse> %s Transactions per thread\n", format(transactionCount));
    System.out.printf(
        "Multiverse> Running with the following processor range %s\n", Arrays.toString(processors));
    Result[] result = new Result[processors.length];

    System.out.println("Multiverse> Starting warmup run");
    test(1, transactionCount);
    System.out.println("Multiverse> Finished warmup run");

    long startNs = System.nanoTime();

    for (int k = 0; k < processors.length; k++) {
      int processorCount = processors[k];
      double performance = test(processorCount, transactionCount);
      result[k] = new Result(processorCount, performance);
    }

    long durationNs = System.nanoTime() - startNs;
    System.out.printf(
        "Multiverse> Benchmark took %s seconds\n", TimeUnit.NANOSECONDS.toSeconds(durationNs));

    toGnuplot(result);
  }
Code Example #13
File: GridmixJob.java Project: JichengSong/hadoop-20
 protected GridmixJob(Configuration conf, long submissionMillis, String name) throws IOException {
   job = new Job(conf, name);
   submissionTimeNanos = TimeUnit.NANOSECONDS.convert(submissionMillis, TimeUnit.MILLISECONDS);
   jobdesc = null;
   outdir = null;
   seq = -1;
 }
Code Example #14
 public void run() {
   // TODO get rid of current and use the marker file instead?
   directoryProviderLock.lock();
   try {
     long start = System.nanoTime(); // keep time after lock is acquired for correct measure
     int oldIndex = current;
     int index = oldIndex == 1 ? 2 : 1;
     File destinationFile = new File(destination, Integer.valueOf(index).toString());
     try {
       log.tracef("Copying %s into %s", source, destinationFile);
       FileHelper.synchronize(source, destinationFile, true, copyChunkSize);
       current = index;
     } catch (IOException e) {
       // don't change current
       log.unableToSynchronizeSource(indexName, e);
       return;
     }
     if (!new File(destination, CURRENT_DIR_NAME[oldIndex]).delete()) {
       log.unableToRemovePreviousMarket(indexName);
     }
     try {
       new File(destination, CURRENT_DIR_NAME[index]).createNewFile();
     } catch (IOException e) {
       log.unableToCreateCurrentMarker(indexName, e);
     }
     log.tracef(
         "Copy for %s took %d ms",
         indexName, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
   } finally {
     directoryProviderLock.unlock();
     inProgress.set(false);
   }
 }
Code Example #15
    @Override
    public void postCall(HttpRequest request, HttpResponseStatus status, HandlerInfo handlerInfo) {
      HTTPMonitoringEvent httpMonitoringEvent =
          (HTTPMonitoringEvent) handlerInfo.getAttribute(MONITORING_EVENT);
      httpMonitoringEvent.setResponseTime(
          TimeUnit.NANOSECONDS.toMillis(
              System.nanoTime() - httpMonitoringEvent.getStartNanoTime()));
      httpMonitoringEvent.setResponseHttpStatusCode(status.code());

      Object[] meta =
          new Object[] {
            httpMonitoringEvent.getTimestamp(), SERVER_HOST_ADDRESS, SERVER_HOSTNAME, MICROSERVICE
          };
      Object[] payload = new Object[11];
      payload[0] = httpMonitoringEvent.getServiceClass();
      payload[1] = httpMonitoringEvent.getServiceName();
      payload[2] = httpMonitoringEvent.getServiceMethod();
      payload[3] = httpMonitoringEvent.getRequestUri();
      payload[4] = httpMonitoringEvent.getServiceContext();
      payload[5] = httpMonitoringEvent.getHttpMethod();
      payload[6] = httpMonitoringEvent.getContentType();
      payload[7] = httpMonitoringEvent.getRequestSizeBytes();
      payload[8] = httpMonitoringEvent.getReferrer();
      payload[9] = httpMonitoringEvent.getResponseHttpStatusCode();
      payload[10] = httpMonitoringEvent.getResponseTime();
      Event event =
          new Event(
              HTTP_MONITORING_STREAM_ID, httpMonitoringEvent.getTimestamp(), meta, null, payload);
      dataPublisher.publish(event);
    }
Code Example #16
File: UtilCache.java Project: CrasOu/haze
  public void setPropertiesParams(String[] propNames) {
    ResourceBundle res = ResourceBundle.getBundle("cache");

    if (res != null) {
      String value = getPropertyParam(res, propNames, "maxSize");
      if (UtilValidate.isNotEmpty(value)) {
        this.sizeLimit = Integer.parseInt(value);
      }
      value = getPropertyParam(res, propNames, "maxInMemory");
      if (UtilValidate.isNotEmpty(value)) {
        this.maxInMemory = Integer.parseInt(value);
      }
      value = getPropertyParam(res, propNames, "expireTime");
      if (UtilValidate.isNotEmpty(value)) {
        this.expireTimeNanos =
            TimeUnit.NANOSECONDS.convert(Long.parseLong(value), TimeUnit.MILLISECONDS);
      }
      value = getPropertyParam(res, propNames, "useSoftReference");
      if (value != null) {
        useSoftReference = "true".equals(value);
      }
      value = getPropertyParam(res, propNames, "useFileSystemStore");
      if (value != null) {
        useFileSystemStore = "true".equals(value);
      }
      value = getPropertyParam(res, new String[0], "cache.file.store");
      if (value != null) {
        fileStore = value;
      }
    }
  }
Code Example #17
  @Nullable
  private static AnalysisResult analyze(
      @NotNull final KotlinCoreEnvironment environment, @Nullable String targetDescription) {
    MessageCollector collector =
        environment.getConfiguration().get(CLIConfigurationKeys.MESSAGE_COLLECTOR_KEY);
    assert collector != null;

    long analysisStart = PerformanceCounter.Companion.currentTime();
    AnalyzerWithCompilerReport analyzerWithCompilerReport =
        new AnalyzerWithCompilerReport(collector);
    analyzerWithCompilerReport.analyzeAndReport(
        environment.getSourceFiles(),
        new Function0<AnalysisResult>() {
          @NotNull
          @Override
          public AnalysisResult invoke() {
            BindingTrace sharedTrace =
                new CliLightClassGenerationSupport.NoScopeRecordCliBindingTrace();
            ModuleContext moduleContext =
                TopDownAnalyzerFacadeForJVM.createContextWithSealedModule(
                    environment.getProject(), ModuleNameKt.getModuleName(environment));

            return TopDownAnalyzerFacadeForJVM.analyzeFilesWithJavaIntegrationWithCustomContext(
                moduleContext,
                environment.getSourceFiles(),
                sharedTrace,
                environment.getConfiguration().get(JVMConfigurationKeys.MODULES),
                environment
                    .getConfiguration()
                    .get(JVMConfigurationKeys.INCREMENTAL_COMPILATION_COMPONENTS),
                new JvmPackagePartProvider(environment));
          }
        });
    long analysisNanos = PerformanceCounter.Companion.currentTime() - analysisStart;
    String message =
        "ANALYZE: "
            + environment.getSourceFiles().size()
            + " files ("
            + environment.getSourceLinesOfCode()
            + " lines) "
            + (targetDescription != null ? targetDescription : "")
            + "in "
            + TimeUnit.NANOSECONDS.toMillis(analysisNanos)
            + " ms";
    K2JVMCompiler.Companion.reportPerf(environment.getConfiguration(), message);

    AnalysisResult result = analyzerWithCompilerReport.getAnalysisResult();
    assert result != null
        : "AnalysisResult should be non-null, compiling: " + environment.getSourceFiles();

    CompilerPluginContext context =
        new CompilerPluginContext(
            environment.getProject(), result.getBindingContext(), environment.getSourceFiles());
    for (CompilerPlugin plugin :
        environment.getConfiguration().getList(CLIConfigurationKeys.COMPILER_PLUGINS)) {
      plugin.processFiles(context);
    }

    return analyzerWithCompilerReport.hasErrors() ? null : result;
  }
Code Example #18
  /**
   * Main constructor with manual Dependency Injection
   *
   * @param notebookIndex - (nullable) used to index all notebooks on creation.
   * @throws IOException
   * @throws SchedulerException
   */
  public Notebook(
      ZeppelinConfiguration conf,
      NotebookRepo notebookRepo,
      SchedulerFactory schedulerFactory,
      InterpreterFactory replFactory,
      JobListenerFactory jobListenerFactory,
      SearchService notebookIndex,
      NotebookAuthorization notebookAuthorization,
      Credentials credentials)
      throws IOException, SchedulerException {
    this.conf = conf;
    this.notebookRepo = notebookRepo;
    this.schedulerFactory = schedulerFactory;
    this.replFactory = replFactory;
    this.jobListenerFactory = jobListenerFactory;
    this.notebookIndex = notebookIndex;
    this.notebookAuthorization = notebookAuthorization;
    this.credentials = credentials;
    quertzSchedFact = new org.quartz.impl.StdSchedulerFactory();
    quartzSched = quertzSchedFact.getScheduler();
    quartzSched.start();
    CronJob.notebook = this;

    loadAllNotes();
    if (this.notebookIndex != null) {
      long start = System.nanoTime();
      logger.info("Notebook indexing started...");
      notebookIndex.addIndexDocs(notes.values());
      logger.info(
          "Notebook indexing finished: {} indexed in {}s",
          notes.size(),
          TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - start));
    }
  }
Code Example #19
File: Image.java Project: jonasry/stereosvr
 public static void write(BufferedImage image, String name, OutputStream out) throws IOException {
   System.out.println("Writing " + name + ". W:" + image.getWidth() + " H:" + image.getHeight());
   final long start = System.nanoTime();
   ImageIO.write(image, getType(name), out);
   final long duration = System.nanoTime() - start;
   System.out.println("Saved " + name + " in " + TimeUnit.NANOSECONDS.toMillis(duration) + " ms.");
 }
Code Example #20
File: MyExam.java Project: avivrm/thinking_in_java
 public Student(String name, int workTime, CountDownLatch countDownLatch) {
   this.name = name;
   this.workTime = workTime;
    // Assumes submitTime is a long nanosecond deadline and workTime a duration in milliseconds;
    // the original NANOSECONDS-to-NANOSECONDS conversion was a no-op and the (int) cast would overflow.
    this.submitTime =
        TimeUnit.NANOSECONDS.convert(workTime, TimeUnit.MILLISECONDS) + System.nanoTime();
   this.countDownLatch = countDownLatch;
 }
Code Example #21
 protected void printStatusMessage(long startTime, long totalTodoCount, long doneCount) {
   long elapsedMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
   log.indexingDocumentsCompleted(doneCount, elapsedMs);
   float estimateSpeed = doneCount * 1000f / elapsedMs;
   float estimatePercentileComplete = doneCount * 100f / totalTodoCount;
   log.indexingSpeed(estimateSpeed, estimatePercentileComplete);
 }
Code Example #22
File: BayeuxClientTest.java Project: gkaspi/cometd
  @Test
  public void testWaitFor() throws Exception {
    final BlockingArrayQueue<String> results = new BlockingArrayQueue<>();

    String channelName = "/chat/msg";
    MarkedReference<ServerChannel> channel = bayeux.createChannelIfAbsent(channelName);
    channel
        .getReference()
        .addListener(
            new ServerChannel.MessageListener() {
              public boolean onMessage(ServerSession from, ServerChannel channel, Mutable message) {
                results.add(from.getId());
                results.add(channel.getId());
                results.add(String.valueOf(message.getData()));
                return true;
              }
            });

    BayeuxClient client = newBayeuxClient();
    long wait = 1000L;
    long start = System.nanoTime();
    client.handshake(wait);
    long stop = System.nanoTime();
    Assert.assertTrue(TimeUnit.NANOSECONDS.toMillis(stop - start) < wait);
    Assert.assertNotNull(client.getId());
    String data = "Hello World";
    client.getChannel(channelName).publish(data);

    Assert.assertEquals(client.getId(), results.poll(1, TimeUnit.SECONDS));
    Assert.assertEquals(channelName, results.poll(1, TimeUnit.SECONDS));
    Assert.assertEquals(data, results.poll(1, TimeUnit.SECONDS));

    disconnectBayeuxClient(client);
  }
Code Example #23
File: TestIntegerTopK.java Project: hgschmie/jcommon
  @Test(groups = "slow")
  public void testInsertionTiming() {
    int keySpaceSize = 10000;
    int k = 100;
    int maxAdd = 100;
    TopK<Integer> topK = getInstance(keySpaceSize, k);

    LOG.info("Timing add() performance with keySpaceSize = %s, k = %s", keySpaceSize, k);

    Random random = new Random(0);
    long totalTime = 0;
    long count = 0;
    long begin = System.nanoTime();

    while (System.nanoTime() - begin < TEST_TIME_NANOS) {
      long start = System.nanoTime();

      topK.add(random.nextInt(keySpaceSize), random.nextInt(maxAdd));

      if (System.nanoTime() - begin > TimeUnit.SECONDS.toNanos(1)) {
        // discard the first second of measurements
        totalTime += System.nanoTime() - start;
        ++count;
      }
    }

    LOG.info(
        "Processed %s entries in %s ms. Insertion rate = %s entries/s",
        count,
        TimeUnit.NANOSECONDS.toMillis(totalTime),
        count / (totalTime * 1.0 / TimeUnit.SECONDS.toNanos(1)));
  }
Code Example #24
  /*
   * This method handles two different scenarios:
   *
   * a) we're handling the initial read, of data from the closest replica + digests
   *    from the rest.  In this case we check the digests against each other,
   *    throw an exception if there is a mismatch, otherwise return the data row.
   *
   * b) we're checking additional digests that arrived after the minimum to handle
   *    the requested ConsistencyLevel, i.e. asynchronous read repair check
   */
  public Row resolve() throws DigestMismatchException {
    if (logger.isDebugEnabled()) logger.debug("resolving " + replies.size() + " responses");

    long start = System.nanoTime();

    // validate digests against each other; throw immediately on mismatch.
    // also extract the data reply, if any.
    ColumnFamily data = null;
    ByteBuffer digest = null;

    for (MessageIn<ReadResponse> message : replies) {
      ReadResponse response = message.payload;

      ByteBuffer newDigest;
      if (response.isDigestQuery()) {
        newDigest = response.digest();
      } else {
        // note that this allows for multiple data replies, post-CASSANDRA-5932
        data = response.row().cf;
        newDigest = ColumnFamily.digest(data);
      }

      if (digest == null) digest = newDigest;
      else if (!digest.equals(newDigest)) throw new DigestMismatchException(key, digest, newDigest);
    }

    if (logger.isDebugEnabled())
      logger.debug("resolve: {} ms.", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
    return new Row(key, data);
  }
Code Example #25
 public Student(String name, long submitTime) {
   this.name = name;
   this.workTime = submitTime;
   this.submitTime =
       TimeUnit.NANOSECONDS.convert(submitTime, TimeUnit.MILLISECONDS) + System.nanoTime();
    System.out.println(this.name + " handed in the exam, time taken: " + workTime);
 }
Code Example #26
  /**
   * Drain the queue of pending counts into the provided buffer and write those counts to DynamoDB.
   * This blocks until data is available in the queue.
   *
   * @param buffer A reusable buffer with sufficient space to drain the entire queue if necessary.
   *     This is provided as an optimization to avoid allocating a new buffer every interval.
   * @throws InterruptedException Thread interrupted while waiting for new data to arrive in the
   *     queue.
   */
  protected void sendQueueToDynamoDB(List<HttpReferrerPairsCount> buffer)
      throws InterruptedException {
    // Block while waiting for data
    buffer.add(counts.take());
    // Drain as much of the queue as we can.
    // DynamoDBMapper will handle splitting the batch sizes for us.
    counts.drainTo(buffer);
    try {
      long start = System.nanoTime();
      // Write the contents of the buffer as items to our table
      List<FailedBatch> failures = mapper.batchWrite(buffer, Collections.emptyList());
      long end = System.nanoTime();
      LOG.info(
          String.format(
              "%d new counts sent to DynamoDB in %dms",
              buffer.size(), TimeUnit.NANOSECONDS.toMillis(end - start)));

      for (FailedBatch failure : failures) {
        LOG.warn(
            "Error sending count batch to DynamoDB. This will not be retried!",
            failure.getException());
      }
    } catch (Exception ex) {
      LOG.error("Error sending new counts to DynamoDB. The some counts may not be persisted.", ex);
    }
  }
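The Javadoc above describes a blocking drain-and-flush step; the fragment below sketches the driver loop it implies, reusing one buffer across calls as the comment on the `buffer` parameter suggests. It assumes it lives in the same class, that `java.util.ArrayList` and `java.util.List` are imported, and that the loop and exit strategy shown here are illustrative assumptions rather than the original project's code.

  public void run() {
    // Reuse a single buffer across intervals to avoid reallocating it every time.
    List<HttpReferrerPairsCount> buffer = new ArrayList<>();
    while (!Thread.currentThread().isInterrupted()) {
      try {
        sendQueueToDynamoDB(buffer); // blocks until at least one count is queued
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // preserve the interrupt and let the loop exit
      } finally {
        buffer.clear(); // the same list is handed back in on the next iteration
      }
    }
  }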
Code Example #27
File: Exam.java Project: taoxie/Interview
 public Student(String name, long submitTime) {
   super();
   this.name = name;
   workTime = submitTime;
    // convert everything to nanoseconds
   this.submitTime =
       TimeUnit.NANOSECONDS.convert(submitTime, TimeUnit.MILLISECONDS) + System.nanoTime();
 }
Code Example #28
 public String pendingFramesTimeStamps() {
   threadChecker.checkIsOnValidThread();
   List<Long> timeStampsMs = new ArrayList<Long>();
   for (long ts : timeStampsNs) {
     timeStampsMs.add(TimeUnit.NANOSECONDS.toMillis(ts));
   }
   return timeStampsMs.toString();
 }
Code Example #29
File: PutSQS.java Project: RajiMenon/nifi
  @Override
  public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
      return;
    }

    final long startNanos = System.nanoTime();
    final AmazonSQSClient client = getClient();
    final SendMessageBatchRequest request = new SendMessageBatchRequest();
    final String queueUrl =
        context.getProperty(QUEUE_URL).evaluateAttributeExpressions(flowFile).getValue();
    request.setQueueUrl(queueUrl);

    final Set<SendMessageBatchRequestEntry> entries = new HashSet<>();

    final SendMessageBatchRequestEntry entry = new SendMessageBatchRequestEntry();
    entry.setId(flowFile.getAttribute("uuid"));
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    session.exportTo(flowFile, baos);
    final String flowFileContent = baos.toString();
    entry.setMessageBody(flowFileContent);

    final Map<String, MessageAttributeValue> messageAttributes = new HashMap<>();

    for (final PropertyDescriptor descriptor : userDefinedProperties) {
      final MessageAttributeValue mav = new MessageAttributeValue();
      mav.setDataType("String");
      mav.setStringValue(
          context.getProperty(descriptor).evaluateAttributeExpressions(flowFile).getValue());
      messageAttributes.put(descriptor.getName(), mav);
    }

    entry.setMessageAttributes(messageAttributes);
    entry.setDelaySeconds(context.getProperty(DELAY).asTimePeriod(TimeUnit.SECONDS).intValue());
    entries.add(entry);

    request.setEntries(entries);

    try {
      client.sendMessageBatch(request);
    } catch (final Exception e) {
      getLogger()
          .error(
              "Failed to send messages to Amazon SQS due to {}; routing to failure",
              new Object[] {e});
      flowFile = session.penalize(flowFile);
      session.transfer(flowFile, REL_FAILURE);
      return;
    }

    getLogger()
        .info("Successfully published message to Amazon SQS for {}", new Object[] {flowFile});
    session.transfer(flowFile, REL_SUCCESS);
    final long transmissionMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
    session.getProvenanceReporter().send(flowFile, queueUrl, transmissionMillis);
  }
Code Example #30
File: SimpleConfig.java Project: ksaka9821/config
 @Override
 public List<Long> getMillisecondsList(String path) {
   List<Long> nanos = getNanosecondsList(path);
   List<Long> l = new ArrayList<Long>();
   for (Long n : nanos) {
     l.add(TimeUnit.NANOSECONDS.toMillis(n));
   }
   return l;
 }