Code example #1
  /**
   * Main constructor with manual Dependency Injection.
   *
   * @param notebookIndex - (nullable) used to index all notebooks on creation.
   * @throws IOException
   * @throws SchedulerException
   */
  public Notebook(
      ZeppelinConfiguration conf,
      NotebookRepo notebookRepo,
      SchedulerFactory schedulerFactory,
      InterpreterFactory replFactory,
      JobListenerFactory jobListenerFactory,
      SearchService notebookIndex,
      NotebookAuthorization notebookAuthorization,
      Credentials credentials)
      throws IOException, SchedulerException {
    this.conf = conf;
    this.notebookRepo = notebookRepo;
    this.schedulerFactory = schedulerFactory;
    this.replFactory = replFactory;
    this.jobListenerFactory = jobListenerFactory;
    this.notebookIndex = notebookIndex;
    this.notebookAuthorization = notebookAuthorization;
    this.credentials = credentials;
    quartzSchedFact = new org.quartz.impl.StdSchedulerFactory();
    quartzSched = quartzSchedFact.getScheduler();
    quartzSched.start();
    CronJob.notebook = this;

    loadAllNotes();
    if (this.notebookIndex != null) {
      long start = System.nanoTime();
      logger.info("Notebook indexing started...");
      notebookIndex.addIndexDocs(notes.values());
      logger.info(
          "Notebook indexing finished: {} indexed in {}s",
          notes.size(),
          TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - start));
    }
  }
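
A note on the timing idiom used throughout these examples: System.nanoTime() readings are meaningful only as differences, so the earlier reading must be subtracted from the later one before converting. A minimal sketch of the pattern (doWork is a hypothetical placeholder; requires java.util.concurrent.TimeUnit):

  long start = System.nanoTime();
  doWork(); // hypothetical placeholder for the operation being timed
  long elapsedSeconds = TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - start);
  System.out.println("took " + elapsedSeconds + "s");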
Code example #2
  public void start(long transactionCount) {
    int[] processors = generateProcessorRange();

    System.out.printf("Multiverse> Uncontended update lean-transaction benchmark\n");
    System.out.printf("Multiverse> 1 GammaTxnRef per transaction\n");
    System.out.printf("Multiverse> %s Transactions per thread\n", format(transactionCount));
    System.out.printf(
        "Multiverse> Running with the following processor range %s\n", Arrays.toString(processors));
    Result[] result = new Result[processors.length];

    System.out.println("Multiverse> Starting warmup run");
    test(1, transactionCount);
    System.out.println("Multiverse> Finished warmup run");

    long startNs = System.nanoTime();

    for (int k = 0; k < processors.length; k++) {
      int processorCount = processors[k];
      double performance = test(processorCount, transactionCount);
      result[k] = new Result(processorCount, performance);
    }

    long durationNs = System.nanoTime() - startNs;
    System.out.printf(
        "Multiverse> Benchmark took %s seconds\n", TimeUnit.NANOSECONDS.toSeconds(durationNs));

    toGnuplot(result);
  }
Code example #3
File: Request.java Project: Ciscen/picasso
 String logId() {
   long delta = System.nanoTime() - started;
   if (delta > TOO_LONG_LOG) {
     return plainId() + '+' + TimeUnit.NANOSECONDS.toSeconds(delta) + 's';
   }
   return plainId() + '+' + TimeUnit.NANOSECONDS.toMillis(delta) + "ms";
 }
Code example #4
File: QuantileDigest.java Project: cyenjung/pinot
  /**
   * Gets the exponentially-decayed approximate counts of values in multiple buckets. The elements
   * in the provided list denote the upper bound of each bucket and must be sorted in ascending
   * order.
   *
   * The approximate count in each bucket is guaranteed to be within 2 * totalCount * maxError of
   * the real count.
   */
  public List<Bucket> getHistogram(List<Long> bucketUpperBounds) {
    checkArgument(
        Ordering.natural().isOrdered(bucketUpperBounds),
        "buckets must be sorted in increasing order");

    final ImmutableList.Builder<Bucket> builder = ImmutableList.builder();
    final PeekingIterator<Long> iterator = Iterators.peekingIterator(bucketUpperBounds.iterator());

    final AtomicDouble sum = new AtomicDouble();
    final AtomicDouble lastSum = new AtomicDouble();

    // for computing weighted average of values in bucket
    final AtomicDouble bucketWeightedSum = new AtomicDouble();

    final double normalizationFactor = weight(TimeUnit.NANOSECONDS.toSeconds(ticker.read()));

    postOrderTraversal(
        root,
        new Callback() {
          @Override
          public boolean process(Node node) {

            while (iterator.hasNext() && iterator.peek() <= node.getUpperBound()) {
              double bucketCount = sum.get() - lastSum.get();

              Bucket bucket =
                  new Bucket(
                      bucketCount / normalizationFactor, bucketWeightedSum.get() / bucketCount);

              builder.add(bucket);
              lastSum.set(sum.get());
              bucketWeightedSum.set(0);
              iterator.next();
            }

            bucketWeightedSum.addAndGet(node.getMiddle() * node.weightedCount);
            sum.addAndGet(node.weightedCount);
            return iterator.hasNext();
          }
        });

    while (iterator.hasNext()) {
      double bucketCount = sum.get() - lastSum.get();
      Bucket bucket =
          new Bucket(bucketCount / normalizationFactor, bucketWeightedSum.get() / bucketCount);

      builder.add(bucket);

      iterator.next();
    }

    return builder.build();
  }
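
A minimal usage sketch of getHistogram, assuming the @VisibleForTesting constructor shown in code example #5 below and Guava's Ticker.systemTicker(); the maxError/alpha values, the sample inputs, and the nested QuantileDigest.Bucket reference are illustrative assumptions:

  // Hypothetical usage sketch; 0.01 (maxError) and 0.0 (alpha) are illustrative values.
  QuantileDigest digest = new QuantileDigest(0.01, 0.0, Ticker.systemTicker(), true);
  digest.add(5, 1);
  digest.add(50, 1);
  digest.add(500, 1);
  // Upper bounds must be sorted in ascending order, as checkArgument enforces above.
  List<QuantileDigest.Bucket> histogram = digest.getHistogram(Arrays.asList(10L, 100L, 1000L));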
Code example #5
File: QuantileDigest.java Project: cyenjung/pinot
  @VisibleForTesting
  QuantileDigest(double maxError, double alpha, Ticker ticker, boolean compressAutomatically) {
    checkArgument(maxError >= 0 && maxError <= 1, "maxError must be in range [0, 1]");
    checkArgument(alpha >= 0 && alpha < 1, "alpha must be in range [0, 1)");

    this.maxError = maxError;
    this.alpha = alpha;
    this.ticker = ticker;
    this.compressAutomatically = compressAutomatically;

    landmarkInSeconds = TimeUnit.NANOSECONDS.toSeconds(ticker.read());
  }
Code example #6
File: QuantileDigest.java Project: cyenjung/pinot
  /** Adds a value to this digest. The value must be {@code >= 0} */
  public void add(long value, long count) {
    checkArgument(count > 0, "count must be > 0");

    long nowInSeconds = TimeUnit.NANOSECONDS.toSeconds(ticker.read());

    int maxExpectedNodeCount = 3 * calculateCompressionFactor();
    if (nowInSeconds - landmarkInSeconds >= RESCALE_THRESHOLD_SECONDS) {
      rescale(nowInSeconds);
      compress(); // need to compress to get rid of nodes that may have decayed to ~ 0
    } else if (nonZeroNodeCount > MAX_SIZE_FACTOR * maxExpectedNodeCount && compressAutomatically) {
      // The size (number of non-zero nodes) of the digest is at most 3 * compression factor
      // If we're over MAX_SIZE_FACTOR of the expected size, compress
      // Note: we don't compress as soon as we go over expectedNodeCount to avoid unnecessarily
      // running a compression for every new added element when we're close to boundary
      compress();
    }

    double weight = weight(TimeUnit.NANOSECONDS.toSeconds(ticker.read())) * count;

    max = Math.max(max, value);
    min = Math.min(min, value);

    insert(longToBits(value), weight);
  }
Code example #7
 @Override
 public void artifactDownloaded(RepositoryEvent event) {
   super.artifactDownloaded(event);
   Artifact artifact = event.getArtifact();
   String key = artifactAsString(artifact);
   long downloadTimeNanos = System.nanoTime() - startTimes.remove(key);
   double downloadTimeMs = TimeUnit.NANOSECONDS.toMillis(downloadTimeNanos);
   double downloadTimeSec = TimeUnit.NANOSECONDS.toSeconds(downloadTimeNanos);
   long size = artifact.getFile().length();
   double sizeK = (1 / 1024D) * size;
   double downloadRateKBytesPerSecond = sizeK / downloadTimeSec;
   info(
       "Downloaded %s (%d bytes) in %gms (%g kbytes/sec).",
       key, size, downloadTimeMs, downloadRateKBytesPerSecond);
 }
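
One caveat in this example: TimeUnit.NANOSECONDS.toSeconds truncates toward zero, so a download that completes in under one second leaves downloadTimeSec at 0 and makes the computed rate infinite. A sketch of a variant that keeps fractional seconds (an assumption, not the original listener's code):

  // Hypothetical variant: keep fractional seconds so sub-second downloads
  // do not divide by zero when computing the rate.
  double downloadTimeSec = downloadTimeNanos / 1_000_000_000.0;
  double downloadRateKBytesPerSecond = sizeK / downloadTimeSec;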
Code example #8
File: QuantileDigest.java Project: cyenjung/pinot
  private void rescaleToCommonLandmark(QuantileDigest one, QuantileDigest two) {
    long nowInSeconds = TimeUnit.NANOSECONDS.toSeconds(ticker.read());

    // 1. rescale this and other to common landmark
    long targetLandmark = Math.max(one.landmarkInSeconds, two.landmarkInSeconds);

    if (nowInSeconds - targetLandmark >= RESCALE_THRESHOLD_SECONDS) {
      targetLandmark = nowInSeconds;
    }

    if (targetLandmark != one.landmarkInSeconds) {
      one.rescale(targetLandmark);
    }

    if (targetLandmark != two.landmarkInSeconds) {
      two.rescale(targetLandmark);
    }
  }
Code example #9
    @Override
    public void channelRead(@NotNull ChannelHandlerContext ctx, @NotNull Object msg) {
      try {
        bytesReceived += ((ByteBuf) msg).readableBytes();

        if (i++ % 10000 == 0) System.out.print(".");
        if (TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - startTime) >= 10) {
          long time = System.nanoTime() - startTime;
          System.out.printf("\nThroughput was %.1f MB/s%n", 1e3 * bytesReceived / time);
          return;
        }

      } finally {
        ReferenceCountUtil.release(msg); // (2)
      }

      final ByteBuf outMsg = ctx.alloc().buffer(bufferSize); // (2)
      outMsg.writeBytes(payload);

      ctx.writeAndFlush(outMsg); // (3)
    }
Code example #10
File: RunNiFi.java Project: kkasravi/nifi
  private boolean waitForStart() {
    lock.lock();
    try {
      final long startTime = System.nanoTime();

      while (ccPort < 1) {
        try {
          startupCondition.await(1, TimeUnit.SECONDS);
        } catch (final InterruptedException ie) {
          return false;
        }

        final long waitNanos = System.nanoTime() - startTime;
        final long waitSeconds = TimeUnit.NANOSECONDS.toSeconds(waitNanos);
        if (waitSeconds > STARTUP_WAIT_SECONDS) {
          return false;
        }
      }
    } finally {
      lock.unlock();
    }
    return true;
  }
Code example #11
File: QuantileDigest.java Project: cyenjung/pinot
 /** Number (decayed) of elements added to this quantile digest */
 public double getCount() {
   return weightedCount / weight(TimeUnit.NANOSECONDS.toSeconds(ticker.read()));
 }
Code example #12
  /**
   * Reads all of the market data.
   *
   * @return List<MarketDataBean>
   */
  public List<MarketDataBean> loadMarketData() {
    log.info("Preparing to read stock data from the Shanghai and Shenzhen exchanges!");
    // Compute the "virtual CPU count" from the "number of CPUs" and the "cores per CPU".
    int virtualCPUNums = cpuNums * coreNums;

    // --- Parameter validation ---

    if (StringUtils.isBlank(marketDataFolderpath)) {
      throw new IllegalArgumentException(
          "The market data folder path must not be blank [" + marketDataFolderpath + "]!");
    }

    /*
     * Tasks fall into "compute-intensive" and "I/O-intensive" categories; this one is I/O-bound,
     * so the blocking coefficient is tentatively set to 0.6.
     * Method 1: thread count = virtual CPU count / (1 - blocking coefficient)
     * loadDataThreadNums = (int)(virtualCPUNums / (1 - 0.6));
     *
     * Java Concurrency in Practice gives an empirical formula for estimating pool size.
     * Method 2: thread count = virtual CPU count * target CPU utilization * (1 + wait time / compute time)
     * loadDataThreadNums = (int)(virtualCPUNums * 0.8 * (1 + 1/100))
     *
     * In testing, method 1 yielded 118 threads and an average run time of 16 seconds, while
     * method 2 yielded 57 threads and an average run time of 29 seconds. Method 1 is clearly
     * better here; evidently method 2 does not account for this task's characteristics.
     */
    if (loadDataThreadNums <= 0) {
      loadDataThreadNums = (int) (virtualCPUNums / (1 - 0.6));
    }

    if (monitoringInterval < 1000) {
      monitoringInterval = 1000;
    }

    // --- Main logic ---

    System.out.println("虚拟 CPU 数量 = " + virtualCPUNums + ", 装载市场行情数据的线程数 = " + loadDataThreadNums);
    System.out.println(
        "是否启动监听线程 = " + startMonitorTask + ", 监听任务的监控间隔时间(毫秒) = " + monitoringInterval);

    // Start time.
    long startTime = System.nanoTime();

    // Initial collection of market data.
    List<MarketDataBean> marketDataList = new ArrayList<MarketDataBean>(0);

    try {

      /*
       * 1. Get the market data files, and re-initialize the collection of beans holding the
       *    stock data to avoid the overhead of re-hashing.
       */
      File[] marketDataFiles = getMarketDataFiles(marketDataFolderpath);
      if (marketDataFiles == null || marketDataFiles.length == 0) {
        return marketDataList;
      }

      /*
       * 2. Read the collection of market data file paths.
       */
      Map<String, String> marketDataFilepathMap = getMarketDataFilepath(marketDataFiles);

      /*
       * 3. Compute a split factor from the "number of data files" and the "thread count", and use
       *    it to partition the collection of market data file paths.
       *    Split factor = file count / thread count * 0.2; each thread takes on only "20% of the
       *    maximum read volume", which shortens each task at the cost of more tasks.
       *
       *    When "file count" < "thread count", (file count / thread count) = 0, so the split
       *    factor becomes 1.
       */
      BigDecimal numberOfFiles = BigDecimal.valueOf(marketDataFilepathMap.size());
      int splitNum =
          numberOfFiles
              .divide(BigDecimal.valueOf(loadDataThreadNums), 0, RoundingMode.UP)
              .multiply(BigDecimal.valueOf(0.2))
              .setScale(0, RoundingMode.HALF_UP)
              .intValue();
      splitNum = (splitNum <= 0) ? 1 : splitNum;

      List<Map<String, String>> marketDataFilepathMapList =
          splitMarketDataFilepathMap(marketDataFilepathMap, splitNum);

      log.info("总共要读取的行情文件数 = " + numberOfFiles + ", 行情文件分组系数(每线程读取行情文件数) = " + splitNum);

      /*
       * 4. Start one thread to monitor progress.
       */
      ExecutorService monitorExec = getMonitorLoadMarketDataThreadPool();
      AtomicInteger readMarketDataNum = new AtomicInteger(0);
      DataLoadMonitorTask dataLoadMonitorTask = null;
      if (this.startMonitorTask) {
        dataLoadMonitorTask =
            new DataLoadMonitorTask(
                marketDataFilepathMap.size(), readMarketDataNum, this.monitoringInterval);
        monitorExec.execute(dataLoadMonitorTask);
        monitorExec.shutdown();
      }

      /*
       * 5. Read the Shanghai/Shenzhen A-share and index data with multiple threads.
       */
      ExecutorService workerExec = getLoadMarketDataThreadPool(loadDataThreadNums);
      marketDataList =
          readMarketDataToBean(
              workerExec, marketDataFilepathMapList, startMonitorTask, readMarketDataNum);
      if (startMonitorTask && dataLoadMonitorTask != null) {
        dataLoadMonitorTask.stop();
      }

    } catch (Exception e) {
      throw new RuntimeException(e);
    }

    // End time.
    long endTime = System.nanoTime();

    // Elapsed time.
    long elapsedTime = TimeUnit.NANOSECONDS.toSeconds((endTime - startTime));

    log.info("此次共载入行情数据文件 = " + marketDataList.size() + " 个,耗时 = " + elapsedTime + " 秒");

    return marketDataList;
  }
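
The sizing comment above compares two thread-count formulas; here is a standalone sketch of both, using the 0.6 blocking coefficient and 0.8 target utilization quoted there (note that 1/100 must be written 1.0/100 in Java, since integer division makes 1/100 evaluate to 0):

  // Sketch of the two pool-sizing formulas discussed in the comment above.
  int virtualCPUs = Runtime.getRuntime().availableProcessors();
  int byBlockingCoefficient = (int) (virtualCPUs / (1 - 0.6));      // method 1
  int byJcipEstimate = (int) (virtualCPUs * 0.8 * (1 + 1.0 / 100)); // method 2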
Code example #13
File: BayeuxClientTest.java Project: gkaspi/cometd
  @Test
  public void testPerf() throws Exception {
    Runtime.getRuntime().addShutdownHook(new DumpThread());

    final int rooms = stress ? 100 : 10;
    final int publish = stress ? 4000 : 100;
    final int batch = stress ? 10 : 2;
    final int pause = stress ? 50 : 10;
    BayeuxClient[] clients = new BayeuxClient[stress ? 500 : 2 * rooms];

    final AtomicInteger connections = new AtomicInteger();
    final AtomicInteger received = new AtomicInteger();

    for (int i = 0; i < clients.length; i++) {
      final AtomicBoolean connected = new AtomicBoolean();
      final BayeuxClient client = newBayeuxClient();
      final String room = "/channel/" + (i % rooms);
      clients[i] = client;

      client
          .getChannel(Channel.META_HANDSHAKE)
          .addListener(
              new ClientSessionChannel.MessageListener() {
                public void onMessage(ClientSessionChannel channel, Message message) {
                  if (connected.getAndSet(false)) connections.decrementAndGet();

                  if (message.isSuccessful()) {
                    client
                        .getChannel(room)
                        .subscribe(
                            new ClientSessionChannel.MessageListener() {
                              public void onMessage(ClientSessionChannel channel, Message message) {
                                received.incrementAndGet();
                              }
                            });
                  }
                }
              });

      client
          .getChannel(Channel.META_CONNECT)
          .addListener(
              new ClientSessionChannel.MessageListener() {
                public void onMessage(ClientSessionChannel channel, Message message) {
                  if (!connected.getAndSet(message.isSuccessful())) {
                    connections.incrementAndGet();
                  }
                }
              });

      clients[i].handshake();
      client.waitFor(5000, State.CONNECTED);
    }

    Assert.assertEquals(clients.length, connections.get());

    long start0 = System.nanoTime();
    for (int i = 0; i < publish; i++) {
      final int sender = random.nextInt(clients.length);
      final String channel = "/channel/" + random.nextInt(rooms);

      String data = "data from " + sender + " to " + channel;
      // System.err.println(data);
      clients[sender].getChannel(channel).publish(data);

      if (i % batch == (batch - 1)) {
        System.err.print('.');
        Thread.sleep(pause);
      }
      if (i % 1000 == 999) System.err.println();
    }
    System.err.println();

    int expected = clients.length * publish / rooms;

    long start = System.nanoTime();
    while (received.get() < expected
        && TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - start) < 10) {
      Thread.sleep(100);
      System.err.println("received " + received.get() + "/" + expected);
    }
    System.err.println(
        // multiply as long (1_000_000_000L) to avoid int overflow under the stress settings
        (received.get() * 1_000_000_000L) / (System.nanoTime() - start0) + " m/s");

    Assert.assertEquals(expected, received.get());

    for (BayeuxClient client : clients) Assert.assertTrue(client.disconnect(1000));
  }
Code example #14
File: DecayCounter.java Project: aloneiz/platform
 private long getTickInSeconds() {
   return TimeUnit.NANOSECONDS.toSeconds(ticker.read());
 }
Code example #15
 long startTiming() {
   if (TimeUnit.NANOSECONDS.toSeconds(getDurationNano()) > REPORT_INTERVAL_SECS) {
     reportMetrics();
   }
   return System.nanoTime();
 }
Code example #16
File: RunNiFi.java Project: kkasravi/nifi
  public void stop() throws IOException {
    final Logger logger = cmdLogger;
    final Integer port = getCurrentPort(logger);
    if (port == null) {
      logger.info("Apache NiFi is not currently running");
      return;
    }

    final Properties nifiProps = loadProperties(logger);
    final String secretKey = nifiProps.getProperty("secret.key");

    try (final Socket socket = new Socket()) {
      logger.debug("Connecting to NiFi instance");
      socket.setSoTimeout(60000);
      socket.connect(new InetSocketAddress("localhost", port));
      logger.debug("Established connection to NiFi instance.");
      socket.setSoTimeout(60000);

      logger.debug("Sending SHUTDOWN Command to port {}", port);
      final OutputStream out = socket.getOutputStream();
      out.write((SHUTDOWN_CMD + " " + secretKey + "\n").getBytes(StandardCharsets.UTF_8));
      out.flush();
      socket.shutdownOutput();

      final InputStream in = socket.getInputStream();
      int lastChar;
      final StringBuilder sb = new StringBuilder();
      while ((lastChar = in.read()) > -1) {
        sb.append((char) lastChar);
      }
      final String response = sb.toString().trim();

      logger.debug("Received response to SHUTDOWN command: {}", response);

      if (SHUTDOWN_CMD.equals(response)) {
        logger.info("Apache NiFi has accepted the Shutdown Command and is shutting down now");

        final String pid = nifiProps.getProperty("pid");
        if (pid != null) {
          final Properties bootstrapProperties = new Properties();
          try (final FileInputStream fis = new FileInputStream(bootstrapConfigFile)) {
            bootstrapProperties.load(fis);
          }

          String gracefulShutdown =
              bootstrapProperties.getProperty(
                  GRACEFUL_SHUTDOWN_PROP, DEFAULT_GRACEFUL_SHUTDOWN_VALUE);
          int gracefulShutdownSeconds;
          try {
            gracefulShutdownSeconds = Integer.parseInt(gracefulShutdown);
          } catch (final NumberFormatException nfe) {
            gracefulShutdownSeconds = Integer.parseInt(DEFAULT_GRACEFUL_SHUTDOWN_VALUE);
          }

          final long startWait = System.nanoTime();
          while (isProcessRunning(pid, logger)) {
            logger.info("Waiting for Apache NiFi to finish shutting down...");
            final long waitNanos = System.nanoTime() - startWait;
            final long waitSeconds = TimeUnit.NANOSECONDS.toSeconds(waitNanos);
            if (waitSeconds >= gracefulShutdownSeconds && gracefulShutdownSeconds > 0) {
              if (isProcessRunning(pid, logger)) {
                logger.warn(
                    "NiFi has not finished shutting down after {} seconds. Killing process.",
                    gracefulShutdownSeconds);
                try {
                  killProcessTree(pid, logger);
                } catch (final IOException ioe) {
                  logger.error("Failed to kill Process with PID {}", pid);
                }
              }
              break;
            } else {
              try {
                Thread.sleep(2000L);
              } catch (final InterruptedException ie) {
              }
            }
          }

          final File statusFile = getStatusFile(logger);
          if (statusFile.exists() && !statusFile.delete()) {
            logger.error(
                "Failed to delete status file {}; this file should be cleaned up manually",
                statusFile);
          }
          logger.info("NiFi has finished shutting down.");
        }
      } else {
        logger.error("When sending SHUTDOWN command to NiFi, got unexpected response {}", response);
      }
    } catch (final IOException ioe) {
      logger.error(
          "Failed to send shutdown command to port {} due to {}",
          new Object[] {port, ioe.toString(), ioe});
    }
  }
Code example #17
  protected String buildGraph(String seriesDirectory, Method<?, ?> method, List<Double> datas) {
    XYSeries series = new XYSeries("XYGraph", false, false);

    double snapshot = 0;
    for (Double data : datas) {
      // use longValue(): intValue() overflows for durations over ~2.1 seconds of nanos
      double seconds = TimeUnit.NANOSECONDS.toSeconds(data.longValue());
      series.add(snapshot++, seconds);
    }

    XYSeriesCollection seriesCollection = new XYSeriesCollection();
    seriesCollection.addSeries(series);
    JFreeChart chart =
        ChartFactory.createXYLineChart(
            null,
            "Snapshots",
            "Time",
            seriesCollection,
            PlotOrientation.VERTICAL,
            false,
            false,
            false);
    chart.setTitle(new TextTitle(method.getName(), new Font("Arial", Font.BOLD, 11)));

    XYPlot xyPlot = chart.getXYPlot();
    NumberAxis yAxis = (NumberAxis) xyPlot.getRangeAxis();
    yAxis.setAutoRange(true);
    yAxis.setAutoRangeIncludesZero(true);

    NumberAxis xAxis = (NumberAxis) xyPlot.getDomainAxis();
    xAxis.setAutoRange(true);
    xAxis.setAutoRangeIncludesZero(true);
    // xAxis.setTickUnit(new NumberTickUnit(1));

    StringBuilder builder = new StringBuilder(method.getClassName());
    builder.append(method.getName());
    builder.append(method.getDescription());

    String fileName = Long.toString(Toolkit.hash(builder.toString()));
    fileName += ".jpeg";

    File chartSeriesDirectory = new File(IConstants.chartDirectory, seriesDirectory);
    File chartFile = new File(chartSeriesDirectory, fileName);
    try {
      if (!IConstants.chartDirectory.exists()) {
        //noinspection ResultOfMethodCallIgnored
        IConstants.chartDirectory.mkdirs();
      }
      if (!chartSeriesDirectory.exists()) {
        //noinspection ResultOfMethodCallIgnored
        chartSeriesDirectory.mkdirs();
      }
      if (!chartFile.exists()) {
        //noinspection ResultOfMethodCallIgnored
        chartFile.createNewFile();
      }
      ChartUtilities.saveChartAsJPEG(chartFile, chart, 450, 150);
      builder = new StringBuilder(IConstants.CHARTS);
      builder.append(File.separatorChar);
      builder.append(seriesDirectory);
      builder.append(File.separatorChar);
      builder.append(fileName);

      return builder.toString();
    } catch (Exception e) {
      logger.error("Exception generating the graph", e);
    }
    return null;
  }
Code example #18
 private static long tick() {
   return TimeUnit.NANOSECONDS.toSeconds(System.nanoTime());
 }
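
Finally, note that TimeUnit conversions truncate toward zero rather than round, so helpers like tick() quantize time down to whole seconds. A minimal illustration:

  // TimeUnit conversions truncate toward zero; 0.999999999 s reports as 0 s.
  long almostOneSecond = 999_999_999L;
  System.out.println(TimeUnit.NANOSECONDS.toSeconds(almostOneSecond)); // prints 0
  System.out.println(TimeUnit.NANOSECONDS.toMillis(almostOneSecond));  // prints 999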