public void testNonStopTimer() throws Exception {
   long startTime = System.nanoTime();
   int loopTimes = 4;
   long timeout = 500;
   for (int i = 0; i < loopTimes; i++) {
     System.out.println("executing loop count " + i);
     nonStopManager.begin(timeout);
     try {
       blockUntilAborted();
     } finally {
       Assert.assertTrue(abortableOperationManager.isAborted());
       Thread.currentThread().interrupt();
       nonStopManager.finish();
       // check that aborted status is cleared.
       Assert.assertFalse(abortableOperationManager.isAborted());
       // check that interrupted flag is cleared.
       Assert.assertFalse(Thread.interrupted());
     }
   }
   long timeTaken = System.nanoTime() - startTime;
   System.out.println("time taken to execute operations " + timeTaken);
   Assert.assertTrue(
       (timeTaken >= loopTimes * TimeUnit.MILLISECONDS.toNanos(timeout)
           && timeTaken
               < (loopTimes * TimeUnit.MILLISECONDS.toNanos(timeout)
                   + TimeUnit.SECONDS.toNanos(2))));
 }
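A minimal sketch of the elapsed-time bound checked by the assertion above, with the two bounds named (the helper name is hypothetical; it assumes the test's existing Assert and TimeUnit imports): each of the loopTimes iterations must block for at least the full timeout, and the whole loop gets at most two extra seconds of slack.

static void assertElapsedWithinSlack(long elapsedNanos, int loopTimes, long timeoutMillis) {
  // Lower bound: every iteration blocks for the full timeout before finish() is called.
  long lowerBound = loopTimes * TimeUnit.MILLISECONDS.toNanos(timeoutMillis);
  // Upper bound: allow two seconds of scheduling slack on top of the minimum.
  long upperBound = lowerBound + TimeUnit.SECONDS.toNanos(2);
  Assert.assertTrue(
      "elapsed " + elapsedNanos + " ns outside [" + lowerBound + ", " + upperBound + ")",
      elapsedNanos >= lowerBound && elapsedNanos < upperBound);
}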
  @Test
  public void testRespectsTime() throws Exception {
    final long startTime = System.nanoTime();
    final AtomicLong currentTime = new AtomicLong(startTime);
    Ticker ticker =
        new Ticker() {
          @Override
          public long read() {
            return currentTime.get();
          }
        };
    ProportionalZoneFailureDetector detector =
        new ProportionalZoneFailureDetector(2, Duration.ONE_HOUR, 0.9, ticker);

    for (int i = 0; i < 2; i++) {
      detector.onStartupFailure(loc1, entity1, new Throwable("simulated failure"));
    }
    assertTrue(detector.hasFailed(loc1));

    currentTime.set(startTime + TimeUnit.MILLISECONDS.toNanos(1000 * 60 * 60 - 1));
    assertTrue(detector.hasFailed(loc1));

    currentTime.set(startTime + TimeUnit.MILLISECONDS.toNanos(1000 * 60 * 60 + 1));
    assertFalse(detector.hasFailed(loc1));
  }
  @Theory
  public void shouldRetransmitOnNakAfterLinger(
      final BiConsumer<RetransmitHandlerTest, Integer> creator) {
    createTermBuffer(creator, 5);
    handler.onNak(TERM_ID, offsetOfFrame(0), ALIGNED_FRAME_LENGTH);
    processTimersUntil(() -> wheel.clock().nanoTime() >= TimeUnit.MILLISECONDS.toNanos(100));
    handler.onNak(TERM_ID, offsetOfFrame(0), ALIGNED_FRAME_LENGTH);
    processTimersUntil(() -> wheel.clock().nanoTime() >= TimeUnit.MILLISECONDS.toNanos(200));

    verify(retransmitSender, times(2)).resend(TERM_ID, offsetOfFrame(0), ALIGNED_FRAME_LENGTH);
  }
Example #4
  protected ChannelExec openCommandChannel(ClientSession session, String cmd) throws IOException {
    long waitTimeout =
        PropertyResolverUtils.getLongProperty(
            session, SCP_EXEC_CHANNEL_OPEN_TIMEOUT, DEFAULT_EXEC_CHANNEL_OPEN_TIMEOUT);
    ChannelExec channel = session.createExecChannel(cmd);

    long startTime = System.nanoTime();
    try {
      channel.open().verify(waitTimeout);
      long endTime = System.nanoTime();
      long nanosWait = endTime - startTime;
      if (log.isTraceEnabled()) {
        log.trace(
            "openCommandChannel("
                + session
                + ")["
                + cmd
                + "]"
                + " completed after "
                + nanosWait
                + " nanos out of "
                + TimeUnit.MILLISECONDS.toNanos(waitTimeout));
      }

      return channel;
    } catch (IOException | RuntimeException e) {
      long endTime = System.nanoTime();
      long nanosWait = endTime - startTime;
      if (log.isTraceEnabled()) {
        log.trace(
            "openCommandChannel("
                + session
                + ")["
                + cmd
                + "]"
                + " failed ("
                + e.getClass().getSimpleName()
                + ")"
                + " to complete after "
                + nanosWait
                + " nanos out of "
                + TimeUnit.MILLISECONDS.toNanos(waitTimeout)
                + ": "
                + e.getMessage());
      }

      channel.close(false);
      throw e;
    }
  }
 /**
  * Prints usage information about the threads from this factory to the given logger at the
  * provided priority.
  *
  * @param logger the logger to receive the usage summary
  * @param priority the priority at which the per-state summary lines are logged
  */
 public synchronized void printUsageInformation(final Logger logger, final Priority priority) {
   logger.debug("Number of threads monitored: " + getnThreadsAnalyzed());
   logger.debug(
       "Total runtime " + new AutoFormattingTime(TimeUnit.MILLISECONDS.toNanos(getTotalTime())));
   for (final State state : State.values()) {
     logger.debug(
         String.format(
             "\tPercent of time spent %s is %.2f",
             state.getUserFriendlyName(), getStatePercent(state)));
   }
   logger.log(
       priority,
       String.format(
           "CPU      efficiency : %6.2f%% of time spent %s",
           getStatePercent(State.USER_CPU), State.USER_CPU.getUserFriendlyName()));
   logger.log(
       priority,
       String.format(
           "Walker inefficiency : %6.2f%% of time spent %s",
           getStatePercent(State.BLOCKING), State.BLOCKING.getUserFriendlyName()));
   logger.log(
       priority,
       String.format(
           "I/O    inefficiency : %6.2f%% of time spent %s",
           getStatePercent(State.WAITING_FOR_IO), State.WAITING_FOR_IO.getUserFriendlyName()));
   logger.log(
       priority,
       String.format(
           "Thread inefficiency : %6.2f%% of time spent %s",
           getStatePercent(State.WAITING), State.WAITING.getUserFriendlyName()));
 }
  @Test
  public void contextDeadlineShouldNotOverrideSmallerMetadataTimeout() {
    long deadlineNanos = TimeUnit.SECONDS.toNanos(2);
    Context context =
        Context.current()
            .withDeadlineAfter(deadlineNanos, TimeUnit.NANOSECONDS, deadlineCancellationExecutor);
    context.attach();

    CallOptions callOpts = CallOptions.DEFAULT.withDeadlineAfter(1, TimeUnit.SECONDS);
    ClientCallImpl<Void, Void> call =
        new ClientCallImpl<Void, Void>(
            DESCRIPTOR,
            MoreExecutors.directExecutor(),
            callOpts,
            provider,
            deadlineCancellationExecutor);

    Metadata headers = new Metadata();

    call.start(callListener, headers);

    assertTrue(headers.containsKey(GrpcUtil.TIMEOUT_KEY));
    Long timeout = headers.get(GrpcUtil.TIMEOUT_KEY);
    assertNotNull(timeout);

    long callOptsNanos = TimeUnit.SECONDS.toNanos(1);
    long deltaNanos = TimeUnit.MILLISECONDS.toNanos(400);
    assertTimeoutBetween(timeout, callOptsNanos - deltaNanos, callOptsNanos);
  }
  // Called on cameraThread, so this method must not be synchronized.
  @Override
  public void onPreviewFrame(byte[] data, Camera callbackCamera) {
    checkIsOnCameraThread();
    if (camera == null) {
      return;
    }
    if (camera != callbackCamera) {
      throw new RuntimeException("Unexpected camera in callback!");
    }

    final long captureTimeNs = TimeUnit.MILLISECONDS.toNanos(SystemClock.elapsedRealtime());

    if (eventsHandler != null && !firstFrameReported) {
      eventsHandler.onFirstFrameAvailable();
      firstFrameReported = true;
    }

    // Mark the frame owning |data| as used.
    // Note that since data is a direct buffer, data.length >= videoBuffers.frameSize.
    if (videoBuffers.reserveByteBuffer(data, captureTimeNs)) {
      cameraStatistics.addPendingFrame(captureTimeNs);
      frameObserver.onByteBufferFrameCaptured(
          data,
          videoBuffers.frameSize,
          captureFormat.width,
          captureFormat.height,
          getFrameOrientation(),
          captureTimeNs);
    } else {
      Logging.w(TAG, "reserveByteBuffer failed - dropping frame.");
    }
  }
  public void get() throws WriteTimeoutException, WriteFailureException {
    long requestTimeout =
        writeType == WriteType.COUNTER
            ? DatabaseDescriptor.getCounterWriteRpcTimeout()
            : DatabaseDescriptor.getWriteRpcTimeout();

    long timeout = TimeUnit.MILLISECONDS.toNanos(requestTimeout) - (System.nanoTime() - start);

    boolean success;
    try {
      success = condition.await(timeout, TimeUnit.NANOSECONDS);
    } catch (InterruptedException ex) {
      throw new AssertionError(ex);
    }

    if (!success) {
      int blockedFor = totalBlockFor();
      int acks = ackCount();
      // It's pretty unlikely, but we can race between exiting the await above and reaching this
      // point, so we could now have enough acks. In that case, we "lie" about the ack count to
      // avoid sending confusing info to the user (see CASSANDRA-6491).
      if (acks >= blockedFor) acks = blockedFor - 1;
      throw new WriteTimeoutException(writeType, consistencyLevel, acks, blockedFor);
    }

    if (totalBlockFor() + failures > totalEndpoints()) {
      throw new WriteFailureException(
          consistencyLevel, ackCount(), failures, totalBlockFor(), writeType);
    }
  }
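The await above is handed whatever is left of the configured request timeout after subtracting the time already spent since the handler was created. A self-contained sketch of that remaining-budget pattern, with a CountDownLatch standing in for Cassandra's condition (illustrative names, not Cassandra API):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

final class RemainingBudgetExample {
  // Await completion for the configured timeout minus the time already elapsed since startNanos.
  // A non-positive remainder means the deadline has already passed, so report a timeout at once.
  static boolean awaitWithBudget(CountDownLatch done, long startNanos, long timeoutMillis)
      throws InterruptedException {
    long remainingNanos =
        TimeUnit.MILLISECONDS.toNanos(timeoutMillis) - (System.nanoTime() - startNanos);
    return remainingNanos > 0 && done.await(remainingNanos, TimeUnit.NANOSECONDS);
  }
}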
  public void cleanupTimedOutTransactions() {
    if (trace)
      log.tracef(
          "About to cleanup remote transactions older than %d ms",
          configuration.transaction().completedTxTimeout());
    long beginning = timeService.time();
    long cutoffCreationTime =
        beginning - TimeUnit.MILLISECONDS.toNanos(configuration.transaction().completedTxTimeout());
    List<GlobalTransaction> toKill = new ArrayList<>();

    // Check remote transactions.
    for (Map.Entry<GlobalTransaction, RemoteTransaction> e : remoteTransactions.entrySet()) {
      GlobalTransaction gtx = e.getKey();
      RemoteTransaction remoteTx = e.getValue();
      if (remoteTx != null) {
        if (trace) log.tracef("Checking transaction %s", gtx);
        // Check the time.
        if (remoteTx.getCreationTime() - cutoffCreationTime < 0) {
          long duration =
              timeService.timeDuration(
                  remoteTx.getCreationTime(), beginning, TimeUnit.MILLISECONDS);
          log.remoteTransactionTimeout(gtx, duration);
          toKill.add(gtx);
        }
      }
    }

    // Rollback the orphaned transactions and release any held locks.
    for (GlobalTransaction gtx : toKill) {
      killTransaction(gtx);
    }
  }
Example #10
 public long period(Statistics statistics) {
   long period = 0;
   if (statistics != null) {
     period = TimeUnit.MILLISECONDS.toNanos(statistics.getEnd() - statistics.getBegin());
   }
   return period;
 }
 public static void waitForRehashToComplete(Cache... caches) {
   // give it 1 second to start rehashing
   // TODO Should look at the last committed view instead and check if it contains all the caches
   LockSupport.parkNanos(TimeUnit.SECONDS.toNanos(1));
   int gracetime = 30000; // 30 seconds?
   long giveup = System.currentTimeMillis() + gracetime;
   for (Cache c : caches) {
     CacheViewsManager cacheViewsManager =
         TestingUtil.extractGlobalComponent(c.getCacheManager(), CacheViewsManager.class);
     RpcManager rpcManager = TestingUtil.extractComponent(c, RpcManager.class);
     while (cacheViewsManager.getCommittedView(c.getName()).getMembers().size() != caches.length) {
       if (System.currentTimeMillis() > giveup) {
         String message =
             String.format(
                 "Timed out waiting for rehash to complete on node %s, expected member list is %s, current member list is %s!",
                 rpcManager.getAddress(),
                 Arrays.toString(caches),
                 cacheViewsManager.getCommittedView(c.getName()));
         log.error(message);
         throw new RuntimeException(message);
       }
       LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
     }
     log.trace("Node " + rpcManager.getAddress() + " finished rehash task.");
   }
 }
Example #12
  /**
   * Blocks until an object is received that is handled by process, or the specified timeout has
   * passed.
   *
   * <p>Once closed any attempt to wait will throw an exception.
   *
   * @param timeout The timeout in milliseconds.
   * @return The object that resolved the blocking.
   * @throws QpidException if the wait is terminated by an error or times out
   * @throws FailoverException if a failover occurs while waiting
   */
  public Object block(long timeout) throws QpidException, FailoverException {
    if (timeout < 0) {
      throw new IllegalArgumentException("timeout must be zero or greater");
    }

    long nanoTimeout = TimeUnit.MILLISECONDS.toNanos(timeout);

    _lock.lock();

    try {
      if (_closed) {
        throw throwClosedException();
      }

      if (_error == null) {
        _waiting.set(true);

        while (!_ready && _error == null) {
          try {
            nanoTimeout = _receivedCondition.awaitNanos(nanoTimeout);

            if (nanoTimeout <= 0 && !_ready && _error == null) {
              _error = new AMQTimeoutException("Server did not respond in a timely fashion", null);
              _ready = true;
            }
          } catch (InterruptedException e) {
            _logger.error(e.getMessage(), e);
            // IGNORE -- FIXME: this isn't ideal, as being interrupted isn't equivalent to success.
          }
        }
      }

      if (_error != null) {
        if (_error instanceof QpidException) {
          throw (QpidException) _error;
        } else if (_error instanceof FailoverException) {
          // This should ensure that FailoverException is not wrapped and can be caught.
          throw (FailoverException) _error; // needed to expose FailoverException.
        } else {
          throw new QpidException("Woken up due to " + _error.getClass(), _error);
        }
      }

    } finally {
      _waiting.set(false);

      // Release Error handling thread
      if (_error != null) {
        _errorAck = true;
        _errorConditionAck.signal();

        _error = null;
      }
      _lock.unlock();
    }

    return _doneObject;
  }
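The loop above relies on Condition.awaitNanos returning the time still remaining, so passing the returned value back in keeps the overall deadline stable across spurious wakeups. A self-contained sketch of that idiom (illustrative names, not Qpid API):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

final class AwaitNanosIdiom {
  private final ReentrantLock lock = new ReentrantLock();
  private final Condition ready = lock.newCondition();
  private boolean done;

  // Wait until done is set or timeoutMillis elapses; awaitNanos shrinks the budget on each wakeup.
  boolean awaitDone(long timeoutMillis) throws InterruptedException {
    long nanosLeft = TimeUnit.MILLISECONDS.toNanos(timeoutMillis);
    lock.lock();
    try {
      while (!done) {
        if (nanosLeft <= 0) {
          return false; // deadline passed
        }
        nanosLeft = ready.awaitNanos(nanosLeft);
      }
      return true;
    } finally {
      lock.unlock();
    }
  }
}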
Example #13
 protected DataWriter createFileDataWriter(Iterable<DataRecorderChannel> channels) {
   int writesPerSecond =
       (int) ((double) TimeUnit.SECONDS.toNanos(1) / TimeUnit.MILLISECONDS.toNanos(20));
   return new FileDataWriter(
       channels,
       dataRecorderFilenameGenerator,
       writesPerSecond,
       estimatedRecordDurationInSeconds);
 }
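The cast-heavy expression above is just a rate conversion: one second divided by a 20 ms write interval. Spelled out as plain arithmetic (no project API involved):

// 1 s = 1_000_000_000 ns and 20 ms = 20_000_000 ns, so the writer is configured for
// 1_000_000_000 / 20_000_000 = 50 writes per second.
int writesPerSecond =
    (int) ((double) TimeUnit.SECONDS.toNanos(1) / TimeUnit.MILLISECONDS.toNanos(20)); // == 50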
Example #14
  @Theory
  public void shouldStopRetransmitOnRetransmitReception(
      final BiConsumer<RetransmitHandlerTest, Integer> creator) {
    createTermBuffer(creator, 5);
    handler.onNak(TERM_ID, offsetOfFrame(0), ALIGNED_FRAME_LENGTH);
    handler.onRetransmitReceived(TERM_ID, offsetOfFrame(0));
    processTimersUntil(() -> wheel.clock().nanoTime() >= TimeUnit.MILLISECONDS.toNanos(100));

    verifyZeroInteractions(retransmitSender);
  }
Example #15
 public boolean schedule(long lazyTimeout) {
   long execution = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(lazyTimeout);
   if (_task == null || execution < _execution) {
     cancel();
     _execution = execution;
     _task = _bayeux.schedule(this, lazyTimeout);
     return true;
   }
   return false;
 }
Example #16
  @Theory
  public void shouldRetransmitOnNakOverMtuLength(
      final BiConsumer<RetransmitHandlerTest, Integer> creator) {
    final int numFramesPerMtu = MTU_LENGTH / ALIGNED_FRAME_LENGTH;
    createTermBuffer(creator, numFramesPerMtu * 5);
    handler.onNak(TERM_ID, offsetOfFrame(0), MTU_LENGTH * 2);
    processTimersUntil(() -> wheel.clock().nanoTime() >= TimeUnit.MILLISECONDS.toNanos(100));

    verify(retransmitSender).resend(TERM_ID, offsetOfFrame(0), MTU_LENGTH * 2);
  }
Example #17
 @Override
 public Long getNanoseconds(String path) {
   Long ns = null;
   try {
     ns = TimeUnit.MILLISECONDS.toNanos(getLong(path));
   } catch (ConfigException.WrongType e) {
     ConfigValue v = find(path, ConfigValueType.STRING);
     ns = parseDuration((String) v.unwrapped(), v.origin(), path);
   }
   return ns;
 }
  /** Creates a nanosecond timestamp relative to the Unix epoch. */
  public static long getCurrentStandardNano() {

    long epochTimeReference =
        TimeUnit.MILLISECONDS.toNanos(
            FrameworkServiceDataHolder.getInstance().getUnixTimeReference());
    long currentSystemNano = System.nanoTime();
    long currentStandardNano =
        epochTimeReference
            + (currentSystemNano - FrameworkServiceDataHolder.getInstance().getNanoTimeReference());
    return currentStandardNano;
  }
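getCurrentStandardNano anchors System.nanoTime(), whose origin is arbitrary, to a wall-clock reference captured once at startup. A self-contained sketch of the same technique, using System.currentTimeMillis() for the reference instead of the FrameworkServiceDataHolder fields (illustrative, not the framework's API):

import java.util.concurrent.TimeUnit;

final class EpochNanoClock {
  // Capture both references once; later timestamps are the epoch reference plus the nanoTime delta.
  private final long epochNanosAtStart = TimeUnit.MILLISECONDS.toNanos(System.currentTimeMillis());
  private final long nanoTimeAtStart = System.nanoTime();

  long epochNanosNow() {
    return epochNanosAtStart + (System.nanoTime() - nanoTimeAtStart);
  }
}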
Example #19
  /**
   * Wait on backpressure with a timeout. Returns true on timeout, false otherwise. The
   * timeoutNanos argument is the initial timeout; it is reduced to the remaining time after
   * spurious wakeups.
   */
  public boolean backpressureBarrier(final long start, long timeoutNanos)
      throws InterruptedException {
    if (m_isShutdown) {
      return false;
    }
    if (m_blessedThreadIds.contains(Thread.currentThread().getId())) {
      throw new RuntimeException(
          "Can't invoke backpressureBarrier from within the client callback thread"
              + " without deadlocking the client library");
    }
    if (m_backpressure) {
      synchronized (m_backpressureLock) {
        if (m_backpressure) {
          while (m_backpressure && !m_isShutdown) {
            if (start != 0) {
              // Wait on the condition for the specified timeout remaining
              m_backpressureLock.wait(
                  timeoutNanos / TimeUnit.MILLISECONDS.toNanos(1),
                  (int) (timeoutNanos % TimeUnit.MILLISECONDS.toNanos(1)));

              // Condition is true, break and return false
              if (!m_backpressure) break;

              // Calculate whether the timeout should be triggered
              final long nowNanos = System.nanoTime();
              final long deltaNanos = Math.max(1, nowNanos - start);
              if (deltaNanos >= timeoutNanos) {
                return true;
              }

              // Reduce timeoutNanos to the remaining portion of the timeout.
              timeoutNanos -= deltaNanos;
            } else {
              m_backpressureLock.wait();
            }
          }
        }
      }
    }
    return false;
  }
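The divide and modulo by TimeUnit.MILLISECONDS.toNanos(1) above exist because Object.wait(long, int) takes a millisecond part plus a sub-millisecond nanosecond part. A minimal sketch of that split in isolation (hypothetical helper):

// Split a nanosecond timeout into the (millis, extra nanos) pair expected by Object.wait(long, int);
// 1 ms == 1_000_000 ns, so the modulo always stays in the required [0, 999999] range.
static void waitNanos(Object monitor, long timeoutNanos) throws InterruptedException {
  long nanosPerMilli = TimeUnit.MILLISECONDS.toNanos(1);
  synchronized (monitor) {
    monitor.wait(timeoutNanos / nanosPerMilli, (int) (timeoutNanos % nanosPerMilli));
  }
}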
Example #20
  @Theory
  public void shouldGoIntoLingerOnImmediateRetransmit(
      final BiConsumer<RetransmitHandlerTest, Integer> creator) {
    createTermBuffer(creator, 5);
    handler = newZeroDelayRetransmitHandler();

    handler.onNak(TERM_ID, offsetOfFrame(0), ALIGNED_FRAME_LENGTH);
    processTimersUntil(() -> wheel.clock().nanoTime() >= TimeUnit.MILLISECONDS.toNanos(40));
    handler.onNak(TERM_ID, offsetOfFrame(0), ALIGNED_FRAME_LENGTH);

    verify(retransmitSender).resend(TERM_ID, offsetOfFrame(0), ALIGNED_FRAME_LENGTH);
  }
Example #21
  private ClientTransport obtainActiveTransport() {
    ClientTransport savedActiveTransport = activeTransport;
    // If we know there is an active transport and we are not in backoff mode, return quickly.
    if (savedActiveTransport != null && !(savedActiveTransport instanceof InactiveTransport)) {
      return savedActiveTransport;
    }
    synchronized (lock) {
      if (shutdown) {
        return null;
      }
      savedActiveTransport = activeTransport;
      if (savedActiveTransport instanceof InactiveTransport) {
        if (System.nanoTime() > TimeUnit.MILLISECONDS.toNanos(reconnectTimeMillis)) {
          // The timeout expired, clear the inactive transport and update the shutdown status to
          // something that is retryable.
          activeTransport = null;
          savedActiveTransport = activeTransport;
        } else {
          // We are still in backoff mode, just return the inactive transport.
          return savedActiveTransport;
        }
      }

      if (savedActiveTransport != null) {
        return savedActiveTransport;
      }
      // There is no active transport, or we just finished backoff.  Create a new transport.
      ClientTransport newActiveTransport = transportFactory.newClientTransport();
      transports.add(newActiveTransport);
      boolean failed = true;
      try {
        newActiveTransport.start(new TransportListener(newActiveTransport));
        failed = false;
      } finally {
        if (failed) {
          transports.remove(newActiveTransport);
        }
      }
      // It's possible that start() called transportShutdown() and transportTerminated(). If so, we
      // wouldn't want to make it the active transport.
      if (transports.contains(newActiveTransport)) {
        // start() must return before we set activeTransport, since activeTransport is accessed
        // without a lock.
        activeTransport = newActiveTransport;
      }
      return newActiveTransport;
    }
  }
Example #22
 @Override
 public List<Long> getNanosecondsList(String path) {
   List<Long> l = new ArrayList<Long>();
   List<? extends ConfigValue> list = getList(path);
   for (ConfigValue v : list) {
     if (v.valueType() == ConfigValueType.NUMBER) {
       l.add(TimeUnit.MILLISECONDS.toNanos(((Number) v.unwrapped()).longValue()));
     } else if (v.valueType() == ConfigValueType.STRING) {
       String s = (String) v.unwrapped();
       Long n = parseDuration(s, v.origin(), path);
       l.add(n);
     } else {
       throw new ConfigException.WrongType(
           v.origin(), path, "duration string or number of nanoseconds", v.valueType().name());
     }
   }
   return l;
 }
 /**
  * Constructor
  *
  * @param name The unique name of this manager.
  * @param shortName The short name identifying this manager type.
  * @param agents An array of Agents.
  * @param batchSize The number of events to include in a batch.
  * @param delayMillis The maximum time in milliseconds to wait before sending a partial batch.
  * @param retries The number of times to retry connecting before giving up.
  * @param connectTimeout The connection timeout in ms.
  * @param requestTimeout The request timeout in ms.
  */
 protected FlumeAvroManager(
     final String name,
     final String shortName,
     final Agent[] agents,
     final int batchSize,
     final int delayMillis,
     final int retries,
     final int connectTimeout,
     final int requestTimeout) {
   super(name);
   this.agents = agents;
   this.batchSize = batchSize;
   this.delayMillis = delayMillis;
   this.delayNanos = TimeUnit.MILLISECONDS.toNanos(delayMillis);
   this.retries = retries;
   this.connectTimeoutMillis = connectTimeout;
   this.requestTimeoutMillis = requestTimeout;
   this.rpcClient = connect(agents, retries, connectTimeout, requestTimeout);
 }
 /**
  * @deprecated Use {@link #waitForRehashToComplete(org.infinispan.Cache[])} instead; this method
  *     is not reliable with merges.
  */
 public static void waitForInitRehashToComplete(Cache... caches) {
   int gracetime = 30000; // 30 seconds?
   long giveup = System.currentTimeMillis() + gracetime;
   for (Cache c : caches) {
     StateTransferManager stateTransferManager =
         TestingUtil.extractComponent(c, StateTransferManager.class);
     RpcManager rpcManager = TestingUtil.extractComponent(c, RpcManager.class);
     while (!stateTransferManager.isJoinComplete()) {
       if (System.currentTimeMillis() > giveup) {
         String message =
             "Timed out waiting for join to complete on node " + rpcManager.getAddress() + " !";
         log.error(message);
         throw new RuntimeException(message);
       }
       LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
     }
     log.trace("Node " + rpcManager.getAddress() + " finished join task.");
   }
 }
 @Override
 public List<OTEServerLocation> findServerByMachine(String regex, long timeoutMs)
     throws URISyntaxException {
   lock.lock();
   List<OTEServerLocation> locations = findServerByMachine(regex);
   try {
     long nanos = TimeUnit.MILLISECONDS.toNanos(timeoutMs);
     while (nanos > 0 && locations.size() == 0) {
       try {
         nanos = condition.awaitNanos(nanos);
         locations = findServerByMachine(regex);
       } catch (InterruptedException e) {
         e.printStackTrace();
       }
     }
   } finally {
     lock.unlock();
   }
   return locations;
 }
 public static void waitForRehashToComplete(Cache cache, int groupSize) {
   LockSupport.parkNanos(TimeUnit.SECONDS.toNanos(1));
   int gracetime = 30000; // 30 seconds?
   long giveup = System.currentTimeMillis() + gracetime;
   CacheViewsManager cacheViewsManager =
       TestingUtil.extractGlobalComponent(cache.getCacheManager(), CacheViewsManager.class);
   RpcManager rpcManager = TestingUtil.extractComponent(cache, RpcManager.class);
   while (cacheViewsManager.getCommittedView(cache.getName()).getMembers().size() != groupSize) {
     if (System.currentTimeMillis() > giveup) {
       String message =
           String.format(
               "Timed out waiting for rehash to complete on node %s, expected member count %s, current member count is %s!",
               rpcManager.getAddress(),
               groupSize,
               cacheViewsManager.getCommittedView(cache.getName()));
       log.error(message);
       throw new RuntimeException(message);
     }
     LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
   }
   log.trace("Node " + rpcManager.getAddress() + " finished rehash task.");
 }
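The rehash and join waiters above share the same poll-until-deadline shape: re-check a condition every 100 ms and give up after a grace period. A generic sketch of that loop, with a BooleanSupplier standing in for the view/join checks (illustrative helper, assumes Java 8+):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.LockSupport;
import java.util.function.BooleanSupplier;

final class PollUntil {
  // Poll the condition every 100 ms; throw once the grace period has elapsed without success.
  static void waitUntil(BooleanSupplier condition, long graceMillis) {
    long giveUp = System.currentTimeMillis() + graceMillis;
    while (!condition.getAsBoolean()) {
      if (System.currentTimeMillis() > giveUp) {
        throw new RuntimeException("Timed out after " + graceMillis + " ms");
      }
      LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
    }
  }
}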
  /**
   * Test method for {@link BasicMongoClientMetrics#receive(String, long, Message, Reply, long)}.
   */
  @Test
  public void testReceive() {
    final Message mockSentMessage = createMock(Message.class);
    final Reply mockReply = createMock(Reply.class);

    final BasicMongoClientMetrics metrics = new BasicMongoClientMetrics();

    // One for the DB and one for the collection.
    expect(mockReply.size()).andReturn(202).times(5);
    expect(mockSentMessage.getOperationName()).andReturn("operation");
    expect(mockSentMessage.getDatabaseName()).andReturn("db");
    expect(mockSentMessage.getCollectionName()).andReturn("collection");
    expectLastCall();

    replay(mockSentMessage, mockReply);

    metrics.receive("server1", 0L, mockSentMessage, mockReply, TimeUnit.MILLISECONDS.toNanos(1));

    metrics.close();

    verify(mockSentMessage, mockReply);
  }
Example #28
 @Override
 public final void setPushToProcessMilli(final int value) {
   NonPositiveArgumentException.check(value, "pushToProcessMilli");
   pushToProcessNano = TimeUnit.MILLISECONDS.toNanos(value);
 }
Example #29
public abstract class TestIntegerTopK {
  // a bit over a second per timing test to keep the tests short
  private static final long TEST_TIME_NANOS = TimeUnit.MILLISECONDS.toNanos(1250);
  private static Logger LOG;

  @BeforeMethod(alwaysRun = true)
  public void setUp() throws Exception {
    LOG = getLogger();
  }

  protected abstract TopK<Integer> getInstance(int keySpaceSize, int k);

  /**
   * Defers logger creation so logging is done under the subclass name; implementations may cache
   * the logger.
   *
   * @return Logger
   */
  protected abstract Logger getLogger();

  @Test(groups = "fast")
  public void testTop3() {
    TopK<Integer> topK = getInstance(10, 3);

    assertTopK(topK);
    topK.add(1, 3);
    assertTopK(topK, 1);
    topK.add(2, 2);
    assertTopK(topK, 1, 2);
    topK.add(3, 8);
    assertTopK(topK, 3, 1, 2);
    topK.add(4, 1);
    assertTopK(topK, 3, 1, 2);
    topK.add(4, 3);
    assertTopK(topK, 3, 4, 1);
    topK.add(2, 3);
    assertTopK(topK, 3, 2, 4);
  }

  @Test(
      groups = "fast",
      expectedExceptions = IllegalArgumentException.class,
      expectedExceptionsMessageRegExp = "count to add must be non-negative, got -3")
  public void testAddNegative() {
    TopK<Integer> topK = getInstance(10, 3);
    topK.add(0, -3);
  }

  @Test(
      groups = "fast",
      expectedExceptions = NullPointerException.class,
      expectedExceptionsMessageRegExp = "key can't be null")
  public void testNullKey() {
    TopK<Integer> topK = getInstance(10, 3);
    topK.add(null, 1);
  }

  @Test(groups = "slow")
  public void testInsertionTiming() {
    int keySpaceSize = 10000;
    int k = 100;
    int maxAdd = 100;
    TopK<Integer> topK = getInstance(keySpaceSize, k);

    LOG.info("Timing add() performance with keySpaceSize = %s, k = %s", keySpaceSize, k);

    Random random = new Random(0);
    long totalTime = 0;
    long count = 0;
    long begin = System.nanoTime();

    while (System.nanoTime() - begin < TEST_TIME_NANOS) {
      long start = System.nanoTime();

      topK.add(random.nextInt(keySpaceSize), random.nextInt(maxAdd));

      if (System.nanoTime() - begin > TimeUnit.SECONDS.toNanos(1)) {
        // discard the first second of measurements
        totalTime += System.nanoTime() - start;
        ++count;
      }
    }

    LOG.info(
        "Processed %s entries in %s ms. Insertion rate = %s entries/s",
        count,
        TimeUnit.NANOSECONDS.toMillis(totalTime),
        count / (totalTime * 1.0 / TimeUnit.SECONDS.toNanos(1)));
  }

  @Test(groups = "slow")
  public void testRetrievalTiming() {
    int keySpaceSize = 10000;
    int k = 100;
    int maxAdd = 100;
    TopK<Integer> topK = getInstance(keySpaceSize, k);

    LOG.info("Timing getTopK() performance with keySpaceSize = %s, k = %s", keySpaceSize, k);

    Random random = new Random(0);
    long totalTime = 0;
    long count = 0;
    long begin = System.nanoTime();

    while (System.nanoTime() - begin < TEST_TIME_NANOS) {

      topK.add(random.nextInt(keySpaceSize), random.nextInt(maxAdd));

      long start = System.nanoTime();

      topK.getTopK();

      if (System.nanoTime() - begin > TimeUnit.SECONDS.toNanos(1)) {
        // discard the first second of measurements
        totalTime += System.nanoTime() - start;
        ++count;
      }
    }

    LOG.info(
        "Processed %s entries in %s ms. Retrieval rate = %s retrievals/s",
        count,
        TimeUnit.NANOSECONDS.toMillis(totalTime),
        count / (totalTime * 1.0 / TimeUnit.SECONDS.toNanos(1)));
  }

  private static void assertTopK(TopK<Integer> topK, Integer... expected) {
    assertEquals(topK.getTopK(), Arrays.asList(expected));
  }
}
  @Test
  public void testBuildJson() throws IOException {
    ProjectFilesystem projectFilesystem = new ProjectFilesystem(tmpDir.getRoot().toPath());

    ObjectMapper mapper = new ObjectMapper();

    ChromeTraceBuildListener listener =
        new ChromeTraceBuildListener(
            projectFilesystem,
            new FakeClock(1409702151000000000L),
            mapper,
            Locale.US,
            TimeZone.getTimeZone("America/Los_Angeles"),
            /* tracesToKeep */ 42,
            false);

    BuildTarget target = BuildTargetFactory.newInstance("//fake:rule");

    FakeBuildRule rule =
        new FakeBuildRule(
            target,
            new SourcePathResolver(new BuildRuleResolver()),
            ImmutableSortedSet.<BuildRule>of());
    RuleKey ruleKey = new RuleKey("abc123");
    rule.setRuleKey(ruleKey);
    String stepShortName = "fakeStep";
    String stepDescription = "I'm a Fake Step!";
    UUID stepUuid = UUID.randomUUID();

    ExecutionContext context = createMock(ExecutionContext.class);
    replay(context);

    ImmutableSet<BuildTarget> buildTargets = ImmutableSet.of(target);
    Iterable<String> buildArgs = Iterables.transform(buildTargets, Functions.toStringFunction());
    Clock fakeClock = new IncrementingFakeClock(TimeUnit.MILLISECONDS.toNanos(1));
    BuckEventBus eventBus =
        BuckEventBusFactory.newInstance(
            fakeClock, new BuildId("ChromeTraceBuildListenerTestBuildId"));
    eventBus.register(listener);

    eventBus.post(
        CommandEvent.started("party", ImmutableList.of("arg1", "arg2"), /* isDaemon */ true));
    eventBus.post(ArtifactCacheConnectEvent.started());
    eventBus.post(ArtifactCacheConnectEvent.finished());
    eventBus.post(BuildEvent.started(buildArgs));
    eventBus.post(
        ArtifactCacheEvent.started(ArtifactCacheEvent.Operation.FETCH, ImmutableSet.of(ruleKey)));
    eventBus.post(
        ArtifactCacheEvent.finished(
            ArtifactCacheEvent.Operation.FETCH, ImmutableSet.of(ruleKey), CacheResult.hit("http")));
    eventBus.post(BuildRuleEvent.started(rule));
    eventBus.post(StepEvent.started(stepShortName, stepDescription, stepUuid));

    eventBus.post(StepEvent.finished(stepShortName, stepDescription, stepUuid, 0));
    eventBus.post(
        BuildRuleEvent.finished(
            rule,
            BuildRuleStatus.SUCCESS,
            CacheResult.miss(),
            Optional.of(BuildRuleSuccessType.BUILT_LOCALLY),
            Optional.<HashCode>absent(),
            Optional.<Long>absent()));

    try (TraceEventLogger ignored =
        TraceEventLogger.start(eventBus, "planning", ImmutableMap.of("nefarious", "true"))) {
      eventBus.post(new TraceEvent("scheming", ChromeTraceEvent.Phase.BEGIN));
      eventBus.post(
          new TraceEvent(
              "scheming", ChromeTraceEvent.Phase.END, ImmutableMap.of("success", "false")));
    }

    eventBus.post(BuildEvent.finished(buildArgs, 0));
    eventBus.post(
        CommandEvent.finished(
            "party", ImmutableList.of("arg1", "arg2"), /* isDaemon */ true, /* exitCode */ 0));
    listener.outputTrace(new BuildId("BUILD_ID"));

    File resultFile = new File(tmpDir.getRoot(), BuckConstant.BUCK_TRACE_DIR + "/build.trace");

    List<ChromeTraceEvent> resultMap =
        mapper.readValue(resultFile, new TypeReference<List<ChromeTraceEvent>>() {});

    assertEquals(17, resultMap.size());

    assertEquals("process_name", resultMap.get(0).getName());
    assertEquals(ChromeTraceEvent.Phase.METADATA, resultMap.get(0).getPhase());
    assertEquals(ImmutableMap.of("name", "buck"), resultMap.get(0).getArgs());

    assertEquals("party", resultMap.get(1).getName());
    assertEquals(ChromeTraceEvent.Phase.BEGIN, resultMap.get(1).getPhase());
    assertEquals(ImmutableMap.of("command_args", "arg1 arg2"), resultMap.get(1).getArgs());

    assertEquals("artifact_connect", resultMap.get(2).getName());
    assertEquals(ChromeTraceEvent.Phase.BEGIN, resultMap.get(2).getPhase());

    assertEquals("artifact_connect", resultMap.get(3).getName());
    assertEquals(ChromeTraceEvent.Phase.END, resultMap.get(3).getPhase());

    assertEquals("build", resultMap.get(4).getName());
    assertEquals(ChromeTraceEvent.Phase.BEGIN, resultMap.get(4).getPhase());

    assertEquals("artifact_fetch", resultMap.get(5).getName());
    assertEquals(ChromeTraceEvent.Phase.BEGIN, resultMap.get(5).getPhase());

    assertEquals("artifact_fetch", resultMap.get(6).getName());
    assertEquals(ChromeTraceEvent.Phase.END, resultMap.get(6).getPhase());

    // BuildRuleEvent.Started
    assertEquals("//fake:rule", resultMap.get(7).getName());
    assertEquals(ChromeTraceEvent.Phase.BEGIN, resultMap.get(7).getPhase());
    assertEquals(ImmutableMap.of("rule_key", "abc123"), resultMap.get(7).getArgs());

    assertEquals("fakeStep", resultMap.get(8).getName());
    assertEquals(ChromeTraceEvent.Phase.BEGIN, resultMap.get(8).getPhase());

    assertEquals("fakeStep", resultMap.get(9).getName());
    assertEquals(
        ImmutableMap.of(
            "description", "I'm a Fake Step!",
            "exit_code", "0"),
        resultMap.get(9).getArgs());
    assertEquals(ChromeTraceEvent.Phase.END, resultMap.get(9).getPhase());

    // BuildRuleEvent.Finished
    assertEquals("//fake:rule", resultMap.get(10).getName());
    assertEquals(ChromeTraceEvent.Phase.END, resultMap.get(10).getPhase());
    assertEquals(
        ImmutableMap.of(
            "cache_result", "miss",
            "success_type", "BUILT_LOCALLY"),
        resultMap.get(10).getArgs());

    assertEquals("planning", resultMap.get(11).getName());
    assertEquals(ChromeTraceEvent.Phase.BEGIN, resultMap.get(11).getPhase());
    assertEquals(ImmutableMap.of("nefarious", "true"), resultMap.get(11).getArgs());

    assertEquals("scheming", resultMap.get(12).getName());
    assertEquals(ChromeTraceEvent.Phase.BEGIN, resultMap.get(12).getPhase());
    assertEquals(ImmutableMap.of(), resultMap.get(12).getArgs());

    assertEquals("scheming", resultMap.get(13).getName());
    assertEquals(ChromeTraceEvent.Phase.END, resultMap.get(13).getPhase());
    assertEquals(ImmutableMap.of("success", "false"), resultMap.get(13).getArgs());

    assertEquals("planning", resultMap.get(14).getName());
    assertEquals(ChromeTraceEvent.Phase.END, resultMap.get(14).getPhase());
    assertEquals(ImmutableMap.of(), resultMap.get(14).getArgs());

    assertEquals("build", resultMap.get(15).getName());
    assertEquals(ChromeTraceEvent.Phase.END, resultMap.get(15).getPhase());

    assertEquals("party", resultMap.get(16).getName());
    assertEquals(ChromeTraceEvent.Phase.END, resultMap.get(16).getPhase());
    assertEquals(
        ImmutableMap.of(
            "command_args", "arg1 arg2",
            "daemon", "true"),
        resultMap.get(16).getArgs());

    verify(context);
  }