@Activate
  public void activate(ComponentContext context) {
    cfgService.registerProperties(getClass());

    modified(context);

    local = clusterService.getLocalNode().id();

    messageHandlingExecutor =
        Executors.newFixedThreadPool(
            messageHandlerThreadPoolSize,
            groupedThreads("onos/store/statistic", "message-handlers", log));

    clusterCommunicator.addSubscriber(
        GET_CURRENT,
        SERIALIZER::decode,
        this::getCurrentStatisticInternal,
        SERIALIZER::encode,
        messageHandlingExecutor);

    clusterCommunicator.addSubscriber(
        GET_PREVIOUS,
        SERIALIZER::decode,
        this::getPreviousStatisticInternal,
        SERIALIZER::encode,
        messageHandlingExecutor);

    log.info("Started");
  }
 @Deactivate
 public void deactivate() {
   cfgService.unregisterProperties(getClass(), false);
   clusterCommunicator.removeSubscriber(GET_PREVIOUS);
   clusterCommunicator.removeSubscriber(GET_CURRENT);
   messageHandlingExecutor.shutdown();
   log.info("Stopped");
 }
  @Override
  public CompletableFuture<Void> destroy() {
    destroyed = true;

    executor.shutdown();
    backgroundExecutor.shutdown();
    communicationExecutor.shutdown();

    listeners.clear();

    clusterCommunicator.removeSubscriber(updateMessageSubject);
    clusterCommunicator.removeSubscriber(updateRequestSubject);
    clusterCommunicator.removeSubscriber(antiEntropyAdvertisementSubject);
    return CompletableFuture.completedFuture(null);
  }
  @Before
  public void setUp() throws Exception {
    clusterCommunicator = createNiceMock(ClusterCommunicationService.class);
    clusterCommunicator.addSubscriber(
        anyObject(MessageSubject.class),
        anyObject(ClusterMessageHandler.class),
        anyObject(ExecutorService.class));
    expectLastCall().anyTimes();
    replay(clusterCommunicator);
    ClusterService clusterService = new TestClusterService();

    testGossipDeviceStore =
        new TestGossipDeviceStore(deviceClockService, clusterService, clusterCommunicator);
    testGossipDeviceStore.mastershipService = new TestMastershipService();

    TestDatabaseManager testDatabaseManager = new TestDatabaseManager();
    testDatabaseManager.init(clusterService, clusterCommunicator);
    testGossipDeviceStore.storageService = testDatabaseManager;
    testGossipDeviceStore.deviceClockService = deviceClockService;

    gossipDeviceStore = testGossipDeviceStore;
    gossipDeviceStore.activate();
    deviceStore = gossipDeviceStore;
    verify(clusterCommunicator);
    reset(clusterCommunicator);
  }
 @Deactivate
 public void deactivate() {
   communicationService.removeSubscriber(PACKET_OUT_SUBJECT);
   messageHandlingExecutor.shutdown();
   tracker = null;
   log.info("Stopped");
 }
  @Override
  public Iterable<FlowEntry> getFlowEntries(DeviceId deviceId) {
    NodeId master = mastershipService.getMasterFor(deviceId);

    if (master == null) {
      log.debug("Failed to getFlowEntries: No master for {}", deviceId);
      return Collections.emptyList();
    }

    if (Objects.equal(local, master)) {
      return flowTable.getFlowEntries(deviceId);
    }

    log.trace(
        "Forwarding getFlowEntries to {}, which is the primary (master) for device {}",
        master,
        deviceId);

    return Tools.futureGetOrElse(
        clusterCommunicator.sendAndReceive(
            deviceId,
            FlowStoreMessageSubjects.GET_DEVICE_FLOW_ENTRIES,
            SERIALIZER::encode,
            SERIALIZER::decode,
            master),
        FLOW_RULE_STORE_TIMEOUT_MILLIS,
        TimeUnit.MILLISECONDS,
        Collections.emptyList());
  }
  @Override
  public FlowEntry getFlowEntry(FlowRule rule) {
    NodeId master = mastershipService.getMasterFor(rule.deviceId());

    if (master == null) {
      log.debug("Failed to getFlowEntry: No master for {}", rule.deviceId());
      return null;
    }

    if (Objects.equal(local, master)) {
      return flowTable.getFlowEntry(rule);
    }

    log.trace(
        "Forwarding getFlowEntry to {}, which is the primary (master) for device {}",
        master,
        rule.deviceId());

    return Tools.futureGetOrElse(
        clusterCommunicator.sendAndReceive(
            rule,
            FlowStoreMessageSubjects.GET_FLOW_ENTRY,
            SERIALIZER::encode,
            SERIALIZER::decode,
            master),
        FLOW_RULE_STORE_TIMEOUT_MILLIS,
        TimeUnit.MILLISECONDS,
        null);
  }
Example #8
 protected <K, V> void unregisterMap(DefaultAsyncConsistentMap<K, V> map) {
   if (maps.remove(map.name()) != null) {
     clusterCommunicator.removeSubscriber(mapUpdatesSubject(map.name()));
   }
   if (map.applicationId() != null) {
     mapsByApplication.remove(map.applicationId(), map);
   }
 }
  private void registerMessageHandlers(ExecutorService executor) {

    clusterCommunicator.addSubscriber(APPLY_BATCH_FLOWS, new OnStoreBatch(), executor);
    clusterCommunicator.<FlowRuleBatchEvent>addSubscriber(
        REMOTE_APPLY_COMPLETED, SERIALIZER::decode, this::notifyDelegate, executor);
    clusterCommunicator.addSubscriber(
        GET_FLOW_ENTRY, SERIALIZER::decode, flowTable::getFlowEntry, SERIALIZER::encode, executor);
    clusterCommunicator.addSubscriber(
        GET_DEVICE_FLOW_ENTRIES,
        SERIALIZER::decode,
        flowTable::getFlowEntries,
        SERIALIZER::encode,
        executor);
    clusterCommunicator.addSubscriber(
        REMOVE_FLOW_ENTRY,
        SERIALIZER::decode,
        this::removeFlowRuleInternal,
        SERIALIZER::encode,
        executor);
    clusterCommunicator.addSubscriber(
        FLOW_TABLE_BACKUP,
        SERIALIZER::decode,
        flowTable::onBackupReceipt,
        SERIALIZER::encode,
        executor);
  }
  @Override
  public void storeBatch(FlowRuleBatchOperation operation) {
    if (operation.getOperations().isEmpty()) {
      notifyDelegate(
          FlowRuleBatchEvent.completed(
              new FlowRuleBatchRequest(operation.id(), Collections.emptySet()),
              new CompletedBatchOperation(true, Collections.emptySet(), operation.deviceId())));
      return;
    }

    DeviceId deviceId = operation.deviceId();
    NodeId master = mastershipService.getMasterFor(deviceId);

    if (master == null) {
      log.warn("No master for {} : flows will be marked for removal", deviceId);

      updateStoreInternal(operation);

      notifyDelegate(
          FlowRuleBatchEvent.completed(
              new FlowRuleBatchRequest(operation.id(), Collections.emptySet()),
              new CompletedBatchOperation(true, Collections.emptySet(), operation.deviceId())));
      return;
    }

    if (Objects.equal(local, master)) {
      storeBatchInternal(operation);
      return;
    }

    log.trace(
        "Forwarding storeBatch to {}, which is the primary (master) for device {}",
        master,
        deviceId);

    clusterCommunicator
        .unicast(operation, APPLY_BATCH_FLOWS, SERIALIZER::encode, master)
        .whenComplete(
            (result, error) -> {
              if (error != null) {
                log.warn("Failed to storeBatch: {} to {}", operation, master, error);

                Set<FlowRule> allFailures =
                    operation
                        .getOperations()
                        .stream()
                        .map(op -> op.target())
                        .collect(Collectors.toSet());

                notifyDelegate(
                    FlowRuleBatchEvent.completed(
                        new FlowRuleBatchRequest(operation.id(), Collections.emptySet()),
                        new CompletedBatchOperation(false, allFailures, deviceId)));
              }
            });
  }
Example #11
  private <T> void resetCommunicatorExpectingSingleBroadcast(
      Capture<T> message, Capture<MessageSubject> subject, Capture<Function<T, byte[]>> encoder) {

    message.reset();
    subject.reset();
    encoder.reset();
    reset(clusterCommunicator);
    clusterCommunicator.broadcast(capture(message), capture(subject), capture(encoder));
    expectLastCall().once();
    replay(clusterCommunicator);
  }
 private void sendUpdateRequestToPeer(NodeId peer, Set<K> keys) {
   UpdateRequest<K> request = new UpdateRequest<>(localNodeId, keys);
   clusterCommunicator
       .unicast(request, updateRequestSubject, serializer::encode, peer)
       .whenComplete(
           (result, error) -> {
             if (error != null) {
               log.debug("Failed to send update request to {}: {}", peer, error.getMessage());
             }
           });
 }
 @Override
 public void batchOperationComplete(FlowRuleBatchEvent event) {
   // FIXME: need a per device pending response
   NodeId nodeId = pendingResponses.remove(event.subject().batchId());
   if (nodeId == null) {
     notifyDelegate(event);
   } else {
     // TODO check unicast return value
     clusterCommunicator.unicast(event, REMOTE_APPLY_COMPLETED, SERIALIZER::encode, nodeId);
     // error log: log.warn("Failed to respond to peer for batch operation result");
   }
 }
 private void unregisterMessageHandlers() {
   clusterCommunicator.removeSubscriber(REMOVE_FLOW_ENTRY);
   clusterCommunicator.removeSubscriber(GET_DEVICE_FLOW_ENTRIES);
   clusterCommunicator.removeSubscriber(GET_FLOW_ENTRY);
   clusterCommunicator.removeSubscriber(APPLY_BATCH_FLOWS);
   clusterCommunicator.removeSubscriber(REMOTE_APPLY_COMPLETED);
   clusterCommunicator.removeSubscriber(FLOW_TABLE_BACKUP);
 }
Example #15
 private void putDevice(DeviceId deviceId, String swVersion, SparseAnnotations... annotations) {
   DeviceDescription description =
       new DefaultDeviceDescription(
           deviceId.uri(), SWITCH, MFR, HW, swVersion, SN, CID, annotations);
   reset(clusterCommunicator);
   clusterCommunicator.<InternalDeviceEvent>broadcast(
       anyObject(InternalDeviceEvent.class),
       anyObject(MessageSubject.class),
       anyObject(Function.class));
   expectLastCall().anyTimes();
   replay(clusterCommunicator);
   deviceStore.createOrUpdateDevice(PID, deviceId, description);
   verify(clusterCommunicator);
 }
  @Activate
  public void activate() {
    messageHandlingExecutor =
        Executors.newFixedThreadPool(
            messageHandlerThreadPoolSize,
            groupedThreads("onos/store/packet", "message-handlers", log));

    communicationService.<OutboundPacket>addSubscriber(
        PACKET_OUT_SUBJECT,
        SERIALIZER::decode,
        packet -> notifyDelegate(new PacketEvent(Type.EMIT, packet)),
        messageHandlingExecutor);

    tracker = new PacketRequestTracker();

    log.info("Started");
  }
 private void sendAdvertisementToPeer(NodeId peer) {
   long adCreationTime = System.currentTimeMillis();
   AntiEntropyAdvertisement<K> ad = createAdvertisement();
   clusterCommunicator
       .sendAndReceive(
           ad, antiEntropyAdvertisementSubject, serializer::encode, serializer::decode, peer)
       .whenComplete(
           (result, error) -> {
             if (error != null) {
               log.debug(
                   "Failed to send anti-entropy advertisement to {}: {}",
                   peer,
                   error.getMessage());
             } else if (result == AntiEntropyResponse.PROCESSED) {
               antiEntropyTimes.put(peer, adCreationTime);
             }
           });
 }
Example #18
  protected <K, V> DefaultAsyncConsistentMap<K, V> registerMap(
      DefaultAsyncConsistentMap<K, V> map) {
    DefaultAsyncConsistentMap<K, V> existing = maps.putIfAbsent(map.name(), map);
    if (existing != null) {
      // FIXME: We need to cleanly support different map instances with same name.
      log.info("Map by name {} already exists", map.name());
      return existing;
    } else {
      if (map.applicationId() != null) {
        mapsByApplication.put(map.applicationId(), map);
      }
    }

    clusterCommunicator.<MapEvent<K, V>>addSubscriber(
        mapUpdatesSubject(map.name()),
        map.serializer()::decode,
        map::notifyLocalListeners,
        eventDispatcher);
    return map;
  }
Example #19
 @Deactivate
 public void deactivate() {
   CompletableFuture.allOf(inMemoryDatabase.close(), partitionedDatabase.close())
       .thenCompose(v -> coordinator.close())
       .whenComplete(
           (result, error) -> {
             if (error != null) {
               log.warn("Failed to cleanly close databases.", error);
             } else {
               log.info("Successfully closed databases.");
             }
           });
   clusterCommunicator.removeSubscriber(QUEUE_UPDATED_TOPIC);
   maps.values().forEach(this::unregisterMap);
   if (applicationService != null) {
     applicationService.removeListener(appListener);
   }
   eventDispatcher.shutdown();
   queuePollExecutor.shutdown();
   log.info("Stopped");
 }
  @Override
  public Set<FlowEntry> getPreviousFlowStatistic(ConnectPoint connectPoint) {
    final DeviceId deviceId = connectPoint.deviceId();

    NodeId master = mastershipService.getMasterFor(deviceId);
    if (master == null) {
      log.warn("No master for {}", deviceId);
      return Collections.emptySet();
    }

    if (Objects.equal(local, master)) {
      return getPreviousStatisticInternal(connectPoint);
    } else {
      return Tools.futureGetOrElse(
          clusterCommunicator.sendAndReceive(
              connectPoint, GET_PREVIOUS, SERIALIZER::encode, SERIALIZER::decode, master),
          STATISTIC_STORE_TIMEOUT_MILLIS,
          TimeUnit.MILLISECONDS,
          Collections.emptySet());
    }
  }
  @Override
  public void emit(OutboundPacket packet) {
    NodeId myId = clusterService.getLocalNode().id();
    NodeId master = mastershipService.getMasterFor(packet.sendThrough());

    if (master == null) {
      return;
    }

    if (myId.equals(master)) {
      notifyDelegate(new PacketEvent(Type.EMIT, packet));
      return;
    }

    communicationService
        .unicast(packet, PACKET_OUT_SUBJECT, SERIALIZER::encode, master)
        .whenComplete(
            (r, error) -> {
              if (error != null) {
                log.warn("Failed to send packet-out to {}", master, error);
              }
            });
  }
Example #22
  @Activate
  public void activate() {
    localNodeId = clusterService.getLocalNode().id();
    // load database configuration
    File databaseDefFile = new File(PARTITION_DEFINITION_FILE);
    log.info("Loading database definition: {}", databaseDefFile.getAbsolutePath());

    Map<String, Set<NodeInfo>> partitionMap;
    try {
      DatabaseDefinitionStore databaseDefStore = new DatabaseDefinitionStore(databaseDefFile);
      if (!databaseDefFile.exists()) {
        createDefaultDatabaseDefinition(databaseDefStore);
      }
      partitionMap = databaseDefStore.read().getPartitions();
    } catch (IOException e) {
      throw new IllegalStateException("Failed to load database config", e);
    }

    String[] activeNodeUris =
        partitionMap
            .values()
            .stream()
            .reduce((s1, s2) -> Sets.union(s1, s2))
            .get()
            .stream()
            .map(this::nodeToUri)
            .toArray(String[]::new);

    String localNodeUri = nodeToUri(NodeInfo.of(clusterService.getLocalNode()));
    Protocol protocol = new CopycatCommunicationProtocol(clusterService, clusterCommunicator);

    ClusterConfig clusterConfig =
        new ClusterConfig()
            .withProtocol(protocol)
            .withElectionTimeout(electionTimeoutMillis(activeNodeUris))
            .withHeartbeatInterval(heartbeatTimeoutMillis(activeNodeUris))
            .withMembers(activeNodeUris)
            .withLocalMember(localNodeUri);

    CopycatConfig copycatConfig =
        new CopycatConfig()
            .withName("onos")
            .withClusterConfig(clusterConfig)
            .withDefaultSerializer(new DatabaseSerializer())
            .withDefaultExecutor(
                Executors.newSingleThreadExecutor(
                    new NamedThreadFactory("copycat-coordinator-%d")));

    coordinator = new DefaultClusterCoordinator(copycatConfig.resolve());

    DatabaseConfig inMemoryDatabaseConfig =
        newDatabaseConfig(BASE_PARTITION_NAME, newInMemoryLog(), activeNodeUris);
    inMemoryDatabase =
        coordinator.getResource(
            inMemoryDatabaseConfig.getName(),
            inMemoryDatabaseConfig
                .resolve(clusterConfig)
                .withSerializer(copycatConfig.getDefaultSerializer())
                .withDefaultExecutor(copycatConfig.getDefaultExecutor()));

    List<Database> partitions =
        partitionMap
            .entrySet()
            .stream()
            .map(
                entry -> {
                  String[] replicas =
                      entry.getValue().stream().map(this::nodeToUri).toArray(String[]::new);
                  return newDatabaseConfig(entry.getKey(), newPersistentLog(), replicas);
                })
            .map(
                config -> {
                  Database db =
                      coordinator.getResource(
                          config.getName(),
                          config
                              .resolve(clusterConfig)
                              .withSerializer(copycatConfig.getDefaultSerializer())
                              .withDefaultExecutor(copycatConfig.getDefaultExecutor()));
                  return db;
                })
            .collect(Collectors.toList());

    partitionedDatabase = new PartitionedDatabase("onos-store", partitions);

    CompletableFuture<Void> status =
        coordinator
            .open()
            .thenCompose(
                v ->
                    CompletableFuture.allOf(inMemoryDatabase.open(), partitionedDatabase.open())
                        .whenComplete(
                            (db, error) -> {
                              if (error != null) {
                                log.error("Failed to initialize database.", error);
                              } else {
                                log.info("Successfully initialized database.");
                              }
                            }));

    Futures.getUnchecked(status);

    transactionManager = new TransactionManager(partitionedDatabase, consistentMapBuilder());
    partitionedDatabase.setTransactionManager(transactionManager);

    eventDispatcher =
        Executors.newSingleThreadExecutor(
            groupedThreads("onos/store/manager", "map-event-dispatcher"));

    queuePollExecutor =
        Executors.newFixedThreadPool(4, groupedThreads("onos/store/manager", "queue-poll-handler"));

    clusterCommunicator.<String>addSubscriber(
        QUEUE_UPDATED_TOPIC,
        data -> new String(data, Charsets.UTF_8),
        name -> {
          DefaultDistributedQueue q = queues.get(name);
          if (q != null) {
            q.tryPoll();
          }
        },
        queuePollExecutor);
    log.info("Started");
  }
  /**
   * Creates a new eventually consistent map shared amongst multiple instances.
   *
   * <p>See {@link org.onosproject.store.service.EventuallyConsistentMapBuilder} for more
   * description of the parameters expected by the map.
   *
   * @param mapName a String identifier for the map.
   * @param clusterService the cluster service
   * @param clusterCommunicator the cluster communications service
   * @param ns a Kryo namespace that can serialize both K and V
   * @param timestampProvider provider of timestamps for K and V
   * @param peerUpdateFunction function that provides the set of nodes to update immediately when
   *     there are writes to the map
   * @param eventExecutor executor to use for processing incoming events from peers
   * @param communicationExecutor executor to use for sending events to peers
   * @param backgroundExecutor executor to use for background anti-entropy tasks
   * @param tombstonesDisabled true if this map should not maintain tombstones
   * @param antiEntropyPeriod period at which the anti-entropy task should run
   * @param antiEntropyTimeUnit time unit for anti-entropy period
   * @param convergeFaster make anti-entropy try to converge faster
   * @param persistent persist data to disk
   * @param persistenceService persistence service
   */
  EventuallyConsistentMapImpl(
      String mapName,
      ClusterService clusterService,
      ClusterCommunicationService clusterCommunicator,
      KryoNamespace ns,
      BiFunction<K, V, Timestamp> timestampProvider,
      BiFunction<K, V, Collection<NodeId>> peerUpdateFunction,
      ExecutorService eventExecutor,
      ExecutorService communicationExecutor,
      ScheduledExecutorService backgroundExecutor,
      boolean tombstonesDisabled,
      long antiEntropyPeriod,
      TimeUnit antiEntropyTimeUnit,
      boolean convergeFaster,
      boolean persistent,
      PersistenceService persistenceService) {
    this.mapName = mapName;
    this.serializer = createSerializer(ns);
    this.persistenceService = persistenceService;
    this.persistent = persistent;
    if (persistent) {
      items =
          this.persistenceService
              .<K, MapValue<V>>persistentMapBuilder()
              .withName(PERSISTENT_LOCAL_MAP_NAME)
              .withSerializer(this.serializer)
              .build();
    } else {
      items = Maps.newConcurrentMap();
    }
    senderPending = Maps.newConcurrentMap();
    destroyedMessage = mapName + ERROR_DESTROYED;

    this.clusterService = clusterService;
    this.clusterCommunicator = clusterCommunicator;
    this.localNodeId = clusterService.getLocalNode().id();

    this.timestampProvider = timestampProvider;

    if (peerUpdateFunction != null) {
      this.peerUpdateFunction = peerUpdateFunction;
    } else {
      this.peerUpdateFunction =
          (key, value) ->
              clusterService
                  .getNodes()
                  .stream()
                  .map(ControllerNode::id)
                  .filter(nodeId -> !nodeId.equals(localNodeId))
                  .collect(Collectors.toList());
    }

    if (eventExecutor != null) {
      this.executor = eventExecutor;
    } else {
      // should be a normal executor; it's used for receiving messages
      this.executor =
          Executors.newFixedThreadPool(8, groupedThreads("onos/ecm", mapName + "-fg-%d", log));
    }

    if (communicationExecutor != null) {
      this.communicationExecutor = communicationExecutor;
    } else {
      // sending executor; should be capped
      // TODO this probably doesn't need to be bounded anymore
      this.communicationExecutor =
          newFixedThreadPool(8, groupedThreads("onos/ecm", mapName + "-publish-%d", log));
    }

    if (backgroundExecutor != null) {
      this.backgroundExecutor = backgroundExecutor;
    } else {
      this.backgroundExecutor =
          newSingleThreadScheduledExecutor(groupedThreads("onos/ecm", mapName + "-bg-%d", log));
    }

    // start anti-entropy thread
    this.backgroundExecutor.scheduleAtFixedRate(
        this::sendAdvertisement, initialDelaySec, antiEntropyPeriod, antiEntropyTimeUnit);

    updateMessageSubject = new MessageSubject("ecm-" + mapName + "-update");
    clusterCommunicator.addSubscriber(
        updateMessageSubject, serializer::decode, this::processUpdates, this.executor);

    antiEntropyAdvertisementSubject = new MessageSubject("ecm-" + mapName + "-anti-entropy");
    clusterCommunicator.addSubscriber(
        antiEntropyAdvertisementSubject,
        serializer::decode,
        this::handleAntiEntropyAdvertisement,
        serializer::encode,
        this.backgroundExecutor);

    updateRequestSubject = new MessageSubject("ecm-" + mapName + "-update-request");
    clusterCommunicator.addSubscriber(
        updateRequestSubject,
        serializer::decode,
        this::handleUpdateRequests,
        this.backgroundExecutor);

    if (!tombstonesDisabled) {
      previousTombstonePurgeTime = 0;
      this.backgroundExecutor.scheduleWithFixedDelay(
          this::purgeTombstones, initialDelaySec, antiEntropyPeriod, TimeUnit.SECONDS);
    }

    this.tombstonesDisabled = tombstonesDisabled;
    this.lightweightAntiEntropy = !convergeFaster;

    // Initiate first round of Gossip
    this.bootstrap();
  }
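For context, the constructor above is not normally invoked directly; the Javadoc points callers at EventuallyConsistentMapBuilder, which is obtained from the StorageService. The following is a minimal usage sketch only, assuming an injected StorageService field named storageService; the map name, key/value types, and Kryo registrations are illustrative placeholders, not taken from the surrounding code.

  // Sketch only: obtaining an eventually consistent map via the builder referenced
  // in the Javadoc above. "example-ec-map", the DeviceId/String types, and the Kryo
  // registrations are placeholders.
  protected EventuallyConsistentMap<DeviceId, String> createExampleMap() {
    KryoNamespace.Builder serializer =
        KryoNamespace.newBuilder().register(KryoNamespaces.API).register(DeviceId.class);

    return storageService
        .<DeviceId, String>eventuallyConsistentMapBuilder()
        .withName("example-ec-map")
        .withSerializer(serializer)
        .withTimestampProvider((k, v) -> new WallClockTimestamp())
        .build();
  }

With the defaults shown in the constructor, writes fan out to all other cluster nodes via the update message subject, while the background executor drives the periodic anti-entropy advertisements and tombstone purging registered above.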
  @Override
  public FlowRuleEvent removeFlowRule(FlowEntry rule) {
    final DeviceId deviceId = rule.deviceId();
    NodeId master = mastershipService.getMasterFor(deviceId);

    if (Objects.equal(local, master)) {
      // bypass and handle it locally
      return removeFlowRuleInternal(rule);
    }

    if (master == null) {
      log.warn("Failed to removeFlowRule: No master for {}", deviceId);
      // TODO: revisit if this should be null (="no-op") or Exception
      return null;
    }

    log.trace(
        "Forwarding removeFlowRule to {}, which is the master for device {}", master, deviceId);

    return Futures.get(
        clusterCommunicator.sendAndReceive(
            rule, REMOVE_FLOW_ENTRY, SERIALIZER::encode, SERIALIZER::decode, master),
        FLOW_RULE_STORE_TIMEOUT_MILLIS,
        TimeUnit.MILLISECONDS,
        RuntimeException.class);
  }