Example #1
@Activate
  public void activate(ComponentContext context) {
    cfgService.registerProperties(getClass());

    modified(context);

    local = clusterService.getLocalNode().id();

    messageHandlingExecutor =
        Executors.newFixedThreadPool(
            messageHandlerThreadPoolSize,
            groupedThreads("onos/store/statistic", "message-handlers", log));

    clusterCommunicator.addSubscriber(
        GET_CURRENT,
        SERIALIZER::decode,
        this::getCurrentStatisticInternal,
        SERIALIZER::encode,
        messageHandlingExecutor);

    clusterCommunicator.addSubscriber(
        GET_PREVIOUS,
        SERIALIZER::decode,
        this::getPreviousStatisticInternal,
        SERIALIZER::encode,
        messageHandlingExecutor);

    log.info("Started");
  }
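For reference, the client side of these request/reply handlers goes through sendAndReceive on the same subject. A minimal sketch, assuming the ClusterCommunicationService request/reply API; 'master' and 'connectPoint' are illustrative names, not part of the example above:

  // Sketch: ask the master node for its current statistics. The handler
  // registered above decodes the request, runs getCurrentStatisticInternal,
  // and encodes the reply back to this node.
  CompletableFuture<Set<FlowEntry>> reply =
      clusterCommunicator.sendAndReceive(
          connectPoint,        // request payload
          GET_CURRENT,         // subject registered in activate()
          SERIALIZER::encode,  // request encoder
          SERIALIZER::decode,  // reply decoder
          master);             // node that owns the statistics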
Example #2
  @Before
  public void setUp() throws Exception {
    clusterCommunicator = createNiceMock(ClusterCommunicationService.class);
    // Expect the subscriber registrations performed during activate();
    // the registrations are checked by verify() after activation below.
    clusterCommunicator.addSubscriber(
        anyObject(MessageSubject.class),
        anyObject(ClusterMessageHandler.class),
        anyObject(ExecutorService.class));
    expectLastCall().anyTimes();
    replay(clusterCommunicator);
    ClusterService clusterService = new TestClusterService();

    testGossipDeviceStore =
        new TestGossipDeviceStore(deviceClockService, clusterService, clusterCommunicator);
    testGossipDeviceStore.mastershipService = new TestMastershipService();

    TestDatabaseManager testDatabaseManager = new TestDatabaseManager();
    testDatabaseManager.init(clusterService, clusterCommunicator);
    testGossipDeviceStore.storageService = testDatabaseManager;
    testGossipDeviceStore.deviceClockService = deviceClockService;

    gossipDeviceStore = testGossipDeviceStore;
    gossipDeviceStore.activate();
    deviceStore = gossipDeviceStore;
    verify(clusterCommunicator);
    reset(clusterCommunicator);
  }
Example #3
  private void registerMessageHandlers(ExecutorService executor) {

    clusterCommunicator.addSubscriber(APPLY_BATCH_FLOWS, new OnStoreBatch(), executor);
    clusterCommunicator.<FlowRuleBatchEvent>addSubscriber(
        REMOTE_APPLY_COMPLETED, SERIALIZER::decode, this::notifyDelegate, executor);
    clusterCommunicator.addSubscriber(
        GET_FLOW_ENTRY, SERIALIZER::decode, flowTable::getFlowEntry, SERIALIZER::encode, executor);
    clusterCommunicator.addSubscriber(
        GET_DEVICE_FLOW_ENTRIES,
        SERIALIZER::decode,
        flowTable::getFlowEntries,
        SERIALIZER::encode,
        executor);
    clusterCommunicator.addSubscriber(
        REMOVE_FLOW_ENTRY,
        SERIALIZER::decode,
        this::removeFlowRuleInternal,
        SERIALIZER::encode,
        executor);
    clusterCommunicator.addSubscriber(
        FLOW_TABLE_BACKUP,
        SERIALIZER::decode,
        flowTable::onBackupReceipt,
        SERIALIZER::encode,
        executor);
  }
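A component like this normally undoes these registrations on deactivation. A minimal sketch of the counterpart method, assuming removeSubscriber(MessageSubject) on ClusterCommunicationService:

  private void unregisterMessageHandlers() {
    // Mirror of registerMessageHandlers(): one removal per subject.
    clusterCommunicator.removeSubscriber(APPLY_BATCH_FLOWS);
    clusterCommunicator.removeSubscriber(REMOTE_APPLY_COMPLETED);
    clusterCommunicator.removeSubscriber(GET_FLOW_ENTRY);
    clusterCommunicator.removeSubscriber(GET_DEVICE_FLOW_ENTRIES);
    clusterCommunicator.removeSubscriber(REMOVE_FLOW_ENTRY);
    clusterCommunicator.removeSubscriber(FLOW_TABLE_BACKUP);
  }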
Example #4
  @Activate
  public void activate() {
    messageHandlingExecutor =
        Executors.newFixedThreadPool(
            messageHandlerThreadPoolSize,
            groupedThreads("onos/store/packet", "message-handlers", log));

    communicationService.<OutboundPacket>addSubscriber(
        PACKET_OUT_SUBJECT,
        SERIALIZER::decode,
        packet -> notifyDelegate(new PacketEvent(Type.EMIT, packet)),
        messageHandlingExecutor);

    tracker = new PacketRequestTracker();

    log.info("Started");
  }
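The subscriber above is the receiving half; the sending half pairs with it by unicasting on the same subject. A minimal sketch, assuming the device's master NodeId has already been resolved (e.g. via a mastership lookup); the error handling shown is illustrative:

  // Sketch: deliver a packet-out request to the instance that masters
  // the target device. The encoder must match the decoder above.
  communicationService
      .unicast(packet, PACKET_OUT_SUBJECT, SERIALIZER::encode, master)
      .whenComplete((result, error) -> {
        if (error != null) {
          log.warn("Failed to send packet-out to {}", master, error);
        }
      });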
Example #5
  protected <K, V> DefaultAsyncConsistentMap<K, V> registerMap(
      DefaultAsyncConsistentMap<K, V> map) {
    DefaultAsyncConsistentMap<K, V> existing = maps.putIfAbsent(map.name(), map);
    if (existing != null) {
      // FIXME: We need to cleanly support different map instances with same name.
      log.info("Map by name {} already exists", map.name());
      return existing;
    } else {
      if (map.applicationId() != null) {
        mapsByApplication.put(map.applicationId(), map);
      }
    }

    clusterCommunicator.<MapEvent<K, V>>addSubscriber(
        mapUpdatesSubject(map.name()),
        map.serializer()::decode,
        map::notifyLocalListeners,
        eventDispatcher);
    return map;
  }
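When a map is destroyed, this registration would be reversed: forget the instance and drop its update subscription. A hypothetical counterpart sketch (unregisterMap is not part of the example above):

  protected <K, V> void unregisterMap(DefaultAsyncConsistentMap<K, V> map) {
    // Hypothetical inverse of registerMap(): stop tracking the instance
    // and stop listening for remote MapEvents on its update subject.
    if (maps.remove(map.name()) != null) {
      clusterCommunicator.removeSubscriber(mapUpdatesSubject(map.name()));
    }
    if (map.applicationId() != null) {
      mapsByApplication.remove(map.applicationId(), map);
    }
  }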
Example #6
  @Activate
  public void activate() {
    localNodeId = clusterService.getLocalNode().id();
    // load database configuration
    File databaseDefFile = new File(PARTITION_DEFINITION_FILE);
    log.info("Loading database definition: {}", databaseDefFile.getAbsolutePath());

    Map<String, Set<NodeInfo>> partitionMap;
    try {
      DatabaseDefinitionStore databaseDefStore = new DatabaseDefinitionStore(databaseDefFile);
      if (!databaseDefFile.exists()) {
        createDefaultDatabaseDefinition(databaseDefStore);
      }
      partitionMap = databaseDefStore.read().getPartitions();
    } catch (IOException e) {
      throw new IllegalStateException("Failed to load database config", e);
    }

    String[] activeNodeUris =
        partitionMap
            .values()
            .stream()
            .reduce((s1, s2) -> Sets.union(s1, s2))
            .get()
            .stream()
            .map(this::nodeToUri)
            .toArray(String[]::new);

    String localNodeUri = nodeToUri(NodeInfo.of(clusterService.getLocalNode()));
    Protocol protocol = new CopycatCommunicationProtocol(clusterService, clusterCommunicator);

    ClusterConfig clusterConfig =
        new ClusterConfig()
            .withProtocol(protocol)
            .withElectionTimeout(electionTimeoutMillis(activeNodeUris))
            .withHeartbeatInterval(heartbeatTimeoutMillis(activeNodeUris))
            .withMembers(activeNodeUris)
            .withLocalMember(localNodeUri);

    CopycatConfig copycatConfig =
        new CopycatConfig()
            .withName("onos")
            .withClusterConfig(clusterConfig)
            .withDefaultSerializer(new DatabaseSerializer())
            .withDefaultExecutor(
                Executors.newSingleThreadExecutor(
                    new NamedThreadFactory("copycat-coordinator-%d")));

    coordinator = new DefaultClusterCoordinator(copycatConfig.resolve());

    DatabaseConfig inMemoryDatabaseConfig =
        newDatabaseConfig(BASE_PARTITION_NAME, newInMemoryLog(), activeNodeUris);
    inMemoryDatabase =
        coordinator.getResource(
            inMemoryDatabaseConfig.getName(),
            inMemoryDatabaseConfig
                .resolve(clusterConfig)
                .withSerializer(copycatConfig.getDefaultSerializer())
                .withDefaultExecutor(copycatConfig.getDefaultExecutor()));

    List<Database> partitions =
        partitionMap
            .entrySet()
            .stream()
            .map(
                entry -> {
                  String[] replicas =
                      entry.getValue().stream().map(this::nodeToUri).toArray(String[]::new);
                  return newDatabaseConfig(entry.getKey(), newPersistentLog(), replicas);
                })
            .map(
                config -> {
                  Database db =
                      coordinator.getResource(
                          config.getName(),
                          config
                              .resolve(clusterConfig)
                              .withSerializer(copycatConfig.getDefaultSerializer())
                              .withDefaultExecutor(copycatConfig.getDefaultExecutor()));
                  return db;
                })
            .collect(Collectors.toList());

    partitionedDatabase = new PartitionedDatabase("onos-store", partitions);

    CompletableFuture<Void> status =
        coordinator
            .open()
            .thenCompose(
                v ->
                    CompletableFuture.allOf(inMemoryDatabase.open(), partitionedDatabase.open())
                        .whenComplete(
                            (db, error) -> {
                              if (error != null) {
                                log.error("Failed to initialize database.", error);
                              } else {
                                log.info("Successfully initialized database.");
                              }
                            }));

    Futures.getUnchecked(status);

    transactionManager = new TransactionManager(partitionedDatabase, consistentMapBuilder());
    partitionedDatabase.setTransactionManager(transactionManager);

    eventDispatcher =
        Executors.newSingleThreadExecutor(
            groupedThreads("onos/store/manager", "map-event-dispatcher"));

    queuePollExecutor =
        Executors.newFixedThreadPool(4, groupedThreads("onos/store/manager", "queue-poll-handler"));

    clusterCommunicator.<String>addSubscriber(
        QUEUE_UPDATED_TOPIC,
        data -> new String(data, Charsets.UTF_8),
        name -> {
          DefaultDistributedQueue q = queues.get(name);
          if (q != null) {
            q.tryPoll();
          }
        },
        queuePollExecutor);
    log.info("Started");
  }
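The queue-poll subscriber registered above expects the raw UTF-8 queue name as payload, so the producing side would broadcast the name whenever work is added. A minimal sketch; where this trigger lives is an assumption:

  // Sketch: wake up consumers of queue 'name' on all peers. The encoder
  // must mirror the String decoder registered above (raw UTF-8, no Kryo).
  clusterCommunicator.broadcast(
      name, QUEUE_UPDATED_TOPIC, s -> s.getBytes(Charsets.UTF_8));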
Example #7
  /**
   * Creates a new eventually consistent map shared amongst multiple instances.
   *
   * <p>See {@link org.onosproject.store.service.EventuallyConsistentMapBuilder} for a fuller
   * description of the parameters expected by the map.
   *
   * @param mapName a String identifier for the map.
   * @param clusterService the cluster service
   * @param clusterCommunicator the cluster communications service
   * @param ns a Kryo namespace that can serialize both K and V
   * @param timestampProvider provider of timestamps for K and V
   * @param peerUpdateFunction function that provides the set of nodes to update immediately when
   *     there are writes to the map
   * @param eventExecutor executor to use for processing incoming events from peers
   * @param communicationExecutor executor to use for sending events to peers
   * @param backgroundExecutor executor to use for background anti-entropy tasks
   * @param tombstonesDisabled true if this map should not maintain tombstones
   * @param antiEntropyPeriod period at which the anti-entropy task should run
   * @param antiEntropyTimeUnit time unit for anti-entropy period
   * @param convergeFaster make anti-entropy try to converge faster
   * @param persistent persist data to disk
   * @param persistenceService persistence service
   */
  EventuallyConsistentMapImpl(
      String mapName,
      ClusterService clusterService,
      ClusterCommunicationService clusterCommunicator,
      KryoNamespace ns,
      BiFunction<K, V, Timestamp> timestampProvider,
      BiFunction<K, V, Collection<NodeId>> peerUpdateFunction,
      ExecutorService eventExecutor,
      ExecutorService communicationExecutor,
      ScheduledExecutorService backgroundExecutor,
      boolean tombstonesDisabled,
      long antiEntropyPeriod,
      TimeUnit antiEntropyTimeUnit,
      boolean convergeFaster,
      boolean persistent,
      PersistenceService persistenceService) {
    this.mapName = mapName;
    this.serializer = createSerializer(ns);
    this.persistenceService = persistenceService;
    this.persistent = persistent;
    if (persistent) {
      items =
          this.persistenceService
              .<K, MapValue<V>>persistentMapBuilder()
              .withName(PERSISTENT_LOCAL_MAP_NAME)
              .withSerializer(this.serializer)
              .build();
    } else {
      items = Maps.newConcurrentMap();
    }
    senderPending = Maps.newConcurrentMap();
    destroyedMessage = mapName + ERROR_DESTROYED;

    this.clusterService = clusterService;
    this.clusterCommunicator = clusterCommunicator;
    this.localNodeId = clusterService.getLocalNode().id();

    this.timestampProvider = timestampProvider;

    if (peerUpdateFunction != null) {
      this.peerUpdateFunction = peerUpdateFunction;
    } else {
      this.peerUpdateFunction =
          (key, value) ->
              clusterService
                  .getNodes()
                  .stream()
                  .map(ControllerNode::id)
                  .filter(nodeId -> !nodeId.equals(localNodeId))
                  .collect(Collectors.toList());
    }

    if (eventExecutor != null) {
      this.executor = eventExecutor;
    } else {
      // should be a normal executor; it's used for receiving messages
      this.executor =
          Executors.newFixedThreadPool(8, groupedThreads("onos/ecm", mapName + "-fg-%d", log));
    }

    if (communicationExecutor != null) {
      this.communicationExecutor = communicationExecutor;
    } else {
      // sending executor; should be capped
      // TODO this probably doesn't need to be bounded anymore
      this.communicationExecutor =
          newFixedThreadPool(8, groupedThreads("onos/ecm", mapName + "-publish-%d", log));
    }

    if (backgroundExecutor != null) {
      this.backgroundExecutor = backgroundExecutor;
    } else {
      this.backgroundExecutor =
          newSingleThreadScheduledExecutor(groupedThreads("onos/ecm", mapName + "-bg-%d", log));
    }

    // start anti-entropy thread
    this.backgroundExecutor.scheduleAtFixedRate(
        this::sendAdvertisement, initialDelaySec, antiEntropyPeriod, antiEntropyTimeUnit);

    updateMessageSubject = new MessageSubject("ecm-" + mapName + "-update");
    clusterCommunicator.addSubscriber(
        updateMessageSubject, serializer::decode, this::processUpdates, this.executor);

    antiEntropyAdvertisementSubject = new MessageSubject("ecm-" + mapName + "-anti-entropy");
    clusterCommunicator.addSubscriber(
        antiEntropyAdvertisementSubject,
        serializer::decode,
        this::handleAntiEntropyAdvertisement,
        serializer::encode,
        this.backgroundExecutor);

    updateRequestSubject = new MessageSubject("ecm-" + mapName + "-update-request");
    clusterCommunicator.addSubscriber(
        updateRequestSubject,
        serializer::decode,
        this::handleUpdateRequests,
        this.backgroundExecutor);

    if (!tombstonesDisabled) {
      previousTombstonePurgeTime = 0;
      this.backgroundExecutor.scheduleWithFixedDelay(
          this::purgeTombstones, initialDelaySec, antiEntropyPeriod, TimeUnit.SECONDS);
    }

    this.tombstonesDisabled = tombstonesDisabled;
    this.lightweightAntiEntropy = !convergeFaster;

    // Initiate first round of Gossip
    this.bootstrap();
  }
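Application code normally reaches this constructor through the builder referenced in the Javadoc above rather than invoking it directly. A minimal sketch, assuming a StorageService reference; the map name, key/value types, and wall-clock timestamps are illustrative only:

  // Sketch: building an eventually consistent map via the builder API.
  // WallClockTimestamp ordering is only safe when clock skew is tolerable.
  EventuallyConsistentMap<DeviceId, DeviceDescription> devices =
      storageService.<DeviceId, DeviceDescription>eventuallyConsistentMapBuilder()
          .withName("sample-devices")
          .withSerializer(KryoNamespace.newBuilder().register(KryoNamespaces.API))
          .withTimestampProvider((k, v) -> new WallClockTimestamp())
          .build();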