private void createDefaultDatabaseDefinition(DatabaseDefinitionStore store) { // Assumes IPv4 is returned. String ip = ClusterDefinitionManager.getSiteLocalAddress(); NodeInfo node = NodeInfo.from(ip, ip, COPYCAT_TCP_PORT); try { store.write(DatabaseDefinition.from(ImmutableSet.of(node))); } catch (IOException e) { log.warn("Unable to write default cluster definition", e); } }
/**
 * Component lifecycle entry point: loads the partition definition file (seeding a
 * single-node default if none exists), boots the Copycat cluster coordinator, opens
 * the in-memory base partition and all persistent partitions, and wires up the
 * transaction manager, event dispatcher, and queue-update messaging. Blocks until
 * the databases are open; throws IllegalStateException if the definition file
 * cannot be read.
 */
@Activate
public void activate() {
    localNodeId = clusterService.getLocalNode().id();
    // load database configuration
    File databaseDefFile = new File(PARTITION_DEFINITION_FILE);
    log.info("Loading database definition: {}", databaseDefFile.getAbsolutePath());

    Map<String, Set<NodeInfo>> partitionMap;
    try {
        DatabaseDefinitionStore databaseDefStore = new DatabaseDefinitionStore(databaseDefFile);
        if (!databaseDefFile.exists()) {
            // First boot with no config on disk: write a single-node default definition.
            createDefaultDatabaseDefinition(databaseDefStore);
        }
        partitionMap = databaseDefStore.read().getPartitions();
    } catch (IOException e) {
        // Without a partition map the store cannot function; abort activation.
        throw new IllegalStateException("Failed to load database config", e);
    }

    // Union of the node sets of every partition -> the full cluster membership,
    // rendered as Copycat member URIs. reduce().get() is safe only if the map is
    // non-empty — presumably guaranteed by the definition file format; verify.
    String[] activeNodeUris = partitionMap
            .values()
            .stream()
            .reduce((s1, s2) -> Sets.union(s1, s2))
            .get()
            .stream()
            .map(this::nodeToUri)
            .toArray(String[]::new);

    String localNodeUri = nodeToUri(NodeInfo.of(clusterService.getLocalNode()));
    Protocol protocol = new CopycatCommunicationProtocol(clusterService, clusterCommunicator);

    // Raft cluster configuration; election/heartbeat timings are derived from the
    // member list by the electionTimeoutMillis/heartbeatTimeoutMillis helpers.
    ClusterConfig clusterConfig = new ClusterConfig()
            .withProtocol(protocol)
            .withElectionTimeout(electionTimeoutMillis(activeNodeUris))
            .withHeartbeatInterval(heartbeatTimeoutMillis(activeNodeUris))
            .withMembers(activeNodeUris)
            .withLocalMember(localNodeUri);

    CopycatConfig copycatConfig = new CopycatConfig()
            .withName("onos")
            .withClusterConfig(clusterConfig)
            .withDefaultSerializer(new DatabaseSerializer())
            .withDefaultExecutor(
                    Executors.newSingleThreadExecutor(
                            new NamedThreadFactory("copycat-coordinator-%d")));

    coordinator = new DefaultClusterCoordinator(copycatConfig.resolve());

    // Base partition: replicated across ALL active nodes, backed by an in-memory log.
    DatabaseConfig inMemoryDatabaseConfig =
            newDatabaseConfig(BASE_PARTITION_NAME, newInMemoryLog(), activeNodeUris);
    inMemoryDatabase = coordinator.getResource(
            inMemoryDatabaseConfig.getName(),
            inMemoryDatabaseConfig
                    .resolve(clusterConfig)
                    .withSerializer(copycatConfig.getDefaultSerializer())
                    .withDefaultExecutor(copycatConfig.getDefaultExecutor()));

    // One persistent-log database per configured partition, each replicated only
    // on that partition's member nodes.
    List<Database> partitions = partitionMap
            .entrySet()
            .stream()
            .map(
                    entry -> {
                        String[] replicas =
                                entry.getValue().stream().map(this::nodeToUri).toArray(String[]::new);
                        return newDatabaseConfig(entry.getKey(), newPersistentLog(), replicas);
                    })
            .map(
                    config -> {
                        Database db = coordinator.getResource(
                                config.getName(),
                                config
                                        .resolve(clusterConfig)
                                        .withSerializer(copycatConfig.getDefaultSerializer())
                                        .withDefaultExecutor(copycatConfig.getDefaultExecutor()));
                        return db;
                    })
            .collect(Collectors.toList());

    partitionedDatabase = new PartitionedDatabase("onos-store", partitions);

    // Open the coordinator first, then both databases concurrently; completion is
    // logged either way, and the original error propagates through the future.
    CompletableFuture<Void> status = coordinator
            .open()
            .thenCompose(
                    v -> CompletableFuture.allOf(inMemoryDatabase.open(), partitionedDatabase.open())
                            .whenComplete(
                                    (db, error) -> {
                                        if (error != null) {
                                            log.error("Failed to initialize database.", error);
                                        } else {
                                            log.info("Successfully initialized database.");
                                        }
                                    }));
    // Block activation until the store is fully open (unchecked-wraps failures).
    Futures.getUnchecked(status);

    transactionManager = new TransactionManager(partitionedDatabase, consistentMapBuilder());
    partitionedDatabase.setTransactionManager(transactionManager);

    eventDispatcher = Executors.newSingleThreadExecutor(
            groupedThreads("onos/store/manager", "map-event-dispatcher"));
    queuePollExecutor =
            Executors.newFixedThreadPool(4, groupedThreads("onos/store/manager", "queue-poll-handler"));

    // When a peer announces a queue update, poke the named local queue so any
    // pending poll can complete; unknown queue names are ignored.
    clusterCommunicator.<String>addSubscriber(
            QUEUE_UPDATED_TOPIC,
            data -> new String(data, Charsets.UTF_8),
            name -> {
                DefaultDistributedQueue q = queues.get(name);
                if (q != null) {
                    q.tryPoll();
                }
            },
            queuePollExecutor);
    log.info("Started");
}
/**
 * Renders a node's address as a Copycat member URI of the form
 * {@code onos://<ip>:<tcpPort>}.
 *
 * @param node node whose IP and TCP port are formatted
 * @return the member URI string for the node
 */
protected String nodeToUri(NodeInfo node) {
    final String ip = node.getIp();
    final int port = node.getTcpPort();
    return String.format("onos://%s:%d", ip, port);
}