ManagedCluster(Clusters.Cluster spec) throws URISyntaxException {
    this.spec = spec;
    this.name = spec.getName();
    for (int i = 0; i < spec.getMembers().size(); i++) {
        startMember(i + 1);
    }
}
private void startMember(int serverId) throws URISyntaxException {
    Clusters.Member member = spec.getMembers().get(serverId - 1);
    StringBuilder initialHosts = new StringBuilder(spec.getMembers().get(0).getHost());
    for (int i = 1; i < spec.getMembers().size(); i++) {
        initialHosts.append(",").append(spec.getMembers().get(i).getHost());
    }

    if (member.isFullHaMember()) {
        // Full HA member: start an embedded HighlyAvailableGraphDatabase for this server id
        int haPort = new URI("cluster://" + member.getHost()).getPort() + 3000;
        GraphDatabaseBuilder graphDatabaseBuilder = new HighlyAvailableGraphDatabaseFactory()
                .newHighlyAvailableDatabaseBuilder(
                        new File(new File(root, name), "server" + serverId).getAbsolutePath())
                .setConfig(ClusterSettings.cluster_name, name)
                .setConfig(ClusterSettings.initial_hosts, initialHosts.toString())
                .setConfig(HaSettings.server_id, serverId + "")
                .setConfig(ClusterSettings.cluster_server, member.getHost())
                .setConfig(HaSettings.ha_server, ":" + haPort)
                .setConfig(commonConfig);
        if (instanceConfig.containsKey(serverId)) {
            graphDatabaseBuilder.setConfig(instanceConfig.get(serverId));
        }

        config(graphDatabaseBuilder, name, serverId);

        logger.info("Starting cluster node " + serverId + " in cluster " + name);
        final GraphDatabaseService graphDatabase = graphDatabaseBuilder.newGraphDatabase();
        members.put(serverId, (HighlyAvailableGraphDatabase) graphDatabase);

        life.add(new LifecycleAdapter() {
            @Override
            public void stop() throws Throwable {
                graphDatabase.shutdown();
            }
        });
    } else {
        // Arbiter: joins the cluster protocol through a ClusterClient only, no database is started
        Map<String, String> config = MapUtil.stringMap(
                ClusterSettings.cluster_name.name(), name,
                ClusterSettings.initial_hosts.name(), initialHosts.toString(),
                ClusterSettings.cluster_server.name(), member.getHost());
        Logging clientLogging = new Logging() {
            @Override
            public StringLogger getLogger(Class loggingClass) {
                return new Slf4jStringLogger(logger);
            }
        };
        life.add(new ClusterClient(
                ClusterClient.adapt(new Config(config)),
                clientLogging,
                new CoordinatorIncapableCredentialsProvider()));
    }
    // logger.info( "Started cluster node " + serverId + " in cluster " + name );
}
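// A hedged sketch of using the config(...) hook called above as a per-test extension point.
// Subclassing is assumed to be the intended mechanism, and the constructor shape in super(...)
// is an assumption to check against the actual class; the pull-interval value is illustrative.
class TunedClusterManager extends ClusterManager {
    TunedClusterManager(Provider provider, File root, Map<String, String> commonConfig) {
        super(provider, root, commonConfig); // assumed constructor; adjust to the real signature
    }

    @Override
    protected void config(GraphDatabaseBuilder builder, String clusterName, int serverId) {
        // e.g. make slaves poll the master for updates more frequently during tests
        builder.setConfig(HaSettings.pull_interval, "100ms");
    }
}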
/**
 * Provides a cluster specification with default values.
 *
 * @param memberCount the total number of members in the cluster to start.
 */
public static Provider clusterOfSize(int memberCount) {
    Clusters.Cluster cluster = new Clusters.Cluster("neo4j.ha");
    for (int i = 0; i < memberCount; i++) {
        cluster.getMembers().add(new Clusters.Member(5001 + i, true));
    }
    final Clusters clusters = new Clusters();
    clusters.getClusters().add(cluster);
    return provided(clusters);
}
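// A minimal usage sketch of the provider above, not part of the original source. The
// ClusterManager constructor shape (Provider, root directory, common config) and the
// getDefaultCluster()/getMaster()/getAnySlave() accessors are assumptions to verify against
// the actual class; testRootDir is an illustrative placeholder.
public void threeMemberClusterExample(File testRootDir) throws Throwable {
    ClusterManager clusterManager =
            new ClusterManager(clusterOfSize(3), testRootDir, MapUtil.stringMap());
    clusterManager.start();
    try {
        ClusterManager.ManagedCluster cluster = clusterManager.getDefaultCluster();
        HighlyAvailableGraphDatabase master = cluster.getMaster();
        // ... exercise the cluster through master (and cluster.getAnySlave()) ...
    } finally {
        clusterManager.stop();
    }
}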
ManagedCluster(Clusters.Cluster spec) throws URISyntaxException, IOException {
    this.spec = spec;
    this.name = spec.getName();
    for (int i = 0; i < spec.getMembers().size(); i++) {
        startMember(new InstanceId(i + 1));
    }
    for (HighlyAvailableGraphDatabaseProxy member : members.values()) {
        insertInitialData(
                member.get(), name, member.get().getConfig().get(ClusterSettings.server_id));
    }
}
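// A hedged sketch of the insertInitialData(...) hook invoked above, assuming it is an
// overridable method with roughly this signature; it lets a test seed every member's database
// before the cluster is used. The property names written here are illustrative only.
@Override
protected void insertInitialData(GraphDatabaseService db, String name, InstanceId serverId) {
    try (Transaction tx = db.beginTx()) {
        Node seed = db.createNode();
        seed.setProperty("clusterName", name);                   // which cluster this member belongs to
        seed.setProperty("serverId", serverId.toIntegerIndex()); // which member was seeded
        tx.success();
    }
}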
/**
 * Provides a cluster specification with default values, mixing full HA members with
 * additional arbiter-only members.
 *
 * @param haMemberCount the number of full HA members in the cluster to start.
 * @param arbiterCount the number of additional arbiter (non-data) members to start.
 */
public static Provider clusterWithAdditionalArbiters(int haMemberCount, int arbiterCount) {
    Clusters.Cluster cluster = new Clusters.Cluster("neo4j.ha");
    int counter = 0;
    for (int i = 0; i < arbiterCount; i++, counter++) {
        cluster.getMembers().add(new Clusters.Member(5001 + counter, false));
    }
    for (int i = 0; i < haMemberCount; i++, counter++) {
        cluster.getMembers().add(new Clusters.Member(5001 + counter, true));
    }
    final Clusters clusters = new Clusters();
    clusters.getClusters().add(cluster);
    return provided(clusters);
}
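// Illustrative only: with the provider above, clusterWithAdditionalArbiters(2, 1) yields three
// members on consecutive ports starting at 5001. The arbiter is added first (port 5001,
// isFullHaMember() == false), followed by the two full HA members (ports 5002 and 5003).
// The ClusterManager constructor shape is the same assumption as in the earlier sketch.
ClusterManager mixedClusterManager =
        new ClusterManager(clusterWithAdditionalArbiters(2, 1), testRootDir, MapUtil.stringMap());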
@Override
public void start() throws Throwable {
    Clusters clusters = clustersProvider.clusters();

    life = new LifeSupport();

    for (int i = 0; i < clusters.getClusters().size(); i++) {
        Clusters.Cluster cluster = clusters.getClusters().get(i);
        ManagedCluster managedCluster = new ManagedCluster(cluster);
        clusterMap.put(cluster.getName(), managedCluster);
        life.add(managedCluster);
    }

    life.start();
}
@Override
public void start() throws Throwable {
    Clusters clusters = clustersProvider.clusters();

    life = new LifeSupport();

    // Started so that instances added here will be started immediately, and in case of
    // exceptions they can be shutdown() or stop()ped properly
    life.start();

    for (int i = 0; i < clusters.getClusters().size(); i++) {
        Clusters.Cluster cluster = clusters.getClusters().get(i);
        ManagedCluster managedCluster = new ManagedCluster(cluster);
        clusterMap.put(cluster.getName(), managedCluster);
        life.add(managedCluster);
    }
}
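// The ordering above matters: instances added to an already-started LifeSupport are started
// immediately by add(), so a cluster that fails during startup surfaces the error right away,
// and everything added before it can still be stopped cleanly. A minimal sketch of that
// behaviour (the per-instance work inside start() is illustrative):
LifeSupport life = new LifeSupport();
life.start();                      // container is STARTED before anything is added
life.add(new LifecycleAdapter() {  // added afterwards => init() and start() run right here
    @Override
    public void start() throws Throwable {
        // per-instance startup work
    }
});
life.shutdown();                   // stops and shuts down all added instances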
/** The total number of members of the cluster. */
public int size() {
    return spec.getMembers().size();
}
private void startMember(InstanceId serverId) throws URISyntaxException, IOException {
    Clusters.Member member = spec.getMembers().get(serverId.toIntegerIndex() - 1);
    StringBuilder initialHosts = new StringBuilder(spec.getMembers().get(0).getHost());
    for (int i = 1; i < spec.getMembers().size(); i++) {
        initialHosts.append(",").append(spec.getMembers().get(i).getHost());
    }

    File parent = new File(root, name);
    URI clusterUri = new URI("cluster://" + member.getHost());
    if (member.isFullHaMember()) {
        // Full HA member: start an embedded HighlyAvailableGraphDatabase (wrapped in a proxy)
        int clusterPort = clusterUri.getPort();
        int haPort = clusterUri.getPort() + 3000;
        File storeDir = new File(parent, "server" + serverId);
        if (storeDirInitializer != null) {
            storeDirInitializer.initializeStoreDir(serverId.toIntegerIndex(), storeDir);
        }
        GraphDatabaseBuilder builder =
                dbFactory.newHighlyAvailableDatabaseBuilder(storeDir.getAbsolutePath());
        builder.setConfig(ClusterSettings.cluster_name, name);
        builder.setConfig(ClusterSettings.initial_hosts, initialHosts.toString());
        builder.setConfig(ClusterSettings.server_id, serverId + "");
        builder.setConfig(ClusterSettings.cluster_server, "0.0.0.0:" + clusterPort);
        builder.setConfig(HaSettings.ha_server, ":" + haPort);
        builder.setConfig(OnlineBackupSettings.online_backup_enabled, Settings.FALSE);
        builder.setConfig(commonConfig);
        if (instanceConfig.containsKey(serverId.toIntegerIndex())) {
            builder.setConfig(instanceConfig.get(serverId.toIntegerIndex()));
        }

        config(builder, name, serverId);

        final HighlyAvailableGraphDatabaseProxy graphDatabase =
                new HighlyAvailableGraphDatabaseProxy(builder);
        members.put(serverId, graphDatabase);

        life.add(new LifecycleAdapter() {
            @Override
            public void stop() throws Throwable {
                graphDatabase.get().shutdown();
            }
        });
    } else {
        // Arbiter: joins the cluster protocol via a ClusterClient only, no database is started
        Map<String, String> config = MapUtil.stringMap(
                ClusterSettings.cluster_name.name(), name,
                ClusterSettings.initial_hosts.name(), initialHosts.toString(),
                ClusterSettings.server_id.name(), serverId + "",
                ClusterSettings.cluster_server.name(), "0.0.0.0:" + clusterUri.getPort(),
                GraphDatabaseSettings.store_dir.name(),
                        new File(parent, "arbiter" + serverId).getAbsolutePath());
        Config config1 = new Config(
                config,
                InternalAbstractGraphDatabase.Configuration.class,
                GraphDatabaseSettings.class);

        ObjectStreamFactory objectStreamFactory = new ObjectStreamFactory();
        ClusterClient clusterClient = new ClusterClient(
                new Monitors(),
                ClusterClient.adapt(config1),
                NullLogService.getInstance(),
                new NotElectableElectionCredentialsProvider(),
                objectStreamFactory,
                objectStreamFactory);

        arbiters.add(new ClusterMembers(
                clusterClient,
                clusterClient,
                new ClusterMemberEvents() {
                    @Override
                    public void addClusterMemberListener(ClusterMemberListener listener) {
                        // noop
                    }

                    @Override
                    public void removeClusterMemberListener(ClusterMemberListener listener) {
                        // noop
                    }
                },
                clusterClient.getServerId()));

        life.add(new FutureLifecycleAdapter<>(clusterClient));
    }
}
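// A hedged sketch of the storeDirInitializer hook used above: assuming a
// ClusterManager.StoreDirInitializer callback with an initializeStoreDir(int, File) method
// (the shape implied by the call in startMember), it can pre-populate each member's store
// directory before the database is started. seedStoreDir and the use of
// FileUtils.copyRecursively are illustrative choices, not part of the original source.
ClusterManager.StoreDirInitializer copySeedStore = new ClusterManager.StoreDirInitializer() {
    @Override
    public void initializeStoreDir(int serverId, File storeDir) throws IOException {
        FileUtils.copyRecursively(seedStoreDir, storeDir); // same seed store for every member
    }
};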