private synchronized void activate() throws Exception {
    // Nothing to activate if this cluster connection was never started
    if (!started) {
      return;
    }

    if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
      HornetQServerLogger.LOGGER.debug(
          "Activating cluster connection nodeID="
              + nodeManager.getNodeId()
              + " for server="
              + this.server);
    }

    // Announce this node as live in the cluster topology and schedule periodic re-announcements
    liveNotifier = new LiveNotifier();
    liveNotifier.updateAsLive();
    liveNotifier.schedule();

    serverLocator = clusterConnector.createServerLocator();

    // The cluster connector may yield no locator at all (nothing to connect to); only configure
    // and start it when one exists
    if (serverLocator != null) {

      if (!useDuplicateDetection) {
        HornetQServerLogger.LOGGER.debug(
            "DuplicateDetection is disabled, sending clustered messages blocked");
      }

      final TopologyMember currentMember = topology.getMember(manager.getNodeId());

      if (currentMember == null) {
        // sanity check only
        throw new IllegalStateException(
            "InternalError! The ClusterConnection doesn't know about its own node = " + this);
      }

      serverLocator.setNodeID(nodeManager.getNodeId().toString());
      serverLocator.setIdentity("(main-ClusterConnection::" + server.toString() + ")");
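      // No reconnect attempts once a connection drops, but the initial connection attempt
      // (further below) is retried indefinitely (-1)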
      serverLocator.setReconnectAttempts(0);
      serverLocator.setClusterConnection(true);
      serverLocator.setClusterTransportConfiguration(connector);
      serverLocator.setInitialConnectAttempts(-1);
      serverLocator.setClientFailureCheckPeriod(clientFailureCheckPeriod);
      serverLocator.setConnectionTTL(connectionTTL);
      serverLocator.setConfirmationWindowSize(confirmationWindowSize);
      // If not using duplicate detection, we send blocking
      serverLocator.setBlockOnDurableSend(!useDuplicateDetection);
      serverLocator.setBlockOnNonDurableSend(!useDuplicateDetection);
      serverLocator.setCallTimeout(callTimeout);
      serverLocator.setCallFailoverTimeout(callFailoverTimeout);
      // No producer flow control on the bridges, as we don't want to lock the queues
      serverLocator.setProducerWindowSize(-1);

      if (retryInterval > 0) {
        this.serverLocator.setRetryInterval(retryInterval);
      }

      // Register for topology updates so bridges can be created and torn down as nodes join and
      // leave the cluster
      addClusterTopologyListener(this);

      serverLocator.setAfterConnectionInternalListener(this);

      // Start the locator on the server's own executor
      serverLocator.start(server.getExecutorFactory().getExecutor());
    }

    if (managementService != null) {
      TypedProperties props = new TypedProperties();
      props.putSimpleStringProperty(new SimpleString("name"), name);
      Notification notification =
          new Notification(
              nodeManager.getNodeId().toString(),
              NotificationType.CLUSTER_CONNECTION_STARTED,
              props);
      HornetQServerLogger.LOGGER.debug("sending notification: " + notification);
      managementService.sendNotification(notification);
    }
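
    // Illustration only (not part of the original source): a listener registered with the
    // management service would see the CLUSTER_CONNECTION_STARTED notification sent above
    // roughly like this; the anonymous-listener shape is an assumption for the example.
    //
    //   managementService.addNotificationListener(new NotificationListener() {
    //     public void onNotification(Notification notification) {
    //       if (notification.getType() == NotificationType.CLUSTER_CONNECTION_STARTED) {
    //         HornetQServerLogger.LOGGER.debug("cluster connection is up: " + notification);
    //       }
    //     }
    //   });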
  }

  public synchronized void start() throws Exception {
    if (started) {
      return;
    }

    // Capture the caller's context classloader (under a privileged action, in case a security
    // manager is installed) so the remoting threads created below inherit it
    ClassLoader tccl =
        AccessController.doPrivileged(
            new PrivilegedAction<ClassLoader>() {
              public ClassLoader run() {
                return Thread.currentThread().getContextClassLoader();
              }
            });

    // The remoting service maintains its own thread pool for handling remoting traffic.
    // With OIO, each connection has its own thread.
    // With NIO, threads are capped at nio-remoting-threads, which defaults to num cores * 3.
    // This needs to be a different thread pool from the main thread pool, especially for OIO,
    // where we may need to support many hundreds of connections, but the main thread pool must
    // be kept small for better performance.
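    // For illustration only: that NIO cap is a transport parameter on the acceptor, so a
    // deployment that needs a different limit would typically set something like
    //   params.put("nio-remoting-threads", 6);
    // on the acceptor's TransportConfiguration (the value 6 is just an example).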

    ThreadFactory tFactory =
        new HornetQThreadFactory(
            "HornetQ-remoting-threads-" + server.toString() + "-" + System.identityHashCode(this),
            false,
            tccl);

    threadPool = Executors.newCachedThreadPool(tFactory);

    ClassLoader loader = Thread.currentThread().getContextClassLoader();

    // Create an acceptor for each configured transport; a transport that fails to initialize is
    // logged and skipped rather than aborting startup
    for (TransportConfiguration info : acceptorsConfig) {
      try {
        Class<?> clazz = loader.loadClass(info.getFactoryClassName());

        AcceptorFactory factory = (AcceptorFactory) clazz.newInstance();

        // Check that the configured parameters are ones this acceptor factory allows

        if (info.getParams() != null) {
          Set<String> invalid =
              ConfigurationHelper.checkKeys(
                  factory.getAllowableProperties(), info.getParams().keySet());

          if (!invalid.isEmpty()) {
            HornetQServerLogger.LOGGER.invalidAcceptorKeys(
                ConfigurationHelper.stringSetToCommaListString(invalid));

            continue;
          }
        }

        // Work out which wire protocol this acceptor speaks, falling back to the default
        // protocol when none is configured
        String protocol =
            ConfigurationHelper.getStringProperty(
                TransportConstants.PROTOCOL_PROP_NAME,
                TransportConstants.DEFAULT_PROTOCOL,
                info.getParams());

        ProtocolManager manager = protocolMap.get(protocol);
        if (manager == null) {
          throw HornetQMessageBundle.BUNDLE.noProtocolManagerFound(protocol);
        }
        ClusterConnection clusterConnection = lookupClusterConnection(info);

        // Build the acceptor itself, wiring in a delegating buffer handler, this remoting
        // service, the shared thread pools and the protocol manager
        Acceptor acceptor =
            factory.createAcceptor(
                clusterConnection,
                info.getParams(),
                new DelegatingBufferHandler(),
                manager,
                this,
                threadPool,
                scheduledThreadPool,
                manager);

        if (defaultInvmSecurityPrincipal != null && acceptor.isUnsecurable()) {
          acceptor.setDefaultHornetQPrincipal(defaultInvmSecurityPrincipal);
        }

        acceptors.add(acceptor);

        if (managementService != null) {
          acceptor.setNotificationService(managementService);

          managementService.registerAcceptor(acceptor, info);
        }
      } catch (Exception e) {
        HornetQServerLogger.LOGGER.errorCreatingAcceptor(e, info.getFactoryClassName());
      }
    }

    for (Acceptor a : acceptors) {
      a.start();
    }

    // This thread checks connections that need to be closed, and also flushes confirmations
    failureCheckAndFlushThread =
        new FailureCheckAndFlushThread(RemotingServiceImpl.CONNECTION_TTL_CHECK_INTERVAL);

    failureCheckAndFlushThread.start();

    started = true;
  }
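
  // Illustration only (not part of the original source): the kind of acceptor configuration the
  // loop in start() consumes. The Netty factory class name and the "host"/"port" parameter keys
  // are the usual HornetQ values, but treat this as a sketch rather than a definitive
  // configuration; when no "protocol" parameter is set, the default protocol above applies.
  //
  //   Map<String, Object> params = new HashMap<String, Object>();
  //   params.put("host", "0.0.0.0");
  //   params.put("port", 5445);
  //   configuration.getAcceptorConfigurations().add(
  //       new TransportConfiguration(
  //           "org.hornetq.core.remoting.impl.netty.NettyAcceptorFactory", params));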