Example #1
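  // Pauses every acceptor and disconnects the remaining non-client connections,
  // except connectionToKeepOpen, so no new work arrives while that link stays open.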
  public synchronized void freeze(final CoreRemotingConnection connectionToKeepOpen) {
    if (!started) return;
    failureCheckAndFlushThread.close(false);

    for (Acceptor acceptor : acceptors) {
      try {
        acceptor.pause();
      } catch (Exception e) {
        HornetQServerLogger.LOGGER.errorStoppingAcceptor();
      }
    }
    HashMap<Object, ConnectionEntry> connectionEntries =
        new HashMap<Object, ConnectionEntry>(connections);

    // Now we ensure that no connection will process any more packets after this
    // method completes, then send a disconnect packet
    for (Entry<Object, ConnectionEntry> entry : connectionEntries.entrySet()) {
      RemotingConnection conn = entry.getValue().connection;

      if (conn.equals(connectionToKeepOpen)) continue;

      if (HornetQServerLogger.LOGGER.isTraceEnabled()) {
        HornetQServerLogger.LOGGER.trace("Sending connection.disconnection packet to " + conn);
      }

      if (!conn.isClient()) {
        conn.disconnect(false);
        connections.remove(entry.getKey());
      }
    }
  }
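
Note the copy of connections into a fresh HashMap before the loop: the method removes entries from the live map while it iterates, and, depending on the map implementation, mutating the live view during iteration either throws ConcurrentModificationException or yields an unstable view. A minimal standalone sketch of the same copy-then-iterate pattern (the class, field, and method names here are illustrative, not from HornetQ):

  import java.util.HashMap;
  import java.util.Map;

  public class SnapshotIteration {
    private final Map<Object, String> connections = new HashMap<Object, String>();

    // Iterate over a snapshot so the live map can be mutated safely inside the loop.
    public void dropAllExcept(final Object keyToKeep) {
      Map<Object, String> snapshot = new HashMap<Object, String>(connections);
      for (Map.Entry<Object, String> entry : snapshot.entrySet()) {
        if (entry.getKey().equals(keyToKeep)) {
          continue;
        }
        connections.remove(entry.getKey()); // safe: we are iterating the copy
      }
    }
  }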
Example #2
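 // Records the principal and installs it on every acceptor that reports itself
 // as unsecurable (in practice the in-VM acceptor), so connections arriving
 // through those acceptors fall back to this principal.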
 public synchronized void allowInvmSecurityOverride(HornetQPrincipal principal) {
   defaultInvmSecurityPrincipal = principal;
   for (Acceptor acceptor : acceptors) {
     if (acceptor.isUnsecurable()) {
       acceptor.setDefaultHornetQPrincipal(principal);
     }
   }
 }
Example #3
  public void stop(final boolean criticalError) throws Exception {
    if (!started) {
      return;
    }

    failureCheckAndFlushThread.close(criticalError);

    // We need to stop the acceptors first so that no new connections are accepted
    // after we send the disconnect message
    for (Acceptor acceptor : acceptors) {
      if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
        HornetQServerLogger.LOGGER.debug("Pausing acceptor " + acceptor);
      }
      acceptor.pause();
    }

    if (HornetQServerLogger.LOGGER.isDebugEnabled()) {
      HornetQServerLogger.LOGGER.debug("Sending disconnect on live connections");
    }

    HashSet<ConnectionEntry> connectionEntries = new HashSet<ConnectionEntry>(connections.values());

    // Now we ensure that no connection will process any more packets after this
    // method completes, then send a disconnect packet
    for (ConnectionEntry entry : connectionEntries) {
      RemotingConnection conn = entry.connection;

      if (HornetQServerLogger.LOGGER.isTraceEnabled()) {
        HornetQServerLogger.LOGGER.trace("Sending connection.disconnection packet to " + conn);
      }

      conn.disconnect(criticalError);
    }

    for (Acceptor acceptor : acceptors) {
      acceptor.stop();
    }

    acceptors.clear();

    connections.clear();

    if (managementService != null) {
      managementService.unregisterAcceptors();
    }

    threadPool.shutdown();

    if (!criticalError) {
      boolean ok = threadPool.awaitTermination(10000, TimeUnit.MILLISECONDS);

      if (!ok) {
        HornetQServerLogger.LOGGER.timeoutRemotingThreadPool();
      }
    }

    started = false;
  }
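
The tail of stop follows the standard ExecutorService shutdown idiom: shutdown() stops new tasks from being submitted, and a bounded awaitTermination logs a warning instead of hanging when workers do not finish in time. A minimal standalone version of that idiom (the pool and the 10-second timeout here are illustrative):

  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;
  import java.util.concurrent.TimeUnit;

  public class BoundedShutdown {
    public static void main(String[] args) throws InterruptedException {
      ExecutorService pool = Executors.newCachedThreadPool();

      pool.shutdown(); // no new tasks; already-queued work is allowed to drain

      // Wait a bounded amount of time, then report rather than block forever,
      // mirroring the timeoutRemotingThreadPool() warning above.
      if (!pool.awaitTermination(10000, TimeUnit.MILLISECONDS)) {
        System.err.println("thread pool did not terminate within 10s");
      }
    }
  }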
Example #4
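    // Dispatch for packets arriving on the control channel (channel0): answer
    // PINGs, register cluster-topology subscribers, record node announcements,
    // and start replication for verified backup registrations.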
    public void handlePacket(final Packet packet) {
      if (packet.getType() == PacketImpl.PING) {
        Ping ping = (Ping) packet;

        if (config.getConnectionTTLOverride() == -1) {
          // Allow clients to specify connection ttl
          entry.ttl = ping.getConnectionTTL();
        }

        // Just send a ping back
        channel0.send(packet);
      } else if (packet.getType() == PacketImpl.SUBSCRIBE_TOPOLOGY
          || packet.getType() == PacketImpl.SUBSCRIBE_TOPOLOGY_V2) {
        SubscribeClusterTopologyUpdatesMessage msg =
            (SubscribeClusterTopologyUpdatesMessage) packet;

        if (packet.getType() == PacketImpl.SUBSCRIBE_TOPOLOGY_V2) {
          channel0
              .getConnection()
              .setClientVersion(
                  ((SubscribeClusterTopologyUpdatesMessageV2) msg).getClientVersion());
        }

        final ClusterTopologyListener listener =
            new ClusterTopologyListener() {
              public void nodeUP(
                  final long uniqueEventID,
                  final String nodeID,
                  final Pair<TransportConfiguration, TransportConfiguration> connectorPair,
                  final boolean last) {
                // Hand off to an executor: most topology notifications may come
                // from a channel itself, and sending inline from that thread
                // could cause deadlocks
                entry.connectionExecutor.execute(
                    new Runnable() {
                      public void run() {
                        if (channel0.supports(PacketImpl.CLUSTER_TOPOLOGY_V2)) {
                          channel0.send(
                              new ClusterTopologyChangeMessage_V2(
                                  uniqueEventID, nodeID, connectorPair, last));
                        } else {
                          channel0.send(
                              new ClusterTopologyChangeMessage(nodeID, connectorPair, last));
                        }
                      }
                    });
              }

              public void nodeDown(final long uniqueEventID, final String nodeID) {
                // Hand off to an executor: most topology notifications may come
                // from a channel itself, and sending inline from that thread
                // could cause deadlocks
                entry.connectionExecutor.execute(
                    new Runnable() {
                      public void run() {
                        if (channel0.supports(PacketImpl.CLUSTER_TOPOLOGY_V2)) {
                          channel0.send(new ClusterTopologyChangeMessage_V2(uniqueEventID, nodeID));
                        } else {
                          channel0.send(new ClusterTopologyChangeMessage(nodeID));
                        }
                      }
                    });
              }

              @Override
              public String toString() {
                return "Remote Proxy on channel "
                    + Integer.toHexString(System.identityHashCode(this));
              }
            };

        if (acceptorUsed.getClusterConnection() != null) {
          acceptorUsed.getClusterConnection().addClusterTopologyListener(listener);

          rc.addCloseListener(
              new CloseListener() {
                public void connectionClosed() {
                  acceptorUsed.getClusterConnection().removeClusterTopologyListener(listener);
                }
              });
        } else {
          // If not clustered, we send a single notification to the client containing
          // the node-id of the server it is connected to. This is done so that
          // recovery discovery can also use the node-id in non-clustered setups.
          entry.connectionExecutor.execute(
              new Runnable() {
                public void run() {
                  String nodeId = server.getNodeID().toString();
                  Pair<TransportConfiguration, TransportConfiguration> emptyConfig =
                      new Pair<TransportConfiguration, TransportConfiguration>(null, null);
                  if (channel0.supports(PacketImpl.CLUSTER_TOPOLOGY_V2)) {
                    channel0.send(
                        new ClusterTopologyChangeMessage_V2(
                            System.currentTimeMillis(), nodeId, emptyConfig, true));
                  } else {
                    channel0.send(new ClusterTopologyChangeMessage(nodeId, emptyConfig, true));
                  }
                }
              });
        }
      } else if (packet.getType() == PacketImpl.NODE_ANNOUNCE) {
        NodeAnnounceMessage msg = (NodeAnnounceMessage) packet;

        Pair<TransportConfiguration, TransportConfiguration> pair;
        if (msg.isBackup()) {
          pair = new Pair<TransportConfiguration, TransportConfiguration>(null, msg.getConnector());
        } else {
          pair =
              new Pair<TransportConfiguration, TransportConfiguration>(
                  msg.getConnector(), msg.getBackupConnector());
        }
        if (isTrace) {
          HornetQLogger.LOGGER.trace(
              "Server "
                  + server
                  + " receiving nodeUp from NodeID="
                  + msg.getNodeID()
                  + ", pair="
                  + pair);
        }

        if (acceptorUsed != null) {
          ClusterConnection clusterConn = acceptorUsed.getClusterConnection();
          if (clusterConn != null) {
            clusterConn.nodeAnnounced(
                msg.getCurrentEventID(), msg.getNodeID(), pair, msg.isBackup());
          } else {
            HornetQLogger.LOGGER.debug("Cluster connection is null on acceptor = " + acceptorUsed);
          }
        } else {
          HornetQLogger.LOGGER.debug(
              "no acceptor configured at the CoreProtocolManager " + this);
        }
      } else if (packet.getType() == PacketImpl.BACKUP_REGISTRATION) {
        BackupRegistrationMessage msg = (BackupRegistrationMessage) packet;
        ClusterConnection clusterConnection = acceptorUsed.getClusterConnection();

        if (clusterConnection.verify(msg.getClusterUser(), msg.getClusterPassword())) {
          try {
            server.startReplication(
                rc, clusterConnection, getPair(msg.getConnector(), true), msg.isFailBackRequest());
          } catch (HornetQException e) {
            channel0.send(new BackupRegistrationFailedMessage(e));
          }
        } else {
          channel0.send(new BackupRegistrationFailedMessage(null));
        }
      }
    }
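
Both nodeUP and nodeDown hand the actual channel0.send(...) off to entry.connectionExecutor instead of sending inline, because topology notifications may fire on a channel's own thread, where a synchronous send could deadlock. A stripped-down sketch of that handoff pattern (the Channel interface and the names here are illustrative):

  import java.util.concurrent.Executor;
  import java.util.concurrent.Executors;

  public class CallbackHandoff {
    interface Channel {
      void send(String packet);
    }

    private final Executor connectionExecutor = Executors.newSingleThreadExecutor();

    // Do not call back into the channel from the notifying thread; queue the
    // send on the connection's own executor so the callback returns immediately.
    public void onTopologyChange(final Channel channel, final String update) {
      connectionExecutor.execute(
          new Runnable() {
            public void run() {
              channel.send(update);
            }
          });
    }
  }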
Example #5
  public synchronized void start() throws Exception {
    if (started) {
      return;
    }

    ClassLoader tccl =
        AccessController.doPrivileged(
            new PrivilegedAction<ClassLoader>() {
              public ClassLoader run() {
                return Thread.currentThread().getContextClassLoader();
              }
            });

    // The remoting service maintains its own thread pool for handling remoting traffic.
    // With OIO, each connection gets its own thread; with NIO, threads are capped at
    // nio-remoting-threads, which defaults to the number of cores * 3.
    // This needs to be a separate pool from the main thread pool, especially for OIO,
    // where we may need to support many hundreds of connections while the main thread
    // pool must be kept small for better performance.

    ThreadFactory tFactory =
        new HornetQThreadFactory(
            "HornetQ-remoting-threads-" + server.toString() + "-" + System.identityHashCode(this),
            false,
            tccl);

    threadPool = Executors.newCachedThreadPool(tFactory);

    ClassLoader loader = Thread.currentThread().getContextClassLoader();

    for (TransportConfiguration info : acceptorsConfig) {
      try {
        Class<?> clazz = loader.loadClass(info.getFactoryClassName());

        AcceptorFactory factory = (AcceptorFactory) clazz.newInstance();

        // Check valid properties

        if (info.getParams() != null) {
          Set<String> invalid =
              ConfigurationHelper.checkKeys(
                  factory.getAllowableProperties(), info.getParams().keySet());

          if (!invalid.isEmpty()) {
            HornetQServerLogger.LOGGER.invalidAcceptorKeys(
                ConfigurationHelper.stringSetToCommaListString(invalid));

            continue;
          }
        }

        String protocol =
            ConfigurationHelper.getStringProperty(
                TransportConstants.PROTOCOL_PROP_NAME,
                TransportConstants.DEFAULT_PROTOCOL,
                info.getParams());

        ProtocolManager manager = protocolMap.get(protocol);
        if (manager == null) {
          throw HornetQMessageBundle.BUNDLE.noProtocolManagerFound(protocol);
        }
        ClusterConnection clusterConnection = lookupClusterConnection(info);

        Acceptor acceptor =
            factory.createAcceptor(
                clusterConnection,
                info.getParams(),
                new DelegatingBufferHandler(),
                manager,
                this,
                threadPool,
                scheduledThreadPool,
                manager);

        if (defaultInvmSecurityPrincipal != null && acceptor.isUnsecurable()) {
          acceptor.setDefaultHornetQPrincipal(defaultInvmSecurityPrincipal);
        }

        acceptors.add(acceptor);

        if (managementService != null) {
          acceptor.setNotificationService(managementService);

          managementService.registerAcceptor(acceptor, info);
        }
      } catch (Exception e) {
        HornetQServerLogger.LOGGER.errorCreatingAcceptor(e, info.getFactoryClassName());
      }
    }

    for (Acceptor a : acceptors) {
      a.start();
    }

    // This thread checks connections that need to be closed, and also flushes confirmations
    failureCheckAndFlushThread =
        new FailureCheckAndFlushThread(RemotingServiceImpl.CONNECTION_TTL_CHECK_INTERVAL);

    failureCheckAndFlushThread.start();

    started = true;
  }
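
Acceptor creation above is plugin-style: the configured factory class name is loaded through the context class loader and instantiated reflectively. A minimal sketch of that loading step (the AcceptorFactory shape here is illustrative; getDeclaredConstructor().newInstance() is the non-deprecated equivalent of the Class.newInstance() call used above):

  public class ReflectiveFactoryLoading {
    interface AcceptorFactory {
      Object createAcceptor();
    }

    // Load a factory implementation by its configured class name and instantiate it.
    // The cast only succeeds if the named class actually implements the interface.
    static AcceptorFactory loadFactory(String factoryClassName) throws Exception {
      ClassLoader loader = Thread.currentThread().getContextClassLoader();
      Class<?> clazz = loader.loadClass(factoryClassName);
      return (AcceptorFactory) clazz.getDeclaredConstructor().newInstance();
    }
  }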