@Override
  public void run(int port) {
    EventLoopGroup boss = null;
    EventLoopGroup work = null;
    ServerBootstrap bootStrap = null;
    ChannelFuture future = null;

    try {
      boss = new NioEventLoopGroup();
      work = new NioEventLoopGroup();
      bootStrap = new ServerBootstrap();

      // server settings: option() configures the listening socket itself,
      // childOption() applies to each accepted connection
      bootStrap.group(boss, work);
      bootStrap.channel(NioServerSocketChannel.class);
      bootStrap.childHandler(new StringServerChannelInit());
      bootStrap.option(ChannelOption.SO_BACKLOG, 128);
      bootStrap.childOption(ChannelOption.TCP_NODELAY, true);
      bootStrap.childOption(ChannelOption.SO_KEEPALIVE, true);

      // server start
      future = bootStrap.bind(port).sync();

      // block until the server socket is closed
      future.channel().closeFuture().sync();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // restore the interrupt status
    } finally {
      if (boss != null) {
        boss.shutdownGracefully();
      }
      if (work != null) {
        work.shutdownGracefully();
      }
    }
  }
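StringServerChannelInit is referenced above but not shown. A minimal sketch of such an initializer, assuming a plain string protocol (the pipeline contents are an assumption, not the original class):

import io.netty.channel.ChannelInitializer;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.string.StringDecoder;
import io.netty.handler.codec.string.StringEncoder;
import io.netty.util.CharsetUtil;

// hypothetical initializer; the real StringServerChannelInit is not part of this example
public class StringServerChannelInit extends ChannelInitializer<SocketChannel> {
  @Override
  protected void initChannel(SocketChannel ch) throws Exception {
    // decode inbound bytes into Strings and encode outbound Strings into bytes
    ch.pipeline().addLast(new StringDecoder(CharsetUtil.UTF_8));
    ch.pipeline().addLast(new StringEncoder(CharsetUtil.UTF_8));
    // a business-logic handler would be appended here
  }
}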
Example #2
  public void start(int port, int bossThreads, int workerThreads) {

    System.out.println("Boss Threads: " + bossThreads);
    System.out.println("Worker Threads: " + workerThreads);

    bossGroup = new NioEventLoopGroup(bossThreads);
    workerGroup = new NioEventLoopGroup(workerThreads);
    try {
      final ServerBootstrap b = new ServerBootstrap();
      // b.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 32 * 1024);
      // b.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, 8 * 1024);
      b.option(ChannelOption.SO_BACKLOG, 1024);
      b.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
      b.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
      b.childOption(ChannelOption.SO_KEEPALIVE, true);

      b.group(bossGroup, workerGroup)
          .channel(NioServerSocketChannel.class)
          .handler(new LoggingHandler(LogLevel.INFO))
          .childHandler(
              new ChannelInitializer<SocketChannel>() {

                StringDecoder stringDecoder = new StringDecoder(CharsetUtil.UTF_8);
                StringEncoder stringEncoder = new StringEncoder(CharsetUtil.UTF_8);
                HelloServerHandler serverHandler = new HelloServerHandler();
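
                // NOTE: these instances are shared by every accepted channel; that is
                // safe for StringDecoder/StringEncoder (both @Sharable), and requires
                // HelloServerHandler to be annotated @Sharable as well.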

                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                  final ChannelPipeline pipeline = ch.pipeline();

                  // decoders
                  pipeline.addLast(
                      "framer", new DelimiterBasedFrameDecoder(8192, Delimiters.lineDelimiter()));
                  pipeline.addLast("stringDecoder", stringDecoder);

                  // encoders
                  pipeline.addLast("stringEncoder", stringEncoder);

                  // business logic handler
                  pipeline.addLast("serverHandler", serverHandler);
                }
              });

      b.bind(port).sync().channel().closeFuture().sync();

    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // restore the interrupt status
      throw new RuntimeException(e);
    } finally {
      stop();
    }
  }
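HelloServerHandler is likewise not shown. Since a single instance is installed into every pipeline, it has to be stateless and @Sharable; a hypothetical version:

import io.netty.channel.ChannelHandler.Sharable;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;

// hypothetical handler; the real HelloServerHandler is not part of this example
@Sharable
public class HelloServerHandler extends SimpleChannelInboundHandler<String> {
  @Override
  protected void channelRead0(ChannelHandlerContext ctx, String msg) {
    // echo each line back with a greeting; the frame decoder upstream strips the delimiter
    ctx.writeAndFlush("HELLO " + msg + "\r\n");
  }
}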
Example #3
    public void run() {
      // construct boss and worker groups (default thread count is 2 * number of cores)

      EventLoopGroup bossGroup = new NioEventLoopGroup();
      EventLoopGroup workerGroup = new NioEventLoopGroup();

      try {
        ServerBootstrap b = new ServerBootstrap();
        bootstrap.put(conf.getPort(), b);

        b.group(bossGroup, workerGroup);
        b.channel(NioServerSocketChannel.class);
        b.option(ChannelOption.SO_BACKLOG, 100);
        b.childOption(ChannelOption.TCP_NODELAY, true);
        b.childOption(ChannelOption.SO_KEEPALIVE, true);
        // b.option(ChannelOption.MESSAGE_SIZE_ESTIMATOR);

        boolean compressComm = false;
        b.childHandler(new ServerInitializer(compressComm));

        // Start the server.
        logger.info(
            "Starting server " + conf.getNodeId() + ", listening on port = " + conf.getPort());
        ChannelFuture f = b.bind(conf.getPort()).syncUninterruptibly();

        // a ChannelFutureListener should register the channel here
        // allChannels.add(f.channel());

        // block until the server socket is closed.
        f.channel().closeFuture().sync();
      } catch (Exception ex) {
        // most likely from bind() or closeFuture().sync()
        logger.error("Failed to set up public handler.", ex);
      } finally {
        // Shut down all event loops to terminate all threads.
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
      }

      // We could also accept connections on other ports (e.g., to isolate
      // reads from writes).
    }
Example #4
  public void applyConnectionOptions(ServerBootstrap bootstrap) {
    bootstrap.childOption(ChannelOption.TCP_NODELAY, tcpNoDelay);
    if (tcpSendBufferSize != -1) {
      bootstrap.childOption(ChannelOption.SO_SNDBUF, tcpSendBufferSize);
    }
    if (tcpReceiveBufferSize != -1) {
      bootstrap.childOption(ChannelOption.SO_RCVBUF, tcpReceiveBufferSize);
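      // also pin the size of the buffer Netty allocates per read to the same value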
      bootstrap.childOption(
          ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(tcpReceiveBufferSize));
    }

    bootstrap.option(ChannelOption.SO_LINGER, soLinger);
    if (trafficClass != -1) {
      bootstrap.childOption(ChannelOption.IP_TOS, trafficClass);
    }
    bootstrap.childOption(ChannelOption.ALLOCATOR, PartialPooledByteBufAllocator.INSTANCE);

    bootstrap.childOption(ChannelOption.SO_KEEPALIVE, tcpKeepAlive);
    bootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress);
    bootstrap.option(ChannelOption.SO_BACKLOG, acceptBackLog);
  }
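The fields read above (tcpNoDelay, soLinger, acceptBackLog, and so on) are instance configuration. A hypothetical call site, sketched under the assumption of a standard NIO setup:

// sketch only; the surrounding server class and its initializer are assumptions
ServerBootstrap bootstrap = new ServerBootstrap()
    .group(new NioEventLoopGroup(1), new NioEventLoopGroup())
    .channel(NioServerSocketChannel.class)
    .childHandler(new MyChannelInitializer()); // hypothetical initializer
applyConnectionOptions(bootstrap);             // apply the socket options above
bootstrap.bind(port).syncUninterruptibly();    // port assumed configured elsewhere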
Example #5
  private ServerBootstrap createBootstrap() {
    final ServerBootstrap boot =
        createBootstrapOfType(
            mTachyonConf.getEnum(Constants.WORKER_NETWORK_NETTY_CHANNEL, ChannelType.class));

    // use pooled buffers
    boot.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    boot.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);

    // set write-buffer watermarks.
    // these are the defaults, but it is recommended to set them explicitly in
    // case the defaults change in a future Netty release.
    boot.childOption(
        ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK,
        (int) mTachyonConf.getBytes(Constants.WORKER_NETWORK_NETTY_WATERMARK_HIGH));
    boot.childOption(
        ChannelOption.WRITE_BUFFER_LOW_WATER_MARK,
        (int) mTachyonConf.getBytes(Constants.WORKER_NETWORK_NETTY_WATERMARK_LOW));

    // additional Netty socket-option buffer settings can be tuned through
    // configuration properties, e.g.:
    // tachyon.worker.network.netty.backlog=50
    // tachyon.worker.network.netty.buffer.send=64KB
    // tachyon.worker.network.netty.buffer.receive=64KB
    if (mTachyonConf.containsKey(Constants.WORKER_NETWORK_NETTY_BACKLOG)) {
      boot.option(
          ChannelOption.SO_BACKLOG, mTachyonConf.getInt(Constants.WORKER_NETWORK_NETTY_BACKLOG));
    }
    if (mTachyonConf.containsKey(Constants.WORKER_NETWORK_NETTY_BUFFER_SEND)) {
      boot.option(
          ChannelOption.SO_SNDBUF,
          (int) mTachyonConf.getBytes(Constants.WORKER_NETWORK_NETTY_BUFFER_SEND));
    }
    if (mTachyonConf.containsKey(Constants.WORKER_NETWORK_NETTY_BUFFER_RECEIVE)) {
      boot.option(
          ChannelOption.SO_RCVBUF,
          (int) mTachyonConf.getBytes(Constants.WORKER_NETWORK_NETTY_BUFFER_RECEIVE));
    }
    return boot;
  }
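createBootstrapOfType(...) is not shown; it presumably maps the configured ChannelType to a transport. A rough sketch covering only the NIO case (the EPOLL branch and thread sizing are omitted assumptions):

private ServerBootstrap createBootstrapOfType(ChannelType type) {
  // sketch: a real implementation would also handle ChannelType.EPOLL,
  // dedicated thread factories, and configurable thread counts
  ServerBootstrap boot = new ServerBootstrap();
  boot.group(new NioEventLoopGroup(1), new NioEventLoopGroup())
      .channel(NioServerSocketChannel.class);
  return boot;
}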
Example #6
    public void run() {
      // construct boss and worker groups (default thread count is 2 * number of cores)

      // UDP would be a poor fit here, since messages may be dropped

      EventLoopGroup bossGroup = new NioEventLoopGroup();
      EventLoopGroup workerGroup = new NioEventLoopGroup();

      try {
        ServerBootstrap b = new ServerBootstrap();
        bootstrap.put(conf.getMgmtPort(), b);

        b.group(bossGroup, workerGroup);
        b.channel(NioServerSocketChannel.class);
        b.option(ChannelOption.SO_BACKLOG, 100);
        b.childOption(ChannelOption.TCP_NODELAY, true);
        b.childOption(ChannelOption.SO_KEEPALIVE, true);
        // b.option(ChannelOption.MESSAGE_SIZE_ESTIMATOR);

        boolean compressComm = false;
        b.childHandler(new ManagementInitializer(compressComm));

        // Start the server.

        logger.info(
            "Starting mgmt " + conf.getNodeId() + ", listening on port = " + conf.getMgmtPort());
        ChannelFuture f = b.bind(conf.getMgmtPort()).syncUninterruptibly();

        // block until the server socket is closed.
        f.channel().closeFuture().sync();
      } catch (Exception ex) {
        // most likely from bind() or closeFuture().sync()
        logger.error("Failed to set up management handler.", ex);
      } finally {
        // Shut down all event loops to terminate all threads.
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
      }
    }
Example #7
  public synchronized void start(String host, int port, final boolean sasl) throws Exception {
    this.host = host;
    this.port = port;
    this.sasl = sasl;

    if (channelClazz != null) {
      // Already started
      return;
    }

    int threadsToUse = Runtime.getRuntime().availableProcessors() * 3;
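    // (three I/O threads per available core is this example's sizing heuristic)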
    channelClazz = NioServerSocketChannel.class;
    eventLoopGroup =
        new NioEventLoopGroup(
            threadsToUse,
            new SimpleServerThreadFactory(
                "simple-server", true, Thread.currentThread().getContextClassLoader()));

    bootstrap = new ServerBootstrap();
    bootstrap.group(eventLoopGroup);
    bootstrap.channel(channelClazz);

    ChannelInitializer<Channel> factory =
        new ChannelInitializer<Channel>() {
          @Override
          public void initChannel(Channel channel) throws Exception {
            ChannelPipeline pipeline = channel.pipeline();
            pipeline.addLast("amqp-handler", new ProtocolDecoder());
          }
        };
    bootstrap.childHandler(factory);

    bootstrap
        .option(ChannelOption.SO_REUSEADDR, true)
        .childOption(ChannelOption.SO_REUSEADDR, true)
        .childOption(ChannelOption.SO_KEEPALIVE, true)
        // .childOption(ChannelOption.AUTO_READ, false)
        .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);

    channelGroup =
        new DefaultChannelGroup("activemq-accepted-channels", GlobalEventExecutor.INSTANCE);

    serverChannelGroup =
        new DefaultChannelGroup("activemq-acceptor-channels", GlobalEventExecutor.INSTANCE);

    SocketAddress address = new InetSocketAddress(host, port);
    Channel serverChannel = bootstrap.bind(address).syncUninterruptibly().channel();
    serverChannelGroup.add(serverChannel);
  }
Example #8
  public void run() throws Exception {
    // Configure the server.
    EventLoopGroup bossGroup = new NioEventLoopGroup();
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
      ServerBootstrap b = new ServerBootstrap();
      b.option(ChannelOption.SO_BACKLOG, 1024);
      b.group(bossGroup, workerGroup)
          .channel(NioServerSocketChannel.class)
          .childHandler(new SpdyServerInitializer());

      Channel ch = b.bind(port).sync().channel();
      ch.closeFuture().sync();
    } finally {
      bossGroup.shutdownGracefully();
      workerGroup.shutdownGracefully();
    }
  }
Example #9
  /** Start the master. */
  public static void startMaster() {
    EventLoopGroup bossGroup = new NioEventLoopGroup();
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
      ServerBootstrap bootstrap = new ServerBootstrap();
      bootstrap.group(bossGroup, workerGroup);
      bootstrap.channel(NioServerSocketChannel.class);
      bootstrap.option(ChannelOption.SO_BACKLOG, 128);
      bootstrap.childOption(ChannelOption.SO_KEEPALIVE, true);
      bootstrap.childHandler(new MasterChannelInitializer());

      bootstrap.bind(Constant.NOTIFY_PORT).sync();
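      // bind() returns once the port is bound; the event loop groups keep the
      // server running after this method returns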
    } catch (InterruptedException e) {
      log.error("interrupt", e);
      workerGroup.shutdownGracefully();
      bossGroup.shutdownGracefully();
    }
  }
Example #10
  public void run() {
    EventLoopGroup bossGroup = new NioEventLoopGroup(1);
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
      ServerBootstrap b = new ServerBootstrap();
      b.option(ChannelOption.SO_BACKLOG, 1024);
      b.group(bossGroup, workerGroup)
          .channel(NioServerSocketChannel.class)
          .handler(new LoggingHandler(LogLevel.INFO))
          .childHandler(initializer);

      channel = b.bind(address).sync().channel();

      channel.closeFuture().sync();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // restore the interrupt status
    } finally {
      bossGroup.shutdownGracefully();
      workerGroup.shutdownGracefully();
    }
  }
Example #11
  public void bind(int port) throws Exception {

    EventLoopGroup bossGroup = new NioEventLoopGroup();
    EventLoopGroup workGroup = new NioEventLoopGroup();

    try {

      ServerBootstrap b = new ServerBootstrap();
      b.group(bossGroup, workGroup);
      b.channel(NioServerSocketChannel.class);
      b.option(ChannelOption.SO_BACKLOG, 1024);
      b.childHandler(new ChildChannelHandler());

      // bind the port
      ChannelFuture f = b.bind(port).sync();
      // wait until the server socket is closed
      f.channel().closeFuture().sync();

    } finally {
      bossGroup.shutdownGracefully();
      workGroup.shutdownGracefully();
    }
  }
Example #12
  public synchronized void start() throws Exception {
    if (channelClazz != null) {
      // Already started
      return;
    }

    if (useInvm) {
      channelClazz = LocalServerChannel.class;
      eventLoopGroup = new LocalEventLoopGroup();
    } else {
      int threadsToUse;

      if (nioRemotingThreads == -1) {
        // Default to number of cores * 3

        threadsToUse = Runtime.getRuntime().availableProcessors() * 3;
      } else {
        threadsToUse = this.nioRemotingThreads;
      }
      channelClazz = NioServerSocketChannel.class;
      eventLoopGroup =
          new NioEventLoopGroup(
              threadsToUse,
              new ActiveMQThreadFactory("activemq-netty-threads", true, getThisClassLoader()));
    }

    bootstrap = new ServerBootstrap();
    bootstrap.group(eventLoopGroup);
    bootstrap.channel(channelClazz);
    final SSLContext context;
    if (sslEnabled) {
      try {
        if (keyStorePath == null
            && TransportConstants.DEFAULT_TRUSTSTORE_PROVIDER.equals(keyStoreProvider))
          throw new IllegalArgumentException(
              "If \""
                  + TransportConstants.SSL_ENABLED_PROP_NAME
                  + "\" is true then \""
                  + TransportConstants.KEYSTORE_PATH_PROP_NAME
                  + "\" must be non-null "
                  + "unless an alternative \""
                  + TransportConstants.KEYSTORE_PROVIDER_PROP_NAME
                  + "\" has been specified.");
        context =
            SSLSupport.createContext(
                keyStoreProvider,
                keyStorePath,
                keyStorePassword,
                trustStoreProvider,
                trustStorePath,
                trustStorePassword);
      } catch (Exception e) {
        IllegalStateException ise =
            new IllegalStateException("Unable to create NettyAcceptor for " + host + ":" + port);
        ise.initCause(e);
        throw ise;
      }
    } else {
      context = null; // Unused
    }

    final AtomicBoolean warningPrinted = new AtomicBoolean(false);

    ChannelInitializer<Channel> factory =
        new ChannelInitializer<Channel>() {
          @Override
          public void initChannel(Channel channel) throws Exception {
            ChannelPipeline pipeline = channel.pipeline();
            if (sslEnabled) {
              SSLEngine engine = context.createSSLEngine();

              engine.setUseClientMode(false);

              if (needClientAuth) engine.setNeedClientAuth(true);

              // setting the enabled cipher suites resets the enabled protocols, so we
              // save the enabled protocols first; after the custom cipher suites are
              // applied, we restore the protocols if no custom protocol is specified
              String[] originalProtocols = engine.getEnabledProtocols();

              if (enabledCipherSuites != null) {
                try {
                  engine.setEnabledCipherSuites(
                      SSLSupport.parseCommaSeparatedListIntoArray(enabledCipherSuites));
                } catch (IllegalArgumentException e) {
                  ActiveMQServerLogger.LOGGER.invalidCipherSuite(
                      SSLSupport.parseArrayIntoCommandSeparatedList(
                          engine.getSupportedCipherSuites()));
                  throw e;
                }
              }

              if (enabledProtocols != null) {
                try {
                  engine.setEnabledProtocols(
                      SSLSupport.parseCommaSeparatedListIntoArray(enabledProtocols));
                } catch (IllegalArgumentException e) {
                  ActiveMQServerLogger.LOGGER.invalidProtocol(
                      SSLSupport.parseArrayIntoCommandSeparatedList(
                          engine.getSupportedProtocols()));
                  throw e;
                }
              } else {
                engine.setEnabledProtocols(originalProtocols);
              }

              // Strip "SSLv3" from the current enabled protocols to address the POODLE exploit.
              // This recommendation came from
              // http://www.oracle.com/technetwork/java/javase/documentation/cve-2014-3566-2342133.html
              String[] protocols = engine.getEnabledProtocols();
              Set<String> set = new HashSet<>();
              for (String s : protocols) {
                if (s.equals("SSLv3") || s.equals("SSLv2Hello")) {
                  if (!warningPrinted.get()) {
                    ActiveMQServerLogger.LOGGER.disallowedProtocol(s);
                  }
                  continue;
                }
                set.add(s);
              }
              warningPrinted.set(true);
              engine.setEnabledProtocols(set.toArray(new String[0]));

              SslHandler handler = new SslHandler(engine);

              pipeline.addLast("ssl", handler);
            }
            pipeline.addLast(protocolHandler.getProtocolDecoder());
          }
        };
    bootstrap.childHandler(factory);

    // connection options
    bootstrap.childOption(ChannelOption.TCP_NODELAY, tcpNoDelay);
    if (tcpReceiveBufferSize != -1) {
      bootstrap.childOption(ChannelOption.SO_RCVBUF, tcpReceiveBufferSize);
    }
    if (tcpSendBufferSize != -1) {
      bootstrap.childOption(ChannelOption.SO_SNDBUF, tcpSendBufferSize);
    }
    if (backlog != -1) {
      bootstrap.option(ChannelOption.SO_BACKLOG, backlog);
    }
    bootstrap.option(ChannelOption.SO_REUSEADDR, true);
    bootstrap.childOption(ChannelOption.SO_REUSEADDR, true);
    bootstrap.childOption(ChannelOption.SO_KEEPALIVE, true);
    bootstrap.childOption(ChannelOption.ALLOCATOR, PartialPooledByteBufAllocator.INSTANCE);
    channelGroup =
        new DefaultChannelGroup("activemq-accepted-channels", GlobalEventExecutor.INSTANCE);

    serverChannelGroup =
        new DefaultChannelGroup("activemq-acceptor-channels", GlobalEventExecutor.INSTANCE);

    if (httpUpgradeEnabled) {
      // the channel will be bound by the web container and handed over after a
      // successful HTTP Upgrade handshake
    } else {
      startServerChannels();

      paused = false;

      if (notificationService != null) {
        TypedProperties props = new TypedProperties();
        props.putSimpleStringProperty(
            new SimpleString("factory"), new SimpleString(NettyAcceptorFactory.class.getName()));
        props.putSimpleStringProperty(new SimpleString("host"), new SimpleString(host));
        props.putIntProperty(new SimpleString("port"), port);
        Notification notification =
            new Notification(null, CoreNotificationType.ACCEPTOR_STARTED, props);
        notificationService.sendNotification(notification);
      }

      if (batchDelay > 0) {
        flusher = new BatchFlusher();

        batchFlusherFuture =
            scheduledThreadPool.scheduleWithFixedDelay(
                flusher, batchDelay, batchDelay, TimeUnit.MILLISECONDS);
      }

      ActiveMQServerLogger.LOGGER.startedAcceptor(host, port, protocolsString);
    }
  }