/**
 * @author Jeremy Prime
 * @since 2.0.0
 */
public class Pac4jConfigurationFactory {

  private static final Logger LOG = LoggerFactory.getLogger(Pac4jConfigurationFactory.class);
  public static final String AUTHORIZER_ADMIN = "admin";
  public static final String AUTHORIZER_CUSTOM = "custom";

  public static Config configFor(final JsonObject jsonConf) {
    final String baseUrl = jsonConf.getString("baseUrl");
    final Clients clients =
        new Clients(
            baseUrl + "/callback",
            // oAuth clients
            facebookClient(jsonConf),
            twitterClient());
    final Config config = new Config(clients);
    config.addAuthorizer(AUTHORIZER_ADMIN, new RequireAnyRoleAuthorizer("ROLE_ADMIN"));
    config.addAuthorizer(AUTHORIZER_CUSTOM, new CustomAuthorizer());
    LOG.info("Config created " + config.toString());
    return config;
  }

  public static FacebookClient facebookClient(final JsonObject jsonConf) {
    final String fbId = jsonConf.getString("fbId");
    final String fbSecret = jsonConf.getString("fbSecret");
    return new FacebookClient(fbId, fbSecret);
  }

  public static TwitterClient twitterClient() {
    return new TwitterClient(
        "K9dtF7hwOweVHMxIr8Qe4gshl", "9tlc3TBpl5aX47BGGgMNC8glDqVYi8mJKHG6LiWYVD4Sh1F9Oj");
  }
}
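A configuration sketch (an assumption, not part of the original example): it shows the JSON keys the factory reads above (baseUrl, fbId, fbSecret) with placeholder values.

// Placeholder values; only the key names are taken from Pac4jConfigurationFactory.
JsonObject jsonConf = new JsonObject()
    .put("baseUrl", "http://localhost:8080")
    .put("fbId", "my-facebook-app-id")
    .put("fbSecret", "my-facebook-app-secret");
Config config = Pac4jConfigurationFactory.configFor(jsonConf);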
Example #2
/**
 * handle connection quit
 *
 * <p>There is not much point in encapsulating this, but it's useful for the connection pool.
 *
 * <p>This operation does not throw any error; it just closes the connection in the end.
 *
 * @author <a href="http://oss.lehmann.cx/">Alexander Lehmann</a>
 */
class SMTPQuit {

  private static final Logger log = LoggerFactory.getLogger(SMTPQuit.class);

  private final SMTPConnection connection;

  private final Handler<Void> resultHandler;

  SMTPQuit(SMTPConnection connection, Handler<Void> resultHandler) {
    this.connection = connection;
    this.resultHandler = resultHandler;
  }

  void start() {
    connection.setErrorHandler(
        th -> {
          log.debug("QUIT failed, ignoring exception", th);
          resultHandler.handle(null);
        });
    connection.write(
        "QUIT",
        message -> {
          log.debug("QUIT result: " + message);
          if (!StatusCode.isStatusOk(message)) {
            log.warn("quit failed: " + message);
          }
          resultHandler.handle(null);
        });
  }
}
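A usage sketch, assuming an already-open SMTPConnection named conn from the surrounding mail-client code (the constructor is package-private, so this would live in the same package):

// conn is an assumed, already-connected SMTPConnection.
SMTPQuit quitCommand =
    new SMTPQuit(
        conn,
        v -> {
          // the connection can now be returned to the pool or shut down
        });
quitCommand.start();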
  protected JolokiaHandler(Map<String, String> configParameters, Restrictor restrictor) {
    log = new JolokiaLogHandler(LoggerFactory.getLogger(JolokiaHandler.class));
    Configuration config = initConfig(configParameters);
    if (restrictor == null) {
      restrictor =
          createRestrictor(NetworkUtil.replaceExpression(config.get(ConfigKey.POLICY_LOCATION)));
    }
    log.info("Using restrictor " + restrictor);
    BackendManager backendManager = new BackendManager(config, log, restrictor);
    requestHandler = new HttpRequestHandler(config, backendManager, log);
  }
Example #4
public class IdAndWorkingDir {

  private static final String JAVA_MAIN_DIR = "src/main/java/";
  private final String verticleId;
  private final Logger logger = LoggerFactory.getLogger(this.getClass());

  public IdAndWorkingDir(final Class clazz) {
    this.verticleId = clazz.getName();
    final String vertxWorkingDir = buildVertxWorkingDir(JAVA_MAIN_DIR, clazz);
    logger.info("vertxWorkingDir: " + vertxWorkingDir);
    System.setProperty("vertx.cwd", vertxWorkingDir);
  }

  public String getVerticleId() {
    return verticleId;
  }

  private String buildVertxWorkingDir(final String prefix, final Class clazz) {
    return prefix + clazz.getPackage().getName().replace(".", "/");
  }
}
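A usage sketch with a hypothetical verticle class MyVerticle located under src/main/java:

// Sets the vertx.cwd system property as a side effect of construction.
IdAndWorkingDir idAndWorkingDir = new IdAndWorkingDir(MyVerticle.class);
String verticleId = idAndWorkingDir.getVerticleId(); // e.g. "com.example.MyVerticle"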
Example #5
public class Pong extends AbstractVerticle {
  private final Logger log = LoggerFactory.getLogger(getClass());

  @Override
  public void start() {
    getVertx()
        .eventBus()
        .consumer(
            "ping-pong",
            message -> {
              log.info(String.format("ping-pong receive: %s", message));
              message.reply("pong");
            })
        .completionHandler(
            event -> {
              if (event.succeeded()) log.info("complete handler");
              else log.info("failed");
            });
    log.info("Pong started");
  }
}
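A possible counterpart verticle (an assumption, not part of the original example) that sends a message to the same "ping-pong" address and logs the reply:

public class Ping extends AbstractVerticle {
  private final Logger log = LoggerFactory.getLogger(getClass());

  @Override
  public void start() {
    // Send a single ping and log the "pong" reply produced by the consumer above.
    getVertx()
        .eventBus()
        .send(
            "ping-pong",
            "ping",
            reply -> {
              if (reply.succeeded()) log.info("received: " + reply.result().body());
              else log.error("no reply", reply.cause());
            });
  }
}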
/** @author Danila Ponomarenko */
@Component(RootCrawler.BEAN_NAME)
public final class RootCrawler implements Publisher<String> {
  private static final Logger logger = LoggerFactory.getLogger(RootCrawler.class);

  public static final String BEAN_NAME = "rootCrawler";

  @Autowired private DataProviderClient client;
  @Autowired private EventBus eventBus;

  public void crawl() {
    client
        .getAgencyTags()
        .setHandler(
            r -> {
              if (r.succeeded()) {
                process(r.result());
              }
            });
  }

  private static final String AGENCIES_ADDRESS = "crawler.agencies";

  @Override
  public void subscribe(@NotNull Consumer<String> agencyConsumer) {
    eventBus.consumer(
        RootCrawler.AGENCIES_ADDRESS,
        r -> {
          agencyConsumer.accept((String) r.body());
        });
  }

  private void process(@NotNull Collection<String> tags) {
    logger.info(tags.size() + " agencies loaded");
    for (String tag : tags) {
      eventBus.send(AGENCIES_ADDRESS, tag);
    }
  }
}
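A usage sketch, assuming the bean is obtained from the Spring context: register a consumer first, then trigger the crawl so the loaded agency tags flow over the event bus.

// rootCrawler is assumed to be injected via Spring (see the BEAN_NAME constant above).
rootCrawler.subscribe(tag -> System.out.println("agency tag: " + tag));
rootCrawler.crawl();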
public class RequestBodyParamInjector implements AnnotatedParamInjector<RequestBody> {

  private static final Logger log = LoggerFactory.getLogger(RequestBodyParamInjector.class);

  private Map<String, PayloadMarshaller> marshallers;

  public RequestBodyParamInjector(Map<String, PayloadMarshaller> marshallers) {
    this.marshallers = marshallers;
  }

  @Override
  public Object resolve(RoutingContext context, RequestBody annotation, Class<?> resultClass) {
    String body = context.getBodyAsString();
    if (resultClass.equals(String.class)) {
      return body;
    }
    String contentType = ContentTypeProcessor.getContentType(context);
    if (contentType == null) {
      log.error("No suitable Content-Type found, request body can't be read");
      return null;
    }
    if (contentType.equals("application/json") && resultClass.equals(JsonObject.class)) {
      return new JsonObject(body);
    }
    PayloadMarshaller marshaller = marshallers.get(contentType);
    if (marshaller == null) {
      log.error(
          "No marshaller found for Content-Type : " + contentType + ", request body can't be read");
      return null;
    }
    try {
      return marshaller.unmarshallPayload(body, resultClass);
    } catch (MarshallingException me) {
      context.fail(me);
      return null;
    }
  }
}
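A construction sketch; the PayloadMarshaller instance is assumed to be provided elsewhere by the application, keyed by the Content-Type it handles.

// jsonMarshaller is a hypothetical PayloadMarshaller implementation.
Map<String, PayloadMarshaller> marshallers = new HashMap<>();
marshallers.put("application/json", jsonMarshaller);
RequestBodyParamInjector injector = new RequestBodyParamInjector(marshallers);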
Example #8
public class PaginationProcessor extends NoopAfterAllProcessor implements Processor {

  private static final Logger log = LoggerFactory.getLogger(PaginationProcessor.class);

  @Override
  public void preHandle(RoutingContext context) {
    context.data().put(PaginationContext.DATA_ATTR, PaginationContext.fromContext(context));
    context.next();
  }

  @Override
  public void postHandle(RoutingContext context) {
    PaginationContext pageContext =
        (PaginationContext) context.data().get(PaginationContext.DATA_ATTR);
    String linkHeader = pageContext.buildLinkHeader(context.request());
    if (linkHeader != null) {
      context.response().headers().add(HttpHeaders.LINK, linkHeader);
    } else {
      log.warn("You did not set the total count on PaginationContext, response won't be paginated");
    }
    context.next();
  }
}
Example #9
  private Twinkle() {
    final Logger logger = LoggerFactory.getLogger(Twinkle.class);

    final Vertx vertx = Vertx.vertx(vertxOptions());
    final HttpServer httpServer = vertx.createHttpServer(httpServerOptions());
    final Router router = Router.router(vertx);

    router.get("/ping").handler(context -> context.response().end("pong"));
    router.mountSubRouter("/stats", new Metrics(vertx, httpServer).router());

    httpServer
        .requestHandler(router::accept)
        .listen(
            8080,
            result -> {
              if (result.succeeded()) {
                logger.info("Twinkle started");
              } else {
                logger.error("Twinkle failed to start", result.cause());
                vertx.close(shutdown -> logger.info("Twinkle shut down"));
              }
            });
  }
Example #10
/**
 * This class is optimised for performance when used on the same event loop that it was passed to
 * the handler with. However, it can be used safely from other threads.
 *
 * <p>The internal state is protected using the synchronized keyword. If always used on the same
 * event loop, then we benefit from biased locking which makes the overhead of synchronized near
 * zero.
 *
 * @author <a href="http://tfox.org">Tim Fox</a>
 */
public class NetSocketImpl extends ConnectionBase implements NetSocket {

  private static final Logger log = LoggerFactory.getLogger(NetSocketImpl.class);

  private final String writeHandlerID;
  private final MessageConsumer registration;
  private final SSLHelper helper;
  private final boolean client;
  private Object metric;
  private Handler<Buffer> dataHandler;
  private Handler<Void> endHandler;
  private Handler<Void> drainHandler;
  private Buffer pendingData;
  private boolean paused = false;
  private ChannelFuture writeFuture;

  public NetSocketImpl(
      VertxInternal vertx,
      Channel channel,
      ContextImpl context,
      SSLHelper helper,
      boolean client,
      TCPMetrics metrics,
      Object metric) {
    super(vertx, channel, context, metrics);
    this.helper = helper;
    this.client = client;
    this.writeHandlerID = UUID.randomUUID().toString();
    this.metric = metric;
    Handler<Message<Buffer>> writeHandler = msg -> write(msg.body());
    registration = vertx.eventBus().<Buffer>localConsumer(writeHandlerID).handler(writeHandler);
  }

  protected synchronized void setMetric(Object metric) {
    this.metric = metric;
  }

  @Override
  protected synchronized Object metric() {
    return metric;
  }

  @Override
  public String writeHandlerID() {
    return writeHandlerID;
  }

  @Override
  public NetSocket write(Buffer data) {
    ByteBuf buf = data.getByteBuf();
    write(buf);
    return this;
  }

  @Override
  public NetSocket write(String str) {
    write(Unpooled.copiedBuffer(str, CharsetUtil.UTF_8));
    return this;
  }

  @Override
  public NetSocket write(String str, String enc) {
    if (enc == null) {
      write(str);
    } else {
      write(Unpooled.copiedBuffer(str, Charset.forName(enc)));
    }
    return this;
  }

  @Override
  public synchronized NetSocket handler(Handler<Buffer> dataHandler) {
    this.dataHandler = dataHandler;
    return this;
  }

  @Override
  public synchronized NetSocket pause() {
    if (!paused) {
      paused = true;
      doPause();
    }
    return this;
  }

  @Override
  public synchronized NetSocket resume() {
    if (paused) {
      paused = false;
      if (pendingData != null) {
        // Send empty buffer to trigger sending of pending data
        context.runOnContext(v -> handleDataReceived(Buffer.buffer()));
      }
      doResume();
    }
    return this;
  }

  @Override
  public NetSocket setWriteQueueMaxSize(int maxSize) {
    doSetWriteQueueMaxSize(maxSize);
    return this;
  }

  @Override
  public boolean writeQueueFull() {
    return isNotWritable();
  }

  @Override
  public synchronized NetSocket endHandler(Handler<Void> endHandler) {
    this.endHandler = endHandler;
    return this;
  }

  @Override
  public synchronized NetSocket drainHandler(Handler<Void> drainHandler) {
    this.drainHandler = drainHandler;
    vertx.runOnContext(
        v ->
            callDrainHandler()); // If the channel is already drained, we want to call it
                                 // immediately
    return this;
  }

  @Override
  public NetSocket sendFile(String filename, long offset, long length) {
    return sendFile(filename, offset, length, null);
  }

  @Override
  public NetSocket sendFile(
      String filename, long offset, long length, final Handler<AsyncResult<Void>> resultHandler) {
    File f = vertx.resolveFile(filename);
    if (f.isDirectory()) {
      throw new IllegalArgumentException("filename must point to a file and not to a directory");
    }
    RandomAccessFile raf = null;
    try {
      raf = new RandomAccessFile(f, "r");
      ChannelFuture future =
          super.sendFile(raf, Math.min(offset, f.length()), Math.min(length, f.length() - offset));
      if (resultHandler != null) {
        future.addListener(
            fut -> {
              final AsyncResult<Void> res;
              if (future.isSuccess()) {
                res = Future.succeededFuture();
              } else {
                res = Future.failedFuture(future.cause());
              }
              vertx.runOnContext(v -> resultHandler.handle(res));
            });
      }
    } catch (IOException e) {
      try {
        if (raf != null) {
          raf.close();
        }
      } catch (IOException ignore) {
      }
      if (resultHandler != null) {
        vertx.runOnContext(v -> resultHandler.handle(Future.failedFuture(e)));
      } else {
        log.error("Failed to send file", e);
      }
    }
    return this;
  }

  @Override
  public SocketAddress remoteAddress() {
    return super.remoteAddress();
  }

  public SocketAddress localAddress() {
    return super.localAddress();
  }

  @Override
  public synchronized NetSocket exceptionHandler(Handler<Throwable> handler) {
    this.exceptionHandler = handler;
    return this;
  }

  @Override
  public synchronized NetSocket closeHandler(Handler<Void> handler) {
    this.closeHandler = handler;
    return this;
  }

  @Override
  public synchronized void close() {
    if (writeFuture != null) {
      // Close after all data is written
      writeFuture.addListener(ChannelFutureListener.CLOSE);
      channel.flush();
    } else {
      super.close();
    }
  }

  @Override
  public synchronized NetSocket upgradeToSsl(final Handler<Void> handler) {
    SslHandler sslHandler = channel.pipeline().get(SslHandler.class);
    if (sslHandler == null) {
      sslHandler = helper.createSslHandler(vertx);
      channel.pipeline().addFirst("ssl", sslHandler);
    }
    sslHandler
        .handshakeFuture()
        .addListener(
            future ->
                context.executeFromIO(
                    () -> {
                      if (future.isSuccess()) {
                        handler.handle(null);
                      } else {
                        log.error(future.cause());
                      }
                    }));
    return this;
  }

  @Override
  public boolean isSsl() {
    return channel.pipeline().get(SslHandler.class) != null;
  }

  @Override
  public X509Certificate[] peerCertificateChain() throws SSLPeerUnverifiedException {
    return getPeerCertificateChain();
  }

  @Override
  protected synchronized void handleInterestedOpsChanged() {
    checkContext();
    callDrainHandler();
  }

  @Override
  public void end() {
    close();
  }

  @Override
  protected synchronized void handleClosed() {
    checkContext();
    if (endHandler != null) {
      endHandler.handle(null);
    }
    super.handleClosed();
    if (vertx.eventBus() != null) {
      registration.unregister();
    }
  }

  synchronized void handleDataReceived(Buffer data) {
    checkContext();
    if (paused) {
      if (pendingData == null) {
        pendingData = data.copy();
      } else {
        pendingData.appendBuffer(data);
      }
      return;
    }
    if (pendingData != null) {
      data = pendingData.appendBuffer(data);
    }
    reportBytesRead(data.length());
    if (dataHandler != null) {
      dataHandler.handle(data);
    }
  }

  private void write(ByteBuf buff) {
    reportBytesWritten(buff.readableBytes());
    writeFuture = super.writeToChannel(buff);
  }

  private synchronized void callDrainHandler() {
    if (drainHandler != null) {
      if (!writeQueueFull()) {
        drainHandler.handle(null);
      }
    }
  }
}
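NetSocketImpl is internal; applications reach it through the NetSocket interface. A small echo-server sketch using the public API exercised above (vertx and log are assumed to be in scope):

vertx
    .createNetServer()
    .connectHandler(
        socket -> {
          // Echo every received buffer straight back to the client.
          socket.handler(socket::write);
          socket.closeHandler(v -> log.info("socket closed"));
        })
    .listen(1234);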
/** @author <a href="mailto:[email protected]">Nick Scavelli</a> */
public class JDBCClientImpl implements JDBCClient {

  private static final Logger log = LoggerFactory.getLogger(JDBCClient.class);

  private static final String DS_LOCAL_MAP_NAME = "__vertx.JDBCClient.datasources";

  private final Vertx vertx;
  private final DataSourceHolder holder;

  // We use this executor to execute getConnection requests
  private final ExecutorService exec;
  private final DataSource ds;

  /*
  Create client with specific datasource
   */
  public JDBCClientImpl(Vertx vertx, DataSource dataSource) {
    Objects.requireNonNull(vertx);
    Objects.requireNonNull(dataSource);
    this.vertx = vertx;
    this.holder = new DataSourceHolder(dataSource);
    this.exec = holder.exec();
    this.ds = dataSource;
  }

  /*
  Create client with shared datasource
   */
  public JDBCClientImpl(Vertx vertx, JsonObject config, String datasourceName) {
    Objects.requireNonNull(vertx);
    Objects.requireNonNull(config);
    Objects.requireNonNull(datasourceName);
    this.vertx = vertx;
    this.holder = lookupHolder(datasourceName, config);
    this.exec = holder.exec();
    this.ds = holder.ds();
  }

  @Override
  public void close() {
    holder.close();
  }

  @Override
  public JDBCClient getConnection(Handler<AsyncResult<SQLConnection>> handler) {
    Context ctx = vertx.getOrCreateContext();
    exec.execute(
        () -> {
          Future<SQLConnection> res = Future.future();
          try {
            /*
            This can block until a connection is free.
            We don't want to do that while running on a worker as we can enter a deadlock situation as the worker
            might have obtained a connection, and won't release it until it is run again
            There is a general principle here:
            *User code* should be executed on a worker and can potentially block, it's up to the *user* to deal with
            deadlocks that might occur there.
            If the *service code* internally blocks waiting for a resource that might be obtained by *user code*, then
            this can cause deadlock, so the service should ensure it never does this, by executing such code
            (e.g. getConnection) on a different thread to the worker pool.
            We don't want to use the vert.x internal pool for this as the threads might end up all blocked preventing
            other important operations from occurring (e.g. async file access)
            */
            Connection conn = ds.getConnection();
            SQLConnection sconn = new JDBCConnectionImpl(vertx, conn);
            res.complete(sconn);
          } catch (SQLException e) {
            res.fail(e);
          }
          ctx.runOnContext(v -> res.setHandler(handler));
        });
    return this;
  }

  private DataSourceHolder lookupHolder(String datasourceName, JsonObject config) {
    synchronized (vertx) {
      LocalMap<String, DataSourceHolder> map = vertx.sharedData().getLocalMap(DS_LOCAL_MAP_NAME);
      DataSourceHolder theHolder = map.get(datasourceName);
      if (theHolder == null) {
        theHolder = new DataSourceHolder(config, () -> removeFromMap(map, datasourceName));
        map.put(datasourceName, theHolder);
      } else {
        theHolder.incRefCount();
      }
      return theHolder;
    }
  }

  private void removeFromMap(LocalMap<String, DataSourceHolder> map, String dataSourceName) {
    synchronized (vertx) {
      map.remove(dataSourceName);
      if (map.isEmpty()) {
        map.close();
      }
    }
  }

  private ClassLoader getClassLoader() {
    ClassLoader tccl = Thread.currentThread().getContextClassLoader();
    return tccl == null ? getClass().getClassLoader() : tccl;
  }

  private class DataSourceHolder implements Shareable {

    DataSourceProvider provider;
    JsonObject config;
    Runnable closeRunner;
    DataSource ds;
    ExecutorService exec;
    int refCount = 1;

    public DataSourceHolder(DataSource ds) {
      this.ds = ds;
    }

    public DataSourceHolder(JsonObject config, Runnable closeRunner) {
      this.config = config;
      this.closeRunner = closeRunner;
    }

    synchronized DataSource ds() {
      if (ds == null) {
        String providerClass = config.getString("provider_class");
        if (providerClass == null) {
          providerClass = DEFAULT_PROVIDER_CLASS;
        }
        try {
          Class clazz = getClassLoader().loadClass(providerClass);
          provider = (DataSourceProvider) clazz.newInstance();
          ds = provider.getDataSource(config);
        } catch (Exception e) {
          throw new RuntimeException(e);
        }
      }
      return ds;
    }

    synchronized ExecutorService exec() {
      if (exec == null) {
        exec =
            new ThreadPoolExecutor(
                1,
                1,
                1000L,
                TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<>(),
                (r -> new Thread(r, "vertx-jdbc-service-get-connection-thread")));
      }
      return exec;
    }

    synchronized void incRefCount() {
      refCount++;
    }

    synchronized void close() {
      if (--refCount == 0) {
        if (provider != null) {
          vertx.executeBlocking(
              future -> {
                try {
                  provider.close(ds);
                  future.complete();
                } catch (SQLException e) {
                  future.fail(e);
                }
              },
              null);
        }
        if (exec != null) {
          exec.shutdown();
        }
        if (closeRunner != null) {
          closeRunner.run();
        }
      }
    }
  }
}
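A usage sketch of the shared-datasource path; the configuration keys shown here are assumptions following the default provider's conventions, and createShared corresponds to the second constructor above.

JsonObject config = new JsonObject()
    .put("url", "jdbc:hsqldb:mem:test?shutdown=true")
    .put("driver_class", "org.hsqldb.jdbcDriver");

JDBCClient client = JDBCClient.createShared(vertx, config, "my-datasource");
client.getConnection(
    ar -> {
      if (ar.succeeded()) {
        SQLConnection connection = ar.result();
        connection.query(
            "SELECT 1 FROM INFORMATION_SCHEMA.SYSTEM_USERS",
            res -> {
              // ... use res.result() ...
              connection.close();
            });
      }
    });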
Example #12
/**
 * Handles HA
 *
 * <p>We compute failover and whether there is a quorum synchronously as we receive nodeAdded and
 * nodeRemoved events from the cluster manager.
 *
 * <p>It's vital that this is done synchronously as the cluster manager only guarantees that the set
 * of nodes retrieved from getNodes() is the same for each node in the cluster when processing the
 * exact same nodeAdded/nodeRemoved event.
 *
 * <p>As HA modules are deployed, if a quorum has been attained they are deployed immediately,
 * otherwise the deployment information is added to a list.
 *
 * <p>Periodically we check the value of attainedQuorum and if true we deploy any HA deploymentIDs
 * waiting for a quorum.
 *
 * <p>If false, we check if there are any HA deploymentIDs currently deployed, and if so undeploy
 * them, and add them to the list of deploymentIDs waiting for a quorum.
 *
 * <p>By doing this check periodically we can avoid race conditions resulting in modules being
 * deployed after a quorum has been lost, and without having to resort to exclusive locking which is
 * actually quite tricky here, and prone to deadlock.
 *
 * <p>We maintain a clustered map where the key is the node id and the value is some stringified
 * JSON which describes the group of the cluster and an array of the HA modules deployed on that
 * node.
 *
 * <p>There is an entry in the map for each node of the cluster.
 *
 * <p>When a node joins the cluster or an HA module is deployed or undeployed that entry is updated.
 *
 * <p>When a node leaves the cluster cleanly, it removes its own entry before leaving.
 *
 * <p>When the cluster manager sends us an event to say a node has left the cluster we check if its
 * entry in the cluster map is still there; if it is not, we infer a clean close has happened and no failover
 * will occur.
 *
 * <p>If the map entry is there it implies the node died suddenly. In that case each node of the
 * cluster must compute whether it is the failover node for the failed node.
 *
 * <p>First each node of the cluster determines whether it is in the same group as the failed node,
 * if not then it will not be a candidate for the failover node. Nodes in the cluster only failover
 * to other nodes in the same group.
 *
 * <p>If the node is in the same group then the node takes the UUID of the failed node, computes the
 * hash-code and chooses a node from the list of nodes in the cluster by taking the hash-code modulo
 * the number of nodes as an index to the list of nodes.
 *
 * <p>The cluster manager guarantees each node in the cluster sees the same set of nodes for each
 * membership event that is processed. Therefore it is guaranteed that each node in the cluster will
 * compute the same value. It is critical that any cluster manager implementation provides this
 * guarantee!
 *
 * <p>Once the value has been computed, it is compared to the current node, and if it is the same
 * the current node assumes failover for the failed node.
 *
 * <p>During failover the failover node deploys all the HA modules from the failed node, as
 * described in the JSON with the same values of config and instances.
 *
 * <p>Once failover is complete the failover node removes the cluster map entry for the failed node.
 *
 * <p>If the failover node itself fails while it is processing failover for another node, then this
 * is also checked by other nodes when they detect the failure of the second node.
 *
 * @author <a href="http://tfox.org">Tim Fox</a>
 */
public class HAManager {

  private static final Logger log = LoggerFactory.getLogger(HAManager.class);

  private static final String CLUSTER_MAP_NAME = "__vertx.haInfo";
  private static final long QUORUM_CHECK_PERIOD = 1000;

  private final VertxInternal vertx;
  private final DeploymentManager deploymentManager;
  private final ClusterManager clusterManager;
  private final int quorumSize;
  private final String group;
  private final JsonObject haInfo;
  private final Map<String, String> clusterMap;
  private final String nodeID;
  private final Queue<Runnable> toDeployOnQuorum = new ConcurrentLinkedQueue<>();
  private final boolean enabled;

  private long quorumTimerID;
  private volatile boolean attainedQuorum;
  private volatile FailoverCompleteHandler failoverCompleteHandler;
  private volatile FailoverCompleteHandler nodeCrashedHandler;
  private volatile boolean failDuringFailover;
  private volatile boolean stopped;
  private volatile boolean killed;

  public HAManager(
      VertxInternal vertx,
      DeploymentManager deploymentManager,
      ClusterManager clusterManager,
      int quorumSize,
      String group,
      boolean enabled) {
    this.vertx = vertx;
    this.deploymentManager = deploymentManager;
    this.clusterManager = clusterManager;
    this.quorumSize = enabled ? quorumSize : 0;
    this.group = enabled ? group : "__DISABLED__";
    this.enabled = enabled;
    this.haInfo = new JsonObject();
    haInfo.put("verticles", new JsonArray());
    haInfo.put("group", this.group);
    this.clusterMap = clusterManager.getSyncMap(CLUSTER_MAP_NAME);
    this.nodeID = clusterManager.getNodeID();
    clusterManager.nodeListener(
        new NodeListener() {
          @Override
          public void nodeAdded(String nodeID) {
            HAManager.this.nodeAdded(nodeID);
          }

          @Override
          public void nodeLeft(String leftNodeID) {
            HAManager.this.nodeLeft(leftNodeID);
          }
        });
    clusterMap.put(nodeID, haInfo.encode());
    quorumTimerID = vertx.setPeriodic(QUORUM_CHECK_PERIOD, tid -> checkHADeployments());
    // Call check quorum to compute whether we have an initial quorum
    synchronized (this) {
      checkQuorum();
    }
  }

  // Remove the information on the deployment from the cluster - this is called when an HA module is
  // undeployed
  public void removeFromHA(String depID) {
    Deployment dep = deploymentManager.getDeployment(depID);
    if (dep == null || !dep.deploymentOptions().isHa()) {
      return;
    }
    synchronized (haInfo) {
      JsonArray haMods = haInfo.getJsonArray("verticles");
      Iterator<Object> iter = haMods.iterator();
      while (iter.hasNext()) {
        Object obj = iter.next();
        JsonObject mod = (JsonObject) obj;
        if (mod.getString("dep_id").equals(depID)) {
          iter.remove();
        }
      }
      clusterMap.put(nodeID, haInfo.encode());
    }
  }

  public void addDataToAHAInfo(String key, JsonObject value) {
    synchronized (haInfo) {
      haInfo.put(key, value);
      clusterMap.put(nodeID, haInfo.encode());
    }
  }
  // Deploy an HA verticle
  public void deployVerticle(
      final String verticleName,
      DeploymentOptions deploymentOptions,
      final Handler<AsyncResult<String>> doneHandler) {
    if (attainedQuorum) {
      doDeployVerticle(verticleName, deploymentOptions, doneHandler);
    } else {
      log.info(
          "Quorum not attained. Deployment of verticle will be delayed until there's a quorum.");
      addToHADeployList(verticleName, deploymentOptions, doneHandler);
    }
  }

  public void stop() {
    if (!stopped) {
      if (clusterManager.isActive()) {

        clusterMap.remove(nodeID);
      }
      vertx.cancelTimer(quorumTimerID);
      stopped = true;
    }
  }

  public void simulateKill() {
    if (!stopped) {
      killed = true;
      clusterManager.leave(
          ar -> {
            if (ar.failed()) {
              log.error("Failed to leave cluster", ar.cause());
            }
          });
      vertx.cancelTimer(quorumTimerID);
      stopped = true;
    }
  }

  public void setFailoverCompleteHandler(FailoverCompleteHandler failoverCompleteHandler) {
    this.failoverCompleteHandler = failoverCompleteHandler;
  }

  public void setNodeCrashedHandler(FailoverCompleteHandler removeSubsHandler) {
    this.nodeCrashedHandler = removeSubsHandler;
  }

  public boolean isKilled() {
    return killed;
  }

  public boolean isEnabled() {
    return enabled;
  }

  // For testing:
  public void failDuringFailover(boolean fail) {
    failDuringFailover = fail;
  }

  private void doDeployVerticle(
      final String verticleName,
      DeploymentOptions deploymentOptions,
      final Handler<AsyncResult<String>> doneHandler) {
    final Handler<AsyncResult<String>> wrappedHandler =
        asyncResult -> {
          if (asyncResult.succeeded()) {
            // Tell the other nodes of the cluster about the verticle for HA purposes
            addToHA(asyncResult.result(), verticleName, deploymentOptions);
          }
          if (doneHandler != null) {
            doneHandler.handle(asyncResult);
          } else if (asyncResult.failed()) {
            log.error("Failed to deploy verticle", asyncResult.cause());
          }
        };
    deploymentManager.deployVerticle(verticleName, deploymentOptions, wrappedHandler);
  }

  // A node has joined the cluster
  // synchronize this in case the cluster manager is naughty and calls it concurrently
  private synchronized void nodeAdded(final String nodeID) {
    // This is not ideal but we need to wait for the group information to appear - and this will be
    // shortly after the node has been added
    checkQuorumWhenAdded(nodeID, System.currentTimeMillis());
  }

  // A node has left the cluster
  // synchronize this in case the cluster manager is naughty and calls it concurrently
  private synchronized void nodeLeft(String leftNodeID) {

    checkQuorum();
    if (attainedQuorum) {

      // Check for failover
      String sclusterInfo = clusterMap.get(leftNodeID);

      if (sclusterInfo == null) {
        // Clean close - do nothing
      } else {
        JsonObject clusterInfo = new JsonObject(sclusterInfo);
        checkRemoveSubs(leftNodeID, clusterInfo);
        checkFailover(leftNodeID, clusterInfo);
      }

      // We also check for and potentially resume any previous failovers that might have failed
      // We can determine this if there are any ids in the cluster map which aren't in the node list
      List<String> nodes = clusterManager.getNodes();

      for (Map.Entry<String, String> entry : clusterMap.entrySet()) {
        if (!leftNodeID.equals(entry.getKey()) && !nodes.contains(entry.getKey())) {
          JsonObject haInfo = new JsonObject(entry.getValue());
          checkRemoveSubs(entry.getKey(), haInfo);
          checkFailover(entry.getKey(), haInfo);
        }
      }
    }
  }

  private synchronized void checkQuorumWhenAdded(final String nodeID, final long start) {
    if (clusterMap.containsKey(nodeID)) {
      checkQuorum();
    } else {
      vertx.setTimer(
          200,
          tid -> {
            // This can block on a monitor so it needs to run as a worker
            vertx.executeBlockingInternal(
                () -> {
                  if (System.currentTimeMillis() - start > 10000) {
                    log.warn("Timed out waiting for group information to appear");
                  } else if (!stopped) {
                    ContextImpl context = vertx.getContext();
                    try {
                      // Remove any context we have here (from the timer) otherwise it will screw
                      // things up when verticles are deployed
                      ContextImpl.setContext(null);
                      checkQuorumWhenAdded(nodeID, start);
                    } finally {
                      ContextImpl.setContext(context);
                    }
                  }
                  return null;
                },
                null);
          });
    }
  }

  // Check if there is a quorum for our group
  private void checkQuorum() {
    if (quorumSize == 0) {
      this.attainedQuorum = true;
    } else {
      List<String> nodes = clusterManager.getNodes();
      int count = 0;
      for (String node : nodes) {
        String json = clusterMap.get(node);
        if (json != null) {
          JsonObject clusterInfo = new JsonObject(json);
          String group = clusterInfo.getString("group");
          if (group.equals(this.group)) {
            count++;
          }
        }
      }
      boolean attained = count >= quorumSize;
      if (!attainedQuorum && attained) {
        // A quorum has been attained so we can deploy any currently undeployed HA deploymentIDs
        log.info(
            "A quorum has been obtained. Any deploymentIDs waiting on a quorum will now be deployed");
        this.attainedQuorum = true;
      } else if (attainedQuorum && !attained) {
        // We had a quorum but we lost it - we must undeploy any HA deploymentIDs
        log.info(
            "There is no longer a quorum. Any HA deploymentIDs will be undeployed until a quorum is re-attained");
        this.attainedQuorum = false;
      }
    }
  }

  // Add some information on a deployment in the cluster so other nodes know about it
  private void addToHA(
      String deploymentID, String verticleName, DeploymentOptions deploymentOptions) {
    String encoded;
    synchronized (haInfo) {
      JsonObject verticleConf = new JsonObject().put("dep_id", deploymentID);
      verticleConf.put("verticle_name", verticleName);
      verticleConf.put("options", deploymentOptions.toJson());
      JsonArray haMods = haInfo.getJsonArray("verticles");
      haMods.add(verticleConf);
      encoded = haInfo.encode();
      clusterMap.put(nodeID, encoded);
    }
  }

  // Add the deployment to an internal list of deploymentIDs - these will be executed when a quorum
  // is attained
  private void addToHADeployList(
      final String verticleName,
      final DeploymentOptions deploymentOptions,
      final Handler<AsyncResult<String>> doneHandler) {
    toDeployOnQuorum.add(
        () -> {
          ContextImpl ctx = vertx.getContext();
          try {
            ContextImpl.setContext(null);
            deployVerticle(verticleName, deploymentOptions, doneHandler);
          } finally {
            ContextImpl.setContext(ctx);
          }
        });
  }

  private void checkHADeployments() {
    try {
      if (attainedQuorum) {
        deployHADeployments();
      } else {
        undeployHADeployments();
      }
    } catch (Throwable t) {
      log.error("Failed when checking HA deploymentIDs", t);
    }
  }

  // Undeploy any HA deploymentIDs now there is no quorum
  private void undeployHADeployments() {
    for (String deploymentID : deploymentManager.deployments()) {
      Deployment dep = deploymentManager.getDeployment(deploymentID);
      if (dep != null) {
        if (dep.deploymentOptions().isHa()) {
          ContextImpl ctx = vertx.getContext();
          try {
            ContextImpl.setContext(null);
            deploymentManager.undeployVerticle(
                deploymentID,
                result -> {
                  if (result.succeeded()) {
                    log.info(
                        "Successfully undeployed HA deployment "
                            + deploymentID
                            + "-"
                            + dep.verticleIdentifier()
                            + " as there is no quorum");
                    addToHADeployList(
                        dep.verticleIdentifier(),
                        dep.deploymentOptions(),
                        result1 -> {
                          if (result1.succeeded()) {
                            log.info(
                                "Successfully redeployed verticle "
                                    + dep.verticleIdentifier()
                                    + " after quorum was re-attained");
                          } else {
                            log.error(
                                "Failed to redeploy verticle "
                                    + dep.verticleIdentifier()
                                    + " after quorum was re-attained",
                                result1.cause());
                          }
                        });
                  } else {
                    log.error("Failed to undeploy deployment on lost quorum", result.cause());
                  }
                });
          } finally {
            ContextImpl.setContext(ctx);
          }
        }
      }
    }
  }

  // Deploy any deploymentIDs that are waiting for a quorum
  private void deployHADeployments() {
    int size = toDeployOnQuorum.size();
    if (size != 0) {
      log.info(
          "There are "
              + size
              + " HA deploymentIDs waiting on a quorum. These will now be deployed");
      Runnable task;
      while ((task = toDeployOnQuorum.poll()) != null) {
        try {
          task.run();
        } catch (Throwable t) {
          log.error("Failed to run redeployment task", t);
        }
      }
    }
  }

  // Handle failover
  private void checkFailover(String failedNodeID, JsonObject theHAInfo) {
    try {
      JsonArray deployments = theHAInfo.getJsonArray("verticles");
      String group = theHAInfo.getString("group");
      String chosen = chooseHashedNode(group, failedNodeID.hashCode());
      if (chosen != null && chosen.equals(this.nodeID)) {
        if (deployments != null && deployments.size() != 0) {
          log.info(
              "node"
                  + nodeID
                  + " says: Node "
                  + failedNodeID
                  + " has failed. This node will deploy "
                  + deployments.size()
                  + " deploymentIDs from that node.");
          for (Object obj : deployments) {
            JsonObject app = (JsonObject) obj;
            processFailover(app);
          }
        }
        // Failover is complete! We can now remove the failed node from the cluster map
        clusterMap.remove(failedNodeID);
        callFailoverCompleteHandler(failedNodeID, theHAInfo, true);
      }
    } catch (Throwable t) {
      log.error("Failed to handle failover", t);
      callFailoverCompleteHandler(failedNodeID, theHAInfo, false);
    }
  }

  private void checkRemoveSubs(String failedNodeID, JsonObject theHAInfo) {
    String chosen = chooseHashedNode(null, failedNodeID.hashCode());
    if (chosen != null && chosen.equals(this.nodeID)) {
      callFailoverCompleteHandler(nodeCrashedHandler, failedNodeID, theHAInfo, true);
    }
  }

  private void callFailoverCompleteHandler(String nodeID, JsonObject haInfo, boolean result) {
    callFailoverCompleteHandler(failoverCompleteHandler, nodeID, haInfo, result);
  }

  private void callFailoverCompleteHandler(
      FailoverCompleteHandler handler, String nodeID, JsonObject haInfo, boolean result) {
    if (handler != null) {
      CountDownLatch latch = new CountDownLatch(1);
      // The testsuite requires that this is called on a Vert.x thread
      vertx.runOnContext(
          v -> {
            handler.handle(nodeID, haInfo, result);
            latch.countDown();
          });
      try {
        latch.await(30, TimeUnit.SECONDS);
      } catch (InterruptedException ignore) {
      }
    }
  }

  // Process the failover of a deployment
  private void processFailover(JsonObject failedVerticle) {
    if (failDuringFailover) {
      throw new VertxException("Oops!");
    }
    // This method must block until the failover is complete - i.e. the verticle is successfully
    // redeployed
    final String verticleName = failedVerticle.getString("verticle_name");
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicReference<Throwable> err = new AtomicReference<>();
    // Now deploy this verticle on this node
    ContextImpl ctx = vertx.getContext();
    if (ctx != null) {
      // We could be on main thread in which case we don't want to overwrite tccl
      ContextImpl.setContext(null);
    }
    JsonObject options = failedVerticle.getJsonObject("options");
    try {
      doDeployVerticle(
          verticleName,
          new DeploymentOptions(options),
          result -> {
            if (result.succeeded()) {
              log.info("Successfully redeployed verticle " + verticleName + " after failover");
            } else {
              log.error("Failed to redeploy verticle after failover", result.cause());
              err.set(result.cause());
            }
            latch.countDown();
            Throwable t = err.get();
            if (t != null) {
              throw new VertxException(t);
            }
          });
    } finally {
      if (ctx != null) {
        ContextImpl.setContext(ctx);
      }
    }
    try {
      if (!latch.await(120, TimeUnit.SECONDS)) {
        throw new VertxException("Timed out waiting for redeploy on failover");
      }
    } catch (InterruptedException e) {
      throw new IllegalStateException(e);
    }
  }

  // Compute the failover node
  private String chooseHashedNode(String group, int hashCode) {
    List<String> nodes = clusterManager.getNodes();
    ArrayList<String> matchingMembers = new ArrayList<>();
    for (String node : nodes) {
      String sclusterInfo = clusterMap.get(node);
      if (sclusterInfo != null) {
        JsonObject clusterInfo = new JsonObject(sclusterInfo);
        String memberGroup = clusterInfo.getString("group");
        if (group == null || group.equals(memberGroup)) {
          matchingMembers.add(node);
        }
      }
    }
    if (!matchingMembers.isEmpty()) {
      // Hashcodes can be -ve so make it positive
      long absHash = (long) hashCode + Integer.MAX_VALUE;
      long lpos = absHash % matchingMembers.size();
      return matchingMembers.get((int) lpos);
    } else {
      return null;
    }
  }
}
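HAManager is created internally when high availability is enabled; a sketch of the user-facing side, with option names taken from the standard Vert.x API and placeholder values:

VertxOptions options = new VertxOptions()
    .setHAEnabled(true)     // a quorum is tracked and failover is performed for this node
    .setQuorumSize(2)       // minimum number of nodes in the group before HA deployments run
    .setHAGroup("my-group");

Vertx.clusteredVertx(
    options,
    ar -> {
      if (ar.succeeded()) {
        // Verticles deployed with ha=true are recorded in the cluster map and failed over.
        ar.result()
            .deployVerticle("com.example.MyVerticle", new DeploymentOptions().setHa(true));
      }
    });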
/** Created by Giovanni Baleani on 15/07/2015. */
public class EventBusBridgeWebsocketServerVerticle extends AbstractVerticle {

  private static Logger logger =
      LoggerFactory.getLogger(EventBusBridgeWebsocketServerVerticle.class);

  private String address;
  private HttpServer netServer;
  private int localBridgePort;
  private int idleTimeout;
  private String ssl_cert_key;
  private String ssl_cert;
  private String ssl_trust;

  @Override
  public void start() throws Exception {
    address = MQTTSession.ADDRESS;

    JsonObject conf = config();

    localBridgePort = conf.getInteger("local_bridge_port", 7007);
    idleTimeout = conf.getInteger("socket_idle_timeout", 120);
    ssl_cert_key = conf.getString("ssl_cert_key");
    ssl_cert = conf.getString("ssl_cert");
    ssl_trust = conf.getString("ssl_trust");

    // [WebSocket -> BUS] listen WebSocket publish to BUS
    HttpServerOptions opt =
        new HttpServerOptions()
            .setTcpKeepAlive(true)
            .setIdleTimeout(idleTimeout)
            .setPort(localBridgePort);

    if (ssl_cert_key != null && ssl_cert != null && ssl_trust != null) {
      opt.setSsl(true)
          .setClientAuth(ClientAuth.REQUIRED)
          .setPemKeyCertOptions(
              new PemKeyCertOptions().setKeyPath(ssl_cert_key).setCertPath(ssl_cert))
          .setPemTrustOptions(new PemTrustOptions().addCertPath(ssl_trust));
    }

    netServer = vertx.createHttpServer(opt);
    netServer
        .websocketHandler(
            sock -> {
              final EventBusWebsocketBridge ebnb =
                  new EventBusWebsocketBridge(sock, vertx.eventBus(), address);
              sock.closeHandler(
                  aVoid -> {
                    logger.info(
                        "Bridge Server - closed connection from client ip: "
                            + sock.remoteAddress());
                    ebnb.stop();
                  });
              sock.exceptionHandler(
                  throwable -> {
                    logger.error("Bridge Server - Exception: " + throwable.getMessage(), throwable);
                    ebnb.stop();
                  });

              logger.info("Bridge Server - new connection from client ip: " + sock.remoteAddress());

              RecordParser parser = ebnb.initialHandhakeProtocolParser();
              sock.handler(parser::handle);
            })
        .listen();
  }

  @Override
  public void stop() throws Exception {
    netServer.close();
  }
}
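A deployment sketch showing the configuration keys the verticle reads in start(); the paths and port are placeholder values, and the ssl_* keys can be omitted to run without TLS.

JsonObject conf = new JsonObject()
    .put("local_bridge_port", 7007)
    .put("socket_idle_timeout", 120)
    .put("ssl_cert_key", "/path/to/server-key.pem")
    .put("ssl_cert", "/path/to/server-cert.pem")
    .put("ssl_trust", "/path/to/truststore.pem");

vertx.deployVerticle(
    EventBusBridgeWebsocketServerVerticle.class.getName(),
    new DeploymentOptions().setConfig(conf));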
public class EverythingIsPossibleHandler implements Handler<RoutingContext> {

  private static final Logger log = LoggerFactory.getLogger(EverythingIsPossibleHandler.class);

  private JDBCClient jdbcClient;

  public static EverythingIsPossibleHandler create(JDBCClient jc) {
    EverythingIsPossibleHandler ach = new EverythingIsPossibleHandler();
    ach.jdbcClient = jc;
    return ach;
  }

  String Location = "";
  String FoodType = "";
  String Budget = "";
  String Length = "";
  String[] Hotel = new String[15];
  String[] Rest = new String[15];
  String[] Act = new String[15];
  int Hotelcounter = 0;
  int Restcounter = 0;
  int Actcounter = 0;
  String username = "";

  @Override
  public void handle(RoutingContext context) {

    username = context.user().principal().getString("username");

    jdbcClient.getConnection(
        connectionRes -> {
          if (connectionRes.succeeded()) {
            // System.out.println("Able to get JDBC Connection");
            queryLocation(context, connectionRes);

          } else {
            log.error("Could not connect to the database.");
            context.fail(402);
          }
        });
  }

  private void queryLocation(RoutingContext context, AsyncResult<SQLConnection> connectionRes) {
    // Get and set locations of user for future queries
    SQLConnection connection = connectionRes.result();
    // System.out.println("SELECT Location FROM preferences WHERE username = '******'");
    connection.query(
        "SELECT Location FROM preferences WHERE username = '******'",
        res2 -> {
          if (res2.succeeded()) {
            // System.out.println("Able to get query location");
            ResultSet resultSet = res2.result();
            for (JsonArray line : res2.result().getResults()) {
              Location = line.encode();
              Location = Location.replaceAll("[^a-zA-Z,' ']", "");
              // System.out.println("userLocation:"+Location);
            }
            context.session().put("location", Location);
            queryBudget(context, connection);

          } else {
            log.error("Could not select from the user table");
          }
        });
  }

  private void queryBudget(RoutingContext context, SQLConnection connection) {
    connection.query(
        "SELECT budget FROM preferences WHERE username = '******'",
        res2 -> {
          if (res2.succeeded()) {
            // System.out.println("Able to get budget query");
            ResultSet resultSet = res2.result();
            for (JsonArray line : res2.result().getResults()) {
              Budget = line.encode();
              Budget = Budget.replaceAll("[^a-zA-Z,' ']", "");
              // System.out.println("Budget: "+Budget);
            }
            queryHotels(context, connection);
          } else {
            log.error("Could not select budget from pref table table");
          }
        });
  }

  private void queryHotels(RoutingContext context, SQLConnection connection) {
    // Retrieve Hotels
    connection.query(
        "SELECT name FROM hotel ",
        res2 -> {
          if (res2.succeeded()) {
            // System.out.println("Able to get hotel query");
            for (JsonArray line : res2.result().getResults()) {
              Hotel[Hotelcounter] = line.encode();
              Hotel[Hotelcounter] = Hotel[Hotelcounter].replaceAll("[^a-zA-Z' ']", "");
              Hotelcounter++;
            }
            Hotelcounter = 0;
            queryHotelPricing(context, connection);
          } else {
            log.error("Could not select from the user table");
          }
        });
  }

  private void queryHotelPricing(RoutingContext context, SQLConnection connection) {
    // Retrieve Hotel Pricing
    connection.query(
        "SELECT price FROM hotel",
        res3 -> {
          if (res3.succeeded()) {
            // System.out.println("Able to get hotel pricing");
            Hotelcounter = 0;
            for (JsonArray line1 : res3.result().getResults()) {
              String temp = Hotel[Hotelcounter];
              temp = temp.concat("   ($" + line1.encode() + ")");
              temp = temp.replaceAll("[^a-zA-Z,' '0-9$()]", "");
              Hotel[Hotelcounter] = temp;
              // System.out.println("hotel with price: " + Hotel[Hotelcounter]);
              Hotelcounter++;
            }
            context.session().put("hotels", Hotel);
            queryResturants(context, connection);
            Hotelcounter = 0;
          } else {
            log.error("could not select from user table above");
          }
        });
  }

  private void queryResturants(final RoutingContext context, final SQLConnection connection) {
    // Retrieve Resturants
    connection.query(
        "SELECT name FROM resturant",
        res4 -> {
          if (res4.succeeded()) {
            // System.out.println("Able to get resturant query");
            for (JsonArray line2 : res4.result().getResults()) {
              // System.out.println("resturant: "+line2.encode());
              String Resttemp = line2.encode();
              Resttemp = Resttemp.replaceAll("[^a-zA-Z,' '0-9]", "");
              Rest[Restcounter] = Resttemp;
              Restcounter++;
            }
            Restcounter = 0;
            context.session().put("resturants", Rest);

            queryActivites(context, connection);
          } else {
            log.error("could not select form resturant table");
          }
        });
  }

  private void queryActivites(RoutingContext context, SQLConnection connection) {
    // Retrieve Activies
    connection.query(
        "SELECT name FROM activities",
        res5 -> {
          if (res5.succeeded()) {
            // System.out.println("Able to get activities query");
            for (JsonArray line3 : res5.result().getResults()) {
              // System.out.println("Activities: "+line3.encode());
              String ActTemp = line3.encode();
              ActTemp = ActTemp.replaceAll("[^a-zA-Z,' '0-9]", "");
              Act[Actcounter] = ActTemp;
              Actcounter++;
            }
            Actcounter = 0;
            context.session().put("activities", Act);

            context.next();
          } else {
            log.error("could not select form the activites table");
          }
        });
  }
}
Example #15
public class BlueGreenProxy extends AbstractVerticle {

  private static final Logger logger = LoggerFactory.getLogger(BlueGreenProxy.class);

  private static AtomicBoolean statisticianCreated = new AtomicBoolean(false);

  private final URI primaryHost;

  private final URI secondaryHost;

  private int port;

  {
    primaryHost = URI.create(System.getProperty("primary.host"));
    secondaryHost = URI.create(System.getProperty("secondary.host"));
    port = Integer.valueOf(System.getProperty("port", "8090"));
  }

  public void start() throws Exception {
    if (statisticianCreated.compareAndSet(false, true)) {
      getVertx()
          .deployVerticle(
              "com.alertme.test.Statistician",
              res -> {
                if (res.failed()) {
                  logger.error(format("Failed to create statistician: %s", res.result()));
                }
              });
    }
    logger.info(
        format("Initializing HTTP Server Proxy instance %s", Thread.currentThread().getName()));
    logger.info(format("Primary host: %s, secondary host: %s", primaryHost, secondaryHost));
    final HttpClient client = vertx.createHttpClient();
    vertx
        .createHttpServer()
        .requestHandler(
            request -> {
              collectStats(request);

              final Buffer requestBody = buffer();
              request.handler(requestBody::appendBuffer);

              request.endHandler(
                  end -> {
                    final ProxyRequest primaryProxyRequest =
                        new ProxyRequest(
                            request.method(),
                            request.headers(),
                            request.uri(),
                            requestBody,
                            client,
                            primaryHost);
                    final ProxyRequest secondaryProxyRequest =
                        new ProxyRequest(
                            request.method(),
                            request.headers(),
                            request.uri(),
                            requestBody,
                            client,
                            secondaryHost);

                    final TriConsumer<Buffer, Integer, MultiMap> writeResponse =
                        (body, code, headers) -> {
                          if (headers != null) {
                            request.response().headers().setAll(headers);
                          }
                          if (code == null) {
                            logger.error("Code is empty, assuming server error occurred");
                            code = INTERNAL_SERVER_ERROR.code();
                          }
                          request.response().setStatusCode(code);
                          request.response().setChunked(true);
                          if (body != null) {
                            request.response().write(body);
                          }
                          request.response().end();
                          // TODO if we start writing async, then we should call this only when
                          // request ended
                        };

                    if (request.method() == HttpMethod.GET
                        || request.method() == HttpMethod.HEAD
                        || request.method() == HttpMethod.OPTIONS) {
                      primaryProxyRequest.onFailure(
                          (body, code, headers) -> secondaryProxyRequest.request());
                      primaryProxyRequest.onSuccess(writeResponse);
                      secondaryProxyRequest.onFailure(
                          (body, code, headers) ->
                              primaryProxyRequest.writeResponse(writeResponse));
                      secondaryProxyRequest.onSuccess(writeResponse);
                      primaryProxyRequest.request();
                    } else {
                      primaryProxyRequest.onComplete(writeResponse);
                      primaryProxyRequest.request();
                      secondaryProxyRequest.request();
                    }
                  });
            })
        .listen(port);
  }

  @FunctionalInterface
  interface TriConsumer<A, B, C> {
    void apply(A a, B b, C c);
  }

  private static class ProxyRequest {

    private final HttpClient client;

    private final URI proxyToUri;
    private final HttpMethod method;
    private final String uri;
    private final Buffer requestBody;

    private TriConsumer<Buffer, Integer, MultiMap> onFailure;
    private TriConsumer<Buffer, Integer, MultiMap> onSuccess;
    private TriConsumer<Buffer, Integer, MultiMap> onComplete;

    private Integer code;
    private MultiMap headers;
    private Buffer body;

    public ProxyRequest(
        final HttpMethod method,
        final MultiMap headers,
        final String uri,
        final Buffer requestBody,
        final HttpClient client,
        final URI proxyToUri) {

      this.client = client;
      this.proxyToUri = proxyToUri;
      this.body = buffer();
      this.method = method;
      this.headers = headers;
      this.uri = uri;
      this.requestBody = requestBody;
    }

    public void request() {
      final HttpClientRequest httpRequest =
          client.request(
              method,
              proxyToUri.getPort(),
              proxyToUri.getHost(),
              uri,
              resp -> {
                headers = resp.headers();
                code = resp.statusCode();
                resp.handler(this.body::appendBuffer);
                resp.endHandler(
                    end -> {
                      if (code >= 200 && code < 300) {
                        call(onSuccess);
                      } else {
                        call(onFailure);
                      }
                      call(onComplete);
                    });
                // TODO can we start writing without waiting the whole buffer?
              });
      httpRequest.exceptionHandler(
          ex -> {
            logger.error(format("Got exception processing request: %s", ex.getMessage()));
            code = INTERNAL_SERVER_ERROR.code();
            call(onFailure);
            call(onComplete);
          });
      httpRequest.headers().setAll(headers);
      httpRequest.write(requestBody);
      httpRequest.end();
    }

    private void call(final TriConsumer<Buffer, Integer, MultiMap> function) {
      if (function != null) {
        function.apply(body, code, headers);
      }
    }

    public void onFailure(final TriConsumer<Buffer, Integer, MultiMap> onFail) {
      this.onFailure = onFail;
    }

    public void onSuccess(final TriConsumer<Buffer, Integer, MultiMap> onSuccess) {
      this.onSuccess = onSuccess;
    }

    public void onComplete(final TriConsumer<Buffer, Integer, MultiMap> onComplete) {
      this.onComplete = onComplete;
    }

    public void writeResponse(final TriConsumer<Buffer, Integer, MultiMap> responseHandler) {
      call(responseHandler);
    }
  }

  private void collectStats(final HttpServerRequest request) {
    getVertx().eventBus().publish("proxy.stats", 1);
  }
}
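A minimal usage sketch (not part of the original example; the port, host and paths are assumed) showing how the proxy above behaves from the outside: GET/HEAD/OPTIONS requests are answered by the primary host with a fallback to the secondary, other methods are mirrored to both hosts, and every proxied request publishes a tick on the "proxy.stats" event bus address.

import io.vertx.core.Vertx;
import io.vertx.core.http.HttpClient;

public class ProxySmokeTest {

  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();

    // collectStats(...) above publishes 1 on "proxy.stats" for every proxied request
    vertx
        .eventBus()
        .consumer(
            "proxy.stats", msg -> System.out.println("proxied requests seen: " + msg.body()));

    HttpClient client = vertx.createHttpClient();

    // GET is served by the primary host; the secondary is only tried if the primary fails
    client.getNow(
        8080, "localhost", "/health", resp -> System.out.println("GET -> " + resp.statusCode()));

    // POST is mirrored to both hosts and the primary's response is written back to the caller
    client
        .post(
            8080,
            "localhost",
            "/orders",
            resp -> System.out.println("POST -> " + resp.statusCode()))
        .end("{\"id\":42}");
  }
}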
Example #16
/**
 * This class is thread-safe
 *
 * <p>Some parts (e.g. content negotiation) from Yoke by Paulo Lopes
 *
 * @author <a href="http://tfox.org">Tim Fox</a>
 * @author <a href="http://[email protected]">Paulo Lopes</a>
 */
public class RouteImpl implements Route {

  private static final Logger log = LoggerFactory.getLogger(RouteImpl.class);

  private final RouterImpl router;
  private final Set<HttpMethod> methods = new HashSet<>();
  private final Set<String> consumes = new LinkedHashSet<>();
  private final Set<String> produces = new LinkedHashSet<>();
  private String path;
  private int order;
  private boolean enabled = true;
  private Handler<RoutingContext> contextHandler;
  private Handler<RoutingContext> failureHandler;
  private boolean added;
  private Pattern pattern;
  private List<String> groups;
  private boolean useNormalisedPath = true;

  RouteImpl(RouterImpl router, int order) {
    this.router = router;
    this.order = order;
  }

  RouteImpl(RouterImpl router, int order, HttpMethod method, String path) {
    this(router, order);
    methods.add(method);
    checkPath(path);
    setPath(path);
  }

  RouteImpl(RouterImpl router, int order, String path) {
    this(router, order);
    checkPath(path);
    setPath(path);
  }

  RouteImpl(RouterImpl router, int order, HttpMethod method, String regex, boolean bregex) {
    this(router, order);
    methods.add(method);
    setRegex(regex);
  }

  RouteImpl(RouterImpl router, int order, String regex, boolean bregex) {
    this(router, order);
    setRegex(regex);
  }

  @Override
  public synchronized Route method(HttpMethod method) {
    methods.add(method);
    return this;
  }

  @Override
  public synchronized Route path(String path) {
    checkPath(path);
    setPath(path);
    return this;
  }

  @Override
  public synchronized Route pathRegex(String regex) {
    setRegex(regex);
    return this;
  }

  @Override
  public synchronized Route produces(String contentType) {
    produces.add(contentType);
    return this;
  }

  @Override
  public synchronized Route consumes(String contentType) {
    consumes.add(contentType);
    return this;
  }

  @Override
  public synchronized Route order(int order) {
    if (added) {
      throw new IllegalStateException("Can't change order after route is active");
    }
    this.order = order;
    return this;
  }

  @Override
  public synchronized Route last() {
    return order(Integer.MAX_VALUE);
  }

  @Override
  public synchronized Route handler(Handler<RoutingContext> contextHandler) {
    if (this.contextHandler != null) {
      log.warn("Setting handler for a route more than once!");
    }
    this.contextHandler = contextHandler;
    checkAdd();
    return this;
  }

  @Override
  public Route blockingHandler(Handler<RoutingContext> contextHandler) {
    return blockingHandler(contextHandler, true);
  }

  @Override
  public synchronized Route blockingHandler(
      Handler<RoutingContext> contextHandler, boolean ordered) {
    return handler(new BlockingHandlerDecorator(contextHandler, ordered));
  }

  @Override
  public synchronized Route failureHandler(Handler<RoutingContext> exceptionHandler) {
    if (this.failureHandler != null) {
      log.warn("Setting failureHandler for a route more than once!");
    }
    this.failureHandler = exceptionHandler;
    checkAdd();
    return this;
  }

  @Override
  public synchronized Route remove() {
    router.remove(this);
    return this;
  }

  @Override
  public synchronized Route disable() {
    enabled = false;
    return this;
  }

  @Override
  public synchronized Route enable() {
    enabled = true;
    return this;
  }

  @Override
  public Route useNormalisedPath(boolean useNormalisedPath) {
    this.useNormalisedPath = useNormalisedPath;
    return this;
  }

  @Override
  public String getPath() {
    return path;
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder("Route[ ");
    sb.append("path:").append(path);
    sb.append(" pattern:").append(pattern);
    sb.append(" handler:").append(contextHandler);
    sb.append(" failureHandler:").append(failureHandler);
    sb.append(" order:").append(order);
    sb.append(" methods:[");
    int cnt = 0;
    for (HttpMethod method : methods) {
      sb.append(method);
      cnt++;
      if (cnt < methods.size()) {
        sb.append(",");
      }
    }
    sb.append("]]@").append(System.identityHashCode(this));
    return sb.toString();
  }

  synchronized void handleContext(RoutingContext context) {
    if (contextHandler != null) {
      contextHandler.handle(context);
    }
  }

  synchronized void handleFailure(RoutingContext context) {
    if (failureHandler != null) {
      failureHandler.handle(context);
    }
  }

  synchronized boolean matches(RoutingContext context, String mountPoint, boolean failure) {

    if (failure && failureHandler == null || !failure && contextHandler == null) {
      return false;
    }
    if (!enabled) {
      return false;
    }
    HttpServerRequest request = context.request();
    if (!methods.isEmpty() && !methods.contains(request.method())) {
      return false;
    }
    if (path != null && pattern == null && !pathMatches(mountPoint, context)) {
      return false;
    }
    if (pattern != null) {
      String path =
          useNormalisedPath
              ? Utils.normalisePath(context.request().path(), false)
              : context.request().path();
      if (mountPoint != null) {
        path = path.substring(mountPoint.length());
      }

      Matcher m = pattern.matcher(path);
      if (m.matches()) {
        if (m.groupCount() > 0) {
          Map<String, String> params = new HashMap<>(m.groupCount());
          if (groups != null) {
            // Pattern - named params
            // decode the path as it could contain escaped chars.
            try {
              for (int i = 0; i < groups.size(); i++) {
                final String k = groups.get(i);
                final String value =
                    URLDecoder.decode(URLDecoder.decode(m.group("p" + i), "UTF-8"), "UTF-8");
                if (!request.params().contains(k)) {
                  params.put(k, value);
                } else {
                  context.pathParams().put(k, value);
                }
              }
            } catch (UnsupportedEncodingException e) {
              context.fail(e);
              return false;
            }
          } else {
            // Straight regex - un-named params
            // decode the path as it could contain escaped chars.
            try {
              for (int i = 0; i < m.groupCount(); i++) {
                String group = m.group(i + 1);
                if (group != null) {
                  final String k = "param" + i;
                  final String value = URLDecoder.decode(group, "UTF-8");
                  if (!request.params().contains(k)) {
                    params.put(k, value);
                  } else {
                    context.pathParams().put(k, value);
                  }
                }
              }
            } catch (UnsupportedEncodingException e) {
              context.fail(e);
              return false;
            }
          }
          request.params().addAll(params);
          context.pathParams().putAll(params);
        }
      } else {
        return false;
      }
    }
    if (!consumes.isEmpty()) {
      // Can this route consume the specified content type
      String contentType = request.headers().get("content-type");
      boolean matches = false;
      for (String ct : consumes) {
        if (ctMatches(contentType, ct)) {
          matches = true;
          break;
        }
      }
      if (!matches) {
        return false;
      }
    }
    if (!produces.isEmpty()) {
      String accept = request.headers().get("accept");
      if (accept != null) {
        List<String> acceptableTypes = Utils.getSortedAcceptableMimeTypes(accept);
        for (String acceptable : acceptableTypes) {
          for (String produce : produces) {
            if (ctMatches(produce, acceptable)) {
              context.setAcceptableContentType(produce);
              return true;
            }
          }
        }
      } else {
        // According to rfc2616-sec14,
        // If no Accept header field is present, then it is assumed that the client accepts all
        // media types.
        context.setAcceptableContentType(produces.iterator().next());
        return true;
      }
      return false;
    }
    return true;
  }

  RouterImpl router() {
    return router;
  }

  /*
  E.g.
  "text/html", "text/*"  - returns true
  "text/html", "html" - returns true
  "application/json", "json" - returns true
  "application/*", "json" - returns true
  TODO - don't parse consumes types on each request - they can be preparsed!
   */
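  // Illustrative note (not in the original source): because the charset parameter is stripped
  // below, an actual content type of "text/html; charset=UTF-8" still matches an allowed
  // "text/html", "text/*" or bare "html", while "application/json" does not match "text/*".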
  private boolean ctMatches(String actualCT, String allowsCT) {

    if (allowsCT.equals("*") || allowsCT.equals("*/*")) {
      return true;
    }

    if (actualCT == null) {
      return false;
    }

    // get the content type only (exclude charset)
    actualCT = actualCT.split(";")[0];

    // if we received an incomplete CT
    if (allowsCT.indexOf('/') == -1) {
      // when the content is incomplete we assume */type, e.g.:
      // json -> */json
      allowsCT = "*/" + allowsCT;
    }

    // process wildcards
    if (allowsCT.contains("*")) {
      String[] consumesParts = allowsCT.split("/");
      String[] requestParts = actualCT.split("/");
      return "*".equals(consumesParts[0]) && consumesParts[1].equals(requestParts[1])
          || "*".equals(consumesParts[1]) && consumesParts[0].equals(requestParts[0]);
    }

    return actualCT.contains(allowsCT);
  }

  private boolean pathMatches(String mountPoint, RoutingContext ctx) {
    String thePath = mountPoint == null ? path : mountPoint + path;
    String requestPath =
        useNormalisedPath ? Utils.normalisePath(ctx.request().path(), false) : ctx.request().path();
    if (exactPath) {
      return pathMatchesExact(requestPath, thePath);
    } else {
      if (thePath.endsWith("/") && requestPath.equals(removeTrailing(thePath))) {
        return true;
      }
      return requestPath.startsWith(thePath);
    }
  }

  private boolean pathMatchesExact(String path1, String path2) {
    // Ignore trailing slash when matching paths
    return removeTrailing(path1).equals(removeTrailing(path2));
  }

  private String removeTrailing(String path) {
    int i = path.length();
    if (path.charAt(i - 1) == '/') {
      path = path.substring(0, i - 1);
    }
    return path;
  }

  private void setPath(String path) {
    // See if the path contains ":" - if so then it contains parameter capture groups and we
    // have to generate a regex for that
    if (path.indexOf(':') != -1) {
      createPatternRegex(path);
      this.path = path;
    } else {
      if (path.charAt(path.length() - 1) != '*') {
        exactPath = true;
        this.path = path;
      } else {
        exactPath = false;
        this.path = path.substring(0, path.length() - 1);
      }
    }
  }

  private void setRegex(String regex) {
    pattern = Pattern.compile(regex);
  }

  private void createPatternRegex(String path) {
    // We need to search for any :<token name> tokens in the String and replace them with named
    // capture groups
    Matcher m = Pattern.compile(":([A-Za-z][A-Za-z0-9_]*)").matcher(path);
    StringBuffer sb = new StringBuffer();
    groups = new ArrayList<>();
    int index = 0;
    while (m.find()) {
      String param = "p" + index;
      String group = m.group().substring(1);
      if (groups.contains(group)) {
        throw new IllegalArgumentException(
            "Cannot use identifier " + group + " more than once in pattern string");
      }
      m.appendReplacement(sb, "(?<" + param + ">[^/]+)");
      groups.add(group);
      index++;
    }
    m.appendTail(sb);
    path = sb.toString();
    pattern = Pattern.compile(path);
  }
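  // For illustration (not in the original class): createPatternRegex above rewrites a path such
  // as "/catalogue/:category/:item" into "/catalogue/(?<p0>[^/]+)/(?<p1>[^/]+)" with
  // groups = ["category", "item"], so matches() can later look each value up by its token name.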

  private void checkPath(String path) {
    if ("".equals(path) || path.charAt(0) != '/') {
      throw new IllegalArgumentException("Path must start with /");
    }
  }

  private boolean exactPath;

  int order() {
    return order;
  }

  private void checkAdd() {
    if (!added) {
      router.add(this);
      added = true;
    }
  }
}
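A short usage sketch (not taken from the original sources; the port and route are assumed) showing how the matching logic above is normally exercised through the public Vert.x-Web 3.x Router API: the ":id" token is turned into a named capture group by setPath/createPatternRegex, and produces() drives the content negotiation performed in matches().

import io.vertx.core.Vertx;
import io.vertx.ext.web.Router;

public class RouteUsageSketch {

  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();
    Router router = Router.router(vertx);

    // ":id" becomes a named regex group; the matched value is exposed as a request parameter
    router
        .get("/users/:id")
        .produces("application/json")
        .handler(
            ctx ->
                ctx.response()
                    .putHeader("content-type", ctx.getAcceptableContentType())
                    .end("{\"id\":\"" + ctx.request().getParam("id") + "\"}"));

    vertx.createHttpServer().requestHandler(router::accept).listen(8080);
  }
}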
Example #17
/**
 * This class is optimised for performance when used on the same event loop that it was passed to
 * the handler with. However it can be used safely from other threads.
 *
 * <p>The internal state is protected using the synchronized keyword. If always used on the same
 * event loop, then we benefit from biased locking which makes the overhead of synchronized near
 * zero.
 *
 * @author <a href="http://tfox.org">Tim Fox</a>
 */
public class HttpClientRequestImpl implements HttpClientRequest {

  private static final Logger log = LoggerFactory.getLogger(HttpClientRequestImpl.class);

  private final String host;
  private final int port;
  private final HttpClientImpl client;
  private final HttpRequest request;
  private final VertxInternal vertx;
  private final io.vertx.core.http.HttpMethod method;
  private Handler<HttpClientResponse> respHandler;
  private Handler<Void> endHandler;
  private boolean chunked;
  private Handler<Void> continueHandler;
  private volatile ClientConnection conn;
  private Handler<Void> drainHandler;
  private Handler<Throwable> exceptionHandler;
  private boolean headWritten;
  private boolean completed;
  private ByteBuf pendingChunks;
  private int pendingMaxSize = -1;
  private boolean connecting;
  private boolean writeHead;
  private long written;
  private long currentTimeoutTimerId = -1;
  private MultiMap headers;
  private boolean exceptionOccurred;
  private long lastDataReceived;
  private Object metric;

  HttpClientRequestImpl(
      HttpClientImpl client,
      io.vertx.core.http.HttpMethod method,
      String host,
      int port,
      String relativeURI,
      VertxInternal vertx) {
    this.host = host;
    this.port = port;
    this.client = client;
    this.request =
        new DefaultHttpRequest(
            toNettyHttpVersion(client.getOptions().getProtocolVersion()),
            toNettyHttpMethod(method),
            relativeURI,
            false);
    this.chunked = false;
    this.method = method;
    this.vertx = vertx;
  }

  @Override
  public HttpClientRequest handler(Handler<HttpClientResponse> handler) {
    synchronized (getLock()) {
      if (handler != null) {
        checkComplete();
        respHandler = checkConnect(method, handler);
      } else {
        respHandler = null;
      }
      return this;
    }
  }

  @Override
  public HttpClientRequest pause() {
    return this;
  }

  @Override
  public HttpClientRequest resume() {
    return this;
  }

  @Override
  public HttpClientRequest endHandler(Handler<Void> endHandler) {
    synchronized (getLock()) {
      if (endHandler != null) {
        checkComplete();
      }
      this.endHandler = endHandler;
      return this;
    }
  }

  @Override
  public HttpClientRequestImpl setChunked(boolean chunked) {
    synchronized (getLock()) {
      checkComplete();
      if (written > 0) {
        throw new IllegalStateException(
            "Cannot set chunked after data has been written on request");
      }
      // HTTP 1.0 does not support chunking so we ignore this if HTTP 1.0
      if (client.getOptions().getProtocolVersion() != io.vertx.core.http.HttpVersion.HTTP_1_0) {
        this.chunked = chunked;
      }
      return this;
    }
  }

  @Override
  public boolean isChunked() {
    synchronized (getLock()) {
      return chunked;
    }
  }

  @Override
  public io.vertx.core.http.HttpMethod method() {
    return method;
  }

  @Override
  public String uri() {
    return request.getUri();
  }

  @Override
  public MultiMap headers() {
    synchronized (getLock()) {
      if (headers == null) {
        headers = new HeadersAdaptor(request.headers());
      }
      return headers;
    }
  }

  @Override
  public HttpClientRequest putHeader(String name, String value) {
    synchronized (getLock()) {
      checkComplete();
      headers().set(name, value);
      return this;
    }
  }

  @Override
  public HttpClientRequest putHeader(String name, Iterable<String> values) {
    synchronized (getLock()) {
      checkComplete();
      headers().set(name, values);
      return this;
    }
  }

  @Override
  public HttpClientRequestImpl write(Buffer chunk) {
    synchronized (getLock()) {
      checkComplete();
      checkResponseHandler();
      ByteBuf buf = chunk.getByteBuf();
      write(buf, false);
      return this;
    }
  }

  @Override
  public HttpClientRequestImpl write(String chunk) {
    synchronized (getLock()) {
      checkComplete();
      checkResponseHandler();
      return write(Buffer.buffer(chunk));
    }
  }

  @Override
  public HttpClientRequestImpl write(String chunk, String enc) {
    synchronized (getLock()) {
      Objects.requireNonNull(enc, "no null encoding accepted");
      checkComplete();
      checkResponseHandler();
      return write(Buffer.buffer(chunk, enc));
    }
  }

  @Override
  public HttpClientRequest setWriteQueueMaxSize(int maxSize) {
    synchronized (getLock()) {
      checkComplete();
      if (conn != null) {
        conn.doSetWriteQueueMaxSize(maxSize);
      } else {
        pendingMaxSize = maxSize;
      }
      return this;
    }
  }

  @Override
  public boolean writeQueueFull() {
    synchronized (getLock()) {
      checkComplete();
      return conn != null && conn.isNotWritable();
    }
  }

  @Override
  public HttpClientRequest drainHandler(Handler<Void> handler) {
    synchronized (getLock()) {
      checkComplete();
      this.drainHandler = handler;
      if (conn != null) {
        conn.getContext().runOnContext(v -> conn.handleInterestedOpsChanged());
      }
      return this;
    }
  }

  @Override
  public HttpClientRequest exceptionHandler(Handler<Throwable> handler) {
    synchronized (getLock()) {
      if (handler != null) {
        checkComplete();
        this.exceptionHandler =
            t -> {
              cancelOutstandingTimeoutTimer();
              handler.handle(t);
            };
      } else {
        this.exceptionHandler = null;
      }
      return this;
    }
  }

  @Override
  public HttpClientRequest continueHandler(Handler<Void> handler) {
    synchronized (getLock()) {
      checkComplete();
      this.continueHandler = handler;
      return this;
    }
  }

  @Override
  public HttpClientRequestImpl sendHead() {
    synchronized (getLock()) {
      checkComplete();
      checkResponseHandler();
      if (conn != null) {
        if (!headWritten) {
          writeHead();
        }
      } else {
        connect();
        writeHead = true;
      }
      return this;
    }
  }

  @Override
  public void end(String chunk) {
    synchronized (getLock()) {
      end(Buffer.buffer(chunk));
    }
  }

  @Override
  public void end(String chunk, String enc) {
    synchronized (getLock()) {
      Objects.requireNonNull(enc, "no null encoding accepted");
      end(Buffer.buffer(chunk, enc));
    }
  }

  @Override
  public void end(Buffer chunk) {
    synchronized (getLock()) {
      checkComplete();
      checkResponseHandler();
      if (!chunked && !contentLengthSet()) {
        headers().set(CONTENT_LENGTH, String.valueOf(chunk.length()));
      }
      write(chunk.getByteBuf(), true);
    }
  }

  @Override
  public void end() {
    synchronized (getLock()) {
      checkComplete();
      checkResponseHandler();
      write(Unpooled.EMPTY_BUFFER, true);
    }
  }

  @Override
  public HttpClientRequest setTimeout(long timeoutMs) {
    synchronized (getLock()) {
      cancelOutstandingTimeoutTimer();
      currentTimeoutTimerId = client.getVertx().setTimer(timeoutMs, id -> handleTimeout(timeoutMs));
      return this;
    }
  }

  @Override
  public HttpClientRequest putHeader(CharSequence name, CharSequence value) {
    synchronized (getLock()) {
      checkComplete();
      headers().set(name, value);
      return this;
    }
  }

  @Override
  public HttpClientRequest putHeader(CharSequence name, Iterable<CharSequence> values) {
    synchronized (getLock()) {
      checkComplete();
      headers().set(name, values);
      return this;
    }
  }

  void dataReceived() {
    synchronized (getLock()) {
      if (currentTimeoutTimerId != -1) {
        lastDataReceived = System.currentTimeMillis();
      }
    }
  }

  void handleDrained() {
    synchronized (getLock()) {
      if (drainHandler != null) {
        try {
          drainHandler.handle(null);
        } catch (Throwable t) {
          handleException(t);
        }
      }
    }
  }

  void handleException(Throwable t) {
    synchronized (getLock()) {
      cancelOutstandingTimeoutTimer();
      exceptionOccurred = true;
      getExceptionHandler().handle(t);
    }
  }

  void handleResponse(HttpClientResponseImpl resp) {
    synchronized (getLock()) {
      // If an exception occurred (e.g. a timeout fired) we won't receive the response.
      if (!exceptionOccurred) {
        cancelOutstandingTimeoutTimer();
        try {
          if (resp.statusCode() == 100) {
            if (continueHandler != null) {
              continueHandler.handle(null);
            }
          } else {
            if (respHandler != null) {
              respHandler.handle(resp);
            }
            if (endHandler != null) {
              endHandler.handle(null);
            }
          }
        } catch (Throwable t) {
          handleException(t);
        }
      }
    }
  }

  HttpRequest getRequest() {
    return request;
  }

  // After connecting we should synchronize on the client connection instance to prevent deadlock
  // conditions, but there is a catch - the client connection is null before connecting, so we
  // synchronize on 'this' before that point
  private Object getLock() {
    // We do the initial check outside the synchronized block to avoid the cost of synchronization
    // once the conn has been set
    if (conn != null) {
      return conn;
    } else {
      synchronized (this) {
        if (conn != null) {
          return conn;
        } else {
          return this;
        }
      }
    }
  }

  private Handler<HttpClientResponse> checkConnect(
      io.vertx.core.http.HttpMethod method, Handler<HttpClientResponse> handler) {
    if (method == io.vertx.core.http.HttpMethod.CONNECT) {
      // special handling for CONNECT
      handler = connectHandler(handler);
    }
    return handler;
  }

  private Handler<HttpClientResponse> connectHandler(Handler<HttpClientResponse> responseHandler) {
    Objects.requireNonNull(responseHandler, "no null responseHandler accepted");
    return resp -> {
      HttpClientResponse response;
      if (resp.statusCode() == 200) {
        // the connect was successful, so force the modification of the ChannelPipeline.
        // Besides this, also pause the socket for now so the user has a chance to register a
        // dataHandler after receiving the NetSocket
        NetSocket socket = resp.netSocket();
        socket.pause();

        response =
            new HttpClientResponse() {
              private boolean resumed;

              @Override
              public int statusCode() {
                return resp.statusCode();
              }

              @Override
              public String statusMessage() {
                return resp.statusMessage();
              }

              @Override
              public MultiMap headers() {
                return resp.headers();
              }

              @Override
              public String getHeader(String headerName) {
                return resp.getHeader(headerName);
              }

              @Override
              public String getHeader(CharSequence headerName) {
                return resp.getHeader(headerName);
              }

              @Override
              public String getTrailer(String trailerName) {
                return resp.getTrailer(trailerName);
              }

              @Override
              public MultiMap trailers() {
                return resp.trailers();
              }

              @Override
              public List<String> cookies() {
                return resp.cookies();
              }

              @Override
              public HttpClientResponse bodyHandler(Handler<Buffer> bodyHandler) {
                resp.bodyHandler(bodyHandler);
                return this;
              }

              @Override
              public synchronized NetSocket netSocket() {
                if (!resumed) {
                  resumed = true;
                  vertx
                      .getContext()
                      .runOnContext(
                          (v) ->
                              socket
                                  .resume()); // resume the socket now as the user had the chance to
                                              // register a dataHandler
                }
                return socket;
              }

              @Override
              public HttpClientResponse endHandler(Handler<Void> endHandler) {
                resp.endHandler(endHandler);
                return this;
              }

              @Override
              public HttpClientResponse handler(Handler<Buffer> handler) {
                resp.handler(handler);
                return this;
              }

              @Override
              public HttpClientResponse pause() {
                resp.pause();
                return this;
              }

              @Override
              public HttpClientResponse resume() {
                resp.resume();
                return this;
              }

              @Override
              public HttpClientResponse exceptionHandler(Handler<Throwable> handler) {
                resp.exceptionHandler(handler);
                return this;
              }
            };
      } else {
        response = resp;
      }
      responseHandler.handle(response);
    };
  }

  private Handler<Throwable> getExceptionHandler() {
    return exceptionHandler != null ? exceptionHandler : log::error;
  }

  private void cancelOutstandingTimeoutTimer() {
    if (currentTimeoutTimerId != -1) {
      client.getVertx().cancelTimer(currentTimeoutTimerId);
      currentTimeoutTimerId = -1;
    }
  }

  private void handleTimeout(long timeoutMs) {
    if (lastDataReceived == 0) {
      timeout(timeoutMs);
    } else {
      long now = System.currentTimeMillis();
      long timeSinceLastData = now - lastDataReceived;
      if (timeSinceLastData >= timeoutMs) {
        timeout(timeoutMs);
      } else {
        // reschedule
        lastDataReceived = 0;
        setTimeout(timeoutMs - timeSinceLastData);
      }
    }
  }

  private void timeout(long timeoutMs) {
    handleException(
        new TimeoutException("The timeout period of " + timeoutMs + "ms has been exceeded"));
  }

  private synchronized void connect() {
    if (!connecting) {
      // We defer actual connection until the first part of body is written or end is called
      // This gives the user an opportunity to set an exception handler before connecting so
      // they can capture any exceptions on connection
      client.getConnection(
          port,
          host,
          conn -> {
            synchronized (this) {
              if (exceptionOccurred) {
                // The request already timed out before it left the pool waiter queue,
                // so just close the connection
                conn.close();
              } else if (!conn.isClosed()) {
                connected(conn);
              } else {
                // The connection has been closed - closed connections can be in the pool
                // Get another connection - Note that we DO NOT call connectionClosed() on the pool
                // at this point
                // that is done asynchronously in the connection closeHandler()
                connect();
              }
            }
          },
          exceptionHandler,
          vertx.getOrCreateContext());

      connecting = true;
    }
  }

  private void connected(ClientConnection conn) {
    conn.setCurrentRequest(this);
    this.conn = conn;
    this.metric =
        client
            .httpClientMetrics()
            .requestBegin(conn.metric(), conn.localAddress(), conn.remoteAddress(), this);

    // If anything was written or the request ended before we got the connection, then
    // we need to write it now

    if (pendingMaxSize != -1) {
      conn.doSetWriteQueueMaxSize(pendingMaxSize);
    }

    if (pendingChunks != null) {
      ByteBuf pending = pendingChunks;
      pendingChunks = null;

      if (completed) {
        // we also need to write the head, so optimize this and write it all out in one go
        writeHeadWithContent(pending, true);

        conn.reportBytesWritten(written);

        if (respHandler != null) {
          conn.endRequest();
        }
      } else {
        writeHeadWithContent(pending, false);
      }
    } else {
      if (completed) {
        // we also need to write the head, so optimize this and write it all out in one go
        writeHeadWithContent(Unpooled.EMPTY_BUFFER, true);

        conn.reportBytesWritten(written);

        if (respHandler != null) {
          conn.endRequest();
        }
      } else {
        if (writeHead) {
          writeHead();
        }
      }
    }
  }

  void reportResponseEnd(HttpClientResponseImpl resp) {
    HttpClientMetrics metrics = client.httpClientMetrics();
    if (metrics.isEnabled()) {
      metrics.responseEnd(metric, resp);
    }
  }

  private boolean contentLengthSet() {
    return headers != null && request.headers().contains(CONTENT_LENGTH);
  }

  private void writeHead() {
    prepareHeaders();
    conn.writeToChannel(request);
    headWritten = true;
  }

  private void writeHeadWithContent(ByteBuf buf, boolean end) {
    prepareHeaders();
    if (end) {
      conn.writeToChannel(new AssembledFullHttpRequest(request, buf));
    } else {
      conn.writeToChannel(new AssembledHttpRequest(request, buf));
    }
    headWritten = true;
  }

  private void prepareHeaders() {
    HttpHeaders headers = request.headers();
    headers.remove(TRANSFER_ENCODING);
    if (!headers.contains(HOST)) {
      request.headers().set(HOST, conn.hostHeader());
    }
    if (chunked) {
      HttpHeaders.setTransferEncodingChunked(request);
    }
    if (client.getOptions().isTryUseCompression()
        && request.headers().get(ACCEPT_ENCODING) == null) {
      // if compression should be used but nothing is specified by the user, support deflate
      // and gzip
      request.headers().set(ACCEPT_ENCODING, DEFLATE_GZIP);
    }
    if (!client.getOptions().isKeepAlive()
        && client.getOptions().getProtocolVersion() == io.vertx.core.http.HttpVersion.HTTP_1_1) {
      request.headers().set(CONNECTION, CLOSE);
    } else if (client.getOptions().isKeepAlive()
        && client.getOptions().getProtocolVersion() == io.vertx.core.http.HttpVersion.HTTP_1_0) {
      request.headers().set(CONNECTION, KEEP_ALIVE);
    }
  }

  private void write(ByteBuf buff, boolean end) {
    int readableBytes = buff.readableBytes();
    if (readableBytes == 0 && !end) {
      // nothing to write to the connection just return
      return;
    }

    if (end) {
      completed = true;
    }
    if (!end && !chunked && !contentLengthSet()) {
      throw new IllegalStateException(
          "You must set the Content-Length header to be the total size of the message "
              + "body BEFORE sending any data if you are not using HTTP chunked encoding.");
    }

    written += buff.readableBytes();
    if (conn == null) {
      if (pendingChunks == null) {
        pendingChunks = buff;
      } else {
        CompositeByteBuf pending;
        if (pendingChunks instanceof CompositeByteBuf) {
          pending = (CompositeByteBuf) pendingChunks;
        } else {
          pending = Unpooled.compositeBuffer();
          pending.addComponent(pendingChunks).writerIndex(pendingChunks.writerIndex());
          pendingChunks = pending;
        }
        pending.addComponent(buff).writerIndex(pending.writerIndex() + buff.writerIndex());
      }
      connect();
    } else {
      if (!headWritten) {
        writeHeadWithContent(buff, end);
      } else {
        if (end) {
          if (buff.isReadable()) {
            conn.writeToChannel(new DefaultLastHttpContent(buff, false));
          } else {
            conn.writeToChannel(LastHttpContent.EMPTY_LAST_CONTENT);
          }
        } else {
          conn.writeToChannel(new DefaultHttpContent(buff));
        }
      }
      if (end) {
        conn.reportBytesWritten(written);

        if (respHandler != null) {
          conn.endRequest();
        }
      }
    }
  }

  private void checkComplete() {
    if (completed) {
      throw new IllegalStateException("Request already complete");
    }
  }

  private void checkResponseHandler() {
    if (respHandler == null) {
      throw new IllegalStateException(
          "You must set an handler for the HttpClientResponse before connecting");
    }
  }

  private HttpMethod toNettyHttpMethod(io.vertx.core.http.HttpMethod method) {
    switch (method) {
      case CONNECT:
        {
          return HttpMethod.CONNECT;
        }
      case GET:
        {
          return HttpMethod.GET;
        }
      case PUT:
        {
          return HttpMethod.PUT;
        }
      case POST:
        {
          return HttpMethod.POST;
        }
      case DELETE:
        {
          return HttpMethod.DELETE;
        }
      case HEAD:
        {
          return HttpMethod.HEAD;
        }
      case OPTIONS:
        {
          return HttpMethod.OPTIONS;
        }
      case TRACE:
        {
          return HttpMethod.TRACE;
        }
      case PATCH:
        {
          return HttpMethod.PATCH;
        }
      default:
        throw new IllegalArgumentException();
    }
  }

  private HttpVersion toNettyHttpVersion(io.vertx.core.http.HttpVersion version) {
    switch (version) {
      case HTTP_1_0:
        {
          return HttpVersion.HTTP_1_0;
        }
      case HTTP_1_1:
        {
          return HttpVersion.HTTP_1_1;
        }
      default:
        throw new IllegalArgumentException("Unsupported HTTP version: " + version);
    }
  }
}
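A minimal usage sketch (not from the original sources; host, port, path and payload are placeholders) of the public API implemented above: the response handler is supplied up front, setChunked(true) satisfies the Content-Length check in write(...), and the actual connection is only established lazily once data is written or end() is called.

import io.vertx.core.Vertx;
import io.vertx.core.http.HttpClient;
import io.vertx.core.http.HttpMethod;

public class HttpClientRequestSketch {

  public static void main(String[] args) {
    HttpClient client = Vertx.vertx().createHttpClient();

    client
        .request(
            HttpMethod.POST,
            8080,
            "localhost",
            "/data",
            resp -> System.out.println("status: " + resp.statusCode()))
        .exceptionHandler(err -> System.err.println("request failed: " + err.getMessage()))
        .setTimeout(5000) // armed via setTimeout(...) and enforced in handleTimeout(...) above
        .setChunked(true) // without this, a Content-Length header is required before write(...)
        .write("chunk-1")
        .end("chunk-2");
  }
}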
/**
 * This abstract implementation serves as a baseline for slacker command executors. It includes the
 * communication protocol for registering the executor for the supported command as well as utility
 * methods for creating and sending the execution results.
 *
 * @author david
 * @since 1.0
 */
public abstract class AbstractSlackerExecutor implements SlackerExecutor {

  // the logger instance
  private static final Logger LOGGER = LoggerFactory.getLogger(AbstractSlackerExecutor.class);

  // the vertx instance that deployed this verticle
  private Vertx vertx;

  // the verticle context
  private Context context;

  // the future factory
  private FutureFactory futureFactory;

  // the executor slacker requests consumer
  private Optional<MessageConsumer<SlackerRequest>> consumer;

  @Override
  public void init(final Vertx vertx, final Context context) {
    this.vertx = vertx;
    this.context = context;
    this.futureFactory = ServiceHelper.loadFactory(FutureFactory.class);
    // register the slacker message codecs
    registerCodecs();
  }

  /**
   * Registers the required {@link MessageCodec} for the {@link SlackerRequest} and {@link
   * SlackerResponse} messages.
   */
  private void registerCodecs() {
    try {
      vertx
          .eventBus()
          .registerCodec(new SlackerRequestMessageCodec())
          .registerCodec(new SlackerResponseMessageCodec());
    } catch (final IllegalStateException e) {
      LOGGER.debug("codecs already registered", e);
    }
  }

  @Override
  public Vertx getVertx() {
    return vertx;
  }

  @Override
  public void start(final Future<Void> startFuture) throws Exception {
    LOGGER.info("starting {0}..", identifier());

    // start the HELLO slacker protocol
    final JsonObject helloMessage =
        new JsonObject().put("i", identifier()).put("d", description()).put("v", version());
    vertx
        .eventBus()
        .send(
            "reg.slacker-server",
            helloMessage,
            result -> {
              if (result.succeeded() && JsonObject.class.isInstance(result.result().body())) {
                final JsonObject response = (JsonObject) result.result().body();
                if (response.containsKey("a")) {
                  // everything went smoothly - register the listener and complete the startup
                  registerListener(response.getString("a"));
                  LOGGER.info("successfully registered {0} executor", identifier());
                  startFuture.complete();
                } else {
                  failStart(startFuture, "no address to bind was received");
                }
              } else {
                // something unexpected happened
                failStart(
                    startFuture,
                    Optional.ofNullable(result.cause())
                        .map(Throwable::getMessage)
                        .orElse("invalid response"));
              }
            });
  }

  /**
   * Registers the listener for this executor on the underlying event bus, so that this executor
   * can successfully receive slacker command requests.
   *
   * <p>Note that this method should only be called when the executor has been successfully
   * registered at the slacker-server.
   *
   * @param address the address assigned by slacker-server
   */
  protected void registerListener(final String address) {
    consumer = Optional.of(vertx.eventBus().consumer(address, this::handleExecutorEvent));
  }

  /**
   * Creates a success response with the result code equal to {@link ResultCode#OK} and no message
   * as reply. This factory method shall be used whenever the executor work-flow has been completed
   * and as such we want to send a reply to the server without sending any message to the
   * channel/issuer of the command.
   *
   * @return a new instance of a success slacker response
   */
  protected SlackerResponse success() {
    return SlackerResponseFactory.create(ResultCode.OK, Optional.empty());
  }

  /**
   * Creates a success response with the result code equal to {@link ResultCode#OK} and with the
   * given message as reply. This factory method shall be used whenever the executor work-flow has
   * been completed and as such we want to send a reply to the server and also inform the
   * channel/issuer of the command.
   *
   * @param message the message to be included at the response
   * @return a new instance of a success slacker response
   */
  protected SlackerResponse success(final String message) {
    return response(ResultCode.OK, message);
  }

  /**
   * Creates an error response with the result code equal to {@link ResultCode#ERROR} and with the
   * given message as the error reason. This factory method shall be used to create the reply
   * message whenever an unexpected error has occurred, such as an {@link Exception} that has been
   * thrown/caught.
   *
   * @param message the message with the error reason
   * @return a new instance of an error slacker response
   */
  protected SlackerResponse error(final String message) {
    return response(ResultCode.ERROR, message);
  }

  /**
   * Creates an invalid response with the result code equal to {@link ResultCode#INVALID} and with
   * the given message as the error reason. This factory method shall be used whenever invalid data
   * has been received.
   *
   * @param message the message with the error reason
   * @return a new instance of an error slacker response
   */
  protected SlackerResponse invalid(final String message) {
    return response(ResultCode.INVALID, message);
  }

  private SlackerResponse response(final ResultCode code, final String message) {
    return SlackerResponseFactory.create(
        code, Optional.of(Objects.requireNonNull(message, "message")));
  }

  /**
   * Handles an incoming request from the event bus
   *
   * @param request the request message to be handled
   */
  private void handleExecutorEvent(final Message<SlackerRequest> request) {
    LOGGER.info("<=<= receiving incoming request <=<=");
    LOGGER.debug(request);

    // execute the request handling asynchronously
    context.runOnContext(
        a -> {
          final Future<SlackerResponse> future = futureFactory.future();
          execute(request.body(), future);
          future.setHandler(
              handler -> {
                if (handler.succeeded()) {
                  LOGGER.info("=>=> successfully handled request =>=>");
                  LOGGER.debug(handler.result());
                  request.reply(
                      handler.result(),
                      new DeliveryOptions().setCodecName(SlackerResponseMessageCodec.NAME));
                } else {
                  request.fail(ResultCode.ERROR.ordinal(), handler.cause().getMessage());
                  LOGGER.error("failed to handle request", handler.cause());
                }
              });
        });
  }

  /**
   * Fails the startup of this executor with the given failure reason
   *
   * @param startFuture the start future to be canceled
   * @param errorMessage the error/failure message
   */
  private void failStart(final Future<Void> startFuture, final String errorMessage) {
    final String reason =
        String.format("unable to register '%s' executor: %s", identifier(), errorMessage);
    LOGGER.error(reason);
    startFuture.fail(reason);
  }

  @Override
  public void stop(final Future<Void> stopFuture) throws Exception {
    LOGGER.info("stopping {0}..", identifier());
    consumer.ifPresent(MessageConsumer::unregister);
    stopFuture.complete();
  }
}
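A sketch of a concrete executor built on the abstract class above. The SlackerExecutor interface itself is not shown here, so the overridden signatures below are assumptions inferred from the calls made by AbstractSlackerExecutor (identifier(), description() and version() in the HELLO message, and execute(request.body(), future) in handleExecutorEvent); imports are omitted because the slacker packages are not visible in this listing, and Future here is io.vertx.core.Future.

public class EchoExecutor extends AbstractSlackerExecutor {

  @Override
  public String identifier() {
    return "echo";
  }

  @Override
  public String description() {
    return "replies to every request with a fixed greeting";
  }

  @Override
  public String version() {
    return "1.0.0";
  }

  @Override
  public void execute(final SlackerRequest request, final Future<SlackerResponse> result) {
    // success(...) from the base class wraps the message in a ResultCode.OK response
    result.complete(success("hello from " + identifier()));
  }
}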
Example #19
/** @author <a href="http://tfox.org">Tim Fox</a> */
public class ClusteredMessage<U, V> extends MessageImpl<U, V> {

  private static final Logger log = LoggerFactory.getLogger(ClusteredMessage.class);

  private static final byte WIRE_PROTOCOL_VERSION = 1;

  private ServerID sender;
  private Buffer wireBuffer;
  private int bodyPos;
  private int headersPos;
  private boolean fromWire;

  public ClusteredMessage() {}

  public ClusteredMessage(
      ServerID sender,
      String address,
      String replyAddress,
      MultiMap headers,
      U sentBody,
      MessageCodec<U, V> messageCodec,
      boolean send,
      EventBusImpl bus) {
    super(address, replyAddress, headers, sentBody, messageCodec, send, bus);
    this.sender = sender;
  }

  protected ClusteredMessage(ClusteredMessage<U, V> other) {
    super(other);
    this.sender = other.sender;
    if (other.sentBody == null) {
      this.wireBuffer = other.wireBuffer;
      this.bodyPos = other.bodyPos;
      this.headersPos = other.headersPos;
    }
    this.fromWire = other.fromWire;
  }

  public ClusteredMessage<U, V> copyBeforeReceive() {
    return new ClusteredMessage<>(this);
  }

  @Override
  public MultiMap headers() {
    // Lazily decode headers
    if (headers == null) {
      // The message has been read from the wire
      if (headersPos != 0) {
        decodeHeaders();
      }
      if (headers == null) {
        headers = new CaseInsensitiveHeaders();
      }
    }
    return headers;
  }

  @Override
  public V body() {
    // Lazily decode the body
    if (receivedBody == null && bodyPos != 0) {
      // The message has been read from the wire
      decodeBody();
    }
    return receivedBody;
  }

  @Override
  public String replyAddress() {
    return replyAddress;
  }

  public Buffer encodeToWire() {
    int length = 1024; // TODO make this configurable
    Buffer buffer = Buffer.buffer(length);
    buffer.appendInt(0);
    buffer.appendByte(WIRE_PROTOCOL_VERSION);
    byte systemCodecID = messageCodec.systemCodecID();
    buffer.appendByte(systemCodecID);
    if (systemCodecID == -1) {
      // User codec
      writeString(buffer, messageCodec.name());
    }
    buffer.appendByte(send ? (byte) 0 : (byte) 1);
    writeString(buffer, address);
    if (replyAddress != null) {
      writeString(buffer, replyAddress);
    } else {
      buffer.appendInt(0);
    }
    buffer.appendInt(sender.port);
    writeString(buffer, sender.host);
    encodeHeaders(buffer);
    writeBody(buffer);
    buffer.setInt(0, buffer.length() - 4);
    return buffer;
  }
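  // Illustrative summary (not part of the original source) of the wire layout produced above and
  // parsed back by readFromWire(...) below:
  //   int    total length, patched in at the end as buffer.length() - 4
  //   byte   wire protocol version (currently 1)
  //   byte   system codec id; -1 means a user codec and is followed by its name as a string
  //   byte   0 for send, 1 for publish
  //   string address, then the reply address (an int 0 is written when there is none)
  //   int    sender port, followed by the sender host as a string
  //   headers block (int byte-length, int entry count, then key/value strings), then the body
  // where every string is an int byte-length followed by its UTF-8 bytes (see writeString below).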

  public void readFromWire(Buffer buffer, CodecManager codecManager) {
    int pos = 0;
    // Overall Length already read when passed in here
    byte protocolVersion = buffer.getByte(pos);
    if (protocolVersion > WIRE_PROTOCOL_VERSION) {
      throw new IllegalStateException(
          "Invalid wire protocol version "
              + protocolVersion
              + " should be <= "
              + WIRE_PROTOCOL_VERSION);
    }
    pos++;
    byte systemCodecCode = buffer.getByte(pos);
    pos++;
    if (systemCodecCode == -1) {
      // User codec
      int length = buffer.getInt(pos);
      pos += 4;
      byte[] bytes = buffer.getBytes(pos, pos + length);
      String codecName = new String(bytes, CharsetUtil.UTF_8);
      messageCodec = codecManager.getCodec(codecName);
      if (messageCodec == null) {
        throw new IllegalStateException("No message codec registered with name " + codecName);
      }
      pos += length;
    } else {
      messageCodec = codecManager.systemCodecs()[systemCodecCode];
    }
    byte bsend = buffer.getByte(pos);
    send = bsend == 0;
    pos++;
    int length = buffer.getInt(pos);
    pos += 4;
    byte[] bytes = buffer.getBytes(pos, pos + length);
    address = new String(bytes, CharsetUtil.UTF_8);
    pos += length;
    length = buffer.getInt(pos);
    pos += 4;
    if (length != 0) {
      bytes = buffer.getBytes(pos, pos + length);
      replyAddress = new String(bytes, CharsetUtil.UTF_8);
      pos += length;
    }
    int senderPort = buffer.getInt(pos);
    pos += 4;
    length = buffer.getInt(pos);
    pos += 4;
    bytes = buffer.getBytes(pos, pos + length);
    String senderHost = new String(bytes, CharsetUtil.UTF_8);
    pos += length;
    headersPos = pos;
    int headersLength = buffer.getInt(pos);
    pos += headersLength;
    bodyPos = pos;
    sender = new ServerID(senderPort, senderHost);
    wireBuffer = buffer;
    fromWire = true;
  }

  private void decodeBody() {
    receivedBody = messageCodec.decodeFromWire(bodyPos, wireBuffer);
    bodyPos = 0;
  }

  private void encodeHeaders(Buffer buffer) {
    if (headers != null && !headers.isEmpty()) {
      int headersLengthPos = buffer.length();
      buffer.appendInt(0);
      buffer.appendInt(headers.size());
      List<Map.Entry<String, String>> entries = headers.entries();
      for (Map.Entry<String, String> entry : entries) {
        writeString(buffer, entry.getKey());
        writeString(buffer, entry.getValue());
      }
      int headersEndPos = buffer.length();
      buffer.setInt(headersLengthPos, headersEndPos - headersLengthPos);
    } else {
      buffer.appendInt(4);
    }
  }

  private void decodeHeaders() {
    int length = wireBuffer.getInt(headersPos);
    if (length != 4) {
      headersPos += 4;
      int numHeaders = wireBuffer.getInt(headersPos);
      headersPos += 4;
      headers = new CaseInsensitiveHeaders();
      for (int i = 0; i < numHeaders; i++) {
        int keyLength = wireBuffer.getInt(headersPos);
        headersPos += 4;
        byte[] bytes = wireBuffer.getBytes(headersPos, headersPos + keyLength);
        String key = new String(bytes, CharsetUtil.UTF_8);
        headersPos += keyLength;
        int valLength = wireBuffer.getInt(headersPos);
        headersPos += 4;
        bytes = wireBuffer.getBytes(headersPos, headersPos + valLength);
        String val = new String(bytes, CharsetUtil.UTF_8);
        headersPos += valLength;
        headers.add(key, val);
      }
    }
    headersPos = 0;
  }

  private void writeBody(Buffer buff) {
    messageCodec.encodeToWire(buff, sentBody);
  }

  private void writeString(Buffer buff, String str) {
    byte[] strBytes = str.getBytes(CharsetUtil.UTF_8);
    buff.appendInt(strBytes.length);
    buff.appendBytes(strBytes);
  }

  ServerID getSender() {
    return sender;
  }

  public boolean isFromWire() {
    return fromWire;
  }
}