@Override
public void start(final Future<Void> startFuture) throws Exception {
  LOGGER.info("starting {0}..", identifier());
  // start the HELLO slacker protocol
  final JsonObject helloMessage =
      new JsonObject().put("i", identifier()).put("d", description()).put("v", version());
  vertx
      .eventBus()
      .send(
          "reg.slacker-server",
          helloMessage,
          result -> {
            if (result.succeeded() && JsonObject.class.isInstance(result.result().body())) {
              final JsonObject response = (JsonObject) result.result().body();
              if (response.containsKey("a")) {
                // everything went smoothly - register the listener and complete the startup
                registerListener(response.getString("a"));
                LOGGER.info("successfully registered {0} executor", identifier());
                startFuture.complete();
              } else {
                failStart(startFuture, "no address to bind was received");
              }
            } else {
              // something unexpected happened
              failStart(
                  startFuture,
                  Optional.ofNullable(result.cause())
                      .map(Throwable::getMessage)
                      .orElse("invalid response"));
            }
          });
}
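// Hedged sketch (not in the original sources): the slacker-server side of this HELLO handshake is
// not shown. A minimal registrar consistent with the message above would read the executor id from
// the "i" field and reply with the address to bind under the "a" key; the "exec." address prefix
// used here is an assumption, not confirmed by the source.
vertx.eventBus().<JsonObject>consumer("reg.slacker-server", message -> {
  final String executorId = message.body().getString("i");
  message.reply(new JsonObject().put("a", "exec." + executorId));
});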
// Check if there is a quorum for our group
private void checkQuorum() {
  if (quorumSize == 0) {
    this.attainedQuorum = true;
  } else {
    List<String> nodes = clusterManager.getNodes();
    int count = 0;
    for (String node : nodes) {
      String json = clusterMap.get(node);
      if (json != null) {
        JsonObject clusterInfo = new JsonObject(json);
        String group = clusterInfo.getString("group");
        if (group.equals(this.group)) {
          count++;
        }
      }
    }
    boolean attained = count >= quorumSize;
    if (!attainedQuorum && attained) {
      // A quorum has been attained so we can deploy any currently undeployed HA deploymentIDs
      log.info(
          "A quorum has been obtained. Any deploymentIDs waiting on a quorum will now be deployed");
      this.attainedQuorum = true;
    } else if (attainedQuorum && !attained) {
      // We had a quorum but we lost it - we must undeploy any HA deploymentIDs
      log.info(
          "There is no longer a quorum. Any HA deploymentIDs will be undeployed until a quorum is re-attained");
      this.attainedQuorum = false;
    }
  }
}
@Override
public void start() throws Exception {
  address = MQTTSession.ADDRESS;

  JsonObject conf = config();
  localBridgePort = conf.getInteger("local_bridge_port", 7007);
  idleTimeout = conf.getInteger("socket_idle_timeout", 120);
  ssl_cert_key = conf.getString("ssl_cert_key");
  ssl_cert = conf.getString("ssl_cert");
  ssl_trust = conf.getString("ssl_trust");

  // [WebSocket -> BUS] listen WebSocket publish to BUS
  HttpServerOptions opt =
      new HttpServerOptions()
          .setTcpKeepAlive(true)
          .setIdleTimeout(idleTimeout)
          .setPort(localBridgePort);

  if (ssl_cert_key != null && ssl_cert != null && ssl_trust != null) {
    opt.setSsl(true)
        .setClientAuth(ClientAuth.REQUIRED)
        .setPemKeyCertOptions(
            new PemKeyCertOptions().setKeyPath(ssl_cert_key).setCertPath(ssl_cert))
        .setPemTrustOptions(new PemTrustOptions().addCertPath(ssl_trust));
  }

  netServer = vertx.createHttpServer(opt);
  netServer
      .websocketHandler(
          sock -> {
            final EventBusWebsocketBridge ebnb =
                new EventBusWebsocketBridge(sock, vertx.eventBus(), address);
            sock.closeHandler(
                aVoid -> {
                  logger.info(
                      "Bridge Server - closed connection from client ip: " + sock.remoteAddress());
                  ebnb.stop();
                });
            sock.exceptionHandler(
                throwable -> {
                  logger.error("Bridge Server - Exception: " + throwable.getMessage(), throwable);
                  ebnb.stop();
                });

            logger.info("Bridge Server - new connection from client ip: " + sock.remoteAddress());
            RecordParser parser = ebnb.initialHandhakeProtocolParser();
            sock.handler(parser::handle);
          })
      .listen();
}
// Handle failover
private void checkFailover(String failedNodeID, JsonObject theHAInfo) {
  try {
    JsonArray deployments = theHAInfo.getJsonArray("verticles");
    String group = theHAInfo.getString("group");
    String chosen = chooseHashedNode(group, failedNodeID.hashCode());
    if (chosen != null && chosen.equals(this.nodeID)) {
      if (deployments != null && deployments.size() != 0) {
        log.info(
            "node" + nodeID + " says: Node " + failedNodeID + " has failed. This node will deploy "
                + deployments.size() + " deploymentIDs from that node.");
        for (Object obj : deployments) {
          JsonObject app = (JsonObject) obj;
          processFailover(app);
        }
      }
      // Failover is complete! We can now remove the failed node from the cluster map
      clusterMap.remove(failedNodeID);
      callFailoverCompleteHandler(failedNodeID, theHAInfo, true);
    }
  } catch (Throwable t) {
    log.error("Failed to handle failover", t);
    callFailoverCompleteHandler(failedNodeID, theHAInfo, false);
  }
}
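// Hedged sketch (assumption, not shown above): chooseHashedNode picks the node that takes over the
// failed node's deployments. A plausible implementation, consistent with the group filtering used
// in checkQuorum, selects a member of the same HA group deterministically from the failed node's
// hash code.
private String chooseHashedNode(String group, int hashCode) {
  List<String> nodes = clusterManager.getNodes();
  List<String> matchingMembers = new ArrayList<>();
  for (String node : nodes) {
    String json = clusterMap.get(node);
    if (json != null) {
      JsonObject clusterInfo = new JsonObject(json);
      String memberGroup = clusterInfo.getString("group");
      if (group == null || group.equals(memberGroup)) {
        matchingMembers.add(node);
      }
    }
  }
  if (matchingMembers.isEmpty()) {
    return null;
  }
  // hash codes can be negative, so shift into the positive range before taking the modulus
  long pos = ((long) hashCode + Integer.MAX_VALUE) % matchingMembers.size();
  return matchingMembers.get((int) pos);
}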
@Override
public void start() {
  getVertx()
      .eventBus()
      .consumer(
          "ping-pong",
          message -> {
            // log the incoming payload and answer with "pong"
            log.info(String.format("ping-pong receive: %s", message.body()));
            message.reply("pong");
          })
      .completionHandler(
          event -> {
            if (event.succeeded()) log.info("complete handler");
            else log.info("failed");
          });
  log.info("Pong started");
}
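// Hedged usage sketch (not in the original sources): a minimal counterpart that exercises the
// "ping-pong" consumer above by sending a message and logging the reply. The class name
// PingClientVerticle and the plain String payload are assumptions.
public class PingClientVerticle extends AbstractVerticle {

  private static final Logger log = LoggerFactory.getLogger(PingClientVerticle.class);

  @Override
  public void start() {
    vertx
        .eventBus()
        .send(
            "ping-pong",
            "ping",
            reply -> {
              if (reply.succeeded()) {
                log.info("received reply: " + reply.result().body());
              } else {
                log.error("ping failed", reply.cause());
              }
            });
  }
}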
// Undeploy any HA deploymentIDs now there is no quorum
private void undeployHADeployments() {
  for (String deploymentID : deploymentManager.deployments()) {
    Deployment dep = deploymentManager.getDeployment(deploymentID);
    if (dep != null) {
      if (dep.deploymentOptions().isHa()) {
        ContextImpl ctx = vertx.getContext();
        try {
          ContextImpl.setContext(null);
          deploymentManager.undeployVerticle(
              deploymentID,
              result -> {
                if (result.succeeded()) {
                  log.info(
                      "Successfully undeployed HA deployment "
                          + deploymentID
                          + "-"
                          + dep.verticleIdentifier()
                          + " as there is no quorum");
                  addToHADeployList(
                      dep.verticleIdentifier(),
                      dep.deploymentOptions(),
                      result1 -> {
                        if (result1.succeeded()) {
                          log.info(
                              "Successfully redeployed verticle "
                                  + dep.verticleIdentifier()
                                  + " after quorum was re-attained");
                        } else {
                          log.error(
                              "Failed to redeploy verticle "
                                  + dep.verticleIdentifier()
                                  + " after quorum was re-attained",
                              result1.cause());
                        }
                      });
                } else {
                  log.error("Failed to undeploy deployment on lost quorum", result.cause());
                }
              });
        } finally {
          ContextImpl.setContext(ctx);
        }
      }
    }
  }
}
// Deploy an HA verticle
public void deployVerticle(
    final String verticleName,
    DeploymentOptions deploymentOptions,
    final Handler<AsyncResult<String>> doneHandler) {
  if (attainedQuorum) {
    doDeployVerticle(verticleName, deploymentOptions, doneHandler);
  } else {
    log.info(
        "Quorum not attained. Deployment of verticle will be delayed until there's a quorum.");
    addToHADeployList(verticleName, deploymentOptions, doneHandler);
  }
}
private Twinkle() {
  final Logger logger = LoggerFactory.getLogger(Twinkle.class);
  final Vertx vertx = Vertx.vertx(vertxOptions());
  final HttpServer httpServer = vertx.createHttpServer(httpServerOptions());

  // simple health-check route plus a metrics sub-router
  final Router router = Router.router(vertx);
  router.get("/ping").handler(context -> context.response().end("pong"));
  router.mountSubRouter("/stats", new Metrics(vertx, httpServer).router());

  httpServer
      .requestHandler(router::accept)
      .listen(
          8080,
          result -> {
            if (result.succeeded()) {
              logger.info("Twinkle started");
            } else {
              // startup failed: log the cause and shut the Vert.x instance down
              logger.error("Twinkle failed to start", result.cause());
              vertx.close(shutdown -> logger.info("Twinkle shut down"));
            }
          });
}
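// Hedged usage sketch (not in the original sources): a quick way to exercise the /ping route above
// with the core Vert.x HttpClient, assuming the server is listening on localhost:8080.
vertx
    .createHttpClient()
    .getNow(8080, "localhost", "/ping", response ->
        response.bodyHandler(body -> System.out.println("/ping answered: " + body)));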
public static Config configFor(final JsonObject jsonConf) {
  final String baseUrl = jsonConf.getString("baseUrl");
  final Clients clients =
      new Clients(
          baseUrl + "/callback", // oAuth clients
          facebookClient(jsonConf),
          twitterClient());

  final Config config = new Config(clients);
  config.addAuthorizer(AUTHORIZER_ADMIN, new RequireAnyRoleAuthorizer("ROLE_ADMIN"));
  config.addAuthorizer(AUTHORIZER_CUSTOM, new CustomAuthorizer());
  LOG.info("Config created " + config.toString());
  return config;
}
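// Hedged sketch (assumption): facebookClient(jsonConf) is not shown above. A typical pac4j helper
// would build a FacebookClient from an app id and secret taken from the JSON configuration; the
// property names "facebookKey" and "facebookSecret" are placeholders, not confirmed by the source.
private static FacebookClient facebookClient(final JsonObject jsonConf) {
  return new FacebookClient(
      jsonConf.getString("facebookKey"), jsonConf.getString("facebookSecret"));
}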
/**
 * Handles an incoming request from the event bus
 *
 * @param request the request message to be handled
 */
private void handleExecutorEvent(final Message<SlackerRequest> request) {
  LOGGER.info("<=<= receiving incoming request <=<=");
  LOGGER.debug(request);

  // execute the request handling asynchronously
  context.runOnContext(
      a -> {
        final Future<SlackerResponse> future = futureFactory.future();
        execute(request.body(), future);
        future.setHandler(
            handler -> {
              if (handler.succeeded()) {
                LOGGER.info("=>=> successfully handled request =>=>");
                LOGGER.debug(handler.result());
                request.reply(
                    handler.result(),
                    new DeliveryOptions().setCodecName(SlackerResponseMessageCodec.NAME));
              } else {
                request.fail(ResultCode.ERROR.ordinal(), handler.cause().getMessage());
                LOGGER.error("failed to handle request", handler.cause());
              }
            });
      });
}
// Process the failover of a deployment
private void processFailover(JsonObject failedVerticle) {
  if (failDuringFailover) {
    throw new VertxException("Oops!");
  }
  // This method must block until the failover is complete - i.e. the verticle is successfully
  // redeployed
  final String verticleName = failedVerticle.getString("verticle_name");
  final CountDownLatch latch = new CountDownLatch(1);
  final AtomicReference<Throwable> err = new AtomicReference<>();
  // Now deploy this verticle on this node
  ContextImpl ctx = vertx.getContext();
  if (ctx != null) {
    // We could be on main thread in which case we don't want to overwrite tccl
    ContextImpl.setContext(null);
  }
  JsonObject options = failedVerticle.getJsonObject("options");
  try {
    doDeployVerticle(
        verticleName,
        new DeploymentOptions(options),
        result -> {
          if (result.succeeded()) {
            log.info("Successfully redeployed verticle " + verticleName + " after failover");
          } else {
            log.error("Failed to redeploy verticle after failover", result.cause());
            err.set(result.cause());
          }
          latch.countDown();
          Throwable t = err.get();
          if (t != null) {
            throw new VertxException(t);
          }
        });
  } finally {
    if (ctx != null) {
      ContextImpl.setContext(ctx);
    }
  }
  try {
    if (!latch.await(120, TimeUnit.SECONDS)) {
      throw new VertxException("Timed out waiting for redeploy on failover");
    }
  } catch (InterruptedException e) {
    throw new IllegalStateException(e);
  }
}
// Deploy any deploymentIDs that are waiting for a quorum
private void deployHADeployments() {
  int size = toDeployOnQuorum.size();
  if (size != 0) {
    log.info(
        "There are " + size + " HA deploymentIDs waiting on a quorum. These will now be deployed");
    Runnable task;
    while ((task = toDeployOnQuorum.poll()) != null) {
      try {
        task.run();
      } catch (Throwable t) {
        log.error("Failed to run redeployment task", t);
      }
    }
  }
}
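// Hedged sketch (assumption, not shown above): addToHADeployList, referenced by deployVerticle and
// undeployHADeployments, could simply enqueue the deployment as a Runnable on toDeployOnQuorum so
// that deployHADeployments can drain it once a quorum is (re-)attained.
private void addToHADeployList(
    final String verticleName,
    final DeploymentOptions deploymentOptions,
    final Handler<AsyncResult<String>> doneHandler) {
  toDeployOnQuorum.add(
      () -> {
        // run the deployment on a clean context, mirroring undeployHADeployments
        ContextImpl ctx = vertx.getContext();
        try {
          ContextImpl.setContext(null);
          deployVerticle(verticleName, deploymentOptions, doneHandler);
        } finally {
          ContextImpl.setContext(ctx);
        }
      });
}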
@Override
public void stop(final Future<Void> stopFuture) throws Exception {
  LOGGER.info("stopping {0}..", identifier());
  consumer.ifPresent(MessageConsumer::unregister);
  stopFuture.complete();
}
public IdAndWorkingDir(final Class clazz) {
  this.verticleId = clazz.getName();
  final String vertxWorkingDir = buildVertxWorkingDir(JAVA_MAIN_DIR, clazz);
  logger.info("vertxWorkingDir: " + vertxWorkingDir);
  // "vertx.cwd" tells Vert.x which directory to resolve relative file paths against
  System.setProperty("vertx.cwd", vertxWorkingDir);
}
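// Hedged sketch (assumption): buildVertxWorkingDir is not shown above. A plausible implementation
// appends the class's package path to the given source root, e.g. "src/main/java/" + "com/example".
private static String buildVertxWorkingDir(final String baseDir, final Class clazz) {
  return baseDir + clazz.getPackage().getName().replace('.', '/');
}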
public void start() throws Exception {
  if (statisticianCreated.compareAndSet(false, true)) {
    getVertx()
        .deployVerticle(
            "com.alertme.test.Statistician",
            res -> {
              if (res.failed()) {
                logger.error(format("Failed to create statistician: %s", res.cause()));
              }
            });
  }
  logger.info(
      format("Initializing HTTP Server Proxy instance %s", Thread.currentThread().getName()));
  logger.info(format("Primary host: %s, secondary host: %s", primaryHost, secondaryHost));

  final HttpClient client = vertx.createHttpClient();
  vertx
      .createHttpServer()
      .requestHandler(
          request -> {
            collectStats(request);
            final Buffer requestBody = buffer();
            request.handler(requestBody::appendBuffer);
            request.endHandler(
                end -> {
                  final ProxyRequest primaryProxyRequest =
                      new ProxyRequest(
                          request.method(),
                          request.headers(),
                          request.uri(),
                          requestBody,
                          client,
                          primaryHost);
                  final ProxyRequest secondaryProxyRequest =
                      new ProxyRequest(
                          request.method(),
                          request.headers(),
                          request.uri(),
                          requestBody,
                          client,
                          secondaryHost);

                  final TriConsumer<Buffer, Integer, MultiMap> writeResponse =
                      (body, code, headers) -> {
                        if (headers != null) {
                          request.response().headers().setAll(headers);
                        }
                        if (code == null) {
                          logger.error("Code is empty, assuming server error occurred");
                          code = INTERNAL_SERVER_ERROR.code();
                        }
                        request.response().setStatusCode(code);
                        request.response().setChunked(true);
                        if (body != null) {
                          request.response().write(body);
                        }
                        request.response().end();
                        // TODO if we start writing async, then we should call this only when
                        // request ended
                      };

                  if (request.method() == HttpMethod.GET
                      || request.method() == HttpMethod.HEAD
                      || request.method() == HttpMethod.OPTIONS) {
                    // idempotent methods: try the primary first and fall back to the secondary
                    primaryProxyRequest.onFailure(
                        (body, code, headers) -> secondaryProxyRequest.request());
                    primaryProxyRequest.onSuccess(writeResponse);
                    secondaryProxyRequest.onFailure(
                        (body, code, headers) -> primaryProxyRequest.writeResponse(writeResponse));
                    secondaryProxyRequest.onSuccess(writeResponse);
                    primaryProxyRequest.request();
                  } else {
                    // non-idempotent methods: mirror the request to both hosts and answer with the
                    // primary's response
                    primaryProxyRequest.onComplete(writeResponse);
                    primaryProxyRequest.request();
                    secondaryProxyRequest.request();
                  }
                });
          })
      .listen(port);
}
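// Hedged sketch (assumption): TriConsumer is not a JDK or Vert.x type; the proxy above relies on a
// small custom functional interface along these lines being defined elsewhere in the project.
@FunctionalInterface
public interface TriConsumer<A, B, C> {
  void accept(A a, B b, C c);
}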
private void process(@NotNull Collection<String> tags) {
  logger.info(tags.size() + " agencies loaded");
  for (String tag : tags) {
    eventBus.send(AGENCIES_ADDRESS, tag);
  }
}