@Override
public void run()
{
    Random rand = new Random(System.nanoTime());
    ZContext ctx = new ZContext();
    Socket worker = ctx.createSocket(ZMQ.REQ);
    worker.connect(String.format("ipc://%s-localbe.ipc", self));

    // Tell broker we're ready for work
    ZFrame frame = new ZFrame(WORKER_READY);
    frame.send(worker, 0);

    while (true) {
        // Send request, get reply
        ZMsg msg = ZMsg.recvMsg(worker, 0);
        if (msg == null)
            break; // Interrupted

        // Workers are busy for 0/1 seconds
        try {
            Thread.sleep(rand.nextInt(2) * 1000);
        } catch (InterruptedException e) {
        }
        msg.send(worker);
    }
    ctx.destroy();
}
public void run()
{
    ZContext ctx = new ZContext();
    Socket client = ctx.createSocket(ZMQ.DEALER);

    // Set random identity to make tracing easier
    String identity = String.format("%04X-%04X", rand.nextInt(), rand.nextInt());
    client.setIdentity(identity.getBytes());
    client.connect("tcp://localhost:5570");

    PollItem[] items = new PollItem[] {new PollItem(client, Poller.POLLIN)};
    int requestNbr = 0;
    while (!Thread.currentThread().isInterrupted()) {
        // Tick once per second, pulling in arriving messages
        for (int centitick = 0; centitick < 100; centitick++) {
            ZMQ.poll(items, 10);
            if (items[0].isReadable()) {
                ZMsg msg = ZMsg.recvMsg(client);
                msg.getLast().print(identity);
                msg.destroy();
            }
        }
        client.send(String.format("request #%d", ++requestNbr), 0);
    }
    ctx.destroy();
}
public void run()
{
    Socket worker = ctx.createSocket(ZMQ.DEALER);
    worker.connect("inproc://backend");

    while (!Thread.currentThread().isInterrupted()) {
        // The DEALER socket gives us the address envelope and message
        ZMsg msg = ZMsg.recvMsg(worker);
        ZFrame address = msg.pop();
        ZFrame content = msg.pop();
        assert (content != null);
        msg.destroy();

        // Send 0..4 replies back
        int replies = rand.nextInt(5);
        for (int reply = 0; reply < replies; reply++) {
            // Sleep for some fraction of a second
            try {
                Thread.sleep(rand.nextInt(1000) + 1);
            } catch (InterruptedException e) {
            }
            address.send(worker, ZFrame.REUSE + ZFrame.MORE);
            content.send(worker, ZFrame.REUSE);
        }
        address.destroy();
        content.destroy();
    }
    ctx.destroy();
}
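// A minimal sketch (not part of the original snippet) of how a server main
// could wire this worker to clients, in the usual async-server shape: a ROUTER
// frontend for TCP clients, a DEALER backend bound to "inproc://backend", and
// ZMQ.proxy shuttling frames between them. The worker class name serverWorker
// and the thread count are illustrative assumptions:
public static void main(String[] args)
{
    ZContext ctx = new ZContext();
    Socket frontend = ctx.createSocket(ZMQ.ROUTER);
    frontend.bind("tcp://*:5570");
    Socket backend = ctx.createSocket(ZMQ.DEALER);
    backend.bind("inproc://backend");

    // Start a few worker threads that connect to the inproc backend
    for (int threadNbr = 0; threadNbr < 5; threadNbr++)
        new Thread(new serverWorker(ctx)).start();

    // Blocks until the context is destroyed
    ZMQ.proxy(frontend, backend, null);
    ctx.destroy();
}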
// .split connect method
// Connect to a new server endpoint. We can connect to at most two
// servers. Sends [CONNECT][endpoint][service] to the agent:
public void connect(String address, String service)
{
    ZMsg msg = new ZMsg();
    msg.add("CONNECT");
    msg.add(address);
    msg.add(service);
    msg.send(pipe);
}
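// A short usage sketch (illustrative, not from the original source): a client
// built on this clone API would connect to a primary and a backup server,
// passing the base port as the service field. The endpoints are assumptions:
clone clone = new clone();
clone.connect("tcp://localhost", "5556");
clone.connect("tcp://localhost", "5566");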
@Override
public void onFrame(byte[] rgb, int width, int height, int rotation)
{
    VideoMessage oMsg = new VideoMessage(robotID, rgb, rotation);
    ZMsg zmsg = oMsg.toZmsg();
    zmsg.send(m_oVideoSocket);

    if (m_bDebug) {
        ++m_nFpsCounterPartner;
        long now = System.currentTimeMillis();
        if ((now - m_lLastTimePartner) >= 1000) {
            final int nFPS = m_nFpsCounterPartner;
            Utils.runAsyncUiTask(new Runnable() {
                @Override
                public void run()
                {
                    m_lblFPS.setText("FPS: " + String.valueOf(nFPS));
                }
            });
            m_lLastTimePartner = now;
            m_nFpsCounterPartner = 0;
        }
    }
}
public static void main(String[] args) { if (args.length < 1) { System.out.printf("I: syntax: flserver2 <endpoint>\n"); System.exit(0); } ZContext ctx = new ZContext(); Socket server = ctx.createSocket(ZMQ.REP); server.bind(args[0]); System.out.printf("I: echo service is ready at %s\n", args[0]); while (true) { ZMsg request = ZMsg.recvMsg(server); if (request == null) break; // Interrupted // Fail nastily if run against wrong client assert (request.size() == 2); ZFrame identity = request.pop(); request.destroy(); ZMsg reply = new ZMsg(); reply.add(identity); reply.add("OK"); reply.send(server); } if (Thread.currentThread().isInterrupted()) System.out.printf("W: interrupted\n"); ctx.destroy(); }
/**
 * Message format sent:
 * [000] [006] MDPC01 [004] echo [011] Hello world
 *
 * @param service
 * @param request
 */
public void send(String service, ZMsg request)
{
    assert (request != null);
    // Prepend the protocol envelope to the request
    request.addFirst(service);
    request.addFirst(MDP.C_CLIENT.newFrame());
    request.addFirst("");
    if (verbose) {
        log.format("I: send request to '%s' service.\n", service);
        request.dump(log.out());
    }
    request.send(client);
}
public static void main(String[] args) throws IOException
{
    ZMQ.Context context = ZMQ.context(1);
    ZMQ.Socket subscriber = context.socket(ZMQ.SUB);
    subscriber.connect("tcp://localhost:7817");
    subscriber.subscribe("/GOVI/KV8".getBytes());

    while (true) {
        ZMsg msg = ZMsg.recvMsg(subscriber);
        try {
            // Skip the subscription envelope frame, then collect the body frames
            Iterator<ZFrame> msgs = msg.iterator();
            msgs.next();
            ArrayList<Byte> receivedMsgs = new ArrayList<Byte>();
            while (msgs.hasNext()) {
                for (byte b : msgs.next().getData()) {
                    receivedMsgs.add(b);
                }
            }
            byte[] fullMsg = new byte[receivedMsgs.size()];
            for (int i = 0; i < fullMsg.length; i++) {
                fullMsg[i] = receivedMsgs.get(i);
            }

            // The payload is gzip-compressed; inflate it to text
            InputStream gzipped = new ByteArrayInputStream(fullMsg);
            InputStream in = new GZIPInputStream(gzipped);
            StringBuffer out = new StringBuffer();
            byte[] b = new byte[4096];
            for (int n; (n = in.read(b)) != -1; ) {
                out.append(new String(b, 0, n));
            }
            String s = out.toString();

            CTX c = new CTX(s);
            for (int i = 0; i < c.rows.size(); i++) {
                HashMap<String, String> row = c.rows.get(i);
                System.out.println(row.get("LinePlanningNumber")
                        + " " + row.get("TripStopStatus")
                        + " " + row.get("ExpectedDepartureTime")
                        + " " + row.get("TimingPointCode"));
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
static ZMsg serviceCall(mdcliapi session, String service, ZMsg request)
{
    ZMsg reply = session.send(service, request);
    if (reply != null) {
        ZFrame status = reply.pop();
        if (status.streq("200")) {
            status.destroy();
            return reply;
        }
        else if (status.streq("400")) {
            System.out.println("E: client fatal error, aborting");
        }
        else if (status.streq("500")) {
            System.out.println("E: server fatal error, aborting");
        }
        reply.destroy();
    }
    return null; // Didn't succeed; don't care why not
}
@Override
public int handle(ZLoop loop, PollItem item, Object arg_)
{
    LBBroker arg = (LBBroker) arg_;
    ZMsg msg = ZMsg.recvMsg(arg.frontend);
    if (msg != null) {
        msg.wrap(arg.workers.poll());
        msg.send(arg.backend);

        // Cancel reader on frontend if we went from 1 to 0 workers
        if (arg.workers.size() == 0) {
            loop.removePoller(new PollItem(arg.frontend, 0));
        }
    }
    return 0;
}
public void run()
{
    LOGGER.info("DynamicUtil run()");
    String result = "result from dynamic worker";
    // getParsedPage();
    ZMsg msg = new ZMsg();
    msg.addFirst(result);
    // msg.addFirst(new byte[0]);
    msg.wrap(sendTo);
    msg.addFirst(MDP.W_REPLY.newFrame());
    msg.addFirst(MDP.DYNAMIC_WORKER.newFrame());
    // This is where DEALER differs from REQ: a REQ socket would insert this
    // empty delimiter frame automatically
    msg.addFirst(new byte[0]);
    LOGGER.info("I: sending reply to broker\n" + msg.toString());
    System.out.println("I: sending reply to broker\n" + msg.toString());
    // msg.dump(log.out());
    msg.send(worker);
    msg.destroy();
}
public ZMsg recv()
{
    ZMsg reply = null;
    Poller items = new Poller(1);
    items.register(client, Poller.POLLIN);
    if (items.poll(timeout * 1000) == -1) {
        return null;
    }
    if (items.pollin(0)) {
        ZMsg msg = ZMsg.recvMsg(client, 0); // Won't block; poll said data is ready
        if (verbose) {
            log.format("I: receive reply\n");
            msg.dump(log.out());
        }
        // Reply format: [empty][MDP client header][service name][body]
        assert (msg.size() >= 4);

        ZFrame empty = msg.pop();
        assert (empty.getData().length == 0);
        empty.destroy();

        ZFrame header = msg.pop();
        assert (MDP.C_CLIENT.equals(header.toString()));
        header.destroy();

        ZFrame replyService = msg.pop();
        replyService.destroy();

        reply = msg;
    }
    return reply;
}
public static void main(String[] args) throws InterruptedException
{
    boolean verbose = (args.length > 0 && "-v".equals(args[0]));
    verbose = true;
    MdClientApi clientSession = new MdClientApi("tcp://localhost:5500", verbose);

    int count;
    for (count = 0; count < 100; count++) {
        ZMsg request = new ZMsg();
        request.addString("Hello world");
        clientSession.send("echo", request);
    }
    for (count = 0; count < 100; count++) {
        ZMsg reply = clientSession.recv();
        if (reply != null)
            reply.destroy();
        else
            break; // Interrupt or failure
    }
    System.out.printf("%d requests/replies processed\n", count);
    clientSession.destroy();
}
// .split handling a control message
// Here we handle the different control messages from the frontend;
// SUBTREE, CONNECT, SET, and GET:
private boolean controlMessage()
{
    ZMsg msg = ZMsg.recvMsg(pipe);
    String command = msg.popString();
    if (command == null)
        return false; // Interrupted

    if (command.equals("SUBTREE")) {
        subtree = msg.popString();
    }
    else if (command.equals("CONNECT")) {
        String address = msg.popString();
        String service = msg.popString();
        if (nbrServers < SERVER_MAX) {
            server[nbrServers++] = new Server(ctx, address, Integer.parseInt(service), subtree);
            // We broadcast updates to all known servers
            publisher.connect(String.format("%s:%d", address, Integer.parseInt(service) + 2));
        }
        else
            System.out.printf("E: too many servers (max. %d)\n", SERVER_MAX);
    }
    // .split set and get commands
    // When we set a property, we push the new key-value pair onto
    // all our connected servers:
    else if (command.equals("SET")) {
        String key = msg.popString();
        String value = msg.popString();
        String ttl = msg.popString();
        kvmap.put(key, value);

        // Send key-value pair on to server
        kvmsg kvmsg = new kvmsg(0);
        kvmsg.setKey(key);
        kvmsg.setUUID();
        kvmsg.fmtBody("%s", value);
        kvmsg.setProp("ttl", ttl);
        kvmsg.send(publisher);
        kvmsg.destroy();
    }
    else if (command.equals("GET")) {
        String key = msg.popString();
        String value = kvmap.get(key);
        if (value != null)
            pipe.send(value);
        else
            pipe.send("");
    }
    msg.destroy();
    return true;
}
public static void main(String[] args) { if (args.length < 1) { System.out.printf("I: syntax: flserver1 <endpoint>\n"); System.exit(0); } ZContext ctx = new ZContext(); Socket server = ctx.createSocket(ZMQ.REP); server.bind(args[0]); System.out.printf("I: echo service is ready at %s\n", args[0]); while (true) { ZMsg msg = ZMsg.recvMsg(server); if (msg == null) break; // Interrupted msg.send(server); } if (Thread.currentThread().isInterrupted()) System.out.printf("W: interrupted\n"); ctx.destroy(); }
// .split get method
// Look up value in distributed hash table. Sends [GET][key] to the agent and
// waits for a value response. If there is no value available, will eventually
// return NULL:
public String get(String key)
{
    ZMsg msg = new ZMsg();
    msg.add("GET");
    msg.add(key);
    msg.send(pipe);

    ZMsg reply = ZMsg.recvMsg(pipe);
    if (reply != null) {
        String value = reply.popString();
        reply.destroy();
        return value;
    }
    return null;
}
// .split set method
// Set a new value in the shared hashmap. Sends a [SET][key][value][ttl]
// command through to the agent which does the actual work:
public void set(String key, String value, int ttl)
{
    ZMsg msg = new ZMsg();
    msg.add("SET");
    msg.add(key);
    msg.add(value);
    msg.add(String.format("%d", ttl));
    msg.send(pipe);
}
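// A brief usage sketch (illustrative, not from the original source): set a
// key with a 30-second TTL, then read it back through the agent. The variable
// name clone is an assumption consistent with the connect example above:
clone.set("key1", "value1", 30);
String value = clone.get("key1"); // Returns "value1" until the TTL expires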
@Override
public void run(Object... args)
{
    ZContext context = new ZContext();

    // Prepare our context and sockets
    Socket worker = context.createSocket(ZMQ.REQ);
    ZHelper.setId(worker); // Set a printable identity
    worker.connect("ipc://backend.ipc");

    // Tell backend we're ready for work
    ZFrame frame = new ZFrame(WORKER_READY);
    frame.send(worker, 0);

    while (true) {
        ZMsg msg = ZMsg.recvMsg(worker);
        if (msg == null)
            break;
        msg.getLast().reset("OK");
        msg.send(worker);
    }
    context.destroy();
}
/**
 * DynamicUtil constructor
 *
 * @param worker
 * @param msg
 */
public DynamicUtil(Socket worker, ZMsg msg)
{
    LOGGER.info("DynamicUtil msg from broker: \n" + msg.toString());
    this.worker = worker;
    assert (msg.size() >= 3);
    this.msg = msg;
    sendTo = msg.unwrap();
    // msg.pop();
    ZFrame m = msg.pop();
    data = m.toString();
    LOGGER.info("DynamicUtil from broker data:" + data);
    if (!msg.isEmpty()) {
        clickButton = msg.popString();
    }
}
public static void sendBinary(Socket output, ZFrame routingId, int sequence, byte[] flags,
                              byte[] public_key, UUID identifier, ZFrame address, ZMsg content)
{
    ZprotoExample self = new ZprotoExample(ZprotoExample.BINARY);
    if (routingId != null) {
        self.setRoutingId(routingId);
    }
    self.setSequence(sequence);
    self.setFlags(flags);
    self.setPublic_Key(public_key);
    self.setIdentifier(identifier);
    self.setAddress(address.duplicate());
    self.setContent(content.duplicate());
    self.send(output);
}
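// A hedged usage sketch (socket endpoint, flag bytes, and payload are all
// illustrative assumptions): build the frame fields and send one BINARY
// message. A null routingId is fine here because it is only used when the
// output socket is a ROUTER:
ZContext ctx = new ZContext();
Socket output = ctx.createSocket(ZMQ.DEALER);
output.connect("tcp://localhost:5555");

ZMsg content = new ZMsg();
content.add("payload");
ZprotoExample.sendBinary(output, null, 1,
        new byte[] {(byte) 0x89, 0x55, 0x3D, 0x1F}, // flags block, 4 bytes
        "public-key-bytes".getBytes(),              // public_key chunk
        UUID.randomUUID(),                          // identifier
        new ZFrame("address"), content);
ctx.destroy();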
@Override
public int handle(ZLoop loop, PollItem item, Object arg_)
{
    LBBroker arg = (LBBroker) arg_;
    ZMsg msg = ZMsg.recvMsg(arg.backend);
    if (msg != null) {
        ZFrame address = msg.unwrap();
        // Queue worker address for load-balancing
        arg.workers.add(address);

        // Enable reader on frontend if we went from 0 to 1 workers
        if (arg.workers.size() == 1) {
            PollItem newItem = new PollItem(arg.frontend, ZMQ.Poller.POLLIN);
            loop.addPoller(newItem, frontendHandler, arg);
        }

        // Forward message to client if it's not a READY
        ZFrame frame = msg.getFirst();
        if (Arrays.equals(frame.getData(), WORKER_READY))
            msg.destroy();
        else
            msg.send(arg.frontend);
    }
    return 0;
}
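// A minimal wiring sketch (an assumption, not shown in the snippets above) of
// how these two handlers typically hang off a ZLoop reactor: only the backend
// poller is registered up front; the backend handler adds and removes the
// frontend poller as worker capacity comes and goes:
ZLoop reactor = new ZLoop();
PollItem backendItem = new PollItem(arg.backend, ZMQ.Poller.POLLIN);
reactor.addPoller(backendItem, backendHandler, arg);
reactor.start(); // Runs until interrupted; a handler returning -1 also stops it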
// The main task is an LRU queue with heartbeating on workers so we can
// detect crashed or blocked worker tasks:
public static void main(final String[] args)
{
    final ZContext ctx = new ZContext();
    final Socket frontend = ctx.createSocket(ZMQ.ROUTER);
    final Socket backend = ctx.createSocket(ZMQ.ROUTER);
    frontend.bind("tcp://*:5555"); // For clients
    backend.bind("tcp://*:5556");  // For workers

    // List of available workers
    final ArrayList<Worker> workers = new ArrayList<Worker>();

    // Send out heartbeats at regular intervals
    long heartbeat_at = System.currentTimeMillis() + HEARTBEAT_INTERVAL;

    while (true) {
        final PollItem items[] = {
            new PollItem(backend, ZMQ.Poller.POLLIN),
            new PollItem(frontend, ZMQ.Poller.POLLIN)
        };

        // Poll frontend only if we have available workers
        final int rc = ZMQ.poll(items, workers.size() > 0 ? 2 : 1, HEARTBEAT_INTERVAL);
        if (rc == -1) {
            break; // Interrupted
        }

        // Handle worker activity on backend
        if (items[0].isReadable()) {
            // Use worker address for LRU routing
            final ZMsg msg = ZMsg.recvMsg(backend);
            if (msg == null) {
                break; // Interrupted
            }

            // Any sign of life from worker means it's ready
            final ZFrame address = msg.unwrap();
            final Worker worker = new Worker(address);
            worker.ready(workers);

            // Validate control message, or return reply to client
            if (msg.size() == 1) {
                final ZFrame frame = msg.getFirst();
                final String data = new String(frame.getData());
                if (!data.equals(PPP_READY) && !data.equals(PPP_HEARTBEAT)) {
                    System.out.println("E: invalid message from worker");
                    msg.dump(System.out);
                }
                msg.destroy();
            }
            else {
                msg.send(frontend);
            }
        }
        if (items[1].isReadable()) {
            // Now get next client request, route to next worker
            final ZMsg msg = ZMsg.recvMsg(frontend);
            if (msg == null) {
                break; // Interrupted
            }
            msg.push(Worker.next(workers));
            msg.send(backend);
        }

        // We handle heartbeating after any socket activity. First we send
        // heartbeats to any idle workers if it's time. Then we purge any
        // dead workers:
        if (System.currentTimeMillis() >= heartbeat_at) {
            for (final Worker worker : workers) {
                worker.address.send(backend, ZFrame.REUSE + ZFrame.MORE);
                final ZFrame frame = new ZFrame(PPP_HEARTBEAT);
                frame.send(backend, 0);
            }
            heartbeat_at = System.currentTimeMillis() + HEARTBEAT_INTERVAL;
        }
        Worker.purge(workers);
    }

    // When we're done, clean up properly
    workers.clear();
    ctx.destroy();
}
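// The Worker helper used above is not shown in this snippet. A minimal sketch
// of what it is assumed to provide, following the Paranoid Pirate pattern:
// each worker carries an expiry time refreshed by ready(), next() pops the
// least recently used worker's address, and purge() drops workers whose
// heartbeats have stopped. HEARTBEAT_LIVENESS is an assumed constant:
private static class Worker
{
    final ZFrame address; // Worker identity frame
    long expiry;          // Expires at this time, in msecs

    Worker(ZFrame address)
    {
        this.address = address;
        this.expiry = System.currentTimeMillis() + HEARTBEAT_INTERVAL * HEARTBEAT_LIVENESS;
    }

    // Re-queue this worker as most recently ready, replacing any stale entry
    void ready(ArrayList<Worker> workers)
    {
        Iterator<Worker> it = workers.iterator();
        while (it.hasNext()) {
            if (address.equals(it.next().address)) {
                it.remove();
                break;
            }
        }
        workers.add(this);
    }

    // Pop the next available worker's address off the queue
    static ZFrame next(ArrayList<Worker> workers)
    {
        return workers.remove(0).address;
    }

    // Drop expired workers; the list is ordered oldest-first, so we can stop
    // at the first live one
    static void purge(ArrayList<Worker> workers)
    {
        Iterator<Worker> it = workers.iterator();
        while (it.hasNext()) {
            if (it.next().expiry < System.currentTimeMillis())
                it.remove();
            else
                break;
        }
    }
}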
public static void main(String[] args) throws Exception
{
    boolean verbose = true; // (args.length > 0 && args[0].equals("-v"));
    mdcliapi session = new mdcliapi("tcp://localhost:5555", verbose);

    // 1. Send 'echo' request to Titanic
    ZMsg request = new ZMsg();
    request.add("echo");
    request.add("Hello world");
    ZMsg reply = serviceCall(session, "titanic.request", request);

    ZFrame uuid = null;
    if (reply != null) {
        uuid = reply.pop();
        reply.destroy();
        uuid.print("I: request UUID ");
    }

    // 2. Wait until we get a reply
    while (!Thread.currentThread().isInterrupted()) {
        Thread.sleep(100);
        request = new ZMsg();
        request.add(uuid.duplicate());
        reply = serviceCall(session, "titanic.reply", request);

        if (reply != null) {
            String replyString = reply.getLast().toString();
            System.out.printf("Reply: %s\n", replyString);
            reply.destroy();

            // 3. Close request
            request = new ZMsg();
            request.add(uuid.duplicate());
            reply = serviceCall(session, "titanic.close", request);
            reply.destroy();
            break;
        }
        else {
            System.out.println("I: no reply yet, trying again...");
            Thread.sleep(5000); // Try again in 5 seconds
        }
    }
    uuid.destroy();
    session.destroy();
}
// Takes ownership of supplied frame
public void setContent(ZMsg frame)
{
    if (content != null)
        content.destroy();
    content = frame;
}
public boolean send(Socket socket)
{
    assert (socket != null);
    ZMsg msg = new ZMsg();

    // If we're sending to a ROUTER, send the routingId first
    if (socket.getType() == ZMQ.ROUTER) {
        msg.add(routingId);
    }

    int frameSize = 2 + 1; // Signature and message ID
    switch (id) {
    case LOG: {
        // sequence is a 2-byte integer
        frameSize += 2;
        // version is a 2-byte integer
        frameSize += 2;
        // level is a 1-byte integer
        frameSize += 1;
        // event is a 1-byte integer
        frameSize += 1;
        // node is a 2-byte integer
        frameSize += 2;
        // peer is a 2-byte integer
        frameSize += 2;
        // time is a 8-byte integer
        frameSize += 8;
        // host is a string with 1-byte length
        frameSize++;
        frameSize += (host != null) ? host.length() : 0;
        // data is a long string with 4-byte length
        frameSize += 4;
        frameSize += (data != null) ? data.length() : 0;
        break;
    }
    case STRUCTURES: {
        // sequence is a 2-byte integer
        frameSize += 2;
        // aliases is an array of strings
        frameSize += 4;
        if (aliases != null) {
            for (String value : aliases) {
                frameSize += 4;
                frameSize += value.length();
            }
        }
        // headers is an array of key=value strings
        frameSize += 4;
        if (headers != null) {
            headersBytes = 0;
            for (Map.Entry<String, String> entry : headers.entrySet()) {
                headersBytes += 1 + entry.getKey().length();
                headersBytes += 4 + entry.getValue().length();
            }
            frameSize += headersBytes;
        }
        break;
    }
    case BINARY: {
        // sequence is a 2-byte integer
        frameSize += 2;
        // flags is a block of 4 bytes
        frameSize += 4;
        // public_key is a chunk with 4-byte length
        frameSize += 4;
        frameSize += (public_key != null) ? public_key.length : 0;
        // identifier is uuid with 16-byte length
        frameSize += 16;
        break;
    }
    case TYPES: {
        // sequence is a 2-byte integer
        frameSize += 2;
        // Each client/supplier field is a string with 1-byte length
        frameSize++;
        frameSize += (client_forename != null) ? client_forename.length() : 0;
        frameSize++;
        frameSize += (client_surname != null) ? client_surname.length() : 0;
        frameSize++;
        frameSize += (client_mobile != null) ? client_mobile.length() : 0;
        frameSize++;
        frameSize += (client_email != null) ? client_email.length() : 0;
        frameSize++;
        frameSize += (supplier_forename != null) ? supplier_forename.length() : 0;
        frameSize++;
        frameSize += (supplier_surname != null) ? supplier_surname.length() : 0;
        frameSize++;
        frameSize += (supplier_mobile != null) ? supplier_mobile.length() : 0;
        frameSize++;
        frameSize += (supplier_email != null) ? supplier_email.length() : 0;
        break;
    }
    default:
        System.out.printf("E: bad message type '%d', not sent\n", id);
        assert (false);
    }

    // Now serialize message into the frame
    ZFrame frame = new ZFrame(new byte[frameSize]);
    needle = ByteBuffer.wrap(frame.getData());
    int frameFlags = 0;
    putNumber2(0xAAA0 | 0);
    putNumber1((byte) id);

    switch (id) {
    case LOG: {
        putNumber2(sequence);
        putNumber2(3);
        putNumber1(level);
        putNumber1(event);
        putNumber2(node);
        putNumber2(peer);
        putNumber8(time);
        if (host != null) putString(host); else putNumber1((byte) 0);  // Empty string
        if (data != null) putLongString(data); else putNumber4(0);     // Empty string
        break;
    }
    case STRUCTURES: {
        putNumber2(sequence);
        if (aliases != null) {
            putNumber4(aliases.size());
            for (String value : aliases) {
                putLongString(value);
            }
        }
        else
            putNumber4(0); // Empty string array
        if (headers != null) {
            putNumber4(headers.size());
            for (Map.Entry<String, String> entry : headers.entrySet()) {
                putString(entry.getKey());
                putLongString(entry.getValue());
            }
        }
        else
            putNumber4(0); // Empty hash
        break;
    }
    case BINARY: {
        putNumber2(sequence);
        putBlock(flags, 4);
        if (public_key != null) {
            putNumber4(public_key.length);
            needle.put(public_key, 0, public_key.length);
        }
        else {
            putNumber4(0);
        }
        if (identifier != null) {
            ByteBuffer bb = ByteBuffer.wrap(new byte[16]);
            bb.putLong(identifier.getMostSignificantBits());
            bb.putLong(identifier.getLeastSignificantBits());
            needle.put(bb.array());
        }
        else {
            needle.put(new byte[16]); // Empty Chunk
        }
        break;
    }
    case TYPES: {
        putNumber2(sequence);
        if (client_forename != null) putString(client_forename); else putNumber1((byte) 0);     // Empty string
        if (client_surname != null) putString(client_surname); else putNumber1((byte) 0);       // Empty string
        if (client_mobile != null) putString(client_mobile); else putNumber1((byte) 0);         // Empty string
        if (client_email != null) putString(client_email); else putNumber1((byte) 0);           // Empty string
        if (supplier_forename != null) putString(supplier_forename); else putNumber1((byte) 0); // Empty string
        if (supplier_surname != null) putString(supplier_surname); else putNumber1((byte) 0);   // Empty string
        if (supplier_mobile != null) putString(supplier_mobile); else putNumber1((byte) 0);     // Empty string
        if (supplier_email != null) putString(supplier_email); else putNumber1((byte) 0);       // Empty string
        break;
    }
    }

    // Now send the data frame
    msg.add(frame);

    // Now send any frame fields, in order
    switch (id) {
    case BINARY: {
        // If address isn't set, send an empty frame
        if (address == null)
            address = new ZFrame("".getBytes());
        msg.add(address);
        break;
    }
    }
    switch (id) {
    case BINARY: {
        if (content == null)
            content = new ZMsg();
        for (ZFrame contentPart : content) {
            msg.add(contentPart);
        }
        break;
    }
    }

    // Destroy ZprotoExample object
    msg.send(socket);
    destroy();
    return true;
}
// .split subtree method
// Specify subtree for snapshot and updates, which we must do before
// connecting to a server as the subtree specification is sent as the
// first command to the server. Sends a [SUBTREE][subtree] command to
// the agent:
public void subtree(String subtree)
{
    ZMsg msg = new ZMsg();
    msg.add("SUBTREE");
    msg.add(subtree);
    msg.send(pipe);
}
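// A short ordering sketch (illustrative assumption): because the subtree is
// sent to each server as its first command, call subtree() before connect():
clone.subtree("/client/");
clone.connect("tcp://localhost", "5556");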
// The main task begins by setting up all its sockets. The local frontend
// talks to clients, and our local backend talks to workers. The cloud
// frontend talks to peer brokers as if they were clients, and the cloud
// backend talks to peer brokers as if they were workers. The state
// backend publishes regular state messages, and the state frontend
// subscribes to all state backends to collect these messages. Finally,
// we use a PULL monitor socket to collect printable messages from tasks:
public static void main(String[] argv)
{
    // First argument is this broker's name
    // Other arguments are our peers' names
    if (argv.length < 1) {
        System.out.println("syntax: peering3 me {you}");
        System.exit(-1);
    }
    self = argv[0];
    System.out.printf("I: preparing broker at %s\n", self);
    Random rand = new Random(System.nanoTime());

    ZContext ctx = new ZContext();

    // Prepare local frontend and backend
    Socket localfe = ctx.createSocket(ZMQ.ROUTER);
    localfe.bind(String.format("ipc://%s-localfe.ipc", self));
    Socket localbe = ctx.createSocket(ZMQ.ROUTER);
    localbe.bind(String.format("ipc://%s-localbe.ipc", self));

    // Bind cloud frontend to endpoint
    Socket cloudfe = ctx.createSocket(ZMQ.ROUTER);
    cloudfe.setIdentity(self.getBytes());
    cloudfe.bind(String.format("ipc://%s-cloud.ipc", self));

    // Connect cloud backend to all peers
    Socket cloudbe = ctx.createSocket(ZMQ.ROUTER);
    cloudbe.setIdentity(self.getBytes());
    int argn;
    for (argn = 1; argn < argv.length; argn++) {
        String peer = argv[argn];
        System.out.printf("I: connecting to cloud frontend at '%s'\n", peer);
        cloudbe.connect(String.format("ipc://%s-cloud.ipc", peer));
    }

    // Bind state backend to endpoint
    Socket statebe = ctx.createSocket(ZMQ.PUB);
    statebe.bind(String.format("ipc://%s-state.ipc", self));

    // Connect statefe to all peers
    Socket statefe = ctx.createSocket(ZMQ.SUB);
    statefe.subscribe("".getBytes());
    for (argn = 1; argn < argv.length; argn++) {
        String peer = argv[argn];
        System.out.printf("I: connecting to state backend at '%s'\n", peer);
        statefe.connect(String.format("ipc://%s-state.ipc", peer));
    }

    // Prepare monitor socket
    Socket monitor = ctx.createSocket(ZMQ.PULL);
    monitor.bind(String.format("ipc://%s-monitor.ipc", self));

    // Start local workers
    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++)
        new worker_task().start();

    // Start local clients
    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++)
        new client_task().start();

    // Queue of available workers
    int localCapacity = 0;
    int cloudCapacity = 0;
    ArrayList<ZFrame> workers = new ArrayList<ZFrame>();

    // The main loop has two parts. First we poll workers and our two service
    // sockets (statefe and monitor), in any case. If we have no ready workers,
    // there's no point in looking at incoming requests. These can remain on
    // their internal 0MQ queues:
    while (true) {
        // First, route any waiting replies from workers
        PollItem primary[] = {
            new PollItem(localbe, Poller.POLLIN),
            new PollItem(cloudbe, Poller.POLLIN),
            new PollItem(statefe, Poller.POLLIN),
            new PollItem(monitor, Poller.POLLIN)
        };

        // If we have no workers anyhow, wait indefinitely
        int rc = ZMQ.poll(primary, localCapacity > 0 ? 1000 : -1);
        if (rc == -1)
            break; // Interrupted

        // Track if capacity changes during this iteration
        int previous = localCapacity;

        // Handle reply from local worker
        ZMsg msg = null;
        if (primary[0].isReadable()) {
            msg = ZMsg.recvMsg(localbe);
            if (msg == null)
                break; // Interrupted
            ZFrame address = msg.unwrap();
            workers.add(address);
            localCapacity++;

            // If it's READY, don't route the message any further
            ZFrame frame = msg.getFirst();
            if (new String(frame.getData()).equals(WORKER_READY)) {
                msg.destroy();
                msg = null;
            }
        }
        // Or handle reply from peer broker
        else if (primary[1].isReadable()) {
            msg = ZMsg.recvMsg(cloudbe);
            if (msg == null)
                break; // Interrupted
            // We don't use peer broker address for anything
            ZFrame address = msg.unwrap();
            address.destroy();
        }

        // Route reply to cloud if it's addressed to a broker
        for (argn = 1; msg != null && argn < argv.length; argn++) {
            byte[] data = msg.getFirst().getData();
            if (argv[argn].equals(new String(data))) {
                msg.send(cloudfe);
                msg = null;
            }
        }

        // Route reply to client if we still need to
        if (msg != null)
            msg.send(localfe);

        // If we have input messages on our statefe or monitor sockets, we
        // can process these immediately:
        if (primary[2].isReadable()) {
            String peer = statefe.recvStr();
            String status = statefe.recvStr();
            cloudCapacity = Integer.parseInt(status);
        }
        if (primary[3].isReadable()) {
            String status = monitor.recvStr();
            System.out.println(status);
        }

        // Now we route as many client requests as we have worker capacity
        // for. We may reroute requests from our local frontend, but not from
        // the cloud frontend. We reroute randomly now, just to test things
        // out. In the next version, we'll do this properly by calculating
        // cloud capacity:
        while (localCapacity + cloudCapacity > 0) {
            PollItem secondary[] = {
                new PollItem(localfe, Poller.POLLIN),
                new PollItem(cloudfe, Poller.POLLIN)
            };

            if (localCapacity > 0)
                rc = ZMQ.poll(secondary, 2, 0);
            else
                rc = ZMQ.poll(secondary, 1, 0);

            assert (rc >= 0);

            if (secondary[0].isReadable()) {
                msg = ZMsg.recvMsg(localfe);
            }
            else if (secondary[1].isReadable()) {
                msg = ZMsg.recvMsg(cloudfe);
            }
            else
                break; // No work, go back to backends

            if (localCapacity > 0) {
                ZFrame frame = workers.remove(0);
                msg.wrap(frame);
                msg.send(localbe);
                localCapacity--;
            }
            else {
                // Route to random broker peer
                int random_peer = rand.nextInt(argv.length - 1) + 1;
                msg.push(argv[random_peer]);
                msg.send(cloudbe);
            }
        }

        // We broadcast capacity messages to other peers; to reduce chatter,
        // we do this only if our capacity changed.
        if (localCapacity != previous) {
            // We stick our own address onto the envelope
            statebe.sendMore(self);
            // Broadcast new capacity
            statebe.send(String.format("%d", localCapacity), 0);
        }
    }

    // When we're done, clean up properly
    while (workers.size() > 0) {
        ZFrame frame = workers.remove(0);
        frame.destroy();
    }
    ctx.destroy();
}