Example #1
    public void run() {
      Socket worker = ctx.createSocket(ZMQ.DEALER);
      worker.connect("inproc://backend");

      while (!Thread.currentThread().isInterrupted()) {
        //  The DEALER socket gives us the address envelope and message
        ZMsg msg = ZMsg.recvMsg(worker);
        ZFrame address = msg.pop();
        ZFrame content = msg.pop();
        assert (content != null);
        msg.destroy();

        //  Send 0..4 replies back
        int replies = rand.nextInt(5);
        for (int reply = 0; reply < replies; reply++) {
          //  Sleep for some fraction of a second
          try {
            Thread.sleep(rand.nextInt(1000) + 1);
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); //  Restore the flag so the outer loop can exit
            break;
          }
          address.send(worker, ZFrame.REUSE + ZFrame.MORE);
          content.send(worker, ZFrame.REUSE);
        }
        address.destroy();
        content.destroy();
      }
      ctx.destroy();
    }
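For context, a minimal sketch of the server side this worker attaches to, following the usual asynchronous-server pattern: a ROUTER socket facing clients is bridged to the inproc://backend DEALER endpoint with a proxy, and a pool of worker threads is started. The class name ServerWorker, the endpoint, and the thread count are stand-ins, not part of the original snippet.

  public static void main(String[] args) {
    ZContext ctx = new ZContext();
    //  Frontend socket talks to clients over TCP
    Socket frontend = ctx.createSocket(ZMQ.ROUTER);
    frontend.bind("tcp://*:5570");
    //  Backend socket talks to workers over inproc
    Socket backend = ctx.createSocket(ZMQ.DEALER);
    backend.bind("inproc://backend");
    //  Launch a pool of worker threads; ServerWorker is a stand-in name
    //  for the class whose run() method is shown above
    for (int threadNbr = 0; threadNbr < 5; threadNbr++)
      new Thread(new ServerWorker(ctx)).start();
    //  Shuttle messages between the two sockets until interrupted
    ZMQ.proxy(frontend, backend, null);
    ctx.destroy();
  }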
Example #2
  public static void main(String[] args) {
    if (args.length < 1) {
      System.out.printf("I: syntax: flserver2 <endpoint>\n");
      System.exit(0);
    }
    ZContext ctx = new ZContext();
    Socket server = ctx.createSocket(ZMQ.REP);
    server.bind(args[0]);

    System.out.printf("I: echo service is ready at %s\n", args[0]);
    while (true) {
      ZMsg request = ZMsg.recvMsg(server);
      if (request == null) break; //  Interrupted

      //  Fail nastily if run against wrong client
      assert (request.size() == 2);

      ZFrame identity = request.pop();
      request.destroy();

      ZMsg reply = new ZMsg();
      reply.add(identity);
      reply.add("OK");
      reply.send(server);
    }
    if (Thread.currentThread().isInterrupted()) System.out.printf("W: interrupted\n");

    ctx.destroy();
  }
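A matching client sketch for the echo service above. The endpoint and frame contents are placeholders; the only firm assumption is the two-frame request the server asserts, where the first frame is echoed back ahead of "OK".

  public static void main(String[] args) {
    ZContext ctx = new ZContext();
    Socket client = ctx.createSocket(ZMQ.REQ);
    client.connect("tcp://localhost:5555"); //  Placeholder endpoint
    ZMsg request = new ZMsg();
    request.add("1");           //  Control frame; the server echoes it back
    request.add("Hello world"); //  Request body
    request.send(client);
    ZMsg reply = ZMsg.recvMsg(client); //  Expect [control frame]["OK"]
    if (reply != null) {
      reply.dump(System.out);
      reply.destroy();
    }
    ctx.destroy();
  }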
Example #3
    public void run() {
      ZContext ctx = new ZContext();
      Socket client = ctx.createSocket(ZMQ.DEALER);

      //  Set random identity to make tracing easier
      String identity = String.format("%04X-%04X", rand.nextInt(0x10000), rand.nextInt(0x10000));
      client.setIdentity(identity.getBytes());
      client.connect("tcp://localhost:5570");

      PollItem[] items = new PollItem[] {new PollItem(client, Poller.POLLIN)};

      int requestNbr = 0;
      while (!Thread.currentThread().isInterrupted()) {
        //  Tick once per second, pulling in arriving messages
        for (int centitick = 0; centitick < 100; centitick++) {
          ZMQ.poll(items, 10);
          if (items[0].isReadable()) {
            ZMsg msg = ZMsg.recvMsg(client);
            msg.getLast().print(identity);
            msg.destroy();
          }
        }
        client.send(String.format("request #%d", ++requestNbr), 0);
      }
      ctx.destroy();
    }
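These client tasks run alongside the server; a minimal launcher sketch follows. ClientTask and ServerTask are stand-in names for the threads whose run() methods appear in this example and in the sketch after Example #1.

  public static void main(String[] args) throws Exception {
    //  Start a few asynchronous clients plus the server, let them run
    //  for five seconds, then exit
    new Thread(new ClientTask()).start();
    new Thread(new ClientTask()).start();
    new Thread(new ClientTask()).start();
    new Thread(new ServerTask()).start();
    Thread.sleep(5 * 1000);
  }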
Example #4
  public static void main(String[] args) throws Exception {
    boolean verbose = (args.length > 0 && "-v".equals(args[0]));
    mdcliapi session = new mdcliapi("tcp://localhost:5555", verbose);

    //  1. Send 'echo' request to Titanic
    ZMsg request = new ZMsg();
    request.add("echo");
    request.add("Hello world");
    ZMsg reply = serviceCall(session, "titanic.request", request);

    ZFrame uuid = null;
    if (reply != null) {
      uuid = reply.pop();
      reply.destroy();
      uuid.print("I: request UUID ");
    } else {
      //  Without a UUID we cannot poll for the reply, so give up here
      System.out.println("E: no response from Titanic, is it running?");
      session.destroy();
      return;
    }
    //  2. Wait until we get a reply
    while (!Thread.currentThread().isInterrupted()) {
      Thread.sleep(100);
      request = new ZMsg();
      request.add(uuid.duplicate());
      reply = serviceCall(session, "titanic.reply", request);

      if (reply != null) {
        String replyString = reply.getLast().toString();
        System.out.printf("Reply: %s\n", replyString);
        reply.destroy();

        //  3. Close request
        request = new ZMsg();
        request.add(uuid.duplicate());
        reply = serviceCall(session, "titanic.close", request);
        if (reply != null) reply.destroy();
        break;
      } else {
        System.out.println("I: no reply yet, trying again...");
        Thread.sleep(5000); //  Try again in 5 seconds
      }
    }
    uuid.destroy();
    session.destroy();
  }
Example #5
  //  .split get method
  //  Look up value in distributed hash table. Sends [GET][key] to the agent and
  //  waits for a value response. If there is no value available, will eventually
  //  return NULL:
  public String get(String key) {
    ZMsg msg = new ZMsg();
    msg.add("GET");
    msg.add(key);
    msg.send(pipe);

    ZMsg reply = ZMsg.recvMsg(pipe);
    if (reply != null) {
      String value = reply.popString();
      reply.destroy();
      return value;
    }
    return null;
  }
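A companion sketch, not part of the original snippet: the matching set method, assuming the [SET][key][value][ttl] frame layout that the agent's control handler (see Example #9) expects.

  public void set(String key, String value, int ttl) {
    ZMsg msg = new ZMsg();
    msg.add("SET");
    msg.add(key);
    msg.add(value);
    msg.add(String.format("%d", ttl));
    msg.send(pipe); //  Fire-and-forget; the agent pushes it to all servers
  }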
Example #6
 public void run() {
   LOGGER.info("DynamicUtil run()");
   String result = "result from dynamic worker"; // getParsedPage();
   ZMsg msg = new ZMsg();
   msg.addFirst(result);
   // msg.addFirst(new byte[0]);
   msg.wrap(sendTo);
   msg.addFirst(MDP.W_REPLY.newFrame());
   msg.addFirst(MDP.DYNAMIC_WORKER.newFrame());
   msg.addFirst(new byte[0]); //  This is where DEALER differs from REQ: a REQ socket would add this empty delimiter frame automatically
   LOGGER.info("I:sending reply to broker\n" + msg.toString());
   System.out.println("I:sending reply to broker\n" + msg.toString());
   // msg.dump(log.out());
   msg.send(worker);
   msg.destroy();
 }
Example #7
 static ZMsg serviceCall(mdcliapi session, String service, ZMsg request) {
   ZMsg reply = session.send(service, request);
   if (reply != null) {
     ZFrame status = reply.pop();
     if (status.streq("200")) {
       status.destroy();
       return reply;
     } else if (status.streq("400")) {
       System.out.println("E: client fatal error, aborting");
     } else if (status.streq("500")) {
       System.out.println("E: server fatal error, aborting");
     }
     reply.destroy();
   }
   return null; //  Didn't succeed; don't care why not
 }
Example #8
 public static void main(String[] args) throws InterruptedException {
    boolean verbose = (args.length > 0 && "-v".equals(args[0]));
   MdClientApi clientSession = new MdClientApi("tcp://localhost:5500", verbose);
   int count;
   for (count = 0; count < 100; count++) {
     ZMsg request = new ZMsg();
     request.addString("Hello world");
     clientSession.send("echo", request);
   }
   for (count = 0; count < 100; count++) {
     ZMsg reply = clientSession.recv();
     if (reply != null) reply.destroy();
     else break; // Interrupt or failure
   }
   System.out.printf("%d requests/replies processed\n", count);
   clientSession.destroy();
 }
Example #9
    //  .split handling a control message
    //  Here we handle the different control messages from the frontend;
    //  SUBTREE, CONNECT, SET, and GET:
    private boolean controlMessage() {
      ZMsg msg = ZMsg.recvMsg(pipe);
      String command = msg.popString();
      if (command == null) return false; //  Interrupted

      if (command.equals("SUBTREE")) {
        subtree = msg.popString();
      } else if (command.equals("CONNECT")) {
        String address = msg.popString();
        String service = msg.popString();
        if (nbrServers < SERVER_MAX) {
          server[nbrServers++] = new Server(ctx, address, Integer.parseInt(service), subtree);
          //  We broadcast updates to all known servers
          publisher.connect(String.format("%s:%d", address, Integer.parseInt(service) + 2));
        } else System.out.printf("E: too many servers (max. %d)\n", SERVER_MAX);
      } else
      //  .split set and get commands
      //  When we set a property, we push the new key-value pair onto
      //  all our connected servers:
      if (command.equals("SET")) {
        String key = msg.popString();
        String value = msg.popString();
        String ttl = msg.popString();
        kvmap.put(key, value);

        //  Send key-value pair on to server
        kvmsg kvmsg = new kvmsg(0);
        kvmsg.setKey(key);
        kvmsg.setUUID();
        kvmsg.fmtBody("%s", value);
        kvmsg.setProp("ttl", ttl);
        kvmsg.send(publisher);
        kvmsg.destroy();
      } else if (command.equals("GET")) {
        String key = msg.popString();
        String value = kvmap.get(key);
        if (value != null) pipe.send(value);
        else pipe.send("");
      }
      msg.destroy();

      return true;
    }
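For reference, a sketch of the frontend side of the CONNECT command handled above. The method name and signature are illustrative; it only assumes the same pipe and the [CONNECT][address][service] frame layout.

    public void connect(String address, int port) {
      ZMsg msg = new ZMsg();
      msg.add("CONNECT");
      msg.add(address);
      msg.add(String.format("%d", port)); //  The agent parses this back into a port number
      msg.send(pipe);
    }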
Example #10
    @Override
    public int handle(ZLoop loop, PollItem item, Object arg_) {

      LBBroker arg = (LBBroker) arg_;
      ZMsg msg = ZMsg.recvMsg(arg.backend);
      if (msg != null) {
        ZFrame address = msg.unwrap();
        //  Queue worker address for load-balancing
        arg.workers.add(address);

        //  Enable reader on frontend if we went from 0 to 1 workers
        if (arg.workers.size() == 1) {
          PollItem newItem = new PollItem(arg.frontend, ZMQ.Poller.POLLIN);
          loop.addPoller(newItem, frontendHandler, arg);
        }

        //  Forward message to client if it's not a READY
        ZFrame frame = msg.getFirst();
        if (Arrays.equals(frame.getData(), WORKER_READY)) msg.destroy();
        else msg.send(arg.frontend);
      }
      return 0;
    }
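The frontendHandler registered above would, in this load-balancing pattern, look roughly like the following sketch. It assumes arg.workers is a Queue<ZFrame>; with a List, replace poll() with remove(0).

    @Override
    public int handle(ZLoop loop, PollItem item, Object arg_) {
      LBBroker arg = (LBBroker) arg_;
      //  Get client request, route to the first available worker
      ZMsg msg = ZMsg.recvMsg(arg.frontend);
      if (msg != null) {
        msg.wrap(arg.workers.poll()); //  Envelope with the worker address
        msg.send(arg.backend);
        //  Cancel reader on frontend if we went from 1 to 0 workers
        if (arg.workers.size() == 0) loop.removePoller(new PollItem(arg.frontend, 0));
      }
      return 0;
    }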
Example #11
  //  The main task is an LRU queue with heartbeating on workers so we can
  //  detect crashed or blocked worker tasks:
  public static void main(final String[] args) {
    final ZContext ctx = new ZContext();
    final Socket frontend = ctx.createSocket(ZMQ.ROUTER);
    final Socket backend = ctx.createSocket(ZMQ.ROUTER);
    frontend.bind("tcp://*:5555"); //  For clients
    backend.bind("tcp://*:5556"); //  For workers

    //  List of available workers
    final ArrayList<Worker> workers = new ArrayList<Worker>();

    //  Send out heartbeats at regular intervals
    long heartbeat_at = System.currentTimeMillis() + HEARTBEAT_INTERVAL;

    while (true) {
      final PollItem items[] = {
        new PollItem(backend, ZMQ.Poller.POLLIN), new PollItem(frontend, ZMQ.Poller.POLLIN)
      };
      //  Poll frontend only if we have available workers
      final int rc = ZMQ.poll(items, workers.size() > 0 ? 2 : 1, HEARTBEAT_INTERVAL);
      if (rc == -1) {
        break; //  Interrupted
      }

      //  Handle worker activity on backend
      if (items[0].isReadable()) {
        //  Use worker address for LRU routing
        final ZMsg msg = ZMsg.recvMsg(backend);
        if (msg == null) {
          break; //  Interrupted
        }

        //  Any sign of life from worker means it's ready
        final ZFrame address = msg.unwrap();
        final Worker worker = new Worker(address);
        worker.ready(workers);

        //  Validate control message, or return reply to client
        if (msg.size() == 1) {
          final ZFrame frame = msg.getFirst();
          final String data = new String(frame.getData());
          if (!data.equals(PPP_READY) && !data.equals(PPP_HEARTBEAT)) {
            System.out.println("E: invalid message from worker");
            msg.dump(System.out);
          }
          msg.destroy();
        } else {
          msg.send(frontend);
        }
      }
      if (items[1].isReadable()) {
        //  Now get next client request, route to next worker
        final ZMsg msg = ZMsg.recvMsg(frontend);
        if (msg == null) {
          break; //  Interrupted
        }
        msg.push(Worker.next(workers));
        msg.send(backend);
      }

      //  We handle heartbeating after any socket activity. First we send
      //  heartbeats to any idle workers if it's time. Then we purge any
      //  dead workers:

      if (System.currentTimeMillis() >= heartbeat_at) {
        for (final Worker worker : workers) {

          worker.address.send(backend, ZFrame.REUSE + ZFrame.MORE);
          final ZFrame frame = new ZFrame(PPP_HEARTBEAT);
          frame.send(backend, 0);
        }
        heartbeat_at = System.currentTimeMillis() + HEARTBEAT_INTERVAL;
      }
      Worker.purge(workers);
    }

    //  When we're done, clean up properly
    workers.clear();
    ctx.destroy();
  }
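The Worker helper used above (ready, next, purge) can look like this sketch: each worker carries an expiry time, and purge drops workers whose heartbeats have stopped. HEARTBEAT_LIVENESS is assumed to be the usual liveness constant alongside HEARTBEAT_INTERVAL.

  private static class Worker {
    ZFrame address;  //  Address of worker
    String identity; //  Printable identity
    long expiry;     //  Expires at this time

    protected Worker(ZFrame address) {
      this.address = address;
      identity = new String(address.getData());
      expiry = System.currentTimeMillis() + HEARTBEAT_INTERVAL * HEARTBEAT_LIVENESS;
    }

    //  Move this worker to the end of the ready list, dropping any
    //  previous entry with the same identity
    protected void ready(ArrayList<Worker> workers) {
      Iterator<Worker> it = workers.iterator();
      while (it.hasNext())
        if (identity.equals(it.next().identity)) {
          it.remove();
          break;
        }
      workers.add(this);
    }

    //  Return the address of the least recently used worker
    protected static ZFrame next(ArrayList<Worker> workers) {
      return workers.remove(0).address;
    }

    //  Drop expired workers; the list is ordered oldest first, so we
    //  can stop at the first live one
    protected static void purge(ArrayList<Worker> workers) {
      Iterator<Worker> it = workers.iterator();
      while (it.hasNext()) {
        if (System.currentTimeMillis() < it.next().expiry) break;
        it.remove();
      }
    }
  }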
Example #12
  //  The main task begins by setting-up all its sockets. The local frontend
  //  talks to clients, and our local backend talks to workers. The cloud
  //  frontend talks to peer brokers as if they were clients, and the cloud
  //  backend talks to peer brokers as if they were workers. The state
  //  backend publishes regular state messages, and the state frontend
  //  subscribes to all state backends to collect these messages. Finally,
  //  we use a PULL monitor socket to collect printable messages from tasks:
  public static void main(String[] argv) {
    //  First argument is this broker's name
    //  Other arguments are our peers' names
    //
    if (argv.length < 1) {
      System.out.println("syntax: peering3 me {you}");
      System.exit(-1);
    }
    self = argv[0];
    System.out.printf("I: preparing broker at %s\n", self);
    Random rand = new Random(System.nanoTime());

    ZContext ctx = new ZContext();

    //  Prepare local frontend and backend
    Socket localfe = ctx.createSocket(ZMQ.ROUTER);
    localfe.bind(String.format("ipc://%s-localfe.ipc", self));
    Socket localbe = ctx.createSocket(ZMQ.ROUTER);
    localbe.bind(String.format("ipc://%s-localbe.ipc", self));

    //  Bind cloud frontend to endpoint
    Socket cloudfe = ctx.createSocket(ZMQ.ROUTER);
    cloudfe.setIdentity(self.getBytes());
    cloudfe.bind(String.format("ipc://%s-cloud.ipc", self));

    //  Connect cloud backend to all peers
    Socket cloudbe = ctx.createSocket(ZMQ.ROUTER);
    cloudbe.setIdentity(self.getBytes());
    int argn;
    for (argn = 1; argn < argv.length; argn++) {
      String peer = argv[argn];
      System.out.printf("I: connecting to cloud forintend at '%s'\n", peer);
      cloudbe.connect(String.format("ipc://%s-cloud.ipc", peer));
    }

    //  Bind state backend to endpoint
    Socket statebe = ctx.createSocket(ZMQ.PUB);
    statebe.bind(String.format("ipc://%s-state.ipc", self));

    //  Connect statefe to all peers
    Socket statefe = ctx.createSocket(ZMQ.SUB);
    statefe.subscribe("".getBytes());
    for (argn = 1; argn < argv.length; argn++) {
      String peer = argv[argn];
      System.out.printf("I: connecting to state backend at '%s'\n", peer);
      statefe.connect(String.format("ipc://%s-state.ipc", peer));
    }

    //  Prepare monitor socket
    Socket monitor = ctx.createSocket(ZMQ.PULL);
    monitor.bind(String.format("ipc://%s-monitor.ipc", self));

    //  Start local workers
    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++) new worker_task().start();

    //  Start local clients
    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++) new client_task().start();

    //  Queue of available workers
    int localCapacity = 0;
    int cloudCapacity = 0;
    ArrayList<ZFrame> workers = new ArrayList<ZFrame>();

    //  The main loop has two parts. First we poll workers and our two service
    //  sockets (statefe and monitor), in any case. If we have no ready workers,
    //  there's no point in looking at incoming requests. These can remain on
    //  their internal 0MQ queues:

    while (true) {
      //  First, route any waiting replies from workers
      PollItem primary[] = {
        new PollItem(localbe, Poller.POLLIN),
        new PollItem(cloudbe, Poller.POLLIN),
        new PollItem(statefe, Poller.POLLIN),
        new PollItem(monitor, Poller.POLLIN)
      };
      //  If we have no workers anyhow, wait indefinitely
      int rc = ZMQ.poll(primary, localCapacity > 0 ? 1000 : -1);
      if (rc == -1) break; //  Interrupted

      //  Track if capacity changes during this iteration
      int previous = localCapacity;

      //  Handle reply from local worker
      ZMsg msg = null;
      if (primary[0].isReadable()) {
        msg = ZMsg.recvMsg(localbe);
        if (msg == null) break; //  Interrupted
        ZFrame address = msg.unwrap();
        workers.add(address);
        localCapacity++;

        //  If it's READY, don't route the message any further
        ZFrame frame = msg.getFirst();
        if (new String(frame.getData()).equals(WORKER_READY)) {
          msg.destroy();
          msg = null;
        }
      }
      //  Or handle reply from peer broker
      else if (primary[1].isReadable()) {
        msg = ZMsg.recvMsg(cloudbe);
        if (msg == null) break; //  Interrupted
        //  We don't use peer broker address for anything
        ZFrame address = msg.unwrap();
        address.destroy();
      }
      //  Route reply to cloud if it's addressed to a broker
      for (argn = 1; msg != null && argn < argv.length; argn++) {
        byte[] data = msg.getFirst().getData();
        if (argv[argn].equals(new String(data))) {
          msg.send(cloudfe);
          msg = null;
        }
      }
      //  Route reply to client if we still need to
      if (msg != null) msg.send(localfe);

      //  If we have input messages on our statefe or monitor sockets we
      //  can process these immediately:

      if (primary[2].isReadable()) {
        String peer = statefe.recvStr();
        String status = statefe.recvStr();
        cloudCapacity = Integer.parseInt(status);
      }
      if (primary[3].isReadable()) {
        String status = monitor.recvStr();
        System.out.println(status);
      }

      //  Now we route as many client requests as we have worker capacity
      //  for. We may reroute requests from our local frontend, but not from
      //  the cloud frontend. We reroute randomly now, just to test things
      //  out. In the next version we'll do this properly by calculating
      //  cloud capacity:

      while (localCapacity + cloudCapacity > 0) {
        PollItem secondary[] = {
          new PollItem(localfe, Poller.POLLIN), new PollItem(cloudfe, Poller.POLLIN)
        };

        if (localCapacity > 0) rc = ZMQ.poll(secondary, 2, 0);
        else rc = ZMQ.poll(secondary, 1, 0);

        assert (rc >= 0);

        if (secondary[0].isReadable()) {
          msg = ZMsg.recvMsg(localfe);
        } else if (secondary[1].isReadable()) {
          msg = ZMsg.recvMsg(cloudfe);
        } else break; //  No work, go back to backends

        if (localCapacity > 0) {
          ZFrame frame = workers.remove(0);
          msg.wrap(frame);
          msg.send(localbe);
          localCapacity--;

        } else {
          //  Route to random broker peer
          int random_peer = rand.nextInt(argv.length - 1) + 1;
          msg.push(argv[random_peer]);
          msg.send(cloudbe);
        }
      }

      //  We broadcast capacity messages to other peers; to reduce chatter
      //  we do this only if our capacity changed.

      if (localCapacity != previous) {
        //  We stick our own address onto the envelope
        statebe.sendMore(self);
        //  Broadcast new capacity
        statebe.send(String.format("%d", localCapacity), 0);
      }
    }
    //  When we're done, clean up properly
    while (workers.size() > 0) {
      ZFrame frame = workers.remove(0);
      frame.destroy();
    }

    ctx.destroy();
  }
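The worker_task threads started above are plain REQ workers in this pattern; a sketch, mirroring the peering setup, with WORKER_READY as the ready signal (the one-second busy simulation is illustrative):

  private static class worker_task extends Thread {
    @Override
    public void run() {
      Random rand = new Random(System.nanoTime());
      ZContext ctx = new ZContext();
      Socket worker = ctx.createSocket(ZMQ.REQ);
      worker.connect(String.format("ipc://%s-localbe.ipc", self));

      //  Tell broker we're ready for work
      ZFrame frame = new ZFrame(WORKER_READY);
      frame.send(worker, 0);

      while (true) {
        //  Echo the request back after simulating 0 or 1 seconds of work
        ZMsg msg = ZMsg.recvMsg(worker);
        if (msg == null) break; //  Interrupted
        try {
          Thread.sleep(rand.nextInt(2) * 1000);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          break;
        }
        msg.send(worker);
      }
      ctx.destroy();
    }
  }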
Example #13
 //  Takes ownership of the supplied message
 public void setContent(ZMsg msg) {
   if (content != null) content.destroy();
   content = msg;
 }