Example #1
  //  The main task is an LRU queue with heartbeating on workers so we can
  //  detect crashed or blocked worker tasks:
  public static void main(final String[] args) {
    final ZContext ctx = new ZContext();
    final Socket frontend = ctx.createSocket(ZMQ.ROUTER);
    final Socket backend = ctx.createSocket(ZMQ.ROUTER);
    frontend.bind("tcp://*:5555"); //  For clients
    backend.bind("tcp://*:5556"); //  For workers

    //  List of available workers
    final ArrayList<Worker> workers = new ArrayList<Worker>();

    //  Send out heartbeats at regular intervals
    long heartbeat_at = System.currentTimeMillis() + HEARTBEAT_INTERVAL;

    while (true) {
      final PollItem items[] = {
        new PollItem(backend, ZMQ.Poller.POLLIN), new PollItem(frontend, ZMQ.Poller.POLLIN)
      };
      //  Poll frontend only if we have available workers
      final int rc = ZMQ.poll(items, workers.size() > 0 ? 2 : 1, HEARTBEAT_INTERVAL);
      if (rc == -1) {
        break; //  Interrupted
      }

      //  Handle worker activity on backend
      if (items[0].isReadable()) {
        //  Use worker address for LRU routing
        final ZMsg msg = ZMsg.recvMsg(backend);
        if (msg == null) {
          break; //  Interrupted
        }

        //  Any sign of life from worker means it's ready
        final ZFrame address = msg.unwrap();
        final Worker worker = new Worker(address);
        worker.ready(workers);

        //  Validate control message, or return reply to client
        if (msg.size() == 1) {
          final ZFrame frame = msg.getFirst();
          final String data = new String(frame.getData());
          if (!data.equals(PPP_READY) && !data.equals(PPP_HEARTBEAT)) {
            System.out.println("E: invalid message from worker");
            msg.dump(System.out);
          }
          msg.destroy();
        } else {
          msg.send(frontend);
        }
      }
      if (items[1].isReadable()) {
        //  Now get next client request, route to next worker
        final ZMsg msg = ZMsg.recvMsg(frontend);
        if (msg == null) {
          break; //  Interrupted
        }
        msg.push(Worker.next(workers));
        msg.send(backend);
      }

      //  We handle heartbeating after any socket activity. First we send
      //  heartbeats to any idle workers if it's time. Then we purge any
      //  dead workers:

      if (System.currentTimeMillis() >= heartbeat_at) {
        for (final Worker worker : workers) {

          worker.address.send(backend, ZFrame.REUSE + ZFrame.MORE);
          final ZFrame frame = new ZFrame(PPP_HEARTBEAT);
          frame.send(backend, 0);
        }
        heartbeat_at = System.currentTimeMillis() + HEARTBEAT_INTERVAL;
      }
      Worker.purge(workers);
    }

    //  When we're done, clean up properly
    while (workers.size() > 0) {
      final Worker worker = workers.remove(0);
      worker.address.destroy();
    }
    ctx.destroy();
  }
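
The queue above relies on a small Worker helper and a few protocol constants that are not shown in this listing. The following is a minimal sketch of what they might look like (it also assumes the usual java.util imports); the liveness and interval values are illustrative defaults rather than part of the protocol.

  //  Illustrative defaults; tune for your deployment
  private static final int HEARTBEAT_LIVENESS = 3;    //  Missed heartbeats before a worker is presumed dead
  private static final int HEARTBEAT_INTERVAL = 1000; //  msecs between heartbeats

  //  Paranoid Pirate Protocol constants
  private static final String PPP_READY = "\001";     //  Signals worker is ready
  private static final String PPP_HEARTBEAT = "\002"; //  Signals worker heartbeat

  //  Each Worker holds its routing address and the time at which it expires
  private static class Worker {
    final ZFrame address;  //  Address frame to route to
    final String identity; //  Printable identity, used as a key
    final long expiry;     //  Expires at this time

    Worker(final ZFrame address) {
      this.address = address;
      this.identity = new String(address.getData());
      this.expiry = System.currentTimeMillis() + HEARTBEAT_INTERVAL * HEARTBEAT_LIVENESS;
    }

    //  Move this worker to the end of the ready list, dropping any stale
    //  entry with the same identity
    void ready(final ArrayList<Worker> workers) {
      final Iterator<Worker> it = workers.iterator();
      while (it.hasNext()) {
        if (identity.equals(it.next().identity)) {
          it.remove();
          break;
        }
      }
      workers.add(this);
    }

    //  Return the address of the least recently used worker
    static ZFrame next(final ArrayList<Worker> workers) {
      return workers.remove(0).address;
    }

    //  Remove expired workers; the list runs from oldest to most recently
    //  seen, so we can stop at the first worker that is still alive
    static void purge(final ArrayList<Worker> workers) {
      final Iterator<Worker> it = workers.iterator();
      while (it.hasNext()) {
        if (System.currentTimeMillis() < it.next().expiry) {
          break;
        }
        it.remove();
      }
    }
  }

Keeping the ready list ordered from oldest to most recently seen worker is what lets purge stop at the first live entry instead of scanning the whole list.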
Example #2
  //  The main task begins by setting up all its sockets. The local frontend
  //  talks to clients, and our local backend talks to workers. The cloud
  //  frontend talks to peer brokers as if they were clients, and the cloud
  //  backend talks to peer brokers as if they were workers. The state
  //  backend publishes regular state messages, and the state frontend
  //  subscribes to all state backends to collect these messages. Finally,
  //  we use a PULL monitor socket to collect printable messages from tasks:
  public static void main(String[] argv) {
    //  First argument is this broker's name
    //  Other arguments are our peers' names
    //
    if (argv.length < 1) {
      System.out.println("syntax: peering3 me {you}");
      System.exit(-1);
    }
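    //  For example, a three-node cluster where every broker peers with the
    //  other two (the names here are arbitrary) would be started as:
    //      peering3 DC1 DC2 DC3
    //      peering3 DC2 DC1 DC3
    //      peering3 DC3 DC1 DC2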
    self = argv[0];
    System.out.printf("I: preparing broker at %s\n", self);
    Random rand = new Random(System.nanoTime());

    ZContext ctx = new ZContext();

    //  Prepare local frontend and backend
    Socket localfe = ctx.createSocket(ZMQ.ROUTER);
    localfe.bind(String.format("ipc://%s-localfe.ipc", self));
    Socket localbe = ctx.createSocket(ZMQ.ROUTER);
    localbe.bind(String.format("ipc://%s-localbe.ipc", self));

    //  Bind cloud frontend to endpoint
    Socket cloudfe = ctx.createSocket(ZMQ.ROUTER);
    cloudfe.setIdentity(self.getBytes());
    cloudfe.bind(String.format("ipc://%s-cloud.ipc", self));

    //  Connect cloud backend to all peers
    Socket cloudbe = ctx.createSocket(ZMQ.ROUTER);
    cloudbe.setIdentity(self.getBytes());
    int argn;
    for (argn = 1; argn < argv.length; argn++) {
      String peer = argv[argn];
      System.out.printf("I: connecting to cloud forintend at '%s'\n", peer);
      cloudbe.connect(String.format("ipc://%s-cloud.ipc", peer));
    }

    //  Bind state backend to endpoint
    Socket statebe = ctx.createSocket(ZMQ.PUB);
    statebe.bind(String.format("ipc://%s-state.ipc", self));

    //  Connect statefe to all peers
    Socket statefe = ctx.createSocket(ZMQ.SUB);
    statefe.subscribe("".getBytes());
    for (argn = 1; argn < argv.length; argn++) {
      String peer = argv[argn];
      System.out.printf("I: connecting to state backend at '%s'\n", peer);
      statefe.connect(String.format("ipc://%s-state.ipc", peer));
    }

    //  Prepare monitor socket
    Socket monitor = ctx.createSocket(ZMQ.PULL);
    monitor.bind(String.format("ipc://%s-monitor.ipc", self));

    //  Start local workers
    int worker_nbr;
    for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++) new worker_task().start();

    //  Start local clients
    int client_nbr;
    for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++) new client_task().start();

    //  Queue of available workers
    int localCapacity = 0;
    int cloudCapacity = 0;
    ArrayList<ZFrame> workers = new ArrayList<ZFrame>();

    //  The main loop has two parts. First we poll workers and our two service
    //  sockets (statefe and monitor), in any case. If we have no ready workers,
    //  there's no point in looking at incoming requests. These can remain on
    //  their internal 0MQ queues:

    while (true) {
      //  First, route any waiting replies from workers
      PollItem primary[] = {
        new PollItem(localbe, Poller.POLLIN),
        new PollItem(cloudbe, Poller.POLLIN),
        new PollItem(statefe, Poller.POLLIN),
        new PollItem(monitor, Poller.POLLIN)
      };
      //  If we have no workers anyhow, wait indefinitely
      int rc = ZMQ.poll(primary, localCapacity > 0 ? 1000 : -1);
      if (rc == -1) break; //  Interrupted

      //  Track if capacity changes during this iteration
      int previous = localCapacity;

      //  Handle reply from local worker
      ZMsg msg = null;
      if (primary[0].isReadable()) {
        msg = ZMsg.recvMsg(localbe);
        if (msg == null) break; //  Interrupted
        ZFrame address = msg.unwrap();
        workers.add(address);
        localCapacity++;

        //  If it's READY, don't route the message any further
        ZFrame frame = msg.getFirst();
        if (new String(frame.getData()).equals(WORKER_READY)) {
          msg.destroy();
          msg = null;
        }
      }
      //  Or handle reply from peer broker
      else if (primary[1].isReadable()) {
        msg = ZMsg.recvMsg(cloudbe);
        if (msg == null) break; //  Interrupted
        //  We don't use peer broker address for anything
        ZFrame address = msg.unwrap();
        address.destroy();
      }
      //  Route reply to cloud if it's addressed to a broker
      for (argn = 1; msg != null && argn < argv.length; argn++) {
        byte[] data = msg.getFirst().getData();
        if (argv[argn].equals(new String(data))) {
          msg.send(cloudfe);
          msg = null;
        }
      }
      //  Route reply to client if we still need to
      if (msg != null) msg.send(localfe);

      //  If we have input messages on our statefe or monitor sockets we
      //  can process these immediately:

      if (primary[2].isReadable()) {
        String peer = statefe.recvStr();
        String status = statefe.recvStr();
        cloudCapacity = Integer.parseInt(status);
      }
      if (primary[3].isReadable()) {
        String status = monitor.recvStr();
        System.out.println(status);
      }

      //  Now we route as many client requests as we have worker capacity
      //  for. We may reroute requests from our local frontend, but not from
      //  the cloud frontend. We reroute randomly now, just to test things
      //  out. In the next version, we'll do this properly by calculating
      //  cloud capacity:

      while (localCapacity + cloudCapacity > 0) {
        PollItem secondary[] = {
          new PollItem(localfe, Poller.POLLIN), new PollItem(cloudfe, Poller.POLLIN)
        };

        if (localCapacity > 0) rc = ZMQ.poll(secondary, 2, 0);
        else rc = ZMQ.poll(secondary, 1, 0);

        assert (rc >= 0);

        if (secondary[0].isReadable()) {
          msg = ZMsg.recvMsg(localfe);
        } else if (secondary[1].isReadable()) {
          msg = ZMsg.recvMsg(cloudfe);
        } else break; //  No work, go back to backends

        if (localCapacity > 0) {
          ZFrame frame = workers.remove(0);
          msg.wrap(frame);
          msg.send(localbe);
          localCapacity--;

        } else {
          //  Route to random broker peer
          int random_peer = rand.nextInt(argv.length - 1) + 1;
          msg.push(argv[random_peer]);
          msg.send(cloudbe);
        }
      }

      //  We broadcast capacity messages to other peers; to reduce chatter
      //  we do this only if our capacity changed.

      if (localCapacity != previous) {
        //  We stick our own address onto the envelope
        statebe.sendMore(self);
        //  Broadcast new capacity
        statebe.send(String.format("%d", localCapacity), 0);
      }
    }
    //  When we're done, clean up properly
    while (workers.size() > 0) {
      ZFrame frame = workers.remove(0);
      frame.destroy();
    }

    ctx.destroy();
  }
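
The broker's main loop refers to a few constants and to the worker_task and client_task threads it starts, none of which appear in the listing above. Below is a minimal sketch of them, assuming a stub echo worker and a one-request-per-second HELLO client; the counts and message contents are illustrative only.

  private static final int NBR_CLIENTS = 10;         //  Illustrative counts
  private static final int NBR_WORKERS = 3;
  private static final String WORKER_READY = "\001"; //  Signals worker is ready
  private static String self;                        //  Our broker's name, set in main()

  //  The worker task plugs into the load balancer with a REQ socket,
  //  announces itself as ready, then echoes each request back as the reply
  private static class worker_task extends Thread {
    @Override
    public void run() {
      ZContext ctx = new ZContext();
      Socket worker = ctx.createSocket(ZMQ.REQ);
      worker.connect(String.format("ipc://%s-localbe.ipc", self));

      //  Tell the broker we're ready for work
      new ZFrame(WORKER_READY).send(worker, 0);

      while (true) {
        ZMsg msg = ZMsg.recvMsg(worker);
        if (msg == null) break; //  Interrupted
        msg.send(worker);       //  Echo the request back as the reply
      }
      ctx.destroy();
    }
  }

  //  The client task sends one request at a time and reports each reply on
  //  the monitor socket so the broker can print it
  private static class client_task extends Thread {
    @Override
    public void run() {
      ZContext ctx = new ZContext();
      Socket client = ctx.createSocket(ZMQ.REQ);
      client.connect(String.format("ipc://%s-localfe.ipc", self));
      Socket monitor = ctx.createSocket(ZMQ.PUSH);
      monitor.connect(String.format("ipc://%s-monitor.ipc", self));

      while (true) {
        client.send("HELLO", 0);
        String reply = client.recvStr();
        if (reply == null) break; //  Interrupted
        monitor.send(String.format("Client got: %s", reply), 0);
        try {
          Thread.sleep(1000); //  One request per second
        } catch (InterruptedException e) {
          break;
        }
      }
      ctx.destroy();
    }
  }

Both stubs connect to the same ipc endpoints the broker binds, so they can run in-process as plain threads and need no extra configuration.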