Example 1
  public void sendDiscoveryRequest(String cluster_name, Promise promise, ViewId view_id)
      throws Exception {
    PingData data = null;
    PhysicalAddress physical_addr =
        (PhysicalAddress) down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr));

    if (view_id == null) {
      List<PhysicalAddress> physical_addrs = Arrays.asList(physical_addr);
      data = new PingData(local_addr, null, false, UUID.get(local_addr), physical_addrs);
    }

    PingHeader hdr = new PingHeader(PingHeader.GET_MBRS_REQ, data, cluster_name);
    hdr.view_id = view_id;

    Collection<PhysicalAddress> cluster_members = fetchClusterMembers(cluster_name);
    if (cluster_members == null) {
      Message msg = new Message(null); // multicast msg
      msg.setFlag(Message.OOB);
      msg.putHeader(getId(), hdr);
      sendMcastDiscoveryRequest(msg);
    } else {
      if (cluster_members.isEmpty()) { // if we don't find any members, return immediately
        if (promise != null) promise.setResult(null);
      } else {
        for (final Address addr : cluster_members) {
          if (addr.equals(physical_addr)) // no need to send the request to myself
            continue;
          final Message msg = new Message(addr, null, null);
          msg.setFlag(Message.OOB);
          msg.putHeader(this.id, hdr);
          if (log.isTraceEnabled())
            log.trace("[FIND_INITIAL_MBRS] sending discovery request to " + msg.getDest());
          if (!sendDiscoveryRequestsInParallel()) {
            down_prot.down(new Event(Event.MSG, msg));
          } else {
            timer.execute(
                new Runnable() {
                  public void run() {
                    try {
                      down_prot.down(new Event(Event.MSG, msg));
                    } catch (Exception ex) {
                      if (log.isErrorEnabled())
                        log.error("failed sending discovery request to " + addr + ": " + ex);
                    }
                  }
                });
          }
        }
      }
    }
  }
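
Every excerpt in this collection leans on the same JGroups send idiom: build a Message, optionally mark it OOB, attach the protocol's own header, wrap it in an Event and hand it to the protocol below. A minimal sketch of that idiom, assuming it lives inside a Protocol subclass (so this.id and down_prot exist, as in the examples); MyHeader is a hypothetical header type standing in for PingHeader, GmsHeader, etc.:

  // A sketch, not library code: MyHeader is hypothetical, and the fields used
  // (this.id, down_prot) are those of a JGroups Protocol subclass as seen above.
  protected void sendOob(Address dest, MyHeader hdr) {
    Message msg = new Message(dest, null, null); // a null dest would mean "send to the group"
    msg.setFlag(Message.OOB);                    // out-of-band: bypasses regular ordering
    msg.putHeader(this.id, hdr);                 // tag the message with our protocol's header
    down_prot.down(new Event(Event.MSG, msg));   // hand it to the protocol below us
  }
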
Example 2
  /**
   * Sends the new view and digest to all subgroup coordinators in coords. Each coord will in turn
   *
   * <ol>
   *   <li>broadcast the new view and digest to all the members of its subgroup (MergeView)
   *   <li>on reception of the view, if it is a MergeView, each member will set the digest and
   *       install the new view
   * </ol>
   */
  private void sendMergeView(
      Collection<Address> coords, MergeData combined_merge_data, MergeId merge_id) {
    if (coords == null || combined_merge_data == null) return;

    View view = combined_merge_data.view;
    Digest digest = combined_merge_data.digest;
    if (view == null || digest == null) {
      if (log.isErrorEnabled())
        log.error("view or digest is null, cannot send consolidated merge view/digest");
      return;
    }

    if (log.isDebugEnabled())
      log.debug(
          gms.local_addr + ": sending merge view " + view.getVid() + " to coordinators " + coords);

    gms.merge_ack_collector.reset(coords);
    int size = gms.merge_ack_collector.size();
    long timeout = gms.view_ack_collection_timeout;

    long start = System.currentTimeMillis();
    for (Address coord : coords) {
      Message msg = new Message(coord, null, null);
      GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.INSTALL_MERGE_VIEW);
      hdr.view = view;
      hdr.my_digest = digest;
      hdr.merge_id = merge_id;
      msg.putHeader(gms.getId(), hdr);
      gms.getDownProtocol().down(new Event(Event.MSG, msg));
    }

    // [JGRP-700] - FLUSH: flushing should span merge
    // if flush is in stack wait for acks from separated island coordinators
    if (gms.flushProtocolInStack) {
      try {
        gms.merge_ack_collector.waitForAllAcks(timeout);
        long stop = System.currentTimeMillis();
        if (log.isTraceEnabled())
          log.trace(
              "received all ACKs ("
                  + size
                  + ") for merge view "
                  + view
                  + " in "
                  + (stop - start)
                  + "ms");
      } catch (TimeoutException e) {
        log.warn(
            gms.local_addr
                + ": failed to collect all ACKs for merge ("
                + size
                + ") for view "
                + view
                + " after "
                + timeout
                + "ms, missing ACKs from "
                + gms.merge_ack_collector.printMissing());
      }
    }
  }
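
The ack-collection step above (reset the collector for the expected members, send, then waitForAllAcks with a timeout) can be approximated with a JDK latch. A rough stand-in sketch, assuming one ack() call per received ACK; unlike JGroups' real AckCollector it cannot report which members are still missing:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class AckWait {
  private final CountDownLatch latch;

  public AckWait(int expected_acks) {
    latch = new CountDownLatch(expected_acks);
  }

  public void ack() { // invoked once per ACK received from a member
    latch.countDown();
  }

  // returns true if all ACKs arrived before the timeout elapsed
  public boolean waitForAllAcks(long timeout_ms) throws InterruptedException {
    return latch.await(timeout_ms, TimeUnit.MILLISECONDS);
  }
}
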
Example 3
    /**
     * Sends a MERGE_REQ to all coords and populates a list of MergeData (in merge_rsps). Returns
     * after coords.size() responses have been received, or timeout msecs have elapsed (whichever is
     * first).
     *
     * <p>If a subgroup coordinator rejects the MERGE_REQ (e.g. because of participation in a
     * different merge), <em>that member will be removed from coords !</em>
     *
     * @param coords A map of coordinator addresses and associated membership lists
     * @param new_merge_id The new merge id
     * @param timeout Max number of msecs to wait for the merge responses from the subgroup coords
     */
    private boolean getMergeDataFromSubgroupCoordinators(
        Map<Address, Collection<Address>> coords, MergeId new_merge_id, long timeout) {
      boolean gotAllResponses;
      long start = System.currentTimeMillis();
      merge_rsps.reset(coords.keySet());
      if (log.isDebugEnabled())
        log.debug(gms.local_addr + ": sending MERGE_REQ to " + coords.keySet());

      for (Map.Entry<Address, Collection<Address>> entry : coords.entrySet()) {
        Address coord = entry.getKey();
        Collection<Address> mbrs = entry.getValue();
        Message msg = new Message(coord, null, null);
        msg.setFlag(Message.OOB);
        GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.MERGE_REQ, mbrs);
        hdr.mbr = gms.local_addr;
        hdr.merge_id = new_merge_id;
        msg.putHeader(gms.getId(), hdr);
        gms.getDownProtocol().down(new Event(Event.MSG, msg));
      }

      // wait until num_rsps_expected >= num_rsps or timeout elapsed
      merge_rsps.waitForAllResponses(timeout);
      gotAllResponses = merge_rsps.hasAllResponses();
      long stop = System.currentTimeMillis();
      if (log.isDebugEnabled())
        log.debug(
            gms.local_addr
                + ": collected "
                + merge_rsps.size()
                + " merge response(s) in "
                + (stop - start)
                + " ms");
      return gotAllResponses;
    }
Example 4
 public void retransmit(long first_seqno, long last_seqno, Address sender) {
   if (last_seqno < first_seqno) return;
   Unicast2Header hdr = Unicast2Header.createXmitReqHeader(first_seqno, last_seqno);
   Message xmit_req = new Message(sender, null, null);
   xmit_req.putHeader(this.id, hdr);
   down_prot.down(new Event(Event.MSG, xmit_req));
 }
Example 5
  protected void sendStableMessage(Address dest, short conn_id, long low, long high) {
    Message stable_msg = new Message(dest, null, null);
    Unicast2Header hdr = Unicast2Header.createStableHeader(conn_id, low, high);
    stable_msg.putHeader(this.id, hdr);
    stable_msg.setFlag(Message.OOB);
    if (log.isTraceEnabled()) {
      StringBuilder sb = new StringBuilder();
      sb.append(local_addr)
          .append(" --> STABLE(")
          .append(dest)
          .append(": ")
          .append(low)
          .append("-")
          .append(high)
          .append(", conn_id=")
          .append(conn_id)
          .append(")");
      log.trace(sb.toString());
    }
    down_prot.down(new Event(Event.MSG, stable_msg));

    ReceiverEntry entry = recv_table.get(dest);
    NakReceiverWindow win = entry != null ? entry.received_msgs : null;
    if (win != null) win.stable(win.getHighestDelivered());
  }
Example 6
 private void sendViewAck(Address dest) {
   Message view_ack = new Message(dest, null, null);
   view_ack.setFlag(Message.OOB);
   GmsHeader tmphdr = new GmsHeader(GmsHeader.VIEW_ACK);
   view_ack.putHeader(this.id, tmphdr);
   down_prot.down(new Event(Event.MSG, view_ack));
 }
Example 7
  /**
   * We need to resend our first message with our conn_id
   *
   * @param sender The member that sent the SEND_FIRST_SEQNO request
   * @param seqno Resend the non null messages in the range [lowest .. seqno]
   */
  protected void handleResendingOfFirstMessage(Address sender, long seqno) {
    if (log.isTraceEnabled())
      log.trace(local_addr + " <-- SEND_FIRST_SEQNO(" + sender + "," + seqno + ")");
    SenderEntry entry = send_table.get(sender);
    Table<Message> win = entry != null ? entry.sent_msgs : null;
    if (win == null) {
      if (log.isErrorEnabled())
        log.error(local_addr + ": sender window for " + sender + " not found");
      return;
    }

    boolean first_sent = false;
    for (long i = win.getLow() + 1; i <= seqno; i++) {
      Message rsp = win.get(i);
      if (rsp == null) continue;
      if (first_sent) {
        down_prot.down(new Event(Event.MSG, rsp));
      } else {
        first_sent = true;
        // We need to copy the UnicastHeader and put it back into the message, because
        // Message.copy() doesn't copy the headers and we would otherwise modify the original
        // message in the sender retransmission window
        // (https://jira.jboss.org/jira/browse/JGRP-965)
        Message copy = rsp.copy();
        Unicast2Header hdr = (Unicast2Header) copy.getHeader(this.id);
        Unicast2Header newhdr = hdr.copy();
        newhdr.first = true;
        copy.putHeader(this.id, newhdr);
        down_prot.down(new Event(Event.MSG, copy));
      }
    }
  }
Example 8
  /**
   * Multicasts a GET_DIGEST_REQ to all current members and waits until all responses
   * (GET_DIGEST_RSP) have been received, or until the max wait time has elapsed.
   *
   * @return the combined digest of the responses received so far (possibly incomplete on timeout)
   */
  private Digest fetchDigestsFromAllMembersInSubPartition(List<Address> current_mbrs) {
    if (current_mbrs == null) return null;

    GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.GET_DIGEST_REQ);
    Message get_digest_req = new Message();
    get_digest_req.setFlag(Message.OOB);
    get_digest_req.putHeader(gms.getId(), hdr);

    long max_wait_time = gms.merge_timeout > 0 ? gms.merge_timeout / 2 : 2000L;
    digest_collector.reset(current_mbrs);

    // add my own digest first
    Digest digest = (Digest) gms.getDownProtocol().down(Event.GET_DIGEST_EVT);
    digest_collector.add(gms.local_addr, digest);

    gms.getDownProtocol().down(new Event(Event.MSG, get_digest_req));
    digest_collector.waitForAllResponses(max_wait_time);
    if (log.isDebugEnabled()) {
      if (digest_collector.hasAllResponses())
        log.debug(gms.local_addr + ": fetched all digests for " + current_mbrs);
      else
        log.debug(
            gms.local_addr
                + ": fetched incomplete digests (after timeout of "
                + max_wait_time
                + ") ms for "
                + current_mbrs);
    }
    Map<Address, Digest> responses = new HashMap<Address, Digest>(digest_collector.getResults());
    MutableDigest retval = new MutableDigest(responses.size());
    for (Digest dig : responses.values()) {
      if (dig != null) retval.add(dig);
    }
    return retval;
  }
Example 9
  /**
   * If merge_id is not equal to this.merge_id then discard. Else cast the view/digest to all
   * members of this group.
   */
  public void handleMergeView(final MergeData data, final MergeId merge_id) {
    if (!matchMergeId(merge_id)) {
      if (log.isErrorEnabled())
        log.error("merge_ids don't match (or are null); merge view discarded");
      return;
    }

    // Only send the new view to our *current* members: if A and B are being merged (we are B),
    // we would *not* receive a VIEW_ACK from A, because A doesn't see us in the pre-merge view
    // yet and discards the view

    // [JGRP-700] - FLUSH: flushing should span merge

    // we have to send the new view only to current members, and we should not wait
    // for view acks from newly merged members
    List<Address> newViewMembers = new Vector<Address>(data.view.getMembers());
    newViewMembers.removeAll(gms.members.getMembers());

    gms.castViewChangeWithDest(data.view, data.digest, null, newViewMembers);
    // if we have flush in stack send ack back to merge coordinator
    if (gms.flushProtocolInStack) {
      Message ack = new Message(data.getSender(), null, null);
      ack.setFlag(Message.OOB);
      GMS.GmsHeader ack_hdr = new GMS.GmsHeader(GMS.GmsHeader.INSTALL_MERGE_VIEW_OK);
      ack.putHeader(gms.getId(), ack_hdr);
      gms.getDownProtocol().down(new Event(Event.MSG, ack));
    }
    cancelMerge(merge_id);
  }
Example 10
  public void sendGetMembersRequest(String cluster_name, Promise promise, boolean return_views_only)
      throws Exception {
    PhysicalAddress physical_addr =
        (PhysicalAddress) down_prot.down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr));
    PingData data =
        new PingData(local_addr, null, false, UUID.get(local_addr), Arrays.asList(physical_addr));
    PingHeader hdr = new PingHeader(PingHeader.GET_MBRS_REQ, data, cluster_name);
    hdr.return_view_only = return_views_only;

    Set<PhysicalAddress> combined_target_members = new HashSet<PhysicalAddress>(initial_hosts);
    combined_target_members.addAll(dynamic_hosts);

    for (final Address addr : combined_target_members) {
      if (addr.equals(physical_addr)) continue;
      final Message msg = new Message(addr, null, null);
      msg.setFlag(Message.OOB);
      msg.putHeader(this.id, hdr);
      if (log.isTraceEnabled())
        log.trace("[FIND_INITIAL_MBRS] sending PING request to " + msg.getDest());
      timer.execute(
          new Runnable() {
            public void run() {
              try {
                down_prot.down(new Event(Event.MSG, msg));
              } catch (Exception ex) {
                if (log.isErrorEnabled())
                  log.error("failed sending discovery request to " + addr + ": " + ex);
              }
            }
          });
    }
  }
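
Each request above is handed to timer.execute() so one slow or blocking send cannot stall the loop over the remaining targets. The same fan-out pattern, sketched with a plain JDK ExecutorService instead of JGroups' internal timer (class and method names here are made up for illustration):

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ParallelFanOut {
  private final ExecutorService pool = Executors.newCachedThreadPool();

  // one task per send, so a slow target delays only its own request
  public void sendAll(List<Runnable> sends) {
    for (final Runnable send : sends) {
      pool.execute(new Runnable() {
        public void run() {
          try {
            send.run();
          } catch (RuntimeException ex) {
            System.err.println("failed sending request: " + ex);
          }
        }
      });
    }
  }
}
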
Example 11
 protected void sendRequestForFirstSeqno(Address dest, long seqno_received) {
   Message msg = new Message(dest).setFlag(Message.Flag.OOB, Message.Flag.INTERNAL);
   UnicastHeader hdr = UnicastHeader.createSendFirstSeqnoHeader(seqno_received);
   msg.putHeader(this.id, hdr);
   if (log.isTraceEnabled())
     log.trace(local_addr + " --> SEND_FIRST_SEQNO(" + dest + "," + seqno_received + ")");
   down_prot.down(new Event(Event.MSG, msg));
 }
Example 12
 protected void sendMergeRejectedResponse(Address sender, MergeId merge_id) {
   Message msg = new Message(sender).setFlag(Message.Flag.OOB, Message.Flag.INTERNAL);
   GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.MERGE_RSP);
   hdr.merge_rejected = true;
   hdr.merge_id = merge_id;
   msg.putHeader(gms.getId(), hdr);
   gms.getDownProtocol().down(new Event(Event.MSG, msg));
 }
Example 13
 /**
  * Fetches the digests from all members and installs them again. Used only for diagnosis and
  * support; don't use this otherwise !
  */
 void fixDigests() {
   Digest digest = fetchDigestsFromAllMembersInSubPartition(gms.view.getMembers());
   Message msg = new Message();
   GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.INSTALL_DIGEST);
   hdr.my_digest = digest;
   msg.putHeader(gms.getId(), hdr);
   gms.getDownProtocol().down(new Event(Event.MSG, msg));
 }
Example 14
 protected void sendMergeRejectedResponse(Address sender, MergeId merge_id) {
   Message msg = new Message(sender, null, null);
   msg.setFlag(Message.OOB);
   GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.MERGE_RSP);
   hdr.merge_rejected = true;
   hdr.merge_id = merge_id;
   msg.putHeader(gms.getId(), hdr);
   if (log.isDebugEnabled()) log.debug("merge response=" + hdr);
   gms.getDownProtocol().down(new Event(Event.MSG, msg));
 }
Example 15
 /** Send back a response containing view and digest to sender */
 private void sendMergeResponse(Address sender, View view, Digest digest, MergeId merge_id) {
   Message msg = new Message(sender).setFlag(Message.Flag.OOB, Message.Flag.INTERNAL);
   GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.MERGE_RSP);
   hdr.merge_id = merge_id;
   hdr.view = view;
   hdr.my_digest = digest;
   msg.putHeader(gms.getId(), hdr);
   if (log.isTraceEnabled()) log.trace(gms.local_addr + ": sending merge response=" + hdr);
   gms.getDownProtocol().down(new Event(Event.MSG, msg));
 }
Example 16
 public void retransmit(SeqnoList missing, Address sender) {
   Unicast2Header hdr = Unicast2Header.createXmitReqHeader();
   Message retransmit_msg = new Message(sender, null, missing);
   retransmit_msg.setFlag(Message.OOB);
   if (log.isTraceEnabled())
     log.trace(local_addr + ": sending XMIT_REQ (" + missing + ") to " + sender);
   retransmit_msg.putHeader(this.id, hdr);
   down_prot.down(new Event(Event.MSG, retransmit_msg));
   xmit_reqs_sent.addAndGet(missing.size());
 }
Example 17
  private void sendMergeCancelledMessage(Collection<Address> coords, MergeId merge_id) {
    if (coords == null || merge_id == null) return;

    for (Address coord : coords) {
      Message msg = new Message(coord);
      // msg.setFlag(Message.Flag.OOB);
      GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.CANCEL_MERGE);
      hdr.merge_id = merge_id;
      msg.putHeader(gms.getId(), hdr);
      gms.getDownProtocol().down(new Event(Event.MSG, msg));
    }
  }
Example 18
  private void sendMergeCancelledMessage(Collection<Address> coords, MergeId merge_id) {
    if (coords == null || merge_id == null) return;

    for (Address coord : coords) {
      Message msg = new Message(coord, null, null);
      // msg.setFlag(Message.OOB);
      GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.CANCEL_MERGE);
      hdr.merge_id = merge_id;
      msg.putHeader(gms.getId(), hdr);
      if (log.isDebugEnabled()) log.debug(gms.local_addr + ": sending cancel merge to " + coord);
      gms.getDownProtocol().down(new Event(Event.MSG, msg));
    }
  }
Example 19
  protected void sendDiscoveryResponse(
      Address logical_addr,
      List<PhysicalAddress> physical_addrs,
      boolean is_server,
      boolean return_view_only,
      String logical_name,
      final Address sender) {
    PingData data;
    if (return_view_only) {
      data = new PingData(logical_addr, view, is_server, null, null);
    } else {
      ViewId view_id = view != null ? view.getViewId() : null;
      data = new PingData(logical_addr, null, view_id, is_server, logical_name, physical_addrs);
    }

    final Message rsp_msg = new Message(sender, null, null);
    rsp_msg.setFlag(Message.OOB);
    final PingHeader rsp_hdr = new PingHeader(PingHeader.GET_MBRS_RSP, data);
    rsp_msg.putHeader(this.id, rsp_hdr);

    if (stagger_timeout > 0) {
      int view_size = view != null ? view.size() : 10;
      int rank = Util.getRank(view, local_addr); // returns 0 if view or local_addr are null
      long sleep_time =
          rank == 0
              ? Util.random(stagger_timeout)
              : stagger_timeout * rank / view_size - (stagger_timeout / view_size);
      timer.schedule(
          new Runnable() {
            public void run() {
              if (log.isTraceEnabled())
                log.trace(
                    local_addr
                        + ": received GET_MBRS_REQ from "
                        + sender
                        + ", sending staggered response "
                        + rsp_hdr);
              down_prot.down(new Event(Event.MSG, rsp_msg));
            }
          },
          sleep_time,
          TimeUnit.MILLISECONDS);
      return;
    }

    if (log.isTraceEnabled())
      log.trace("received GET_MBRS_REQ from " + sender + ", sending response " + rsp_hdr);
    down_prot.down(new Event(Event.MSG, rsp_msg));
  }
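
The sleep_time formula above is worth a worked example: rank 0 (typically the coordinator) replies after a random delay, and every other member waits in proportion to its rank, so responses trickle in rather than arriving all at once. With hypothetical values:

  // Worked example (hypothetical values): stagger_timeout=3000 ms, 10-member view, rank 4
  long stagger_timeout = 3000;
  int view_size = 10, rank = 4;
  long sleep_time = stagger_timeout * rank / view_size - (stagger_timeout / view_size);
  // = 3000 * 4 / 10 - 3000 / 10 = 1200 - 300 = 900 ms
  // rank 1 would wait 0 ms and rank 9 would wait 2400 ms, spreading replies over the window
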
Example 20
  /**
   * We need to resend our first message with our conn_id
   *
   * @param sender The member that sent the SEND_FIRST_SEQNO request
   * @param seqno Resend messages in the range [lowest .. seqno]
   */
  private void handleResendingOfFirstMessage(Address sender, long seqno) {
    if (log.isTraceEnabled())
      log.trace(local_addr + " <-- SEND_FIRST_SEQNO(" + sender + "," + seqno + ")");
    SenderEntry entry = send_table.get(sender);
    AckSenderWindow win = entry != null ? entry.sent_msgs : null;
    if (win == null) {
      if (log.isErrorEnabled())
        log.error(local_addr + ": sender window for " + sender + " not found");
      return;
    }
    long lowest = win.getLowest();
    Message rsp = win.get(lowest);
    if (rsp == null) return;

    // We need to copy the UnicastHeader and put it back into the message, because
    // Message.copy() doesn't copy the headers and we would otherwise modify the original
    // message in the sender retransmission window
    // (https://jira.jboss.org/jira/browse/JGRP-965)
    Message copy = rsp.copy();
    Unicast2Header hdr = (Unicast2Header) copy.getHeader(this.id);
    Unicast2Header newhdr = hdr.copy();
    newhdr.first = true;
    copy.putHeader(this.id, newhdr);

    if (log.isTraceEnabled()) {
      StringBuilder sb = new StringBuilder();
      sb.append(local_addr)
          .append(" --> DATA(")
          .append(copy.getDest())
          .append(": #")
          .append(newhdr.seqno)
          .append(", conn_id=")
          .append(newhdr.conn_id);
      if (newhdr.first) sb.append(", first");
      sb.append(')');
      log.trace(sb);
    }
    down_prot.down(new Event(Event.MSG, copy));

    if (++lowest > seqno) return;
    for (long i = lowest; i <= seqno; i++) {
      rsp = win.get(i);
      if (rsp != null) down_prot.down(new Event(Event.MSG, rsp));
    }
  }
Example 21
 protected void sendStableMessage(Address dest, short conn_id, long hd, long hr) {
   Message stable_msg = new Message(dest, null, null);
   Unicast2Header hdr = Unicast2Header.createStableHeader(conn_id, hd, hr);
   stable_msg.putHeader(this.id, hdr);
   stable_msg.setFlag(Message.OOB);
   if (log.isTraceEnabled()) {
     StringBuilder sb = new StringBuilder();
     sb.append(local_addr)
         .append(" --> STABLE(")
         .append(dest)
         .append(": ")
         .append(hd)
         .append("-")
         .append(hr)
         .append(", conn_id=")
         .append(conn_id)
         .append(")");
     log.trace(sb.toString());
   }
   down_prot.down(new Event(Event.MSG, stable_msg));
 }
Example 22
 protected void sendAck(Address dst, long seqno) {
   if (!running) // if we are disconnected, don't send any acks: they throw exceptions on shutdown
     return;
   Message ack = new Message(dst);
   ack.putHeader(this.id, UnicastHeader.createAckHeader(seqno));
   if (log.isTraceEnabled())
     log.trace(
         new StringBuilder()
             .append(local_addr)
             .append(" --> ACK(")
             .append(dst)
             .append(": #")
             .append(seqno)
             .append(')'));
   try {
     down_prot.down(new Event(Event.MSG, ack));
     num_acks_sent++;
   } catch (Throwable t) {
     log.error("failed sending ACK(" + seqno + ") to " + dst, t);
   }
 }
Example 23
  /**
   * If merge_id is not equal to this.merge_id then discard. Else cast the view/digest to all
   * members of this group.
   */
  public void handleMergeView(final MergeData data, final MergeId merge_id) {
    if (!matchMergeId(merge_id)) {
      if (log.isTraceEnabled())
        log.trace(
            gms.local_addr
                + ": merge_ids (mine: "
                + this.merge_id
                + ", received: "
                + merge_id
                + ") don't match; merge view "
                + data.view.getViewId()
                + " is discarded");
      return;
    }

    // Only send the view to our *current* members: if A and B are being merged (we are B), we
    // would *not* want to block on a VIEW_ACK from A, because A doesn't see us in the pre-merge
    // view yet and discards the view
    List<Address> newViewMembers = new ArrayList<Address>(data.view.getMembers());
    newViewMembers.removeAll(gms.members.getMembers());

    try {
      gms.castViewChange(data.view, data.digest, null, newViewMembers);
      // if we have flush in stack send ack back to merge coordinator
      if (gms.flushProtocolInStack) { // [JGRP-700] - FLUSH: flushing should span merge
        Message ack =
            new Message(data.getSender()).setFlag(Message.Flag.OOB, Message.Flag.INTERNAL);
        GMS.GmsHeader ack_hdr = new GMS.GmsHeader(GMS.GmsHeader.INSTALL_MERGE_VIEW_OK);
        ack.putHeader(gms.getId(), ack_hdr);
        gms.getDownProtocol().down(new Event(Event.MSG, ack));
      }
    } finally {
      cancelMerge(merge_id);
    }
  }
Example 24
  /**
   * Called by the layer above this layer. Usually we just put this Message into the send queue and
   * let one or more worker threads handle it. A worker thread then removes the Message from the
   * send queue, performs a conversion and adds the modified Message to the send queue of the layer
   * below it, by calling down().
   */
  public void down(Event evt) {
    Message msg;
    Object dest_addr;

    if (evt.getType() != Event.MSG) { // unless it is a message handle it and respond
      handleDownEvent(evt);
      return;
    }

    // ****************** profiling ******************
    /*
    if(num_msgs == 0) {
        start=System.currentTimeMillis();
        num_msgs++;
    }
    else if(num_msgs >= 1000) {
        stop=System.currentTimeMillis();

        long total_time=stop-start;
        double msgs_per_msec=num_msgs / (double)total_time;

        if(Trace.trace)
            Trace.info("UDP.down.profile()",
                       "total_time=" + total_time + ", msgs/ms=" + msgs_per_msec);
        num_msgs=0;
    }
    else {
        num_msgs++;
    }
    */
    // ****************** profiling ******************

    msg = (Message) evt.getArg();

    if (udp_hdr != null && udp_hdr.group_addr != null) {
      // added patch by Roland Kurmann (March 20 2003)
      msg.putHeader(name, udp_hdr);
    }

    dest_addr = msg.getDest();

    // Because we don't call Protocol.passDown(), we notify the observer directly (e.g.
    // PerfObserver).
    // This way, we still have performance numbers for UDP
    if (observer != null) {
      observer.passDown(evt);
    }
    if (dest_addr == null) { // 'null' means send to all group members
      if (ip_mcast) {
        if (mcast_addr == null) {
          Trace.error(
              "UDP.down()",
              "dest address of message is null, and "
                  + "sending to default address fails as mcast_addr is null, too !"
                  + " Discarding message "
                  + Util.printEvent(evt));
          return;
        }
        // if we want to use IP multicast, then set the destination of the message
        msg.setDest(mcast_addr);
      } else {
        // sends a separate UDP message to each address
        sendMultipleUdpMessages(msg, members);
        return;
      }
    }

    try {
      sendUdpMessage(msg);
    } catch (Exception e) {
      Trace.error("UDP.down()", "exception=" + e + ", msg=" + msg + ", mcast_addr=" + mcast_addr);
    }
  }
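
The dest_addr == null branch above encodes the transport's fan-out rule: a null destination means "the whole group", served either by a single IP multicast or, when multicast is unavailable, by one unicast datagram per member. A rough, self-contained stand-in using plain JDK sockets (UdpFanOut and its fields are inventions for illustration, not the real UDP transport):

import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.util.List;

public class UdpFanOut {
  private final DatagramSocket sock;
  private final InetAddress mcast_addr; // null if IP multicast is disabled
  private final int port;

  public UdpFanOut(DatagramSocket sock, InetAddress mcast_addr, int port) {
    this.sock = sock;
    this.mcast_addr = mcast_addr;
    this.port = port;
  }

  public void send(byte[] buf, InetAddress dest, List<InetAddress> members) throws Exception {
    if (dest == null) { // null means "send to all group members"
      if (mcast_addr != null) {
        sock.send(new DatagramPacket(buf, buf.length, mcast_addr, port));
      } else {
        for (InetAddress mbr : members) // no multicast: one unicast per member
          sock.send(new DatagramPacket(buf, buf.length, mbr, port));
      }
    } else {
      sock.send(new DatagramPacket(buf, buf.length, dest, port));
    }
  }
}
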
Example 25
  /**
   * Broadcasts the new view and digest, and waits for acks from all members in the list given as
   * argument. If the list is null, we take the members who are part of new_view
   */
  public void castViewChange(
      View new_view, Digest digest, JoinRsp jr, Collection<Address> newMembers) {
    if (log.isTraceEnabled())
      log.trace(local_addr + ": mcasting view " + new_view + " (" + new_view.size() + " mbrs)\n");

    // Send down a local TMP_VIEW event. This is needed by certain layers (e.g. NAKACK) to compute
    // the correct digest in case a client's next request (e.g. getState()) reaches us *before*
    // our own view change multicast. Check NAKACK's TMP_VIEW handling for details
    down_prot.up(new Event(Event.TMP_VIEW, new_view));
    down_prot.down(new Event(Event.TMP_VIEW, new_view));

    List<Address> ackMembers = new ArrayList<Address>(new_view.getMembers());
    if (newMembers != null && !newMembers.isEmpty()) ackMembers.removeAll(newMembers);

    Message view_change_msg = new Message(); // bcast to all members
    GmsHeader hdr = new GmsHeader(GmsHeader.VIEW, new_view);
    hdr.my_digest = digest;
    view_change_msg.putHeader(this.id, hdr);

    // If we're the only member the VIEW is broadcast to, simply install the view directly,
    // without sending the VIEW multicast! Or else N-1 members drop the multicast anyway...
    if (local_addr != null && ackMembers.size() == 1 && ackMembers.get(0).equals(local_addr)) {
      // we need to add the message to the retransmit window (e.g. in NAKACK), so that (1) it can
      // be retransmitted and (2) we increment the seqno (otherwise we'd return an incorrect
      // digest)
      down_prot.down(new Event(Event.ADD_TO_XMIT_TABLE, view_change_msg));
      impl.handleViewChange(new_view, digest);
    } else {
      if (!ackMembers.isEmpty()) ack_collector.reset(ackMembers);

      down_prot.down(new Event(Event.MSG, view_change_msg));
      try {
        if (!ackMembers.isEmpty()) {
          ack_collector.waitForAllAcks(view_ack_collection_timeout);
          if (log.isTraceEnabled())
            log.trace(
                local_addr
                    + ": received all "
                    + ack_collector.expectedAcks()
                    + " ACKs from members for view "
                    + new_view.getVid());
        }
      } catch (TimeoutException e) {
        if (log_collect_msgs && log.isWarnEnabled()) {
          log.warn(
              local_addr
                  + ": failed to collect all ACKs (expected="
                  + ack_collector.expectedAcks()
                  + ") for view "
                  + new_view.getViewId()
                  + " after "
                  + view_ack_collection_timeout
                  + "ms, missing ACKs from "
                  + ack_collector.printMissing());
        }
      }
    }

    if (jr != null && (newMembers != null && !newMembers.isEmpty())) {
      ack_collector.reset(new ArrayList<Address>(newMembers));
      for (Address joiner : newMembers) {
        sendJoinResponse(jr, joiner);
      }
      try {
        ack_collector.waitForAllAcks(view_ack_collection_timeout);
        if (log.isTraceEnabled())
          log.trace(
              local_addr
                  + ": received all ACKs ("
                  + ack_collector.expectedAcks()
                  + ") from joiners for view "
                  + new_view.getVid());
      } catch (TimeoutException e) {
        if (log_collect_msgs && log.isWarnEnabled()) {
          log.warn(
              local_addr
                  + ": failed to collect all ACKs (expected="
                  + ack_collector.expectedAcks()
                  + ") for unicast view "
                  + new_view
                  + " after "
                  + view_ack_collection_timeout
                  + "ms, missing ACKs from "
                  + ack_collector.printMissing());
        }
      }
    }
  }
Example 26
 public void sendJoinResponse(JoinRsp rsp, Address dest) {
   Message m = new Message(dest, null, null);
   GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.JOIN_RSP, rsp);
   m.putHeader(this.id, hdr);
   getDownProtocol().down(new Event(Event.MSG, m));
 }
Example 27
  @SuppressWarnings("unchecked")
  public Object up(Event evt) {
    switch (evt.getType()) {
      case Event.MSG:
        Message msg = (Message) evt.getArg();
        GmsHeader hdr = (GmsHeader) msg.getHeader(this.id);
        if (hdr == null) break;
        switch (hdr.type) {
          case GmsHeader.JOIN_REQ:
            view_handler.add(
                new Request(Request.JOIN, hdr.mbr, false, null, hdr.useFlushIfPresent));
            break;
          case GmsHeader.JOIN_REQ_WITH_STATE_TRANSFER:
            view_handler.add(
                new Request(
                    Request.JOIN_WITH_STATE_TRANSFER, hdr.mbr, false, null, hdr.useFlushIfPresent));
            break;
          case GmsHeader.JOIN_RSP:
            impl.handleJoinResponse(hdr.join_rsp);
            break;
          case GmsHeader.LEAVE_REQ:
            if (hdr.mbr == null) return null;
            view_handler.add(new Request(Request.LEAVE, hdr.mbr, false));
            break;
          case GmsHeader.LEAVE_RSP:
            impl.handleLeaveResponse();
            break;
          case GmsHeader.VIEW:
            View new_view = hdr.view;
            if (new_view == null) return null;

            Address coord = msg.getSrc();
            if (!new_view.containsMember(coord)) {
              // we need to send the ack first, otherwise the connection is removed
              sendViewAck(coord);
              impl.handleViewChange(new_view, hdr.my_digest);
            } else {
              impl.handleViewChange(new_view, hdr.my_digest);
              sendViewAck(coord); // send VIEW_ACK to sender of view
            }
            break;

          case GmsHeader.VIEW_ACK:
            Address sender = msg.getSrc();
            ack_collector.ack(sender);
            return null; // don't pass further up

          case GmsHeader.MERGE_REQ:
            impl.handleMergeRequest(msg.getSrc(), hdr.merge_id, hdr.mbrs);
            break;

          case GmsHeader.MERGE_RSP:
            MergeData merge_data =
                new MergeData(msg.getSrc(), hdr.view, hdr.my_digest, hdr.merge_rejected);
            if (log.isTraceEnabled()) {
              log.trace(
                  local_addr
                      + ": got merge response from "
                      + msg.getSrc()
                      + ", merge_id="
                      + hdr.merge_id
                      + ", merge data is "
                      + merge_data);
            }
            impl.handleMergeResponse(merge_data, hdr.merge_id);
            break;

          case GmsHeader.INSTALL_MERGE_VIEW:
            impl.handleMergeView(
                new MergeData(msg.getSrc(), hdr.view, hdr.my_digest), hdr.merge_id);
            break;

          case GmsHeader.INSTALL_DIGEST:
            Digest tmp = hdr.my_digest;
            down_prot.down(new Event(Event.MERGE_DIGEST, tmp));
            break;

          case GmsHeader.INSTALL_MERGE_VIEW_OK:
            // [JGRP-700] - FLUSH: flushing should span merge
            merge_ack_collector.ack(msg.getSrc());
            break;

          case GmsHeader.CANCEL_MERGE:
            // [JGRP-524] - FLUSH and merge: flush doesn't wrap entire merge process
            impl.handleMergeCancelled(hdr.merge_id);
            break;

          case GmsHeader.GET_DIGEST_REQ:
            // only handle this request if it was sent by the coordinator (or at least a member) of
            // the current cluster
            synchronized (members) {
              if (!members.contains(msg.getSrc())) break;
            }

            // discard my own request:
            if (msg.getSrc().equals(local_addr)) return null;

            if (hdr.merge_id != null
                && !(merger.matchMergeId(hdr.merge_id) || merger.setMergeId(null, hdr.merge_id)))
              return null;

            // fetch only my own digest
            Digest digest = (Digest) down_prot.down(new Event(Event.GET_DIGEST, local_addr));
            if (digest != null) {
              GmsHeader rsp_hdr = new GmsHeader(GmsHeader.GET_DIGEST_RSP);
              rsp_hdr.my_digest = digest;
              Message get_digest_rsp = new Message(msg.getSrc(), null, null);
              get_digest_rsp.setFlag(Message.OOB);
              get_digest_rsp.putHeader(this.id, rsp_hdr);
              down_prot.down(new Event(Event.MSG, get_digest_rsp));
            }
            break;

          case GmsHeader.GET_DIGEST_RSP:
            Digest digest_rsp = hdr.my_digest;
            impl.handleDigestResponse(msg.getSrc(), digest_rsp);
            break;

          default:
            if (log.isErrorEnabled()) log.error("GmsHeader with type=" + hdr.type + " not known");
        }
        return null; // don't pass up

      case Event.SUSPECT:
        Object retval = up_prot.up(evt);
        Address suspected = (Address) evt.getArg();
        view_handler.add(new Request(Request.SUSPECT, suspected, true));
        ack_collector.suspect(suspected);
        merge_ack_collector.suspect(suspected);
        return retval;

      case Event.UNSUSPECT:
        impl.unsuspect((Address) evt.getArg());
        return null; // discard

      case Event.MERGE:
        view_handler.add(
            new Request(Request.MERGE, null, false, (Map<Address, View>) evt.getArg()));
        return null; // don't pass up

      case Event.IS_MERGE_IN_PROGRESS:
        return merger.isMergeInProgress();
    }
    return up_prot.up(evt);
  }
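
Example 27 is the canonical JGroups dispatch shape: a protocol's up() peeks at its own header on each message, switches on the header type, and either consumes the event (returns null) or lets it continue up the stack. The shape in isolation, as a skeletal sketch (MyHeader, its type constants and the handle* methods are hypothetical):

  // Skeletal dispatch sketch; assumes the fields of a Protocol subclass as above.
  public Object up(Event evt) {
    if (evt.getType() != Event.MSG) return up_prot.up(evt); // non-messages pass through
    Message msg = (Message) evt.getArg();
    MyHeader hdr = (MyHeader) msg.getHeader(this.id);
    if (hdr == null) return up_prot.up(evt); // not our header: pass up unchanged
    switch (hdr.type) {
      case MyHeader.REQ:
        handleRequest(msg.getSrc(), hdr); // hypothetical handler
        return null; // consumed: don't pass further up
      case MyHeader.RSP:
        handleResponse(msg.getSrc(), hdr); // hypothetical handler
        return null;
      default:
        log.error("header with type=" + hdr.type + " not known");
        return null;
    }
  }
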
Example 28
  public Object down(Event evt) {
    switch (evt.getType()) {
      case Event.MSG: // Add UnicastHeader, add to AckSenderWindow and pass down
        Message msg = (Message) evt.getArg();
        Address dst = msg.getDest();

        /* only handle unicast messages */
        if (dst == null || msg.isFlagSet(Message.NO_RELIABILITY)) break;

        if (!started) {
          if (log.isTraceEnabled())
            log.trace("discarded message as start() has not yet been called, message: " + msg);
          return null;
        }

        SenderEntry entry = send_table.get(dst);
        if (entry == null) {
          entry = new SenderEntry(getNewConnectionId());
          SenderEntry existing = send_table.putIfAbsent(dst, entry);
          if (existing != null) entry = existing;
          else {
            if (log.isTraceEnabled())
              log.trace(
                  local_addr
                      + ": created connection to "
                      + dst
                      + " (conn_id="
                      + entry.send_conn_id
                      + ")");
            if (cache != null && !members.contains(dst)) cache.add(dst);
          }
        }

        long seqno = -2;
        short send_conn_id = -1;
        Unicast2Header hdr;

        entry.lock(); // threads will only sync if they access the same entry
        try {
          seqno = entry.sent_msgs_seqno;
          send_conn_id = entry.send_conn_id;
          hdr = Unicast2Header.createDataHeader(seqno, send_conn_id, seqno == DEFAULT_FIRST_SEQNO);
          msg.putHeader(this.id, hdr);
          entry.sent_msgs.addToMessages(
              seqno, msg); // add *including* UnicastHeader, adds to retransmitter
          entry.sent_msgs_seqno++;
          entry.update();
        } finally {
          entry.unlock();
        }

        if (log.isTraceEnabled()) {
          StringBuilder sb = new StringBuilder();
          sb.append(local_addr)
              .append(" --> DATA(")
              .append(dst)
              .append(": #")
              .append(seqno)
              .append(", conn_id=")
              .append(send_conn_id);
          if (hdr.first) sb.append(", first");
          sb.append(')');
          log.trace(sb);
        }

        try {
          down_prot.down(evt);
          num_msgs_sent++;
          num_bytes_sent += msg.getLength();
        } catch (Throwable t) {
          log.warn("failed sending the message", t);
        }
        return null; // we already passed the msg down

      case Event.VIEW_CHANGE: // remove connections to peers that are not members anymore !
        View view = (View) evt.getArg();
        List<Address> new_members = view.getMembers();
        Set<Address> non_members = new HashSet<Address>(send_table.keySet());
        non_members.addAll(recv_table.keySet());

        synchronized (members) {
          members.clear();
          if (new_members != null) members.addAll(new_members);
          non_members.removeAll(members);
          if (cache != null) {
            cache.removeAll(members);
          }
        }

        if (!non_members.isEmpty()) {
          if (log.isTraceEnabled()) log.trace("removing non members " + non_members);
          for (Address non_mbr : non_members) removeConnection(non_mbr);
        }
        break;

      case Event.SET_LOCAL_ADDRESS:
        local_addr = (Address) evt.getArg();
        break;
    }

    return down_prot.down(evt); // Pass on to the layer below us
  }
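
The send_table lookup above is the classic putIfAbsent idiom: optimistically create the per-peer entry, then defer to whichever thread won the race, guaranteeing exactly one connection entry per destination without locking around creation. The idiom in isolation, as a self-contained sketch (Entry and the String key are stand-ins for SenderEntry and Address):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ConnectionTable {
  static class Entry {} // stand-in for the per-peer sender state

  private final ConcurrentMap<String, Entry> table = new ConcurrentHashMap<String, Entry>();

  public Entry getOrCreate(String peer) {
    Entry entry = table.get(peer);
    if (entry == null) {
      entry = new Entry();
      Entry existing = table.putIfAbsent(peer, entry); // another thread may have won the race
      if (existing != null) entry = existing;          // if so, use the winner's entry
    }
    return entry;
  }
}
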
Example 29
  public Object down(Event evt) {
    switch (evt.getType()) {
      case Event.MSG: // Add UnicastHeader, add to AckSenderWindow and pass down
        Message msg = (Message) evt.getArg();
        Address dst = msg.getDest();

        /* only handle unicast messages */
        if (dst == null || msg.isFlagSet(Message.NO_RELIABILITY)) break;

        if (!running) {
          if (log.isTraceEnabled())
            log.trace("discarded message as start() has not yet been called, message: " + msg);
          return null;
        }

        SenderEntry entry = send_table.get(dst);
        if (entry == null) {
          entry = new SenderEntry(getNewConnectionId());
          SenderEntry existing = send_table.putIfAbsent(dst, entry);
          if (existing != null) entry = existing;
          else {
            if (log.isTraceEnabled())
              log.trace(
                  local_addr
                      + ": created connection to "
                      + dst
                      + " (conn_id="
                      + entry.send_conn_id
                      + ")");
            if (cache != null && !members.contains(dst)) cache.add(dst);
          }
        }

        short send_conn_id = entry.send_conn_id;
        long seqno = entry.sent_msgs_seqno.getAndIncrement();
        long sleep = 10;
        while (running) {
          try {
            msg.putHeader(
                this.id,
                Unicast2Header.createDataHeader(seqno, send_conn_id, seqno == DEFAULT_FIRST_SEQNO));
            entry.sent_msgs.add(seqno, msg); // add *including* UnicastHeader, adds to retransmitter
            if (conn_expiry_timeout > 0) entry.update();
            break;
          } catch (Throwable t) {
            if (!running) break;
            if (log.isWarnEnabled()) log.warn("failed sending message", t);
            Util.sleep(sleep);
            sleep = Math.min(5000, sleep * 2);
          }
        }

        if (log.isTraceEnabled()) {
          StringBuilder sb = new StringBuilder();
          sb.append(local_addr)
              .append(" --> DATA(")
              .append(dst)
              .append(": #")
              .append(seqno)
              .append(", conn_id=")
              .append(send_conn_id);
          if (seqno == DEFAULT_FIRST_SEQNO) sb.append(", first");
          sb.append(')');
          log.trace(sb);
        }

        try {
          down_prot.down(evt);
          num_msgs_sent++;
        } catch (Throwable t) {
          log.warn("failed sending the message", t);
        }
        return null; // we already passed the msg down

      case Event.VIEW_CHANGE: // remove connections to peers that are not members anymore !
        View view = (View) evt.getArg();
        List<Address> new_members = view.getMembers();
        Set<Address> non_members = new HashSet<Address>(send_table.keySet());
        non_members.addAll(recv_table.keySet());

        members = new_members;
        non_members.removeAll(new_members);
        if (cache != null) cache.removeAll(new_members);

        if (!non_members.isEmpty()) {
          if (log.isTraceEnabled()) log.trace("removing non members " + non_members);
          for (Address non_mbr : non_members) removeConnection(non_mbr);
        }
        break;

      case Event.SET_LOCAL_ADDRESS:
        local_addr = (Address) evt.getArg();
        break;
    }

    return down_prot.down(evt); // Pass on to the layer below us
  }
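
Unlike Example 28, this variant wraps the add in a retry loop with bounded exponential backoff: the wait starts at 10 ms and doubles up to a 5 s cap, giving the retransmitter time to recover instead of failing the send outright. The backoff skeleton in isolation, as a JDK-only sketch (names are made up):

public class BackoffRetry {
  // retries the task until it succeeds, sleeping 10, 20, 40, ... ms (capped at 5 s)
  public static void run(Runnable task) throws InterruptedException {
    long sleep = 10;
    while (true) {
      try {
        task.run();
        return; // success
      } catch (RuntimeException ex) {
        Thread.sleep(sleep);
        sleep = Math.min(5000, sleep * 2);
      }
    }
  }
}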