Example #1
 // Sends a VIEW_ACK for the received view back to its sender, as an OOB message
 private void sendViewAck(Address dest) {
   Message view_ack = new Message(dest, null, null);
   view_ack.setFlag(Message.OOB);
   GmsHeader tmphdr = new GmsHeader(GmsHeader.VIEW_ACK);
   view_ack.putHeader(this.id, tmphdr);
   down_prot.down(new Event(Event.MSG, view_ack));
 }
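Note: the receiving side of this ack appears in Example #5 below, where the VIEW_ACK case passes the sender to ack_collector.ack(); that call is what castViewChange() in Example #7 blocks on.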
Example #2
 /**
  * Copies a message. Copies only headers with IDs >= starting_id or IDs which are in the
  * copy_only_ids list.
  *
  * @param copy_buffer if true, the message's buffer (payload) is copied as well
  * @param starting_id headers with an ID greater than or equal to this value are copied
  * @param copy_only_ids headers with these IDs are copied regardless of starting_id
  * @return a copy of this message containing only the selected headers
  */
 public Message copy(boolean copy_buffer, short starting_id, short... copy_only_ids) {
   Message retval = copy(copy_buffer, false);
   for (Map.Entry<Short, Header> entry : getHeaders().entrySet()) {
     short id = entry.getKey();
     if (id >= starting_id || containsId(id, copy_only_ids))
       retval.putHeader(id, entry.getValue());
   }
   return retval;
 }
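The containsId() helper called above is not shown in the example. A minimal sketch of what it presumably does, assuming a plain linear scan over the varargs array (the actual JGroups utility may differ):

 // Sketch only: returns true if id occurs in ids; ids may be null or empty.
 protected static boolean containsId(short id, short[] ids) {
   if (ids == null)
     return false;
   for (short tmp : ids)
     if (tmp == id)
       return true;
   return false;
 }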
Example #3
  /**
   * Doesn't copy any headers except for those with an ID >= starting_id.
   *
   * @param copy_buffer if true, the message's buffer (payload) is copied as well
   * @param starting_id headers with an ID greater than or equal to this value are copied
   * @return a message containing only the headers whose IDs are >= starting_id
   */
  public Message copy(boolean copy_buffer, short starting_id) {
    Message retval = copy(copy_buffer, false);
    if (starting_id > 0) {
      for (Map.Entry<Short, Header> entry : getHeaders().entrySet()) {
        short id = entry.getKey();
        if (id >= starting_id) retval.putHeader(id, entry.getValue());
      }
    }

    return retval;
  }
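A hypothetical call site for this overload (the header ID value 32 is illustrative, not taken from the original source):

  // Copy the message including its buffer, keeping only headers whose
  // protocol ID is >= 32; headers below that threshold are dropped.
  Message stripped = msg.copy(true, (short) 32);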
Example #4
  /**
   * Called by the layer above this layer. Usually we just put this Message into the send queue and
   * let one or more worker threads handle it. A worker thread then removes the Message from the
   * send queue, performs a conversion and adds the modified Message to the send queue of the layer
   * below it, by calling down().
   */
  public void down(Event evt) {
    Message msg;
    Object dest_addr;

    if (evt.getType() != Event.MSG) { // not a message: handle the event here and return
      handleDownEvent(evt);
      return;
    }

    // ****************** profiling ******************
    /*
    if(num_msgs == 0) {
        start=System.currentTimeMillis();
        num_msgs++;
    }
    else if(num_msgs >= 1000) {
        stop=System.currentTimeMillis();

        long total_time=stop-start;
        double msgs_per_msec=num_msgs / (double)total_time;

        if(Trace.trace)
            Trace.info("UDP.down.profile()",
                       "total_time=" + total_time + ", msgs/ms=" + msgs_per_msec);
        num_msgs=0;
    }
    else {
        num_msgs++;
    }
    */
    // ****************** profiling ******************

    msg = (Message) evt.getArg();

    if (udp_hdr != null && udp_hdr.group_addr != null) {
      // added patch by Roland Kurmann (March 20 2003)
      msg.putHeader(name, udp_hdr);
    }

    dest_addr = msg.getDest();

    // Because we don't call Protocol.passDown(), we notify the observer directly
    // (e.g. PerfObserver). This way, we still have performance numbers for UDP.
    if (observer != null) {
      observer.passDown(evt);
    }
    if (dest_addr == null) { // 'null' means send to all group members
      if (ip_mcast) {
        if (mcast_addr == null) {
          Trace.error(
              "UDP.down()",
              "dest address of message is null, and "
                  + "sending to default address fails as mcast_addr is null, too! "
                  + "Discarding message "
                  + Util.printEvent(evt));
          return;
        }
        // if we want to use IP multicast, then set the destination of the message
        msg.setDest(mcast_addr);
      } else {
        // sends a separate UDP message to each address
        sendMultipleUdpMessages(msg, members);
        return;
      }
    }

    try {
      sendUdpMessage(msg);
    } catch (Exception e) {
      Trace.error("UDP.down()", "exception=" + e + ", msg=" + msg + ", mcast_addr=" + mcast_addr);
    }
  }
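The sendMultipleUdpMessages() method called above is not shown. A minimal sketch under the assumption that it simply unicasts the message to each member in turn, reusing sendUdpMessage() (the real method, and the type of the members field, may differ):

  // Sketch only: set each member as the destination in turn and send a unicast,
  // mirroring how down() above sets the destination before sendUdpMessage().
  private void sendMultipleUdpMessages(Message msg, Vector members) {
    for (int i = 0; i < members.size(); i++) {
      Address dest = (Address) members.elementAt(i);
      msg.setDest(dest);
      try {
        sendUdpMessage(msg);
      } catch (Exception e) {
        Trace.error("UDP.sendMultipleUdpMessages()", "exception=" + e + ", dest=" + dest);
      }
    }
  }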
Example #5
  @SuppressWarnings("unchecked")
  public Object up(Event evt) {
    switch (evt.getType()) {
      case Event.MSG:
        Message msg = (Message) evt.getArg();
        GmsHeader hdr = (GmsHeader) msg.getHeader(this.id);
        if (hdr == null) break;
        switch (hdr.type) {
          case GmsHeader.JOIN_REQ:
            view_handler.add(
                new Request(Request.JOIN, hdr.mbr, false, null, hdr.useFlushIfPresent));
            break;
          case GmsHeader.JOIN_REQ_WITH_STATE_TRANSFER:
            view_handler.add(
                new Request(
                    Request.JOIN_WITH_STATE_TRANSFER, hdr.mbr, false, null, hdr.useFlushIfPresent));
            break;
          case GmsHeader.JOIN_RSP:
            impl.handleJoinResponse(hdr.join_rsp);
            break;
          case GmsHeader.LEAVE_REQ:
            if (hdr.mbr == null) return null;
            view_handler.add(new Request(Request.LEAVE, hdr.mbr, false));
            break;
          case GmsHeader.LEAVE_RSP:
            impl.handleLeaveResponse();
            break;
          case GmsHeader.VIEW:
            View new_view = hdr.view;
            if (new_view == null) return null;

            Address coord = msg.getSrc();
            if (!new_view.containsMember(coord)) {
              // we need to send the ack first, otherwise the connection is removed
              sendViewAck(coord);
              impl.handleViewChange(new_view, hdr.my_digest);
            } else {
              impl.handleViewChange(new_view, hdr.my_digest);
              sendViewAck(coord); // send VIEW_ACK to sender of view
            }
            break;

          case GmsHeader.VIEW_ACK:
            Address sender = msg.getSrc();
            ack_collector.ack(sender);
            return null; // don't pass further up

          case GmsHeader.MERGE_REQ:
            impl.handleMergeRequest(msg.getSrc(), hdr.merge_id, hdr.mbrs);
            break;

          case GmsHeader.MERGE_RSP:
            MergeData merge_data =
                new MergeData(msg.getSrc(), hdr.view, hdr.my_digest, hdr.merge_rejected);
            if (log.isTraceEnabled()) {
              log.trace(
                  local_addr
                      + ": got merge response from "
                      + msg.getSrc()
                      + ", merge_id="
                      + hdr.merge_id
                      + ", merge data is "
                      + merge_data);
            }
            impl.handleMergeResponse(merge_data, hdr.merge_id);
            break;

          case GmsHeader.INSTALL_MERGE_VIEW:
            impl.handleMergeView(
                new MergeData(msg.getSrc(), hdr.view, hdr.my_digest), hdr.merge_id);
            break;

          case GmsHeader.INSTALL_DIGEST:
            Digest tmp = hdr.my_digest;
            down_prot.down(new Event(Event.MERGE_DIGEST, tmp));
            break;

          case GmsHeader.INSTALL_MERGE_VIEW_OK:
            // [JGRP-700] - FLUSH: flushing should span merge
            merge_ack_collector.ack(msg.getSrc());
            break;

          case GmsHeader.CANCEL_MERGE:
            // [JGRP-524] - FLUSH and merge: flush doesn't wrap entire merge process
            impl.handleMergeCancelled(hdr.merge_id);
            break;

          case GmsHeader.GET_DIGEST_REQ:
            // only handle this request if it was sent by the coordinator (or at least a member) of
            // the current cluster
            synchronized (members) {
              if (!members.contains(msg.getSrc())) break;
            }

            // discard my own request:
            if (msg.getSrc().equals(local_addr)) return null;

            if (hdr.merge_id != null
                && !(merger.matchMergeId(hdr.merge_id) || merger.setMergeId(null, hdr.merge_id)))
              return null;

            // fetch only my own digest
            Digest digest = (Digest) down_prot.down(new Event(Event.GET_DIGEST, local_addr));
            if (digest != null) {
              GmsHeader rsp_hdr = new GmsHeader(GmsHeader.GET_DIGEST_RSP);
              rsp_hdr.my_digest = digest;
              Message get_digest_rsp = new Message(msg.getSrc(), null, null);
              get_digest_rsp.setFlag(Message.OOB);
              get_digest_rsp.putHeader(this.id, rsp_hdr);
              down_prot.down(new Event(Event.MSG, get_digest_rsp));
            }
            break;

          case GmsHeader.GET_DIGEST_RSP:
            Digest digest_rsp = hdr.my_digest;
            impl.handleDigestResponse(msg.getSrc(), digest_rsp);
            break;

          default:
            if (log.isErrorEnabled()) log.error("GmsHeader with type=" + hdr.type + " not known");
        }
        return null; // don't pass up

      case Event.SUSPECT:
        Object retval = up_prot.up(evt);
        Address suspected = (Address) evt.getArg();
        view_handler.add(new Request(Request.SUSPECT, suspected, true));
        ack_collector.suspect(suspected);
        merge_ack_collector.suspect(suspected);
        return retval;

      case Event.UNSUSPECT:
        impl.unsuspect((Address) evt.getArg());
        return null; // discard

      case Event.MERGE:
        view_handler.add(
            new Request(Request.MERGE, null, false, (Map<Address, View>) evt.getArg()));
        return null; // don't pass up

      case Event.IS_MERGE_IN_PROGRESS:
        return merger.isMergeInProgress();
    }
    return up_prot.up(evt);
  }
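For context, a sketch of the sending side of the JOIN_REQ case handled above, following the same pattern as sendViewAck() (Example #1) and sendJoinResponse() (Example #6). The method name and the GmsHeader constructor taking the member address are assumptions, not shown in the original:

  // Sketch only: the joiner addresses a JOIN_REQ to the coordinator; the
  // up() handler above turns it into a Request.JOIN for the view handler.
  void sendJoinRequest(Address coord, Address local_addr) {
    Message msg = new Message(coord, null, null);
    GmsHeader hdr = new GmsHeader(GmsHeader.JOIN_REQ, local_addr);
    msg.putHeader(this.id, hdr);
    down_prot.down(new Event(Event.MSG, msg));
  }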
Example #6
 public void sendJoinResponse(JoinRsp rsp, Address dest) {
   Message m = new Message(dest, null, null);
   GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.JOIN_RSP, rsp);
   m.putHeader(this.id, hdr);
   getDownProtocol().down(new Event(Event.MSG, m));
 }
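This method is invoked once per joiner from castViewChange() in Example #7 below, after which the coordinator waits for the joiners' acks on the same ack_collector.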
Example #7
  /**
   * Broadcasts the new view and digest, and waits for acks from all members in the list given as
   * argument. If the list is null, we take the members who are part of new_view
   */
  public void castViewChange(
      View new_view, Digest digest, JoinRsp jr, Collection<Address> newMembers) {
    if (log.isTraceEnabled())
      log.trace(local_addr + ": mcasting view " + new_view + " (" + new_view.size() + " mbrs)\n");

    // Send down a local TMP_VIEW event. This is needed by certain layers (e.g. NAKACK)
    // to compute the correct digest in case a client's next request (e.g. getState())
    // reaches us *before* our own view change multicast. Check NAKACK's TMP_VIEW
    // handling for details.
    down_prot.up(new Event(Event.TMP_VIEW, new_view));
    down_prot.down(new Event(Event.TMP_VIEW, new_view));

    List<Address> ackMembers = new ArrayList<Address>(new_view.getMembers());
    if (newMembers != null && !newMembers.isEmpty()) ackMembers.removeAll(newMembers);

    Message view_change_msg = new Message(); // bcast to all members
    GmsHeader hdr = new GmsHeader(GmsHeader.VIEW, new_view);
    hdr.my_digest = digest;
    view_change_msg.putHeader(this.id, hdr);

    // If we're the only member the VIEW is broadcast to, simply install the view directly
    // without sending the VIEW multicast! Otherwise the N-1 other members would drop the
    // multicast anyway...
    if (local_addr != null && ackMembers.size() == 1 && ackMembers.get(0).equals(local_addr)) {
      // we need to add the message to the retransmit window (e.g. in NAKACK), so (1) it
      // can be retransmitted and (2) we increment the seqno (otherwise we'd return an
      // incorrect digest)
      down_prot.down(new Event(Event.ADD_TO_XMIT_TABLE, view_change_msg));
      impl.handleViewChange(new_view, digest);
    } else {
      if (!ackMembers.isEmpty()) ack_collector.reset(ackMembers);

      down_prot.down(new Event(Event.MSG, view_change_msg));
      try {
        if (!ackMembers.isEmpty()) {
          ack_collector.waitForAllAcks(view_ack_collection_timeout);
          if (log.isTraceEnabled())
            log.trace(
                local_addr
                    + ": received all "
                    + ack_collector.expectedAcks()
                    + " ACKs from members for view "
                    + new_view.getVid());
        }
      } catch (TimeoutException e) {
        if (log_collect_msgs && log.isWarnEnabled()) {
          log.warn(
              local_addr
                  + ": failed to collect all ACKs (expected="
                  + ack_collector.expectedAcks()
                  + ") for view "
                  + new_view.getViewId()
                  + " after "
                  + view_ack_collection_timeout
                  + "ms, missing ACKs from "
                  + ack_collector.printMissing());
        }
      }
    }

    if (jr != null && (newMembers != null && !newMembers.isEmpty())) {
      ack_collector.reset(new ArrayList<Address>(newMembers));
      for (Address joiner : newMembers) {
        sendJoinResponse(jr, joiner);
      }
      try {
        ack_collector.waitForAllAcks(view_ack_collection_timeout);
        if (log.isTraceEnabled())
          log.trace(
              local_addr
                  + ": received all ACKs ("
                  + ack_collector.expectedAcks()
                  + ") from joiners for view "
                  + new_view.getVid());
      } catch (TimeoutException e) {
        if (log_collect_msgs && log.isWarnEnabled()) {
          log.warn(
              local_addr
                  + ": failed to collect all ACKs (expected="
                  + ack_collector.expectedAcks()
                  + ") for unicast view "
                  + new_view
                  + " after "
                  + view_ack_collection_timeout
                  + "ms, missing ACKs from "
                  + ack_collector.printMissing());
        }
      }
    }
  }
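Examples #5 and #7 together show the ack round-trip: the coordinator resets the collector with the expected members, multicasts the view (or unicasts the JoinRsp), and blocks in waitForAllAcks(), while each incoming VIEW_ACK (Example #5) calls ack(). A minimal monitor-based sketch of a collector exposing the subset of the API used above; the real JGroups AckCollector differs in details, and java.util.concurrent.TimeoutException stands in for whichever TimeoutException the original code catches:

  import java.util.Collection;
  import java.util.HashSet;
  import java.util.Set;
  import java.util.concurrent.TimeoutException;
  import org.jgroups.Address;

  // Sketch only: tracks which members still owe an ack and wakes waiters as
  // acks (or suspicions) arrive.
  public class AckCollectorSketch {
    private final Set<Address> missing = new HashSet<Address>();
    private int expected;

    public synchronized void reset(Collection<Address> members) {
      missing.clear();
      if (members != null)
        missing.addAll(members);
      expected = missing.size();
    }

    public synchronized void ack(Address sender) {
      if (missing.remove(sender))
        notifyAll();
    }

    // a suspected member will never ack, so stop waiting for it
    public synchronized void suspect(Address mbr) {
      ack(mbr);
    }

    public synchronized int expectedAcks() {
      return expected;
    }

    public synchronized String printMissing() {
      return missing.toString();
    }

    public synchronized void waitForAllAcks(long timeout) throws TimeoutException {
      long deadline = System.currentTimeMillis() + timeout;
      while (!missing.isEmpty()) {
        long remaining = deadline - System.currentTimeMillis();
        if (remaining <= 0)
          throw new TimeoutException("missing ACKs from " + missing);
        try {
          wait(remaining);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt(); // give up waiting if interrupted
          return;
        }
      }
    }
  }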