/**
 * Sets the new view and sends a VIEW_CHANGE event up and down the stack. If the view is a
 * MergeView (subclass of View), then the digest will be non-null and has to be set before
 * installing the view.
 */
public void installView(View new_view, Digest digest) {
    ViewId vid = new_view.getVid();
    List<Address> mbrs = new_view.getMembers();
    ltime = Math.max(vid.getId(), ltime); // compute the logical time, regardless of whether the view is accepted

    // Discard views with an id lower than or equal to our own. The first view is installed without this check
    if (view != null) {
        ViewId view_id = view.getViewId();
        int rc = vid.compareToIDs(view_id);
        if (rc <= 0) {
            if (log.isWarnEnabled() && rc < 0 && log_view_warnings) {
                // only warn if the view is smaller; silently discard same views
                log.warn(local_addr + ": received view < current view; discarding it (current vid: "
                           + view_id + ", new vid: " + vid + ')');
            }
            return;
        }
    }

    /* Check for self-inclusion: if I'm not part of the new membership, I just discard it.
       This ensures that messages sent in view V1 are only received by members of V1 */
    if (!mbrs.contains(local_addr)) {
        if (log.isWarnEnabled() && log_view_warnings)
            log.warn(local_addr + ": not member of view " + new_view.getViewId() + "; discarding it");
        return;
    }

    if (digest != null) {
        if (new_view instanceof MergeView)
            mergeDigest(digest);
        else
            setDigest(digest);
    }

    if (log.isDebugEnabled())
        log.debug(local_addr + ": installing view " + new_view);

    Event view_event;
    synchronized (members) {
        view = new View(new_view.getVid(), new_view.getMembers());
        view_event = new Event(Event.VIEW_CHANGE, new_view);

        // Set the membership. Take into account joining members
        if (!mbrs.isEmpty()) {
            members.set(mbrs);
            tmp_members.set(members);
            joining.removeAll(mbrs);     // remove all members in mbrs from joining
            leaving.retainAll(mbrs);     // remove all elements from 'leaving' that are not in 'mbrs'

            tmp_members.add(joining);    // add members that haven't yet shown up in the membership
            tmp_members.remove(leaving); // remove members that haven't yet been removed from the membership

            // add to prev_members
            for (Address addr : mbrs) {
                if (!prev_members.contains(addr))
                    prev_members.add(addr);
            }
        }

        Address coord = determineCoordinator();
        if (coord != null && coord.equals(local_addr) && !haveCoordinatorRole()) {
            becomeCoordinator();
        } else {
            if (haveCoordinatorRole() && !local_addr.equals(coord)) {
                becomeParticipant();
                merge_ack_collector.reset(null); // we don't need this one anymore
            }
        }
    }

    // - Changed order of passing view up and down (http://jira.jboss.com/jira/browse/JGRP-347)
    // - Changed it back (bela Sept 4 2007): http://jira.jboss.com/jira/browse/JGRP-564
    // - Moved sending up view_event out of the synchronized block (bela Nov 2011)
    down_prot.down(view_event); // needed e.g. by failure detector or UDP
    up_prot.up(view_event);

    List<Address> tmp_mbrs = new_view.getMembers();
    ack_collector.retainAll(tmp_mbrs);
    merge_ack_collector.retainAll(tmp_mbrs);

    if (new_view instanceof MergeView)
        merger.forceCancelMerge();

    if (stats) {
        num_views++;
        prev_views.add(new Tuple<View, Long>(new_view, System.currentTimeMillis()));
    }
}
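/*
 * Hedged usage sketch (not part of GMS itself): how an application typically observes the views
 * that installView() delivers up the stack as VIEW_CHANGE events. Only the public JChannel /
 * ReceiverAdapter API is used; the cluster name "demo-cluster" and the ViewWatcher class are
 * made up for illustration.
 *
 *   import org.jgroups.JChannel;
 *   import org.jgroups.ReceiverAdapter;
 *   import org.jgroups.View;
 *
 *   public class ViewWatcher {
 *       public static void main(String[] args) throws Exception {
 *           JChannel ch = new JChannel();                  // default stack, includes pbcast.GMS
 *           ch.setReceiver(new ReceiverAdapter() {
 *               @Override
 *               public void viewAccepted(View v) {         // invoked after GMS has installed the view
 *                   System.out.println("view " + v.getViewId() + ", members=" + v.getMembers());
 *               }
 *           });
 *           ch.connect("demo-cluster");                    // joining triggers a new view installation
 *           Thread.sleep(10000);                           // observe view changes for a while
 *           ch.close();
 *       }
 *   }
 */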
@SuppressWarnings("unchecked") public Object up(Event evt) { switch (evt.getType()) { case Event.MSG: Message msg = (Message) evt.getArg(); GmsHeader hdr = (GmsHeader) msg.getHeader(this.id); if (hdr == null) break; switch (hdr.type) { case GmsHeader.JOIN_REQ: view_handler.add( new Request(Request.JOIN, hdr.mbr, false, null, hdr.useFlushIfPresent)); break; case GmsHeader.JOIN_REQ_WITH_STATE_TRANSFER: view_handler.add( new Request( Request.JOIN_WITH_STATE_TRANSFER, hdr.mbr, false, null, hdr.useFlushIfPresent)); break; case GmsHeader.JOIN_RSP: impl.handleJoinResponse(hdr.join_rsp); break; case GmsHeader.LEAVE_REQ: if (hdr.mbr == null) return null; view_handler.add(new Request(Request.LEAVE, hdr.mbr, false)); break; case GmsHeader.LEAVE_RSP: impl.handleLeaveResponse(); break; case GmsHeader.VIEW: View new_view = hdr.view; if (new_view == null) return null; Address coord = msg.getSrc(); if (!new_view.containsMember(coord)) { sendViewAck( coord); // we need to send the ack first, otherwise the connection is removed impl.handleViewChange(new_view, hdr.my_digest); } else { impl.handleViewChange(new_view, hdr.my_digest); sendViewAck(coord); // send VIEW_ACK to sender of view } break; case GmsHeader.VIEW_ACK: Address sender = msg.getSrc(); ack_collector.ack(sender); return null; // don't pass further up case GmsHeader.MERGE_REQ: impl.handleMergeRequest(msg.getSrc(), hdr.merge_id, hdr.mbrs); break; case GmsHeader.MERGE_RSP: MergeData merge_data = new MergeData(msg.getSrc(), hdr.view, hdr.my_digest, hdr.merge_rejected); if (log.isTraceEnabled()) { log.trace( local_addr + ": got merge response from " + msg.getSrc() + ", merge_id=" + hdr.merge_id + ", merge data is " + merge_data); } impl.handleMergeResponse(merge_data, hdr.merge_id); break; case GmsHeader.INSTALL_MERGE_VIEW: impl.handleMergeView( new MergeData(msg.getSrc(), hdr.view, hdr.my_digest), hdr.merge_id); break; case GmsHeader.INSTALL_DIGEST: Digest tmp = hdr.my_digest; down_prot.down(new Event(Event.MERGE_DIGEST, tmp)); break; case GmsHeader.INSTALL_MERGE_VIEW_OK: // [JGRP-700] - FLUSH: flushing should span merge merge_ack_collector.ack(msg.getSrc()); break; case GmsHeader.CANCEL_MERGE: // [JGRP-524] - FLUSH and merge: flush doesn't wrap entire merge process impl.handleMergeCancelled(hdr.merge_id); break; case GmsHeader.GET_DIGEST_REQ: // only handle this request if it was sent by the coordinator (or at least a member) of // the current cluster synchronized (members) { if (!members.contains(msg.getSrc())) break; } // discard my own request: if (msg.getSrc().equals(local_addr)) return null; if (hdr.merge_id != null && !(merger.matchMergeId(hdr.merge_id) || merger.setMergeId(null, hdr.merge_id))) return null; // fetch only my own digest Digest digest = (Digest) down_prot.down(new Event(Event.GET_DIGEST, local_addr)); if (digest != null) { GmsHeader rsp_hdr = new GmsHeader(GmsHeader.GET_DIGEST_RSP); rsp_hdr.my_digest = digest; Message get_digest_rsp = new Message(msg.getSrc(), null, null); get_digest_rsp.setFlag(Message.OOB); get_digest_rsp.putHeader(this.id, rsp_hdr); down_prot.down(new Event(Event.MSG, get_digest_rsp)); } break; case GmsHeader.GET_DIGEST_RSP: Digest digest_rsp = hdr.my_digest; impl.handleDigestResponse(msg.getSrc(), digest_rsp); break; default: if (log.isErrorEnabled()) log.error("GmsHeader with type=" + hdr.type + " not known"); } return null; // don't pass up case Event.SUSPECT: Object retval = up_prot.up(evt); Address suspected = (Address) evt.getArg(); view_handler.add(new Request(Request.SUSPECT, suspected, true)); 
ack_collector.suspect(suspected); merge_ack_collector.suspect(suspected); return retval; case Event.UNSUSPECT: impl.unsuspect((Address) evt.getArg()); return null; // discard case Event.MERGE: view_handler.add( new Request(Request.MERGE, null, false, (Map<Address, View>) evt.getArg())); return null; // don't pass up case Event.IS_MERGE_IN_PROGRESS: return merger.isMergeInProgress(); } return up_prot.up(evt); }
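/*
 * Hedged sketch (not the real org.jgroups.util.AckCollector): a minimal stand-in showing the
 * ack-collection pattern that the VIEW_ACK and SUSPECT handling above feed into, and which
 * castViewChange() below waits on. Suspected members are removed from the missing set so a
 * pending wait does not block on a crashed node. The class and method names mirror the real
 * collector, but the implementation is illustrative only.
 *
 *   import java.util.Collection;
 *   import java.util.HashSet;
 *   import java.util.Set;
 *   import java.util.concurrent.TimeoutException;
 *
 *   class SimpleAckCollector {
 *       private final Set<Object> missing = new HashSet<Object>();
 *
 *       synchronized void reset(Collection<?> members) {  // members we expect an ack from
 *           missing.clear();
 *           missing.addAll(members);
 *       }
 *
 *       synchronized void ack(Object member) {            // e.g. on GmsHeader.VIEW_ACK
 *           missing.remove(member);
 *           notifyAll();
 *       }
 *
 *       synchronized void suspect(Object member) {        // e.g. on Event.SUSPECT
 *           missing.remove(member);
 *           notifyAll();
 *       }
 *
 *       synchronized void waitForAllAcks(long timeout_ms) throws TimeoutException, InterruptedException {
 *           long deadline = System.currentTimeMillis() + timeout_ms;
 *           while (!missing.isEmpty()) {
 *               long remaining = deadline - System.currentTimeMillis();
 *               if (remaining <= 0)
 *                   throw new TimeoutException("missing acks from " + missing);
 *               wait(remaining);
 *           }
 *       }
 *   }
 */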
/**
 * Broadcasts the new view and digest, and waits for acks from all members in the list given as
 * argument. If the list is null, we take the members who are part of new_view.
 */
public void castViewChange(View new_view, Digest digest, JoinRsp jr, Collection<Address> newMembers) {
    if (log.isTraceEnabled())
        log.trace(local_addr + ": mcasting view " + new_view + " (" + new_view.size() + " mbrs)\n");

    // Send down a local TMP_VIEW event. This is needed by certain layers (e.g. NAKACK) to compute the
    // correct digest in case a client's next request (e.g. getState()) reaches us *before* our own
    // view change multicast. Check NAKACK's TMP_VIEW handling for details
    down_prot.up(new Event(Event.TMP_VIEW, new_view));
    down_prot.down(new Event(Event.TMP_VIEW, new_view));

    List<Address> ackMembers = new ArrayList<Address>(new_view.getMembers());
    if (newMembers != null && !newMembers.isEmpty())
        ackMembers.removeAll(newMembers);

    Message view_change_msg = new Message(); // bcast to all members
    GmsHeader hdr = new GmsHeader(GmsHeader.VIEW, new_view);
    hdr.my_digest = digest;
    view_change_msg.putHeader(this.id, hdr);

    // If we're the only member the VIEW is broadcast to, simply install the view directly instead of
    // sending the VIEW multicast; the other N-1 members would drop the multicast anyway
    if (local_addr != null && ackMembers.size() == 1 && ackMembers.get(0).equals(local_addr)) {
        // we need to add the message to the retransmit window (e.g. in NAKACK), so (1) it can be
        // retransmitted and (2) we increment the seqno (otherwise, we'd return an incorrect digest)
        down_prot.down(new Event(Event.ADD_TO_XMIT_TABLE, view_change_msg));
        impl.handleViewChange(new_view, digest);
    } else {
        if (!ackMembers.isEmpty())
            ack_collector.reset(ackMembers);

        down_prot.down(new Event(Event.MSG, view_change_msg));
        try {
            if (!ackMembers.isEmpty()) {
                ack_collector.waitForAllAcks(view_ack_collection_timeout);
                if (log.isTraceEnabled())
                    log.trace(local_addr + ": received all " + ack_collector.expectedAcks()
                                + " ACKs from members for view " + new_view.getVid());
            }
        } catch (TimeoutException e) {
            if (log_collect_msgs && log.isWarnEnabled()) {
                log.warn(local_addr + ": failed to collect all ACKs (expected=" + ack_collector.expectedAcks()
                           + ") for view " + new_view.getViewId() + " after " + view_ack_collection_timeout
                           + "ms, missing ACKs from " + ack_collector.printMissing());
            }
        }
    }

    if (jr != null && (newMembers != null && !newMembers.isEmpty())) {
        ack_collector.reset(new ArrayList<Address>(newMembers));
        for (Address joiner : newMembers) {
            sendJoinResponse(jr, joiner);
        }
        try {
            ack_collector.waitForAllAcks(view_ack_collection_timeout);
            if (log.isTraceEnabled())
                log.trace(local_addr + ": received all ACKs (" + ack_collector.expectedAcks()
                            + ") from joiners for view " + new_view.getVid());
        } catch (TimeoutException e) {
            if (log_collect_msgs && log.isWarnEnabled()) {
                log.warn(local_addr + ": failed to collect all ACKs (expected=" + ack_collector.expectedAcks()
                           + ") for unicast view " + new_view + " after " + view_ack_collection_timeout
                           + "ms, missing ACKs from " + ack_collector.printMissing());
            }
        }
    }
}
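/*
 * Hedged usage sketch of the coordinator-side flow above, using the SimpleAckCollector stand-in
 * from the earlier comment. The member names "A" and "B" and the 2000 ms timeout (standing in for
 * view_ack_collection_timeout) are made up; the real code multicasts the VIEW message and unicasts
 * JoinRsp messages where the placeholder comments are.
 *
 *   SimpleAckCollector acks = new SimpleAckCollector();
 *   acks.reset(java.util.Arrays.asList("A", "B"));   // existing members expected to ack the view
 *   // ... multicast the VIEW message to the cluster here ...
 *   try {
 *       acks.waitForAllAcks(2000);
 *   } catch (java.util.concurrent.TimeoutException e) {
 *       // log a warning and move on: the view stays installed; unresponsive members are
 *       // eventually caught by failure detection and removed via SUSPECT
 *   } catch (InterruptedException e) {
 *       Thread.currentThread().interrupt();
 *   }
 *   // joiners receive the view inside their unicast JoinRsp and are waited for in a second round
 */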