protected void _handleMergeRequest(Address sender, MergeId merge_id, Collection<? extends Address> mbrs) throws Exception {
    boolean success = matchMergeId(merge_id) || setMergeId(null, merge_id);
    if (!success)
        throw new Exception("merge " + this.merge_id + " is already in progress, received merge-id=" + merge_id);

    /* Clears the view handler queue and discards all JOIN/LEAVE/MERGE requests until after the MERGE */
    // gms.getViewHandler().suspend();
    if (log.isTraceEnabled())
        log.trace(gms.local_addr + ": got merge request from " + sender + ", merge_id=" + merge_id + ", mbrs=" + mbrs);

    // merge the membership of the current view with mbrs
    List<Address> members = new LinkedList<Address>();
    if (mbrs != null) { // didn't use a set because we didn't want to change the membership order at this time (although not incorrect)
        for (Address mbr : mbrs) {
            if (!members.contains(mbr))
                members.add(mbr);
        }
    }

    ViewId tmp_vid = gms.getViewId();
    if (tmp_vid != null)
        tmp_vid = tmp_vid.copy();
    if (tmp_vid == null)
        throw new Exception("view ID is null; cannot return merge response");
    View view = new View(tmp_vid, new ArrayList<Address>(members));

    // [JGRP-524] - FLUSH and merge: flush doesn't wrap entire merge process
    // [JGRP-770] - Concurrent startup of many channels doesn't stabilize
    // [JGRP-700] - FLUSH: flushing should span merge
    /* if flush is in stack, let this coordinator flush its cluster island */
    if (gms.flushProtocolInStack && !gms.startFlush(view))
        throw new Exception("flush failed");

    // we still need to fetch digests from all members, and not just return our own digest
    // (https://issues.jboss.org/browse/JGRP-948)
    Digest digest = fetchDigestsFromAllMembersInSubPartition(members, merge_id);
    if (digest == null || digest.size() == 0)
        throw new Exception("failed fetching digests from subpartition members; dropping merge response");

    sendMergeResponse(sender, view, digest, merge_id);
}
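The membership loop above deliberately avoids a Set so the order in which members arrive is preserved. As an aside, a LinkedHashSet gives the same first-occurrence, order-preserving semantics; the following is a minimal, self-contained sketch of that design choice (DedupExample and dedupPreservingOrder are hypothetical names, not part of Merger):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedHashSet;
import java.util.List;

// Hypothetical helper: shows the same order-preserving de-duplication that the
// contains()-based loop performs on mbrs.
public class DedupExample {
    static <T> List<T> dedupPreservingOrder(Collection<? extends T> in) {
        // LinkedHashSet drops duplicates but keeps first-seen insertion order
        return in == null ? new ArrayList<T>() : new ArrayList<T>(new LinkedHashSet<T>(in));
    }

    public static void main(String[] args) {
        System.out.println(dedupPreservingOrder(Arrays.asList("A", "B", "A", "C", "B"))); // [A, B, C]
    }
}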
/**
 * Merge all MergeData. All MergeData elements should be disjunct (both views and digests).
 * However, this method is prepared to resolve duplicate entries (for the same member). The
 * resolution strategy for views is to merge only one of the duplicate members; the resolution
 * strategy for digests is to take the higher seqnos for duplicate digests.
 *
 * <p>After merging all members into a Membership and subsequent sorting, the first member of
 * the sorted membership will be the new coordinator. This method holds a lock on merge_rsps.
 *
 * @param merge_rsps A list of MergeData items. Elements with merge_rejected=true were removed
 *     before. Is guaranteed not to be null and to contain at least 1 member.
 */
private MergeData consolidateMergeData(List<MergeData> merge_rsps) {
    long logical_time = 0; // for new_vid
    List<View> subgroups = new ArrayList<View>(11); // contains a list of Views, each View is a subgroup
    Collection<Collection<Address>> sub_mbrships = new ArrayList<Collection<Address>>();

    for (MergeData tmp_data : merge_rsps) {
        View tmp_view = tmp_data.getView();
        if (tmp_view != null) {
            ViewId tmp_vid = tmp_view.getVid();
            if (tmp_vid != null) {
                // compute the new view id (max of all vids + 1)
                logical_time = Math.max(logical_time, tmp_vid.getId());
            }
            // merge all membership lists into one (prevent duplicates)
            sub_mbrships.add(new ArrayList<Address>(tmp_view.getMembers()));
            subgroups.add(tmp_view.copy());
        }
    }

    // determine the new digest
    Digest new_digest = consolidateDigests(merge_rsps, merge_rsps.size());
    if (new_digest == null)
        return null;

    // remove all members from the new member list that are not in the digest
    Collection<Address> digest_mbrs = new_digest.getMembers();
    for (Collection<Address> coll : sub_mbrships)
        coll.retainAll(digest_mbrs);

    List<Address> merged_mbrs = gms.computeNewMembership(sub_mbrships);

    // the new coordinator is the first member of the consolidated & sorted membership list
    Address new_coord = merged_mbrs.isEmpty() ? null : merged_mbrs.get(0);
    if (new_coord == null)
        return null;

    // the new view ID is the highest view ID seen up to now, plus 1
    ViewId new_vid = new ViewId(new_coord, logical_time + 1);

    // determine the new view
    MergeView new_view = new MergeView(new_vid, merged_mbrs, subgroups);
    if (log.isTraceEnabled())
        log.trace(gms.local_addr + ": consolidated view=" + new_view + "\nconsolidated digest=" + new_digest);
    return new MergeData(gms.local_addr, new_view, new_digest);
}
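The javadoc's resolution rule for duplicate digests, take the higher seqno per member, can be illustrated with a self-contained sketch over plain maps. This is not the consolidateDigests() implementation; DigestMergeExample and mergeHighestSeqnos are hypothetical names used only for illustration:

import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch: merges two member -> highest-seqno maps, keeping the larger
// seqno whenever the same member appears in both.
public class DigestMergeExample {
    static Map<String, Long> mergeHighestSeqnos(Map<String, Long> a, Map<String, Long> b) {
        Map<String, Long> merged = new HashMap<String, Long>(a);
        for (Map.Entry<String, Long> e : b.entrySet()) {
            Long existing = merged.get(e.getKey());
            // keep the higher seqno for duplicate entries
            merged.put(e.getKey(), existing == null ? e.getValue() : Math.max(existing, e.getValue()));
        }
        return merged;
    }

    public static void main(String[] args) {
        Map<String, Long> d1 = new HashMap<String, Long>();
        d1.put("A", 10L); d1.put("B", 7L);
        Map<String, Long> d2 = new HashMap<String, Long>();
        d2.put("B", 9L); d2.put("C", 4L);
        System.out.println(mergeHighestSeqnos(d1, d2)); // {A=10, B=9, C=4}
    }
}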
public void testSetDigest() throws TimeoutException {
    System.out.println("d1: " + d1);
    System.out.println("d2: " + d2);

    System.out.println("setting d2:");
    nak.down(new Event(Event.SET_DIGEST, d2));
    Digest digest = (Digest) nak.down(new Event(Event.GET_DIGEST));
    System.out.println("digest = " + digest);
    assert digest.capacity() == 3;
    assert digest.containsAll(a, b, c);

    System.out.println("setting d1:");
    nak.down(new Event(Event.SET_DIGEST, d1));
    digest = (Digest) nak.down(new Event(Event.GET_DIGEST));
    System.out.println("digest = " + digest);
    assert digest.capacity() == 3; // https://jira.jboss.org/jira/browse/JGRP-1060
    assert digest.containsAll(a, b, c);
}
public int size() {
    int retval = Global.BYTE_SIZE * 2; // type + merge_rejected

    retval += Global.BYTE_SIZE; // presence view
    retval += Global.BYTE_SIZE; // MergeView or View
    if (view != null)
        retval += view.serializedSize();

    retval += Util.size(mbr);
    retval += Util.size(mbrs);

    retval += Global.BYTE_SIZE; // presence of join_rsp
    if (join_rsp != null)
        retval += join_rsp.serializedSize();

    retval += Global.BYTE_SIZE; // presence for my_digest
    if (my_digest != null)
        retval += my_digest.serializedSize();

    retval += Global.BYTE_SIZE; // presence for merge_id
    if (merge_id != null)
        retval += merge_id.size();

    retval += Global.BYTE_SIZE; // boolean useFlushIfPresent
    return retval;
}
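Each optional field in size() is accounted for with one presence byte because a boolean flag is written before the field itself, and the field's serialized size is added only when it is non-null. A minimal, self-contained illustration of that pattern using java.io.DataOutputStream follows; PresenceByteExample and writeOptionalUtf are hypothetical names, not GmsHeader's actual streaming code:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Hypothetical illustration: a presence byte precedes an optional field, so a null
// field still costs exactly one byte on the wire.
public class PresenceByteExample {
    static byte[] writeOptionalUtf(String value) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(baos);
        out.writeBoolean(value != null);   // the "presence" byte counted in size()
        if (value != null)
            out.writeUTF(value);           // only present fields add their serialized size
        out.flush();
        return baos.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        System.out.println(writeOptionalUtf(null).length);   // 1 (presence byte only)
        System.out.println(writeOptionalUtf("abc").length);  // 1 + 2 (length) + 3 (bytes) = 6
    }
}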
/**
 * Get the view and digest and send back both (MergeData) in the form of a MERGE_RSP to the
 * sender. If a merge is already in progress, send back a MergeData with the merge_rejected field
 * set to true.
 *
 * @param sender The address of the merge leader
 * @param merge_id The merge ID
 * @param mbrs The set of members from which we expect responses
 */
public void handleMergeRequest(Address sender, MergeId merge_id, Collection<? extends Address> mbrs) {
    boolean success = matchMergeId(merge_id) || setMergeId(null, merge_id);
    if (!success) {
        if (log.isWarnEnabled())
            log.warn(gms.local_addr + ": merge is already in progress");
        sendMergeRejectedResponse(sender, merge_id);
        return;
    }

    /* Clears the view handler queue and discards all JOIN/LEAVE/MERGE requests until after the MERGE */
    gms.getViewHandler().suspend(merge_id);
    if (log.isDebugEnabled())
        log.debug(gms.local_addr + ": got merge request from " + sender + ", merge_id=" + merge_id + ", mbrs=" + mbrs);

    // merge the membership of the current view with mbrs
    List<Address> members = new LinkedList<Address>();
    if (mbrs != null) { // didn't use a set because we didn't want to change the membership order at this time (although not incorrect)
        for (Address mbr : mbrs) {
            if (!members.contains(mbr))
                members.add(mbr);
        }
    }

    ViewId tmp_vid = gms.view_id != null ? gms.view_id.copy() : null;
    if (tmp_vid == null) {
        log.warn("view ID is null; cannot return merge response");
        sendMergeRejectedResponse(sender, merge_id);
        return;
    }
    View view = new View(tmp_vid, new Vector<Address>(members));

    // [JGRP-524] - FLUSH and merge: flush doesn't wrap entire merge process
    // [JGRP-770] - Concurrent startup of many channels doesn't stabilize
    // [JGRP-700] - FLUSH: flushing should span merge
    /* if flush is in stack, let this coordinator flush its cluster island */
    boolean successfulFlush = gms.startFlush(view);
    if (!successfulFlush) {
        sendMergeRejectedResponse(sender, merge_id);
        if (log.isWarnEnabled())
            log.warn(gms.local_addr + ": flush failed; sending merge rejected message to " + sender + ", merge_id=" + merge_id);
        cancelMerge(merge_id);
        return;
    }

    Digest digest = fetchDigestsFromAllMembersInSubPartition(members);
    if (digest.size() == 0) {
        log.error("failed fetching digests from subpartition members; dropping merge response");
        return;
    }

    sendMergeResponse(sender, view, digest, merge_id);
}