/**
 * Tests a simple split: {A,B} and {C,D} need to merge back into one subgroup. Checks how many
 * MergeViews are installed
 */
public void testSplitInTheMiddle2() throws Exception {
  // Create two partition views with the same view-id (10): {A,B} coordinated by A,
  // {C,D} coordinated by C, and inject them directly into the members' stacks
  View v1 = View.create(a.getAddress(), 10, a.getAddress(), b.getAddress());
  View v2 = View.create(c.getAddress(), 10, c.getAddress(), d.getAddress());
  injectView(v1, a, b);
  injectView(v2, c, d);
  // Disable INFO sending in the merge protocol so the split doesn't heal prematurely
  enableInfoSender(false, a, b, c, d);
  // Wait until each partition has stabilized on its 2-member view
  Util.waitUntilAllChannelsHaveSameSize(10000, 500, a, b);
  Util.waitUntilAllChannelsHaveSameSize(10000, 500, c, d);
  // NOTE(review): second disable call looks redundant with the one above — presumably
  // defensive; confirm whether injectView/wait can re-enable INFO sending
  enableInfoSender(false, a, b, c, d);
  for (JChannel ch : Arrays.asList(a, b, c, d))
    System.out.println(ch.getName() + ": " + ch.getView());
  System.out.println("\nEnabling INFO sending in merge protocols to merge subclusters");
  // Re-enable INFO sending: this triggers the merge of the two subclusters
  enableInfoSender(true, a, b, c, d);
  Util.waitUntilAllChannelsHaveSameSize(30000, 1000, a, b, c, d);
  System.out.println("\nResulting views:");
  for (JChannel ch : Arrays.asList(a, b, c, d)) {
    GMS gms = (GMS) ch.getProtocolStack().findProtocol(GMS.class);
    View mv = gms.view();
    System.out.println(mv);
  }
  // Exactly one merge must have happened: a single MergeView of 4 members with 2 subgroups
  for (JChannel ch : Arrays.asList(a, b, c, d)) {
    GMS gms = (GMS) ch.getProtocolStack().findProtocol(GMS.class);
    View mv = gms.view();
    assert mv instanceof MergeView;
    assert mv.size() == 4;
    assert ((MergeView) mv).getSubgroups().size() == 2;
  }
  for (JChannel ch : Arrays.asList(a, b, c, d)) {
    View view = ch.getView();
    assert view.size() == 4 : "view should have 4 members: " + view;
  }
}
/**
 * Sends the new view and digest to all subgroup coordinators in coords. Each coord will in turn
 *
 * <ol>
 *   <li>broadcast the new view and digest to all the members of its subgroup (MergeView)
 *   <li>on reception of the view, if it is a MergeView, each member will set the digest and
 *       install the new view
 * </ol>
 *
 * @param coords the coordinators of the subgroups being merged; no-op if null
 * @param combined_merge_data consolidated view and digest to install; no-op if null
 * @param merge_id identifies the merge this view belongs to
 */
private void sendMergeView(
    Collection<Address> coords, MergeData combined_merge_data, MergeId merge_id) {
  if (coords == null || combined_merge_data == null) return;
  View view = combined_merge_data.view;
  Digest digest = combined_merge_data.digest;
  if (view == null || digest == null) {
    if (log.isErrorEnabled())
      log.error("view or digest is null, cannot send consolidated merge view/digest");
    return;
  }
  if (log.isDebugEnabled())
    log.debug(
        gms.local_addr + ": sending merge view " + view.getVid() + " to coordinators " + coords);
  // Prime the ack collector *before* sending, so no ack can arrive unaccounted for
  gms.merge_ack_collector.reset(coords);
  int size = gms.merge_ack_collector.size();
  long timeout = gms.view_ack_collection_timeout;
  long start = System.currentTimeMillis();
  // Unicast an INSTALL_MERGE_VIEW (view + digest + merge id) to every subgroup coordinator
  for (Address coord : coords) {
    Message msg = new Message(coord, null, null);
    GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.INSTALL_MERGE_VIEW);
    hdr.view = view;
    hdr.my_digest = digest;
    hdr.merge_id = merge_id;
    msg.putHeader(gms.getId(), hdr);
    gms.getDownProtocol().down(new Event(Event.MSG, msg));
  }
  // [JGRP-700] - FLUSH: flushing should span merge
  // if flush is in stack wait for acks from separated island coordinators
  if (gms.flushProtocolInStack) {
    try {
      gms.merge_ack_collector.waitForAllAcks(timeout);
      long stop = System.currentTimeMillis();
      if (log.isTraceEnabled())
        log.trace(
            "received all ACKs ("
                + size
                + ") for merge view "
                + view
                + " in "
                + (stop - start)
                + "ms");
    } catch (TimeoutException e) {
      // Best-effort: log the missing acks but don't fail the merge
      log.warn(
          gms.local_addr
              + ": failed to collect all ACKs for merge ("
              + size
              + ") for view "
              + view
              + " after "
              + timeout
              + "ms, missing ACKs from "
              + gms.merge_ack_collector.printMissing());
    }
  }
}
/**
 * Multicasts a GET_DIGEST_REQ to all current members and waits for all responses (GET_DIGEST_RSP)
 * or N ms.
 *
 * @param current_mbrs the members of this subpartition to query; returns null if null
 * @return a digest combining all responses received within the timeout (possibly incomplete)
 */
private Digest fetchDigestsFromAllMembersInSubPartition(List<Address> current_mbrs) {
  if (current_mbrs == null) return null;
  GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.GET_DIGEST_REQ);
  Message get_digest_req = new Message();
  get_digest_req.setFlag(Message.OOB);
  get_digest_req.putHeader(gms.getId(), hdr);
  // Wait at most half the merge timeout, falling back to 2s if merge_timeout is unset
  long max_wait_time = gms.merge_timeout > 0 ? gms.merge_timeout / 2 : 2000L;
  digest_collector.reset(current_mbrs);
  // add my own digest first
  // NOTE(review): own digest is taken *before* the GET_DIGEST_REQ is multicast; the merge_id
  // variant of this method in this file deliberately does the opposite (request first, so the
  // request message is included in the local digest) — confirm this ordering is intended here
  Digest digest = (Digest) gms.getDownProtocol().down(Event.GET_DIGEST_EVT);
  digest_collector.add(gms.local_addr, digest);
  gms.getDownProtocol().down(new Event(Event.MSG, get_digest_req));
  digest_collector.waitForAllResponses(max_wait_time);
  if (log.isDebugEnabled()) {
    if (digest_collector.hasAllResponses())
      log.debug(gms.local_addr + ": fetched all digests for " + current_mbrs);
    else
      log.debug(
          gms.local_addr
              + ": fetched incomplete digests (after timeout of "
              + max_wait_time
              + ") ms for "
              + current_mbrs);
  }
  // Merge whatever responses arrived (null entries from non-responders are skipped)
  Map<Address, Digest> responses = new HashMap<Address, Digest>(digest_collector.getResults());
  MutableDigest retval = new MutableDigest(responses.size());
  for (Digest dig : responses.values()) {
    if (dig != null) retval.add(dig);
  }
  return retval;
}
/** Creates a singleton view for each channel listed and injects it */
protected static void createPartition(JChannel... channels) {
  for (JChannel channel : channels) {
    // Each channel becomes the sole member (and coordinator) of its own view, id 5
    View singleton = Util.createView(channel.getAddress(), 5, channel.getAddress());
    GMS gmsProt = (GMS) channel.getProtocolStack().findProtocol(GMS.class);
    gmsProt.installView(singleton);
  }
}
/** * If merge_id is not equal to this.merge_id then discard. Else cast the view/digest to all * members of this group. */ public void handleMergeView(final MergeData data, final MergeId merge_id) { if (!matchMergeId(merge_id)) { if (log.isErrorEnabled()) log.error("merge_ids don't match (or are null); merge view discarded"); return; } // only send to our *current* members, if we have A and B being merged (we are B), then we would // *not* // receive a VIEW_ACK from A because A doesn't see us in the pre-merge view yet and discards the // view // [JGRP-700] - FLUSH: flushing should span merge // we have to send new view only to current members and we should not wait // for view acks from newly merged mebers List<Address> newViewMembers = new Vector<Address>(data.view.getMembers()); newViewMembers.removeAll(gms.members.getMembers()); gms.castViewChangeWithDest(data.view, data.digest, null, newViewMembers); // if we have flush in stack send ack back to merge coordinator if (gms.flushProtocolInStack) { Message ack = new Message(data.getSender(), null, null); ack.setFlag(Message.OOB); GMS.GmsHeader ack_hdr = new GMS.GmsHeader(GMS.GmsHeader.INSTALL_MERGE_VIEW_OK); ack.putHeader(gms.getId(), ack_hdr); gms.getDownProtocol().down(new Event(Event.MSG, ack)); } cancelMerge(merge_id); }
/** Unicasts a MERGE_RSP with merge_rejected=true back to the given sender. */
protected void sendMergeRejectedResponse(Address sender, MergeId merge_id) {
  GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.MERGE_RSP);
  hdr.merge_rejected = true;
  hdr.merge_id = merge_id;
  Message msg = new Message(sender).setFlag(Message.Flag.OOB, Message.Flag.INTERNAL);
  msg.putHeader(gms.getId(), hdr);
  gms.getDownProtocol().down(new Event(Event.MSG, msg));
}
/**
 * Fetches the digests from all members and installs them again. Used only for diagnosis and
 * support; don't use this otherwise !
 */
void fixDigests() {
  // Collect digests from every member of the current view, then multicast an INSTALL_DIGEST
  // so that everybody re-installs a consistent digest
  Digest combined = fetchDigestsFromAllMembersInSubPartition(gms.view.getMembers());
  GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.INSTALL_DIGEST);
  hdr.my_digest = combined;
  Message msg = new Message();
  msg.putHeader(gms.getId(), hdr);
  gms.getDownProtocol().down(new Event(Event.MSG, msg));
}
/** Clears the current merge state (if id matches) and resumes normal view/stability handling. */
void cancelMerge(MergeId id) {
  // setMergeId(id, null) only succeeds if 'id' matches the merge in progress
  if (!setMergeId(id, null)) return;
  merge_task.stop();
  stopMergeKiller();
  merge_rsps.reset();
  gms.getViewHandler().resume();
  gms.getDownProtocol().down(new Event(Event.RESUME_STABLE));
}
/**
 * Isolates the coordinator into a singleton view, then injects a MERGE event into it as merge
 * leader and asserts that a single MergeView with 2 subgroups re-forms the 4-member cluster.
 */
public void testMergeWithAsymetricViewsCoordIsolated() {
  // Isolate the coord
  Address coord = a.getView().getCreator();
  System.out.println("Isolating coord: " + coord);
  List<Address> members = new ArrayList<>();
  members.add(coord);
  // Singleton view containing only the coordinator, with a higher view id (4)
  View coord_view = new View(coord, 4, members);
  System.out.println("coord_view: " + coord_view);
  Channel coord_channel = findChannel(coord);
  System.out.println("coord_channel: " + coord_channel.getAddress());
  // Build a digest for the singleton view from the coord's own NAKACK2 digest,
  // then install the isolated view directly into its GMS
  MutableDigest digest = new MutableDigest(coord_view.getMembersRaw());
  NAKACK2 nakack = (NAKACK2) coord_channel.getProtocolStack().findProtocol(NAKACK2.class);
  digest.merge(nakack.getDigest(coord));
  GMS gms = (GMS) coord_channel.getProtocolStack().findProtocol(GMS.class);
  gms.installView(coord_view, digest);
  System.out.println("gms.getView() " + gms.getView());
  System.out.println("Views are:");
  for (JChannel ch : Arrays.asList(a, b, c, d))
    System.out.println(ch.getAddress() + ": " + ch.getView());
  JChannel merge_leader = findChannel(coord);
  MyReceiver receiver = new MyReceiver();
  merge_leader.setReceiver(receiver);
  System.out.println("merge_leader: " + merge_leader.getAddressAsString());
  System.out.println("Injecting MERGE event into merge leader " + merge_leader.getAddress());
  // The MERGE event carries the current view of every member, keyed by address
  Map<Address, View> merge_views = new HashMap<>(4);
  merge_views.put(a.getAddress(), a.getView());
  merge_views.put(b.getAddress(), b.getView());
  merge_views.put(c.getAddress(), c.getView());
  merge_views.put(d.getAddress(), d.getView());
  gms = (GMS) merge_leader.getProtocolStack().findProtocol(GMS.class);
  gms.up(new Event(Event.MERGE, merge_views));
  Util.waitUntilAllChannelsHaveSameSize(10000, 1000, a, b, c, d);
  System.out.println("Views are:");
  for (JChannel ch : Arrays.asList(a, b, c, d)) {
    View view = ch.getView();
    System.out.println(ch.getAddress() + ": " + view);
    assert view.size() == 4;
  }
  // The receiver on the merge leader must have seen one MergeView: {coord} + {other 3 members}
  MergeView merge_view = receiver.getView();
  System.out.println("merge_view = " + merge_view);
  assert merge_view.size() == 4;
  assert merge_view.getSubgroups().size() == 2;
  for (View view : merge_view.getSubgroups())
    assert contains(view, a.getAddress())
        || contains(view, b.getAddress(), c.getAddress(), d.getAddress());
}
/** Unicasts a MERGE_RSP with merge_rejected=true back to the given sender (OOB). */
protected void sendMergeRejectedResponse(Address sender, MergeId merge_id) {
  GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.MERGE_RSP);
  hdr.merge_rejected = true;
  hdr.merge_id = merge_id;
  Message rejection = new Message(sender, null, null);
  rejection.setFlag(Message.OOB);
  rejection.putHeader(gms.getId(), hdr);
  if (log.isDebugEnabled()) log.debug("merge response=" + hdr);
  gms.getDownProtocol().down(new Event(Event.MSG, rejection));
}
/** Send back a response containing view and digest to sender */
private void sendMergeResponse(Address sender, View view, Digest digest, MergeId merge_id) {
  GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.MERGE_RSP);
  hdr.merge_id = merge_id;
  hdr.view = view;
  hdr.my_digest = digest;
  Message rsp = new Message(sender).setFlag(Message.Flag.OOB, Message.Flag.INTERNAL);
  rsp.putHeader(gms.getId(), hdr);
  if (log.isTraceEnabled()) log.trace(gms.local_addr + ": sending merge response=" + hdr);
  gms.getDownProtocol().down(new Event(Event.MSG, rsp));
}
/** Creates channel A with a MessageDispatcher on top and connects it to the test cluster. */
@BeforeClass
protected void setUp() throws Exception {
  c1 = createChannel(true);
  c1.setName("A");
  // Suppress the local-address banner GMS prints on connect, if GMS is in the stack
  GMS gmsProt = (GMS) c1.getProtocolStack().findProtocol(GMS.class);
  if (gmsProt != null) gmsProt.setPrintLocalAddress(false);
  disableBundling(c1);
  d1 = new MessageDispatcher(c1, null, null, null);
  c1.connect("MessageDispatcherUnitTest");
}
private void sendMergeCancelledMessage(Collection<Address> coords, MergeId merge_id) { if (coords == null || merge_id == null) return; for (Address coord : coords) { Message msg = new Message(coord); // msg.setFlag(Message.Flag.OOB); GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.CANCEL_MERGE); hdr.merge_id = merge_id; msg.putHeader(gms.getId(), hdr); gms.getDownProtocol().down(new Event(Event.MSG, msg)); } }
/** Cuts each channel off from the network (DISCARD) and installs a singleton view on it. */
private static void createPartitions(JChannel... channels) throws Exception {
  // First, make every channel drop all traffic so the partitions can't heal by themselves
  for (JChannel channel : channels) {
    DISCARD discard = new DISCARD();
    discard.setDiscardAll(true);
    channel.getProtocolStack().insertProtocol(discard, ProtocolStack.Position.ABOVE, TP.class);
  }
  // Then, install a singleton view (id 10) on each channel
  for (JChannel channel : channels) {
    GMS gmsProt = (GMS) channel.getProtocolStack().findProtocol(GMS.class);
    gmsProt.installView(View.create(channel.getAddress(), 10, channel.getAddress()));
  }
}
private void sendMergeCancelledMessage(Collection<Address> coords, MergeId merge_id) { if (coords == null || merge_id == null) return; for (Address coord : coords) { Message msg = new Message(coord, null, null); // msg.setFlag(Message.OOB); GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.CANCEL_MERGE); hdr.merge_id = merge_id; msg.putHeader(gms.getId(), hdr); if (log.isDebugEnabled()) log.debug(gms.local_addr + ": sending cancel merge to " + coord); gms.getDownProtocol().down(new Event(Event.MSG, msg)); } }
/**
 * Handles a merge request from a merge leader: accepts the merge id, builds a view over the
 * union of the current view and the requested members, fetches digests from the subpartition,
 * and replies with a MERGE_RSP.
 *
 * @param sender the merge leader that sent the request
 * @param merge_id id of the merge being started; must match or be settable as the current merge
 * @param mbrs the membership proposed by the merge leader (may be null)
 * @throws Exception if another merge is in progress, the view id is null, flush fails, or
 *     digests could not be fetched — the caller is expected to reject the merge
 */
protected void _handleMergeRequest(
    Address sender, MergeId merge_id, Collection<? extends Address> mbrs) throws Exception {
  // Accept if this id is already the current merge, or if no merge is running (CAS null -> id)
  boolean success = matchMergeId(merge_id) || setMergeId(null, merge_id);
  if (!success)
    throw new Exception(
        "merge " + this.merge_id + " is already in progress, received merge-id=" + merge_id);
  /* Clears the view handler queue and discards all JOIN/LEAVE/MERGE requests until after the
  MERGE */
  // gms.getViewHandler().suspend();
  if (log.isTraceEnabled())
    log.trace(
        gms.local_addr
            + ": got merge request from "
            + sender
            + ", merge_id="
            + merge_id
            + ", mbrs="
            + mbrs);
  // merge the membership of the current view with mbrs
  List<Address> members = new LinkedList<Address>();
  if (mbrs != null) { // didn't use a set because we didn't want to change the membership order at
    // this time (although not incorrect)
    for (Address mbr : mbrs) {
      if (!members.contains(mbr)) members.add(mbr);
    }
  }
  // Copy the view id defensively; a null view id means we cannot respond
  ViewId tmp_vid = gms.getViewId();
  if (tmp_vid != null) tmp_vid = tmp_vid.copy();
  if (tmp_vid == null) throw new Exception("view ID is null; cannot return merge response");
  View view = new View(tmp_vid, new ArrayList<Address>(members));
  // [JGRP-524] - FLUSH and merge: flush doesn't wrap entire merge process
  // [JGRP-770] - Concurrent startup of many channels doesn't stabilize
  // [JGRP-700] - FLUSH: flushing should span merge
  /*if flush is in stack, let this coordinator flush its cluster island */
  if (gms.flushProtocolInStack && !gms.startFlush(view)) throw new Exception("flush failed");
  // we still need to fetch digests from all members, and not just return our own digest
  // (https://issues.jboss.org/browse/JGRP-948)
  Digest digest = fetchDigestsFromAllMembersInSubPartition(members, merge_id);
  if (digest == null || digest.size() == 0)
    throw new Exception("failed fetching digests from subpartition members; dropping merge response");
  sendMergeResponse(sender, view, digest, merge_id);
}
/** Tests the scenario described by Dan in https://issues.jboss.org/browse/JGRP-1876 */
public void testJGRP_1876_Dan() throws Exception {
  // Replace the default a-d channels with a fresh 4-member cluster S,T,U,V
  Util.close(d, c, b, a);
  s = createChannel("S", true);
  t = createChannel("T", true);
  u = createChannel("U", true);
  v = createChannel("V", true);
  Util.waitUntilAllChannelsHaveSameSize(10000, 500, s, t, u, v);
  enableInfoSender(false, s, t, u, v); // stops INFO sending in MERGE3
  for (JChannel ch : Arrays.asList(s, t, u, v))
    System.out.println(ch.getName() + ": " + ch.getView());
  // Split into three partitions: {S}, {T}, {U,V}
  View v1 = View.create(s.getAddress(), 10, s.getAddress());
  View v2 = View.create(t.getAddress(), 10, t.getAddress());
  View v3 = View.create(u.getAddress(), 11, u.getAddress(), v.getAddress());
  injectView(v1, s);
  injectView(v2, t);
  injectView(v3, u, v);
  enableInfoSender(false, s, t, u, v); // stops INFO sending in MERGE3
  Util.waitUntilAllChannelsHaveSameSize(10000, 500, s);
  Util.waitUntilAllChannelsHaveSameSize(10000, 500, t);
  Util.waitUntilAllChannelsHaveSameSize(10000, 500, u, v);
  System.out.printf("\nPartitions:\n");
  for (JChannel ch : Arrays.asList(s, t, u, v))
    System.out.println(ch.getName() + ": " + ch.getView());
  // Re-enabling INFO sending triggers the merge of the three partitions
  enableInfoSender(true, s, t, u, v);
  System.out.println("\nEnabling INFO sending in merge protocols to merge subclusters");
  Util.waitUntilAllChannelsHaveSameSize(30000, 1000, s, t, u, v);
  System.out.println("\nResulting views:");
  for (JChannel ch : Arrays.asList(s, t, u, v)) {
    GMS gms = (GMS) ch.getProtocolStack().findProtocol(GMS.class);
    View mv = gms.view();
    System.out.println(mv);
  }
  // A single merge must have installed a MergeView with 4 members and 3 subgroups
  for (JChannel ch : Arrays.asList(s, t, u, v)) {
    GMS gms = (GMS) ch.getProtocolStack().findProtocol(GMS.class);
    View mv = gms.view();
    assert mv instanceof MergeView;
    assert mv.size() == 4;
    assert ((MergeView) mv).getSubgroups().size() == 3;
  }
  for (JChannel ch : Arrays.asList(s, t, u, v)) {
    View view = ch.getView();
    assert view.size() == 4 : "view should have 4 members: " + view;
  }
}
/** Clears the current merge state (if id matches) and resumes the view handler. */
void cancelMerge(MergeId id) {
  // setMergeId(id, null) only succeeds if 'id' matches the merge in progress
  if (!setMergeId(id, null)) return;
  merge_task.stop();
  merge_rsps.reset();
  gms.getViewHandler().resume(id);
}
/** Picks the channel with the lowest address as merge leader and injects a MERGE event into it. */
private static void mergePartitions(JChannel... channels) throws Exception {
  // The merge leader is the member that sorts first in the combined membership
  Membership membership = new Membership();
  for (JChannel channel : channels) membership.add(channel.getAddress());
  membership.sort();
  Address leaderAddress = membership.elementAt(0);
  JChannel leader = findChannelByAddress(leaderAddress, channels);
  GMS gms = (GMS) leader.getProtocolStack().findProtocol(GMS.class);
  gms.setLevel("trace");
  // The MERGE event carries each member's current view, keyed by its address
  Map<Address, View> views = new HashMap<>();
  for (JChannel channel : channels) views.put(channel.getAddress(), channel.getView());
  gms.up(new Event(Event.MERGE, views));
}
/**
 * Takes a membership of 10, e.g. {A,B,C,D,E,F,G,H,I,J}, creates multiple partitions (e.g.
 * {A,B,C}, {D}, {E,F,G,H}, {I,J}), and merges them back into one cluster. Asserts that there's
 * only 1 MergeView
 */
public void testMultipleSplits() throws Exception {
  // Grow the cluster from the default 4 members (a-d) to 10 (a-j)
  e = createChannel("E", true);
  f = createChannel("F", true);
  g = createChannel("G", true);
  h = createChannel("H", true);
  i = createChannel("I", true);
  j = createChannel("J", true);
  Util.waitUntilAllChannelsHaveSameSize(10000, 500, a, b, c, d, e, f, g, h, i, j);
  enableInfoSender(false, a, b, c, d, e, f, g, h, i, j); // stops INFO sending in MERGE3
  for (JChannel ch : Arrays.asList(a, b, c, d, e, f, g, h, i, j))
    System.out.println(ch.getName() + ": " + ch.getView());
  // Split the 10 members into 4 partitions and inject each partition view into its members
  List<View> partitions = createPartitions(4, a, b, c, d, e, f, g, h, i, j);
  // Install views
  for (View partition : partitions) {
    for (Address mbr : partition.getMembersRaw()) {
      JChannel ch = findChannel(mbr);
      injectView(partition, ch);
    }
  }
  System.out.printf("\n%d partitions:\n", partitions.size());
  for (JChannel ch : Arrays.asList(a, b, c, d, e, f, g, h, i, j))
    System.out.println(ch.getName() + ": " + ch.getView());
  System.out.println("\nEnabling INFO sending in merge protocols to merge subclusters");
  // Re-enabling INFO sending triggers the merge of all partitions
  enableInfoSender(true, a, b, c, d, e, f, g, h, i, j);
  Util.waitUntilAllChannelsHaveSameSize(30000, 1000, a, b, c, d, e, f, g, h, i, j);
  System.out.println("\nResulting views:");
  for (JChannel ch : Arrays.asList(a, b, c, d, e, f, g, h, i, j)) {
    GMS gms = (GMS) ch.getProtocolStack().findProtocol(GMS.class);
    View mv = gms.view();
    System.out.println(mv);
  }
  // Exactly one MergeView must be installed, with one subgroup per partition
  for (JChannel ch : Arrays.asList(a, b, c, d, e, f, g, h, i, j)) {
    GMS gms = (GMS) ch.getProtocolStack().findProtocol(GMS.class);
    View mv = gms.view();
    assert mv instanceof MergeView;
    assert mv.size() == 10;
    assert ((MergeView) mv).getSubgroups().size() == partitions.size();
  }
  for (JChannel ch : Arrays.asList(a, b, c, d, e, f, g, h, i, j)) {
    View view = ch.getView();
    assert view.size() == 10 : "view should have 10 members: " + view;
  }
}
/**
 * Handles a CANCEL_MERGE for the given merge_id: stops any pending flush (best-effort) and
 * clears the local merge state.
 */
public void handleMergeCancelled(MergeId merge_id) {
  try {
    gms.stopFlush();
  } catch (Throwable t) {
    // best-effort: the merge is cancelled regardless of whether the flush stopped cleanly
    log.error("stop flush failed", t);
  }
  if (log.isTraceEnabled()) log.trace(gms.local_addr + ": merge " + merge_id + " is cancelled");
  cancelMerge(merge_id);
}
/** A: {A,B} B: {A,B} C: {C} C receives INFO from B only */
public void testMergeWithIncompleteInfos() throws Exception {
  // Only 3 members are needed for this scenario
  Util.close(d);
  enableInfoSender(false, a, b, c);
  // Partition into {A,B} (coord A) and {C}, both with view id 10
  View one = View.create(a.getAddress(), 10, a.getAddress(), b.getAddress());
  View two = View.create(c.getAddress(), 10, c.getAddress());
  injectView(one, a, b);
  injectView(two, c);
  enableInfoSender(false, a, b, c);
  Util.waitUntilAllChannelsHaveSameSize(10000, 500, a, b);
  Util.waitUntilAllChannelsHaveSameSize(10000, 500, c);
  System.out.printf("\nPartitions:\n");
  for (JChannel ch : Arrays.asList(a, b, c))
    System.out.println(ch.getName() + ": " + ch.getView());
  // Hand-craft an INFO message from B (a non-coordinator of {A,B}) and deliver it to C's
  // MERGE3 — so C learns about partition one only via B
  MERGE3.MergeHeader hdr = MERGE3.MergeHeader.createInfo(one.getViewId(), null, null);
  Message msg =
      new Message(null, b.getAddress(), null)
          .putHeader(merge_id, hdr); // B sends the INFO message to C
  Event merge_event = new Event(Event.MSG, msg);
  MERGE3 merge = (MERGE3) c.getProtocolStack().findProtocol(MERGE3.class);
  merge.up(merge_event);
  // Re-enabling INFO sending triggers the merge of both partitions
  enableInfoSender(true, a, b, c);
  System.out.println("\nEnabling INFO sending in merge protocols to merge subclusters");
  Util.waitUntilAllChannelsHaveSameSize(30000, 1000, a, b, c);
  System.out.println("\nResulting views:");
  for (JChannel ch : Arrays.asList(a, b, c)) {
    GMS gms = (GMS) ch.getProtocolStack().findProtocol(GMS.class);
    View mv = gms.view();
    System.out.println(mv);
  }
  // A single MergeView of 3 members with 2 subgroups must be installed everywhere
  for (JChannel ch : Arrays.asList(a, b, c)) {
    GMS gms = (GMS) ch.getProtocolStack().findProtocol(GMS.class);
    View mv = gms.view();
    assert mv instanceof MergeView;
    assert mv.size() == 3;
    assert ((MergeView) mv).getSubgroups().size() == 2;
  }
  for (JChannel ch : Arrays.asList(a, b, c)) {
    View view = ch.getView();
    assert view.size() == 3 : "view should have 3 members: " + view;
  }
}
/**
 * Multicasts a GET_DIGEST_REQ to all current members and waits for all responses (GET_DIGEST_RSP)
 * or N ms.
 *
 * @param current_mbrs the members of this subpartition to query
 * @param merge_id the merge this digest request belongs to (carried in the header)
 * @return a digest combining all responses received within the timeout (possibly incomplete)
 */
private Digest fetchDigestsFromAllMembersInSubPartition(
    List<Address> current_mbrs, MergeId merge_id) {
  // Optimization: if we're the only member, we don't need to multicast the get-digest message
  if (current_mbrs == null
      || current_mbrs.size() == 1 && current_mbrs.get(0).equals(gms.local_addr))
    return (Digest) gms.getDownProtocol().down(new Event(Event.GET_DIGEST, gms.local_addr));
  GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.GET_DIGEST_REQ);
  hdr.merge_id = merge_id;
  Message get_digest_req =
      new Message().setFlag(Message.Flag.OOB, Message.Flag.INTERNAL).putHeader(gms.getId(), hdr);
  long max_wait_time =
      gms.merge_timeout / 2; // gms.merge_timeout is guaranteed to be > 0, verified in init()
  digest_collector.reset(current_mbrs);
  gms.getDownProtocol().down(new Event(Event.MSG, get_digest_req));
  // add my own digest first - the get_digest_req needs to be sent first *before* getting our own
  // digest, so
  // we have that message in our digest !
  Digest digest =
      (Digest) gms.getDownProtocol().down(new Event(Event.GET_DIGEST, gms.local_addr));
  digest_collector.add(gms.local_addr, digest);
  digest_collector.waitForAllResponses(max_wait_time);
  if (log.isTraceEnabled()) {
    if (digest_collector.hasAllResponses())
      log.trace(gms.local_addr + ": fetched all digests for " + current_mbrs);
    else
      log.trace(
          gms.local_addr
              + ": fetched incomplete digests (after timeout of "
              + max_wait_time
              + ") ms for "
              + current_mbrs);
  }
  // Merge whatever responses arrived (null entries from non-responders are skipped)
  Map<Address, Digest> responses = new HashMap<Address, Digest>(digest_collector.getResults());
  MutableDigest retval = new MutableDigest(responses.size());
  for (Digest dig : responses.values()) {
    if (dig != null) retval.add(dig);
  }
  return retval;
}
/** Tunes a channel's stack for this test: auto-reconnect, view bundling, faster merge, quiet logs. */
private static void changeProperties(JChannel ch) {
  ch.setOpt(Channel.AUTO_RECONNECT, true);
  ProtocolStack stack = ch.getProtocolStack();
  GMS gmsProt = (GMS) stack.findProtocol("GMS");
  if (gmsProt != null) {
    gmsProt.setViewBundling(true);
    gmsProt.setMaxBundlingTime(300);
    gmsProt.setPrintLocalAddr(false);
  }
  MERGE2 mergeProt = (MERGE2) stack.findProtocol("MERGE2");
  if (mergeProt != null) {
    // Speed up merging for the test: check every 2-5s instead of the defaults
    mergeProt.setMinInterval(2000);
    mergeProt.setMaxInterval(5000);
  }
  VIEW_SYNC viewSync = (VIEW_SYNC) stack.findProtocol(VIEW_SYNC.class);
  if (viewSync != null) viewSync.setAverageSendInterval(5000);
  NAKACK nak = (NAKACK) stack.findProtocol(NAKACK.class);
  if (nak != null) nak.setLogDiscardMsgs(false); // partitions drop messages by design; don't log it
}
protected void sendJoinRejectionMessage(Address dest, String error_msg) { if (dest == null) return; JoinRsp joinRes = new JoinRsp(error_msg); // specify the error message on the JoinRsp Message msg = new Message(dest) .putHeader(gms_id, new GMS.GmsHeader(GMS.GmsHeader.JOIN_RSP)) .setBuffer(GMS.marshal(joinRes)); down_prot.down(new Event(Event.MSG, msg)); }
protected void sendJoinRejectionMessage(Address dest, String error_msg) { if (dest == null) return; JoinRsp joinRes = new JoinRsp(error_msg); // specify the error message on the JoinRsp Message msg = new Message(dest) .putHeader(GMS_ID, new GMS.GmsHeader(GMS.GmsHeader.JOIN_RSP)) .setBuffer(GMS.marshal(joinRes)); if (this.authenticate_coord) msg.putHeader(this.id, new AuthHeader(this.auth_token)); down_prot.down(msg); }
/**
 * Atomically replaces the current merge id with new_value, but only if the current id equals
 * expected (compare-and-set under merge_lock). Starting a merge (new_value != null) also
 * suspends the view handler, suspends stability for 20s, and starts the merge killer; a
 * previously-seen merge id is refused.
 *
 * @param expected the merge id the caller believes is current (null = no merge running)
 * @param new_value the merge id to install (null = clear the merge)
 * @return true if the swap happened, false if expected didn't match or new_value was already
 *     seen before
 */
public boolean setMergeId(MergeId expected, MergeId new_value) {
  merge_lock.lock();
  try {
    boolean match = Util.match(this.merge_id, expected);
    if (match) {
      // Refuse a merge id we've already processed, to avoid re-running an old merge
      if (new_value != null && merge_id_history.contains(new_value)) return false;
      else merge_id_history.add(new_value);
      this.merge_id = new_value;
      if (this.merge_id != null) {
        // Clears the view handler queue and discards all JOIN/LEAVE/MERGE requests until after
        // the MERGE
        gms.getViewHandler().suspend();
        gms.getDownProtocol().down(new Event(Event.SUSPEND_STABLE, 20000));
        startMergeKiller();
      }
    }
    return match;
  } finally {
    merge_lock.unlock();
  }
}
/**
 * Builds a shared-loopback channel with a SASL protocol configured for the given mechanism and
 * username, using distinct client/server callback handlers.
 */
private static JChannel createChannel(String channelName, String mech, String username)
    throws Exception {
  SASL sasl = new SASL();
  sasl.setMech(mech);
  sasl.setClientCallbackHandler(new MyCallbackHandler(username));
  sasl.setServerCallbackHandler(new MyCallbackHandler(username));
  sasl.setTimeout(5000);
  sasl.sasl_props.put("com.sun.security.sasl.digest.realm", REALM);
  sasl.setLevel("trace");
  GMS gms = new GMS();
  gms.setJoinTimeout(3000);
  // SASL sits directly below GMS so join/merge requests are authenticated
  Protocol[] protocols = {
    new SHARED_LOOPBACK(),
    new PING(),
    new MERGE3(),
    new NAKACK2(),
    new UNICAST3(),
    new STABLE(),
    sasl,
    gms
  };
  return new JChannel(protocols).name(channelName);
}
/** * If merge_id is not equal to this.merge_id then discard. Else cast the view/digest to all * members of this group. */ public void handleMergeView(final MergeData data, final MergeId merge_id) { if (!matchMergeId(merge_id)) { if (log.isTraceEnabled()) log.trace( gms.local_addr + ": merge_ids (mine: " + this.merge_id + ", received: " + merge_id + ") don't match; merge view " + data.view.getViewId() + " is discarded"); return; } // only send to our *current* members, if we have A and B being merged (we are B), then we would // *not* // want to block on a VIEW_ACK from A because A doesn't see us in the pre-merge view yet and // discards the view List<Address> newViewMembers = new ArrayList<Address>(data.view.getMembers()); newViewMembers.removeAll(gms.members.getMembers()); try { gms.castViewChange(data.view, data.digest, null, newViewMembers); // if we have flush in stack send ack back to merge coordinator if (gms.flushProtocolInStack) { // [JGRP-700] - FLUSH: flushing should span merge Message ack = new Message(data.getSender()).setFlag(Message.Flag.OOB, Message.Flag.INTERNAL); GMS.GmsHeader ack_hdr = new GMS.GmsHeader(GMS.GmsHeader.INSTALL_MERGE_VIEW_OK); ack.putHeader(gms.getId(), ack_hdr); gms.getDownProtocol().down(new Event(Event.MSG, ack)); } } finally { cancelMerge(merge_id); } }
/** Returns the view currently installed in the channel's GMS protocol. */
protected View getViewFromGMS(JChannel ch) {
  GMS gmsProt = (GMS) ch.getProtocolStack().findProtocol(GMS.class);
  return gmsProt.view();
}