private void send(Address dest) throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    final BlockingReceiver receiver = new BlockingReceiver(latch);
    final int NUM = 10;
    b.setReceiver(receiver);

    a.send(dest, 1); // the only regular message
    for (int i = 2; i <= NUM; i++)
        a.send(new Message(dest, i).setFlag(Message.Flag.OOB));

    sendStableMessages(a, b);
    List<Integer> list = receiver.getMsgs();
    for (int i = 0; i < 20; i++) {
        if (list.size() == NUM - 1)
            break;
        sendStableMessages(a, b);
        Util.sleep(500); // give the asynchronous msgs some time to be received
    }

    System.out.println("list = " + list);
    assert list.size() == NUM - 1 : "list is " + list;
    assert list.contains(2) && list.contains(10);

    System.out.println("[" + Thread.currentThread().getName() + "]: releasing latch");
    latch.countDown();

    for (int i = 0; i < 20; i++) {
        if (list.size() == NUM)
            break;
        sendStableMessages(a, b);
        Util.sleep(1000); // give the asynchronous msgs some time to be received
    }

    System.out.println("list = " + list);
    assert list.size() == NUM : "list is " + list;
    for (int i = 1; i <= NUM; i++)
        assert list.contains(i);
}
public static void testNewMembers() {
    final Address a = Util.createRandomAddress(),
        b = Util.createRandomAddress(),
        c = Util.createRandomAddress(),
        d = Util.createRandomAddress(),
        e = Util.createRandomAddress();
    List<Address> old = new ArrayList<>();
    List<Address> new_list = new ArrayList<>();
    old.add(a); old.add(b); old.add(c);
    new_list.add(b); new_list.add(a); new_list.add(c); new_list.add(d); new_list.add(e);
    System.out.println("old: " + old);
    System.out.println("new: " + new_list);

    List<Address> new_nodes = Util.newMembers(old, new_list);
    System.out.println("new_nodes = " + new_nodes);
    assert new_nodes.size() == 2 : "list should have d and e";
    assert new_nodes.contains(d);
    assert new_nodes.contains(e);
}
protected void handleViewChange(View v) {
    Address old_coord;
    List<Address> new_members = v.getMembers();
    boolean send_up_exception = false;

    this.view = v;

    synchronized (members) {
        old_coord = (!members.isEmpty() ? members.get(0) : null);
        members.clear();
        members.addAll(new_members);

        // This handles the case where a coord dies during a state transfer; prevents clients
        // from hanging forever. Note that this only takes a coordinator crash into account:
        // a getState(target, timeout) where target is non-null is not handled! (Usually we
        // get the state from the coordinator.)
        // http://jira.jboss.com/jira/browse/JGRP-148
        if (waiting_for_state_response && old_coord != null && !members.contains(old_coord))
            send_up_exception = true;
    }

    if (send_up_exception) {
        log.warn("%s: discovered that the state provider (%s) left", local_addr, old_coord);
        waiting_for_state_response = false;
        Exception ex = new EOFException("state provider " + old_coord + " left");
        up_prot.up(new Event(Event.GET_STATE_OK, new StateTransferResult(ex)));
        openBarrierAndResumeStable();
    }

    // remove non-members from the list of members requesting state
    state_requesters.retainAll(new_members);
}
protected void handleView(View view) {
    this.view = view;
    if (log.isDebugEnabled())
        log.debug("view=" + view);
    List<Address> members = view.getMembers();

    _consumerLock.lock();
    try {
        // Remove registered consumers that are no longer members
        Iterator<Owner> iterator = _consumersAvailable.iterator();
        while (iterator.hasNext()) {
            Owner owner = iterator.next();
            if (!members.contains(owner.getAddress())) {
                iterator.remove();
                sendRemoveConsumerRequest(owner);
            }
        }

        // Remove run requests whose requestors are no longer members
        iterator = _runRequests.iterator();
        while (iterator.hasNext()) {
            Owner owner = iterator.next();
            if (!members.contains(owner.getAddress())) {
                iterator.remove();
                sendRemoveRunRequest(owner);
            }
        }

        synchronized (_awaitingReturn) {
            for (Entry<Owner, Runnable> entry : _awaitingReturn.entrySet()) {
                // The member currently servicing our request has gone down without
                // completing, so we keep our request alive by sending it back to the
                // coordinator
                Owner owner = entry.getKey();
                if (!members.contains(owner.getAddress())) {
                    Runnable runnable = entry.getValue();
                    // We need to register the request id before sending the request back
                    // to the coordinator, in case our task gets picked up since another
                    // was removed
                    _requestId.put(runnable, owner.getRequestId());
                    _awaitingConsumer.add(runnable);
                    sendToCoordinator(RUN_REQUEST, owner.getRequestId(), local_addr);
                }
            }
        }
    } finally {
        _consumerLock.unlock();
    }
}
/**
 * Computes the next view. Returns a copy that has <code>old_mbrs</code> and
 * <code>suspected_mbrs</code> removed and <code>new_mbrs</code> added.
 */
public View getNextView(Collection<Address> new_mbrs,
                        Collection<Address> old_mbrs,
                        Collection<Address> suspected_mbrs) {
    synchronized (members) {
        ViewId view_id = view != null ? view.getViewId() : null;
        if (view_id == null) {
            log.error("view_id is null");
            return null; // this should *never* happen !
        }
        long vid = Math.max(view_id.getId(), ltime) + 1;
        ltime = vid;

        Membership tmp_mbrs = tmp_members.copy(); // always operate on the temporary membership
        tmp_mbrs.remove(suspected_mbrs);
        tmp_mbrs.remove(old_mbrs);
        tmp_mbrs.add(new_mbrs);
        List<Address> mbrs = tmp_mbrs.getMembers();
        Address new_coord = local_addr;
        if (!mbrs.isEmpty())
            new_coord = mbrs.get(0);
        View v = new View(new_coord, vid, mbrs);

        // Update membership (see DESIGN for explanation):
        tmp_members.set(mbrs);

        // Update joining list (see DESIGN for explanation)
        if (new_mbrs != null) {
            for (Address tmp_mbr : new_mbrs) {
                if (!joining.contains(tmp_mbr))
                    joining.add(tmp_mbr);
            }
        }

        // Update leaving list (see DESIGN for explanations)
        if (old_mbrs != null) {
            for (Address addr : old_mbrs) {
                if (!leaving.contains(addr))
                    leaving.add(addr);
            }
        }
        if (suspected_mbrs != null) {
            for (Address addr : suspected_mbrs) {
                if (!leaving.contains(addr))
                    leaving.add(addr);
            }
        }
        return v;
    }
}
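// A minimal usage sketch for getNextView() above, not part of the original code. It assumes
// it lives in the same class (so getNextView() and its fields are in scope); the joiner and
// suspect parameters are hypothetical. It shows the coordinator-side pattern of passing
// joiners, leavers and suspects as three separate collections (requires java.util.Collections).
protected View computeViewAfterJoinAndSuspect(Address joiner, Address suspect) {
    Collection<Address> joiners  = Collections.singletonList(joiner);
    Collection<Address> leavers  = Collections.emptyList();
    Collection<Address> suspects = Collections.singletonList(suspect);
    return getNextView(joiners, leavers, suspects); // null only if no view has been installed yet
}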
/**
 * Adds a new channel listener to be notified on the channel's state change.
 *
 * @return true if the listener was added, or false if the listener was already in the list.
 */
public boolean addChannelListener(ChannelListener l) {
    synchronized (additionalChannelListeners) {
        if (additionalChannelListeners.contains(l)) {
            return false;
        }
        additionalChannelListeners.add(l);
        return true;
    }
}
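// A minimal usage sketch for addChannelListener() above, not from the original code (the
// channel instance "ch" is hypothetical; ChannelListener is the JGroups callback interface).
// The boolean return value lets callers detect double registration:
ChannelListener listener = new ChannelListener() {
    public void channelConnected(Channel channel)    { System.out.println("connected"); }
    public void channelDisconnected(Channel channel) { System.out.println("disconnected"); }
    public void channelClosed(Channel channel)       { System.out.println("closed"); }
};
boolean added = ch.addChannelListener(listener); // true: newly registered
boolean again = ch.addChannelListener(listener); // false: already in the list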
public void discoveryRequestReceived(Address sender, String logical_name,
                                     Collection<PhysicalAddress> physical_addrs) {
    super.discoveryRequestReceived(sender, logical_name, physical_addrs);
    if (physical_addrs != null) {
        for (PhysicalAddress addr : physical_addrs) {
            if (!initial_hosts.contains(addr))
                dynamic_hosts.addIfAbsent(addr);
        }
    }
}
private void send(Address dest) throws ChannelNotConnectedException, ChannelClosedException {
    final ReentrantLock lock = new ReentrantLock();
    final BlockingReceiver receiver = new BlockingReceiver(lock);
    final int NUM = 10;
    c2.setReceiver(receiver);

    System.out.println("[" + Thread.currentThread().getName() + "]: locking lock");
    lock.lock();
    c1.send(new Message(dest, null, 1));
    for (int i = 2; i <= NUM; i++) {
        Message msg = new Message(dest, null, i);
        msg.setFlag(Message.OOB);
        c1.send(msg);
    }
    sendStableMessages(c1, c2);
    Util.sleep(500);

    List<Integer> list = receiver.getMsgs();
    for (int i = 0; i < 20; i++) {
        if (list.size() == NUM - 1)
            break;
        System.out.println("list = " + list);
        Util.sleep(1000); // give the asynchronous msgs some time to be received
    }

    System.out.println("list = " + list);
    assert list.size() == NUM - 1 : "list is " + list;
    assert list.contains(2) && list.contains(10);

    System.out.println("[" + Thread.currentThread().getName() + "]: unlocking lock");
    lock.unlock();

    for (int i = 0; i < 20; i++) {
        if (list.size() == NUM)
            break;
        System.out.println("list = " + list);
        Util.sleep(1000); // give the asynchronous msgs some time to be received
    }

    System.out.println("list = " + list);
    assert list.size() == NUM : "list is " + list;
    for (int i = 1; i <= NUM; i++)
        assert list.contains(i);
}
protected synchronized void handleView(List<Address> members) {
    if (current_owner != null && !members.contains(current_owner.getAddress())) {
        Owner tmp = current_owner;
        setOwner(null);
        if (log.isDebugEnabled())
            log.debug("unlocked \"" + lock_name + "\" because owner " + tmp + " left");
    }

    for (Iterator<Request> it = queue.iterator(); it.hasNext(); ) {
        Request req = it.next();
        if (!members.contains(req.owner.getAddress()))
            it.remove();
    }

    for (Iterator<Owner> it = condition.queue.iterator(); it.hasNext(); ) {
        Owner own = it.next();
        if (!members.contains(own.getAddress())) {
            it.remove();
        }
    }
    processQueue();
}
protected void _handleMergeRequest(Address sender, MergeId merge_id,
                                   Collection<? extends Address> mbrs) throws Exception {
    boolean success = matchMergeId(merge_id) || setMergeId(null, merge_id);
    if (!success)
        throw new Exception("merge " + this.merge_id + " is already in progress, received merge-id=" + merge_id);

    /* Clears the view handler queue and discards all JOIN/LEAVE/MERGE requests until after the MERGE */
    // gms.getViewHandler().suspend();
    if (log.isTraceEnabled())
        log.trace(gms.local_addr + ": got merge request from " + sender +
                  ", merge_id=" + merge_id + ", mbrs=" + mbrs);

    // merge the membership of the current view with mbrs
    List<Address> members = new LinkedList<Address>();
    if (mbrs != null) { // didn't use a set because we didn't want to change the membership order at this time (although not incorrect)
        for (Address mbr : mbrs) {
            if (!members.contains(mbr))
                members.add(mbr);
        }
    }

    ViewId tmp_vid = gms.getViewId();
    if (tmp_vid != null)
        tmp_vid = tmp_vid.copy();
    if (tmp_vid == null)
        throw new Exception("view ID is null; cannot return merge response");
    View view = new View(tmp_vid, new ArrayList<Address>(members));

    // [JGRP-524] - FLUSH and merge: flush doesn't wrap entire merge process
    // [JGRP-770] - Concurrent startup of many channels doesn't stabilize
    // [JGRP-700] - FLUSH: flushing should span merge
    /* if flush is in stack, let this coordinator flush its cluster island */
    if (gms.flushProtocolInStack && !gms.startFlush(view))
        throw new Exception("flush failed");

    // we still need to fetch digests from all members, and not just return our own digest
    // (https://issues.jboss.org/browse/JGRP-948)
    Digest digest = fetchDigestsFromAllMembersInSubPartition(members, merge_id);
    if (digest == null || digest.size() == 0)
        throw new Exception("failed fetching digests from subpartition members; dropping merge response");

    sendMergeResponse(sender, view, digest, merge_id);
}
public Object down(Event evt) {
    Object retval = super.down(evt);
    switch (evt.getType()) {
        case Event.VIEW_CHANGE:
            for (Address logical_addr : members) {
                PhysicalAddress physical_addr =
                    (PhysicalAddress) down_prot.down(new Event(Event.GET_PHYSICAL_ADDRESS, logical_addr));
                if (physical_addr != null && !initial_hosts.contains(physical_addr)) {
                    dynamic_hosts.addIfAbsent(physical_addr);
                }
            }
            break;
    }
    return retval;
}
public static void testLeftMembers() {
    final Address a = Util.createRandomAddress(),
        b = Util.createRandomAddress(),
        c = Util.createRandomAddress(),
        d = Util.createRandomAddress();

    List<Address> v1 = new ArrayList<>();
    v1.add(a); v1.add(b); v1.add(c); v1.add(d);

    List<Address> v2 = new ArrayList<>();
    v2.add(c); v2.add(d);

    View one = new View(new ViewId(a, 1), v1),
         two = new View(new ViewId(b, 2), v2);
    List<Address> left = View.leftMembers(one, two);
    System.out.println("left = " + left);
    assert left != null;
    assert left.size() == 2;
    assert left.contains(a);
    assert left.contains(b);
}
public static void testPickNextN() {
    List<Integer> list = Arrays.asList(1, 2, 3, 4);
    List<Integer> result = Util.pickNext(list, 0, 1);
    assert result.isEmpty();

    result = Util.pickNext(list, 1, 1);
    System.out.println("result = " + result);
    assert result.size() == 1;
    assert result.contains(2);

    result = Util.pickNext(list, 3, 2);
    System.out.println("result = " + result);
    assert result.size() == 2;
    assert result.contains(4) && result.contains(1);

    result = Util.pickNext(list, 4, 5);
    System.out.println("result = " + result);
    assert result.size() == 3;
    assert result.contains(1) && result.contains(2) && result.contains(3);
}
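// As the assertions above imply, Util.pickNext(list, obj, n) returns up to n elements that
// follow obj in the list, wrapping around at the end and excluding obj itself; if obj does
// not occur in the list (the pickNext(list, 0, 1) call), the result is empty.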
protected static boolean contains(View view, Address... members) {
    List<Address> mbrs = view.getMembers();
    for (Address member : members)
        if (!mbrs.contains(member))
            return false;
    return true;
}
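// A minimal usage sketch for the contains() helper above, not from the original code: a test
// could poll until all expected members appear in the channel's view (the channel argument
// and the retry/timeout values are hypothetical).
protected static void waitUntilViewContains(JChannel ch, Address... expected) {
    for (int i = 0; i < 20 && !contains(ch.getView(), expected); i++)
        Util.sleep(500); // give the view change some time to propagate
    assert contains(ch.getView(), expected) : "view is " + ch.getView();
}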
protected <T> GroupRequest<T> cast(final Collection<Address> dests, Message msg,
                                   RequestOptions options, boolean block_for_results,
                                   FutureListener<RspList<T>> listener) throws Exception {
    if (msg.getDest() != null && !(msg.getDest() instanceof AnycastAddress))
        throw new IllegalArgumentException("message destination is non-null, cannot send message");

    if (options != null) {
        msg.setFlag(options.getFlags()).setTransientFlag(options.getTransientFlags());
        if (options.getScope() > 0)
            msg.setScope(options.getScope());
    }

    List<Address> real_dests;
    // we need to clone because we don't want to modify the original
    if (dests != null) {
        real_dests = new ArrayList<Address>();
        for (Address dest : dests) {
            if (dest instanceof SiteAddress || this.members.contains(dest)) {
                if (!real_dests.contains(dest))
                    real_dests.add(dest);
            }
        }
    }
    else
        real_dests = new ArrayList<Address>(members);

    // if local delivery is off, then we should not wait for the message from the local member.
    // therefore remove it from the membership
    Channel tmp = channel;
    if ((tmp != null && tmp.getDiscardOwnMessages()) || msg.isTransientFlagSet(Message.TransientFlag.DONT_LOOPBACK)) {
        if (local_addr == null)
            local_addr = tmp != null ? tmp.getAddress() : null;
        if (local_addr != null)
            real_dests.remove(local_addr);
    }

    if (options != null && options.hasExclusionList()) {
        Address[] exclusion_list = options.exclusionList();
        for (Address excluding : exclusion_list)
            real_dests.remove(excluding);
    }

    // don't even send the message if the destination list is empty
    if (log.isTraceEnabled())
        log.trace("real_dests=" + real_dests);

    if (real_dests.isEmpty()) {
        if (log.isTraceEnabled())
            log.trace("destination list is empty, won't send message");
        return null;
    }

    if (options != null) {
        boolean async = options.getMode() == ResponseMode.GET_NONE;
        if (options.getAnycasting()) {
            if (async) async_anycasts.incrementAndGet();
            else       sync_anycasts.incrementAndGet();
        }
        else {
            if (async) async_multicasts.incrementAndGet();
            else       sync_multicasts.incrementAndGet();
        }
    }

    GroupRequest<T> req = new GroupRequest<T>(msg, corr, real_dests, options);
    if (listener != null)
        req.setListener(listener);
    if (options != null) {
        req.setResponseFilter(options.getRspFilter());
        req.setAnycasting(options.getAnycasting());
    }
    req.setBlockForResults(block_for_results);
    req.execute();
    return req;
}
public Object down(Event evt) {
    switch (evt.getType()) {
        case Event.MSG: // Add UnicastHeader, add to AckSenderWindow and pass down
            Message msg = (Message) evt.getArg();
            Address dst = msg.getDest();

            /* only handle unicast messages */
            if (dst == null || msg.isFlagSet(Message.NO_RELIABILITY))
                break;

            if (!started) {
                if (log.isTraceEnabled())
                    log.trace("discarded message as start() has not yet been called, message: " + msg);
                return null;
            }

            SenderEntry entry = send_table.get(dst);
            if (entry == null) {
                entry = new SenderEntry(getNewConnectionId());
                SenderEntry existing = send_table.putIfAbsent(dst, entry);
                if (existing != null)
                    entry = existing;
                else {
                    if (log.isTraceEnabled())
                        log.trace(local_addr + ": created connection to " + dst +
                                  " (conn_id=" + entry.send_conn_id + ")");
                    if (cache != null && !members.contains(dst))
                        cache.add(dst);
                }
            }

            long seqno = -2;
            short send_conn_id = -1;
            Unicast2Header hdr;

            entry.lock(); // threads will only sync if they access the same entry
            try {
                seqno = entry.sent_msgs_seqno;
                send_conn_id = entry.send_conn_id;
                hdr = Unicast2Header.createDataHeader(seqno, send_conn_id, seqno == DEFAULT_FIRST_SEQNO);
                msg.putHeader(this.id, hdr);
                entry.sent_msgs.addToMessages(seqno, msg); // add *including* UnicastHeader, adds to retransmitter
                entry.sent_msgs_seqno++;
                entry.update();
            }
            finally {
                entry.unlock();
            }

            if (log.isTraceEnabled()) {
                StringBuilder sb = new StringBuilder();
                sb.append(local_addr).append(" --> DATA(").append(dst).append(": #").append(seqno)
                  .append(", conn_id=").append(send_conn_id);
                if (hdr.first)
                    sb.append(", first");
                sb.append(')');
                log.trace(sb);
            }

            try {
                down_prot.down(evt);
                num_msgs_sent++;
                num_bytes_sent += msg.getLength();
            }
            catch (Throwable t) {
                log.warn("failed sending the message", t);
            }
            return null; // we already passed the msg down

        case Event.VIEW_CHANGE: // remove connections to peers that are not members anymore !
            View view = (View) evt.getArg();
            List<Address> new_members = view.getMembers();
            Set<Address> non_members = new HashSet<Address>(send_table.keySet());
            non_members.addAll(recv_table.keySet());

            synchronized (members) {
                members.clear();
                if (new_members != null)
                    members.addAll(new_members);
                non_members.removeAll(members);
                if (cache != null) {
                    cache.removeAll(members);
                }
            }

            if (!non_members.isEmpty()) {
                if (log.isTraceEnabled())
                    log.trace("removing non members " + non_members);
                for (Address non_mbr : non_members)
                    removeConnection(non_mbr);
            }
            break;

        case Event.SET_LOCAL_ADDRESS:
            local_addr = (Address) evt.getArg();
            break;
    }
    return down_prot.down(evt); // Pass on to the layer below us
}
public Object down(Event evt) {
    switch (evt.getType()) {
        case Event.MSG: // Add UnicastHeader, add to AckSenderWindow and pass down
            Message msg = (Message) evt.getArg();
            Address dst = msg.getDest();

            /* only handle unicast messages */
            if (dst == null || msg.isFlagSet(Message.NO_RELIABILITY))
                break;

            if (!running) {
                if (log.isTraceEnabled())
                    log.trace("discarded message as start() has not yet been called, message: " + msg);
                return null;
            }

            SenderEntry entry = send_table.get(dst);
            if (entry == null) {
                entry = new SenderEntry(getNewConnectionId());
                SenderEntry existing = send_table.putIfAbsent(dst, entry);
                if (existing != null)
                    entry = existing;
                else {
                    if (log.isTraceEnabled())
                        log.trace(local_addr + ": created connection to " + dst +
                                  " (conn_id=" + entry.send_conn_id + ")");
                    if (cache != null && !members.contains(dst))
                        cache.add(dst);
                }
            }

            short send_conn_id = entry.send_conn_id;
            long seqno = entry.sent_msgs_seqno.getAndIncrement();
            long sleep = 10;
            while (running) {
                try {
                    msg.putHeader(this.id,
                                  Unicast2Header.createDataHeader(seqno, send_conn_id, seqno == DEFAULT_FIRST_SEQNO));
                    entry.sent_msgs.add(seqno, msg); // add *including* UnicastHeader, adds to retransmitter
                    if (conn_expiry_timeout > 0)
                        entry.update();
                    break;
                }
                catch (Throwable t) {
                    if (!running)
                        break;
                    if (log.isWarnEnabled())
                        log.warn("failed sending message", t);
                    Util.sleep(sleep);
                    sleep = Math.min(5000, sleep * 2); // exponential backoff, capped at 5s
                }
            }

            if (log.isTraceEnabled()) {
                StringBuilder sb = new StringBuilder();
                sb.append(local_addr).append(" --> DATA(").append(dst).append(": #").append(seqno)
                  .append(", conn_id=").append(send_conn_id);
                if (seqno == DEFAULT_FIRST_SEQNO)
                    sb.append(", first");
                sb.append(')');
                log.trace(sb);
            }

            try {
                down_prot.down(evt);
                num_msgs_sent++;
            }
            catch (Throwable t) {
                log.warn("failed sending the message", t);
            }
            return null; // we already passed the msg down

        case Event.VIEW_CHANGE: // remove connections to peers that are not members anymore !
            View view = (View) evt.getArg();
            List<Address> new_members = view.getMembers();
            Set<Address> non_members = new HashSet<Address>(send_table.keySet());
            non_members.addAll(recv_table.keySet());

            members = new_members;
            non_members.removeAll(new_members);
            if (cache != null)
                cache.removeAll(new_members);

            if (!non_members.isEmpty()) {
                if (log.isTraceEnabled())
                    log.trace("removing non members " + non_members);
                for (Address non_mbr : non_members)
                    removeConnection(non_mbr);
            }
            break;

        case Event.SET_LOCAL_ADDRESS:
            local_addr = (Address) evt.getArg();
            break;
    }
    return down_prot.down(evt); // Pass on to the layer below us
}
/**
 * Gets the view and digest and sends back both (MergeData) in the form of a MERGE_RSP to the
 * sender. If a merge is already in progress, sends back a MergeData with the merge_rejected
 * field set to true.
 *
 * @param sender   The address of the merge leader
 * @param merge_id The merge ID
 * @param mbrs     The set of members from which we expect responses
 */
public void handleMergeRequest(Address sender, MergeId merge_id, Collection<? extends Address> mbrs) {
    boolean success = matchMergeId(merge_id) || setMergeId(null, merge_id);
    if (!success) {
        if (log.isWarnEnabled())
            log.warn(gms.local_addr + ": merge is already in progress");
        sendMergeRejectedResponse(sender, merge_id);
        return;
    }

    /* Clears the view handler queue and discards all JOIN/LEAVE/MERGE requests until after the MERGE */
    gms.getViewHandler().suspend(merge_id);
    if (log.isDebugEnabled())
        log.debug(gms.local_addr + ": got merge request from " + sender +
                  ", merge_id=" + merge_id + ", mbrs=" + mbrs);

    // merge the membership of the current view with mbrs
    List<Address> members = new LinkedList<Address>();
    if (mbrs != null) { // didn't use a set because we didn't want to change the membership order at this time (although not incorrect)
        for (Address mbr : mbrs) {
            if (!members.contains(mbr))
                members.add(mbr);
        }
    }

    ViewId tmp_vid = gms.view_id != null ? gms.view_id.copy() : null;
    if (tmp_vid == null) {
        log.warn("view ID is null; cannot return merge response");
        sendMergeRejectedResponse(sender, merge_id);
        return;
    }
    View view = new View(tmp_vid, new Vector<Address>(members));

    // [JGRP-524] - FLUSH and merge: flush doesn't wrap entire merge process
    // [JGRP-770] - Concurrent startup of many channels doesn't stabilize
    // [JGRP-700] - FLUSH: flushing should span merge
    /* if flush is in stack, let this coordinator flush its cluster island */
    boolean successfulFlush = gms.startFlush(view);
    if (!successfulFlush) {
        sendMergeRejectedResponse(sender, merge_id);
        if (log.isWarnEnabled())
            log.warn(gms.local_addr + ": flush failed; sending merge rejected message to " +
                     sender + ", merge_id=" + merge_id);
        cancelMerge(merge_id);
        return;
    }

    Digest digest = fetchDigestsFromAllMembersInSubPartition(members);
    if (digest.size() == 0) {
        log.error("failed fetching digests from subpartition members; dropping merge response");
        return;
    }
    sendMergeResponse(sender, view, digest, merge_id);
}
/**
 * Sets the new view and sends a VIEW_CHANGE event up and down the stack. If the view is a
 * MergeView (subclass of View), then digest will be non-null and has to be set before
 * installing the view.
 */
public void installView(View new_view, Digest digest) {
    ViewId vid = new_view.getVid();
    List<Address> mbrs = new_view.getMembers();
    ltime = Math.max(vid.getId(), ltime); // compute the logical time, regardless of whether the view is accepted

    // Discards view with id lower than or equal to our own. Will be installed without check
    // if it is the first view
    if (view != null) {
        ViewId view_id = view.getViewId();
        int rc = vid.compareToIDs(view_id);
        if (rc <= 0) {
            if (log.isWarnEnabled() && rc < 0 && log_view_warnings) { // only scream if view is smaller, silently discard same views
                log.warn(local_addr + ": received view < current view;" +
                         " discarding it (current vid: " + view_id + ", new vid: " + vid + ')');
            }
            return;
        }
    }

    /* Check for self-inclusion: if I'm not part of the new membership, I just discard it.
       This ensures that messages sent in view V1 are only received by members of V1 */
    if (!mbrs.contains(local_addr)) {
        if (log.isWarnEnabled() && log_view_warnings)
            log.warn(local_addr + ": not member of view " + new_view.getViewId() + "; discarding it");
        return;
    }

    if (digest != null) {
        if (new_view instanceof MergeView)
            mergeDigest(digest);
        else
            setDigest(digest);
    }

    if (log.isDebugEnabled())
        log.debug(local_addr + ": installing view " + new_view);

    Event view_event;
    synchronized (members) {
        view = new View(new_view.getVid(), new_view.getMembers());
        view_event = new Event(Event.VIEW_CHANGE, new_view);

        // Set the membership. Take into account joining members
        if (!mbrs.isEmpty()) {
            members.set(mbrs);
            tmp_members.set(members);
            joining.removeAll(mbrs);     // remove all members in mbrs from joining
            leaving.retainAll(mbrs);     // remove all elements from 'leaving' that are not in 'mbrs'
            tmp_members.add(joining);    // add members that haven't yet shown up in the membership
            tmp_members.remove(leaving); // remove members that haven't yet been removed from the membership

            // add to prev_members
            for (Address addr : mbrs) {
                if (!prev_members.contains(addr))
                    prev_members.add(addr);
            }
        }

        Address coord = determineCoordinator();
        if (coord != null && coord.equals(local_addr) && !haveCoordinatorRole()) {
            becomeCoordinator();
        }
        else {
            if (haveCoordinatorRole() && !local_addr.equals(coord)) {
                becomeParticipant();
                merge_ack_collector.reset(null); // we don't need this one anymore
            }
        }
    }

    // - Changed order of passing view up and down (http://jira.jboss.com/jira/browse/JGRP-347)
    // - Changed it back (bela Sept 4 2007): http://jira.jboss.com/jira/browse/JGRP-564
    // - Moved sending up view_event out of the synchronized block (bela Nov 2011)
    down_prot.down(view_event); // needed e.g. by failure detector or UDP
    up_prot.up(view_event);

    List<Address> tmp_mbrs = new_view.getMembers();
    ack_collector.retainAll(tmp_mbrs);
    merge_ack_collector.retainAll(tmp_mbrs);

    if (new_view instanceof MergeView)
        merger.forceCancelMerge();

    if (stats) {
        num_views++;
        prev_views.add(new Tuple<View, Long>(new_view, System.currentTimeMillis()));
    }
}
@SuppressWarnings("unchecked") public Object up(Event evt) { switch (evt.getType()) { case Event.MSG: Message msg = (Message) evt.getArg(); PingHeader hdr = (PingHeader) msg.getHeader(this.id); if (hdr == null) return up_prot.up(evt); if (is_leaving) return null; // prevents merging back a leaving member // (https://issues.jboss.org/browse/JGRP-1336) PingData data = readPingData(msg.getRawBuffer(), msg.getOffset(), msg.getLength()); Address logical_addr = data != null ? data.getAddress() : msg.src(); switch (hdr.type) { case PingHeader.GET_MBRS_REQ: // return Rsp(local_addr, coord) if (cluster_name == null || hdr.cluster_name == null) { log.warn( "cluster_name (%s) or cluster_name of header (%s) is null; passing up discovery " + "request from %s, but this should not be the case", cluster_name, hdr.cluster_name, msg.src()); } else { if (!cluster_name.equals(hdr.cluster_name)) { log.warn( "%s: discarding discovery request for cluster '%s' from %s; " + "our cluster name is '%s'. Please separate your clusters properly", logical_addr, hdr.cluster_name, msg.src(), cluster_name); return null; } } // add physical address and logical name of the discovery sender (if available) to the // cache if (data != null) { addDiscoveryResponseToCaches( logical_addr, data.getLogicalName(), data.getPhysicalAddr()); discoveryRequestReceived(msg.getSrc(), data.getLogicalName(), data.getPhysicalAddr()); addResponse(data, false); } if (return_entire_cache) { Map<Address, PhysicalAddress> cache = (Map<Address, PhysicalAddress>) down(new Event(Event.GET_LOGICAL_PHYSICAL_MAPPINGS)); if (cache != null) { for (Map.Entry<Address, PhysicalAddress> entry : cache.entrySet()) { Address addr = entry.getKey(); // JGRP-1492: only return our own address, and addresses in view. if (addr.equals(local_addr) || members.contains(addr)) { PhysicalAddress physical_addr = entry.getValue(); sendDiscoveryResponse( addr, physical_addr, UUID.get(addr), msg.getSrc(), isCoord(addr)); } } } return null; } // Only send a response if hdr.mbrs is not empty and contains myself. Otherwise always // send my info Collection<? extends Address> mbrs = data != null ? data.mbrs() : null; boolean send_response = mbrs == null || mbrs.contains(local_addr); if (send_response) { PhysicalAddress physical_addr = (PhysicalAddress) down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr)); sendDiscoveryResponse( local_addr, physical_addr, UUID.get(local_addr), msg.getSrc(), is_coord); } return null; case PingHeader.GET_MBRS_RSP: // add physical address (if available) to transport's cache if (data != null) { log.trace("%s: received GET_MBRS_RSP from %s: %s", local_addr, msg.src(), data); handleDiscoveryResponse(data, msg.src()); } return null; default: log.warn("got PING header with unknown type %d", hdr.type); return null; } case Event.FIND_MBRS: return findMembers( (List<Address>) evt.getArg(), false, true); // this is done asynchronously } return up_prot.up(evt); }