public List<PingData> findAllViews(Promise<JoinRsp> promise) {
  // Expect at least as many members as the largest count discovered so far, and never fewer
  // than num_initial_members or the current view size.
  int num_expected_mbrs =
      Math.max(
          max_found_members,
          Math.max(num_initial_members, view != null ? view.size() : num_initial_members));
  max_found_members = Math.max(max_found_members, num_expected_mbrs);
  return findMembers(promise, num_expected_mbrs, false, getViewId());
}
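The expected-member count above only ever grows: it is the maximum of the largest count discovered so far, the configured num_initial_members, and the current view size. A minimal standalone sketch of that bookkeeping (hypothetical class and values; only the max()-based arithmetic mirrors the method above):

// Illustrative only: mirrors the bookkeeping of findAllViews(), but is not part of it.
class ExpectedMembersSketch {
  int num_initial_members = 3; // assumed configured lower bound
  int max_found_members; // largest count seen in any earlier discovery round

  int expectedFor(int currentViewSize) {
    int expected = Math.max(max_found_members, Math.max(num_initial_members, currentViewSize));
    max_found_members = Math.max(max_found_members, expected); // never shrinks
    return expected;
  }
}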
void setAnycastCount() throws Exception {
  int tmp = Util.readIntFromStdin("Anycast count: ");
  View view = channel.getView();
  if (tmp > view.size()) {
    System.err.println(
        "anycast count must be less than or equal to the view size (" + view + ")\n");
    return;
  }
  disp.callRemoteMethods(null, new MethodCall(SET_ANYCAST_COUNT, tmp), RequestOptions.SYNC());
}
protected void sendDiscoveryResponse(
    Address logical_addr,
    List<PhysicalAddress> physical_addrs,
    boolean is_server,
    boolean return_view_only,
    String logical_name,
    final Address sender) {
  PingData data;
  if (return_view_only) {
    data = new PingData(logical_addr, view, is_server, null, null);
  } else {
    ViewId view_id = view != null ? view.getViewId() : null;
    data = new PingData(logical_addr, null, view_id, is_server, logical_name, physical_addrs);
  }

  final Message rsp_msg = new Message(sender, null, null);
  rsp_msg.setFlag(Message.OOB);
  final PingHeader rsp_hdr = new PingHeader(PingHeader.GET_MBRS_RSP, data);
  rsp_msg.putHeader(this.id, rsp_hdr);

  if (stagger_timeout > 0) {
    int view_size = view != null ? view.size() : 10;
    int rank = Util.getRank(view, local_addr); // returns 0 if view or local_addr are null
    long sleep_time =
        rank == 0
            ? Util.random(stagger_timeout)
            : stagger_timeout * rank / view_size - (stagger_timeout / view_size);
    timer.schedule(
        new Runnable() {
          public void run() {
            if (log.isTraceEnabled())
              log.trace(
                  local_addr
                      + ": received GET_MBRS_REQ from "
                      + sender
                      + ", sending staggered response "
                      + rsp_hdr);
            down_prot.down(new Event(Event.MSG, rsp_msg));
          }
        },
        sleep_time,
        TimeUnit.MILLISECONDS);
    return;
  }

  if (log.isTraceEnabled())
    log.trace("received GET_MBRS_REQ from " + sender + ", sending response " + rsp_hdr);
  down_prot.down(new Event(Event.MSG, rsp_msg));
}
protected boolean _startFlush(
    final View new_view, int maxAttempts, long randomFloor, long randomCeiling) {
  if (!flushProtocolInStack) return true;

  // A custom flush invoker, if configured, takes precedence over the default SUSPEND-based flush.
  if (flushInvokerClass != null) {
    try {
      Callable<Boolean> invoker =
          flushInvokerClass.getDeclaredConstructor(View.class).newInstance(new_view);
      return invoker.call();
    } catch (Throwable e) {
      return false;
    }
  }

  try {
    boolean successfulFlush = false;
    boolean validView = new_view != null && new_view.size() > 0;
    if (validView && flushProtocolInStack) {
      // Try the SUSPEND up to maxAttempts times, sleeping a random interval between randomFloor
      // and randomCeiling ms after each failed attempt.
      int attemptCount = 0;
      while (attemptCount < maxAttempts) {
        try {
          up_prot.up(new Event(Event.SUSPEND, new ArrayList<Address>(new_view.getMembers())));
          successfulFlush = true;
          break;
        } catch (Exception e) {
          Util.sleepRandom(randomFloor, randomCeiling);
          attemptCount++;
        }
      }
      if (successfulFlush) {
        if (log.isTraceEnabled()) log.trace(local_addr + ": successful GMS flush by coordinator");
      } else {
        if (log.isWarnEnabled()) log.warn(local_addr + ": GMS flush by coordinator failed");
      }
    }
    return successfulFlush;
  } catch (Exception e) {
    return false;
  }
}
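The default flush path above is a retry-with-random-backoff loop: issue the SUSPEND, and on failure sleep a random interval between randomFloor and randomCeiling before trying again, up to maxAttempts. A self-contained sketch of that pattern, decoupled from the protocol stack (the FlushAction interface and helper class are assumptions, not JGroups API):

// Sketch of the retry-with-random-backoff pattern used by _startFlush(); illustrative only.
final class FlushRetrySketch {
  interface FlushAction {
    void run() throws Exception; // e.g. pushing a SUSPEND event up the stack
  }

  static boolean retry(FlushAction action, int maxAttempts, long floorMs, long ceilingMs) {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      try {
        action.run();
        return true; // flush succeeded
      } catch (Exception e) {
        // back off for a random interval in [floorMs, ceilingMs) before the next attempt
        long sleep = floorMs + (long) (Math.random() * Math.max(1, ceilingMs - floorMs));
        try {
          Thread.sleep(sleep);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          return false;
        }
      }
    }
    return false;
  }
}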
protected void sendDiscoveryResponse(
    Address logical_addr,
    PhysicalAddress physical_addr,
    String logical_name,
    final Address sender,
    boolean coord) {
  final PingData data =
      new PingData(logical_addr, is_server, logical_name, physical_addr).coord(coord);
  final Message rsp_msg =
      new Message(sender)
          .setFlag(Message.Flag.INTERNAL, Message.Flag.OOB, Message.Flag.DONT_BUNDLE)
          .putHeader(this.id, new PingHeader(PingHeader.GET_MBRS_RSP))
          .setBuffer(marshal(data));

  if (stagger_timeout > 0) {
    int view_size = view != null ? view.size() : 10;
    int rank = Util.getRank(view, local_addr); // returns 0 if view or local_addr are null
    long sleep_time =
        rank == 0
            ? Util.random(stagger_timeout)
            : stagger_timeout * rank / view_size - (stagger_timeout / view_size);
    timer.schedule(
        (Runnable)
            () -> {
              log.trace(
                  "%s: received GET_MBRS_REQ from %s, sending staggered response %s",
                  local_addr, sender, data);
              down_prot.down(new Event(Event.MSG, rsp_msg));
            },
        sleep_time,
        TimeUnit.MILLISECONDS);
    return;
  }

  log.trace("%s: received GET_MBRS_REQ from %s, sending response %s", local_addr, sender, data);
  down_prot.down(new Event(Event.MSG, rsp_msg));
}
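Both variants of sendDiscoveryResponse spread their replies over stagger_timeout: rank 0 (typically the coordinator) picks a random delay in [0, stagger_timeout), and every other rank sleeps roughly stagger_timeout * (rank - 1) / view_size ms. A hypothetical standalone demo of that formula with made-up values:

// Prints the staggered delays produced by the formula above for an assumed 5-member view.
public class StaggerDemo {
  public static void main(String[] args) {
    long stagger_timeout = 3000; // ms, assumed configuration value
    int view_size = 5;
    for (int rank = 0; rank < view_size; rank++) {
      if (rank == 0) {
        System.out.println("rank 0 -> random delay in [0, " + stagger_timeout + ") ms");
        continue;
      }
      long sleep_time = stagger_timeout * rank / view_size - (stagger_timeout / view_size);
      System.out.println("rank " + rank + " -> " + sleep_time + " ms");
    }
  }
}

With these assumed values, ranks 1 through 4 come out to 0, 600, 1200 and 1800 ms, so responses are spread across the stagger window instead of all arriving in a burst.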
/**
 * Broadcasts the new view and digest, and waits for acks from all members in the list given as
 * argument. If the list is null, we take the members who are part of new_view.
 */
public void castViewChange(
    View new_view, Digest digest, JoinRsp jr, Collection<Address> newMembers) {
  if (log.isTraceEnabled())
    log.trace(local_addr + ": mcasting view " + new_view + " (" + new_view.size() + " mbrs)\n");

  // Send down a local TMP_VIEW event. This is needed by certain layers (e.g. NAKACK) to compute
  // the correct digest in case a client's next request (e.g. getState()) reaches us *before* our
  // own view change multicast. Check NAKACK's TMP_VIEW handling for details.
  down_prot.up(new Event(Event.TMP_VIEW, new_view));
  down_prot.down(new Event(Event.TMP_VIEW, new_view));

  List<Address> ackMembers = new ArrayList<Address>(new_view.getMembers());
  if (newMembers != null && !newMembers.isEmpty()) ackMembers.removeAll(newMembers);

  Message view_change_msg = new Message(); // bcast to all members
  GmsHeader hdr = new GmsHeader(GmsHeader.VIEW, new_view);
  hdr.my_digest = digest;
  view_change_msg.putHeader(this.id, hdr);

  // If we're the only member the VIEW is broadcast to, simply install the view directly, without
  // sending the VIEW multicast; otherwise the N-1 other members would drop the multicast anyway.
  if (local_addr != null && ackMembers.size() == 1 && ackMembers.get(0).equals(local_addr)) {
    // We still need to add the message to the retransmit window (e.g. in NAKACK), so that (1) it
    // can be retransmitted and (2) we increment the seqno (otherwise we'd return an incorrect
    // digest).
    down_prot.down(new Event(Event.ADD_TO_XMIT_TABLE, view_change_msg));
    impl.handleViewChange(new_view, digest);
  } else {
    if (!ackMembers.isEmpty()) ack_collector.reset(ackMembers);
    down_prot.down(new Event(Event.MSG, view_change_msg));
    try {
      if (!ackMembers.isEmpty()) {
        ack_collector.waitForAllAcks(view_ack_collection_timeout);
        if (log.isTraceEnabled())
          log.trace(
              local_addr
                  + ": received all "
                  + ack_collector.expectedAcks()
                  + " ACKs from members for view "
                  + new_view.getVid());
      }
    } catch (TimeoutException e) {
      if (log_collect_msgs && log.isWarnEnabled()) {
        log.warn(
            local_addr
                + ": failed to collect all ACKs (expected="
                + ack_collector.expectedAcks()
                + ") for view "
                + new_view.getViewId()
                + " after "
                + view_ack_collection_timeout
                + "ms, missing ACKs from "
                + ack_collector.printMissing());
      }
    }
  }

  if (jr != null && (newMembers != null && !newMembers.isEmpty())) {
    ack_collector.reset(new ArrayList<Address>(newMembers));
    for (Address joiner : newMembers) {
      sendJoinResponse(jr, joiner);
    }
    try {
      ack_collector.waitForAllAcks(view_ack_collection_timeout);
      if (log.isTraceEnabled())
        log.trace(
            local_addr
                + ": received all ACKs ("
                + ack_collector.expectedAcks()
                + ") from joiners for view "
                + new_view.getVid());
    } catch (TimeoutException e) {
      if (log_collect_msgs && log.isWarnEnabled()) {
        log.warn(
            local_addr
                + ": failed to collect all ACKs (expected="
                + ack_collector.expectedAcks()
                + ") for unicast view "
                + new_view
                + " after "
                + view_ack_collection_timeout
                + "ms, missing ACKs from "
                + ack_collector.printMissing());
      }
    }
  }
}
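castViewChange relies on ack_collector being reset to the set of members it expects view acks from, multicasting the view, then blocking for up to view_ack_collection_timeout; a timeout is only logged, never fatal. A much-simplified collector sketch built on CountDownLatch (this is not JGroups' AckCollector; Address is the address type used by the methods above):

// Simplified stand-in for ack_collector; illustrative only, not the JGroups implementation.
final class SimpleAckCollector {
  private final java.util.Set<Address> missing =
      java.util.Collections.synchronizedSet(new java.util.HashSet<Address>());
  private volatile java.util.concurrent.CountDownLatch latch =
      new java.util.concurrent.CountDownLatch(0);

  /** Starts a new collection round for the given expected ackers. */
  synchronized void reset(java.util.Collection<Address> expected) {
    missing.clear();
    missing.addAll(expected);
    latch = new java.util.concurrent.CountDownLatch(expected.size());
  }

  /** Records an ack; duplicates from the same member are ignored. */
  void ack(Address member) {
    if (missing.remove(member)) latch.countDown();
  }

  /** Blocks until all expected acks arrive or the timeout elapses. */
  void waitForAllAcks(long timeoutMs)
      throws InterruptedException, java.util.concurrent.TimeoutException {
    if (!latch.await(timeoutMs, java.util.concurrent.TimeUnit.MILLISECONDS))
      throw new java.util.concurrent.TimeoutException("missing ACKs from " + missing);
  }
}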