public static void testParseSemicolonDelimitedString2() {
    String input = " myID1::subID1 ; myID2::mySubID2; myID3 ;myID4::blaSubID4";
    List list = Util.parseStringList(input, ";");
    System.out.println("list: " + list);
    Assert.assertEquals(4, list.size());
    Assert.assertEquals("myID1::subID1", list.get(0));
    Assert.assertEquals("myID4::blaSubID4", list.get(list.size() - 1));
}
public static void testParseSemicolonDelimitedString() {
    String input = "one;two ; three; four ; five;six";
    List list = Util.parseStringList(input, ";");
    System.out.println("list: " + list);
    Assert.assertEquals(6, list.size());
    Assert.assertEquals("one", list.get(0));
    Assert.assertEquals("six", list.get(list.size() - 1));
}
public static void testParseCommaDelimitedString() {
    String input = "1,2,3,4,5,6,7,8,9,10 , 11, 12 ,13";
    List list = Util.parseCommaDelimitedStrings(input);
    System.out.println("list: " + list);
    Assert.assertEquals(13, list.size());
    Assert.assertEquals("1", list.get(0));
    Assert.assertEquals("13", list.get(list.size() - 1));
}
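/*
 * All three tests expect the same semantics from the parser: split on the delimiter, trim
 * each token, and (we assume) skip empty tokens. A minimal, self-contained sketch with those
 * semantics follows; it illustrates what the tests demand and is not the actual Util
 * implementation.
 */
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;

final class ParseSketch {
    // Split 'input' on 'delimiter', trim each token, skip empty tokens.
    static List<String> parseStringList(String input, String delimiter) {
        List<String> result = new ArrayList<>();
        for (String token : input.split(Pattern.quote(delimiter))) {
            String trimmed = token.trim();
            if (!trimmed.isEmpty())
                result.add(trimmed);
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(parseStringList(" a ; b ;c", ";")); // prints [a, b, c]
    }
}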
protected String printMessageList(List<Message> list) {
    StringBuilder sb = new StringBuilder();
    int size = list.size();
    Message first = size > 0 ? list.get(0) : null, second = size > 1 ? list.get(size - 1) : first;
    UnicastHeader hdr;
    if (first != null) {
        hdr = (UnicastHeader) first.getHeader(id);
        if (hdr != null) sb.append("#" + hdr.seqno);
    }
    if (second != null) {
        hdr = (UnicastHeader) second.getHeader(id);
        if (hdr != null) sb.append(" - #" + hdr.seqno);
    }
    return sb.toString();
}
protected void handleViewChange(View v) {
    Address old_coord;
    List<Address> new_members = v.getMembers();
    boolean send_up_exception = false;
    this.view = v;
    synchronized (members) {
        old_coord = (!members.isEmpty() ? members.get(0) : null);
        members.clear();
        members.addAll(new_members);
        // This handles the case where a coord dies during a state transfer; it prevents
        // clients from hanging forever. Note that this only takes a coordinator crash into
        // account; a getState(target, timeout) where target is not null is not handled!
        // (Usually we get the state from the coordinator.)
        // http://jira.jboss.com/jira/browse/JGRP-148
        if (waiting_for_state_response && old_coord != null && !members.contains(old_coord))
            send_up_exception = true;
    }
    if (send_up_exception) {
        log.warn("%s: discovered that the state provider (%s) left", local_addr, old_coord);
        waiting_for_state_response = false;
        Exception ex = new EOFException("state provider " + old_coord + " left");
        up_prot.up(new Event(Event.GET_STATE_OK, new StateTransferResult(ex)));
        openBarrierAndResumeStable();
    }
    // remove non-members from the list of members requesting state
    state_requesters.retainAll(new_members);
}
/**
 * An event is to be sent down the stack. The layer may want to examine its type and perform
 * some action on it, depending on the event's type. If the event is a message MSG, then the
 * layer may need to add a header to it (or do nothing at all) before sending it down the
 * stack using <code>PassDown</code>. In case of a GET_ADDRESS event (which tries to retrieve
 * the stack's address from one of the bottom layers), the layer may need to send a new
 * response event back up the stack using <code>up_prot.up()</code>.
 *
 * <p>The PING protocol is interested in several different down events:
 * <ul>
 *   <li>Event.FIND_INITIAL_MBRS - sent by the GMS layer, expecting a GET_MBRS_OK
 *   <li>Event.TMP_VIEW and Event.VIEW_CHANGE - a view change event
 *   <li>Event.BECOME_SERVER - called after the client has joined and is a fully working group member
 *   <li>Event.CONNECT, Event.DISCONNECT
 * </ul>
 */
@SuppressWarnings("unchecked")
public Object down(Event evt) {
    switch (evt.getType()) {
        case Event.FIND_INITIAL_MBRS: // sent by GMS layer
        case Event.FIND_ALL_VIEWS:
            // sends the GET_MBRS_REQ to all members, waits 'timeout' ms or until
            // 'num_initial_members' have been retrieved
            long start = System.currentTimeMillis();
            boolean find_all_views = evt.getType() == Event.FIND_ALL_VIEWS;
            Promise<JoinRsp> promise = (Promise<JoinRsp>) evt.getArg();
            List<PingData> rsps = find_all_views ? findAllViews(promise) : findInitialMembers(promise);
            long diff = System.currentTimeMillis() - start;
            if (log.isTraceEnabled())
                log.trace("discovery took " + diff + " ms: responses: " + Util.printPingData(rsps));
            return rsps;

        case Event.TMP_VIEW:
        case Event.VIEW_CHANGE:
            List<Address> tmp;
            view = (View) evt.getArg();
            if ((tmp = view.getMembers()) != null) {
                synchronized (members) {
                    members.clear();
                    members.addAll(tmp);
                }
            }
            current_coord = !members.isEmpty() ? members.get(0) : null;
            is_coord = current_coord != null && local_addr != null && current_coord.equals(local_addr);
            return down_prot.down(evt);

        case Event.BECOME_SERVER: // called after client has joined and is a fully working group member
            down_prot.down(evt);
            is_server = true;
            return null;

        case Event.SET_LOCAL_ADDRESS:
            local_addr = (Address) evt.getArg();
            return down_prot.down(evt);

        case Event.CONNECT:
        case Event.CONNECT_WITH_STATE_TRANSFER:
        case Event.CONNECT_USE_FLUSH:
        case Event.CONNECT_WITH_STATE_TRANSFER_USE_FLUSH:
            is_leaving = false;
            group_addr = (String) evt.getArg();
            Object ret = down_prot.down(evt);
            handleConnect();
            return ret;

        case Event.DISCONNECT:
            is_leaving = true;
            handleDisconnect();
            return down_prot.down(evt);

        default:
            return down_prot.down(evt); // pass on to the layer below us
    }
}
/**
 * Merges all MergeData. All MergeData elements should be disjoint (both views and digests).
 * However, this method is prepared to resolve duplicate entries (for the same member). The
 * resolution strategy for views is to merge only one of the duplicate members; the resolution
 * strategy for digests is to take the higher seqnos for duplicate digests.
 *
 * <p>After merging all members into a Membership and subsequent sorting, the first member of
 * the sorted membership will be the new coordinator. This method holds a lock on merge_rsps.
 *
 * @param merge_rsps A list of MergeData items. Elements with merge_rejected=true were removed
 *     before. Guaranteed to be non-null and to contain at least 1 member.
 */
private MergeData consolidateMergeData(List<MergeData> merge_rsps) {
    long logical_time = 0; // for new_vid
    List<View> subgroups = new ArrayList<View>(11); // a list of Views, each View is a subgroup
    Collection<Collection<Address>> sub_mbrships = new ArrayList<Collection<Address>>();
    for (MergeData tmp_data : merge_rsps) {
        View tmp_view = tmp_data.getView();
        if (tmp_view != null) {
            ViewId tmp_vid = tmp_view.getVid();
            if (tmp_vid != null) {
                // compute the new view id (max of all vids + 1)
                logical_time = Math.max(logical_time, tmp_vid.getId());
            }
            // merge all membership lists into one (prevent duplicates)
            sub_mbrships.add(new ArrayList<Address>(tmp_view.getMembers()));
            subgroups.add(tmp_view.copy());
        }
    }

    // determine the new digest
    Digest new_digest = consolidateDigests(merge_rsps, merge_rsps.size());
    if (new_digest == null) return null;

    // remove all members from the new member list that are not in the digest
    Collection<Address> digest_mbrs = new_digest.getMembers();
    for (Collection<Address> coll : sub_mbrships) coll.retainAll(digest_mbrs);

    List<Address> merged_mbrs = gms.computeNewMembership(sub_mbrships);

    // the new coordinator is the first member of the consolidated & sorted membership list
    Address new_coord = merged_mbrs.isEmpty() ? null : merged_mbrs.get(0);
    if (new_coord == null) return null;

    // should be the highest view ID seen up to now plus 1
    ViewId new_vid = new ViewId(new_coord, logical_time + 1);

    // determine the new view
    MergeView new_view = new MergeView(new_vid, merged_mbrs, subgroups);
    if (log.isTraceEnabled())
        log.trace(gms.local_addr + ": consolidated view=" + new_view
            + "\nconsolidated digest=" + new_digest);
    return new MergeData(gms.local_addr, new_view, new_digest);
}
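/*
 * The "higher seqnos win" rule for duplicate digests amounts to a per-member max-merge. A
 * minimal, self-contained sketch of that idea follows; it models a digest as a plain
 * member -> seqno map rather than JGroups' Digest class, so everything below is illustrative
 * only.
 */
import java.util.HashMap;
import java.util.Map;

final class DigestMergeSketch {
    static Map<String, Long> maxMerge(Map<String, Long> a, Map<String, Long> b) {
        Map<String, Long> result = new HashMap<>(a);
        // for duplicate members, keep the higher seqno
        b.forEach((member, seqno) -> result.merge(member, seqno, Math::max));
        return result;
    }

    public static void main(String[] args) {
        Map<String, Long> d1 = Map.of("A", 10L, "B", 5L);
        Map<String, Long> d2 = Map.of("B", 7L, "C", 3L);
        System.out.println(maxMerge(d1, d2)); // prints {A=10, B=7, C=3} (order may vary)
    }
}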
/**
 * Creates a list of randomly generated partitions, each having a max size of
 * max_partition_size.
 */
protected List<View> createPartitions(int max_partition_size, JChannel... channels) {
    long view_id = 1;
    for (JChannel ch : channels)
        view_id = Math.max(view_id, ch.getView().getViewId().getId());
    List<View> partitions = new ArrayList<>();
    List<Address> tmp = new ArrayList<>();
    for (JChannel ch : channels) tmp.add(ch.getAddress());
    while (!tmp.isEmpty()) {
        int num_to_remove = (int) Util.random(max_partition_size);
        List<Address> part = new ArrayList<>(max_partition_size);
        for (int x = 0; x < num_to_remove && !tmp.isEmpty(); x++)
            part.add(tmp.remove(0));
        partitions.add(new View(part.get(0), view_id + 1, part));
    }
    return partitions;
}
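/*
 * Hedged usage sketch: how the returned partitions might be sanity-checked in a test. The
 * method below is an assumption (not part of the original test class) and assumes
 * java.util.HashSet and java.util.Set are imported; it verifies that every channel lands in
 * exactly one partition and that no partition exceeds the cap.
 */
protected void verifyPartitions(int max_partition_size, JChannel... channels) {
    List<View> partitions = createPartitions(max_partition_size, channels);
    Set<Address> seen = new HashSet<>();
    for (View part : partitions) {
        assert part.size() >= 1 && part.size() <= max_partition_size; // non-empty and bounded
        for (Address mbr : part.getMembers())
            assert seen.add(mbr); // each member appears in exactly one partition
    }
    assert seen.size() == channels.length; // all channels are covered
}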
private void process(List<Request> requests) {
    if (requests.isEmpty()) return;
    Request firstReq = requests.get(0);
    switch (firstReq.type) {
        case Request.JOIN:
        case Request.JOIN_WITH_STATE_TRANSFER:
        case Request.LEAVE:
        case Request.SUSPECT:
            impl.handleMembershipChange(requests);
            break;
        case Request.MERGE:
            impl.merge(firstReq.views);
            break;
        default:
            log.error("request " + firstReq.type + " is unknown; discarded");
    }
}
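/*
 * process() assumes a homogeneous batch: the first request's type stands in for the whole
 * list. A hedged sketch of how a caller might build such batches is below. The 'queue'
 * parameter and this drain loop are assumptions for illustration, not the original
 * dispatcher; java.util.Queue, java.util.Collections are assumed imported.
 */
private void drainAndProcess(Queue<Request> queue) {
    List<Request> batch = new ArrayList<>();
    Request req;
    while ((req = queue.poll()) != null) {
        boolean batchable = req.type == Request.JOIN || req.type == Request.JOIN_WITH_STATE_TRANSFER
            || req.type == Request.LEAVE || req.type == Request.SUSPECT;
        if (batchable) {
            batch.add(req);
        } else {
            process(batch); // flush pending membership changes first (no-op if empty)
            batch.clear();
            process(Collections.singletonList(req)); // MERGE etc. is processed on its own
        }
    }
    process(batch); // flush the remainder
}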
private synchronized void handleViewChange(View view, boolean makeServer) {
    if (makeServer)
        initializeNewSymmetricKey(view instanceof MergeView);

    // if the view is a bit broken, set me as key server
    List<Address> members = view.getMembers();
    if (members == null || members.isEmpty() || members.get(0) == null) {
        becomeKeyServer(local_addr, false);
        return;
    }
    // otherwise get the key server from the view controller
    Address tmpKeyServer = view.getMembers().get(0);

    // I am the key server - either the first member of the group, or the old key server is
    // gone and I have been voted the new controller
    if (makeServer || (tmpKeyServer.equals(local_addr)))
        becomeKeyServer(tmpKeyServer, makeServer);
    else
        handleNewKeyServer(tmpKeyServer, view instanceof MergeView);
}
/**
 * Computes the next view. Returns a copy that has <code>old_mbrs</code> and
 * <code>suspected_mbrs</code> removed and <code>new_mbrs</code> added.
 */
public View getNextView(
    Collection<Address> new_mbrs,
    Collection<Address> old_mbrs,
    Collection<Address> suspected_mbrs) {
    synchronized (members) {
        ViewId view_id = view != null ? view.getViewId() : null;
        if (view_id == null) {
            log.error("view_id is null");
            return null; // this should *never* happen !
        }
        long vid = Math.max(view_id.getId(), ltime) + 1;
        ltime = vid;
        Membership tmp_mbrs = tmp_members.copy(); // always operate on the temporary membership
        tmp_mbrs.remove(suspected_mbrs);
        tmp_mbrs.remove(old_mbrs);
        tmp_mbrs.add(new_mbrs);
        List<Address> mbrs = tmp_mbrs.getMembers();
        Address new_coord = local_addr;
        if (!mbrs.isEmpty()) new_coord = mbrs.get(0);
        View v = new View(new_coord, vid, mbrs);

        // Update membership (see DESIGN for explanation):
        tmp_members.set(mbrs);

        // Update joining list (see DESIGN for explanation)
        if (new_mbrs != null) {
            for (Address tmp_mbr : new_mbrs) {
                if (!joining.contains(tmp_mbr)) joining.add(tmp_mbr);
            }
        }

        // Update leaving list (see DESIGN for explanations)
        if (old_mbrs != null) {
            for (Address addr : old_mbrs) {
                if (!leaving.contains(addr)) leaving.add(addr);
            }
        }
        if (suspected_mbrs != null) {
            for (Address addr : suspected_mbrs) {
                if (!leaving.contains(addr)) leaving.add(addr);
            }
        }
        return v;
    }
}
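/*
 * Worked example (a standalone illustration, not part of GMS): the membership arithmetic of
 * getNextView() reduced to plain list operations on member names, showing how the coordinator
 * and the next view id are derived.
 */
import java.util.ArrayList;
import java.util.List;

public class NextViewSketch {
    public static void main(String[] args) {
        List<String> members = new ArrayList<>(List.of("A", "B", "C"));
        members.removeAll(List.of("B")); // suspected members are removed
        members.removeAll(List.of());    // leaving members are removed
        members.addAll(List.of("D"));    // joining members are appended

        long currentVid = 5, ltime = 3;
        long nextVid = Math.max(currentVid, ltime) + 1; // 6: the id always moves forward

        String coord = members.isEmpty() ? null : members.get(0); // first member: "A"
        System.out.println("view " + nextVid + " coord=" + coord + " mbrs=" + members);
        // prints: view 6 coord=A mbrs=[A, C, D]
    }
}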
/**
 * Multicasts a GET_DIGEST_REQ to all current members and waits for all responses
 * (GET_DIGEST_RSP), or for N ms, whichever comes first.
 *
 * @return a digest consolidated from all responses collected so far
 */
private Digest fetchDigestsFromAllMembersInSubPartition(
    List<Address> current_mbrs, MergeId merge_id) {
    // Optimization: if we're the only member, we don't need to multicast the get-digest message
    if (current_mbrs == null
        || current_mbrs.size() == 1 && current_mbrs.get(0).equals(gms.local_addr))
        return (Digest) gms.getDownProtocol().down(new Event(Event.GET_DIGEST, gms.local_addr));

    GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.GET_DIGEST_REQ);
    hdr.merge_id = merge_id;
    Message get_digest_req =
        new Message().setFlag(Message.Flag.OOB, Message.Flag.INTERNAL).putHeader(gms.getId(), hdr);

    long max_wait_time = gms.merge_timeout / 2; // gms.merge_timeout is guaranteed to be > 0, verified in init()
    digest_collector.reset(current_mbrs);

    gms.getDownProtocol().down(new Event(Event.MSG, get_digest_req));

    // Add my own digest first - the get_digest_req needs to be sent *before* getting our own
    // digest, so we have that message in our digest!
    Digest digest = (Digest) gms.getDownProtocol().down(new Event(Event.GET_DIGEST, gms.local_addr));
    digest_collector.add(gms.local_addr, digest);

    digest_collector.waitForAllResponses(max_wait_time);
    if (log.isTraceEnabled()) {
        if (digest_collector.hasAllResponses())
            log.trace(gms.local_addr + ": fetched all digests for " + current_mbrs);
        else
            log.trace(gms.local_addr + ": fetched incomplete digests (after timeout of "
                + max_wait_time + " ms) for " + current_mbrs);
    }
    Map<Address, Digest> responses = new HashMap<Address, Digest>(digest_collector.getResults());
    MutableDigest retval = new MutableDigest(responses.size());
    for (Digest dig : responses.values()) {
        if (dig != null) retval.add(dig);
    }
    return retval;
}
public void addResponse(PingData rsp, boolean overwrite) {
    if (rsp == null) return;
    promise.getLock().lock();
    try {
        if (overwrite)
            ping_rsps.remove(rsp); // https://jira.jboss.org/jira/browse/JGRP-1179
        int index = ping_rsps.indexOf(rsp);
        if (index == -1) {
            ping_rsps.add(rsp);
            promise.getCond().signalAll();
        } else if (rsp.isCoord()) {
            PingData pr = ping_rsps.get(index);
            // only replace if the already existing element is not a coordinator
            if (!pr.isCoord()) {
                ping_rsps.set(index, rsp);
                promise.getCond().signalAll();
            }
        }
    } finally {
        promise.getLock().unlock();
    }
}
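/*
 * The net effect of addResponse(): a coordinator response always replaces a non-coordinator
 * response from the same sender, and nothing is ever downgraded. A self-contained sketch of
 * this replace-if-better pattern follows; the Rsp type and its sender-only equality are
 * assumptions that mirror why indexOf() can locate a "duplicate" whose coord flag differs.
 */
import java.util.ArrayList;
import java.util.List;

final class CoordUpgradeSketch {
    static final class Rsp {
        final String sender;
        final boolean coord;
        Rsp(String sender, boolean coord) { this.sender = sender; this.coord = coord; }
        @Override public boolean equals(Object o) { return o instanceof Rsp && ((Rsp) o).sender.equals(sender); }
        @Override public int hashCode() { return sender.hashCode(); }
        @Override public String toString() { return sender + (coord ? "(coord)" : ""); }
    }

    static void add(List<Rsp> rsps, Rsp rsp) {
        int index = rsps.indexOf(rsp);
        if (index == -1) rsps.add(rsp);                                     // new sender
        else if (rsp.coord && !rsps.get(index).coord) rsps.set(index, rsp); // upgrade only
    }

    public static void main(String[] args) {
        List<Rsp> rsps = new ArrayList<>();
        add(rsps, new Rsp("A", false));
        add(rsps, new Rsp("A", true));  // upgrades A's entry to coord
        add(rsps, new Rsp("A", false)); // ignored: never downgrade
        System.out.println(rsps);       // prints [A(coord)]
    }
}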
@SuppressWarnings("unchecked")
public Object down(Event evt) {
    switch (evt.getType()) {
        case Event.FIND_INITIAL_MBRS: // sent by GMS layer
            return findMembers(null, true, false); // triggered by JOIN process (ClientGmsImpl)
        case Event.FIND_MBRS:
            return findMembers((List<Address>) evt.getArg(), false, false); // triggered by MERGE2/MERGE3

        // case Event.TMP_VIEW:
        case Event.VIEW_CHANGE:
            List<Address> tmp;
            View old_view = view;
            view = (View) evt.getArg();
            if ((tmp = view.getMembers()) != null) {
                synchronized (members) {
                    members.clear();
                    members.addAll(tmp);
                }
            }
            current_coord = !members.isEmpty() ? members.get(0) : null;
            is_coord = current_coord != null && local_addr != null && current_coord.equals(local_addr);
            Object retval = down_prot.down(evt);
            if (send_cache_on_join && !isDynamic() && is_coord) {
                List<Address> curr_mbrs, left_mbrs, new_mbrs;
                synchronized (members) {
                    curr_mbrs = new ArrayList<>(members);
                    left_mbrs = old_view != null ? Util.leftMembers(old_view.getMembers(), members) : null;
                    new_mbrs = old_view != null ? Util.newMembers(old_view.getMembers(), members) : null;
                }
                startCacheDissemination(curr_mbrs, left_mbrs, new_mbrs); // separate task
            }
            return retval;

        case Event.BECOME_SERVER: // called after client has joined and is a fully working group member
            down_prot.down(evt);
            is_server = true;
            return null;

        case Event.SET_LOCAL_ADDRESS:
            local_addr = (Address) evt.getArg();
            return down_prot.down(evt);

        case Event.CONNECT:
        case Event.CONNECT_WITH_STATE_TRANSFER:
        case Event.CONNECT_USE_FLUSH:
        case Event.CONNECT_WITH_STATE_TRANSFER_USE_FLUSH:
            is_leaving = false;
            cluster_name = (String) evt.getArg();
            Object ret = down_prot.down(evt);
            handleConnect();
            return ret;

        case Event.DISCONNECT:
            is_leaving = true;
            handleDisconnect();
            return down_prot.down(evt);

        default:
            return down_prot.down(evt); // pass on to the layer below us
    }
}
/**
 * Broadcasts the new view and digest, and waits for acks from all members in the list given
 * as argument. If the list is null, we take the members who are part of new_view.
 */
public void castViewChange(
    View new_view, Digest digest, JoinRsp jr, Collection<Address> newMembers) {
    if (log.isTraceEnabled())
        log.trace(local_addr + ": mcasting view " + new_view + " (" + new_view.size() + " mbrs)\n");

    // Send down a local TMP_VIEW event. This is needed by certain layers (e.g. NAKACK) to
    // compute the correct digest in case a client's next request (e.g. getState()) reaches us
    // *before* our own view change multicast. Check NAKACK's TMP_VIEW handling for details.
    down_prot.up(new Event(Event.TMP_VIEW, new_view));
    down_prot.down(new Event(Event.TMP_VIEW, new_view));

    List<Address> ackMembers = new ArrayList<Address>(new_view.getMembers());
    if (newMembers != null && !newMembers.isEmpty()) ackMembers.removeAll(newMembers);

    Message view_change_msg = new Message(); // bcast to all members
    GmsHeader hdr = new GmsHeader(GmsHeader.VIEW, new_view);
    hdr.my_digest = digest;
    view_change_msg.putHeader(this.id, hdr);

    // If we're the only member the VIEW is broadcast to, simply install the view directly,
    // without sending the VIEW multicast! Otherwise the N-1 other members drop the multicast
    // anyway...
    if (local_addr != null && ackMembers.size() == 1 && ackMembers.get(0).equals(local_addr)) {
        // We need to add the message to the retransmit window (e.g. in NAKACK), so (1) it can
        // be retransmitted and (2) we increment the seqno (otherwise we'd return an incorrect
        // digest)
        down_prot.down(new Event(Event.ADD_TO_XMIT_TABLE, view_change_msg));
        impl.handleViewChange(new_view, digest);
    } else {
        if (!ackMembers.isEmpty()) ack_collector.reset(ackMembers);
        down_prot.down(new Event(Event.MSG, view_change_msg));
        try {
            if (!ackMembers.isEmpty()) {
                ack_collector.waitForAllAcks(view_ack_collection_timeout);
                if (log.isTraceEnabled())
                    log.trace(local_addr + ": received all " + ack_collector.expectedAcks()
                        + " ACKs from members for view " + new_view.getVid());
            }
        } catch (TimeoutException e) {
            if (log_collect_msgs && log.isWarnEnabled()) {
                log.warn(local_addr + ": failed to collect all ACKs (expected="
                    + ack_collector.expectedAcks() + ") for view " + new_view.getViewId()
                    + " after " + view_ack_collection_timeout + "ms, missing ACKs from "
                    + ack_collector.printMissing());
            }
        }
    }

    if (jr != null && (newMembers != null && !newMembers.isEmpty())) {
        ack_collector.reset(new ArrayList<Address>(newMembers));
        for (Address joiner : newMembers) {
            sendJoinResponse(jr, joiner);
        }
        try {
            ack_collector.waitForAllAcks(view_ack_collection_timeout);
            if (log.isTraceEnabled())
                log.trace(local_addr + ": received all ACKs (" + ack_collector.expectedAcks()
                    + ") from joiners for view " + new_view.getVid());
        } catch (TimeoutException e) {
            if (log_collect_msgs && log.isWarnEnabled()) {
                log.warn(local_addr + ": failed to collect all ACKs (expected="
                    + ack_collector.expectedAcks() + ") for unicast view " + new_view
                    + " after " + view_ack_collection_timeout + "ms, missing ACKs from "
                    + ack_collector.printMissing());
            }
        }
    }
}
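/*
 * Both wait phases above follow the same pattern: reset the collector with the expected
 * responders, send, then block until every ack arrives or the timeout fires. A minimal,
 * self-contained sketch of such a collector follows; it is an illustration of the pattern,
 * not JGroups' AckCollector API.
 */
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.TimeoutException;

final class AckCollectorSketch {
    private final Set<String> missing = new HashSet<>();

    synchronized void reset(Collection<String> expected) {
        missing.clear();
        missing.addAll(expected);
    }

    synchronized void ack(String member) {
        if (missing.remove(member) && missing.isEmpty())
            notifyAll(); // the last expected ack just arrived
    }

    synchronized void waitForAllAcks(long timeoutMs) throws InterruptedException, TimeoutException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!missing.isEmpty()) {
            long remaining = deadline - System.currentTimeMillis();
            if (remaining <= 0)
                throw new TimeoutException("missing acks from " + missing);
            wait(remaining);
        }
    }
}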