public void run() {
    for (int i = 1; i <= number_of_msgs; i++) {
        try {
            Message msg = new Message(null, buf);
            if (oob) msg.setFlag(Message.Flag.OOB);
            if (dont_bundle) msg.setFlag(Message.Flag.DONT_BUNDLE);
            if (i > 0 && do_print > 0 && i % do_print == 0)
                System.out.println("-- sent " + i);
            Buffer buffer = writeMessage(msg);
            output_lock.lock(); // need to sync if we have more than 1 sender
            try {
                // msg.writeTo(output);
                output.writeInt(buffer.getLength());
                output.write(buffer.getBuf(), buffer.getOffset(), buffer.getLength());
                // output.flush();
            } finally {
                output_lock.unlock();
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
protected static Message constructMessage(
        Buffer buf, Address recipient, ResponseMode mode, boolean rsvp, DeliverOrder deliverOrder) {
    Message msg = new Message();
    msg.setBuffer(buf);
    encodeDeliverMode(msg, deliverOrder);
    // some issues with the new bundler; put back the DONT_BUNDLE flag
    if (deliverOrder == DeliverOrder.NONE || mode != ResponseMode.GET_NONE)
        msg.setFlag(Message.Flag.DONT_BUNDLE);
    if (rsvp) msg.setFlag(Message.Flag.RSVP);
    if (recipient != null) msg.setDest(recipient);
    return msg;
}
public void sendDiscoveryRequest(String cluster_name, Promise promise, ViewId view_id)
        throws Exception {
    PingData data = null;
    PhysicalAddress physical_addr =
            (PhysicalAddress) down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr));

    if (view_id == null) {
        List<PhysicalAddress> physical_addrs = Arrays.asList(physical_addr);
        data = new PingData(local_addr, null, false, UUID.get(local_addr), physical_addrs);
    }

    PingHeader hdr = new PingHeader(PingHeader.GET_MBRS_REQ, data, cluster_name);
    hdr.view_id = view_id;

    Collection<PhysicalAddress> cluster_members = fetchClusterMembers(cluster_name);
    if (cluster_members == null) {
        Message msg = new Message(null); // multicast msg
        msg.setFlag(Message.OOB);
        msg.putHeader(getId(), hdr);
        sendMcastDiscoveryRequest(msg);
    } else {
        if (cluster_members.isEmpty()) { // if we don't find any members, return immediately
            if (promise != null)
                promise.setResult(null);
        } else {
            for (final Address addr : cluster_members) {
                if (addr.equals(physical_addr)) // no need to send the request to myself
                    continue;
                final Message msg = new Message(addr, null, null);
                msg.setFlag(Message.OOB);
                msg.putHeader(this.id, hdr);
                if (log.isTraceEnabled())
                    log.trace("[FIND_INITIAL_MBRS] sending discovery request to " + msg.getDest());
                if (!sendDiscoveryRequestsInParallel()) {
                    down_prot.down(new Event(Event.MSG, msg));
                } else {
                    timer.execute(new Runnable() {
                        public void run() {
                            try {
                                down_prot.down(new Event(Event.MSG, msg));
                            } catch (Exception ex) {
                                if (log.isErrorEnabled())
                                    log.error("failed sending discovery request to " + addr + ": " + ex);
                            }
                        }
                    });
                }
            }
        }
    }
}
private static Message constructMessage(
        Buffer buf, Address recipient, boolean oob, ResponseMode mode, boolean rsvp) {
    Message msg = new Message();
    msg.setBuffer(buf);
    if (oob) msg.setFlag(Message.OOB);
    if (oob || mode != ResponseMode.GET_NONE) {
        msg.setFlag(Message.DONT_BUNDLE);
        // This is removed since this optimisation is no longer valid. See ISPN-1878
        // msg.setFlag(Message.NO_FC);
    }
    if (rsvp) msg.setFlag(Message.RSVP);
    if (recipient != null) msg.setDest(recipient);
    return msg;
}
public void run() {
    for (int i = 1; i <= number_of_msgs; i++) {
        try {
            Message msg = new Message(destination, buf);
            if (oob) msg.setFlag(Message.Flag.OOB);
            if (dont_bundle) msg.setFlag(Message.Flag.DONT_BUNDLE);
            if (i > 0 && print > 0 && i % print == 0)
                System.out.println("-- sent " + i);
            channel.send(msg);
            if (sleep_time > 0)
                Util.sleep(sleep_time);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
private static void send(
        Channel sender_channel, Address dest, boolean oob, boolean mixed, int num_msgs)
        throws Exception {
    long seqno = 1;
    for (int i = 0; i < num_msgs; i++) {
        Message msg = new Message(dest, null, seqno++);
        if (mixed) {
            if (i % 2 == 0) msg.setFlag(Message.OOB);
        } else if (oob) {
            msg.setFlag(Message.OOB);
        }
        sender_channel.send(msg);
    }
}
private static void encodeDeliverMode(Message request, DeliverOrder deliverOrder) {
    switch (deliverOrder) {
        case TOTAL:
            request.setFlag(Message.Flag.OOB);
            request.clearFlag(Message.Flag.NO_TOTAL_ORDER);
            break;
        case PER_SENDER:
            request.clearFlag(Message.Flag.OOB);
            request.setFlag(Message.Flag.NO_TOTAL_ORDER);
            break;
        case NONE:
            request.setFlag(Message.Flag.OOB, Message.Flag.NO_TOTAL_ORDER);
            break;
    }
}
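// Illustration (not from the original source): the flag combinations produced by
// encodeDeliverMode() above, assuming the JGroups 3.x Flag API; "demoEncodeDeliverMode" is a
// hypothetical helper added here only to make the three cases explicit.
//   TOTAL      -> OOB set,     NO_TOTAL_ORDER cleared  (message participates in total ordering)
//   PER_SENDER -> OOB cleared, NO_TOTAL_ORDER set      (regular per-sender ordering)
//   NONE       -> OOB and NO_TOTAL_ORDER both set      (no ordering guarantees)
private static void demoEncodeDeliverMode() {
    Message m = new Message();
    encodeDeliverMode(m, DeliverOrder.NONE);
    assert m.isFlagSet(Message.Flag.OOB) && m.isFlagSet(Message.Flag.NO_TOTAL_ORDER);
}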
private void sendViewAck(Address dest) {
    Message view_ack = new Message(dest, null, null);
    view_ack.setFlag(Message.OOB);
    GmsHeader tmphdr = new GmsHeader(GmsHeader.VIEW_ACK);
    view_ack.putHeader(this.id, tmphdr);
    down_prot.down(new Event(Event.MSG, view_ack));
}
/**
 * Sends a unicast message and - depending on the options - returns a result.
 *
 * @param msg the message to be sent. The destination needs to be non-null
 * @param opts the options to be used
 * @return T the result
 * @throws Exception If there was a problem sending the request, processing it at the receiver,
 *     or processing it at the sender.
 * @throws TimeoutException If the call didn't succeed within the timeout defined in the options
 *     (if set)
 */
public <T> T sendMessage(Message msg, RequestOptions opts) throws Exception {
    Address dest = msg.getDest();
    if (dest == null)
        throw new IllegalArgumentException("message destination is null, cannot send message");

    if (opts != null) {
        msg.setFlag(opts.getFlags()).setTransientFlag(opts.getTransientFlags());
        if (opts.getScope() > 0) msg.setScope(opts.getScope());
        if (opts.getMode() == ResponseMode.GET_NONE)
            async_unicasts.incrementAndGet();
        else
            sync_unicasts.incrementAndGet();
    }

    UnicastRequest<T> req = new UnicastRequest<T>(msg, corr, dest, opts);
    req.execute();
    if (opts != null && opts.getMode() == ResponseMode.GET_NONE)
        return null;

    Rsp<T> rsp = req.getResult();
    if (rsp.wasSuspected())
        throw new SuspectedException(dest);

    Throwable exception = rsp.getException();
    if (exception != null) {
        if (exception instanceof Error) throw (Error) exception;
        else if (exception instanceof RuntimeException) throw (RuntimeException) exception;
        else if (exception instanceof Exception) throw (Exception) exception;
        else throw new RuntimeException(exception);
    }

    if (rsp.wasUnreachable())
        throw new UnreachableException(dest);
    if (!rsp.wasReceived() && !req.responseReceived())
        throw new TimeoutException("timeout sending message to " + dest);
    return rsp.getValue();
}
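// Usage sketch (not from the original source): one plausible way to call sendMessage() above for a
// synchronous unicast request. Assumes it lives on a JGroups 3.x MessageDispatcher-style class;
// the payload bytes and the 5-second timeout are illustrative values.
public void exampleUnicastCall(Address target, byte[] payload) throws Exception {
    Message msg = new Message(target, null, payload);                     // destination must be non-null
    RequestOptions opts = new RequestOptions(ResponseMode.GET_ALL, 5000); // sync call, 5s timeout
    Object reply = sendMessage(msg, opts);                                // blocks until reply, exception or timeout
    System.out.println("reply from " + target + ": " + reply);
}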
/**
 * Tests sending 1, 2 (OOB) and 3, where they are received in the order 1, 3, 2. Message 3 should
 * not get delivered until message 2 is received (http://jira.jboss.com/jira/browse/JGRP-780)
 */
public void testRegularAndOOBUnicasts() throws Exception {
    DISCARD discard = new DISCARD();
    ProtocolStack stack = a.getProtocolStack();
    stack.insertProtocol(discard, ProtocolStack.BELOW, UNICAST.class, UNICAST2.class);

    Address dest = b.getAddress();
    Message m1 = new Message(dest, null, 1);
    Message m2 = new Message(dest, null, 2);
    m2.setFlag(Message.OOB);
    Message m3 = new Message(dest, null, 3);

    MyReceiver receiver = new MyReceiver("C2");
    b.setReceiver(receiver);
    a.send(m1);
    discard.setDropDownUnicasts(1);
    a.send(m2);
    a.send(m3);

    Collection<Integer> list = receiver.getMsgs();
    int count = 10;
    while (list.size() < 3 && --count > 0) {
        Util.sleep(500); // time for potential retransmission
        sendStableMessages(a, b);
    }
    assert list.size() == 3 : "list is " + list;
    assert list.contains(1) && list.contains(2) && list.contains(3);
}
public void testRegularAndOOBMulticasts() throws Exception {
    DISCARD discard = new DISCARD();
    ProtocolStack stack = a.getProtocolStack();
    stack.insertProtocol(discard, ProtocolStack.BELOW, NAKACK2.class);
    a.setDiscardOwnMessages(true);

    Address dest = null; // send to all
    Message m1 = new Message(dest, null, 1);
    Message m2 = new Message(dest, null, 2);
    m2.setFlag(Message.OOB);
    Message m3 = new Message(dest, null, 3);

    MyReceiver receiver = new MyReceiver("C2");
    b.setReceiver(receiver);
    a.send(m1);
    discard.setDropDownMulticasts(1);
    a.send(m2);
    a.send(m3);

    Util.sleep(500);
    Collection<Integer> list = receiver.getMsgs();
    for (int i = 0; i < 10; i++) {
        System.out.println("list = " + list);
        if (list.size() == 3) break;
        Util.sleep(1000); // give the asynchronous msgs some time to be received
        sendStableMessages(a, b);
    }
    assert list.size() == 3 : "list is " + list;
    assert list.contains(1) && list.contains(2) && list.contains(3);
}
protected void sendStableMessage(Address dest, short conn_id, long low, long high) {
    Message stable_msg = new Message(dest, null, null);
    Unicast2Header hdr = Unicast2Header.createStableHeader(conn_id, low, high);
    stable_msg.putHeader(this.id, hdr);
    stable_msg.setFlag(Message.OOB);
    if (log.isTraceEnabled()) {
        StringBuilder sb = new StringBuilder();
        sb.append(local_addr).append(" --> STABLE(").append(dest).append(": ")
          .append(low).append("-").append(high).append(", conn_id=").append(conn_id).append(")");
        log.trace(sb.toString());
    }
    down_prot.down(new Event(Event.MSG, stable_msg));

    ReceiverEntry entry = recv_table.get(dest);
    NakReceiverWindow win = entry != null ? entry.received_msgs : null;
    if (win != null)
        win.stable(win.getHighestDelivered());
}
/**
 * Multicasts a GET_DIGEST_REQ to all current members and waits for all responses
 * (GET_DIGEST_RSP) or N ms.
 *
 * @return a digest merged from all collected responses, or null if current_mbrs is null
 */
private Digest fetchDigestsFromAllMembersInSubPartition(List<Address> current_mbrs) {
    if (current_mbrs == null) return null;

    GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.GET_DIGEST_REQ);
    Message get_digest_req = new Message();
    get_digest_req.setFlag(Message.OOB);
    get_digest_req.putHeader(gms.getId(), hdr);

    long max_wait_time = gms.merge_timeout > 0 ? gms.merge_timeout / 2 : 2000L;
    digest_collector.reset(current_mbrs);

    // add my own digest first
    Digest digest = (Digest) gms.getDownProtocol().down(Event.GET_DIGEST_EVT);
    digest_collector.add(gms.local_addr, digest);

    gms.getDownProtocol().down(new Event(Event.MSG, get_digest_req));
    digest_collector.waitForAllResponses(max_wait_time);
    if (log.isDebugEnabled()) {
        if (digest_collector.hasAllResponses())
            log.debug(gms.local_addr + ": fetched all digests for " + current_mbrs);
        else
            log.debug(gms.local_addr + ": fetched incomplete digests (after timeout of "
                      + max_wait_time + " ms) for " + current_mbrs);
    }

    Map<Address, Digest> responses = new HashMap<Address, Digest>(digest_collector.getResults());
    MutableDigest retval = new MutableDigest(responses.size());
    for (Digest dig : responses.values()) {
        if (dig != null)
            retval.add(dig);
    }
    return retval;
}
public void sendGetMembersRequest(String cluster_name, Promise promise, boolean return_views_only)
        throws Exception {
    PhysicalAddress physical_addr =
            (PhysicalAddress) down_prot.down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr));
    PingData data =
            new PingData(local_addr, null, false, UUID.get(local_addr), Arrays.asList(physical_addr));
    PingHeader hdr = new PingHeader(PingHeader.GET_MBRS_REQ, data, cluster_name);
    hdr.return_view_only = return_views_only;

    Set<PhysicalAddress> combined_target_members = new HashSet<PhysicalAddress>(initial_hosts);
    combined_target_members.addAll(dynamic_hosts);

    for (final Address addr : combined_target_members) {
        if (addr.equals(physical_addr))
            continue;
        final Message msg = new Message(addr, null, null);
        msg.setFlag(Message.OOB);
        msg.putHeader(this.id, hdr);
        if (log.isTraceEnabled())
            log.trace("[FIND_INITIAL_MBRS] sending PING request to " + msg.getDest());
        timer.execute(new Runnable() {
            public void run() {
                try {
                    down_prot.down(new Event(Event.MSG, msg));
                } catch (Exception ex) {
                    if (log.isErrorEnabled())
                        log.error("failed sending discovery request to " + addr + ": " + ex);
                }
            }
        });
    }
}
/** Tests https://jira.jboss.org/jira/browse/JGRP-1079 for unicast messages */
public void testOOBUnicastMessageLoss() throws Exception {
    MyReceiver receiver = new MySleepingReceiver("C2", 1000);
    b.setReceiver(receiver);
    a.getProtocolStack().getTransport().setOOBRejectionPolicy("discard");

    final int NUM = 10;
    final Address dest = b.getAddress();
    for (int i = 1; i <= NUM; i++) {
        Message msg = new Message(dest, null, i);
        msg.setFlag(Message.OOB);
        a.send(msg);
    }

    Collection<Integer> msgs = receiver.getMsgs();
    for (int i = 0; i < 20; i++) {
        if (msgs.size() == NUM) break;
        Util.sleep(1000);
        // sendStableMessages(c1,c2); // not needed for unicasts !
    }
    assert msgs.size() == NUM
        : "expected " + NUM + " messages but got " + msgs.size() + ", msgs=" + Util.print(msgs);
    for (int i = 1; i <= NUM; i++) {
        assert msgs.contains(i);
    }
}
/**
 * If merge_id is not equal to this.merge_id then discard. Else cast the view/digest to all
 * members of this group.
 */
public void handleMergeView(final MergeData data, final MergeId merge_id) {
    if (!matchMergeId(merge_id)) {
        if (log.isErrorEnabled())
            log.error("merge_ids don't match (or are null); merge view discarded");
        return;
    }

    // only send to our *current* members: if we have A and B being merged (we are B), then we
    // would *not* receive a VIEW_ACK from A because A doesn't see us in the pre-merge view yet
    // and discards the view
    // [JGRP-700] - FLUSH: flushing should span merge
    // we have to send the new view only to current members and we should not wait
    // for view acks from newly merged members
    List<Address> newViewMembers = new Vector<Address>(data.view.getMembers());
    newViewMembers.removeAll(gms.members.getMembers());

    gms.castViewChangeWithDest(data.view, data.digest, null, newViewMembers);
    // if we have flush in stack send ack back to merge coordinator
    if (gms.flushProtocolInStack) {
        Message ack = new Message(data.getSender(), null, null);
        ack.setFlag(Message.OOB);
        GMS.GmsHeader ack_hdr = new GMS.GmsHeader(GMS.GmsHeader.INSTALL_MERGE_VIEW_OK);
        ack.putHeader(gms.getId(), ack_hdr);
        gms.getDownProtocol().down(new Event(Event.MSG, ack));
    }
    cancelMerge(merge_id);
}
/** Tests https://jira.jboss.org/jira/browse/JGRP-1079 */
public void testOOBMessageLoss() throws Exception {
    Util.close(b); // we only need 1 channel
    MyReceiver receiver = new MySleepingReceiver("C1", 1000);
    a.setReceiver(receiver);

    TP transport = a.getProtocolStack().getTransport();
    transport.setOOBRejectionPolicy("discard");

    final int NUM = 10;
    for (int i = 1; i <= NUM; i++) {
        Message msg = new Message(null, null, i);
        msg.setFlag(Message.OOB);
        a.send(msg);
    }

    STABLE stable = (STABLE) a.getProtocolStack().findProtocol(STABLE.class);
    if (stable != null)
        stable.runMessageGarbageCollection();
    Collection<Integer> msgs = receiver.getMsgs();

    for (int i = 0; i < 20; i++) {
        if (msgs.size() == NUM) break;
        Util.sleep(1000);
        sendStableMessages(a, b);
    }

    System.out.println("msgs = " + Util.print(msgs));
    assert msgs.size() == NUM
        : "expected " + NUM + " messages but got " + msgs.size() + ", msgs=" + Util.print(msgs);
    for (int i = 1; i <= NUM; i++) {
        assert msgs.contains(i);
    }
}
/**
 * Sends a MERGE_REQ to all coords and populates a list of MergeData (in merge_rsps). Returns
 * after coords.size() responses have been received, or timeout msecs have elapsed (whichever is
 * first).
 *
 * <p>If a subgroup coordinator rejects the MERGE_REQ (e.g. because of participation in a
 * different merge), <em>that member will be removed from coords !</em>
 *
 * @param coords A map of coordinator addresses and associated membership lists
 * @param new_merge_id The new merge id
 * @param timeout Max number of msecs to wait for the merge responses from the subgroup coords
 */
private boolean getMergeDataFromSubgroupCoordinators(
        Map<Address, Collection<Address>> coords, MergeId new_merge_id, long timeout) {
    boolean gotAllResponses;
    long start = System.currentTimeMillis();
    merge_rsps.reset(coords.keySet());
    if (log.isDebugEnabled())
        log.debug(gms.local_addr + ": sending MERGE_REQ to " + coords.keySet());

    for (Map.Entry<Address, Collection<Address>> entry : coords.entrySet()) {
        Address coord = entry.getKey();
        Collection<Address> mbrs = entry.getValue();
        Message msg = new Message(coord, null, null);
        msg.setFlag(Message.OOB);
        GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.MERGE_REQ, mbrs);
        hdr.mbr = gms.local_addr;
        hdr.merge_id = new_merge_id;
        msg.putHeader(gms.getId(), hdr);
        gms.getDownProtocol().down(new Event(Event.MSG, msg));
    }

    // wait until num_rsps_expected >= num_rsps or timeout elapsed
    merge_rsps.waitForAllResponses(timeout);
    gotAllResponses = merge_rsps.hasAllResponses();
    long stop = System.currentTimeMillis();
    if (log.isDebugEnabled())
        log.debug(gms.local_addr + ": collected " + merge_rsps.size() + " merge response(s) in "
                  + (stop - start) + " ms");
    return gotAllResponses;
}
private void sendRequestForFirstSeqno(Address dest, long seqno_received) {
    Message msg = new Message(dest);
    msg.setFlag(Message.OOB);
    Unicast2Header hdr = Unicast2Header.createSendFirstSeqnoHeader(seqno_received);
    msg.putHeader(this.id, hdr);
    if (log.isTraceEnabled())
        log.trace(local_addr + " --> SEND_FIRST_SEQNO(" + dest + "," + seqno_received + ")");
    down_prot.down(new Event(Event.MSG, msg));
}
protected void sendMessage(JChannel ch, String message) {
    try {
        Message msg = new Message(null, message);
        msg.setFlag(Message.Flag.OOB);
        ch.send(msg);
    } catch (Exception e) {
        e.printStackTrace(System.err);
    }
}
public void retransmit(SeqnoList missing, Address sender) {
    Unicast2Header hdr = Unicast2Header.createXmitReqHeader();
    Message retransmit_msg = new Message(sender, null, missing);
    retransmit_msg.setFlag(Message.OOB);
    if (log.isTraceEnabled())
        log.trace(local_addr + ": sending XMIT_REQ (" + missing + ") to " + sender);
    retransmit_msg.putHeader(this.id, hdr);
    down_prot.down(new Event(Event.MSG, retransmit_msg));
    xmit_reqs_sent.addAndGet(missing.size());
}
protected void sendMergeRejectedResponse(Address sender, MergeId merge_id) {
    Message msg = new Message(sender, null, null);
    msg.setFlag(Message.OOB);
    GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.MERGE_RSP);
    hdr.merge_rejected = true;
    hdr.merge_id = merge_id;
    msg.putHeader(gms.getId(), hdr);
    if (log.isDebugEnabled()) log.debug("merge response=" + hdr);
    gms.getDownProtocol().down(new Event(Event.MSG, msg));
}
/** Send back a response containing view and digest to sender */
private void sendMergeResponse(Address sender, View view, Digest digest, MergeId merge_id) {
    Message msg = new Message(sender, null, null);
    msg.setFlag(Message.OOB);
    GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.MERGE_RSP);
    hdr.merge_id = merge_id;
    hdr.view = view;
    hdr.my_digest = digest;
    msg.putHeader(gms.getId(), hdr);
    if (log.isDebugEnabled()) log.debug(gms.local_addr + ": sending merge response=" + hdr);
    gms.getDownProtocol().down(new Event(Event.MSG, msg));
}
protected void sendRequest(Address dest, Type type, long requestId, Object object) {
    Request req = new Request(type, object, requestId);
    Message msg = new Message(dest, req).putHeader(id, new ExecutorHeader());
    if (bypass_bundling)
        msg.setFlag(Message.Flag.DONT_BUNDLE);
    if (log.isTraceEnabled())
        log.trace("[" + local_addr + "] --> [" + (dest == null ? "ALL" : dest) + "] " + req);
    try {
        down_prot.down(msg);
    } catch (Exception ex) {
        log.error(Util.getMessage("FailedSending") + type + " request: " + ex);
    }
}
private void send(final Address dest, final int num_msgs, final int num_threads, final double oob_prob)
        throws Exception {
    if (num_threads <= 0)
        throw new IllegalArgumentException("number of threads <= 0");
    if (num_msgs % num_threads != 0)
        throw new IllegalArgumentException("number of messages (" + num_msgs
            + ") needs to be divisible by the number of threads (" + num_threads + ")");

    if (num_threads > 1) {
        final int msgs_per_thread = num_msgs / num_threads;
        Thread[] threads = new Thread[num_threads];
        final AtomicInteger counter = new AtomicInteger(0);
        for (int i = 0; i < threads.length; i++) {
            threads[i] = new Thread() {
                public void run() {
                    for (int j = 0; j < msgs_per_thread; j++) {
                        Channel sender = Util.tossWeightedCoin(0.5) ? a : b;
                        boolean oob = Util.tossWeightedCoin(oob_prob);
                        int num = counter.incrementAndGet();
                        Message msg = new Message(dest, null, num);
                        if (oob) msg.setFlag(Message.OOB);
                        try {
                            sender.send(msg);
                        } catch (Exception e) {
                            e.printStackTrace();
                        }
                    }
                }
            };
            threads[i].start();
        }
        for (int i = 0; i < threads.length; i++) {
            threads[i].join(20000);
        }
        return;
    }

    for (int i = 0; i < num_msgs; i++) {
        Channel sender = Util.tossWeightedCoin(0.5) ? a : b;
        boolean oob = Util.tossWeightedCoin(oob_prob);
        Message msg = new Message(dest, null, i);
        if (oob) msg.setFlag(Message.OOB);
        sender.send(msg);
    }
}
protected void sendRequest(
        Address dest, Type type, String lock_name, Owner owner, long timeout, boolean is_trylock) {
    Request req = new Request(type, lock_name, owner, timeout, is_trylock);
    Message msg = new Message(dest, req).putHeader(id, new LockingHeader());
    if (bypass_bundling)
        msg.setFlag(Message.Flag.DONT_BUNDLE);
    if (log.isTraceEnabled())
        log.trace("[" + local_addr + "] --> [" + (dest == null ? "ALL" : dest) + "] " + req);
    try {
        down_prot.down(new Event(Event.MSG, msg));
    } catch (Exception ex) {
        log.error("failed sending " + type + " request: " + ex);
    }
}
protected void sendDiscoveryResponse(
        Address logical_addr, List<PhysicalAddress> physical_addrs, boolean is_server,
        boolean return_view_only, String logical_name, final Address sender) {
    PingData data;
    if (return_view_only) {
        data = new PingData(logical_addr, view, is_server, null, null);
    } else {
        ViewId view_id = view != null ? view.getViewId() : null;
        data = new PingData(logical_addr, null, view_id, is_server, logical_name, physical_addrs);
    }

    final Message rsp_msg = new Message(sender, null, null);
    rsp_msg.setFlag(Message.OOB);

    final PingHeader rsp_hdr = new PingHeader(PingHeader.GET_MBRS_RSP, data);
    rsp_msg.putHeader(this.id, rsp_hdr);

    if (stagger_timeout > 0) {
        int view_size = view != null ? view.size() : 10;
        int rank = Util.getRank(view, local_addr); // returns 0 if view or local_addr are null
        long sleep_time = rank == 0
            ? Util.random(stagger_timeout)
            : stagger_timeout * rank / view_size - (stagger_timeout / view_size);
        timer.schedule(new Runnable() {
            public void run() {
                if (log.isTraceEnabled())
                    log.trace(local_addr + ": received GET_MBRS_REQ from " + sender
                              + ", sending staggered response " + rsp_hdr);
                down_prot.down(new Event(Event.MSG, rsp_msg));
            }
        }, sleep_time, TimeUnit.MILLISECONDS);
        return;
    }

    if (log.isTraceEnabled())
        log.trace("received GET_MBRS_REQ from " + sender + ", sending response " + rsp_hdr);
    down_prot.down(new Event(Event.MSG, rsp_msg));
}
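// Worked example (added for illustration, values hypothetical): with stagger_timeout = 3000 ms and a
// view of 10 members, the coordinator (rank 0) sleeps a random value in [0, 3000) ms, while e.g.
// rank 4 sleeps 3000*4/10 - 3000/10 = 1200 - 300 = 900 ms. Responses are thus spread over the
// stagger window instead of all arriving at the requester in one burst.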
public void testRegularAndOOBUnicasts2() throws Exception {
    DISCARD discard = new DISCARD();
    ProtocolStack stack = c1.getProtocolStack();
    stack.insertProtocol(discard, ProtocolStack.BELOW, UNICAST.class, UNICAST2.class);

    Address dest = c2.getAddress();
    Message m1 = new Message(dest, null, 1);
    Message m2 = new Message(dest, null, 2);
    m2.setFlag(Message.OOB);
    Message m3 = new Message(dest, null, 3);
    m3.setFlag(Message.OOB);
    Message m4 = new Message(dest, null, 4);

    MyReceiver receiver = new MyReceiver("C2");
    c2.setReceiver(receiver);
    c1.send(m1);

    discard.setDropDownUnicasts(1);
    c1.send(m3);

    discard.setDropDownUnicasts(1);
    c1.send(m2);

    c1.send(m4);
    Util.sleep(1000); // sleep some time to receive all messages

    Collection<Integer> list = receiver.getMsgs();
    int count = 10;
    while (list.size() < 4 && --count > 0) {
        Util.sleep(500); // time for potential retransmission
        sendStableMessages(c1, c2);
    }
    log.info("list = " + list);
    assert list.size() == 4 : "list is " + list;
    assert list.contains(1) && list.contains(2) && list.contains(3) && list.contains(4);
}
public <T> NotifyingFuture<T> callRemoteMethodWithFuture(
        Address dest, MethodCall call, RequestOptions options) throws Throwable {
    if (log.isTraceEnabled())
        log.trace("dest=" + dest + ", method_call=" + call + ", options=" + options);

    Object buf =
            req_marshaller != null ? req_marshaller.objectToBuffer(call) : Util.objectToByteBuffer(call);
    Message msg = new Message(dest, null, null);
    if (buf instanceof Buffer)
        msg.setBuffer((Buffer) buf);
    else
        msg.setBuffer((byte[]) buf);

    msg.setFlag(options.getFlags());
    if (options.getScope() > 0)
        msg.setScope(options.getScope());
    return super.sendMessageWithFuture(msg, options);
}
/**
 * Invokes a method in all members contained in dests (or all members if dests is null).
 *
 * @param dests A list of addresses. If null, the method will be invoked on all cluster members
 * @param method_call The method (plus args) to be invoked
 * @param options A collection of call options, e.g. sync versus async, timeout etc
 * @return RspList A list of return values and flags (suspected, not received) per member
 * @since 2.9
 */
public RspList callRemoteMethods(
        Collection<Address> dests, MethodCall method_call, RequestOptions options) {
    if (dests != null && dests.isEmpty()) { // don't send if dest list is empty
        if (log.isTraceEnabled())
            log.trace(new StringBuilder("destination list of ").append(method_call.getName())
                      .append("() is empty: no need to send message"));
        return RspList.EMPTY_RSP_LIST;
    }

    if (log.isTraceEnabled())
        log.trace(new StringBuilder("dests=").append(dests).append(", method_call=")
                  .append(method_call).append(", options=").append(options));

    Object buf;
    try {
        buf = req_marshaller != null
            ? req_marshaller.objectToBuffer(method_call)
            : Util.objectToByteBuffer(method_call);
    } catch (Exception e) {
        // if(log.isErrorEnabled()) log.error("exception", e);
        // we will change this in 3.0 to add the exception to the signature
        // (see http://jira.jboss.com/jira/browse/JGRP-193). The reason for a RTE is that we cannot
        // change the signature in 2.3, otherwise 2.3 would *not* be API compatible to prev releases
        throw new RuntimeException("failure to marshal argument(s)", e);
    }

    Message msg = new Message();
    if (buf instanceof Buffer)
        msg.setBuffer((Buffer) buf);
    else
        msg.setBuffer((byte[]) buf);

    msg.setFlag(options.getFlags());
    if (options.getScope() > 0)
        msg.setScope(options.getScope());

    RspList retval = super.castMessage(dests, msg, options);
    if (log.isTraceEnabled()) log.trace("responses: " + retval);
    return retval;
}
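// Usage sketch (not from the original source): one plausible way to invoke callRemoteMethods() above
// on an RpcDispatcher-style object. The method name "print", its int argument and the 5-second
// timeout are illustrative only.
public void exampleClusterWideCall() {
    MethodCall call = new MethodCall("print", new Object[] {42}, new Class[] {int.class});
    RequestOptions opts = new RequestOptions(ResponseMode.GET_ALL, 5000); // wait for all replies, 5s timeout
    RspList rsps = callRemoteMethods(null, call, opts);                   // null dests = all cluster members
    System.out.println("responses: " + rsps);
}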