static Message createMessage(Address dest, Address src) {
  Message msg = new Message(dest, src, "hello world");
  msg.putHeader(NAKACK_ID, NakAckHeader.createMessageHeader(322649));
  msg.putHeader(UNICAST_ID, UNICAST.UnicastHeader.createDataHeader(465784, (short) 23323, true));
  msg.putHeader(UDP_ID, new TpHeader("DrawDemo"));
  return msg;
}
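// A minimal read-side sketch (assumption, not part of the original source): each
// protocol retrieves only its own header by ID, so the three headers stacked by
// createMessage() above travel independently and are unwrapped one layer at a time.
static void inspectMessage(Message msg) {
  NakAckHeader nak_hdr = (NakAckHeader) msg.getHeader(NAKACK_ID); // seqno 322649
  UNICAST.UnicastHeader ucast_hdr = (UNICAST.UnicastHeader) msg.getHeader(UNICAST_ID);
  TpHeader tp_hdr = (TpHeader) msg.getHeader(UDP_ID); // cluster name "DrawDemo"
  // a header ID that was never put on the message simply returns null
}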
public void sendDiscoveryRequest(String cluster_name, Promise promise, ViewId view_id)
    throws Exception {
  PingData data = null;
  PhysicalAddress physical_addr =
      (PhysicalAddress) down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr));

  if (view_id == null) {
    List<PhysicalAddress> physical_addrs = Arrays.asList(physical_addr);
    data = new PingData(local_addr, null, false, UUID.get(local_addr), physical_addrs);
  }

  PingHeader hdr = new PingHeader(PingHeader.GET_MBRS_REQ, data, cluster_name);
  hdr.view_id = view_id;

  Collection<PhysicalAddress> cluster_members = fetchClusterMembers(cluster_name);
  if (cluster_members == null) {
    Message msg = new Message(null); // multicast msg
    msg.setFlag(Message.OOB);
    msg.putHeader(getId(), hdr);
    sendMcastDiscoveryRequest(msg);
  } else {
    if (cluster_members.isEmpty()) { // if we don't find any members, return immediately
      if (promise != null) promise.setResult(null);
    } else {
      for (final Address addr : cluster_members) {
        if (addr.equals(physical_addr)) // no need to send the request to myself
          continue;
        final Message msg = new Message(addr, null, null);
        msg.setFlag(Message.OOB);
        msg.putHeader(this.id, hdr);
        if (log.isTraceEnabled())
          log.trace("[FIND_INITIAL_MBRS] sending discovery request to " + msg.getDest());
        if (!sendDiscoveryRequestsInParallel()) {
          down_prot.down(new Event(Event.MSG, msg));
        } else {
          timer.execute(
              new Runnable() {
                public void run() {
                  try {
                    down_prot.down(new Event(Event.MSG, msg));
                  } catch (Exception ex) {
                    if (log.isErrorEnabled())
                      log.error("failed sending discovery request to " + addr + ": " + ex);
                  }
                }
              });
        }
      }
    }
  }
}
private void sendViewAck(Address dest) {
  Message view_ack = new Message(dest, null, null);
  view_ack.setFlag(Message.OOB);
  GmsHeader tmphdr = new GmsHeader(GmsHeader.VIEW_ACK);
  view_ack.putHeader(this.id, tmphdr);
  down_prot.down(new Event(Event.MSG, view_ack));
}
/**
 * Multicasts a GET_DIGEST_REQ to all current members and waits until all responses
 * (GET_DIGEST_RSP) have been received, or the timeout has elapsed, whichever comes first.
 *
 * @return a digest aggregated from all collected responses, or null if current_mbrs is null
 */
private Digest fetchDigestsFromAllMembersInSubPartition(List<Address> current_mbrs) {
  if (current_mbrs == null) return null;

  GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.GET_DIGEST_REQ);
  Message get_digest_req = new Message();
  get_digest_req.setFlag(Message.OOB);
  get_digest_req.putHeader(gms.getId(), hdr);

  long max_wait_time = gms.merge_timeout > 0 ? gms.merge_timeout / 2 : 2000L;
  digest_collector.reset(current_mbrs);

  // add my own digest first
  Digest digest = (Digest) gms.getDownProtocol().down(Event.GET_DIGEST_EVT);
  digest_collector.add(gms.local_addr, digest);

  gms.getDownProtocol().down(new Event(Event.MSG, get_digest_req));
  digest_collector.waitForAllResponses(max_wait_time);
  if (log.isDebugEnabled()) {
    if (digest_collector.hasAllResponses())
      log.debug(gms.local_addr + ": fetched all digests for " + current_mbrs);
    else
      log.debug(
          gms.local_addr
              + ": fetched incomplete digests (after timeout of "
              + max_wait_time
              + " ms) for "
              + current_mbrs);
  }
  Map<Address, Digest> responses = new HashMap<Address, Digest>(digest_collector.getResults());
  MutableDigest retval = new MutableDigest(responses.size());
  for (Digest dig : responses.values()) {
    if (dig != null) retval.add(dig);
  }
  return retval;
}
/**
 * Sends the new view and digest to all subgroup coordinators in coords. Each coord will in turn
 *
 * <ol>
 *   <li>broadcast the new view and digest to all the members of its subgroup (MergeView)
 *   <li>on reception of the view, if it is a MergeView, each member will set the digest and
 *       install the new view
 * </ol>
 */
private void sendMergeView(
    Collection<Address> coords, MergeData combined_merge_data, MergeId merge_id) {
  if (coords == null || combined_merge_data == null) return;

  View view = combined_merge_data.view;
  Digest digest = combined_merge_data.digest;
  if (view == null || digest == null) {
    if (log.isErrorEnabled())
      log.error("view or digest is null, cannot send consolidated merge view/digest");
    return;
  }

  if (log.isDebugEnabled())
    log.debug(
        gms.local_addr + ": sending merge view " + view.getVid() + " to coordinators " + coords);

  gms.merge_ack_collector.reset(coords);
  int size = gms.merge_ack_collector.size();
  long timeout = gms.view_ack_collection_timeout;
  long start = System.currentTimeMillis();

  for (Address coord : coords) {
    Message msg = new Message(coord, null, null);
    GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.INSTALL_MERGE_VIEW);
    hdr.view = view;
    hdr.my_digest = digest;
    hdr.merge_id = merge_id;
    msg.putHeader(gms.getId(), hdr);
    gms.getDownProtocol().down(new Event(Event.MSG, msg));
  }

  // [JGRP-700] - FLUSH: flushing should span merge.
  // If flush is in the stack, wait for acks from the separated island coordinators
  if (gms.flushProtocolInStack) {
    try {
      gms.merge_ack_collector.waitForAllAcks(timeout);
      long stop = System.currentTimeMillis();
      if (log.isTraceEnabled())
        log.trace(
            "received all ACKs ("
                + size
                + ") for merge view "
                + view
                + " in "
                + (stop - start)
                + "ms");
    } catch (TimeoutException e) {
      log.warn(
          gms.local_addr
              + ": failed to collect all ACKs for merge ("
              + size
              + ") for view "
              + view
              + " after "
              + timeout
              + "ms, missing ACKs from "
              + gms.merge_ack_collector.printMissing());
    }
  }
}
/**
 * We need to resend our first message with our conn_id.
 *
 * @param sender the member requesting the resend
 * @param seqno resend the non-null messages in the range [lowest .. seqno]
 */
protected void handleResendingOfFirstMessage(Address sender, long seqno) {
  if (log.isTraceEnabled())
    log.trace(local_addr + " <-- SEND_FIRST_SEQNO(" + sender + "," + seqno + ")");
  SenderEntry entry = send_table.get(sender);
  Table<Message> win = entry != null ? entry.sent_msgs : null;
  if (win == null) {
    if (log.isErrorEnabled())
      log.error(local_addr + ": sender window for " + sender + " not found");
    return;
  }

  boolean first_sent = false;
  for (long i = win.getLow() + 1; i <= seqno; i++) {
    Message rsp = win.get(i);
    if (rsp == null) continue;
    if (first_sent) {
      down_prot.down(new Event(Event.MSG, rsp));
    } else {
      first_sent = true;
      // We need to copy the Unicast2Header and put it back into the message because
      // Message.copy() doesn't copy the headers, and we'd otherwise modify the original
      // message in the sender retransmission window
      // (https://jira.jboss.org/jira/browse/JGRP-965)
      Message copy = rsp.copy();
      Unicast2Header hdr = (Unicast2Header) copy.getHeader(this.id);
      Unicast2Header newhdr = hdr.copy();
      newhdr.first = true;
      copy.putHeader(this.id, newhdr);
      down_prot.down(new Event(Event.MSG, copy));
    }
  }
}
protected void sendMergeRejectionMessage(Address dest) {
  GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.MERGE_RSP).setMergeRejected(true);
  Message msg = new Message(dest).setFlag(Message.Flag.OOB).putHeader(GMS_ID, hdr);
  if (this.authenticate_coord) msg.putHeader(this.id, new AuthHeader(this.auth_token));
  log.debug("merge response=%s", hdr);
  down_prot.down(msg);
}
protected void sendStableMessage(Address dest, short conn_id, long low, long high) {
  Message stable_msg = new Message(dest, null, null);
  Unicast2Header hdr = Unicast2Header.createStableHeader(conn_id, low, high);
  stable_msg.putHeader(this.id, hdr);
  stable_msg.setFlag(Message.OOB);
  if (log.isTraceEnabled()) {
    StringBuilder sb = new StringBuilder();
    sb.append(local_addr)
        .append(" --> STABLE(")
        .append(dest)
        .append(": ")
        .append(low)
        .append("-")
        .append(high)
        .append(", conn_id=")
        .append(conn_id)
        .append(")");
    log.trace(sb.toString());
  }
  down_prot.down(new Event(Event.MSG, stable_msg));

  ReceiverEntry entry = recv_table.get(dest);
  NakReceiverWindow win = entry != null ? entry.received_msgs : null;
  if (win != null) win.stable(win.getHighestDelivered());
}
public void retransmit(long first_seqno, long last_seqno, Address sender) {
  if (last_seqno < first_seqno) return;
  Unicast2Header hdr = Unicast2Header.createXmitReqHeader(first_seqno, last_seqno);
  Message xmit_req = new Message(sender, null, null);
  xmit_req.putHeader(this.id, hdr);
  down_prot.down(new Event(Event.MSG, xmit_req));
}
/**
 * Send all fragments as separate messages (with same ID !). Example:
 *
 * <pre>
 * Given the generated ID is 2344, number of fragments=3, message {dst,src,buf}
 * would be fragmented into:
 *
 * [2344,3,0]{dst,src,buf1},
 * [2344,3,1]{dst,src,buf2} and
 * [2344,3,2]{dst,src,buf3}
 * </pre>
 */
private void fragment(Message msg) {
  try {
    byte[] buffer = msg.getRawBuffer();
    List<Range> fragments = Util.computeFragOffsets(msg.getOffset(), msg.getLength(), frag_size);
    int num_frags = fragments.size();
    num_sent_frags.addAndGet(num_frags);

    if (log.isTraceEnabled()) {
      Address dest = msg.getDest();
      StringBuilder sb = new StringBuilder("fragmenting packet to ");
      sb.append(dest != null ? dest.toString() : "<all members>")
          .append(" (size=")
          .append(buffer.length);
      sb.append(") into ")
          .append(num_frags)
          .append(" fragment(s) [frag_size=")
          .append(frag_size)
          .append(']');
      log.trace(sb.toString());
    }

    long frag_id = getNextId(); // used as a seqno
    for (int i = 0; i < fragments.size(); i++) {
      Range r = fragments.get(i);
      // don't copy the buffer, only src, dest and headers. Only copy the headers one time !
      Message frag_msg = msg.copy(false, i == 0);
      frag_msg.setBuffer(buffer, (int) r.low, (int) r.high);
      FragHeader hdr = new FragHeader(frag_id, i, num_frags);
      frag_msg.putHeader(this.id, hdr);
      down_prot.down(new Event(Event.MSG, frag_msg));
    }
  } catch (Exception e) {
    if (log.isErrorEnabled()) log.error("fragmentation failure", e);
  }
}
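// A minimal sketch (assumption, not from the original source) of what
// Util.computeFragOffsets() yields: one Range per fragment, where r.low is the offset
// into the original buffer and r.high is the fragment length, matching the
// setBuffer(buffer, (int) r.low, (int) r.high) call in fragment() above.
static void printFragRanges() {
  int frag_size = 4000;
  List<Range> fragments = Util.computeFragOffsets(0, 10000, frag_size);
  for (Range r : fragments)
    System.out.println("offset=" + r.low + ", length=" + r.high);
  // expected: offset=0 length=4000, offset=4000 length=4000, offset=8000 length=2000
}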
public void up(MessageBatch batch) {
  for (Message msg : batch) {
    DaisyHeader hdr = (DaisyHeader) msg.getHeader(getId());
    if (hdr != null) {
      // 1. forward the message to the next in line if ttl > 0
      short ttl = hdr.getTTL();
      if (log.isTraceEnabled())
        log.trace(local_addr + ": received message from " + msg.getSrc() + " with ttl=" + ttl);
      if (--ttl > 0) {
        Message copy = msg.copy(true);
        copy.setDest(next);
        copy.putHeader(getId(), new DaisyHeader(ttl));
        msgs_forwarded++;
        if (log.isTraceEnabled())
          log.trace(local_addr + ": forwarding message to " + next + " with ttl=" + ttl);
        down_prot.down(new Event(Event.MSG, copy));
      }
      // 2. pass up
      msg.setDest(null);
    }
  }
  if (!batch.isEmpty()) up_prot.up(batch);
}
/**
 * Sends a MERGE_REQ to all coords and populates a list of MergeData (in merge_rsps). Returns
 * after coords.size() responses have been received, or timeout msecs have elapsed (whichever is
 * first).
 *
 * <p>If a subgroup coordinator rejects the MERGE_REQ (e.g. because of participation in a
 * different merge), <em>that member will be removed from coords !</em>
 *
 * @param coords A map of coordinator addresses and associated membership lists
 * @param new_merge_id The new merge id
 * @param timeout Max number of msecs to wait for the merge responses from the subgroup coords
 */
private boolean getMergeDataFromSubgroupCoordinators(
    Map<Address, Collection<Address>> coords, MergeId new_merge_id, long timeout) {
  boolean gotAllResponses;
  long start = System.currentTimeMillis();
  merge_rsps.reset(coords.keySet());
  if (log.isDebugEnabled())
    log.debug(gms.local_addr + ": sending MERGE_REQ to " + coords.keySet());

  for (Map.Entry<Address, Collection<Address>> entry : coords.entrySet()) {
    Address coord = entry.getKey();
    Collection<Address> mbrs = entry.getValue();
    Message msg = new Message(coord, null, null);
    msg.setFlag(Message.OOB);
    GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.MERGE_REQ, mbrs);
    hdr.mbr = gms.local_addr;
    hdr.merge_id = new_merge_id;
    msg.putHeader(gms.getId(), hdr);
    gms.getDownProtocol().down(new Event(Event.MSG, msg));
  }

  // wait until num_rsps_expected >= num_rsps or timeout elapsed
  merge_rsps.waitForAllResponses(timeout);
  gotAllResponses = merge_rsps.hasAllResponses();
  long stop = System.currentTimeMillis();
  if (log.isDebugEnabled())
    log.debug(
        gms.local_addr
            + ": collected "
            + merge_rsps.size()
            + " merge response(s) in "
            + (stop - start)
            + " ms");
  return gotAllResponses;
}
/**
 * If merge_id is not equal to this.merge_id then discard. Else cast the view/digest to all
 * members of this group.
 */
public void handleMergeView(final MergeData data, final MergeId merge_id) {
  if (!matchMergeId(merge_id)) {
    if (log.isErrorEnabled())
      log.error("merge_ids don't match (or are null); merge view discarded");
    return;
  }

  // Only send to our *current* members: if A and B are being merged (and we are B), we would
  // *not* receive a VIEW_ACK from A, because A doesn't see us in the pre-merge view yet and
  // discards the view.
  // [JGRP-700] - FLUSH: flushing should span merge. We have to send the new view only to
  // current members, and we should not wait for view acks from newly merged members.
  List<Address> newViewMembers = new Vector<Address>(data.view.getMembers());
  newViewMembers.removeAll(gms.members.getMembers());

  gms.castViewChangeWithDest(data.view, data.digest, null, newViewMembers);

  // if we have flush in the stack, send an ack back to the merge coordinator
  if (gms.flushProtocolInStack) {
    Message ack = new Message(data.getSender(), null, null);
    ack.setFlag(Message.OOB);
    GMS.GmsHeader ack_hdr = new GMS.GmsHeader(GMS.GmsHeader.INSTALL_MERGE_VIEW_OK);
    ack.putHeader(gms.getId(), ack_hdr);
    gms.getDownProtocol().down(new Event(Event.MSG, ack));
  }
  cancelMerge(merge_id);
}
public void sendGetMembersRequest(String cluster_name, Promise promise, boolean return_views_only)
    throws Exception {
  PhysicalAddress physical_addr =
      (PhysicalAddress) down_prot.down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr));
  PingData data =
      new PingData(local_addr, null, false, UUID.get(local_addr), Arrays.asList(physical_addr));
  PingHeader hdr = new PingHeader(PingHeader.GET_MBRS_REQ, data, cluster_name);
  hdr.return_view_only = return_views_only;

  Set<PhysicalAddress> combined_target_members = new HashSet<PhysicalAddress>(initial_hosts);
  combined_target_members.addAll(dynamic_hosts);

  for (final Address addr : combined_target_members) {
    if (addr.equals(physical_addr)) continue;
    final Message msg = new Message(addr, null, null);
    msg.setFlag(Message.OOB);
    msg.putHeader(this.id, hdr);
    if (log.isTraceEnabled())
      log.trace("[FIND_INITIAL_MBRS] sending PING request to " + msg.getDest());
    timer.execute(
        new Runnable() {
          public void run() {
            try {
              down_prot.down(new Event(Event.MSG, msg));
            } catch (Exception ex) {
              if (log.isErrorEnabled())
                log.error("failed sending discovery request to " + addr + ": " + ex);
            }
          }
        });
  }
}
public Object up(Event evt) {
  switch (evt.getType()) {
    case Event.MSG:
      Message msg = (Message) evt.getArg();
      DaisyHeader hdr = (DaisyHeader) msg.getHeader(getId());
      if (hdr == null) break;

      // 1. forward the message to the next in line if ttl > 0
      short ttl = hdr.getTTL();
      if (log.isTraceEnabled())
        log.trace(local_addr + ": received message from " + msg.getSrc() + " with ttl=" + ttl);
      if (--ttl > 0) {
        Message copy = msg.copy(true);
        copy.setDest(next);
        copy.putHeader(getId(), new DaisyHeader(ttl));
        msgs_forwarded++;
        if (log.isTraceEnabled())
          log.trace(local_addr + ": forwarding message to " + next + " with ttl=" + ttl);
        down_prot.down(new Event(Event.MSG, copy));
      }

      // 2. pass up
      msg.setDest(null);
      break;
  }
  return up_prot.up(evt);
}
protected void sendMergeRejectedResponse(Address sender, MergeId merge_id) {
  Message msg = new Message(sender).setFlag(Message.Flag.OOB, Message.Flag.INTERNAL);
  GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.MERGE_RSP);
  hdr.merge_rejected = true;
  hdr.merge_id = merge_id;
  msg.putHeader(gms.getId(), hdr);
  gms.getDownProtocol().down(new Event(Event.MSG, msg));
}
public Object down(Message msg) {
  GMS.GmsHeader hdr = getGMSHeader(msg);
  if (hdr != null && needsAuthentication(hdr)) {
    // we found a join request message - now add an AUTH header
    msg.putHeader(this.id, new AuthHeader(this.auth_token));
  }
  return down_prot.down(msg);
}
protected void sendMergeRejectionMessage(Address dest) {
  Message msg = new Message(dest).setFlag(Message.Flag.OOB);
  GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.MERGE_RSP);
  hdr.setMergeRejected(true);
  msg.putHeader(gms_id, hdr);
  if (log.isDebugEnabled()) log.debug("merge response=" + hdr);
  down_prot.down(new Event(Event.MSG, msg));
}
/**
 * Fetches the digests from all members and installs them again. Used only for diagnosis and
 * support; don't use this otherwise !
 */
void fixDigests() {
  Digest digest = fetchDigestsFromAllMembersInSubPartition(gms.view.getMembers());
  Message msg = new Message();
  GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.INSTALL_DIGEST);
  hdr.my_digest = digest;
  msg.putHeader(gms.getId(), hdr);
  gms.getDownProtocol().down(new Event(Event.MSG, msg));
}
protected void sendRequestForFirstSeqno(Address dest, long seqno_received) {
  Message msg = new Message(dest).setFlag(Message.Flag.OOB, Message.Flag.INTERNAL);
  UnicastHeader hdr = UnicastHeader.createSendFirstSeqnoHeader(seqno_received);
  msg.putHeader(this.id, hdr);
  if (log.isTraceEnabled())
    log.trace(local_addr + " --> SEND_FIRST_SEQNO(" + dest + "," + seqno_received + ")");
  down_prot.down(new Event(Event.MSG, msg));
}
/**
 * Copies a message. Copies only headers with IDs >= starting_id, or IDs which are in the
 * copy_only_ids list.
 *
 * @param copy_buffer whether to copy the message buffer as well
 * @param starting_id copy all headers with an ID greater than or equal to this ID
 * @param copy_only_ids additional header IDs to copy, even if below starting_id
 * @return the copied message
 */
public Message copy(boolean copy_buffer, short starting_id, short... copy_only_ids) {
  Message retval = copy(copy_buffer, false);
  for (Map.Entry<Short, Header> entry : getHeaders().entrySet()) {
    short id = entry.getKey();
    if (id >= starting_id || containsId(id, copy_only_ids))
      retval.putHeader(id, entry.getValue());
  }
  return retval;
}
/** Send back a response containing view and digest to sender */
private void sendMergeResponse(Address sender, View view, Digest digest, MergeId merge_id) {
  Message msg = new Message(sender).setFlag(Message.Flag.OOB, Message.Flag.INTERNAL);
  GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.MERGE_RSP);
  hdr.merge_id = merge_id;
  hdr.view = view;
  hdr.my_digest = digest;
  msg.putHeader(gms.getId(), hdr);
  if (log.isTraceEnabled()) log.trace(gms.local_addr + ": sending merge response=" + hdr);
  gms.getDownProtocol().down(new Event(Event.MSG, msg));
}
public void retransmit(SeqnoList missing, Address sender) {
  Unicast2Header hdr = Unicast2Header.createXmitReqHeader();
  Message retransmit_msg = new Message(sender, null, missing);
  retransmit_msg.setFlag(Message.OOB);
  if (log.isTraceEnabled())
    log.trace(local_addr + ": sending XMIT_REQ (" + missing + ") to " + sender);
  retransmit_msg.putHeader(this.id, hdr);
  down_prot.down(new Event(Event.MSG, retransmit_msg));
  xmit_reqs_sent.addAndGet(missing.size());
}
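// A minimal usage sketch (assumption, not from the original source): building the
// SeqnoList passed to retransmit() above, requesting seqno 5 and the inclusive range
// 7-9 that the receiver window reported as missing.
protected void requestMissing(Address sender) {
  SeqnoList missing = new SeqnoList();
  missing.add(5); // a single missing seqno
  missing.add(7, 9); // an inclusive range of missing seqnos
  retransmit(missing, sender);
}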
protected void sendMergeRejectedResponse(Address sender, MergeId merge_id) {
  Message msg = new Message(sender, null, null);
  msg.setFlag(Message.OOB);
  GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.MERGE_RSP);
  hdr.merge_rejected = true;
  hdr.merge_id = merge_id;
  msg.putHeader(gms.getId(), hdr);
  if (log.isDebugEnabled()) log.debug("merge response=" + hdr);
  gms.getDownProtocol().down(new Event(Event.MSG, msg));
}
/**
 * Doesn't copy any headers except for those with an ID >= starting_id.
 *
 * @param copy_buffer whether to copy the message buffer as well
 * @param starting_id copy only headers with an ID greater than or equal to this ID
 * @return A message with headers whose ID are >= starting_id
 */
public Message copy(boolean copy_buffer, short starting_id) {
  Message retval = copy(copy_buffer, false);
  if (starting_id > 0) {
    for (Map.Entry<Short, Header> entry : getHeaders().entrySet()) {
      short id = entry.getKey();
      if (id >= starting_id) retval.putHeader(id, entry.getValue());
    }
  }
  return retval;
}
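// A minimal usage sketch (assumption, not from the original source): a forwarding
// protocol relays a message to the next hop, stripping the headers of all protocols
// below itself but keeping its own header via the copy_only_ids varargs of the
// three-argument copy() overload above.
protected void forward(Message msg, Address next_hop) {
  Message copy = msg.copy(true, (short) (this.id + 1), this.id);
  copy.setDest(next_hop);
  down_prot.down(new Event(Event.MSG, copy));
}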
protected void sendJoinRejectionMessage(Address dest, String error_msg) {
  if (dest == null) return;

  JoinRsp joinRes = new JoinRsp(error_msg); // specify the error message on the JoinRsp
  Message msg =
      new Message(dest)
          .putHeader(GMS_ID, new GMS.GmsHeader(GMS.GmsHeader.JOIN_RSP))
          .setBuffer(GMS.marshal(joinRes));
  if (this.authenticate_coord) msg.putHeader(this.id, new AuthHeader(this.auth_token));
  down_prot.down(msg);
}
private void sendMergeCancelledMessage(Collection<Address> coords, MergeId merge_id) {
  if (coords == null || merge_id == null) return;

  for (Address coord : coords) {
    Message msg = new Message(coord);
    // msg.setFlag(Message.Flag.OOB);
    GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.CANCEL_MERGE);
    hdr.merge_id = merge_id;
    msg.putHeader(gms.getId(), hdr);
    gms.getDownProtocol().down(new Event(Event.MSG, msg));
  }
}
@Override
public Message nextMessage(Address address, SaslHeader header) throws SaslException {
  Message message = new Message(address).setFlag(Message.Flag.OOB);
  byte[] challenge = server.evaluateResponse(header.getPayload());
  if (server.isComplete()) {
    latch.countDown();
  }
  if (challenge != null) {
    return message.putHeader(SASL.SASL_ID, new SaslHeader(Type.RESPONSE, challenge));
  } else {
    return null;
  }
}
/**
 * An event is to be sent down the stack. The layer may want to examine its type and perform some
 * action on it, depending on the event's type. If the event is a message MSG, then the layer may
 * need to add a header to it (or do nothing at all) before sending it down the stack using
 * <code>down_prot.down()</code>. In case of a GET_ADDRESS event (which tries to retrieve the
 * stack's address from one of the bottom layers), the layer may need to send a new response
 * event back up the stack using <code>up_prot.up()</code>.
 */
public Object down(Event evt) {
  GMS.GmsHeader hdr = getGMSHeader(evt);
  if (hdr != null && needsAuthentication(hdr)) {
    // we found a join request message - now add an AUTH header
    Message msg = (Message) evt.getArg();
    AuthHeader authHeader = new AuthHeader(this.auth_token);
    msg.putHeader(this.id, authHeader);
  }
  if (evt.getType() == Event.SET_LOCAL_ADDRESS) local_addr = (Address) evt.getArg();
  return down_prot.down(evt);
}
private void sendMergeCancelledMessage(Collection<Address> coords, MergeId merge_id) {
  if (coords == null || merge_id == null) return;

  for (Address coord : coords) {
    Message msg = new Message(coord, null, null);
    // msg.setFlag(Message.OOB);
    GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.CANCEL_MERGE);
    hdr.merge_id = merge_id;
    msg.putHeader(gms.getId(), hdr);
    if (log.isDebugEnabled()) log.debug(gms.local_addr + ": sending cancel merge to " + coord);
    gms.getDownProtocol().down(new Event(Event.MSG, msg));
  }
}