protected String printMessageList(List<Message> list) {
    StringBuilder sb = new StringBuilder();
    int size = list.size();
    Message first = size > 0 ? list.get(0) : null, second = size > 1 ? list.get(size - 1) : first;
    UnicastHeader hdr;
    if (first != null) {
        hdr = (UnicastHeader) first.getHeader(id);
        if (hdr != null)
            sb.append("#" + hdr.seqno);
    }
    if (second != null) {
        hdr = (UnicastHeader) second.getHeader(id);
        if (hdr != null)
            sb.append(" - #" + hdr.seqno);
    }
    return sb.toString();
}
public void up(MessageBatch batch) {
    // Sort fork messages by fork-stack-id
    Map<String, List<Message>> map = new HashMap<>();
    for (Message msg : batch) {
        ForkHeader hdr = (ForkHeader) msg.getHeader(id);
        if (hdr != null) {
            batch.remove(msg);
            List<Message> list = map.get(hdr.fork_stack_id);
            if (list == null) {
                list = new ArrayList<>();
                map.put(hdr.fork_stack_id, list);
            }
            list.add(msg);
        }
    }

    // Now pass fork messages up, batched by fork-stack-id
    for (Map.Entry<String, List<Message>> entry : map.entrySet()) {
        String fork_stack_id = entry.getKey();
        List<Message> list = entry.getValue();
        Protocol bottom_prot = get(fork_stack_id);
        if (bottom_prot == null)
            continue;
        MessageBatch mb = new MessageBatch(batch.dest(), batch.sender(), batch.clusterName(), batch.multicast(), list);
        try {
            bottom_prot.up(mb);
        } catch (Throwable t) {
            log.error(Util.getMessage("FailedPassingUpBatch"), t);
        }
    }

    if (!batch.isEmpty())
        up_prot.up(batch);
}
public Object up(Event evt) {
    switch (evt.getType()) {
        case Event.MSG:
            Message msg = (Message) evt.getArg();
            ForkHeader hdr = (ForkHeader) msg.getHeader(id);
            if (hdr == null)
                break;
            if (hdr.fork_stack_id == null)
                throw new IllegalArgumentException("header has a null fork_stack_id");
            Protocol bottom_prot = get(hdr.fork_stack_id);
            return bottom_prot != null ? bottom_prot.up(evt)
                : this.unknownForkHandler.handleUnknownForkStack(msg, hdr.fork_stack_id);
        case Event.VIEW_CHANGE:
            for (Protocol bottom : fork_stacks.values())
                bottom.up(evt);
            break;
        case Event.STATE_TRANSFER_OUTPUTSTREAM:
            if (!process_state_events)
                break;
            getStateFromMainAndForkChannels(evt);
            return null;
        case Event.STATE_TRANSFER_INPUTSTREAM:
            if (!process_state_events)
                break;
            setStateInMainAndForkChannels((InputStream) evt.getArg());
            return null;
    }
    return up_prot.up(evt);
}
public void up(MessageBatch batch) {
    if (batch.dest() == null) { // not a unicast batch
        up_prot.up(batch);
        return;
    }

    int size = batch.size();
    Map<Short, List<Message>> msgs = new TreeMap<Short, List<Message>>(); // map of messages, keyed by conn-id
    for (Message msg : batch) {
        if (msg == null || msg.isFlagSet(Message.Flag.NO_RELIABILITY))
            continue;
        UnicastHeader hdr = (UnicastHeader) msg.getHeader(id);
        if (hdr == null)
            continue;
        batch.remove(msg); // remove the message from the batch, so it won't be passed up the stack

        if (hdr.type != UnicastHeader.DATA) {
            try {
                handleUpEvent(msg.getSrc(), hdr);
            } catch (Throwable t) { // we cannot let an exception terminate the processing of this batch
                log.error(local_addr + ": failed handling event", t);
            }
            continue;
        }

        List<Message> list = msgs.get(hdr.conn_id);
        if (list == null)
            msgs.put(hdr.conn_id, list = new ArrayList<Message>(size));
        list.add(msg);
    }

    // process msgs
    if (!msgs.isEmpty())
        handleBatchReceived(batch.sender(), msgs);
    if (!batch.isEmpty())
        up_prot.up(batch);
}
/**
 * If the event is a message and it is fragmented, re-assemble the fragments into the original
 * message and pass it up the stack.
 */
public Object up(Event evt) {
    switch (evt.getType()) {
        case Event.MSG:
            Message msg = (Message) evt.getArg();
            FragHeader hdr = (FragHeader) msg.getHeader(this.id);
            if (hdr != null) { // needs to be defragmented
                unfragment(msg, hdr); // Unfragment and possibly pass up
                return null;
            } else {
                num_received_msgs.incrementAndGet();
            }
            break;
        case Event.VIEW_CHANGE:
            handleViewChange((View) evt.getArg());
            break;
        case Event.CONFIG:
            Object ret = up_prot.up(evt);
            if (log.isDebugEnabled())
                log.debug("received CONFIG event: " + evt.getArg());
            handleConfigEvent((Map<String, Object>) evt.getArg());
            return ret;
    }
    return up_prot.up(evt); // Pass up to the layer above us by default
}
public void up(MessageBatch batch) {
    for (Message msg : batch) {
        DaisyHeader hdr = (DaisyHeader) msg.getHeader(getId());
        if (hdr != null) {
            // 1. forward the message to the next in line if ttl > 0
            short ttl = hdr.getTTL();
            if (log.isTraceEnabled())
                log.trace(local_addr + ": received message from " + msg.getSrc() + " with ttl=" + ttl);
            if (--ttl > 0) {
                Message copy = msg.copy(true);
                copy.setDest(next);
                copy.putHeader(getId(), new DaisyHeader(ttl));
                msgs_forwarded++;
                if (log.isTraceEnabled())
                    log.trace(local_addr + ": forwarding message to " + next + " with ttl=" + ttl);
                down_prot.down(new Event(Event.MSG, copy));
            }

            // 2. Pass up
            msg.setDest(null);
        }
    }
    if (!batch.isEmpty())
        up_prot.up(batch);
}
public Object up(Event evt) {
    switch (evt.getType()) {
        case Event.MSG:
            Message msg = (Message) evt.getArg();
            DaisyHeader hdr = (DaisyHeader) msg.getHeader(getId());
            if (hdr == null)
                break;

            // 1. forward the message to the next in line if ttl > 0
            short ttl = hdr.getTTL();
            if (log.isTraceEnabled())
                log.trace(local_addr + ": received message from " + msg.getSrc() + " with ttl=" + ttl);
            if (--ttl > 0) {
                Message copy = msg.copy(true);
                copy.setDest(next);
                copy.putHeader(getId(), new DaisyHeader(ttl));
                msgs_forwarded++;
                if (log.isTraceEnabled())
                    log.trace(local_addr + ": forwarding message to " + next + " with ttl=" + ttl);
                down_prot.down(new Event(Event.MSG, copy));
            }

            // 2. Pass up
            msg.setDest(null);
            break;
    }
    return up_prot.up(evt);
}
/**
 * We need to resend our first message with our conn_id
 *
 * @param sender
 * @param seqno Resend the non-null messages in the range [lowest .. seqno]
 */
protected void handleResendingOfFirstMessage(Address sender, long seqno) {
    if (log.isTraceEnabled())
        log.trace(local_addr + " <-- SEND_FIRST_SEQNO(" + sender + "," + seqno + ")");
    SenderEntry entry = send_table.get(sender);
    Table<Message> win = entry != null ? entry.sent_msgs : null;
    if (win == null) {
        if (log.isErrorEnabled())
            log.error(local_addr + ": sender window for " + sender + " not found");
        return;
    }

    boolean first_sent = false;
    for (long i = win.getLow() + 1; i <= seqno; i++) {
        Message rsp = win.get(i);
        if (rsp == null)
            continue;
        if (first_sent) {
            down_prot.down(new Event(Event.MSG, rsp));
        } else {
            first_sent = true;
            // We need to copy the Unicast2Header and put it back into the message because Message.copy()
            // copies the headers only by reference, so modifying the header in place would also modify the
            // original message in the sender retransmission window (https://jira.jboss.org/jira/browse/JGRP-965)
            Message copy = rsp.copy();
            Unicast2Header hdr = (Unicast2Header) copy.getHeader(this.id);
            Unicast2Header newhdr = hdr.copy();
            newhdr.first = true;
            copy.putHeader(this.id, newhdr);
            down_prot.down(new Event(Event.MSG, copy));
        }
    }
}
public Object up(Event evt) {
    switch (evt.getType()) {
        case Event.MSG:
            Message msg = (Message) evt.getArg();
            LockingHeader hdr = (LockingHeader) msg.getHeader(id);
            if (hdr == null)
                break;

            Request req = (Request) msg.getObject();
            if (log.isTraceEnabled())
                log.trace("[" + local_addr + "] <-- [" + msg.getSrc() + "] " + req);
            switch (req.type) {
                case GRANT_LOCK:
                case RELEASE_LOCK:
                    handleLockRequest(req);
                    break;
                case LOCK_GRANTED:
                    handleLockGrantedResponse(req.lock_name, req.owner, msg.getSrc());
                    break;
                case LOCK_DENIED:
                    handleLockDeniedResponse(req.lock_name, req.owner);
                    break;
                case CREATE_LOCK:
                    handleCreateLockRequest(req.lock_name, req.owner);
                    break;
                case DELETE_LOCK:
                    handleDeleteLockRequest(req.lock_name);
                    break;
                case COND_SIG:
                case COND_SIG_ALL:
                    handleSignalRequest(req);
                    break;
                case LOCK_AWAIT:
                    handleAwaitRequest(req.lock_name, req.owner);
                    handleLockRequest(req);
                    break;
                case DELETE_LOCK_AWAIT:
                    handleDeleteAwaitRequest(req.lock_name, req.owner);
                    break;
                case SIG_RET:
                    handleSignalResponse(req.lock_name, req.owner);
                    break;
                case CREATE_AWAITER:
                    handleCreateAwaitingRequest(req.lock_name, req.owner);
                    break;
                case DELETE_AWAITER:
                    handleDeleteAwaitingRequest(req.lock_name, req.owner);
                    break;
                default:
                    log.error("Request of type " + req.type + " not known");
                    break;
            }
            return null;
        case Event.VIEW_CHANGE:
            handleView((View) evt.getArg());
            break;
    }
    return up_prot.up(evt);
}
/** Called by AckSenderWindow to resend messages for which no ACK has been received yet */
public void retransmit(Message msg) {
    if (log.isTraceEnabled()) {
        UnicastHeader hdr = (UnicastHeader) msg.getHeader(id);
        long seqno = hdr != null ? hdr.seqno : -1;
        log.trace(local_addr + " --> XMIT(" + msg.getDest() + ": #" + seqno + ')');
    }
    down_prot.down(new Event(Event.MSG, msg));
    num_xmits++;
}
/**
 * Try to remove as many messages as possible from the table and pass them up. Prevents concurrent
 * passing up of messages by different threads (http://jira.jboss.com/jira/browse/JGRP-198): lots
 * of threads can come up to this point concurrently, but only 1 is allowed to pass at a time. We
 * *can* deliver messages from *different* senders concurrently, e.g. reception of P1, Q1, P2, Q2
 * can result in delivery of P1, Q1, Q2, P2: FIFO (implemented by UNICAST) only requires that
 * messages from the *same* sender be delivered in the order in which they were sent.
 */
protected int removeAndDeliver(final AtomicBoolean processing, Table<Message> win, Address sender) {
    int retval = 0;
    boolean released_processing = false;
    try {
        while (true) {
            List<Message> list = win.removeMany(processing, true, max_msg_batch_size);
            if (list == null) {
                released_processing = true;
                return retval;
            }
            retval += list.size(); // count the messages removed in this round

            MessageBatch batch = new MessageBatch(local_addr, sender, null, false, list);
            for (Message msg_to_deliver : batch) {
                // discard OOB msg: it has already been delivered (http://jira.jboss.com/jira/browse/JGRP-377)
                if (msg_to_deliver.isFlagSet(Message.Flag.OOB))
                    batch.remove(msg_to_deliver);
            }

            try {
                if (log.isTraceEnabled()) {
                    Message first = batch.first(), last = batch.last();
                    StringBuilder sb = new StringBuilder(local_addr + ": delivering");
                    if (first != null && last != null) {
                        UnicastHeader hdr1 = (UnicastHeader) first.getHeader(id),
                            hdr2 = (UnicastHeader) last.getHeader(id);
                        sb.append(" #").append(hdr1.seqno).append(" - #").append(hdr2.seqno);
                    }
                    sb.append(" (" + batch.size()).append(" messages)");
                    log.trace(sb);
                }
                up_prot.up(batch);
            } catch (Throwable t) {
                log.error("failed to deliver batch " + batch, t);
            }
        }
    } finally {
        // processing is always reset by win.removeMany(processing, ...) above and never here! This code is
        // just a second line of defense should an exception be thrown before removeMany() resets processing
        if (!released_processing)
            processing.set(false);
    }
}
public void up(MessageBatch batch) {
    for (Message msg : batch) {
        NakAckHeader2 hdr = (NakAckHeader2) msg.getHeader(ID);
        if (hdr != null && hdr.getType() == NakAckHeader2.MSG) {
            long seqno = hdr.getSeqno();
            msgs.add(seqno);
            System.out.println("-- received message #" + seqno + " from " + msg.getSrc());
        }
    }
}
/** Send a message to the address specified in msg.dest */
private void sendUnicastMessage(Message msg) {
    Address dst = msg.getDest(); // guaranteed not to be null
    if (!(dst instanceof IpAddress)) {
        Trace.error("TCP.sendUnicastMessage()", "destination address is not of type IpAddress !");
        return;
    }
    IpAddress dest = (IpAddress) dst;
    setSourceAddress(msg);

    /* Don't send if the destination is the local address. Instead, switch dst and src and put the
       message into the up_queue */
    if (loopback && local_addr != null && dest.equals(local_addr)) {
        Message copy = msg.copy();
        Object hdr = copy.getHeader(getName());
        if (hdr instanceof TcpHeader)
            copy.removeHeader(getName());
        copy.setSrc(local_addr);
        copy.setDest(local_addr);
        Event evt = new Event(Event.MSG, copy);

        /* Because Protocol.up() is never called by this bottommost layer, we call up() directly in the
           observer. This allows e.g. PerfObserver to get the time of reception of a message */
        if (observer != null)
            observer.up(evt, up_queue.size());
        passUp(evt);
        return;
    }

    if (Trace.trace)
        Trace.info("TCP.sendUnicastMessage()",
                   "dest=" + msg.getDest() + ", hdrs:\n" + msg.printObjectHeaders());
    try {
        if (skip_suspected_members && suspected_mbrs.contains(dest)) {
            if (Trace.trace)
                Trace.info("TCP.sendUnicastMessage()",
                           "will not send unicast message to " + dest + " as it is currently suspected");
            return;
        }
        ct.send(msg);
    } catch (SocketException e) {
        if (members.contains(dest) && !suspected_mbrs.contains(dest)) {
            suspected_mbrs.add(dest);
            passUp(new Event(Event.SUSPECT, dest));
        }
    }
}
/**
 * Unmarshal the original message (in the payload) and then pass it up (unless already delivered)
 *
 * @param msg
 */
protected void unwrapAndDeliver(final Message msg, boolean flush_ack) {
    try {
        Message msg_to_deliver = Util.streamableFromBuffer(
            Message.class, msg.getRawBuffer(), msg.getOffset(), msg.getLength());
        SequencerHeader hdr = (SequencerHeader) msg_to_deliver.getHeader(this.id);
        if (flush_ack)
            hdr.flush_ack = true;
        deliver(msg_to_deliver, new Event(Event.MSG, msg_to_deliver), hdr);
    } catch (Exception ex) {
        log.error(Util.getMessage("FailureUnmarshallingBuffer"), ex);
    }
}
protected void handleBatchReceived(Address sender, Map<Short, List<Message>> map) {
    for (Map.Entry<Short, List<Message>> element : map.entrySet()) {
        final List<Message> msg_list = element.getValue();
        if (log.isTraceEnabled()) {
            StringBuilder sb = new StringBuilder();
            sb.append(local_addr).append(" <-- DATA(").append(sender)
              .append(": " + printMessageList(msg_list)).append(')');
            log.trace(sb);
        }

        short conn_id = element.getKey();
        ReceiverEntry entry = null;
        for (Message msg : msg_list) {
            UnicastHeader hdr = (UnicastHeader) msg.getHeader(id);
            entry = getReceiverEntry(sender, hdr.seqno, hdr.first, conn_id);
            if (entry == null)
                continue;

            Table<Message> win = entry.received_msgs;
            boolean msg_added = win.add(hdr.seqno, msg); // win is guaranteed to be non-null if we get here
            num_msgs_received++;

            if (hdr.first && msg_added)
                sendAck(sender, hdr.seqno, conn_id); // send an ack immediately when we received the first message of a conn

            // An OOB message is passed up immediately. Later, when remove() is called, we discard it.
            // This affects ordering! http://jira.jboss.com/jira/browse/JGRP-377
            if (msg.isFlagSet(Message.Flag.OOB) && msg_added) {
                try {
                    up_prot.up(new Event(Event.MSG, msg));
                } catch (Throwable t) {
                    log.error("couldn't deliver OOB message " + msg, t);
                }
            }
        }
        if (entry != null && conn_expiry_timeout > 0)
            entry.update();
    }

    ReceiverEntry entry = recv_table.get(sender);
    Table<Message> win = entry != null ? entry.received_msgs : null;
    if (win != null) {
        final AtomicBoolean processing = win.getProcessing();
        if (processing.compareAndSet(false, true)) {
            removeAndDeliver(processing, win, sender);
            sendAck(sender, win.getHighestDeliverable(), entry.recv_conn_id);
        }
    }
}
public Object up(Event evt) {
    switch (evt.getType()) {
        case Event.MSG:
            Message msg = (Message) evt.getArg();
            NakAckHeader2 hdr = (NakAckHeader2) msg.getHeader(ID);
            if (hdr != null && hdr.getType() == NakAckHeader2.MSG) {
                long seqno = hdr.getSeqno();
                msgs.add(seqno);
                System.out.println("-- received message #" + seqno + " from " + msg.getSrc());
            }
            break;
    }
    return null;
}
public Object down(Event evt) {
    switch (evt.getType()) {
        case Event.MSG:
            Message msg = (Message) evt.getArg();
            NakAckHeader2 hdr = (NakAckHeader2) msg.getHeader(ID);
            if (hdr == null)
                break;
            if (hdr.getType() == NakAckHeader2.XMIT_REQ) {
                SeqnoList seqnos = (SeqnoList) msg.getObject();
                System.out.println("-- XMIT-REQ: request retransmission for " + seqnos);
                for (Long seqno : seqnos)
                    xmit_requests.add(seqno);
            }
            break;
    }
    return null;
}
/**
 * An event was received from the layer below. Usually the current layer will want to examine the
 * event type and - depending on its type - perform some computation (e.g. removing headers from a
 * MSG event type, or updating the internal membership list when receiving a VIEW_CHANGE event).
 * Finally the event is either a) discarded, b) sent down the stack using {@code down_prot.down()},
 * or c) sent up the stack (possibly as a different event) using {@code up_prot.up()}.
 */
public Object up(Message msg) {
    // If we have a join or merge request --> authenticate, else pass up
    GMS.GmsHeader gms_hdr = getGMSHeader(msg);
    if (gms_hdr != null && needsAuthentication(gms_hdr)) {
        AuthHeader auth_hdr = msg.getHeader(id);
        if (auth_hdr == null)
            throw new IllegalStateException(
                String.format("found %s from %s but no AUTH header", gms_hdr, msg.src()));
        if (!handleAuthHeader(gms_hdr, auth_hdr, msg)) // authentication failed
            return null; // don't pass up
    }
    if (!callUpHandlers(msg))
        return null;
    return up_prot.up(msg);
}
public void up(MessageBatch batch) {
    for (Message msg : batch) {
        FragHeader hdr = (FragHeader) msg.getHeader(this.id);
        if (hdr != null) { // needs to be defragmented
            Message assembled_msg = unfragment(msg, hdr);
            // the reassembled msg has to be added in the right place (https://issues.jboss.org/browse/JGRP-1648)
            // and cannot be added to the tail of the batch!
            if (assembled_msg != null)
                batch.replace(msg, assembled_msg);
            else
                batch.remove(msg);
        }
    }
    if (!batch.isEmpty())
        up_prot.up(batch);
}
public void up(MessageBatch batch) {
    for (Message msg : batch) {
        if (msg.isFlagSet(Message.Flag.NO_TOTAL_ORDER) || msg.isFlagSet(Message.Flag.OOB)
            || msg.getHeader(id) == null)
            continue;
        batch.remove(msg);

        // simplistic implementation
        try {
            up(new Event(Event.MSG, msg));
        } catch (Throwable t) {
            log.error(Util.getMessage("FailedPassingUpMessage"), t);
        }
    }
    if (!batch.isEmpty())
        up_prot.up(batch);
}
/**
 * We need to resend our first message with our conn_id
 *
 * @param sender
 * @param seqno Resend messages in the range [lowest .. seqno]
 */
private void handleResendingOfFirstMessage(Address sender, long seqno) {
    if (log.isTraceEnabled())
        log.trace(local_addr + " <-- SEND_FIRST_SEQNO(" + sender + "," + seqno + ")");
    SenderEntry entry = send_table.get(sender);
    AckSenderWindow win = entry != null ? entry.sent_msgs : null;
    if (win == null) {
        if (log.isErrorEnabled())
            log.error(local_addr + ": sender window for " + sender + " not found");
        return;
    }

    long lowest = win.getLowest();
    Message rsp = win.get(lowest);
    if (rsp == null)
        return;

    // We need to copy the Unicast2Header and put it back into the message because Message.copy()
    // copies the headers only by reference, so modifying the header in place would also modify the
    // original message in the sender retransmission window (https://jira.jboss.org/jira/browse/JGRP-965)
    Message copy = rsp.copy();
    Unicast2Header hdr = (Unicast2Header) copy.getHeader(this.id);
    Unicast2Header newhdr = hdr.copy();
    newhdr.first = true;
    copy.putHeader(this.id, newhdr);

    if (log.isTraceEnabled()) {
        StringBuilder sb = new StringBuilder();
        sb.append(local_addr).append(" --> DATA(").append(copy.getDest()).append(": #")
          .append(newhdr.seqno).append(", conn_id=").append(newhdr.conn_id);
        if (newhdr.first)
            sb.append(", first");
        sb.append(')');
        log.trace(sb);
    }
    down_prot.down(new Event(Event.MSG, copy));

    if (++lowest > seqno)
        return;
    for (long i = lowest; i <= seqno; i++) {
        rsp = win.get(i);
        if (rsp != null)
            down_prot.down(new Event(Event.MSG, rsp));
    }
}
private Object handleUpMessage(Event evt) throws Exception {
    Message msg = (Message) evt.getArg();
    EncryptHeader hdr;

    if (msg == null || (msg.getLength() == 0 && !encrypt_entire_message)
        || ((hdr = (EncryptHeader) msg.getHeader(this.id)) == null))
        return up_prot.up(evt);

    if (log.isTraceEnabled())
        log.trace("header received %s", hdr);

    switch (hdr.getType()) {
        case EncryptHeader.ENCRYPT:
            return handleEncryptedMessage(msg, evt, hdr);
        default:
            handleUpEvent(msg, hdr);
            return null;
    }
}
/**
 * Does the actual work of decrypting - if the version does not match the current cipher, tries
 * the previous cipher
 */
private Message decryptMessage(Cipher cipher, Message msg) throws Exception {
    EncryptHeader hdr = (EncryptHeader) msg.getHeader(this.id);
    if (!Arrays.equals(hdr.getVersion(), getSymVersion())) {
        log.warn("attempting to use stored cipher as message does not use current encryption version");
        cipher = keyMap.get(new AsciiString(hdr.getVersion()));
        if (cipher == null) {
            log.warn("unable to find a matching cipher in previous key map");
            return null;
        }
        log.trace("decrypting using previous cipher version");
        synchronized (cipher) {
            return _decrypt(cipher, msg, hdr.encryptEntireMessage());
        }
    }
    return _decrypt(cipher, msg, hdr.encryptEntireMessage());
}
public void up(MessageBatch batch) {
    for (Message msg : batch) {
        // If we have a join or merge request --> authenticate, else pass up
        GMS.GmsHeader gms_hdr = getGMSHeader(msg);
        if (gms_hdr != null && needsAuthentication(gms_hdr)) {
            AuthHeader auth_hdr = (AuthHeader) msg.getHeader(id);
            if (auth_hdr == null) {
                log.warn("found GMS join or merge request but no AUTH header");
                sendRejectionMessage(gms_hdr.getType(), batch.sender(), "join or merge without an AUTH header");
                batch.remove(msg);
            } else if (!handleAuthHeader(gms_hdr, auth_hdr, msg)) // authentication failed
                batch.remove(msg); // don't pass up
        }
    }
    if (!batch.isEmpty())
        up_prot.up(batch);
}
public Message visit(Message msg, MessageBatch batch) {
    EncryptHeader hdr;

    if (msg == null || (msg.getLength() == 0 && !encrypt_entire_message)
        || ((hdr = (EncryptHeader) msg.getHeader(id)) == null))
        return null;

    if (hdr.getType() == EncryptHeader.ENCRYPT) {
        // if queueing then pass into queue to be dealt with later
        if (queue_up) {
            queueUpMessage(msg, batch);
            return null;
        }

        // make sure we pass up any queued messages first
        if (!suppliedKey)
            drainUpQueue();

        if (lock == null) {
            int index = getNextIndex();
            lock = decoding_locks[index];
            cipher = decoding_ciphers[index];
            lock.lock();
        }

        try {
            Message tmpMsg = decryptMessage(cipher, msg.copy()); // need to copy for possible xmits
            if (tmpMsg != null)
                batch.replace(msg, tmpMsg);
        } catch (Exception e) {
            log.error("failed decrypting message from %s (offset=%d, length=%d, buf.length=%d): %s, headers are %s",
                      msg.getSrc(), msg.getOffset(), msg.getLength(), msg.getRawBuffer().length, e, msg.printHeaders());
        }
    } else {
        batch.remove(msg); // a control message will get handled by ENCRYPT and should not be passed up
        handleUpEvent(msg, hdr);
    }
    return null;
}
/**
 * An event was received from the layer below. Usually the current layer will want to examine the
 * event type and - depending on its type - perform some computation (e.g. removing headers from a
 * MSG event type, or updating the internal membership list when receiving a VIEW_CHANGE event).
 * Finally the event is either a) discarded, b) sent down the stack using <code>down_prot.down()</code>,
 * or c) sent up the stack (possibly as a different event) using <code>up_prot.up()</code>.
 */
public Object up(Event evt) {
    switch (evt.getType()) {
        case Event.MSG:
            Message msg = (Message) evt.getArg();
            // If we have a join or merge request --> authenticate, else pass up
            GMS.GmsHeader gms_hdr = getGMSHeader(evt);
            if (gms_hdr != null && needsAuthentication(gms_hdr)) {
                AuthHeader auth_hdr = (AuthHeader) msg.getHeader(id);
                if (auth_hdr == null)
                    throw new IllegalStateException("found GMS join or merge request but no AUTH header");
                if (!handleAuthHeader(gms_hdr, auth_hdr, msg)) // authentication failed
                    return null; // don't pass up
            }
            break;
    }
    if (!callUpHandlers(evt))
        return null;
    return up_prot.up(evt);
}
public Object up(Event evt) {
    Message msg;
    Address dst, src;
    Unicast2Header hdr;

    switch (evt.getType()) {
        case Event.MSG:
            msg = (Message) evt.getArg();
            dst = msg.getDest();
            if (dst == null || msg.isFlagSet(Message.NO_RELIABILITY)) // only handle unicast messages
                break; // pass up

            // changed from removeHeader(): we cannot remove the header because if we do loopback=true at the
            // transport level, we will not have the header on retransmit! (bela Aug 22 2006)
            hdr = (Unicast2Header) msg.getHeader(this.id);
            if (hdr == null)
                break;
            src = msg.getSrc();
            switch (hdr.type) {
                case Unicast2Header.DATA: // received regular message
                    handleDataReceived(src, hdr.seqno, hdr.conn_id, hdr.first, msg, evt);
                    return null; // we pass the deliverable message up in handleDataReceived()
                case Unicast2Header.XMIT_REQ: // received retransmit request for previously sent messages
                    handleXmitRequest(src, hdr.seqno, hdr.high_seqno);
                    break;
                case Unicast2Header.SEND_FIRST_SEQNO:
                    handleResendingOfFirstMessage(src, hdr.seqno);
                    break;
                case Unicast2Header.STABLE:
                    stable(msg.getSrc(), hdr.conn_id, hdr.seqno, hdr.high_seqno);
                    break;
                default:
                    log.error("Unicast2Header type " + hdr.type + " not known !");
                    break;
            }
            return null;
    }
    return up_prot.up(evt); // Pass up to the layer above us
}
/**
 * If the event is a message and it is fragmented, re-assemble the fragments into the original
 * message and pass it up the stack.
 */
public Object up(Event evt) {
    switch (evt.getType()) {
        case Event.MSG:
            Message msg = (Message) evt.getArg();
            FragHeader hdr = (FragHeader) msg.getHeader(this.id);
            if (hdr != null) { // needs to be defragmented
                Message assembled_msg = unfragment(msg, hdr);
                if (assembled_msg != null)
                    up_prot.up(new Event(Event.MSG, assembled_msg));
                return null;
            } else {
                num_received_msgs++;
            }
            break;
        case Event.VIEW_CHANGE:
            handleViewChange((View) evt.getArg());
            break;
    }
    return up_prot.up(evt); // Pass up to the layer above us by default
}
public Object up(Event evt) {
    switch (evt.getType()) {
        case Event.MSG:
            Message msg = (Message) evt.getArg();
            StateHeader hdr = (StateHeader) msg.getHeader(this.id);
            if (hdr == null)
                break;
            switch (hdr.type) {
                case StateHeader.STATE_REQ:
                    state_requesters.add(msg.getSrc());
                    break;
                case StateHeader.STATE_RSP:
                    handleStateRsp(hdr.getDigest(), msg.getSrc(), msg.getBuffer());
                    break;
                case StateHeader.STATE_EX:
                    closeHoleFor(msg.getSrc());
                    handleException((Throwable) msg.getObject());
                    break;
                default:
                    log.error("%s: type %s not known in StateHeader", local_addr, hdr.type);
                    break;
            }
            return null;
        case Event.TMP_VIEW:
        case Event.VIEW_CHANGE:
            handleViewChange((View) evt.getArg());
            break;
        case Event.CONFIG:
            Map<String, Object> config = (Map<String, Object>) evt.getArg();
            if (config != null && config.containsKey("state_transfer"))
                log.error(Util.getMessage("ProtocolStackCannotContainTwoStateTransferProtocolsRemoveEitherOneOfThem"));
            break;
    }
    return up_prot.up(evt);
}
public Object up(Event evt) {
    switch (evt.getType()) {
        case Event.MSG:
            Message msg = (Message) evt.getArg();
            if (msg.getDest() == null || msg.isFlagSet(Message.Flag.NO_RELIABILITY)) // only handle unicast messages
                break; // pass up

            UnicastHeader hdr = (UnicastHeader) msg.getHeader(this.id);
            if (hdr == null)
                break;
            Address sender = msg.getSrc();
            switch (hdr.type) {
                case UnicastHeader.DATA: // received regular message
                    handleDataReceived(sender, hdr.seqno, hdr.conn_id, hdr.first, msg, evt);
                    break;
                default:
                    handleUpEvent(sender, hdr);
                    break;
            }
            return null;
    }
    return up_prot.up(evt); // Pass up to the layer above us
}