/**
 * <b>Callback</b>. Called by superclass when event may be handled.
 *
 * <p><b>Do not use <code>PassDown</code> in this method as the event is passed down by default by
 * the superclass after this method returns !</b>
 *
 * @return boolean Defaults to true. If false, event will not be passed down the stack.
 */
public boolean handleDownEvent(Event evt) {
  switch (evt.getType()) {
    case Event.CONNECT:
      // pass CONNECT down first so the layers below are connected before we join
      passDown(evt);
      try {
        group_addr = (String) evt.getArg();
      } catch (ClassCastException cce) {
        Trace.error(
            "GMS.handleDownEvent(CONNECT)",
            "group address must " + "be a string (group name) to make sense");
      }
      impl.join(local_addr); // delegate the actual join to the current GMS implementation
      passUp(new Event(Event.CONNECT_OK)); // WE generate the CONNECT_OK response
      startEventHandlerThread();
      return false; // don't pass down: was already passed down
    case Event.DISCONNECT:
      impl.leave((Address) evt.getArg());
      passUp(new Event(Event.DISCONNECT_OK)); // WE generate the DISCONNECT_OK response
      stopEventHandlerThread();
      initState(); // reset GMS state for a possible later reconnect
      return true; // pass down
  }
  return impl.handleDownEvent(evt); // all other events: let the implementation decide
}
/**
 * An event is to be sent down the stack. The layer may want to examine its type and perform some
 * action on it, depending on the event's type. If the event is a message MSG, then the layer may
 * need to add a header to it (or do nothing at all) before sending it down the stack using <code>
 * PassDown</code>. In case of a GET_ADDRESS event (which tries to retrieve the stack's address
 * from one of the bottom layers), the layer may need to send a new response event back up the
 * stack using <code>up_prot.up()</code>. The PING protocol is interested in several different
 * down events: Event.FIND_INITIAL_MBRS - sent by the GMS layer and expecting a GET_MBRS_OK;
 * Event.TMP_VIEW and Event.VIEW_CHANGE - a view change event; Event.BECOME_SERVER - called after
 * client has joined and is fully working group member; Event.CONNECT, Event.DISCONNECT.
 */
@SuppressWarnings("unchecked")
public Object down(Event evt) {
  switch (evt.getType()) {
    case Event.FIND_INITIAL_MBRS: // sent by GMS layer
    case Event.FIND_ALL_VIEWS:
      // sends the GET_MBRS_REQ to all members, waits 'timeout' ms or until
      // 'num_initial_members' have been retrieved
      long start = System.currentTimeMillis();
      boolean find_all_views = evt.getType() == Event.FIND_ALL_VIEWS;
      Promise<JoinRsp> promise = (Promise<JoinRsp>) evt.getArg();
      List<PingData> rsps = find_all_views ? findAllViews(promise) : findInitialMembers(promise);
      long diff = System.currentTimeMillis() - start;
      if (log.isTraceEnabled())
        log.trace("discovery took " + diff + " ms: responses: " + Util.printPingData(rsps));
      return rsps; // discovery events are consumed here, not passed further down
    case Event.TMP_VIEW:
    case Event.VIEW_CHANGE:
      List<Address> tmp;
      view = (View) evt.getArg();
      if ((tmp = view.getMembers()) != null) {
        synchronized (members) { // 'members' is shared with other threads of this protocol
          members.clear();
          members.addAll(tmp);
        }
      }
      // the first member of the view is treated as the coordinator
      current_coord = !members.isEmpty() ? members.get(0) : null;
      is_coord = current_coord != null && local_addr != null && current_coord.equals(local_addr);
      return down_prot.down(evt);
    case Event.BECOME_SERVER: // called after client has joined and is fully working group member
      down_prot.down(evt);
      is_server = true;
      return null;
    case Event.SET_LOCAL_ADDRESS:
      local_addr = (Address) evt.getArg();
      return down_prot.down(evt);
    case Event.CONNECT:
    case Event.CONNECT_WITH_STATE_TRANSFER:
    case Event.CONNECT_USE_FLUSH:
    case Event.CONNECT_WITH_STATE_TRANSFER_USE_FLUSH:
      is_leaving = false;
      group_addr = (String) evt.getArg();
      // pass CONNECT down first, then run the connect hook of this discovery protocol
      Object ret = down_prot.down(evt);
      handleConnect();
      return ret;
    case Event.DISCONNECT:
      is_leaving = true;
      handleDisconnect();
      return down_prot.down(evt);
    default:
      return down_prot.down(evt); // Pass on to the layer below us
  }
}
/**
 * Routes an up event: messages carrying a ForkHeader are handed to the bottom protocol of the
 * named fork stack (or to the unknown-fork handler), view changes are fanned out to all fork
 * stacks, and state-transfer stream events are handled locally when enabled. Anything not
 * consumed here continues up the main stack.
 */
public Object up(Event evt) {
  int type = evt.getType();
  if (type == Event.MSG) {
    Message message = (Message) evt.getArg();
    ForkHeader header = (ForkHeader) message.getHeader(id);
    if (header != null) {
      if (header.fork_stack_id == null)
        throw new IllegalArgumentException("header has a null fork_stack_id");
      Protocol fork_bottom = get(header.fork_stack_id);
      if (fork_bottom == null)
        return this.unknownForkHandler.handleUnknownForkStack(message, header.fork_stack_id);
      return fork_bottom.up(evt);
    }
  } else if (type == Event.VIEW_CHANGE) {
    // every fork stack gets to see the new view
    for (Protocol fork_bottom : fork_stacks.values()) fork_bottom.up(evt);
  } else if (type == Event.STATE_TRANSFER_OUTPUTSTREAM) {
    if (process_state_events) {
      getStateFromMainAndForkChannels(evt);
      return null;
    }
  } else if (type == Event.STATE_TRANSFER_INPUTSTREAM) {
    if (process_state_events) {
      setStateInMainAndForkChannels((InputStream) evt.getArg());
      return null;
    }
  }
  return up_prot.up(evt);
}
/**
 * Event-handler loop: blocks on {@code event_queue} and dispatches SUSPECT and MERGE events to
 * the current GMS implementation. Exits when the queue is closed or when the handler thread /
 * queue reference has been cleared (see startEventHandlerThread/stopEventHandlerThread).
 */
public void run() {
  Event evt;

  while (evt_thread != null && event_queue != null) {
    try {
      evt = (Event) event_queue.remove(); // blocks until an event is available
      switch (evt.getType()) {
        case Event.SUSPECT:
          impl.suspect((Address) evt.getArg());
          break;
        case Event.MERGE:
          impl.merge((Vector) evt.getArg());
          break;
        default:
          // only SUSPECT and MERGE are ever queued; anything else indicates a bug
          Trace.error(
              "GMS.run()",
              "event handler thread encountered event of type "
                  + Event.type2String(evt.getType())
                  + ": not handled by me !");
          break;
      }
    } catch (QueueClosedException closed) {
      break; // queue closed: normal shutdown path
    } catch (Exception ex) {
      Trace.warn("GMS.run()", "exception=" + ex);
    }
  }
}
/**
 * Handles up events for the locking protocol: messages carrying a LockingHeader contain a
 * serialized Request which is dispatched to the matching handler and consumed; view changes
 * are noted and passed on; everything else travels up unchanged.
 */
public Object up(Event evt) {
  switch (evt.getType()) {
    case Event.MSG:
      Message msg = (Message) evt.getArg();
      LockingHeader hdr = (LockingHeader) msg.getHeader(id);
      if (hdr == null) break; // not a locking message: pass up

      Request req = (Request) msg.getObject(); // the request travels as the message payload
      if (log.isTraceEnabled())
        log.trace("[" + local_addr + "] <-- [" + msg.getSrc() + "] " + req);
      switch (req.type) {
        case GRANT_LOCK:
        case RELEASE_LOCK:
          handleLockRequest(req);
          break;
        case LOCK_GRANTED:
          handleLockGrantedResponse(req.lock_name, req.owner, msg.getSrc());
          break;
        case LOCK_DENIED:
          handleLockDeniedResponse(req.lock_name, req.owner);
          break;
        case CREATE_LOCK:
          handleCreateLockRequest(req.lock_name, req.owner);
          break;
        case DELETE_LOCK:
          handleDeleteLockRequest(req.lock_name);
          break;
        case COND_SIG:
        case COND_SIG_ALL:
          handleSignalRequest(req);
          break;
        case LOCK_AWAIT:
          // an await both registers the awaiter and re-processes the lock request
          handleAwaitRequest(req.lock_name, req.owner);
          handleLockRequest(req);
          break;
        case DELETE_LOCK_AWAIT:
          handleDeleteAwaitRequest(req.lock_name, req.owner);
          break;
        case SIG_RET:
          handleSignalResponse(req.lock_name, req.owner);
          break;
        case CREATE_AWAITER:
          handleCreateAwaitingRequest(req.lock_name, req.owner);
          break;
        case DELETE_AWAITER:
          handleDeleteAwaitingRequest(req.lock_name, req.owner);
          break;
        default:
          log.error("Request of type " + req.type + " not known");
          break;
      }
      return null; // locking traffic is consumed here
    case Event.VIEW_CHANGE:
      handleView((View) evt.getArg());
      break;
  }
  return up_prot.up(evt);
}
/**
 * Handles down events for state transfer. GET_STATE selects a state provider (the explicit
 * target, or the coordinator when none is given) and sends it a STATE_REQ message; view
 * changes, CONFIG and SET_LOCAL_ADDRESS update local bookkeeping and are passed down.
 */
public Object down(Event evt) {
  switch (evt.getType()) {
    case Event.TMP_VIEW:
    case Event.VIEW_CHANGE:
      handleViewChange((View) evt.getArg());
      break;
    case Event.GET_STATE:
      Address target;
      StateTransferInfo info = (StateTransferInfo) evt.getArg();
      if (info.target == null) {
        target = determineCoordinator(); // no explicit target: ask the coordinator
      } else {
        target = info.target;
        if (target.equals(local_addr)) {
          log.error("%s: cannot fetch state from myself", local_addr);
          target = null; // fetching from oneself makes no sense; fall through to "no state"
        }
      }
      if (target == null) {
        // nobody to ask: report an empty state to the caller
        log.debug("%s: first member (no state)", local_addr);
        up_prot.up(new Event(Event.GET_STATE_OK, new StateTransferInfo()));
      } else {
        Message state_req =
            new Message(target)
                .putHeader(this.id, new StateHeader(StateHeader.STATE_REQ))
                .setFlag(Message.Flag.DONT_BUNDLE, Message.Flag.OOB, Message.Flag.SKIP_BARRIER);
        log.debug("%s: asking %s for state", local_addr, target);

        // suspend sending and handling of message garbage collection gossip messages,
        // fixes bugs #943480 and #938584). Wake up when state has been received
        /*if(log.isDebugEnabled()) log.debug("passing down a SUSPEND_STABLE event");
        down_prot.down(new Event(Event.SUSPEND_STABLE, new Long(info.timeout)));*/
        waiting_for_state_response = true;
        start = System.currentTimeMillis(); // timestamp used to measure the transfer duration
        down_prot.down(new Event(Event.MSG, state_req));
      }
      return null; // don't pass down any further !
    case Event.CONFIG:
      Map<String, Object> config = (Map<String, Object>) evt.getArg();
      if (config != null && config.containsKey("flush_supported")) {
        flushProtocolInStack = true; // a FLUSH protocol is present in this stack
      }
      break;
    case Event.SET_LOCAL_ADDRESS:
      local_addr = (Address) evt.getArg();
      break;
  }
  return down_prot.down(evt); // pass on to the layer below us
}
/**
 * Handles the UP event coming from the layer below.
 *
 * <p>Fix: the event was previously passed up multiple times — once unconditionally at the top of
 * the method, once more inside the CONFIG case, and once more at the bottom — so every event
 * (and thus every message) was delivered twice to the layer above. The event is now passed up
 * exactly once, after any local handling.
 *
 * @param evt the event being sent up from the stack
 */
public void up(Event evt) {
  switch (evt.getType()) {
    case Event.CONFIG:
      if (Trace.trace) {
        Trace.info("UDP.up()", "received CONFIG event: " + evt.getArg());
      }
      handleConfigEvent((HashMap) evt.getArg()); // apply transport configuration locally
      break;
  }
  passUp(evt); // pass every event up exactly once
}
/** * <b>Callback</b>. Called by superclass when event may be handled. * * <p><b>Do not use <code>PassUp</code> in this method as the event is passed up by default by the * superclass after this method returns !</b> * * @return boolean Defaults to true. If false, event will not be passed up the stack. */ public boolean handleUpEvent(Event evt) { switch (evt.getType()) { case Event.CONNECT_OK: // sent by someone else, but WE are responsible for sending this ! case Event.DISCONNECT_OK: // dito (e.g. sent by UDP layer) return false; case Event.SET_LOCAL_ADDRESS: local_addr = (Address) evt.getArg(); if (print_local_addr) { System.out.println( "\n-------------------------------------------------------\n" + "2.1GMS: address is " + local_addr + "\n-------------------------------------------------------"); } return true; // pass up case Event.SUSPECT: try { event_queue.add(evt); } catch (Exception e) { } return true; // pass up case Event.MERGE: try { event_queue.add(evt); } catch (Exception e) { } return false; // don't pass up case Event.FLUSH_OK: synchronized (flush_mutex) { flush_rsp = (FlushRsp) evt.getArg(); flush_mutex.notify(); } return false; // don't pass up case Event.REBROADCAST_MSGS_OK: synchronized (rebroadcast_mutex) { rebroadcast_mutex.notify(); } return false; // don't pass up } return impl.handleUpEvent(evt); }
public Object up(Event evt) { switch (evt.getType()) { case Event.MSG: Message msg = (Message) evt.getArg(); DaisyHeader hdr = (DaisyHeader) msg.getHeader(getId()); if (hdr == null) break; // 1. forward the message to the next in line if ttl > 0 short ttl = hdr.getTTL(); if (log.isTraceEnabled()) log.trace(local_addr + ": received message from " + msg.getSrc() + " with ttl=" + ttl); if (--ttl > 0) { Message copy = msg.copy(true); copy.setDest(next); copy.putHeader(getId(), new DaisyHeader(ttl)); msgs_forwarded++; if (log.isTraceEnabled()) log.trace(local_addr + ": forwarding message to " + next + " with ttl=" + ttl); down_prot.down(new Event(Event.MSG, copy)); } // 2. Pass up msg.setDest(null); break; } return up_prot.up(evt); }
/**
 * Sends an event down the protocol stack. Unlike {@link #send(Message)}, no closed/disconnected
 * checks are made when the event wraps a message; only send statistics are updated.
 *
 * @param evt the event to send down, or null (ignored)
 * @return whatever the protocol stack returns for this event
 */
public Object down(Event evt) {
  if (evt == null) {
    return null;
  }
  if (stats && evt.getType() == Event.MSG) {
    Message message = (Message) evt.getArg();
    sent_msgs++;
    sent_bytes += message.getLength();
  }
  return prot_stack.down(evt);
}
/** Caches the local address from SET_LOCAL_ADDRESS events; all events continue down unchanged. */
public Object down(Event evt) {
  if (evt.getType() == Event.SET_LOCAL_ADDRESS) {
    local_addr = (Address) evt.getArg();
  }
  return down_prot.down(evt);
}
/** Notes view changes via handleView(); every event continues up the stack unchanged. */
public Object up(Event evt) {
  if (evt.getType() == Event.VIEW_CHANGE) {
    handleView(evt.getArg());
  }
  return up_prot.up(evt);
}
/**
 * Passes the connect event down the stack. On any failure the stack is stopped, the channel is
 * reset to OPEN and re-initialized, and the cause is rethrown wrapped in an Exception naming
 * the cluster.
 */
protected void _connect(Event connect_event) throws Exception {
  try {
    down(connect_event);
  } catch (Throwable t) {
    // roll the channel back to a clean, re-connectable state before reporting the failure
    stopStack(true, false);
    state = State.OPEN;
    init();
    String cluster_name = String.valueOf(connect_event.getArg());
    throw new Exception("connecting to channel \"" + cluster_name + "\" failed", t);
  }
}
/**
 * Sends a multicast message around the daisy chain: instead of a transport-level multicast, a
 * copy travels member-to-member with a TTL. Unicasts, and messages sent before the first view
 * has arrived, are passed down unchanged.
 */
public Object down(final Event evt) {
  switch (evt.getType()) {
    case Event.MSG:
      final Message msg = (Message) evt.getArg();
      if (msg.getDest() != null) break; // only process multicast messages
      if (next == null) // view hasn't been received yet, use the normal transport
        break;

      // we need to copy the message, as we cannot do a msg.setSrc(next): the next retransmission
      // would use 'next' as destination !
      Message copy = msg.copy(true);
      // with loopback we deliver the original locally ourselves, so the copy needs one hop less
      short hdr_ttl = (short) (loopback ? view_size - 1 : view_size);
      DaisyHeader hdr = new DaisyHeader(hdr_ttl);
      copy.setDest(next);
      copy.putHeader(getId(), hdr);
      msgs_sent++;

      if (loopback) {
        if (log.isTraceEnabled()) log.trace(new StringBuilder("looping back message ").append(msg));
        if (msg.getSrc() == null) msg.setSrc(local_addr);
        // local delivery happens asynchronously; OOB messages use the OOB pool
        Executor pool = msg.isFlagSet(Message.Flag.OOB) ? oob_pool : default_pool;
        pool.execute(() -> up_prot.up(evt));
      }
      return down_prot.down(new Event(Event.MSG, copy));
    case Event.VIEW_CHANGE:
      handleView((View) evt.getArg());
      break;
    case Event.TMP_VIEW:
      view_size = ((View) evt.getArg()).size();
      break;
    case Event.SET_LOCAL_ADDRESS:
      local_addr = (Address) evt.getArg();
      break;
  }
  return down_prot.down(evt);
}
/**
 * Handles down events for the encryption layer: outgoing messages are encrypted and sent, or
 * queued while no session key is established; view changes may trigger key-server handling.
 */
public Object down(Event evt) {
  switch (evt.getType()) {
    case Event.MSG:
      Message msg = (Message) evt.getArg();
      // zero-length messages have no payload to encrypt unless whole messages are encrypted
      if (msg.getLength() == 0 && !encrypt_entire_message) break;

      try {
        if (queue_down) {
          log.trace("queueing down message as no session key established: %s", msg);
          downMessageQueue.put(msg); // queue messages if we are waiting for a new key
        } else {
          // make sure the down queue is drained first to keep ordering
          if (!suppliedKey) drainDownQueue();
          encryptAndSend(msg);
        }
      } catch (Exception e) {
        log.warn("unable to send message down", e);
      }
      return null; // message is consumed here (queued or encrypted and sent)
    case Event.VIEW_CHANGE:
      View view = (View) evt.getArg();
      log.debug("new view: " + view);
      if (!suppliedKey) handleViewChange(view, false);
      break;
    case Event.SET_LOCAL_ADDRESS:
      local_addr = (Address) evt.getArg();
      log.debug("set local address to %s", local_addr);
      break;
    case Event.TMP_VIEW:
      view = (View) evt.getArg();
      if (!suppliedKey) {
        // if a tmp_view then we are trying to become coordinator so
        // make us keyserver
        handleViewChange(view, true);
      }
      break;
  }
  return down_prot.down(evt);
}
public Object up(Event evt) { switch (evt.getType()) { case Event.MSG: Message msg = (Message) evt.getArg(); // Do something with the event, e.g. extract the message and remove a header. // Optionally pass up break; } return up_prot.up(evt); // Pass up to the layer above us }
public Object down(Event evt) { switch (evt.getType()) { case Event.TMP_VIEW: case Event.VIEW_CHANGE: List<Address> new_members = ((View) evt.getArg()).getMembers(); synchronized (members) { members.clear(); if (new_members != null && !new_members.isEmpty()) members.addAll(new_members); } return down_prot.down(evt); case Event.MSG: Message msg = (Message) evt.getArg(); // Do something with the event, e.g. add a header to the message // Optionally pass down break; } return down_prot.down(evt); // Pass on to the layer below us }
public Object up(Event evt) { switch (evt.getType()) { case Event.VIEW_CHANGE: View view = (View) evt.getArg(); log.debug("new view: " + view); if (!suppliedKey) handleViewChange(view, false); break; case Event.TMP_VIEW: view = (View) evt.getArg(); if (!suppliedKey) handleViewChange(view, true); break; // we try and decrypt all messages case Event.MSG: try { return handleUpMessage(evt); } catch (Exception e) { log.warn("exception occurred decrypting message", e); } return null; } return up_prot.up(evt); }
/**
 * Handles up events for state transfer: STATE_REQ/STATE_RSP/STATE_EX control messages are
 * dispatched by header type and consumed; view changes and CONFIG checks are passed on.
 */
public Object up(Event evt) {
  switch (evt.getType()) {
    case Event.MSG:
      Message msg = (Message) evt.getArg();
      StateHeader hdr = (StateHeader) msg.getHeader(this.id);
      if (hdr == null) break; // not a state-transfer message: pass up

      switch (hdr.type) {
        case StateHeader.STATE_REQ:
          state_requesters.add(msg.getSrc()); // record who asked for our state
          break;
        case StateHeader.STATE_RSP:
          handleStateRsp(hdr.getDigest(), msg.getSrc(), msg.getBuffer());
          break;
        case StateHeader.STATE_EX:
          // the state provider hit an exception: unblock the requester and surface the error
          closeHoleFor(msg.getSrc());
          handleException((Throwable) msg.getObject());
          break;
        default:
          log.error("%s: type %s not known in StateHeader", local_addr, hdr.type);
          break;
      }
      return null;
    case Event.TMP_VIEW:
    case Event.VIEW_CHANGE:
      handleViewChange((View) evt.getArg());
      break;
    case Event.CONFIG:
      Map<String, Object> config = (Map<String, Object>) evt.getArg();
      // two state transfer protocols in one stack is a misconfiguration
      if (config != null && config.containsKey("state_transfer"))
        log.error(
            Util.getMessage(
                "ProtocolStackCannotContainTwoStateTransferProtocolsRemoveEitherOneOfThem"));
      break;
  }
  return up_prot.up(evt);
}
/**
 * Handles non-message down events for the UDP transport: caches the membership and group
 * address, generates the synthetic *_OK / SET_LOCAL_ADDRESS responses this transport is
 * responsible for, and applies CONFIG maps.
 */
void handleDownEvent(Event evt) {
  switch (evt.getType()) {
    case Event.TMP_VIEW:
    case Event.VIEW_CHANGE:
      synchronized (members) { // 'members' is read by the sending code as well
        members.removeAllElements();
        Vector tmpvec = ((View) evt.getArg()).getMembers();
        for (int i = 0; i < tmpvec.size(); i++) {
          members.addElement(tmpvec.elementAt(i));
        }
      }
      break;
    case Event.GET_LOCAL_ADDRESS: // return local address -> Event(SET_LOCAL_ADDRESS, local)
      passUp(new Event(Event.SET_LOCAL_ADDRESS, local_addr));
      break;
    case Event.CONNECT:
      group_addr = (String) evt.getArg();
      udp_hdr = new UdpHeader(group_addr); // from now on stamped onto outgoing messages in down()
      // removed March 18 2003 (bela), not needed (handled by GMS)
      // changed July 2 2003 (bela): we discard CONNECT_OK at the GMS level anyway, this might
      // be needed if we run without GMS though
      passUp(new Event(Event.CONNECT_OK));
      break;
    case Event.DISCONNECT:
      passUp(new Event(Event.DISCONNECT_OK));
      break;
    case Event.CONFIG:
      if (Trace.trace) {
        Trace.info("UDP.down()", "received CONFIG event: " + evt.getArg());
      }
      handleConfigEvent((HashMap) evt.getArg());
      break;
  }
}
/**
 * Test harness up handler: records the seqno of every NAKACK2 data message it sees and prints
 * it; all events are consumed (nothing is passed further up).
 */
public Object up(Event evt) {
  if (evt.getType() == Event.MSG) {
    Message msg = (Message) evt.getArg();
    NakAckHeader2 header = (NakAckHeader2) msg.getHeader(ID);
    if (header != null && header.getType() == NakAckHeader2.MSG) {
      long seqno = header.getSeqno();
      msgs.add(seqno);
      System.out.println("-- received message #" + seqno + " from " + msg.getSrc());
    }
  }
  return null;
}
/**
 * Test harness down handler: records every seqno requested for retransmission by NAKACK2
 * XMIT_REQ messages and prints the request; all events are consumed.
 */
public Object down(Event evt) {
  if (evt.getType() == Event.MSG) {
    Message msg = (Message) evt.getArg();
    NakAckHeader2 header = (NakAckHeader2) msg.getHeader(ID);
    if (header != null && header.getType() == NakAckHeader2.XMIT_REQ) {
      SeqnoList requested = (SeqnoList) msg.getObject();
      System.out.println("-- XMIT-REQ: request retransmission for " + requested);
      for (Long seqno : requested) {
        xmit_requests.add(seqno);
      }
    }
  }
  return null;
}
/**
 * Processes an incoming message for the encryption layer. Messages without an EncryptHeader
 * (or empty ones when only payloads are encrypted) bypass this layer and go straight up;
 * ENCRYPT-typed messages are decrypted, all other header types are treated as control events.
 */
private Object handleUpMessage(Event evt) throws Exception {
  Message msg = (Message) evt.getArg();
  if (msg == null) return up_prot.up(evt);
  if (msg.getLength() == 0 && !encrypt_entire_message) return up_prot.up(evt);

  EncryptHeader hdr = (EncryptHeader) msg.getHeader(this.id);
  if (hdr == null) return up_prot.up(evt); // not encrypted by us: pass through

  if (log.isTraceEnabled()) log.trace("header received %s", hdr);

  if (hdr.getType() == EncryptHeader.ENCRYPT) {
    return handleEncryptedMessage(msg, evt, hdr);
  }
  handleUpEvent(msg, hdr); // key-management / control traffic
  return null;
}
/**
 * Handles up events for reliable unicast (UNICAST2): messages carrying a Unicast2Header are
 * dispatched by type (DATA, XMIT_REQ, SEND_FIRST_SEQNO, STABLE) and consumed; non-unicast or
 * unreliable traffic passes up unchanged.
 */
public Object up(Event evt) {
  Message msg;
  Address dst, src;
  Unicast2Header hdr;

  switch (evt.getType()) {
    case Event.MSG:
      msg = (Message) evt.getArg();
      dst = msg.getDest();
      if (dst == null || msg.isFlagSet(Message.NO_RELIABILITY)) // only handle unicast messages
        break; // pass up

      // changed from removeHeader(): we cannot remove the header because if we do loopback=true
      // at the
      // transport level, we will not have the header on retransmit ! (bela Aug 22 2006)
      hdr = (Unicast2Header) msg.getHeader(this.id);
      if (hdr == null) break;
      src = msg.getSrc();
      switch (hdr.type) {
        case Unicast2Header.DATA: // received regular message
          handleDataReceived(src, hdr.seqno, hdr.conn_id, hdr.first, msg, evt);
          return null; // we pass the deliverable message up in handleDataReceived()
        case Unicast2Header.XMIT_REQ: // received ACK for previously sent message
          handleXmitRequest(src, hdr.seqno, hdr.high_seqno);
          break;
        case Unicast2Header.SEND_FIRST_SEQNO:
          handleResendingOfFirstMessage(src, hdr.seqno);
          break;
        case Unicast2Header.STABLE:
          stable(msg.getSrc(), hdr.conn_id, hdr.seqno, hdr.high_seqno);
          break;
        default:
          log.error("UnicastHeader type " + hdr.type + " not known !");
          break;
      }
      return null;
  }
  return up_prot.up(evt); // Pass up to the layer above us
}
protected void getStateFromMainAndForkChannels(Event evt) { final OutputStream out = (OutputStream) evt.getArg(); try (DataOutputStream dos = new DataOutputStream(out)) { getStateFrom(null, up_prot, null, null, dos); // now fetch state from all fork channels for (Map.Entry<String, Protocol> entry : fork_stacks.entrySet()) { String stack_name = entry.getKey(); Protocol prot = entry.getValue(); ForkProtocolStack fork_stack = getForkStack(prot); for (Map.Entry<String, JChannel> en : fork_stack.getForkChannels().entrySet()) { String fc_name = en.getKey(); JChannel fc = en.getValue(); getStateFrom(fc, null, stack_name, fc_name, dos); } } } catch (Throwable ex) { log.error("%s: failed fetching state from main channel", local_addr, ex); } }
/**
 * Handles up events for reliable unicast: DATA messages with a UnicastHeader are handed to
 * handleDataReceived(), all other header types to handleUpEvent(); non-unicast or unreliable
 * traffic passes up unchanged.
 */
public Object up(Event evt) {
  switch (evt.getType()) {
    case Event.MSG:
      Message msg = (Message) evt.getArg();
      if (msg.getDest() == null
          || msg.isFlagSet(Message.Flag.NO_RELIABILITY)) // only handle unicast messages
        break; // pass up

      UnicastHeader hdr = (UnicastHeader) msg.getHeader(this.id);
      if (hdr == null) break; // no header of ours: pass up
      Address sender = msg.getSrc();
      switch (hdr.type) {
        case UnicastHeader.DATA: // received regular message
          handleDataReceived(sender, hdr.seqno, hdr.conn_id, hdr.first, msg, evt);
          break;
        default:
          handleUpEvent(sender, hdr); // non-DATA control traffic
          break;
      }
      return null;
  }
  return up_prot.up(evt); // Pass up to the layer above us
}
/**
 * Handles up events for total order (SEQUENCER): the coordinator re-broadcasts FORWARD/FLUSH
 * requests from members, members deliver BCAST/WRAPPED_BCAST messages; OOB and NO_TOTAL_ORDER
 * messages bypass ordering entirely.
 */
public Object up(Event evt) {
  Message msg;
  SequencerHeader hdr;

  switch (evt.getType()) {
    case Event.MSG:
      msg = (Message) evt.getArg();
      // OOB and NO_TOTAL_ORDER messages are exempt from total ordering
      if (msg.isFlagSet(Message.Flag.NO_TOTAL_ORDER) || msg.isFlagSet(Message.Flag.OOB)) break;
      hdr = (SequencerHeader) msg.getHeader(this.id);
      if (hdr == null) break; // pass up

      switch (hdr.type) {
        case SequencerHeader.FORWARD:
        case SequencerHeader.FLUSH:
          // only the coordinator is allowed to sequence forwarded messages
          if (!is_coord) {
            if (log.isErrorEnabled())
              log.error(local_addr + ": non-coord; dropping FORWARD request from " + msg.getSrc());
            return null;
          }
          Address sender = msg.getSrc();
          if (view != null && !view.containsMember(sender)) {
            if (log.isErrorEnabled())
              log.error(
                  local_addr
                      + ": dropping FORWARD request from non-member "
                      + sender
                      + "; view="
                      + view);
            return null;
          }
          broadcast(
              msg,
              true,
              msg.getSrc(),
              hdr.seqno,
              hdr.type == SequencerHeader.FLUSH); // do copy the message
          received_forwards++;
          break;
        case SequencerHeader.BCAST:
          deliver(msg, evt, hdr);
          received_bcasts++;
          break;
        case SequencerHeader.WRAPPED_BCAST:
          unwrapAndDeliver(
              msg, hdr.flush_ack); // unwrap the original message (in the payload) and deliver it
          received_bcasts++;
          break;
      }
      return null;
    case Event.VIEW_CHANGE:
      // pass the view up first, then adjust our coordinator-related state
      Object retval = up_prot.up(evt);
      handleViewChange((View) evt.getArg());
      return retval;
    case Event.TMP_VIEW:
      handleTmpView((View) evt.getArg());
      break;
  }
  return up_prot.up(evt);
}
public Object down(Event evt) { switch (evt.getType()) { case Event.MSG: Message msg = (Message) evt.getArg(); if (msg.getDest() != null || msg.isFlagSet(Message.Flag.NO_TOTAL_ORDER) || msg.isFlagSet(Message.Flag.OOB)) break; if (msg.getSrc() == null) msg.setSrc(local_addr); if (flushing) block(); // A seqno is not used to establish ordering, but only to weed out duplicates; next_seqno // doesn't need // to increase monotonically, but only to be unique // (https://issues.jboss.org/browse/JGRP-1461) ! long next_seqno = seqno.incrementAndGet(); in_flight_sends.incrementAndGet(); try { SequencerHeader hdr = new SequencerHeader( is_coord ? SequencerHeader.BCAST : SequencerHeader.WRAPPED_BCAST, next_seqno); msg.putHeader(this.id, hdr); if (log.isTraceEnabled()) log.trace( "[" + local_addr + "]: forwarding " + local_addr + "::" + seqno + " to coord " + coord); // We always forward messages to the coordinator, even if we're the coordinator. Having // the coord // send its messages directly led to starvation of messages from other members. MPerf perf // went up // from 20MB/sec/node to 50MB/sec/node with this change ! forwardToCoord(next_seqno, msg); } catch (Exception ex) { log.error(Util.getMessage("FailedSendingMessage"), ex); } finally { in_flight_sends.decrementAndGet(); } return null; // don't pass down case Event.VIEW_CHANGE: handleViewChange((View) evt.getArg()); break; case Event.TMP_VIEW: handleTmpView((View) evt.getArg()); break; case Event.SET_LOCAL_ADDRESS: local_addr = (Address) evt.getArg(); break; } return down_prot.down(evt); }
/**
 * Called by the layer above this layer. Non-message events are handled locally via
 * handleDownEvent(); messages are stamped with the UDP header and sent either via IP multicast,
 * as multiple unicasts (when multicast is disabled), or as a single unicast datagram.
 */
public void down(Event evt) {
  Message msg;
  Object dest_addr;

  if (evt.getType() != Event.MSG) { // unless it is a message handle it and respond
    handleDownEvent(evt);
    return;
  }

  // ****************** profiling ******************
  /*if(num_msgs == 0) {
      start=System.currentTimeMillis();
      num_msgs++;
  }
  else if(num_msgs >= 1000) {
      stop=System.currentTimeMillis();
      long total_time=stop-start;
      double msgs_per_msec=num_msgs / (double)total_time;
      if(Trace.trace)
          Trace.info("UDP.down.profile()",
                     "total_time=" + total_time + ", msgs/ms=" + msgs_per_msec);
      num_msgs=0;
  }
  else {
      num_msgs++;
  }*/
  // ****************** profiling ******************

  msg = (Message) evt.getArg();

  if (udp_hdr != null && udp_hdr.group_addr != null) {
    // added patch by Roland Kurmann (March 20 2003)
    msg.putHeader(name, udp_hdr);
  }

  dest_addr = msg.getDest();

  // Because we don't call Protocol.passDown(), we notify the observer directly (e.g.
  // PerfObserver).
  // This way, we still have performance numbers for UDP
  if (observer != null) {
    observer.passDown(evt);
  }

  if (dest_addr == null) { // 'null' means send to all group members
    if (ip_mcast) {
      if (mcast_addr == null) {
        // cannot send anywhere: no destination and no multicast address configured
        Trace.error(
            "UDP.down()",
            "dest address of message is null, and "
                + "sending to default address fails as mcast_addr is null, too !"
                + " Discarding message "
                + Util.printEvent(evt));
        return;
      }
      // if we want to use IP multicast, then set the destination of the message
      msg.setDest(mcast_addr);
    } else {
      // sends a separate UDP message to each address
      sendMultipleUdpMessages(msg, members);
      return;
    }
  }

  try {
    sendUdpMessage(msg);
  } catch (Exception e) {
    Trace.error("UDP.down()", "exception=" + e + ", msg=" + msg + ", mcast_addr=" + mcast_addr);
  }
}
/**
 * Handles down events for reliable unicast (UNICAST2): each unicast DATA message gets a
 * seqno/conn-id header, is added to the per-destination sender window for retransmission, and
 * is passed down; view changes prune connections to members no longer in the view.
 */
public Object down(Event evt) {
  switch (evt.getType()) {
    case Event.MSG: // Add UnicastHeader, add to AckSenderWindow and pass down
      Message msg = (Message) evt.getArg();
      Address dst = msg.getDest();

      /* only handle unicast messages */
      if (dst == null || msg.isFlagSet(Message.NO_RELIABILITY)) break;

      if (!started) {
        if (log.isTraceEnabled())
          log.trace("discarded message as start() has not yet been called, message: " + msg);
        return null;
      }

      SenderEntry entry = send_table.get(dst);
      if (entry == null) {
        entry = new SenderEntry(getNewConnectionId());
        // putIfAbsent: if another thread created the entry first, use theirs
        SenderEntry existing = send_table.putIfAbsent(dst, entry);
        if (existing != null) entry = existing;
        else {
          if (log.isTraceEnabled())
            log.trace(
                local_addr
                    + ": created connection to "
                    + dst
                    + " (conn_id="
                    + entry.send_conn_id
                    + ")");
          if (cache != null && !members.contains(dst)) cache.add(dst);
        }
      }

      long seqno = -2;
      short send_conn_id = -1;
      Unicast2Header hdr;

      entry.lock(); // threads will only sync if they access the same entry
      try {
        seqno = entry.sent_msgs_seqno;
        send_conn_id = entry.send_conn_id;
        hdr = Unicast2Header.createDataHeader(seqno, send_conn_id, seqno == DEFAULT_FIRST_SEQNO);
        msg.putHeader(this.id, hdr);
        entry.sent_msgs.addToMessages(
            seqno, msg); // add *including* UnicastHeader, adds to retransmitter
        entry.sent_msgs_seqno++;
        entry.update();
      } finally {
        entry.unlock();
      }

      if (log.isTraceEnabled()) {
        StringBuilder sb = new StringBuilder();
        sb.append(local_addr)
            .append(" --> DATA(")
            .append(dst)
            .append(": #")
            .append(seqno)
            .append(", conn_id=")
            .append(send_conn_id);
        if (hdr.first) sb.append(", first");
        sb.append(')');
        log.trace(sb);
      }

      try {
        down_prot.down(evt);
        num_msgs_sent++;
        num_bytes_sent += msg.getLength();
      } catch (Throwable t) {
        log.warn("failed sending the message", t);
      }
      return null; // we already passed the msg down

    case Event.VIEW_CHANGE: // remove connections to peers that are not members anymore !
      View view = (View) evt.getArg();
      List<Address> new_members = view.getMembers();
      // start from everyone we have a connection with, then subtract the current members
      Set<Address> non_members = new HashSet<Address>(send_table.keySet());
      non_members.addAll(recv_table.keySet());

      synchronized (members) {
        members.clear();
        if (new_members != null) members.addAll(new_members);
        non_members.removeAll(members);
        if (cache != null) {
          cache.removeAll(members);
        }
      }

      if (!non_members.isEmpty()) {
        if (log.isTraceEnabled()) log.trace("removing non members " + non_members);
        for (Address non_mbr : non_members) removeConnection(non_mbr);
      }
      break;

    case Event.SET_LOCAL_ADDRESS:
      local_addr = (Address) evt.getArg();
      break;
  }
  return down_prot.down(evt); // Pass on to the layer below us
}