/** An event is to be sent down the stack. The layer may want to examine the event's type and perform some action on it, depending on that type. If the event is a message (MSG), the layer may need to add a header to it (or do nothing at all) before sending it further down the stack using <code>down_prot.down()</code>. In the case of a GET_ADDRESS event (which tries to retrieve the stack's address from one of the bottom layers), the layer may need to send a new response event back up the stack using <code>up_prot.up()</code>. The PING protocol is interested in several down events: Event.FIND_INITIAL_MBRS - sent by the GMS layer, expecting the list of discovery responses in return; Event.TMP_VIEW and Event.VIEW_CHANGE - view change events; Event.BECOME_SERVER - sent after the client has joined and is a fully working group member; Event.CONNECT and Event.DISCONNECT. */ @SuppressWarnings("unchecked") public Object down(Event evt) { switch (evt.getType()) { case Event.FIND_INITIAL_MBRS: // sent by GMS layer case Event.FIND_ALL_VIEWS: // sends the GET_MBRS_REQ to all members, waits 'timeout' ms or until 'num_initial_members' // have been retrieved long start = System.currentTimeMillis(); boolean find_all_views = evt.getType() == Event.FIND_ALL_VIEWS; Promise<JoinRsp> promise = (Promise<JoinRsp>) evt.getArg(); List<PingData> rsps = find_all_views ? findAllViews(promise) : findInitialMembers(promise); long diff = System.currentTimeMillis() - start; if (log.isTraceEnabled()) log.trace("discovery took " + diff + " ms: responses: " + Util.printPingData(rsps)); return rsps; case Event.TMP_VIEW: case Event.VIEW_CHANGE: List<Address> tmp; view = (View) evt.getArg(); if ((tmp = view.getMembers()) != null) { synchronized (members) { members.clear(); members.addAll(tmp); } } current_coord = !members.isEmpty() ? members.get(0) : null; is_coord = current_coord != null && local_addr != null && current_coord.equals(local_addr); return down_prot.down(evt); case Event.BECOME_SERVER: // called after client has joined and is fully working group member down_prot.down(evt); is_server = true; return null; case Event.SET_LOCAL_ADDRESS: local_addr = (Address) evt.getArg(); return down_prot.down(evt); case Event.CONNECT: case Event.CONNECT_WITH_STATE_TRANSFER: case Event.CONNECT_USE_FLUSH: case Event.CONNECT_WITH_STATE_TRANSFER_USE_FLUSH: is_leaving = false; group_addr = (String) evt.getArg(); Object ret = down_prot.down(evt); handleConnect(); return ret; case Event.DISCONNECT: is_leaving = true; handleDisconnect(); return down_prot.down(evt); default: return down_prot.down(evt); // Pass on to the layer below us } }
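// Usage sketch (not part of the method above): how a protocol sitting above this discovery layer
// can trigger the Event.FIND_INITIAL_MBRS branch. The enclosing method and names are hypothetical;
// Event, Promise, JoinRsp and PingData are the JGroups types already referenced above, and
// down_prot is the caller's own reference to the layer below it.
@SuppressWarnings("unchecked")
protected java.util.List<PingData> discoverInitialMembers() {
    Promise<JoinRsp> promise = new Promise<JoinRsp>(); // may be fulfilled early if a JOIN_RSP arrives
    // the discovery layer collects the responses and returns them directly from down():
    return (java.util.List<PingData>) down_prot.down(new Event(Event.FIND_INITIAL_MBRS, promise));
}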
public Object up(Event evt) { switch (evt.getType()) { case Event.MSG: Message msg = (Message) evt.getArg(); LockingHeader hdr = (LockingHeader) msg.getHeader(id); if (hdr == null) break; Request req = (Request) msg.getObject(); if (log.isTraceEnabled()) log.trace("[" + local_addr + "] <-- [" + msg.getSrc() + "] " + req); switch (req.type) { case GRANT_LOCK: case RELEASE_LOCK: handleLockRequest(req); break; case LOCK_GRANTED: handleLockGrantedResponse(req.lock_name, req.owner, msg.getSrc()); break; case LOCK_DENIED: handleLockDeniedResponse(req.lock_name, req.owner); break; case CREATE_LOCK: handleCreateLockRequest(req.lock_name, req.owner); break; case DELETE_LOCK: handleDeleteLockRequest(req.lock_name); break; case COND_SIG: case COND_SIG_ALL: handleSignalRequest(req); break; case LOCK_AWAIT: handleAwaitRequest(req.lock_name, req.owner); handleLockRequest(req); break; case DELETE_LOCK_AWAIT: handleDeleteAwaitRequest(req.lock_name, req.owner); break; case SIG_RET: handleSignalResponse(req.lock_name, req.owner); break; case CREATE_AWAITER: handleCreateAwaitingRequest(req.lock_name, req.owner); break; case DELETE_AWAITER: handleDeleteAwaitingRequest(req.lock_name, req.owner); break; default: log.error("Request of type " + req.type + " not known"); break; } return null; case Event.VIEW_CHANGE: handleView((View) evt.getArg()); break; } return up_prot.up(evt); }
public Object down(Event evt) { switch (evt.getType()) { case Event.TMP_VIEW: case Event.VIEW_CHANGE: handleViewChange((View) evt.getArg()); break; case Event.GET_STATE: Address target; StateTransferInfo info = (StateTransferInfo) evt.getArg(); if (info.target == null) { target = determineCoordinator(); } else { target = info.target; if (target.equals(local_addr)) { log.error("%s: cannot fetch state from myself", local_addr); target = null; } } if (target == null) { log.debug("%s: first member (no state)", local_addr); up_prot.up(new Event(Event.GET_STATE_OK, new StateTransferInfo())); } else { Message state_req = new Message(target) .putHeader(this.id, new StateHeader(StateHeader.STATE_REQ)) .setFlag(Message.Flag.DONT_BUNDLE, Message.Flag.OOB, Message.Flag.SKIP_BARRIER); log.debug("%s: asking %s for state", local_addr, target); // suspend sending and handling of message garbage collection gossip messages, // fixes bugs #943480 and #938584). Wake up when state has been received /*if(log.isDebugEnabled()) log.debug("passing down a SUSPEND_STABLE event"); down_prot.down(new Event(Event.SUSPEND_STABLE, new Long(info.timeout)));*/ waiting_for_state_response = true; start = System.currentTimeMillis(); down_prot.down(new Event(Event.MSG, state_req)); } return null; // don't pass down any further ! case Event.CONFIG: Map<String, Object> config = (Map<String, Object>) evt.getArg(); if (config != null && config.containsKey("flush_supported")) { flushProtocolInStack = true; } break; case Event.SET_LOCAL_ADDRESS: local_addr = (Address) evt.getArg(); break; } return down_prot.down(evt); // pass on to the layer below us }
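// Usage sketch: the usual trigger for the Event.GET_STATE branch above. The application asks the
// channel for the group state, and the channel passes a GET_STATE event down to this protocol.
// "channel" stands for a connected JChannel whose Receiver implements getState()/setState();
// the timeout value is illustrative.
try {
    channel.getState(null, 10000); // null target = fetch from the coordinator, wait up to 10 s
} catch (Exception e) {
    // state could not be fetched (timeout, no state provider, ...)
}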
/** Handle an UP event. @param evt the event being sent up the stack */ public void up(Event evt) { switch (evt.getType()) { case Event.CONFIG: passUp(evt); if (Trace.trace) { Trace.info("UDP.up()", "received CONFIG event: " + evt.getArg()); } handleConfigEvent((HashMap) evt.getArg()); return; } passUp(evt); }
/** Sends an event down the protocol stack. Note that, contrary to {@link #send(Message)}, if the event is a message, no checks are performed as to whether the channel is closed or disconnected. @param evt the message to send down, encapsulated in an event */ public Object down(Event evt) { if (evt == null) return null; if (stats && evt.getType() == Event.MSG) { sent_msgs++; sent_bytes += ((Message) evt.getArg()).getLength(); } return prot_stack.down(evt); }
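// Usage sketch for down(): wrapping a Message in an Event and pushing it straight down the stack.
// As noted above, unlike send(Message) this performs no closed/disconnected checks. "ch" stands
// for an already created JChannel; checked exceptions are not involved here.
Message msg = new Message(null, null, "hello"); // dest=null: send to the whole group; src is filled in by the stack
ch.down(new Event(Event.MSG, msg));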
public Object up(Event evt) { switch (evt.getType()) { case Event.VIEW_CHANGE: handleView(evt.getArg()); break; } return up_prot.up(evt); }
protected void _connect(Event connect_event) throws Exception { try { down(connect_event); } catch (Throwable t) { stopStack(true, false); state = State.OPEN; init(); throw new Exception("connecting to channel \"" + connect_event.getArg() + "\" failed", t); } }
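// Usage sketch: how a public connect() implementation above would typically drive _connect().
// Event.CONNECT carries the cluster name as its argument, which is also what the error message
// above reports via connect_event.getArg(). The cluster name here is illustrative, and the call
// is assumed to run inside a method that may throw Exception.
Event connect_event = new Event(Event.CONNECT, "demo-cluster");
_connect(connect_event);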
public Object down(Event evt) { switch (evt.getType()) { case Event.MSG: Message msg = (Message) evt.getArg(); if (msg.getLength() == 0 && !encrypt_entire_message) break; try { if (queue_down) { log.trace("queueing down message as no session key established: %s", msg); downMessageQueue.put(msg); // queue messages if we are waiting for a new key } else { // make sure the down queue is drained first to keep ordering if (!suppliedKey) drainDownQueue(); encryptAndSend(msg); } } catch (Exception e) { log.warn("unable to send message down", e); } return null; case Event.VIEW_CHANGE: View view = (View) evt.getArg(); log.debug("new view: " + view); if (!suppliedKey) handleViewChange(view, false); break; case Event.SET_LOCAL_ADDRESS: local_addr = (Address) evt.getArg(); log.debug("set local address to %s", local_addr); break; case Event.TMP_VIEW: view = (View) evt.getArg(); if (!suppliedKey) { // if a tmp_view then we are trying to become coordinator so // make us keyserver handleViewChange(view, true); } break; } return down_prot.down(evt); }
public Object up(Event evt) { switch (evt.getType()) { case Event.VIEW_CHANGE: View view = (View) evt.getArg(); log.debug("new view: " + view); if (!suppliedKey) handleViewChange(view, false); break; case Event.TMP_VIEW: view = (View) evt.getArg(); if (!suppliedKey) handleViewChange(view, true); break; // we try and decrypt all messages case Event.MSG: try { return handleUpMessage(evt); } catch (Exception e) { log.warn("exception occurred decrypting message", e); } return null; } return up_prot.up(evt); }
public Object up(Event evt) { switch (evt.getType()) { case Event.MSG: Message msg = (Message) evt.getArg(); StateHeader hdr = (StateHeader) msg.getHeader(this.id); if (hdr == null) break; switch (hdr.type) { case StateHeader.STATE_REQ: state_requesters.add(msg.getSrc()); break; case StateHeader.STATE_RSP: handleStateRsp(hdr.getDigest(), msg.getSrc(), msg.getBuffer()); break; case StateHeader.STATE_EX: closeHoleFor(msg.getSrc()); handleException((Throwable) msg.getObject()); break; default: log.error("%s: type %s not known in StateHeader", local_addr, hdr.type); break; } return null; case Event.TMP_VIEW: case Event.VIEW_CHANGE: handleViewChange((View) evt.getArg()); break; case Event.CONFIG: Map<String, Object> config = (Map<String, Object>) evt.getArg(); if (config != null && config.containsKey("state_transfer")) log.error( Util.getMessage( "ProtocolStackCannotContainTwoStateTransferProtocolsRemoveEitherOneOfThem")); break; } return up_prot.up(evt); }
void handleDownEvent(Event evt) { switch (evt.getType()) { case Event.TMP_VIEW: case Event.VIEW_CHANGE: synchronized (members) { members.removeAllElements(); Vector tmpvec = ((View) evt.getArg()).getMembers(); for (int i = 0; i < tmpvec.size(); i++) { members.addElement(tmpvec.elementAt(i)); } } break; case Event.GET_LOCAL_ADDRESS: // return local address -> Event(SET_LOCAL_ADDRESS, local) passUp(new Event(Event.SET_LOCAL_ADDRESS, local_addr)); break; case Event.CONNECT: group_addr = (String) evt.getArg(); udp_hdr = new UdpHeader(group_addr); // removed March 18 2003 (bela), not needed (handled by GMS) // changed July 2 2003 (bela): we discard CONNECT_OK at the GMS level anyway, this might // be needed if we run without GMS though passUp(new Event(Event.CONNECT_OK)); break; case Event.DISCONNECT: passUp(new Event(Event.DISCONNECT_OK)); break; case Event.CONFIG: if (Trace.trace) { Trace.info("UDP.down()", "received CONFIG event: " + evt.getArg()); } handleConfigEvent((HashMap) evt.getArg()); break; } }
private Object handleUpMessage(Event evt) throws Exception { Message msg = (Message) evt.getArg(); EncryptHeader hdr; if (msg == null || (msg.getLength() == 0 && !encrypt_entire_message) || ((hdr = (EncryptHeader) msg.getHeader(this.id)) == null)) return up_prot.up(evt); if (log.isTraceEnabled()) log.trace("header received %s", hdr); switch (hdr.getType()) { case EncryptHeader.ENCRYPT: return handleEncryptedMessage(msg, evt, hdr); default: handleUpEvent(msg, hdr); return null; } }
public Object up(Event evt) { Message msg; Address dst, src; Unicast2Header hdr; switch (evt.getType()) { case Event.MSG: msg = (Message) evt.getArg(); dst = msg.getDest(); if (dst == null || msg.isFlagSet(Message.NO_RELIABILITY)) // only handle unicast messages break; // pass up // changed from removeHeader(): we cannot remove the header because if we do loopback=true // at the // transport level, we will not have the header on retransmit ! (bela Aug 22 2006) hdr = (Unicast2Header) msg.getHeader(this.id); if (hdr == null) break; src = msg.getSrc(); switch (hdr.type) { case Unicast2Header.DATA: // received regular message handleDataReceived(src, hdr.seqno, hdr.conn_id, hdr.first, msg, evt); return null; // we pass the deliverable message up in handleDataReceived() case Unicast2Header.XMIT_REQ: // received ACK for previously sent message handleXmitRequest(src, hdr.seqno, hdr.high_seqno); break; case Unicast2Header.SEND_FIRST_SEQNO: handleResendingOfFirstMessage(src, hdr.seqno); break; case Unicast2Header.STABLE: stable(msg.getSrc(), hdr.conn_id, hdr.seqno, hdr.high_seqno); break; default: log.error("UnicastHeader type " + hdr.type + " not known !"); break; } return null; } return up_prot.up(evt); // Pass up to the layer above us }
public Object up(Event evt) { switch (evt.getType()) { case Event.MSG: Message msg = (Message) evt.getArg(); if (msg.getDest() == null || msg.isFlagSet(Message.Flag.NO_RELIABILITY)) // only handle unicast messages break; // pass up UnicastHeader hdr = (UnicastHeader) msg.getHeader(this.id); if (hdr == null) break; Address sender = msg.getSrc(); switch (hdr.type) { case UnicastHeader.DATA: // received regular message handleDataReceived(sender, hdr.seqno, hdr.conn_id, hdr.first, msg, evt); break; default: handleUpEvent(sender, hdr); break; } return null; } return up_prot.up(evt); // Pass up to the layer above us }
public Object down(Event evt) { switch (evt.getType()) { case Event.MSG: // Add UnicastHeader, add to AckSenderWindow and pass down Message msg = (Message) evt.getArg(); Address dst = msg.getDest(); /* only handle unicast messages */ if (dst == null || msg.isFlagSet(Message.NO_RELIABILITY)) break; if (!started) { if (log.isTraceEnabled()) log.trace("discarded message as start() has not yet been called, message: " + msg); return null; } SenderEntry entry = send_table.get(dst); if (entry == null) { entry = new SenderEntry(getNewConnectionId()); SenderEntry existing = send_table.putIfAbsent(dst, entry); if (existing != null) entry = existing; else { if (log.isTraceEnabled()) log.trace( local_addr + ": created connection to " + dst + " (conn_id=" + entry.send_conn_id + ")"); if (cache != null && !members.contains(dst)) cache.add(dst); } } long seqno = -2; short send_conn_id = -1; Unicast2Header hdr; entry.lock(); // threads will only sync if they access the same entry try { seqno = entry.sent_msgs_seqno; send_conn_id = entry.send_conn_id; hdr = Unicast2Header.createDataHeader(seqno, send_conn_id, seqno == DEFAULT_FIRST_SEQNO); msg.putHeader(this.id, hdr); entry.sent_msgs.addToMessages( seqno, msg); // add *including* UnicastHeader, adds to retransmitter entry.sent_msgs_seqno++; entry.update(); } finally { entry.unlock(); } if (log.isTraceEnabled()) { StringBuilder sb = new StringBuilder(); sb.append(local_addr) .append(" --> DATA(") .append(dst) .append(": #") .append(seqno) .append(", conn_id=") .append(send_conn_id); if (hdr.first) sb.append(", first"); sb.append(')'); log.trace(sb); } try { down_prot.down(evt); num_msgs_sent++; num_bytes_sent += msg.getLength(); } catch (Throwable t) { log.warn("failed sending the message", t); } return null; // we already passed the msg down case Event.VIEW_CHANGE: // remove connections to peers that are not members anymore ! View view = (View) evt.getArg(); List<Address> new_members = view.getMembers(); Set<Address> non_members = new HashSet<Address>(send_table.keySet()); non_members.addAll(recv_table.keySet()); synchronized (members) { members.clear(); if (new_members != null) members.addAll(new_members); non_members.removeAll(members); if (cache != null) { cache.removeAll(members); } } if (!non_members.isEmpty()) { if (log.isTraceEnabled()) log.trace("removing non members " + non_members); for (Address non_mbr : non_members) removeConnection(non_mbr); } break; case Event.SET_LOCAL_ADDRESS: local_addr = (Address) evt.getArg(); break; } return down_prot.down(evt); // Pass on to the layer below us }
public Object down(Event evt) { switch (evt.getType()) { case Event.LOCK: LockInfo info = (LockInfo) evt.getArg(); ClientLock lock = getLock(info.getName()); if (!info.isTrylock()) { if (info.isLockInterruptibly()) { try { lock.lockInterruptibly(); } catch (InterruptedException e) { Thread.currentThread() .interrupt(); // has to be checked by caller who has to rethrow ... } } else lock.lock(); } else { if (info.isUseTimeout()) { try { return lock.tryLock(info.getTimeout(), info.getTimeUnit()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } } else { return lock.tryLock(); } } return null; case Event.UNLOCK: info = (LockInfo) evt.getArg(); lock = getLock(info.getName(), false); if (lock != null) lock.unlock(); return null; case Event.UNLOCK_ALL: unlockAll(); return null; case Event.LOCK_AWAIT: info = (LockInfo) evt.getArg(); lock = getLock(info.getName(), false); if (lock == null || !lock.acquired) { throw new IllegalMonitorStateException(); } Condition condition = lock.newCondition(); if (info.isUseTimeout()) { try { return condition.awaitNanos(info.getTimeUnit().toNanos(info.getTimeout())); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } } else if (info.isLockInterruptibly()) { try { condition.await(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } } else { condition.awaitUninterruptibly(); } break; case Event.LOCK_SIGNAL: AwaitInfo awaitInfo = (AwaitInfo) evt.getArg(); lock = getLock(awaitInfo.getName(), false); if (lock == null || !lock.acquired) { throw new IllegalMonitorStateException(); } sendSignalConditionRequest(awaitInfo.getName(), awaitInfo.isAll()); break; case Event.SET_LOCAL_ADDRESS: local_addr = (Address) evt.getArg(); break; case Event.VIEW_CHANGE: handleView((View) evt.getArg()); break; } return down_prot.down(evt); }
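// Usage sketch: applications normally do not emit Event.LOCK / Event.UNLOCK themselves; they use
// org.jgroups.blocks.locking.LockService, which creates these events for the locking protocol
// shown above. "channel" stands for a connected JChannel; the lock name is illustrative.
LockService lock_service = new LockService(channel);
java.util.concurrent.locks.Lock lock = lock_service.getLock("demo-lock");
lock.lock();
try {
    // critical section, executed while holding the cluster-wide lock
} finally {
    lock.unlock();
}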
public Object down(Event evt) { switch (evt.getType()) { case ExecutorEvent.TASK_SUBMIT: Runnable runnable = evt.getArg(); // We are limited to a number of concurrent request id's // equal to 2^63-1. This is quite large and if it // overflows it will still be positive long requestId = Math.abs(counter.getAndIncrement()); if (requestId == Long.MIN_VALUE) { // TODO: need to fix this it isn't safe for concurrent modifications counter.set(0); requestId = Math.abs(counter.getAndIncrement()); } // Need to make sure to put the requestId in our map before // adding the runnable to awaiting consumer in case if // coordinator sent a consumer found and their original task // is no longer around // see https://issues.jboss.org/browse/JGRP-1744 _requestId.put(runnable, requestId); _awaitingConsumer.add(runnable); sendToCoordinator(RUN_REQUEST, requestId, local_addr); break; case ExecutorEvent.CONSUMER_READY: Thread currentThread = Thread.currentThread(); long threadId = currentThread.getId(); _consumerId.put(threadId, PRESENT); try { for (; ; ) { CyclicBarrier barrier = new CyclicBarrier(2); _taskBarriers.put(threadId, barrier); // We only send to the coordinator that we are ready after // making the barrier, wait for request to come and let // us free sendToCoordinator(Type.CONSUMER_READY, threadId, local_addr); try { barrier.await(); break; } catch (BrokenBarrierException e) { if (log.isDebugEnabled()) log.debug( "Producer timed out before we picked up" + " the task, have to tell coordinator" + " we are still good."); } } // This should always be non nullable since the latch // was freed runnable = _tasks.remove(threadId); _runnableThreads.put(runnable, currentThread); return runnable; } catch (InterruptedException e) { if (log.isDebugEnabled()) log.debug("Consumer " + threadId + " stopped via interrupt"); sendToCoordinator(Type.CONSUMER_UNREADY, threadId, local_addr); Thread.currentThread().interrupt(); } finally { // Make sure the barriers are cleaned up as well _taskBarriers.remove(threadId); _consumerId.remove(threadId); } break; case ExecutorEvent.TASK_COMPLETE: Object arg = evt.getArg(); Throwable throwable = null; if (arg instanceof Object[]) { Object[] array = (Object[]) arg; runnable = (Runnable) array[0]; throwable = (Throwable) array[1]; } else { runnable = (Runnable) arg; } Owner owner = _running.remove(runnable); // This won't remove anything if owner doesn't come back _runnableThreads.remove(runnable); Object value = null; boolean exception = false; if (throwable != null) { // InterruptedException is special telling us that // we interrupted the thread while waiting but still got // a task therefore we have to reject it. if (throwable instanceof InterruptedException) { if (log.isDebugEnabled()) log.debug("Run rejected due to interrupted exception returned"); sendRequest(owner.address, Type.RUN_REJECTED, owner.requestId, null); break; } value = throwable; exception = true; } else if (runnable instanceof RunnableFuture<?>) { RunnableFuture<?> future = (RunnableFuture<?>) runnable; boolean interrupted = false; boolean gotValue = false; // We have the value, before we interrupt at least get it! 
while (!gotValue) { try { value = future.get(); gotValue = true; } catch (InterruptedException e) { interrupted = true; } catch (ExecutionException e) { value = e.getCause(); exception = true; gotValue = true; } } if (interrupted) { Thread.currentThread().interrupt(); } } if (owner != null) { final Type type; final Object valueToSend; if (value == null) { type = Type.RESULT_SUCCESS; valueToSend = value; } // Both serializable values and exceptions would go in here else if (value instanceof Serializable || value instanceof Externalizable || value instanceof Streamable) { type = exception ? Type.RESULT_EXCEPTION : Type.RESULT_SUCCESS; valueToSend = value; } // This would happen if the value wasn't serializable, // so we have to send back to the client that the class // wasn't serializable else { type = Type.RESULT_EXCEPTION; valueToSend = new NotSerializableException(value.getClass().getName()); } if (local_addr.equals(owner.getAddress())) { if (log.isTraceEnabled()) log.trace( "[redirect] <--> [" + local_addr + "] " + type.name() + " [" + value + (owner.requestId != -1 ? " request id: " + owner.requestId : "") + "]"); if (type == Type.RESULT_SUCCESS) { handleValueResponse(local_addr, owner.requestId, valueToSend); } else if (type == Type.RESULT_EXCEPTION) { handleExceptionResponse(local_addr, owner.requestId, (Throwable) valueToSend); } } else { sendRequest(owner.getAddress(), type, owner.requestId, valueToSend); } } else { if (log.isTraceEnabled()) { log.trace("Could not return result - most likely because it was interrupted"); } } break; case ExecutorEvent.TASK_CANCEL: Object[] array = evt.getArg(); runnable = (Runnable) array[0]; if (_awaitingConsumer.remove(runnable)) { _requestId.remove(runnable); ExecutorNotification notification = notifiers.remove(runnable); if (notification != null) { notification.interrupted(runnable); } if (log.isTraceEnabled()) log.trace("Cancelled task " + runnable + " before it was picked up"); return Boolean.TRUE; } // This is guaranteed to not be null so don't take cost of auto unboxing else if (array[1] == Boolean.TRUE) { owner = removeKeyForValue(_awaitingReturn, runnable); if (owner != null) { Long requestIdValue = _requestId.remove(runnable); // We only cancel if the requestId is still available // this means the result hasn't been returned yet and // we still have a chance to interrupt if (requestIdValue != null) { if (requestIdValue != owner.getRequestId()) { log.warn("Cancelling requestId didn't match waiting"); } sendRequest(owner.getAddress(), Type.INTERRUPT_RUN, owner.getRequestId(), null); } } else { if (log.isTraceEnabled()) log.warn("Couldn't interrupt server task: " + runnable); } ExecutorNotification notification = notifiers.remove(runnable); if (notification != null) { notification.interrupted(runnable); } return Boolean.TRUE; } else { return Boolean.FALSE; } case ExecutorEvent.ALL_TASK_CANCEL: array = evt.getArg(); // This is a RunnableFuture<?> so this cast is okay @SuppressWarnings("unchecked") Set<Runnable> runnables = (Set<Runnable>) array[0]; Boolean booleanValue = (Boolean) array[1]; List<Runnable> notRan = new ArrayList<>(); for (Runnable cancelRunnable : runnables) { // Removed from the consumer if (!_awaitingConsumer.remove(cancelRunnable) && booleanValue == Boolean.TRUE) { synchronized (_awaitingReturn) { owner = removeKeyForValue(_awaitingReturn, cancelRunnable); if (owner != null) { Long requestIdValue = _requestId.remove(cancelRunnable); if (requestIdValue != owner.getRequestId()) { log.warn("Cancelling requestId didn't match waiting"); }
sendRequest(owner.getAddress(), Type.INTERRUPT_RUN, owner.getRequestId(), null); } ExecutorNotification notification = notifiers.remove(cancelRunnable); if (notification != null) { log.trace("Notifying listener"); notification.interrupted(cancelRunnable); } } } else { _requestId.remove(cancelRunnable); notRan.add(cancelRunnable); } } return notRan; case Event.SET_LOCAL_ADDRESS: local_addr = evt.getArg(); break; case Event.VIEW_CHANGE: handleView(evt.getArg()); break; } return down_prot.down(evt); }
public Object down(Event evt) { switch (evt.getType()) { case Event.MSG: // Add UnicastHeader, add to AckSenderWindow and pass down Message msg = (Message) evt.getArg(); Address dst = msg.getDest(); /* only handle unicast messages */ if (dst == null || msg.isFlagSet(Message.NO_RELIABILITY)) break; if (!running) { if (log.isTraceEnabled()) log.trace("discarded message as start() has not yet been called, message: " + msg); return null; } SenderEntry entry = send_table.get(dst); if (entry == null) { entry = new SenderEntry(getNewConnectionId()); SenderEntry existing = send_table.putIfAbsent(dst, entry); if (existing != null) entry = existing; else { if (log.isTraceEnabled()) log.trace( local_addr + ": created connection to " + dst + " (conn_id=" + entry.send_conn_id + ")"); if (cache != null && !members.contains(dst)) cache.add(dst); } } short send_conn_id = entry.send_conn_id; long seqno = entry.sent_msgs_seqno.getAndIncrement(); long sleep = 10; while (running) { try { msg.putHeader( this.id, Unicast2Header.createDataHeader(seqno, send_conn_id, seqno == DEFAULT_FIRST_SEQNO)); entry.sent_msgs.add(seqno, msg); // add *including* UnicastHeader, adds to retransmitter if (conn_expiry_timeout > 0) entry.update(); break; } catch (Throwable t) { if (!running) break; if (log.isWarnEnabled()) log.warn("failed sending message", t); Util.sleep(sleep); sleep = Math.min(5000, sleep * 2); } } if (log.isTraceEnabled()) { StringBuilder sb = new StringBuilder(); sb.append(local_addr) .append(" --> DATA(") .append(dst) .append(": #") .append(seqno) .append(", conn_id=") .append(send_conn_id); if (seqno == DEFAULT_FIRST_SEQNO) sb.append(", first"); sb.append(')'); log.trace(sb); } try { down_prot.down(evt); num_msgs_sent++; } catch (Throwable t) { log.warn("failed sending the message", t); } return null; // we already passed the msg down case Event.VIEW_CHANGE: // remove connections to peers that are not members anymore ! View view = (View) evt.getArg(); List<Address> new_members = view.getMembers(); Set<Address> non_members = new HashSet<Address>(send_table.keySet()); non_members.addAll(recv_table.keySet()); members = new_members; non_members.removeAll(new_members); if (cache != null) cache.removeAll(new_members); if (!non_members.isEmpty()) { if (log.isTraceEnabled()) log.trace("removing non members " + non_members); for (Address non_mbr : non_members) removeConnection(non_mbr); } break; case Event.SET_LOCAL_ADDRESS: local_addr = (Address) evt.getArg(); break; } return down_prot.down(evt); // Pass on to the layer below us }
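// Usage sketch for the unicast path above: a message takes this path simply by carrying a non-null
// destination address, which is what the dst = msg.getDest() check looks at. "channel" is a
// connected JChannel and "target" is some Address taken from the current view; checked exceptions
// from send() are omitted.
Address target = channel.getView().getMembers().get(0); // e.g. the coordinator
channel.send(new Message(target, null, "point-to-point hello"));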
@SuppressWarnings("unchecked") public Object up(Event evt) { switch (evt.getType()) { case Event.MSG: Message msg = (Message) evt.getArg(); PingHeader hdr = (PingHeader) msg.getHeader(this.id); if (hdr == null) return up_prot.up(evt); if (is_leaving) return null; // prevents merging back a leaving member // (https://issues.jboss.org/browse/JGRP-1336) PingData data = readPingData(msg.getRawBuffer(), msg.getOffset(), msg.getLength()); Address logical_addr = data != null ? data.getAddress() : msg.src(); switch (hdr.type) { case PingHeader.GET_MBRS_REQ: // return Rsp(local_addr, coord) if (cluster_name == null || hdr.cluster_name == null) { log.warn( "cluster_name (%s) or cluster_name of header (%s) is null; passing up discovery " + "request from %s, but this should not be the case", cluster_name, hdr.cluster_name, msg.src()); } else { if (!cluster_name.equals(hdr.cluster_name)) { log.warn( "%s: discarding discovery request for cluster '%s' from %s; " + "our cluster name is '%s'. Please separate your clusters properly", logical_addr, hdr.cluster_name, msg.src(), cluster_name); return null; } } // add physical address and logical name of the discovery sender (if available) to the // cache if (data != null) { addDiscoveryResponseToCaches( logical_addr, data.getLogicalName(), data.getPhysicalAddr()); discoveryRequestReceived(msg.getSrc(), data.getLogicalName(), data.getPhysicalAddr()); addResponse(data, false); } if (return_entire_cache) { Map<Address, PhysicalAddress> cache = (Map<Address, PhysicalAddress>) down(new Event(Event.GET_LOGICAL_PHYSICAL_MAPPINGS)); if (cache != null) { for (Map.Entry<Address, PhysicalAddress> entry : cache.entrySet()) { Address addr = entry.getKey(); // JGRP-1492: only return our own address, and addresses in view. if (addr.equals(local_addr) || members.contains(addr)) { PhysicalAddress physical_addr = entry.getValue(); sendDiscoveryResponse( addr, physical_addr, UUID.get(addr), msg.getSrc(), isCoord(addr)); } } } return null; } // Only send a response if hdr.mbrs is not empty and contains myself. Otherwise always // send my info Collection<? extends Address> mbrs = data != null ? data.mbrs() : null; boolean send_response = mbrs == null || mbrs.contains(local_addr); if (send_response) { PhysicalAddress physical_addr = (PhysicalAddress) down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr)); sendDiscoveryResponse( local_addr, physical_addr, UUID.get(local_addr), msg.getSrc(), is_coord); } return null; case PingHeader.GET_MBRS_RSP: // add physical address (if available) to transport's cache if (data != null) { log.trace("%s: received GET_MBRS_RSP from %s: %s", local_addr, msg.src(), data); handleDiscoveryResponse(data, msg.src()); } return null; default: log.warn("got PING header with unknown type %d", hdr.type); return null; } case Event.FIND_MBRS: return findMembers( (List<Address>) evt.getArg(), false, true); // this is done asynchronously } return up_prot.up(evt); }
protected Object handleUpEvent(Event evt) throws Exception { switch (evt.getType()) { case Event.MSG: if (msg_listener != null) msg_listener.receive((Message) evt.getArg()); break; case Event.GET_APPLSTATE: // reply with GET_APPLSTATE_OK byte[] tmp_state = null; if (msg_listener != null) { ByteArrayOutputStream output = new ByteArrayOutputStream(1024); msg_listener.getState(output); tmp_state = output.toByteArray(); } return new StateTransferInfo(null, 0L, tmp_state); case Event.GET_STATE_OK: if (msg_listener != null) { StateTransferResult result = (StateTransferResult) evt.getArg(); if (result.hasBuffer()) { ByteArrayInputStream input = new ByteArrayInputStream(result.getBuffer()); msg_listener.setState(input); } } break; case Event.STATE_TRANSFER_OUTPUTSTREAM: OutputStream os = (OutputStream) evt.getArg(); if (msg_listener != null && os != null) { msg_listener.getState(os); } break; case Event.STATE_TRANSFER_INPUTSTREAM: InputStream is = (InputStream) evt.getArg(); if (msg_listener != null && is != null) msg_listener.setState(is); break; case Event.VIEW_CHANGE: View v = (View) evt.getArg(); List<Address> new_mbrs = v.getMembers(); setMembers(new_mbrs); if (membership_listener != null) membership_listener.viewAccepted(v); break; case Event.SET_LOCAL_ADDRESS: if (log.isTraceEnabled()) log.trace("setting local_addr (" + local_addr + ") to " + evt.getArg()); local_addr = (Address) evt.getArg(); break; case Event.SUSPECT: if (membership_listener != null) membership_listener.suspect((Address) evt.getArg()); break; case Event.BLOCK: if (membership_listener != null) membership_listener.block(); break; case Event.UNBLOCK: if (membership_listener != null) membership_listener.unblock(); break; } return null; }
/** * An event was received from the layer below. Usually the current layer will want to examine the * event type and - depending on its type - perform some computation (e.g. removing headers from a * MSG event type, or updating the internal membership list when receiving a VIEW_CHANGE event). * Finally the event is either a) discarded, or b) an event is sent down the stack using <code> * PassDown</code> or c) the event (or another event) is sent up the stack using <code>PassUp * </code>. * * <p>For the PING protocol, the Up operation does the following things. 1. If the event is a * Event.MSG then PING will inspect the message header. If the header is null, PING simply passes * up the event If the header is PingHeader.GET_MBRS_REQ then the PING protocol will PassDown a * PingRequest message If the header is PingHeader.GET_MBRS_RSP we will add the message to the * initial members vector and wake up any waiting threads. 2. If the event is Event.SET_LOCAL_ADDR * we will simple set the local address of this protocol 3. For all other messages we simple pass * it up to the protocol above * * @param evt - the event that has been sent from the layer below */ @SuppressWarnings("unchecked") public Object up(Event evt) { switch (evt.getType()) { case Event.MSG: Message msg = (Message) evt.getArg(); PingHeader hdr = (PingHeader) msg.getHeader(this.id); if (hdr == null) return up_prot.up(evt); PingData data = hdr.data; Address logical_addr = data != null ? data.getAddress() : null; if (is_leaving) return null; // prevents merging back a leaving member // (https://issues.jboss.org/browse/JGRP-1336) switch (hdr.type) { case PingHeader.GET_MBRS_REQ: // return Rsp(local_addr, coord) if (group_addr == null || hdr.cluster_name == null) { if (log.isWarnEnabled()) log.warn( "group_addr (" + group_addr + ") or cluster_name of header (" + hdr.cluster_name + ") is null; passing up discovery request from " + msg.getSrc() + ", but this should not" + " be the case"); } else { if (!group_addr.equals(hdr.cluster_name)) { if (log.isWarnEnabled()) log.warn( "discarding discovery request for cluster '" + hdr.cluster_name + "' from " + msg.getSrc() + "; our cluster name is '" + group_addr + "'. " + "Please separate your clusters cleanly."); return null; } } // add physical address and logical name of the discovery sender (if available) to the // cache if (data != null) { if (logical_addr == null) logical_addr = msg.getSrc(); Collection<PhysicalAddress> physical_addrs = data.getPhysicalAddrs(); PhysicalAddress physical_addr = physical_addrs != null && !physical_addrs.isEmpty() ? physical_addrs.iterator().next() : null; if (logical_addr != null && physical_addr != null) down( new Event( Event.SET_PHYSICAL_ADDRESS, new Tuple<Address, PhysicalAddress>(logical_addr, physical_addr))); if (logical_addr != null && data.getLogicalName() != null) UUID.add(logical_addr, data.getLogicalName()); discoveryRequestReceived(msg.getSrc(), data.getLogicalName(), physical_addrs); synchronized (ping_responses) { for (Responses response : ping_responses) { response.addResponse(data, false); } } } if (hdr.view_id != null) { // If the discovery request is merge-triggered, and the ViewId shipped with it // is the same as ours, we don't respond (JGRP-1315). ViewId my_view_id = view != null ? 
view.getViewId() : null; if (my_view_id != null && my_view_id.equals(hdr.view_id)) return null; boolean send_discovery_rsp = force_sending_discovery_rsps || is_coord || current_coord == null || current_coord.equals(msg.getSrc()); if (!send_discovery_rsp) { if (log.isTraceEnabled()) log.trace( local_addr + ": suppressing merge response as I'm not a coordinator and the " + "discovery request was not sent by a coordinator"); return null; } } if (isMergeRunning()) { if (log.isTraceEnabled()) log.trace( local_addr + ": suppressing merge response as a merge is already in progress"); return null; } List<PhysicalAddress> physical_addrs = hdr.view_id != null ? null : Arrays.asList( (PhysicalAddress) down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr))); sendDiscoveryResponse( local_addr, physical_addrs, is_server, hdr.view_id != null, UUID.get(local_addr), msg.getSrc()); return null; case PingHeader.GET_MBRS_RSP: // add response to vector and notify waiting thread // add physical address (if available) to transport's cache if (data != null) { Address response_sender = msg.getSrc(); if (logical_addr == null) logical_addr = msg.getSrc(); Collection<PhysicalAddress> addrs = data.getPhysicalAddrs(); PhysicalAddress physical_addr = addrs != null && !addrs.isEmpty() ? addrs.iterator().next() : null; if (logical_addr != null && physical_addr != null) down( new Event( Event.SET_PHYSICAL_ADDRESS, new Tuple<Address, PhysicalAddress>(logical_addr, physical_addr))); if (logical_addr != null && data.getLogicalName() != null) UUID.add(logical_addr, data.getLogicalName()); if (log.isTraceEnabled()) log.trace( local_addr + ": received GET_MBRS_RSP from " + response_sender + ": " + data); boolean overwrite = logical_addr != null && logical_addr.equals(response_sender); synchronized (ping_responses) { for (Responses response : ping_responses) { response.addResponse(data, overwrite); } } } return null; default: if (log.isWarnEnabled()) log.warn("got PING header with unknown type (" + hdr.type + ')'); return null; } case Event.GET_PHYSICAL_ADDRESS: try { sendDiscoveryRequest(group_addr, null, null); } catch (InterruptedIOException ie) { if (log.isWarnEnabled()) { log.warn("Discovery request for cluster " + group_addr + " interrupted"); } Thread.currentThread().interrupt(); } catch (Exception ex) { if (log.isErrorEnabled()) log.error("failed sending discovery request", ex); } return null; case Event.FIND_INITIAL_MBRS: // sent by transport return findInitialMembers(null); } return up_prot.up(evt); }
@SuppressWarnings("unchecked") public Object up(Event evt) { switch (evt.getType()) { case Event.MSG: Message msg = (Message) evt.getArg(); GmsHeader hdr = (GmsHeader) msg.getHeader(this.id); if (hdr == null) break; switch (hdr.type) { case GmsHeader.JOIN_REQ: view_handler.add( new Request(Request.JOIN, hdr.mbr, false, null, hdr.useFlushIfPresent)); break; case GmsHeader.JOIN_REQ_WITH_STATE_TRANSFER: view_handler.add( new Request( Request.JOIN_WITH_STATE_TRANSFER, hdr.mbr, false, null, hdr.useFlushIfPresent)); break; case GmsHeader.JOIN_RSP: impl.handleJoinResponse(hdr.join_rsp); break; case GmsHeader.LEAVE_REQ: if (hdr.mbr == null) return null; view_handler.add(new Request(Request.LEAVE, hdr.mbr, false)); break; case GmsHeader.LEAVE_RSP: impl.handleLeaveResponse(); break; case GmsHeader.VIEW: View new_view = hdr.view; if (new_view == null) return null; Address coord = msg.getSrc(); if (!new_view.containsMember(coord)) { sendViewAck( coord); // we need to send the ack first, otherwise the connection is removed impl.handleViewChange(new_view, hdr.my_digest); } else { impl.handleViewChange(new_view, hdr.my_digest); sendViewAck(coord); // send VIEW_ACK to sender of view } break; case GmsHeader.VIEW_ACK: Address sender = msg.getSrc(); ack_collector.ack(sender); return null; // don't pass further up case GmsHeader.MERGE_REQ: impl.handleMergeRequest(msg.getSrc(), hdr.merge_id, hdr.mbrs); break; case GmsHeader.MERGE_RSP: MergeData merge_data = new MergeData(msg.getSrc(), hdr.view, hdr.my_digest, hdr.merge_rejected); if (log.isTraceEnabled()) { log.trace( local_addr + ": got merge response from " + msg.getSrc() + ", merge_id=" + hdr.merge_id + ", merge data is " + merge_data); } impl.handleMergeResponse(merge_data, hdr.merge_id); break; case GmsHeader.INSTALL_MERGE_VIEW: impl.handleMergeView( new MergeData(msg.getSrc(), hdr.view, hdr.my_digest), hdr.merge_id); break; case GmsHeader.INSTALL_DIGEST: Digest tmp = hdr.my_digest; down_prot.down(new Event(Event.MERGE_DIGEST, tmp)); break; case GmsHeader.INSTALL_MERGE_VIEW_OK: // [JGRP-700] - FLUSH: flushing should span merge merge_ack_collector.ack(msg.getSrc()); break; case GmsHeader.CANCEL_MERGE: // [JGRP-524] - FLUSH and merge: flush doesn't wrap entire merge process impl.handleMergeCancelled(hdr.merge_id); break; case GmsHeader.GET_DIGEST_REQ: // only handle this request if it was sent by the coordinator (or at least a member) of // the current cluster synchronized (members) { if (!members.contains(msg.getSrc())) break; } // discard my own request: if (msg.getSrc().equals(local_addr)) return null; if (hdr.merge_id != null && !(merger.matchMergeId(hdr.merge_id) || merger.setMergeId(null, hdr.merge_id))) return null; // fetch only my own digest Digest digest = (Digest) down_prot.down(new Event(Event.GET_DIGEST, local_addr)); if (digest != null) { GmsHeader rsp_hdr = new GmsHeader(GmsHeader.GET_DIGEST_RSP); rsp_hdr.my_digest = digest; Message get_digest_rsp = new Message(msg.getSrc(), null, null); get_digest_rsp.setFlag(Message.OOB); get_digest_rsp.putHeader(this.id, rsp_hdr); down_prot.down(new Event(Event.MSG, get_digest_rsp)); } break; case GmsHeader.GET_DIGEST_RSP: Digest digest_rsp = hdr.my_digest; impl.handleDigestResponse(msg.getSrc(), digest_rsp); break; default: if (log.isErrorEnabled()) log.error("GmsHeader with type=" + hdr.type + " not known"); } return null; // don't pass up case Event.SUSPECT: Object retval = up_prot.up(evt); Address suspected = (Address) evt.getArg(); view_handler.add(new Request(Request.SUSPECT, suspected, true)); 
ack_collector.suspect(suspected); merge_ack_collector.suspect(suspected); return retval; case Event.UNSUSPECT: impl.unsuspect((Address) evt.getArg()); return null; // discard case Event.MERGE: view_handler.add( new Request(Request.MERGE, null, false, (Map<Address, View>) evt.getArg())); return null; // don't pass up case Event.IS_MERGE_IN_PROGRESS: return merger.isMergeInProgress(); } return up_prot.up(evt); }
/** * Callback method <br> * Called by the ProtocolStack when a message is received. * * @param evt the event carrying the message from the protocol stack */ public Object up(Event evt) { switch (evt.getType()) { case Event.MSG: Message msg = (Message) evt.getArg(); if (stats) { received_msgs++; received_bytes += msg.getLength(); } // discard local messages (sent by myself to me) if (discard_own_messages && local_addr != null && msg.getSrc() != null && local_addr.equals(msg.getSrc())) return null; break; case Event.VIEW_CHANGE: View tmp = (View) evt.getArg(); if (tmp instanceof MergeView) my_view = new View(tmp.getViewId(), tmp.getMembers()); else my_view = tmp; // Bela&Vladimir Oct 27th,2006 (JGroups 2.4): we need to set connected=true because a client // can // call channel.getView() in viewAccepted() callback invoked on this thread (see // Event.VIEW_CHANGE handling below) // not good: we are only connected when we returned from connect() - bela June 22 2007 // Changed: when a channel gets a view of which it is a member then it should be // connected even if connect() hasn't returned yet ! (bela Noc 2010) if (state != State.CONNECTED) state = State.CONNECTED; break; case Event.CONFIG: Map<String, Object> cfg = (Map<String, Object>) evt.getArg(); if (cfg != null) { if (cfg.containsKey("state_transfer")) { state_transfer_supported = (Boolean) cfg.get("state_transfer"); } if (cfg.containsKey("flush_supported")) { flush_supported = (Boolean) cfg.get("flush_supported"); } } break; case Event.GET_STATE_OK: StateTransferResult result = (StateTransferResult) evt.getArg(); if (up_handler != null) { try { Object retval = up_handler.up(evt); state_promise.setResult(new StateTransferResult()); return retval; } catch (Throwable t) { state_promise.setResult(new StateTransferResult(t)); } } if (receiver != null) { try { if (result.hasBuffer()) { byte[] tmp_state = result.getBuffer(); ByteArrayInputStream input = new ByteArrayInputStream(tmp_state); receiver.setState(input); } state_promise.setResult(result); } catch (Throwable t) { state_promise.setResult(new StateTransferResult(t)); } } break; case Event.STATE_TRANSFER_INPUTSTREAM_CLOSED: state_promise.setResult((StateTransferResult) evt.getArg()); break; case Event.STATE_TRANSFER_INPUTSTREAM: // Oct 13,2006 moved to down() when Event.STATE_TRANSFER_INPUTSTREAM_CLOSED is received // state_promise.setResult(is != null? Boolean.TRUE : Boolean.FALSE); if (up_handler != null) return up_handler.up(evt); InputStream is = (InputStream) evt.getArg(); if (is != null && receiver != null) { try { receiver.setState(is); } catch (Throwable t) { throw new RuntimeException("failed calling setState() in state requester", t); } } break; case Event.STATE_TRANSFER_OUTPUTSTREAM: if (receiver != null && evt.getArg() != null) { try { receiver.getState((OutputStream) evt.getArg()); } catch (Exception e) { throw new RuntimeException("failed calling getState() in state provider", e); } } break; case Event.GET_LOCAL_ADDRESS: return local_addr; default: break; } // If UpHandler is installed, pass all events to it and return (UpHandler is e.g. a building // block) if (up_handler != null) return up_handler.up(evt); if (receiver != null) return invokeCallback(evt.getType(), evt.getArg()); return null; }
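// Usage sketch: what typically sits on top of this method. When no UpHandler is installed, up()
// ends in invokeCallback(), which dispatches to the installed Receiver. ReceiverAdapter is the
// standard JGroups convenience base class; "channel" is the JChannel whose up() is shown above.
channel.setReceiver(new ReceiverAdapter() {
    @Override
    public void receive(Message msg) {
        System.out.println("received " + msg.getObject() + " from " + msg.getSrc());
    }

    @Override
    public void viewAccepted(View new_view) {
        System.out.println("view changed: " + new_view);
    }
});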
@SuppressWarnings("unchecked") public Object down(Event evt) { int type = evt.getType(); switch (type) { case Event.CONNECT: case Event.CONNECT_USE_FLUSH: case Event.CONNECT_WITH_STATE_TRANSFER: case Event.CONNECT_WITH_STATE_TRANSFER_USE_FLUSH: boolean use_flush = type == Event.CONNECT_USE_FLUSH || type == Event.CONNECT_WITH_STATE_TRANSFER_USE_FLUSH; boolean state_transfer = type == Event.CONNECT_WITH_STATE_TRANSFER || type == Event.CONNECT_WITH_STATE_TRANSFER_USE_FLUSH; if (print_local_addr) { PhysicalAddress physical_addr = print_physical_addrs ? (PhysicalAddress) down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr)) : null; System.out.println( "\n-------------------------------------------------------------------\n" + "GMS: address=" + local_addr + ", cluster=" + evt.getArg() + (physical_addr != null ? ", physical address=" + physical_addr : "") + "\n-------------------------------------------------------------------"); } else { if (log.isDebugEnabled()) { PhysicalAddress physical_addr = print_physical_addrs ? (PhysicalAddress) down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr)) : null; log.debug( "address=" + local_addr + ", cluster=" + evt.getArg() + (physical_addr != null ? ", physical address=" + physical_addr : "")); } } down_prot.down(evt); if (local_addr == null) throw new IllegalStateException("local_addr is null"); try { if (state_transfer) impl.joinWithStateTransfer(local_addr, use_flush); else impl.join(local_addr, use_flush); } catch (Throwable e) { return e; } return null; // don't pass down: event has already been passed down case Event.DISCONNECT: impl.leave((Address) evt.getArg()); if (!(impl instanceof CoordGmsImpl)) { initState(); // in case connect() is called again } down_prot.down(evt); // notify the other protocols, but ignore the result return null; case Event.CONFIG: Map<String, Object> config = (Map<String, Object>) evt.getArg(); if ((config != null && config.containsKey("flush_supported"))) { flushProtocolInStack = true; } break; case Event.SET_LOCAL_ADDRESS: local_addr = (Address) evt.getArg(); break; } return down_prot.down(evt); }
@SuppressWarnings("unchecked") public Object down(Event evt) { switch (evt.getType()) { case Event.FIND_INITIAL_MBRS: // sent by GMS layer return findMembers(null, true, false); // triggered by JOIN process (ClientGmsImpl) case Event.FIND_MBRS: return findMembers( (List<Address>) evt.getArg(), false, false); // triggered by MERGE2/MERGE3 // case Event.TMP_VIEW: case Event.VIEW_CHANGE: List<Address> tmp; View old_view = view; view = (View) evt.getArg(); if ((tmp = view.getMembers()) != null) { synchronized (members) { members.clear(); members.addAll(tmp); } } current_coord = !members.isEmpty() ? members.get(0) : null; is_coord = current_coord != null && local_addr != null && current_coord.equals(local_addr); Object retval = down_prot.down(evt); if (send_cache_on_join && !isDynamic() && is_coord) { List<Address> curr_mbrs, left_mbrs, new_mbrs; synchronized (members) { curr_mbrs = new ArrayList<>(members); left_mbrs = old_view != null ? Util.leftMembers(old_view.getMembers(), members) : null; new_mbrs = old_view != null ? Util.newMembers(old_view.getMembers(), members) : null; } startCacheDissemination(curr_mbrs, left_mbrs, new_mbrs); // separate task } return retval; case Event.BECOME_SERVER: // called after client has joined and is fully working group member down_prot.down(evt); is_server = true; return null; case Event.SET_LOCAL_ADDRESS: local_addr = (Address) evt.getArg(); return down_prot.down(evt); case Event.CONNECT: case Event.CONNECT_WITH_STATE_TRANSFER: case Event.CONNECT_USE_FLUSH: case Event.CONNECT_WITH_STATE_TRANSFER_USE_FLUSH: is_leaving = false; cluster_name = (String) evt.getArg(); Object ret = down_prot.down(evt); handleConnect(); return ret; case Event.DISCONNECT: is_leaving = true; handleDisconnect(); return down_prot.down(evt); default: return down_prot.down(evt); // Pass on to the layer below us } }
/** Called by the layer above this one. Usually we just put this Message into the send queue and let one or more worker threads handle it. A worker thread then removes the Message from the send queue, performs a conversion and adds the modified Message to the send queue of the layer below it, by calling down(). */ public void down(Event evt) { Message msg; Object dest_addr; if (evt.getType() != Event.MSG) { // unless it is a message handle it and respond handleDownEvent(evt); return; } // ****************** profiling ****************** /*if(num_msgs == 0) { start=System.currentTimeMillis(); num_msgs++; } else if(num_msgs >= 1000) { stop=System.currentTimeMillis(); long total_time=stop-start; double msgs_per_msec=num_msgs / (double)total_time; if(Trace.trace) Trace.info("UDP.down.profile()", "total_time=" + total_time + ", msgs/ms=" + msgs_per_msec); num_msgs=0; } else { num_msgs++; }*/ // ****************** profiling ****************** msg = (Message) evt.getArg(); if (udp_hdr != null && udp_hdr.group_addr != null) { // added patch by Roland Kurmann (March 20 2003) msg.putHeader(name, udp_hdr); } dest_addr = msg.getDest(); // Because we don't call Protocol.passDown(), we notify the observer directly (e.g. // PerfObserver). // This way, we still have performance numbers for UDP if (observer != null) { observer.passDown(evt); } if (dest_addr == null) { // 'null' means send to all group members if (ip_mcast) { if (mcast_addr == null) { Trace.error( "UDP.down()", "dest address of message is null, and " + "sending to default address fails as mcast_addr is null, too !" + " Discarding message " + Util.printEvent(evt)); return; } // if we want to use IP multicast, then set the destination of the message msg.setDest(mcast_addr); } else { // sends a separate UDP message to each address sendMultipleUdpMessages(msg, members); return; } } try { sendUdpMessage(msg); } catch (Exception e) { Trace.error("UDP.down()", "exception=" + e + ", msg=" + msg + ", mcast_addr=" + mcast_addr); } }
public Object up(Event evt) { switch (evt.getType()) { case Event.MSG: Message msg = (Message) evt.getArg(); ExecutorHeader hdr = (ExecutorHeader) msg.getHeader(id); if (hdr == null) break; Request req = (Request) msg.getObject(); if (log.isTraceEnabled()) log.trace("[" + local_addr + "] <-- [" + msg.getSrc() + "] " + req); switch (req.type) { case RUN_REQUEST: handleTaskRequest(req.request, (Address) req.object); break; case CONSUMER_READY: handleConsumerReadyRequest(req.request, (Address) req.object); break; case CONSUMER_UNREADY: handleConsumerUnreadyRequest(req.request, (Address) req.object); break; case CONSUMER_FOUND: handleConsumerFoundResponse(req.request, (Address) req.object); break; case RUN_SUBMITTED: RequestWithThread reqWT = (RequestWithThread) req; Object objectToRun = reqWT.object; Runnable runnable; if (objectToRun instanceof Runnable) { runnable = (Runnable) objectToRun; } else if (objectToRun instanceof Callable) { @SuppressWarnings("unchecked") Callable<Object> callable = (Callable<Object>) objectToRun; runnable = new FutureTask<>(callable); } else { log.error( "Request of type " + req.type + " sent an object of " + objectToRun + " which is invalid"); break; } handleTaskSubmittedRequest(runnable, msg.getSrc(), req.request, reqWT.threadId); break; case RUN_REJECTED: // We could make requests local for this, but is it really worth it handleTaskRejectedResponse(msg.getSrc(), req.request); break; case RESULT_SUCCESS: handleValueResponse(msg.getSrc(), req.request, req.object); break; case RESULT_EXCEPTION: handleExceptionResponse(msg.getSrc(), req.request, (Throwable) req.object); break; case INTERRUPT_RUN: // We could make requests local for this, but is it really worth it handleInterruptRequest(msg.getSrc(), req.request); break; case CREATE_CONSUMER_READY: Owner owner = new Owner((Address) req.object, req.request); handleNewConsumer(owner); break; case CREATE_RUN_REQUEST: owner = new Owner((Address) req.object, req.request); handleNewRunRequest(owner); break; case DELETE_CONSUMER_READY: owner = new Owner((Address) req.object, req.request); handleRemoveConsumer(owner); break; case DELETE_RUN_REQUEST: owner = new Owner((Address) req.object, req.request); handleRemoveRunRequest(owner); break; default: log.error("Request of type " + req.type + " not known"); break; } return null; case Event.VIEW_CHANGE: handleView((View) evt.getArg()); break; } return up_prot.up(evt); }