private static void _testMessage(Message msg) throws Exception {
  Buffer buf = Util.messageToByteBuffer(msg);
  Message msg2 = Util.byteBufferToMessage(buf.getBuf(), buf.getOffset(), buf.getLength());
  Assert.assertEquals(msg.getSrc(), msg2.getSrc());
  Assert.assertEquals(msg.getDest(), msg2.getDest());
  Assert.assertEquals(msg.getLength(), msg2.getLength());
}
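// Hypothetical usage sketch (not part of the original source): exercises _testMessage() above
// with a simple payload. Assumes a JGroups 3.x-style Message(Address dest, byte[] buf)
// constructor and Util.createRandomAddress(String); adapt to the Message API of the version in use.
private static void _testMessageRoundTrip() throws Exception {
  Message msg = new Message(null, "hello".getBytes()); // null destination = multicast
  msg.setSrc(Util.createRandomAddress("A")); // give the message a source so the src assertion is meaningful
  _testMessage(msg);
}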
public Object up(Event evt) {
  switch (evt.getType()) {
    case Event.MSG:
      Message msg = (Message) evt.getArg();
      LockingHeader hdr = (LockingHeader) msg.getHeader(id);
      if (hdr == null) break;

      Request req = (Request) msg.getObject();
      if (log.isTraceEnabled())
        log.trace("[" + local_addr + "] <-- [" + msg.getSrc() + "] " + req);
      switch (req.type) {
        case GRANT_LOCK:
        case RELEASE_LOCK:
          handleLockRequest(req);
          break;
        case LOCK_GRANTED:
          handleLockGrantedResponse(req.lock_name, req.owner, msg.getSrc());
          break;
        case LOCK_DENIED:
          handleLockDeniedResponse(req.lock_name, req.owner);
          break;
        case CREATE_LOCK:
          handleCreateLockRequest(req.lock_name, req.owner);
          break;
        case DELETE_LOCK:
          handleDeleteLockRequest(req.lock_name);
          break;
        case COND_SIG:
        case COND_SIG_ALL:
          handleSignalRequest(req);
          break;
        case LOCK_AWAIT:
          handleAwaitRequest(req.lock_name, req.owner);
          handleLockRequest(req);
          break;
        case DELETE_LOCK_AWAIT:
          handleDeleteAwaitRequest(req.lock_name, req.owner);
          break;
        case SIG_RET:
          handleSignalResponse(req.lock_name, req.owner);
          break;
        case CREATE_AWAITER:
          handleCreateAwaitingRequest(req.lock_name, req.owner);
          break;
        case DELETE_AWAITER:
          handleDeleteAwaitingRequest(req.lock_name, req.owner);
          break;
        default:
          log.error("Request of type " + req.type + " not known");
          break;
      }
      return null;

    case Event.VIEW_CHANGE:
      handleView((View) evt.getArg());
      break;
  }
  return up_prot.up(evt);
}
void handleMessage(Message msg) {
  Event evt;
  UdpHeader hdr;

  // discard my own multicast loopback copy
  if (loopback) {
    Address dst = msg.getDest();
    Address src = msg.getSrc();
    if (dst != null && dst.isMulticastAddress() && src != null && local_addr.equals(src)) {
      if (Trace.debug) {
        Trace.info("UDP.handleMessage()", "discarded own loopback multicast packet");
      }
      return;
    }
  }

  evt = new Event(Event.MSG, msg);
  if (Trace.debug) {
    Trace.info("UDP.handleMessage()", "message is " + msg + ", headers are " + msg.getHeaders());
  }

  /* Because Protocol.up() is never called by this bottommost layer, we call up() directly in the observer.
   * This allows e.g. PerfObserver to get the time of reception of a message */
  if (observer != null) {
    observer.up(evt, up_queue.size());
  }

  hdr = (UdpHeader) msg.removeHeader(name);
  if (hdr != null) {
    /* Discard all messages destined for a channel with a different name */
    String ch_name = null;
    if (hdr.group_addr != null) {
      ch_name = hdr.group_addr;
    }

    // Discard if the message's group name is not the same as our group name, unless the
    // message is a diagnosis message (special group name DIAG_GROUP)
    if (ch_name != null
        && group_addr != null
        && !group_addr.equals(ch_name)
        && !ch_name.equals(Util.DIAG_GROUP)) {
      if (Trace.trace) {
        Trace.warn(
            "UDP.handleMessage()",
            "discarded message from different group (" + ch_name + "). Sender was " + msg.getSrc());
      }
      return;
    }
  } else {
    Trace.error("UDP.handleMessage()", "message does not have a UDP header");
  }
  passUp(evt);
}
/** When receiving a message, analyze its content and then execute the command: Draw or Clear. */
public void receive(Message msg) {
  byte[] buf = msg.getRawBuffer();
  if (buf == null) {
    System.err.println(
        "["
            + channel.getAddress()
            + "] received null buffer from "
            + msg.getSrc()
            + ", headers: "
            + msg.printHeaders());
    return;
  }
  try {
    DrawCommand comm =
        (DrawCommand)
            Util.streamableFromByteBuffer(DrawCommand.class, buf, msg.getOffset(), msg.getLength());
    switch (comm.mode) {
      case DrawCommand.DRAW:
        if (drawPanel != null) drawPanel.drawPoint(comm);
        break;
      case DrawCommand.CLEAR:
        clearPanel();
        break; // without this break, CLEAR would fall through into the error branch below
      default:
        System.err.println("***** received invalid draw command " + comm.mode);
        break;
    }
  } catch (Exception e) {
    e.printStackTrace();
  }
}
/**
 * Message contains MethodCall. Execute it against *this* object and return result. Use
 * MethodCall.invoke() to do this. Return result.
 */
public Object handle(Message req) throws Exception {
  if (server_obj == null) {
    log.error(Util.getMessage("NoMethodHandlerIsRegisteredDiscardingRequest"));
    return null;
  }
  if (req == null || req.getLength() == 0) {
    log.error(Util.getMessage("MessageOrMessageBufferIsNull"));
    return null;
  }

  Object body =
      req_marshaller != null
          ? req_marshaller.objectFromBuffer(req.getRawBuffer(), req.getOffset(), req.getLength())
          : req.getObject();

  if (!(body instanceof MethodCall))
    throw new IllegalArgumentException("message does not contain a MethodCall object");

  MethodCall method_call = (MethodCall) body;

  if (log.isTraceEnabled()) log.trace("[sender=%s], method_call: %s", req.getSrc(), method_call);

  if (method_call.getMode() == MethodCall.ID) {
    if (method_lookup == null)
      throw new Exception(
          String.format(
              "MethodCall uses ID=%d, but method_lookup has not been set", method_call.getId()));
    Method m = method_lookup.findMethod(method_call.getId());
    if (m == null) throw new Exception("no method found for " + method_call.getId());
    method_call.setMethod(m);
  }

  return method_call.invoke(server_obj);
}
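// Hypothetical sketch (not part of the original source): how a caller might build a request that
// the handle() method above can process. Assumes the MethodCall(String, Object[], Class[])
// constructor, Util.objectToByteBuffer() and a Message(Address dest, byte[] buf) constructor,
// all present in JGroups 3.x; the target method name "print" is purely illustrative.
Message buildMethodCallRequest(Address dest, String argument) throws Exception {
  MethodCall call = new MethodCall("print", new Object[] {argument}, new Class[] {String.class});
  byte[] buf = Util.objectToByteBuffer(call); // marshal the call into the message payload
  return new Message(dest, buf);
}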
private void encryptAndSend(Message msg) throws Exception {
  EncryptHeader hdr = new EncryptHeader(EncryptHeader.ENCRYPT, getSymVersion());
  if (this.encrypt_entire_message) hdr.type |= EncryptHeader.ENCRYPT_ENTIRE_MSG;

  if (encrypt_entire_message) {
    if (msg.getSrc() == null) msg.setSrc(local_addr);

    Buffer serialized_msg = Util.streamableToBuffer(msg);
    byte[] encrypted_msg =
        code(serialized_msg.getBuf(), serialized_msg.getOffset(), serialized_msg.getLength(), false);

    // exclude existing headers, they will be seen again when we decrypt and unmarshal the msg at
    // the receiver
    Message tmp = msg.copy(false, false).setBuffer(encrypted_msg).putHeader(this.id, hdr);
    down_prot.down(new Event(Event.MSG, tmp));
    return;
  }

  // copy needed because the same message (object) may be retransmitted -> no double encryption
  Message msgEncrypted =
      msg.copy(false)
          .putHeader(this.id, hdr)
          .setBuffer(code(msg.getRawBuffer(), msg.getOffset(), msg.getLength(), false));
  down_prot.down(new Event(Event.MSG, msgEncrypted));
}
public void up(MessageBatch batch) {
  if (batch.dest() == null) { // not a unicast batch
    up_prot.up(batch);
    return;
  }

  int size = batch.size();
  Map<Short, List<Message>> msgs =
      new TreeMap<Short, List<Message>>(); // map of messages, keyed by conn-id
  for (Message msg : batch) {
    if (msg == null || msg.isFlagSet(Message.Flag.NO_RELIABILITY)) continue;
    UnicastHeader hdr = (UnicastHeader) msg.getHeader(id);
    if (hdr == null) continue;
    batch.remove(msg); // remove the message from the batch, so it won't be passed up the stack

    if (hdr.type != UnicastHeader.DATA) {
      try {
        handleUpEvent(msg.getSrc(), hdr);
      } catch (Throwable t) { // we cannot let an exception terminate the processing of this batch
        log.error(local_addr + ": failed handling event", t);
      }
      continue;
    }

    List<Message> list = msgs.get(hdr.conn_id);
    if (list == null) msgs.put(hdr.conn_id, list = new ArrayList<Message>(size));
    list.add(msg);
  }

  if (!msgs.isEmpty()) handleBatchReceived(batch.sender(), msgs); // process msgs
  if (!batch.isEmpty()) up_prot.up(batch);
}
private Message _decrypt(final Cipher cipher, Message msg, boolean decrypt_entire_msg)
    throws Exception {
  byte[] decrypted_msg;
  if (cipher == null)
    decrypted_msg = code(msg.getRawBuffer(), msg.getOffset(), msg.getLength(), true);
  else decrypted_msg = cipher.doFinal(msg.getRawBuffer(), msg.getOffset(), msg.getLength());

  if (!decrypt_entire_msg) {
    msg.setBuffer(decrypted_msg);
    return msg;
  }

  Message ret = Util.streamableFromBuffer(Message.class, decrypted_msg, 0, decrypted_msg.length);
  if (ret.getDest() == null) ret.setDest(msg.getDest());
  if (ret.getSrc() == null) ret.setSrc(msg.getSrc());
  return ret;
}
protected void handleUpEvent(Message msg, EncryptHeader hdr) {
  // check if we had some sort of encrypt control header;
  // if using a supplied key we should not process it
  if (suppliedKey) {
    log.warn("we received an encrypt header of %s while in configured mode", hdr.getType());
    return;
  }

  // see what sort of encrypt control message we have received
  switch (hdr.getType()) {
    // if a key request
    case EncryptHeader.KEY_REQUEST:
      log.debug("received a key request from peer %s", msg.getSrc());
      // if a key request, send the response key back
      try {
        // extract peer's public key
        PublicKey tmpKey = generatePubKey(msg.getBuffer());
        // send back the secret key we have
        sendSecretKey(getSecretKey(), tmpKey, msg.getSrc());
      } catch (Exception e) {
        log.warn("unable to reconstitute peer's public key");
      }
      break;

    case EncryptHeader.SECRETKEY:
      log.debug("received a secretkey response from keyserver %s", msg.getSrc());
      try {
        SecretKey tmp = decodeKey(msg.getBuffer());
        if (tmp == null) sendKeyRequest(); // unable to understand response, let's try again
        else {
          // otherwise let's set the returned key as the shared key
          setKeys(tmp, hdr.getVersion());
          log.debug("decoded secretkey response");
        }
      } catch (Exception e) {
        log.warn("unable to process received public key", e);
      }
      break;

    default:
      log.warn("received ignored encrypt header of %s", hdr.getType());
      break;
  }
}
public Object up(Event evt) {
  Message msg;
  Address dst, src;
  Unicast2Header hdr;

  switch (evt.getType()) {
    case Event.MSG:
      msg = (Message) evt.getArg();
      dst = msg.getDest();
      if (dst == null || msg.isFlagSet(Message.NO_RELIABILITY)) // only handle unicast messages
        break; // pass up

      // changed from removeHeader(): we cannot remove the header because if we do loopback=true
      // at the transport level, we will not have the header on retransmit ! (bela Aug 22 2006)
      hdr = (Unicast2Header) msg.getHeader(this.id);
      if (hdr == null) break;
      src = msg.getSrc();
      switch (hdr.type) {
        case Unicast2Header.DATA: // received regular message
          handleDataReceived(src, hdr.seqno, hdr.conn_id, hdr.first, msg, evt);
          return null; // we pass the deliverable message up in handleDataReceived()
        case Unicast2Header.XMIT_REQ: // received retransmit request for previously sent messages
          handleXmitRequest(src, hdr.seqno, hdr.high_seqno);
          break;
        case Unicast2Header.SEND_FIRST_SEQNO:
          handleResendingOfFirstMessage(src, hdr.seqno);
          break;
        case Unicast2Header.STABLE:
          stable(msg.getSrc(), hdr.conn_id, hdr.seqno, hdr.high_seqno);
          break;
        default:
          log.error("UnicastHeader type " + hdr.type + " not known !");
          break;
      }
      return null;
  }
  return up_prot.up(evt); // Pass up to the layer above us
}
/**
 * Message contains MethodCall. Execute it against *this* object and return result. Use
 * MethodCall.invoke() to do this. Return result.
 */
public Object handle(Message req) {
  Object body;
  MethodCall method_call;

  if (server_obj == null) {
    if (log.isErrorEnabled()) log.error("no method handler is registered. Discarding request.");
    return null;
  }
  if (req == null || req.getLength() == 0) {
    if (log.isErrorEnabled()) log.error("message or message buffer is null");
    return null;
  }

  try {
    body =
        req_marshaller != null
            ? req_marshaller.objectFromByteBuffer(req.getBuffer(), req.getOffset(), req.getLength())
            : req.getObject();
  } catch (Throwable e) {
    if (log.isErrorEnabled()) log.error("exception marshalling object", e);
    return e;
  }

  if (!(body instanceof MethodCall)) {
    if (log.isErrorEnabled()) log.error("message does not contain a MethodCall object");
    // create an exception to represent this and return it
    return new IllegalArgumentException("message does not contain a MethodCall object");
  }

  method_call = (MethodCall) body;
  try {
    if (log.isTraceEnabled())
      log.trace("[sender=" + req.getSrc() + "], method_call: " + method_call);

    if (method_call.getMode() == MethodCall.ID) {
      if (method_lookup == null)
        throw new Exception(
            "MethodCall uses ID=" + method_call.getId() + ", but method_lookup has not been set");
      Method m = method_lookup.findMethod(method_call.getId());
      if (m == null) throw new Exception("no method found for " + method_call.getId());
      method_call.setMethod(m);
    }

    return method_call.invoke(server_obj);
  } catch (Throwable x) {
    return x;
  }
}
/** Send a message to the address specified in dest */
void sendUdpMessage(Message msg) throws Exception {
  IpAddress dest;
  Message copy;
  Event evt;

  dest = (IpAddress) msg.getDest(); // guaranteed not to be null
  setSourceAddress(msg);

  if (Trace.debug) {
    Trace.debug(
        "UDP.sendUdpMessage()",
        "sending message to "
            + msg.getDest()
            + " (src="
            + msg.getSrc()
            + "), headers are "
            + msg.getHeaders());
  }

  // Don't send if destination is local address. Instead, switch dst and src and put in up_queue.
  // If multicast message, loopback a copy directly to us (but still multicast). Once we receive
  // this, we will discard our own multicast message
  if (loopback && (dest.equals(local_addr) || dest.isMulticastAddress())) {
    copy = msg.copy();
    copy.removeHeader(name);
    copy.setSrc(local_addr);
    copy.setDest(dest);
    evt = new Event(Event.MSG, copy);

    /* Because Protocol.up() is never called by this bottommost layer, we call up() directly in
       the observer. This allows e.g. PerfObserver to get the time of reception of a message */
    if (observer != null) {
      observer.up(evt, up_queue.size());
    }
    if (Trace.debug) {
      Trace.info("UDP.sendUdpMessage()", "looped back local message " + copy);
    }
    passUp(evt);
    if (!dest.isMulticastAddress()) {
      return;
    }
  }

  if (use_outgoing_packet_handler) {
    outgoing_queue.add(msg);
    return;
  }

  send(msg);
}
public Object up(Event evt) {
  switch (evt.getType()) {
    case Event.MSG:
      Message msg = (Message) evt.getArg();
      StateHeader hdr = (StateHeader) msg.getHeader(this.id);
      if (hdr == null) break;

      switch (hdr.type) {
        case StateHeader.STATE_REQ:
          state_requesters.add(msg.getSrc());
          break;
        case StateHeader.STATE_RSP:
          handleStateRsp(hdr.getDigest(), msg.getSrc(), msg.getBuffer());
          break;
        case StateHeader.STATE_EX:
          closeHoleFor(msg.getSrc());
          handleException((Throwable) msg.getObject());
          break;
        default:
          log.error("%s: type %s not known in StateHeader", local_addr, hdr.type);
          break;
      }
      return null;

    case Event.TMP_VIEW:
    case Event.VIEW_CHANGE:
      handleViewChange((View) evt.getArg());
      break;

    case Event.CONFIG:
      Map<String, Object> config = (Map<String, Object>) evt.getArg();
      if (config != null && config.containsKey("state_transfer"))
        log.error(
            Util.getMessage(
                "ProtocolStackCannotContainTwoStateTransferProtocolsRemoveEitherOneOfThem"));
      break;
  }
  return up_prot.up(evt);
}
public Message visit(Message msg, MessageBatch batch) {
  EncryptHeader hdr;

  if (msg == null
      || (msg.getLength() == 0 && !encrypt_entire_message)
      || ((hdr = (EncryptHeader) msg.getHeader(id)) == null)) return null;

  if (hdr.getType() == EncryptHeader.ENCRYPT) {
    // if queueing then pass into queue to be dealt with later
    if (queue_up) {
      queueUpMessage(msg, batch);
      return null;
    }

    // make sure we pass up any queued messages first
    if (!suppliedKey) drainUpQueue();

    if (lock == null) {
      int index = getNextIndex();
      lock = decoding_locks[index];
      cipher = decoding_ciphers[index];
      lock.lock();
    }

    try {
      Message tmpMsg = decryptMessage(cipher, msg.copy()); // need to copy for possible xmits
      if (tmpMsg != null) batch.replace(msg, tmpMsg);
    } catch (Exception e) {
      log.error(
          "failed decrypting message from %s (offset=%d, length=%d, buf.length=%d): %s, headers are %s",
          msg.getSrc(), msg.getOffset(), msg.getLength(), msg.getRawBuffer().length, e,
          msg.printHeaders());
    }
  } else {
    // a control message will get handled by ENCRYPT and should not be passed up
    batch.remove(msg);
    handleUpEvent(msg, hdr);
  }
  return null;
}
public Object up(Event evt) {
  switch (evt.getType()) {
    case Event.MSG:
      Message msg = (Message) evt.getArg();
      if (msg.getDest() == null
          || msg.isFlagSet(Message.Flag.NO_RELIABILITY)) // only handle unicast messages
        break; // pass up

      UnicastHeader hdr = (UnicastHeader) msg.getHeader(this.id);
      if (hdr == null) break;
      Address sender = msg.getSrc();
      switch (hdr.type) {
        case UnicastHeader.DATA: // received regular message
          handleDataReceived(sender, hdr.seqno, hdr.conn_id, hdr.first, msg, evt);
          break;
        default:
          handleUpEvent(sender, hdr);
          break;
      }
      return null;
  }
  return up_prot.up(evt); // Pass up to the layer above us
}
@SuppressWarnings("unchecked") public Object up(Event evt) { switch (evt.getType()) { case Event.MSG: Message msg = (Message) evt.getArg(); PingHeader hdr = (PingHeader) msg.getHeader(this.id); if (hdr == null) return up_prot.up(evt); if (is_leaving) return null; // prevents merging back a leaving member // (https://issues.jboss.org/browse/JGRP-1336) PingData data = readPingData(msg.getRawBuffer(), msg.getOffset(), msg.getLength()); Address logical_addr = data != null ? data.getAddress() : msg.src(); switch (hdr.type) { case PingHeader.GET_MBRS_REQ: // return Rsp(local_addr, coord) if (cluster_name == null || hdr.cluster_name == null) { log.warn( "cluster_name (%s) or cluster_name of header (%s) is null; passing up discovery " + "request from %s, but this should not be the case", cluster_name, hdr.cluster_name, msg.src()); } else { if (!cluster_name.equals(hdr.cluster_name)) { log.warn( "%s: discarding discovery request for cluster '%s' from %s; " + "our cluster name is '%s'. Please separate your clusters properly", logical_addr, hdr.cluster_name, msg.src(), cluster_name); return null; } } // add physical address and logical name of the discovery sender (if available) to the // cache if (data != null) { addDiscoveryResponseToCaches( logical_addr, data.getLogicalName(), data.getPhysicalAddr()); discoveryRequestReceived(msg.getSrc(), data.getLogicalName(), data.getPhysicalAddr()); addResponse(data, false); } if (return_entire_cache) { Map<Address, PhysicalAddress> cache = (Map<Address, PhysicalAddress>) down(new Event(Event.GET_LOGICAL_PHYSICAL_MAPPINGS)); if (cache != null) { for (Map.Entry<Address, PhysicalAddress> entry : cache.entrySet()) { Address addr = entry.getKey(); // JGRP-1492: only return our own address, and addresses in view. if (addr.equals(local_addr) || members.contains(addr)) { PhysicalAddress physical_addr = entry.getValue(); sendDiscoveryResponse( addr, physical_addr, UUID.get(addr), msg.getSrc(), isCoord(addr)); } } } return null; } // Only send a response if hdr.mbrs is not empty and contains myself. Otherwise always // send my info Collection<? extends Address> mbrs = data != null ? data.mbrs() : null; boolean send_response = mbrs == null || mbrs.contains(local_addr); if (send_response) { PhysicalAddress physical_addr = (PhysicalAddress) down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr)); sendDiscoveryResponse( local_addr, physical_addr, UUID.get(local_addr), msg.getSrc(), is_coord); } return null; case PingHeader.GET_MBRS_RSP: // add physical address (if available) to transport's cache if (data != null) { log.trace("%s: received GET_MBRS_RSP from %s: %s", local_addr, msg.src(), data); handleDiscoveryResponse(data, msg.src()); } return null; default: log.warn("got PING header with unknown type %d", hdr.type); return null; } case Event.FIND_MBRS: return findMembers( (List<Address>) evt.getArg(), false, true); // this is done asynchronously } return up_prot.up(evt); }
/**
 * If the sender is null, set our own address. We cannot just go ahead and set the address
 * anyway, as we might be sending a message on behalf of someone else ! E.g. in case of
 * retransmission, when the original sender has crashed, or in a FLUSH protocol when we have to
 * return all unstable messages with the FLUSH_OK response.
 */
void setSourceAddress(Message msg) {
  if (msg.getSrc() == null) {
    msg.setSrc(local_addr);
  }
}
@SuppressWarnings("unchecked") public Object up(Event evt) { switch (evt.getType()) { case Event.MSG: Message msg = (Message) evt.getArg(); GmsHeader hdr = (GmsHeader) msg.getHeader(this.id); if (hdr == null) break; switch (hdr.type) { case GmsHeader.JOIN_REQ: view_handler.add( new Request(Request.JOIN, hdr.mbr, false, null, hdr.useFlushIfPresent)); break; case GmsHeader.JOIN_REQ_WITH_STATE_TRANSFER: view_handler.add( new Request( Request.JOIN_WITH_STATE_TRANSFER, hdr.mbr, false, null, hdr.useFlushIfPresent)); break; case GmsHeader.JOIN_RSP: impl.handleJoinResponse(hdr.join_rsp); break; case GmsHeader.LEAVE_REQ: if (hdr.mbr == null) return null; view_handler.add(new Request(Request.LEAVE, hdr.mbr, false)); break; case GmsHeader.LEAVE_RSP: impl.handleLeaveResponse(); break; case GmsHeader.VIEW: View new_view = hdr.view; if (new_view == null) return null; Address coord = msg.getSrc(); if (!new_view.containsMember(coord)) { sendViewAck( coord); // we need to send the ack first, otherwise the connection is removed impl.handleViewChange(new_view, hdr.my_digest); } else { impl.handleViewChange(new_view, hdr.my_digest); sendViewAck(coord); // send VIEW_ACK to sender of view } break; case GmsHeader.VIEW_ACK: Address sender = msg.getSrc(); ack_collector.ack(sender); return null; // don't pass further up case GmsHeader.MERGE_REQ: impl.handleMergeRequest(msg.getSrc(), hdr.merge_id, hdr.mbrs); break; case GmsHeader.MERGE_RSP: MergeData merge_data = new MergeData(msg.getSrc(), hdr.view, hdr.my_digest, hdr.merge_rejected); if (log.isTraceEnabled()) { log.trace( local_addr + ": got merge response from " + msg.getSrc() + ", merge_id=" + hdr.merge_id + ", merge data is " + merge_data); } impl.handleMergeResponse(merge_data, hdr.merge_id); break; case GmsHeader.INSTALL_MERGE_VIEW: impl.handleMergeView( new MergeData(msg.getSrc(), hdr.view, hdr.my_digest), hdr.merge_id); break; case GmsHeader.INSTALL_DIGEST: Digest tmp = hdr.my_digest; down_prot.down(new Event(Event.MERGE_DIGEST, tmp)); break; case GmsHeader.INSTALL_MERGE_VIEW_OK: // [JGRP-700] - FLUSH: flushing should span merge merge_ack_collector.ack(msg.getSrc()); break; case GmsHeader.CANCEL_MERGE: // [JGRP-524] - FLUSH and merge: flush doesn't wrap entire merge process impl.handleMergeCancelled(hdr.merge_id); break; case GmsHeader.GET_DIGEST_REQ: // only handle this request if it was sent by the coordinator (or at least a member) of // the current cluster synchronized (members) { if (!members.contains(msg.getSrc())) break; } // discard my own request: if (msg.getSrc().equals(local_addr)) return null; if (hdr.merge_id != null && !(merger.matchMergeId(hdr.merge_id) || merger.setMergeId(null, hdr.merge_id))) return null; // fetch only my own digest Digest digest = (Digest) down_prot.down(new Event(Event.GET_DIGEST, local_addr)); if (digest != null) { GmsHeader rsp_hdr = new GmsHeader(GmsHeader.GET_DIGEST_RSP); rsp_hdr.my_digest = digest; Message get_digest_rsp = new Message(msg.getSrc(), null, null); get_digest_rsp.setFlag(Message.OOB); get_digest_rsp.putHeader(this.id, rsp_hdr); down_prot.down(new Event(Event.MSG, get_digest_rsp)); } break; case GmsHeader.GET_DIGEST_RSP: Digest digest_rsp = hdr.my_digest; impl.handleDigestResponse(msg.getSrc(), digest_rsp); break; default: if (log.isErrorEnabled()) log.error("GmsHeader with type=" + hdr.type + " not known"); } return null; // don't pass up case Event.SUSPECT: Object retval = up_prot.up(evt); Address suspected = (Address) evt.getArg(); view_handler.add(new Request(Request.SUSPECT, suspected, true)); 
ack_collector.suspect(suspected); merge_ack_collector.suspect(suspected); return retval; case Event.UNSUSPECT: impl.unsuspect((Address) evt.getArg()); return null; // discard case Event.MERGE: view_handler.add( new Request(Request.MERGE, null, false, (Map<Address, View>) evt.getArg())); return null; // don't pass up case Event.IS_MERGE_IN_PROGRESS: return merger.isMergeInProgress(); } return up_prot.up(evt); }
/**
 * Callback method <br>
 * Called by the ProtocolStack when a message is received.
 *
 * @param evt the event carrying the message from the protocol stack
 */
public Object up(Event evt) {
  switch (evt.getType()) {
    case Event.MSG:
      Message msg = (Message) evt.getArg();
      if (stats) {
        received_msgs++;
        received_bytes += msg.getLength();
      }

      // discard local messages (sent by myself to me)
      if (discard_own_messages
          && local_addr != null
          && msg.getSrc() != null
          && local_addr.equals(msg.getSrc())) return null;
      break;

    case Event.VIEW_CHANGE:
      View tmp = (View) evt.getArg();
      if (tmp instanceof MergeView) my_view = new View(tmp.getViewId(), tmp.getMembers());
      else my_view = tmp;

      // Bela&Vladimir Oct 27th,2006 (JGroups 2.4): we need to set connected=true because a client
      // can call channel.getView() in viewAccepted() callback invoked on this thread (see
      // Event.VIEW_CHANGE handling below)

      // not good: we are only connected when we returned from connect() - bela June 22 2007

      // Changed: when a channel gets a view of which it is a member then it should be
      // connected even if connect() hasn't returned yet ! (bela Noc 2010)
      if (state != State.CONNECTED) state = State.CONNECTED;
      break;

    case Event.CONFIG:
      Map<String, Object> cfg = (Map<String, Object>) evt.getArg();
      if (cfg != null) {
        if (cfg.containsKey("state_transfer")) {
          state_transfer_supported = (Boolean) cfg.get("state_transfer");
        }
        if (cfg.containsKey("flush_supported")) {
          flush_supported = (Boolean) cfg.get("flush_supported");
        }
      }
      break;

    case Event.GET_STATE_OK:
      StateTransferResult result = (StateTransferResult) evt.getArg();
      if (up_handler != null) {
        try {
          Object retval = up_handler.up(evt);
          state_promise.setResult(new StateTransferResult());
          return retval;
        } catch (Throwable t) {
          state_promise.setResult(new StateTransferResult(t));
        }
      }
      if (receiver != null) {
        try {
          if (result.hasBuffer()) {
            byte[] tmp_state = result.getBuffer();
            ByteArrayInputStream input = new ByteArrayInputStream(tmp_state);
            receiver.setState(input);
          }
          state_promise.setResult(result);
        } catch (Throwable t) {
          state_promise.setResult(new StateTransferResult(t));
        }
      }
      break;

    case Event.STATE_TRANSFER_INPUTSTREAM_CLOSED:
      state_promise.setResult((StateTransferResult) evt.getArg());
      break;

    case Event.STATE_TRANSFER_INPUTSTREAM:
      // Oct 13,2006 moved to down() when Event.STATE_TRANSFER_INPUTSTREAM_CLOSED is received
      // state_promise.setResult(is != null? Boolean.TRUE : Boolean.FALSE);
      if (up_handler != null) return up_handler.up(evt);

      InputStream is = (InputStream) evt.getArg();
      if (is != null && receiver != null) {
        try {
          receiver.setState(is);
        } catch (Throwable t) {
          throw new RuntimeException("failed calling setState() in state requester", t);
        }
      }
      break;

    case Event.STATE_TRANSFER_OUTPUTSTREAM:
      if (receiver != null && evt.getArg() != null) {
        try {
          receiver.getState((OutputStream) evt.getArg());
        } catch (Exception e) {
          throw new RuntimeException("failed calling getState() in state provider", e);
        }
      }
      break;

    case Event.GET_LOCAL_ADDRESS:
      return local_addr;

    default:
      break;
  }

  // If UpHandler is installed, pass all events to it and return (UpHandler is e.g. a building block)
  if (up_handler != null) return up_handler.up(evt);

  if (receiver != null) return invokeCallback(evt.getType(), evt.getArg());
  return null;
}
public Object up(Message msg) {
  ExecutorHeader hdr = msg.getHeader(id);
  if (hdr == null) return up_prot.up(msg);

  Request req = msg.getObject();
  if (log.isTraceEnabled()) log.trace("[" + local_addr + "] <-- [" + msg.getSrc() + "] " + req);
  switch (req.type) {
    case RUN_REQUEST:
      handleTaskRequest(req.request, (Address) req.object);
      break;
    case CONSUMER_READY:
      handleConsumerReadyRequest(req.request, (Address) req.object);
      break;
    case CONSUMER_UNREADY:
      handleConsumerUnreadyRequest(req.request, (Address) req.object);
      break;
    case CONSUMER_FOUND:
      handleConsumerFoundResponse(req.request, (Address) req.object);
      break;
    case RUN_SUBMITTED:
      RequestWithThread reqWT = (RequestWithThread) req;
      Object objectToRun = reqWT.object;
      Runnable runnable;
      if (objectToRun instanceof Runnable) {
        runnable = (Runnable) objectToRun;
      } else if (objectToRun instanceof Callable) {
        @SuppressWarnings("unchecked")
        Callable<Object> callable = (Callable<Object>) objectToRun;
        runnable = new FutureTask<>(callable);
      } else {
        log.error(
            Util.getMessage("RequestOfType")
                + req.type
                + " sent an object of "
                + objectToRun
                + " which is invalid");
        break;
      }
      handleTaskSubmittedRequest(runnable, msg.getSrc(), req.request, reqWT.threadId);
      break;
    case RUN_REJECTED:
      // We could make requests local for this, but is it really worth it
      handleTaskRejectedResponse(msg.getSrc(), req.request);
      break;
    case RESULT_SUCCESS:
      handleValueResponse(msg.getSrc(), req.request, req.object);
      break;
    case RESULT_EXCEPTION:
      handleExceptionResponse(msg.getSrc(), req.request, (Throwable) req.object);
      break;
    case INTERRUPT_RUN:
      // We could make requests local for this, but is it really worth it
      handleInterruptRequest(msg.getSrc(), req.request);
      break;
    case CREATE_CONSUMER_READY:
      Owner owner = new Owner((Address) req.object, req.request);
      handleNewConsumer(owner);
      break;
    case CREATE_RUN_REQUEST:
      owner = new Owner((Address) req.object, req.request);
      handleNewRunRequest(owner);
      break;
    case DELETE_CONSUMER_READY:
      owner = new Owner((Address) req.object, req.request);
      handleRemoveConsumer(owner);
      break;
    case DELETE_RUN_REQUEST:
      owner = new Owner((Address) req.object, req.request);
      handleRemoveRunRequest(owner);
      break;
    default:
      log.error(Util.getMessage("RequestOfType") + req.type + " not known");
      break;
  }
  return null;
}
/**
 * An event was received from the layer below. Usually the current layer will want to examine the
 * event type and - depending on its type - perform some computation (e.g. removing headers from a
 * MSG event type, or updating the internal membership list when receiving a VIEW_CHANGE event).
 * Finally the event is either a) discarded, or b) an event is sent down the stack using
 * <code>PassDown</code> or c) the event (or another event) is sent up the stack using
 * <code>PassUp</code>.
 *
 * <p>For the PING protocol, the Up operation does the following things:
 * 1. If the event is an Event.MSG, PING will inspect the message header. If the header is null,
 *    PING simply passes up the event. If the header is PingHeader.GET_MBRS_REQ, the PING protocol
 *    will PassDown a PingRequest message. If the header is PingHeader.GET_MBRS_RSP, we will add
 *    the message to the initial members vector and wake up any waiting threads.
 * 2. If the event is Event.SET_LOCAL_ADDR, we will simply set the local address of this protocol.
 * 3. All other events are simply passed up to the protocol above.
 *
 * @param evt - the event that has been sent from the layer below
 */
@SuppressWarnings("unchecked")
public Object up(Event evt) {
  switch (evt.getType()) {
    case Event.MSG:
      Message msg = (Message) evt.getArg();
      PingHeader hdr = (PingHeader) msg.getHeader(this.id);
      if (hdr == null) return up_prot.up(evt);

      PingData data = hdr.data;
      Address logical_addr = data != null ? data.getAddress() : null;

      if (is_leaving)
        return null; // prevents merging back a leaving member (https://issues.jboss.org/browse/JGRP-1336)

      switch (hdr.type) {
        case PingHeader.GET_MBRS_REQ: // return Rsp(local_addr, coord)
          if (group_addr == null || hdr.cluster_name == null) {
            if (log.isWarnEnabled())
              log.warn(
                  "group_addr ("
                      + group_addr
                      + ") or cluster_name of header ("
                      + hdr.cluster_name
                      + ") is null; passing up discovery request from "
                      + msg.getSrc()
                      + ", but this should not be the case");
          } else {
            if (!group_addr.equals(hdr.cluster_name)) {
              if (log.isWarnEnabled())
                log.warn(
                    "discarding discovery request for cluster '"
                        + hdr.cluster_name
                        + "' from "
                        + msg.getSrc()
                        + "; our cluster name is '"
                        + group_addr
                        + "'. Please separate your clusters cleanly.");
              return null;
            }
          }

          // add physical address and logical name of the discovery sender (if available) to the cache
          if (data != null) {
            if (logical_addr == null) logical_addr = msg.getSrc();
            Collection<PhysicalAddress> physical_addrs = data.getPhysicalAddrs();
            PhysicalAddress physical_addr =
                physical_addrs != null && !physical_addrs.isEmpty()
                    ? physical_addrs.iterator().next()
                    : null;
            if (logical_addr != null && physical_addr != null)
              down(
                  new Event(
                      Event.SET_PHYSICAL_ADDRESS,
                      new Tuple<Address, PhysicalAddress>(logical_addr, physical_addr)));
            if (logical_addr != null && data.getLogicalName() != null)
              UUID.add(logical_addr, data.getLogicalName());
            discoveryRequestReceived(msg.getSrc(), data.getLogicalName(), physical_addrs);
            synchronized (ping_responses) {
              for (Responses response : ping_responses) {
                response.addResponse(data, false);
              }
            }
          }

          if (hdr.view_id != null) {
            // If the discovery request is merge-triggered, and the ViewId shipped with it
            // is the same as ours, we don't respond (JGRP-1315).
            ViewId my_view_id = view != null ? view.getViewId() : null;
            if (my_view_id != null && my_view_id.equals(hdr.view_id)) return null;

            boolean send_discovery_rsp =
                force_sending_discovery_rsps
                    || is_coord
                    || current_coord == null
                    || current_coord.equals(msg.getSrc());
            if (!send_discovery_rsp) {
              if (log.isTraceEnabled())
                log.trace(
                    local_addr
                        + ": suppressing merge response as I'm not a coordinator and the "
                        + "discovery request was not sent by a coordinator");
              return null;
            }
          }

          if (isMergeRunning()) {
            if (log.isTraceEnabled())
              log.trace(local_addr + ": suppressing merge response as a merge is already in progress");
            return null;
          }

          List<PhysicalAddress> physical_addrs =
              hdr.view_id != null
                  ? null
                  : Arrays.asList(
                      (PhysicalAddress) down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr)));
          sendDiscoveryResponse(
              local_addr,
              physical_addrs,
              is_server,
              hdr.view_id != null,
              UUID.get(local_addr),
              msg.getSrc());
          return null;

        case PingHeader.GET_MBRS_RSP:
          // add response to vector and notify waiting thread
          // add physical address (if available) to transport's cache
          if (data != null) {
            Address response_sender = msg.getSrc();
            if (logical_addr == null) logical_addr = msg.getSrc();
            Collection<PhysicalAddress> addrs = data.getPhysicalAddrs();
            PhysicalAddress physical_addr =
                addrs != null && !addrs.isEmpty() ? addrs.iterator().next() : null;
            if (logical_addr != null && physical_addr != null)
              down(
                  new Event(
                      Event.SET_PHYSICAL_ADDRESS,
                      new Tuple<Address, PhysicalAddress>(logical_addr, physical_addr)));
            if (logical_addr != null && data.getLogicalName() != null)
              UUID.add(logical_addr, data.getLogicalName());

            if (log.isTraceEnabled())
              log.trace(local_addr + ": received GET_MBRS_RSP from " + response_sender + ": " + data);

            boolean overwrite = logical_addr != null && logical_addr.equals(response_sender);
            synchronized (ping_responses) {
              for (Responses response : ping_responses) {
                response.addResponse(data, overwrite);
              }
            }
          }
          return null;

        default:
          if (log.isWarnEnabled()) log.warn("got PING header with unknown type (" + hdr.type + ')');
          return null;
      }

    case Event.GET_PHYSICAL_ADDRESS:
      try {
        sendDiscoveryRequest(group_addr, null, null);
      } catch (InterruptedIOException ie) {
        if (log.isWarnEnabled()) {
          log.warn("Discovery request for cluster " + group_addr + " interrupted");
        }
        Thread.currentThread().interrupt();
      } catch (Exception ex) {
        if (log.isErrorEnabled()) log.error("failed sending discovery request", ex);
      }
      return null;

    case Event.FIND_INITIAL_MBRS: // sent by transport
      return findInitialMembers(null);
  }
  return up_prot.up(evt);
}