public Object up(Event evt) {
    switch (evt.getType()) {
        case Event.MSG:
            Message msg = (Message) evt.getArg();
            ForkHeader hdr = (ForkHeader) msg.getHeader(id);
            if (hdr == null)
                break;
            if (hdr.fork_stack_id == null)
                throw new IllegalArgumentException("header has a null fork_stack_id");
            Protocol bottom_prot = get(hdr.fork_stack_id);
            return bottom_prot != null
                ? bottom_prot.up(evt)
                : this.unknownForkHandler.handleUnknownForkStack(msg, hdr.fork_stack_id);

        case Event.VIEW_CHANGE:
            for (Protocol bottom : fork_stacks.values())
                bottom.up(evt);
            break;

        case Event.STATE_TRANSFER_OUTPUTSTREAM:
            if (!process_state_events)
                break;
            getStateFromMainAndForkChannels(evt);
            return null;

        case Event.STATE_TRANSFER_INPUTSTREAM:
            if (!process_state_events)
                break;
            setStateInMainAndForkChannels((InputStream) evt.getArg());
            return null;
    }
    return up_prot.up(evt);
}
public void up(MessageBatch batch) {
    // Sort fork messages by fork-stack-id
    Map<String, List<Message>> map = new HashMap<>();
    for (Message msg : batch) {
        ForkHeader hdr = (ForkHeader) msg.getHeader(id);
        if (hdr != null) {
            batch.remove(msg);
            List<Message> list = map.get(hdr.fork_stack_id);
            if (list == null) {
                list = new ArrayList<>();
                map.put(hdr.fork_stack_id, list);
            }
            list.add(msg);
        }
    }

    // Now pass fork messages up, batched by fork-stack-id
    for (Map.Entry<String, List<Message>> entry : map.entrySet()) {
        String fork_stack_id = entry.getKey();
        List<Message> list = entry.getValue();
        Protocol bottom_prot = get(fork_stack_id);
        if (bottom_prot == null)
            continue;
        MessageBatch mb = new MessageBatch(batch.dest(), batch.sender(), batch.clusterName(),
                                           batch.multicast(), list);
        try {
            bottom_prot.up(mb);
        } catch (Throwable t) {
            log.error(Util.getMessage("FailedPassingUpBatch"), t);
        }
    }

    if (!batch.isEmpty())
        up_prot.up(batch);
}
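For context, here is a minimal usage sketch (not part of the listing above) showing where the ForkHeader consumed by the two FORK methods comes from: messages sent over a ForkChannel are tagged with that channel's fork_stack_id, which FORK.up() then uses to route them to the right fork stack. The config file name "udp.xml", the cluster name, and the ids "counter-stack"/"counter-channel" are arbitrary example values, COUNTER is just one possible fork protocol, and the sketch assumes the main stack already contains the FORK protocol.

import org.jgroups.JChannel;
import org.jgroups.Message;
import org.jgroups.fork.ForkChannel;
import org.jgroups.protocols.COUNTER;

public class ForkChannelExample {
    public static void main(String[] args) throws Exception {
        JChannel main = new JChannel("udp.xml");   // main stack; assumed to contain FORK
        main.connect("demo-cluster");

        // Creates (or joins) the fork stack "counter-stack" on top of FORK and the
        // fork channel "counter-channel" inside it
        ForkChannel fork = new ForkChannel(main, "counter-stack", "counter-channel", new COUNTER());
        fork.connect("ignored");                   // cluster name is taken from the main channel

        // Messages sent here carry a ForkHeader with fork_stack_id="counter-stack"
        fork.send(new Message(null, null, "hello"));

        fork.close();
        main.close();
    }
}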
@SuppressWarnings("unchecked")
public Object up(Event evt) {
    switch (evt.getType()) {
        case Event.MSG:
            Message msg = (Message) evt.getArg();
            GmsHeader hdr = (GmsHeader) msg.getHeader(this.id);
            if (hdr == null)
                break;
            switch (hdr.type) {
                case GmsHeader.JOIN_REQ:
                    view_handler.add(new Request(Request.JOIN, hdr.mbr, false, null, hdr.useFlushIfPresent));
                    break;
                case GmsHeader.JOIN_REQ_WITH_STATE_TRANSFER:
                    view_handler.add(new Request(Request.JOIN_WITH_STATE_TRANSFER, hdr.mbr, false, null,
                                                 hdr.useFlushIfPresent));
                    break;
                case GmsHeader.JOIN_RSP:
                    impl.handleJoinResponse(hdr.join_rsp);
                    break;
                case GmsHeader.LEAVE_REQ:
                    if (hdr.mbr == null)
                        return null;
                    view_handler.add(new Request(Request.LEAVE, hdr.mbr, false));
                    break;
                case GmsHeader.LEAVE_RSP:
                    impl.handleLeaveResponse();
                    break;
                case GmsHeader.VIEW:
                    View new_view = hdr.view;
                    if (new_view == null)
                        return null;
                    Address coord = msg.getSrc();
                    if (!new_view.containsMember(coord)) {
                        sendViewAck(coord); // we need to send the ack first, otherwise the connection is removed
                        impl.handleViewChange(new_view, hdr.my_digest);
                    } else {
                        impl.handleViewChange(new_view, hdr.my_digest);
                        sendViewAck(coord); // send VIEW_ACK to sender of view
                    }
                    break;
                case GmsHeader.VIEW_ACK:
                    Address sender = msg.getSrc();
                    ack_collector.ack(sender);
                    return null; // don't pass further up
                case GmsHeader.MERGE_REQ:
                    impl.handleMergeRequest(msg.getSrc(), hdr.merge_id, hdr.mbrs);
                    break;
                case GmsHeader.MERGE_RSP:
                    MergeData merge_data = new MergeData(msg.getSrc(), hdr.view, hdr.my_digest, hdr.merge_rejected);
                    if (log.isTraceEnabled()) {
                        log.trace(local_addr + ": got merge response from " + msg.getSrc()
                                      + ", merge_id=" + hdr.merge_id + ", merge data is " + merge_data);
                    }
                    impl.handleMergeResponse(merge_data, hdr.merge_id);
                    break;
                case GmsHeader.INSTALL_MERGE_VIEW:
                    impl.handleMergeView(new MergeData(msg.getSrc(), hdr.view, hdr.my_digest), hdr.merge_id);
                    break;
                case GmsHeader.INSTALL_DIGEST:
                    Digest tmp = hdr.my_digest;
                    down_prot.down(new Event(Event.MERGE_DIGEST, tmp));
                    break;
                case GmsHeader.INSTALL_MERGE_VIEW_OK:
                    // [JGRP-700] - FLUSH: flushing should span merge
                    merge_ack_collector.ack(msg.getSrc());
                    break;
                case GmsHeader.CANCEL_MERGE:
                    // [JGRP-524] - FLUSH and merge: flush doesn't wrap entire merge process
                    impl.handleMergeCancelled(hdr.merge_id);
                    break;
                case GmsHeader.GET_DIGEST_REQ:
                    // only handle this request if it was sent by the coordinator (or at least a member)
                    // of the current cluster
                    synchronized (members) {
                        if (!members.contains(msg.getSrc()))
                            break;
                    }

                    // discard my own request:
                    if (msg.getSrc().equals(local_addr))
                        return null;

                    if (hdr.merge_id != null
                        && !(merger.matchMergeId(hdr.merge_id) || merger.setMergeId(null, hdr.merge_id)))
                        return null;

                    // fetch only my own digest
                    Digest digest = (Digest) down_prot.down(new Event(Event.GET_DIGEST, local_addr));
                    if (digest != null) {
                        GmsHeader rsp_hdr = new GmsHeader(GmsHeader.GET_DIGEST_RSP);
                        rsp_hdr.my_digest = digest;
                        Message get_digest_rsp = new Message(msg.getSrc(), null, null);
                        get_digest_rsp.setFlag(Message.OOB);
                        get_digest_rsp.putHeader(this.id, rsp_hdr);
                        down_prot.down(new Event(Event.MSG, get_digest_rsp));
                    }
                    break;
                case GmsHeader.GET_DIGEST_RSP:
                    Digest digest_rsp = hdr.my_digest;
                    impl.handleDigestResponse(msg.getSrc(), digest_rsp);
                    break;
                default:
                    if (log.isErrorEnabled())
                        log.error("GmsHeader with type=" + hdr.type + " not known");
            }
            return null; // don't pass up

        case Event.SUSPECT:
            Object retval = up_prot.up(evt);
            Address suspected = (Address) evt.getArg();
            view_handler.add(new Request(Request.SUSPECT, suspected, true));
            ack_collector.suspect(suspected);
            merge_ack_collector.suspect(suspected);
            return retval;

        case Event.UNSUSPECT:
            impl.unsuspect((Address) evt.getArg());
            return null; // discard

        case Event.MERGE:
            view_handler.add(new Request(Request.MERGE, null, false, (Map<Address, View>) evt.getArg()));
            return null; // don't pass up

        case Event.IS_MERGE_IN_PROGRESS:
            return merger.isMergeInProgress();
    }
    return up_prot.up(evt);
}
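The VIEW / VIEW_ACK exchange above relies on sendViewAck(), which is not shown in the listing. The following is a plausible reconstruction, pieced together only from the conventions visible above (the OOB flag, putHeader with this.id, wrapping the message in an Event.MSG), not taken from the actual GMS source:

// Hedged sketch: send an OOB VIEW_ACK back to the member that sent us the view,
// so that its ack_collector.ack(sender) call (see the VIEW_ACK case above) completes.
private void sendViewAck(Address dest) {
    Message view_ack = new Message(dest, null, null);
    view_ack.setFlag(Message.OOB);
    view_ack.putHeader(this.id, new GmsHeader(GmsHeader.VIEW_ACK));
    down_prot.down(new Event(Event.MSG, view_ack));
}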