/**
 * <b>Callback</b>. Called by the superclass when an event may be handled.
 *
 * <p><b>Do not use <code>passDown()</code> in this method, as the event is passed down by
 * default by the superclass after this method returns!</b>
 *
 * @return boolean Defaults to true. If false, the event will not be passed down the stack.
 */
public boolean handleDownEvent(Event evt) {
    switch (evt.getType()) {
        case Event.CONNECT:
            passDown(evt);
            try {
                group_addr = (String) evt.getArg();
            } catch (ClassCastException cce) {
                Trace.error("GMS.handleDownEvent(CONNECT)",
                            "group address must be a string (group name) to make sense");
            }
            impl.join(local_addr);
            passUp(new Event(Event.CONNECT_OK));
            startEventHandlerThread();
            return false; // don't pass down: was already passed down

        case Event.DISCONNECT:
            impl.leave((Address) evt.getArg());
            passUp(new Event(Event.DISCONNECT_OK));
            stopEventHandlerThread();
            initState();
            return true; // pass down
    }
    return impl.handleDownEvent(evt);
}
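/*
 * Example (illustrative, not part of the original source): how the CONNECT/DISCONNECT
 * paths above are typically driven. In JGroups the channel emits these events down the
 * stack and waits for CONNECT_OK/DISCONNECT_OK to travel back up; the "channel" variable
 * is hypothetical here.
 */
// channel.down(new Event(Event.CONNECT, "demo-group"));   // GMS joins, then passes up CONNECT_OK
// channel.down(new Event(Event.DISCONNECT, local_addr));  // GMS leaves, then passes up DISCONNECT_OK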
public void run() {
    Event evt;
    while (evt_thread != null && event_queue != null) {
        try {
            evt = (Event) event_queue.remove(); // blocks until an event is available
            switch (evt.getType()) {
                case Event.SUSPECT:
                    impl.suspect((Address) evt.getArg());
                    break;
                case Event.MERGE:
                    impl.merge((Vector) evt.getArg());
                    break;
                default:
                    Trace.error("GMS.run()",
                                "event handler thread encountered event of type " +
                                Event.type2String(evt.getType()) + ": not handled by me!");
                    break;
            }
        } catch (QueueClosedException closed) {
            break; // queue was closed by stopEventHandlerThread(): terminate the thread
        } catch (Exception ex) {
            Trace.warn("GMS.run()", "exception=" + ex);
        }
    }
}
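/*
 * Sketch (assumption; these helpers are referenced from handleDownEvent() but not shown
 * in this excerpt): starting/stopping the event handler thread that runs the loop above,
 * assuming event_queue is an org.jgroups.util.Queue and evt_thread a plain Thread.
 */
void startEventHandlerThread() {
    if (event_queue == null)
        event_queue = new Queue();
    evt_thread = new Thread(this, "GMS.EventHandlerThread");
    evt_thread.setDaemon(true);
    evt_thread.start();
}

void stopEventHandlerThread() {
    if (evt_thread != null) {
        event_queue.close(false); // unblocks remove() above with a QueueClosedException
        evt_thread = null;
    }
}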
public View handleMerge(ViewId other_vid, Vector other_members) {
    synchronized (impl_mutex) {
        View v = impl.handleMerge(other_vid, other_members);
        if (Trace.trace)
            Trace.info("GMS.handleMerge()", "returning view: " + v);
        return v;
    }
}
public void setImpl(GmsImpl new_impl) {
    synchronized (impl_mutex) {
        impl = new_impl;
        if (Trace.trace)
            Trace.info("GMS.setImpl()", "changed role to " + new_impl.getClass().getName());
    }
}
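/*
 * Sketch (assumption): the role-change helpers implied by setImpl() and by initState()
 * below. GMS swaps between client, participant and coordinator implementations of
 * GmsImpl; the caching field client_impl is hypothetical here.
 */
void becomeClient() {
    if (client_impl == null)
        client_impl = new ClientGmsImpl(this);
    setImpl(client_impl);
}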
@Override // GemStoneAddition
public void init() throws Exception {
    super.init();
    synchronized (initial_mbrs) {
        initial_mbrs.clear();
        initial_mbrs_received = false;
    }
    join_promise.reset();
}
public void init() throws Exception {
    if (view_ack_collection_timeout <= 0)
        throw new IllegalArgumentException("view_ack_collection_timeout has to be greater than 0");
    if (merge_timeout <= 0)
        throw new IllegalArgumentException("merge_timeout has to be greater than 0");
    prev_members = new BoundedList<Address>(num_prev_mbrs);
    prev_views = new BoundedList<Tuple<View, Long>>(num_prev_views);
    TP transport = getTransport();
    timer = transport.getTimer();
    if (timer == null)
        throw new Exception("timer is null");
    if (impl != null)
        impl.init();
    transport.registerProbeHandler(this);
}
/**
 * <b>Callback</b>. Called by the superclass when an event may be handled.
 *
 * <p><b>Do not use <code>passUp()</code> in this method, as the event is passed up by
 * default by the superclass after this method returns!</b>
 *
 * @return boolean Defaults to true. If false, the event will not be passed up the stack.
 */
public boolean handleUpEvent(Event evt) {
    switch (evt.getType()) {
        case Event.CONNECT_OK: // sent by someone else, but WE are responsible for sending this!
        case Event.DISCONNECT_OK: // ditto (e.g. sent by UDP layer)
            return false;

        case Event.SET_LOCAL_ADDRESS:
            local_addr = (Address) evt.getArg();
            if (print_local_addr) {
                System.out.println("\n-------------------------------------------------------\n" +
                                   "2.1GMS: address is " + local_addr +
                                   "\n-------------------------------------------------------");
            }
            return true; // pass up

        case Event.SUSPECT:
            try {
                event_queue.add(evt);
            } catch (Exception e) { // queue closed: event handler thread is gone, ignore
            }
            return true; // pass up

        case Event.MERGE:
            try {
                event_queue.add(evt);
            } catch (Exception e) { // queue closed: event handler thread is gone, ignore
            }
            return false; // don't pass up

        case Event.FLUSH_OK:
            synchronized (flush_mutex) {
                flush_rsp = (FlushRsp) evt.getArg();
                flush_mutex.notify();
            }
            return false; // don't pass up

        case Event.REBROADCAST_MSGS_OK:
            synchronized (rebroadcast_mutex) {
                rebroadcast_mutex.notify();
            }
            return false; // don't pass up
    }
    return impl.handleUpEvent(evt);
}
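/*
 * Sketch (assumption): the waiting side of the flush_mutex handshake above. A flush()
 * helper sends a FLUSH event down, then blocks on flush_mutex until the FLUSH_OK branch
 * stores the response and notifies. The method name, parameters and FLUSH event payload
 * are illustrative.
 */
FlushRsp flush(Vector dests, long timeout) {
    synchronized (flush_mutex) {
        flush_rsp = null;
        passDown(new Event(Event.FLUSH, dests));
        try {
            flush_mutex.wait(timeout); // woken up by handleUpEvent(FLUSH_OK)
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
        }
        return flush_rsp;
    }
}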
@SuppressWarnings("unchecked") public Object down(Event evt) { int type = evt.getType(); switch (type) { case Event.CONNECT: case Event.CONNECT_USE_FLUSH: case Event.CONNECT_WITH_STATE_TRANSFER: case Event.CONNECT_WITH_STATE_TRANSFER_USE_FLUSH: boolean use_flush = type == Event.CONNECT_USE_FLUSH || type == Event.CONNECT_WITH_STATE_TRANSFER_USE_FLUSH; boolean state_transfer = type == Event.CONNECT_WITH_STATE_TRANSFER || type == Event.CONNECT_WITH_STATE_TRANSFER_USE_FLUSH; if (print_local_addr) { PhysicalAddress physical_addr = print_physical_addrs ? (PhysicalAddress) down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr)) : null; System.out.println( "\n-------------------------------------------------------------------\n" + "GMS: address=" + local_addr + ", cluster=" + evt.getArg() + (physical_addr != null ? ", physical address=" + physical_addr : "") + "\n-------------------------------------------------------------------"); } else { if (log.isDebugEnabled()) { PhysicalAddress physical_addr = print_physical_addrs ? (PhysicalAddress) down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr)) : null; log.debug( "address=" + local_addr + ", cluster=" + evt.getArg() + (physical_addr != null ? ", physical address=" + physical_addr : "")); } } down_prot.down(evt); if (local_addr == null) throw new IllegalStateException("local_addr is null"); try { if (state_transfer) impl.joinWithStateTransfer(local_addr, use_flush); else impl.join(local_addr, use_flush); } catch (Throwable e) { return e; } return null; // don't pass down: event has already been passed down case Event.DISCONNECT: impl.leave((Address) evt.getArg()); if (!(impl instanceof CoordGmsImpl)) { initState(); // in case connect() is called again } down_prot.down(evt); // notify the other protocols, but ignore the result return null; case Event.CONFIG: Map<String, Object> config = (Map<String, Object>) evt.getArg(); if ((config != null && config.containsKey("flush_supported"))) { flushProtocolInStack = true; } break; case Event.SET_LOCAL_ADDRESS: local_addr = (Address) evt.getArg(); break; } return down_prot.down(evt); }
/**
 * Broadcasts the new view and digest, and waits for acks from all members in the list
 * given as argument. If the list is null, we take the members who are part of new_view.
 */
public void castViewChange(View new_view, Digest digest, JoinRsp jr, Collection<Address> newMembers) {
    if (log.isTraceEnabled())
        log.trace(local_addr + ": mcasting view " + new_view + " (" + new_view.size() + " mbrs)\n");

    // Send down a local TMP_VIEW event. This is needed by certain layers (e.g. NAKACK) to
    // compute the correct digest in case a client's next request (e.g. getState()) reaches us
    // *before* our own view change multicast. Check NAKACK's TMP_VIEW handling for details.
    down_prot.up(new Event(Event.TMP_VIEW, new_view));
    down_prot.down(new Event(Event.TMP_VIEW, new_view));

    List<Address> ackMembers = new ArrayList<Address>(new_view.getMembers());
    if (newMembers != null && !newMembers.isEmpty())
        ackMembers.removeAll(newMembers);

    Message view_change_msg = new Message(); // bcast to all members
    GmsHeader hdr = new GmsHeader(GmsHeader.VIEW, new_view);
    hdr.my_digest = digest;
    view_change_msg.putHeader(this.id, hdr);

    // If we're the only member the VIEW is broadcast to, simply install the view directly,
    // without sending the VIEW multicast. Or else N-1 members drop the multicast anyway...
    if (local_addr != null && ackMembers.size() == 1 && ackMembers.get(0).equals(local_addr)) {
        // We need to add the message to the retransmit window (e.g. in NAKACK), so (1) it can
        // be retransmitted and (2) we increment the seqno (otherwise we'd return an incorrect digest).
        down_prot.down(new Event(Event.ADD_TO_XMIT_TABLE, view_change_msg));
        impl.handleViewChange(new_view, digest);
    } else {
        if (!ackMembers.isEmpty())
            ack_collector.reset(ackMembers);

        down_prot.down(new Event(Event.MSG, view_change_msg));
        try {
            if (!ackMembers.isEmpty()) {
                ack_collector.waitForAllAcks(view_ack_collection_timeout);
                if (log.isTraceEnabled())
                    log.trace(local_addr + ": received all " + ack_collector.expectedAcks()
                              + " ACKs from members for view " + new_view.getVid());
            }
        } catch (TimeoutException e) {
            if (log_collect_msgs && log.isWarnEnabled()) {
                log.warn(local_addr + ": failed to collect all ACKs (expected="
                         + ack_collector.expectedAcks() + ") for view " + new_view.getViewId()
                         + " after " + view_ack_collection_timeout + "ms, missing ACKs from "
                         + ack_collector.printMissing());
            }
        }
    }

    if (jr != null && (newMembers != null && !newMembers.isEmpty())) {
        ack_collector.reset(new ArrayList<Address>(newMembers));
        for (Address joiner : newMembers) {
            sendJoinResponse(jr, joiner);
        }
        try {
            ack_collector.waitForAllAcks(view_ack_collection_timeout);
            if (log.isTraceEnabled())
                log.trace(local_addr + ": received all ACKs (" + ack_collector.expectedAcks()
                          + ") from joiners for view " + new_view.getVid());
        } catch (TimeoutException e) {
            if (log_collect_msgs && log.isWarnEnabled()) {
                log.warn(local_addr + ": failed to collect all ACKs (expected="
                         + ack_collector.expectedAcks() + ") for unicast view " + new_view
                         + " after " + view_ack_collection_timeout + "ms, missing ACKs from "
                         + ack_collector.printMissing());
            }
        }
    }
}
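/*
 * Sketch (assumption): the sendJoinResponse() helper used above, following the same
 * Message/GmsHeader pattern as the view multicast; the GmsHeader constructor is assumed
 * to accept a JoinRsp.
 */
void sendJoinResponse(JoinRsp rsp, Address dest) {
    Message m = new Message(dest, null, null);
    GmsHeader hdr = new GmsHeader(GmsHeader.JOIN_RSP, rsp);
    m.putHeader(this.id, hdr);
    down_prot.down(new Event(Event.MSG, m));
}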
@SuppressWarnings("unchecked") public Object up(Event evt) { switch (evt.getType()) { case Event.MSG: Message msg = (Message) evt.getArg(); GmsHeader hdr = (GmsHeader) msg.getHeader(this.id); if (hdr == null) break; switch (hdr.type) { case GmsHeader.JOIN_REQ: view_handler.add( new Request(Request.JOIN, hdr.mbr, false, null, hdr.useFlushIfPresent)); break; case GmsHeader.JOIN_REQ_WITH_STATE_TRANSFER: view_handler.add( new Request( Request.JOIN_WITH_STATE_TRANSFER, hdr.mbr, false, null, hdr.useFlushIfPresent)); break; case GmsHeader.JOIN_RSP: impl.handleJoinResponse(hdr.join_rsp); break; case GmsHeader.LEAVE_REQ: if (hdr.mbr == null) return null; view_handler.add(new Request(Request.LEAVE, hdr.mbr, false)); break; case GmsHeader.LEAVE_RSP: impl.handleLeaveResponse(); break; case GmsHeader.VIEW: View new_view = hdr.view; if (new_view == null) return null; Address coord = msg.getSrc(); if (!new_view.containsMember(coord)) { sendViewAck( coord); // we need to send the ack first, otherwise the connection is removed impl.handleViewChange(new_view, hdr.my_digest); } else { impl.handleViewChange(new_view, hdr.my_digest); sendViewAck(coord); // send VIEW_ACK to sender of view } break; case GmsHeader.VIEW_ACK: Address sender = msg.getSrc(); ack_collector.ack(sender); return null; // don't pass further up case GmsHeader.MERGE_REQ: impl.handleMergeRequest(msg.getSrc(), hdr.merge_id, hdr.mbrs); break; case GmsHeader.MERGE_RSP: MergeData merge_data = new MergeData(msg.getSrc(), hdr.view, hdr.my_digest, hdr.merge_rejected); if (log.isTraceEnabled()) { log.trace( local_addr + ": got merge response from " + msg.getSrc() + ", merge_id=" + hdr.merge_id + ", merge data is " + merge_data); } impl.handleMergeResponse(merge_data, hdr.merge_id); break; case GmsHeader.INSTALL_MERGE_VIEW: impl.handleMergeView( new MergeData(msg.getSrc(), hdr.view, hdr.my_digest), hdr.merge_id); break; case GmsHeader.INSTALL_DIGEST: Digest tmp = hdr.my_digest; down_prot.down(new Event(Event.MERGE_DIGEST, tmp)); break; case GmsHeader.INSTALL_MERGE_VIEW_OK: // [JGRP-700] - FLUSH: flushing should span merge merge_ack_collector.ack(msg.getSrc()); break; case GmsHeader.CANCEL_MERGE: // [JGRP-524] - FLUSH and merge: flush doesn't wrap entire merge process impl.handleMergeCancelled(hdr.merge_id); break; case GmsHeader.GET_DIGEST_REQ: // only handle this request if it was sent by the coordinator (or at least a member) of // the current cluster synchronized (members) { if (!members.contains(msg.getSrc())) break; } // discard my own request: if (msg.getSrc().equals(local_addr)) return null; if (hdr.merge_id != null && !(merger.matchMergeId(hdr.merge_id) || merger.setMergeId(null, hdr.merge_id))) return null; // fetch only my own digest Digest digest = (Digest) down_prot.down(new Event(Event.GET_DIGEST, local_addr)); if (digest != null) { GmsHeader rsp_hdr = new GmsHeader(GmsHeader.GET_DIGEST_RSP); rsp_hdr.my_digest = digest; Message get_digest_rsp = new Message(msg.getSrc(), null, null); get_digest_rsp.setFlag(Message.OOB); get_digest_rsp.putHeader(this.id, rsp_hdr); down_prot.down(new Event(Event.MSG, get_digest_rsp)); } break; case GmsHeader.GET_DIGEST_RSP: Digest digest_rsp = hdr.my_digest; impl.handleDigestResponse(msg.getSrc(), digest_rsp); break; default: if (log.isErrorEnabled()) log.error("GmsHeader with type=" + hdr.type + " not known"); } return null; // don't pass up case Event.SUSPECT: Object retval = up_prot.up(evt); Address suspected = (Address) evt.getArg(); view_handler.add(new Request(Request.SUSPECT, suspected, true)); 
ack_collector.suspect(suspected); merge_ack_collector.suspect(suspected); return retval; case Event.UNSUSPECT: impl.unsuspect((Address) evt.getArg()); return null; // discard case Event.MERGE: view_handler.add( new Request(Request.MERGE, null, false, (Map<Address, View>) evt.getArg())); return null; // don't pass up case Event.IS_MERGE_IN_PROGRESS: return merger.isMergeInProgress(); } return up_prot.up(evt); }
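/*
 * Sketch (assumption): the sendViewAck() helper used in the VIEW case above. It unicasts
 * a VIEW_ACK header back to the sender of the view, feeding the coordinator's
 * ack_collector in castViewChange(); the OOB flag is an assumption.
 */
private void sendViewAck(Address dest) {
    Message view_ack = new Message(dest, null, null);
    view_ack.setFlag(Message.OOB);
    GmsHeader tmphdr = new GmsHeader(GmsHeader.VIEW_ACK);
    view_ack.putHeader(this.id, tmphdr);
    down_prot.down(new Event(Event.MSG, view_ack));
}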
public void start() throws Exception {
    if (impl != null)
        impl.start();
}
public void stop() {
    view_handler.stop(true);
    if (impl != null)
        impl.stop();
    if (prev_members != null)
        prev_members.clear();
}
void initState() {
    becomeClient();
    impl.init();
    view_id = null;
    if (members != null)
        members.clear();
}
public void leave(Address mbr) {
    synchronized (impl_mutex) {
        impl.leave(mbr);
    }
}

public void merge(Vector other_coords) {
    synchronized (impl_mutex) {
        impl.merge(other_coords);
    }
}

public void suspect(Address mbr) {
    synchronized (impl_mutex) {
        impl.suspect(mbr);
    }
}

public boolean handleJoin(Address mbr) {
    synchronized (impl_mutex) {
        return impl.handleJoin(mbr);
    }
}

public void join(Address mbr) {
    synchronized (impl_mutex) {
        impl.join(mbr);
    }
}

public void handleSuspect(Address mbr) {
    synchronized (impl_mutex) {
        impl.handleSuspect(mbr);
    }
}

public void handleLeave(Address mbr, boolean suspected) {
    synchronized (impl_mutex) {
        impl.handleLeave(mbr, suspected);
    }
}
public void handleViewChange(ViewId new_view, Vector mbrs) {
    // synchronized (impl_mutex) {
    impl.handleViewChange(new_view, mbrs);
    // }
}
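/*
 * Note (illustrative): the delegation methods above all follow the same pattern: take
 * impl_mutex, then forward to the current GmsImpl, so that a concurrent setImpl() (role
 * change) cannot swap the implementation in the middle of a call. A hypothetical new
 * delegate would look like:
 */
// public void handleExit(Address mbr) {
//     synchronized (impl_mutex) {
//         impl.handleExit(mbr);
//     }
// }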