protected void forwardToCoord(long seqno, Message msg) {
    if (is_coord) { // the coordinator broadcasts directly, no forwarding needed
        forward(msg, seqno, false);
        return;
    }

    if (!running || flushing) { // not running or currently flushing: queue the message, it will be sent later
        forward_table.put(seqno, msg);
        return;
    }

    if (!ack_mode) { // not in ack mode: forward without waiting for an ack
        forward_table.put(seqno, msg);
        forward(msg, seqno, false);
        return;
    }

    send_lock.lock();
    try {
        forward_table.put(seqno, msg);
        // ack mode: forward and re-forward every 500 ms until the seqno has been acked
        // or the message has been removed from forward_table
        while (running && !flushing) {
            ack_promise.reset();
            forward(msg, seqno, true);
            if (!ack_mode || !running || flushing)
                break;
            Long ack = ack_promise.getResult(500);
            if (Objects.equals(ack, seqno) || !forward_table.containsKey(seqno))
                break;
        }
    } finally {
        send_lock.unlock();
    }
}
public List<PingData> get(long timeout) throws InterruptedException {
    long start_time = System.currentTimeMillis(), time_to_wait = timeout;

    promise.getLock().lock();
    try {
        while (time_to_wait > 0 && !promise.hasResult()) {
            // if num_expected_srv_rsps > 0, then it overrides num_expected_rsps
            if (num_expected_srv_rsps > 0) {
                int received_srv_rsps = getNumServerResponses(ping_rsps);
                if (received_srv_rsps >= num_expected_srv_rsps)
                    return new LinkedList<PingData>(ping_rsps);
            } else if (ping_rsps.size() >= num_expected_rsps) {
                return new LinkedList<PingData>(ping_rsps);
            }

            if (break_on_coord_rsp && containsCoordinatorResponse(ping_rsps))
                return new LinkedList<PingData>(ping_rsps);

            promise.getCond().await(time_to_wait, TimeUnit.MILLISECONDS);
            time_to_wait = timeout - (System.currentTimeMillis() - start_time);
        }
        return new LinkedList<PingData>(ping_rsps);
    } finally {
        promise.getLock().unlock();
    }
}
protected void deliver(Message msg, Event evt, SequencerHeader hdr) {
    Address sender = msg.getSrc();
    if (sender == null) {
        if (log.isErrorEnabled())
            log.error(local_addr + ": sender is null, cannot deliver ::" + hdr.getSeqno());
        return;
    }

    long msg_seqno = hdr.getSeqno();
    if (sender.equals(local_addr)) {
        forward_table.remove(msg_seqno);
        if (hdr.flush_ack) {
            ack_promise.setResult(msg_seqno);
            if (ack_mode && !flushing && threshold > 0 && ++num_acks >= threshold) {
                ack_mode = false; // threshold reached: switch from ack mode back to normal (non-acked) forwarding
                num_acks = 0;
            }
        }
    }

    if (!canDeliver(sender, msg_seqno)) {
        if (log.isWarnEnabled())
            log.warn(local_addr + ": dropped duplicate message " + sender + "::" + msg_seqno);
        return;
    }

    if (log.isTraceEnabled())
        log.trace(local_addr + ": delivering " + sender + "::" + msg_seqno);
    up_prot.up(evt);
    delivered_bcasts++;
}
protected void unblockAll() {
    flushing = false;
    send_lock.lock();
    try {
        send_cond.signalAll();
        ack_promise.setResult(null); // unblocks any thread waiting on ack_promise (e.g. in forwardToCoord())
    } finally {
        send_lock.unlock();
    }
}
protected void stopFlusher() {
    flushing = false;
    Thread tmp = flusher;

    while (tmp != null && tmp.isAlive()) {
        tmp.interrupt();
        ack_promise.setResult(null); // unblock the flusher in case it is waiting on ack_promise
        try {
            tmp.join();
        } catch (InterruptedException e) {
        }
    }
}
public void sendDiscoveryRequest(String cluster_name, Promise promise, ViewId view_id) throws Exception {
    PingData data = null;
    PhysicalAddress physical_addr =
        (PhysicalAddress) down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr));

    if (view_id == null) {
        List<PhysicalAddress> physical_addrs = Arrays.asList(physical_addr);
        data = new PingData(local_addr, null, false, UUID.get(local_addr), physical_addrs);
    }

    PingHeader hdr = new PingHeader(PingHeader.GET_MBRS_REQ, data, cluster_name);
    hdr.view_id = view_id;

    Collection<PhysicalAddress> cluster_members = fetchClusterMembers(cluster_name);
    if (cluster_members == null) {
        Message msg = new Message(null); // multicast msg
        msg.setFlag(Message.OOB);
        msg.putHeader(getId(), hdr);
        sendMcastDiscoveryRequest(msg);
    } else {
        if (cluster_members.isEmpty()) { // if we don't find any members, return immediately
            if (promise != null)
                promise.setResult(null);
        } else {
            for (final Address addr : cluster_members) {
                if (addr.equals(physical_addr)) // no need to send the request to myself
                    continue;
                final Message msg = new Message(addr, null, null);
                msg.setFlag(Message.OOB);
                msg.putHeader(this.id, hdr);
                if (log.isTraceEnabled())
                    log.trace("[FIND_INITIAL_MBRS] sending discovery request to " + msg.getDest());
                if (!sendDiscoveryRequestsInParallel()) {
                    down_prot.down(new Event(Event.MSG, msg));
                } else {
                    timer.execute(new Runnable() {
                        public void run() {
                            try {
                                down_prot.down(new Event(Event.MSG, msg));
                            } catch (Exception ex) {
                                if (log.isErrorEnabled())
                                    log.error("failed sending discovery request to " + addr + ": " + ex);
                            }
                        }
                    });
                }
            }
        }
    }
}
public void addResponse(PingData rsp, boolean overwrite) {
    if (rsp == null)
        return;
    promise.getLock().lock();
    try {
        if (overwrite)
            ping_rsps.remove(rsp);

        // https://jira.jboss.org/jira/browse/JGRP-1179
        int index = ping_rsps.indexOf(rsp);
        if (index == -1) {
            ping_rsps.add(rsp);
            promise.getCond().signalAll();
        } else if (rsp.isCoord()) {
            PingData pr = ping_rsps.get(index);
            // replace the existing response only if it doesn't already mark the member as coordinator
            if (!pr.isCoord()) {
                ping_rsps.set(index, rsp);
                promise.getCond().signalAll();
            }
        }
    } finally {
        promise.getLock().unlock();
    }
}
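/*
 * The methods above and below rely on a Promise-style helper (ack_promise in SEQUENCER, promise in
 * the discovery code): a lock/condition pair guarding a single result slot. The real class ships
 * with JGroups (org.jgroups.util.Promise); the sketch below only illustrates the contract these
 * methods assume (getLock/getCond/hasResult/setResult/getResult/reset). Names and details are
 * simplified for illustration, not taken from the actual implementation.
 */
class PromiseSketch<T> {
    private final java.util.concurrent.locks.ReentrantLock lock = new java.util.concurrent.locks.ReentrantLock();
    private final java.util.concurrent.locks.Condition cond = lock.newCondition();
    private T result;
    private boolean has_result; // distinguishes "result set to null" from "no result yet"

    public java.util.concurrent.locks.Lock getLock()      { return lock; }
    public java.util.concurrent.locks.Condition getCond() { return cond; }

    public boolean hasResult() {
        lock.lock();
        try { return has_result; } finally { lock.unlock(); }
    }

    /** Sets the result (null is allowed, e.g. to unblock waiters) and wakes up all waiting threads. */
    public void setResult(T res) {
        lock.lock();
        try {
            result = res;
            has_result = true;
            cond.signalAll();
        } finally { lock.unlock(); }
    }

    /** Waits up to timeout_ms for a result; returns null if none was set in time. */
    public T getResult(long timeout_ms) {
        lock.lock();
        try {
            long deadline = System.currentTimeMillis() + timeout_ms;
            while (!has_result) {
                long remaining = deadline - System.currentTimeMillis();
                if (remaining <= 0)
                    return null;
                try {
                    cond.await(remaining, java.util.concurrent.TimeUnit.MILLISECONDS);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return null;
                }
            }
            return result;
        } finally { lock.unlock(); }
    }

    /** Clears the result so the promise can be reused, e.g. before the next forward/ack round. */
    public void reset() {
        lock.lock();
        try {
            result = null;
            has_result = false;
        } finally { lock.unlock(); }
    }
}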
/**
 * Sends all messages currently in forward_table to the new coordinator (changing the dest field).
 * This needs to be done so the underlying reliable unicast protocol (e.g. UNICAST) adds these
 * messages to its retransmission mechanism.<br>
 * Note that we need to resend the messages in order of their seqnos! We also need to prevent
 * other messages from being inserted until we're done, which is why there's synchronization.<br>
 * Access to the forward_table doesn't need to be synchronized as there won't be any insertions
 * during flushing (all down-threads are blocked).
 */
protected void flushMessagesInForwardTable() {
    if (is_coord) {
        for (Map.Entry<Long, Message> entry : forward_table.entrySet()) {
            Long key = entry.getKey();
            Message msg = entry.getValue();
            Buffer buf;
            try {
                buf = Util.streamableToBuffer(msg);
            } catch (Exception e) {
                log.error(Util.getMessage("FlushingBroadcastingFailed"), e);
                continue;
            }

            SequencerHeader hdr = new SequencerHeader(SequencerHeader.WRAPPED_BCAST, key);
            Message forward_msg = new Message(null, buf).putHeader(this.id, hdr);
            if (log.isTraceEnabled())
                log.trace(local_addr + ": flushing (broadcasting) " + local_addr + "::" + key);
            down_prot.down(new Event(Event.MSG, forward_msg));
        }
        return;
    }

    // For forwarded messages, we need to receive the forwarded message from the coordinator, to
    // prevent this case:
    // - V1={A,B,C}
    // - A crashes
    // - C installs V2={B,C}
    // - C forwards messages 3 and 4 to B (the new coord)
    // - B drops 3 because its view is still V1
    // - B installs V2
    // - B receives message 4 and broadcasts it
    // ==> C's message 4 is delivered *before* message 3 !
    // ==> By resending 3 until it is received, then resending 4 until it is received, we make sure
    //     this won't happen (see https://issues.jboss.org/browse/JGRP-1449)
    while (flushing && running && !forward_table.isEmpty()) {
        Map.Entry<Long, Message> entry = forward_table.firstEntry();
        final Long key = entry.getKey();
        Message msg = entry.getValue();
        Buffer buf;
        try {
            buf = Util.streamableToBuffer(msg);
        } catch (Exception e) {
            log.error(Util.getMessage("FlushingBroadcastingFailed"), e);
            continue;
        }

        while (flushing && running && !forward_table.isEmpty()) {
            SequencerHeader hdr = new SequencerHeader(SequencerHeader.FLUSH, key);
            Message forward_msg = new Message(coord, buf).putHeader(this.id, hdr)
                .setFlag(Message.Flag.DONT_BUNDLE);
            if (log.isTraceEnabled())
                log.trace(local_addr + ": flushing (forwarding) " + local_addr + "::" + key
                          + " to coord " + coord);
            ack_promise.reset();
            down_prot.down(new Event(Event.MSG, forward_msg));
            Long ack = ack_promise.getResult(500);
            if (Objects.equals(ack, key) || !forward_table.containsKey(key))
                break;
        }
    }
}