public Message send(Message query) {
  int q, r;
  Message best = null;
  byte rcode;

  for (q = 0; q < 20; q++) {
    boolean ok = false;
    for (r = 0; r < resolvers.length; r++)
      ok |= sendTo(query, r, q);
    if (!ok)
      break;

    Message m = null;
    synchronized (queue) {
      try {
        queue.wait((quantum + 1) * 1000);
      } catch (InterruptedException e) {
        System.out.println("interrupted");
      }
      if (queue.size() == 0)
        continue;
      m = (Message) queue.firstElement();
      queue.removeElementAt(0);
      Integer I = (Integer) queue.firstElement();
      queue.removeElementAt(0);
      r = I.intValue();
    }

    if (m == null)
      invalid[r] = true;
    else {
      rcode = m.getHeader().getRcode();
      if (rcode == Rcode.NOERROR)
        return m;
      else {
        if (best == null)
          best = m;
        invalid[r] = true;
      }
    }
  }
  return best;
}
protected String printMessageList(List<Message> list) {
  StringBuilder sb = new StringBuilder();
  int size = list.size();
  Message first = size > 0 ? list.get(0) : null, second = size > 1 ? list.get(size - 1) : first;
  UnicastHeader hdr;
  if (first != null) {
    hdr = (UnicastHeader) first.getHeader(id);
    if (hdr != null)
      sb.append("#" + hdr.seqno);
  }
  if (second != null) {
    hdr = (UnicastHeader) second.getHeader(id);
    if (hdr != null)
      sb.append(" - #" + hdr.seqno);
  }
  return sb.toString();
}
// ------------------------------------------------------------
private void distributeExternal(Message mess) {
  Address target = mess.getHeader().getDestinationAddress();
  if (matchesExternal(target)) {
    log("Send to external MBus too\n\n");
    layer.send(mess);
  }
}
/**
 * We need to resend our first message with our conn_id
 *
 * @param sender
 * @param seqno Resend the non null messages in the range [lowest .. seqno]
 */
protected void handleResendingOfFirstMessage(Address sender, long seqno) {
  if (log.isTraceEnabled())
    log.trace(local_addr + " <-- SEND_FIRST_SEQNO(" + sender + "," + seqno + ")");
  SenderEntry entry = send_table.get(sender);
  Table<Message> win = entry != null ? entry.sent_msgs : null;
  if (win == null) {
    if (log.isErrorEnabled())
      log.error(local_addr + ": sender window for " + sender + " not found");
    return;
  }

  boolean first_sent = false;
  for (long i = win.getLow() + 1; i <= seqno; i++) {
    Message rsp = win.get(i);
    if (rsp == null)
      continue;
    if (first_sent) {
      down_prot.down(new Event(Event.MSG, rsp));
    } else {
      first_sent = true;
      // We need to copy the UnicastHeader and put it back into the message because Message.copy()
      // doesn't copy the headers and therefore we'd modify the original message in the sender
      // retransmission window (https://jira.jboss.org/jira/browse/JGRP-965)
      Message copy = rsp.copy();
      Unicast2Header hdr = (Unicast2Header) copy.getHeader(this.id);
      Unicast2Header newhdr = hdr.copy();
      newhdr.first = true;
      copy.putHeader(this.id, newhdr);
      down_prot.down(new Event(Event.MSG, copy));
    }
  }
}
public void up(MessageBatch batch) {
  if (batch.dest() == null) { // not a unicast batch
    up_prot.up(batch);
    return;
  }

  int size = batch.size();
  Map<Short, List<Message>> msgs = new TreeMap<Short, List<Message>>(); // map of messages, keyed by conn-id
  for (Message msg : batch) {
    if (msg == null || msg.isFlagSet(Message.Flag.NO_RELIABILITY))
      continue;
    UnicastHeader hdr = (UnicastHeader) msg.getHeader(id);
    if (hdr == null)
      continue;
    batch.remove(msg); // remove the message from the batch, so it won't be passed up the stack

    if (hdr.type != UnicastHeader.DATA) {
      try {
        handleUpEvent(msg.getSrc(), hdr);
      } catch (Throwable t) { // we cannot let an exception terminate the processing of this batch
        log.error(local_addr + ": failed handling event", t);
      }
      continue;
    }

    List<Message> list = msgs.get(hdr.conn_id);
    if (list == null)
      msgs.put(hdr.conn_id, list = new ArrayList<Message>(size));
    list.add(msg);
  }

  if (!msgs.isEmpty())
    handleBatchReceived(batch.sender(), msgs); // process msgs
  if (!batch.isEmpty())
    up_prot.up(batch);
}
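// Side note on the grouping loop above: with Java 8+ the per-conn-id buckets can be built with
// Map.computeIfAbsent instead of the explicit get()/put() pair. This is only an illustrative
// sketch (String payloads stand in for Message, and groupByConnId is not a protocol method);
// it assumes the same java.util collection imports already used above.
protected static Map<Short, List<String>> groupByConnId(List<Short> conn_ids, List<String> payloads) {
  Map<Short, List<String>> msgs = new TreeMap<Short, List<String>>();
  for (int i = 0; i < conn_ids.size(); i++)
    // creates the bucket on first use, then appends; same effect as the get()/put() idiom above
    msgs.computeIfAbsent(conn_ids.get(i), k -> new ArrayList<String>()).add(payloads.get(i));
  return msgs;
}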
public Object up(Event evt) {
  switch (evt.getType()) {
    case Event.MSG:
      Message msg = (Message) evt.getArg();
      LockingHeader hdr = (LockingHeader) msg.getHeader(id);
      if (hdr == null)
        break;

      Request req = (Request) msg.getObject();
      if (log.isTraceEnabled())
        log.trace("[" + local_addr + "] <-- [" + msg.getSrc() + "] " + req);
      switch (req.type) {
        case GRANT_LOCK:
        case RELEASE_LOCK:
          handleLockRequest(req);
          break;
        case LOCK_GRANTED:
          handleLockGrantedResponse(req.lock_name, req.owner, msg.getSrc());
          break;
        case LOCK_DENIED:
          handleLockDeniedResponse(req.lock_name, req.owner);
          break;
        case CREATE_LOCK:
          handleCreateLockRequest(req.lock_name, req.owner);
          break;
        case DELETE_LOCK:
          handleDeleteLockRequest(req.lock_name);
          break;
        case COND_SIG:
        case COND_SIG_ALL:
          handleSignalRequest(req);
          break;
        case LOCK_AWAIT:
          handleAwaitRequest(req.lock_name, req.owner);
          handleLockRequest(req);
          break;
        case DELETE_LOCK_AWAIT:
          handleDeleteAwaitRequest(req.lock_name, req.owner);
          break;
        case SIG_RET:
          handleSignalResponse(req.lock_name, req.owner);
          break;
        case CREATE_AWAITER:
          handleCreateAwaitingRequest(req.lock_name, req.owner);
          break;
        case DELETE_AWAITER:
          handleDeleteAwaitingRequest(req.lock_name, req.owner);
          break;
        default:
          log.error("Request of type " + req.type + " not known");
          break;
      }
      return null;

    case Event.VIEW_CHANGE:
      handleView((View) evt.getArg());
      break;
  }
  return up_prot.up(evt);
}
// ------------------------------------------------------------
public int send(Message mess) {
  // log(".send: "+mess);
  int seqNum = mess.getHeader().getSequenceNumber();
  distributeInternal(mess);
  distributeExternal(mess);
  return seqNum;
}
/** Called by AckSenderWindow to resend messages for which no ACK has been received yet */
public void retransmit(Message msg) {
  if (log.isTraceEnabled()) {
    UnicastHeader hdr = (UnicastHeader) msg.getHeader(id);
    long seqno = hdr != null ? hdr.seqno : -1;
    log.trace(local_addr + " --> XMIT(" + msg.getDest() + ": #" + seqno + ')');
  }
  down_prot.down(new Event(Event.MSG, msg));
  num_xmits++;
}
/**
 * Try to remove as many messages as possible from the table and pass them up. Prevents concurrent
 * passing up of messages by different threads (http://jira.jboss.com/jira/browse/JGRP-198); lots
 * of threads can come up to this point concurrently, but only 1 is allowed to pass at a time. We
 * *can* deliver messages from *different* senders concurrently, e.g. reception of P1, Q1, P2, Q2
 * can result in delivery of P1, Q1, Q2, P2: FIFO (implemented by UNICAST) says messages need to
 * be delivered in the order in which they were sent.
 */
protected int removeAndDeliver(final AtomicBoolean processing, Table<Message> win, Address sender) {
  int retval = 0;
  boolean released_processing = false;
  try {
    while (true) {
      List<Message> list = win.removeMany(processing, true, max_msg_batch_size);
      if (list == null) {
        released_processing = true;
        return retval;
      }

      MessageBatch batch = new MessageBatch(local_addr, sender, null, false, list);
      for (Message msg_to_deliver : batch) {
        // discard OOB msg: it has already been delivered (http://jira.jboss.com/jira/browse/JGRP-377)
        if (msg_to_deliver.isFlagSet(Message.Flag.OOB))
          batch.remove(msg_to_deliver);
      }

      try {
        if (log.isTraceEnabled()) {
          Message first = batch.first(), last = batch.last();
          StringBuilder sb = new StringBuilder(local_addr + ": delivering");
          if (first != null && last != null) {
            UnicastHeader hdr1 = (UnicastHeader) first.getHeader(id),
                hdr2 = (UnicastHeader) last.getHeader(id);
            sb.append(" #").append(hdr1.seqno).append(" - #").append(hdr2.seqno);
          }
          sb.append(" (" + batch.size()).append(" messages)");
          log.trace(sb);
        }
        up_prot.up(batch);
      } catch (Throwable t) {
        log.error("failed to deliver batch " + batch, t);
      }
    }
  } finally {
    // processing is always reset in win.removeMany(processing) above and never here! This code is
    // just a 2nd line of defense should there be an exception before win.removeMany(processing)
    // resets processing.
    if (!released_processing)
      processing.set(false);
  }
}
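// Hedged sketch of the single-deliverer gate described in the Javadoc above: many threads may
// arrive with deliverable messages, but only the one that flips the AtomicBoolean from false to
// true runs the delivery loop; the others return immediately. deliverIfIdle() and the Runnable
// parameter are illustrative names, not part of the protocol; callers in this file pass
// win.getProcessing() as the flag before invoking removeAndDeliver().
protected static void deliverIfIdle(final AtomicBoolean processing, Runnable removeAndDeliver) {
  if (processing.compareAndSet(false, true)) {
    try {
      removeAndDeliver.run();
    } finally {
      processing.set(false); // release the gate even if delivery throws
    }
  }
}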
private Message sendAXFR(Message query) throws IOException {
  Name qname = query.getQuestion().getName();
  ZoneTransferIn xfrin = ZoneTransferIn.newAXFR(qname, address, tsig);
  xfrin.setTimeout((int) (getTimeout() / 1000));
  xfrin.setLocalAddress(localAddress);
  try {
    xfrin.run();
  } catch (ZoneTransferException e) {
    throw new WireParseException(e.getMessage());
  }
  List records = xfrin.getAXFR();
  Message response = new Message(query.getHeader().getID());
  response.getHeader().setFlag(Flags.AA);
  response.getHeader().setFlag(Flags.QR);
  response.addRecord(query.getQuestion(), Section.QUESTION);
  Iterator it = records.iterator();
  while (it.hasNext())
    response.addRecord((Record) it.next(), Section.ANSWER);
  return response;
}
public static void dumpEnvelope(Message m) throws Exception {
  pr("This is the message envelope");
  pr("---------------------------");
  Address[] a;

  // FROM
  if ((a = m.getFrom()) != null) {
    for (int j = 0; j < a.length; j++)
      pr("FROM: " + a[j].toString());
  }

  // TO
  if ((a = m.getRecipients(Message.RecipientType.TO)) != null) {
    for (int j = 0; j < a.length; j++)
      pr("TO: " + a[j].toString());
  }

  // SUBJECT
  pr("SUBJECT: " + m.getSubject());

  // DATE
  Date d = m.getSentDate();
  pr("SendDate: " + (d != null ? d.toString() : "UNKNOWN"));

  // FLAGS
  Flags flags = m.getFlags();
  StringBuffer sb = new StringBuffer();
  Flags.Flag[] sf = flags.getSystemFlags(); // get the system flags

  boolean first = true;
  for (int i = 0; i < sf.length; i++) {
    String s;
    Flags.Flag f = sf[i];
    if (f == Flags.Flag.ANSWERED)
      s = "\\Answered";
    else if (f == Flags.Flag.DELETED)
      s = "\\Deleted";
    else if (f == Flags.Flag.DRAFT)
      s = "\\Draft";
    else if (f == Flags.Flag.FLAGGED)
      s = "\\Flagged";
    else if (f == Flags.Flag.RECENT)
      s = "\\Recent";
    else if (f == Flags.Flag.SEEN)
      s = "\\Seen";
    else
      continue; // skip it
    if (first)
      first = false;
    else
      sb.append(' ');
    sb.append(s);
  }

  String[] uf = flags.getUserFlags(); // get the user flag strings
  for (int i = 0; i < uf.length; i++) {
    if (first)
      first = false;
    else
      sb.append(' ');
    sb.append(uf[i]);
  }
  pr("FLAGS: " + sb.toString());

  // X-MAILER
  String[] hdrs = m.getHeader("X-Mailer");
  if (hdrs != null)
    pr("X-Mailer: " + hdrs[0]);
  else
    pr("X-Mailer NOT available");
}
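// Hedged usage sketch for dumpEnvelope(): open an IMAP INBOX and dump each message's envelope.
// Host, user and password are placeholders, and "imaps" assumes an SSL-capable JavaMail provider
// is on the classpath. Assumes javax.mail.* plus java.util.Properties are imported, and that
// dumpEnvelope() and pr() from the example above are in scope.
public static void main(String[] args) throws Exception {
  Session session = Session.getInstance(new Properties());
  Store store = session.getStore("imaps");
  store.connect("mail.example.com", "user", "password"); // placeholder credentials
  Folder inbox = store.getFolder("INBOX");
  inbox.open(Folder.READ_ONLY);
  for (Message m : inbox.getMessages())
    dumpEnvelope(m);
  inbox.close(false); // false: don't expunge deleted messages
  store.close();
}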
private void writeAll() {
  List<MessageBean> beans = messageTable.getBeans();
  HashMap<Integer, Message> map = new HashMap<Integer, Message>(2 * beans.size());
  for (MessageBean mb : beans) {
    map.put(mb.m.hashCode(), mb.m);
  }

  if (fileChooser == null)
    fileChooser = new FileManager(null, null, null, (PreferencesExt) prefs.node("FileManager"));

  String defloc = (raf.getLocation() == null) ? "." : raf.getLocation();
  String dirName = fileChooser.chooseDirectory(defloc);
  if (dirName == null)
    return;

  try {
    int count = 0;
    for (Message m : map.values()) {
      String header = m.getHeader();
      if (header != null) {
        header = header.split(" ")[0];
      } else {
        header = Integer.toString(Math.abs(m.hashCode()));
      }
      File file = new File(dirName + "/" + header + ".bufr");
      FileOutputStream fos = new FileOutputStream(file);
      WritableByteChannel wbc = fos.getChannel();
      wbc.write(ByteBuffer.wrap(m.getHeader().getBytes()));
      byte[] raw = scan.getMessageBytes(m);
      wbc.write(ByteBuffer.wrap(raw));
      wbc.close();
      count++;
    }
    JOptionPane.showMessageDialog(
        BufrMessageViewer.this, count + " successfully written to " + dirName);
  } catch (IOException e1) {
    JOptionPane.showMessageDialog(BufrMessageViewer.this, e1.getMessage());
    e1.printStackTrace();
  }
}
protected void handleBatchReceived(Address sender, Map<Short, List<Message>> map) {
  for (Map.Entry<Short, List<Message>> element : map.entrySet()) {
    final List<Message> msg_list = element.getValue();
    if (log.isTraceEnabled()) {
      StringBuilder sb = new StringBuilder();
      sb.append(local_addr)
          .append(" <-- DATA(")
          .append(sender)
          .append(": " + printMessageList(msg_list))
          .append(')');
      log.trace(sb);
    }

    short conn_id = element.getKey();
    ReceiverEntry entry = null;
    for (Message msg : msg_list) {
      UnicastHeader hdr = (UnicastHeader) msg.getHeader(id);
      entry = getReceiverEntry(sender, hdr.seqno, hdr.first, conn_id);
      if (entry == null)
        continue;

      Table<Message> win = entry.received_msgs; // win is guaranteed to be non-null if we get here
      boolean msg_added = win.add(hdr.seqno, msg);
      num_msgs_received++;

      if (hdr.first && msg_added)
        // send an ack immediately when we received the first message of a conn
        sendAck(sender, hdr.seqno, conn_id);

      // An OOB message is passed up immediately. Later, when remove() is called, we discard it.
      // This affects ordering! (http://jira.jboss.com/jira/browse/JGRP-377)
      if (msg.isFlagSet(Message.Flag.OOB) && msg_added) {
        try {
          up_prot.up(new Event(Event.MSG, msg));
        } catch (Throwable t) {
          log.error("couldn't deliver OOB message " + msg, t);
        }
      }
    }
    if (entry != null && conn_expiry_timeout > 0)
      entry.update();
  }

  ReceiverEntry entry = recv_table.get(sender);
  Table<Message> win = entry != null ? entry.received_msgs : null;
  if (win != null) {
    final AtomicBoolean processing = win.getProcessing();
    if (processing.compareAndSet(false, true)) {
      removeAndDeliver(processing, win, sender);
      sendAck(sender, win.getHighestDeliverable(), entry.recv_conn_id);
    }
  }
}
// ------------------------------------------------------------
private void distributeInternal(Message mess) {
  // Find out the virtual recipients
  Address target = mess.getHeader().getDestinationAddress();
  Address source = mess.getHeader().getSourceAddress();
  VirtualMBusListener[] virt = getMatchingTargets(target);

  // Deliver message to all virtual recipients
  for (int i = 0; i < virt.length; i++) {
    if (!virt[i].getAddress().equals(source)) {
      log(".distributeInternal to " + virt[i]);
      virt[i].incomingMessage(mess);
    }
  }
}
private Object handleUpMessage(Event evt) throws Exception {
  Message msg = (Message) evt.getArg();
  EncryptHeader hdr;

  if (msg == null
      || (msg.getLength() == 0 && !encrypt_entire_message)
      || ((hdr = (EncryptHeader) msg.getHeader(this.id)) == null))
    return up_prot.up(evt);

  if (log.isTraceEnabled())
    log.trace("header received %s", hdr);

  switch (hdr.getType()) {
    case EncryptHeader.ENCRYPT:
      return handleEncryptedMessage(msg, evt, hdr);
    default:
      handleUpEvent(msg, hdr);
      return null;
  }
}
/**
 * We need to resend our first message with our conn_id
 *
 * @param sender
 * @param seqno Resend messages in the range [lowest .. seqno]
 */
private void handleResendingOfFirstMessage(Address sender, long seqno) {
  if (log.isTraceEnabled())
    log.trace(local_addr + " <-- SEND_FIRST_SEQNO(" + sender + "," + seqno + ")");
  SenderEntry entry = send_table.get(sender);
  AckSenderWindow win = entry != null ? entry.sent_msgs : null;
  if (win == null) {
    if (log.isErrorEnabled())
      log.error(local_addr + ": sender window for " + sender + " not found");
    return;
  }

  long lowest = win.getLowest();
  Message rsp = win.get(lowest);
  if (rsp == null)
    return;

  // We need to copy the UnicastHeader and put it back into the message because Message.copy()
  // doesn't copy the headers and therefore we'd modify the original message in the sender
  // retransmission window (https://jira.jboss.org/jira/browse/JGRP-965)
  Message copy = rsp.copy();
  Unicast2Header hdr = (Unicast2Header) copy.getHeader(this.id);
  Unicast2Header newhdr = hdr.copy();
  newhdr.first = true;
  copy.putHeader(this.id, newhdr);

  if (log.isTraceEnabled()) {
    StringBuilder sb = new StringBuilder();
    sb.append(local_addr)
        .append(" --> DATA(")
        .append(copy.getDest())
        .append(": #")
        .append(newhdr.seqno)
        .append(", conn_id=")
        .append(newhdr.conn_id);
    if (newhdr.first)
      sb.append(", first");
    sb.append(')');
    log.trace(sb);
  }

  down_prot.down(new Event(Event.MSG, copy));

  if (++lowest > seqno)
    return;
  for (long i = lowest; i <= seqno; i++) {
    rsp = win.get(i);
    if (rsp != null)
      down_prot.down(new Event(Event.MSG, rsp));
  }
}
/**
 * Does the actual work for decrypting - if version does not match current cipher then tries the
 * previous cipher
 */
private Message decryptMessage(Cipher cipher, Message msg) throws Exception {
  EncryptHeader hdr = (EncryptHeader) msg.getHeader(this.id);
  if (!Arrays.equals(hdr.getVersion(), getSymVersion())) {
    log.warn("attempting to use stored cipher as message does not use current encryption version");
    cipher = keyMap.get(new AsciiString(hdr.getVersion()));
    if (cipher == null) {
      log.warn("unable to find a matching cipher in previous key map");
      return null;
    }
    log.trace("decrypting using previous cipher version");
    synchronized (cipher) {
      return _decrypt(cipher, msg, hdr.encryptEntireMessage());
    }
  }
  return _decrypt(cipher, msg, hdr.encryptEntireMessage());
}
public Message visit(Message msg, MessageBatch batch) {
  EncryptHeader hdr;

  if (msg == null
      || (msg.getLength() == 0 && !encrypt_entire_message)
      || ((hdr = (EncryptHeader) msg.getHeader(id)) == null))
    return null;

  if (hdr.getType() == EncryptHeader.ENCRYPT) {
    // if queueing then pass into queue to be dealt with later
    if (queue_up) {
      queueUpMessage(msg, batch);
      return null;
    }

    // make sure we pass up any queued messages first
    if (!suppliedKey)
      drainUpQueue();

    if (lock == null) {
      int index = getNextIndex();
      lock = decoding_locks[index];
      cipher = decoding_ciphers[index];
      lock.lock();
    }

    try {
      Message tmpMsg = decryptMessage(cipher, msg.copy()); // need to copy for possible xmits
      if (tmpMsg != null)
        batch.replace(msg, tmpMsg);
    } catch (Exception e) {
      log.error(
          "failed decrypting message from %s (offset=%d, length=%d, buf.length=%d): %s, headers are %s",
          msg.getSrc(), msg.getOffset(), msg.getLength(), msg.getRawBuffer().length, e, msg.printHeaders());
    }
  } else {
    // a control message will get handled by ENCRYPT and should not be passed up
    batch.remove(msg);
    handleUpEvent(msg, hdr);
  }
  return null;
}
public Object up(Event evt) {
  Message msg;
  Address dst, src;
  Unicast2Header hdr;

  switch (evt.getType()) {
    case Event.MSG:
      msg = (Message) evt.getArg();
      dst = msg.getDest();
      if (dst == null || msg.isFlagSet(Message.NO_RELIABILITY)) // only handle unicast messages
        break; // pass up

      // changed from removeHeader(): we cannot remove the header because if we do loopback=true
      // at the transport level, we will not have the header on retransmit ! (bela Aug 22 2006)
      hdr = (Unicast2Header) msg.getHeader(this.id);
      if (hdr == null)
        break;
      src = msg.getSrc();
      switch (hdr.type) {
        case Unicast2Header.DATA: // received regular message
          handleDataReceived(src, hdr.seqno, hdr.conn_id, hdr.first, msg, evt);
          return null; // we pass the deliverable message up in handleDataReceived()
        case Unicast2Header.XMIT_REQ: // received ACK for previously sent message
          handleXmitRequest(src, hdr.seqno, hdr.high_seqno);
          break;
        case Unicast2Header.SEND_FIRST_SEQNO:
          handleResendingOfFirstMessage(src, hdr.seqno);
          break;
        case Unicast2Header.STABLE:
          stable(msg.getSrc(), hdr.conn_id, hdr.seqno, hdr.high_seqno);
          break;
        default:
          log.error("UnicastHeader type " + hdr.type + " not known !");
          break;
      }
      return null;
  }
  return up_prot.up(evt); // Pass up to the layer above us
}
public Object up(Event evt) {
  switch (evt.getType()) {
    case Event.MSG:
      Message msg = (Message) evt.getArg();
      StateHeader hdr = (StateHeader) msg.getHeader(this.id);
      if (hdr == null)
        break;
      switch (hdr.type) {
        case StateHeader.STATE_REQ:
          state_requesters.add(msg.getSrc());
          break;
        case StateHeader.STATE_RSP:
          handleStateRsp(hdr.getDigest(), msg.getSrc(), msg.getBuffer());
          break;
        case StateHeader.STATE_EX:
          closeHoleFor(msg.getSrc());
          handleException((Throwable) msg.getObject());
          break;
        default:
          log.error("%s: type %s not known in StateHeader", local_addr, hdr.type);
          break;
      }
      return null;

    case Event.TMP_VIEW:
    case Event.VIEW_CHANGE:
      handleViewChange((View) evt.getArg());
      break;

    case Event.CONFIG:
      Map<String, Object> config = (Map<String, Object>) evt.getArg();
      if (config != null && config.containsKey("state_transfer"))
        log.error(
            Util.getMessage("ProtocolStackCannotContainTwoStateTransferProtocolsRemoveEitherOneOfThem"));
      break;
  }
  return up_prot.up(evt);
}
public Object up(Event evt) {
  switch (evt.getType()) {
    case Event.MSG:
      Message msg = (Message) evt.getArg();
      if (msg.getDest() == null || msg.isFlagSet(Message.Flag.NO_RELIABILITY)) // only handle unicast messages
        break; // pass up

      UnicastHeader hdr = (UnicastHeader) msg.getHeader(this.id);
      if (hdr == null)
        break;
      Address sender = msg.getSrc();
      switch (hdr.type) {
        case UnicastHeader.DATA: // received regular message
          handleDataReceived(sender, hdr.seqno, hdr.conn_id, hdr.first, msg, evt);
          break;
        default:
          handleUpEvent(sender, hdr);
          break;
      }
      return null;
  }
  return up_prot.up(evt); // Pass up to the layer above us
}
public String getHeader() { return m.getHeader(); }
public static Record[] getRecords(String namestr, short type, short dclass, byte cred) {
  Message query;
  Message response;
  Record question;
  Record[] answers;
  int answerCount = 0, i = 0;
  Enumeration e;

  Name name = new Name(namestr);
  /*System.out.println("lookup of " + name + " " + Type.string(type));*/

  if (!Type.isRR(type) && type != Type.ANY)
    return null;

  if (res == null) {
    try {
      eres = new ExtendedResolver();
    } catch (UnknownHostException uhe) {
      System.out.println("Failed to initialize resolver");
      System.exit(-1);
    }
  }
  if (cache == null)
    cache = new Cache();

  CacheResponse cached = cache.lookupRecords(name, type, dclass, cred);
  /*System.out.println(cached);*/
  if (cached.isSuccessful()) {
    RRset rrset = cached.answer();
    answerCount = rrset.size();
    e = rrset.rrs();
  } else if (cached.isNegative()) {
    answerCount = 0;
    e = null;
  } else {
    question = Record.newRecord(name, type, dclass);
    query = Message.newQuery(question);

    if (res != null)
      response = res.send(query);
    else
      response = eres.send(query);

    short rcode = response.getHeader().getRcode();
    if (rcode == Rcode.NOERROR || rcode == Rcode.NXDOMAIN)
      cache.addMessage(response);

    if (rcode != Rcode.NOERROR)
      return null;

    e = response.getSection(Section.ANSWER);
    while (e.hasMoreElements()) {
      Record r = (Record) e.nextElement();
      if (matchType(r.getType(), type))
        answerCount++;
    }
    e = response.getSection(Section.ANSWER);
  }

  if (answerCount == 0)
    return null;

  answers = new Record[answerCount];

  while (e.hasMoreElements()) {
    Record r = (Record) e.nextElement();
    if (matchType(r.getType(), type))
      answers[i++] = r;
  }
  return answers;
}
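// Hedged usage sketch for getRecords(): resolve the A records of a host and print them. Type.A
// and DClass.IN are standard dnsjava constants; the credibility byte is passed as a plain literal
// here because the appropriate Credibility constant depends on the (old) dnsjava version this
// code targets. printARecords is an illustrative helper, not part of the library.
public static void printARecords(String host) {
  Record[] answers = getRecords(host, Type.A, DClass.IN, (byte) 0);
  if (answers == null) {
    System.out.println("no records found for " + host);
    return;
  }
  for (int i = 0; i < answers.length; i++)
    System.out.println(answers[i]);
}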
@SuppressWarnings("unchecked") public Object up(Event evt) { switch (evt.getType()) { case Event.MSG: Message msg = (Message) evt.getArg(); GmsHeader hdr = (GmsHeader) msg.getHeader(this.id); if (hdr == null) break; switch (hdr.type) { case GmsHeader.JOIN_REQ: view_handler.add( new Request(Request.JOIN, hdr.mbr, false, null, hdr.useFlushIfPresent)); break; case GmsHeader.JOIN_REQ_WITH_STATE_TRANSFER: view_handler.add( new Request( Request.JOIN_WITH_STATE_TRANSFER, hdr.mbr, false, null, hdr.useFlushIfPresent)); break; case GmsHeader.JOIN_RSP: impl.handleJoinResponse(hdr.join_rsp); break; case GmsHeader.LEAVE_REQ: if (hdr.mbr == null) return null; view_handler.add(new Request(Request.LEAVE, hdr.mbr, false)); break; case GmsHeader.LEAVE_RSP: impl.handleLeaveResponse(); break; case GmsHeader.VIEW: View new_view = hdr.view; if (new_view == null) return null; Address coord = msg.getSrc(); if (!new_view.containsMember(coord)) { sendViewAck( coord); // we need to send the ack first, otherwise the connection is removed impl.handleViewChange(new_view, hdr.my_digest); } else { impl.handleViewChange(new_view, hdr.my_digest); sendViewAck(coord); // send VIEW_ACK to sender of view } break; case GmsHeader.VIEW_ACK: Address sender = msg.getSrc(); ack_collector.ack(sender); return null; // don't pass further up case GmsHeader.MERGE_REQ: impl.handleMergeRequest(msg.getSrc(), hdr.merge_id, hdr.mbrs); break; case GmsHeader.MERGE_RSP: MergeData merge_data = new MergeData(msg.getSrc(), hdr.view, hdr.my_digest, hdr.merge_rejected); if (log.isTraceEnabled()) { log.trace( local_addr + ": got merge response from " + msg.getSrc() + ", merge_id=" + hdr.merge_id + ", merge data is " + merge_data); } impl.handleMergeResponse(merge_data, hdr.merge_id); break; case GmsHeader.INSTALL_MERGE_VIEW: impl.handleMergeView( new MergeData(msg.getSrc(), hdr.view, hdr.my_digest), hdr.merge_id); break; case GmsHeader.INSTALL_DIGEST: Digest tmp = hdr.my_digest; down_prot.down(new Event(Event.MERGE_DIGEST, tmp)); break; case GmsHeader.INSTALL_MERGE_VIEW_OK: // [JGRP-700] - FLUSH: flushing should span merge merge_ack_collector.ack(msg.getSrc()); break; case GmsHeader.CANCEL_MERGE: // [JGRP-524] - FLUSH and merge: flush doesn't wrap entire merge process impl.handleMergeCancelled(hdr.merge_id); break; case GmsHeader.GET_DIGEST_REQ: // only handle this request if it was sent by the coordinator (or at least a member) of // the current cluster synchronized (members) { if (!members.contains(msg.getSrc())) break; } // discard my own request: if (msg.getSrc().equals(local_addr)) return null; if (hdr.merge_id != null && !(merger.matchMergeId(hdr.merge_id) || merger.setMergeId(null, hdr.merge_id))) return null; // fetch only my own digest Digest digest = (Digest) down_prot.down(new Event(Event.GET_DIGEST, local_addr)); if (digest != null) { GmsHeader rsp_hdr = new GmsHeader(GmsHeader.GET_DIGEST_RSP); rsp_hdr.my_digest = digest; Message get_digest_rsp = new Message(msg.getSrc(), null, null); get_digest_rsp.setFlag(Message.OOB); get_digest_rsp.putHeader(this.id, rsp_hdr); down_prot.down(new Event(Event.MSG, get_digest_rsp)); } break; case GmsHeader.GET_DIGEST_RSP: Digest digest_rsp = hdr.my_digest; impl.handleDigestResponse(msg.getSrc(), digest_rsp); break; default: if (log.isErrorEnabled()) log.error("GmsHeader with type=" + hdr.type + " not known"); } return null; // don't pass up case Event.SUSPECT: Object retval = up_prot.up(evt); Address suspected = (Address) evt.getArg(); view_handler.add(new Request(Request.SUSPECT, suspected, true)); 
ack_collector.suspect(suspected); merge_ack_collector.suspect(suspected); return retval; case Event.UNSUSPECT: impl.unsuspect((Address) evt.getArg()); return null; // discard case Event.MERGE: view_handler.add( new Request(Request.MERGE, null, false, (Map<Address, View>) evt.getArg())); return null; // don't pass up case Event.IS_MERGE_IN_PROGRESS: return merger.isMergeInProgress(); } return up_prot.up(evt); }
@SuppressWarnings("unchecked") public Object up(Event evt) { switch (evt.getType()) { case Event.MSG: Message msg = (Message) evt.getArg(); PingHeader hdr = (PingHeader) msg.getHeader(this.id); if (hdr == null) return up_prot.up(evt); if (is_leaving) return null; // prevents merging back a leaving member // (https://issues.jboss.org/browse/JGRP-1336) PingData data = readPingData(msg.getRawBuffer(), msg.getOffset(), msg.getLength()); Address logical_addr = data != null ? data.getAddress() : msg.src(); switch (hdr.type) { case PingHeader.GET_MBRS_REQ: // return Rsp(local_addr, coord) if (cluster_name == null || hdr.cluster_name == null) { log.warn( "cluster_name (%s) or cluster_name of header (%s) is null; passing up discovery " + "request from %s, but this should not be the case", cluster_name, hdr.cluster_name, msg.src()); } else { if (!cluster_name.equals(hdr.cluster_name)) { log.warn( "%s: discarding discovery request for cluster '%s' from %s; " + "our cluster name is '%s'. Please separate your clusters properly", logical_addr, hdr.cluster_name, msg.src(), cluster_name); return null; } } // add physical address and logical name of the discovery sender (if available) to the // cache if (data != null) { addDiscoveryResponseToCaches( logical_addr, data.getLogicalName(), data.getPhysicalAddr()); discoveryRequestReceived(msg.getSrc(), data.getLogicalName(), data.getPhysicalAddr()); addResponse(data, false); } if (return_entire_cache) { Map<Address, PhysicalAddress> cache = (Map<Address, PhysicalAddress>) down(new Event(Event.GET_LOGICAL_PHYSICAL_MAPPINGS)); if (cache != null) { for (Map.Entry<Address, PhysicalAddress> entry : cache.entrySet()) { Address addr = entry.getKey(); // JGRP-1492: only return our own address, and addresses in view. if (addr.equals(local_addr) || members.contains(addr)) { PhysicalAddress physical_addr = entry.getValue(); sendDiscoveryResponse( addr, physical_addr, UUID.get(addr), msg.getSrc(), isCoord(addr)); } } } return null; } // Only send a response if hdr.mbrs is not empty and contains myself. Otherwise always // send my info Collection<? extends Address> mbrs = data != null ? data.mbrs() : null; boolean send_response = mbrs == null || mbrs.contains(local_addr); if (send_response) { PhysicalAddress physical_addr = (PhysicalAddress) down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr)); sendDiscoveryResponse( local_addr, physical_addr, UUID.get(local_addr), msg.getSrc(), is_coord); } return null; case PingHeader.GET_MBRS_RSP: // add physical address (if available) to transport's cache if (data != null) { log.trace("%s: received GET_MBRS_RSP from %s: %s", local_addr, msg.src(), data); handleDiscoveryResponse(data, msg.src()); } return null; default: log.warn("got PING header with unknown type %d", hdr.type); return null; } case Event.FIND_MBRS: return findMembers( (List<Address>) evt.getArg(), false, true); // this is done asynchronously } return up_prot.up(evt); }
// ------------------------------------------------------------
public void deliveryFailed(int seqNum, Message mess) {
  MBusListener[] all = getMatchingTargets(mess.getHeader().getSourceAddress());
  for (int i = 0; i < all.length; i++)
    all[i].deliveryFailed(seqNum, mess);
}
public Object up(Message msg) {
  ExecutorHeader hdr = msg.getHeader(id);
  if (hdr == null)
    return up_prot.up(msg);

  Request req = msg.getObject();
  if (log.isTraceEnabled())
    log.trace("[" + local_addr + "] <-- [" + msg.getSrc() + "] " + req);
  switch (req.type) {
    case RUN_REQUEST:
      handleTaskRequest(req.request, (Address) req.object);
      break;
    case CONSUMER_READY:
      handleConsumerReadyRequest(req.request, (Address) req.object);
      break;
    case CONSUMER_UNREADY:
      handleConsumerUnreadyRequest(req.request, (Address) req.object);
      break;
    case CONSUMER_FOUND:
      handleConsumerFoundResponse(req.request, (Address) req.object);
      break;
    case RUN_SUBMITTED:
      RequestWithThread reqWT = (RequestWithThread) req;
      Object objectToRun = reqWT.object;
      Runnable runnable;
      if (objectToRun instanceof Runnable) {
        runnable = (Runnable) objectToRun;
      } else if (objectToRun instanceof Callable) {
        @SuppressWarnings("unchecked")
        Callable<Object> callable = (Callable<Object>) objectToRun;
        runnable = new FutureTask<>(callable);
      } else {
        log.error(
            Util.getMessage("RequestOfType") + req.type + " sent an object of " + objectToRun
                + " which is invalid");
        break;
      }
      handleTaskSubmittedRequest(runnable, msg.getSrc(), req.request, reqWT.threadId);
      break;
    case RUN_REJECTED:
      // We could make requests local for this, but is it really worth it
      handleTaskRejectedResponse(msg.getSrc(), req.request);
      break;
    case RESULT_SUCCESS:
      handleValueResponse(msg.getSrc(), req.request, req.object);
      break;
    case RESULT_EXCEPTION:
      handleExceptionResponse(msg.getSrc(), req.request, (Throwable) req.object);
      break;
    case INTERRUPT_RUN:
      // We could make requests local for this, but is it really worth it
      handleInterruptRequest(msg.getSrc(), req.request);
      break;
    case CREATE_CONSUMER_READY:
      Owner owner = new Owner((Address) req.object, req.request);
      handleNewConsumer(owner);
      break;
    case CREATE_RUN_REQUEST:
      owner = new Owner((Address) req.object, req.request);
      handleNewRunRequest(owner);
      break;
    case DELETE_CONSUMER_READY:
      owner = new Owner((Address) req.object, req.request);
      handleRemoveConsumer(owner);
      break;
    case DELETE_RUN_REQUEST:
      owner = new Owner((Address) req.object, req.request);
      handleRemoveRunRequest(owner);
      break;
    default:
      log.error(Util.getMessage("RequestOfType") + req.type + " not known");
      break;
  }
  return null;
}
// ---------------------------------------------------------------
public void deliverySuccessful(int seqNum, Message mess) {
  // log("VTL.deliverySuccessful: "+mess);
  MBusListener[] all = getMatchingTargets(mess.getHeader().getSourceAddress());
  // log("VTL: "+all.length+" matching targets for "+mess.getHeader().getSourceAddress());
  for (int i = 0; i < all.length; i++)
    all[i].deliverySuccessful(seqNum, mess);
}
/**
 * Adds all data from a Message into the Cache. Each record is added with the appropriate
 * credibility, and negative answers are cached as such.
 *
 * @param in The Message to be added
 * @see Message
 */
public void addMessage(Message in) {
  boolean isAuth = in.getHeader().getFlag(Flags.AA);
  Name qname = in.getQuestion().getName();
  Name curname = qname;
  short qtype = in.getQuestion().getType();
  short qclass = in.getQuestion().getDClass();
  byte cred;
  short rcode = in.getHeader().getRcode();
  boolean haveAnswer = false;
  boolean completed = false;
  boolean restart = false;
  RRset[] answers, auth, addl;

  if (rcode != Rcode.NOERROR && rcode != Rcode.NXDOMAIN)
    return;

  if (secure) {
    Cache c = new Cache(dclass);
    c.addMessage(in);
    verifyRecords(c);
    return;
  }

  answers = in.getSectionRRsets(Section.ANSWER);
  for (int i = 0; i < answers.length; i++) {
    if (answers[i].getDClass() != qclass)
      continue;
    short type = answers[i].getType();
    Name name = answers[i].getName();
    cred = getCred(Section.ANSWER, isAuth);
    if (type == Type.CNAME && name.equals(curname)) {
      CNAMERecord cname;
      addRRset(answers[i], cred);
      cname = (CNAMERecord) answers[i].first();
      curname = cname.getTarget();
      restart = true;
      haveAnswer = true;
    } else if (type == Type.DNAME && curname.subdomain(name)) {
      DNAMERecord dname;
      addRRset(answers[i], cred);
      dname = (DNAMERecord) answers[i].first();
      try {
        curname = curname.fromDNAME(dname);
      } catch (NameTooLongException e) {
        break;
      }
      restart = true;
      haveAnswer = true;
    } else if ((type == qtype || qtype == Type.ANY) && name.equals(curname)) {
      addRRset(answers[i], cred);
      completed = true;
      haveAnswer = true;
    }
    if (restart) {
      restart = false;
      i = 0;
    }
  }

  auth = in.getSectionRRsets(Section.AUTHORITY);
  if (!completed) {
    /* This is a negative response or a referral. */
    RRset soa = null, ns = null;
    for (int i = 0; i < auth.length; i++) {
      if (auth[i].getType() == Type.SOA && curname.subdomain(auth[i].getName()))
        soa = auth[i];
      else if (auth[i].getType() == Type.NS && curname.subdomain(auth[i].getName()))
        ns = auth[i];
    }
    short cachetype = (rcode == Rcode.NXDOMAIN) ? (short) 0 : qtype;
    if (soa != null || ns == null) {
      /* Negative response */
      cred = getCred(Section.AUTHORITY, isAuth);
      SOARecord soarec = null;
      if (soa != null)
        soarec = (SOARecord) soa.first();
      addNegative(curname, cachetype, soarec, cred);
      /* NXT records are not cached yet. */
    } else {
      /* Referral response */
      cred = getCred(Section.AUTHORITY, isAuth);
      addRRset(ns, cred);
    }
  }

  addl = in.getSectionRRsets(Section.ADDITIONAL);
  for (int i = 0; i < addl.length; i++) {
    short type = addl[i].getType();
    if (type != Type.A && type != Type.AAAA && type != Type.A6)
      continue;
    /* XXX check the name */
    Name name = addl[i].getName();
    cred = getCred(Section.ADDITIONAL, isAuth);
    addRRset(addl[i], cred);
  }
}
/**
 * An event was received from the layer below. Usually the current layer will want to examine the
 * event type and - depending on its type - perform some computation (e.g. removing headers from a
 * MSG event type, or updating the internal membership list when receiving a VIEW_CHANGE event).
 * Finally the event is either a) discarded, or b) an event is sent down the stack using
 * <code>PassDown</code> or c) the event (or another event) is sent up the stack using
 * <code>PassUp</code>.
 *
 * <p>For the PING protocol, the up operation does the following things:
 * 1. If the event is an Event.MSG then PING will inspect the message header. If the header is
 *    null, PING simply passes up the event. If the header is PingHeader.GET_MBRS_REQ then the
 *    PING protocol will PassDown a PingRequest message. If the header is PingHeader.GET_MBRS_RSP
 *    we will add the message to the initial members vector and wake up any waiting threads.
 * 2. If the event is Event.SET_LOCAL_ADDR we will simply set the local address of this protocol.
 * 3. All other messages are simply passed up to the protocol above.
 *
 * @param evt - the event that has been sent from the layer below
 */
@SuppressWarnings("unchecked")
public Object up(Event evt) {
  switch (evt.getType()) {
    case Event.MSG:
      Message msg = (Message) evt.getArg();
      PingHeader hdr = (PingHeader) msg.getHeader(this.id);
      if (hdr == null)
        return up_prot.up(evt);

      PingData data = hdr.data;
      Address logical_addr = data != null ? data.getAddress() : null;

      if (is_leaving)
        return null; // prevents merging back a leaving member (https://issues.jboss.org/browse/JGRP-1336)

      switch (hdr.type) {
        case PingHeader.GET_MBRS_REQ: // return Rsp(local_addr, coord)
          if (group_addr == null || hdr.cluster_name == null) {
            if (log.isWarnEnabled())
              log.warn(
                  "group_addr (" + group_addr + ") or cluster_name of header (" + hdr.cluster_name
                      + ") is null; passing up discovery request from " + msg.getSrc()
                      + ", but this should not be the case");
          } else {
            if (!group_addr.equals(hdr.cluster_name)) {
              if (log.isWarnEnabled())
                log.warn(
                    "discarding discovery request for cluster '" + hdr.cluster_name + "' from "
                        + msg.getSrc() + "; our cluster name is '" + group_addr + "'. "
                        + "Please separate your clusters cleanly.");
              return null;
            }
          }

          // add physical address and logical name of the discovery sender (if available) to the cache
          if (data != null) {
            if (logical_addr == null)
              logical_addr = msg.getSrc();
            Collection<PhysicalAddress> physical_addrs = data.getPhysicalAddrs();
            PhysicalAddress physical_addr =
                physical_addrs != null && !physical_addrs.isEmpty()
                    ? physical_addrs.iterator().next()
                    : null;
            if (logical_addr != null && physical_addr != null)
              down(
                  new Event(
                      Event.SET_PHYSICAL_ADDRESS,
                      new Tuple<Address, PhysicalAddress>(logical_addr, physical_addr)));
            if (logical_addr != null && data.getLogicalName() != null)
              UUID.add(logical_addr, data.getLogicalName());
            discoveryRequestReceived(msg.getSrc(), data.getLogicalName(), physical_addrs);
            synchronized (ping_responses) {
              for (Responses response : ping_responses) {
                response.addResponse(data, false);
              }
            }
          }

          if (hdr.view_id != null) {
            // If the discovery request is merge-triggered, and the ViewId shipped with it
            // is the same as ours, we don't respond (JGRP-1315).
            ViewId my_view_id = view != null ? view.getViewId() : null;
            if (my_view_id != null && my_view_id.equals(hdr.view_id))
              return null;

            boolean send_discovery_rsp =
                force_sending_discovery_rsps
                    || is_coord
                    || current_coord == null
                    || current_coord.equals(msg.getSrc());
            if (!send_discovery_rsp) {
              if (log.isTraceEnabled())
                log.trace(
                    local_addr + ": suppressing merge response as I'm not a coordinator and the "
                        + "discovery request was not sent by a coordinator");
              return null;
            }
          }

          if (isMergeRunning()) {
            if (log.isTraceEnabled())
              log.trace(local_addr + ": suppressing merge response as a merge is already in progress");
            return null;
          }

          List<PhysicalAddress> physical_addrs =
              hdr.view_id != null
                  ? null
                  : Arrays.asList(
                      (PhysicalAddress) down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr)));
          sendDiscoveryResponse(
              local_addr, physical_addrs, is_server, hdr.view_id != null, UUID.get(local_addr), msg.getSrc());
          return null;

        case PingHeader.GET_MBRS_RSP:
          // add response to vector and notify waiting thread
          // add physical address (if available) to transport's cache
          if (data != null) {
            Address response_sender = msg.getSrc();
            if (logical_addr == null)
              logical_addr = msg.getSrc();
            Collection<PhysicalAddress> addrs = data.getPhysicalAddrs();
            PhysicalAddress physical_addr =
                addrs != null && !addrs.isEmpty() ? addrs.iterator().next() : null;
            if (logical_addr != null && physical_addr != null)
              down(
                  new Event(
                      Event.SET_PHYSICAL_ADDRESS,
                      new Tuple<Address, PhysicalAddress>(logical_addr, physical_addr)));
            if (logical_addr != null && data.getLogicalName() != null)
              UUID.add(logical_addr, data.getLogicalName());

            if (log.isTraceEnabled())
              log.trace(local_addr + ": received GET_MBRS_RSP from " + response_sender + ": " + data);

            boolean overwrite = logical_addr != null && logical_addr.equals(response_sender);
            synchronized (ping_responses) {
              for (Responses response : ping_responses) {
                response.addResponse(data, overwrite);
              }
            }
          }
          return null;

        default:
          if (log.isWarnEnabled())
            log.warn("got PING header with unknown type (" + hdr.type + ')');
          return null;
      }

    case Event.GET_PHYSICAL_ADDRESS:
      try {
        sendDiscoveryRequest(group_addr, null, null);
      } catch (InterruptedIOException ie) {
        if (log.isWarnEnabled()) {
          log.warn("Discovery request for cluster " + group_addr + " interrupted");
        }
        Thread.currentThread().interrupt();
      } catch (Exception ex) {
        if (log.isErrorEnabled())
          log.error("failed sending discovery request", ex);
      }
      return null;

    case Event.FIND_INITIAL_MBRS: // sent by transport
      return findInitialMembers(null);
  }
  return up_prot.up(evt);
}