Example #1
  protected int removeAndDeliver(final AtomicBoolean processing, Table<Message> win) {
    int retval = 0;
    boolean released_processing = false;
    try {
      while (true) {
        List<Message> list = win.removeMany(processing, true, max_msg_batch_size);
        if (list == null) {
          released_processing = true;
          return retval;
        }

        for (Message m : list) {
          retval++;
          // discard OOB msg: it has already been delivered
          // (http://jira.jboss.com/jira/browse/JGRP-377)
          if (m.isFlagSet(Message.OOB)) continue;
          try {
            up_prot.up(new Event(Event.MSG, m));
          } catch (Throwable t) {
            log.error("couldn't deliver message " + m, t);
          }
        }
      }
    } finally {
      // Processing is always set in win.removeMany(processing, ...) above and never here! This
      // code is just a 2nd line of defense should there be an exception before removeMany() sets
      // processing.
      if (!released_processing) processing.set(false);
    }
  }
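The processing flag is what keeps delivery single-threaded: only the thread that set it may drain the table, and removeAndDeliver() clears it again (via removeMany() returning null, or in the finally block). The calling code is not part of this example; a minimal sketch of the compare-and-set guard a caller would typically use, offered as an assumption:

  // Hypothetical caller: only the thread that wins the CAS drains the table; other threads
  // return immediately, because the winner will also deliver the messages they added.
  protected void deliver(final AtomicBoolean processing, Table<Message> win) {
    if (processing.compareAndSet(false, true)) {
      removeAndDeliver(processing, win);
    }
  }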
Example #2
  /** Tests https://jira.jboss.org/jira/browse/JGRP-1079 */
  public void testOOBMessageLoss() throws Exception {
    Util.close(b); // we only need 1 channel
    MyReceiver receiver = new MySleepingReceiver("C1", 1000);
    a.setReceiver(receiver);

    TP transport = a.getProtocolStack().getTransport();
    transport.setOOBRejectionPolicy("discard");

    final int NUM = 10;

    for (int i = 1; i <= NUM; i++) {
      Message msg = new Message(null, null, i);
      msg.setFlag(Message.OOB);
      a.send(msg);
    }
    STABLE stable = (STABLE) a.getProtocolStack().findProtocol(STABLE.class);
    if (stable != null) stable.runMessageGarbageCollection();
    Collection<Integer> msgs = receiver.getMsgs();

    for (int i = 0; i < 20; i++) {
      if (msgs.size() == NUM) break;
      Util.sleep(1000);
      sendStableMessages(a, b);
    }

    System.out.println("msgs = " + Util.print(msgs));

    assert msgs.size() == NUM
        : "expected " + NUM + " messages but got " + msgs.size() + ", msgs=" + Util.print(msgs);
    for (int i = 1; i <= NUM; i++) {
      assert msgs.contains(i);
    }
  }
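MySleepingReceiver is referenced but not shown. A minimal sketch of what it might look like, assuming MyReceiver is a receiver that records the integers it gets via getMsgs() (both class names come from the test; their bodies here are assumptions):

  protected static class MySleepingReceiver extends MyReceiver {
    final long sleep_time;

    MySleepingReceiver(String name, long sleep_time) {
      super(name);
      this.sleep_time = sleep_time;
    }

    public void receive(Message msg) {
      Util.sleep(sleep_time); // block the delivering (OOB) thread on purpose
      super.receive(msg); // assumed to add msg.getObject() to the collection returned by getMsgs()
    }
  }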
Example #3
  /**
   * Tests sending 1, 2 (OOB) and 3, where they are received in the order 1, 3, 2. Message 3 should
   * not get delivered until message 2 is received (http://jira.jboss.com/jira/browse/JGRP-780)
   */
  public void testRegularAndOOBUnicasts() throws Exception {
    DISCARD discard = new DISCARD();
    ProtocolStack stack = a.getProtocolStack();
    stack.insertProtocol(discard, ProtocolStack.BELOW, UNICAST.class, UNICAST2.class);

    Address dest = b.getAddress();
    Message m1 = new Message(dest, null, 1);
    Message m2 = new Message(dest, null, 2);
    m2.setFlag(Message.OOB);
    Message m3 = new Message(dest, null, 3);

    MyReceiver receiver = new MyReceiver("C2");
    b.setReceiver(receiver);
    a.send(m1);
    discard.setDropDownUnicasts(1);
    a.send(m2);
    a.send(m3);

    Collection<Integer> list = receiver.getMsgs();
    int count = 10;
    while (list.size() < 3 && --count > 0) {
      Util.sleep(500); // time for potential retransmission
      sendStableMessages(a, b);
    }

    assert list.size() == 3 : "list is " + list;
    assert list.contains(1) && list.contains(2) && list.contains(3);
  }
Example #4
  protected void sendStableMessage(Address dest, short conn_id, long low, long high) {
    Message stable_msg = new Message(dest, null, null);
    Unicast2Header hdr = Unicast2Header.createStableHeader(conn_id, low, high);
    stable_msg.putHeader(this.id, hdr);
    stable_msg.setFlag(Message.OOB);
    if (log.isTraceEnabled()) {
      StringBuilder sb = new StringBuilder();
      sb.append(local_addr)
          .append(" --> STABLE(")
          .append(dest)
          .append(": ")
          .append(low)
          .append("-")
          .append(high)
          .append(", conn_id=")
          .append(conn_id)
          .append(")");
      log.trace(sb.toString());
    }
    down_prot.down(new Event(Event.MSG, stable_msg));

    ReceiverEntry entry = recv_table.get(dest);
    NakReceiverWindow win = entry != null ? entry.received_msgs : null;
    if (win != null) win.stable(win.getHighestDelivered());
  }
Example #5
  /**
   * Processes a packet read from either the multicast or unicast socket. Needs to be synchronized
   * because mcast or unicast socket reads can be concurrent
   */
  void handleIncomingUdpPacket(byte[] data) {
    ByteArrayInputStream inp_stream;
    ObjectInputStream inp;
    Message msg = null;
    List l; // used if bundling is enabled

    try {
      // skip the first n bytes (default: 4), this is the version info
      inp_stream = new ByteArrayInputStream(data, VERSION_LENGTH, data.length - VERSION_LENGTH);
      inp = new ObjectInputStream(inp_stream);
      if (enable_bundling) {
        l = new List();
        l.readExternal(inp);
        for (Enumeration en = l.elements(); en.hasMoreElements(); ) {
          msg = (Message) en.nextElement();
          try {
            handleMessage(msg);
          } catch (Throwable t) {
            Trace.error("UDP.handleIncomingUdpPacket()", "failure: " + t.toString());
          }
        }
      } else {
        msg = new Message();
        msg.readExternal(inp);
        handleMessage(msg);
      }
    } catch (Throwable e) {
      Trace.error("UDP.handleIncomingUdpPacket()", "exception=" + Trace.getStackTrace(e));
    }
  }
Example #6
 public void up(MessageBatch batch) {
   for (Iterator<Message> it = batch.iterator(); it.hasNext(); ) {
     Message msg = it.next();
     if (msg != null && shouldDropUpMessage(msg, msg.getSrc())) it.remove();
   }
   if (!batch.isEmpty()) up_prot.up(batch);
 }
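shouldDropUpMessage() is not shown in this snippet. As an assumption about its shape, a simple predicate could combine a per-sender ignore list with a drop probability:

 // Hypothetical sketch of the predicate used above; both field names are assumptions.
 protected boolean shouldDropUpMessage(Message msg, Address sender) {
   if (ignored_senders.contains(sender)) return true; // always drop traffic from ignored senders
   return up_drop_rate > 0 && Math.random() < up_drop_rate; // otherwise drop probabilistically
 }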
Example #7
  public void up(MessageBatch batch) {
    if (batch.dest() == null) { // not a unicast batch
      up_prot.up(batch);
      return;
    }

    int size = batch.size();
    Map<Short, List<Message>> msgs =
        new TreeMap<Short, List<Message>>(); // map of messages, keyed by conn-id
    for (Message msg : batch) {
      if (msg == null || msg.isFlagSet(Message.Flag.NO_RELIABILITY)) continue;
      UnicastHeader hdr = (UnicastHeader) msg.getHeader(id);
      if (hdr == null) continue;
      batch.remove(msg); // remove the message from the batch, so it won't be passed up the stack

      if (hdr.type != UnicastHeader.DATA) {
        try {
          handleUpEvent(msg.getSrc(), hdr);
        } catch (Throwable t) { // we cannot let an exception terminate the processing of this batch
          log.error(local_addr + ": failed handling event", t);
        }
        continue;
      }

      List<Message> list = msgs.get(hdr.conn_id);
      if (list == null) msgs.put(hdr.conn_id, list = new ArrayList<Message>(size));
      list.add(msg);
    }

    if (!msgs.isEmpty()) handleBatchReceived(batch.sender(), msgs); // process msgs:
    if (!batch.isEmpty()) up_prot.up(batch);
  }
Example #8
  public void run() {
    final byte[] receive_buf = new byte[65535];
    DatagramPacket packet = new DatagramPacket(receive_buf, receive_buf.length);
    byte[] data;
    ByteArrayInputStream inp_stream;
    DataInputStream inp = null;
    Message msg;

    while (mcast_sock != null && receiver != null && Thread.currentThread().equals(receiver)) {
      packet.setData(receive_buf, 0, receive_buf.length);
      try {
        mcast_sock.receive(packet);
        data = packet.getData();
        inp_stream = new ExposedByteArrayInputStream(data, 0, data.length);
        inp = new DataInputStream(inp_stream);
        msg = new Message();
        msg.readFrom(inp);
        up(new Event(Event.MSG, msg));
      } catch (SocketException socketEx) {
        break;
      } catch (Throwable ex) {
        log.error("failed receiving packet (from " + packet.getSocketAddress() + ")", ex);
      } finally {
        Util.close(inp);
      }
    }
    if (log.isTraceEnabled()) log.trace("receiver thread terminated");
  }
Example #9
 public void send(MessageType type) throws Exception {
   Packet packet = new Packet();
   packet.setType(type);
   Message message = new Message();
   message.setObject(packet);
   channel.send(message);
 }
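Because send() stores the Packet as the message payload, a receiver gets it back with getObject(). A minimal counterpart sketch, assuming the same Packet/MessageType types and a getType() accessor mirroring setType() above:

 public void receive(Message msg) {
   Packet packet = (Packet) msg.getObject(); // the payload set by send() above
   MessageType type = packet.getType(); // assumed getter
   System.out.println("received packet of type " + type);
 }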
Example #10
 static Message createMessage(Address dest, Address src) {
   Message msg = new Message(dest, src, "hello world");
   msg.putHeader(NAKACK_ID, NakAckHeader.createMessageHeader(322649));
   msg.putHeader(UNICAST_ID, UNICAST.UnicastHeader.createDataHeader(465784, (short) 23323, true));
   msg.putHeader(UDP_ID, new TpHeader("DrawDemo"));
   return msg;
 }
Example #11
 private static Buffer marshal(Message msg) throws Exception {
   ByteArrayDataOutputStream dos = new ByteArrayDataOutputStream((int) (msg.size() + 50));
   Address dest = msg.getDest();
   boolean multicast = dest == null;
   writeMessage(msg, dos, multicast);
   return dos.getBuffer();
 }
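Examples #10 and #11 compose naturally: createMessage() builds a message carrying NAKACK, UNICAST and UDP headers, and marshal() serializes it. A small usage sketch, assuming Buffer exposes getLength():

 static void dumpSerializedSize() throws Exception {
   Message msg = createMessage(null, null); // null dest, so marshal() treats it as a multicast
   Buffer buf = marshal(msg);
   System.out.println("message size=" + msg.size() + ", serialized length=" + buf.getLength());
 }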
Example #12
    public void receive(Message msg) {
      byte[] buf = msg.getRawBuffer();
      byte type = buf[msg.getOffset()];

      switch (type) {
        case START:
          ByteBuffer tmp = ByteBuffer.wrap(buf, 1 + msg.getOffset(), Global.LONG_SIZE);
          num_msgs = (int) tmp.getLong();
          print = num_msgs / 10;
          current_value.set(0);
          total_bytes.set(0);
          start = System.currentTimeMillis();
          break;
        case DATA:
          long new_val = current_value.incrementAndGet();
          total_bytes.addAndGet(msg.getLength() - Global.INT_SIZE);
          if (print > 0 && new_val % print == 0) System.out.println("received " + new_val);
          if (new_val >= num_msgs) {
            long time = System.currentTimeMillis() - start;
            double msgs_sec = (current_value.get() / (time / 1000.0));
            double throughput = total_bytes.get() / (time / 1000.0);
            System.out.println(
                String.format(
                    "\nreceived %d messages in %d ms (%.2f msgs/sec), throughput=%s",
                    current_value.get(), time, msgs_sec, Util.printBytes(throughput)));
            break;
          }
          break;
        default:
          System.err.println("Type " + type + " is invalid");
      }
    }
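The receiver above expects a one-byte type followed, for START, by a long carrying the number of messages to expect. The sender is not part of the snippet; a sketch of how it might encode the START frame under that assumed wire layout:

    // [ 1 byte type | 8 bytes message count ], matching the parsing in receive() above
    void sendStart(JChannel channel, long num_msgs) throws Exception {
      ByteBuffer buf = ByteBuffer.allocate(1 + Global.LONG_SIZE);
      buf.put((byte) START).putLong(num_msgs);
      Message start_msg = new Message(null); // null dest: send to the whole cluster
      start_msg.setBuffer(buf.array());
      channel.send(start_msg);
    }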
Example #13
  /** When a message is received, analyze its content and then execute the command: Draw or Clear */
  public void receive(Message msg) {
    byte[] buf = msg.getRawBuffer();
    if (buf == null) {
      System.err.println(
          "["
              + channel.getAddress()
              + "] received null buffer from "
              + msg.getSrc()
              + ", headers: "
              + msg.printHeaders());
      return;
    }

    try {
      DrawCommand comm =
          (DrawCommand)
              Util.streamableFromByteBuffer(
                  DrawCommand.class, buf, msg.getOffset(), msg.getLength());
      switch (comm.mode) {
        case DrawCommand.DRAW:
          if (drawPanel != null) drawPanel.drawPoint(comm);
          break;
        case DrawCommand.CLEAR:
          clearPanel();
          break;
        default:
          System.err.println("***** received invalid draw command " + comm.mode);
          break;
      }
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
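The sending side of this demo serializes a DrawCommand the same way the receiver deserializes it. A minimal sketch, assuming DrawCommand implements Streamable (which Util.streamableFromByteBuffer() above requires) and reusing the channel field from the snippet:

  void sendDrawCommand(DrawCommand comm) throws Exception {
    byte[] buf = Util.streamableToByteBuffer(comm); // inverse of streamableFromByteBuffer() above
    channel.send(new Message(null, null, buf)); // null dest: multicast to all members
  }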
Example #14
 private void sendViewAck(Address dest) {
   Message view_ack = new Message(dest, null, null);
   view_ack.setFlag(Message.OOB);
   GmsHeader tmphdr = new GmsHeader(GmsHeader.VIEW_ACK);
   view_ack.putHeader(this.id, tmphdr);
   down_prot.down(new Event(Event.MSG, view_ack));
 }
Example #15
  /**
   * Multicasts a GET_DIGEST_REQ to all current members and waits for all responses (GET_DIGEST_RSP)
   * or N ms.
   *
   * @return a digest merged from all responses received (possibly incomplete on timeout), or null
   *     if current_mbrs is null
   */
  private Digest fetchDigestsFromAllMembersInSubPartition(List<Address> current_mbrs) {
    if (current_mbrs == null) return null;

    GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.GET_DIGEST_REQ);
    Message get_digest_req = new Message();
    get_digest_req.setFlag(Message.OOB);
    get_digest_req.putHeader(gms.getId(), hdr);

    long max_wait_time = gms.merge_timeout > 0 ? gms.merge_timeout / 2 : 2000L;
    digest_collector.reset(current_mbrs);

    // add my own digest first
    Digest digest = (Digest) gms.getDownProtocol().down(Event.GET_DIGEST_EVT);
    digest_collector.add(gms.local_addr, digest);

    gms.getDownProtocol().down(new Event(Event.MSG, get_digest_req));
    digest_collector.waitForAllResponses(max_wait_time);
    if (log.isDebugEnabled()) {
      if (digest_collector.hasAllResponses())
        log.debug(gms.local_addr + ": fetched all digests for " + current_mbrs);
      else
        log.debug(
            gms.local_addr
                + ": fetched incomplete digests (after timeout of "
                + max_wait_time
                + ") ms for "
                + current_mbrs);
    }
    Map<Address, Digest> responses = new HashMap<Address, Digest>(digest_collector.getResults());
    MutableDigest retval = new MutableDigest(responses.size());
    for (Digest dig : responses.values()) {
      if (dig != null) retval.add(dig);
    }
    return retval;
  }
Example #16
  public Object up(Event evt) {
    switch (evt.getType()) {
      case Event.MSG:
        Message msg = (Message) evt.getArg();
        ForkHeader hdr = (ForkHeader) msg.getHeader(id);
        if (hdr == null) break;
        if (hdr.fork_stack_id == null)
          throw new IllegalArgumentException("header has a null fork_stack_id");
        Protocol bottom_prot = get(hdr.fork_stack_id);
        return bottom_prot != null
            ? bottom_prot.up(evt)
            : this.unknownForkHandler.handleUnknownForkStack(msg, hdr.fork_stack_id);

      case Event.VIEW_CHANGE:
        for (Protocol bottom : fork_stacks.values()) bottom.up(evt);
        break;

      case Event.STATE_TRANSFER_OUTPUTSTREAM:
        if (!process_state_events) break;
        getStateFromMainAndForkChannels(evt);
        return null;

      case Event.STATE_TRANSFER_INPUTSTREAM:
        if (!process_state_events) break;
        setStateInMainAndForkChannels((InputStream) evt.getArg());
        return null;
    }
    return up_prot.up(evt);
  }
Example #17
    /**
     * Sends a MERGE_REQ to all coords and populates a list of MergeData (in merge_rsps). Returns
     * after coords.size() responses have been received, or timeout msecs have elapsed (whichever is
     * first).
     *
     * <p>If a subgroup coordinator rejects the MERGE_REQ (e.g. because of participation in a
     * different merge), <em>that member will be removed from coords !</em>
     *
     * @param coords A map of coordinator addresses and associated membership lists
     * @param new_merge_id The new merge id
     * @param timeout Max number of msecs to wait for the merge responses from the subgroup coords
     */
    private boolean getMergeDataFromSubgroupCoordinators(
        Map<Address, Collection<Address>> coords, MergeId new_merge_id, long timeout) {
      boolean gotAllResponses;
      long start = System.currentTimeMillis();
      merge_rsps.reset(coords.keySet());
      if (log.isDebugEnabled())
        log.debug(gms.local_addr + ": sending MERGE_REQ to " + coords.keySet());

      for (Map.Entry<Address, Collection<Address>> entry : coords.entrySet()) {
        Address coord = entry.getKey();
        Collection<Address> mbrs = entry.getValue();
        Message msg = new Message(coord, null, null);
        msg.setFlag(Message.OOB);
        GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.MERGE_REQ, mbrs);
        hdr.mbr = gms.local_addr;
        hdr.merge_id = new_merge_id;
        msg.putHeader(gms.getId(), hdr);
        gms.getDownProtocol().down(new Event(Event.MSG, msg));
      }

      // wait until num_rsps_expected >= num_rsps or timeout elapsed
      merge_rsps.waitForAllResponses(timeout);
      gotAllResponses = merge_rsps.hasAllResponses();
      long stop = System.currentTimeMillis();
      if (log.isDebugEnabled())
        log.debug(
            gms.local_addr
                + ": collected "
                + merge_rsps.size()
                + " merge response(s) in "
                + (stop - start)
                + " ms");
      return gotAllResponses;
    }
Example #18
  public void up(MessageBatch batch) {
    // Sort fork messages by fork-stack-id
    Map<String, List<Message>> map = new HashMap<>();
    for (Message msg : batch) {
      ForkHeader hdr = (ForkHeader) msg.getHeader(id);
      if (hdr != null) {
        batch.remove(msg);
        List<Message> list = map.get(hdr.fork_stack_id);
        if (list == null) {
          list = new ArrayList<>();
          map.put(hdr.fork_stack_id, list);
        }
        list.add(msg);
      }
    }

    // Now pass fork messages up, batched by fork-stack-id
    for (Map.Entry<String, List<Message>> entry : map.entrySet()) {
      String fork_stack_id = entry.getKey();
      List<Message> list = entry.getValue();
      Protocol bottom_prot = get(fork_stack_id);
      if (bottom_prot == null) continue;
      MessageBatch mb =
          new MessageBatch(
              batch.dest(), batch.sender(), batch.clusterName(), batch.multicast(), list);
      try {
        bottom_prot.up(mb);
      } catch (Throwable t) {
        log.error(Util.getMessage("FailedPassingUpBatch"), t);
      }
    }

    if (!batch.isEmpty()) up_prot.up(batch);
  }
Example #19
  public Object down(Message msg) {
    Address dest = msg.getDest();
    boolean multicast = dest == null;

    if (msg.getSrc() == null) msg.setSrc(localAddress());

    if (discard_all) {
      if (dest == null || dest.equals(localAddress())) loopback(msg);
      return null;
    }

    if (!multicast && drop_down_unicasts > 0) {
      drop_down_unicasts = Math.max(0, drop_down_unicasts - 1);
      return null;
    }

    if (multicast && drop_down_multicasts > 0) {
      drop_down_multicasts = Math.max(0, drop_down_multicasts - 1);
      return null;
    }

    if (down > 0) {
      double r = Math.random();
      if (r < down) {
        if (excludeItself && dest != null && dest.equals(localAddress())) {
          if (log.isTraceEnabled()) log.trace("excluding itself");
        } else {
          log.trace("dropping message");
          num_down++;
          return null;
        }
      }
    }
    return down_prot.down(msg);
  }
Example #20
  /**
   * <ol>
   *   <li>Get all the fragment buffers
   *   <li>When all are received, assemble them into one big buffer
   *   <li>Read headers and byte buffer from the big buffer
   *   <li>Set headers and buffer in msg
   *   <li>Pass msg up the stack
   * </ol>
   */
  private Message unfragment(Message msg, FragHeader hdr) {
    Address sender = msg.getSrc();
    FragmentationTable frag_table = fragment_list.get(sender);
    if (frag_table == null) {
      frag_table = new FragmentationTable(sender);
      try {
        fragment_list.add(sender, frag_table);
      } catch (IllegalArgumentException x) {
        // the entry has already been added, probably in parallel from another thread
        frag_table = fragment_list.get(sender);
      }
    }
    num_received_frags++;
    byte[] buf = frag_table.add(hdr.id, hdr.frag_id, hdr.num_frags, msg.getBuffer());
    if (buf == null) return null;

    try {
      DataInput in = new ByteArrayDataInputStream(buf);
      Message assembled_msg = new Message(false);
      assembled_msg.readFrom(in);
      assembled_msg.setSrc(sender); // needed ? YES, because fragments have a null src !!
      if (log.isTraceEnabled()) log.trace("assembled_msg is " + assembled_msg);
      num_received_msgs++;
      return assembled_msg;
    } catch (Exception e) {
      log.error(Util.getMessage("FailedUnfragmentingAMessage"), e);
      return null;
    }
  }
Example #21
  /**
   * We need to resend our first message with our conn_id
   *
   * @param sender The member that sent the SEND_FIRST_SEQNO request
   * @param seqno Resend the non null messages in the range [lowest .. seqno]
   */
  protected void handleResendingOfFirstMessage(Address sender, long seqno) {
    if (log.isTraceEnabled())
      log.trace(local_addr + " <-- SEND_FIRST_SEQNO(" + sender + "," + seqno + ")");
    SenderEntry entry = send_table.get(sender);
    Table<Message> win = entry != null ? entry.sent_msgs : null;
    if (win == null) {
      if (log.isErrorEnabled())
        log.error(local_addr + ": sender window for " + sender + " not found");
      return;
    }

    boolean first_sent = false;
    for (long i = win.getLow() + 1; i <= seqno; i++) {
      Message rsp = win.get(i);
      if (rsp == null) continue;
      if (first_sent) {
        down_prot.down(new Event(Event.MSG, rsp));
      } else {
        first_sent = true;
        // We need to copy the Unicast2Header and put it back into the message because
        // Message.copy() doesn't copy the headers, so we'd otherwise modify the original message
        // in the sender retransmission window (https://jira.jboss.org/jira/browse/JGRP-965)
        Message copy = rsp.copy();
        Unicast2Header hdr = (Unicast2Header) copy.getHeader(this.id);
        Unicast2Header newhdr = hdr.copy();
        newhdr.first = true;
        copy.putHeader(this.id, newhdr);
        down_prot.down(new Event(Event.MSG, copy));
      }
    }
  }
Example #22
  public void sendGetMembersRequest(String cluster_name, Promise promise, boolean return_views_only)
      throws Exception {
    PhysicalAddress physical_addr =
        (PhysicalAddress) down_prot.down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr));
    PingData data =
        new PingData(local_addr, null, false, UUID.get(local_addr), Arrays.asList(physical_addr));
    PingHeader hdr = new PingHeader(PingHeader.GET_MBRS_REQ, data, cluster_name);
    hdr.return_view_only = return_views_only;

    Set<PhysicalAddress> combined_target_members = new HashSet<PhysicalAddress>(initial_hosts);
    combined_target_members.addAll(dynamic_hosts);

    for (final Address addr : combined_target_members) {
      if (addr.equals(physical_addr)) continue;
      final Message msg = new Message(addr, null, null);
      msg.setFlag(Message.OOB);
      msg.putHeader(this.id, hdr);
      if (log.isTraceEnabled())
        log.trace("[FIND_INITIAL_MBRS] sending PING request to " + msg.getDest());
      timer.execute(
          new Runnable() {
            public void run() {
              try {
                down_prot.down(new Event(Event.MSG, msg));
              } catch (Exception ex) {
                if (log.isErrorEnabled())
                  log.error("failed sending discovery request to " + addr + ": " + ex);
              }
            }
          });
    }
  }
Example #23
 public void retransmit(long first_seqno, long last_seqno, Address sender) {
   if (last_seqno < first_seqno) return;
   Unicast2Header hdr = Unicast2Header.createXmitReqHeader(first_seqno, last_seqno);
   Message xmit_req = new Message(sender, null, null);
   xmit_req.putHeader(this.id, hdr);
   down_prot.down(new Event(Event.MSG, xmit_req));
 }
Example #24
 public static String print(List<Message> list) {
   StringBuilder sb = new StringBuilder();
   for (Message msg : list) {
     sb.append(msg.getSrc()).append(": ").append(msg.getObject()).append(" ");
   }
   return sb.toString();
 }
Example #25
  public void testRegularAndOOBMulticasts() throws Exception {
    DISCARD discard = new DISCARD();
    ProtocolStack stack = a.getProtocolStack();
    stack.insertProtocol(discard, ProtocolStack.BELOW, NAKACK2.class);
    a.setDiscardOwnMessages(true);

    Address dest = null; // send to all
    Message m1 = new Message(dest, null, 1);
    Message m2 = new Message(dest, null, 2);
    m2.setFlag(Message.OOB);
    Message m3 = new Message(dest, null, 3);

    MyReceiver receiver = new MyReceiver("C2");
    b.setReceiver(receiver);
    a.send(m1);
    discard.setDropDownMulticasts(1);
    a.send(m2);
    a.send(m3);

    Util.sleep(500);
    Collection<Integer> list = receiver.getMsgs();
    for (int i = 0; i < 10; i++) {
      System.out.println("list = " + list);
      if (list.size() == 3) break;
      Util.sleep(1000); // give the asynchronous msgs some time to be received
      sendStableMessages(a, b);
    }
    assert list.size() == 3 : "list is " + list;
    assert list.contains(1) && list.contains(2) && list.contains(3);
  }
Example #26
  /**
   * If merge_id is not equal to this.merge_id then discard. Else cast the view/digest to all
   * members of this group.
   */
  public void handleMergeView(final MergeData data, final MergeId merge_id) {
    if (!matchMergeId(merge_id)) {
      if (log.isErrorEnabled())
        log.error("merge_ids don't match (or are null); merge view discarded");
      return;
    }

    // only send to our *current* members, if we have A and B being merged (we are B), then we
    // would *not* receive a VIEW_ACK from A because A doesn't see us in the pre-merge view yet and
    // discards the view

    // [JGRP-700] - FLUSH: flushing should span merge

    // we have to send the new view only to current members and we should not wait
    // for view acks from newly merged members
    List<Address> newViewMembers = new Vector<Address>(data.view.getMembers());
    newViewMembers.removeAll(gms.members.getMembers());

    gms.castViewChangeWithDest(data.view, data.digest, null, newViewMembers);
    // if we have flush in stack send ack back to merge coordinator
    if (gms.flushProtocolInStack) {
      Message ack = new Message(data.getSender(), null, null);
      ack.setFlag(Message.OOB);
      GMS.GmsHeader ack_hdr = new GMS.GmsHeader(GMS.GmsHeader.INSTALL_MERGE_VIEW_OK);
      ack.putHeader(gms.getId(), ack_hdr);
      gms.getDownProtocol().down(new Event(Event.MSG, ack));
    }
    cancelMerge(merge_id);
  }
Example #27
  /** Tests https://jira.jboss.org/jira/browse/JGRP-1079 for unicast messages */
  public void testOOBUnicastMessageLoss() throws Exception {
    MyReceiver receiver = new MySleepingReceiver("C2", 1000);
    b.setReceiver(receiver);

    a.getProtocolStack().getTransport().setOOBRejectionPolicy("discard");

    final int NUM = 10;
    final Address dest = b.getAddress();
    for (int i = 1; i <= NUM; i++) {
      Message msg = new Message(dest, null, i);
      msg.setFlag(Message.OOB);
      a.send(msg);
    }

    Collection<Integer> msgs = receiver.getMsgs();

    for (int i = 0; i < 20; i++) {
      if (msgs.size() == NUM) break;
      Util.sleep(1000);
      // sendStableMessages(c1,c2); // not needed for unicasts !
    }

    assert msgs.size() == NUM
        : "expected " + NUM + " messages but got " + msgs.size() + ", msgs=" + Util.print(msgs);
    for (int i = 1; i <= NUM; i++) {
      assert msgs.contains(i);
    }
  }
Example #28
  /**
   * Sends the new view and digest to all subgroup coordinators in coords. Each coord will in turn
   *
   * <ol>
   *   <li>broadcast the new view and digest to all the members of its subgroup (MergeView)
   *   <li>on reception of the view, if it is a MergeView, each member will set the digest and
   *       install the new view
   * </ol>
   */
  private void sendMergeView(
      Collection<Address> coords, MergeData combined_merge_data, MergeId merge_id) {
    if (coords == null || combined_merge_data == null) return;

    View view = combined_merge_data.view;
    Digest digest = combined_merge_data.digest;
    if (view == null || digest == null) {
      if (log.isErrorEnabled())
        log.error("view or digest is null, cannot send consolidated merge view/digest");
      return;
    }

    if (log.isDebugEnabled())
      log.debug(
          gms.local_addr + ": sending merge view " + view.getVid() + " to coordinators " + coords);

    gms.merge_ack_collector.reset(coords);
    int size = gms.merge_ack_collector.size();
    long timeout = gms.view_ack_collection_timeout;

    long start = System.currentTimeMillis();
    for (Address coord : coords) {
      Message msg = new Message(coord, null, null);
      GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.INSTALL_MERGE_VIEW);
      hdr.view = view;
      hdr.my_digest = digest;
      hdr.merge_id = merge_id;
      msg.putHeader(gms.getId(), hdr);
      gms.getDownProtocol().down(new Event(Event.MSG, msg));
    }

    // [JGRP-700] - FLUSH: flushing should span merge
    // if flush is in stack wait for acks from separated island coordinators
    if (gms.flushProtocolInStack) {
      try {
        gms.merge_ack_collector.waitForAllAcks(timeout);
        long stop = System.currentTimeMillis();
        if (log.isTraceEnabled())
          log.trace(
              "received all ACKs ("
                  + size
                  + ") for merge view "
                  + view
                  + " in "
                  + (stop - start)
                  + "ms");
      } catch (TimeoutException e) {
        log.warn(
            gms.local_addr
                + ": failed to collect all ACKs for merge ("
                + size
                + ") for view "
                + view
                + " after "
                + timeout
                + "ms, missing ACKs from "
                + gms.merge_ack_collector.printMissing());
      }
    }
  }
Example #29
 @Override
 public void receive(Message msg) {
   try {
     LOG.debug(
         "Table "
             + Bytes.toString(idxCoprocessor.getTableName())
             + " received message: Table "
             + Bytes.toString((byte[]) msg.getObject())
             + " must update its index columns cache.");
     if (Arrays.equals((byte[]) msg.getObject(), idxCoprocessor.getTableName())) {
       LOG.debug(
           "Table "
               + Bytes.toString(idxCoprocessor.getTableName())
               + " is updating its indexed columns cache.");
       idxCoprocessor.initIndexedColumnsForTable();
     }
   } catch (IOException e1) {
     LOG.error("Failed to update from the master index table.", e1);
   } catch (Exception e) {
     LOG.error(
         "Failed to read contents of message; updating from master index table just in case.", e);
     try {
       idxCoprocessor.initIndexedColumnsForTable();
     } catch (IOException e1) {
       LOG.error("Failed to update from the master index table.", e1);
     }
   }
 }
Example #30
 protected void sendMergeRejectionMessage(Address dest) {
   GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.MERGE_RSP).setMergeRejected(true);
   Message msg = new Message(dest).setFlag(Message.Flag.OOB).putHeader(GMS_ID, hdr);
   if (this.authenticate_coord) msg.putHeader(this.id, new AuthHeader(this.auth_token));
   log.debug("merge response=%s", hdr);
   down_prot.down(msg);
 }
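Unlike the earlier examples, this last snippet uses the newer fluent Message API (chaining setFlag()/putHeader() off the constructor) and passes the Message straight to down_prot.down(msg) instead of wrapping it in an Event. For comparison, a rough equivalent in the older style used by Examples #14 and #17; the setMergeRejected() setter on GmsHeader is an assumption:

 protected void sendMergeRejectionMessage(Address dest) {
   Message msg = new Message(dest, null, null);
   msg.setFlag(Message.OOB);
   GMS.GmsHeader hdr = new GMS.GmsHeader(GMS.GmsHeader.MERGE_RSP);
   hdr.setMergeRejected(true); // assumed setter; older versions set a merge_rejected field directly
   msg.putHeader(GMS_ID, hdr);
   down_prot.down(new Event(Event.MSG, msg));
 }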