Example #1
  /**
   * The message contains a MethodCall. Executes it against *this* object using
   * MethodCall.invoke() and returns the result.
   */
  public Object handle(Message req) throws Exception {
    if (server_obj == null) {
      log.error(Util.getMessage("NoMethodHandlerIsRegisteredDiscardingRequest"));
      return null;
    }

    if (req == null || req.getLength() == 0) {
      log.error(Util.getMessage("MessageOrMessageBufferIsNull"));
      return null;
    }

    Object body =
        req_marshaller != null
            ? req_marshaller.objectFromBuffer(req.getRawBuffer(), req.getOffset(), req.getLength())
            : req.getObject();

    if (!(body instanceof MethodCall))
      throw new IllegalArgumentException("message does not contain a MethodCall object");

    MethodCall method_call = (MethodCall) body;

    if (log.isTraceEnabled()) log.trace("[sender=%s], method_call: %s", req.getSrc(), method_call);

    if (method_call.getMode() == MethodCall.ID) {
      if (method_lookup == null)
        throw new Exception(
            String.format(
                "MethodCall uses ID=%d, but method_lookup has not been set", method_call.getId()));
      Method m = method_lookup.findMethod(method_call.getId());
      if (m == null) throw new Exception("no method found for " + method_call.getId());
      method_call.setMethod(m);
    }

    return method_call.invoke(server_obj);
  }
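A caller-side sketch for the handler above (an illustration, not part of the original code; the channel, the ServerObject class and the method-to-ID mapping are assumptions). The RpcDispatcher invokes handle() on the receiving node; ID-mode MethodCalls are resolved through the registered MethodLookup:

  void callPrintOnAllNodes(JChannel channel) throws Exception {
    // Hypothetical server object exposing print(String), mapped to method ID 1
    RpcDispatcher disp = new RpcDispatcher(channel, new ServerObject());
    disp.setMethodLookup(id -> {
      try {
        return id == 1 ? ServerObject.class.getMethod("print", String.class) : null;
      } catch (NoSuchMethodException e) {
        return null; // handle() will then report "no method found"
      }
    });

    MethodCall call = new MethodCall((short) 1, "hello");   // MethodCall.ID mode
    RspList<Object> rsps = disp.callRemoteMethods(null, call, RequestOptions.SYNC());
    System.out.println("responses: " + rsps);
  }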
Example #2
  public void up(MessageBatch batch) {
    // Group fork messages by fork-stack-id
    Map<String, List<Message>> map = new HashMap<>();
    for (Message msg : batch) {
      ForkHeader hdr = (ForkHeader) msg.getHeader(id);
      if (hdr != null) {
        batch.remove(msg);
        List<Message> list = map.get(hdr.fork_stack_id);
        if (list == null) {
          list = new ArrayList<>();
          map.put(hdr.fork_stack_id, list);
        }
        list.add(msg);
      }
    }

    // Now pass fork messages up, batched by fork-stack-id
    for (Map.Entry<String, List<Message>> entry : map.entrySet()) {
      String fork_stack_id = entry.getKey();
      List<Message> list = entry.getValue();
      Protocol bottom_prot = get(fork_stack_id);
      if (bottom_prot == null) continue;
      MessageBatch mb =
          new MessageBatch(
              batch.dest(), batch.sender(), batch.clusterName(), batch.multicast(), list);
      try {
        bottom_prot.up(mb);
      } catch (Throwable t) {
        log.error(Util.getMessage("FailedPassingUpBatch"), t);
      }
    }

    if (!batch.isEmpty()) up_prot.up(batch);
  }
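A side note on the grouping step above: the explicit get/null-check/put can also be written with Map.computeIfAbsent (Java 8+); the behavior is identical, this is just the more compact form:

        // equivalent to the get()/put() sequence in the loop above
        map.computeIfAbsent(hdr.fork_stack_id, k -> new ArrayList<>()).add(msg);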
Example #3
  protected List<PingData> read(InputStream in) {
    List<PingData> retval = null;
    try {
      while (true) {
        try {
          String name_str = Util.readToken(in);
          String uuid_str = Util.readToken(in);
          String addr_str = Util.readToken(in);
          String coord_str = Util.readToken(in);
          if (name_str == null || uuid_str == null || addr_str == null || coord_str == null) break;

          UUID uuid = null;
          try {
            long tmp = Long.valueOf(uuid_str);
            uuid = new UUID(0, tmp);
          } catch (Throwable t) {
            uuid = UUID.fromString(uuid_str);
          }

          PhysicalAddress phys_addr = new IpAddress(addr_str);
          boolean is_coordinator = coord_str.trim().equals("T") || coord_str.trim().equals("t");

          if (retval == null) retval = new ArrayList<>();
          retval.add(new PingData(uuid, true, name_str, phys_addr).coord(is_coordinator));
        } catch (Throwable t) {
          log.error(Util.getMessage("FailedReadingLineOfInputStream"), t);
        }
      }
      return retval;
    } finally {
      Util.close(in);
    }
  }
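A hypothetical caller for read() (the file name and the enclosing method are illustrations only). Note that read() closes the stream itself in its finally block, so the caller only opens it:

  protected void dumpDiscoveryEntries() throws java.io.FileNotFoundException {
    List<PingData> members = read(new java.io.FileInputStream("/tmp/mycluster.list"));
    if (members != null)
      for (PingData data : members)
        log.info("discovered member: " + data);
  }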
Example #4
 /**
  * Unmarshals the original message (in the payload) and passes it up (unless already delivered).
  *
  * @param msg the message whose payload contains the marshalled original message
  * @param flush_ack if true, sets the flush_ack flag in the unwrapped message's SequencerHeader
  */
 protected void unwrapAndDeliver(final Message msg, boolean flush_ack) {
   try {
     Message msg_to_deliver =
         Util.streamableFromBuffer(
             Message.class, msg.getRawBuffer(), msg.getOffset(), msg.getLength());
     SequencerHeader hdr = (SequencerHeader) msg_to_deliver.getHeader(this.id);
     if (flush_ack) hdr.flush_ack = true;
     deliver(msg_to_deliver, new Event(Event.MSG, msg_to_deliver), hdr);
   } catch (Exception ex) {
     log.error(Util.getMessage("FailureUnmarshallingBuffer"), ex);
   }
 }
Example #5
 /**
  * Creates a byte[] representation of the PingData, but DISCARDING the view it contains.
  *
  * @param data the PingData instance to serialize
  * @return the serialized buffer, or null if serialization failed
  */
 protected byte[] serializeWithoutView(PingData data) {
   final PingData clone =
       new PingData(
               data.getAddress(), data.isServer(), data.getLogicalName(), data.getPhysicalAddr())
           .coord(data.isCoord());
   try {
     return Util.streamableToByteBuffer(clone);
   } catch (Exception e) {
     log.error(Util.getMessage("ErrorSerializingPingData"), e);
     return null;
   }
 }
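For completeness, a round-trip sketch of the inverse operation (an illustration, not code from the original class), using Util.streamableFromByteBuffer to restore the PingData, minus the discarded view:

 protected PingData roundTrip(PingData data) throws Exception {
   byte[] buf = serializeWithoutView(data);
   return buf != null ? (PingData) Util.streamableFromByteBuffer(PingData.class, buf) : null;
 }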
Example #6
 protected void forward(final Message msg, long seqno, boolean flush) {
   Address target = coord;
   if (target == null) return;
   byte type = flush ? SequencerHeader.FLUSH : SequencerHeader.FORWARD;
   try {
     SequencerHeader hdr = new SequencerHeader(type, seqno);
     Message forward_msg =
         new Message(target, Util.streamableToBuffer(msg)).putHeader(this.id, hdr);
     down_prot.down(new Event(Event.MSG, forward_msg));
     forwarded_msgs++;
   } catch (Exception ex) {
     log.error(Util.getMessage("FailedForwardingMessageTo") + msg.getDest(), ex);
   }
 }
Example #7
  public void up(MessageBatch batch) {
    for (Message msg : batch) {
      if (msg.isFlagSet(Message.Flag.NO_TOTAL_ORDER)
          || msg.isFlagSet(Message.Flag.OOB)
          || msg.getHeader(id) == null) continue;
      batch.remove(msg);

      // simplistic implementation
      try {
        up(new Event(Event.MSG, msg));
      } catch (Throwable t) {
        log.error(Util.getMessage("FailedPassingUpMessage"), t);
      }
    }

    if (!batch.isEmpty()) up_prot.up(batch);
  }
Example #8
  /**
   * Sends all messages currently in forward_table to the new coordinator (changing the dest
   * field). This needs to be done so the underlying reliable unicast protocol (e.g. UNICAST)
   * adds these messages to its retransmission mechanism.<br>
   * Note that we need to resend the messages in the order of their seqnos! We also need to
   * prevent other messages from being inserted until we're done; that's why there's
   * synchronization.<br>
   * Access to the forward_table doesn't need to be synchronized, as there won't be any
   * insertions during flushing (all down-threads are blocked).
   */
  protected void flushMessagesInForwardTable() {
    if (is_coord) {
      for (Map.Entry<Long, Message> entry : forward_table.entrySet()) {
        Long key = entry.getKey();
        Message msg = entry.getValue();
        Buffer buf;
        try {
          buf = Util.streamableToBuffer(msg);
        } catch (Exception e) {
          log.error(Util.getMessage("FlushingBroadcastingFailed"), e);
          continue;
        }

        SequencerHeader hdr = new SequencerHeader(SequencerHeader.WRAPPED_BCAST, key);
        Message forward_msg = new Message(null, buf).putHeader(this.id, hdr);
        if (log.isTraceEnabled())
          log.trace(local_addr + ": flushing (broadcasting) " + local_addr + "::" + key);
        down_prot.down(new Event(Event.MSG, forward_msg));
      }
      return;
    }

    // for forwarded messages, we need to receive the forwarded message from the coordinator, to
    // prevent this case:
    // - V1={A,B,C}
    // - A crashes
    // - C installs V2={B,C}
    // - C forwards messages 3 and 4 to B (the new coord)
    // - B drops 3 because its view is still V1
    // - B installs V2
    // - B receives message 4 and broadcasts it
    // ==> C's message 4 is delivered *before* message 3 !
    // ==> By resending 3 until it is received, then resending 4 until it is received, we make sure
    // this won't happen
    // (see https://issues.jboss.org/browse/JGRP-1449)
    while (flushing && running && !forward_table.isEmpty()) {
      Map.Entry<Long, Message> entry = forward_table.firstEntry();
      final Long key = entry.getKey();
      Message msg = entry.getValue();
      Buffer buf;

      try {
        buf = Util.streamableToBuffer(msg);
      } catch (Exception e) {
        log.error(Util.getMessage("FlushingBroadcastingFailed"), e);
        continue;
      }

      while (flushing && running && !forward_table.isEmpty()) {
        SequencerHeader hdr = new SequencerHeader(SequencerHeader.FLUSH, key);
        Message forward_msg =
            new Message(coord, buf).putHeader(this.id, hdr).setFlag(Message.Flag.DONT_BUNDLE);
        if (log.isTraceEnabled())
          log.trace(
              local_addr
                  + ": flushing (forwarding) "
                  + local_addr
                  + "::"
                  + key
                  + " to coord "
                  + coord);
        ack_promise.reset();
        down_prot.down(new Event(Event.MSG, forward_msg));
        Long ack = ack_promise.getResult(500);
        if ((Objects.equals(ack, key)) || !forward_table.containsKey(key)) break;
      }
    }
  }
Example #9
  public Object down(Event evt) {
    switch (evt.getType()) {
      case Event.MSG:
        Message msg = (Message) evt.getArg();
        if (msg.getDest() != null
            || msg.isFlagSet(Message.Flag.NO_TOTAL_ORDER)
            || msg.isFlagSet(Message.Flag.OOB)) break;

        if (msg.getSrc() == null) msg.setSrc(local_addr);

        if (flushing) block();

        // A seqno is not used to establish ordering, but only to weed out duplicates; next_seqno
        // doesn't need to increase monotonically, but only to be unique
        // (https://issues.jboss.org/browse/JGRP-1461)!
        long next_seqno = seqno.incrementAndGet();
        in_flight_sends.incrementAndGet();
        try {
          SequencerHeader hdr =
              new SequencerHeader(
                  is_coord ? SequencerHeader.BCAST : SequencerHeader.WRAPPED_BCAST, next_seqno);
          msg.putHeader(this.id, hdr);
          if (log.isTraceEnabled())
            log.trace(
                "["
                    + local_addr
                    + "]: forwarding "
                    + local_addr
                    + "::"
                    + seqno
                    + " to coord "
                    + coord);

          // We always forward messages to the coordinator, even if we're the coordinator. Having
          // the coord send its messages directly led to starvation of messages from other members.
          // MPerf performance went up from 20MB/sec/node to 50MB/sec/node with this change!
          forwardToCoord(next_seqno, msg);
        } catch (Exception ex) {
          log.error(Util.getMessage("FailedSendingMessage"), ex);
        } finally {
          in_flight_sends.decrementAndGet();
        }
        return null; // don't pass down

      case Event.VIEW_CHANGE:
        handleViewChange((View) evt.getArg());
        break;

      case Event.TMP_VIEW:
        handleTmpView((View) evt.getArg());
        break;

      case Event.SET_LOCAL_ADDRESS:
        local_addr = (Address) evt.getArg();
        break;
    }
    return down_prot.down(evt);
  }