Example #1
  /** Executed when a new member joins or leaves the group. */
  public void viewAccepted(View v) {
    memberSize = v.size();
    if (mainFrame != null) setTitle();
    members.clear();
    members.addAll(v.getMembers());

    if (v instanceof MergeView) {
      System.out.println("** " + v);

      // Simple merge handling: fetch the state from the coordinator after a merge and
      // overwrite all of our own state with it
      if (useState && !members.isEmpty()) {
        Address coord = members.get(0);
        Address local_addr = channel.getAddress();
        if (local_addr != null && !local_addr.equals(coord)) {
          try {
            // make a copy of our state first
            Map<Point, Color> copy = null;
            if (send_own_state_on_merge) {
              synchronized (drawPanel.state) {
                copy = new LinkedHashMap<Point, Color>(drawPanel.state);
              }
            }
            System.out.println("fetching state from " + coord);
            channel.getState(coord, 5000);
            if (copy != null)
              sendOwnState(copy); // multicast my own state so everybody else has it too
          } catch (Exception e) {
            e.printStackTrace();
          }
        }
      }
    } else System.out.println("** View=" + v);
  }
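
The channel.getState(coord, 5000) call above blocks for up to five seconds while the coordinator streams its state back. For context, here is a minimal sketch of the two state-transfer callbacks such a call relies on. The stream-based getState(OutputStream)/setState(InputStream) signatures are the JGroups Receiver API; the serialization of drawPanel.state via plain Java object streams is an assumption for illustration, not necessarily the demo's actual code.

  /** Provider side (runs on the coordinator): stream our state to the requester. */
  public void getState(OutputStream output) throws Exception {
    Map<Point, Color> snapshot;
    synchronized (drawPanel.state) {
      snapshot = new LinkedHashMap<Point, Color>(drawPanel.state); // copy under the lock
    }
    ObjectOutputStream out = new ObjectOutputStream(output);
    out.writeObject(snapshot);
    out.flush(); // don't close: the channel owns the underlying stream
  }

  /** Requester side: replace our own state with the coordinator's. */
  @SuppressWarnings("unchecked")
  public void setState(InputStream input) throws Exception {
    Map<Point, Color> newState = (Map<Point, Color>) new ObjectInputStream(input).readObject();
    synchronized (drawPanel.state) {
      drawPanel.state.clear();
      drawPanel.state.putAll(newState);
    }
    drawPanel.repaint(); // assumes drawPanel is a Swing component
  }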
Example #2
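  /**
   * Asks the FLUSH protocol (above GMS in the stack) to suspend message sending for the given
   * view, retrying with a randomized backoff between failed attempts. Returns true if the
   * flush succeeded, or trivially if no flush protocol is present in the stack.
   */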
  protected boolean _startFlush(
      final View new_view, int maxAttempts, long randomFloor, long randomCeiling) {
    if (!flushProtocolInStack) return true;
    if (flushInvokerClass != null) {
      try {
        // Optional user-supplied flush invoker: construct it with the new view and delegate
        Callable<Boolean> invoker =
            flushInvokerClass.getDeclaredConstructor(View.class).newInstance(new_view);
        return invoker.call();
      } catch (Throwable e) {
        return false;
      }
    }

    try {
      boolean successfulFlush = false;
      boolean validView = new_view != null && new_view.size() > 0;
      if (validView && flushProtocolInStack) {
        int attemptCount = 0;
        while (attemptCount < maxAttempts) {
          try {
            // ask FLUSH (above GMS) to suspend message sending for the new view's members
            up_prot.up(new Event(Event.SUSPEND, new ArrayList<Address>(new_view.getMembers())));
            successfulFlush = true;
            break;
          } catch (Exception e) {
            Util.sleepRandom(randomFloor, randomCeiling); // randomized backoff before retrying
            attemptCount++;
          }
        }

        if (successfulFlush) {
          if (log.isTraceEnabled()) log.trace(local_addr + ": successful GMS flush by coordinator");
        } else {
          if (log.isWarnEnabled()) log.warn(local_addr + ": GMS flush by coordinator failed");
        }
      }
      return successfulFlush;
    } catch (Exception e) {
      return false;
    }
  }
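
To place _startFlush() in context, the hedged sketch below (not actual GMS code) shows how a coordinator would typically bracket a view broadcast with it. The installViewWithFlush name, the 4-attempt/1000-5000 ms parameters, and the stopFlush() counterpart (assumed to send an Event.RESUME back up to FLUSH, mirroring the SUSPEND above) are illustrative assumptions.

  protected void installViewWithFlush(View new_view, Digest digest) {
    boolean flushed = _startFlush(new_view, 4, 1000L, 5000L); // up to 4 attempts
    try {
      castViewChange(new_view, digest, null, null); // see Example #3
    } finally {
      if (flushed)
        stopFlush(); // assumed counterpart: sends Event.RESUME up to FLUSH
    }
  }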
Example #3
  /**
   * Broadcasts the new view and digest, and waits for ACKs from all members of the new view.
   * Joiners listed in newMembers are excluded here: they receive a unicast JoinRsp and are
   * ACKed separately below. If newMembers is null, ACKs are awaited from all of new_view.
   */
  public void castViewChange(
      View new_view, Digest digest, JoinRsp jr, Collection<Address> newMembers) {
    if (log.isTraceEnabled())
      log.trace(local_addr + ": mcasting view " + new_view + " (" + new_view.size() + " mbrs)\n");

    // Send down a local TMP_VIEW event. This is needed by certain layers (e.g. NAKACK) to
    // compute the correct digest in case a client's next request (e.g. getState()) reaches us
    // *before* our own view change multicast. See NAKACK's TMP_VIEW handling for details.
    down_prot.up(new Event(Event.TMP_VIEW, new_view));
    down_prot.down(new Event(Event.TMP_VIEW, new_view));

    List<Address> ackMembers = new ArrayList<Address>(new_view.getMembers());
    if (newMembers != null && !newMembers.isEmpty()) ackMembers.removeAll(newMembers);

    Message view_change_msg = new Message(); // bcast to all members
    GmsHeader hdr = new GmsHeader(GmsHeader.VIEW, new_view);
    hdr.my_digest = digest;
    view_change_msg.putHeader(this.id, hdr);

    // If we're the only member the VIEW is broadcast to, simply install the view directly
    // instead of sending the VIEW multicast; otherwise the N-1 other members drop the
    // multicast anyway...
    if (local_addr != null && ackMembers.size() == 1 && ackMembers.get(0).equals(local_addr)) {
      // We need to add the message to the retransmit window (e.g. in NAKACK) so that (1) it can
      // be retransmitted and (2) the seqno is incremented (otherwise we'd return an incorrect
      // digest)
      down_prot.down(new Event(Event.ADD_TO_XMIT_TABLE, view_change_msg));
      impl.handleViewChange(new_view, digest);
    } else {
      if (!ackMembers.isEmpty()) ack_collector.reset(ackMembers);

      down_prot.down(new Event(Event.MSG, view_change_msg));
      try {
        if (!ackMembers.isEmpty()) {
          ack_collector.waitForAllAcks(view_ack_collection_timeout);
          if (log.isTraceEnabled())
            log.trace(
                local_addr
                    + ": received all "
                    + ack_collector.expectedAcks()
                    + " ACKs from members for view "
                    + new_view.getVid());
        }
      } catch (TimeoutException e) {
        if (log_collect_msgs && log.isWarnEnabled()) {
          log.warn(
              local_addr
                  + ": failed to collect all ACKs (expected="
                  + ack_collector.expectedAcks()
                  + ") for view "
                  + new_view.getViewId()
                  + " after "
                  + view_ack_collection_timeout
                  + "ms, missing ACKs from "
                  + ack_collector.printMissing());
        }
      }
    }

    if (jr != null && (newMembers != null && !newMembers.isEmpty())) {
      ack_collector.reset(new ArrayList<Address>(newMembers));
      for (Address joiner : newMembers) {
        sendJoinResponse(jr, joiner);
      }
      try {
        ack_collector.waitForAllAcks(view_ack_collection_timeout);
        if (log.isTraceEnabled())
          log.trace(
              local_addr
                  + ": received all ACKs ("
                  + ack_collector.expectedAcks()
                  + ") from joiners for view "
                  + new_view.getVid());
      } catch (TimeoutException e) {
        if (log_collect_msgs && log.isWarnEnabled()) {
          log.warn(
              local_addr
                  + ": failed to collect all ACKs (expected="
                  + ack_collector.expectedAcks()
                  + ") for unicast view "
                  + new_view
                  + " after "
                  + view_ack_collection_timeout
                  + "ms, missing ACKs from "
                  + ack_collector.printMissing());
        }
      }
    }
  }
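
The ack-collection pattern above (reset with the expected members, block in waitForAllAcks, warn on timeout) boils down to a small monitor. The sketch below is illustrative and far simpler than the real org.jgroups.util.AckCollector; the SimpleAckCollector class and its method bodies are assumptions, with java.util.concurrent.TimeoutException standing in for the timeout signal.

  static class SimpleAckCollector {
    private final Set<Address> missing = new HashSet<Address>();

    /** Arm the collector with the members we still expect ACKs from. */
    synchronized void reset(Collection<Address> expected) {
      missing.clear();
      missing.addAll(expected);
      notifyAll();
    }

    /** Called when a VIEW_ACK arrives from sender. */
    synchronized void ack(Address sender) {
      if (missing.remove(sender) && missing.isEmpty()) notifyAll();
    }

    /** Block until all expected ACKs have arrived, or throw after timeout ms. */
    synchronized void waitForAllAcks(long timeout) throws TimeoutException {
      long deadline = System.currentTimeMillis() + timeout;
      while (!missing.isEmpty()) {
        long remaining = deadline - System.currentTimeMillis();
        if (remaining <= 0) throw new TimeoutException("missing ACKs from " + missing);
        try {
          wait(remaining);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw new TimeoutException("interrupted while waiting for ACKs");
        }
      }
    }
  }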