public class RemoteConcurrentSerialGatewaySenderEventProcessor
    extends ConcurrentSerialGatewaySenderEventProcessor {

  private static final Logger logger = LogService.getLogger();

  public RemoteConcurrentSerialGatewaySenderEventProcessor(AbstractGatewaySender sender) {
    super(sender);
  }

  @Override
  protected void initializeMessageQueue(String id) {
    for (int i = 0; i < sender.getDispatcherThreads(); i++) {
      processors.add(new RemoteSerialGatewaySenderEventProcessor(this.sender, id + "." + i));
      if (logger.isDebugEnabled()) {
        logger.debug("Created the RemoteSerialGatewayEventProcessor_{}->{}", i, processors.get(i));
      }
    }
  }
}
/**
 * RegionVersionHolders are part of a RegionVersionVector. An RVH holds the current version for a
 * member and a list of exceptions, which are holes in the list of versions received from that
 * member.
 *
 * <p>RegionVersionHolders should be modified under synchronization on the holder.
 *
 * <p>Starting in 7.0.1 the holder has a BitSet that records the most recent versions. The variable
 * bitSetVersion corresponds to bit zero, and subsequent bits represent bitSetVersion+1, +2, etc.
 * The method mergeBitSet() should be used to dump the BitSet's exceptions into the regular
 * exceptions list prior to performing operations like exception comparisons or dominance checks.
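 *
 * <p>For example, with bitSetVersion=100, a set bit 0 means version 100 has been received, a set
 * bit 3 means version 103 has been received, and a clear bit 3 means version 103 has not yet been
 * recorded.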
 *
 * <p>Starting in 8.0, the holder introduced a special exception to describe the following
 * unfinished-operation use case: operations R4 and R5 are applied locally but are never
 * distributed to P, so P's RVV for R is still 3. After R is GIIed from P, R's RVV becomes
 * R5(3-6), i.e. the exception's nextVersion is currentVersion+1.
 *
 * @author Bruce Schuchardt
 */
public class RegionVersionHolder<T> implements Cloneable, DataSerializable {

  private static final Logger logger = LogService.getLogger();

  private static List<RVVException> EMPTY_EXCEPTIONS = Collections.emptyList();

  long version = -1; // received version
  transient T id;
  private List<RVVException> exceptions;
  boolean isDepartedMember;

  // non final for tests
  public static int BIT_SET_WIDTH = 64 * 16; // should be a multiple of 4 64-bit longs

  private long bitSetVersion = 1;
  private BitSet bitSet;

  /**
   * This constructor should only be used for cloning a RegionVersionHolder or initializing an
   * invalid version holder (with version -1)
   *
   * @param ver the initial version for this holder
   */
  public RegionVersionHolder(long ver) {
    this.version = ver;
  }

  public RegionVersionHolder(T id) {
    this.id = id;
    this.version = 0;
    this.bitSetVersion = 1;
    this.bitSet = new BitSet(RegionVersionHolder.BIT_SET_WIDTH);
  }

  public RegionVersionHolder(DataInput in) throws IOException {
    fromData(in);
  }

  public synchronized long getVersion() {
    RVVException e = null;
    List<RVVException> exs = getExceptions();
    if (!exs.isEmpty()) {
      e = exs.get(0);
    }
    if (isSpecialException(e, this)) {
      return e.getHighestReceivedVersion();
    } else {
      return this.version;
    }
  }

  private synchronized RVVException getSpecialException() {
    RVVException e = null;
    if (this.exceptions != null && !this.exceptions.isEmpty()) {
      e = this.exceptions.get(0);
    }
    if (isSpecialException(e, this)) {
      return e;
    } else {
      return null;
    }
  }

  public long getBitSetVersionForTesting() {
    return this.bitSetVersion;
  }

  private synchronized List<RVVException> getExceptions() {
    mergeBitSet();
    if (this.exceptions != null) {
      return this.exceptions;
    } else {
      return EMPTY_EXCEPTIONS;
    }
  }

  public synchronized List<RVVException> getExceptionForTest() {
    return getExceptions();
  }

  public synchronized int getExceptionCount() {
    return getExceptions().size();
  }

  public synchronized String exceptionsToString() {
    return getExceptions().toString();
  }

  /* test only method */
  public void setVersion(long ver) {
    this.version = ver;
  }

  @Override
  public synchronized RegionVersionHolder<T> clone() {
    RegionVersionHolder<T> clone = new RegionVersionHolder<T>(this.version);
    clone.id = this.id;
    clone.isDepartedMember = this.isDepartedMember;
    if (this.exceptions != null) {
      clone.exceptions = new LinkedList<RVVException>();
      for (RVVException e : this.exceptions) {
        clone.exceptions.add(e.clone());
      }
    }
    if (this.bitSet != null) {
      clone.bitSet = (BitSet) this.bitSet.clone();
      clone.bitSetVersion = this.bitSetVersion;
      clone.mergeBitSet();
    }
    return clone;
  }

  @Override
  public synchronized String toString() {
    //    mergeBitSet();
    StringBuilder sb = new StringBuilder();
    sb.append("{rv").append(this.version).append(" bsv").append(this.bitSetVersion).append(" bs=[");
    if (this.bitSet != null) {
      int i = this.bitSet.nextSetBit(0);
      if (i >= 0) {
        sb.append("0");
        for (i = this.bitSet.nextSetBit(1); i > 0; i = this.bitSet.nextSetBit(i + 1)) {
          sb.append(',').append(i);
        }
      }
    }
    sb.append(']');
    if (this.exceptions != null && !this.exceptions.isEmpty()) {
      sb.append(this.exceptions.toString());
    }
    return sb.toString();
  }

  /** add a version that is older than this.bitSetVersion */
  private void addOlderVersion(long missingVersion) {
    // exceptions iterate in reverse order on their previousVersion variable
    if (this.exceptions == null) {
      return;
    }
    int i = 0;
    for (Iterator<RVVException> it = this.exceptions.iterator(); it.hasNext(); ) {
      RVVException e = it.next();
      if (e.nextVersion <= missingVersion) {
        return; // there is no RVVException for this version
      }
      if (e.previousVersion < missingVersion && missingVersion < e.nextVersion) {
        String fine = null;
        if (logger.isTraceEnabled(LogMarker.RVV)) {
          fine = e.toString();
        }
        e.add(missingVersion);
        if (e.isFilled()) {
          if (fine != null) {
            logger.trace(LogMarker.RVV, "Filled exception {}", fine);
          }
          it.remove();
        } else if (e.shouldChangeForm()) {
          this.exceptions.set(i, e.changeForm());
        }
        if (this.exceptions.isEmpty()) {
          this.exceptions = null;
        }
        return;
      }
      i++;
    }
  }

  void flushBitSetDuringRecording(long version) {
    int length = BIT_SET_WIDTH;
    int bitCountToFlush = length * 3 / 4;
    if (logger.isTraceEnabled(LogMarker.RVV)) {
      logger.trace(
          LogMarker.RVV,
          "flushing RVV bitset bitSetVersion={}; bits={}",
          this.bitSetVersion,
          this.bitSet);
    }
    // see if we can shift part of the bits so that exceptions in the recent bits can
    // be kept in the bitset and later filled without having to create real exception objects
    if (version >= this.bitSetVersion + length + bitCountToFlush) {
      // nope - flush the whole bitset
      addBitSetExceptions(length, version);
    } else {
      // yes - flush the lower part.  We can only flush up to the last set bit because
      // the exceptions list includes a "next version" that indicates a received version.
      addBitSetExceptions(bitCountToFlush, this.bitSetVersion + bitCountToFlush);
    }
    if (logger.isTraceEnabled(LogMarker.RVV)) {
      logger.trace(
          LogMarker.RVV,
          "After flushing bitSetVersion={}; bits={}",
          this.bitSetVersion,
          this.bitSet);
    }
  }

  /** merge bit-set exceptions into the regular exceptions list */
  private synchronized void mergeBitSet() {
    if (this.bitSet != null && this.bitSetVersion < this.version) {
      addBitSetExceptions((int) (this.version - this.bitSetVersion), this.version);
    }
  }

  /**
   * Add exceptions from the BitSet array to the exceptions list. Assumes that the BitSet[0]
   * corresponds to this.bitSetVersion. This scans the bitset looking for gaps that are recorded as
   * RVV exceptions. The scan terminates at numBits or when the last set bit is found. The bitSet is
   * adjusted and a new bitSetVersion is established.
   *
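   * <p>For example, with bitSetVersion=1 and bits {0, 1, 4} set (versions 1, 2 and 5 received),
   * flushing records the exception e&lt;rv2 - rv5&gt; (versions 3 and 4 missing) and shifts the
   * bitset so that bit 0 corresponds to version 5.
   *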
   * @param numBits the desired number of bits to flush from the bitset
   * @param newVersion the desired new bitSetVersion, which may be > the max representable in the
   *     bitset
   */
  private void addBitSetExceptions(int numBits, long newVersion) {
    final boolean isDebugEnabled_RVV = logger.isTraceEnabled(LogMarker.RVV);
    int lastSetIndex = -1;

    if (isDebugEnabled_RVV) {
      logger.trace(LogMarker.RVV, "addBitSetExceptions({},{})", numBits, newVersion);
    }

    for (int idx = 0; idx < numBits; ) {
      int nextMissingIndex = this.bitSet.nextClearBit(idx);
      if (nextMissingIndex < 0) {
        break;
      }

      lastSetIndex = nextMissingIndex - 1;

      int nextReceivedIndex = this.bitSet.nextSetBit(nextMissingIndex + 1);
      long nextReceivedVersion = -1;
      if (nextReceivedIndex > 0) {
        lastSetIndex = nextReceivedIndex;
        nextReceivedVersion = (long) (nextReceivedIndex) + this.bitSetVersion;
        idx = nextReceivedIndex + 1;
        if (isDebugEnabled_RVV) {
          logger.trace(
              LogMarker.RVV,
              "found gap in bitSet: missing bit at index={}; next set index={}",
              nextMissingIndex,
              nextReceivedIndex);
        }
      } else {
        // We can't flush any more bits from the bit set because there
        // are no more received versions
        if (isDebugEnabled_RVV) {
          logger.trace(
              LogMarker.RVV,
              "terminating flush at bit {} because of missing entries",
              lastSetIndex);
        }
        this.bitSetVersion += lastSetIndex;
        this.bitSet.clear();
        if (lastSetIndex != -1) {
          this.bitSet.set(0);
        }
        return;
      }
      long nextMissingVersion = Math.max(1, nextMissingIndex + this.bitSetVersion);
      if (nextReceivedVersion > nextMissingVersion) {
        addException(nextMissingVersion - 1, nextReceivedVersion);
        if (isDebugEnabled_RVV) {
          logger.trace(
              LogMarker.RVV,
              "Added rvv exception e<rv{} - rv{}>",
              (nextMissingVersion - 1),
              nextReceivedVersion);
        }
      }
    }
    this.bitSet = this.bitSet.get(lastSetIndex, Math.max(lastSetIndex + 1, bitSet.size()));
    if (lastSetIndex > 0) {
      this.bitSetVersion = this.bitSetVersion + (long) lastSetIndex;
    }
  }

  synchronized void recordVersion(long version) {

    if (this.version != version) {
      if (this.bitSet == null) {
        if (this.version < version - 1) {
          this.addException(this.version, version);
          if (logger.isTraceEnabled(LogMarker.RVV)) {
            logger.trace(
                LogMarker.RVV, "Added rvv exception e<rv{} - rv{}>", this.version, version);
          }
        } else if (this.version > version) {
          this.addOlderVersion(version);
        }
      } else { // have a bitSet
        if (this.bitSetVersion + BIT_SET_WIDTH - 1 < version) {
          this.flushBitSetDuringRecording(version);
        }
        if (version < this.bitSetVersion) {
          this.addOlderVersion(version);
        } else {
          // If there's a special exception, version may be >= this.bitSetVersion and we need to
          // fill the hole in the special exception. For example, holder=R5(3,6), bitSetVersion=3,
          // bs=[0]. Adding version=4 will become: holder=R5(4,6), bitSetVersion=3, bs=[0,1]
          if (this.getSpecialException() != null) {
            this.addOlderVersion(version);
          }
          this.bitSet.set((int) (version - this.bitSetVersion));
        }
      }
      this.version = Math.max(this.version, version);
    } else {
      if (this.bitSet != null && version >= this.bitSetVersion) {
        this.bitSet.set((int) (version - this.bitSetVersion));
      }
      this.addOlderVersion(version);
    }
  }

  /** Add an exception that is older than this.bitSetVersion. */
  protected synchronized void addException(long previousVersion, long nextVersion) {
    if (this.exceptions == null) {
      this.exceptions = new LinkedList<RVVException>();
    }
    int i = 0;
    for (Iterator<RVVException> it = this.exceptions.iterator(); it.hasNext(); i++) {
      RVVException e = it.next();
      if (previousVersion >= e.nextVersion) {
        RVVException except = RVVException.createException(previousVersion, nextVersion);
        this.exceptions.add(i, except);
        return;
      }
    }
    this.exceptions.add(RVVException.createException(previousVersion, nextVersion));
  }

  synchronized void removeExceptionsOlderThan(long v) {
    mergeBitSet();
    if (this.exceptions != null) {
      for (Iterator<RVVException> it = this.exceptions.iterator(); it.hasNext(); ) {
        RVVException e = it.next();
        if (e.nextVersion <= v) {
          it.remove();
        }
      }
      if (this.exceptions.isEmpty()) {
        this.exceptions = null;
      }
    }
  }

  /**
   * Initialize this version holder from another version holder. This is called during GII.
   *
   * <p>It's more likely that the other holder has seen most of the versions, and this version
   * holder only has a few updates that happened since the GII started. So we apply our seen
   * versions to the other version holder and then initialize this version holder from the other
   * version holder.
   */
  public synchronized void initializeFrom(RegionVersionHolder<T> source) {
    // Make sure the bitsets are merged in both the source
    // and this vector
    mergeBitSet();

    RegionVersionHolder<T> other = source.clone();
    other.mergeBitSet();
    // Get a copy of the local version and exceptions
    long myVersion = this.version;

    // initialize our version and exceptions to match the others
    this.exceptions = other.exceptions;
    this.version = other.version;

    // Initialize the bit set to be empty. Merge bit set should
    // have already done this, but just to be sure.
    if (this.bitSet != null) {
      this.bitSetVersion = this.version;
      // Make sure the bit set is empty except for the first bit, indicating
      // that the version has been received.
      this.bitSet.set(0);
    }

    // Now reconcile this.version/exceptions with myVersion. The only case that needs special
    // handling is when myVersion is newer than this.version: create an exception covering
    // (this.version+1 .. myVersion) and set this.version = myVersion
    if (myVersion > this.version) {
      RVVException e = RVVException.createException(this.version, myVersion + 1);
      // add special exception
      if (this.exceptions == null) {
        this.exceptions = new LinkedList<RVVException>();
      }
      int i = 0;
      for (RVVException exception : this.exceptions) {
        if (e.compareTo(exception) >= 0) {
          break;
        }
        i++;
      }
      this.exceptions.add(i, e);
      this.version = myVersion;
    }
  }

  /**
   * initialize a holder that was cloned from another holder so it is ready for use in a live vector
   */
  void makeReadyForRecording() {
    if (this.bitSet == null) {
      this.bitSet = new BitSet(BIT_SET_WIDTH);
      this.bitSetVersion = this.version;
      this.bitSet.set(0);
    }
  }

  /** returns true if this version holder has seen the given version number */
  synchronized boolean contains(long v) {
    if (v > getVersion()) {
      return false;
    } else {
      if (this.bitSet != null && v >= this.bitSetVersion) {
        return this.bitSet.get((int) (v - this.bitSetVersion));
      }
      if (this.exceptions == null) {
        return true;
      }
      for (Iterator<RVVException> it = this.exceptions.iterator(); it.hasNext(); ) {
        RVVException e = it.next();
        if (e.nextVersion <= v) {
          return true; // there is no RVVException for this version
        }
        if (e.previousVersion < v && v < e.nextVersion) {
          return e.contains(v);
        }
      }
      return true;
    }
  }

  /**
   * Returns true if this version holder has an exception in the exception list for the given
   * version
   * number.
   *
   * <p>This differs from contains because it returns true if v is greater than the last seen
   * version for this holder.
   */
  synchronized boolean hasExceptionFor(long v) {
    if (this.bitSet != null && v >= this.bitSetVersion) {
      if (v > this.bitSetVersion + this.bitSet.length()) {
        return false;
      }
      return this.bitSet.get((int) (v - this.bitSetVersion));
    }
    if (this.exceptions == null) {
      return false;
    }
    for (Iterator<RVVException> it = this.exceptions.iterator(); it.hasNext(); ) {
      RVVException e = it.next();
      if (e.nextVersion <= v) {
        return false; // there is no RVVException for this version
      }
      if (e.previousVersion < v && v < e.nextVersion) {
        return !e.contains(v);
      }
    }
    return false;
  }

  public boolean dominates(RegionVersionHolder<T> other) {
    return !other.isNewerThanOrCanFillExceptionsFor(this);
  }

  public boolean isSpecialException(RVVException e, RegionVersionHolder holder) {
    // deltaGII introduced a special exception, i.e. the hole is not in the middle, but at the end
    // For example, P was at P3, operation P4 is on-going and identified as unfinished operation.
    // The next operation from P should be P5, but P's currentVersion() should be 3. In holder,
    // it's described as P3(2-4), i.e. exception.nextVersion == holder.version + 1
    return (e != null && e.nextVersion == holder.version + 1);
  }

  /** returns true if this holder has seen versions that the other holder hasn't */
  public synchronized boolean isNewerThanOrCanFillExceptionsFor(RegionVersionHolder<T> source) {
    if (source == null || getVersion() > source.getVersion()) {
      return true;
    }

    // Prevent synchronization issues if other is a live version vector.
    RegionVersionHolder<T> other = source.clone();

    // since the exception sets are sorted with most recent ones first
    // we can make one pass over both sets to see if there are overlapping
    // exceptions or exceptions I don't have that the other does
    mergeBitSet(); // dump the bit-set exceptions into the regular exceptions list
    other.mergeBitSet();
    List<RVVException> mine = canonicalExceptions(this.exceptions);
    Iterator<RVVException> myIterator = mine.iterator();
    List<RVVException> his = canonicalExceptions(other.exceptions);
    Iterator<RVVException> otherIterator = his.iterator();
    //    System.out.println("comparing " + mine + " with " + his);
    RVVException myException = myIterator.hasNext() ? myIterator.next() : null;
    RVVException otherException = otherIterator.hasNext() ? otherIterator.next() : null;
    // I can't fill exceptions that are newer than anything I've seen, so skip them
    while ((otherException != null && otherException.previousVersion > this.version)
        || isSpecialException(otherException, other)) {
      otherException = otherIterator.hasNext() ? otherIterator.next() : null;
    }
    while (otherException != null) {
      //      System.out.println("comparing " + myException + " with " + otherException);
      if (myException == null) {
        return true;
      }
      if (isSpecialException(myException, this)) {
        // skip special exception
        myException = myIterator.hasNext() ? myIterator.next() : null;
        continue;
      }
      if (isSpecialException(otherException, other)) {
        // skip special exception
        otherException = otherIterator.hasNext() ? otherIterator.next() : null;
        continue;
      }
      if (myException.previousVersion >= otherException.nextVersion) {
        //        |____|  my exception
        // |____|         other exception
        // my exception is newer than the other exception, so get the next one in the sorted list
        myException = myIterator.hasNext() ? myIterator.next() : null;
        continue;
      }
      if (otherException.previousVersion >= myException.nextVersion) {
        // |____|         my exception
        //        |____|  other exception
        // my exception is older than the other exception, so I have seen changes
        // it has not
        return true;
      }
      if ((myException.previousVersion == otherException.previousVersion)
          && (myException.nextVersion == otherException.nextVersion)) {
        // |____| my exception
        // |____|   -- other exception
        // If the exceptions are identical we can skip both of them and
        // go to the next pair
        myException = myIterator.hasNext() ? myIterator.next() : null;
        otherException = otherIterator.hasNext() ? otherIterator.next() : null;
        continue;
      }
      // There is some overlap between my exception and the other exception.
      //
      //     |_________________|       my exception
      //   |____|                   \
      //            |____|*          \ the other exception is one of
      //                    |____|   / these
      //   |_____________________|  /
      //
      // Unless my exception completely contains the other exception (*)
      // I have seen changes the other hasn't
      if ((otherException.previousVersion < myException.previousVersion)
          || (myException.nextVersion < otherException.nextVersion)) {
        return true;
      }
      // My exception completely contains the other exception and I have not
      // received any thing within its exception's range that it has not also seen
      otherException = otherIterator.hasNext() ? otherIterator.next() : null;
    }
    //    System.out.println("Done iterating and returning false");
    return false;
  }

  /* (non-Javadoc)
   * @see com.gemstone.gemfire.DataSerializable#toData(java.io.DataOutput)
   *
   * Version Holders are serialized to disk, so if the serialization
   * format of version holder changes, we need to upgrade our persistence
   * format.
   */
  public synchronized void toData(DataOutput out) throws IOException {
    mergeBitSet();
    InternalDataSerializer.writeUnsignedVL(this.version, out);
    int size = (this.exceptions == null) ? 0 : this.exceptions.size();
    InternalDataSerializer.writeUnsignedVL(size, out);
    out.writeBoolean(this.isDepartedMember);
    if (size > 0) {
      for (RVVException e : this.exceptions) {
        InternalDataSerializer.invokeToData(e, out);
      }
    }
  }

  /* (non-Javadoc)
   * @see com.gemstone.gemfire.DataSerializable#fromData(java.io.DataInput)
   */
  public void fromData(DataInput in) throws IOException {
    this.version = InternalDataSerializer.readUnsignedVL(in);
    int size = (int) InternalDataSerializer.readUnsignedVL(in);
    this.isDepartedMember = in.readBoolean();
    if (size > 0) {
      this.exceptions = new LinkedList<RVVException>();
      for (int i = 0; i < size; i++) {
        RVVException e = RVVException.createException(in);
        this.exceptions.add(e);
      }
    }
  }

  /* Warning: this hashcode uses mutable state and is only good for as long
   * as the holder is not modified.  It was added for unit testing.
   *
   * (non-Javadoc)
   * @see java.lang.Object#hashCode()
   */
  public synchronized int hashCode() {
    mergeBitSet();
    final int prime = 31;
    int result = 1;
    result = prime * result + (int) version;
    result = prime * result + (int) (version >> 32);
    result = prime * result + canonicalExceptions(exceptions).hashCode();
    return result;
  }

  // The special exception is kept in a clone, but sometimes we need to remove it to compare
  // whether 2 RegionVersionHolders are actually the same
  void removeSpecialException() {
    if (this.exceptions != null && !this.exceptions.isEmpty()) {
      for (Iterator<RVVException> it = this.exceptions.iterator(); it.hasNext(); ) {
        RVVException e = it.next();
        if (isSpecialException(e, this)) {
          it.remove();
        }
      }
      if (this.exceptions.isEmpty()) {
        this.exceptions = null;
      }
    }
  }

  /**
   * For test purposes only. Two RVVs that have effectively the same exceptions may represent the
   * exceptions differently. This method will test to see if the exception lists are effectively the
   * same, regardless of representation.
   */
  public synchronized boolean sameAs(RegionVersionHolder<T> other) {
    mergeBitSet();
    if (getVersion() != other.getVersion()) {
      return false;
    }
    RegionVersionHolder<T> vh1 = this.clone();
    RegionVersionHolder<T> vh2 = other.clone();
    vh1.removeSpecialException();
    vh2.removeSpecialException();
    if (vh1.exceptions == null || vh1.exceptions.isEmpty()) {
      if (vh2.exceptions != null && !vh2.exceptions.isEmpty()) {
        return false;
      }
    } else {
      List<RVVException> e1 = canonicalExceptions(vh1.exceptions);
      List<RVVException> e2 = canonicalExceptions(vh2.exceptions);
      Iterator<RVVException> it1 = e1.iterator();
      Iterator<RVVException> it2 = e2.iterator();
      while (it1.hasNext() && it2.hasNext()) {
        if (!it1.next().sameAs(it2.next())) {
          return false;
        }
      }
      return (!it1.hasNext() && !it2.hasNext());
    }

    return true;
  }

  @Override
  public boolean equals(Object obj) {
    if (obj == null || !(obj instanceof RegionVersionHolder)) {
      return false;
    }
    return sameAs((RegionVersionHolder) obj);
  }

  /**
   * Canonicalize an ordered set of exceptions. In the canonical form, none of the RVVExceptions
   * have any received versions.
   *
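   * <p>For example, an exception e&lt;rv2 - rv6&gt; that has received version 4 canonicalizes to
   * the two empty exceptions e&lt;rv4 - rv6&gt; and e&lt;rv2 - rv4&gt;.
   *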
   * @param exceptions the ordered exception list to canonicalize
   * @return The canonicalized set of exceptions.
   */
  protected List<RVVException> canonicalExceptions(List<RVVException> exceptions) {
    LinkedList<RVVException> canon = new LinkedList<RVVException>();
    if (exceptions != null) {
      // Iterate through the set of exceptions
      for (RVVException exception : exceptions) {
        if (exception.isEmpty()) {
          canon.add(exception);
        } else {
          long previous = exception.previousVersion;
          // Iterate through the set of received versions for this exception
          int insertAt = canon.size();
          for (ReceivedVersionsIterator it = exception.receivedVersionsIterator(); it.hasNext(); ) {
            Long received = it.next();
            // If we find a gap between the previous received version and the
            // next received version, add an exception.
            if (received != previous + 1) {
              canon.add(insertAt, RVVException.createException(previous, received));
            }
            // move the previous reference
            previous = received;
          }

          // if there is a gap between the last received version and the next
          // version, add an exception
          // this also handles the case where the RVV has no received versions,
          // because previous==exception.previousVersion in that case.
          if (exception.nextVersion != previous + 1) {
            canon.add(insertAt, RVVException.createException(previous, exception.nextVersion));
          }
        }
      }
    }
    return canon;
  }
}
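// A minimal usage sketch for RegionVersionHolder (not part of the original source). The class
// name is hypothetical, and it assumes same-package (test-style) access because recordVersion()
// and contains() above are package-private.
class RegionVersionHolderUsageSketch {
  public static void main(String[] args) {
    RegionVersionHolder<String> holder = new RegionVersionHolder<String>("member-1");
    holder.recordVersion(1);
    holder.recordVersion(2);
    holder.recordVersion(5); // versions 3 and 4 are now missing
    // getVersion()/getExceptionCount() merge the bit set, turning the gap into e<rv2 - rv5>
    System.out.println(holder.getVersion()); // 5
    System.out.println(holder.getExceptionCount()); // 1
    System.out.println(holder.contains(3)); // false - version 3 was never recorded
    holder.recordVersion(3);
    holder.recordVersion(4); // the exception is now filled and removed
    System.out.println(holder.getExceptionCount()); // 0
    System.out.println(holder.contains(3)); // true
  }
}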
/**
 * The base PartitionedRegion message type upon which other messages should be based.
 *
 * @author gregp
 * @since 6.5
 */
public abstract class RemoteOperationMessage extends DistributionMessage
    implements MessageWithReply, TransactionMessage {
  private static final Logger logger = LogService.getLogger();

  /** default exception to ensure a false-positive response is never returned */
  static final ForceReattemptException UNHANDLED_EXCEPTION =
      (ForceReattemptException)
          new ForceReattemptException(
                  LocalizedStrings.PartitionMessage_UNKNOWN_EXCEPTION.toLocalizedString())
              .fillInStackTrace();

  protected int processorId;

  /** the type of executor to use */
  protected int processorType;

  protected String regionPath;

  /**
   * The unique transaction Id on the sending member, used to construct a TXId on the receiving side
   */
  private int txUniqId = TXManagerImpl.NOTX;

  private InternalDistributedMember txMemberId = null;

  protected transient short flags;

  /*TODO [DISTTX] Convert into flag*/
  protected boolean isTransactionDistributed = false;

  public RemoteOperationMessage() {}

  public RemoteOperationMessage(
      InternalDistributedMember recipient, String regionPath, ReplyProcessor21 processor) {
    Assert.assertTrue(recipient != null, "RemoteMessage recipient can not be null");
    setRecipient(recipient);
    this.regionPath = regionPath;
    this.processorId = processor == null ? 0 : processor.getProcessorId();
    if (processor != null && this.isSevereAlertCompatible()) {
      processor.enableSevereAlertProcessing();
    }
    this.txUniqId = TXManagerImpl.getCurrentTXUniqueId();
    TXStateProxy txState = TXManagerImpl.getCurrentTXState();
    if (txState != null && txState.isMemberIdForwardingRequired()) {
      this.txMemberId = txState.getOriginatingMember();
    }
    setIfTransactionDistributed();
  }

  public RemoteOperationMessage(Set recipients, String regionPath, ReplyProcessor21 processor) {
    setRecipients(recipients);
    this.regionPath = regionPath;
    this.processorId = processor == null ? 0 : processor.getProcessorId();
    if (processor != null && this.isSevereAlertCompatible()) {
      processor.enableSevereAlertProcessing();
    }
    this.txUniqId = TXManagerImpl.getCurrentTXUniqueId();
    TXStateProxy txState = TXManagerImpl.getCurrentTXState();
    if (txState != null && txState.isMemberIdForwardingRequired()) {
      this.txMemberId = txState.getOriginatingMember();
    }
    setIfTransactionDistributed();
  }

  /**
   * Copy constructor that initializes the fields declared in this class
   *
   * @param other the message to copy from
   */
  public RemoteOperationMessage(RemoteOperationMessage other) {
    this.regionPath = other.regionPath;
    this.processorId = other.processorId;
    this.txUniqId = other.getTXUniqId();
    this.txMemberId = other.getTXMemberId();
    this.isTransactionDistributed = other.isTransactionDistributed;
  }

  /**
   * Severe alert processing enables suspect processing at the ack-wait-threshold and issuing of a
   * severe alert at the end of the ack-severe-alert-threshold. Some messages should not support
   * this type of processing (e.g., GII, or DLockRequests)
   *
   * @return whether severe-alert processing may be performed on behalf of this message
   */
  @Override
  public boolean isSevereAlertCompatible() {
    return true;
  }

  @Override
  public int getProcessorType() {
    return DistributionManager.PARTITIONED_REGION_EXECUTOR;
  }

  /** @return the full path of the region */
  public final String getRegionPath() {
    return regionPath;
  }

  /**
   * @return the {@link ReplyProcessor21} id associated with the message, or 0 if no
   *     acknowledgement is required.
   */
  @Override
  public final int getProcessorId() {
    return this.processorId;
  }

  /**
   * @param processorId1 the {@link com.gemstone.gemfire.distributed.internal.ReplyProcessor21} id
   *     associated with the message, or 0 if no acknowledgement is required.
   */
  public final void registerProcessor(int processorId1) {
    this.processorId = processorId1;
  }

  public void setCacheOpRecipients(Collection cacheOpRecipients) {
    // TODO need to implement this for other remote ops
    assert this instanceof RemotePutMessage;
  }

  /** check to see if the cache is closing */
  public final boolean checkCacheClosing(DistributionManager dm) {
    GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
    // return (cache != null && cache.isClosed());
    return cache == null || cache.isClosed();
  }

  /**
   * check to see if the distributed system is closing
   *
   * @return true if the distributed system is closing
   */
  public final boolean checkDSClosing(DistributionManager dm) {
    InternalDistributedSystem ds = dm.getSystem();
    return (ds == null || ds.isDisconnecting());
  }

  /**
   * Upon receipt of the message, both process the message and send an acknowledgement, not
   * necessarily in that order. Note: Any hang in this message may cause a distributed deadlock for
   * those threads waiting for an acknowledgement.
   *
   * @throws PartitionedRegionException if the region does not exist (typically, if it has been
   *     destroyed)
   */
  @Override
  public void process(final DistributionManager dm) {
    Throwable thr = null;
    boolean sendReply = true;
    LocalRegion r = null;
    long startTime = 0;
    try {
      if (checkCacheClosing(dm) || checkDSClosing(dm)) {
        thr =
            new CacheClosedException(
                LocalizedStrings.PartitionMessage_REMOTE_CACHE_IS_CLOSED_0.toLocalizedString(
                    dm.getId()));
        return;
      }
      GemFireCacheImpl gfc = (GemFireCacheImpl) CacheFactory.getInstance(dm.getSystem());
      r = gfc.getRegionByPathForProcessing(this.regionPath);
      if (r == null && failIfRegionMissing()) {
        // if the distributed system is disconnecting, don't send a reply saying
        // the partitioned region can't be found (bug 36585)
        thr =
            new RegionDestroyedException(
                LocalizedStrings.RemoteOperationMessage_0_COULD_NOT_FIND_REGION_1.toLocalizedString(
                    new Object[] {dm.getDistributionManagerId(), regionPath}),
                regionPath);
        return; // reply sent in finally block below
      }

      thr = UNHANDLED_EXCEPTION;

      // [bruce] r might be null here, so we have to go to the cache instance to get the txmgr
      TXManagerImpl txMgr = GemFireCacheImpl.getInstance().getTxManager();
      TXStateProxy tx = null;
      try {
        tx = txMgr.masqueradeAs(this);
        sendReply = operateOnRegion(dm, r, startTime);
      } finally {
        txMgr.unmasquerade(tx);
      }
      thr = null;

    } catch (RemoteOperationException fre) {
      thr = fre;
    } catch (DistributedSystemDisconnectedException se) {
      // bug 37026: this is too noisy...
      //      throw new CacheClosedException("remote system shutting down");
      //      thr = se; cache is closed, no point trying to send a reply
      thr = null;
      sendReply = false;
      if (logger.isDebugEnabled()) {
        logger.debug("shutdown caught, abandoning message: {}", se.getMessage(), se);
      }
    } catch (RegionDestroyedException rde) {
      // [bruce] RDE does not always mean that the sender's region is also
      //         destroyed, so we must send back an exception.  If the sender's
      //         region is also destroyed, who cares if we send it an exception
      // if (pr != null && pr.isClosed) {
      thr =
          new ForceReattemptException(
              LocalizedStrings.PartitionMessage_REGION_IS_DESTROYED_IN_0.toLocalizedString(
                  dm.getDistributionManagerId()),
              rde);
      // }
    } catch (VirtualMachineError err) {
      SystemFailure.initiateFailure(err);
      // If this ever returns, rethrow the error.  We're poisoned
      // now, so don't let this thread continue.
      throw err;
    } catch (Throwable t) {
      // Whenever you catch Error or Throwable, you must also
      // catch VirtualMachineError (see above).  However, there is
      // _still_ a possibility that you are dealing with a cascading
      // error condition, so you also need to check to see if the JVM
      // is still usable:
      SystemFailure.checkFailure();
      // log the exception at fine level if there is no reply to the message
      thr = null;
      if (sendReply) {
        if (!checkDSClosing(dm)) {
          thr = t;
        } else {
          // don't pass arbitrary runtime exceptions and errors back if this
          // cache/vm is closing
          thr =
              new ForceReattemptException(
                  LocalizedStrings.PartitionMessage_DISTRIBUTED_SYSTEM_IS_DISCONNECTING
                      .toLocalizedString());
        }
      }
      if (logger.isTraceEnabled(LogMarker.DM) && (t instanceof RuntimeException)) {
        logger.trace(LogMarker.DM, "Exception caught while processing message", t);
      }
    } finally {
      if (sendReply) {
        ReplyException rex = null;

        if (thr != null) {
          // don't transmit the exception if this message was to a listener
          // and this listener is shutting down
          rex = new ReplyException(thr);
        }

        // Send the reply if operateOnRegion returned true
        sendReply(getSender(), this.processorId, dm, rex, r, startTime);
      }
    }
  }

  /**
   * Send a generic ReplyMessage. This is in a method so that subclasses can override the reply
   * message type
   *
   * @param pr the Partitioned Region for the message whose statistics are incremented
   * @param startTime the start time of the operation in nanoseconds
   * @see PutMessage#sendReply
   */
  protected void sendReply(
      InternalDistributedMember member,
      int procId,
      DM dm,
      ReplyException ex,
      LocalRegion pr,
      long startTime) {
    //    if (pr != null && startTime > 0) {
    // pr.getPrStats().endRemoteOperationMessagesProcessing(startTime);
    //    }

    ReplyMessage.send(member, procId, ex, getReplySender(dm), pr != null && pr.isInternalRegion());
  }

  /**
   * Allows overriding classes to choose whether a {@link RegionDestroyedException} is thrown if no
   * partitioned region is found (typically occurs if the message will be sent before the
   * PartitionedRegion has been fully constructed).
   *
   * @return true if throwing a {@link RegionDestroyedException} is acceptable
   */
  protected boolean failIfRegionMissing() {
    return true;
  }

  /**
   * return a new reply processor for this class, for use in relaying a response. This <b>must</b>
   * be an instance method so subclasses can override it properly.
   */
  RemoteOperationResponse createReplyProcessor(PartitionedRegion r, Set recipients) {
    return new RemoteOperationResponse(r.getSystem(), recipients);
  }

  protected abstract boolean operateOnRegion(DistributionManager dm, LocalRegion r, long startTime)
      throws RemoteOperationException;

  /**
   * Fill out this instance of the message using the <code>DataInput</code>. Required to be a
   * {@link com.gemstone.gemfire.DataSerializable}. Note: must be symmetric with
   * {@link #toData(DataOutput)} in what it reads.
   */
  @Override
  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
    super.fromData(in);
    this.flags = in.readShort();
    setFlags(this.flags, in);
    this.regionPath = DataSerializer.readString(in);

    // extra field post 9.0
    if (InternalDataSerializer.getVersionForDataStream(in).compareTo(Version.GFE_90) >= 0) {
      this.isTransactionDistributed = in.readBoolean();
    }
  }

  public InternalDistributedMember getTXOriginatorClient() {
    return this.txMemberId;
  }

  /**
   * Send the contents of this instance to the <code>DataOutput</code>. Required to be a
   * {@link com.gemstone.gemfire.DataSerializable}. Note: must be symmetric with
   * {@link #fromData(DataInput)} in what it writes.
   */
  @Override
  public void toData(DataOutput out) throws IOException {
    super.toData(out);
    short flags = computeCompressedShort();
    out.writeShort(flags);
    if (this.processorId != 0) {
      out.writeInt(this.processorId);
    }
    if (this.processorType != 0) {
      out.writeByte(this.processorType);
    }
    if (this.getTXUniqId() != TXManagerImpl.NOTX) {
      out.writeInt(this.getTXUniqId());
    }
    if (this.getTXMemberId() != null) {
      DataSerializer.writeObject(this.getTXMemberId(), out);
    }
    DataSerializer.writeString(this.regionPath, out);

    // extra field post 9.0
    if (InternalDataSerializer.getVersionForDataStream(out).compareTo(Version.GFE_90) >= 0) {
      out.writeBoolean(this.isTransactionDistributed);
    }
  }

  protected short computeCompressedShort() {
    short flags = 0;
    if (this.processorId != 0) flags |= HAS_PROCESSOR_ID;
    if (this.processorType != 0) flags |= HAS_PROCESSOR_TYPE;
    if (this.getTXUniqId() != TXManagerImpl.NOTX) flags |= HAS_TX_ID;
    if (this.getTXMemberId() != null) flags |= HAS_TX_MEMBERID;
    return flags;
  }

  protected void setFlags(short flags, DataInput in) throws IOException, ClassNotFoundException {
    if ((flags & HAS_PROCESSOR_ID) != 0) {
      this.processorId = in.readInt();
      ReplyProcessor21.setMessageRPId(this.processorId);
    }
    if ((flags & HAS_PROCESSOR_TYPE) != 0) {
      this.processorType = in.readByte();
    }
    if ((flags & HAS_TX_ID) != 0) {
      this.txUniqId = in.readInt();
    }
    if ((flags & HAS_TX_MEMBERID) != 0) {
      this.txMemberId = DataSerializer.readObject(in);
    }
  }

  protected final InternalDistributedMember getTXMemberId() {
    return txMemberId;
  }

  private static final String PN_TOKEN = ".cache.";

  @Override
  public String toString() {
    StringBuffer buff = new StringBuffer();
    String className = getClass().getName();
    //    className.substring(className.lastIndexOf('.', className.lastIndexOf('.') - 1) + 1);  //
    // partition.<foo> more generic version
    buff.append(
        className.substring(className.indexOf(PN_TOKEN) + PN_TOKEN.length())); // partition.<foo>
    buff.append("(regionPath="); // make sure this is the first one
    buff.append(this.regionPath);
    appendFields(buff);
    buff.append(" ,distTx=");
    buff.append(this.isTransactionDistributed);
    buff.append(")");
    return buff.toString();
  }

  /**
   * Helper method for {@link #toString()}
   *
   * @param buff buffer in which to append the state of this instance
   */
  protected void appendFields(StringBuffer buff) {
    buff.append("; sender=").append(getSender()).append("; recipients=[");
    InternalDistributedMember[] recips = getRecipients();
    for (int i = 0; i < recips.length - 1; i++) {
      buff.append(recips[i]).append(',');
    }
    if (recips.length > 0) {
      buff.append(recips[recips.length - 1]);
    }
    buff.append("]; processorId=").append(this.processorId);
  }

  public InternalDistributedMember getRecipient() {
    return getRecipients()[0];
  }

  public void setOperation(Operation op) {
    // override in subclasses holding operations
  }

  /**
   * added to support old value to be written on wire.
   *
   * @param value true or false
   * @since 6.5
   */
  public void setHasOldValue(boolean value) {
    // override in subclasses which need old value to be serialized.
    // overridden by classes like PutMessage, DestroyMessage.
  }

  /** @return the txUniqId */
  public final int getTXUniqId() {
    return txUniqId;
  }

  public final InternalDistributedMember getMemberToMasqueradeAs() {
    if (txMemberId == null) {
      return getSender();
    }
    return txMemberId;
  }

  public boolean canStartRemoteTransaction() {
    return true;
  }

  @Override
  public boolean canParticipateInTransaction() {
    return true;
  }

  /**
   * A processor on which to await a response from the {@link RemoteOperationMessage} recipient,
   * capturing any CacheException thrown by the recipient and handling it as an expected exception.
   *
   * @author Greg Passmore
   * @since 6.5
   * @see #waitForCacheException()
   */
  public static class RemoteOperationResponse extends DirectReplyProcessor {
    /** The exception thrown when the recipient does not reply */
    volatile ForceReattemptException prce;

    /** Whether a response has been received */
    volatile boolean responseReceived;

    /** whether a response is required */
    boolean responseRequired;

    public RemoteOperationResponse(InternalDistributedSystem dm, Collection initMembers) {
      this(dm, initMembers, true);
    }

    public RemoteOperationResponse(
        InternalDistributedSystem dm, Collection initMembers, boolean register) {
      super(dm, initMembers);
      if (register) {
        register();
      }
    }

    public RemoteOperationResponse(InternalDistributedSystem dm, InternalDistributedMember member) {
      this(dm, member, true);
    }

    public RemoteOperationResponse(
        InternalDistributedSystem dm, InternalDistributedMember member, boolean register) {
      super(dm, member);
      if (register) {
        register();
      }
    }
    /** require a response message to be received */
    public void requireResponse() {
      this.responseRequired = true;
    }

    @Override
    public void memberDeparted(final InternalDistributedMember id, final boolean crashed) {
      if (id != null) {
        if (removeMember(id, true)) {
          this.prce =
              new ForceReattemptException(
                  LocalizedStrings
                      .PartitionMessage_PARTITIONRESPONSE_GOT_MEMBERDEPARTED_EVENT_FOR_0_CRASHED_1
                      .toLocalizedString(new Object[] {id, Boolean.valueOf(crashed)}));
        }
        checkIfDone();
      } else {
        Exception e =
            new Exception(
                LocalizedStrings.PartitionMessage_MEMBERDEPARTED_GOT_NULL_MEMBERID
                    .toLocalizedString());
        logger.info(
            LocalizedMessage.create(
                LocalizedStrings.PartitionMessage_MEMBERDEPARTED_GOT_NULL_MEMBERID_CRASHED_0,
                Boolean.valueOf(crashed)),
            e);
      }
    }

    /**
     * Waits for the response from the {@link RemoteOperationMessage}'s recipient
     *
     * @throws CacheException if the recipient threw a cache exception during message processing
     * @throws RemoteOperationException if the recipient left the distributed system before the
     *     response was received.
     * @throws PrimaryBucketException
     */
    public final void waitForCacheException()
        throws CacheException, RemoteOperationException, PrimaryBucketException {
      try {
        waitForRepliesUninterruptibly();
        if (this.prce != null || (this.responseRequired && !this.responseReceived)) {
          throw new RemoteOperationException(
              LocalizedStrings.PartitionMessage_ATTEMPT_FAILED.toLocalizedString(), this.prce);
        }
      } catch (ReplyException e) {
        Throwable t = e.getCause();
        if (t instanceof CacheException) {
          throw (CacheException) t;
        } else if (t instanceof RemoteOperationException) {
          RemoteOperationException ft = (RemoteOperationException) t;
          // See FetchEntriesMessage, which can marshal a ForceReattempt
          // across to the sender
          RemoteOperationException fre =
              new RemoteOperationException(
                  LocalizedStrings.PartitionMessage_PEER_REQUESTS_REATTEMPT.toLocalizedString(), t);
          if (ft.hasHash()) {
            fre.setHash(ft.getHash());
          }
          throw fre;
        } else if (t instanceof PrimaryBucketException) {
          // See FetchEntryMessage, GetMessage, InvalidateMessage,
          // PutMessage
          // which can marshal a ForceReattempt across to the sender
          throw new PrimaryBucketException(
              LocalizedStrings.PartitionMessage_PEER_FAILED_PRIMARY_TEST.toLocalizedString(), t);
        } else if (t instanceof RegionDestroyedException) {
          RegionDestroyedException rde = (RegionDestroyedException) t;
          throw rde;
        } else if (t instanceof CancelException) {
          if (logger.isDebugEnabled()) {
            logger.debug(
                "RemoteOperationResponse got CacheClosedException from {}, throwing ForceReattemptException",
                e.getSender(),
                t);
          }
          throw new RemoteOperationException(
              LocalizedStrings.PartitionMessage_PARTITIONRESPONSE_GOT_REMOTE_CACHECLOSEDEXCEPTION
                  .toLocalizedString(),
              t);
        } else if (t instanceof LowMemoryException) {
          if (logger.isDebugEnabled()) {
            logger.debug(
                "RemoteOperationResponse re-throwing remote LowMemoryException from {}",
                e.getSender(),
                t);
          }
          throw (LowMemoryException) t;
        }
        e.handleAsUnexpected();
      }
    }

    /* overridden from ReplyProcessor21 */
    @Override
    public void process(DistributionMessage msg) {
      this.responseReceived = true;
      super.process(msg);
    }
  }

  @Override
  public boolean isTransactionDistributed() {
    return this.isTransactionDistributed;
  }

  /*
   * For Distributed Tx
   */
  public void setTransactionDistributed(boolean isDistTx) {
    this.isTransactionDistributed = isDistTx;
  }

  /*
   * For Distributed Tx
   */
  private void setIfTransactionDistributed() {
    GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
    if (cache != null) {
      if (cache.getTxManager() != null) {
        this.isTransactionDistributed = cache.getTxManager().isDistributed();
      }
    }
  }
}
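// A minimal standalone sketch (not part of the original source) of the compressed-flags pattern
// used by computeCompressedShort()/setFlags() above: a short bit mask is written first, and each
// optional field is written, and later read, only when its flag bit is set. The class name and
// flag constants below are hypothetical stand-ins for the real HAS_* constants.
class CompressedFlagsSketch {
  private static final short HAS_PROCESSOR_ID = 0x1;
  private static final short HAS_TX_ID = 0x2;

  static byte[] write(int processorId, int txUniqId) throws java.io.IOException {
    java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream();
    java.io.DataOutputStream out = new java.io.DataOutputStream(bytes);
    short flags = 0;
    if (processorId != 0) flags |= HAS_PROCESSOR_ID;
    if (txUniqId != 0) flags |= HAS_TX_ID;
    out.writeShort(flags); // the mask always goes first
    if (processorId != 0) out.writeInt(processorId);
    if (txUniqId != 0) out.writeInt(txUniqId);
    return bytes.toByteArray();
  }

  static int[] read(byte[] data) throws java.io.IOException {
    java.io.DataInputStream in =
        new java.io.DataInputStream(new java.io.ByteArrayInputStream(data));
    short flags = in.readShort();
    int processorId = (flags & HAS_PROCESSOR_ID) != 0 ? in.readInt() : 0;
    int txUniqId = (flags & HAS_TX_ID) != 0 ? in.readInt() : 0;
    return new int[] {processorId, txUniqId};
  }

  public static void main(String[] args) throws java.io.IOException {
    int[] roundTripped = read(write(42, 0)); // only the processor id is written on the wire
    System.out.println(roundTripped[0] + " " + roundTripped[1]); // prints "42 0"
  }
}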
/**
 * @author jdeppe
 * @author jblum
 * @since 8.1
 */
@SuppressWarnings("unused")
public class JettyHelper {
  private static final Logger logger = LogService.getLogger();

  private static final String FILE_PATH_SEPARATOR = System.getProperty("file.separator");
  private static final String USER_DIR = System.getProperty("user.dir");

  private static final String USER_NAME = System.getProperty("user.name");

  private static final String HTTPS = "https";

  private static String bindAddress = "0.0.0.0";

  private static int port = 0;

  public static Server initJetty(
      final String bindAddress,
      final int port,
      boolean useSSL,
      boolean needClientAuth,
      String protocols,
      String ciphers,
      Properties sysProps)
      throws Exception {

    final Server jettyServer = new Server();

    // Add a handler collection here, so that each new context adds itself
    // to this collection.
    jettyServer.setHandler(new HandlerCollection());
    ServerConnector connector = null;

    HttpConfiguration httpConfig = new HttpConfiguration();
    httpConfig.setSecureScheme(HTTPS);
    httpConfig.setSecurePort(port);

    if (useSSL) {
      SslContextFactory sslContextFactory = new SslContextFactory();

      sslContextFactory.setNeedClientAuth(needClientAuth);

      if (!StringUtils.isBlank(ciphers) && !"any".equalsIgnoreCase(ciphers)) {
        // If the user has specified "any", let the SSL layer decide on the ciphers
        sslContextFactory.setIncludeCipherSuites(SSLUtil.readArray(ciphers));
      }

      String protocol = SSLUtil.getSSLAlgo(SSLUtil.readArray(protocols));
      if (protocol != null) {
        sslContextFactory.setProtocol(protocol);
      } else {
        logger.warn(ManagementStrings.SSL_PROTOCOAL_COULD_NOT_BE_DETERMINED);
      }

      if (StringUtils.isBlank(sysProps.getProperty("javax.net.ssl.keyStore"))) {
        throw new GemFireConfigException(
            "Key store can't be empty if SSL is enabled for HttpService");
      }

      sslContextFactory.setKeyStorePath(sysProps.getProperty("javax.net.ssl.keyStore"));

      if (!StringUtils.isBlank(sysProps.getProperty("javax.net.ssl.keyStoreType"))) {
        sslContextFactory.setKeyStoreType(sysProps.getProperty("javax.net.ssl.keyStoreType"));
      }

      if (!StringUtils.isBlank(sysProps.getProperty("javax.net.ssl.keyStorePassword"))) {
        sslContextFactory.setKeyStorePassword(
            sysProps.getProperty("javax.net.ssl.keyStorePassword"));
      }

      if (!StringUtils.isBlank(sysProps.getProperty("javax.net.ssl.trustStore"))) {
        sslContextFactory.setTrustStorePath(sysProps.getProperty("javax.net.ssl.trustStore"));
      }

      if (!StringUtils.isBlank(sysProps.getProperty("javax.net.ssl.trustStorePassword"))) {
        sslContextFactory.setTrustStorePassword(
            sysProps.getProperty("javax.net.ssl.trustStorePassword"));
      }

      httpConfig.addCustomizer(new SecureRequestCustomizer());

      // Somehow, with HTTP_2.0 Jetty throws an NPE. Need to investigate further whether all
      // GemFire web applications (Pulse, REST) can work with HTTP_1.1
      connector =
          new ServerConnector(
              jettyServer,
              new SslConnectionFactory(sslContextFactory, HttpVersion.HTTP_1_1.asString()),
              new HttpConnectionFactory(httpConfig));

      connector.setPort(port);
    } else {
      connector = new ServerConnector(jettyServer, new HttpConnectionFactory(httpConfig));

      connector.setPort(port);
    }

    jettyServer.setConnectors(new Connector[] {connector});

    if (!StringUtils.isBlank(bindAddress)) {
      connector.setHost(bindAddress);
    }

    if (bindAddress != null && !bindAddress.isEmpty()) {
      JettyHelper.bindAddress = bindAddress;
    }

    JettyHelper.port = port;

    return jettyServer;
  }

  public static Server startJetty(final Server jetty) throws Exception {
    jetty.start();
    return jetty;
  }

  public static Server addWebApplication(
      final Server jetty, final String webAppContext, final String warFilePath) {
    WebAppContext webapp = new WebAppContext();
    webapp.setContextPath(webAppContext);
    webapp.setWar(warFilePath);
    webapp.setParentLoaderPriority(false);

    File tmpPath = new File(getWebAppBaseDirectory(webAppContext));
    tmpPath.mkdirs();
    webapp.setTempDirectory(tmpPath);

    ((HandlerCollection) jetty.getHandler()).addHandler(webapp);

    return jetty;
  }

  private static String getWebAppBaseDirectory(final String context) {
    String underscoredContext = context.replace("/", "_");
    final String workingDirectory =
        USER_DIR
            .concat(FILE_PATH_SEPARATOR)
            .concat("GemFire_" + USER_NAME)
            .concat(FILE_PATH_SEPARATOR)
            .concat("services")
            .concat(FILE_PATH_SEPARATOR)
            .concat("http")
            .concat(FILE_PATH_SEPARATOR)
            .concat((StringUtils.isBlank(bindAddress)) ? "0.0.0.0" : bindAddress)
            .concat("_")
            .concat(String.valueOf(port).concat(underscoredContext));

    return workingDirectory;
  }

  private static final CountDownLatch latch = new CountDownLatch(1);

  private static String normalizeWebAppArchivePath(final String webAppArchivePath) {
    return (webAppArchivePath.startsWith(File.separator)
            ? new File(webAppArchivePath)
            : new File(".", webAppArchivePath))
        .getAbsolutePath();
  }

  private static String normalizeWebAppContext(final String webAppContext) {
    return (webAppContext.startsWith("/") ? webAppContext : "/" + webAppContext);
  }

  public static void main(final String... args) throws Exception {
    if (args.length > 1) {
      System.out.printf("Temporary Directory @ ($1%s)%n", USER_DIR);

      final Server jetty = JettyHelper.initJetty(null, 8090, false, false, null, null, null);

      for (int index = 0; index < args.length; index += 2) {
        final String webAppContext = args[index];
        final String webAppArchivePath = args[index + 1];

        JettyHelper.addWebApplication(
            jetty,
            normalizeWebAppContext(webAppContext),
            normalizeWebAppArchivePath(webAppArchivePath));
      }

      JettyHelper.startJetty(jetty);
      latch.await();
    } else {
      System.out.printf(
          "usage:%n>java com.gemstone.gemfire.management.internal.TomcatHelper <web-app-context> <war-file-path> [<web-app-context> <war-file-path>]*");
    }
  }
}
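// A minimal usage sketch for JettyHelper above (not part of the original source). The context
// path and WAR location are hypothetical placeholders; with useSSL=false the SSL-related
// arguments and system properties are not consulted.
class JettyHelperUsageSketch {
  public static void main(String[] args) throws Exception {
    org.eclipse.jetty.server.Server server =
        JettyHelper.initJetty(null, 8080, false, false, null, null, null);
    JettyHelper.addWebApplication(server, "/example", "/tmp/example.war");
    JettyHelper.startJetty(server);
    server.join(); // block until the server is stopped
  }
}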
/**
 * Controls a {@link ControllableProcess} using files to communicate between processes.
 *
 * @since 8.0
 */
public class FileProcessController implements ProcessController {
  private static final Logger logger = LogService.getLogger();

  public static final String STATUS_TIMEOUT_PROPERTY =
      "gemfire.FileProcessController.STATUS_TIMEOUT";

  private final long statusTimeoutMillis;
  private final FileControllerParameters arguments;
  private final int pid;

  /**
   * Constructs an instance for controlling a local process.
   *
   * @param arguments details about the controllable process
   * @param pid process id identifying the process to control
   * @throws IllegalArgumentException if pid is not a positive integer
   */
  public FileProcessController(final FileControllerParameters arguments, final int pid) {
    this(arguments, pid, Long.getLong(STATUS_TIMEOUT_PROPERTY, 60 * 1000), TimeUnit.MILLISECONDS);
  }

  /**
   * Constructs an instance for controlling a local process.
   *
   * @param arguments details about the controllable process
   * @param pid process id identifying the process to control
   * @param timeout the timeout that operations must complete within
   * @param units the units of the timeout
   * @throws IllegalArgumentException if pid is not a positive integer
   */
  public FileProcessController(
      final FileControllerParameters arguments,
      final int pid,
      final long timeout,
      final TimeUnit units) {
    if (pid < 1) {
      throw new IllegalArgumentException("Invalid pid '" + pid + "' specified");
    }
    this.pid = pid;
    this.arguments = arguments;
    this.statusTimeoutMillis = units.toMillis(timeout);
  }

  @Override
  public int getProcessId() {
    return this.pid;
  }

  @Override
  public String status()
      throws UnableToControlProcessException, IOException, InterruptedException, TimeoutException {
    return status(
        this.arguments.getWorkingDirectory(),
        this.arguments.getProcessType().getStatusRequestFileName(),
        this.arguments.getProcessType().getStatusFileName());
  }

  @Override
  public void stop() throws UnableToControlProcessException, IOException {
    stop(
        this.arguments.getWorkingDirectory(),
        this.arguments.getProcessType().getStopRequestFileName());
  }

  @Override
  public void checkPidSupport() {
    throw new AttachAPINotFoundException(
        LocalizedStrings.Launcher_ATTACH_API_NOT_FOUND_ERROR_MESSAGE.toLocalizedString());
  }

  private void stop(final File workingDir, final String stopRequestFileName)
      throws UnableToControlProcessException, IOException {
    final File stopRequestFile = new File(workingDir, stopRequestFileName);
    if (!stopRequestFile.exists()) {
      stopRequestFile.createNewFile();
    }
  }

  private String status(
      final File workingDir, final String statusRequestFileName, final String statusFileName)
      throws UnableToControlProcessException, IOException, InterruptedException, TimeoutException {
    // monitor for statusFile
    final File statusFile = new File(workingDir, statusFileName);
    final AtomicReference<String> statusRef = new AtomicReference<String>();

    final ControlRequestHandler statusHandler =
        new ControlRequestHandler() {
          @Override
          public void handleRequest() throws IOException {
            // read the statusFile
            final BufferedReader reader = new BufferedReader(new FileReader(statusFile));
            final StringBuilder lines = new StringBuilder();
            try {
              String line = null;
              while ((line = reader.readLine()) != null) {
                lines.append(line);
              }
            } finally {
              statusRef.set(lines.toString());
              reader.close();
            }
          }
        };

    final ControlFileWatchdog statusFileWatchdog =
        new ControlFileWatchdog(workingDir, statusFileName, statusHandler, true);
    statusFileWatchdog.start();

    final File statusRequestFile = new File(workingDir, statusRequestFileName);
    if (!statusRequestFile.exists()) {
      statusRequestFile.createNewFile();
    }

    // if timeout invoke stop and then throw TimeoutException
    final long start = System.currentTimeMillis();
    while (statusFileWatchdog.isAlive()) {
      Thread.sleep(10);
      if (System.currentTimeMillis() >= start + this.statusTimeoutMillis) {
        final TimeoutException te =
            new TimeoutException("Timed out waiting for process to create " + statusFile);
        try {
          statusFileWatchdog.stop();
        } catch (InterruptedException e) {
          logger.info("Interrupted while stopping status file watchdog.", e);
        } catch (RuntimeException e) {
          logger.info("Unexpected failure while stopping status file watchdog.", e);
        }
        throw te;
      }
    }

    final String lines = statusRef.get();
    if (null == lines || lines.trim().isEmpty()) {
      throw new IllegalStateException("Failed to read status file");
    }
    return lines;
  }
}
/**
 * A listener which will try to resend the instantiators to all servers if the entire server
 * distributed system was lost and came back online. This listener also takes care of sending the
 * initial list of instantiators to the servers. <br>
 * <br>
 * TODO - There is a window in which all of the servers could crash and come back up and we would
 * connect to a new server before realizing that all the servers crashed. To fix this, we would need
 * to get some kind of birth date of the server distributed system we connect to and use that to
 * decide whether we need to recover instantiators. As it is, the window is not very large.
 *
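 * <p>A registration sketch (hedged: both {@code getEndpointManager()} and {@code addListener} are
 * assumed here for illustration; the internal pool implementation wires this listener up itself
 * when the pool is created):
 *
 * <pre>
 * InstantiatorRecoveryListener listener = new InstantiatorRecoveryListener(background, pool);
 * pool.getEndpointManager().addListener(listener); // assumed registration API
 * </pre>
 *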
 * @author dsmith
 */
public class InstantiatorRecoveryListener extends EndpointManager.EndpointListenerAdapter {
  private static final Logger logger = LogService.getLogger();

  private final AtomicInteger endpointCount = new AtomicInteger();
  protected final InternalPool pool;
  protected final ScheduledExecutorService background;
  protected final long pingInterval;
  protected final Object recoveryScheduledLock = new Object();
  protected boolean recoveryScheduled;

  public InstantiatorRecoveryListener(ScheduledExecutorService background, InternalPool pool) {
    this.pool = pool;
    this.pingInterval = pool.getPingInterval();
    this.background = background;
  }

  @Override
  public void endpointCrashed(Endpoint endpoint) {
    int count = endpointCount.decrementAndGet();
    if (logger.isDebugEnabled()) {
      logger.debug("InstantiatorRecoveryTask - EndpointCrashed. Now have {} endpoints", count);
    }
  }

  @Override
  public void endpointNoLongerInUse(Endpoint endpoint) {
    int count = endpointCount.decrementAndGet();
    if (logger.isDebugEnabled()) {
      logger.debug(
          "InstantiatorRecoveryTask - EndpointNoLongerInUse. Now have {} endpoints", count);
    }
  }

  @Override
  public void endpointNowInUse(Endpoint endpoint) {
    int count = endpointCount.incrementAndGet();
    final boolean isDebugEnabled = logger.isDebugEnabled();
    if (isDebugEnabled) {
      logger.debug("InstantiatorRecoveryTask - EndpointNowInUse. Now have {} endpoints", count);
    }
    if (count == 1) {
      synchronized (recoveryScheduledLock) {
        if (!recoveryScheduled) {
          try {
            recoveryScheduled = true;
            background.execute(new RecoveryTask());
            if (isDebugEnabled) {
              logger.debug("InstantiatorRecoveryTask - Scheduled Recovery Task");
            }
          } catch (RejectedExecutionException e) {
            // ignore - the executor has been shut down, which means we're shutting down.
          }
        }
      }
    }
  }

  protected class RecoveryTask extends PoolTask {

    @Override
    public void run2() {
      if (pool.getCancelCriterion().cancelInProgress() != null) {
        return;
      }
      synchronized (recoveryScheduledLock) {
        recoveryScheduled = false;
      }
      Object[] objects = InternalInstantiator.getInstantiatorsForSerialization();
      if (objects.length == 0) {
        return;
      }
      EventID eventId = InternalInstantiator.generateEventId();
      // Fix for bug:40930
      if (eventId == null) {
        background.schedule(new RecoveryTask(), pingInterval, TimeUnit.MILLISECONDS);
        recoveryScheduled = true;
      } else {
        try {
          RegisterInstantiatorsOp.execute(pool, objects, eventId);
        } catch (CancelException e) {
          throw e;
        } catch (RejectedExecutionException e) {
          // This is probably because we've started to shut down.
          pool.getCancelCriterion().checkCancelInProgress(e);
          throw e; // weird
        } catch (Exception e) {
          pool.getCancelCriterion().checkCancelInProgress(e);

          // If an exception occurred on the server, don't retry
          Throwable cause = e.getCause();
          if (cause instanceof ClassNotFoundException) {
            logger.warn(
                LocalizedMessage.create(
                    LocalizedStrings
                        .InstantiatorRecoveryListener_INSTANTIATORRECOVERYTASK_ERROR_CLASSNOTFOUNDEXCEPTION,
                    cause.getMessage()));
          } else {
            logger.warn(
                LocalizedMessage.create(
                    LocalizedStrings
                        .InstantiatorRecoveryListener_INSTANTIATORRECOVERYTASK_ERROR_RECOVERING_INSTANTIATORS),
                e);
          }
        } finally {
          pool.releaseThreadLocalConnection();
        }
      }
    }
  }
}
/**
 * A Partitioned Region meta-data update message. This is used to send all local bucket's meta-data
 * to other members with the same Partitioned Region.
 *
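 * <p>Sender-side sketch (hedged: the recipient set and the bucket-profile map are assumed to be
 * gathered by the caller, e.g. from the region's advisor):
 *
 * <pre>
 * ReplyProcessor21 rp = AllBucketProfilesUpdateMessage.send(recipients, dm, prId, profiles, true);
 * if (rp != null) {
 *   rp.waitForRepliesUninterruptibly(); // block until all recipients have acknowledged
 * }
 * </pre>
 *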
 * @author Yogesh Mahajan
 * @since 6.6
 */
public final class AllBucketProfilesUpdateMessage extends DistributionMessage
    implements MessageWithReply {
  private static final Logger logger = LogService.getLogger();

  private static final long serialVersionUID = 1L;
  private int prId;
  private int processorId = 0;
  private Map<Integer, BucketAdvisor.BucketProfile> profiles;

  public AllBucketProfilesUpdateMessage() {}

  @Override
  public final int getProcessorType() {
    return DistributionManager.WAITING_POOL_EXECUTOR;
  }

  private AllBucketProfilesUpdateMessage(
      Set recipients,
      int partitionedRegionId,
      int processorId,
      Map<Integer, BucketAdvisor.BucketProfile> profiles) {
    setRecipients(recipients);
    this.processorId = processorId;
    this.prId = partitionedRegionId;
    this.profiles = profiles;
  }

  @Override
  public int getProcessorId() {
    return this.processorId;
  }

  @Override
  protected void process(DistributionManager dm) {
    try {
      PartitionedRegion pr = PartitionedRegion.getPRFromId(this.prId);
      for (Map.Entry<Integer, BucketAdvisor.BucketProfile> profile : this.profiles.entrySet()) {
        pr.getRegionAdvisor().putBucketProfile(profile.getKey(), profile.getValue());
      }
    } catch (PRLocallyDestroyedException fre) {
      if (logger.isDebugEnabled()) logger.debug("<region locally destroyed> ///{}", this);
    } catch (RegionDestroyedException e) {
      if (logger.isDebugEnabled()) logger.debug("<region destroyed> ///{}", this);
    } catch (CancelException e) {
      if (logger.isDebugEnabled()) logger.debug("<cache closed> ///{}", this);
    } catch (VirtualMachineError err) {
      SystemFailure.initiateFailure(err);
      // If this ever returns, rethrow the error.  We're poisoned
      // now, so don't let this thread continue.
      throw err;
    } catch (Throwable ignore) {
      // Whenever you catch Error or Throwable, you must also
      // catch VirtualMachineError (see above).  However, there is
      // _still_ a possibility that you are dealing with a cascading
      // error condition, so you also need to check to see if the JVM
      // is still usable:
      SystemFailure.checkFailure();
    } finally {
      if (this.processorId != 0) {
        ReplyMessage.send(getSender(), this.processorId, null, dm);
      }
    }
  }

  /**
   * Send a profile update to a set of members.
   *
   * @param recipients the set of members to be notified
   * @param dm the distribution manager used to send the message
   * @param prId the unique partitioned region identifier
   * @param profiles bucket id to profile map
   * @param requireAck whether or not to expect a reply
   * @return a reply processor, if requireAck is true, on which the caller can wait until the event
   *     has finished; null otherwise
   */
  public static ReplyProcessor21 send(
      Set recipients,
      DM dm,
      int prId,
      Map<Integer, BucketAdvisor.BucketProfile> profiles,
      boolean requireAck) {
    if (recipients.isEmpty()) {
      return null;
    }
    ReplyProcessor21 rp = null;
    int procId = 0;
    if (requireAck) {
      rp = new ReplyProcessor21(dm, recipients);
      procId = rp.getProcessorId();
    }
    AllBucketProfilesUpdateMessage m =
        new AllBucketProfilesUpdateMessage(recipients, prId, procId, profiles);
    dm.putOutgoing(m);
    return rp;
  }

  public int getDSFID() {
    return PR_ALL_BUCKET_PROFILES_UPDATE_MESSAGE;
  }

  @Override
  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
    super.fromData(in);
    this.prId = in.readInt();
    this.processorId = in.readInt();
    this.profiles = DataSerializer.readObject(in);
  }

  @Override
  public void toData(DataOutput out) throws IOException {
    super.toData(out);
    out.writeInt(this.prId);
    out.writeInt(this.processorId);
    DataSerializer.writeObject(this.profiles, out);
  }
}
/**
 * Class <code>ChunkedMessage</code> is used to send messages from a server to a client divided into
 * chunks.
 *
 * <p>This class encapsulates the wire protocol. It provides accessors to encode and decode a
 * message and serialize it out to the wire.
 *
 * <PRE>
 *
 * msgType - int - 4 bytes type of message, types enumerated below
 *
 * numberOfParts - int - 4 bytes number of elements (LEN-BYTE* pairs) contained
 * in the payload. Message can be a multi-part message
 *
 * transId - int - 4 bytes filled in by the requestor, copied back into the
 * response len1 part1 . . . lenn partn
 *
 * </PRE>
 *
 * We read the fixed-length 12-byte header into a byte[] and populate a ByteBuffer. We then parse
 * the header tokens and use the information contained there to read the payload.
 *
 * <p>See also <a href="package-summary.html#messages">package description </a>.
 *
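 * <p>Sender-side sketch (hedged: {@code setMessageType}, {@code setTransactionId} and
 * {@code addObjPart} are inherited from {@link Message}; the exact call sequence used by the
 * server-side commands may differ):
 *
 * <pre>
 * chunkedMsg.setMessageType(MessageType.RESPONSE);
 * chunkedMsg.setTransactionId(clientMsg.getTransactionId());
 * chunkedMsg.sendHeader();         // 12-byte main header: msgType, numberOfParts, transId
 * chunkedMsg.addObjPart(results);  // one chunk's worth of data
 * chunkedMsg.setLastChunk(isLast); // 5-byte chunk header: length plus last-chunk flags
 * chunkedMsg.sendChunk(servConn);
 * </pre>
 *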
 * @see com.gemstone.gemfire.internal.cache.tier.MessageType
 * @author Barry Oglesby
 * @since 4.2
 */
public class ChunkedMessage extends Message {
  private static final Logger logger = LogService.getLogger();

  /**
   * The chunk header length. The 5-byte chunk header contains a 4-byte int chunk length and a
   * 1-byte last-chunk flag.
   */
  private static final int CHUNK_HEADER_LENGTH = 5;
  /** The main header length. The main header contains 3 4-byte ints */
  private static final int CHUNK_MSG_HEADER_LENGTH = 12;

  /** The chunk's payload length */
  protected int chunkLength;

  /** Whether this is the last chunk */
  protected byte lastChunk;

  //  /**
  //   * The main header length. The main header contains 3 4-byte ints
  //   */
  //  private static final int HEADER_LENGTH = 12;

  /**
   * Initially false; set to true once the message header is sent; set back to false when last chunk
   * is sent.
   */
  private transient boolean headerSent = false;

  @Override
  public String toString() {
    StringBuffer sb = new StringBuffer();

    sb.append(super.toString());
    sb.append("; chunkLength= " + chunkLength);
    sb.append("; lastChunk=" + lastChunk);
    return sb.toString();
  }

  /**
   * Creates a new message with the given number of parts
   *
   * @param numberOfParts The number of parts to create
   */
  public ChunkedMessage(int numberOfParts, Version version) {
    super(numberOfParts, version);
  }

  /**
   * Returns the header length.
   *
   * @return the header length
   */
  @Override
  public int getHeaderLength() {
    return CHUNK_MSG_HEADER_LENGTH;
  }

  /**
   * Sets whether this is the last chunk.
   *
   * @param lastChunk Whether this is the last chunk
   */
  public void setLastChunk(boolean lastChunk) {
    // TODO:hitesh now it should send security header(connectionID)
    if (lastChunk) {
      this.lastChunk = 0X01;
      setFESpecialCase();
    } else {
      this.lastChunk = 0X00;
    }
  }

  private void setFESpecialCase() {
    byte b = ServerConnection.isExecuteFunctionOnLocalNodeOnly();
    if ((b & 1) == 1) {
      // we are in special function execution case, where filter key is one only
      // now checking whether this function executed locally or not.
      // if not then inform client so that it will refresh pr-meta-data
      if (((b & 2) == 2)) {

        this.lastChunk |= 0x04; // setting third bit, we are okay
      }
    }
  }

  public void setLastChunkAndNumParts(boolean lastChunk, int numParts) {
    setLastChunk(lastChunk);
    if (this.sc != null && this.sc.getClientVersion().compareTo(Version.GFE_65) >= 0) {
      // we use three bits of the last chunk byte for the number of parts
      byte localLastChunk = (byte) (numParts << 5);
      this.lastChunk |= localLastChunk;
    }
  }

  public void setServerConnection(ServerConnection servConn) {
    this.sc = servConn;
  }

  /**
   * Answers whether this is the last chunk.
   *
   * @return whether this is the last chunk
   */
  public boolean isLastChunk() {
    if ((this.lastChunk & 0X01) == 0X01) {
      return true;
    }

    return false;
  }

  /**
   * Returns the chunk length.
   *
   * @return the chunk length
   */
  public int getChunkLength() {
    return this.chunkLength;
  }

  /** Populates the header with information received via socket */
  public void readHeader() throws IOException {
    if (this.socket != null) {
      final ByteBuffer cb = getCommBuffer();
      synchronized (cb) {
        fetchHeader();
        final int type = cb.getInt();
        final int numParts = cb.getInt();
        final int txid = cb.getInt();
        cb.clear();
        if (!MessageType.validate(type)) {
          throw new IOException(
              LocalizedStrings.ChunkedMessage_INVALID_MESSAGE_TYPE_0_WHILE_READING_HEADER
                  .toLocalizedString(Integer.valueOf(type)));
        }

        // Set the header and payload fields only after receiving all the
        // socket data, providing better message consistency in the face
        // of exceptional conditions (e.g. IO problems, timeouts etc.)
        this.msgType = type;
        this.numberOfParts = numParts; // Already set in setPayloadFields via setNumberOfParts
        this.transactionId = txid;
      }
    } else {
      throw new IOException(LocalizedStrings.ChunkedMessage_DEAD_CONNECTION.toLocalizedString());
    }
  }

  /** Reads a chunk of this message. */
  public void receiveChunk() throws IOException {
    if (this.socket != null) {
      synchronized (getCommBuffer()) {
        readChunk();
      }
    } else {
      throw new IOException(LocalizedStrings.ChunkedMessage_DEAD_CONNECTION.toLocalizedString());
    }
  }

  /** Reads a chunk of this message. */
  private void readChunk() throws IOException {
    final ByteBuffer cb = getCommBuffer();
    flush();
    cb.clear();
    int totalBytesRead = 0;
    do {
      // @TODO DARREL: add channel support
      int bytesRead = 0;
      try {
        bytesRead = is.read(cb.array(), totalBytesRead, CHUNK_HEADER_LENGTH - totalBytesRead);
      } catch (SocketTimeoutException e) {
        //          bytesRead = 0;
        // TODO add a cancellation check
        throw e;
      }
      if (bytesRead == -1) {
        throw new EOFException(
            LocalizedStrings.ChunkedMessage_CHUNK_READ_ERROR_CONNECTION_RESET.toLocalizedString());
      }
      totalBytesRead += bytesRead;
      if (this.msgStats != null) {
        this.msgStats.incReceivedBytes(bytesRead);
      }
    } while (totalBytesRead < CHUNK_HEADER_LENGTH);

    cb.rewind();

    // Set chunk length and last chunk
    this.chunkLength = cb.getInt();
    // setLastChunk(cb.get() == 0x01);
    byte lastChunk = cb.get();
    setLastChunk((lastChunk & 0x01) == 0x01);
    if ((lastChunk & 0x02) == 0x02) {
      this.securePart = new Part();
      if (logger.isDebugEnabled()) {
        logger.debug("ChunkedMessage.readChunk() securePart present");
      }
    }
    cb.clear();
    if ((lastChunk & 0x01) == 0x01) {
      int numParts = lastChunk >> 5;
      if (numParts > 0) {
        this.numberOfParts = numParts;
      }
    }
    readPayloadFields(this.numberOfParts, this.chunkLength);
  }

  /** Sends the header of this message. */
  public void sendHeader() throws IOException {
    if (this.socket != null) {
      synchronized (getCommBuffer()) {
        getHeaderBytesForWrite();
        flushBuffer();
        // Darrel says: I see no need for the following os.flush() call
        // so I've deadcoded it for performance.
        // this.os.flush();
      }
      this.currentPart = 0;
      this.headerSent = true;
    } else {
      throw new IOException(LocalizedStrings.ChunkedMessage_DEAD_CONNECTION.toLocalizedString());
    }
  }

  /** Return true if the header for this message has already been sent. */
  public boolean headerHasBeenSent() {
    return this.headerSent;
  }

  /** Sends a chunk of this message. */
  public void sendChunk() throws IOException {
    if (isLastChunk()) {
      this.headerSent = false;
    }
    sendBytes(true);
  }

  /** Sends a chunk of this message. */
  public void sendChunk(ServerConnection servConn) throws IOException {
    this.sc = servConn;
    if (isLastChunk()) {
      this.headerSent = false;
    }
    sendBytes(true);
  }

  @Override
  protected Part getSecurityPart() {
    if (this.isLastChunk()) return super.getSecurityPart();
    else return null;
  }

  @Override
  protected int checkAndSetSecurityPart() {
    return (this.securePart != null) ? 1 : 0;
  }

  @Override
  protected void packHeaderInfoForSending(int msgLen, boolean isSecurityHeader) {
    final ByteBuffer cb = getCommBuffer();
    cb.putInt(msgLen);
    byte isLastChunk = 0x00;
    if (isLastChunk()) {
      // isLastChunk = (byte) 0x01 ;
      isLastChunk = this.lastChunk;
      if (isSecurityHeader) {
        isLastChunk |= 0x02;
      }
    }
    // cb.put(isLastChunk() ? (byte) 0x01 : (byte) 0x00);
    cb.put(isLastChunk);
  }

  /**
   * Converts the header of this message into a <code>byte</code> array using a {@link ByteBuffer}.
   */
  protected void getHeaderBytesForWrite() {
    final ByteBuffer cb = getCommBuffer();
    cb.clear();
    cb.putInt(this.msgType);
    cb.putInt(this.numberOfParts);

    cb.putInt(this.transactionId);
  }
}
public abstract class AbstractDistributedRegionJUnitTest extends TestCase {
  protected static final Logger logger = LogService.getLogger();

  private RegionAttributes createRegionAttributes(boolean isConcurrencyChecksEnabled) {
    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.DISTRIBUTED_ACK);
    factory.setDataPolicy(DataPolicy.REPLICATE);
    factory.setConcurrencyChecksEnabled(isConcurrencyChecksEnabled); //
    RegionAttributes ra = factory.create();
    return ra;
  }

  private EventID createDummyEventID() {
    byte[] memId = {1, 2, 3};
    EventID eventId = new EventID(memId, 11, 12, 13);
    return eventId;
  }

  private EntryEventImpl createDummyEvent(DistributedRegion region) {
    // create a dummy event id
    EventID eventId = createDummyEventID();
    String key = "key1";
    String value = "Value1";

    // create an event
    EntryEventImpl event =
        EntryEventImpl.create(
            region,
            Operation.CREATE,
            key,
            value,
            null,
            false /* origin remote */,
            null,
            false /* generateCallbacks */,
            eventId);
    // avoid calling invokeCallbacks
    event.callbacksInvoked(true);

    return event;
  }

  private VersionTag createVersionTag(boolean validVersionTag) {
    InternalDistributedMember remotemember = mock(InternalDistributedMember.class);
    VersionTag tag = VersionTag.create(remotemember);
    if (validVersionTag) {
      tag.setRegionVersion(1);
      tag.setEntryVersion(1);
    }
    return tag;
  }

  private void doTest(DistributedRegion region, EntryEventImpl event, int cnt) {
    // do the virtualPut test
    verifyDistributeUpdate(region, event, cnt);

    // do the basicDestroy test
    verifyDistributeDestroy(region, event, cnt);

    // do the basicInvalidate test
    verifyDistributeInvalidate(region, event, cnt);

    // do the basicUpdateEntryVersion test
    verifyDistributeUpdateEntryVersion(region, event, cnt);
  }

  protected abstract void setInternalRegionArguments(InternalRegionArguments ira);

  protected abstract DistributedRegion createAndDefineRegion(
      boolean isConcurrencyChecksEnabled,
      RegionAttributes ra,
      InternalRegionArguments ira,
      GemFireCacheImpl cache);

  protected abstract void verifyDistributeUpdate(
      DistributedRegion region, EntryEventImpl event, int cnt);

  protected abstract void verifyDistributeDestroy(
      DistributedRegion region, EntryEventImpl event, int cnt);

  protected abstract void verifyDistributeInvalidate(
      DistributedRegion region, EntryEventImpl event, int cnt);

  protected abstract void verifyDistributeUpdateEntryVersion(
      DistributedRegion region, EntryEventImpl event, int cnt);

  protected DistributedRegion prepare(boolean isConcurrencyChecksEnabled) {
    GemFireCacheImpl cache = Fakes.cache();

    // create region attributes and internal region arguments
    RegionAttributes ra = createRegionAttributes(isConcurrencyChecksEnabled);
    InternalRegionArguments ira = new InternalRegionArguments();

    setInternalRegionArguments(ira);

    // create a region object
    DistributedRegion region = createAndDefineRegion(isConcurrencyChecksEnabled, ra, ira, cache);
    if (isConcurrencyChecksEnabled) {
      region.enableConcurrencyChecks();
    }

    doNothing().when(region).notifyGatewaySender(any(), any());
    doReturn(true).when(region).hasSeenEvent(any(EntryEventImpl.class));
    return region;
  }

  @Test
  public void testConcurrencyFalseTagNull() {
    // case 1: concurrencyChecksEnabled = false, version tag is null: distribute
    DistributedRegion region = prepare(false);
    EntryEventImpl event = createDummyEvent(region);
    assertNull(event.getVersionTag());
    doTest(region, event, 1);
  }

  @Test
  public void testConcurrencyTrueTagNull() {
    // case 2: concurrencyChecksEnabled = true, version tag is null: do not distribute
    DistributedRegion region = prepare(true);
    EntryEventImpl event = createDummyEvent(region);
    assertNull(event.getVersionTag());
    doTest(region, event, 0);
  }

  @Test
  public void testConcurrencyTrueTagInvalid() {
    // case 3: concurrencyChecksEnabled = true, version tag is invalid: do not distribute
    DistributedRegion region = prepare(true);
    EntryEventImpl event = createDummyEvent(region);
    VersionTag tag = createVersionTag(false);
    event.setVersionTag(tag);
    assertFalse(tag.hasValidVersion());
    doTest(region, event, 0);
  }

  @Test
  public void testConcurrencyTrueTagValid() {
    // case 4: concurrencyChecksEnabled = true, version tag is valid: distribute
    DistributedRegion region = prepare(true);
    EntryEventImpl event = createDummyEvent(region);
    VersionTag tag = createVersionTag(true);
    event.setVersionTag(tag);
    assertTrue(tag.hasValidVersion());
    doTest(region, event, 1);
  }
}
/**
 * ExpirationScheduler uses a single instance of java.util.Timer (and therefore a single thread) per
 * VM to schedule and execute region and entry expiration tasks.
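 *
 * <p>A scheduling sketch (hedged: construction of the {@code EntryExpiryTask} is elided because it
 * is normally done by the region's expiration machinery):
 *
 * <pre>
 * ExpirationScheduler scheduler = new ExpirationScheduler(ds);
 * EntryExpiryTask task = ...; // built by the region for a particular entry
 * boolean scheduled = scheduler.addEntryExpiryTask(task); // false if the task was already cancelled
 * </pre>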
 */
public class ExpirationScheduler {
  private static final Logger logger = LogService.getLogger();

  private final SystemTimer timer;
  private final AtomicInteger pendingCancels = new AtomicInteger();
  private static final int MAX_PENDING_CANCELS =
      Integer.getInteger("gemfire.MAX_PENDING_CANCELS", 10000).intValue();

  public ExpirationScheduler(InternalDistributedSystem ds) {
    this.timer = new SystemTimer(ds, true);
  }

  public void forcePurge() {
    pendingCancels.getAndSet(0);
    this.timer.timerPurge();
  }
  /**
   * Called when we have cancelled a scheduled timer task. Purges the timer once enough cancels
   * have accumulated (fix for bug 37574).
   */
  public void incCancels() {
    int pc = pendingCancels.incrementAndGet();
    if (pc > MAX_PENDING_CANCELS) {
      pc = pendingCancels.getAndSet(0);
      if (pc > MAX_PENDING_CANCELS) {
        this.timer.timerPurge();
        //        int purgedCancels = CFactory.timerPurge(this.timer);
        // we could try to do some fancy stuff here but the value
        // of the atomic is just a hint so don't bother adjusting it
        //         // take the diff between the number of actual cancels we purged
        //         // "purgedCancels" and the number we said we would purge "pc".
        //         int diff = purgedCancels - pc;
      } else {
        // some other thread beat us to it so add back in the cancels
        // we just removed by setting it to 0
        pendingCancels.addAndGet(pc);
      }
    }
  }

  /** schedules the given expiration task */
  public ExpiryTask addExpiryTask(ExpiryTask task) {
    try {
      if (logger.isTraceEnabled()) {
        logger.trace(
            LocalizedMessage.create(
                LocalizedStrings.ExpirationScheduler_SCHEDULING__0__TO_FIRE_IN__1__MS,
                new Object[] {task, Long.valueOf(task.getExpiryMillis())}));
      }
      // To fix bug 52267 do not create a Date here; instead calculate the relative duration.
      timer.schedule(task, task.getExpiryMillis());
    } catch (EntryNotFoundException e) {
      // ignore - there are unsynchronized paths that allow an entry to
      // be destroyed out from under us.
      return null;
    } catch (IllegalStateException e) {
      // task must have been cancelled by another thread so don't schedule it
      return null;
    }
    return task;
  }

  /** schedules the given entry expiration task and returns true; returns false if not scheduled */
  public boolean addEntryExpiryTask(EntryExpiryTask task) {
    return addExpiryTask(task) != null;
  }

  /** @see java.util.Timer#cancel() */
  public void cancel() {
    timer.cancel();
  }
}
/** Test behavior of region when running out of off-heap memory. */
@SuppressWarnings("serial")
public class OutOfOffHeapMemoryDUnitTest extends CacheTestCase {
  private static final Logger logger = LogService.getLogger();

  protected static final AtomicReference<Cache> cache = new AtomicReference<Cache>();
  protected static final AtomicReference<DistributedSystem> system =
      new AtomicReference<DistributedSystem>();
  protected static final AtomicBoolean isSmallerVM = new AtomicBoolean();

  public OutOfOffHeapMemoryDUnitTest(String name) {
    super(name);
  }

  @Override
  public final void preSetUp() throws Exception {
    disconnectAllFromDS();
  }

  @Override
  public final void postSetUp() throws Exception {
    IgnoredException.addIgnoredException(OutOfOffHeapMemoryException.class.getSimpleName());
  }

  @Override
  public final void preTearDownCacheTestCase() throws Exception {
    final SerializableRunnable checkOrphans =
        new SerializableRunnable() {
          @Override
          public void run() {
            if (hasCache()) {
              OffHeapTestUtil.checkOrphans();
            }
          }
        };
    Invoke.invokeInEveryVM(checkOrphans);
    checkOrphans.run();
  }

  @SuppressWarnings("unused") // invoked by reflection from tearDown2()
  private static void cleanup() {
    disconnectFromDS();
    MemoryAllocatorImpl.freeOffHeapMemory();
    cache.set(null);
    system.set(null);
    isSmallerVM.set(false);
  }

  protected String getOffHeapMemorySize() {
    return "2m";
  }

  protected String getSmallerOffHeapMemorySize() {
    return "1m";
  }

  protected RegionShortcut getRegionShortcut() {
    return RegionShortcut.REPLICATE;
  }

  protected String getRegionName() {
    return "region1";
  }

  @Override
  public Properties getDistributedSystemProperties() {
    final Properties props = new Properties();
    props.put(DistributionConfig.MCAST_PORT_NAME, "0");
    props.put(DistributionConfig.STATISTIC_SAMPLING_ENABLED_NAME, "true");
    if (isSmallerVM.get()) {
      props.setProperty(
          DistributionConfig.OFF_HEAP_MEMORY_SIZE_NAME, getSmallerOffHeapMemorySize());
    } else {
      props.setProperty(DistributionConfig.OFF_HEAP_MEMORY_SIZE_NAME, getOffHeapMemorySize());
    }
    return props;
  }

  public void testSimpleOutOfOffHeapMemoryMemberDisconnects() {
    final DistributedSystem system = getSystem();
    final Cache cache = getCache();
    final DistributionManager dm =
        (DistributionManager) ((InternalDistributedSystem) system).getDistributionManager();

    Region<Object, Object> region =
        cache.createRegionFactory(getRegionShortcut()).setOffHeap(true).create(getRegionName());
    OutOfOffHeapMemoryException ooohme;
    try {
      Object value = new byte[1024];
      for (int i = 0; true; i++) {
        region.put("key-" + i, value);
      }
    } catch (OutOfOffHeapMemoryException e) {
      ooohme = e;
    }
    assertNotNull(ooohme);

    with()
        .pollInterval(100, TimeUnit.MILLISECONDS)
        .await()
        .atMost(10, TimeUnit.SECONDS)
        .until(() -> cache.isClosed() && !system.isConnected() && dm.isClosed());

    // wait for cache instance to be nulled out
    with()
        .pollInterval(100, TimeUnit.MILLISECONDS)
        .await()
        .atMost(10, TimeUnit.SECONDS)
        .until(
            () ->
                GemFireCacheImpl.getInstance() == null
                    && InternalDistributedSystem.getAnyInstance() == null);

    assertNull(GemFireCacheImpl.getInstance());

    // verify system was closed out due to OutOfOffHeapMemoryException
    assertFalse(system.isConnected());
    InternalDistributedSystem ids = (InternalDistributedSystem) system;
    try {
      ids.getDistributionManager();
      fail(
          "InternalDistributedSystem.getDistributionManager() should throw DistributedSystemDisconnectedException");
    } catch (DistributedSystemDisconnectedException expected) {
      assertRootCause(expected, OutOfOffHeapMemoryException.class);
    }

    // verify dm was closed out due to OutOfOffHeapMemoryException
    assertTrue(dm.isClosed());
    try {
      dm.throwIfDistributionStopped();
      fail(
          "DistributionManager.throwIfDistributionStopped() should throw DistributedSystemDisconnectedException");
    } catch (DistributedSystemDisconnectedException expected) {
      assertRootCause(expected, OutOfOffHeapMemoryException.class);
    }

    // verify cache was closed out due to OutOfOffHeapMemoryException
    assertTrue(cache.isClosed());
    try {
      cache.getCancelCriterion().checkCancelInProgress(null);
      fail(
          "GemFireCacheImpl.getCancelCriterion().checkCancelInProgress should throw DistributedSystemDisconnectedException");
    } catch (DistributedSystemDisconnectedException expected) {
      assertRootCause(expected, OutOfOffHeapMemoryException.class);
    }
  }

  private void assertRootCause(Throwable throwable, Class<?> expected) {
    boolean passed = false;
    Throwable cause = throwable.getCause();
    while (cause != null) {
      if (cause.getClass().equals(expected)) {
        passed = true;
        break;
      }
      cause = cause.getCause();
    }
    if (!passed) {
      throw new AssertionError(
          "Throwable does not contain expected root cause " + expected, throwable);
    }
  }

  public void testOtherMembersSeeOutOfOffHeapMemoryMemberDisconnects() {
    final int vmCount = Host.getHost(0).getVMCount();

    final String name = getRegionName();
    final RegionShortcut shortcut = getRegionShortcut();
    final int biggerVM = 0;
    final int smallerVM = 1;

    Host.getHost(0)
        .getVM(smallerVM)
        .invoke(
            new SerializableRunnable() {
              public void run() {
                OutOfOffHeapMemoryDUnitTest.isSmallerVM.set(true);
              }
            });

    // create off-heap region in all members
    for (int i = 0; i < vmCount; i++) {
      Host.getHost(0)
          .getVM(i)
          .invoke(
              new SerializableRunnable() {
                public void run() {
                  OutOfOffHeapMemoryDUnitTest.cache.set(getCache());
                  OutOfOffHeapMemoryDUnitTest.system.set(getSystem());

                  final Region<Object, Object> region =
                      OutOfOffHeapMemoryDUnitTest.cache
                          .get()
                          .createRegionFactory(shortcut)
                          .setOffHeap(true)
                          .create(name);
                  assertNotNull(region);
                }
              });
    }

    // make sure there are vmCount+1 members total
    for (int i = 0; i < vmCount; i++) {
      Host.getHost(0)
          .getVM(i)
          .invoke(
              new SerializableRunnable() {
                public void run() {
                  assertFalse(OutOfOffHeapMemoryDUnitTest.cache.get().isClosed());
                  assertTrue(OutOfOffHeapMemoryDUnitTest.system.get().isConnected());

                  final int countMembersPlusLocator = vmCount + 1; // +1 for locator
                  final int countOtherMembers = vmCount - 1; // -1 one for self

                  assertEquals(
                      countMembersPlusLocator,
                      ((InternalDistributedSystem) OutOfOffHeapMemoryDUnitTest.system.get())
                          .getDistributionManager()
                          .getDistributionManagerIds()
                          .size());
                  assertEquals(
                      countOtherMembers,
                      ((DistributedRegion) OutOfOffHeapMemoryDUnitTest.cache.get().getRegion(name))
                          .getDistributionAdvisor()
                          .getNumProfiles());
                }
              });
    }

    // perform puts in bigger member until smaller member goes OOOHME
    Host.getHost(0)
        .getVM(biggerVM)
        .invoke(
            new SerializableRunnable() {
              public void run() {
                final long TIME_LIMIT = 30 * 1000;
                final StopWatch stopWatch = new StopWatch(true);

                int countOtherMembers = vmCount - 1; // -1 for self
                final int countOtherMembersMinusSmaller =
                    vmCount - 1 - 1; // -1 for self, -1 for smallerVM

                final Region<Object, Object> region =
                    OutOfOffHeapMemoryDUnitTest.cache.get().getRegion(name);
                for (int i = 0; countOtherMembers > countOtherMembersMinusSmaller; i++) {
                  region.put("key-" + i, new byte[1024]);
                  countOtherMembers =
                      ((DistributedRegion) OutOfOffHeapMemoryDUnitTest.cache.get().getRegion(name))
                          .getDistributionAdvisor()
                          .getNumProfiles();
                  assertTrue(
                      "puts failed to push member out of off-heap memory within time limit",
                      stopWatch.elapsedTimeMillis() < TIME_LIMIT);
                }
                assertEquals(
                    "Member did not depart from OutOfOffHeapMemory",
                    countOtherMembersMinusSmaller,
                    countOtherMembers);
              }
            });

    // verify that member with OOOHME closed
    Host.getHost(0)
        .getVM(smallerVM)
        .invoke(
            new SerializableRunnable() {
              public void run() {
                assertTrue(OutOfOffHeapMemoryDUnitTest.cache.get().isClosed());
                assertFalse(OutOfOffHeapMemoryDUnitTest.system.get().isConnected());
              }
            });

    // verify that all other members noticed smaller member closed
    for (int i = 0; i < vmCount; i++) {
      if (i == smallerVM) {
        continue;
      }
      Host.getHost(0)
          .getVM(i)
          .invoke(
              new SerializableRunnable() {
                public void run() {
                  final int countMembersPlusLocator =
                      vmCount + 1 - 1; // +1 for locator, -1 for OOOHME member
                  final int countOtherMembers =
                      vmCount - 1 - 1; // -1 for self, -1 for OOOHME member

                  with()
                      .pollInterval(10, TimeUnit.MILLISECONDS)
                      .await()
                      .atMost(30, TimeUnit.SECONDS)
                      .until(numDistributionManagers(), equalTo(countMembersPlusLocator));
                  with()
                      .pollInterval(10, TimeUnit.MILLISECONDS)
                      .await()
                      .atMost(30, TimeUnit.SECONDS)
                      .until(numProfiles(), equalTo(countOtherMembers));
                }

                private Callable<Integer> numProfiles() {
                  return () -> {
                    DistributedRegion dr =
                        (DistributedRegion) OutOfOffHeapMemoryDUnitTest.cache.get().getRegion(name);
                    return dr.getDistributionAdvisor().getNumProfiles();
                  };
                }

                private Callable<Integer> numDistributionManagers() {
                  return () -> {
                    InternalDistributedSystem ids =
                        (InternalDistributedSystem) OutOfOffHeapMemoryDUnitTest.system.get();
                    return ids.getDistributionManager().getDistributionManagerIds().size();
                  };
                }
              });
    }
  }
}
/**
 * This operation ensures that a particular member has seen all state changes for a Region prior to
 * a point in time. Currently this is fixed at the time the member using this operation exchanged
 * profiles with other users of the Region, and is useful only for ensuring consistency for
 * InitialImageOperation.
 *
 * <p>StateFlushOperation works with distribution advisors and with the membership manager to flush
 * cache operations from threads to communications channels and then from the communications
 * channels to the cache of the member selected to be an initial image provider.
 *
 * <p>To make an operation subject to StateFlushOperation you must encapsulate the message part of
 * the operation (prior to asking for distribution advice) in a try/finally block. The try/finally
 * block must work with the distribution manager like this:
 *
 * <pre>
 * try {
 *   long version = advisor.startOperation();
 *   ... get advice and write the message (dm.putOutgoing())
 *   advisor.endOperation(version);
 *   version = -1;
 *   ... wait for replies, etc.
 * } finally {
 *   if (version >= 0) {
 *     advisor.endOperation(version);
 *   }
 * }
 * </pre>
 *
 * On the receiving side the messaging system will look at the result of invoking
 * containsCacheContentChange() on the message. If the message does not return true from this
 * method then state-flush will not wait for it to be applied to the cache before GII starts.
 *
 * <pre>
 * \@Override
 * public boolean containsCacheContentChange() {
 *   return true;
 * }
 * </pre>
 *
 * The messaging infrastructure will handle the rest for you. For examples look at the uses of
 * startOperation() and endOperation(). There are some complex examples in transaction processing
 * and a more straightforward example in DistributedCacheOperation.
 *
 * @author Bruce Schuchardt
 * @since 5.0.1
 */
public class StateFlushOperation {

  private static final Logger logger = LogService.getLogger();

  private DistributedRegion region;

  private DM dm;

  /** flush current ops to the given members for the given region */
  public static void flushTo(Set<InternalDistributedMember> targets, DistributedRegion region) {
    DM dm = region.getDistributionManager();
    DistributedRegion r = region;
    boolean initialized = r.isInitialized();
    if (initialized) {
      r.getDistributionAdvisor()
          .forceNewMembershipVersion(); // force a new "view" so we can track current ops
      try {
        r.getDistributionAdvisor().waitForCurrentOperations();
      } catch (RegionDestroyedException e) {
        return;
      }
    }
    // send all state-flush messages and then wait for replies
    Set<ReplyProcessor21> processors = new HashSet<ReplyProcessor21>();
    for (InternalDistributedMember target : targets) {
      StateStabilizationMessage gr = new StateStabilizationMessage();
      gr.isSingleFlushTo = true; // new for flushTo operation
      gr.requestingMember = dm.getDistributionManagerId();
      gr.setRecipient(target);
      ReplyProcessor21 processor = new ReplyProcessor21(dm, target);
      gr.processorId = processor.getProcessorId();
      gr.channelState = dm.getMembershipManager().getMessageState(target, false);
      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)
          && ((gr.channelState != null) && (gr.channelState.size() > 0))) {
        logger.trace(
            LogMarker.STATE_FLUSH_OP,
            "channel states: {}",
            gr.channelStateDescription(gr.channelState));
      }
      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
        logger.trace(LogMarker.STATE_FLUSH_OP, "Sending {}", gr);
      }
      dm.putOutgoing(gr);
      processors.add(processor);
    }
    for (ReplyProcessor21 processor : processors) {
      try {
        processor.waitForReplies();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
      }
    }
  }

  /**
   * Constructor for StateFlushOperation
   *
   * @param r The region whose state is to be flushed
   */
  public StateFlushOperation(DistributedRegion r) {
    this.region = r;
    this.dm = r.getDistributionManager();
  }

  /**
   * Constructor for StateFlushOperation for flushing all regions
   *
   * @param dm the distribution manager to use in distributing the operation
   */
  public StateFlushOperation(DM dm) {
    this.dm = dm;
  }

  /**
   * flush state to the given target
   *
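   * <p>A caller-side sketch (hedged: the recipient set is assumed to come from the region's
   * CacheDistributionAdvisor, as described in the class documentation):
   *
   * <pre>
   * StateFlushOperation sfo = new StateFlushOperation(region);
   * boolean flushed =
   *     sfo.flush(recipients, targetMember, DistributionManager.WAITING_POOL_EXECUTOR, false);
   * </pre>
   *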
   * @param recipients The members who may be making state changes to the region. This is typically
   *     taken from a CacheDistributionAdvisor membership set
   * @param target The member who should have all state flushed to it
   * @param processorType The execution processor type for the marker message that is sent to all
   *     members using the given region
   * @param flushNewOps normally only ops that were started before region profile exchange are
   *     flushed. Setting this to true causes the flush to wait for any started after the profile
   *     exchange as well.
   * @throws InterruptedException If the operation is interrupted, usually for shutdown, an
   *     InterruptedException will be thrown
   * @return true if the state was flushed, false if not
   */
  public boolean flush(
      Set recipients, DistributedMember target, int processorType, boolean flushNewOps)
      throws InterruptedException {

    Set recips = recipients; // do not use recipients parameter past this point
    if (Thread.interrupted()) {
      throw new InterruptedException();
    }

    InternalDistributedMember myId = this.dm.getDistributionManagerId();

    if (!recips.contains(target) && !myId.equals(target)) {
      recips = new HashSet(recipients);
      recips.add(target);
    }
    // partial fix for bug 38773 - ensures that this cache will get both
    // a cache op and an adjunct message when creating a bucket region
    //    if (recips.size() < 2 && !myId.equals(target)) {
    //      return true; // no state to flush to a single holder of the region
    //    }
    StateMarkerMessage smm = new StateMarkerMessage();
    smm.relayRecipient = target;
    smm.processorType = processorType;
    smm.flushNewOps = flushNewOps;
    if (region == null) {
      smm.allRegions = true;
    } else {
      smm.regionPath = region.getFullPath();
    }
    smm.setRecipients(recips);

    StateFlushReplyProcessor gfprocessor = new StateFlushReplyProcessor(dm, recips, target);
    smm.processorId = gfprocessor.getProcessorId();
    if (region != null
        && region.isUsedForPartitionedRegionBucket()
        && region.getDistributionConfig().getAckSevereAlertThreshold() > 0) {
      smm.severeAlertEnabled = true;
      gfprocessor.enableSevereAlertProcessing();
    }
    if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
      logger.trace(LogMarker.STATE_FLUSH_OP, "Sending {} with processor {}", smm, gfprocessor);
    }
    Set failures = this.dm.putOutgoing(smm);
    if (failures != null) {
      if (failures.contains(target)) {
        if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
          logger.trace(
              LogMarker.STATE_FLUSH_OP,
              "failed to send StateMarkerMessage to target {}; returning from flush without waiting for replies",
              target);
        }
        return false;
      }
      gfprocessor.messageNotSentTo(failures);
    }

    try {
      //      try { Thread.sleep(100); } catch (InterruptedException e) {
      // Thread.currentThread().interrupt(); } // DEBUGGING - stall before getting membership to
      // increase odds that target has left
      gfprocessor.waitForReplies();
      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
        logger.trace(LogMarker.STATE_FLUSH_OP, "Finished processing {}", smm);
      }
    } catch (ReplyException re) {
      logger.warn(
          LocalizedMessage.create(
              LocalizedStrings.StateFlushOperation_STATE_FLUSH_TERMINATED_WITH_EXCEPTION),
          re);
      return false;
    }
    return true;
  }

  /**
   * This message is sent, e.g., before requesting an initial image from a single provider. It is
   * sent to all members holding the region, and has the effect of causing those members to send a
   * serial distribution message (a StateStabilizationMessage) to the image provider. The provider
   * then sends a reply message back to this process on behalf of the member receiving the
   * StateStabilizationMessage.
   *
   * <pre>
   * requestor ----> member1 --StateStabilizationMessage--> provider --StateStabilizedMessage--> requestor
   *           ----> member2 --StateStabilizationMessage--> provider --StateStabilizedMessage--> requestor
   *           ----> provider --StateStabilizedMessage--> requestor
   * </pre>
   *
   * This flushes the ordered messages in flight between members and the gii provider, so we don't
   * miss data when the image is requested.
   *
   * @author bruce
   * @since 5.0.1
   * @see StateFlushOperation.StateStabilizationMessage
   * @see StateFlushOperation.StateStabilizedMessage
   */
  public static final class StateMarkerMessage extends DistributionMessage
      implements MessageWithReply {
    /** roll the membership version to force flushing of new ops */
    public boolean flushNewOps;
    /** the member acting as the relay point */
    protected DistributedMember relayRecipient;
    /** the reply processor identity */
    protected int processorId;
    /** the type of executor to use */
    protected int processorType;
    /** the target region's full path */
    protected String regionPath;
    /** the associated Region */
    protected DistributedRegion region;
    /** whether to enable severe alert processing */
    protected transient boolean severeAlertEnabled;
    /**
     * whether all regions must be flushed to the relay target. If this is true, then regionPath may
     * be null.
     */
    protected boolean allRegions;

    public StateMarkerMessage() {
      super();
    }

    @Override
    public int getProcessorId() {
      return this.processorId;
    }

    @Override
    public final int getProcessorType() {
      return processorType;
    }

    private DistributedRegion getRegion(DistributionManager dm) {
      if (region != null) {
        return region;
      }
      // set the init level requirement so that we don't hang in CacheFactory.getInstance() (bug
      // 36175)
      int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.BEFORE_INITIAL_IMAGE);
      try {
        GemFireCacheImpl gfc = (GemFireCacheImpl) CacheFactory.getInstance(dm.getSystem());
        Region r = gfc.getRegionByPathForProcessing(this.regionPath);
        if (r instanceof DistributedRegion) {
          region = (DistributedRegion) r;
        }
      } finally {
        LocalRegion.setThreadInitLevelRequirement(oldLevel);
      }
      return region;
    }

    /** returns a set of all DistributedRegions for allRegions processing */
    private Set<DistributedRegion> getAllRegions(DistributionManager dm) {
      // set the init level requirement so that we don't hang in CacheFactory.getInstance() (bug
      // 36175)
      int oldLevel = LocalRegion.setThreadInitLevelRequirement(LocalRegion.BEFORE_INITIAL_IMAGE);
      try {
        GemFireCacheImpl gfc = (GemFireCacheImpl) CacheFactory.getInstance(dm.getSystem());
        Set<DistributedRegion> result = new HashSet();
        for (LocalRegion r : gfc.getAllRegions()) {
          // it's important not to check if the cache is closing, so access
          // the isDestroyed boolean directly
          if (r instanceof DistributedRegion && !r.isDestroyed) {
            result.add((DistributedRegion) r);
          }
        }
        return result;
      } finally {
        LocalRegion.setThreadInitLevelRequirement(oldLevel);
      }
    }

    @Override
    protected void process(DistributionManager dm) {
      logger.trace(LogMarker.STATE_FLUSH_OP, "Processing {}", this);
      if (dm.getDistributionManagerId().equals(relayRecipient)) {
        // no need to send a relay request to this process - just send the
        // ack back to the sender
        StateStabilizedMessage ga = new StateStabilizedMessage();
        ga.sendingMember = relayRecipient;
        ga.setRecipient(this.getSender());
        ga.setProcessorId(processorId);
        dm.putOutgoing(ga);
      } else {
        // 1) wait for all messages based on the membership version (or older)
        //    at which the sender "joined" this region to be put on the pipe
        // 2) record the state of all communication channels from this process
        //    to the relay point
        // 3) send a stabilization message to the relay point that holds the
        //    communication channel state information
        StateStabilizationMessage gr = new StateStabilizationMessage();
        gr.setRecipient((InternalDistributedMember) relayRecipient);
        gr.requestingMember = this.getSender();
        gr.processorId = processorId;
        try {
          Set<DistributedRegion> regions;
          if (this.allRegions) {
            regions = getAllRegions(dm);
          } else {
            regions = Collections.singleton(this.getRegion(dm));
          }
          for (DistributedRegion r : regions) {
            if (r == null) {
              if (logger.isTraceEnabled(LogMarker.DM)) {
                logger.trace(LogMarker.DM, "Region not found - skipping channel state assessment");
              }
            }
            if (r != null) {
              if (this.allRegions && r.doesNotDistribute()) {
                // no need to flush a region that does no distribution
                continue;
              }
              boolean initialized = r.isInitialized();
              if (initialized) {
                if (this.flushNewOps) {
                  r.getDistributionAdvisor()
                      .forceNewMembershipVersion(); // force a new "view" so we can track current
                  // ops
                }
                try {
                  r.getDistributionAdvisor().waitForCurrentOperations();
                } catch (RegionDestroyedException e) {
                  // continue with the next region
                }
              }
              boolean useMulticast =
                  r.getMulticastEnabled() && r.getSystem().getConfig().getMcastPort() != 0;
              if (initialized) {
                Map channelStates =
                    dm.getMembershipManager().getMessageState(relayRecipient, useMulticast);
                if (gr.channelState != null) {
                  gr.channelState.putAll(channelStates);
                } else {
                  gr.channelState = channelStates;
                }
                if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)
                    && ((gr.channelState != null) && (gr.channelState.size() > 0))) {
                  logger.trace(
                      LogMarker.STATE_FLUSH_OP,
                      "channel states: {}",
                      gr.channelStateDescription(gr.channelState));
                }
              }
            }
          }
        } catch (CancelException cce) {
          // cache is closed - no distribution advisor available for the region so nothing to do but
          // send the stabilization message
        } catch (Exception e) {
          logger.fatal(
              LocalizedMessage.create(
                  LocalizedStrings
                      .StateFlushOperation_0__EXCEPTION_CAUGHT_WHILE_DETERMINING_CHANNEL_STATE,
                  this),
              e);
        } catch (ThreadDeath td) {
          throw td;
        } catch (VirtualMachineError err) {
          SystemFailure.initiateFailure(err);
          // If this ever returns, rethrow the error.  We're poisoned
          // now, so don't let this thread continue.
          throw err;
        } catch (Throwable t) {
          // Whenever you catch Error or Throwable, you must also
          // catch VirtualMachineError (see above).  However, there is
          // _still_ a possibility that you are dealing with a cascading
          // error condition, so you also need to check to see if the JVM
          // is still usable:
          SystemFailure.checkFailure();
          logger.fatal(
              LocalizedMessage.create(
                  LocalizedStrings
                      .StateFlushOperation_0__THROWABLE_CAUGHT_WHILE_DETERMINING_CHANNEL_STATE,
                  this),
              t);
        } finally {
          if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
            logger.trace(LogMarker.STATE_FLUSH_OP, "Sending {}", gr);
          }
          dm.putOutgoing(gr);
        }
      }
    }

    @Override
    public void toData(DataOutput dout) throws IOException {
      super.toData(dout);
      DataSerializer.writeObject(relayRecipient, dout);
      dout.writeInt(processorId);
      dout.writeInt(processorType);
      dout.writeBoolean(allRegions);
      if (!allRegions) {
        DataSerializer.writeString(regionPath, dout);
      }
    }

    public int getDSFID() {
      return STATE_MARKER_MESSAGE;
    }

    @Override
    public void fromData(DataInput din) throws IOException, ClassNotFoundException {
      super.fromData(din);
      relayRecipient = (DistributedMember) DataSerializer.readObject(din);
      processorId = din.readInt();
      processorType = din.readInt();
      allRegions = din.readBoolean();
      if (!allRegions) {
        regionPath = DataSerializer.readString(din);
      }
    }

    @Override
    public String toString() {
      return "StateMarkerMessage(requestingMember="
          + this.getSender()
          + ",processorId="
          + processorId
          + ",target="
          + relayRecipient
          + ",region="
          + regionPath
          + ")";
    }

    @Override
    public boolean isSevereAlertCompatible() {
      return severeAlertEnabled;
    }
  }

  /**
   * StateStabilizationMessage is sent by a distributed member to a member who is the target of a
   * state flush. The target then sends a StateStabilizedMessage to the sender of the
   * StateStabilizationMessage when all state has been flushed to it.
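   *
   * <p>A rough sketch of the exchange as implemented in this file (R = requestor, M = a member
   * with in-flight operations, T = the flush target / relay):
   *
   * <pre>
   *   R  -- StateMarkerMessage -------->  M
   *   M  -- StateStabilizationMessage ->  T   (after M waits for its current operations)
   *   T  -- StateStabilizedMessage ---->  R   (after T receives M's recorded channel state)
   * </pre>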
   *
   * @author bruce
   *
   * @see StateFlushOperation.StateStabilizedMessage
   * @see StateFlushOperation.StateMarkerMessage
   * @since 5.0.1
   */
  public static final class StateStabilizationMessage extends SerialDistributionMessage {
    /** the member that requested StateStabilizedMessages */
    protected DistributedMember requestingMember;
    /** the processor id for the requesting member */
    protected int processorId;
    /**
     * a map of the communication channel state between the sending process and the receiving
     * process
     */
    protected Map channelState;
    /** true if this is a simple two-party request/response flush; false if the flush is proxied through a relay member */
    protected boolean isSingleFlushTo;

    public StateStabilizationMessage() {
      super();
    }

    public String channelStateDescription(Object state) {
      if (!(state instanceof Map)) {
        return "unknown channelState content";
      } else {
        Map csmap = (Map) state;
        StringBuffer result = new StringBuffer(200);
        for (Iterator it = csmap.entrySet().iterator(); it.hasNext(); ) {
          Map.Entry entry = (Map.Entry) it.next();
          result.append(entry.getKey()).append('=').append(entry.getValue());
          if (it.hasNext()) {
            result.append(", ");
          }
        }
        return result.toString();
      }
    }

    @Override
    protected void process(final DistributionManager dm) {
      // though this message must be transmitted on an ordered connection to
      // ensure that datagram channnels are flushed, we need to execute
      // in the waiting pool to avoid blocking those connections
      dm.getWaitingThreadPool()
          .execute(
              new Runnable() {
                public void run() {
                  if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
                    logger.trace(LogMarker.STATE_FLUSH_OP, "Processing {}", this);
                  }
                  try {
                    if (channelState != null) {
                      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)
                          && channelState.size() > 0) {
                        logger.trace(
                            LogMarker.STATE_FLUSH_OP,
                            "Waiting for channel states:  {}",
                            channelStateDescription(channelState));
                      }
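                      // Spin until the membership manager confirms that the recorded channel
                      // state has been reached, preserving this thread's interrupt status and
                      // relying on the cancel criterion to detect shutdown.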
                      for (; ; ) {
                        dm.getCancelCriterion().checkCancelInProgress(null);
                        boolean interrupted = Thread.interrupted();
                        try {
                          dm.getMembershipManager().waitForMessageState(getSender(), channelState);
                          break;
                        } catch (InterruptedException e) {
                          interrupted = true;
                        } finally {
                          if (interrupted) {
                            Thread.currentThread().interrupt();
                          }
                        }
                      } // for
                    }
                  } catch (ThreadDeath td) {
                    throw td;
                  } catch (VirtualMachineError err) {
                    SystemFailure.initiateFailure(err);
                    // If this ever returns, rethrow the error.  We're poisoned
                    // now, so don't let this thread continue.
                    throw err;
                  } catch (Throwable e) {
                    // Whenever you catch Error or Throwable, you must also
                    // catch VirtualMachineError (see above).  However, there is
                    // _still_ a possibility that you are dealing with a cascading
                    // error condition, so you also need to check to see if the JVM
                    // is still usable:
                    SystemFailure.checkFailure();
                    logger.fatal(
                        LocalizedMessage.create(
                            LocalizedStrings
                                .StateFlushOperation_EXCEPTION_CAUGHT_WHILE_WAITING_FOR_CHANNEL_STATE),
                        e);
                  } finally {
                    StateStabilizedMessage ga = new StateStabilizedMessage();
                    ga.setRecipient((InternalDistributedMember) requestingMember);
                    if (isSingleFlushTo) {
                      // not a proxied message but a simple request-response
                      ga.sendingMember = dm.getDistributionManagerId();
                    } else {
                      ga.sendingMember = getSender();
                    }
                    ga.setProcessorId(processorId);
                    if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
                      logger.trace(LogMarker.STATE_FLUSH_OP, "Sending {}", ga);
                    }
                    if (requestingMember.equals(dm.getDistributionManagerId())) {
                      ga.dmProcess(dm);
                    } else {
                      dm.putOutgoing(ga);
                    }
                  }
                }
              });
    }

    @Override
    public void toData(DataOutput dout) throws IOException {
      super.toData(dout);
      dout.writeInt(processorId);
      DataSerializer.writeHashMap(channelState, dout);
      DataSerializer.writeObject(requestingMember, dout);
      dout.writeBoolean(this.isSingleFlushTo);
    }

    public int getDSFID() {
      return STATE_STABILIZATION_MESSAGE;
    }

    @Override
    public void fromData(DataInput din) throws IOException, ClassNotFoundException {
      super.fromData(din);
      processorId = din.readInt();
      channelState = DataSerializer.readHashMap(din);
      requestingMember = (DistributedMember) DataSerializer.readObject(din);
      this.isSingleFlushTo = din.readBoolean();
    }

    @Override
    public String toString() {
      return "StateStabilizationMessage(recipients="
          + getRecipientsDescription()
          + ",requestingMember="
          + requestingMember
          + ",processorId="
          + processorId
          + ")";
    }
  }

  /**
   * StateStabilizedMessage is sent from a VM that will provide an initial image and is part of a
   * higher-order protocol that is intended to force data in serial execution queues to be processed
   * before the initial image is requested.
   *
   * @author bruce
   *
   * @see StateFlushOperation.StateMarkerMessage
   * @see StateFlushOperation.StateStabilizationMessage
   * @since 5.0.1
   */
  public static final class StateStabilizedMessage extends ReplyMessage {
    /** the member for whom this ack is being sent */
    protected DistributedMember sendingMember;

    public StateStabilizedMessage() {
      super();
    }

    // overridden to spoof the source of the message
    @Override
    public InternalDistributedMember getSender() {
      return (InternalDistributedMember) this.sendingMember;
    }

    @Override
    public void process(final DM dm, final ReplyProcessor21 processor) {
      if (logger.isTraceEnabled(LogMarker.STATE_FLUSH_OP)) {
        logger.trace(LogMarker.STATE_FLUSH_OP, "Processing {}", this);
      }
      super.process(dm, processor);
    }

    @Override
    public void toData(DataOutput dout) throws IOException {
      super.toData(dout);
      DataSerializer.writeObject(sendingMember, dout);
    }

    @Override
    public int getDSFID() {
      return STATE_STABILIZED_MESSAGE;
    }

    @Override
    public void fromData(DataInput din) throws IOException, ClassNotFoundException {
      super.fromData(din);
      sendingMember = (DistributedMember) DataSerializer.readObject(din);
    }

    @Override
    public String toString() {
      StringBuffer sb = new StringBuffer();
      sb.append("StateStabilizedMessage ");
      sb.append(this.processorId);
      if (super.getSender() != null) {
        sb.append(" from ");
        sb.append(super.getSender());
      }
      if (getRecipients().length > 0) {
        String recip = getRecipientsDescription();
        sb.append(" to ");
        sb.append(recip);
      }
      sb.append(" on behalf of ");
      sb.append(sendingMember);
      ReplyException ex = this.getException();
      if (ex != null) {
        sb.append(" with exception ");
        sb.append(ex);
      }

      return sb.toString();
    }
  }

  /**
   * StateFlushReplyProcessor waits for proxy acks (StateStabilizedMessages) from the target vm. If
   * the target vm goes away, this processor wakes up immediately
   */
  public static class StateFlushReplyProcessor extends ReplyProcessor21 {

    /** the target of the StateFlushOperation */
    InternalDistributedMember targetMember;

    int originalCount;

    /** whether the target member has left the distributed system */
    boolean targetMemberHasLeft;

    public StateFlushReplyProcessor(DM manager, Set initMembers, DistributedMember target) {
      super(manager, initMembers);
      this.targetMember = (InternalDistributedMember) target;
      this.originalCount = initMembers.size();
      // bug #43583 - perform an initial membership check
      this.targetMemberHasLeft =
          targetMemberHasLeft || !manager.isCurrentMember((InternalDistributedMember) target);
    }

    /** process the failure set from sending the message */
    public void messageNotSentTo(Set failures) {
      for (Iterator it = failures.iterator(); it.hasNext(); ) {
        this.memberDeparted((InternalDistributedMember) it.next(), true);
      }
    }

    @Override
    public void memberDeparted(final InternalDistributedMember id, final boolean crashed) {
      super.memberDeparted(id, crashed);
    }

    @Override
    protected void processActiveMembers(Set activeMembers) {
      super.processActiveMembers(activeMembers);
      if (!activeMembers.contains(this.targetMember)) {
        targetMemberHasLeft = true;
      }
    }

    @Override
    protected boolean stillWaiting() {
      targetMemberHasLeft =
          targetMemberHasLeft || !getDistributionManager().isCurrentMember(targetMember);
      return super.stillWaiting() && !targetMemberHasLeft;
    }

    @Override
    public String toString() {
      return "<"
          + shortName()
          + " "
          + this.getProcessorId()
          + " targeting "
          + targetMember
          + " waiting for "
          + numMembers()
          + " replies out of "
          + this.originalCount
          + " "
          + (exception == null ? "" : (" exception: " + exception))
          + " from "
          + membersToString()
          + ">";
    }
  }
}
/**
 * NewLRUClockHand holds the LRU list and the behavior for maintaining the list as a cu-pipe
 * (clock) and determining the next entry to be removed. Each EntriesMap that supports LRU holds
 * one of these.
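 *
 * <p>A minimal usage sketch (hedged: {@code region}, {@code ccHelper}, {@code internalRegionArgs}
 * and {@code node} are assumed to be supplied by the enclosing entries map):
 *
 * <pre>{@code
 * NewLRUClockHand hand = new NewLRUClockHand(region, ccHelper, internalRegionArgs);
 * hand.appendEntry(node);                   // link the node in front of the tail guard
 * LRUClockNode victim = hand.getLRUEntry(); // null when only the guard nodes remain
 * }</pre>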
 */
public class NewLRUClockHand {
  private static final Logger logger = LogService.getLogger();

  private BucketRegion bucketRegion = null;

  /** The last node in the LRU list after which all new nodes are added */
  protected LRUClockNode tail = new GuardNode();

  /** The starting point in the LRU list for searching for the LRU node */
  protected LRUClockNode head = new GuardNode();

  /** The object for locking the head of the cu-pipe. */
  protected final HeadLock lock;

  /** The LRU statistics for this clock hand's region */
  private final LRUStatistics stats;
  /** Counter for the size of the LRU list */
  private int size = 0;

  public static final boolean debug = Boolean.getBoolean("gemfire.verbose-lru-clock");

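  /**
   * Maximum number of candidates getLRUEntry() examines before greedily returning one; -1 (the
   * default when gemfire.lru.maxSearchEntries is unset) disables the cutoff.
   */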
  private static final int maxEntries;

  static {
    String squelch = System.getProperty("gemfire.lru.maxSearchEntries");
    if (squelch == null) maxEntries = -1;
    else maxEntries = Integer.parseInt(squelch);
  }

  /** only used by enhancer */
  // protected NewLRUClockHand( ) { }

  //   private long size = 0;

  public NewLRUClockHand(
      Object region, EnableLRU ccHelper, InternalRegionArguments internalRegionArgs) {
    setBucketRegion(region);
    this.lock = new HeadLock();
    // behavior relies on a single evicted node in the pipe when the pipe is empty.
    initHeadAndTail();
    if (this.bucketRegion != null) {
      this.stats =
          internalRegionArgs.getPartitionedRegion() != null
              ? internalRegionArgs.getPartitionedRegion().getEvictionController().stats
              : null;
    } else {
      LRUStatistics tmp = null;
      if (region instanceof PlaceHolderDiskRegion) {
        tmp = ((PlaceHolderDiskRegion) region).getPRLRUStats();
      } else if (region instanceof PartitionedRegion) {
        tmp = ((PartitionedRegion) region).getPRLRUStatsDuringInitialization(); // bug 41938
        PartitionedRegion pr = (PartitionedRegion) region;
        if (tmp != null) {
          pr.getEvictionController().stats = tmp;
        }
      }
      if (tmp == null) {
        StatisticsFactory sf = GemFireCacheImpl.getExisting("").getDistributedSystem();
        tmp = ccHelper.initStats(region, sf);
      }
      this.stats = tmp;
    }
  }

  public void setBucketRegion(Object r) {
    if (r instanceof BucketRegion) {
      this.bucketRegion = (BucketRegion) r; // see bug 41388
    }
  }

  public NewLRUClockHand(Region region, EnableLRU ccHelper, NewLRUClockHand oldList) {
    setBucketRegion(region);
    this.lock = new HeadLock();
    // behavior relies on a single evicted node in the pipe when the pipe is empty.
    initHeadAndTail();
    if (oldList.stats == null) {
      // see bug 41388
      StatisticsFactory sf = region.getCache().getDistributedSystem();
      this.stats = ccHelper.initStats(region, sf);
    } else {
      this.stats = oldList.stats;
      if (this.bucketRegion != null) {
        this.stats.decrementCounter(this.bucketRegion.getCounter());
        this.bucketRegion.resetCounter();
      } else {
        this.stats.resetCounter();
      }
    }
  }

  /** Closes this clock hand by closing its statistics and, if present, its bucket region. */
  public void close() {
    closeStats();
    if (bucketRegion != null) bucketRegion.close();
  }

  public void closeStats() {
    LRUStatistics ls = this.stats;
    if (ls != null) {
      ls.close();
    }
  }

  /**
   * Appends the given LRU node to the list, linking it immediately in front of the tail guard
   * node. A node that is already linked is left untouched.
   *
   * @param aNode the node to append
   */
  public final void appendEntry(final LRUClockNode aNode) {
    synchronized (this.lock) {
      if (aNode.nextLRUNode() != null || aNode.prevLRUNode() != null) {
        return;
      }

      if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
        logger.trace(
            LogMarker.LRU_CLOCK,
            LocalizedMessage.create(
                LocalizedStrings.NewLRUClockHand_ADDING_ANODE_TO_LRU_LIST, aNode));
      }
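      // Splice aNode into the doubly-linked list just ahead of the tail guard:
      // tail.prev <-> aNode <-> tail.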
      aNode.setNextLRUNode(this.tail);
      this.tail.prevLRUNode().setNextLRUNode(aNode);
      aNode.setPrevLRUNode(this.tail.prevLRUNode());
      this.tail.setPrevLRUNode(aNode);

      this.size++;
    }
  }

  /**
   * Removes and returns the entry at the head of the list, or null when only the guard nodes
   * remain, preserving the cu-pipe requirement of at least one node left in the pipe.
   */
  private LRUClockNode getHeadEntry() {
    synchronized (lock) {
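      // head is a guard node, so the first real entry is head.next; when that is the tail guard
      // the list is empty and null is returned.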
      LRUClockNode aNode = NewLRUClockHand.this.head.nextLRUNode();
      if (aNode == this.tail) {
        return null;
      }

      LRUClockNode next = aNode.nextLRUNode();
      this.head.setNextLRUNode(next);
      next.setPrevLRUNode(this.head);

      aNode.setNextLRUNode(null);
      aNode.setPrevLRUNode(null);
      this.size--; // this entry is being removed from the list
      return aNode;
    }
  }

  /**
   * return the Entry that is considered least recently used. The entry will no longer be in the
   * pipe (unless it is the last empty marker).
   */
  public LRUClockNode getLRUEntry() {
    long numEvals = 0;
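    // Walk candidates off the head of the clock: transactional and already-evicted nodes are
    // skipped, recently-used nodes are re-appended to the tail, and a greedy return may occur
    // once more than maxEntries candidates have been examined.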

    for (; ; ) {
      LRUClockNode aNode = null;
      aNode = getHeadEntry();

      if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
        logger.trace(LogMarker.LRU_CLOCK, "lru considering {}", aNode);
      }

      if (aNode == null) { // hit the end of the list
        this.stats.incEvaluations(numEvals);
        return aNode;
      } // hit the end of the list

      numEvals++;

      // If this Entry is part of a transaction, skip it since
      // eviction should not cause commit conflicts
      synchronized (aNode) {
        if (aNode instanceof AbstractRegionEntry) {
          if (((AbstractRegionEntry) aNode).isInUseByTransaction()) {
            if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
              logger.trace(
                  LogMarker.LRU_CLOCK,
                  LocalizedMessage.create(
                      LocalizedStrings
                          .NewLRUClockHand_REMOVING_TRANSACTIONAL_ENTRY_FROM_CONSIDERATION));
            }
            continue;
          }
        }
        if (aNode.testEvicted()) {
          if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
            logger.trace(
                LogMarker.LRU_CLOCK,
                LocalizedMessage.create(LocalizedStrings.NewLRUClockHand_DISCARDING_EVICTED_ENTRY));
          }
          continue;
        }

        // At this point we have any acceptable entry.  Now
        // use various criteria to determine if it's good enough
        // to return, or if we need to add it back to the list.
        if (maxEntries > 0 && numEvals > maxEntries) {
          if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
            logger.trace(
                LogMarker.LRU_CLOCK,
                LocalizedMessage.create(
                    LocalizedStrings.NewLRUClockHand_GREEDILY_PICKING_AN_AVAILABLE_ENTRY));
          }
          this.stats.incGreedyReturns(1);
          // fall through, return this node
        } else if (aNode.testRecentlyUsed()) {
          // Throw it back, it's in the working set
          aNode.unsetRecentlyUsed();
          // aNode.setInList();
          if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
            logger.trace(
                LogMarker.LRU_CLOCK,
                LocalizedMessage.create(
                    LocalizedStrings.NewLRUClockHand_SKIPPING_RECENTLY_USED_ENTRY, aNode));
          }
          appendEntry(aNode);
          continue; // keep looking
        } else {
          if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
            logger.trace(
                LogMarker.LRU_CLOCK,
                LocalizedMessage.create(
                    LocalizedStrings.NewLRUClockHand_RETURNING_UNUSED_ENTRY, aNode));
          }
          // fall through, return this node
        }

        // Return the current node.
        this.stats.incEvaluations(numEvals);
        return aNode;
      } // synchronized
    } // for
  }

  public void dumpList() {
    if (!logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
      return;
    }
    synchronized (lock) {
      int idx = 1;
      for (LRUClockNode aNode = this.head; aNode != null; aNode = aNode.nextLRUNode()) {
        logger.trace(LogMarker.LRU_CLOCK, "  ({}) {}", (idx++), aNode);
      }
    }
  }

  public long getExpensiveListCount() {
    synchronized (lock) {
      long count = 0;
      for (LRUClockNode aNode = this.head.nextLRUNode();
          aNode != this.tail;
          aNode = aNode.nextLRUNode()) {
        count++;
      }
      return count;
    }
  }

  public String getAuditReport() {
    LRUClockNode h = this.head;
    int totalNodes = 0;
    int evictedNodes = 0;
    int usedNodes = 0;
    while (h != null) {
      totalNodes++;
      if (h.testEvicted()) evictedNodes++;
      if (h.testRecentlyUsed()) usedNodes++;
      h = h.nextLRUNode();
    }
    StringBuffer result = new StringBuffer(128);
    result
        .append("LRUList Audit: listEntries = ")
        .append(totalNodes)
        .append(" evicted = ")
        .append(evictedNodes)
        .append(" used = ")
        .append(usedNodes);
    return result.toString();
  }

  /** unsynchronized audit...only run after activity has ceased. */
  public void audit() {
    System.out.println(getAuditReport());
  }

  /** remove an entry from the pipe... (marks it evicted to be skipped later) */
  public boolean unlinkEntry(LRUClockNode entry) {
    if (logger.isTraceEnabled(LogMarker.LRU_CLOCK)) {
      logger.trace(
          LogMarker.LRU_CLOCK,
          LocalizedMessage.create(LocalizedStrings.NewLRUClockHand_UNLINKENTRY_CALLED, entry));
    }
    entry.setEvicted();
    stats().incDestroys();
    synchronized (lock) {
      LRUClockNode next = entry.nextLRUNode();
      LRUClockNode prev = entry.prevLRUNode();
      if (next == null || prev == null) {
        // not in the list anymore.
        return false;
      }
      next.setPrevLRUNode(prev);
      prev.setNextLRUNode(next);
      entry.setNextLRUNode(null);
      entry.setPrevLRUNode(null);
    }
    return true;
  }

  /**
   * Get the modifier for lru based statistics.
   *
   * @return The LRUStatistics for this Clock hand's region.
   */
  public LRUStatistics stats() {
    return this.stats;
  }

  /** called when an LRU map is cleared... resets stats and releases prev and next. */
  public void clear(RegionVersionVector rvv) {
    if (rvv != null) {
      return; // when concurrency checks are enabled the clear operation removes entries iteratively
    }
    synchronized (this.lock) {
      if (bucketRegion != null) {
        this.stats.decrementCounter(bucketRegion.getCounter());
        bucketRegion.resetCounter();
      } else {
        this.stats.resetCounter();
      }
      initHeadAndTail();
      //      LRUClockNode node = this.tail;
      //      node.setEvicted();
      //
      //      // NYI need to walk the list and call unsetInList for each one.
      //
      //      // tail's next should already be null.
      //      setHead( node );
    }
  }

  private void initHeadAndTail() {
    // I'm not sure, but I think it's important that we
    // drop the references to the old head and tail on a region clear
    // That will prevent any concurrent operations that are messing
    // with existing nodes from screwing up the head and tail after
    // the clear.
    // Dan 9/23/09
    this.head = new GuardNode();
    this.tail = new GuardNode();
    this.head.setNextLRUNode(this.tail);
    this.tail.setPrevLRUNode(this.head);
    this.size = 0;
  }

  /** perform work of clear(), after subclass has properly synchronized */
  //  private void internalClear() {
  //    stats().resetCounter();
  //    LRUClockNode node = this.tail;
  //    node.setEvicted();
  //
  //    // NYI need to walk the list and call unsetInList for each one.
  //
  //    // tail's next should already be null.
  //    setHead( node );
  //  }

  /** Marker class name to identify the lock more easily in thread dumps */
  protected static class HeadLock extends Object {}

  private static final class GuardNode implements LRUClockNode {

    private LRUClockNode next;
    LRUClockNode prev;

    public int getEntrySize() {
      return 0;
    }

    public LRUClockNode nextLRUNode() {
      return next;
    }

    public LRUClockNode prevLRUNode() {
      return prev;
    }

    public void setEvicted() {}

    public void setNextLRUNode(LRUClockNode next) {
      this.next = next;
    }

    public void setPrevLRUNode(LRUClockNode prev) {
      this.prev = prev;
    }

    public void setRecentlyUsed() {}

    public boolean testEvicted() {
      return false;
    }

    public boolean testRecentlyUsed() {
      return false;
    }

    public void unsetEvicted() {}

    public void unsetRecentlyUsed() {}

    public int updateEntrySize(EnableLRU ccHelper) {
      return 0;
    }

    public int updateEntrySize(EnableLRU ccHelper, Object value) {
      return 0;
    }
  }
}
/**
 * An implementation of the configuration object for an <code>AdminDistributedSystem</code>. After a
 * config has been used to create an <code>AdminDistributedSystem</code> most of the configuration
 * attributes cannot be changed. However, some operations (such as getting information about GemFire
 * managers and distribution locators) are "passed through" to the <code>AdminDistributedSystem
 * </code> associated with this configuration object.
 *
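 * <p>A minimal usage sketch (hedged: {@code AdminDistributedSystemFactory} and its methods are
 * assumed from the standard admin API and are not defined in this file):
 *
 * <pre>{@code
 * DistributedSystemConfig config = AdminDistributedSystemFactory.defineDistributedSystem();
 * config.setMcastPort(0);
 * config.setLocators("localhost[10334]");
 * AdminDistributedSystem adminDs = AdminDistributedSystemFactory.getDistributedSystem(config);
 * adminDs.connect();
 * }</pre>
 *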
 * @since 3.5
 */
public class DistributedSystemConfigImpl implements DistributedSystemConfig {

  private static final Logger logger = LogService.getLogger();

  private String entityConfigXMLFile = DEFAULT_ENTITY_CONFIG_XML_FILE;
  private String systemId = DEFAULT_SYSTEM_ID;
  private String mcastAddress = DEFAULT_MCAST_ADDRESS;
  private int mcastPort = DEFAULT_MCAST_PORT;
  private int ackWaitThreshold = DEFAULT_ACK_WAIT_THRESHOLD;
  private int ackSevereAlertThreshold = DEFAULT_ACK_SEVERE_ALERT_THRESHOLD;
  private String locators = DEFAULT_LOCATORS;
  private String bindAddress = DEFAULT_BIND_ADDRESS;
  private String serverBindAddress = DEFAULT_BIND_ADDRESS;
  private String remoteCommand = DEFAULT_REMOTE_COMMAND;
  private boolean disableTcp = DEFAULT_DISABLE_TCP;
  private boolean enableNetworkPartitionDetection = DEFAULT_ENABLE_NETWORK_PARTITION_DETECTION;
  private boolean disableAutoReconnect = DEFAULT_DISABLE_AUTO_RECONNECT;
  private int memberTimeout = DEFAULT_MEMBER_TIMEOUT;
  private String membershipPortRange = getMembershipPortRangeString(DEFAULT_MEMBERSHIP_PORT_RANGE);
  private int tcpPort = DEFAULT_TCP_PORT;

  private String logFile = DEFAULT_LOG_FILE;
  private String logLevel = DEFAULT_LOG_LEVEL;
  private int logDiskSpaceLimit = DEFAULT_LOG_DISK_SPACE_LIMIT;
  private int logFileSizeLimit = DEFAULT_LOG_FILE_SIZE_LIMIT;
  private int refreshInterval = DEFAULT_REFRESH_INTERVAL;
  private Properties gfSecurityProperties = new Properties();

  /** Listeners to notify when this DistributedSystemConfig changes */
  private Set listeners = new HashSet();

  /** Configs for CacheServers that this system config is aware of */
  private Set cacheServerConfigs = new HashSet();

  /** Configs for the managed distribution locators in the distributed system */
  private Set locatorConfigs = new HashSet();

  /** The display name of this distributed system */
  private String systemName = DEFAULT_NAME;

  /**
   * The admin distributed system object that is configured by this config object.
   *
   * @since 4.0
   */
  private AdminDistributedSystemImpl system;

  /** The GemFire log writer used by the distributed system */
  private InternalLogWriter logWriter;

  ///////////////////////  Static Methods  ///////////////////////

  /**
   * Filters out all properties that are unique to the admin <code>DistributedSystemConfig</code>
   * that are not present in the internal <code>DistributionConfig</code>.
   *
   * @since 4.0
   */
  private static Properties filterOutAdminProperties(Properties props) {

    Properties props2 = new Properties();
    for (Enumeration names = props.propertyNames(); names.hasMoreElements(); ) {
      String name = (String) names.nextElement();
      if (!(ENTITY_CONFIG_XML_FILE_NAME.equals(name)
          || REFRESH_INTERVAL_NAME.equals(name)
          || REMOTE_COMMAND_NAME.equals(name))) {
        String value = props.getProperty(name);
        if ((name != null) && (value != null)) {
          props2.setProperty(name, value);
        }
      }
    }

    return props2;
  }

  ////////////////////////  Constructors  ////////////////////////

  /**
   * Creates a new <code>DistributedSystemConfigImpl</code> based on the configuration stored in a
   * <code>DistributedSystem</code>'s <code>DistributionConfig</code>.
   */
  public DistributedSystemConfigImpl(DistributionConfig distConfig, String remoteCommand) {
    if (distConfig == null) {
      throw new IllegalArgumentException(
          LocalizedStrings.DistributedSystemConfigImpl_DISTRIBUTIONCONFIG_MUST_NOT_BE_NULL
              .toLocalizedString());
    }

    this.mcastAddress = InetAddressUtil.toString(distConfig.getMcastAddress());
    this.mcastPort = distConfig.getMcastPort();
    this.locators = distConfig.getLocators();
    this.membershipPortRange = getMembershipPortRangeString(distConfig.getMembershipPortRange());

    this.systemName = distConfig.getName();

    this.sslEnabled = distConfig.getSSLEnabled();
    this.sslCiphers = distConfig.getSSLCiphers();
    this.sslProtocols = distConfig.getSSLProtocols();
    this.sslAuthenticationRequired = distConfig.getSSLRequireAuthentication();

    this.logFile = distConfig.getLogFile().getPath();
    this.logLevel = LogWriterImpl.levelToString(distConfig.getLogLevel());
    this.logDiskSpaceLimit = distConfig.getLogDiskSpaceLimit();
    this.logFileSizeLimit = distConfig.getLogFileSizeLimit();

    basicSetBindAddress(distConfig.getBindAddress());
    this.tcpPort = distConfig.getTcpPort();

    this.disableTcp = distConfig.getDisableTcp();

    this.remoteCommand = remoteCommand;
    this.serverBindAddress = distConfig.getServerBindAddress();
    this.enableNetworkPartitionDetection = distConfig.getEnableNetworkPartitionDetection();
    this.memberTimeout = distConfig.getMemberTimeout();
    this.refreshInterval = DistributedSystemConfig.DEFAULT_REFRESH_INTERVAL;
    this.gfSecurityProperties = (Properties) distConfig.getSSLProperties().clone();
  }

  /**
   * Zero-argument constructor to be used only by subclasses.
   *
   * @since 4.0
   */
  protected DistributedSystemConfigImpl() {}

  /**
   * Creates a new <code>DistributedSystemConfigImpl</code> whose configuration is specified by the
   * given <code>Properties</code> object.
   */
  protected DistributedSystemConfigImpl(Properties props) {
    this(props, false);
  }

  /**
   * Creates a new <code>DistributedSystemConfigImpl</code> whose configuration is specified by the
   * given <code>Properties</code> object.
   *
   * @param props The configuration properties specified by the caller
   * @param ignoreGemFirePropsFile whether to skip loading distributed system properties from
   *     gemfire.properties file
   * @since 6.5
   */
  protected DistributedSystemConfigImpl(Properties props, boolean ignoreGemFirePropsFile) {
    this(
        new DistributionConfigImpl(filterOutAdminProperties(props), ignoreGemFirePropsFile),
        DEFAULT_REMOTE_COMMAND);
    String remoteCommand = props.getProperty(REMOTE_COMMAND_NAME);
    if (remoteCommand != null) {
      this.remoteCommand = remoteCommand;
    }

    String entityConfigXMLFile = props.getProperty(ENTITY_CONFIG_XML_FILE_NAME);
    if (entityConfigXMLFile != null) {
      this.entityConfigXMLFile = entityConfigXMLFile;
    }

    String refreshInterval = props.getProperty(REFRESH_INTERVAL_NAME);
    if (refreshInterval != null) {
      try {
        this.refreshInterval = Integer.parseInt(refreshInterval);
      } catch (NumberFormatException nfEx) {
        throw new IllegalArgumentException(
            LocalizedStrings.DistributedSystemConfigImpl_0_IS_NOT_A_VALID_INTEGER_1
                .toLocalizedString(new Object[] {refreshInterval, REFRESH_INTERVAL_NAME}));
      }
    }
  }

  //////////////////////  Instance Methods  //////////////////////

  /**
   * Returns the <code>LogWriterI18n</code> to be used when administering the distributed system.
   * Returns null if nothing has been provided via <code>setInternalLogWriter</code>.
   *
   * @since 4.0
   */
  public InternalLogWriter getInternalLogWriter() {
    // LOG: used only for sharing between IDS, AdminDSImpl and AgentImpl -- to prevent multiple
    // banners, etc.
    synchronized (this) {
      return this.logWriter;
    }
  }

  /** Sets the <code>LogWriterI18n</code> to be used when administering the distributed system. */
  public void setInternalLogWriter(InternalLogWriter logWriter) {
    // LOG: used only for sharing between IDS, AdminDSImpl and AgentImpl -- to prevent multiple
    // banners, etc.
    synchronized (this) {
      this.logWriter = logWriter;
    }
  }

  public LogConfig createLogConfig() {
    return new LogConfig() {
      @Override
      public int getLogLevel() {
        return LogWriterImpl.levelNameToCode(DistributedSystemConfigImpl.this.getLogLevel());
      }

      @Override
      public File getLogFile() {
        return new File(DistributedSystemConfigImpl.this.getLogFile());
      }

      @Override
      public int getLogFileSizeLimit() {
        return DistributedSystemConfigImpl.this.getLogFileSizeLimit();
      }

      @Override
      public int getLogDiskSpaceLimit() {
        return DistributedSystemConfigImpl.this.getLogDiskSpaceLimit();
      }

      @Override
      public String getName() {
        return DistributedSystemConfigImpl.this.getSystemName();
      }

      @Override
      public String toLoggerString() {
        return DistributedSystemConfigImpl.this.toString();
      }
    };
  }

  /**
   * Marks this config object as "read only". Attempts to modify a config object will result in an
   * {@link IllegalStateException} being thrown.
   *
   * @since 4.0
   */
  void setDistributedSystem(AdminDistributedSystemImpl system) {
    this.system = system;
  }

  /**
   * Checks to see if this config object is "read only". If it is, then an {@link
   * IllegalStateException} is thrown.
   *
   * @since 4.0
   */
  protected void checkReadOnly() {
    if (this.system != null) {
      throw new IllegalStateException(
          LocalizedStrings
              .DistributedSystemConfigImpl_A_DISTRIBUTEDSYSTEMCONFIG_OBJECT_CANNOT_BE_MODIFIED_AFTER_IT_HAS_BEEN_USED_TO_CREATE_AN_ADMINDISTRIBUTEDSYSTEM
              .toLocalizedString());
    }
  }

  public String getEntityConfigXMLFile() {
    return this.entityConfigXMLFile;
  }

  public void setEntityConfigXMLFile(String xmlFile) {
    checkReadOnly();
    this.entityConfigXMLFile = xmlFile;
    configChanged();
  }

  /**
   * Parses the XML configuration file that describes managed entities.
   *
   * @throws AdminXmlException If a problem is encountered while parsing the XML file.
   */
  private void parseEntityConfigXMLFile() {
    String fileName = this.entityConfigXMLFile;
    File xmlFile = new File(fileName);
    if (!xmlFile.exists()) {
      if (DEFAULT_ENTITY_CONFIG_XML_FILE.equals(fileName)) {
        // Default doesn't exist, no big deal
        return;
      } else {
        throw new AdminXmlException(
            LocalizedStrings
                .DistributedSystemConfigImpl_ENTITY_CONFIGURATION_XML_FILE_0_DOES_NOT_EXIST
                .toLocalizedString(fileName));
      }
    }

    try {
      InputStream is = new FileInputStream(xmlFile);
      try {
        ManagedEntityConfigXmlParser.parse(is, this);
      } finally {
        is.close();
      }
    } catch (IOException ex) {
      throw new AdminXmlException(
          LocalizedStrings.DistributedSystemConfigImpl_WHILE_PARSING_0.toLocalizedString(fileName),
          ex);
    }
  }

  public String getSystemId() {
    return this.systemId;
  }

  public void setSystemId(String systemId) {
    checkReadOnly();
    this.systemId = systemId;
    configChanged();
  }

  /** Returns the multicast address for the system */
  public String getMcastAddress() {
    return this.mcastAddress;
  }

  public void setMcastAddress(String mcastAddress) {
    checkReadOnly();
    this.mcastAddress = mcastAddress;
    configChanged();
  }

  /** Returns the multicast port for the system */
  public int getMcastPort() {
    return this.mcastPort;
  }

  public void setMcastPort(int mcastPort) {
    checkReadOnly();
    this.mcastPort = mcastPort;
    configChanged();
  }

  public int getAckWaitThreshold() {
    return this.ackWaitThreshold;
  }

  public void setAckWaitThreshold(int seconds) {
    checkReadOnly();
    this.ackWaitThreshold = seconds;
    configChanged();
  }

  public int getAckSevereAlertThreshold() {
    return this.ackSevereAlertThreshold;
  }

  public void setAckSevereAlertThreshold(int seconds) {
    checkReadOnly();
    this.ackSevereAlertThreshold = seconds;
    configChanged();
  }

  /** Returns the comma-delimited list of locators for the system */
  public String getLocators() {
    return this.locators;
  }

  public void setLocators(String locators) {
    checkReadOnly();
    if (locators == null) {
      this.locators = "";
    } else {
      this.locators = locators;
    }
    configChanged();
  }

  /**
   * Returns the value for membership-port-range
   *
   * @return the value for the Distributed System property membership-port-range
   */
  public String getMembershipPortRange() {
    return this.membershipPortRange;
  }

  /**
   * Sets the Distributed System property membership-port-range
   *
   * @param membershipPortRangeStr the value for membership-port-range given as two numbers
   *     separated by a minus sign.
   */
  public void setMembershipPortRange(String membershipPortRangeStr) {
    /*
     * FIXME: Setting attributes in DistributedSystemConfig has no effect on
     * DistributionConfig which is actually used for connection with DS. This is
     * true for all such attributes. Should be addressed in the Admin Revamp if
     * we want these 'set' calls to affect anything. Then we can use the
     * validation code in DistributionConfigImpl code.
     */
    checkReadOnly();
    if (membershipPortRangeStr == null) {
      this.membershipPortRange = getMembershipPortRangeString(DEFAULT_MEMBERSHIP_PORT_RANGE);
    } else {
      try {
        if (validateMembershipRange(membershipPortRangeStr)) {
          this.membershipPortRange = membershipPortRangeStr;
        } else {
          throw new IllegalArgumentException(
              LocalizedStrings.DistributedSystemConfigImpl_INVALID_VALUE_FOR_MEMBERSHIP_PORT_RANGE
                  .toLocalizedString(
                      new Object[] {membershipPortRangeStr, MEMBERSHIP_PORT_RANGE_NAME}));
        }
      } catch (Exception e) {
        if (logger.isDebugEnabled()) {
          logger.debug(e.getMessage(), e);
        }
      }
    }
  }

  public void setTcpPort(int port) {
    checkReadOnly();
    this.tcpPort = port;
    configChanged();
  }

  public int getTcpPort() {
    return this.tcpPort;
  }

  /**
   * Validates the given membership-port-range string, which is expected as two numbers separated
   * by a minus sign (e.g. "1024-65535"), the first being the lower end and the second the upper
   * end of the range.
   *
   * @param membershipPortRange membership-port-range given as two numbers separated by a minus
   *     sign.
   * @return true if the membership-port-range string is valid, false otherwise
   */
  private boolean validateMembershipRange(String membershipPortRange) {
    int[] range = null;
    if (membershipPortRange != null && membershipPortRange.trim().length() > 0) {
      String[] splitted = membershipPortRange.split("-");
      range = new int[2];
      range[0] = Integer.parseInt(splitted[0].trim());
      range[1] = Integer.parseInt(splitted[1].trim());
      // NumberFormatException if any could be thrown

      if (range[0] < 0 || range[0] >= range[1] || range[1] < 0 || range[1] > 65535) {
        range = null;
      }
    }
    return range != null;
  }

  /**
   * @return the String representation of membershipPortRange with lower & upper limits of the port
   *     range separated by '-' e.g. 1-65535
   */
  private static String getMembershipPortRangeString(int[] membershipPortRange) {
    String membershipPortRangeString = "";
    if (membershipPortRange != null && membershipPortRange.length == 2) {
      membershipPortRangeString = membershipPortRange[0] + "-" + membershipPortRange[1];
    }

    return membershipPortRangeString;
  }

  public String getBindAddress() {
    return this.bindAddress;
  }

  public void setBindAddress(String bindAddress) {
    checkReadOnly();
    basicSetBindAddress(bindAddress);
    configChanged();
  }

  public String getServerBindAddress() {
    return this.serverBindAddress;
  }

  public void setServerBindAddress(String bindAddress) {
    checkReadOnly();
    basicSetServerBindAddress(bindAddress);
    configChanged();
  }

  public boolean getDisableTcp() {
    return this.disableTcp;
  }

  public void setDisableTcp(boolean flag) {
    checkReadOnly();
    disableTcp = flag;
    configChanged();
  }

  public void setEnableNetworkPartitionDetection(boolean newValue) {
    checkReadOnly();
    this.enableNetworkPartitionDetection = newValue;
    configChanged();
  }

  public boolean getEnableNetworkPartitionDetection() {
    return this.enableNetworkPartitionDetection;
  }

  public void setDisableAutoReconnect(boolean newValue) {
    checkReadOnly();
    this.disableAutoReconnect = newValue;
    configChanged();
  }

  public boolean getDisableAutoReconnect() {
    return this.disableAutoReconnect;
  }

  public int getMemberTimeout() {
    return this.memberTimeout;
  }

  public void setMemberTimeout(int value) {
    checkReadOnly();
    this.memberTimeout = value;
    configChanged();
  }

  private void basicSetBindAddress(String bindAddress) {
    if (!validateBindAddress(bindAddress)) {
      throw new IllegalArgumentException(
          LocalizedStrings.DistributedSystemConfigImpl_INVALID_BIND_ADDRESS_0.toLocalizedString(
              bindAddress));
    }
    this.bindAddress = bindAddress;
  }

  private void basicSetServerBindAddress(String bindAddress) {
    if (!validateBindAddress(bindAddress)) {
      throw new IllegalArgumentException(
          LocalizedStrings.DistributedSystemConfigImpl_INVALID_BIND_ADDRESS_0.toLocalizedString(
              bindAddress));
    }
    this.serverBindAddress = bindAddress;
  }

  /** Returns the remote command setting to use for remote administration */
  public String getRemoteCommand() {
    return this.remoteCommand;
  }

  /**
   * Sets the remote command for this config object. This attribute may be modified after this
   * config object has been used to create an admin distributed system.
   */
  public void setRemoteCommand(String remoteCommand) {
    if (!ALLOW_ALL_REMOTE_COMMANDS) {
      checkRemoteCommand(remoteCommand);
    }
    this.remoteCommand = remoteCommand;
    configChanged();
  }

  private static final boolean ALLOW_ALL_REMOTE_COMMANDS =
      Boolean.getBoolean("gemfire.admin.ALLOW_ALL_REMOTE_COMMANDS");
  private static final String[] LEGAL_REMOTE_COMMANDS = {"rsh", "ssh"};
  private static final String ILLEGAL_REMOTE_COMMAND_RSH_OR_SSH =
      "Allowed remote commands include \"rsh {HOST} {CMD}\" or \"ssh {HOST} {CMD}\" with valid rsh or ssh switches. Invalid: ";

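  /**
   * Rejects remote command templates that are not based on rsh or ssh. Illustrative examples (not
   * from the original source): "ssh -n {HOST} {CMD}" would be accepted, while "bash {HOST} {CMD}"
   * would be rejected with an IllegalArgumentException.
   */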
  private final void checkRemoteCommand(final String remoteCommand) {
    if (remoteCommand == null || remoteCommand.isEmpty()) {
      return;
    }
    final String command = remoteCommand.toLowerCase().trim();
    if (!command.contains("{host}") || !command.contains("{cmd}")) {
      throw new IllegalArgumentException(ILLEGAL_REMOTE_COMMAND_RSH_OR_SSH + remoteCommand);
    }

    final StringTokenizer tokenizer = new StringTokenizer(command, " ");
    final ArrayList<String> array = new ArrayList<String>();
    for (int i = 0; tokenizer.hasMoreTokens(); i++) {
      String string = tokenizer.nextToken();
      if (i == 0) {
        // first element must be rsh or ssh
        boolean found = false;
        for (int j = 0; j < LEGAL_REMOTE_COMMANDS.length; j++) {
          if (string.contains(LEGAL_REMOTE_COMMANDS[j])) {
            // verify command is at end of string
            if (!(string.endsWith(LEGAL_REMOTE_COMMANDS[j])
                || string.endsWith(LEGAL_REMOTE_COMMANDS[j] + ".exe"))) {
              throw new IllegalArgumentException(ILLEGAL_REMOTE_COMMAND_RSH_OR_SSH + remoteCommand);
            }
            found = true;
          }
        }
        if (!found) {
          throw new IllegalArgumentException(ILLEGAL_REMOTE_COMMAND_RSH_OR_SSH + remoteCommand);
        }
      } else {
        final boolean isSwitch = string.startsWith("-");
        final boolean isHostOrCmd = string.equals("{host}") || string.equals("{cmd}");

        // additional elements must be switches or values-for-switches or {host} or user@{host} or
        // {cmd}
        if (!isSwitch && !isHostOrCmd) {
          final String previous =
              (array == null || array.isEmpty()) ? null : array.get(array.size() - 1);
          final boolean isValueForSwitch = previous != null && previous.startsWith("-");
          final boolean isHostWithUser = string.contains("@") && string.endsWith("{host}");

          if (!(isValueForSwitch || isHostWithUser)) {
            throw new IllegalArgumentException(ILLEGAL_REMOTE_COMMAND_RSH_OR_SSH + remoteCommand);
          }
        }
      }
      array.add(string);
    }
  }

  public String getSystemName() {
    return this.systemName;
  }

  public void setSystemName(final String systemName) {
    checkReadOnly();
    this.systemName = systemName;
    configChanged();
  }

  /**
   * Returns an array of configurations for statically known CacheServers
   *
   * @since 4.0
   */
  public CacheServerConfig[] getCacheServerConfigs() {
    return (CacheServerConfig[])
        this.cacheServerConfigs.toArray(new CacheServerConfig[this.cacheServerConfigs.size()]);
  }

  public CacheVmConfig[] getCacheVmConfigs() {
    return (CacheVmConfig[])
        this.cacheServerConfigs.toArray(new CacheVmConfig[this.cacheServerConfigs.size()]);
  }

  /**
   * Creates the configuration for a CacheServer
   *
   * @since 4.0
   */
  public CacheServerConfig createCacheServerConfig() {
    CacheServerConfig config = new CacheServerConfigImpl();
    addCacheServerConfig(config);
    return config;
  }

  public CacheVmConfig createCacheVmConfig() {
    return (CacheVmConfig) createCacheServerConfig();
  }

  /**
   * Adds the configuration for a CacheServer
   *
   * @since 4.0
   */
  private void addCacheServerConfig(CacheServerConfig managerConfig) {
    checkReadOnly();

    if (managerConfig == null) return;
    for (Iterator iter = this.cacheServerConfigs.iterator(); iter.hasNext(); ) {
      CacheServerConfigImpl impl = (CacheServerConfigImpl) iter.next();
      if (impl.equals(managerConfig)) {
        return;
      }
    }
    this.cacheServerConfigs.add(managerConfig);
    configChanged();
  }

  /**
   * Removes the configuration for a CacheServer
   *
   * @since 4.0
   */
  public void removeCacheServerConfig(CacheServerConfig managerConfig) {
    removeCacheVmConfig((CacheVmConfig) managerConfig);
  }

  public void removeCacheVmConfig(CacheVmConfig managerConfig) {
    checkReadOnly();
    this.cacheServerConfigs.remove(managerConfig);
    configChanged();
  }

  /** Returns the configurations of all managed distribution locators */
  public DistributionLocatorConfig[] getDistributionLocatorConfigs() {
    if (this.system != null) {
      DistributionLocator[] locators = this.system.getDistributionLocators();
      DistributionLocatorConfig[] configs = new DistributionLocatorConfig[locators.length];
      for (int i = 0; i < locators.length; i++) {
        configs[i] = locators[i].getConfig();
      }
      return configs;

    } else {
      Object[] array = new DistributionLocatorConfig[this.locatorConfigs.size()];
      return (DistributionLocatorConfig[]) this.locatorConfigs.toArray(array);
    }
  }

  /** Creates the configuration for a DistributionLocator */
  public DistributionLocatorConfig createDistributionLocatorConfig() {
    checkReadOnly();
    DistributionLocatorConfig config = new DistributionLocatorConfigImpl();
    addDistributionLocatorConfig(config);
    return config;
  }

  /** Adds the configuration for a DistributionLocator */
  private void addDistributionLocatorConfig(DistributionLocatorConfig config) {
    checkReadOnly();
    this.locatorConfigs.add(config);
    configChanged();
  }

  /** Removes the configuration for a DistributionLocator */
  public void removeDistributionLocatorConfig(DistributionLocatorConfig config) {
    checkReadOnly();
    this.locatorConfigs.remove(config);
    configChanged();
  }

  /**
   * Validates the bind address. The address may be a host name or IP address, but it must not be
   * empty and must be usable for creating an InetAddress. Cannot have a leading '/' (which
   * InetAddress.toString() produces).
   *
   * @param bindAddress host name or IP address to validate
   */
  public static boolean validateBindAddress(String bindAddress) {
    if (bindAddress == null || bindAddress.length() == 0) return true;
    if (InetAddressUtil.validateHost(bindAddress) == null) return false;
    return true;
  }

  public synchronized void configChanged() {
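    // Snapshot the listeners under their own lock, then invoke the callbacks outside that lock so
    // a listener may call addListener/removeListener without deadlocking.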
    ConfigListener[] clients = null;
    synchronized (this.listeners) {
      clients = (ConfigListener[]) listeners.toArray(new ConfigListener[this.listeners.size()]);
    }
    for (int i = 0; i < clients.length; i++) {
      try {
        clients[i].configChanged(this);
      } catch (Exception e) {
        logger.warn(e.getMessage(), e);
      }
    }
  }

  /** Registers listener for notification of changes in this config. */
  public void addListener(ConfigListener listener) {
    synchronized (this.listeners) {
      this.listeners.add(listener);
    }
  }

  /** Removes previously registered listener of this config. */
  public void removeListener(ConfigListener listener) {
    synchronized (this.listeners) {
      this.listeners.remove(listener);
    }
  }

  // -------------------------------------------------------------------------
  //   SSL support...
  // -------------------------------------------------------------------------
  private boolean sslEnabled = DistributionConfig.DEFAULT_SSL_ENABLED;
  private String sslProtocols = DistributionConfig.DEFAULT_SSL_PROTOCOLS;
  private String sslCiphers = DistributionConfig.DEFAULT_SSL_CIPHERS;
  private boolean sslAuthenticationRequired = DistributionConfig.DEFAULT_SSL_REQUIRE_AUTHENTICATION;
  private Properties sslProperties = new Properties();

  public boolean isSSLEnabled() {
    return this.sslEnabled;
  }

  public void setSSLEnabled(boolean enabled) {
    checkReadOnly();
    this.sslEnabled = enabled;
    configChanged();
  }

  public String getSSLProtocols() {
    return this.sslProtocols;
  }

  public void setSSLProtocols(String protocols) {
    checkReadOnly();
    this.sslProtocols = protocols;
    configChanged();
  }

  public String getSSLCiphers() {
    return this.sslCiphers;
  }

  public void setSSLCiphers(String ciphers) {
    checkReadOnly();
    this.sslCiphers = ciphers;
    configChanged();
  }

  public boolean isSSLAuthenticationRequired() {
    return this.sslAuthenticationRequired;
  }

  public void setSSLAuthenticationRequired(boolean authRequired) {
    checkReadOnly();
    this.sslAuthenticationRequired = authRequired;
    configChanged();
  }

  public Properties getSSLProperties() {
    return this.sslProperties;
  }

  public void setSSLProperties(Properties sslProperties) {
    checkReadOnly();
    this.sslProperties = sslProperties;
    if (this.sslProperties == null) {
      this.sslProperties = new Properties();
    }
    configChanged();
  }

  public void addSSLProperty(String key, String value) {
    checkReadOnly();
    this.sslProperties.put(key, value);
    configChanged();
  }

  public void removeSSLProperty(String key) {
    checkReadOnly();
    this.sslProperties.remove(key);
    configChanged();
  }

  /**
   * @return the gfSecurityProperties
   * @since 6.6.3
   */
  public Properties getGfSecurityProperties() {
    return gfSecurityProperties;
  }

  public String getLogFile() {
    return this.logFile;
  }

  public void setLogFile(String logFile) {
    checkReadOnly();
    this.logFile = logFile;
    configChanged();
  }

  public String getLogLevel() {
    return this.logLevel;
  }

  public void setLogLevel(String logLevel) {
    checkReadOnly();
    this.logLevel = logLevel;
    configChanged();
  }

  public int getLogDiskSpaceLimit() {
    return this.logDiskSpaceLimit;
  }

  public void setLogDiskSpaceLimit(int limit) {
    checkReadOnly();
    this.logDiskSpaceLimit = limit;
    configChanged();
  }

  public int getLogFileSizeLimit() {
    return this.logFileSizeLimit;
  }

  public void setLogFileSizeLimit(int limit) {
    checkReadOnly();
    this.logFileSizeLimit = limit;
    configChanged();
  }

  /** Returns the refreshInterval in seconds */
  public int getRefreshInterval() {
    return this.refreshInterval;
  }

  /** Sets the refreshInterval in seconds */
  public void setRefreshInterval(int timeInSecs) {
    checkReadOnly();
    this.refreshInterval = timeInSecs;
    configChanged();
  }

  /**
   * Makes sure that the mcast port and locators are correct and consistent.
   *
   * @throws IllegalArgumentException If configuration is not valid
   */
  public void validate() {
    if (this.getMcastPort() < MIN_MCAST_PORT || this.getMcastPort() > MAX_MCAST_PORT) {
      throw new IllegalArgumentException(
          LocalizedStrings
              .DistributedSystemConfigImpl_MCASTPORT_MUST_BE_AN_INTEGER_INCLUSIVELY_BETWEEN_0_AND_1
              .toLocalizedString(
                  new Object[] {Integer.valueOf(MIN_MCAST_PORT), Integer.valueOf(MAX_MCAST_PORT)}));
    }

    // disabled in 5.1 - multicast and locators can be used together
    // if (!DEFAULT_LOCATORS.equals(this.getLocators()) &&
    //    this.mcastPort > 0) {
    //  throw new IllegalArgumentException(
    //    "mcastPort must be zero when locators are specified");
    // }

    LogWriterImpl.levelNameToCode(this.logLevel);

    if (this.logFileSizeLimit < MIN_LOG_FILE_SIZE_LIMIT
        || this.logFileSizeLimit > MAX_LOG_FILE_SIZE_LIMIT) {
      throw new IllegalArgumentException(
          LocalizedStrings
              .DistributedSystemConfigImpl_LOGFILESIZELIMIT_MUST_BE_AN_INTEGER_BETWEEN_0_AND_1
              .toLocalizedString(
                  new Object[] {
                    Integer.valueOf(MIN_LOG_FILE_SIZE_LIMIT),
                    Integer.valueOf(MAX_LOG_FILE_SIZE_LIMIT)
                  }));
    }

    if (this.logDiskSpaceLimit < MIN_LOG_DISK_SPACE_LIMIT
        || this.logDiskSpaceLimit > MAX_LOG_DISK_SPACE_LIMIT) {
      throw new IllegalArgumentException(
          LocalizedStrings
              .DistributedSystemConfigImpl_LOGDISKSPACELIMIT_MUST_BE_AN_INTEGER_BETWEEN_0_AND_1
              .toLocalizedString(
                  new Object[] {
                    Integer.valueOf(MIN_LOG_DISK_SPACE_LIMIT),
                    Integer.valueOf(MAX_LOG_DISK_SPACE_LIMIT)
                  }));
    }

    parseEntityConfigXMLFile();
  }

  /** Makes a deep copy of this config object. */
  @Override
  public Object clone() throws CloneNotSupportedException {
    DistributedSystemConfigImpl other = (DistributedSystemConfigImpl) super.clone();
    other.system = null;
    other.cacheServerConfigs = new HashSet();
    other.locatorConfigs = new HashSet();

    DistributionLocatorConfig[] myLocators = this.getDistributionLocatorConfigs();
    for (int i = 0; i < myLocators.length; i++) {
      DistributionLocatorConfig locator = myLocators[i];
      other.addDistributionLocatorConfig((DistributionLocatorConfig) locator.clone());
    }

    CacheServerConfig[] myCacheServers = this.getCacheServerConfigs();
    for (int i = 0; i < myCacheServers.length; i++) {
      CacheServerConfig cacheServer = myCacheServers[i];
      other.addCacheServerConfig((CacheServerConfig) cacheServer.clone());
    }

    return other;
  }

  @Override
  public String toString() {
    StringBuffer buf = new StringBuffer(1000);
    String lf = System.getProperty("line.separator");
    if (lf == null) lf = ",";

    buf.append("DistributedSystemConfig(");
    buf.append(lf);
    buf.append("  system-name=");
    buf.append(String.valueOf(this.systemName));
    buf.append(lf);
    buf.append("  " + MCAST_ADDRESS_NAME + "=");
    buf.append(String.valueOf(this.mcastAddress));
    buf.append(lf);
    buf.append("  " + MCAST_PORT_NAME + "=");
    buf.append(String.valueOf(this.mcastPort));
    buf.append(lf);
    buf.append("  " + LOCATORS_NAME + "=");
    buf.append(String.valueOf(this.locators));
    buf.append(lf);
    buf.append("  " + MEMBERSHIP_PORT_RANGE_NAME + "=");
    buf.append(getMembershipPortRange());
    buf.append(lf);
    buf.append("  " + BIND_ADDRESS_NAME + "=");
    buf.append(String.valueOf(this.bindAddress));
    buf.append(lf);
    buf.append("  " + TCP_PORT_NAME + "=" + this.tcpPort);
    buf.append(lf);
    buf.append("  " + DistributionConfig.DISABLE_TCP_NAME + "=");
    buf.append(String.valueOf(this.disableTcp));
    buf.append(lf);
    buf.append("  " + DistributionConfig.DISABLE_AUTO_RECONNECT_NAME + "=");
    buf.append(String.valueOf(this.disableAutoReconnect));
    buf.append(lf);
    buf.append("  " + REMOTE_COMMAND_NAME + "=");
    buf.append(String.valueOf(this.remoteCommand));
    buf.append(lf);
    buf.append("  " + SSL_ENABLED_NAME + "=");
    buf.append(String.valueOf(this.sslEnabled));
    buf.append(lf);
    buf.append("  " + SSL_CIPHERS_NAME + "=");
    buf.append(String.valueOf(this.sslCiphers));
    buf.append(lf);
    buf.append("  " + SSL_PROTOCOLS_NAME + "=");
    buf.append(String.valueOf(this.sslProtocols));
    buf.append(lf);
    buf.append("  " + SSL_REQUIRE_AUTHENTICATION_NAME + "=");
    buf.append(String.valueOf(this.sslAuthenticationRequired));
    buf.append(lf);
    buf.append("  " + LOG_FILE_NAME + "=");
    buf.append(String.valueOf(this.logFile));
    buf.append(lf);
    buf.append("  " + LOG_LEVEL_NAME + "=");
    buf.append(String.valueOf(this.logLevel));
    buf.append(lf);
    buf.append("  " + LOG_DISK_SPACE_LIMIT_NAME + "=");
    buf.append(String.valueOf(this.logDiskSpaceLimit));
    buf.append(lf);
    buf.append("  " + LOG_FILE_SIZE_LIMIT_NAME + "=");
    buf.append(String.valueOf(this.logFileSizeLimit));
    buf.append(lf);
    buf.append("  " + REFRESH_INTERVAL_NAME + "=");
    buf.append(String.valueOf(this.refreshInterval));
    buf.append(")");
    return buf.toString();
  }
}
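
// Illustrative usage sketch (not part of the sources above, and hypothetical): how the mutators
// and validate() on the config class above are meant to be used together. The variable "config"
// is assumed to be an existing, writable DistributedSystemConfigImpl obtained elsewhere; the
// values are placeholders.
//
//   config.setLogLevel("config");       // each setter runs checkReadOnly() and configChanged()
//   config.setLogFileSizeLimit(100);    // must lie within MIN/MAX_LOG_FILE_SIZE_LIMIT
//   config.setLogDiskSpaceLimit(1000);  // must lie within MIN/MAX_LOG_DISK_SPACE_LIMIT
//   try {
//     config.validate();                // rejects a bad mcast port or out-of-range log limits
//   } catch (IllegalArgumentException e) {
//     // configuration is inconsistent; fix it before using it to define a system
//   }
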
public class GMSLocator implements Locator, NetLocator {

  /* package */ static final int LOCATOR_FILE_STAMP = 0x7b8cf741;

  private static final Logger logger = LogService.getLogger();

  private final boolean usePreferredCoordinators;
  private final boolean networkPartitionDetectionEnabled;
  private final String locatorString;
  private final List<InetSocketAddress> locators;
  private Services services;
  private final LocatorStats stats;
  private InternalDistributedMember localAddress;

  private Set<InternalDistributedMember> registrants = new HashSet<InternalDistributedMember>();

  /** The current membership view, or one recovered from disk. This is a copy-on-write variable. */
  private transient NetView view;

  private File viewFile;

  /**
   * @param bindAddress network address that TcpServer will bind to
   * @param stateFile the file to persist state to/recover from
   * @param locatorString location of other locators (bootstrapping, failover)
   * @param usePreferredCoordinators true if the membership coordinator should be a Locator
   * @param networkPartitionDetectionEnabled true if network partition detection is enabled
   * @param stats the locator statistics object
   */
  public GMSLocator(
      InetAddress bindAddress,
      File stateFile,
      String locatorString,
      boolean usePreferredCoordinators,
      boolean networkPartitionDetectionEnabled,
      LocatorStats stats) {
    this.usePreferredCoordinators = usePreferredCoordinators;
    this.networkPartitionDetectionEnabled = networkPartitionDetectionEnabled;
    this.locatorString = locatorString;
    if (this.locatorString == null || this.locatorString.length() == 0) {
      this.locators = new ArrayList<InetSocketAddress>(0);
    } else {
      this.locators = GMSUtil.parseLocators(locatorString, bindAddress);
    }
    this.viewFile = stateFile;
    this.stats = stats;
  }
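
  // Illustrative construction sketch (not part of this class, and hypothetical): wiring up a
  // GMSLocator as documented on the constructor above. The bind address, state file, locator
  // string and stats object are placeholders; real callers take them from the locator's startup
  // configuration.
  //
  //   InetAddress bind = InetAddress.getLocalHost();
  //   File stateFile = new File("locator10334view.dat");
  //   GMSLocator peerLocator =
  //       new GMSLocator(bind, stateFile, "host1[10334],host2[10334]",
  //           true /* prefer locators as coordinators */,
  //           true /* network partition detection */, new LocatorStats());
  //   // later, once membership services are available:
  //   // peerLocator.setMembershipManager(mgr);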

  @Override
  public boolean setMembershipManager(MembershipManager mgr) {
    if (services == null || services.isStopped()) {
      logger.info("Peer locator is connecting to local membership services");
      services = ((GMSMembershipManager) mgr).getServices();
      localAddress = services.getMessenger().getMemberID();
      services.setLocator(this);
      NetView newView = services.getJoinLeave().getView();
      if (newView != null) {
        this.view = newView;
      }
      return true;
    }
    return false;
  }

  @Override
  public void init(TcpServer server) throws InternalGemFireException {
    logger.info(
        "GemFire peer location service starting.  Other locators: {}  Locators preferred as coordinators: {}  Network partition detection enabled: {}  View persistence file: {}",
        locatorString,
        usePreferredCoordinators,
        networkPartitionDetectionEnabled,
        viewFile);
    recover();
  }

  private void findServices() {
    InternalDistributedSystem sys = InternalDistributedSystem.getAnyInstance();
    if (sys != null && services == null) {
      logger.info("Peer locator found distributed system " + sys);
      setMembershipManager(sys.getDM().getMembershipManager());
    }
  }

  @Override
  public void installView(NetView view) {
    synchronized (this.registrants) {
      registrants.clear();
    }
    logger.info("Peer locator received new membership view: " + view);
    this.view = view;
    saveView(view);
  }

  @Override
  public Object processRequest(Object request) throws IOException {
    Object response = null;

    if (logger.isDebugEnabled()) {
      logger.debug("Peer locator processing {}", request);
    }

    if (localAddress == null && services != null) {
      localAddress = services.getMessenger().getMemberID();
    }

    if (request instanceof GetViewRequest) {
      if (view != null) {
        response = new GetViewResponse(view);
      }
    } else if (request instanceof FindCoordinatorRequest) {
      FindCoordinatorRequest findRequest = (FindCoordinatorRequest) request;

      if (findRequest.getMemberID() != null) {
        InternalDistributedMember coord = null;

        // at this level we want to return the coordinator known to membership services,
        // which may be more up-to-date than the one known by the membership manager
        if (view == null) {
          findServices();
        }

        boolean fromView = false;
        int viewId = -1;
        NetView v = this.view;

        if (v != null) {
          // if the ID of the requester matches an entry in the membership view then remove
          // that entry - it's obviously an old member since the ID has been reused
          InternalDistributedMember rid = findRequest.getMemberID();
          for (InternalDistributedMember id : v.getMembers()) {
            if (rid.compareTo(id, false) == 0) {
              NetView newView = new NetView(v, v.getViewId());
              newView.remove(id);
              v = newView;
              break;
            }
          }
          viewId = v.getViewId();
          if (viewId > findRequest.getLastViewId()) {
            // ignore the request's rejectedCoordinators if the view has changed
            coord = v.getCoordinator(Collections.<InternalDistributedMember>emptyList());
          } else {
            coord = v.getCoordinator(findRequest.getRejectedCoordinators());
          }
          logger.debug("Peer locator: coordinator from view is {}", coord);
          fromView = true;
        }

        if (coord == null) {
          // find the "oldest" registrant
          Collection<InternalDistributedMember> rejections = findRequest.getRejectedCoordinators();
          if (rejections == null) {
            rejections = Collections.emptyList();
          }
          synchronized (registrants) {
            registrants.add(findRequest.getMemberID());
            if (services != null) {
              coord = services.getJoinLeave().getMemberID();
            }
            for (InternalDistributedMember mbr : registrants) {
              if (mbr != coord && (coord == null || mbr.compareTo(coord) < 0)) {
                if (!rejections.contains(mbr)
                    && (mbr.getNetMember().preferredForCoordinator()
                        || !mbr.getNetMember().splitBrainEnabled())) {
                  coord = mbr;
                }
              }
            }
            logger.debug("Peer locator: coordinator from registrations is {}", coord);
          }
        }

        synchronized (registrants) {
          response =
              new FindCoordinatorResponse(
                  coord,
                  localAddress,
                  fromView,
                  view,
                  new HashSet<InternalDistributedMember>(registrants),
                  this.networkPartitionDetectionEnabled,
                  this.usePreferredCoordinators);
        }
      }
    }
    if (logger.isDebugEnabled()) {
      logger.debug("Peer locator returning {}", response);
    }
    return response;
  }
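
  // Illustrative client-side sketch (not part of this class): how a peer could ask this locator
  // for the current view, mirroring the GetViewRequest handling above and reusing the same
  // TcpClient.requestToServer(...) call that recover(InetSocketAddress) uses further below. Host,
  // port and timeout are placeholder values.
  //
  //   Object reply = TcpClient.requestToServer(
  //       InetAddress.getByName("locator-host"), 10334, new GetViewRequest(), 20000, true);
  //   if (reply instanceof GetViewResponse) {
  //     NetView current = ((GetViewResponse) reply).getView();
  //     // inspect current.getMembers(), the view id, etc.
  //   }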

  public void saveView(NetView view) {
    if (viewFile == null) {
      return;
    }
    if (!viewFile.delete() && viewFile.exists()) {
      logger.warn(
          "Peer locator is unable to delete persistent membership information in "
              + viewFile.getAbsolutePath());
    }
    try (ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(viewFile))) {
      oos.writeInt(LOCATOR_FILE_STAMP);
      oos.writeInt(Version.CURRENT_ORDINAL);
      DataSerializer.writeObject(view, oos);
    } catch (Exception e) {
      logger.warn(
          "Peer locator encountered an error writing current membership to disk.  Disabling persistence.  Care should be taken when bouncing this locator as it will not be able to recover knowledge of the running distributed system",
          e);
      this.viewFile = null;
    }
  }
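
  // For reference, the view file written above has this layout (read back by recoverFromFile
  // below) inside a plain ObjectOutputStream:
  //   int LOCATOR_FILE_STAMP       -- sanity marker (0x7b8cf741)
  //   int Version.CURRENT_ORDINAL  -- serialization version the view was written with
  //   NetView                      -- written via DataSerializer.writeObject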

  @Override
  public void endRequest(Object request, long startTime) {
    stats.endLocatorRequest(startTime);
  }

  @Override
  public void endResponse(Object request, long startTime) {
    stats.endLocatorResponse(startTime);
  }

  @Override
  public void shutDown() {
    // nothing to do for GMSLocator
  }

  // test hook
  public List<InternalDistributedMember> getMembers() {
    if (view != null) {
      return new ArrayList<InternalDistributedMember>(view.getMembers());
    } else {
      synchronized (registrants) {
        return new ArrayList<InternalDistributedMember>(registrants);
      }
    }
  }

  @Override
  public void restarting(
      DistributedSystem ds, GemFireCache cache, SharedConfiguration sharedConfig) {
    setMembershipManager(((InternalDistributedSystem) ds).getDM().getMembershipManager());
  }

  public void recover() throws InternalGemFireException {
    if (!recoverFromOthers()) {
      recoverFromFile(viewFile);
    }
  }

  private boolean recoverFromOthers() {
    for (InetSocketAddress other : this.locators) {
      if (recover(other)) {
        logger.info("Peer locator recovered state from " + other);
        return true;
      }
    } // for
    return false;
  }

  private boolean recover(InetSocketAddress other) {
    try {
      logger.info("Peer locator attempting to recover from " + other);
      Object response =
          TcpClient.requestToServer(
              other.getAddress(), other.getPort(), new GetViewRequest(), 20000, true);
      if (response instanceof GetViewResponse) {
        this.view = ((GetViewResponse) response).getView();
        logger.info("Peer locator recovered initial membership of {}", view);
        return true;
      }
    } catch (IOException | ClassNotFoundException ignore) {
      logger.debug(
          "Peer locator could not recover membership view from {}: {}", other, ignore.getMessage());
    }
    logger.info("Peer locator was unable to recover state from this locator");
    return false;
  }

  /* package */ boolean recoverFromFile(File file) throws InternalGemFireException {
    if (!file.exists()) {
      return false;
    }

    logger.info("Peer locator recovering from " + file.getAbsolutePath());
    try (ObjectInput ois = new ObjectInputStream(new FileInputStream(file))) {
      if (ois.readInt() != LOCATOR_FILE_STAMP) {
        return false;
      }

      ObjectInput ois2 = ois;
      int version = ois2.readInt();
      if (version != Version.CURRENT_ORDINAL) {
        Version geodeVersion = Version.fromOrdinalNoThrow((short) version, false);
        logger.info("Peer locator found that persistent view was written with {}", geodeVersion);
        ois2 = new VersionedObjectInput(ois2, geodeVersion);
      }

      Object o = DataSerializer.readObject(ois2);
      this.view = (NetView) o;

      logger.info("Peer locator initial membership is " + view);
      return true;

    } catch (Exception e) {
      String msg = LOCATOR_UNABLE_TO_RECOVER_VIEW.toLocalizedString(file.toString());
      logger.warn(msg, e);
      if (!file.delete() && file.exists()) {
        logger.warn("Peer locator was unable to recover from or delete " + file);
        this.viewFile = null;
      }
      throw new InternalGemFireException(msg, e);
    }
  }
}
/**
 * @author darrel
 * @since 7.0
 */
public class JmxManagerAdvisor extends DistributionAdvisor {

  private static final Logger logger = LogService.getLogger();

  private JmxManagerAdvisor(DistributionAdvisee advisee) {
    super(advisee);
    JmxManagerProfile p =
        new JmxManagerProfile(getDistributionManager().getId(), incrementAndGetVersion());
    advisee.fillInProfile(p);
    ((JmxManagerAdvisee) advisee).initProfile(p);
  }

  public static JmxManagerAdvisor createJmxManagerAdvisor(DistributionAdvisee advisee) {
    JmxManagerAdvisor advisor = new JmxManagerAdvisor(advisee);
    advisor.initialize();
    return advisor;
  }

  @Override
  public String toString() {
    return "JmxManagerAdvisor for " + getAdvisee();
  }

  public void broadcastChange() {
    try {
      Set<InternalDistributedMember> recips = adviseGeneric(); // for now just tell everyone
      JmxManagerProfile p =
          new JmxManagerProfile(getDistributionManager().getId(), incrementAndGetVersion());
      getAdvisee().fillInProfile(p);
      JmxManagerProfileMessage.send(getAdvisee().getSystem().getDistributionManager(), recips, p);
    } catch (CancelException ignore) {
    }
  }

  @SuppressWarnings("unchecked")
  public List<JmxManagerProfile> adviseAlreadyManaging() {
    return fetchProfiles(
        new Filter() {
          public boolean include(Profile profile) {
            assert profile instanceof JmxManagerProfile;
            JmxManagerProfile jmxProfile = (JmxManagerProfile) profile;
            return jmxProfile.isJmxManagerRunning();
          }
        });
  }

  @SuppressWarnings("unchecked")
  public List<JmxManagerProfile> adviseWillingToManage() {
    return fetchProfiles(
        new Filter() {
          public boolean include(Profile profile) {
            assert profile instanceof JmxManagerProfile;
            JmxManagerProfile jmxProfile = (JmxManagerProfile) profile;
            return jmxProfile.isJmxManager();
          }
        });
  }
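
  // Illustrative usage sketch (not part of this class, and hypothetical): a caller deciding
  // whether a JMX manager is already running somewhere in the distributed system, using the
  // advise methods above. The advisor is assumed to come from the cache, just as the profile
  // message processing below uses GemFireCacheImpl.getInstance().getJmxManagerAdvisor().
  //
  //   JmxManagerAdvisor advisor = GemFireCacheImpl.getInstance().getJmxManagerAdvisor();
  //   if (advisor.adviseAlreadyManaging().isEmpty()) {
  //     List<JmxManagerProfile> candidates = advisor.adviseWillingToManage();
  //     // pick a candidate; this member's own profile, if it qualifies, is always first
  //   }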

  @Override
  protected Profile instantiateProfile(InternalDistributedMember memberId, int version) {
    return new JmxManagerProfile(memberId, version);
  }

  /** Overridden to also include our profile. If our profile is included it will always be first. */
  @Override
  protected List /*<Profile>*/ fetchProfiles(Filter f) {
    initializationGate();
    List result = null;
    {
      JmxManagerAdvisee advisee = (JmxManagerAdvisee) getAdvisee();
      Profile myp = advisee.getMyMostRecentProfile();
      if (f == null || f.include(myp)) {
        if (result == null) {
          result = new ArrayList();
        }
        result.add(myp);
      }
    }
    Profile[] locProfiles = this.profiles; // grab current profiles
    for (int i = 0; i < locProfiles.length; i++) {
      Profile profile = locProfiles[i];
      if (f == null || f.include(profile)) {
        if (result == null) {
          result = new ArrayList(locProfiles.length);
        }
        result.add(profile);
      }
    }
    if (result == null) {
      result = Collections.EMPTY_LIST;
    } else {
      result = Collections.unmodifiableList(result);
    }
    return result;
  }
  /** Message used to push event updates to remote VMs */
  public static class JmxManagerProfileMessage extends HighPriorityDistributionMessage {
    private volatile JmxManagerProfile profile;
    private volatile int processorId;

    /** Default constructor used for de-serialization (used during receipt) */
    public JmxManagerProfileMessage() {}

    @Override
    public boolean sendViaJGroups() {
      return true;
    }

    /**
     * Constructor used to send
     *
     * @param recips the recipients of the message
     * @param p the profile to send
     */
    private JmxManagerProfileMessage(
        final Set<InternalDistributedMember> recips, final JmxManagerProfile p) {
      setRecipients(recips);
      this.processorId = 0;
      this.profile = p;
    }

    /* (non-Javadoc)
     * @see com.gemstone.gemfire.distributed.internal.DistributionMessage#process(com.gemstone.gemfire.distributed.internal.DistributionManager)
     */
    @Override
    protected void process(DistributionManager dm) {
      Throwable thr = null;
      JmxManagerProfile p = null;
      try {
        final GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
        if (cache != null && !cache.isClosed()) {
          final JmxManagerAdvisor adv = cache.getJmxManagerAdvisor();
          p = this.profile;
          if (p != null) {
            adv.putProfile(p);
          }
        } else {
          if (logger.isDebugEnabled()) {
            logger.debug("No cache {}", this);
          }
        }
      } catch (CancelException e) {
        if (logger.isDebugEnabled()) {
          logger.debug("Cache closed, ", this);
        }
      } catch (VirtualMachineError err) {
        SystemFailure.initiateFailure(err);
        // If this ever returns, rethrow the error.  We're poisoned
        // now, so don't let this thread continue.
        throw err;
      } catch (Throwable t) {
        // Whenever you catch Error or Throwable, you must also
        // catch VirtualMachineError (see above).  However, there is
        // _still_ a possibility that you are dealing with a cascading
        // error condition, so you also need to check to see if the JVM
        // is still usable:
        SystemFailure.checkFailure();
        thr = t;
      } finally {
        if (thr != null) {
          dm.getCancelCriterion().checkCancelInProgress(null);
          logger.info(
              LocalizedMessage.create(
                  LocalizedStrings.ResourceAdvisor_MEMBER_CAUGHT_EXCEPTION_PROCESSING_PROFILE,
                  new Object[] {p, toString()},
                  thr));
        }
      }
    }

    /* (non-Javadoc)
     * @see com.gemstone.gemfire.internal.DataSerializableFixedID#getDSFID()
     */
    public int getDSFID() {
      return JMX_MANAGER_PROFILE_MESSAGE;
    }

    @Override
    public void fromData(DataInput in) throws IOException, ClassNotFoundException {
      super.fromData(in);
      this.processorId = in.readInt();
      this.profile = (JmxManagerProfile) DataSerializer.readObject(in);
    }

    @Override
    public void toData(DataOutput out) throws IOException {
      super.toData(out);
      out.writeInt(this.processorId);
      DataSerializer.writeObject(this.profile, out);
    }

    /**
     * Sends the profile to the provided members.
     *
     * @param dm the distribution manager used to send the message
     * @param recips the recipients of the message
     * @param profile the profile to send
     */
    public static void send(
        final DM dm, Set<InternalDistributedMember> recips, JmxManagerProfile profile) {
      JmxManagerProfileMessage r = new JmxManagerProfileMessage(recips, profile);
      dm.putOutgoing(r);
    }

    @Override
    public String getShortClassName() {
      return "JmxManagerProfileMessage";
    }

    @Override
    public String toString() {
      StringBuilder sb = new StringBuilder();
      sb.append(getShortClassName())
          .append(" (processorId=")
          .append(this.processorId)
          .append("; profile=")
          .append(this.profile);
      sb.append(")");
      return sb.toString();
    }
  }

  public static class JmxManagerProfile extends Profile {

    private boolean jmxManager;
    private String host;
    private int port;
    private boolean ssl;
    private boolean started;

    // Constructor for de-serialization
    public JmxManagerProfile() {}

    public boolean isJmxManager() {
      return this.jmxManager;
    }

    public boolean isJmxManagerRunning() {
      return this.started;
    }

    public void setInfo(
        boolean jmxManager2, String host2, int port2, boolean ssl2, boolean started2) {
      this.jmxManager = jmxManager2;
      this.host = host2;
      this.port = port2;
      this.ssl = ssl2;
      this.started = started2;
    }

    public String getHost() {
      return this.host;
    }

    public int getPort() {
      return this.port;
    }

    public boolean getSsl() {
      return this.ssl;
    }

    // Constructor for sending purposes
    public JmxManagerProfile(InternalDistributedMember memberId, int version) {
      super(memberId, version);
    }

    public StringBuilder getToStringHeader() {
      return new StringBuilder("JmxManagerAdvisor.JmxManagerProfile");
    }

    @Override
    public void fillInToString(StringBuilder sb) {
      super.fillInToString(sb);
      synchronized (this) {
        if (this.jmxManager) {
          sb.append("; jmxManager");
        }
        sb.append("; host=").append(this.host).append("; port=").append(this.port);
        if (this.ssl) {
          sb.append("; ssl");
        }
        if (this.started) {
          sb.append("; started");
        }
      }
    }

    @Override
    public void processIncoming(
        DistributionManager dm,
        String adviseePath,
        boolean removeProfile,
        boolean exchangeProfiles,
        final List<Profile> replyProfiles) {
      final GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
      if (cache != null && !cache.isClosed()) {
        handleDistributionAdvisee(
            cache.getJmxManagerAdvisor().getAdvisee(),
            removeProfile,
            exchangeProfiles,
            replyProfiles);
      }
    }

    @Override
    public void fromData(DataInput in) throws IOException, ClassNotFoundException {
      super.fromData(in);
      this.jmxManager = DataSerializer.readPrimitiveBoolean(in);
      this.host = DataSerializer.readString(in);
      this.port = DataSerializer.readPrimitiveInt(in);
      this.ssl = DataSerializer.readPrimitiveBoolean(in);
      this.started = DataSerializer.readPrimitiveBoolean(in);
    }

    @Override
    public void toData(DataOutput out) throws IOException {
      boolean tmpJmxManager;
      String tmpHost;
      int tmpPort;
      boolean tmpSsl;
      boolean tmpStarted;
      synchronized (this) {
        tmpJmxManager = this.jmxManager;
        tmpHost = this.host;
        tmpPort = this.port;
        tmpSsl = this.ssl;
        tmpStarted = this.started;
      }
      super.toData(out);
      DataSerializer.writePrimitiveBoolean(tmpJmxManager, out);
      DataSerializer.writeString(tmpHost, out);
      DataSerializer.writePrimitiveInt(tmpPort, out);
      DataSerializer.writePrimitiveBoolean(tmpSsl, out);
      DataSerializer.writePrimitiveBoolean(tmpStarted, out);
    }

    @Override
    public int getDSFID() {
      return JMX_MANAGER_PROFILE;
    }
  }
}
/**
 * @author Eric Zoerner
 * @author Asif
 * @version $Revision: 1.2 $
 */
public class DefaultQueryService implements QueryService {
  private static final Logger logger = LogService.getLogger();

  /**
   * System property to allow queries on regions with heterogeneous objects. By default it is set
   * to true.
   */
  public static final boolean QUERY_HETEROGENEOUS_OBJECTS =
      Boolean.valueOf(System.getProperty("gemfire.QueryService.QueryHeterogeneousObjects", "true"))
          .booleanValue();

  public static boolean COPY_ON_READ_AT_ENTRY_LEVEL =
      Boolean.valueOf(System.getProperty("gemfire.QueryService.CopyOnReadAtEntryLevel", "false"))
          .booleanValue();

  /** Test purpose only */
  public static boolean TEST_QUERY_HETEROGENEOUS_OBJECTS = false;

  private final InternalCache cache;

  private InternalPool pool;

  private Map<Region, HashSet<IndexCreationData>> indexDefinitions =
      Collections.synchronizedMap(new HashMap<Region, HashSet<IndexCreationData>>());

  public DefaultQueryService(InternalCache cache) {
    if (cache == null)
      throw new IllegalArgumentException(
          LocalizedStrings.DefaultQueryService_CACHE_MUST_NOT_BE_NULL.toLocalizedString());
    this.cache = cache;
  }

  /**
   * Constructs a new <code>Query</code> object. Uses the default namespace, which is the Objects
   * Context of the current application.
   *
   * @return The new <code>Query</code> object.
   * @throws IllegalArgumentException if the query syntax is invalid.
   * @see com.gemstone.gemfire.cache.query.Query
   */
  public Query newQuery(String queryString) {
    if (QueryMonitor.isLowMemory()) {
      String reason =
          LocalizedStrings.QueryMonitor_LOW_MEMORY_CANCELED_QUERY.toLocalizedString(
              QueryMonitor.getMemoryUsedDuringLowMemory());
      throw new QueryExecutionLowMemoryException(reason);
    }
    if (queryString == null)
      throw new QueryInvalidException(
          LocalizedStrings.DefaultQueryService_THE_QUERY_STRING_MUST_NOT_BE_NULL
              .toLocalizedString());
    if (queryString.length() == 0)
      throw new QueryInvalidException(
          LocalizedStrings.DefaultQueryService_THE_QUERY_STRING_MUST_NOT_BE_EMPTY
              .toLocalizedString());
    DefaultQuery query = new DefaultQuery(queryString, this.cache);
    ServerProxy serverProxy = pool == null ? null : new ServerProxy(pool);
    query.setServerProxy(serverProxy);
    return query;
  }
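
  // Illustrative usage sketch (not part of this class): the typical newQuery flow, assuming a
  // QueryService obtained from the cache and a region named /portfolios with a numeric "id"
  // field -- the region and field names are placeholders.
  //
  //   QueryService qs = cache.getQueryService();
  //   Query query = qs.newQuery("SELECT DISTINCT * FROM /portfolios p WHERE p.id > 10");
  //   try {
  //     Object results = query.execute();   // typically a SelectResults
  //   } catch (Exception e) {
  //     // e.g. TypeMismatchException or NameResolutionException from execution
  //   }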

  public Query newQuery(String queryString, ProxyCache proxyCache) {
    Query query = newQuery(queryString);
    ((DefaultQuery) query).setProxyCache(proxyCache);
    return query;
  }

  public Index createHashIndex(String indexName, String indexedExpression, String fromClause)
      throws IndexNameConflictException, IndexExistsException, RegionNotFoundException {
    return createHashIndex(indexName, indexedExpression, fromClause, null);
  }

  public Index createHashIndex(
      String indexName, String indexedExpression, String fromClause, String imports)
      throws IndexNameConflictException, IndexExistsException, RegionNotFoundException {
    return createIndex(indexName, IndexType.HASH, indexedExpression, fromClause, imports);
  }

  public Index createIndex(String indexName, String indexedExpression, String fromClause)
      throws IndexNameConflictException, IndexExistsException, RegionNotFoundException {
    return createIndex(indexName, IndexType.FUNCTIONAL, indexedExpression, fromClause, null);
  }

  public Index createIndex(
      String indexName, String indexedExpression, String fromClause, String imports)
      throws IndexNameConflictException, IndexExistsException, RegionNotFoundException {
    return createIndex(indexName, IndexType.FUNCTIONAL, indexedExpression, fromClause, imports);
  }

  public Index createKeyIndex(String indexName, String indexedExpression, String fromClause)
      throws IndexNameConflictException, IndexExistsException, RegionNotFoundException {
    return createIndex(indexName, IndexType.PRIMARY_KEY, indexedExpression, fromClause, null);
  }

  public Index createIndex(
      String indexName, IndexType indexType, String indexedExpression, String fromClause)
      throws IndexNameConflictException, IndexExistsException, RegionNotFoundException {
    return createIndex(indexName, indexType, indexedExpression, fromClause, null);
  }

  public Index createIndex(
      String indexName,
      IndexType indexType,
      String indexedExpression,
      String fromClause,
      String imports,
      boolean loadEntries)
      throws IndexNameConflictException, IndexExistsException, RegionNotFoundException {
    return createIndex(
        indexName, indexType, indexedExpression, fromClause, imports, loadEntries, null);
  }
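
  // Illustrative usage sketch (not part of this class): creating a functional index through the
  // three-argument overload above; the index, expression and region names are placeholders.
  //
  //   QueryService qs = cache.getQueryService();
  //   try {
  //     Index idIndex = qs.createIndex("idIndex", "p.id", "/portfolios p");
  //   } catch (IndexNameConflictException | IndexExistsException | RegionNotFoundException e) {
  //     // the index (or its name) already exists, or the region could not be found
  //   }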

  public Index createIndex(
      String indexName,
      IndexType indexType,
      String indexedExpression,
      String fromClause,
      String imports,
      boolean loadEntries,
      Region region)
      throws IndexNameConflictException, IndexExistsException, RegionNotFoundException {

    if (pool != null) {
      throw new UnsupportedOperationException(
          "Index creation on the server is not supported from the client.");
    }
    PartitionedIndex parIndex = null;
    if (region == null) {
      region = getRegionFromPath(imports, fromClause);
    }
    RegionAttributes ra = region.getAttributes();

    // Asif: If the eviction action is overflow-to-disk then do not allow index creation.
    // It is OK to allow index creation in persist-only mode, as the data will always
    // exist in memory.
    // if(ra.getEvictionAttributes().getAction().isOverflowToDisk() ) {
    //  throw new
    // UnsupportedOperationException(LocalizedStrings.DefaultQueryService_INDEX_CREATION_IS_NOT_SUPPORTED_FOR_REGIONS_WHICH_OVERFLOW_TO_DISK_THE_REGION_INVOLVED_IS_0.toLocalizedString(regionPath));
    // }
    // If it is a PR then create the index on all of the local buckets.
    if (((LocalRegion) region).heapThresholdReached.get()
        && !InternalResourceManager.isLowMemoryExceptionDisabled()) {
      LocalRegion lr = (LocalRegion) region;
      throw new LowMemoryException(
          LocalizedStrings.ResourceManager_LOW_MEMORY_FOR_INDEX.toLocalizedString(region.getName()),
          lr.getHeapThresholdReachedMembers());
    }
    if (region instanceof PartitionedRegion) {
      try {
        parIndex =
            (PartitionedIndex)
                ((PartitionedRegion) region)
                    .createIndex(
                        false,
                        indexType,
                        indexName,
                        indexedExpression,
                        fromClause,
                        imports,
                        loadEntries);
      } catch (ForceReattemptException ex) {
        region
            .getCache()
            .getLoggerI18n()
            .info(
                LocalizedStrings
                    .DefaultQueryService_EXCEPTION_WHILE_CREATING_INDEX_ON_PR_DEFAULT_QUERY_PROCESSOR,
                ex);
      } catch (IndexCreationException exx) {
        region
            .getCache()
            .getLoggerI18n()
            .info(
                LocalizedStrings
                    .DefaultQueryService_EXCEPTION_WHILE_CREATING_INDEX_ON_PR_DEFAULT_QUERY_PROCESSOR,
                exx);
      }
      return parIndex;

    } else {

      IndexManager indexManager = IndexUtils.getIndexManager(region, true);
      Index index =
          indexManager.createIndex(
              indexName,
              indexType,
              indexedExpression,
              fromClause,
              imports,
              null,
              null,
              loadEntries);

      return index;
    }
  }

  public Index createIndex(
      String indexName,
      IndexType indexType,
      String indexedExpression,
      String fromClause,
      String imports)
      throws IndexNameConflictException, IndexExistsException, RegionNotFoundException {

    return createIndex(indexName, indexType, indexedExpression, fromClause, imports, true);
  }

  private Region getRegionFromPath(String imports, String fromClause)
      throws RegionNotFoundException {
    QCompiler compiler = new QCompiler();
    if (imports != null) {
      compiler.compileImports(imports);
    }
    List list = compiler.compileFromClause(fromClause);
    CompiledValue cv =
        QueryUtils.obtainTheBottomMostCompiledValue(
            ((CompiledIteratorDef) list.get(0)).getCollectionExpr());
    String regionPath = null;
    if (cv.getType() == OQLLexerTokenTypes.RegionPath) {
      regionPath = ((CompiledRegion) cv).getRegionPath();
    } else {
      throw new RegionNotFoundException(
          LocalizedStrings
              .DefaultQueryService_DEFAULTQUERYSERVICECREATEINDEXFIRST_ITERATOR_OF_INDEX_FROM_CLAUSE_DOES_NOT_EVALUATE_TO_A_REGION_PATH_THE_FROM_CLAUSE_USED_FOR_INDEX_CREATION_IS_0
              .toLocalizedString(fromClause));
    }
    Region region = cache.getRegion(regionPath);
    if (region == null) {
      throw new RegionNotFoundException(
          LocalizedStrings.DefaultQueryService_REGION_0_NOT_FOUND_FROM_1.toLocalizedString(
              new Object[] {regionPath, fromClause}));
    }
    return region;
  }

  /**
   * Asif: Gets an exact match index (match level 0).
   *
   * @param regionPath String containing the region name
   * @param definitions An array of String objects containing canonicalized definitions of
   *     RuntimeIterators. A canonicalized definition of a RuntimeIterator is the canonicalized
   *     expression obtained from its underlying collection expression.
   * @param indexType IndexType object which can be either of type RangeIndex or PrimaryKey Index
   * @param indexedExpression CompiledValue containing the path expression on which the index needs
   *     to be created
   * @param context ExecutionContext
   * @return IndexData object
   * @throws NameResolutionException
   * @throws TypeMismatchException
   * @throws AmbiguousNameException
   */
  public IndexData getIndex(
      String regionPath,
      String[] definitions,
      IndexType indexType,
      CompiledValue indexedExpression,
      ExecutionContext context)
      throws AmbiguousNameException, TypeMismatchException, NameResolutionException {
    Region region = cache.getRegion(regionPath);
    if (region == null) {
      return null;
    }
    IndexManager indexManager = IndexUtils.getIndexManager(region, true);
    IndexData indexData = indexManager.getIndex(indexType, definitions, indexedExpression, context);
    return indexData;
  }

  public Index getIndex(Region region, String indexName) {

    if (pool != null) {
      throw new UnsupportedOperationException(
          "Index Operation is not supported on the Server Region.");
    }

    // A partitioned region does not have an IndexManager, but its buckets do.
    if (region instanceof PartitionedRegion) {
      return (Index) ((PartitionedRegion) region).getIndex().get(indexName);
    } else {
      IndexManager indexManager = IndexUtils.getIndexManager(region, false);
      if (indexManager == null) return null;
      return indexManager.getIndex(indexName);
    }
  }

  /**
   * Asif: Gets the best matching index which is available. An index with match level equal to 0 is
   * the best index to use, as it implies that the query from-clause iterators belonging to the
   * region exactly match the index from-clause iterators (differences in the relative positions of
   * the iterators do not matter). A match level less than 0 means that the number of iterators in
   * the index result set is more than that present in the query from clause, and hence the index
   * result set will need a cutdown. A match level greater than 0 means that there is definitely at
   * least one iterator in the query from clause beyond the index from-clause iterators, and hence
   * expansion of the index results will definitely be needed. Please note that a match level
   * greater than 0 does not imply that the index from clause does not have an extra iterator in
   * it, too. Hence a match level greater than 0 will definitely mean expansion of index results
   * but may also require a cutdown of results. The order of preference is match level 0, then less
   * than 0, and lastly greater than 0.
   *
   * @param regionPath String containing the region name
   * @param definitions An array of String objects containing canonicalized definitions of
   *     RuntimeIterators. A canonicalized definition of a RuntimeIterator is the canonicalized
   *     expression obtained from its underlying collection expression.
   * @param indexType IndexType object which can be either of type RangeIndex or PrimaryKey Index
   * @param indexedExpression CompiledValue representing the path expression on which the index
   *     needs to be created
   * @param context ExecutionContext object
   * @return IndexData object
   * @throws NameResolutionException
   * @throws TypeMismatchException
   * @throws AmbiguousNameException
   */
  public IndexData getBestMatchIndex(
      String regionPath,
      String definitions[],
      IndexType indexType,
      CompiledValue indexedExpression,
      ExecutionContext context)
      throws AmbiguousNameException, TypeMismatchException, NameResolutionException {
    Region region = cache.getRegion(regionPath);
    if (region == null) {
      return null;
    }
    // return getBestMatchIndex(region, indexType, definitions,
    // indexedExpression);
    IndexManager indexManager = IndexUtils.getIndexManager(region, false);
    if (indexManager == null) {
      return null;
    }
    return indexManager.getBestMatchIndex(indexType, definitions, indexedExpression, context);
  }
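
  // Worked example of the match levels described above (region and iterator names are only
  // illustrative). For an index whose from clause is "/portfolios p":
  //   query from clause "/portfolios p"                  -> match level 0 (exact match)
  //   query from clause "/portfolios p, p.positions pos" -> match level > 0 (query has an extra
  //                                                         iterator; index results need expansion)
  // Conversely, an index defined on "/portfolios p, p.positions pos" used by a query whose from
  // clause is just "/portfolios p" gives a match level < 0, so the index results need a cutdown.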

  public Collection getIndexes() {
    ArrayList allIndexes = new ArrayList();
    Iterator rootRegions = cache.rootRegions().iterator();
    while (rootRegions.hasNext()) {
      Region region = (Region) rootRegions.next();
      Collection indexes = getIndexes(region);
      if (indexes != null) allIndexes.addAll(indexes);
      Iterator subRegions = region.subregions(true).iterator();
      while (subRegions.hasNext()) {
        indexes = getIndexes((Region) subRegions.next());
        if (indexes != null) allIndexes.addAll(indexes);
      }
    }
    return allIndexes;
  }

  public Collection getIndexes(Region region) {

    if (pool != null) {
      throw new UnsupportedOperationException(
          "Index Operation is not supported on the Server Region.");
    }

    if (region instanceof PartitionedRegion) {
      return ((PartitionedRegion) region).getIndexes();
    }
    IndexManager indexManager = IndexUtils.getIndexManager(region, false);
    if (indexManager == null) return null;
    return indexManager.getIndexes();
  }

  public Collection getIndexes(Region region, IndexType indexType) {

    if (pool != null) {
      throw new UnsupportedOperationException(
          "Index Operation is not supported on the Server Region.");
    }

    IndexManager indexManager = IndexUtils.getIndexManager(region, false);
    if (indexManager == null) return null;
    return indexManager.getIndexes(indexType);
  }

  public void removeIndex(Index index) {

    if (pool != null) {
      throw new UnsupportedOperationException(
          "Index Operation is not supported on the Server Region.");
    }

    Region region = index.getRegion();
    if (region instanceof PartitionedRegion) {
      try {
        ((PartitionedRegion) region).removeIndex(index, false);
      } catch (ForceReattemptException ex) {
        logger.info(
            LocalizedMessage.create(
                LocalizedStrings.DefaultQueryService_EXCEPTION_REMOVING_INDEX___0),
            ex);
      }
      return;
    }
    // get write lock for indexes in replicated region
    // for PR lock will be taken in PartitionRegion.removeIndex
    ((AbstractIndex) index).acquireIndexWriteLockForRemove();
    try {
      IndexManager indexManager = ((LocalRegion) index.getRegion()).getIndexManager();
      indexManager.removeIndex(index);
    } finally {
      ((AbstractIndex) index).releaseIndexWriteLockForRemove();
    }
  }

  public void removeIndexes() {
    if (pool != null) {
      throw new UnsupportedOperationException(
          "Index Operation is not supported on the Server Region.");
    }

    Iterator rootRegions = cache.rootRegions().iterator();
    while (rootRegions.hasNext()) {
      Region region = (Region) rootRegions.next();
      Iterator subRegions = region.subregions(true).iterator();
      while (subRegions.hasNext()) {
        removeIndexes((Region) subRegions.next());
      }
      removeIndexes(region);
    }
  }

  public void removeIndexes(Region region) {

    if (pool != null) {
      throw new UnsupportedOperationException(
          "Index Operation is not supported on the Server Region.");
    }

    // Removing indexes on a partitioned region will require sending a message and
    // removing all the local indexes on the local bucket regions.
    if (region instanceof PartitionedRegion) {
      try {
        // not remotely originated
        ((PartitionedRegion) region).removeIndexes(false);
      } catch (ForceReattemptException ex) {
        // will have to throw a proper exception relating to remove index.
        logger.info(
            LocalizedMessage.create(
                LocalizedStrings.DefaultQueryService_EXCEPTION_REMOVING_INDEX___0),
            ex);
      }
    }
    IndexManager indexManager = IndexUtils.getIndexManager(region, false);
    if (indexManager == null) return;

    indexManager.removeIndexes();
  }

  // CqService Related API implementation.

  /**
   * Constructs a new continuous query, represented by an instance of CqQuery. The CqQuery is not
   * executed until the execute method is invoked on the CqQuery.
   *
   * @param queryString the OQL query
   * @param cqAttributes the CqAttributes
   * @return the newly created CqQuery object
   * @throws IllegalArgumentException if queryString or cqAttributes is null
   * @throws IllegalStateException if this method is called from a cache server
   * @throws QueryInvalidException if there is a syntax error in the query
   * @throws CqException if the CQ could not be created, or if there was a failure while creating
   *     or managing the CQ metadata. E.g.: the query string must refer to only one region; joins
   *     are not supported. The query must be a SELECT statement. DISTINCT queries are not
   *     supported. Projections are not supported. Only one iterator in the FROM clause is
   *     supported, and it must be a region path. Bind parameters in the query are not yet supported.
   */
  public CqQuery newCq(String queryString, CqAttributes cqAttributes)
      throws QueryInvalidException, CqException {
    ClientCQ cq = null;
    try {
      cq = (ClientCQ) getCqService().newCq(null, queryString, cqAttributes, this.pool, false);
    } catch (CqExistsException cqe) {
      // Should not throw in here.
      if (logger.isDebugEnabled()) {
        logger.debug("Unable to createCq. Error :{}", cqe.getMessage(), cqe);
      }
    }
    return cq;
  }

  /**
   * Constructs a new continuous query, represented by an instance of CqQuery. The CqQuery is not
   * executed until the execute method is invoked on the CqQuery.
   *
   * @param queryString the OQL query
   * @param cqAttributes the CqAttributes
   * @param isDurable true if the CQ is durable
   * @return the newly created CqQuery object
   * @throws IllegalArgumentException if queryString or cqAttributes is null
   * @throws IllegalStateException if this method is called from a cache server
   * @throws QueryInvalidException if there is a syntax error in the query
   * @throws CqException if the CQ could not be created, or if there was a failure while creating
   *     or managing the CQ metadata. E.g.: the query string must refer to only one region; joins
   *     are not supported. The query must be a SELECT statement. DISTINCT queries are not
   *     supported. Projections are not supported. Only one iterator in the FROM clause is
   *     supported, and it must be a region path. Bind parameters in the query are not yet supported.
   */
  public CqQuery newCq(String queryString, CqAttributes cqAttributes, boolean isDurable)
      throws QueryInvalidException, CqException {
    ClientCQ cq = null;
    try {
      cq = (ClientCQ) getCqService().newCq(null, queryString, cqAttributes, this.pool, isDurable);
    } catch (CqExistsException cqe) {
      // Should not throw in here.
      if (logger.isDebugEnabled()) {
        logger.debug("Unable to createCq. Error :{}", cqe.getMessage(), cqe);
      }
    }
    return cq;
  }

  /**
   * Constructs a new named continuous query, represented by an instance of CqQuery. The CqQuery is
   * not executed, however, until the execute method is invoked on the CqQuery. The name of the
   * query will be used to identify this query in statistics archival.
   *
   * @param cqName the String name for this query
   * @param queryString the OQL query
   * @param cqAttributes the CqAttributes
   * @return the newly created CqQuery object
   * @throws CqExistsException if a CQ by this name already exists on this client
   * @throws IllegalArgumentException if queryString or cqAttributes is null
   * @throws IllegalStateException if this method is called from a cache server
   * @throws QueryInvalidException if there is a syntax error in the query
   * @throws CqException if the CQ could not be created, or if there was a failure while creating
   *     or managing the CQ metadata. E.g.: the query string must refer to only one region; joins
   *     are not supported. The query must be a SELECT statement. DISTINCT queries are not
   *     supported. Projections are not supported. Only one iterator in the FROM clause is
   *     supported, and it must be a region path. Bind parameters in the query are not yet supported.
   */
  public CqQuery newCq(String cqName, String queryString, CqAttributes cqAttributes)
      throws QueryInvalidException, CqExistsException, CqException {
    if (cqName == null) {
      throw new IllegalArgumentException(
          LocalizedStrings.DefaultQueryService_CQNAME_MUST_NOT_BE_NULL.toLocalizedString());
    }
    ClientCQ cq =
        (ClientCQ) getCqService().newCq(cqName, queryString, cqAttributes, this.pool, false);
    return cq;
  }

  /**
   * Constructs a new named continuous query, represented by an instance of CqQuery. The CqQuery is
   * not executed, however, until the execute method is invoked on the CqQuery. The name of the
   * query will be used to identify this query in statistics archival.
   *
   * @param cqName the String name for this query
   * @param queryString the OQL query
   * @param cqAttributes the CqAttributes
   * @param isDurable true if the CQ is durable
   * @return the newly created CqQuery object
   * @throws CqExistsException if a CQ by this name already exists on this client
   * @throws IllegalArgumentException if queryString or cqAttributes is null
   * @throws IllegalStateException if this method is called from a cache server
   * @throws QueryInvalidException if there is a syntax error in the query
   * @throws CqException if the CQ could not be created, or if there was a failure while creating
   *     or managing the CQ metadata. E.g.: the query string must refer to only one region; joins
   *     are not supported. The query must be a SELECT statement. DISTINCT queries are not
   *     supported. Projections are not supported. Only one iterator in the FROM clause is
   *     supported, and it must be a region path. Bind parameters in the query are not yet supported.
   */
  public CqQuery newCq(
      String cqName, String queryString, CqAttributes cqAttributes, boolean isDurable)
      throws QueryInvalidException, CqExistsException, CqException {
    if (cqName == null) {
      throw new IllegalArgumentException(
          LocalizedStrings.DefaultQueryService_CQNAME_MUST_NOT_BE_NULL.toLocalizedString());
    }
    ClientCQ cq =
        (ClientCQ) getCqService().newCq(cqName, queryString, cqAttributes, this.pool, isDurable);
    return cq;
  }
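
  // Illustrative usage sketch (not part of this class): registering and executing a named CQ from
  // a client, assuming a client QueryService backed by a pool. CqAttributesFactory is the standard
  // public API for building CqAttributes; the listener, CQ name and query are placeholders.
  //
  //   CqAttributesFactory cqf = new CqAttributesFactory();
  //   cqf.addCqListener(myCqListener);   // a CqListener implementation supplied by the application
  //   CqAttributes cqa = cqf.create();
  //   CqQuery cq = queryService.newCq("priceTracker",
  //       "SELECT * FROM /portfolios p WHERE p.price > 100", cqa);
  //   cq.execute();                      // or cq.executeWithInitialResults()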

  /**
   * Close all CQs executing in this VM, and release resources associated with executing CQs.
   * CqQuery objects created by other VMs are unaffected.
   */
  public void closeCqs() {
    try {
      getCqService().closeAllCqs(true);
    } catch (CqException cqe) {
      if (logger.isDebugEnabled()) {
        logger.debug("Unable to closeAll Cqs. Error :{}", cqe.getMessage(), cqe);
      }
    }
  }

  /**
   * Retrieve a CqQuery by name.
   *
   * @return the CqQuery or null if not found
   */
  public CqQuery getCq(String cqName) {
    CqQuery cq = null;
    try {
      cq = (CqQuery) getCqService().getCq(cqName);
    } catch (CqException cqe) {
      if (logger.isDebugEnabled()) {
        logger.debug("Unable to getCq. Error :{}", cqe.getMessage(), cqe);
      }
    }
    return cq;
  }

  /**
   * Retrieve all CqQuery objects created by this VM.
   *
   * @return an array of the CQs created by this VM, or null if they could not be retrieved
   */
  public CqQuery[] getCqs() {
    CqQuery[] cqs = null;
    try {
      return toArray(getCqService().getAllCqs());
    } catch (CqException cqe) {
      if (logger.isDebugEnabled()) {
        logger.debug("Unable to getAllCqs. Error :{}", cqe.getMessage(), cqe);
      }
    }
    return cqs;
  }

  private CqQuery[] toArray(Collection<? extends InternalCqQuery> allCqs) {
    CqQuery[] cqs = new CqQuery[allCqs.size()];
    allCqs.toArray(cqs);
    return cqs;
  }

  /** Returns all the CQs on the given region. */
  public CqQuery[] getCqs(final String regionName) throws CqException {
    return toArray(getCqService().getAllCqs(regionName));
  }

  /**
   * Starts execution of all the registered continuous queries for this client. This is
   * complementary to stopCqs.
   *
   * @see QueryService#stopCqs()
   * @throws CqException if the CQs could not be executed
   */
  public void executeCqs() throws CqException {
    try {
      getCqService().executeAllClientCqs();
    } catch (CqException cqe) {
      if (logger.isDebugEnabled()) {
        logger.debug("Unable to execute all cqs. Error :{}", cqe.getMessage(), cqe);
      }
    }
  }

  /**
   * Stops execution of all the continuous queries for this client, making them inactive. This is
   * useful when the client needs to control the incoming CQ messages during bulk region
   * operations.
   *
   * @see QueryService#executeCqs()
   * @throws CqException if the CQs could not be stopped
   */
  public void stopCqs() throws CqException {
    try {
      getCqService().stopAllClientCqs();
    } catch (CqException cqe) {
      if (logger.isDebugEnabled()) {
        logger.debug("Unable to stop all CQs. Error :{}", cqe.getMessage(), cqe);
      }
    }
  }

  /**
   * Starts execution of all the continuous queries registered on the specified region for this
   * client. This is the complementary method to stopCqs().
   *
   * @see QueryService#stopCqs()
   * @throws CqException if the CQs could not be executed
   */
  public void executeCqs(String regionName) throws CqException {
    try {
      getCqService().executeAllRegionCqs(regionName);
    } catch (CqException cqe) {
      if (logger.isDebugEnabled()) {
        logger.debug(
            "Unable to execute cqs on the specified region. Error :{}", cqe.getMessage(), cqe);
      }
    }
  }

  /**
   * Stops execution of all the continuous queries registered on the specified region for this
   * client. This is useful when the client needs to control the incoming CQ messages during bulk
   * region operations.
   *
   * @see QueryService#executeCqs()
   * @throws CqException if the CQs could not be stopped
   */
  public void stopCqs(String regionName) throws CqException {
    try {
      getCqService().stopAllRegionCqs(regionName);
    } catch (CqException cqe) {
      if (logger.isDebugEnabled()) {
        logger.debug(
            "Unable to stop cqs on the specified region. Error :{}", cqe.getMessage(), cqe);
      }
    }
  }

  /**
   * Get statistics information for the CQ service.
   *
   * @return the CQ service statistics, or null if they could not be retrieved
   */
  public CqServiceStatistics getCqStatistics() {
    CqServiceStatistics stats = null;
    try {
      stats = getCqService().getCqStatistics();
    } catch (CqException cqe) {
      if (logger.isDebugEnabled()) {
        logger.debug("Unable get CQ Statistics. Error :{}", cqe.getMessage(), cqe);
      }
    }
    return stats;
  }

  /**
   * Is the CQ service in a cache server environment
   *
   * @return true if cache server, false otherwise
   */
  public boolean isServer() {
    return !this.cache.getCacheServers().isEmpty();
  }

  /** Close the CQ Service after clean up if any. */
  public void closeCqService() {
    cache.getCqService().close();
  }

  /** @return CqService */
  public CqService getCqService() throws CqException {
    CqService service = cache.getCqService();
    service.start();
    return service;
  }

  public void setPool(InternalPool pool) {
    this.pool = pool;
    if (logger.isDebugEnabled()) {
      logger.debug(
          "Setting ServerProxy with the Query Service using the pool :{} ", pool.getName());
    }
  }

  public List<String> getAllDurableCqsFromServer() throws CqException {
    if (!isServer()) {
      if (pool != null) {
        return getCqService().getAllDurableCqsFromServer(pool);
      } else {
        throw new UnsupportedOperationException(
            "GetAllDurableCQsFromServer requires a pool to be configured.");
      }
    } else {
      // we are a server
      return Collections.EMPTY_LIST;
    }
  }

  public UserAttributes getUserAttributes(String cqName) {
    try {
      return getCqService().getUserAttributes(cqName);
    } catch (CqException ce) {
      return null;
    }
  }

  @Override
  public void defineKeyIndex(String indexName, String indexedExpression, String fromClause)
      throws RegionNotFoundException {
    defineIndex(indexName, IndexType.PRIMARY_KEY, indexedExpression, fromClause, null);
  }

  @Override
  public void defineHashIndex(String indexName, String indexedExpression, String fromClause)
      throws RegionNotFoundException {
    defineIndex(indexName, IndexType.HASH, indexedExpression, fromClause, null);
  }

  @Override
  public void defineHashIndex(
      String indexName, String indexedExpression, String fromClause, String imports)
      throws RegionNotFoundException {
    defineIndex(indexName, IndexType.HASH, indexedExpression, fromClause, imports);
  }

  @Override
  public void defineIndex(String indexName, String indexedExpression, String fromClause)
      throws RegionNotFoundException {
    defineIndex(indexName, IndexType.FUNCTIONAL, indexedExpression, fromClause, null);
  }

  @Override
  public void defineIndex(
      String indexName, String indexedExpression, String fromClause, String imports)
      throws RegionNotFoundException {
    defineIndex(indexName, IndexType.FUNCTIONAL, indexedExpression, fromClause, imports);
  }

  public void defineIndex(
      String indexName,
      IndexType indexType,
      String indexedExpression,
      String fromClause,
      String imports)
      throws RegionNotFoundException {
    IndexCreationData indexData = new IndexCreationData(indexName);
    indexData.setIndexData(indexType, fromClause, indexedExpression, imports);
    Region r = getRegionFromPath(imports, fromClause);
    synchronized (indexDefinitions) {
      HashSet<IndexCreationData> s = indexDefinitions.get(r);
      if (s == null) {
        s = new HashSet<IndexCreationData>();
      }
      s.add(indexData);
      indexDefinitions.put(r, s);
    }
  }

  @Override
  public List<Index> createDefinedIndexes() throws MultiIndexCreationException {
    HashSet<Index> indexes = new HashSet<Index>();
    boolean throwException = false;
    HashMap<String, Exception> exceptionsMap = new HashMap<String, Exception>();

    synchronized (indexDefinitions) {
      for (Entry<Region, HashSet<IndexCreationData>> e : indexDefinitions.entrySet()) {
        Region region = e.getKey();
        HashSet<IndexCreationData> icds = e.getValue();
        if (region instanceof PartitionedRegion) {
          throwException =
              createDefinedIndexesForPR(indexes, (PartitionedRegion) region, icds, exceptionsMap);
        } else {
          throwException =
              createDefinedIndexesForReplicatedRegion(indexes, region, icds, exceptionsMap);
        }
      }
    } // end sync

    if (throwException) {
      throw new MultiIndexCreationException(exceptionsMap);
    }

    return new ArrayList<Index>(indexes);
  }
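
  /*
   * Usage sketch for deferred index creation (the region path and field names below are
   * hypothetical, not taken from this file): indexes are first defined, then built and populated
   * in a single pass by createDefinedIndexes(), which collects per-index failures into a
   * MultiIndexCreationException rather than failing fast.
   *
   *   QueryService qs = cache.getQueryService();
   *   qs.defineIndex("statusIndex", "status", "/portfolios");
   *   qs.defineHashIndex("idIndex", "id", "/portfolios");
   *   List<Index> created = qs.createDefinedIndexes();
   */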

  private boolean createDefinedIndexesForPR(
      HashSet<Index> indexes,
      PartitionedRegion region,
      HashSet<IndexCreationData> icds,
      HashMap<String, Exception> exceptionsMap) {
    try {
      indexes.addAll(region.createIndexes(false, icds));
    } catch (IndexCreationException e1) {
      logger.info(
          LocalizedMessage.create(
              LocalizedStrings
                  .DefaultQueryService_EXCEPTION_WHILE_CREATING_INDEX_ON_PR_DEFAULT_QUERY_PROCESSOR),
          e1);
    } catch (CacheException e1) {
      logger.info(
          LocalizedMessage.create(
              LocalizedStrings
                  .DefaultQueryService_EXCEPTION_WHILE_CREATING_INDEX_ON_PR_DEFAULT_QUERY_PROCESSOR),
          e1);
      return true;
    } catch (ForceReattemptException e1) {
      logger.info(
          LocalizedMessage.create(
              LocalizedStrings
                  .DefaultQueryService_EXCEPTION_WHILE_CREATING_INDEX_ON_PR_DEFAULT_QUERY_PROCESSOR),
          e1);
      return true;
    } catch (MultiIndexCreationException e) {
      exceptionsMap.putAll(e.getExceptionsMap());
      return true;
    }
    return false;
  }

  private boolean createDefinedIndexesForReplicatedRegion(
      HashSet<Index> indexes,
      Region region,
      Set<IndexCreationData> icds,
      HashMap<String, Exception> exceptionsMap) {
    boolean throwException = false;
    for (IndexCreationData icd : icds) {
      try {
        // First step is creating all the defined indexes. Do this only if
        // the region is not a PR. For a PR, creation and population are done in
        // the PartitionedRegion#createDefinedIndexes
        indexes.add(
            createIndex(
                icd.getIndexName(),
                icd.getIndexType(),
                icd.getIndexExpression(),
                icd.getIndexFromClause(),
                icd.getIndexImportString(),
                false,
                region));
      } catch (Exception ex) {
        // If an index creation fails, add the exception to the map and
        // continue creating the rest of the indexes. The failed indexes will
        // be removed from the IndexManager#indexes map by the createIndex
        // method so that those indexes will not be populated in the next
        // step.
        if (logger.isDebugEnabled()) {
          logger.debug("Index creation failed, {}, {}", icd.getIndexName(), ex.getMessage(), ex);
        }
        exceptionsMap.put(icd.getIndexName(), ex);
        throwException = true;
      }
    }
    if (IndexManager.testHook != null) {
      IndexManager.testHook.hook(13);
    }
    // Second step is iterating over REs and populating all the created
    // indexes
    IndexManager indexManager = IndexUtils.getIndexManager(region, false);
    if (indexManager == null) {
      for (IndexCreationData icd : icds) {
        exceptionsMap.put(
            icd.getIndexName(),
            new IndexCreationException("Index Creation Failed due to region destroy"));
      }
      return true;
    }

    if (indexes.size() > 0) {
      try {
        indexManager.populateIndexes(indexes);
      } catch (MultiIndexCreationException ex) {
        exceptionsMap.putAll(ex.getExceptionsMap());
        throwException = true;
      }
    }
    return throwException;
  }

  public boolean clearDefinedIndexes() {
    this.indexDefinitions.clear();
    return true;
  }

  public InternalPool getPool() {
    return pool;
  }
}
/** This class represents the identity of a cache client's ConnectionProxy. */
public final class ClientProxyMembershipID
    implements DataSerializableFixedID, Serializable, Externalizable {

  private static final Logger logger = LogService.getLogger();

  private static ThreadLocal<String> POOL_NAME = new ThreadLocal<String>();

  public static void setPoolName(String poolName) {
    POOL_NAME.set(poolName);
  }

  public static String getPoolName() {
    return POOL_NAME.get();
  }

  private static final int BYTES_32KB = 32768;

  // TODO: Asif: If possible remove the static data from here.
  // Uniquely identifies the distributed system of the client. These static fields have
  // significance only in the cache client VM.
  // public volatile static byte[] client_side_identity = null;

  public static volatile DistributedSystem system = null;

  /** the membership id of the distributed system in this client (if running in a client) */
  public static DistributedMember systemMemberId;

  // durable_synch_counter=1 is reserved for durable clients so that when pools are being
  // created and deleted the same client session is selected on the server side by always
  // using the same uniqueID value, which is set via the synch_counter
  private static final int durable_synch_counter = 1;
  private static int synch_counter = 0;

  // private byte[] proxyID ;
  protected byte[] identity;

  /** cached membership identifier */
  private transient DistributedMember memberId;

  /** cached tostring of the memberID */
  private transient String memberIdString;

  protected int uniqueId;

  // private final String proxyIDStr;
  // private final String clientIdStr ;

  @Override
  public int hashCode() {
    int result = 17;
    final int mult = 37;
    if (isDurable()) {
      result = mult * result + getDurableId().hashCode();
    } else {
      if (this.identity != null && this.identity.length > 0) {
        for (int i = 0; i < this.identity.length; i++) {
          result = mult * result + this.identity[i];
        }
      }
    }
    // we can't use unique_id in hashCode
    // because of HandShake's hashCode using our HashCode but
    // its equals using our isSameDSMember which ignores unique_id
    // result = mult * result + this.unique_id;
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if ((obj == null) || !(obj instanceof ClientProxyMembershipID)) {
      return false;
    }
    ClientProxyMembershipID that = (ClientProxyMembershipID) obj;
    if (this.uniqueId != that.uniqueId) {
      return false;
    }
    boolean isDurable = this.isDurable();
    if (isDurable && !that.isDurable()) {
      return false;
    }
    if (isDurable) {
      return this.getDurableId().equals(that.getDurableId());
    }
    return Arrays.equals(this.identity, that.identity);
  }

  /** Return true if "that" can be used in place of "this" when canonicalizing. */
  private boolean isCanonicalEquals(ClientProxyMembershipID that) {
    if (this == that) {
      return true;
    }
    if (this.uniqueId != that.uniqueId) {
      return false;
    }
    return Arrays.equals(this.identity, that.identity);
  }

  boolean isSameDSMember(ClientProxyMembershipID that) {
    if (that != null) {
      // Test whether:
      // - the durable ids are equal (if durable) or
      // - the identities are equal (if non-durable)
      return isDurable()
          ? this.getDurableId().equals(that.getDurableId())
          : Arrays.equals(this.identity, that.identity);
    } else {
      return false;
    }
  }

  /** method to obtain ClientProxyMembership for client side */
  public static synchronized ClientProxyMembershipID getNewProxyMembership(DistributedSystem sys) {
    byte[] ba = initializeAndGetDSIdentity(sys);
    return new ClientProxyMembershipID(++synch_counter, ba);
  }

  public static ClientProxyMembershipID getClientId(DistributedMember member) {
    return new ClientProxyMembershipID(member);
  }

  public static byte[] initializeAndGetDSIdentity(DistributedSystem sys) {
    byte[] client_side_identity = null;
    if (sys == null) {
      // DistributedSystem is required now before handshaking -Kirk
      throw new IllegalStateException(
          LocalizedStrings
              .ClientProxyMembershipID_ATTEMPTING_TO_HANDSHAKE_WITH_CACHESERVER_BEFORE_CREATING_DISTRIBUTEDSYSTEM_AND_CACHE
              .toLocalizedString());
    }
    // if (system != sys)
    {
      // DS already exists... make sure it's for current DS connection
      systemMemberId = sys.getDistributedMember();
      try {
        HeapDataOutputStream hdos = new HeapDataOutputStream(256, Version.CURRENT);
        DataSerializer.writeObject(systemMemberId, hdos);
        client_side_identity = hdos.toByteArray();
      } catch (IOException ioe) {
        throw new InternalGemFireException(
            LocalizedStrings.ClientProxyMembershipID_UNABLE_TO_SERIALIZE_IDENTITY
                .toLocalizedString(),
            ioe);
      }

      system = sys;
    }
    return client_side_identity;
  }

  private ClientProxyMembershipID(int id, byte[] clientSideIdentity) {
    boolean specialCase = Boolean.getBoolean("gemfire.SPECIAL_DURABLE");
    String durableID = system.getProperties().getProperty("durable-client-id");
    if (specialCase && durableID != null && !durableID.equals("")) {
      this.uniqueId = durable_synch_counter;
    } else {
      this.uniqueId = id;
    }
    this.identity = clientSideIdentity;
    this.memberId = systemMemberId;
  }

  public ClientProxyMembershipID() {}

  public ClientProxyMembershipID(DistributedMember member) {
    this.uniqueId = 1;
    this.memberId = member;
    updateID(member);
  }

  private transient String _toString;

  //  private transient int transientPort; // variable for debugging member ID issues

  @Override
  public String toString() {
    if (this.identity != null
        && ((InternalDistributedMember) getDistributedMember()).getPort() == 0) {
      return this.toStringNoCache();
    }
    if (this._toString == null) {
      this._toString = this.toStringNoCache();
    }
    return this._toString;
  }

  /** returns a string representation of this identifier, ignoring the toString cache */
  public String toStringNoCache() {
    StringBuffer sb =
        new StringBuffer("identity(")
            .append(getDSMembership())
            .append(",connection=")
            .append(uniqueId);
    if (identity != null) {
      DurableClientAttributes dca = getDurableAttributes();
      if (dca.getId().length() > 0) {
        sb.append(",durableAttributes=").append(dca);
      }
    }
    return sb.append(')').toString();
  }

  /**
   * For Externalizable
   *
   * @see Externalizable
   */
  public void writeExternal(ObjectOutput out) throws IOException {
    //    if (this.transientPort == 0) {
    //      InternalDistributedSystem.getLoggerI18n().warning(
    //          LocalizedStrings.DEBUG,
    //          "externalizing a client ID with zero port: " + this.toString(),
    //          new Exception("Stack trace"));
    //    }
    Assert.assertTrue(this.identity.length <= BYTES_32KB);
    out.writeShort(this.identity.length);
    out.write(this.identity);
    out.writeInt(this.uniqueId);
  }

  /** returns the externalized size of this object */
  public int getSerializedSize() {
    return 4 + identity.length + 4;
  }

  /**
   * For Externalizable
   *
   * @see Externalizable
   */
  public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    int identityLength = in.readShort();
    if (identityLength > BYTES_32KB) {
      throw new IOException(
          LocalizedStrings.ClientProxyMembershipID_HANDSHAKE_IDENTITY_LENGTH_IS_TOO_BIG
              .toLocalizedString());
    }
    this.identity = new byte[identityLength];
    read(in, this.identity);
    this.uniqueId = in.readInt();
    if (this.uniqueId == -1) {
      throw new IOException(
          LocalizedStrings
              .ClientProxyMembershipID_UNEXPECTED_EOF_REACHED_UNIQUE_ID_COULD_NOT_BE_READ
              .toLocalizedString());
    }
    //    {toString(); this.transientPort = ((InternalDistributedMember)this.memberId).getPort();}
  }

  private void read(ObjectInput dis, byte[] toFill) throws IOException {

    int idBytes = 0;
    int toFillLength = toFill.length;
    while (idBytes < toFillLength) {
      // idBytes += dis.read(toFill, idBytes, (toFillLength - idBytes));
      int dataRead = dis.read(toFill, idBytes, (toFillLength - idBytes));
      if (dataRead == -1) {
        throw new IOException(
            LocalizedStrings
                .ClientProxyMembershipID_UNEXPECTED_EOF_REACHED_DISTRIBUTED_MEMBERSHIPID_COULD_NOT_BE_READ
                .toLocalizedString());
      }
      idBytes += dataRead;
    }
  }

  public int getDSFID() {
    return CLIENT_PROXY_MEMBERSHIPID;
  }

  public void toData(DataOutput out) throws IOException {
    //    if (this.transientPort == 0) {
    //      InternalDistributedSystem.getLoggerI18n().warning(
    //          LocalizedStrings.DEBUG,
    //          "serializing a client ID with zero port: " + this.toString(),
    //          new Exception("Stack trace"));
    //    }
    DataSerializer.writeByteArray(this.identity, out);
    out.writeInt(this.uniqueId);
  }

  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
    this.identity = DataSerializer.readByteArray(in);
    this.uniqueId = in.readInt();
    //    {toString(); this.transientPort = ((InternalDistributedMember)this.memberId).getPort();}
  }

  public Version getClientVersion() {
    return ((InternalDistributedMember) getDistributedMember()).getVersionObject();
  }

  public String getDSMembership() {
    if (identity == null) {
      // some unit tests create IDs that have no real identity, so return the string "null"
      return "null";
    }
    // don't cache if we haven't connected to the server yet
    if (((InternalDistributedMember) getDistributedMember()).getPort() == 0) {
      return getDistributedMember().toString();
    }
    if (memberIdString == null) {
      memberIdString = getDistributedMember().toString();
    }
    return memberIdString;
  }

  /**
   * this method uses CacheClientNotifier to try to obtain an ID that is equal to this one. This is
   * used during deserialization to reduce storage overhead.
   */
  private ClientProxyMembershipID canonicalReference() {
    CacheClientNotifier ccn = CacheClientNotifier.getInstance();
    if (ccn != null) {
      CacheClientProxy cp = ccn.getClientProxy(this, true);
      if (cp != null) {
        if (this.isCanonicalEquals(cp.getProxyID())) {
          return cp.getProxyID();
        }
      }
    }
    return this;
  }

  /**
   * deserializes the membership id, if necessary, and returns it. All access to memberId should be
   * through this method
   */
  public DistributedMember getDistributedMember() {
    if (memberId == null) {
      ByteArrayInputStream bais = new ByteArrayInputStream(identity);
      DataInputStream dis = new VersionedDataInputStream(bais, Version.CURRENT);
      try {
        memberId = (DistributedMember) DataSerializer.readObject(dis);
      } catch (Exception e) {
        logger.error(
            LocalizedMessage.create(
                LocalizedStrings.ClientProxyMembershipID_UNABLE_TO_DESERIALIZE_MEMBERSHIP_ID),
            e);
      }
    }
    return memberId;
  }

  /** Returns the byte-array for membership identity */
  byte[] getMembershipByteArray() {
    return this.identity;
  }

  /**
   * Returns whether this <code>ClientProxyMembershipID</code> is durable.
   *
   * @return whether this <code>ClientProxyMembershipID</code> is durable
   * @since 5.5
   */
  public boolean isDurable() {
    String durableClientId = getDistributedMember().getDurableClientAttributes().getId();
    return durableClientId != null && !(durableClientId.length() == 0);
  }

  /**
   * Returns this <code>ClientProxyMembershipID</code>'s durable attributes.
   *
   * @return this <code>ClientProxyMembershipID</code>'s durable attributes
   * @since 5.5
   */
  protected DurableClientAttributes getDurableAttributes() {
    return getDistributedMember().getDurableClientAttributes();
  }

  /**
   * Returns this <code>ClientProxyMembershipID</code>'s durable id.
   *
   * @return this <code>ClientProxyMembershipID</code>'s durable id
   * @since 5.5
   */
  public String getDurableId() {
    DurableClientAttributes dca = getDurableAttributes();
    return dca == null ? "" : dca.getId();
  }

  /**
   * Returns this <code>ClientProxyMembershipID</code>'s durable timeout.
   *
   * @return this <code>ClientProxyMembershipID</code>'s durable timeout
   * @since 5.5
   */
  protected int getDurableTimeout() {
    DurableClientAttributes dca = getDurableAttributes();
    return dca == null ? 0 : dca.getTimeout();
  }

  /** Used to update the timeout when a durable client comes back to a server */
  public void updateDurableTimeout(int newValue) {
    DurableClientAttributes dca = getDurableAttributes();
    if (dca != null) {
      dca.updateTimeout(newValue);
    }
  }

  /** call this when the distributed system ID has been modified */
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(
      value = "ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD",
      justification =
          "Only applicable in client DS and in that case too multiple instances do not modify it at the same time.")
  public void updateID(DistributedMember idm) {
    //    this.transientPort = ((InternalDistributedMember)this.memberId).getPort();
    //    if (this.transientPort == 0) {
    //      InternalDistributedSystem.getLoggerI18n().warning(
    //          LocalizedStrings.DEBUG,
    //          "updating client ID when member port is zero: " + this.memberId,
    //          new Exception("stack trace")
    //          );
    //    }
    HeapDataOutputStream hdos = new HeapDataOutputStream(256, Version.CURRENT);
    try {
      DataSerializer.writeObject(idm, hdos);
    } catch (IOException e) {
      throw new InternalGemFireException("Unable to serialize member: " + this.memberId, e);
    }
    this.identity = hdos.toByteArray();
    if (this.memberId != null && this.memberId == systemMemberId) {
      systemMemberId = idm;
      // client_side_identity = this.identity;
    }
    this.memberId = idm;
    this._toString = null; // make sure we don't retain the old ID representation in toString
  }

  /**
   * Return the name of the <code>HARegion</code> queueing this proxy's messages. This name is
   * generated based on whether or not this proxy id is durable. If this proxy id is durable, then
   * the durable client id is used. If this proxy id is not durable, then the
   * <code>DistributedMember</code> string is used.
   *
   * @return the name of the <code>HARegion</code> queueing this proxy's messages.
   * @since 5.5
   */
  protected String getHARegionName() {
    return getBaseRegionName() + "_queue";
  }

  /**
   * Return the name of the region used for communicating interest changes between servers.
   *
   * @return the name of the region used for communicating interest changes between servers
   * @since 5.6
   */
  protected String getInterestRegionName() {
    return getBaseRegionName() + "_interest";
  }

  private String getBaseRegionName() {
    String id = isDurable() ? getDurableId() : getDSMembership();
    if (id.indexOf('/') >= 0) {
      id = id.replace('/', ':');
    }
    StringBuffer buffer =
        new StringBuffer()
            .append("_gfe_")
            .append(isDurable() ? "" : "non_")
            .append("durable_client_")
            .append("with_id_" + id)
            .append("_")
            .append(this.uniqueId);
    return buffer.toString();
  }
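
  // Illustrative values produced by getBaseRegionName() (the member id and durable id shown are
  // hypothetical):
  //   non-durable client: _gfe_non_durable_client_with_id_192.0.2.1(4321)<v1>:40404_2
  //   durable client:     _gfe_durable_client_with_id_myDurableId_1
  // getHARegionName() and getInterestRegionName() append "_queue" and "_interest" respectively.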

  /**
   * Resets the unique id counter. This is done for a durable client that stops and restarts its
   * cache, since on restart it needs to maintain the same unique id.
   *
   * @since 5.5
   */
  public static synchronized void resetUniqueIdCounter() {
    synch_counter = 0;
  }

  public Identity getIdentity() {
    return new Identity();
  }

  /**
   * Used to represent a unique identity of this ClientProxyMembershipID. It does this by ignoring
   * the durable id and only respecting the unique_id and identity.
   *
   * <p>This class is used to clean up resources associated with a particular client and thus does
   * not want to limit itself to the durable id.
   *
   * @since 5.7
   */
  public class Identity {
    public int getUniqueId() {
      return uniqueId;
    }

    public byte[] getMemberIdBytes() {
      return identity;
    }

    @Override
    public int hashCode() {
      int result = 17;
      final int mult = 37;
      byte[] idBytes = getMemberIdBytes();
      if (idBytes != null && idBytes.length > 0) {
        for (int i = 0; i < idBytes.length; i++) {
          result = mult * result + idBytes[i];
        }
      }
      result = mult * result + uniqueId;
      return result;
    }

    @Override
    public boolean equals(Object obj) {
      if ((obj == null) || !(obj instanceof ClientProxyMembershipID.Identity)) {
        return false;
      }
      ClientProxyMembershipID.Identity that = (ClientProxyMembershipID.Identity) obj;
      return (getUniqueId() == that.getUniqueId()
          && Arrays.equals(getMemberIdBytes(), that.getMemberIdBytes()));
    }

    public ClientProxyMembershipID getClientProxyID() {
      return ClientProxyMembershipID.this;
    }
  }

  @Override
  public Version[] getSerializationVersions() {
    return null;
  }

  public static ClientProxyMembershipID readCanonicalized(DataInput in)
      throws IOException, ClassNotFoundException {

    ClientProxyMembershipID result = DataSerializer.readObject(in);
    // We can't canonicalize if we have no identity.
    // I only saw this happen in unit tests that serialize "new ClientProxyMembershipID()".
    if (result == null || result.identity == null) {
      return result;
    }
    return result.canonicalReference();
  }
}
/**
 * Internal implementation of PartitionAttributes. New attributes existing only in this class and
 * not in {@link PartitionAttributes} are for internal use only.
 *
 * @since 5.5
 */
public class PartitionAttributesImpl implements PartitionAttributes, Cloneable, DataSerializable {
  private static final Logger logger = LogService.getLogger();
  private static final long serialVersionUID = -7120239286748961954L;

  private static final int OFF_HEAP_LOCAL_MAX_MEMORY_PLACEHOLDER = 1;

  /** Partition resolver. */
  private transient PartitionResolver partitionResolver;

  private transient boolean hasPartitionResolver;

  /** the number of redundant copies to keep of each datum */
  private int redundancy = 0;

  private transient boolean hasRedundancy;

  /** maximum global size of the partitioned region, in megabytes */
  private long totalMaxMemory = PartitionAttributesFactory.GLOBAL_MAX_MEMORY_DEFAULT;

  private transient boolean hasTotalMaxMemory;

  /** local settings LOCAL_MAX_MEMORY_PROPERTY - deprecated, use setLocalMaxMemory */
  private Properties localProperties = new Properties();

  /**
   * global settings GLOBAL_MAX_MEMORY_PROPERTY - deprecated, use setTotalMaxMemory
   * GLOBAL_MAX_BUCKETS_PROPERTY - deprecated, use setTotalNumBuckets
   */
  private Properties globalProperties = new Properties();

  /*
   * This is used to artificially set the amount of available off-heap memory
   * when no distributed system is available. This value works the same way as
   * specifying off-heap as a GemFire property, so "100m" = 100 megabytes,
   * "100g" = 100 gigabytes, etc.
   */
  private static String testAvailableOffHeapMemory = null;

  /** the amount of local memory to use, in megabytes */
  private int localMaxMemory = PartitionAttributesFactory.LOCAL_MAX_MEMORY_DEFAULT;

  private transient boolean hasLocalMaxMemory;
  private transient boolean localMaxMemoryExists;

  /**
   * Used to determine how to calculate the default local max memory. This was made transient since
   * we do not support p2p backwards-compatible changes to values stored in a region, and our PR
   * implementation stores this object in the internal PRRoot region.
   */
  private transient boolean offHeap = false;

  private transient boolean hasOffHeap;

  /** the total number of buckets for the partitioned region */
  private int totalNumBuckets = PartitionAttributesFactory.GLOBAL_MAX_BUCKETS_DEFAULT;

  private transient boolean hasTotalNumBuckets;

  /**
   * Specifies the name of the partitioned region with which this newly created partitioned region
   * is colocated
   */
  private String colocatedRegionName;

  private transient boolean hasColocatedRegionName;

  /** Specifies how long existing members will wait before recovering redundancy */
  private long recoveryDelay = PartitionAttributesFactory.RECOVERY_DELAY_DEFAULT;

  private transient boolean hasRecoveryDelay;
  /** Specifies how long new members will wait before recovering redundancy */
  private long startupRecoveryDelay = PartitionAttributesFactory.STARTUP_RECOVERY_DELAY_DEFAULT;

  private transient boolean hasStartupRecoveryDelay;

  private ArrayList<PartitionListener> partitionListeners;
  private transient boolean hasPartitionListeners;

  /** the set of the static partitions defined for the region */
  private List<FixedPartitionAttributesImpl> fixedPAttrs;

  private transient boolean hasFixedPAttrs;

  public void setTotalNumBuckets(int maxNumberOfBuckets) {
    this.totalNumBuckets = maxNumberOfBuckets;
    this.globalProperties.setProperty(
        PartitionAttributesFactory.GLOBAL_MAX_BUCKETS_PROPERTY,
        String.valueOf(this.totalNumBuckets));
    this.hasTotalNumBuckets = true;
  }

  public void setTotalMaxMemory(long maximumMB) {
    this.totalMaxMemory = maximumMB;
    this.globalProperties.setProperty(
        PartitionAttributesFactory.GLOBAL_MAX_MEMORY_PROPERTY, String.valueOf(maximumMB));
    this.hasTotalMaxMemory = true;
  }

  public void setLocalMaxMemory(int maximumMB) {
    this.localMaxMemory = maximumMB;
    this.localProperties.setProperty(
        PartitionAttributesFactory.LOCAL_MAX_MEMORY_PROPERTY, String.valueOf(this.localMaxMemory));
    this.hasLocalMaxMemory = true;
    this.localMaxMemoryExists = true;
  }

  public void setOffHeap(final boolean offHeap) {
    this.offHeap = offHeap;
    this.hasOffHeap = true;
    if (this.offHeap && !this.hasLocalMaxMemory) {
      this.localMaxMemory = computeOffHeapLocalMaxMemory();
    }
  }

  public void setColocatedWith(String colocatedRegionFullPath) {
    this.colocatedRegionName = colocatedRegionFullPath;
    this.hasColocatedRegionName = true;
  }

  public void setRecoveryDelay(long recoveryDelay) {
    this.recoveryDelay = recoveryDelay;
    this.hasRecoveryDelay = true;
  }

  public void setStartupRecoveryDelay(long startupRecoveryDelay) {
    this.startupRecoveryDelay = startupRecoveryDelay;
    this.hasStartupRecoveryDelay = true;
  }

  /**
   * Constructs an instance of <code>PartitionAttributes</code> with default settings.
   *
   * @see PartitionAttributesFactory
   */
  public PartitionAttributesImpl() {}

  public PartitionResolver getPartitionResolver() {
    return this.partitionResolver;
  }

  public void addPartitionListener(PartitionListener listener) {
    ArrayList<PartitionListener> listeners = this.partitionListeners;
    if (listeners == null) {
      ArrayList<PartitionListener> al = new ArrayList<PartitionListener>(1);
      al.add(listener);
      addPartitionListeners(al);
    } else {
      synchronized (listeners) {
        listeners.add(listener);
      }
    }
  }

  private void addPartitionListeners(ArrayList<PartitionListener> listeners) {
    this.partitionListeners = listeners;
    this.hasPartitionListeners = true;
  }

  //    public ExpirationAttributes getEntryTimeToLive()
  //    {
  //      return new ExpirationAttributes(this.entryTimeToLiveExpiration.getTimeout(),
  //          this.entryTimeToLiveExpiration.getAction());
  //    }
  //
  //    public ExpirationAttributes getEntryIdleTimeout()
  //    {
  //      return new ExpirationAttributes(this.entryIdleTimeoutExpiration.getTimeout(),
  //          this.entryIdleTimeoutExpiration.getAction());
  //    }

  public int getRedundantCopies() {
    return this.redundancy;
  }

  public int getTotalNumBuckets() {
    return this.totalNumBuckets;
  }

  // deprecated method
  public long getTotalSize() {
    return this.getTotalMaxMemory();
  }

  public long getTotalMaxMemory() {
    return this.totalMaxMemory;
  }

  public boolean getOffHeap() {
    return this.offHeap;
  }

  /**
   * Returns localMaxMemory. The returned value is never the temporary placeholder used for
   * offHeapLocalMaxMemory; when off-heap is enabled, the true final value of localMaxMemory can
   * only be computed once the DistributedSystem has been created. See bug #52003.
   *
   * @throws IllegalStateException if off-heap and the actual value is not yet known (because the
   *     DistributedSystem has not yet been created)
   * @see #getLocalMaxMemoryForValidation()
   */
  public int getLocalMaxMemory() {
    if (this.offHeap && !this.localMaxMemoryExists) {
      int value = computeOffHeapLocalMaxMemory();
      if (this.localMaxMemoryExists) { // real value now exists so set it and return
        this.localMaxMemory = value;
      }
    }
    checkLocalMaxMemoryExists();
    return this.localMaxMemory;
  }
  /**
   * @throws IllegalStateException if off-heap and the actual value is not yet known (because the
   *     DistributedSystem has not yet been created)
   */
  private void checkLocalMaxMemoryExists() {
    if (this.offHeap && !this.localMaxMemoryExists) {
      // the real value does NOT yet exist, so throw IllegalStateException
      throw new IllegalStateException(
          "Attempting to use localMaxMemory for off-heap but value is not yet known (default value is equal to off-heap-memory-size)");
    }
  }

  /**
   * Returns localMaxMemory for validation of attributes before Region is created (possibly before
   * DistributedSystem is created). Returned value may be the temporary placeholder representing
   * offHeapLocalMaxMemory which cannot be calculated until the DistributedSystem is created. See
   * bug #52003.
   *
   * @see #OFF_HEAP_LOCAL_MAX_MEMORY_PLACEHOLDER
   * @see #getLocalMaxMemory()
   */
  public int getLocalMaxMemoryForValidation() {
    if (this.offHeap && !this.hasLocalMaxMemory && !this.localMaxMemoryExists) {
      int value = computeOffHeapLocalMaxMemory();
      if (this.localMaxMemoryExists) { // real value now exists so set it and return
        this.localMaxMemory = value;
      }
    }
    return this.localMaxMemory;
  }

  public String getColocatedWith() {
    return this.colocatedRegionName;
  }

  public Properties getLocalProperties() {
    return this.localProperties;
  }

  public Properties getGlobalProperties() {
    return this.globalProperties;
  }

  public long getStartupRecoveryDelay() {
    return startupRecoveryDelay;
  }

  public long getRecoveryDelay() {
    return recoveryDelay;
  }

  public List<FixedPartitionAttributesImpl> getFixedPartitionAttributes() {
    return this.fixedPAttrs;
  }

  private static final PartitionListener[] EMPTY_PARTITION_LISTENERS = new PartitionListener[0];

  public PartitionListener[] getPartitionListeners() {
    ArrayList<PartitionListener> listeners = this.partitionListeners;
    if (listeners == null) {
      return EMPTY_PARTITION_LISTENERS;
    } else {
      synchronized (listeners) {
        if (listeners.size() == 0) {
          return EMPTY_PARTITION_LISTENERS;
        } else {
          PartitionListener[] result = new PartitionListener[listeners.size()];
          listeners.toArray(result);
          return result;
        }
      }
    }
  }

  @Override
  public Object clone() {
    try {
      PartitionAttributesImpl copy = (PartitionAttributesImpl) super.clone();
      if (copy.fixedPAttrs != null) {
        copy.fixedPAttrs = new ArrayList<FixedPartitionAttributesImpl>(copy.fixedPAttrs);
      }
      if (copy.partitionListeners != null) {
        copy.partitionListeners = new ArrayList<PartitionListener>(copy.partitionListeners);
      }
      return copy;
    } catch (CloneNotSupportedException e) {
      throw new InternalGemFireError(
          LocalizedStrings
              .PartitionAttributesImpl_CLONENOTSUPPORTEDEXCEPTION_THROWN_IN_CLASS_THAT_IMPLEMENTS_CLONEABLE
              .toLocalizedString());
    }
  }

  public PartitionAttributesImpl copy() {
    return (PartitionAttributesImpl) clone();
  }

  @Override
  public String toString() {
    StringBuffer s = new StringBuffer();
    return s.append("PartitionAttributes@")
        .append(System.identityHashCode(this))
        .append("[redundantCopies=")
        .append(getRedundantCopies())
        .append(";localMaxMemory=")
        .append(getLocalMaxMemory())
        .append(";totalMaxMemory=")
        .append(this.totalMaxMemory)
        .append(";totalNumBuckets=")
        .append(this.totalNumBuckets)
        .append(";partitionResolver=")
        .append(this.partitionResolver)
        .append(";colocatedWith=")
        .append(this.colocatedRegionName)
        .append(";recoveryDelay=")
        .append(this.recoveryDelay)
        .append(";startupRecoveryDelay=")
        .append(this.startupRecoveryDelay)
        .append(";FixedPartitionAttributes=")
        .append(this.fixedPAttrs)
        .append(";partitionListeners=")
        .append(this.partitionListeners)
        .append("]")
        .toString();
  }

  public String getStringForSQLF() {
    final StringBuilder sb = new StringBuilder();
    return sb.append("redundantCopies=")
        .append(getRedundantCopies())
        .append(",totalMaxMemory=")
        .append(this.totalMaxMemory)
        .append(",totalNumBuckets=")
        .append(this.totalNumBuckets)
        .append(",colocatedWith=")
        .append(this.colocatedRegionName)
        .append(",recoveryDelay=")
        .append(this.recoveryDelay)
        .append(",startupRecoveryDelay=")
        .append(this.startupRecoveryDelay)
        .toString();
  }

  /**
   * @throws IllegalStateException if off-heap and the actual value is not yet known (because the
   *     DistributedSystem has not yet been created)
   */
  public void toData(DataOutput out) throws IOException {
    checkLocalMaxMemoryExists();
    out.writeInt(this.redundancy);
    out.writeLong(this.totalMaxMemory);
    out.writeInt(
        getLocalMaxMemory()); // call the getter to force it to be computed in the off-heap case
    out.writeInt(this.totalNumBuckets);
    DataSerializer.writeString(this.colocatedRegionName, out);
    DataSerializer.writeObject(this.localProperties, out);
    DataSerializer.writeObject(this.globalProperties, out);
    out.writeLong(this.recoveryDelay);
    out.writeLong(this.startupRecoveryDelay);
    DataSerializer.writeObject(this.fixedPAttrs, out);
  }

  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
    this.redundancy = in.readInt();
    this.totalMaxMemory = in.readLong();
    this.localMaxMemory = in.readInt();
    this.totalNumBuckets = in.readInt();
    this.colocatedRegionName = DataSerializer.readString(in);
    this.localProperties = (Properties) DataSerializer.readObject(in);
    this.globalProperties = (Properties) DataSerializer.readObject(in);
    this.recoveryDelay = in.readLong();
    this.startupRecoveryDelay = in.readLong();
    this.fixedPAttrs = DataSerializer.readObject(in);
  }

  public static PartitionAttributesImpl createFromData(DataInput in)
      throws IOException, ClassNotFoundException {
    PartitionAttributesImpl result = new PartitionAttributesImpl();
    InternalDataSerializer.invokeFromData(result, in);
    return result;
  }

  public void setPartitionResolver(PartitionResolver partitionResolver) {
    this.partitionResolver = partitionResolver;
    this.hasPartitionResolver = true;
  }

  @Override
  public boolean equals(final Object obj) {
    if (this == obj) {
      return true;
    }

    if (!(obj instanceof PartitionAttributesImpl)) {
      return false;
    }

    PartitionAttributesImpl other = (PartitionAttributesImpl) obj;

    if (this.redundancy != other.getRedundantCopies()
        || getLocalMaxMemory() != other.getLocalMaxMemory()
        || this.offHeap != other.getOffHeap()
        || this.totalNumBuckets != other.getTotalNumBuckets()
        || this.totalMaxMemory != other.getTotalMaxMemory()
        || this.startupRecoveryDelay != other.getStartupRecoveryDelay()
        || this.recoveryDelay != other.getRecoveryDelay()
        //          || ! this.localProperties.equals(other.getLocalProperties())
        //          || ! this.globalProperties.equals(other.getGlobalProperties())
        || ((this.partitionResolver == null) != (other.getPartitionResolver() == null))
        || (this.partitionResolver != null
            && !this.partitionResolver.equals(other.getPartitionResolver()))
        || ((this.colocatedRegionName == null) != (other.getColocatedWith() == null))
        || (this.colocatedRegionName != null
            && !this.colocatedRegionName.equals(other.getColocatedWith()))
        || ((this.fixedPAttrs == null) != (other.getFixedPartitionAttributes() == null))
        || (this.fixedPAttrs != null
            && !this.fixedPAttrs.equals(other.getFixedPartitionAttributes()))) {
      // throw new RuntimeException("this="+this.toString() + "   other=" + other.toString());
      return false;
    }

    PartitionListener[] otherPListeners = other.getPartitionListeners();
    PartitionListener[] thisPListeners = this.getPartitionListeners();

    if (otherPListeners.length != thisPListeners.length) {
      return false;
    }
    Set<String> otherListenerClassName = new HashSet<String>();
    for (int i = 0; i < otherPListeners.length; i++) {
      PartitionListener listener = otherPListeners[i];
      otherListenerClassName.add(listener.getClass().getName());
    }
    Set<String> thisListenerClassName = new HashSet<String>();
    for (int i = 0; i < thisPListeners.length; i++) {
      PartitionListener listener = thisPListeners[i];
      thisListenerClassName.add(listener.getClass().getName());
    }
    if (!thisListenerClassName.equals(otherListenerClassName)) {
      return false;
    }

    return true;
  }

  @Override
  public int hashCode() {
    return this.getRedundantCopies();
  }

  public int getRedundancy() {
    return redundancy;
  }

  public void setRedundantCopies(int redundancy) {
    this.redundancy = redundancy;
    this.hasRedundancy = true;
  }

  /**
   * Set local properties
   *
   * @deprecated use {@link #setLocalMaxMemory(int)} in GemFire 5.1 and later releases
   * @param localProps those properties for the local VM
   */
  @Deprecated
  public void setLocalProperties(Properties localProps) {
    this.localProperties = localProps;
    if (localProps.get(PartitionAttributesFactory.LOCAL_MAX_MEMORY_PROPERTY) != null) {
      setLocalMaxMemory(
          Integer.parseInt(
              (String) localProps.get(PartitionAttributesFactory.LOCAL_MAX_MEMORY_PROPERTY)));
    }
  }

  /**
   * Set global properties
   *
   * @deprecated use {@link #setTotalMaxMemory(long)} and {@link #setTotalNumBuckets(int)} in
   *     GemFire 5.1 and later releases
   * @param globalProps those properties for the entire Partitioned Region
   */
  @Deprecated
  public void setGlobalProperties(Properties globalProps) {
    this.globalProperties = globalProps;
    String propVal = globalProps.getProperty(PartitionAttributesFactory.GLOBAL_MAX_MEMORY_PROPERTY);
    if (propVal != null) {
      try {
        setTotalMaxMemory(Integer.parseInt(propVal));
      } catch (RuntimeException e) {
        this.totalMaxMemory = PartitionAttributesFactory.GLOBAL_MAX_MEMORY_DEFAULT;
      }
    }
    propVal = globalProps.getProperty(PartitionAttributesFactory.GLOBAL_MAX_BUCKETS_PROPERTY);
    if (propVal != null) {
      try {
        this.setTotalNumBuckets(Integer.parseInt(propVal));
      } catch (RuntimeException e) {
        this.totalNumBuckets = PartitionAttributesFactory.GLOBAL_MAX_BUCKETS_DEFAULT;
      }
    }
  }

  public void addFixedPartitionAttributes(FixedPartitionAttributes fpa) {
    if (this.fixedPAttrs == null) {
      this.fixedPAttrs = new ArrayList<FixedPartitionAttributesImpl>(1);
      this.fixedPAttrs.add((FixedPartitionAttributesImpl) fpa);
      this.hasFixedPAttrs = true;
    } else {
      this.fixedPAttrs.add((FixedPartitionAttributesImpl) fpa);
    }
  }

  private void addFixedPartitionAttributes(List<FixedPartitionAttributesImpl> fpas) {
    this.fixedPAttrs = fpas;
    this.hasFixedPAttrs = true;
  }

  /**
   * Validates that the attributes are consistent with each other. The following rules are checked
   * and enforced:
   *
   * <ul>
   *   <li>totalNumBuckets must be greater than 0
   *   <li>Redundancy should be between 0 and 3
   *   <li>Scope should be either DIST_ACK or DIST_NO_ACK
   * </ul>
   *
   * NOTE: validation that depends on more than one attribute cannot be done in this method. That
   * validation needs to be done in validateWhenAllAttributesAreSet
   *
   * @throws IllegalStateException if the attributes are not consistent with each other.
   */
  public void validateAttributes() {
    if ((this.totalNumBuckets <= 0)) {
      throw new IllegalStateException(
          LocalizedStrings
              .PartitionAttributesImpl_TOTALNUMBICKETS_0_IS_AN_ILLEGAL_VALUE_PLEASE_CHOOSE_A_VALUE_GREATER_THAN_0
              .toLocalizedString(Integer.valueOf(this.totalNumBuckets)));
    }
    if ((this.redundancy < 0) || (this.redundancy >= 4)) {
      throw new IllegalStateException(
          LocalizedStrings
              .PartitionAttributesImpl_REDUNDANTCOPIES_0_IS_AN_ILLEGAL_VALUE_PLEASE_CHOOSE_A_VALUE_BETWEEN_0_AND_3
              .toLocalizedString(Integer.valueOf(this.redundancy)));
    }
    for (Iterator it = this.getLocalProperties().keySet().iterator(); it.hasNext(); ) {
      String propName = (String) it.next();
      if (!PartitionAttributesFactory.LOCAL_MAX_MEMORY_PROPERTY.equals(propName)) {
        throw new IllegalStateException(
            LocalizedStrings.PartitionAttributesImpl_UNKNOWN_LOCAL_PROPERTY_0.toLocalizedString(
                propName));
      }
    }
    for (Iterator it = this.getGlobalProperties().keySet().iterator(); it.hasNext(); ) {
      String propName = (String) it.next();
      if (!PartitionAttributesFactory.GLOBAL_MAX_BUCKETS_PROPERTY.equals(propName)
          && !PartitionAttributesFactory.GLOBAL_MAX_MEMORY_PROPERTY.equals(propName)) {
        throw new IllegalStateException(
            LocalizedStrings.PartitionAttributesImpl_UNKNOWN_GLOBAL_PROPERTY_0.toLocalizedString(
                propName));
      }
    }
    if (this.recoveryDelay < -1) {
      throw new IllegalStateException(
          "RecoveryDelay "
              + this.recoveryDelay
              + " is an illegal value, please choose a value that is greater than or equal to -1");
    }
    if (this.startupRecoveryDelay < -1) {
      throw new IllegalStateException(
          "StartupRecoveryDelay "
              + this.startupRecoveryDelay
              + " is an illegal value, please choose a value that is greater than or equal to -1");
    }
    if (this.fixedPAttrs != null) {
      List<FixedPartitionAttributesImpl> duplicateFPAattrsList =
          new ArrayList<FixedPartitionAttributesImpl>();
      Set<FixedPartitionAttributes> fpAttrsSet = new HashSet<FixedPartitionAttributes>();
      for (FixedPartitionAttributesImpl fpa : this.fixedPAttrs) {
        if (fpa == null || fpa.getPartitionName() == null) {
          throw new IllegalStateException(
              LocalizedStrings.PartitionAttributesImpl_FIXED_PARTITION_NAME_CANNOT_BE_NULL
                  .toString());
        }
        if (fpAttrsSet.contains(fpa)) {
          duplicateFPAattrsList.add(fpa);
        } else {
          fpAttrsSet.add(fpa);
        }
      }
      if (duplicateFPAattrsList.size() != 0) {
        throw new IllegalStateException(
            LocalizedStrings
                .PartitionAttributesImpl_PARTITION_NAME_0_CAN_BE_ADDED_ONLY_ONCE_IN_FIXED_PARTITION_ATTRIBUTES
                .toString(duplicateFPAattrsList.toString()));
      }
    }
  }

  /**
   * This validation should only be done once the region attributes that own this
   * PartitionAttributes are ready to be created. It must be done this late because of bug 45749.
   */
  public void validateWhenAllAttributesAreSet(boolean isDeclarative) {
    if (this.colocatedRegionName != null) {
      if (this.fixedPAttrs != null) {
        throw new IllegalStateException(
            LocalizedStrings
                .PartitionAttributesImpl_IF_COLOCATED_WITH_IS_SPECFIED_THEN_FIXED_PARTITION_ATTRIBUTES_CAN_NOT_BE_SPECIFIED
                .toLocalizedString(this.fixedPAttrs));
      }
    }
    if (this.fixedPAttrs != null) {
      if (this.localMaxMemory == 0) {
        throw new IllegalStateException(
            LocalizedStrings
                .PartitionAttributesImpl_FIXED_PARTITION_ATTRBUTES_0_CANNOT_BE_DEFINED_FOR_ACCESSOR
                .toString(this.fixedPAttrs));
      }
    }
  }

  /**
   * Validates colocation of PartitionRegion <br>
   * This method used to be called when the RegionAttributes were created. But this was too early
   * since the region we are colocated with might not exist (yet). So it is now called when the PR
   * using these attributes is created. See bug 47197.
   *
   * <p>1. The region passed in setColocatedWith should exist.<br>
   * 2. The region passed should be a PartitionedRegion.<br>
   * 3. Custom partitioning should be enabled for colocated regions.<br>
   * 4. totalNumBuckets should be the same for colocated regions.<br>
   * 5. The redundancy of colocated regions should be the same.<br>
   *
   * @since 5.8Beta
   */
  void validateColocation() {
    if (this.colocatedRegionName == null) {
      return;
    }
    Cache cache = GemFireCacheImpl.getInstance();
    if (cache != null) {
      Region<?, ?> region = cache.getRegion(this.colocatedRegionName);
      {
        if (region == null) {
          throw new IllegalStateException(
              LocalizedStrings
                  .PartitionAttributesImpl_REGION_SPECIFIED_IN_COLOCATEDWITH_IS_NOT_PRESENT_IT_SHOULD_BE_CREATED_BEFORE_SETTING_COLOCATED_WITH_THIS_REGION
                  .toLocalizedString());
        }
        if (!(region instanceof PartitionedRegion)) {
          throw new IllegalStateException(
              LocalizedStrings
                  .PartitionAttributesImpl_SETTING_THE_ATTRIBUTE_COLOCATEDWITH_IS_SUPPORTED_ONLY_FOR_PARTITIONEDREGIONS
                  .toLocalizedString());
        }
        PartitionedRegion colocatedRegion = (PartitionedRegion) region;
        if (this.getTotalNumBuckets()
            != colocatedRegion.getPartitionAttributes().getTotalNumBuckets()) {
          throw new IllegalStateException(
              LocalizedStrings
                  .PartitionAttributesImpl_CURRENT_PARTITIONEDREGIONS_TOTALNUMBUCKETS_SHOULD_BE_SAME_AS_TOTALNUMBUCKETS_OF_COLOCATED_PARTITIONEDREGION
                  .toLocalizedString());
        }
        if (this.getRedundancy() != colocatedRegion.getPartitionAttributes().getRedundantCopies()) {
          throw new IllegalStateException(
              LocalizedStrings
                  .PartitionAttributesImpl_CURRENT_PARTITIONEDREGIONS_REDUNDANCY_SHOULD_BE_SAME_AS_THE_REDUNDANCY_OF_COLOCATED_PARTITIONEDREGION
                  .toLocalizedString());
        }
      }
    }
  }
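
  /*
   * Colocation sketch (region names and values are hypothetical): the region named in
   * setColocatedWith must already exist as a PartitionedRegion whose totalNumBuckets and
   * redundancy match this region's attributes, otherwise validateColocation throws
   * IllegalStateException.
   *
   *   PartitionAttributesImpl orders = new PartitionAttributesImpl();
   *   orders.setTotalNumBuckets(113);
   *   orders.setRedundantCopies(1);
   *   orders.setColocatedWith("/customers"); // "/customers" must be a PR with 113 buckets
   *                                          // and redundancy 1
   */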

  /**
   * Added for bug 45749. The attributes in pa are merged into this. Only attributes explicitly set
   * in pa will be merged into this. Any attribute set in pa will take precedence over an attribute
   * in this.
   *
   * @param pa the attributes to merge into this.
   * @since 7.0
   */
  public void merge(PartitionAttributesImpl pa) {
    if (pa.hasRedundancy) {
      setRedundantCopies(pa.getRedundantCopies());
    }
    if (pa.hasLocalMaxMemory) {
      setLocalMaxMemory(pa.getLocalMaxMemory());
    }
    if (pa.hasOffHeap) {
      setOffHeap(pa.getOffHeap());
    }
    if (pa.hasTotalMaxMemory) {
      setTotalMaxMemory(pa.getTotalMaxMemory());
    }
    if (pa.hasTotalNumBuckets) {
      setTotalNumBuckets(pa.getTotalNumBuckets());
    }
    if (pa.hasPartitionResolver) {
      setPartitionResolver(pa.getPartitionResolver());
    }
    if (pa.hasColocatedRegionName) {
      setColocatedWith(pa.getColocatedWith());
    }
    if (pa.hasRecoveryDelay) {
      setRecoveryDelay(pa.getRecoveryDelay());
    }
    if (pa.hasStartupRecoveryDelay) {
      setStartupRecoveryDelay(pa.getStartupRecoveryDelay());
    }
    if (pa.hasFixedPAttrs) {
      addFixedPartitionAttributes(pa.getFixedPartitionAttributes());
    }
    if (pa.hasPartitionListeners) {
      this.addPartitionListeners(pa.partitionListeners);
    }
  }
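
  /*
   * Merge sketch (attribute values are hypothetical): only attributes explicitly set on the
   * argument are copied, so attributes left unset on it do not disturb this instance.
   *
   *   PartitionAttributesImpl base = new PartitionAttributesImpl();
   *   base.setTotalNumBuckets(113);
   *   PartitionAttributesImpl override = new PartitionAttributesImpl();
   *   override.setRedundantCopies(1);
   *   base.merge(override); // base now has redundancy 1 and still has totalNumBuckets 113
   */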

  @SuppressWarnings("unchecked")
  public void setAll(@SuppressWarnings("rawtypes") PartitionAttributes pa) {
    setRedundantCopies(pa.getRedundantCopies());
    setLocalProperties(pa.getLocalProperties());
    setGlobalProperties(pa.getGlobalProperties());
    setLocalMaxMemory(pa.getLocalMaxMemory());
    setTotalMaxMemory(pa.getTotalMaxMemory());
    setTotalNumBuckets(pa.getTotalNumBuckets());
    setPartitionResolver(pa.getPartitionResolver());
    setColocatedWith(pa.getColocatedWith());
    setRecoveryDelay(pa.getRecoveryDelay());
    setStartupRecoveryDelay(pa.getStartupRecoveryDelay());
    setOffHeap(((PartitionAttributesImpl) pa).getOffHeap());
    addFixedPartitionAttributes(pa.getFixedPartitionAttributes());
  }

  /**
   * Only used for testing. Sets the amount of available off-heap memory when no distributed system
   * is available. This method must be called before any instances of PartitionAttributesImpl are
   * created. Specify the value the same way the off-heap memory property is specified. So, "100m" =
   * 100 megabytes, etc.
   *
   * @param newTestAvailableOffHeapMemory The new test value for available off-heap memory.
   */
  public static void setTestAvailableOffHeapMemory(final String newTestAvailableOffHeapMemory) {
    testAvailableOffHeapMemory = newTestAvailableOffHeapMemory;
  }

  /** By default the partition can use up to 100% of the allocated off-heap memory. */
  private int computeOffHeapLocalMaxMemory() {

    long availableOffHeapMemoryInMB = 0;
    if (testAvailableOffHeapMemory != null) {
      availableOffHeapMemoryInMB =
          OffHeapStorage.parseOffHeapMemorySize(testAvailableOffHeapMemory) / (1024 * 1024);
    } else if (InternalDistributedSystem.getAnyInstance() == null) {
      this.localMaxMemoryExists = false;
      // fix 52033: return a non-negative, non-zero temporary placeholder for
      // offHeapLocalMaxMemory
      return OFF_HEAP_LOCAL_MAX_MEMORY_PLACEHOLDER;
    } else {
      String offHeapSizeConfigValue =
          InternalDistributedSystem.getAnyInstance().getOriginalConfig().getOffHeapMemorySize();
      availableOffHeapMemoryInMB =
          OffHeapStorage.parseOffHeapMemorySize(offHeapSizeConfigValue) / (1024 * 1024);
    }

    if (availableOffHeapMemoryInMB > Integer.MAX_VALUE) {
      logger.warn(
          LocalizedMessage.create(
              LocalizedStrings
                  .PartitionAttributesImpl_REDUCED_LOCAL_MAX_MEMORY_FOR_PARTITION_ATTRIBUTES_WHEN_SETTING_FROM_AVAILABLE_OFF_HEAP_MEMORY_SIZE));
      return Integer.MAX_VALUE;
    }

    this.localMaxMemoryExists = true;
    return (int) availableOffHeapMemoryInMB;
  }
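
  // Example (assuming a connected DistributedSystem): with off-heap-memory-size="1g",
  // parseOffHeapMemorySize yields 1073741824 bytes, so the computed default localMaxMemory
  // is 1073741824 / (1024 * 1024) = 1024 megabytes.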

  public int getLocalMaxMemoryDefault() {
    if (!this.offHeap) {
      return PartitionAttributesFactory.LOCAL_MAX_MEMORY_DEFAULT;
    }

    return computeOffHeapLocalMaxMemory();
  }
}
/** @author David Hoots */
@Category(IntegrationTest.class)
public class MemoryMonitorOffHeapJUnitTest {
  private static final Logger logger = LogService.getLogger();
  private static final int SYSTEM_LISTENERS = 1;

  DistributedSystem ds;
  GemFireCacheImpl cache;

  @Before
  public void setUp() throws Exception {
    Properties p = new Properties();
    p.setProperty(DistributionConfig.MCAST_PORT_NAME, "0");
    p.setProperty(DistributionConfig.OFF_HEAP_MEMORY_SIZE_NAME, "1m");
    this.ds = DistributedSystem.connect(p);
    this.cache = (GemFireCacheImpl) CacheFactory.create(this.ds);
    logger.info(addExpectedAbove);
    logger.info(addExpectedBelow);
  }

  @After
  public void tearDown() throws Exception {
    try {
      this.cache.close();
      this.ds.disconnect();
    } finally {
      logger.info(removeExpectedAbove);
      logger.info(removeExpectedBelow);
    }
  }

  static final String expectedEx =
      LocalizedStrings.MemoryMonitor_MEMBER_ABOVE_CRITICAL_THRESHOLD.getRawText()
          .replaceAll("\\{[0-9]+\\}", ".*?");
  public static final String addExpectedAbove =
      "<ExpectedException action=add>" + expectedEx + "</ExpectedException>";
  public static final String removeExpectedAbove =
      "<ExpectedException action=remove>" + expectedEx + "</ExpectedException>";
  static final String expectedBelow =
      LocalizedStrings.MemoryMonitor_MEMBER_BELOW_CRITICAL_THRESHOLD.getRawText()
          .replaceAll("\\{[0-9]+\\}", ".*?");
  public static final String addExpectedBelow =
      "<ExpectedException action=add>" + expectedBelow + "</ExpectedException>";
  public static final String removeExpectedBelow =
      "<ExpectedException action=remove>" + expectedBelow + "</ExpectedException>";

  @Test
  public void testGeneratingEvents() throws Exception {
    InternalResourceManager internalManager = this.cache.getResourceManager();
    OffHeapMemoryMonitor monitor = internalManager.getOffHeapMonitor();

    monitor.setEvictionThreshold(50.0f);
    monitor.setCriticalThreshold(75.0f);
    monitor.stopMonitoring(true);

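    // setUp() configures 1m (1048576 bytes) of off-heap memory, so the 50% eviction threshold
    // is 0.50 * 1048576 = 524288 bytes and the 75% critical threshold is 0.75 * 1048576 =
    // 786432 bytes.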
    assertEquals(524288, internalManager.getStats().getOffHeapEvictionThreshold());
    assertEquals(786432, internalManager.getStats().getOffHeapCriticalThreshold());

    // Register a bunch of listeners
    for (int i = 0; i < 10; i++) {
      ResourceListener listener = new TestMemoryThresholdListener();
      internalManager.addResourceListener(ResourceType.OFFHEAP_MEMORY, listener);
    }
    assertEquals(
        10 + SYSTEM_LISTENERS,
        internalManager.getResourceListeners(ResourceType.OFFHEAP_MEMORY).size());

    // Start at normal
    setThenTestListenersAndStats(400000, 0, 0, 0, 0, 0, 0, 0);

    // Move to eviction
    setThenTestListenersAndStats(550000, 0, 1, 0, 0, 1, 0, 0);

    // Stay at eviction
    setThenTestListenersAndStats(560000, 0, 1, 0, 0, 1, 0, 0);

    // Move to critical
    setThenTestListenersAndStats(850000, 0, 1, 0, 1, 2, 1, 0);

    // Stay at critical (above critical clear margin)
    setThenTestListenersAndStats(786431, 0, 1, 0, 1, 2, 1, 0);
    setThenTestListenersAndStats(765465, 0, 1, 0, 1, 2, 1, 0);

    // Move to eviction
    setThenTestListenersAndStats(765454, 0, 1, 1, 1, 3, 1, 0);

    // Stay at eviction (above eviction clear margin)
    setThenTestListenersAndStats(524281, 0, 1, 1, 1, 3, 1, 0);
    setThenTestListenersAndStats(503321, 0, 1, 1, 1, 3, 1, 0);

    // Move to normal
    setThenTestListenersAndStats(503310, 1, 1, 1, 1, 3, 1, 1);

    // Disable eviction and verify normal event
    monitor.setEvictionThreshold(0f);
    setThenTestListenersAndStats(503315, 1, 1, 1, 1, 3, 1, 2);

    // Enable eviction verify normal event
    monitor.setEvictionThreshold(50f);
    setThenTestListenersAndStats(503315, 1, 1, 1, 1, 3, 1, 3);

    // Disable critical verify normal event
    monitor.setCriticalThreshold(0f);
    setThenTestListenersAndStats(503315, 1, 1, 1, 1, 3, 1, 4);

    // Enable critical verify normal event
    monitor.setCriticalThreshold(75f);
    setThenTestListenersAndStats(503315, 1, 1, 1, 1, 3, 1, 5);
  }

  private void setThenTestListenersAndStats(
      final long memUsed,
      final int evictionStop,
      final int evictionStart,
      final int safe,
      final int critical,
      final int evictionEvents,
      final int criticalEvents,
      final int normalEvents) {
    this.cache.getResourceManager().getOffHeapMonitor().updateStateAndSendEvent(memUsed);
    ResourceManagerStats stats = this.cache.getResourceManager().getStats();

    assertEquals(evictionStop, stats.getOffHeapEvictionStopEvents());
    assertEquals(evictionStart, stats.getOffHeapEvictionStartEvents());
    assertEquals(critical, stats.getOffHeapCriticalEvents());
    assertEquals(safe, stats.getOffHeapSafeEvents());

    for (ResourceListener listener :
        this.cache.getResourceManager().getResourceListeners(ResourceType.OFFHEAP_MEMORY)) {
      if (listener instanceof TestMemoryThresholdListener) {
        assertEquals(
            evictionEvents, ((TestMemoryThresholdListener) listener).getEvictionThresholdCalls());
        assertEquals(
            criticalEvents, ((TestMemoryThresholdListener) listener).getCriticalThresholdCalls());
        assertEquals(normalEvents, ((TestMemoryThresholdListener) listener).getNormalCalls());
      }
    }
  }

  @Test
  public void testDisabledThresholds() throws Exception {
    final InternalResourceManager irm = this.cache.getResourceManager();
    final OffHeapMemoryMonitor monitor = irm.getOffHeapMonitor();

    final RegionFactory regionFactory = this.cache.createRegionFactory(RegionShortcut.LOCAL);
    regionFactory.setOffHeap(true);
    final EvictionAttributesImpl evictionAttrs = new EvictionAttributesImpl();
    evictionAttrs.setAlgorithm(EvictionAlgorithm.NONE);
    regionFactory.setEvictionAttributes(evictionAttrs);
    final Region region = regionFactory.create("testDefaultThresholdsRegion");
    TestMemoryThresholdListener listener = new TestMemoryThresholdListener();
    irm.addResourceListener(ResourceType.OFFHEAP_MEMORY, listener);

    region.put("1", new Byte[550000]);
    region.put("2", new Byte[200000]);
    assertEquals(0, irm.getStats().getOffHeapEvictionStartEvents());
    assertEquals(0, irm.getStats().getOffHeapEvictionStopEvents());
    assertEquals(0, irm.getStats().getOffHeapCriticalEvents());
    assertEquals(0, irm.getStats().getOffHeapSafeEvents());
    assertEquals(0, listener.getEvictionThresholdCalls());
    assertEquals(0, listener.getCriticalThresholdCalls());

    // Enable eviction threshold and make sure event is generated
    monitor.setEvictionThreshold(50f);
    assertEquals(1, irm.getStats().getOffHeapEvictionStartEvents());
    assertEquals(0, irm.getStats().getOffHeapCriticalEvents());
    assertEquals(1, listener.getEvictionThresholdCalls());
    assertEquals(0, listener.getCriticalThresholdCalls());

    // Enable critical threshold and make sure event is generated
    region.put("3", new Byte[200000]);
    monitor.setCriticalThreshold(70f);
    assertEquals(1, irm.getStats().getOffHeapEvictionStartEvents());
    assertEquals(1, irm.getStats().getOffHeapCriticalEvents());
    assertEquals(2, listener.getEvictionThresholdCalls());
    assertEquals(1, listener.getCriticalThresholdCalls());

    // Disable thresholds and verify events
    monitor.setEvictionThreshold(0f);
    monitor.setCriticalThreshold(0f);

    assertEquals(1, irm.getStats().getOffHeapEvictionStartEvents());
    assertEquals(1, irm.getStats().getOffHeapEvictionStopEvents());
    assertEquals(1, irm.getStats().getOffHeapCriticalEvents());
    assertEquals(1, irm.getStats().getOffHeapSafeEvents());

    assertEquals(2, listener.getEvictionThresholdCalls());
    assertEquals(2, listener.getCriticalThresholdCalls());
    assertEquals(0, listener.getNormalCalls());
    assertEquals(2, listener.getEvictionDisabledCalls());
    assertEquals(2, listener.getCriticalDisabledCalls());
  }

  @Test
  public void testAllowedThresholds() {
    final OffHeapMemoryMonitor monitor = this.cache.getResourceManager().getOffHeapMonitor();

    // Test eviction bounds
    try {
      monitor.setEvictionThreshold(100.1f);
      fail("Too high value allowed for setEvictionThreshold");
    } catch (IllegalArgumentException expected) {
      // Expected
    }

    try {
      monitor.setEvictionThreshold(-0.1f);
      fail("Too low value allowed for setEvictionThreshold");
    } catch (IllegalArgumentException expected) {
      // Expected
    }

    monitor.setEvictionThreshold(13f);
    monitor.setEvictionThreshold(0f);
    monitor.setEvictionThreshold(92f);
    monitor.setEvictionThreshold(100f);
    monitor.setEvictionThreshold(0f);

    // Test critical bounds
    try {
      monitor.setCriticalThreshold(100.1f);
      fail("Too high value allowed for setCriticalThreshold");
    } catch (IllegalArgumentException expected) {
      // Expected
    }

    try {
      monitor.setCriticalThreshold(-0.1f);
      fail("Too low value allowed for setCriticalThreshold");
    } catch (IllegalArgumentException expected) {
      // Expected
    }

    monitor.setCriticalThreshold(13f);
    monitor.setCriticalThreshold(0f);
    monitor.setCriticalThreshold(92f);
    monitor.setCriticalThreshold(100f);
    monitor.setCriticalThreshold(0f);

    // Test values relative to each other
    monitor.setEvictionThreshold(1f);
    monitor.setCriticalThreshold(1.1f);
    monitor.setCriticalThreshold(0);
    monitor.setCriticalThreshold(1.1f);
    monitor.setEvictionThreshold(0);
    monitor.setEvictionThreshold(1.0f);
    monitor.setCriticalThreshold(100f);
    monitor.setEvictionThreshold(99.9f);
    monitor.setCriticalThreshold(0f);
    monitor.setEvictionThreshold(0f);
    monitor.setEvictionThreshold(64.1f);
    monitor.setCriticalThreshold(64.2f);

    try {
      monitor.setCriticalThreshold(50f);
      monitor.setEvictionThreshold(50.1f);
      fail("Allowed eviction threshold to be set higher than critical threshold");
    } catch (IllegalArgumentException expected) {
      // Expected
    }
  }

  @Test
  public void testMonitorRunning() {
    final OffHeapMemoryMonitor monitor = this.cache.getResourceManager().getOffHeapMonitor();

    assertFalse("Off-heap monitor is not running", monitor.started);

    monitor.setEvictionThreshold(1f);
    assertTrue("Off-heap monitor is running", monitor.started);
    monitor.setEvictionThreshold(0f);
    assertFalse("Off-heap monitor is not running", monitor.started);

    monitor.setCriticalThreshold(1f);
    assertTrue("Off-heap monitor is running", monitor.started);
    monitor.setCriticalThreshold(0f);
    assertFalse("Off-heap monitor is not running", monitor.started);

    monitor.setEvictionThreshold(1f);
    monitor.setCriticalThreshold(1.1f);
    assertTrue("Off-heap monitor is running", monitor.started);

    monitor.setEvictionThreshold(0f);
    monitor.setCriticalThreshold(0f);
    assertFalse("Off-heap monitor is not running", monitor.started);
  }

  @Test
  public void testGettersAndSetters() {
    final OffHeapMemoryMonitor monitor = this.cache.getResourceManager().getOffHeapMonitor();

    assertEquals(0f, monitor.getEvictionThreshold(), 0.01);
    assertEquals(0f, monitor.getCriticalThreshold(), 0.01);

    monitor.setEvictionThreshold(35);
    assertEquals(35f, monitor.getEvictionThreshold(), 0.01);
    assertEquals(0f, monitor.getCriticalThreshold(), 0.01);

    monitor.setCriticalThreshold(45);
    assertEquals(35f, monitor.getEvictionThreshold(), 0.01);
    assertEquals(45f, monitor.getCriticalThreshold(), 0.01);

    monitor.setEvictionThreshold(0);
    monitor.setCriticalThreshold(0);
    assertEquals(0f, monitor.getEvictionThreshold(), 0.01);
    assertEquals(0f, monitor.getCriticalThreshold(), 0.01);
  }
}
/**
 * Domain class for defining a GemFire entity in XML.
 *
 * @author bansods
 * @author David Hoots
 */
public class XmlEntity implements DataSerializable {
  private static final long serialVersionUID = 1L;
  private static final Logger logger = LogService.getLogger();

  private String type;

  @SuppressWarnings("unused")
  private String parentType;

  private Map<String, String> attributes = new HashMap<String, String>();
  private String xmlDefinition;
  private String searchString;

  private String prefix = CacheXml.PREFIX;

  private String namespace = CacheXml.NAMESPACE;

  /**
   * Default constructor for serialization only.
   *
   * @deprecated Use {@link XmlEntity#builder()}.
   */
  @Deprecated
  public XmlEntity() {}

  /**
   * Construct a new XmlEntity while creating XML from the cache using the element which has a type
   * and attribute matching those given.
   *
   * @param type Type of the XML element to search for. Should be one of the constants from the
   *     {@link CacheXml} class. For example, CacheXml.REGION.
   * @param key Key of the attribute to match, for example, "name" or "id".
   * @param value Value of the attribute to match.
   */
  public XmlEntity(final String type, final String key, final String value) {
    this.type = type;
    this.attributes.put(key, value);

    init();
  }

  /**
   * Construct a new XmlEntity while creating XML from the cache using the element which has
   * attributes matching those given.
   *
   * <p>Unlike {@link #XmlEntity(String, String, String)}, this constructor only builds the search
   * string; it does not load the XML definition.
   *
   * @param parentType Parent type of the XML element to search for. Should be one of the constants
   *     from the {@link CacheXml} class. For example, CacheXml.REGION.
   * @param parentKey Identifier for the parent element, such as "name" or "id".
   * @param parentValue Value of the identifier.
   * @param childType Child type of the XML element to search for within the parent. Should be one
   *     of the constants from the {@link CacheXml} class. For example, CacheXml.INDEX.
   * @param childKey Identifier for the child element, such as "name" or "id".
   * @param childValue Value of the child element identifier.
   */
  public XmlEntity(
      final String parentType,
      final String parentKey,
      final String parentValue,
      final String childType,
      final String childKey,
      final String childValue) {
    // TODO this should be replaced with a builder.
    // TODO consider parent as nested XmlEntity type.
    this.parentType = parentType;
    this.type = childType;

    StringBuilder sb = new StringBuilder();
    sb.append("//").append(prefix).append(':').append(parentType);

    if (!StringUtils.isBlank(parentKey) && !StringUtils.isBlank(parentValue)) {
      sb.append("[@").append(parentKey).append("='").append(parentValue).append("']");
    }

    sb.append("/").append(prefix).append(':').append(childType);

    if (!StringUtils.isBlank(childKey) && !StringUtils.isBlank(childValue)) {
      sb.append("[@").append(childKey).append("='").append(childValue).append("']");
    }
    this.searchString = sb.toString();

    // no init();
  }
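  // Illustrative sketch of the search string built by the constructor above (hypothetical
  // argument values; assumes CacheXml.REGION and CacheXml.INDEX correspond to the element names
  // "region" and "index", and that the default CacheXml.PREFIX is in effect):
  //
  //   new XmlEntity(CacheXml.REGION, "name", "r1", CacheXml.INDEX, "name", "idx1")
  //
  // produces an XPath of the form //<prefix>:region[@name='r1']/<prefix>:index[@name='idx1'].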

  /**
   * Initialize new instances. Called from {@link #XmlEntity(String, String, String)} and {@link
   * XmlEntityBuilder#build()}.
   *
   * @since 8.1
   */
  private final void init() {
    Assert.hasLength(type, "Type cannot be empty");
    Assert.hasLength(prefix, "Prefix cannot be empty");
    Assert.hasLength(namespace, "Namespace cannot be empty");
    Assert.notNull(attributes, "Attributes cannot be null");

    if (null == xmlDefinition) {
      xmlDefinition = loadXmlDefinition();
    }
  }

  /**
   * Use the CacheXmlGenerator to create XML from the entity associated with the current cache.
   *
   * @return XML string representation of the entity.
   */
  private final String loadXmlDefinition() {
    final Cache cache = CacheFactory.getAnyInstance();

    final StringWriter stringWriter = new StringWriter();
    final PrintWriter printWriter = new PrintWriter(stringWriter);
    CacheXmlGenerator.generate(cache, printWriter, true, false, false);
    printWriter.close();

    return loadXmlDefinition(stringWriter.toString());
  }

  /**
   * Uses the supplied xmlDocument to extract the XML for the defined {@link XmlEntity}.
   *
   * @param xmlDocument to extract XML from.
   * @return XML for {@link XmlEntity} if found, otherwise <code>null</code>.
   * @since 8.1
   */
  private final String loadXmlDefinition(final String xmlDocument) {
    final Cache cache = CacheFactory.getAnyInstance();
    try {
      InputSource inputSource = new InputSource(new StringReader(xmlDocument));
      return loadXmlDefinition(XmlUtils.getDocumentBuilder().parse(inputSource));
    } catch (IOException
        | SAXException
        | ParserConfigurationException
        | XPathExpressionException
        | TransformerFactoryConfigurationError
        | TransformerException e) {
      throw new InternalGemFireError("Could not parse XML when creating XMLEntity", e);
    }
  }

  /**
   * Uses the supplied XML {@link Document} to extract the XML for the defined {@link XmlEntity}.
   *
   * @param document to extract XML from.
   * @return XML for {@link XmlEntity} if found, otherwise <code>null</code>.
   * @throws XPathExpressionException
   * @throws TransformerException
   * @throws TransformerFactoryConfigurationError
   * @since 8.1
   */
  private final String loadXmlDefinition(final Document document)
      throws XPathExpressionException, TransformerFactoryConfigurationError, TransformerException {
    final Cache cache = CacheFactory.getAnyInstance();

    this.searchString = createQueryString(prefix, type, attributes);
    logger.info("XmlEntity:searchString: {}", this.searchString);

    if (document != null) {
      // Create an XPath context that maps the prefix to the entity's namespace.
      XPathContext xpathContext = new XPathContext();
      xpathContext.addNamespace(prefix, namespace);
      Node element = XmlUtils.querySingleElement(document, this.searchString, xpathContext);
      // Must copy to preserve namespaces.
      if (null != element) {
        return XmlUtils.elementToString(element);
      }
    }

    logger.warn(
        "No XML definition could be found with name={} and attributes={}", type, attributes);
    return null;
  }

  /**
   * Create an XPath query string from the given element name and attributes.
   *
   * @param prefix Namespace prefix used to qualify the element.
   * @param element Name of the XML element to search for.
   * @param attributes Attributes of the element that should match, for example "name" or "id" and
   *     the value they should equal. This map may be empty.
   * @return An XPath query string.
   */
  private String createQueryString(
      final String prefix, final String element, final Map<String, String> attributes) {
    StringBuilder queryStringBuilder = new StringBuilder();
    Iterator<Entry<String, String>> attributeIter = attributes.entrySet().iterator();
    queryStringBuilder.append("//").append(prefix).append(':').append(element);

    if (attributes.size() > 0) {
      queryStringBuilder.append("[");
      Entry<String, String> attrEntry = attributeIter.next();
      // queryStringBuilder.append("@").append(attrEntry.getKey()).append("=\"").append(attrEntry.getValue()).append("\"");
      queryStringBuilder
          .append("@")
          .append(attrEntry.getKey())
          .append("='")
          .append(attrEntry.getValue())
          .append("'");
      while (attributeIter.hasNext()) {
        attrEntry = attributeIter.next();
        // queryStringBuilder.append(" and
        // @").append(attrEntry.getKey()).append("=\"").append(attrEntry.getValue()).append("\"");
        queryStringBuilder
            .append(" and @")
            .append(attrEntry.getKey())
            .append("='")
            .append(attrEntry.getValue())
            .append("'");
      }

      queryStringBuilder.append("]");
    }

    return queryStringBuilder.toString();
  }
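  // Sketch of the queries produced by createQueryString() (hypothetical inputs; "gf" stands in
  // for the configured prefix):
  //
  //   createQueryString("gf", "region", {"name" -> "customers"})
  //       returns //gf:region[@name='customers']
  //
  //   With additional attributes, the predicates are joined with " and ":
  //       //gf:region[@name='customers' and @refid='PARTITION']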

  public String getSearchString() {
    return this.searchString;
  }

  public String getType() {
    return this.type;
  }

  public Map<String, String> getAttributes() {
    return this.attributes;
  }

  /**
   * Return the value of a single attribute.
   *
   * @param key Key of the attribute whose value will be returned.
   * @return The value of the attribute.
   */
  public String getAttribute(String key) {
    return this.attributes.get(key);
  }

  /**
   * A convenience method to get the name or id attribute from the attributes map if one of them
   * has been set. Name takes precedence.
   *
   * @return The name or id attribute or null if neither is found.
   */
  public String getNameOrId() {
    if (this.attributes.containsKey("name")) {
      return this.attributes.get("name");
    }

    return this.attributes.get("id");
  }

  public String getXmlDefinition() {
    return this.xmlDefinition;
  }

  /**
   * Gets the namespace for the element. Defaults to {@link CacheXml#NAMESPACE} if not set.
   *
   * @return XML element namespace
   * @since 8.1
   */
  public String getNamespace() {
    return namespace;
  }

  /**
   * Gets the prefix for the element. Defaults to {@link CacheXml#PREFIX} if not set.
   *
   * @return XML element prefix
   * @since 8.1
   */
  public String getPrefix() {
    return prefix;
  }

  @Override
  public String toString() {
    return "XmlEntity [namespace="
        + namespace
        + ", type="
        + this.type
        + ", attributes="
        + this.attributes
        + ", xmlDefinition="
        + this.xmlDefinition
        + "]";
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + ((this.attributes == null) ? 0 : this.attributes.hashCode());
    result = prime * result + ((this.type == null) ? 0 : this.type.hashCode());
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) return true;
    if (obj == null) return false;
    if (getClass() != obj.getClass()) return false;
    XmlEntity other = (XmlEntity) obj;
    if (this.attributes == null) {
      if (other.attributes != null) return false;
    } else if (!this.attributes.equals(other.attributes)) return false;
    if (this.namespace == null) {
      if (other.namespace != null) return false;
    } else if (!this.namespace.equals(other.namespace)) return false;
    if (this.type == null) {
      if (other.type != null) return false;
    } else if (!this.type.equals(other.type)) return false;
    return true;
  }

  @Override
  public void toData(DataOutput out) throws IOException {
    DataSerializer.writeString(this.type, out);
    DataSerializer.writeObject(this.attributes, out);
    DataSerializer.writeString(this.xmlDefinition, out);
    DataSerializer.writeString(this.searchString, out);
    DataSerializer.writeString(this.prefix, out);
    DataSerializer.writeString(this.namespace, out);
  }

  @Override
  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
    this.type = DataSerializer.readString(in);
    this.attributes = DataSerializer.readObject(in);
    this.xmlDefinition = DataSerializer.readString(in);
    this.searchString = DataSerializer.readString(in);
    this.prefix = DataSerializer.readString(in);
    this.namespace = DataSerializer.readString(in);
  }
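  // Minimal round-trip sketch for the DataSerializable methods above, using plain java.io streams
  // (an illustration only; within GemFire the serialization framework normally drives these calls):
  //
  //   ByteArrayOutputStream baos = new ByteArrayOutputStream();
  //   entity.toData(new DataOutputStream(baos));
  //
  //   XmlEntity copy = new XmlEntity(); // deprecated no-arg constructor exists for serialization
  //   copy.fromData(new DataInputStream(new ByteArrayInputStream(baos.toByteArray())));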

  /**
   * Produce a new {@link XmlEntityBuilder}.
   *
   * @return new {@link XmlEntityBuilder}.
   * @since 8.1
   */
  public static final XmlEntityBuilder builder() {
    return new XmlEntityBuilder();
  }
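  // Minimal usage sketch of the builder (assumes a Cache is running so build() can load the XML
  // definition, or that withConfig(...) has supplied one; "customers" is a hypothetical name):
  //
  //   XmlEntity entity = XmlEntity.builder()
  //       .withType(CacheXml.REGION)
  //       .withAttribute("name", "customers")
  //       .build();
  //   String xml = entity.getXmlDefinition();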

  /**
   * Builder for {@link XmlEntity}. Default values are as described in {@link XmlEntity}.
   *
   * @author [email protected]
   * @since 8.1
   */
  public static final class XmlEntityBuilder {
    private XmlEntity xmlEntity;

    /**
     * Private constructor.
     *
     * @since 8.1
     */
    private XmlEntityBuilder() {
      xmlEntity = new XmlEntity();
    }

    /**
     * Produce an {@link XmlEntity} with the supplied values. Builder is reset after {@link
     * #build()} is called. Subsequent calls will produce a new {@link XmlEntity}.
     *
     * <p>You are required to at least call {@link #withType(String)}.
     *
     * @return {@link XmlEntity}
     * @since 8.1
     */
    public XmlEntity build() {
      xmlEntity.init();

      final XmlEntity built = xmlEntity;
      xmlEntity = new XmlEntity();

      return built;
    }

    /**
     * Sets the type or element name value as returned by {@link XmlEntity#getType()}.
     *
     * @param type Name of element type.
     * @return this {@link XmlEntityBuilder}
     * @since 8.1
     */
    public XmlEntityBuilder withType(final String type) {
      xmlEntity.type = type;

      return this;
    }

    /**
     * Sets the element prefix and namespace as returned by {@link XmlEntity#getPrefix()} and {@link
     * XmlEntity#getNamespace()} respectively. Defaults are {@link CacheXml#PREFIX} and {@link
     * CacheXml#NAMESPACE} respectively.
     *
     * @param prefix Prefix of element
     * @param namespace Namespace of element
     * @return this {@link XmlEntityBuilder}
     * @since 8.1
     */
    public XmlEntityBuilder withNamespace(final String prefix, final String namespace) {
      xmlEntity.prefix = prefix;
      xmlEntity.namespace = namespace;

      return this;
    }

    /**
     * Adds an attribute for the given <code>name</code> and <code>value</code> to the attributes
     * map returned by {@link XmlEntity#getAttributes()} or {@link XmlEntity#getAttribute(String)}.
     *
     * @param name Name of attribute to set.
     * @param value Value of attribute to set.
     * @return this {@link XmlEntityBuilder}
     * @since 8.1
     */
    public XmlEntityBuilder withAttribute(final String name, final String value) {
      xmlEntity.attributes.put(name, value);

      return this;
    }

    /**
     * Replaces all attributes with the supplied attributes {@link Map}.
     *
     * @param attributes {@link Map} to use.
     * @return this {@link XmlEntityBuilder}
     * @since 8.1
     */
    public XmlEntityBuilder withAttributes(final Map<String, String> attributes) {
      xmlEntity.attributes = attributes;

      return this;
    }

    /**
     * Sets the config XML document source from which the entity XML definition, as returned by
     * {@link XmlEntity#getXmlDefinition()}, is extracted. Defaults to the currently active
     * configuration of the {@link Cache}.
     *
     * <p><b>Should only be used for testing.</b>
     *
     * @param xmlDocument Config XML document.
     * @return this {@link XmlEntityBuilder}
     * @since 8.1
     */
    public XmlEntityBuilder withConfig(final String xmlDocument) {
      xmlEntity.xmlDefinition = xmlEntity.loadXmlDefinition(xmlDocument);

      return this;
    }

    /**
     * Sets the config XML document source from which the entity XML definition, as returned by
     * {@link XmlEntity#getXmlDefinition()}, is extracted. Defaults to the currently active
     * configuration of the {@link Cache}.
     *
     * <p><b>Should only be used for testing.</b>
     *
     * @param document Config XML {@link Document}.
     * @return this {@link XmlEntityBuilder}
     * @throws TransformerException
     * @throws TransformerFactoryConfigurationError
     * @throws XPathExpressionException
     * @since 8.1
     */
    public XmlEntityBuilder withConfig(final Document document)
        throws XPathExpressionException, TransformerFactoryConfigurationError,
            TransformerException {
      xmlEntity.xmlDefinition = xmlEntity.loadXmlDefinition(document);

      return this;
    }
  }
}