Code Example #1
  /** The common state. */
  static final class State<T> {
    /** The first observer or the one which buffers until the first arrives. */
    volatile Observer<? super T> observerRef = new BufferedObserver<T>();
    /** Allow a single subscriber only. */
    volatile int first;
    /** Field updater for observerRef. */
    @SuppressWarnings("rawtypes")
    static final AtomicReferenceFieldUpdater<State, Observer> OBSERVER_UPDATER =
        AtomicReferenceFieldUpdater.newUpdater(State.class, Observer.class, "observerRef");
    /** Field updater for first. */
    @SuppressWarnings("rawtypes")
    static final AtomicIntegerFieldUpdater<State> FIRST_UPDATER =
        AtomicIntegerFieldUpdater.newUpdater(State.class, "first");

    boolean casFirst(int expected, int next) {
      return FIRST_UPDATER.compareAndSet(this, expected, next);
    }

    void setObserverRef(Observer<? super T> o) {
      observerRef = o;
    }

    boolean casObserverRef(Observer<? super T> expected, Observer<? super T> next) {
      return OBSERVER_UPDATER.compareAndSet(this, expected, next);
    }
  }
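
The State class above pairs a reference-field updater with an integer-field updater on the same object. As a minimal sketch of the casFirst idiom in isolation (the SingleUse class below is illustrative, not part of the original source), a single-use guard built the same way lets exactly one racing caller win:

import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;

// Illustrative only: a single-use "claim" guard in the style of State.casFirst above.
final class SingleUse {
  private volatile int first; // 0 = unclaimed, 1 = claimed

  private static final AtomicIntegerFieldUpdater<SingleUse> FIRST =
      AtomicIntegerFieldUpdater.newUpdater(SingleUse.class, "first");

  /** Returns true for exactly one caller, no matter how many threads race here. */
  boolean claim() {
    return FIRST.compareAndSet(this, 0, 1);
  }

  public static void main(String[] args) {
    SingleUse gate = new SingleUse();
    System.out.println(gate.claim()); // true
    System.out.println(gate.claim()); // false: already claimed
  }
}
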
Code Example #2
File: AbstractPromise.java  Project: rakesh-c/Play20
abstract class AbstractPromise {
  private volatile Object _ref;

  static final long _refoffset;

  static {
    try {
      _refoffset =
          Unsafe.instance.objectFieldOffset(AbstractPromise.class.getDeclaredField("_ref"));
    } catch (Throwable t) {
      throw new ExceptionInInitializerError(t);
    }
  }

  protected final boolean updateState(Object oldState, Object newState) {
    return Unsafe.instance.compareAndSwapObject(this, _refoffset, oldState, newState);
  }

  protected final Object getState() {
    return _ref;
  }

  protected static final AtomicReferenceFieldUpdater<AbstractPromise, Object> updater =
      AtomicReferenceFieldUpdater.newUpdater(AbstractPromise.class, Object.class, "_ref");
}
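
Note that this class declares both a sun.misc.Unsafe field offset and an AtomicReferenceFieldUpdater for the same _ref field, while updateState goes through Unsafe directly. For comparison, a sketch of an updater-only variant (an assumption about how such a class could be written, not the project's actual code):

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

// Sketch: the same CAS semantics expressed only through a field updater,
// with no dependency on sun.misc.Unsafe.
abstract class UpdaterOnlyPromise {
  private volatile Object _ref;

  private static final AtomicReferenceFieldUpdater<UpdaterOnlyPromise, Object> UPDATER =
      AtomicReferenceFieldUpdater.newUpdater(UpdaterOnlyPromise.class, Object.class, "_ref");

  protected final boolean updateState(Object oldState, Object newState) {
    return UPDATER.compareAndSet(this, oldState, newState);
  }

  protected final Object getState() {
    return _ref;
  }
}
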
Code Example #3
  static final class PublisherTimerRunnable implements Runnable, Subscription {
    final Subscriber<? super Long> s;

    final Function<Runnable, ? extends Runnable> asyncExecutor;

    volatile Runnable cancel;
    static final AtomicReferenceFieldUpdater<PublisherTimerRunnable, Runnable> CANCEL =
        AtomicReferenceFieldUpdater.newUpdater(
            PublisherTimerRunnable.class, Runnable.class, "cancel");

    static final Runnable CANCELLED = () -> {};

    volatile boolean requested;

    public PublisherTimerRunnable(
        Subscriber<? super Long> s, Function<Runnable, ? extends Runnable> asyncExecutor) {
      this.s = s;
      this.asyncExecutor = asyncExecutor;
    }

    public void setCancel(Runnable cancel) {
      if (!CANCEL.compareAndSet(this, null, cancel)) {
        cancel.run();
      }
    }

    @Override
    public void run() {
      if (requested) {
        if (cancel != CANCELLED) {
          s.onNext(0L);
        }
        asyncExecutor.apply(null);
        if (cancel != CANCELLED) {
          s.onComplete();
        }
      } else {
        s.onError(new IllegalStateException("Could not emit value due to lack of requests"));
      }
    }

    @Override
    public void cancel() {
      Runnable c = cancel;
      if (c != CANCELLED) {
        c = CANCEL.getAndSet(this, CANCELLED);
        if (c != null && c != CANCELLED) {
          c.run();
        }
      }
      asyncExecutor.apply(null);
    }

    @Override
    public void request(long n) {
      if (SubscriptionHelper.validate(n)) {
        requested = true;
      }
    }
  }
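
The core idiom in setCancel and cancel is: CAS the cancel slot from null to the real action, and if that fails because cancellation already happened, run the action immediately; cancel itself swaps in a CANCELLED sentinel via getAndSet. A self-contained sketch of that idiom with the reactive-streams types stripped away (class and method names below are illustrative):

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

// Illustrative sketch of the "set once, or run immediately if already cancelled" idiom
// used by setCancel()/cancel() above.
final class OneShotCancel {
  private volatile Runnable cancel;

  private static final AtomicReferenceFieldUpdater<OneShotCancel, Runnable> CANCEL =
      AtomicReferenceFieldUpdater.newUpdater(OneShotCancel.class, Runnable.class, "cancel");

  private static final Runnable CANCELLED = () -> {};

  /** Install the cancel action; if cancel() already ran, execute the action right away. */
  void setCancel(Runnable action) {
    if (!CANCEL.compareAndSet(this, null, action)) {
      action.run();
    }
  }

  /** Cancel at most once: swap in the CANCELLED sentinel and run whatever was installed. */
  void cancel() {
    Runnable current = CANCEL.getAndSet(this, CANCELLED);
    if (current != null && current != CANCELLED) {
      current.run();
    }
  }
}
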
Code Example #4
  /** compareAndSet in one thread enables another waiting for value to succeed */
  public void testCompareAndSetInMultipleThreads() throws Exception {
    x = one;
    final AtomicReferenceFieldUpdater<AtomicReferenceFieldUpdaterTest, Integer> a;
    try {
      a =
          AtomicReferenceFieldUpdater.newUpdater(
              AtomicReferenceFieldUpdaterTest.class, Integer.class, "x");
    } catch (RuntimeException ok) {
      return;
    }

    Thread t =
        new Thread(
            new CheckedRunnable() {
              public void realRun() {
                while (!a.compareAndSet(AtomicReferenceFieldUpdaterTest.this, two, three))
                  Thread.yield();
              }
            });

    t.start();
    assertTrue(a.compareAndSet(this, one, two));
    t.join(LONG_DELAY_MS);
    assertFalse(t.isAlive());
    assertSame(a.get(this), three);
  }
Code Example #5
 /** Constructor with non-volatile field throws exception */
 public void testConstructor3() {
   try {
     AtomicReferenceFieldUpdater<AtomicReferenceFieldUpdaterTest, Integer> a =
         AtomicReferenceFieldUpdater.newUpdater(
             AtomicReferenceFieldUpdaterTest.class, Integer.class, "w");
     shouldThrow();
   } catch (RuntimeException success) {
   }
 }
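
The test above relies on newUpdater rejecting a field that is not declared volatile; per the JDK javadoc this surfaces as an IllegalArgumentException. A standalone sketch of the same behavior, using a hypothetical holder class:

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

// Sketch: newUpdater rejects non-volatile fields with IllegalArgumentException.
final class NonVolatileDemo {
  volatile String ok;   // eligible for a field updater
  String notVolatile;   // not eligible

  public static void main(String[] args) {
    AtomicReferenceFieldUpdater.newUpdater(NonVolatileDemo.class, String.class, "ok"); // fine
    try {
      AtomicReferenceFieldUpdater.newUpdater(NonVolatileDemo.class, String.class, "notVolatile");
    } catch (IllegalArgumentException expected) {
      System.out.println("rejected non-volatile field: " + expected.getMessage());
    }
  }
}
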
Code Example #6
  private static class Node<E> {
    volatile E item;
    volatile Node<E> next;

    @SuppressWarnings("rawtypes")
    private static final AtomicReferenceFieldUpdater<Node, Node> nextUpdater =
        AtomicReferenceFieldUpdater.newUpdater(Node.class, Node.class, "next");

    boolean casNext(Node<E> cmp, Node<E> val) {
      return nextUpdater.compareAndSet(this, cmp, val);
    }
  }
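
A Node with a casNext helper like this usually backs a lock-free linked structure, where a CAS splices in a new node only if the link has not changed in the meantime. A minimal sketch of the pattern, assuming a Treiber-style stack rather than whatever structure this Node originally belonged to:

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

// Sketch: a Treiber-style lock-free stack built on the same field-updater CAS pattern.
final class CasStack<E> {
  private static final class Node<E> {
    final E item;
    Node<E> next; // written before the node is published via CAS on head
    Node(E item) { this.item = item; }
  }

  private volatile Node<E> head;

  @SuppressWarnings("rawtypes")
  private static final AtomicReferenceFieldUpdater<CasStack, Node> HEAD =
      AtomicReferenceFieldUpdater.newUpdater(CasStack.class, Node.class, "head");

  void push(E item) {
    Node<E> node = new Node<>(item);
    Node<E> current;
    do {
      current = head;
      node.next = current;
    } while (!HEAD.compareAndSet(this, current, node));
  }

  E pop() {
    Node<E> current;
    Node<E> next;
    do {
      current = head;
      if (current == null) {
        return null;
      }
      next = current.next;
    } while (!HEAD.compareAndSet(this, current, next));
    return current.item;
  }
}
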
Code Example #7
File: InternalNode.java  Project: ChaoMai/ckdtree
/** Created by chaomai on 11/22/15. */
class InternalNode extends Node {
  private static final AtomicReferenceFieldUpdater<InternalNode, Node> leftUpdater =
      AtomicReferenceFieldUpdater.newUpdater(InternalNode.class, Node.class, "left");
  private static final AtomicReferenceFieldUpdater<InternalNode, Node> rightUpdater =
      AtomicReferenceFieldUpdater.newUpdater(InternalNode.class, Node.class, "right");
  private static final AtomicReferenceFieldUpdater<InternalNode, Update> updateUpdater =
      AtomicReferenceFieldUpdater.newUpdater(InternalNode.class, Update.class, "update");
  final int skippedDepth;
  volatile Node left;
  volatile Node right;
  volatile Update update;

  InternalNode(double[] key, Node left, Node right) {
    this(key, left, right, 0);
  }

  InternalNode(double[] key, Node left, Node right, int skippedDepth) {
    this(key, left, right, new Update(), skippedDepth);
  }

  InternalNode(double[] key, Node left, Node right, Update update, int skippedDepth) {
    super(key);
    this.left = left;
    this.right = right;
    this.update = update;
    this.skippedDepth = skippedDepth;
  }

  boolean CAS_LEFT(Node old, Node n) {
    return leftUpdater.compareAndSet(this, old, n);
  }

  boolean CAS_RIGHT(Node old, Node n) {
    return rightUpdater.compareAndSet(this, old, n);
  }

  boolean CAS_UPDATE(Update old, Update n) {
    return updateUpdater.compareAndSet(this, old, n);
  }
}
Code Example #8
 /** getAndSet returns previous value and sets to given value */
 public void testGetAndSet() {
   AtomicReferenceFieldUpdater<AtomicReferenceFieldUpdaterTest, Integer> a;
   try {
     a =
         AtomicReferenceFieldUpdater.newUpdater(
             AtomicReferenceFieldUpdaterTest.class, Integer.class, "x");
   } catch (RuntimeException ok) {
     return;
   }
   x = one;
   assertSame(one, a.getAndSet(this, zero));
   assertSame(zero, a.getAndSet(this, m10));
   assertSame(m10, a.getAndSet(this, 1));
 }
Code Example #9
 static {
   AtomicReferenceFieldUpdater<AbstractChannelHandlerContext, PausableChannelEventExecutor>
       updater =
           PlatformDependent.newAtomicReferenceFieldUpdater(
               AbstractChannelHandlerContext.class, "wrappedEventLoop");
   if (updater == null) {
     updater =
         AtomicReferenceFieldUpdater.newUpdater(
             AbstractChannelHandlerContext.class,
             PausableChannelEventExecutor.class,
             "wrappedEventLoop");
   }
   WRAPPED_EVENTEXECUTOR_UPDATER = updater;
 }
Code Example #10
 /** repeated weakCompareAndSet succeeds in changing value when equal to expected */
 public void testWeakCompareAndSet() {
   AtomicReferenceFieldUpdater<AtomicReferenceFieldUpdaterTest, Integer> a;
   try {
     a =
         AtomicReferenceFieldUpdater.newUpdater(
             AtomicReferenceFieldUpdaterTest.class, Integer.class, "x");
   } catch (RuntimeException ok) {
     return;
   }
   x = one;
   while (!a.weakCompareAndSet(this, one, two)) ;
   while (!a.weakCompareAndSet(this, two, m4)) ;
   assertSame(m4, a.get(this));
   while (!a.weakCompareAndSet(this, m4, seven)) ;
   assertSame(seven, a.get(this));
 }
Code Example #11
 /** get returns the last value lazySet by same thread */
 public void testGetLazySet() {
   AtomicReferenceFieldUpdater<AtomicReferenceFieldUpdaterTest, Integer> a;
   try {
     a =
         AtomicReferenceFieldUpdater.newUpdater(
             AtomicReferenceFieldUpdaterTest.class, Integer.class, "x");
   } catch (RuntimeException ok) {
     return;
   }
   x = one;
   assertSame(one, a.get(this));
   a.lazySet(this, two);
   assertSame(two, a.get(this));
   a.lazySet(this, m3);
   assertSame(m3, a.get(this));
 }
Code Example #12
  private static final class ResponseChannelFactory implements ChannelFactory<StreamSinkChannel> {
    private static final AtomicReferenceFieldUpdater<ResponseChannelFactory, ChannelWrapper[]>
        wrappersUpdater =
            AtomicReferenceFieldUpdater.newUpdater(
                ResponseChannelFactory.class, ChannelWrapper[].class, "wrappers");
    private final HttpServerExchange exchange;
    private final StreamSinkChannel firstChannel;

    @SuppressWarnings("unused")
    private volatile ChannelWrapper[] wrappers;

    ResponseChannelFactory(
        final HttpServerExchange exchange,
        final StreamSinkChannel firstChannel,
        final ChannelWrapper[] wrappers) {
      this.exchange = exchange;
      this.firstChannel = firstChannel;
      this.wrappers = wrappers;
    }

    public StreamSinkChannel create() {
      final ChannelWrapper[] wrappers = wrappersUpdater.getAndSet(this, null);
      if (wrappers == null) {
        return null;
      }
      StreamSinkChannel oldChannel = firstChannel;
      StreamSinkChannel channel = oldChannel;
      for (ChannelWrapper wrapper : wrappers) {
        channel = ((ChannelWrapper<StreamSinkChannel>) wrapper).wrap(channel, exchange);
        if (channel == null) {
          channel = oldChannel;
        }
      }
      exchange.startResponse();
      return channel;
    }
  }
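
The create() method consumes the wrapper array exactly once by swapping in null with getAndSet; every later caller sees null and bails out. A generic sketch of that claim-once idiom (the OneTimeResource class is illustrative):

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

// Sketch of the "consume exactly once via getAndSet(null)" idiom used by create() above.
final class OneTimeResource<T> {
  private volatile T value;

  @SuppressWarnings("rawtypes")
  private static final AtomicReferenceFieldUpdater<OneTimeResource, Object> VALUE =
      AtomicReferenceFieldUpdater.newUpdater(OneTimeResource.class, Object.class, "value");

  OneTimeResource(T value) { this.value = value; }

  /** Returns the value to exactly one caller; all later callers get null. */
  @SuppressWarnings("unchecked")
  T take() {
    return (T) VALUE.getAndSet(this, null);
  }
}
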
Code Example #13
/**
 * An HTTP server request/response exchange. An instance of this class is constructed as soon as the
 * request headers are fully parsed.
 *
 * @author <a href="mailto:[email protected]">David M. Lloyd</a>
 */
public final class HttpServerExchange extends AbstractAttachable {
  // immutable state

  private static final Logger log = Logger.getLogger(HttpServerExchange.class);

  private final HttpServerConnection connection;
  private final HeaderMap requestHeaders = new HeaderMap();
  private final HeaderMap responseHeaders = new HeaderMap();

  private final Map<String, Deque<String>> queryParameters = new HashMap<String, Deque<String>>(0);

  private final StreamSinkChannel underlyingResponseChannel;
  private final StreamSourceChannel underlyingRequestChannel;

  private final Runnable requestTerminateAction;
  private final Runnable responseTerminateAction;

  private HttpString protocol;

  // mutable state

  private volatile int state = 200;
  private volatile HttpString requestMethod;
  private volatile String requestScheme;
  /** The original request URI. This will include the host name if it was specified by the client */
  private volatile String requestURI;
  /** The original request path. */
  private volatile String requestPath;
  /** The canonical version of the original path. */
  private volatile String canonicalPath;
  /** The remaining unresolved portion of the canonical path. */
  private volatile String relativePath;

  /** The resolved part of the canonical path. */
  private volatile String resolvedPath = "";

  /** the query string */
  private volatile String queryString;

  private boolean complete = false;

  private static final ChannelWrapper<StreamSourceChannel>[] NO_SOURCE_WRAPPERS =
      new ChannelWrapper[0];
  private static final ChannelWrapper<StreamSinkChannel>[] NO_SINK_WRAPPERS = new ChannelWrapper[0];

  private volatile ChannelWrapper[] requestWrappers = NO_SOURCE_WRAPPERS;
  private volatile ChannelWrapper[] responseWrappers = NO_SINK_WRAPPERS;

  private static final AtomicReferenceFieldUpdater<HttpServerExchange, ChannelWrapper[]>
      requestWrappersUpdater =
          AtomicReferenceFieldUpdater.newUpdater(
              HttpServerExchange.class, ChannelWrapper[].class, "requestWrappers");
  private static final AtomicReferenceFieldUpdater<HttpServerExchange, ChannelWrapper[]>
      responseWrappersUpdater =
          AtomicReferenceFieldUpdater.newUpdater(
              HttpServerExchange.class, ChannelWrapper[].class, "responseWrappers");

  private static final AtomicIntegerFieldUpdater<HttpServerExchange> stateUpdater =
      AtomicIntegerFieldUpdater.newUpdater(HttpServerExchange.class, "state");

  private static final int MASK_RESPONSE_CODE = intBitMask(0, 9);
  private static final int FLAG_RESPONSE_SENT = 1 << 10;
  private static final int FLAG_RESPONSE_TERMINATED = 1 << 11;
  private static final int FLAG_REQUEST_TERMINATED = 1 << 12;
  private static final int FLAG_CLEANUP = 1 << 13;

  HttpServerExchange(
      final HttpServerConnection connection,
      final StreamSourceChannel requestChannel,
      final StreamSinkChannel responseChannel,
      final Runnable requestTerminateAction,
      final Runnable responseTerminateAction) {
    this.connection = connection;
    this.underlyingRequestChannel = requestChannel;
    if (connection == null) {
      // just for unit tests
      this.underlyingResponseChannel = null;
    } else {
      this.underlyingResponseChannel =
          new HttpResponseChannel(responseChannel, connection.getBufferPool(), this);
    }
    this.requestTerminateAction = requestTerminateAction;
    this.responseTerminateAction = responseTerminateAction;
  }

  /**
   * Get the request protocol string. Normally this is one of the strings listed in {@link
   * Protocols}.
   *
   * @return the request protocol string
   */
  public HttpString getProtocol() {
    return protocol;
  }

  /**
   * Sets the http protocol
   *
   * @param protocol
   */
  public void setProtocol(final HttpString protocol) {
    this.protocol = protocol;
  }

  /**
   * Determine whether this request conforms to HTTP 0.9.
   *
   * @return {@code true} if the request protocol is equal to {@link Protocols#HTTP_0_9}, {@code
   *     false} otherwise
   */
  public boolean isHttp09() {
    return protocol.equals(Protocols.HTTP_0_9);
  }

  /**
   * Determine whether this request conforms to HTTP 1.0.
   *
   * @return {@code true} if the request protocol is equal to {@link Protocols#HTTP_1_0}, {@code
   *     false} otherwise
   */
  public boolean isHttp10() {
    return protocol.equals(Protocols.HTTP_1_0);
  }

  /**
   * Determine whether this request conforms to HTTP 1.1.
   *
   * @return {@code true} if the request protocol is equal to {@link Protocols#HTTP_1_1}, {@code
   *     false} otherwise
   */
  public boolean isHttp11() {
    return protocol.equals(Protocols.HTTP_1_1);
  }

  /**
   * Get the HTTP request method. Normally this is one of the strings listed in {@link Methods}.
   *
   * @return the HTTP request method
   */
  public HttpString getRequestMethod() {
    return requestMethod;
  }

  /**
   * Set the HTTP request method.
   *
   * @param requestMethod the HTTP request method
   */
  public void setRequestMethod(final HttpString requestMethod) {
    this.requestMethod = requestMethod;
  }

  /**
   * Get the request URI scheme. Normally this is one of {@code http} or {@code https}.
   *
   * @return the request URI scheme
   */
  public String getRequestScheme() {
    return requestScheme;
  }

  /**
   * Set the request URI scheme.
   *
   * @param requestScheme the request URI scheme
   */
  public void setRequestScheme(final String requestScheme) {
    this.requestScheme = requestScheme;
  }

  /**
   * Gets the request URI, including the host name, protocol, etc., if specified by the client.
   *
   * <p>In most cases this will be equal to {@link #requestPath}
   *
   * @return The request URI
   */
  public String getRequestURI() {
    return requestURI;
  }

  /**
   * Sets the request URI
   *
   * @param requestURI The new request URI
   */
  public void setRequestURI(final String requestURI) {
    this.requestURI = requestURI;
  }

  /**
   * Get the request URI path. This is the whole original request path.
   *
   * @return the request URI path
   */
  public String getRequestPath() {
    return requestPath;
  }

  /**
   * Set the request URI path.
   *
   * @param requestPath the request URI path
   */
  public void setRequestPath(final String requestPath) {
    this.requestPath = requestPath;
  }

  /**
   * Get the request relative path. This is the path which should be evaluated by the current
   * handler.
   *
   * <p>If the {@link io.undertow.server.handlers.CanonicalPathHandler} is installed in the current
   * chain then this path will be canonicalized.
   *
   * @return the request relative path
   */
  public String getRelativePath() {
    return relativePath;
  }

  /**
   * Set the request relative path.
   *
   * @param relativePath the request relative path
   */
  public void setRelativePath(final String relativePath) {
    this.relativePath = relativePath;
  }

  /** internal method used by the parser to set both the request and relative path fields */
  void setParsedRequestPath(final String requestPath) {
    this.relativePath = requestPath;
    this.requestPath = requestPath;
  }

  /**
   * Get the resolved path.
   *
   * @return the resolved path
   */
  public String getResolvedPath() {
    return resolvedPath;
  }

  /**
   * Set the resolved path.
   *
   * @param resolvedPath the resolved path
   */
  public void setResolvedPath(final String resolvedPath) {
    this.resolvedPath = resolvedPath;
  }

  /**
   * Get the canonical path.
   *
   * @return the canonical path
   */
  public String getCanonicalPath() {
    return canonicalPath;
  }

  /**
   * Set the canonical path.
   *
   * @param canonicalPath the canonical path
   */
  public void setCanonicalPath(final String canonicalPath) {
    this.canonicalPath = canonicalPath;
  }

  public String getQueryString() {
    return queryString;
  }

  public void setQueryString(final String queryString) {
    this.queryString = queryString;
  }

  /**
   * Reconstructs the complete URL as seen by the user. This includes the scheme, host name, etc.,
   * but does not include the query string.
   */
  public String getRequestURL() {
    String host = getRequestHeaders().getFirst(Headers.HOST);
    if (host == null) {
      host = getDestinationAddress().getAddress().getHostAddress();
    }
    return getRequestScheme() + "://" + host + getRequestURI();
  }

  /**
   * Get the underlying HTTP connection.
   *
   * @return the underlying HTTP connection
   */
  public HttpServerConnection getConnection() {
    return connection;
  }

  /**
   * Upgrade the channel to a raw socket. This method sets the response code to 101, and then marks
   * both the request and response as terminated, which means that once the current request is
   * completed the raw channel can be obtained from {@link
   * io.undertow.server.HttpServerConnection#getChannel()}
   *
   * @throws IllegalStateException if a response or upgrade was already sent, or if the request body
   *     is already being read
   */
  public void upgradeChannel() {
    setResponseCode(101);

    int oldVal, newVal;
    do {
      oldVal = state;
      if (allAreSet(oldVal, FLAG_REQUEST_TERMINATED | FLAG_RESPONSE_TERMINATED)) {
        // idempotent
        return;
      }
      newVal = oldVal | FLAG_REQUEST_TERMINATED | FLAG_RESPONSE_TERMINATED;
    } while (!stateUpdater.compareAndSet(this, oldVal, newVal));
  }

  /**
   * Get the source address of the HTTP request.
   *
   * @return the source address of the HTTP request
   */
  public InetSocketAddress getSourceAddress() {
    return connection.getPeerAddress(InetSocketAddress.class);
  }

  /**
   * Get the destination address of the HTTP request.
   *
   * @return the destination address of the HTTP request
   */
  public InetSocketAddress getDestinationAddress() {
    return connection.getLocalAddress(InetSocketAddress.class);
  }

  /**
   * Get the request headers.
   *
   * @return the request headers
   */
  public HeaderMap getRequestHeaders() {
    return requestHeaders;
  }

  /**
   * Get the response headers.
   *
   * @return the response headers
   */
  public HeaderMap getResponseHeaders() {
    return responseHeaders;
  }

  /**
   * Returns a mutable map of query parameters.
   *
   * @return The query parameters
   */
  public Map<String, Deque<String>> getQueryParameters() {
    return queryParameters;
  }

  void addQueryParam(final String name, final String param) {
    Deque<String> list = queryParameters.get(name);
    if (list == null) {
      queryParameters.put(name, list = new ArrayDeque<String>());
    }
    list.add(param);
  }

  /** @return <code>true</code> if the response has already been started */
  public boolean isResponseStarted() {
    return allAreSet(state, FLAG_RESPONSE_SENT);
  }

  /**
   * Get the inbound request. If there is no request body, calling this method may cause the next
   * request to immediately be processed. The {@link StreamSourceChannel#close()} or {@link
   * StreamSourceChannel#shutdownReads()} method must be called at some point after the request is
   * processed to prevent resource leakage and to allow the next request to proceed. Any unread
   * content will be discarded.
   *
   * @return the channel for the inbound request, or {@code null} if another party already acquired
   *     the channel
   */
  public StreamSourceChannel getRequestChannel() {
    final ChannelWrapper[] wrappers = requestWrappersUpdater.getAndSet(this, null);
    if (wrappers == null) {
      return null;
    }
    StreamSourceChannel channel = underlyingRequestChannel;
    for (ChannelWrapper wrapper : wrappers) {
      final StreamSourceChannel oldChannel = channel;
      channel = ((ChannelWrapper<StreamSourceChannel>) wrapper).wrap(oldChannel, this);
      if (channel == null) {
        channel = oldChannel;
      }
    }
    return channel;
  }

  public boolean isRequestChannelAvailable() {
    return requestWrappers != null;
  }

  /**
   * Returns true if the completion handler for this exchange has been invoked, and the request is
   * considered finished.
   */
  public boolean isComplete() {
    return complete;
  }

  /**
   * Force the codec to treat the request as fully read. Should only be invoked by handlers which
   * downgrade the socket or implement a transfer coding.
   */
  void terminateRequest() {
    int oldVal, newVal;
    do {
      oldVal = state;
      if (allAreSet(oldVal, FLAG_REQUEST_TERMINATED)) {
        // idempotent
        return;
      }
      newVal = oldVal | FLAG_REQUEST_TERMINATED;
    } while (!stateUpdater.compareAndSet(this, oldVal, newVal));
    requestTerminateAction.run();
  }

  /**
   * Get the factory to produce the response channel. The resultant channel's {@link
   * StreamSinkChannel#close()} or {@link StreamSinkChannel#shutdownWrites()} method must be called
   * at some point after the request is processed to prevent resource leakage and to allow the next
   * request to proceed. Closing a fixed-length response before the corresponding number of bytes
   * has been written will cause the connection to be reset and subsequent requests to fail; thus it
   * is important to ensure that the proper content length is delivered when one is specified. The
   * response channel may not be writable until after the response headers have been sent.
   *
   * <p>If this method is not called then an empty or default response body will be used, depending
   * on the response code set.
   *
   * <p>The returned channel will begin to write out headers when the first write request is
   * initiated, or when {@link java.nio.channels.Channel#close()} is called on the channel with no
   * content being written. Once the channel is acquired, however, the response code and headers may
   * not be modified.
   *
   * @return the response channel factory, or {@code null} if another party already acquired the
   *     channel factory
   */
  public ChannelFactory<StreamSinkChannel> getResponseChannelFactory() {
    final ChannelWrapper[] wrappers = responseWrappersUpdater.getAndSet(this, null);
    if (wrappers == null) {
      return null;
    }
    return new ResponseChannelFactory(this, underlyingResponseChannel, wrappers);
  }

  private static final class ResponseChannelFactory implements ChannelFactory<StreamSinkChannel> {
    private static final AtomicReferenceFieldUpdater<ResponseChannelFactory, ChannelWrapper[]>
        wrappersUpdater =
            AtomicReferenceFieldUpdater.newUpdater(
                ResponseChannelFactory.class, ChannelWrapper[].class, "wrappers");
    private final HttpServerExchange exchange;
    private final StreamSinkChannel firstChannel;

    @SuppressWarnings("unused")
    private volatile ChannelWrapper[] wrappers;

    ResponseChannelFactory(
        final HttpServerExchange exchange,
        final StreamSinkChannel firstChannel,
        final ChannelWrapper[] wrappers) {
      this.exchange = exchange;
      this.firstChannel = firstChannel;
      this.wrappers = wrappers;
    }

    public StreamSinkChannel create() {
      final ChannelWrapper[] wrappers = wrappersUpdater.getAndSet(this, null);
      if (wrappers == null) {
        return null;
      }
      StreamSinkChannel oldChannel = firstChannel;
      StreamSinkChannel channel = oldChannel;
      for (ChannelWrapper wrapper : wrappers) {
        channel = ((ChannelWrapper<StreamSinkChannel>) wrapper).wrap(channel, exchange);
        if (channel == null) {
          channel = oldChannel;
        }
      }
      exchange.startResponse();
      return channel;
    }
  }

  /** @return <code>true</code> if {@link #getResponseChannelFactory()} has not been called */
  public boolean isResponseChannelAvailable() {
    return responseWrappers != null;
  }

  /**
   * Change the response code for this response. If not specified, the code will be a {@code 200}.
   * Setting the response code after the response headers have been transmitted has no effect.
   *
   * @param responseCode the new code
   * @throws IllegalStateException if a response or upgrade was already sent
   */
  public void setResponseCode(final int responseCode) {
    if (responseCode < 0 || responseCode > 999) {
      throw new IllegalArgumentException("Invalid response code");
    }
    int oldVal, newVal;
    do {
      oldVal = state;
      if (allAreSet(oldVal, FLAG_RESPONSE_SENT)) {
        throw UndertowMessages.MESSAGES.responseAlreadyStarted();
      }
      newVal = oldVal & ~MASK_RESPONSE_CODE | responseCode & MASK_RESPONSE_CODE;
    } while (!stateUpdater.compareAndSet(this, oldVal, newVal));
  }

  /**
   * Adds a {@link ChannelWrapper} to the request wrapper chain.
   *
   * @param wrapper the wrapper
   */
  public void addRequestWrapper(final ChannelWrapper<StreamSourceChannel> wrapper) {
    ChannelWrapper[] oldVal;
    ChannelWrapper[] newVal;
    int oldLen;
    do {
      oldVal = requestWrappers;
      if (oldVal == null) {
        throw UndertowMessages.MESSAGES.requestChannelAlreadyProvided();
      }
      oldLen = oldVal.length;
      newVal = Arrays.copyOf(oldVal, oldLen + 1);
      newVal[oldLen] = wrapper;
    } while (!requestWrappersUpdater.compareAndSet(this, oldVal, newVal));
  }

  /**
   * Adds a {@link ChannelWrapper} to the response wrapper chain.
   *
   * @param wrapper the wrapper
   */
  public void addResponseWrapper(final ChannelWrapper<StreamSinkChannel> wrapper) {
    ChannelWrapper[] oldVal;
    ChannelWrapper[] newVal;
    int oldLen;
    do {
      oldVal = responseWrappers;
      if (oldVal == null) {
        throw UndertowMessages.MESSAGES.responseChannelAlreadyProvided();
      }
      oldLen = oldVal.length;
      newVal = Arrays.copyOf(oldVal, oldLen + 1);
      newVal[oldLen] = wrapper;
    } while (!responseWrappersUpdater.compareAndSet(this, oldVal, newVal));
  }

  /**
   * Get the response code.
   *
   * @return the response code
   */
  public int getResponseCode() {
    return state & MASK_RESPONSE_CODE;
  }

  /**
   * Force the codec to treat the response as fully written. Should only be invoked by handlers
   * which downgrade the socket or implement a transfer coding.
   */
  void terminateResponse() {
    int oldVal, newVal;
    do {
      oldVal = state;
      if (allAreSet(oldVal, FLAG_RESPONSE_TERMINATED)) {
        // idempotent
        return;
      }
      newVal = oldVal | FLAG_RESPONSE_TERMINATED;
    } while (!stateUpdater.compareAndSet(this, oldVal, newVal));
    if (responseTerminateAction != null) {
      responseTerminateAction.run();
    }
  }

  /**
   * Transmit the response headers. After this method successfully returns, the response channel may
   * become writable.
   *
   * <p>If this method fails the request and response channels will be closed.
   *
   * <p>This method runs asynchronously. If the channel is writable it will attempt to write as much
   * of the response header as possible, and then queue the rest in a listener and return.
   *
   * <p>If future handlers in the chain attempt to write before this is finished XNIO will just
   * magically sort it out so it works. This is not actually implemented yet, so we just terminate
   * the connection straight away at the moment.
   *
   * <p>TODO: make this work properly
   *
   * @throws IllegalStateException if the response headers were already sent
   */
  void startResponse() throws IllegalStateException {
    int oldVal, newVal;
    do {
      oldVal = state;
      if (allAreSet(oldVal, FLAG_RESPONSE_SENT)) {
        throw UndertowMessages.MESSAGES.responseAlreadyStarted();
      }
      newVal = oldVal | FLAG_RESPONSE_SENT;
    } while (!stateUpdater.compareAndSet(this, oldVal, newVal));

    log.tracef(
        "Starting to write response for %s using channel %s", this, underlyingResponseChannel);
    final HeaderMap responseHeaders = this.responseHeaders;
    responseHeaders.lock();
  }

  void cleanup() {
    // All other cleanup handlers have been called.  We will inspect the state of the exchange
    // and attempt to fix any leftover or broken crap as best as we can.
    //
    // At this point, if any channels were not acquired, we know that not even default handlers
    // have handled the request, meaning we basically have no idea what their state is; the
    // response headers may not even be valid.
    //
    // The only thing we can do is to determine if the request and reply were both terminated;
    // if not, consume the request body nicely, send whatever HTTP response we have, and close
    // down the connection.
    complete = true;
    int oldVal, newVal;
    do {
      oldVal = state;
      if (allAreSet(oldVal, FLAG_CLEANUP)) {
        return;
      }
      newVal = oldVal | FLAG_CLEANUP | FLAG_REQUEST_TERMINATED | FLAG_RESPONSE_TERMINATED;
    } while (!stateUpdater.compareAndSet(this, oldVal, newVal));
    final StreamSourceChannel requestChannel = underlyingRequestChannel;
    final StreamSinkChannel responseChannel = underlyingResponseChannel;
    if (allAreSet(oldVal, FLAG_REQUEST_TERMINATED | FLAG_RESPONSE_TERMINATED)) {
      // we're good; a transfer coding handler took care of things.
      return;
    } else {
      try {
        // we do not attempt to drain the read side, as one of the reasons this could
        // be happening is because the request was too large
        requestChannel.shutdownReads();
        responseChannel.shutdownWrites();
        if (!responseChannel.flush()) {
          responseChannel
              .getWriteSetter()
              .set(
                  ChannelListeners.<StreamSinkChannel>flushingChannelListener(
                      new ChannelListener<StreamSinkChannel>() {
                        public void handleEvent(final StreamSinkChannel channel) {
                          // this shouldn't be necessary...
                          channel.suspendWrites();
                          channel.getWriteSetter().set(null);
                        }
                      },
                      ChannelListeners.closingChannelExceptionHandler()));
          responseChannel.resumeWrites();
        }
      } catch (Throwable t) {
        // All sorts of things could go wrong, from runtime exceptions to java.io.IOException
        // to errors. Just kill off the connection, it's f****d beyond repair.
        safeClose(requestChannel);
        safeClose(responseChannel);
        safeClose(connection);
      }
    }
  }

  public XnioExecutor getWriteThread() {
    return underlyingResponseChannel.getWriteThread();
  }

  public XnioExecutor getReadThread() {
    return underlyingRequestChannel.getReadThread();
  }
}
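
The recurring pattern in HttpServerExchange is a single volatile state int that packs the response code into the low bits (MASK_RESPONSE_CODE) and status flags into the higher bits, mutated only through AtomicIntegerFieldUpdater CAS loops so that flag transitions stay idempotent under races. A reduced sketch of that pattern in isolation (field, flag, and method names below are illustrative):

import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;

// Sketch: pack a response code and boolean flags into one volatile int,
// mutated only through CAS loops, in the style of HttpServerExchange.state.
final class PackedState {
  private static final int MASK_CODE = (1 << 10) - 1; // low 10 bits: response code
  private static final int FLAG_SENT = 1 << 10;       // response headers sent

  private volatile int state = 200;

  private static final AtomicIntegerFieldUpdater<PackedState> STATE =
      AtomicIntegerFieldUpdater.newUpdater(PackedState.class, "state");

  int code() {
    return state & MASK_CODE;
  }

  void setCode(int code) {
    int oldVal, newVal;
    do {
      oldVal = state;
      if ((oldVal & FLAG_SENT) != 0) {
        throw new IllegalStateException("response already started");
      }
      newVal = (oldVal & ~MASK_CODE) | (code & MASK_CODE);
    } while (!STATE.compareAndSet(this, oldVal, newVal));
  }

  /** Idempotent: returns false if the flag was already set by another thread. */
  boolean markSent() {
    int oldVal, newVal;
    do {
      oldVal = state;
      if ((oldVal & FLAG_SENT) != 0) {
        return false;
      }
      newVal = oldVal | FLAG_SENT;
    } while (!STATE.compareAndSet(this, oldVal, newVal));
    return true;
  }
}
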
Code Example #14
  static final class TimeoutTimedOtherSubscriber<T> implements NbpSubscriber<T>, Disposable {
    final NbpSubscriber<? super T> actual;
    final long timeout;
    final TimeUnit unit;
    final Scheduler.Worker worker;
    final NbpObservable<? extends T> other;

    Disposable s;

    final NbpFullArbiter<T> arbiter;

    volatile Disposable timer;

    @SuppressWarnings("rawtypes")
    static final AtomicReferenceFieldUpdater<TimeoutTimedOtherSubscriber, Disposable> TIMER =
        AtomicReferenceFieldUpdater.newUpdater(
            TimeoutTimedOtherSubscriber.class, Disposable.class, "timer");

    static final Disposable CANCELLED = () -> {};

    static final Disposable NEW_TIMER = () -> {};

    volatile long index;

    volatile boolean done;

    public TimeoutTimedOtherSubscriber(
        NbpSubscriber<? super T> actual,
        long timeout,
        TimeUnit unit,
        Worker worker,
        NbpObservable<? extends T> other) {
      this.actual = actual;
      this.timeout = timeout;
      this.unit = unit;
      this.worker = worker;
      this.other = other;
      this.arbiter = new NbpFullArbiter<>(actual, this, 8);
    }

    @Override
    public void onSubscribe(Disposable s) {
      if (SubscriptionHelper.validateDisposable(this.s, s)) {
        return;
      }

      this.s = s;
      if (arbiter.setSubscription(s)) {
        actual.onSubscribe(arbiter);

        scheduleTimeout(0L);
      }
    }

    @Override
    public void onNext(T t) {
      if (done) {
        return;
      }
      long idx = index + 1;
      index = idx;

      if (arbiter.onNext(t, s)) {
        scheduleTimeout(idx);
      }
    }

    void scheduleTimeout(long idx) {
      Disposable d = timer;
      if (d != null) {
        d.dispose();
      }

      if (TIMER.compareAndSet(this, d, NEW_TIMER)) {
        d =
            worker.schedule(
                () -> {
                  if (idx == index) {
                    done = true;
                    s.dispose();
                    disposeTimer();
                    worker.dispose();

                    if (other == null) {
                      actual.onError(new TimeoutException());
                    } else {
                      subscribeNext();
                    }
                  }
                },
                timeout,
                unit);

        if (!TIMER.compareAndSet(this, NEW_TIMER, d)) {
          d.dispose();
        }
      }
    }

    void subscribeNext() {
      other.subscribe(new NbpFullArbiterSubscriber<>(arbiter));
    }

    @Override
    public void onError(Throwable t) {
      if (done) {
        RxJavaPlugins.onError(t);
        return;
      }
      done = true;
      worker.dispose();
      disposeTimer();
      arbiter.onError(t, s);
    }

    @Override
    public void onComplete() {
      if (done) {
        return;
      }
      done = true;
      worker.dispose();
      disposeTimer();
      arbiter.onComplete(s);
    }

    @Override
    public void dispose() {
      worker.dispose();
      disposeTimer();
    }

    public void disposeTimer() {
      Disposable d = timer;
      if (d != CANCELLED) {
        d = TIMER.getAndSet(this, CANCELLED);
        if (d != CANCELLED && d != null) {
          d.dispose();
        }
      }
    }
  }
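
scheduleTimeout and disposeTimer above implement a handle swap: dispose the old timer, CAS in the NEW_TIMER placeholder, schedule, then CAS the placeholder to the real handle; a concurrent disposeTimer swaps in CANCELLED and disposes whatever it finds, so a freshly scheduled timer cannot leak. A trimmed sketch of that idiom against a plain interface (names below are illustrative):

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.function.Supplier;

// Sketch of the timer-handle swap used by scheduleTimeout()/disposeTimer() above:
// a placeholder is CAS'd in before scheduling so that a concurrent dispose can
// still win the race and cancel the freshly created timer.
final class TimerSlot {
  interface Handle { void dispose(); }

  private volatile Handle timer;

  private static final AtomicReferenceFieldUpdater<TimerSlot, Handle> TIMER =
      AtomicReferenceFieldUpdater.newUpdater(TimerSlot.class, Handle.class, "timer");

  private static final Handle CANCELLED = () -> {};
  private static final Handle PENDING = () -> {};

  /** Replace the current timer with a newly created one, unless disposed concurrently. */
  void schedule(Supplier<Handle> factory) {
    Handle old = timer;
    if (old != null) {
      old.dispose();
    }
    if (TIMER.compareAndSet(this, old, PENDING)) {
      Handle fresh = factory.get();
      if (!TIMER.compareAndSet(this, PENDING, fresh)) {
        fresh.dispose(); // disposeAll() raced us; drop the new timer
      }
    }
  }

  /** Cancel whatever timer is currently installed and mark the slot cancelled. */
  void disposeAll() {
    Handle current = TIMER.getAndSet(this, CANCELLED);
    if (current != null && current != CANCELLED) {
      current.dispose();
    }
  }
}
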
Code Example #15
/**
 * A SimpleSlot represents a single slot on a TaskManager instance, or a slot within a shared slot.
 *
 * <p>If this slot is part of a {@link SharedSlot}, then the parent attribute will point to that
 * shared slot. If not, then the parent attribute is null.
 */
public class SimpleSlot extends Slot {

  /** The updater used to atomically swap in the execution */
  private static final AtomicReferenceFieldUpdater<SimpleSlot, Execution> VERTEX_UPDATER =
      AtomicReferenceFieldUpdater.newUpdater(SimpleSlot.class, Execution.class, "executedTask");

  // ------------------------------------------------------------------------

  /**
   * Task being executed in the slot. Volatile to force a memory barrier and allow for correct
   * double-checking
   */
  private volatile Execution executedTask;

  /**
   * The locality attached to the slot, defining whether the slot was allocated at the desired
   * location.
   */
  private Locality locality = Locality.UNCONSTRAINED;

  /**
   * Creates a new simple slot that stands alone and does not belong to a shared slot.
   *
   * @param jobID The ID of the job that the slot is allocated for.
   * @param instance The instance that the slot belongs to.
   * @param slotNumber The number of the task slot on the instance.
   */
  public SimpleSlot(JobID jobID, Instance instance, int slotNumber) {
    super(jobID, instance, slotNumber, null, null);
  }

  /**
   * Creates a new simple slot that belongs to the given shared slot and is identified by the given
   * ID.
   *
   * @param jobID The ID of the job that the slot is allocated for.
   * @param instance The instance that the slot belongs to.
   * @param slotNumber The number of the simple slot in its parent shared slot.
   * @param parent The parent shared slot.
   * @param groupID The ID that identifies the group that the slot belongs to.
   */
  public SimpleSlot(
      JobID jobID, Instance instance, int slotNumber, SharedSlot parent, AbstractID groupID) {
    super(jobID, instance, slotNumber, parent, groupID);
  }

  // ------------------------------------------------------------------------
  //  Properties
  // ------------------------------------------------------------------------

  @Override
  public int getNumberLeaves() {
    return 1;
  }

  /**
   * Gets the task execution attempt currently executed in this slot. This may return null, if no
   * task execution attempt has been placed into this slot.
   *
   * @return The slot's task execution attempt, or null, if no task is executed in this slot, yet.
   */
  public Execution getExecutedVertex() {
    return executedTask;
  }

  /**
   * Atomically sets the executed vertex, if no vertex has been assigned to this slot so far.
   *
   * @param executedVertex The vertex to assign to this slot.
   * @return True if the vertex was assigned, false otherwise.
   */
  public boolean setExecutedVertex(Execution executedVertex) {
    if (executedVertex == null) {
      throw new NullPointerException();
    }

    // check that we can actually run in this slot
    if (isCanceled()) {
      return false;
    }

    // atomically assign the vertex
    if (!VERTEX_UPDATER.compareAndSet(this, null, executedVertex)) {
      return false;
    }

    // we need to do a double check that we were not cancelled in the meantime
    if (isCanceled()) {
      this.executedTask = null;
      return false;
    }

    return true;
  }

  /**
   * Gets the locality information attached to this slot.
   *
   * @return The locality attached to the slot.
   */
  public Locality getLocality() {
    return locality;
  }

  /**
   * Attaches locality information to this slot.
   *
   * @param locality The locality attached to the slot.
   */
  public void setLocality(Locality locality) {
    this.locality = locality;
  }

  // ------------------------------------------------------------------------
  //  Cancelling & Releasing
  // ------------------------------------------------------------------------

  @Override
  public void releaseSlot() {

    // try to transition to the CANCELED state. That state marks
    // that the releasing is in progress
    if (markCancelled()) {

      // kill all tasks currently running in this slot
      Execution exec = this.executedTask;
      if (exec != null && !exec.isFinished()) {
        exec.fail(
            new Exception(
                "The slot in which the task was executed has been released. Probably loss of TaskManager "
                    + getInstance()));
      }

      // release directly (if we are directly allocated),
      // otherwise release through the parent shared slot
      if (getParent() == null) {
        // we have to give back the slot to the owning instance
        getInstance().returnAllocatedSlot(this);
      } else {
        // we have to ask our parent to dispose us
        getParent().releaseChild(this);
      }
    }
  }

  // ------------------------------------------------------------------------
  //  Utilities
  // ------------------------------------------------------------------------

  @Override
  public String toString() {
    return "SimpleSlot " + super.toString();
  }
}
Code Example #16
/** @author Emanuel Muckenhuber */
class IdentityPatchContext implements PatchContentProvider {

  private final File miscBackup;
  private final File configBackup;
  private final File miscTargetRoot;

  private final PatchEntry identityEntry;
  private final InstalledImage installedImage;
  private final PatchContentProvider contentProvider;
  private final ContentVerificationPolicy contentPolicy;
  private final InstallationManager.InstallationModification modification;
  private final Map<String, PatchContentLoader> contentLoaders =
      new HashMap<String, PatchContentLoader>();
  private final PatchingHistory history;

  // TODO initialize layers in the correct order
  private final Map<String, PatchEntry> layers = new LinkedHashMap<String, PatchEntry>();
  private final Map<String, PatchEntry> addOns = new LinkedHashMap<String, PatchEntry>();

  private PatchingTaskContext.Mode mode;
  private volatile State state = State.NEW;
  private boolean checkForGarbageOnRestart; // flag to trigger a cleanup on restart
  private static final AtomicReferenceFieldUpdater<IdentityPatchContext, State> stateUpdater =
      AtomicReferenceFieldUpdater.newUpdater(IdentityPatchContext.class, State.class, "state");
  // The modules we need to invalidate
  private final List<File> moduleInvalidations = new ArrayList<File>();

  static enum State {
    NEW,
    PREPARED,
    COMPLETED,
    INVALIDATE,
    ROLLBACK_ONLY,
    ;
  }

  IdentityPatchContext(
      final File backup,
      final PatchContentProvider contentProvider,
      final ContentVerificationPolicy contentPolicy,
      final InstallationManager.InstallationModification modification,
      final PatchingTaskContext.Mode mode,
      final InstalledImage installedImage) {

    this.miscTargetRoot = installedImage.getJbossHome();

    this.mode = mode;
    this.contentProvider = contentProvider;
    this.contentPolicy = contentPolicy;
    this.modification = modification;
    this.installedImage = installedImage;
    this.history =
        PatchingHistory.Factory.getHistory(modification.getUnmodifiedInstallationState());

    if (backup != null) {
      this.miscBackup = new File(backup, PatchContentLoader.MISC);
      this.configBackup = new File(backup, Constants.CONFIGURATION);
    } else {
      this.miscBackup = null; // This will trigger a failure when the root is actually needed
      this.configBackup = null;
    }
    this.identityEntry = new PatchEntry(modification, null);
  }

  /**
   * Get the patch entry for the identity.
   *
   * @return the identity entry
   */
  PatchEntry getIdentityEntry() {
    return identityEntry;
  }

  /**
   * Get a patch entry for either a layer or add-on.
   *
   * @param name the layer name
   * @param addOn whether the target is an add-on
   * @return the patch entry, {@code null} if there is no such layer
   */
  PatchEntry getEntry(final String name, boolean addOn) {
    return addOn ? addOns.get(name) : layers.get(name);
  }

  /**
   * Get all entries.
   *
   * @return the entries for all layers
   */
  Collection<PatchEntry> getLayers() {
    return layers.values();
  }

  /**
   * Get all add-ons.
   *
   * @return the entries for all add-ons
   */
  Collection<PatchEntry> getAddOns() {
    return addOns.values();
  }

  /**
   * Get the current modification.
   *
   * @return the modification
   */
  InstallationManager.InstallationModification getModification() {
    return modification;
  }

  /**
   * Get the patch history.
   *
   * @return the history
   */
  PatchingHistory getHistory() {
    return history;
  }

  /**
   * Get the current mode.
   *
   * @return the mode
   */
  PatchingTaskContext.Mode getMode() {
    return mode;
  }

  /**
   * In case we cannot delete a directory, create a marker so that on restart we can recheck
   * whether some unreferenced directories and files can be garbage collected.
   *
   * @param file the directory
   */
  protected void failedToCleanupDir(final File file) {
    checkForGarbageOnRestart = true;
    PatchLogger.ROOT_LOGGER.cannotDeleteFile(file.getAbsolutePath());
  }

  @Override
  public PatchContentLoader getLoader(final String patchId) {
    final PatchContentLoader loader = contentLoaders.get(patchId);
    if (loader != null) {
      return loader;
    }
    return contentProvider.getLoader(patchId);
  }

  @Override
  public void cleanup() {
    // If cleanup gets called before finalizePatch, something went wrong
    if (state != State.PREPARED) {
      undoChanges();
    }
  }

  /**
   * Get the target entry for a given patch element.
   *
   * @param element the patch element
   * @return the patch entry
   * @throws PatchingException
   */
  protected PatchEntry resolveForElement(final PatchElement element) throws PatchingException {
    assert state == State.NEW;
    final PatchElementProvider provider = element.getProvider();
    final String layerName = provider.getName();
    final LayerType layerType = provider.getLayerType();

    final Map<String, PatchEntry> map;
    if (layerType == LayerType.Layer) {
      map = layers;
    } else {
      map = addOns;
    }
    PatchEntry entry = map.get(layerName);
    if (entry == null) {
      final InstallationManager.MutablePatchingTarget target =
          modification.resolve(layerName, layerType);
      if (target == null) {
        throw PatchMessages.MESSAGES.noSuchLayer(layerName);
      }
      entry = new PatchEntry(target, element);
      map.put(layerName, entry);
    }
    // Maintain the most recent element
    entry.updateElement(element);
    return entry;
  }

  /**
   * Finalize the patch.
   *
   * @param callback the finalize callback
   * @return the result
   * @throws Exception
   */
  protected PatchingResult finalize(final FinalizeCallback callback) throws Exception {
    assert state == State.NEW;
    final Patch original = callback.getPatch();
    final Patch.PatchType patchType = original.getIdentity().getPatchType();
    final String patchId;
    if (patchType == Patch.PatchType.CUMULATIVE) {
      patchId = modification.getCumulativePatchID();
    } else {
      patchId = original.getPatchId();
    }
    try {
      // The processed patch, based on the recorded changes
      final Patch processedPatch = createProcessedPatch(original);
      // The rollback containing all the recorded rollback actions
      final RollbackPatch rollbackPatch = createRollbackPatch(patchId, patchType);
      callback.finishPatch(processedPatch, rollbackPatch, this);
    } catch (Exception e) {
      if (undoChanges()) {
        callback.operationCancelled(this);
      }
      throw e;
    }
    state = State.PREPARED;
    return new PatchingResult() {
      @Override
      public String getPatchId() {
        return original.getPatchId();
      }

      @Override
      public PatchInfo getPatchInfo() {
        return new PatchInfo() {
          @Override
          public String getVersion() {
            return identityEntry.getResultingVersion();
          }

          @Override
          public String getCumulativePatchID() {
            return identityEntry.delegate.getModifiedState().getCumulativePatchID();
          }

          @Override
          public List<String> getPatchIDs() {
            return identityEntry.delegate.getModifiedState().getPatchIDs();
          }
        };
      }

      @Override
      public void commit() {
        if (state == State.PREPARED) {
          complete(modification, callback);
        } else {
          undoChanges();
          throw new IllegalStateException();
        }
      }

      @Override
      public void rollback() {
        if (undoChanges()) {
          try {
            callback.operationCancelled(IdentityPatchContext.this);
          } finally {
            modification.cancel();
          }
        }
      }
    };
  }

  /**
   * Cancel the current patch and undo the changes.
   *
   * @param callback the finalize callback
   */
  protected void cancel(final FinalizeCallback callback) {
    try {
      undoChanges();
    } finally {
      callback.operationCancelled(this);
    }
  }

  /**
   * Complete the current operation and persist the current state to the disk. This will also
   * trigger the invalidation of outdated modules.
   *
   * @param modification the current modification
   * @param callback the completion callback
   */
  private void complete(
      final InstallationManager.InstallationModification modification,
      final FinalizeCallback callback) {
    final List<File> processed = new ArrayList<File>();
    try {
      try {
        // Update the state to invalidate and process module resources
        if (stateUpdater.compareAndSet(this, State.PREPARED, State.INVALIDATE)
            && mode == PatchingTaskContext.Mode.APPLY) {
          // Only invalidate modules when applying patches; on rollback files are immediately
          // restored
          for (final File invalidation : moduleInvalidations) {
            processed.add(invalidation);
            PatchModuleInvalidationUtils.processFile(invalidation, mode);
          }
        }
        modification.complete();
        callback.completed(this);
        state = State.COMPLETED;
      } catch (Exception e) {
        this.moduleInvalidations.clear();
        this.moduleInvalidations.addAll(processed);
        throw new RuntimeException(e);
      }
    } finally {
      if (state != State.COMPLETED) {
        try {
          modification.cancel();
        } finally {
          try {
            undoChanges();
          } finally {
            callback.operationCancelled(this);
          }
        }
      } else {
        try {
          if (checkForGarbageOnRestart) {
            final File cleanupMarker =
                new File(installedImage.getInstallationMetadata(), "cleanup-patching-dirs");
            cleanupMarker.createNewFile();
          }
        } catch (IOException e) {
          PatchLogger.ROOT_LOGGER.infof(e, "failed to create cleanup marker");
        }
      }
    }
  }

  /**
   * Internally undo the changes recorded so far.
   *
   * @return whether the state required undo actions
   */
  boolean undoChanges() {
    final State state = stateUpdater.getAndSet(this, State.ROLLBACK_ONLY);
    if (state == State.COMPLETED || state == State.ROLLBACK_ONLY) {
      // Was actually completed already
      return false;
    }
    PatchingTaskContext.Mode currentMode = this.mode;
    mode = PatchingTaskContext.Mode.UNDO;
    final PatchContentLoader loader = PatchContentLoader.create(miscBackup, null, null);
    // Undo changes for the identity
    undoChanges(identityEntry, loader);
    // TODO maybe check if we need to do something for the layers too !?
    if (state == State.INVALIDATE || currentMode == PatchingTaskContext.Mode.ROLLBACK) {
      // For apply the state needs to be invalidate
      // For rollback the files are invalidated as part of the tasks
      final PatchingTaskContext.Mode mode =
          currentMode == PatchingTaskContext.Mode.APPLY
              ? PatchingTaskContext.Mode.ROLLBACK
              : PatchingTaskContext.Mode.APPLY;
      for (final File file : moduleInvalidations) {
        try {
          PatchModuleInvalidationUtils.processFile(file, mode);
        } catch (Exception e) {
          PatchLogger.ROOT_LOGGER.debugf(e, "failed to restore state for %s", file);
        }
      }
    }
    return true;
  }

  /**
   * Undo changes for a single patch entry.
   *
   * @param entry the patch entry
   * @param loader the content loader
   */
  static void undoChanges(final PatchEntry entry, final PatchContentLoader loader) {
    final List<ContentModification> modifications =
        new ArrayList<ContentModification>(entry.rollbackActions);
    for (final ContentModification modification : modifications) {
      final ContentItem item = modification.getItem();
      if (item.getContentType() != ContentType.MISC) {
        // Skip modules and bundles; they should be removed as part of the {@link FinalizeCallback}
        continue;
      }
      final PatchingTaskDescription description =
          new PatchingTaskDescription(entry.applyPatchId, modification, loader, false, false);
      try {
        final PatchingTask task = PatchingTask.Factory.create(description, entry);
        task.execute(entry);
      } catch (Exception e) {
        PatchLogger.ROOT_LOGGER.warnf(e, "failed to undo change (%s)", modification);
      }
    }
  }

  /**
   * Add a rollback loader for a given patch.
   *
   * @param patchId the patch id.
   * @param target the patchable target
   * @throws XMLStreamException
   * @throws IOException
   */
  private void recordRollbackLoader(final String patchId, PatchableTarget.TargetInfo target) {
    // setup the content loader paths
    final DirectoryStructure structure = target.getDirectoryStructure();
    final InstalledImage image = structure.getInstalledImage();
    final File historyDir = image.getPatchHistoryDir(patchId);
    final File miscRoot = new File(historyDir, PatchContentLoader.MISC);
    final File modulesRoot = structure.getModulePatchDirectory(patchId);
    final File bundlesRoot = structure.getBundlesPatchDirectory(patchId);
    final PatchContentLoader loader = PatchContentLoader.create(miscRoot, bundlesRoot, modulesRoot);
    //
    recordContentLoader(patchId, loader);
  }

  /**
   * Record a content loader for a given patch id.
   *
   * @param patchID the patch id
   * @param contentLoader the content loader
   */
  protected void recordContentLoader(final String patchID, final PatchContentLoader contentLoader) {
    if (contentLoaders.containsKey(patchID)) {
      throw new IllegalStateException(
          "Content loader already registered for patch "
              + patchID); // internal wrong usage, no i18n
    }
    contentLoaders.put(patchID, contentLoader);
  }

  /**
   * Whether verification of a content item can be ignored.
   *
   * @param item the content item to verify
   * @return {@code true} if verification of the item can be ignored
   */
  public boolean isIgnored(final ContentItem item) {
    return contentPolicy.ignoreContentValidation(item);
  }

  /**
   * Whether a content task execution can be excluded.
   *
   * @param item the content item
   * @return {@code true} if the content task for the item can be excluded
   */
  public boolean isExcluded(final ContentItem item) {
    return contentPolicy.preserveExisting(item);
  }

  /**
   * Get the target file for misc items.
   *
   * @param item the misc item
   * @return the target location
   */
  public File getTargetFile(final MiscContentItem item) {
    final State state = this.state;
    if (state == State.NEW || state == State.ROLLBACK_ONLY) {
      return getTargetFile(miscTargetRoot, item);
    } else {
      throw new IllegalStateException(); // internal wrong usage, no i18n
    }
  }

  /**
   * Create a patch representing what we actually processed. This may contain some fixed content
   * hashes for removed modules.
   *
   * @param original the original
   * @return the processed patch
   */
  protected Patch createProcessedPatch(final Patch original) {

    // Process elements
    final List<PatchElement> elements = new ArrayList<PatchElement>();
    // Process layers
    for (final PatchEntry entry : getLayers()) {
      final PatchElement element =
          createPatchElement(entry, entry.element.getId(), entry.modifications);
      elements.add(element);
    }
    // Process add-ons
    for (final PatchEntry entry : getAddOns()) {
      final PatchElement element =
          createPatchElement(entry, entry.element.getId(), entry.modifications);
      elements.add(element);
    }

    // Swap the patch element modifications, keep the identity ones since we don't need to fix the
    // misc modifications
    return new PatchImpl(
        original.getPatchId(),
        original.getDescription(),
        original.getIdentity(),
        elements,
        original.getModifications());
  }

  /**
   * Create a rollback patch based on the recorded actions.
   *
   * @param patchId the new patch id, depending on release or one-off
   * @param patchType the current patch identity
   * @return the rollback patch
   */
  protected RollbackPatch createRollbackPatch(
      final String patchId, final Patch.PatchType patchType) {
    // Process elements
    final List<PatchElement> elements = new ArrayList<PatchElement>();
    // Process layers
    for (final PatchEntry entry : getLayers()) {
      final PatchElement element = createRollbackElement(entry);
      elements.add(element);
    }
    // Process add-ons
    for (final PatchEntry entry : getAddOns()) {
      final PatchElement element = createRollbackElement(entry);
      elements.add(element);
    }

    final InstalledIdentity installedIdentity = modification.getUnmodifiedInstallationState();
    final String name = installedIdentity.getIdentity().getName();
    final IdentityImpl identity = new IdentityImpl(name, modification.getVersion());
    if (patchType == Patch.PatchType.CUMULATIVE) {
      identity.setPatchType(Patch.PatchType.CUMULATIVE);
      identity.setResultingVersion(installedIdentity.getIdentity().getVersion());
    } else if (patchType == Patch.PatchType.ONE_OFF) {
      identity.setPatchType(Patch.PatchType.ONE_OFF);
    }
    final List<ContentModification> modifications = identityEntry.rollbackActions;
    final Patch delegate =
        new PatchImpl(patchId, "rollback patch", identity, elements, modifications);
    return new PatchImpl.RollbackPatchImpl(delegate, installedIdentity);
  }

  /**
   * Get a misc file.
   *
   * @param root the root
   * @param item the misc content item
   * @return the misc file
   */
  static File getTargetFile(final File root, final MiscContentItem item) {
    return PatchContentLoader.getMiscPath(root, item);
  }

  /** Modification information for a patchable target. */
  class PatchEntry implements InstallationManager.MutablePatchingTarget, PatchingTaskContext {

    private String applyPatchId;
    private String resultingVersion;
    private PatchElement element;
    private final InstallationManager.MutablePatchingTarget delegate;
    private final List<ContentModification> modifications = new ArrayList<ContentModification>();
    private final List<ContentModification> rollbackActions = new ArrayList<ContentModification>();
    private final Map<Location, PatchingTasks.ContentTaskDefinition> definitions =
        new LinkedHashMap<Location, PatchingTasks.ContentTaskDefinition>();
    private final Set<String> rollbacks = new HashSet<String>();

    PatchEntry(
        final InstallationManager.MutablePatchingTarget delegate, final PatchElement element) {
      assert delegate != null;
      this.delegate = delegate;
      this.element = element;
      this.resultingVersion = modification.getVersion();
    }

    protected void updateElement(final PatchElement element) {
      this.element = element;
    }

    protected String getResultingVersion() {
      return resultingVersion;
    }

    public void setResultingVersion(String resultingVersion) {
      this.resultingVersion = resultingVersion;
    }

    @Override
    public boolean isApplied(String patchId) {
      return delegate.isApplied(patchId);
    }

    @Override
    public void rollback(String patchId) {
      rollbacks.add(patchId);
      // Rollback
      delegate.rollback(patchId);
      // Record rollback loader
      recordRollbackLoader(patchId, delegate);
    }

    @Override
    public void apply(String patchId, Patch.PatchType patchType) {
      delegate.apply(patchId, patchType);
      applyPatchId = patchId;
    }

    @Override
    public String getCumulativePatchID() {
      return delegate.getCumulativePatchID();
    }

    @Override
    public List<String> getPatchIDs() {
      return delegate.getPatchIDs();
    }

    @Override
    public Properties getProperties() {
      return delegate.getProperties();
    }

    @Override
    public DirectoryStructure getDirectoryStructure() {
      return delegate.getDirectoryStructure();
    }

    public Map<Location, PatchingTasks.ContentTaskDefinition> getDefinitions() {
      return definitions;
    }

    @Override
    public File getBackupFile(MiscContentItem item) {
      if (state == State.NEW) {
        return IdentityPatchContext.getTargetFile(miscBackup, item);
      } else if (state == State.ROLLBACK_ONLY) {
        // No backup when we undo the changes
        return null;
      } else {
        throw new IllegalStateException(); // internal wrong usage, no i18n
      }
    }

    @Override
    public boolean isExcluded(ContentItem contentItem) {
      return contentPolicy.preserveExisting(contentItem);
    }

    @Override
    public void recordChange(
        final ContentModification change, final ContentModification rollbackAction) {
      if (state == State.ROLLBACK_ONLY) {
        // don't record undo tasks
        return;
      }
      // Either side may be null (the change is null for a misc remove); record only the non-null parts
      if (change != null) {
        modifications.add(change);
      }
      if (rollbackAction != null) {
        rollbackActions.add(rollbackAction);
      }
    }

    @Override
    public Mode getCurrentMode() {
      return mode;
    }

    @Override
    public PatchableTarget.TargetInfo getModifiedState() {
      return delegate.getModifiedState();
    }

    @Override
    public File[] getTargetBundlePath() {
      // We need the updated state for invalidating one-off patches
      // When applying, the overlay directory should not exist yet
      final PatchableTarget.TargetInfo updated =
          mode == Mode.APPLY ? delegate : delegate.getModifiedState();
      return PatchUtils.getBundlePath(delegate.getDirectoryStructure(), updated);
    }

    @Override
    public File[] getTargetModulePath() {
      // We need the updated state for invalidating one-off patches
      // When applying, the overlay directory should not exist yet
      final PatchableTarget.TargetInfo updated =
          mode == Mode.APPLY ? delegate : delegate.getModifiedState();
      return PatchUtils.getModulePath(delegate.getDirectoryStructure(), updated);
    }

    @Override
    public File getTargetFile(ContentItem item) {
      if (item.getContentType() == ContentType.MISC) {
        return IdentityPatchContext.this.getTargetFile((MiscContentItem) item);
      }
      if (applyPatchId == null || state == State.ROLLBACK_ONLY) {
        throw new IllegalStateException(
            "cannot process rollback tasks for modules/bundles"); // internal wrong usage, no i18n
      }
      final File root;
      final DirectoryStructure structure = delegate.getDirectoryStructure();
      if (item.getContentType() == ContentType.BUNDLE) {
        root = structure.getBundlesPatchDirectory(applyPatchId);
      } else {
        root = structure.getModulePatchDirectory(applyPatchId);
      }
      return PatchContentLoader.getModulePath(root, (ModuleItem) item);
    }

    @Override
    public void invalidateRoot(final File moduleRoot) throws IOException {
      final File[] files =
          moduleRoot.listFiles(
              new FilenameFilter() {
                @Override
                public boolean accept(File dir, String name) {
                  return name.endsWith(".jar");
                }
              });
      if (files != null && files.length > 0) {
        for (final File file : files) {
          moduleInvalidations.add(file);
          if (mode == Mode.ROLLBACK) {
            // For rollback we need to restore the file before calculating the hash
            PatchModuleInvalidationUtils.processFile(file, mode);
          }
        }
      }
    }

    /** Clean up the history directories for all recorded rolled-back patches. */
    protected void cleanupRollbackPatchHistory() {
      final DirectoryStructure structure = getDirectoryStructure();
      for (final String rollback : rollbacks) {
        if (!IoUtils.recursiveDelete(structure.getBundlesPatchDirectory(rollback))) {
          failedToCleanupDir(structure.getBundlesPatchDirectory(rollback));
        }
        if (!IoUtils.recursiveDelete(structure.getModulePatchDirectory(rollback))) {
          failedToCleanupDir(structure.getModulePatchDirectory(rollback));
        }
      }
    }
  }

  /** Patch finalization callback. */
  interface FinalizeCallback {

    /**
     * Get the original patch.
     *
     * @return the patch
     */
    Patch getPatch();

    /**
     * Finishing step, invoked after the content modifications have been executed.
     *
     * @param processedPatch the processed patch
     * @param rollbackPatch the rollback patch
     * @param context the patch context
     * @throws Exception
     */
    void finishPatch(
        Patch processedPatch, RollbackPatch rollbackPatch, IdentityPatchContext context)
        throws Exception;

    /**
     * Completed.
     *
     * @param context the context
     */
    void completed(IdentityPatchContext context);

    /**
     * Cancelled.
     *
     * @param context the context
     */
    void operationCancelled(IdentityPatchContext context);
  }

  /**
   * Create a patch element for the rollback patch.
   *
   * @param entry the entry
   * @return the new patch element
   */
  protected static PatchElement createRollbackElement(final PatchEntry entry) {
    final PatchElement patchElement = entry.element;
    final String patchId;
    final Patch.PatchType patchType = patchElement.getProvider().getPatchType();
    if (patchType == Patch.PatchType.CUMULATIVE) {
      patchId = entry.getCumulativePatchID();
    } else {
      patchId = patchElement.getId();
    }
    return createPatchElement(entry, patchId, entry.rollbackActions);
  }

  /**
   * Copy a patch element.
   *
   * @param entry the patch entry
   * @param patchId the patch id for the element
   * @param modifications the element modifications
   * @return the new patch element
   */
  protected static PatchElement createPatchElement(
      final PatchEntry entry, String patchId, final List<ContentModification> modifications) {
    final PatchElement patchElement = entry.element;
    final PatchElementImpl element = new PatchElementImpl(patchId);
    element.setProvider(patchElement.getProvider());
    // Add all the modifications (regular changes or rollback actions)
    element.getModifications().addAll(modifications);
    return element;
  }

  /**
   * Back up the current configuration as part of the patch history.
   *
   * @throws IOException for any error
   */
  void backupConfiguration() throws IOException {

    final String configuration = Constants.CONFIGURATION;

    final File a = new File(installedImage.getAppClientDir(), configuration);
    final File d = new File(installedImage.getDomainDir(), configuration);
    final File s = new File(installedImage.getStandaloneDir(), configuration);

    if (a.exists()) {
      final File ab = new File(configBackup, Constants.APP_CLIENT);
      backupDirectory(a, ab);
    }
    if (d.exists()) {
      final File db = new File(configBackup, Constants.DOMAIN);
      backupDirectory(d, db);
    }
    if (s.exists()) {
      final File sb = new File(configBackup, Constants.STANDALONE);
      backupDirectory(s, sb);
    }
  }

  static final FileFilter CONFIG_FILTER =
      new FileFilter() {

        @Override
        public boolean accept(File pathName) {
          return pathName.isFile() && pathName.getName().endsWith(".xml");
        }
      };

  /**
   * Back up all XML files in a given directory.
   *
   * @param source the source directory
   * @param target the target directory
   * @throws IOException for any error
   */
  static void backupDirectory(final File source, final File target) throws IOException {
    if (!target.exists()) {
      if (!target.mkdirs()) {
        throw PatchMessages.MESSAGES.cannotCreateDirectory(target.getAbsolutePath());
      }
    }
    final File[] files = source.listFiles(CONFIG_FILTER);
    for (final File file : files) {
      final File t = new File(target, file.getName());
      IoUtils.copyFile(file, t);
    }
  }

  /**
   * Restore the configuration. Depending on reset-configuration this either replaces the original
   * files with the backup, or creates a restored-configuration folder in the configuration
   * directories.
   *
   * <p>TODO log a warning if the restored configuration files differ from the current ones? Or
   * should we check that before rolling back the patch, to give the user a chance to save the
   * changes?
   *
   * @param rollingBackPatchID the patch id
   * @param resetConfiguration whether to override the configuration files or not
   * @throws IOException for any error
   */
  void restoreConfiguration(final String rollingBackPatchID, final boolean resetConfiguration)
      throws IOException {

    final File backupConfigurationDir =
        new File(installedImage.getPatchHistoryDir(rollingBackPatchID), Constants.CONFIGURATION);
    final File ba = new File(backupConfigurationDir, Constants.APP_CLIENT);
    final File bd = new File(backupConfigurationDir, Constants.DOMAIN);
    final File bs = new File(backupConfigurationDir, Constants.STANDALONE);

    final String configuration;
    if (resetConfiguration) {
      configuration = Constants.CONFIGURATION;
    } else {
      configuration = Constants.CONFIGURATION + File.separator + Constants.RESTORED_CONFIGURATION;
    }

    if (ba.exists()) {
      final File a = new File(installedImage.getAppClientDir(), configuration);
      backupDirectory(ba, a);
    }
    if (bd.exists()) {
      final File d = new File(installedImage.getDomainDir(), configuration);
      backupDirectory(bd, d);
    }
    if (bs.exists()) {
      final File s = new File(installedImage.getStandaloneDir(), configuration);
      backupDirectory(bs, s);
    }
  }

  /**
   * Write the patch.xml
   *
   * @param rollbackPatch the patch
   * @param file the target file
   * @throws IOException
   */
  static void writePatch(final Patch rollbackPatch, final File file) throws IOException {
    final File parent = file.getParentFile();
    if (!parent.isDirectory()) {
      if (!parent.mkdirs() && !parent.exists()) {
        throw PatchMessages.MESSAGES.cannotCreateDirectory(file.getAbsolutePath());
      }
    }
    try {
      final OutputStream os = new FileOutputStream(file);
      try {
        PatchXml.marshal(os, rollbackPatch);
      } finally {
        IoUtils.safeClose(os);
      }
    } catch (XMLStreamException e) {
      throw new IOException(e);
    }
  }
}
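
As an aside on the resource handling in writePatch above, here is a minimal sketch of the same write using Java 7 try-with-resources instead of the explicit IoUtils.safeClose; it assumes the same Patch, PatchXml and PatchMessages helpers, and writePatchWithTwr is a hypothetical name, not part of IdentityPatchContext.

// Hypothetical variant (not part of IdentityPatchContext): same behaviour as writePatch,
// but the stream is closed by try-with-resources.
static void writePatchWithTwr(final Patch rollbackPatch, final File file) throws IOException {
  final File parent = file.getParentFile();
  if (!parent.isDirectory() && !parent.mkdirs() && !parent.exists()) {
    // report the directory that could not be created
    throw PatchMessages.MESSAGES.cannotCreateDirectory(parent.getAbsolutePath());
  }
  try (OutputStream os = new FileOutputStream(file)) {
    PatchXml.marshal(os, rollbackPatch); // closed automatically, even if marshalling fails
  } catch (XMLStreamException e) {
    throw new IOException(e); // keep the original IOException contract
  }
}
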
Code example #17
0
final class ConcreteResourceRegistration extends AbstractResourceRegistration {

  @SuppressWarnings("unused")
  private volatile Map<String, NodeSubregistry> children;

  @SuppressWarnings("unused")
  private volatile Map<String, OperationEntry> operations;

  @SuppressWarnings("unused")
  private volatile DescriptionProvider descriptionProvider;

  @SuppressWarnings("unused")
  private volatile Map<String, AttributeAccess> attributes;

  private final boolean runtimeOnly;

  private static final AtomicMapFieldUpdater<ConcreteResourceRegistration, String, NodeSubregistry>
      childrenUpdater =
          AtomicMapFieldUpdater.newMapUpdater(
              AtomicReferenceFieldUpdater.newUpdater(
                  ConcreteResourceRegistration.class, Map.class, "children"));
  private static final AtomicMapFieldUpdater<ConcreteResourceRegistration, String, OperationEntry>
      operationsUpdater =
          AtomicMapFieldUpdater.newMapUpdater(
              AtomicReferenceFieldUpdater.newUpdater(
                  ConcreteResourceRegistration.class, Map.class, "operations"));
  private static final AtomicMapFieldUpdater<ConcreteResourceRegistration, String, AttributeAccess>
      attributesUpdater =
          AtomicMapFieldUpdater.newMapUpdater(
              AtomicReferenceFieldUpdater.newUpdater(
                  ConcreteResourceRegistration.class, Map.class, "attributes"));
  private static final AtomicReferenceFieldUpdater<
          ConcreteResourceRegistration, DescriptionProvider>
      descriptionProviderUpdater =
          AtomicReferenceFieldUpdater.newUpdater(
              ConcreteResourceRegistration.class, DescriptionProvider.class, "descriptionProvider");

  ConcreteResourceRegistration(
      final String valueString,
      final NodeSubregistry parent,
      final DescriptionProvider provider,
      final boolean runtimeOnly) {
    super(valueString, parent);
    childrenUpdater.clear(this);
    operationsUpdater.clear(this);
    attributesUpdater.clear(this);
    descriptionProviderUpdater.set(this, provider);
    this.runtimeOnly = runtimeOnly;
  }

  @Override
  public boolean isRuntimeOnly() {
    return runtimeOnly;
  }

  @Override
  public boolean isRemote() {
    return false;
  }

  @Override
  public ManagementResourceRegistration registerSubModel(
      final PathElement address, final DescriptionProvider descriptionProvider) {
    if (address == null) {
      throw new IllegalArgumentException("address is null");
    }
    if (descriptionProvider == null) {
      throw new IllegalArgumentException("descriptionProvider is null");
    }
    if (runtimeOnly) {
      throw new IllegalStateException(
          "Cannot register non-runtime-only submodels with a runtime-only parent");
    }
    final String key = address.getKey();
    final NodeSubregistry child = getOrCreateSubregistry(key);
    return child.register(address.getValue(), descriptionProvider, false);
  }

  @Override
  public void registerSubModel(
      final PathElement address, final ManagementResourceRegistration subModel) {
    if (address == null) {
      throw new IllegalArgumentException("address is null");
    }
    if (subModel == null) {
      throw new IllegalArgumentException("subModel is null");
    }
    final String key = address.getKey();
    final NodeSubregistry child = getOrCreateSubregistry(key);
    child.register(address.getValue(), subModel);
  }

  @Override
  OperationEntry getOperationEntry(
      final ListIterator<PathElement> iterator,
      final String operationName,
      OperationEntry inherited) {
    if (iterator.hasNext()) {
      OperationEntry ourInherited = getInheritableOperationEntry(operationName);
      OperationEntry inheritance = ourInherited == null ? inherited : ourInherited;
      final PathElement next = iterator.next();
      final NodeSubregistry subregistry = children.get(next.getKey());
      if (subregistry == null) {
        return null;
      }
      return subregistry.getOperationEntry(iterator, next.getValue(), operationName, inheritance);
    } else {
      final OperationEntry entry = operationsUpdater.get(this, operationName);
      return entry == null ? inherited : entry;
    }
  }

  @Override
  OperationEntry getInheritableOperationEntry(final String operationName) {
    final OperationEntry entry = operationsUpdater.get(this, operationName);
    if (entry != null && entry.isInherited()) {
      return entry;
    }
    return null;
  }

  @Override
  void getOperationDescriptions(
      final ListIterator<PathElement> iterator,
      final Map<String, OperationEntry> providers,
      final boolean inherited) {

    if (!iterator.hasNext()) {
      providers.putAll(operationsUpdater.get(this));
      if (inherited) {
        getInheritedOperations(providers, true);
      }
      return;
    }
    final PathElement next = iterator.next();
    try {
      final String key = next.getKey();
      final Map<String, NodeSubregistry> snapshot = childrenUpdater.get(this);
      final NodeSubregistry subregistry = snapshot.get(key);
      if (subregistry != null) {
        subregistry.getHandlers(iterator, next.getValue(), providers, inherited);
      }
    } finally {
      iterator.previous();
    }
  }

  @Override
  void getInheritedOperationEntries(final Map<String, OperationEntry> providers) {
    for (final Map.Entry<String, OperationEntry> entry : operationsUpdater.get(this).entrySet()) {
      if (entry.getValue().isInherited() && !providers.containsKey(entry.getKey())) {
        providers.put(entry.getKey(), entry.getValue());
      }
    }
  }

  @Override
  public void registerOperationHandler(
      final String operationName,
      final OperationStepHandler handler,
      final DescriptionProvider descriptionProvider,
      final boolean inherited,
      EntryType entryType) {
    if (operationsUpdater.putIfAbsent(
            this,
            operationName,
            new OperationEntry(handler, descriptionProvider, inherited, entryType))
        != null) {
      throw new IllegalArgumentException(
          "A handler named '"
              + operationName
              + "' is already registered at location '"
              + getLocationString()
              + "'");
    }
  }

  @Override
  public void registerOperationHandler(
      final String operationName,
      final OperationStepHandler handler,
      final DescriptionProvider descriptionProvider,
      final boolean inherited,
      EntryType entryType,
      EnumSet<OperationEntry.Flag> flags) {
    if (operationsUpdater.putIfAbsent(
            this,
            operationName,
            new OperationEntry(handler, descriptionProvider, inherited, entryType, flags))
        != null) {
      throw new IllegalArgumentException(
          "A handler named '"
              + operationName
              + "' is already registered at location '"
              + getLocationString()
              + "'");
    }
  }

  @Override
  public void registerReadWriteAttribute(
      final String attributeName,
      final OperationStepHandler readHandler,
      final OperationStepHandler writeHandler,
      AttributeAccess.Storage storage) {
    if (attributesUpdater.putIfAbsent(
            this,
            attributeName,
            new AttributeAccess(AccessType.READ_WRITE, storage, readHandler, writeHandler, null))
        != null) {
      throw new IllegalArgumentException(
          "An attribute named '"
              + attributeName
              + "' is already registered at location '"
              + getLocationString()
              + "'");
    }
  }

  @Override
  public void registerReadWriteAttribute(
      final String attributeName,
      final OperationStepHandler readHandler,
      final OperationStepHandler writeHandler,
      EnumSet<AttributeAccess.Flag> flags) {
    AttributeAccess.Storage storage =
        (flags != null && flags.contains(AttributeAccess.Flag.STORAGE_RUNTIME))
            ? Storage.RUNTIME
            : Storage.CONFIGURATION;
    if (attributesUpdater.putIfAbsent(
            this,
            attributeName,
            new AttributeAccess(AccessType.READ_WRITE, storage, readHandler, writeHandler, flags))
        != null) {
      throw new IllegalArgumentException(
          "An attribute named '"
              + attributeName
              + "' is already registered at location '"
              + getLocationString()
              + "'");
    }
  }

  @Override
  public void registerReadOnlyAttribute(
      final String attributeName,
      final OperationStepHandler readHandler,
      AttributeAccess.Storage storage) {
    if (attributesUpdater.putIfAbsent(
            this,
            attributeName,
            new AttributeAccess(AccessType.READ_ONLY, storage, readHandler, null, null))
        != null) {
      throw new IllegalArgumentException(
          "An attribute named '"
              + attributeName
              + "' is already registered at location '"
              + getLocationString()
              + "'");
    }
  }

  @Override
  public void registerReadOnlyAttribute(
      final String attributeName,
      final OperationStepHandler readHandler,
      EnumSet<AttributeAccess.Flag> flags) {
    AttributeAccess.Storage storage =
        (flags != null && flags.contains(AttributeAccess.Flag.STORAGE_RUNTIME))
            ? Storage.RUNTIME
            : Storage.CONFIGURATION;
    if (attributesUpdater.putIfAbsent(
            this,
            attributeName,
            new AttributeAccess(AccessType.READ_ONLY, storage, readHandler, null, null))
        != null) {
      throw new IllegalArgumentException(
          "An attribute named '"
              + attributeName
              + "' is already registered at location '"
              + getLocationString()
              + "'");
    }
  }

  @Override
  public void registerMetric(String attributeName, OperationStepHandler metricHandler) {
    registerMetric(attributeName, metricHandler, null);
  }

  @Override
  public void registerMetric(
      String attributeName,
      OperationStepHandler metricHandler,
      EnumSet<AttributeAccess.Flag> flags) {
    if (attributesUpdater.putIfAbsent(
            this,
            attributeName,
            new AttributeAccess(
                AccessType.METRIC, AttributeAccess.Storage.RUNTIME, metricHandler, null, flags))
        != null) {
      throw new IllegalArgumentException(
          "An attribute named '"
              + attributeName
              + "' is already registered at location '"
              + getLocationString()
              + "'");
    }
  }

  @Override
  public void registerProxyController(final PathElement address, final ProxyController controller)
      throws IllegalArgumentException {
    getOrCreateSubregistry(address.getKey())
        .registerProxyController(address.getValue(), controller);
  }

  @Override
  public void unregisterProxyController(final PathElement address) throws IllegalArgumentException {
    final Map<String, NodeSubregistry> snapshot = childrenUpdater.get(this);
    final NodeSubregistry subregistry = snapshot.get(address.getKey());
    if (subregistry != null) {
      subregistry.unregisterProxyController(address.getValue());
    }
  }

  NodeSubregistry getOrCreateSubregistry(final String key) {
    for (; ; ) {
      final Map<String, NodeSubregistry> snapshot = childrenUpdater.get(this);
      final NodeSubregistry subregistry = snapshot.get(key);
      if (subregistry != null) {
        return subregistry;
      } else {
        final NodeSubregistry newRegistry = new NodeSubregistry(key, this);
        final NodeSubregistry appearing =
            childrenUpdater.putAtomic(this, key, newRegistry, snapshot);
        if (appearing == null) {
          return newRegistry;
        } else if (appearing != newRegistry) {
          // someone else added one
          return appearing;
        }
        // otherwise, retry the loop because the map changed
      }
    }
  }

  @Override
  DescriptionProvider getModelDescription(final Iterator<PathElement> iterator) {
    if (iterator.hasNext()) {
      final PathElement next = iterator.next();
      final NodeSubregistry subregistry = children.get(next.getKey());
      if (subregistry == null) {
        return null;
      }
      return subregistry.getModelDescription(iterator, next.getValue());
    } else {
      return descriptionProvider;
    }
  }

  @Override
  Set<String> getAttributeNames(final Iterator<PathElement> iterator) {
    if (iterator.hasNext()) {
      final PathElement next = iterator.next();
      final NodeSubregistry subregistry = children.get(next.getKey());
      if (subregistry == null) {
        return Collections.emptySet();
      }
      return subregistry.getAttributeNames(iterator, next.getValue());
    } else {
      final Map<String, AttributeAccess> snapshot = attributesUpdater.get(this);
      return snapshot.keySet();
    }
  }

  @Override
  AttributeAccess getAttributeAccess(
      final ListIterator<PathElement> iterator, final String attributeName) {

    if (iterator.hasNext()) {
      final PathElement next = iterator.next();
      final NodeSubregistry subregistry = children.get(next.getKey());
      if (subregistry == null) {
        return null;
      }
      return subregistry.getAttributeAccess(iterator, next.getValue(), attributeName);
    } else {
      final Map<String, AttributeAccess> snapshot = attributesUpdater.get(this);
      AttributeAccess access = snapshot.get(attributeName);
      if (access == null) {
        // If there is metadata for an attribute but no AttributeAccess, assume read-only. It
        // can't be writable without a registered handler. This opens the possibility that
        // out-of-date metadata for attribute "foo" can lead to a read of non-existent-in-model
        // "foo" with an unexpected undefined value returned. But it removes the possibility of a
        // dev forgetting to call registry.registerReadOnlyAttribute("foo", null), resulting in
        // the valid attribute "foo" not being readable.
        final ModelNode desc = descriptionProvider.getModelDescription(null);
        if (desc.has(ATTRIBUTES) && desc.get(ATTRIBUTES).keys().contains(attributeName)) {
          access =
              new AttributeAccess(AccessType.READ_ONLY, Storage.CONFIGURATION, null, null, null);
        }
      }
      return access;
    }
  }

  @Override
  Set<String> getChildNames(final Iterator<PathElement> iterator) {
    if (iterator.hasNext()) {
      final PathElement next = iterator.next();
      final NodeSubregistry subregistry = children.get(next.getKey());
      if (subregistry == null) {
        return Collections.emptySet();
      }
      return subregistry.getChildNames(iterator, next.getValue());
    } else {
      final Map<String, NodeSubregistry> children = this.children;
      if (children != null) {
        return Collections.unmodifiableSet(children.keySet());
      }
      return Collections.emptySet();
    }
  }

  @Override
  Set<PathElement> getChildAddresses(final Iterator<PathElement> iterator) {
    if (iterator.hasNext()) {
      final PathElement next = iterator.next();
      final NodeSubregistry subregistry = children.get(next.getKey());
      if (subregistry == null) {
        return Collections.emptySet();
      }
      return subregistry.getChildAddresses(iterator, next.getValue());
    } else {
      final Map<String, NodeSubregistry> children = this.children;
      if (children != null) {
        final Set<PathElement> elements = new HashSet<PathElement>();
        for (final Map.Entry<String, NodeSubregistry> entry : children.entrySet()) {
          for (final String entryChild : entry.getValue().getChildNames()) {
            elements.add(PathElement.pathElement(entry.getKey(), entryChild));
          }
        }
        return elements;
      }
      return Collections.emptySet();
    }
  }

  @Override
  ProxyController getProxyController(Iterator<PathElement> iterator) {
    if (iterator.hasNext()) {
      final PathElement next = iterator.next();
      final NodeSubregistry subregistry = children.get(next.getKey());
      if (subregistry == null) {
        return null;
      }
      return subregistry.getProxyController(iterator, next.getValue());
    } else {
      return null;
    }
  }

  @Override
  void getProxyControllers(Iterator<PathElement> iterator, Set<ProxyController> controllers) {
    if (iterator.hasNext()) {
      final PathElement next = iterator.next();
      final NodeSubregistry subregistry = children.get(next.getKey());
      if (subregistry == null) {
        return;
      }
      if (next.isWildcard()) {
        subregistry.getProxyControllers(iterator, null, controllers);
      } else if (next.isMultiTarget()) {
        for (final String value : next.getSegments()) {
          subregistry.getProxyControllers(iterator, value, controllers);
        }
      } else {
        subregistry.getProxyControllers(iterator, next.getValue(), controllers);
      }
    } else {
      final Map<String, NodeSubregistry> snapshot = childrenUpdater.get(this);
      for (NodeSubregistry subregistry : snapshot.values()) {
        subregistry.getProxyControllers(iterator, null, controllers);
      }
    }
  }

  ManagementResourceRegistration getResourceRegistration(Iterator<PathElement> iterator) {
    if (!iterator.hasNext()) {
      return this;
    } else {
      final PathElement address = iterator.next();
      final Map<String, NodeSubregistry> snapshot = childrenUpdater.get(this);
      final NodeSubregistry subregistry = snapshot.get(address.getKey());
      if (subregistry != null) {
        return subregistry.getResourceRegistration(iterator, address.getValue());
      } else {
        return null;
      }
    }
  }
}
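
The registration above hides its concurrency behind AtomicMapFieldUpdater, but the idea underlying getOrCreateSubregistry and the putIfAbsent calls is a copy-on-write map swapped through an AtomicReferenceFieldUpdater. Below is a minimal self-contained sketch of that idiom using only JDK types; CopyOnWriteRegistry and its members are illustrative names, not the actual AtomicMapFieldUpdater API.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

// Copy-on-write map: readers see an immutable snapshot, writers publish a new copy via CAS.
final class CopyOnWriteRegistry<V> {

  @SuppressWarnings("rawtypes")
  private static final AtomicReferenceFieldUpdater<CopyOnWriteRegistry, Map> MAP_UPDATER =
      AtomicReferenceFieldUpdater.newUpdater(CopyOnWriteRegistry.class, Map.class, "map");

  private volatile Map<String, V> map = Collections.emptyMap();

  /** Returns the previously registered value, or null if this call installed the mapping. */
  V putIfAbsent(final String key, final V value) {
    for (; ; ) {
      final Map<String, V> snapshot = map;
      final V existing = snapshot.get(key);
      if (existing != null) {
        return existing; // someone else registered it first
      }
      final Map<String, V> copy = new HashMap<String, V>(snapshot);
      copy.put(key, value);
      if (MAP_UPDATER.compareAndSet(this, snapshot, copy)) {
        return null; // our copy is now the published snapshot
      }
      // CAS lost: the map changed concurrently, retry against a fresh snapshot
    }
  }

  V get(final String key) {
    return map.get(key); // a single volatile read, no locking
  }
}

In practice a ConcurrentHashMap is often the simpler choice; the copy-on-write variant trades write throughput for very cheap, lock-free reads of an immutable snapshot.
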
Code example #18
0
/**
 * In-memory data tree whose current {@link DataTreeState} is swapped atomically via an {@link
 * AtomicReferenceFieldUpdater}.
 */
final class InMemoryDataTree extends AbstractDataTreeTip implements TipProducingDataTree {
  private static final AtomicReferenceFieldUpdater<InMemoryDataTree, DataTreeState> STATE_UPDATER =
      AtomicReferenceFieldUpdater.newUpdater(InMemoryDataTree.class, DataTreeState.class, "state");
  private static final Logger LOG = LoggerFactory.getLogger(InMemoryDataTree.class);

  private final YangInstanceIdentifier rootPath;
  private final TreeType treeType;

  /** Current data store state generation. */
  private volatile DataTreeState state;

  public InMemoryDataTree(
      final TreeNode rootNode,
      final TreeType treeType,
      final YangInstanceIdentifier rootPath,
      final SchemaContext schemaContext) {
    this.treeType = Preconditions.checkNotNull(treeType, "treeType");
    this.rootPath = Preconditions.checkNotNull(rootPath, "rootPath");
    state = DataTreeState.createInitial(rootNode);
    if (schemaContext != null) {
      setSchemaContext(schemaContext);
    }
  }

  /*
   * This method is synchronized to guard against a user attempting to install
   * multiple contexts concurrently. Otherwise it runs in a lock-free manner.
   */
  @Override
  public synchronized void setSchemaContext(final SchemaContext newSchemaContext) {
    Preconditions.checkNotNull(newSchemaContext);

    LOG.debug("Following schema contexts will be attempted {}", newSchemaContext);

    final DataSchemaContextTree contextTree = DataSchemaContextTree.from(newSchemaContext);
    final DataSchemaContextNode<?> rootContextNode = contextTree.getChild(rootPath);
    if (rootContextNode == null) {
      LOG.debug("Could not find root {} in new schema context, not upgrading", rootPath);
      return;
    }

    final DataSchemaNode rootSchemaNode = rootContextNode.getDataSchemaNode();
    if (!(rootSchemaNode instanceof DataNodeContainer)) {
      LOG.warn(
          "Root {} resolves to non-container type {}, not upgrading", rootPath, rootSchemaNode);
      return;
    }

    final ModificationApplyOperation rootNode;
    if (rootSchemaNode instanceof ContainerSchemaNode) {
      // FIXME: the real root needs to enforce presence, but that requires pre-population
      rootNode = new ContainerModificationStrategy((ContainerSchemaNode) rootSchemaNode, treeType);
    } else {
      rootNode = SchemaAwareApplyOperation.from(rootSchemaNode, treeType);
    }

    DataTreeState currentState, newState;
    do {
      currentState = state;
      newState = currentState.withSchemaContext(newSchemaContext, rootNode);
    } while (!STATE_UPDATER.compareAndSet(this, currentState, newState));
  }

  @Override
  public InMemoryDataTreeSnapshot takeSnapshot() {
    return state.newSnapshot();
  }

  @Override
  public void commit(final DataTreeCandidate candidate) {
    if (candidate instanceof NoopDataTreeCandidate) {
      return;
    }
    Preconditions.checkArgument(
        candidate instanceof InMemoryDataTreeCandidate,
        "Invalid candidate class %s",
        candidate.getClass());
    final InMemoryDataTreeCandidate c = (InMemoryDataTreeCandidate) candidate;

    if (LOG.isTraceEnabled()) {
      LOG.trace("Data Tree is {}", NormalizedNodes.toStringTree(c.getTipRoot().getData()));
    }

    final TreeNode newRoot = c.getTipRoot();
    DataTreeState currentState, newState;
    do {
      currentState = state;
      final TreeNode currentRoot = currentState.getRoot();
      LOG.debug("Updating datastore from {} to {}", currentRoot, newRoot);

      final TreeNode oldRoot = c.getBeforeRoot();
      Preconditions.checkState(
          oldRoot == currentRoot,
          "Store tree %s and candidate base %s differ.",
          currentRoot,
          oldRoot);

      newState = currentState.withRoot(newRoot);
      LOG.trace("Updated state from {} to {}", currentState, newState);
    } while (!STATE_UPDATER.compareAndSet(this, currentState, newState));
  }

  @Override
  public YangInstanceIdentifier getRootPath() {
    return rootPath;
  }

  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this)
        .add("object", super.toString())
        .add("rootPath", rootPath)
        .add("state", state)
        .toString();
  }

  @Override
  @Nonnull
  protected TreeNode getTipRoot() {
    return state.getRoot();
  }
}
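
Both setSchemaContext and commit above end in the same read-modify-compareAndSet retry loop over the volatile state field. The sketch below factors that loop into a small reusable holder; AtomicStateHolder and its members are illustrative names (assuming Java 8 for UnaryOperator), not part of the yangtools API.

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.function.UnaryOperator;

// Holds an immutable state object and replaces it atomically, retrying on contention.
final class AtomicStateHolder<S> {

  @SuppressWarnings("rawtypes")
  private static final AtomicReferenceFieldUpdater<AtomicStateHolder, Object> STATE_UPDATER =
      AtomicReferenceFieldUpdater.newUpdater(AtomicStateHolder.class, Object.class, "state");

  private volatile Object state;

  AtomicStateHolder(final S initial) {
    this.state = initial;
  }

  @SuppressWarnings("unchecked")
  S get() {
    return (S) state;
  }

  /** Re-applies the update function until the compare-and-set wins, then returns the new state. */
  @SuppressWarnings("unchecked")
  S update(final UnaryOperator<S> op) {
    Object current;
    Object next;
    do {
      current = state;
      next = op.apply((S) current); // may run more than once if the CAS loses a race
    } while (!STATE_UPDATER.compareAndSet(this, current, next));
    return (S) next;
  }
}

With such a holder, the commit() loop above is roughly holder.update(s -> s.withRoot(newRoot)), except that the real code also checks the candidate's before-root against the current root before swapping.
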
Code example #19
0
public class ConcNodeCachingLinkedQueue<E> {

  private static class Node<E> {
    volatile E item;
    volatile Node<E> next;

    @SuppressWarnings("rawtypes")
    private static final AtomicReferenceFieldUpdater<Node, Node> nextUpdater =
        AtomicReferenceFieldUpdater.newUpdater(Node.class, Node.class, "next");

    boolean casNext(Node<E> cmp, Node<E> val) {
      return nextUpdater.compareAndSet(this, cmp, val);
    }
  }

  @SuppressWarnings("rawtypes")
  private static final AtomicReferenceFieldUpdater<ConcNodeCachingLinkedQueue, Node> tailUpdater =
      AtomicReferenceFieldUpdater.newUpdater(ConcNodeCachingLinkedQueue.class, Node.class, "tail");

  @SuppressWarnings("rawtypes")
  private static final AtomicReferenceFieldUpdater<ConcNodeCachingLinkedQueue, Node> headUpdater =
      AtomicReferenceFieldUpdater.newUpdater(ConcNodeCachingLinkedQueue.class, Node.class, "head");

  private boolean casTail(Node<E> cmp, Node<E> val) {
    return tailUpdater.compareAndSet(this, cmp, val);
  }

  private boolean casHead(Node<E> cmp, Node<E> val) {
    return headUpdater.compareAndSet(this, cmp, val);
  }

  /**
   * Pointer to the header node, initialized to a dummy node. The first actual node is at
   * head.next.
   */
  private volatile Node<E> head = new Node<E>();

  /** Pointer to the last node on the list. */
  private volatile Node<E> tail = head;

  /** Stack of free nodes for reuse. */
  volatile Node<E> freeNode = null;

  @SuppressWarnings("rawtypes")
  private static final AtomicReferenceFieldUpdater<ConcNodeCachingLinkedQueue, Node>
      freeNodeUpdater =
          AtomicReferenceFieldUpdater.newUpdater(
              ConcNodeCachingLinkedQueue.class, Node.class, "freeNode");

  private boolean casNewNode(Node<E> cmp, Node<E> val) {
    return freeNodeUpdater.compareAndSet(this, cmp, val);
  }

  /**
   * Get a node from the free-node stack, or create a new one.
   *
   * @return a node ready for use.
   */
  private Node<E> newNode() {
    Node<E> ret;
    Node<E> newFree;
    do {
      ret = freeNode;
      if (ret == null) return new Node<E>();
      newFree = ret.next;
    } while (!casNewNode(ret, newFree));
    return ret;
  }

  /**
   * Store a node so it can be reused later.
   *
   * @param node the node to recycle.
   */
  private void freeNode(Node<E> node) {
    node.item = null;

    Node<E> oldNew;
    do {
      oldNew = freeNode;
      node.next = oldNew;
    } while (!casNewNode(oldNew, node));
  }

  /** Inserts the specified element at the tail of this queue. */
  public void push(E e) {
    Node<E> n = newNode();
    n.item = e;
    n.next = null;

    while (true) {
      Node<E> t = tail;
      Node<E> s = t.next;
      if (t == tail) {
        if (s == null) {
          if (t.casNext(s, n)) {
            casTail(t, n);
            return;
          }
        } else {
          casTail(t, s);
        }
      }
    }
  }

  public E poll() {
    while (true) {
      Node<E> h = head;
      Node<E> t = tail;
      Node<E> first = h.next;
      if (h == head) {
        if (h == t) {
          if (first == null) return null;
          else casTail(t, first);
        } else if (casHead(h, first)) {
          freeNode(h);
          E item = first.item;
          if (item != null) {
            first.item = null;
            return item;
          }
        }
      }
    }
  }
}
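
A minimal usage sketch follows; the demo class name is hypothetical. push enqueues at the tail and poll returns null when the queue is empty, so the consumer simply retries until it has drained all items produced.

public class ConcNodeCachingLinkedQueueDemo {

  public static void main(String[] args) throws InterruptedException {
    final ConcNodeCachingLinkedQueue<Integer> queue = new ConcNodeCachingLinkedQueue<Integer>();
    final int count = 1000;

    Thread producer =
        new Thread(
            new Runnable() {
              @Override
              public void run() {
                for (int i = 0; i < count; i++) {
                  queue.push(i);
                }
              }
            });

    Thread consumer =
        new Thread(
            new Runnable() {
              @Override
              public void run() {
                int received = 0;
                while (received < count) {
                  Integer item = queue.poll();
                  if (item != null) {
                    received++; // null only means the producer has not caught up yet
                  }
                }
              }
            });

    producer.start();
    consumer.start();
    producer.join();
    consumer.join();
    System.out.println("drained " + count + " items");
  }
}
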
Code example #20
0
File: Execution.java  Project: f-sander/flink
/**
 * A single execution of a vertex. While an {@link ExecutionVertex} can be executed multiple times
 * (for recovery, or other re-computation), this class tracks the state of a single execution of
 * that vertex and the resources.
 *
 * <p>NOTE ABOUT THE DESIGN RATIONALE:
 *
 * <p>In several points of the code, we need to deal with possible concurrent state changes and
 * actions. For example, while the call to deploy a task (send it to the TaskManager) happens, the
 * task gets cancelled.
 *
 * <p>We could lock the entire portion of the code (decision to deploy, deploy, set state to
 * running) such that it is guaranteed that any "cancel command" will only pick up after deployment
 * is done and that the "cancel command" call will never overtake the deploying call.
 *
 * <p>This blocks the threads for a long time, because the remote calls may take long. Depending on
 * their locking behavior, it may even result in distributed deadlocks (unless carefully avoided). We
 * therefore use atomic state updates and occasional double-checking to ensure that the state after
 * a completed call is as expected, and trigger correcting actions if it is not. Many actions are
 * also idempotent (like canceling).
 */
public class Execution implements Serializable {

  private static final long serialVersionUID = 42L;

  private static final AtomicReferenceFieldUpdater<Execution, ExecutionState> STATE_UPDATER =
      AtomicReferenceFieldUpdater.newUpdater(Execution.class, ExecutionState.class, "state");

  private static final Logger LOG = ExecutionGraph.LOG;

  private static final int NUM_CANCEL_CALL_TRIES = 3;

  // --------------------------------------------------------------------------------------------

  private final ExecutionVertex vertex;

  private final ExecutionAttemptID attemptId;

  private final long[] stateTimestamps;

  private final int attemptNumber;

  private final FiniteDuration timeout;

  private ConcurrentLinkedQueue<PartialInputChannelDeploymentDescriptor>
      partialInputChannelDeploymentDescriptors;

  private volatile ExecutionState state = CREATED;

  private volatile SimpleSlot
      assignedResource; // once assigned, never changes until the execution is archived

  private volatile Throwable failureCause; // once assigned, never changes

  private volatile InstanceConnectionInfo assignedResourceLocation; // for the archived execution

  private SerializedValue<StateHandle<?>> operatorState;

  private long recoveryTimestamp;

  /** The execution context which is used to execute futures. */
  @SuppressWarnings("NonSerializableFieldInSerializableClass")
  private ExecutionContext executionContext;

  /* Lock for updating the accumulators atomically. */
  private final SerializableObject accumulatorLock = new SerializableObject();

  /* Continuously updated map of user-defined accumulators */
  private volatile Map<String, Accumulator<?, ?>> userAccumulators;

  /* Continuously updated map of internal accumulators */
  private volatile Map<AccumulatorRegistry.Metric, Accumulator<?, ?>> flinkAccumulators;

  // --------------------------------------------------------------------------------------------

  public Execution(
      ExecutionContext executionContext,
      ExecutionVertex vertex,
      int attemptNumber,
      long startTimestamp,
      FiniteDuration timeout) {
    this.executionContext = checkNotNull(executionContext);

    this.vertex = checkNotNull(vertex);
    this.attemptId = new ExecutionAttemptID();

    this.attemptNumber = attemptNumber;

    this.stateTimestamps = new long[ExecutionState.values().length];
    markTimestamp(ExecutionState.CREATED, startTimestamp);

    this.timeout = timeout;

    this.partialInputChannelDeploymentDescriptors =
        new ConcurrentLinkedQueue<PartialInputChannelDeploymentDescriptor>();
  }

  // --------------------------------------------------------------------------------------------
  //   Properties
  // --------------------------------------------------------------------------------------------

  public ExecutionVertex getVertex() {
    return vertex;
  }

  public ExecutionAttemptID getAttemptId() {
    return attemptId;
  }

  public int getAttemptNumber() {
    return attemptNumber;
  }

  public ExecutionState getState() {
    return state;
  }

  public SimpleSlot getAssignedResource() {
    return assignedResource;
  }

  public InstanceConnectionInfo getAssignedResourceLocation() {
    return assignedResourceLocation;
  }

  public Throwable getFailureCause() {
    return failureCause;
  }

  public long[] getStateTimestamps() {
    return stateTimestamps;
  }

  public long getStateTimestamp(ExecutionState state) {
    return this.stateTimestamps[state.ordinal()];
  }

  public boolean isFinished() {
    return state == FINISHED || state == FAILED || state == CANCELED;
  }

  /** This method cleans fields that are irrelevant for the archived execution attempt. */
  public void prepareForArchiving() {
    if (assignedResource != null && assignedResource.isAlive()) {
      throw new IllegalStateException(
          "Cannot archive Execution while the assigned resource is still running.");
    }
    assignedResource = null;

    executionContext = null;

    partialInputChannelDeploymentDescriptors.clear();
    partialInputChannelDeploymentDescriptors = null;
  }

  public void setInitialState(
      SerializedValue<StateHandle<?>> initialState, long recoveryTimestamp) {
    if (state != ExecutionState.CREATED) {
      throw new IllegalArgumentException(
          "Can only assign operator state when execution attempt is in CREATED");
    }
    this.operatorState = initialState;
    this.recoveryTimestamp = recoveryTimestamp;
  }
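
  // Illustrative sketch only, not Flink's actual implementation: the transitionState(...) calls
  // below follow the atomic state-update idiom described in the class comment above. A single
  // compareAndSet on STATE_UPDATER decides any race, and callers re-check the state afterwards.
  // A hypothetical helper with that shape could look like this:
  private boolean casState(ExecutionState expected, ExecutionState next) {
    if (STATE_UPDATER.compareAndSet(this, expected, next)) {
      markTimestamp(next, System.currentTimeMillis()); // record when the transition happened
      return true;
    }
    return false; // lost the race; the caller re-reads 'state' and reacts (retry, cancel, ...)
  }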

  // --------------------------------------------------------------------------------------------
  //  Actions
  // --------------------------------------------------------------------------------------------

  /**
   * NOTE: This method only throws exceptions if it is in an illegal state to be scheduled, or if
   * the task needs to be scheduled immediately and no resource is available. If the task is
   * accepted by the scheduler, any error sets the vertex state to failed and triggers the recovery
   * logic.
   *
   * @param scheduler The scheduler to use to schedule this execution attempt.
   * @param queued Flag to indicate whether the scheduler may queue this task if it cannot
   *     immediately deploy it.
   * @throws IllegalStateException Thrown, if the vertex is not in CREATED state, which is the only
   *     state that permits scheduling.
   * @throws NoResourceAvailableException Thrown if queued scheduling is not allowed and no
   *     resources are currently available.
   */
  public boolean scheduleForExecution(Scheduler scheduler, boolean queued)
      throws NoResourceAvailableException {
    if (scheduler == null) {
      throw new IllegalArgumentException("Cannot send null Scheduler when scheduling execution.");
    }

    final SlotSharingGroup sharingGroup = vertex.getJobVertex().getSlotSharingGroup();
    final CoLocationConstraint locationConstraint = vertex.getLocationConstraint();

    // sanity check
    if (locationConstraint != null && sharingGroup == null) {
      throw new RuntimeException(
          "Trying to schedule with co-location constraint but without slot sharing allowed.");
    }

    if (transitionState(CREATED, SCHEDULED)) {

      ScheduledUnit toSchedule =
          locationConstraint == null
              ? new ScheduledUnit(this, sharingGroup)
              : new ScheduledUnit(this, sharingGroup, locationConstraint);

      // IMPORTANT: To prevent leaks of cluster resources, we need to make sure that slots are
      // returned in all cases where the deployment failed. We use many try {} finally {} clauses
      // to ensure that.
      if (queued) {
        SlotAllocationFuture future = scheduler.scheduleQueued(toSchedule);

        future.setFutureAction(
            new SlotAllocationFutureAction() {
              @Override
              public void slotAllocated(SimpleSlot slot) {
                try {
                  deployToSlot(slot);
                } catch (Throwable t) {
                  try {
                    slot.releaseSlot();
                  } finally {
                    markFailed(t);
                  }
                }
              }
            });
      } else {
        SimpleSlot slot = scheduler.scheduleImmediately(toSchedule);
        try {
          deployToSlot(slot);
        } catch (Throwable t) {
          try {
            slot.releaseSlot();
          } finally {
            markFailed(t);
          }
        }
      }

      return true;
    } else {
      // call race, already deployed, or already done
      return false;
    }
  }

  public void deployToSlot(final SimpleSlot slot) throws JobException {
    // sanity checks
    if (slot == null) {
      throw new NullPointerException();
    }
    if (!slot.isAlive()) {
      throw new JobException("Target slot for deployment is not alive.");
    }

    // make sure exactly one deployment call happens from the correct state
    // note: the transition from CREATED to DEPLOYING is for testing purposes only
    ExecutionState previous = this.state;
    if (previous == SCHEDULED || previous == CREATED) {
      if (!transitionState(previous, DEPLOYING)) {
        // race condition, someone else beat us to the deploying call.
        // this should actually not happen and indicates a race somewhere else
        throw new IllegalStateException("Cannot deploy task: Concurrent deployment call race.");
      }
    } else {
      // vertex may have been cancelled, or it was already scheduled
      throw new IllegalStateException(
          "The vertex must be in CREATED or SCHEDULED state to be deployed. Found state "
              + previous);
    }

    try {
      // good, we are allowed to deploy
      if (!slot.setExecutedVertex(this)) {
        throw new JobException("Could not assign the ExecutionVertex to the slot " + slot);
      }
      this.assignedResource = slot;
      this.assignedResourceLocation = slot.getInstance().getInstanceConnectionInfo();

      // race double check, did we fail/cancel and do we need to release the slot?
      if (this.state != DEPLOYING) {
        slot.releaseSlot();
        return;
      }

      if (LOG.isInfoEnabled()) {
        LOG.info(
            String.format(
                "Deploying %s (attempt #%d) to %s",
                vertex.getSimpleName(),
                attemptNumber,
                slot.getInstance().getInstanceConnectionInfo().getHostname()));
      }

      final TaskDeploymentDescriptor deployment =
          vertex.createDeploymentDescriptor(
              attemptId, slot, operatorState, recoveryTimestamp, attemptNumber);

      // register this execution at the execution graph, to receive call backs
      vertex.getExecutionGraph().registerExecution(this);

      final Instance instance = slot.getInstance();
      final ActorGateway gateway = instance.getActorGateway();

      final Future<Object> deployAction = gateway.ask(new SubmitTask(deployment), timeout);

      deployAction.onComplete(
          new OnComplete<Object>() {

            @Override
            public void onComplete(Throwable failure, Object success) throws Throwable {
              if (failure != null) {
                if (failure instanceof TimeoutException) {
                  String taskname =
                      deployment.getTaskInfo().getTaskNameWithSubtasks() + " (" + attemptId + ')';

                  markFailed(
                      new Exception(
                          "Cannot deploy task "
                              + taskname
                              + " - TaskManager ("
                              + instance
                              + ") not responding after a timeout of "
                              + timeout,
                          failure));
                } else {
                  markFailed(failure);
                }
              } else {
                if (!(success.equals(Messages.getAcknowledge()))) {
                  markFailed(
                      new Exception(
                          "Failed to deploy the task to slot "
                              + slot
                              + ": Response was not of type Acknowledge"));
                }
              }
            }
          },
          executionContext);
    } catch (Throwable t) {
      markFailed(t);
      ExceptionUtils.rethrow(t);
    }
  }

  public void cancel() {
    // depending on the previous state, we go directly to cancelled (no cancel call necessary)
    // -- or to canceling (cancel call needs to be sent to the task manager)

    // because of several possibly previous states, we need to again loop until we make a
    // successful atomic state transition
    while (true) {

      ExecutionState current = this.state;

      if (current == CANCELING || current == CANCELED) {
        // already taken care of, no need to cancel again
        return;
      }

      // these two are the common cases where we need to send a cancel call
      else if (current == RUNNING || current == DEPLOYING) {
        // try to transition to canceling, if successful, send the cancel call
        if (transitionState(current, CANCELING)) {
          sendCancelRpcCall();
          return;
        }
        // else: fall through the loop
      } else if (current == FINISHED || current == FAILED) {
        // nothing to do anymore. The task finished or failed before it could be cancelled.
        // in any case, the task is removed from the TaskManager already
        sendFailIntermediateResultPartitionsRpcCall();

        return;
      } else if (current == CREATED || current == SCHEDULED) {
        // from here, we can directly switch to cancelled, because no task has been deployed yet
        if (transitionState(current, CANCELED)) {

          // we skip the canceling state. set the timestamp, for a consistent appearance
          markTimestamp(CANCELING, getStateTimestamp(CANCELED));

          try {
            vertex.getExecutionGraph().deregisterExecution(this);
            if (assignedResource != null) {
              assignedResource.releaseSlot();
            }
          } finally {
            vertex.executionCanceled();
          }
          return;
        }
        // else: fall through the loop
      } else {
        throw new IllegalStateException(current.name());
      }
    }
  }

  void scheduleOrUpdateConsumers(List<List<ExecutionEdge>> allConsumers) {
    final int numConsumers = allConsumers.size();

    if (numConsumers > 1) {
      fail(
          new IllegalStateException(
              "Currently, only a single consumer group per partition is supported."));
    } else if (numConsumers == 0) {
      return;
    }

    for (ExecutionEdge edge : allConsumers.get(0)) {
      final ExecutionVertex consumerVertex = edge.getTarget();

      final Execution consumer = consumerVertex.getCurrentExecutionAttempt();
      final ExecutionState consumerState = consumer.getState();

      final IntermediateResultPartition partition = edge.getSource();

      // ----------------------------------------------------------------
      // Consumer is created => try to deploy and cache input channel
      // descriptors if there is a deployment race
      // ----------------------------------------------------------------
      if (consumerState == CREATED) {
        final Execution partitionExecution = partition.getProducer().getCurrentExecutionAttempt();

        consumerVertex.cachePartitionInfo(
            PartialInputChannelDeploymentDescriptor.fromEdge(partition, partitionExecution));

        // When deploying a consuming task, its task deployment descriptor will contain all
        // deployment information available at the respective time. It is possible that some
        // of the partitions to be consumed have not been created yet. These are updated at
        // runtime via update messages.
        //
        // TODO The current approach may send many update messages even though the consuming
        // task has already been deployed with all necessary information. We have to check
        // whether this is a problem and fix it, if it is.
        future(
            new Callable<Boolean>() {
              @Override
              public Boolean call() throws Exception {
                try {
                  consumerVertex.scheduleForExecution(
                      consumerVertex.getExecutionGraph().getScheduler(),
                      consumerVertex.getExecutionGraph().isQueuedSchedulingAllowed());
                } catch (Throwable t) {
                  fail(
                      new IllegalStateException(
                          "Could not schedule consumer " + "vertex " + consumerVertex, t));
                }

                return true;
              }
            },
            executionContext);

        // double check to resolve race conditions
        if (consumerVertex.getExecutionState() == RUNNING) {
          consumerVertex.sendPartitionInfos();
        }
      }
      // ----------------------------------------------------------------
      // Consumer is running => send update message now
      // ----------------------------------------------------------------
      else {
        if (consumerState == RUNNING) {
          final SimpleSlot consumerSlot = consumer.getAssignedResource();

          if (consumerSlot == null) {
            // The consumer has been reset concurrently
            continue;
          }

          final Instance consumerInstance = consumerSlot.getInstance();

          final ResultPartitionID partitionId =
              new ResultPartitionID(partition.getPartitionId(), attemptId);

          final Instance partitionInstance =
              partition.getProducer().getCurrentAssignedResource().getInstance();

          final ResultPartitionLocation partitionLocation;

          if (consumerInstance.equals(partitionInstance)) {
            // Consuming task is deployed to the same instance as the partition => local
            partitionLocation = ResultPartitionLocation.createLocal();
          } else {
            // Different instances => remote
            final ConnectionID connectionId =
                new ConnectionID(
                    partitionInstance.getInstanceConnectionInfo(),
                    partition.getIntermediateResult().getConnectionIndex());

            partitionLocation = ResultPartitionLocation.createRemote(connectionId);
          }

          final InputChannelDeploymentDescriptor descriptor =
              new InputChannelDeploymentDescriptor(partitionId, partitionLocation);

          final UpdatePartitionInfo updateTaskMessage =
              new UpdateTaskSinglePartitionInfo(
                  consumer.getAttemptId(), partition.getIntermediateResult().getId(), descriptor);

          sendUpdatePartitionInfoRpcCall(consumerSlot, updateTaskMessage);
        }
        // ----------------------------------------------------------------
        // Consumer is scheduled or deploying => cache input channel
        // deployment descriptors and send update message later
        // ----------------------------------------------------------------
        else if (consumerState == SCHEDULED || consumerState == DEPLOYING) {
          final Execution partitionExecution = partition.getProducer().getCurrentExecutionAttempt();

          consumerVertex.cachePartitionInfo(
              PartialInputChannelDeploymentDescriptor.fromEdge(partition, partitionExecution));

          // double check to resolve race conditions
          if (consumerVertex.getExecutionState() == RUNNING) {
            consumerVertex.sendPartitionInfos();
          }
        }
      }
    }
  }

  /**
   * This method fails the vertex due to an external condition. The task will move to state FAILED.
   * If the task was in state RUNNING or DEPLOYING before, it will send a cancel call to the
   * TaskManager.
   *
   * @param t The exception that caused the task to fail.
   */
  public void fail(Throwable t) {
    processFail(t, false);
  }

  // --------------------------------------------------------------------------------------------
  //   Callbacks
  // --------------------------------------------------------------------------------------------

  /**
   * This method marks the task as failed, but will make no attempt to remove task execution from
   * the task manager. It is intended for cases where the task is known not to be running, or when
   * the TaskManager reports failure (in which case it has already removed the task).
   *
   * @param t The exception that caused the task to fail.
   */
  void markFailed(Throwable t) {
    processFail(t, true);
  }

  void markFinished() {
    markFinished(null, null);
  }

  void markFinished(
      Map<AccumulatorRegistry.Metric, Accumulator<?, ?>> flinkAccumulators,
      Map<String, Accumulator<?, ?>> userAccumulators) {

    // this call usually comes during RUNNING, but may also come while still in deploying (very fast
    // tasks!)
    while (true) {
      ExecutionState current = this.state;

      if (current == RUNNING || current == DEPLOYING) {

        if (transitionState(current, FINISHED)) {
          try {
            for (IntermediateResultPartition finishedPartition :
                getVertex().finishAllBlockingPartitions()) {

              IntermediateResultPartition[] allPartitions =
                  finishedPartition.getIntermediateResult().getPartitions();

              for (IntermediateResultPartition partition : allPartitions) {
                scheduleOrUpdateConsumers(partition.getConsumers());
              }
            }

            synchronized (accumulatorLock) {
              this.flinkAccumulators = flinkAccumulators;
              this.userAccumulators = userAccumulators;
            }

            assignedResource.releaseSlot();
            vertex.getExecutionGraph().deregisterExecution(this);
          } finally {
            vertex.executionFinished();
          }
          return;
        }
      } else if (current == CANCELING) {
        // we sent a cancel call, but the task finished before it arrived. We will
        // therefore never get a CANCELED confirmation back for it.
        cancelingComplete();
        return;
      } else if (current == CANCELED || current == FAILED) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Task FINISHED, but concurrently went to state " + state);
        }
        return;
      } else {
        // this should not happen, we need to fail this
        markFailed(new Exception("Vertex received FINISHED message while being in state " + state));
        return;
      }
    }
  }

  void cancelingComplete() {

    // the task managers can themselves cancel tasks without an external trigger, if they find
    // that the network stack is canceled (for example by a failing / canceling receiver or
    // sender). This is an artifact of the old network runtime, but for now we need to support
    // task transitions from running directly to canceled.

    while (true) {
      ExecutionState current = this.state;

      if (current == CANCELED) {
        return;
      } else if (current == CANCELING || current == RUNNING || current == DEPLOYING) {
        if (transitionState(current, CANCELED)) {
          try {
            assignedResource.releaseSlot();
            vertex.getExecutionGraph().deregisterExecution(this);
          } finally {
            vertex.executionCanceled();
          }
          return;
        }

        // else fall through the loop
      } else {
        // failing in the meantime may happen and is no problem.
        // anything else is a serious problem !!!
        if (current != FAILED) {
          String message =
              String.format(
                  "Asynchronous race: Found state %s after successful cancel call.", state);
          LOG.error(message);
          vertex.getExecutionGraph().fail(new Exception(message));
        }
        return;
      }
    }
  }

  void cachePartitionInfo(PartialInputChannelDeploymentDescriptor partitionInfo) {
    partialInputChannelDeploymentDescriptors.add(partitionInfo);
  }

  void sendPartitionInfos() {
    // check if the ExecutionVertex has already been archived and thus cleared the
    // partial partition infos queue
    if (partialInputChannelDeploymentDescriptors != null
        && !partialInputChannelDeploymentDescriptors.isEmpty()) {

      PartialInputChannelDeploymentDescriptor partialInputChannelDeploymentDescriptor;

      List<IntermediateDataSetID> resultIDs = new ArrayList<IntermediateDataSetID>();
      List<InputChannelDeploymentDescriptor> inputChannelDeploymentDescriptors =
          new ArrayList<InputChannelDeploymentDescriptor>();

      while ((partialInputChannelDeploymentDescriptor =
              partialInputChannelDeploymentDescriptors.poll())
          != null) {
        resultIDs.add(partialInputChannelDeploymentDescriptor.getResultId());
        inputChannelDeploymentDescriptors.add(
            partialInputChannelDeploymentDescriptor.createInputChannelDeploymentDescriptor(this));
      }

      UpdatePartitionInfo updateTaskMessage =
          createUpdateTaskMultiplePartitionInfos(
              attemptId, resultIDs, inputChannelDeploymentDescriptors);

      sendUpdatePartitionInfoRpcCall(assignedResource, updateTaskMessage);
    }
  }

  // --------------------------------------------------------------------------------------------
  //  Internal Actions
  // --------------------------------------------------------------------------------------------

  private boolean processFail(Throwable t, boolean isCallback) {

    // damn, we failed. This only means that we keep our books and notify our parent
    // JobExecutionVertex; the actual computation on the task manager is cleaned up by the
    // TaskManager that noticed the failure.

    // we may need to loop multiple times (in the presence of concurrent calls) in order to
    // atomically switch to failed
    while (true) {
      ExecutionState current = this.state;

      if (current == FAILED) {
        // already failed. It is enough to remember once that we failed (it's sad enough)
        return false;
      }

      if (current == CANCELED) {
        // we are already aborting or are already aborted
        if (LOG.isDebugEnabled()) {
          LOG.debug(
              String.format(
                  "Ignoring transition of vertex %s to %s while being %s",
                  getVertexWithAttempt(), FAILED, CANCELED));
        }
        return false;
      }

      if (transitionState(current, FAILED, t)) {
        // success (in a manner of speaking)
        this.failureCause = t;

        try {
          if (assignedResource != null) {
            assignedResource.releaseSlot();
          }
          vertex.getExecutionGraph().deregisterExecution(this);
        } finally {
          vertex.executionFailed(t);
        }

        if (!isCallback && (current == RUNNING || current == DEPLOYING)) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Sending out cancel request, to remove task execution from TaskManager.");
          }

          try {
            if (assignedResource != null) {
              sendCancelRpcCall();
            }
          } catch (Throwable tt) {
            // no reason this should ever happen, but log it to be safe
            LOG.error("Error triggering cancel call while marking task as failed.", tt);
          }
        }

        // leave the loop
        return true;
      }
    }
  }

  boolean switchToRunning() {

    if (transitionState(DEPLOYING, RUNNING)) {
      sendPartitionInfos();
      return true;
    } else {
      // something happened while the call was in progress.
      // it can mean:
      //  - canceling, while deployment was in progress. state is now canceling, or canceled,
      //    if the response overtook
      //  - finishing (execution and finished call overtook the deployment answer, which is
      //    possible and happens for fast tasks)
      //  - failed (execution, failure, and failure message overtook the deployment answer)

      ExecutionState currentState = this.state;

      if (currentState == FINISHED || currentState == CANCELED) {
        // do nothing, the task was really fast (nice)
        // or it was canceled really fast
      } else if (currentState == CANCELING || currentState == FAILED) {
        if (LOG.isDebugEnabled()) {
          LOG.debug(
              String.format(
                  "Concurrent canceling/failing of %s while deployment was in progress.",
                  getVertexWithAttempt()));
        }
        sendCancelRpcCall();
      } else {
        String message =
            String.format(
                "Concurrent unexpected state transition of task %s to %s while deployment was in progress.",
                getVertexWithAttempt(), currentState);

        if (LOG.isDebugEnabled()) {
          LOG.debug(message);
        }

        // undo the deployment
        sendCancelRpcCall();

        // record the failure
        markFailed(new Exception(message));
      }

      return false;
    }
  }

  /**
   * This method sends a CancelTask message to the instance of the assigned slot.
   *
   * <p>The sending is tried up to NUM_CANCEL_CALL_TRIES times.
   */
  private void sendCancelRpcCall() {
    final SimpleSlot slot = this.assignedResource;

    if (slot != null) {

      final ActorGateway gateway = slot.getInstance().getActorGateway();

      Future<Object> cancelResult =
          gateway.retry(
              new CancelTask(attemptId), NUM_CANCEL_CALL_TRIES, timeout, executionContext);

      cancelResult.onComplete(
          new OnComplete<Object>() {

            @Override
            public void onComplete(Throwable failure, Object success) throws Throwable {
              if (failure != null) {
                fail(new Exception("Task could not be canceled.", failure));
              } else {
                TaskOperationResult result = (TaskOperationResult) success;
                if (!result.success()) {
                  LOG.debug(
                      "Cancel task call did not find task. Probably akka message call" + " race.");
                }
              }
            }
          },
          executionContext);
    }
  }

  private void sendFailIntermediateResultPartitionsRpcCall() {
    final SimpleSlot slot = this.assignedResource;

    if (slot != null) {
      final Instance instance = slot.getInstance();

      if (instance.isAlive()) {
        final ActorGateway gateway = instance.getActorGateway();

        // TODO For some tests this could be a problem when querying too early if all resources were
        // released
        gateway.tell(new FailIntermediateResultPartitions(attemptId));
      }
    }
  }

  /**
   * Sends an UpdatePartitionInfo message to the instance of the consumerSlot.
   *
   * @param consumerSlot Slot to whose instance the message will be sent
   * @param updatePartitionInfo UpdatePartitionInfo message
   */
  private void sendUpdatePartitionInfoRpcCall(
      final SimpleSlot consumerSlot, final UpdatePartitionInfo updatePartitionInfo) {

    if (consumerSlot != null) {
      final Instance instance = consumerSlot.getInstance();
      final ActorGateway gateway = instance.getActorGateway();

      Future<Object> futureUpdate = gateway.ask(updatePartitionInfo, timeout);

      futureUpdate.onFailure(
          new OnFailure() {
            @Override
            public void onFailure(Throwable failure) throws Throwable {
              fail(
                  new IllegalStateException(
                      "Update task on instance " + instance + " failed due to:", failure));
            }
          },
          executionContext);
    }
  }

  // --------------------------------------------------------------------------------------------
  //  Miscellaneous
  // --------------------------------------------------------------------------------------------

  private boolean transitionState(ExecutionState currentState, ExecutionState targetState) {
    return transitionState(currentState, targetState, null);
  }

  private boolean transitionState(
      ExecutionState currentState, ExecutionState targetState, Throwable error) {
    if (STATE_UPDATER.compareAndSet(this, currentState, targetState)) {
      markTimestamp(targetState);

      LOG.info(
          getVertex().getTaskNameWithSubtaskIndex()
              + " ("
              + getAttemptId()
              + ") switched from "
              + currentState
              + " to "
              + targetState);

      // make sure that the state transition completes normally.
      // potential errors in listeners may not affect the main logic
      try {
        vertex.notifyStateTransition(attemptId, targetState, error);
      } catch (Throwable t) {
        LOG.error("Error while notifying execution graph of execution state transition.", t);
      }
      return true;
    } else {
      return false;
    }
  }

  private void markTimestamp(ExecutionState state) {
    markTimestamp(state, System.currentTimeMillis());
  }

  private void markTimestamp(ExecutionState state, long timestamp) {
    this.stateTimestamps[state.ordinal()] = timestamp;
  }

  public String getVertexWithAttempt() {
    return vertex.getSimpleName() + " - execution #" + attemptNumber;
  }

  // ------------------------------------------------------------------------
  //  Accumulators
  // ------------------------------------------------------------------------

  /**
   * Update accumulators (discarded when the Execution has already been terminated).
   *
   * @param flinkAccumulators the flink internal accumulators
   * @param userAccumulators the user accumulators
   */
  public void setAccumulators(
      Map<AccumulatorRegistry.Metric, Accumulator<?, ?>> flinkAccumulators,
      Map<String, Accumulator<?, ?>> userAccumulators) {
    synchronized (accumulatorLock) {
      if (!state.isTerminal()) {
        this.flinkAccumulators = flinkAccumulators;
        this.userAccumulators = userAccumulators;
      }
    }
  }

  public Map<String, Accumulator<?, ?>> getUserAccumulators() {
    return userAccumulators;
  }

  public StringifiedAccumulatorResult[] getUserAccumulatorsStringified() {
    return StringifiedAccumulatorResult.stringifyAccumulatorResults(userAccumulators);
  }

  public Map<AccumulatorRegistry.Metric, Accumulator<?, ?>> getFlinkAccumulators() {
    return flinkAccumulators;
  }

  // ------------------------------------------------------------------------
  //  Standard utilities
  // ------------------------------------------------------------------------

  @Override
  public String toString() {
    return String.format(
        "Attempt #%d (%s) @ %s - [%s]",
        attemptNumber,
        vertex.getSimpleName(),
        (assignedResource == null ? "(unassigned)" : assignedResource.toString()),
        state);
  }
}
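
The cancel() and transitionState() logic above is a plain compare-and-set state machine over a volatile field. Below is a minimal, self-contained sketch of that pattern, assuming an illustrative enum and class name rather than the actual Flink types:

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

// Minimal sketch of the CAS-based state machine used by transitionState() above.
// The enum values and class name are illustrative assumptions, not Flink API.
final class StateMachineSketch {
  enum State { CREATED, RUNNING, CANCELING, CANCELED, FINISHED, FAILED }

  // The state field must be volatile (and non-static) so the updater can CAS it.
  private volatile State state = State.CREATED;

  private static final AtomicReferenceFieldUpdater<StateMachineSketch, State> STATE_UPDATER =
      AtomicReferenceFieldUpdater.newUpdater(StateMachineSketch.class, State.class, "state");

  /** Atomically moves from an expected state to a target state; false if another thread won. */
  boolean transitionState(State expected, State target) {
    return STATE_UPDATER.compareAndSet(this, expected, target);
  }

  /** Loops until a terminal decision is made, mirroring the structure of cancel() above. */
  void cancel() {
    while (true) {
      State current = state;
      if (current == State.CANCELING || current == State.CANCELED) {
        return; // already being handled elsewhere
      } else if (current == State.RUNNING) {
        if (transitionState(current, State.CANCELING)) {
          return; // this thread now owns the cancel call
        }
        // else: state changed concurrently, re-read and retry
      } else {
        if (transitionState(current, State.CANCELED)) {
          return;
        }
      }
    }
  }

  public static void main(String[] args) {
    StateMachineSketch s = new StateMachineSketch();
    s.transitionState(State.CREATED, State.RUNNING);
    s.cancel();
    System.out.println(s.state); // CANCELING
  }
}
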
Code Example #21
  static final class BufferBondarySupplierSubscriber<T, U extends Collection<? super T>, B>
      extends QueueDrainSubscriber<T, U, U> implements Subscriber<T>, Subscription, Disposable {
    /** */
    final Supplier<U> bufferSupplier;

    final Supplier<? extends Publisher<B>> boundarySupplier;

    Subscription s;

    volatile Disposable other;

    @SuppressWarnings("rawtypes")
    static final AtomicReferenceFieldUpdater<BufferBondarySupplierSubscriber, Disposable> OTHER =
        AtomicReferenceFieldUpdater.newUpdater(
            BufferBondarySupplierSubscriber.class, Disposable.class, "other");

    static final Disposable DISPOSED =
        new Disposable() {
          @Override
          public void dispose() {}
        };

    U buffer;

    public BufferBondarySupplierSubscriber(
        Subscriber<? super U> actual,
        Supplier<U> bufferSupplier,
        Supplier<? extends Publisher<B>> boundarySupplier) {
      super(actual, new MpscLinkedQueue<U>());
      this.bufferSupplier = bufferSupplier;
      this.boundarySupplier = boundarySupplier;
    }

    @Override
    public void onSubscribe(Subscription s) {
      if (SubscriptionHelper.validateSubscription(this.s, s)) {
        return;
      }
      this.s = s;

      Subscriber<? super U> actual = this.actual;

      U b;

      try {
        b = bufferSupplier.get();
      } catch (Throwable e) {
        cancelled = true;
        s.cancel();
        EmptySubscription.error(e, actual);
        return;
      }

      if (b == null) {
        cancelled = true;
        s.cancel();
        EmptySubscription.error(new NullPointerException("The buffer supplied is null"), actual);
        return;
      }
      buffer = b;

      Publisher<B> boundary;

      try {
        boundary = boundarySupplier.get();
      } catch (Throwable ex) {
        cancelled = true;
        s.cancel();
        EmptySubscription.error(ex, actual);
        return;
      }

      if (boundary == null) {
        cancelled = true;
        s.cancel();
        EmptySubscription.error(
            new NullPointerException("The boundary publisher supplied is null"), actual);
        return;
      }

      BufferBoundarySubscriber<T, U, B> bs = new BufferBoundarySubscriber<T, U, B>(this);
      other = bs;

      actual.onSubscribe(this);

      if (!cancelled) {
        s.request(Long.MAX_VALUE);

        boundary.subscribe(bs);
      }
    }

    @Override
    public void onNext(T t) {
      synchronized (this) {
        U b = buffer;
        if (b == null) {
          return;
        }
        b.add(t);
      }
    }

    @Override
    public void onError(Throwable t) {
      cancel();
      actual.onError(t);
    }

    @Override
    public void onComplete() {
      U b;
      synchronized (this) {
        b = buffer;
        if (b == null) {
          return;
        }
        buffer = null;
      }
      queue.offer(b);
      done = true;
      if (enter()) {
        QueueDrainHelper.drainMaxLoop(queue, actual, false, this, this);
      }
    }

    @Override
    public void request(long n) {
      requested(n);
    }

    @Override
    public void cancel() {
      if (!cancelled) {
        cancelled = true;
        s.cancel();
        disposeOther();

        if (enter()) {
          queue.clear();
        }
      }
    }

    void disposeOther() {
      Disposable d = other;
      if (d != DISPOSED) {
        d = OTHER.getAndSet(this, DISPOSED);
        if (d != DISPOSED && d != null) {
          d.dispose();
        }
      }
    }

    void next() {

      Disposable o = other;

      U next;

      try {
        next = bufferSupplier.get();
      } catch (Throwable e) {
        cancel();
        actual.onError(e);
        return;
      }

      if (next == null) {
        cancel();
        actual.onError(new NullPointerException("The buffer supplied is null"));
        return;
      }

      Publisher<B> boundary;

      try {
        boundary = boundarySupplier.get();
      } catch (Throwable ex) {
        cancelled = true;
        s.cancel();
        actual.onError(ex);
        return;
      }

      if (boundary == null) {
        cancelled = true;
        s.cancel();
        actual.onError(new NullPointerException("The boundary publisher supplied is null"));
        return;
      }

      BufferBoundarySubscriber<T, U, B> bs = new BufferBoundarySubscriber<T, U, B>(this);

      if (!OTHER.compareAndSet(this, o, bs)) {
        return;
      }

      U b;
      synchronized (this) {
        b = buffer;
        if (b == null) {
          return;
        }
        buffer = next;
      }

      boundary.subscribe(bs);

      fastpathEmitMax(b, false, this);
    }

    @Override
    public void dispose() {
      s.cancel();
      disposeOther();
    }

    @Override
    public boolean accept(Subscriber<? super U> a, U v) {
      actual.onNext(v);
      return true;
    }
  }
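
The disposeOther() method above uses a terminal sentinel (DISPOSED) together with getAndSet so that a resource is released exactly once even under concurrent cancellation. The following is a minimal sketch of that idiom, assuming an illustrative Resource interface instead of the real Disposable type:

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

// Minimal sketch of the "terminal sentinel + getAndSet" disposal idiom used by
// disposeOther() above. The Resource interface and class name are illustrative
// assumptions, not part of the original reactive-streams code.
final class DisposeOnceSketch {
  interface Resource {
    void close();
  }

  // Sentinel that marks the slot as permanently disposed.
  private static final Resource DISPOSED = () -> {};

  private volatile Resource current;

  private static final AtomicReferenceFieldUpdater<DisposeOnceSketch, Resource> CURRENT =
      AtomicReferenceFieldUpdater.newUpdater(DisposeOnceSketch.class, Resource.class, "current");

  /** Installs a resource unless already disposed, in which case it is closed immediately. */
  void set(Resource r) {
    if (!CURRENT.compareAndSet(this, null, r)) {
      r.close(); // lost the race: slot is either disposed or already occupied
    }
  }

  /** Disposes the held resource at most once, even under concurrent calls. */
  void dispose() {
    Resource r = current;
    if (r != DISPOSED) {
      r = CURRENT.getAndSet(this, DISPOSED); // only one caller observes the old value
      if (r != null && r != DISPOSED) {
        r.close();
      }
    }
  }

  public static void main(String[] args) {
    DisposeOnceSketch holder = new DisposeOnceSketch();
    holder.set(() -> System.out.println("closing"));
    holder.dispose(); // prints "closing"
    holder.dispose(); // no effect: the sentinel is already installed
  }
}
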
Code Example #22
/**
 * Lock-free secure concurrent hash map. Attempts to store keys which cause excessive collisions
 * will result in a security exception.
 *
 * @param <K> the key type
 * @param <V> the value type
 * @author <a href="mailto:[email protected]">David M. Lloyd</a>
 */
public final class SecureHashMap<K, V> extends AbstractMap<K, V> implements ConcurrentMap<K, V> {
  private static final int MAX_ROW_LENGTH = 32;
  private static final int DEFAULT_INITIAL_CAPACITY = 16;
  private static final int MAXIMUM_CAPACITY = 1 << 30;
  private static final float DEFAULT_LOAD_FACTOR = 0.60f;

  /** A row which has been resized into the new view. */
  private static final Item[] RESIZED = new Item[0];
  /** A non-existent table entry (as opposed to a {@code null} value). */
  private static final Object NONEXISTENT = new Object();

  private volatile Table<K, V> table;

  private final Set<K> keySet = new KeySet();
  private final Set<Entry<K, V>> entrySet = new EntrySet();
  private final Collection<V> values = new Values();

  private final float loadFactor;
  private final int initialCapacity;

  @SuppressWarnings("unchecked")
  private static final AtomicIntegerFieldUpdater<Table> sizeUpdater =
      AtomicIntegerFieldUpdater.newUpdater(Table.class, "size");

  @SuppressWarnings("unchecked")
  private static final AtomicReferenceFieldUpdater<SecureHashMap, Table> tableUpdater =
      AtomicReferenceFieldUpdater.newUpdater(SecureHashMap.class, Table.class, "table");

  @SuppressWarnings("unchecked")
  private static final AtomicReferenceFieldUpdater<Item, Object> valueUpdater =
      AtomicReferenceFieldUpdater.newUpdater(Item.class, Object.class, "value");

  /**
   * Construct a new instance.
   *
   * @param initialCapacity the initial capacity
   * @param loadFactor the load factor
   */
  public SecureHashMap(int initialCapacity, float loadFactor) {
    if (initialCapacity < 0) {
      throw new IllegalArgumentException("Initial capacity must be > 0");
    }
    if (initialCapacity > MAXIMUM_CAPACITY) {
      initialCapacity = MAXIMUM_CAPACITY;
    }
    if (loadFactor <= 0.0 || Float.isNaN(loadFactor) || loadFactor >= 1.0) {
      throw new IllegalArgumentException("Load factor must be between 0.0f and 1.0f");
    }

    int capacity = 1;

    while (capacity < initialCapacity) {
      capacity <<= 1;
    }

    this.loadFactor = loadFactor;
    this.initialCapacity = capacity;

    final Table<K, V> table = new Table<K, V>(capacity, loadFactor);
    tableUpdater.set(this, table);
  }

  /**
   * Construct a new instance.
   *
   * @param loadFactor the load factor
   */
  public SecureHashMap(final float loadFactor) {
    this(DEFAULT_INITIAL_CAPACITY, loadFactor);
  }

  /**
   * Construct a new instance.
   *
   * @param initialCapacity the initial capacity
   */
  public SecureHashMap(final int initialCapacity) {
    this(initialCapacity, DEFAULT_LOAD_FACTOR);
  }

  /** Construct a new instance. */
  public SecureHashMap() {
    this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR);
  }

  private static int hashOf(final Object key) {
    int h = key.hashCode();
    h += (h << 15) ^ 0xffffcd7d;
    h ^= (h >>> 10);
    h += (h << 3);
    h ^= (h >>> 6);
    h += (h << 2) + (h << 14);
    return h ^ (h >>> 16);
  }

  private static boolean equals(final Object o1, final Object o2) {
    return o1 == null ? o2 == null : o1.equals(o2);
  }

  private Item<K, V>[] addItem(final Item<K, V>[] row, final Item<K, V> newItem) {
    if (row == null) {
      return createRow(newItem);
    } else {
      final int length = row.length;
      if (length > MAX_ROW_LENGTH) {
        throw new SecurityException("Excessive map collisions");
      }
      Item<K, V>[] newRow = Arrays.copyOf(row, length + 1);
      newRow[length] = newItem;
      return newRow;
    }
  }

  @SuppressWarnings("unchecked")
  private static <K, V> Item<K, V>[] createRow(final Item<K, V> newItem) {
    return new Item[] {newItem};
  }

  @SuppressWarnings("unchecked")
  private static <K, V> Item<K, V>[] createRow(final int length) {
    return new Item[length];
  }

  private V doPut(K key, V value, boolean ifAbsent, Table<K, V> table) {
    final int hashCode = hashOf(key);
    final AtomicReferenceArray<Item<K, V>[]> array = table.array;
    final int idx = hashCode & array.length() - 1;

    OUTER:
    for (; ; ) {

      // Fetch the table row.
      Item<K, V>[] oldRow = array.get(idx);
      if (oldRow == RESIZED) {
        // row was transported to the new table so recalculate everything
        final V result = doPut(key, value, ifAbsent, table.resizeView);
        // keep a consistent size view though!
        if (result == NONEXISTENT) sizeUpdater.getAndIncrement(table);
        return result;
      }
      if (oldRow != null) {
        // Find the matching Item in the row.
        Item<K, V> oldItem = null;
        for (Item<K, V> tryItem : oldRow) {
          if (equals(key, tryItem.key)) {
            oldItem = tryItem;
            break;
          }
        }
        if (oldItem != null) {
          // entry exists; try to return the old value and try to replace the value if allowed.
          V oldItemValue;
          do {
            oldItemValue = oldItem.value;
            if (oldItemValue == NONEXISTENT) {
              // Key was removed; on the next iteration or two the doornail should be gone.
              continue OUTER;
            }
          } while (!ifAbsent && !valueUpdater.compareAndSet(oldItem, oldItemValue, value));
          return oldItemValue;
        }
        // Row exists but item doesn't.
      }

      // Row doesn't exist, or row exists but item doesn't; try and add a new item to the row.
      final Item<K, V> newItem = new Item<K, V>(key, hashCode, value);
      final Item<K, V>[] newRow = addItem(oldRow, newItem);
      if (!array.compareAndSet(idx, oldRow, newRow)) {
        // Nope, row changed; retry.
        continue;
      }

      // Up the table size.
      final int threshold = table.threshold;
      int newSize = sizeUpdater.incrementAndGet(table);
      // >= 0 is really a sign-bit check
      while (newSize >= 0 && (newSize & 0x7fffffff) > threshold) {
        if (sizeUpdater.compareAndSet(table, newSize, newSize | 0x80000000)) {
          resize(table);
          return nonexistent();
        }
      }
      // Success.
      return nonexistent();
    }
  }

  private void resize(Table<K, V> origTable) {
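    // Note on the approach: resizing doubles the capacity and splits every row of the original
    // table into two rows of the new table (split on the bit hashCode & origCapacity). While a
    // row is being moved it is swapped for the RESIZED marker, so concurrent readers and writers
    // that see the marker retry against resizeView. The sign bit of Table.size serves as a
    // "resize in progress" flag, which is why the size arithmetic below masks with 0x80000000.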
    final AtomicReferenceArray<Item<K, V>[]> origArray = origTable.array;
    final int origCapacity = origArray.length();
    final Table<K, V> newTable = new Table<K, V>(origCapacity << 1, loadFactor);
    // Prevent resize until we're done...
    newTable.size = 0x80000000;
    origTable.resizeView = newTable;
    final AtomicReferenceArray<Item<K, V>[]> newArray = newTable.array;

    for (int i = 0; i < origCapacity; i++) {
      // for each row, try to resize into two new rows
      Item<K, V>[] origRow, newRow0, newRow1;
      do {
        origRow = origArray.get(i);
        if (origRow != null) {
          int count0 = 0, count1 = 0;
          for (Item<K, V> item : origRow) {
            if ((item.hashCode & origCapacity) == 0) {
              count0++;
            } else {
              count1++;
            }
          }
          if (count0 != 0) {
            newRow0 = createRow(count0);
            int j = 0;
            for (Item<K, V> item : origRow) {
              if ((item.hashCode & origCapacity) == 0) {
                newRow0[j++] = item;
              }
            }
            newArray.lazySet(i, newRow0);
          }
          if (count1 != 0) {
            newRow1 = createRow(count1);
            int j = 0;
            for (Item<K, V> item : origRow) {
              if ((item.hashCode & origCapacity) != 0) {
                newRow1[j++] = item;
              }
            }
            newArray.lazySet(i + origCapacity, newRow1);
          }
        }
      } while (!origArray.compareAndSet(i, origRow, SecureHashMap.<K, V>resized()));
      if (origRow != null) sizeUpdater.getAndAdd(newTable, origRow.length);
    }

    int size;
    do {
      size = newTable.size;
      if ((size & 0x7fffffff) >= newTable.threshold) {
        // shorter path for reads and writes
        table = newTable;
        // then time for another resize, right away
        resize(newTable);
        return;
      }
    } while (!sizeUpdater.compareAndSet(newTable, size, size & 0x7fffffff));

    // All done, plug in the new table
    table = newTable;
  }

  private static <K, V> Item<K, V>[] remove(Item<K, V>[] row, int idx) {
    final int len = row.length;
    assert idx < len;
    if (len == 1) {
      return null;
    }
    @SuppressWarnings("unchecked")
    Item<K, V>[] newRow = new Item[len - 1];
    if (idx > 0) {
      System.arraycopy(row, 0, newRow, 0, idx);
    }
    if (idx < len - 1) {
      System.arraycopy(row, idx + 1, newRow, idx, len - 1 - idx);
    }
    return newRow;
  }

  public V putIfAbsent(final K key, final V value) {
    final V result = doPut(key, value, true, table);
    return result == NONEXISTENT ? null : result;
  }

  public boolean remove(final Object objectKey, final Object objectValue) {
    // Get type-safe key and value.
    @SuppressWarnings("unchecked")
    final K key = (K) objectKey;
    @SuppressWarnings("unchecked")
    final V value = (V) objectValue;
    return doRemove(key, value, table);
  }

  private boolean doRemove(final Item<K, V> item, final Table<K, V> table) {
    int hashCode = item.hashCode;

    final AtomicReferenceArray<Item<K, V>[]> array = table.array;
    final int idx = hashCode & array.length() - 1;

    Item<K, V>[] oldRow;

    for (; ; ) {
      oldRow = array.get(idx);
      if (oldRow == null) {
        return false;
      }
      if (oldRow == RESIZED) {
        boolean result;
        if (result = doRemove(item, table.resizeView)) {
          sizeUpdater.getAndDecrement(table);
        }
        return result;
      }

      int rowIdx = -1;
      for (int i = 0; i < oldRow.length; i++) {
        if (item == oldRow[i]) {
          rowIdx = i;
          break;
        }
      }
      if (rowIdx == -1) {
        return false;
      }
      if (array.compareAndSet(idx, oldRow, remove(oldRow, rowIdx))) {
        sizeUpdater.getAndDecrement(table);
        return true;
      }
      // row changed, cycle back again
    }
  }

  private boolean doRemove(final K key, final V value, final Table<K, V> table) {

    final int hashCode = hashOf(key);

    final AtomicReferenceArray<Item<K, V>[]> array = table.array;
    final int idx = hashCode & array.length() - 1;

    Item<K, V>[] oldRow;

    // Fetch the table row.
    oldRow = array.get(idx);
    if (oldRow == null) {
      // no match for the key
      return false;
    }
    if (oldRow == RESIZED) {
      boolean result;
      if (result = doRemove(key, value, table.resizeView)) {
        // keep size consistent
        sizeUpdater.getAndDecrement(table);
      }
      return result;
    }

    // Find the matching Item in the row.
    Item<K, V> oldItem = null;
    V oldValue = null;
    int rowIdx = -1;
    for (int i = 0; i < oldRow.length; i++) {
      Item<K, V> tryItem = oldRow[i];
      if (equals(key, tryItem.key)) {
        if (equals(value, oldValue = tryItem.value)) {
          oldItem = tryItem;
          rowIdx = i;
          break;
        } else {
          // value doesn't match; exit without changing map.
          return false;
        }
      }
    }
    if (oldItem == null) {
      // no such entry exists.
      return false;
    }

    while (!valueUpdater.compareAndSet(oldItem, oldValue, NONEXISTENT)) {
      if (equals(value, oldValue = oldItem.value)) {
        // Values are equal; try marking it as removed again.
        continue;
      }
      // Value was changed to a non-equal value.
      return false;
    }

    // Now we are free to remove the item from the row.
    if (array.compareAndSet(idx, oldRow, remove(oldRow, rowIdx))) {
      // Adjust the table size, since we are definitely the ones to be removing this item from the
      // table.
      sizeUpdater.decrementAndGet(table);
      return true;
    } else {
      // The old row changed so retry by the other algorithm
      return doRemove(oldItem, table);
    }
  }

  @SuppressWarnings("unchecked")
  public V remove(final Object objectKey) {
    final V result = doRemove((K) objectKey, table);
    return result == NONEXISTENT ? null : result;
  }

  private V doRemove(final K key, final Table<K, V> table) {
    final int hashCode = hashOf(key);

    final AtomicReferenceArray<Item<K, V>[]> array = table.array;
    final int idx = hashCode & array.length() - 1;

    // Fetch the table row.
    Item<K, V>[] oldRow = array.get(idx);
    if (oldRow == null) {
      // no match for the key
      return nonexistent();
    }
    if (oldRow == RESIZED) {
      V result;
      if ((result = doRemove(key, table.resizeView)) != NONEXISTENT) {
        // keep size consistent
        sizeUpdater.getAndDecrement(table);
      }
      return result;
    }

    // Find the matching Item in the row.
    Item<K, V> oldItem = null;
    int rowIdx = -1;
    for (int i = 0; i < oldRow.length; i++) {
      Item<K, V> tryItem = oldRow[i];
      if (equals(key, tryItem.key)) {
        oldItem = tryItem;
        rowIdx = i;
        break;
      }
    }
    if (oldItem == null) {
      // no such entry exists.
      return nonexistent();
    }

    // Mark the item as "removed".
    @SuppressWarnings("unchecked")
    V oldValue = (V) valueUpdater.getAndSet(oldItem, NONEXISTENT);
    if (oldValue == NONEXISTENT) {
      // Someone else beat us to it.
      return nonexistent();
    }

    // Now we are free to remove the item from the row.
    if (array.compareAndSet(idx, oldRow, remove(oldRow, rowIdx))) {
      // Adjust the table size, since we are definitely the ones to be removing this item from the
      // table.
      sizeUpdater.decrementAndGet(table);

      // Item is removed from the row; we are done here.
      return oldValue;
    } else {
      boolean result = doRemove(oldItem, table);
      assert result;
      return oldValue;
    }
  }

  @SuppressWarnings("unchecked")
  private static <V> V nonexistent() {
    return (V) NONEXISTENT;
  }

  @SuppressWarnings("unchecked")
  private static <K, V> Item<K, V>[] resized() {
    return (Item<K, V>[]) RESIZED;
  }

  public boolean replace(final K key, final V oldValue, final V newValue) {
    return doReplace(key, oldValue, newValue, table);
  }

  private boolean doReplace(
      final K key, final V oldValue, final V newValue, final Table<K, V> table) {
    final int hashCode = hashOf(key);
    final AtomicReferenceArray<Item<K, V>[]> array = table.array;
    final int idx = hashCode & array.length() - 1;

    // Fetch the table row.
    Item<K, V>[] oldRow = array.get(idx);
    if (oldRow == null) {
      // no match for the key
      return false;
    }
    if (oldRow == RESIZED) {
      return doReplace(key, oldValue, newValue, table.resizeView);
    }

    // Find the matching Item in the row.
    Item<K, V> oldItem = null;
    V oldRowValue = null;
    for (Item<K, V> tryItem : oldRow) {
      if (equals(key, tryItem.key)) {
        if (equals(oldValue, oldRowValue = tryItem.value)) {
          oldItem = tryItem;
          break;
        } else {
          // value doesn't match; exit without changing map.
          return false;
        }
      }
    }
    if (oldItem == null) {
      // no such entry exists.
      return false;
    }

    // Now swap the item.
    while (!valueUpdater.compareAndSet(oldItem, oldRowValue, newValue)) {
      if (equals(oldValue, oldRowValue = oldItem.value)) {
        // Values are equal; try swapping it again.
        continue;
      }
      // Value was changed to a non-equal value.
      return false;
    }

    // Item is swapped; we are done here.
    return true;
  }

  public V replace(final K key, final V value) {
    final V result = doReplace(key, value, table);
    return result == NONEXISTENT ? null : result;
  }

  private V doReplace(final K key, final V value, final Table<K, V> table) {
    final int hashCode = hashOf(key);
    final AtomicReferenceArray<Item<K, V>[]> array = table.array;
    final int idx = hashCode & array.length() - 1;

    // Fetch the table row.
    Item<K, V>[] oldRow = array.get(idx);
    if (oldRow == null) {
      // no match for the key
      return nonexistent();
    }
    if (oldRow == RESIZED) {
      return doReplace(key, value, table.resizeView);
    }

    // Find the matching Item in the row.
    Item<K, V> oldItem = null;
    for (Item<K, V> tryItem : oldRow) {
      if (equals(key, tryItem.key)) {
        oldItem = tryItem;
        break;
      }
    }
    if (oldItem == null) {
      // no such entry exists.
      return nonexistent();
    }

    // Now swap the item.
    @SuppressWarnings("unchecked")
    V oldRowValue = (V) valueUpdater.getAndSet(oldItem, value);
    if (oldRowValue == NONEXISTENT) {
      // Item was removed.
      return nonexistent();
    }

    // Item is swapped; we are done here.
    return oldRowValue;
  }

  public int size() {
    return table.size & 0x7fffffff;
  }

  private V doGet(final Table<K, V> table, final K key) {
    final AtomicReferenceArray<Item<K, V>[]> array = table.array;
    final Item<K, V>[] row = array.get(hashOf(key) & (array.length() - 1));
    if (row != null) {
      for (Item<K, V> item : row) {
        if (equals(key, item.key)) {
          return item.value;
        }
      }
    }
    return nonexistent();
  }

  public boolean containsKey(final Object key) {
    @SuppressWarnings("unchecked")
    final V value = doGet(table, (K) key);
    return value != NONEXISTENT;
  }

  public V get(final Object key) {
    @SuppressWarnings("unchecked")
    final V value = doGet(table, (K) key);
    return value == NONEXISTENT ? null : value;
  }

  public V put(final K key, final V value) {
    final V result = doPut(key, value, false, table);
    return result == NONEXISTENT ? null : result;
  }

  public void clear() {
    table = new Table<K, V>(initialCapacity, loadFactor);
  }

  public Set<Entry<K, V>> entrySet() {
    return entrySet;
  }

  public Collection<V> values() {
    return values;
  }

  public Set<K> keySet() {
    return keySet;
  }

  final class KeySet extends AbstractSet<K> implements Set<K> {

    public void clear() {
      SecureHashMap.this.clear();
    }

    public boolean contains(final Object o) {
      return containsKey(o);
    }

    @SuppressWarnings("unchecked")
    public boolean remove(final Object o) {
      return doRemove((K) o, table) != NONEXISTENT;
    }

    public Iterator<K> iterator() {
      return new KeyIterator();
    }

    public Object[] toArray() {
      ArrayList<Object> list = new ArrayList<Object>(size());
      list.addAll(this);
      return list.toArray();
    }

    public <T> T[] toArray(final T[] a) {
      ArrayList<T> list = new ArrayList<T>();
      list.addAll((Collection<T>) this);
      return list.toArray(a);
    }

    public boolean add(final K k) {
      return doPut(k, null, true, table) == NONEXISTENT;
    }

    public int size() {
      return SecureHashMap.this.size();
    }
  }

  final class Values extends AbstractCollection<V> implements Collection<V> {

    public void clear() {
      SecureHashMap.this.clear();
    }

    public boolean contains(final Object o) {
      return containsValue(o);
    }

    public Iterator<V> iterator() {
      return new ValueIterator();
    }

    public Object[] toArray() {
      ArrayList<Object> list = new ArrayList<Object>(size());
      list.addAll(this);
      return list.toArray();
    }

    public <T> T[] toArray(final T[] a) {
      ArrayList<T> list = new ArrayList<T>();
      list.addAll((Collection<T>) this);
      return list.toArray(a);
    }

    public int size() {
      return SecureHashMap.this.size();
    }
  }

  final class EntrySet extends AbstractSet<Entry<K, V>> implements Set<Entry<K, V>> {

    public Iterator<Entry<K, V>> iterator() {
      return new EntryIterator();
    }

    public boolean add(final Entry<K, V> entry) {
      return doPut(entry.getKey(), entry.getValue(), true, table) == NONEXISTENT;
    }

    @SuppressWarnings("unchecked")
    public boolean remove(final Object o) {
      return o instanceof Entry && remove((Entry<K, V>) o);
    }

    public boolean remove(final Entry<K, V> entry) {
      return doRemove(entry.getKey(), entry.getValue(), table);
    }

    public void clear() {
      SecureHashMap.this.clear();
    }

    public Object[] toArray() {
      ArrayList<Object> list = new ArrayList<Object>(size());
      list.addAll(this);
      return list.toArray();
    }

    public <T> T[] toArray(final T[] a) {
      ArrayList<T> list = new ArrayList<T>();
      list.addAll((Set<T>) this);
      return list.toArray(a);
    }

    @SuppressWarnings("unchecked")
    public boolean contains(final Object o) {
      return o instanceof Entry && contains((Entry<K, V>) o);
    }

    public boolean contains(final Entry<K, V> entry) {
      final V tableValue = doGet(table, entry.getKey());
      final V entryValue = entry.getValue();
      return tableValue == null ? entryValue == null : tableValue.equals(entryValue);
    }

    public int size() {
      return SecureHashMap.this.size();
    }
  }

  abstract class TableIterator implements Iterator<Entry<K, V>> {
    public abstract Item<K, V> next();

    abstract V nextValue();
  }

  final class RowIterator extends TableIterator {
    private final Table<K, V> table;
    Item<K, V>[] row;

    private int idx;
    private Item<K, V> next;
    private Item<K, V> remove;

    RowIterator(final Table<K, V> table, final Item<K, V>[] row) {
      this.table = table;
      this.row = row;
    }

    public boolean hasNext() {
      if (next == null) {
        final Item<K, V>[] row = this.row;
        if (row == null || idx == row.length) {
          return false;
        }
        next = row[idx++];
      }
      return true;
    }

    V nextValue() {
      V value;
      do {
        if (next == null) {
          final Item<K, V>[] row = this.row;
          if (row == null || idx == row.length) {
            return nonexistent();
          }
          next = row[idx++];
        }
        value = next.value;
      } while (value == NONEXISTENT);
      next = null;
      return value;
    }

    public Item<K, V> next() {
      if (hasNext())
        try {
          return next;
        } finally {
          remove = next;
          next = null;
        }
      throw new NoSuchElementException();
    }

    public void remove() {
      final Item<K, V> remove = this.remove;
      if (remove == null) {
        throw new IllegalStateException("next() not yet called");
      }
      if (valueUpdater.getAndSet(remove, NONEXISTENT) == NONEXISTENT) {
        // someone else beat us to it; this is idempotent-ish
        return;
      }
      // item guaranteed to be in the map... somewhere
      this.remove = null;
      doRemove(remove, table);
    }
  }

  final class BranchIterator extends TableIterator {
    private final TableIterator branch0;
    private final TableIterator branch1;

    private boolean branch;

    BranchIterator(final TableIterator branch0, final TableIterator branch1) {
      this.branch0 = branch0;
      this.branch1 = branch1;
    }

    public boolean hasNext() {
      return branch0.hasNext() || branch1.hasNext();
    }

    public Item<K, V> next() {
      if (branch) {
        return branch1.next();
      }
      if (branch0.hasNext()) {
        return branch0.next();
      }
      branch = true;
      return branch1.next();
    }

    V nextValue() {
      if (branch) {
        return branch1.nextValue();
      }
      V value = branch0.nextValue();
      if (value != NONEXISTENT) {
        return value;
      }
      branch = true;
      return branch1.nextValue();
    }

    public void remove() {
      // remove from the branch that produced the most recently returned item
      if (branch) {
        branch1.remove();
      } else {
        branch0.remove();
      }
    }
  }

  private TableIterator createRowIterator(Table<K, V> table, int rowIdx) {
    final AtomicReferenceArray<Item<K, V>[]> array = table.array;
    final Item<K, V>[] row = array.get(rowIdx);
    if (row == RESIZED) {
      final Table<K, V> resizeView = table.resizeView;
      return new BranchIterator(
          createRowIterator(resizeView, rowIdx),
          createRowIterator(resizeView, rowIdx + array.length()));
    } else {
      return new RowIterator(table, row);
    }
  }

  final class EntryIterator implements Iterator<Entry<K, V>> {
    private final Table<K, V> table = SecureHashMap.this.table;
    private TableIterator tableIterator;
    private TableIterator removeIterator;
    private int tableIdx;
    private Item<K, V> next;

    public boolean hasNext() {
      while (next == null) {
        if (tableIdx == table.array.length()) {
          return false;
        }
        if (tableIterator == null) {
          int rowIdx = tableIdx++;
          if (table.array.get(rowIdx) != null) {
            tableIterator = createRowIterator(table, rowIdx);
          }
        }
        if (tableIterator != null) {
          if (tableIterator.hasNext()) {
            next = tableIterator.next();
            return true;
          } else {
            tableIterator = null;
          }
        }
      }
      return true;
    }

    public Entry<K, V> next() {
      if (hasNext())
        try {
          return next;
        } finally {
          removeIterator = tableIterator;
          next = null;
        }
      throw new NoSuchElementException();
    }

    public void remove() {
      final TableIterator removeIterator = this.removeIterator;
      if (removeIterator == null) {
        throw new IllegalStateException();
      } else
        try {
          removeIterator.remove();
        } finally {
          this.removeIterator = null;
        }
    }
  }

  final class KeyIterator implements Iterator<K> {
    private final Table<K, V> table = SecureHashMap.this.table;
    private TableIterator tableIterator;
    private TableIterator removeIterator;
    private int tableIdx;
    private Item<K, V> next;

    public boolean hasNext() {
      while (next == null) {
        if (tableIdx == table.array.length() && tableIterator == null) {
          return false;
        }
        if (tableIterator == null) {
          int rowIdx = tableIdx++;
          if (table.array.get(rowIdx) != null) {
            tableIterator = createRowIterator(table, rowIdx);
          }
        }
        if (tableIterator != null) {
          if (tableIterator.hasNext()) {
            next = tableIterator.next();
            return true;
          } else {
            tableIterator = null;
          }
        }
      }
      return true;
    }

    public K next() {
      if (hasNext())
        try {
          return next.key;
        } finally {
          removeIterator = tableIterator;
          next = null;
        }
      throw new NoSuchElementException();
    }

    public void remove() {
      final TableIterator removeIterator = this.removeIterator;
      if (removeIterator == null) {
        throw new IllegalStateException();
      } else
        try {
          removeIterator.remove();
        } finally {
          this.removeIterator = null;
        }
    }
  }

  final class ValueIterator implements Iterator<V> {
    private final Table<K, V> table = SecureHashMap.this.table;
    private TableIterator tableIterator;
    private TableIterator removeIterator;
    private int tableIdx;
    private V next = nonexistent();

    public boolean hasNext() {
      while (next == NONEXISTENT) {
        if (tableIdx == table.array.length() && tableIterator == null) {
          return false;
        }
        if (tableIterator == null) {
          int rowIdx = tableIdx++;
          if (table.array.get(rowIdx) != null) {
            tableIterator = createRowIterator(table, rowIdx);
          }
        }
        if (tableIterator != null) {
          next = tableIterator.nextValue();
          if (next == NONEXISTENT) {
            tableIterator = null;
          }
        }
      }
      return true;
    }

    public V next() {
      if (hasNext())
        try {
          return next;
        } finally {
          removeIterator = tableIterator;
          next = nonexistent();
        }
      throw new NoSuchElementException();
    }

    public void remove() {
      final TableIterator removeIterator = this.removeIterator;
      if (removeIterator == null) {
        throw new IllegalStateException();
      } else
        try {
          removeIterator.remove();
        } finally {
          this.removeIterator = null;
        }
    }
  }

  static final class Table<K, V> {
    final AtomicReferenceArray<Item<K, V>[]> array;
    final int threshold;
    /** Bits 0-30 are size; bit 31 is 1 if the table is being resized. */
    volatile int size;

    volatile Table<K, V> resizeView;

    private Table(int capacity, float loadFactor) {
      array = new AtomicReferenceArray<Item<K, V>[]>(capacity);
      threshold = capacity == MAXIMUM_CAPACITY ? Integer.MAX_VALUE : (int) (capacity * loadFactor);
    }
  }

  static final class Item<K, V> implements Entry<K, V> {
    private final K key;
    private final int hashCode;
    volatile V value;

    Item(final K key, final int hashCode, final V value) {
      this.key = key;
      this.hashCode = hashCode;
      //noinspection ThisEscapedInObjectConstruction
      valueUpdater.lazySet(this, value);
    }

    public K getKey() {
      return key;
    }

    public V getValue() {
      V value = this.value;
      if (value == NONEXISTENT) {
        throw new IllegalStateException("Already removed");
      }
      return value;
    }

    public V setValue(final V value) {
      V oldValue;
      do {
        oldValue = this.value;
        if (oldValue == NONEXISTENT) {
          throw new IllegalStateException("Already removed");
        }
      } while (!valueUpdater.compareAndSet(this, oldValue, value));
      return oldValue;
    }

    public int hashCode() {
      return hashCode;
    }

    public boolean equals(final Object obj) {
      return obj instanceof Item && equals((Item<?, ?>) obj);
    }

    public boolean equals(final Item<?, ?> obj) {
      return obj != null && hashCode == obj.hashCode && key.equals(obj.key);
    }
  }
}
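
The doPut()/addItem() pair above treats each bucket row as an immutable array that is replaced wholesale through AtomicReferenceArray.compareAndSet, retrying when another writer changes the row first. Below is a minimal sketch of that copy-on-write row update, with illustrative names and with the resize and size/threshold bookkeeping of the original left out:

import java.util.Arrays;
import java.util.concurrent.atomic.AtomicReferenceArray;

// Minimal sketch of the copy-on-write row update used by doPut()/addItem() above.
// Class and method names are illustrative assumptions; this is a lock-free string
// set, not a map, and it never resizes.
final class CopyOnWriteRowsSketch {
  private final AtomicReferenceArray<String[]> rows;

  CopyOnWriteRowsSketch(int capacity) {
    rows = new AtomicReferenceArray<>(capacity); // capacity assumed to be a power of two
  }

  /** Adds a value to its bucket if not already present; returns true if the set changed. */
  boolean add(String value) {
    int idx = value.hashCode() & (rows.length() - 1);
    for (;;) {
      String[] oldRow = rows.get(idx);
      if (oldRow != null) {
        for (String existing : oldRow) {
          if (existing.equals(value)) {
            return false; // already present
          }
        }
      }
      // Build a new row containing the old entries plus the new value.
      final String[] newRow;
      if (oldRow == null) {
        newRow = new String[] {value};
      } else {
        newRow = Arrays.copyOf(oldRow, oldRow.length + 1);
        newRow[oldRow.length] = value;
      }
      if (rows.compareAndSet(idx, oldRow, newRow)) {
        return true; // the new row was swapped in atomically
      }
      // else: another thread modified the row concurrently, re-read and retry
    }
  }

  public static void main(String[] args) {
    CopyOnWriteRowsSketch set = new CopyOnWriteRowsSketch(16);
    System.out.println(set.add("a")); // true
    System.out.println(set.add("a")); // false
  }
}
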
Code Example #23
@SuppressWarnings("deprecation")
final class ConcreteResourceRegistration extends AbstractResourceRegistration {

  @SuppressWarnings("unused")
  private volatile Map<String, NodeSubregistry> children;

  @SuppressWarnings("unused")
  private volatile Map<String, OperationEntry> operations;

  private final ResourceDefinition resourceDefinition;
  private final List<AccessConstraintDefinition> accessConstraintDefinitions;

  @SuppressWarnings("unused")
  private volatile Map<String, AttributeAccess> attributes;

  private final AtomicBoolean runtimeOnly = new AtomicBoolean();
  private final AccessConstraintUtilizationRegistry constraintUtilizationRegistry;

  private static final AtomicMapFieldUpdater<ConcreteResourceRegistration, String, NodeSubregistry>
      childrenUpdater =
          AtomicMapFieldUpdater.newMapUpdater(
              AtomicReferenceFieldUpdater.newUpdater(
                  ConcreteResourceRegistration.class, Map.class, "children"));
  private static final AtomicMapFieldUpdater<ConcreteResourceRegistration, String, OperationEntry>
      operationsUpdater =
          AtomicMapFieldUpdater.newMapUpdater(
              AtomicReferenceFieldUpdater.newUpdater(
                  ConcreteResourceRegistration.class, Map.class, "operations"));
  private static final AtomicMapFieldUpdater<ConcreteResourceRegistration, String, AttributeAccess>
      attributesUpdater =
          AtomicMapFieldUpdater.newMapUpdater(
              AtomicReferenceFieldUpdater.newUpdater(
                  ConcreteResourceRegistration.class, Map.class, "attributes"));

  ConcreteResourceRegistration(
      final String valueString,
      final NodeSubregistry parent,
      final ResourceDefinition definition,
      AccessConstraintUtilizationRegistry constraintUtilizationRegistry,
      final boolean runtimeOnly) {
    super(valueString, parent);
    this.constraintUtilizationRegistry = constraintUtilizationRegistry;
    childrenUpdater.clear(this);
    operationsUpdater.clear(this);
    attributesUpdater.clear(this);
    this.resourceDefinition = definition;
    this.runtimeOnly.set(runtimeOnly);
    this.accessConstraintDefinitions = buildAccessConstraints();
  }

  @Override
  public boolean isRuntimeOnly() {
    checkPermission();
    return runtimeOnly.get();
  }

  @Override
  public void setRuntimeOnly(final boolean runtimeOnly) {
    checkPermission();
    this.runtimeOnly.set(runtimeOnly);
  }

  @Override
  public boolean isRemote() {
    checkPermission();
    return false;
  }

  @Override
  public List<AccessConstraintDefinition> getAccessConstraints() {
    checkPermission();
    return accessConstraintDefinitions;
  }

  private List<AccessConstraintDefinition> buildAccessConstraints() {
    AbstractResourceRegistration reg = this;
    List<AccessConstraintDefinition> list = new ArrayList<AccessConstraintDefinition>();
    while (reg != null) {
      reg.addAccessConstraints(list);
      NodeSubregistry parent = reg.getParent();
      reg = parent == null ? null : parent.getParent();
    }
    return Collections.unmodifiableList(list);
  }

  @Override
  void addAccessConstraints(List<AccessConstraintDefinition> list) {
    if (resourceDefinition instanceof ConstrainedResourceDefinition) {
      list.addAll(((ConstrainedResourceDefinition) resourceDefinition).getAccessConstraints());
    }
  }

  @Override
  public ManagementResourceRegistration registerSubModel(
      final ResourceDefinition resourceDefinition) {
    if (resourceDefinition == null) {
      throw ControllerLogger.ROOT_LOGGER.nullVar("resourceDefinition");
    }
    final PathElement address = resourceDefinition.getPathElement();
    if (address == null) {
      throw ControllerLogger.ROOT_LOGGER.cannotRegisterSubmodelWithNullPath();
    }
    if (isRuntimeOnly()) {
      throw ControllerLogger.ROOT_LOGGER.cannotRegisterSubmodel();
    }
    final AbstractResourceRegistration existing =
        getSubRegistration(PathAddress.pathAddress(address));
    if (existing != null && existing.getValueString().equals(address.getValue())) {
      throw ControllerLogger.ROOT_LOGGER.nodeAlreadyRegistered(existing.getLocationString());
    }
    final String key = address.getKey();
    final NodeSubregistry child = getOrCreateSubregistry(key);
    final ManagementResourceRegistration resourceRegistration =
        child.register(address.getValue(), resourceDefinition, false);
    resourceDefinition.registerAttributes(resourceRegistration);
    resourceDefinition.registerOperations(resourceRegistration);
    resourceDefinition.registerChildren(resourceRegistration);
    if (constraintUtilizationRegistry != null
        && resourceDefinition instanceof ConstrainedResourceDefinition) {
      PathAddress childAddress = getPathAddress().append(address);
      List<AccessConstraintDefinition> constraintDefinitions =
          ((ConstrainedResourceDefinition) resourceDefinition).getAccessConstraints();
      for (AccessConstraintDefinition acd : constraintDefinitions) {
        constraintUtilizationRegistry.registerAccessConstraintResourceUtilization(
            acd.getKey(), childAddress);
      }
    }
    return resourceRegistration;
  }

  @Override
  public void registerOperationHandler(
      OperationDefinition definition, OperationStepHandler handler, boolean inherited) {
    checkPermission();
    if (operationsUpdater.putIfAbsent(
            this,
            definition.getName(),
            new OperationEntry(
                handler,
                definition.getDescriptionProvider(),
                inherited,
                definition.getEntryType(),
                definition.getFlags(),
                definition.getAccessConstraints()))
        != null) {
      throw alreadyRegistered("operation handler", definition.getName());
    }
    registerOperationAccessConstraints(definition);
  }

  public void unregisterSubModel(final PathElement address) throws IllegalArgumentException {
    final Map<String, NodeSubregistry> snapshot = childrenUpdater.get(this);
    final NodeSubregistry subregistry = snapshot.get(address.getKey());
    if (subregistry != null) {
      subregistry.unregisterSubModel(address.getValue());
    }
    unregisterAccessConstraints(address);
  }

  @Override
  OperationEntry getOperationEntry(
      final ListIterator<PathElement> iterator,
      final String operationName,
      OperationEntry inherited) {
    if (iterator.hasNext()) {
      OperationEntry ourInherited = getInheritableOperationEntry(operationName);
      OperationEntry inheritance = ourInherited == null ? inherited : ourInherited;
      final PathElement next = iterator.next();
      final NodeSubregistry subregistry = children.get(next.getKey());
      if (subregistry == null) {
        return null;
      }
      return subregistry.getOperationEntry(iterator, next.getValue(), operationName, inheritance);
    } else {
      checkPermission();
      final OperationEntry entry = operationsUpdater.get(this, operationName);
      return entry == null ? inherited : entry;
    }
  }

  @Override
  OperationEntry getInheritableOperationEntry(final String operationName) {
    checkPermission();
    final OperationEntry entry = operationsUpdater.get(this, operationName);
    if (entry != null && entry.isInherited()) {
      return entry;
    }
    return null;
  }

  @Override
  void getOperationDescriptions(
      final ListIterator<PathElement> iterator,
      final Map<String, OperationEntry> providers,
      final boolean inherited) {

    if (!iterator.hasNext()) {
      checkPermission();
      providers.putAll(operationsUpdater.get(this));
      if (inherited) {
        getInheritedOperations(providers, true);
      }
      return;
    }
    final PathElement next = iterator.next();
    try {
      final String key = next.getKey();
      final Map<String, NodeSubregistry> snapshot = childrenUpdater.get(this);
      final NodeSubregistry subregistry = snapshot.get(key);
      if (subregistry != null) {
        subregistry.getHandlers(iterator, next.getValue(), providers, inherited);
      }
    } finally {
      iterator.previous();
    }
  }

  @Override
  void getInheritedOperationEntries(final Map<String, OperationEntry> providers) {
    checkPermission();
    for (final Map.Entry<String, OperationEntry> entry : operationsUpdater.get(this).entrySet()) {
      if (entry.getValue().isInherited() && !providers.containsKey(entry.getKey())) {
        providers.put(entry.getKey(), entry.getValue());
      }
    }
  }

  @Override
  public void registerOperationHandler(
      final String operationName,
      final OperationStepHandler handler,
      final DescriptionProvider descriptionProvider,
      final boolean inherited,
      EntryType entryType) {
    checkPermission();
    if (operationsUpdater.putIfAbsent(
            this,
            operationName,
            new OperationEntry(handler, descriptionProvider, inherited, entryType))
        != null) {
      throw alreadyRegistered("operation handler", operationName);
    }
  }

  @Override
  public void registerOperationHandler(
      final String operationName,
      final OperationStepHandler handler,
      final DescriptionProvider descriptionProvider,
      final boolean inherited,
      EntryType entryType,
      EnumSet<OperationEntry.Flag> flags) {
    checkPermission();
    if (operationsUpdater.putIfAbsent(
            this,
            operationName,
            new OperationEntry(handler, descriptionProvider, inherited, entryType, flags, null))
        != null) {
      throw alreadyRegistered("operation handler", operationName);
    }
  }

  @Override
  public void unregisterOperationHandler(final String operationName) {
    checkPermission();
    if (operationsUpdater.remove(this, operationName) == null) {
      throw operationNotRegisteredException(operationName, resourceDefinition.getPathElement());
    }
  }

  @Override
  public void registerReadWriteAttribute(
      final AttributeDefinition definition,
      final OperationStepHandler readHandler,
      final OperationStepHandler writeHandler) {
    checkPermission();
    final EnumSet<AttributeAccess.Flag> flags = definition.getFlags();
    final String attributeName = definition.getName();
    AttributeAccess.Storage storage =
        (flags != null && flags.contains(AttributeAccess.Flag.STORAGE_RUNTIME))
            ? Storage.RUNTIME
            : Storage.CONFIGURATION;
    AttributeAccess aa =
        new AttributeAccess(
            AccessType.READ_WRITE, storage, readHandler, writeHandler, definition, flags);
    if (attributesUpdater.putIfAbsent(this, attributeName, aa) != null) {
      throw alreadyRegistered("attribute", attributeName);
    }
    registerAttributeAccessConstraints(definition);
  }

  @Override
  public void registerReadOnlyAttribute(
      final String attributeName,
      final OperationStepHandler readHandler,
      AttributeAccess.Storage storage) {
    checkPermission();
    AttributeAccess aa =
        new AttributeAccess(AccessType.READ_ONLY, storage, readHandler, null, null, null);
    if (attributesUpdater.putIfAbsent(this, attributeName, aa) != null) {
      throw alreadyRegistered("attribute", attributeName);
    }
  }

  @Override
  public void registerReadOnlyAttribute(
      final AttributeDefinition definition, final OperationStepHandler readHandler) {
    checkPermission();
    final EnumSet<AttributeAccess.Flag> flags = definition.getFlags();
    final String attributeName = definition.getName();
    AttributeAccess.Storage storage =
        (flags != null && flags.contains(AttributeAccess.Flag.STORAGE_RUNTIME))
            ? Storage.RUNTIME
            : Storage.CONFIGURATION;
    AttributeAccess aa =
        new AttributeAccess(AccessType.READ_ONLY, storage, readHandler, null, definition, flags);
    if (attributesUpdater.putIfAbsent(this, attributeName, aa) != null) {
      throw alreadyRegistered("attribute", attributeName);
    }
    registerAttributeAccessConstraints(definition);
  }

  @Override
  public void unregisterAttribute(String attributeName) {
    checkPermission();
    attributesUpdater.remove(this, attributeName);
  }

  @Override
  public void registerMetric(AttributeDefinition definition, OperationStepHandler metricHandler) {
    checkPermission();
    AttributeAccess aa =
        new AttributeAccess(
            AccessType.METRIC,
            AttributeAccess.Storage.RUNTIME,
            metricHandler,
            null,
            definition,
            definition.getFlags());
    if (attributesUpdater.putIfAbsent(this, definition.getName(), aa) != null) {
      throw alreadyRegistered("attribute", definition.getName());
    }
    registerAttributeAccessConstraints(definition);
  }

  private void registerAttributeAccessConstraints(AttributeDefinition ad) {
    if (constraintUtilizationRegistry != null) {
      for (AccessConstraintDefinition acd : ad.getAccessConstraints()) {
        constraintUtilizationRegistry.registerAccessConstraintAttributeUtilization(
            acd.getKey(), getPathAddress(), ad.getName());
      }
    }
  }

  private void registerOperationAccessConstraints(OperationDefinition od) {
    if (constraintUtilizationRegistry != null) {
      for (AccessConstraintDefinition acd : od.getAccessConstraints()) {
        constraintUtilizationRegistry.registerAccessConstraintOperationUtilization(
            acd.getKey(), getPathAddress(), od.getName());
      }
    }
  }

  private void unregisterAccessConstraints(PathElement childAddress) {
    if (constraintUtilizationRegistry != null) {
      constraintUtilizationRegistry.unregisterAccessConstraintUtilizations(
          getPathAddress().append(childAddress));
    }
  }

  @Override
  public void registerProxyController(final PathElement address, final ProxyController controller)
      throws IllegalArgumentException {
    final AbstractResourceRegistration existing =
        getSubRegistration(PathAddress.pathAddress(address));
    if (existing != null && existing.getValueString().equals(address.getValue())) {
      throw ControllerLogger.ROOT_LOGGER.nodeAlreadyRegistered(existing.getLocationString());
    }
    getOrCreateSubregistry(address.getKey())
        .registerProxyController(address.getValue(), controller);
  }

  @Override
  public void unregisterProxyController(final PathElement address) throws IllegalArgumentException {
    final Map<String, NodeSubregistry> snapshot = childrenUpdater.get(this);
    final NodeSubregistry subregistry = snapshot.get(address.getKey());
    if (subregistry != null) {
      subregistry.unregisterProxyController(address.getValue());
    }
  }

  @Override
  public void registerAlias(
      PathElement address, AliasEntry alias, AbstractResourceRegistration target) {
    getOrCreateSubregistry(address.getKey()).registerAlias(address.getValue(), alias, target);
  }

  @Override
  public void unregisterAlias(PathElement address) {
    final Map<String, NodeSubregistry> snapshot = childrenUpdater.get(this);
    final NodeSubregistry subregistry = snapshot.get(address.getKey());
    if (subregistry != null) {
      subregistry.unregisterAlias(address.getValue());
    }
  }

  NodeSubregistry getOrCreateSubregistry(final String key) {
    for (; ; ) {
      final Map<String, NodeSubregistry> snapshot = childrenUpdater.get(this);
      final NodeSubregistry subregistry = snapshot.get(key);
      if (subregistry != null) {
        return subregistry;
      } else {
        checkPermission();
        final NodeSubregistry newRegistry =
            new NodeSubregistry(key, this, constraintUtilizationRegistry);
        final NodeSubregistry appearing =
            childrenUpdater.putAtomic(this, key, newRegistry, snapshot);
        if (appearing == null) {
          return newRegistry;
        } else if (appearing != newRegistry) {
          // someone else added one
          return appearing;
        }
        // otherwise, retry the loop because the map changed
      }
    }
  }

  @Override
  DescriptionProvider getModelDescription(final ListIterator<PathElement> iterator) {
    if (iterator.hasNext()) {
      final PathElement next = iterator.next();
      final NodeSubregistry subregistry = children.get(next.getKey());
      if (subregistry == null) {
        return null;
      }
      return subregistry.getModelDescription(iterator, next.getValue());
    } else {
      checkPermission();
      return resourceDefinition.getDescriptionProvider(this);
    }
  }

  @Override
  Set<String> getAttributeNames(final ListIterator<PathElement> iterator) {
    if (iterator.hasNext()) {
      final PathElement next = iterator.next();
      final NodeSubregistry subregistry = children.get(next.getKey());
      if (subregistry == null) {
        return Collections.emptySet();
      }
      return subregistry.getAttributeNames(iterator, next.getValue());
    } else {
      checkPermission();
      final Map<String, AttributeAccess> snapshot = attributesUpdater.get(this);
      return snapshot.keySet();
    }
  }

  @Override
  AttributeAccess getAttributeAccess(
      final ListIterator<PathElement> iterator, final String attributeName) {

    if (iterator.hasNext()) {
      final PathElement next = iterator.next();
      final NodeSubregistry subregistry = children.get(next.getKey());
      if (subregistry == null) {
        return null;
      }
      return subregistry.getAttributeAccess(iterator, next.getValue(), attributeName);
    } else {
      checkPermission();
      final Map<String, AttributeAccess> snapshot = attributesUpdater.get(this);
      AttributeAccess access = snapshot.get(attributeName);
      if (access == null && hasNoAlternativeWildcardRegistration()) {
        // If there is metadata for an attribute but no AttributeAccess, assume read-only. It can't
        // be writable without a registered handler. This opens the possibility that out-of-date
        // metadata for attribute "foo" can lead to a read of non-existent-in-model "foo" with
        // an unexpected undefined value returned. But it removes the possibility of a
        // dev forgetting to call registry.registerReadOnlyAttribute("foo", null) resulting
        // in the valid attribute "foo" not being readable.
        DescriptionProvider provider = resourceDefinition.getDescriptionProvider(this);
        if (provider instanceof DefaultResourceDescriptionProvider) {
          return null; // attribute was not registered, so it does not exist; no need to read the
                       // resource description as we won't find anything and would cause a
                       // StackOverflowError
        }
        // TODO: get rid of this fallback loop; after code cleanup we won't need it anymore.
        final ModelNode desc =
            resourceDefinition.getDescriptionProvider(this).getModelDescription(null);
        if (desc.has(ATTRIBUTES) && desc.get(ATTRIBUTES).keys().contains(attributeName)) {
          access =
              new AttributeAccess(
                  AccessType.READ_ONLY, Storage.CONFIGURATION, null, null, null, null);
        }
      }
      return access;
    }
  }

  @Override
  Set<String> getChildNames(final ListIterator<PathElement> iterator) {
    if (iterator.hasNext()) {
      final PathElement next = iterator.next();
      final NodeSubregistry subregistry = children.get(next.getKey());
      if (subregistry == null) {
        return Collections.emptySet();
      }
      return subregistry.getChildNames(iterator, next.getValue());
    } else {
      checkPermission();
      final Map<String, NodeSubregistry> children = this.children;
      if (children != null) {
        return Collections.unmodifiableSet(children.keySet());
      }
      return Collections.emptySet();
    }
  }

  @Override
  Set<PathElement> getChildAddresses(final ListIterator<PathElement> iterator) {
    if (iterator.hasNext()) {
      final PathElement next = iterator.next();
      final NodeSubregistry subregistry = children.get(next.getKey());
      if (subregistry == null) {
        return Collections.emptySet();
      }
      return subregistry.getChildAddresses(iterator, next.getValue());
    } else {
      checkPermission();
      final Map<String, NodeSubregistry> children = this.children;
      if (children != null) {
        final Set<PathElement> elements = new HashSet<PathElement>();
        for (final Map.Entry<String, NodeSubregistry> entry : children.entrySet()) {
          for (final String entryChild : entry.getValue().getChildNames()) {
            elements.add(PathElement.pathElement(entry.getKey(), entryChild));
          }
        }
        return elements;
      }
      return Collections.emptySet();
    }
  }

  @Override
  ProxyController getProxyController(ListIterator<PathElement> iterator) {
    if (iterator.hasNext()) {
      final PathElement next = iterator.next();
      final NodeSubregistry subregistry = children.get(next.getKey());
      if (subregistry == null) {
        return null;
      }
      return subregistry.getProxyController(iterator, next.getValue());
    } else {
      return null;
    }
  }

  @Override
  void getProxyControllers(ListIterator<PathElement> iterator, Set<ProxyController> controllers) {
    if (iterator.hasNext()) {
      final PathElement next = iterator.next();
      final NodeSubregistry subregistry = children.get(next.getKey());
      if (subregistry == null) {
        return;
      }
      if (next.isWildcard()) {
        subregistry.getProxyControllers(iterator, null, controllers);
      } else if (next.isMultiTarget()) {
        for (final String value : next.getSegments()) {
          subregistry.getProxyControllers(iterator, value, controllers);
        }
      } else {
        subregistry.getProxyControllers(iterator, next.getValue(), controllers);
      }
    } else {
      final Map<String, NodeSubregistry> snapshot = childrenUpdater.get(this);
      for (NodeSubregistry subregistry : snapshot.values()) {
        subregistry.getProxyControllers(iterator, null, controllers);
      }
    }
  }

  @Override
  AbstractResourceRegistration getResourceRegistration(ListIterator<PathElement> iterator) {
    if (!iterator.hasNext()) {
      checkPermission();
      return this;
    } else {
      final PathElement address = iterator.next();
      final Map<String, NodeSubregistry> snapshot = childrenUpdater.get(this);
      final NodeSubregistry subregistry = snapshot.get(address.getKey());
      if (subregistry != null) {
        return subregistry.getResourceRegistration(iterator, address.getValue());
      } else {
        return null;
      }
    }
  }

  private IllegalArgumentException alreadyRegistered(final String type, final String name) {
    return ControllerLogger.ROOT_LOGGER.alreadyRegistered(type, name, getLocationString());
  }

  private IllegalArgumentException operationNotRegisteredException(String op, PathElement address) {
    return ControllerLogger.ROOT_LOGGER.operationNotRegisteredException(
        op, PathAddress.pathAddress(address));
  }

  @Override
  public AliasEntry getAliasEntry() {
    checkPermission();
    return null;
  }
}
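The childrenUpdater/operationsUpdater/attributesUpdater fields above layer AtomicMapFieldUpdater over an AtomicReferenceFieldUpdater on a plain Map field, so readers always get an immutable snapshot while writers copy, mutate and CAS the whole map back in; this is why getOrCreateSubregistry retries with a fresh snapshot when putAtomic reports a concurrent change. Below is a rough, hypothetical sketch of that copy-on-write idea, built directly on AtomicReferenceFieldUpdater rather than on the real AtomicMapFieldUpdater API.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

// Hypothetical copy-on-write registry: readers see an immutable snapshot,
// writers copy the map, mutate the copy, and CAS it back in.
final class CowRegistry<V> {
  @SuppressWarnings("rawtypes")
  private static final AtomicReferenceFieldUpdater<CowRegistry, Map> MAP =
      AtomicReferenceFieldUpdater.newUpdater(CowRegistry.class, Map.class, "map");

  private volatile Map<String, V> map = Collections.emptyMap();

  Map<String, V> snapshot() {
    return map; // never mutated in place
  }

  /** Returns the existing value, or null if this call installed {@code value}. */
  V putIfAbsent(String key, V value) {
    for (;;) {
      Map<String, V> current = map;
      V existing = current.get(key);
      if (existing != null) {
        return existing; // someone else registered it first
      }
      Map<String, V> next = new HashMap<>(current);
      next.put(key, value);
      if (MAP.compareAndSet(this, current, Collections.unmodifiableMap(next))) {
        return null; // our copy became the new snapshot
      }
      // the map changed underneath us; retry with a fresh snapshot
    }
  }
}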
Code example #24
  static final class LatestSubscriber<T>
      implements Subscriber<T>, Subscription, Trackable, Producer, Receiver {

    final Subscriber<? super T> actual;

    volatile long requested;

    @SuppressWarnings("rawtypes")
    static final AtomicLongFieldUpdater<LatestSubscriber> REQUESTED =
        AtomicLongFieldUpdater.newUpdater(LatestSubscriber.class, "requested");

    volatile int wip;

    @SuppressWarnings("rawtypes")
    static final AtomicIntegerFieldUpdater<LatestSubscriber> WIP =
        AtomicIntegerFieldUpdater.newUpdater(LatestSubscriber.class, "wip");

    Subscription s;

    Throwable error;
    volatile boolean done;

    volatile boolean cancelled;

    volatile T value;

    @SuppressWarnings("rawtypes")
    static final AtomicReferenceFieldUpdater<LatestSubscriber, Object> VALUE =
        AtomicReferenceFieldUpdater.newUpdater(LatestSubscriber.class, Object.class, "value");

    public LatestSubscriber(Subscriber<? super T> actual) {
      this.actual = actual;
    }

    @Override
    public void request(long n) {
      if (Operators.validate(n)) {
        Operators.getAndAddCap(REQUESTED, this, n);

        drain();
      }
    }

    @Override
    public void cancel() {
      if (!cancelled) {

        cancelled = true;

        s.cancel();

        if (WIP.getAndIncrement(this) == 0) {
          VALUE.lazySet(this, null);
        }
      }
    }

    @Override
    public void onSubscribe(Subscription s) {
      if (Operators.validate(this.s, s)) {
        this.s = s;

        actual.onSubscribe(this);

        s.request(Long.MAX_VALUE);
      }
    }

    @Override
    public void onNext(T t) {
      VALUE.lazySet(this, t);
      drain();
    }

    @Override
    public void onError(Throwable t) {
      error = t;
      done = true;
      drain();
    }

    @Override
    public void onComplete() {
      done = true;
      drain();
    }

    void drain() {
      if (WIP.getAndIncrement(this) != 0) {
        return;
      }
      final Subscriber<? super T> a = actual;

      int missed = 1;

      for (; ; ) {

        if (checkTerminated(done, value == null, a)) {
          return;
        }

        long r = requested;
        long e = 0L;

        while (r != e) {
          boolean d = done;

          @SuppressWarnings("unchecked")
          T v = (T) VALUE.getAndSet(this, null);

          boolean empty = v == null;

          if (checkTerminated(d, empty, a)) {
            return;
          }

          if (empty) {
            break;
          }

          a.onNext(v);

          e++;
        }

        if (r == e && checkTerminated(done, value == null, a)) {
          return;
        }

        if (e != 0L && r != Long.MAX_VALUE) {
          REQUESTED.addAndGet(this, -e);
        }

        missed = WIP.addAndGet(this, -missed);
        if (missed == 0) {
          break;
        }
      }
    }

    boolean checkTerminated(boolean d, boolean empty, Subscriber<? super T> a) {
      if (cancelled) {
        VALUE.lazySet(this, null);
        return true;
      }

      if (d) {
        Throwable e = error;
        if (e != null) {
          VALUE.lazySet(this, null);

          a.onError(e);
          return true;
        } else if (empty) {
          a.onComplete();
          return true;
        }
      }

      return false;
    }

    @Override
    public boolean isCancelled() {
      return cancelled;
    }

    @Override
    public boolean isStarted() {
      return s != null && !cancelled && !done;
    }

    @Override
    public boolean isTerminated() {
      return done;
    }

    @Override
    public Object downstream() {
      return actual;
    }

    @Override
    public long requestedFromDownstream() {
      return requested;
    }

    @Override
    public Throwable getError() {
      return error;
    }

    @Override
    public Object upstream() {
      return s;
    }
  }
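LatestSubscriber conflates values: onNext overwrites a single slot with lazySet, and the wip counter guarantees that only one thread runs the drain loop at a time, so a slow consumer only ever sees the most recent value. Below is a stripped-down, hypothetical sketch of that conflation pattern without the Reactive Streams plumbing; LatestSlot and its publish method are illustrative names.

import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

// Keep-only-the-latest-value conflation: producers overwrite a single slot,
// and a work-in-progress (wip) counter ensures exactly one drain loop runs at a time.
final class LatestSlot<T> {
  volatile T value;
  volatile int wip;

  @SuppressWarnings("rawtypes")
  static final AtomicReferenceFieldUpdater<LatestSlot, Object> VALUE =
      AtomicReferenceFieldUpdater.newUpdater(LatestSlot.class, Object.class, "value");

  @SuppressWarnings("rawtypes")
  static final AtomicIntegerFieldUpdater<LatestSlot> WIP =
      AtomicIntegerFieldUpdater.newUpdater(LatestSlot.class, "wip");

  void publish(T t, java.util.function.Consumer<? super T> consumer) {
    VALUE.lazySet(this, t);              // overwrite: older unconsumed values are dropped
    if (WIP.getAndIncrement(this) != 0) {
      return;                            // another thread is already draining
    }
    int missed = 1;
    for (;;) {
      @SuppressWarnings("unchecked")
      T v = (T) VALUE.getAndSet(this, null);
      if (v != null) {
        consumer.accept(v);              // emit only the most recent value
      }
      missed = WIP.addAndGet(this, -missed);
      if (missed == 0) {
        break;                           // nothing was published while we were draining
      }
    }
  }
}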
Code example #25
/**
 * A token representing the registration of a {@link SelectableChannel} with a {@link Selector}.
 *
 * <p>A selection key is created each time a channel is registered with a selector. A key remains
 * valid until it is <i>cancelled</i> by invoking its {@link #cancel cancel} method, by closing its
 * channel, or by closing its selector. Cancelling a key does not immediately remove it from its
 * selector; it is instead added to the selector's <a href="Selector.html#ks"><i>cancelled-key
 * set</i></a> for removal during the next selection operation. The validity of a key may be tested
 * by invoking its {@link #isValid isValid} method.
 *
 * <p><a name="opsets">
 *
 * <p>A selection key contains two <i>operation sets</i> represented as integer values. Each bit of
 * an operation set denotes a category of selectable operations that are supported by the key's
 * channel.
 *
 * <ul>
 *   <li>
 *       <p>The <i>interest set</i> determines which operation categories will be tested for
 *       readiness the next time one of the selector's selection methods is invoked. The interest
 *       set is initialized with the value given when the key is created; it may later be changed
 *       via the {@link #interestOps(int)} method.
 *   <li>
 *       <p>The <i>ready set</i> identifies the operation categories for which the key's channel has
 *       been detected to be ready by the key's selector. The ready set is initialized to zero when
 *       the key is created; it may later be updated by the selector during a selection operation,
 *       but it cannot be updated directly.
 * </ul>
 *
 * <p>That a selection key's ready set indicates that its channel is ready for some operation
 * category is a hint, but not a guarantee, that an operation in such a category may be performed by
 * a thread without causing the thread to block. A ready set is most likely to be accurate
 * immediately after the completion of a selection operation. It is likely to be made inaccurate by
 * external events and by I/O operations that are invoked upon the corresponding channel.
 *
 * <p>This class defines all known operation-set bits, but precisely which bits are supported by a
 * given channel depends upon the type of the channel. Each subclass of {@link SelectableChannel}
 * defines an {@link SelectableChannel#validOps() validOps()} method which returns a set identifying
 * just those operations that are supported by the channel. An attempt to set or test an
 * operation-set bit that is not supported by a key's channel will result in an appropriate run-time
 * exception.
 *
 * <p>It is often necessary to associate some application-specific data with a selection key, for
 * example an object that represents the state of a higher-level protocol and handles readiness
 * notifications in order to implement that protocol. Selection keys therefore support the
 * <i>attachment</i> of a single arbitrary object to a key. An object can be attached via the {@link
 * #attach attach} method and then later retrieved via the {@link #attachment() attachment} method.
 *
 * <p>Selection keys are safe for use by multiple concurrent threads. The operations of reading and
 * writing the interest set will, in general, be synchronized with certain operations of the
 * selector. Exactly how this synchronization is performed is implementation-dependent: In a naive
 * implementation, reading or writing the interest set may block indefinitely if a selection
 * operation is already in progress; in a high-performance implementation, reading or writing the
 * interest set may block briefly, if at all. In any case, a selection operation will always use the
 * interest-set value that was current at the moment that the operation began.
 *
 * @author Mark Reinhold
 * @author JSR-51 Expert Group
 * @since 1.4
 * @see SelectableChannel
 * @see Selector
 */
public abstract class SelectionKey {

  /** Constructs an instance of this class. */
  protected SelectionKey() {}

  // -- Channel and selector operations --

  /**
   * Returns the channel for which this key was created. This method will continue to return the
   * channel even after the key is cancelled.
   *
   * @return This key's channel
   */
  public abstract SelectableChannel channel();

  /**
   * Returns the selector for which this key was created. This method will continue to return the
   * selector even after the key is cancelled.
   *
   * @return This key's selector
   */
  public abstract Selector selector();

  /**
   * Tells whether or not this key is valid.
   *
   * <p>A key is valid upon creation and remains so until it is cancelled, its channel is closed, or
   * its selector is closed.
   *
   * @return <tt>true</tt> if, and only if, this key is valid
   */
  public abstract boolean isValid();

  /**
   * Requests that the registration of this key's channel with its selector be cancelled. Upon
   * return the key will be invalid and will have been added to its selector's cancelled-key set.
   * The key will be removed from all of the selector's key sets during the next selection
   * operation.
   *
   * <p>If this key has already been cancelled then invoking this method has no effect. Once
   * cancelled, a key remains forever invalid.
   *
   * <p>This method may be invoked at any time. It synchronizes on the selector's cancelled-key set,
   * and therefore may block briefly if invoked concurrently with a cancellation or selection
   * operation involving the same selector.
   */
  public abstract void cancel();

  // -- Operation-set accessors --

  /**
   * Retrieves this key's interest set.
   *
   * <p>It is guaranteed that the returned set will only contain operation bits that are valid for
   * this key's channel.
   *
   * <p>This method may be invoked at any time. Whether or not it blocks, and for how long, is
   * implementation-dependent.
   *
   * @return This key's interest set
   * @throws CancelledKeyException If this key has been cancelled
   */
  public abstract int interestOps();

  /**
   * Sets this key's interest set to the given value.
   *
   * <p>This method may be invoked at any time. Whether or not it blocks, and for how long, is
   * implementation-dependent.
   *
   * @param ops The new interest set
   * @return This selection key
   * @throws IllegalArgumentException If a bit in the set does not correspond to an operation that
   *     is supported by this key's channel, that is, if <tt>(ops & ~channel().validOps()) != 0</tt>
   * @throws CancelledKeyException If this key has been cancelled
   */
  public abstract SelectionKey interestOps(int ops);

  /**
   * Retrieves this key's ready-operation set.
   *
   * <p>It is guaranteed that the returned set will only contain operation bits that are valid for
   * this key's channel.
   *
   * @return This key's ready-operation set
   * @throws CancelledKeyException If this key has been cancelled
   */
  public abstract int readyOps();

  // -- Operation bits and bit-testing convenience methods --

  /**
   * Operation-set bit for read operations.
   *
   * <p>Suppose that a selection key's interest set contains <tt>OP_READ</tt> at the start of a <a
   * href="Selector.html#selop">selection operation</a>. If the selector detects that the
   * corresponding channel is ready for reading, has reached end-of-stream, has been remotely shut
   * down for further reading, or has an error pending, then it will add <tt>OP_READ</tt> to the
   * key's ready-operation set and add the key to its selected-key&nbsp;set.
   */
  public static final int OP_READ = 1 << 0;

  /**
   * Operation-set bit for write operations.
   *
   * <p>Suppose that a selection key's interest set contains <tt>OP_WRITE</tt> at the start of a <a
   * href="Selector.html#selop">selection operation</a>. If the selector detects that the
   * corresponding channel is ready for writing, has been remotely shut down for further writing, or
   * has an error pending, then it will add <tt>OP_WRITE</tt> to the key's ready set and add the key
   * to its selected-key&nbsp;set.
   */
  public static final int OP_WRITE = 1 << 2;

  /**
   * Operation-set bit for socket-connect operations.
   *
   * <p>Suppose that a selection key's interest set contains <tt>OP_CONNECT</tt> at the start of a
   * <a href="Selector.html#selop">selection operation</a>. If the selector detects that the
   * corresponding socket channel is ready to complete its connection sequence, or has an error
   * pending, then it will add <tt>OP_CONNECT</tt> to the key's ready set and add the key to its
   * selected-key&nbsp;set.
   */
  public static final int OP_CONNECT = 1 << 3;

  /**
   * Operation-set bit for socket-accept operations.
   *
   * <p>Suppose that a selection key's interest set contains <tt>OP_ACCEPT</tt> at the start of a <a
   * href="Selector.html#selop">selection operation</a>. If the selector detects that the
   * corresponding server-socket channel is ready to accept another connection, or has an error
   * pending, then it will add <tt>OP_ACCEPT</tt> to the key's ready set and add the key to its
   * selected-key&nbsp;set.
   */
  public static final int OP_ACCEPT = 1 << 4;

  /**
   * Tests whether this key's channel is ready for reading.
   *
   * <p>An invocation of this method of the form <tt>k.isReadable()</tt> behaves in exactly the same
   * way as the expression
   *
   * <blockquote>
   *
   * <pre>
   * k.readyOps()&nbsp;&amp;&nbsp;OP_READ&nbsp;!=&nbsp;0</pre>
   *
   * </blockquote>
   *
   * <p>If this key's channel does not support read operations then this method always returns
   * <tt>false</tt>.
   *
   * @return <tt>true</tt> if, and only if,
   *     <tt>readyOps()</tt>&nbsp;<tt>&</tt>&nbsp;<tt>OP_READ</tt> is nonzero
   * @throws CancelledKeyException If this key has been cancelled
   */
  public final boolean isReadable() {
    return (readyOps() & OP_READ) != 0;
  }

  /**
   * Tests whether this key's channel is ready for writing.
   *
   * <p>An invocation of this method of the form <tt>k.isWritable()</tt> behaves in exactly the same
   * way as the expression
   *
   * <blockquote>
   *
   * <pre>
   * k.readyOps()&nbsp;&amp;&nbsp;OP_WRITE&nbsp;!=&nbsp;0</pre>
   *
   * </blockquote>
   *
   * <p>If this key's channel does not support write operations then this method always returns
   * <tt>false</tt>.
   *
   * @return <tt>true</tt> if, and only if,
   *     <tt>readyOps()</tt>&nbsp;<tt>&</tt>&nbsp;<tt>OP_WRITE</tt> is nonzero
   * @throws CancelledKeyException If this key has been cancelled
   */
  public final boolean isWritable() {
    return (readyOps() & OP_WRITE) != 0;
  }

  /**
   * Tests whether this key's channel has either finished, or failed to finish, its
   * socket-connection operation.
   *
   * <p>An invocation of this method of the form <tt>k.isConnectable()</tt> behaves in exactly the
   * same way as the expression
   *
   * <blockquote>
   *
   * <pre>
   * k.readyOps()&nbsp;&amp;&nbsp;OP_CONNECT&nbsp;!=&nbsp;0</pre>
   *
   * </blockquote>
   *
   * <p>If this key's channel does not support socket-connect operations then this method always
   * returns <tt>false</tt>.
   *
   * @return <tt>true</tt> if, and only if,
   *     <tt>readyOps()</tt>&nbsp;<tt>&</tt>&nbsp;<tt>OP_CONNECT</tt> is nonzero
   * @throws CancelledKeyException If this key has been cancelled
   */
  public final boolean isConnectable() {
    return (readyOps() & OP_CONNECT) != 0;
  }

  /**
   * Tests whether this key's channel is ready to accept a new socket connection.
   *
   * <p>An invocation of this method of the form <tt>k.isAcceptable()</tt> behaves in exactly the
   * same way as the expression
   *
   * <blockquote>
   *
   * <pre>
   * k.readyOps()&nbsp;&amp;&nbsp;OP_ACCEPT&nbsp;!=&nbsp;0</pre>
   *
   * </blockquote>
   *
   * <p>If this key's channel does not support socket-accept operations then this method always
   * returns <tt>false</tt>.
   *
   * @return <tt>true</tt> if, and only if,
   *     <tt>readyOps()</tt>&nbsp;<tt>&</tt>&nbsp;<tt>OP_ACCEPT</tt> is nonzero
   * @throws CancelledKeyException If this key has been cancelled
   */
  public final boolean isAcceptable() {
    return (readyOps() & OP_ACCEPT) != 0;
  }

  // -- Attachments --

  private volatile Object attachment = null;

  private static final AtomicReferenceFieldUpdater<SelectionKey, Object> attachmentUpdater =
      AtomicReferenceFieldUpdater.newUpdater(SelectionKey.class, Object.class, "attachment");

  /**
   * Attaches the given object to this key.
   *
   * <p>An attached object may later be retrieved via the {@link #attachment() attachment} method.
   * Only one object may be attached at a time; invoking this method causes any previous attachment
   * to be discarded. The current attachment may be discarded by attaching <tt>null</tt>.
   *
   * @param ob The object to be attached; may be <tt>null</tt>
   * @return The previously-attached object, if any, otherwise <tt>null</tt>
   */
  public final Object attach(Object ob) {
    return attachmentUpdater.getAndSet(this, ob);
  }

  /**
   * Retrieves the current attachment.
   *
   * @return The object currently attached to this key, or <tt>null</tt> if there is no attachment
   */
  public final Object attachment() {
    return attachment;
  }
}
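The attachment slot above is just a volatile field that attach() swaps with AtomicReferenceFieldUpdater.getAndSet, so no locking is needed. Below is a minimal usage sketch, assuming a local non-blocking ServerSocketChannel; the attached strings and the demo class name are arbitrary placeholders.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;

public class SelectionKeyAttachmentDemo {
  public static void main(String[] args) throws IOException {
    try (Selector selector = Selector.open();
        ServerSocketChannel server = ServerSocketChannel.open()) {
      server.bind(new InetSocketAddress(0)); // any free local port
      server.configureBlocking(false);       // required before registering with a selector

      // Attach an arbitrary state object at registration time.
      SelectionKey key = server.register(selector, SelectionKey.OP_ACCEPT, "accept-handler");

      // attach() atomically swaps the attachment and returns the previous one.
      Object previous = key.attach("new-handler");
      System.out.println("previous attachment = " + previous);         // accept-handler
      System.out.println("current attachment  = " + key.attachment()); // new-handler
    }
  }
}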
Code example #26
/**
 * A Reactor based Subscriber implementation that hosts assertion tests for its state and allows
 * asynchronous cancellation and requesting.
 *
 * <p>To create a new instance of {@link TestSubscriber}, you have the choice between these static
 * methods:
 *
 * <ul>
 *   <li>{@link TestSubscriber#subscribe(Publisher)}: create a new {@link TestSubscriber}, subscribe
 *       to it with the specified {@link Publisher} and requests an unbounded number of elements.
 *   <li>{@link TestSubscriber#subscribe(Publisher, long)}: create a new {@link TestSubscriber},
 *       subscribe to it with the specified {@link Publisher} and requests {@code n} elements (can
 *       be 0 if you want no initial demand).
 *   <li>{@link TestSubscriber#create()}: create a new {@link TestSubscriber} and requests an
 *       unbounded number of elements.
 *   <li>{@link TestSubscriber#create(long)}: create a new {@link TestSubscriber} and requests
 *       {@code n} elements (can be 0 if you want no initial demand).
 * </ul>
 *
 * <p>If you are testing asynchronous publishers, don't forget to use one of the {@code await*()}
 * methods to wait for the data to assert.
 *
 * <p>You can extend this class, but only the onNext, onError and onComplete methods can be
 * overridden. You can call {@link #request(long)} and {@link #cancel()} from any thread or from
 * within the overridable methods, but you should avoid calling the assertXXX methods
 * asynchronously.
 *
 * <p>Usage:
 *
 * <pre>{@code
 * TestSubscriber
 *   .subscribe(publisher)
 *   .await()
 *   .assertValues("ABC", "DEF");
 * }</pre>
 *
 * @param <T> the value type.
 * @author Sebastien Deleuze
 * @author David Karnok
 * @author Anatoly Kadyshev
 * @author Stephane Maldini
 * @author Brian Clozel
 */
public class TestSubscriber<T> implements Subscriber<T>, Subscription, Trackable, Receiver {

  /** Default timeout for waiting next values to be received */
  public static final Duration DEFAULT_VALUES_TIMEOUT = Duration.ofSeconds(3);

  @SuppressWarnings("rawtypes")
  private static final AtomicLongFieldUpdater<TestSubscriber> REQUESTED =
      AtomicLongFieldUpdater.newUpdater(TestSubscriber.class, "requested");

  @SuppressWarnings("rawtypes")
  private static final AtomicReferenceFieldUpdater<TestSubscriber, List> NEXT_VALUES =
      AtomicReferenceFieldUpdater.newUpdater(TestSubscriber.class, List.class, "values");

  @SuppressWarnings("rawtypes")
  private static final AtomicReferenceFieldUpdater<TestSubscriber, Subscription> S =
      AtomicReferenceFieldUpdater.newUpdater(TestSubscriber.class, Subscription.class, "s");

  private final List<Throwable> errors = new LinkedList<>();

  private final CountDownLatch cdl = new CountDownLatch(1);

  volatile Subscription s;

  volatile long requested;

  volatile List<T> values = new LinkedList<>();

  /** The fusion mode to request. */
  private int requestedFusionMode = -1;

  /** The established fusion mode. */
  private volatile int establishedFusionMode = -1;

  /** The fuseable QueueSubscription in case a fusion mode was specified. */
  private Fuseable.QueueSubscription<T> qs;

  private int subscriptionCount = 0;

  private int completionCount = 0;

  private volatile long valueCount = 0L;

  private volatile long nextValueAssertedCount = 0L;

  private Duration valuesTimeout = DEFAULT_VALUES_TIMEOUT;

  private boolean valuesStorage = true;

  //
  // ==============================================================================================================
  //	 Static methods
  //
  // ==============================================================================================================

  /**
   * Blocking method that waits until {@code conditionSupplier} returns true, or if it does not
   * before the specified timeout, throws an {@link AssertionError} with the specified error message
   * supplier.
   *
   * @param timeout the timeout duration
   * @param errorMessageSupplier the error message supplier
   * @param conditionSupplier condition to break out of the wait loop
   */
  public static void await(
      Duration timeout, Supplier<String> errorMessageSupplier, BooleanSupplier conditionSupplier) {

    Objects.requireNonNull(errorMessageSupplier);
    Objects.requireNonNull(conditionSupplier);
    Objects.requireNonNull(timeout);

    long timeoutNs = timeout.toNanos();
    long startTime = System.nanoTime();
    do {
      if (conditionSupplier.getAsBoolean()) {
        return;
      }
      try {
        Thread.sleep(100);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
      }
    } while (System.nanoTime() - startTime < timeoutNs);
    throw new AssertionError(errorMessageSupplier.get());
  }

  /**
   * Blocking method that waits until {@code conditionSupplier} returns true, or if it does not
   * before the specified timeout, throws an {@link AssertionError} with the specified error message.
   *
   * @param timeout the timeout duration
   * @param errorMessage the error message
   * @param conditionSupplier condition to break out of the wait loop
   */
  public static void await(
      Duration timeout, final String errorMessage, BooleanSupplier conditionSupplier) {
    await(
        timeout,
        new Supplier<String>() {

          @Override
          public String get() {
            return errorMessage;
          }
        },
        conditionSupplier);
  }

  /**
   * Create a new {@link TestSubscriber} that requests an unbounded number of elements.
   *
   * <p>Be sure that at least one publisher has subscribed to it via {@link
   * Publisher#subscribe(Subscriber)} before using the assert methods.
   *
   * @see #subscribe(Publisher)
   * @param <T> the observed value type
   * @return a fresh TestSubscriber instance
   */
  public static <T> TestSubscriber<T> create() {
    return new TestSubscriber<>();
  }

  /**
   * Create a new {@link TestSubscriber} that requests initially {@code n} elements. You can then
   * manage the demand with {@link Subscription#request(long)}.
   *
   * <p>Be sure that at least one publisher has subscribed to it via {@link
   * Publisher#subscribe(Subscriber)} before using the assert methods.
   *
   * @param n Number of elements to request (can be 0 if you want no initial demand).
   * @see #subscribe(Publisher, long)
   * @param <T> the observed value type
   * @return a fresh TestSubscriber instance
   */
  public static <T> TestSubscriber<T> create(long n) {
    return new TestSubscriber<>(n);
  }

  /**
   * Create a new {@link TestSubscriber} that requests an unbounded number of elements, and make the
   * specified {@code publisher} subscribe to it.
   *
   * @param publisher The publisher to subscribe with
   * @param <T> the observed value type
   * @return a fresh TestSubscriber instance
   */
  public static <T> TestSubscriber<T> subscribe(Publisher<T> publisher) {
    TestSubscriber<T> subscriber = new TestSubscriber<>();
    publisher.subscribe(subscriber);
    return subscriber;
  }

  /**
   * Create a new {@link TestSubscriber} that requests initially {@code n} elements, and make the
   * specified {@code publisher} subscribe to it. You can then manage the demand with {@link
   * Subscription#request(long)}.
   *
   * @param publisher The publisher to subscribe with
   * @param n Number of elements to request (can be 0 if you want no initial demand).
   * @param <T> the observed value type
   * @return a fresh TestSubscriber instance
   */
  public static <T> TestSubscriber<T> subscribe(Publisher<T> publisher, long n) {
    TestSubscriber<T> subscriber = new TestSubscriber<>(n);
    publisher.subscribe(subscriber);
    return subscriber;
  }

  //
  // ==============================================================================================================
  //	 Private constructors
  //
  // ==============================================================================================================

  private TestSubscriber() {
    this(Long.MAX_VALUE);
  }

  private TestSubscriber(long n) {
    if (n < 0) {
      throw new IllegalArgumentException("initialRequest >= required but it was " + n);
    }
    REQUESTED.lazySet(this, n);
  }

  //
  // ==============================================================================================================
  //	 Configuration
  //
  // ==============================================================================================================

  /**
   * Enable or disable the values storage. It is enabled by default, and can be disabled in order
   * to perform performance benchmarks or tests with a huge number of values.
   *
   * @param enabled enable value storage?
   * @return this
   */
  public final TestSubscriber<T> configureValuesStorage(boolean enabled) {
    this.valuesStorage = enabled;
    return this;
  }

  /**
   * Configure the timeout for waiting for the next values to be received (3 seconds by default).
   *
   * @param timeout the new default value timeout duration
   * @return this
   */
  public final TestSubscriber<T> configureValuesTimeout(Duration timeout) {
    this.valuesTimeout = timeout;
    return this;
  }

  /**
   * Returns the established fusion mode or -1 if it was not enabled
   *
   * @return the fusion mode, see Fuseable constants
   */
  public final int establishedFusionMode() {
    return establishedFusionMode;
  }

  //
  // ==============================================================================================================
  //	 Assertions
  //
  // ==============================================================================================================

  /**
   * Assert that a completion signal has been received.
   *
   * @return this
   */
  public final TestSubscriber<T> assertComplete() {
    assertNoError();
    int c = completionCount;
    if (c == 0) {
      throw new AssertionError("Not completed", null);
    }
    if (c > 1) {
      throw new AssertionError("Multiple completions: " + c, null);
    }
    return this;
  }

  /**
   * Assert the specified values have been received. Values storage should be enabled to use this
   * method.
   *
   * @param expectedValues the values to assert
   * @see #configureValuesStorage(boolean)
   * @return this
   */
  public final TestSubscriber<T> assertContainValues(Set<? extends T> expectedValues) {
    if (!valuesStorage) {
      throw new IllegalStateException("Using assertNoValues() requires enabling values storage");
    }
    if (expectedValues.size() > values.size()) {
      throw new AssertionError("Actual contains fewer elements" + values, null);
    }

    Iterator<? extends T> expected = expectedValues.iterator();

    while (true) {
      boolean n2 = expected.hasNext();
      if (n2) {
        T t2 = expected.next();
        if (!values.contains(t2)) {
          throw new AssertionError(
              "The element is not contained in the received results = " + valueAndClass(t2),
              null);
        }
      } else {
        break;
      }
    }
    return this;
  }

  /**
   * Assert an error signal has been received.
   *
   * @return this
   */
  public final TestSubscriber<T> assertError() {
    assertNotComplete();
    int s = errors.size();
    if (s == 0) {
      throw new AssertionError("No error", null);
    }
    if (s > 1) {
      throw new AssertionError("Multiple errors: " + s, null);
    }
    return this;
  }

  /**
   * Assert an error signal has been received.
   *
   * @param clazz The class of the exception contained in the error signal
   * @return this
   */
  public final TestSubscriber<T> assertError(Class<? extends Throwable> clazz) {
    assertNotComplete();
    int s = errors.size();
    if (s == 0) {
      throw new AssertionError("No error", null);
    }
    if (s == 1) {
      Throwable e = errors.get(0);
      if (!clazz.isInstance(e)) {
        throw new AssertionError(
            "Error class incompatible: expected = " + clazz + ", actual = " + e, null);
      }
    }
    if (s > 1) {
      throw new AssertionError("Multiple errors: " + s, null);
    }
    return this;
  }

  public final TestSubscriber<T> assertErrorMessage(String message) {
    assertNotComplete();
    int s = errors.size();
    if (s == 0) {
      assertionError("No error", null);
    }
    if (s == 1) {
      if (!Objects.equals(message, errors.get(0).getMessage())) {
        assertionError(
            "Error class incompatible: expected = \""
                + message
                + "\", actual = \""
                + errors.get(0).getMessage()
                + "\"",
            null);
      }
    }
    if (s > 1) {
      assertionError("Multiple errors: " + s, null);
    }

    return this;
  }

  /**
   * Assert an error signal has been received.
   *
   * @param expectation A method that can verify the exception contained in the error signal and
   *     throw an exception (like an {@link AssertionError}) if the exception is not valid.
   * @return this
   */
  public final TestSubscriber<T> assertErrorWith(Consumer<? super Throwable> expectation) {
    assertNotComplete();
    int s = errors.size();
    if (s == 0) {
      throw new AssertionError("No error", null);
    }
    if (s == 1) {
      expectation.accept(errors.get(0));
    }
    if (s > 1) {
      throw new AssertionError("Multiple errors: " + s, null);
    }
    return this;
  }

  /**
   * Assert that the upstream was a Fuseable source.
   *
   * @return this
   */
  public final TestSubscriber<T> assertFuseableSource() {
    if (qs == null) {
      throw new AssertionError("Upstream was not Fuseable");
    }
    return this;
  }

  /**
   * Assert that the fusion mode was granted.
   *
   * @return this
   */
  public final TestSubscriber<T> assertFusionEnabled() {
    if (establishedFusionMode != Fuseable.SYNC && establishedFusionMode != Fuseable.ASYNC) {
      throw new AssertionError("Fusion was not enabled");
    }
    return this;
  }

  public final TestSubscriber<T> assertFusionMode(int expectedMode) {
    if (establishedFusionMode != expectedMode) {
      throw new AssertionError(
          "Wrong fusion mode: expected: "
              + fusionModeName(expectedMode)
              + ", actual: "
              + fusionModeName(establishedFusionMode));
    }
    return this;
  }

  /**
   * Assert that the fusion mode was rejected.
   *
   * @return this
   */
  public final TestSubscriber<T> assertFusionRejected() {
    if (establishedFusionMode != Fuseable.NONE) {
      throw new AssertionError("Fusion was granted");
    }
    return this;
  }

  /**
   * Assert no error signal has been received.
   *
   * @return this
   */
  public final TestSubscriber<T> assertNoError() {
    int s = errors.size();
    if (s == 1) {
      Throwable e = errors.get(0);
      String valueAndClass = e == null ? null : e + " (" + e.getClass().getSimpleName() + ")";
      throw new AssertionError("Error present: " + valueAndClass, null);
    }
    if (s > 1) {
      throw new AssertionError("Multiple errors: " + s, null);
    }
    return this;
  }

  /**
   * Assert no values have been received.
   *
   * @return this
   */
  public final TestSubscriber<T> assertNoValues() {
    if (valueCount != 0) {
      throw new AssertionError(
          "No values expected but received: [length = " + values.size() + "] " + values, null);
    }
    return this;
  }

  /**
   * Assert that the upstream was not a Fuseable source.
   *
   * @return this
   */
  public final TestSubscriber<T> assertNonFuseableSource() {
    if (qs != null) {
      throw new AssertionError("Upstream was Fuseable");
    }
    return this;
  }

  /**
   * Assert that no completion signal has been received.
   *
   * @return this
   */
  public final TestSubscriber<T> assertNotComplete() {
    int c = completionCount;
    if (c == 1) {
      throw new AssertionError("Completed", null);
    }
    if (c > 1) {
      throw new AssertionError("Multiple completions: " + c, null);
    }
    return this;
  }

  /**
   * Assert no subscription occurred.
   *
   * @return this
   */
  public final TestSubscriber<T> assertNotSubscribed() {
    int s = subscriptionCount;

    if (s == 1) {
      throw new AssertionError("OnSubscribe called once", null);
    }
    if (s > 1) {
      throw new AssertionError("OnSubscribe called multiple times: " + s, null);
    }

    return this;
  }

  /**
   * Assert that neither a completion nor an error signal has been received.
   *
   * @return this
   */
  public final TestSubscriber<T> assertNotTerminated() {
    if (cdl.getCount() == 0) {
      throw new AssertionError("Terminated", null);
    }
    return this;
  }

  /**
   * Assert subscription occurred (once).
   *
   * @return this
   */
  public final TestSubscriber<T> assertSubscribed() {
    int s = subscriptionCount;

    if (s == 0) {
      throw new AssertionError("OnSubscribe not called", null);
    }
    if (s > 1) {
      throw new AssertionError("OnSubscribe called multiple times: " + s, null);
    }

    return this;
  }

  /**
   * Assert that either a completion or an error signal has been received.
   *
   * @return this
   */
  public final TestSubscriber<T> assertTerminated() {
    if (cdl.getCount() != 0) {
      throw new AssertionError("Not terminated", null);
    }
    return this;
  }

  /**
   * Assert that {@code n} values have been received.
   *
   * @param n the expected value count
   * @return this
   */
  public final TestSubscriber<T> assertValueCount(long n) {
    if (valueCount != n) {
      throw new AssertionError(
          "Different value count: expected = " + n + ", actual = " + valueCount, null);
    }
    return this;
  }

  /**
   * Assert that the specified values have been received in the same order as produced by the
   * passed {@link Iterable}. Values storage should be enabled to use this method.
   *
   * @param expectedSequence the values to assert
   * @see #configureValuesStorage(boolean)
   * @return this
   */
  public final TestSubscriber<T> assertValueSequence(Iterable<? extends T> expectedSequence) {
    if (!valuesStorage) {
      throw new IllegalStateException("Using assertNoValues() requires enabling values storage");
    }
    Iterator<T> actual = values.iterator();
    Iterator<? extends T> expected = expectedSequence.iterator();
    int i = 0;
    while (true) {
      boolean n1 = actual.hasNext();
      boolean n2 = expected.hasNext();
      if (n1 && n2) {
        T t1 = actual.next();
        T t2 = expected.next();
        if (!Objects.equals(t1, t2)) {
          throw new AssertionError(
              "The element with index "
                  + i
                  + " does not match: expected = "
                  + valueAndClass(t2)
                  + ", actual = "
                  + valueAndClass(t1),
              null);
        }
        i++;
      } else if (n1 && !n2) {
        throw new AssertionError("Actual contains more elements" + values, null);
      } else if (!n1 && n2) {
        throw new AssertionError("Actual contains fewer elements: " + values, null);
      } else {
        break;
      }
    }
    return this;
  }

  /**
   * Assert the specified values have been received in the declared order. Values storage should be
   * enabled to use this method.
   *
   * @param expectedValues the values to assert
   * @return this
   * @see #configureValuesStorage(boolean)
   */
  @SafeVarargs
  @SuppressWarnings("varargs")
  public final TestSubscriber<T> assertValues(T... expectedValues) {
    return assertValueSequence(Arrays.asList(expectedValues));
  }

  /**
   * Assert that the received values satisfy the given expectations, in the declared order. Values
   * storage should be enabled to use this method.
   *
   * @param expectations One or more methods that can verify the values and throw an exception
   *     (like an {@link AssertionError}) if the value is not valid.
   * @return this
   * @see #configureValuesStorage(boolean)
   */
  @SafeVarargs
  public final TestSubscriber<T> assertValuesWith(Consumer<T>... expectations) {
    if (!valuesStorage) {
      throw new IllegalStateException("Using assertNoValues() requires enabling values storage");
    }
    final int expectedValueCount = expectations.length;
    if (expectedValueCount != values.size()) {
      throw new AssertionError(
          "Different value count: expected = " + expectedValueCount + ", actual = " + valueCount,
          null);
    }
    for (int i = 0; i < expectedValueCount; i++) {
      Consumer<T> consumer = expectations[i];
      T actualValue = values.get(i);
      consumer.accept(actualValue);
    }
    return this;
  }

  //
  // ==============================================================================================================
  //	 Await methods
  //
  // ==============================================================================================================

  /**
   * Blocking method that waits until a successful completion or error signal is received.
   *
   * @return this
   */
  public final TestSubscriber<T> await() {
    if (cdl.getCount() == 0) {
      return this;
    }
    try {
      cdl.await();
    } catch (InterruptedException ex) {
      throw new AssertionError("Wait interrupted", ex);
    }
    return this;
  }

  /**
   * Blocking method that waits until a successful completion or error signal is received, or until
   * a timeout occurs.
   *
   * @param timeout The timeout value
   * @return this
   */
  public final TestSubscriber<T> await(Duration timeout) {
    if (cdl.getCount() == 0) {
      return this;
    }
    try {
      if (!cdl.await(timeout.toMillis(), TimeUnit.MILLISECONDS)) {
        throw new AssertionError("No complete or error signal before timeout");
      }
      return this;
    } catch (InterruptedException ex) {
      throw new AssertionError("Wait interrupted", ex);
    }
  }

  /**
   * Blocking method that waits until {@code n} next values have been received.
   *
   * @param n the value count to assert
   * @return this
   */
  public final TestSubscriber<T> awaitAndAssertNextValueCount(final long n) {
    await(
        valuesTimeout,
        () -> {
          if (valuesStorage) {
            return String.format(
                "%d out of %d next values received within %d, " + "values : %s",
                valueCount - nextValueAssertedCount,
                n,
                valuesTimeout.toMillis(),
                values.toString());
          }
          return String.format(
              "%d out of %d next values received within %d",
              valueCount - nextValueAssertedCount, n, valuesTimeout.toMillis());
        },
        () -> valueCount >= (nextValueAssertedCount + n));
    nextValueAssertedCount += n;
    return this;
  }

  /**
   * Blocking method that waits until {@code n} next values have been received (n is the number of
   * values provided) to assert them.
   *
   * @param values the values to assert
   * @return this
   */
  @SafeVarargs
  @SuppressWarnings({"unchecked", "rawtypes"})
  public final TestSubscriber<T> awaitAndAssertNextValues(T... values) {
    final int expectedNum = values.length;
    final List<Consumer<T>> expectations = new ArrayList<>();
    for (int i = 0; i < expectedNum; i++) {
      final T expectedValue = values[i];
      expectations.add(
          actualValue -> {
            if (!actualValue.equals(expectedValue)) {
              throw new AssertionError(
                  String.format(
                      "Expected Next signal: %s, but got: %s", expectedValue, actualValue));
            }
          });
    }
    awaitAndAssertNextValuesWith(expectations.toArray((Consumer<T>[]) new Consumer[0]));
    return this;
  }

  /**
   * Blocking method that waits until {@code n} next values have been received (n is the number of
   * expectations provided) to assert them.
   *
   * @param expectations One or more methods that can verify the values and throw an exception (like
   *     an {@link AssertionError}) if the value is not valid.
   * @return this
   */
  @SafeVarargs
  public final TestSubscriber<T> awaitAndAssertNextValuesWith(Consumer<T>... expectations) {
    valuesStorage = true;
    final int expectedValueCount = expectations.length;
    await(
        valuesTimeout,
        () -> {
          if (valuesStorage) {
            return String.format(
                "%d out of %d next values received within %d, " + "values : %s",
                valueCount - nextValueAssertedCount,
                expectedValueCount,
                valuesTimeout.toMillis(),
                values.toString());
          }
          return String.format(
              "%d out of %d next values received within %d ms",
              valueCount - nextValueAssertedCount, expectedValueCount, valuesTimeout.toMillis());
        },
        () -> valueCount >= (nextValueAssertedCount + expectedValueCount));
    List<T> nextValuesSnapshot;
    List<T> empty = new ArrayList<>();
    while (true) {
      nextValuesSnapshot = values;
      if (NEXT_VALUES.compareAndSet(this, values, empty)) {
        break;
      }
    }
    if (nextValuesSnapshot.size() < expectedValueCount) {
      throw new AssertionError(
          String.format(
              "Expected %d number of signals but received %d",
              expectedValueCount, nextValuesSnapshot.size()));
    }
    for (int i = 0; i < expectedValueCount; i++) {
      Consumer<T> consumer = expectations[i];
      T actualValue = nextValuesSnapshot.get(i);
      consumer.accept(actualValue);
    }
    nextValueAssertedCount += expectedValueCount;
    return this;
  }

  //
  // ==============================================================================================================
  //	 Overrides
  //
  // ==============================================================================================================

  @Override
  public void cancel() {
    Subscription a = s;
    if (a != Operators.cancelledSubscription()) {
      a = S.getAndSet(this, Operators.cancelledSubscription());
      if (a != null && a != Operators.cancelledSubscription()) {
        a.cancel();
      }
    }
  }

  @Override
  public final boolean isCancelled() {
    return s == Operators.cancelledSubscription();
  }

  @Override
  public final boolean isStarted() {
    return s != null;
  }

  @Override
  public final boolean isTerminated() {
    return isCancelled();
  }

  @Override
  public void onComplete() {
    completionCount++;
    cdl.countDown();
  }

  @Override
  public void onError(Throwable t) {
    errors.add(t);
    cdl.countDown();
  }

  @Override
  public void onNext(T t) {
    if (establishedFusionMode == Fuseable.ASYNC) {
      while (true) {
        t = qs.poll();
        if (t == null) {
          break;
        }
        valueCount++;
        if (valuesStorage) {
          List<T> nextValuesSnapshot;
          while (true) {
            nextValuesSnapshot = values;
            nextValuesSnapshot.add(t);
            // CAS against the same reference: succeeds only if the 'values' list was not swapped
            // out concurrently (e.g. by awaitAndAssertNextValuesWith); otherwise retry on the new list.
            if (NEXT_VALUES.compareAndSet(this, nextValuesSnapshot, nextValuesSnapshot)) {
              break;
            }
          }
        }
      }
    } else {
      valueCount++;
      if (valuesStorage) {
        List<T> nextValuesSnapshot;
        while (true) {
          nextValuesSnapshot = values;
          nextValuesSnapshot.add(t);
          // Same retry-on-swap check as in the ASYNC branch above.
          if (NEXT_VALUES.compareAndSet(this, nextValuesSnapshot, nextValuesSnapshot)) {
            break;
          }
        }
      }
    }
  }

  @Override
  @SuppressWarnings("unchecked")
  public void onSubscribe(Subscription s) {
    subscriptionCount++;
    int requestMode = requestedFusionMode;
    if (requestMode >= 0) {
      if (!setWithoutRequesting(s)) {
        if (!isCancelled()) {
          errors.add(new IllegalStateException("Subscription already set: " + subscriptionCount));
        }
      } else {
        if (s instanceof Fuseable.QueueSubscription) {
          this.qs = (Fuseable.QueueSubscription<T>) s;

          int m = qs.requestFusion(requestMode);
          establishedFusionMode = m;

          if (m == Fuseable.SYNC) {
            while (true) {
              T v = qs.poll();
              if (v == null) {
                onComplete();
                break;
              }

              onNext(v);
            }
          } else {
            requestDeferred();
          }
        } else {
          requestDeferred();
        }
      }
    } else {
      if (!set(s)) {
        if (!isCancelled()) {
          errors.add(new IllegalStateException("Subscription already set: " + subscriptionCount));
        }
      }
    }
  }

  @Override
  public void request(long n) {
    if (Operators.validate(n)) {
      if (establishedFusionMode != Fuseable.SYNC) {
        normalRequest(n);
      }
    }
  }

  @Override
  public final long requestedFromDownstream() {
    return requested;
  }

  /**
   * Set up which fusion mode should be requested from the incoming Subscription if it happens to
   * be a QueueSubscription.
   *
   * @param requestMode the mode to request, see Fuseable constants
   * @return this
   */
  public final TestSubscriber<T> requestedFusionMode(int requestMode) {
    this.requestedFusionMode = requestMode;
    return this;
  }

  @Override
  public Subscription upstream() {
    return s;
  }

  //
  // ==============================================================================================================
  //	 Non public methods
  //
  // ==============================================================================================================

  protected final void normalRequest(long n) {
    Subscription a = s;
    if (a != null) {
      a.request(n);
    } else {
      Operators.addAndGet(REQUESTED, this, n);

      a = s;

      if (a != null) {
        long r = REQUESTED.getAndSet(this, 0L);

        if (r != 0L) {
          a.request(r);
        }
      }
    }
  }

  /** Requests the deferred amount if not zero. */
  protected final void requestDeferred() {
    long r = REQUESTED.getAndSet(this, 0L);

    if (r != 0L) {
      s.request(r);
    }
  }

  /**
   * Atomically sets the single subscription and requests the missed amount from it.
   *
   * @param s the Subscription to set.
   * @return false if this arbiter is cancelled or there was a subscription already set
   */
  protected final boolean set(Subscription s) {
    Objects.requireNonNull(s, "s");
    Subscription a = this.s;
    if (a == Operators.cancelledSubscription()) {
      s.cancel();
      return false;
    }
    if (a != null) {
      s.cancel();
      Operators.reportSubscriptionSet();
      return false;
    }

    if (S.compareAndSet(this, null, s)) {

      long r = REQUESTED.getAndSet(this, 0L);

      if (r != 0L) {
        s.request(r);
      }

      return true;
    }

    a = this.s;

    if (a != Operators.cancelledSubscription()) {
      s.cancel();
      return false;
    }

    Operators.reportSubscriptionSet();
    return false;
  }

  /**
   * Sets the Subscription once but does not request anything.
   *
   * @param s the Subscription to set
   * @return true if successful, false if the current subscription is not null
   */
  protected final boolean setWithoutRequesting(Subscription s) {
    Objects.requireNonNull(s, "s");
    while (true) {
      Subscription a = this.s;
      if (a == Operators.cancelledSubscription()) {
        s.cancel();
        return false;
      }
      if (a != null) {
        s.cancel();
        Operators.reportSubscriptionSet();
        return false;
      }

      if (S.compareAndSet(this, null, s)) {
        return true;
      }
    }
  }

  /**
   * Prepares and throws an AssertionError based on the message, the optional cause, the active
   * state, and any errors received so far.
   *
   * @param message the message
   * @param cause the optional Throwable cause
   * @throws AssertionError as expected
   */
  protected final void assertionError(String message, Throwable cause) {
    StringBuilder b = new StringBuilder();

    if (cdl.getCount() != 0) {
      b.append("(active) ");
    }
    b.append(message);

    List<Throwable> err = errors;
    if (!err.isEmpty()) {
      b.append(" (+ ").append(err.size()).append(" errors)");
    }
    AssertionError e = new AssertionError(b.toString(), cause);

    for (Throwable t : err) {
      e.addSuppressed(t);
    }

    throw e;
  }

  protected final String fusionModeName(int mode) {
    switch (mode) {
      case -1:
        return "Disabled";
      case Fuseable.NONE:
        return "None";
      case Fuseable.SYNC:
        return "Sync";
      case Fuseable.ASYNC:
        return "Async";
      default:
        return "Unknown(" + mode + ")";
    }
  }

  protected final String valueAndClass(Object o) {
    if (o == null) {
      return null;
    }
    return o + " (" + o.getClass().getSimpleName() + ")";
  }
}
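A minimal usage sketch of the assertion and await helpers above. The Flux publisher and the
no-argument TestSubscriber constructor are assumptions not shown in this excerpt; only the chained
method names come from the class itself.

// Hypothetical usage; Flux and the no-arg constructor are assumed to exist.
TestSubscriber<Integer> ts = new TestSubscriber<>();
Flux.just(1, 2, 3).subscribe(ts);

ts.await(Duration.ofSeconds(5)) // block until a completion or error signal, fail on timeout
    .assertSubscribed()         // onSubscribe was called exactly once
    .assertValueCount(3)        // three onNext signals were counted
    .assertTerminated();        // a completion or error signal was received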
Code Example #27
/** Default {@link RpcResponse} implementation. */
public class DefaultRpcResponse extends CompletableFuture<Object> implements RpcResponse {

  private static final CancellationException CANCELLED =
      Exceptions.clearTrace(new CancellationException());

  private static final AtomicReferenceFieldUpdater<DefaultRpcResponse, Throwable> causeUpdater =
      AtomicReferenceFieldUpdater.newUpdater(DefaultRpcResponse.class, Throwable.class, "cause");

  private volatile Throwable cause;

  /** Creates a new incomplete response. */
  public DefaultRpcResponse() {}

  /**
   * Creates a new successfully complete response.
   *
   * @param result the result of an RPC call
   */
  public DefaultRpcResponse(Object result) {
    complete(result);
  }

  /**
   * Creates a new exceptionally complete response.
   *
   * @param cause the cause of failure
   */
  public DefaultRpcResponse(Throwable cause) {
    requireNonNull(cause, "cause");
    completeExceptionally(cause);
  }

  @Override
  public final Throwable cause() {
    return cause;
  }

  @Override
  public boolean completeExceptionally(Throwable cause) {
    causeUpdater.compareAndSet(this, null, requireNonNull(cause));
    return super.completeExceptionally(cause);
  }

  @Override
  public void obtrudeException(Throwable cause) {
    this.cause = requireNonNull(cause);
    super.obtrudeException(cause);
  }

  @Override
  public boolean cancel(boolean mayInterruptIfRunning) {
    return completeExceptionally(CANCELLED) || isCancelled();
  }

  @Override
  public String toString() {
    if (isDone()) {
      if (isCompletedExceptionally()) {
        return MoreObjects.toStringHelper(this).add("cause", cause).toString();
      } else {
        return MoreObjects.toStringHelper(this).addValue(getNow(null)).toString();
      }
    }

    final int count = getNumberOfDependents();
    if (count == 0) {
      return MoreObjects.toStringHelper(this).addValue("not completed").toString();
    } else {
      return MoreObjects.toStringHelper(this)
          .addValue("not completed")
          .add("dependents", count)
          .toString();
    }
  }
}
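A small sketch of the failure-cause semantics above: causeUpdater.compareAndSet(this, null, cause)
records only the first exceptional completion, so later failures do not overwrite it. The concrete
exception types below are illustrative.

// Illustrative only: the first recorded cause wins.
DefaultRpcResponse res = new DefaultRpcResponse();
res.completeExceptionally(new IllegalStateException("first failure")); // sets 'cause'
res.completeExceptionally(new IllegalArgumentException("ignored"));    // CAS fails; 'cause' unchanged
assert res.cause() instanceof IllegalStateException;
assert res.isCompletedExceptionally();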
Code Example #28
  static class Node {
    static ConcurrentSoftQueue<Node> pool = new ConcurrentSoftQueue<Node>();

    static Node alloc() {
      final int threshold = 2;
      int tryCnt = 0;
      while (true) {
        tryCnt++;
        Node n = pool.poll();
        if (n == null) {
          return new Node();
        } else {
          if (n.getRefCnt() > 0) {
            pool.add(n);
            if (tryCnt <= threshold) continue;
            else return new Node();
          } else {
            if (n.next != null) n.next.decRefCnt();
            n.init();
            return n;
          }
        }
      }
    }

    static void free(Node n) {
      pool.add(n);
    }

    private static void nodeCopy(
        double[] srcKeys,
        Object[] srcVals,
        int srcBegin,
        double[] targetKeys,
        Object[] targetVals,
        int targetBegin,
        int copyLen) {
      System.arraycopy(srcKeys, srcBegin, targetKeys, targetBegin, copyLen);
      System.arraycopy(srcVals, srcBegin, targetVals, targetBegin, copyLen);
    }

    static Node safeNext(Node b) {
      while (true) {
        Node n = b.next;
        if (n == null) return null;
        n.incRefCnt();
        if (n == b.next) return n;
        else release(n);
      }
    }

    static void release(Node n) {
      n.decRefCnt();
    }

    volatile int refcnt;
    volatile int length;
    final double[] keys;
    final Object[] vals;
    volatile Node next;

    Node() {
      this(CHUNK_SIZE);
    }

    Node(int capacity) {
      keys = new double[capacity];
      vals = new Object[capacity];
      refcnt = 1;
      next = null;
    }

    void init() {
      incRefCnt();
      length = 0;
      next = null;
      clearEntry();
    }

    public void clearEntry() {
      Arrays.fill(vals, null);
    }

    public String toString() {
      StringBuilder str =
          new StringBuilder(
              "Node(refcnt:" + refcnt + ", isMarked:" + isMarked() + ", length:" + length + ")[");
      for (int i = 0; i < len(); i++) {
        str.append(keys[i]).append(' ');
      }
      str.append(']');
      return str.toString();
    }

    void _print() {
      System.out.print("Node(refcnt:" + refcnt + "," + length + "/" + keys.length + ")[");
      for (int i = 0; i < length; i++) {
        // System.out.print(keys[i]+":"+vals[i]+" ");
        System.out.print(keys[i] + " ");
      }
      System.out.println("]");
    }

    static final AtomicReferenceFieldUpdater<Node, Node> nextUpdater =
        AtomicReferenceFieldUpdater.newUpdater(Node.class, Node.class, "next");
    static final AtomicIntegerFieldUpdater<Node> lenUpdater =
        AtomicIntegerFieldUpdater.newUpdater(Node.class, "length");
    static final AtomicIntegerFieldUpdater<Node> refcntUpdater =
        AtomicIntegerFieldUpdater.newUpdater(Node.class, "refcnt");

    boolean casNext(Node cmp, Node val) {
      boolean success = nextUpdater.compareAndSet(this, cmp, val);
      if (!success) {
        System.err.println("setting next failed");
      }
      return success;
    }

    boolean casLen(int cmp, int val) {
      return lenUpdater.compareAndSet(this, cmp, val);
    }

    int incRefCnt() {
      return refcntUpdater.incrementAndGet(this);
    }

    int decRefCnt() {
      int refcnt = refcntUpdater.decrementAndGet(this);
      if (refcnt < 0) {
        throw new RuntimeException("refcnt:" + refcnt);
      }
      return refcnt;
    }

    int getRefCnt() {
      return refcnt;
    }

    boolean isFull() {
      return len() == keys.length;
    }

    int len() {
      int len = length;
      return (len >= 0) ? len : -len;
    }

    boolean mark() {
      int len = length;
      if (len < 0) return false;
      return casLen(len, -len);
    }

    boolean isMarked() {
      return length < 0;
    }

    double first() {
      assert len() != 0;
      return keys[0];
    }

    double last() {
      assert len() != 0;
      return keys[len() - 1];
    }

    boolean contains(double key) {
      return Arrays.binarySearch(keys, 0, len(), key) >= 0;
    }

    int findKeyIndex(double key) {
      return Arrays.binarySearch(keys, 0, len(), key);
    }

    Object get(double key) {
      if (len() == 0) return null;
      int pos;
      if (key == keys[0]) pos = 0;
      else pos = Arrays.binarySearch(keys, 0, len(), key);
      if (pos < 0) return null;
      return vals[pos];
    }

    Object replace(double key, Object expect, Object value) {
      synchronized (this) {
        if (isMarked()) return Retry;

        int len = len();
        int pos = Arrays.binarySearch(keys, 0, len, key);
        if (pos < 0) return null;
        Object old = vals[pos];
        if (expect == null) {
          vals[pos] = value;
          return old;
        } else if (expect.equals(old)) {
          vals[pos] = value;
          return old;
        } else {
          return null;
        }
      }
    }

    // assumes no concurrent read/write access
    Object put(double key, Object value, ConcurrentDoubleOrderedListMap orderedMap) {
      int len = len();
      int pos = Arrays.binarySearch(keys, 0, len, key);
      if (pos >= 0) {
        Object old = vals[pos];
        vals[pos] = value;
        return old;
      } else {
        pos = -(pos + 1);
        putReally(pos, key, value, orderedMap);
        return null;
      }
    }

    private boolean emptySlotInNextTwo() {
      if (next == null) return false;
      if (!next.isFull()) return true;
      if (next.next == null) return false;
      if (!next.next.isFull()) return true;
      return false;
    }

    // assumes no concurrent read/write access
    private void putReally(
        int pos, double key, Object value, ConcurrentDoubleOrderedListMap orderedMap) {
      int len = len();
      if (len + 1 <= keys.length) { // inserted in the current node
        nodeCopy(keys, vals, pos, keys, vals, pos + 1, len - pos);
        keys[pos] = key;
        vals[pos] = value;
        length = len + 1;
        if (pos == 0) {
          orderedMap.skipListMap.put(keys[0], this);
          if (len != 0) orderedMap.skipListMap.remove(keys[1], this);
        }
      } else if (emptySlotInNextTwo()) {
        if (pos == len) {
          next.put(key, value, orderedMap);
          return;
        }
        next.put(keys[len - 1], vals[len - 1], orderedMap);
        nodeCopy(keys, vals, pos, keys, vals, pos + 1, len - pos - 1);
        keys[pos] = key;
        vals[pos] = value;
        if (pos == 0) {
          orderedMap.skipListMap.remove(keys[1], this);
          orderedMap.skipListMap.put(keys[0], this);
        }
      } else { // current node is full, so requires a new node
        Node n = Node.alloc();
        double[] nkeys = n.keys;
        Object[] nvals = n.vals;
        int l1 = len / 2, l2 = len - l1;
        if (next == null && pos == len) { // this is the last node, simply add to the new node.
          nkeys[0] = key;
          nvals[0] = value;
          n.length = 1;
          orderedMap.skipListMap.put(nkeys[0], n);
        } else if (pos < l1) { // key,value is stored in the current node
          length = l1 + 1;
          n.length = l2;
          nodeCopy(keys, vals, l1, nkeys, nvals, 0, l2);

          nodeCopy(keys, vals, pos, keys, vals, pos + 1, l1 - pos);
          keys[pos] = key;
          vals[pos] = value;
          if (pos == 0) {
            orderedMap.skipListMap.remove(keys[1]);
            orderedMap.skipListMap.put(keys[0], this);
          }
          orderedMap.skipListMap.put(nkeys[0], n);
        } else { // key,value is stored in the new node
          length = l1;
          n.length = l2 + 1;
          int newpos = pos - l1;

          nodeCopy(keys, vals, l1, nkeys, nvals, 0, newpos);
          nkeys[newpos] = key;
          nvals[newpos] = value;
          nodeCopy(keys, vals, pos, nkeys, nvals, newpos + 1, l2 - newpos);

          orderedMap.skipListMap.put(nkeys[0], n);
        }
        n.next = this.next;
        this.next = n;
      }
    }

    // concurrent read/write access is allowed
    boolean appendNewAtomic(double key, Object value, ConcurrentDoubleOrderedListMap orderedMap) {
      synchronized (this) {
        if (isMarked()) return false;
        if (next != null) return false;

        Node n = Node.alloc();
        n.put(key, value, orderedMap);
        // assert n.len()==1;
        n.next = null;
        boolean success = casNext(null, n);
        assert success;
        return true;
      }
    }
    // concurrent read/write access is allowed
    Object putAtomic(
        double key,
        Object value,
        Node b,
        boolean onlyIfAbsent,
        ConcurrentDoubleOrderedListMap orderedMap) {
      synchronized (b) {
        if (b.isMarked()) return Retry;
        synchronized (this) {
          if (isMarked()) return Retry;

          int len = len();
          int pos = Arrays.binarySearch(keys, 0, len, key);
          if (pos >= 0) {
            Object old = vals[pos];
            if (onlyIfAbsent) {
              if (old == null) {
                vals[pos] = value;
                return null;
              } else {
                return old;
              }
            }
            vals[pos] = value;
            return old;
          }
          pos = -(pos + 1);
          putAtomicReally(b, pos, key, value, orderedMap);
          return null;
        }
      }
    }

    // only used by putAtomic and PutAtomicIfAbsent. Inside synchronized(b) and synchronized(this).
    private void putAtomicReally(
        Node b, int pos, double key, Object value, ConcurrentDoubleOrderedListMap orderedMap) {
      int len = len();
      if (len + 1 <= keys.length) {
        if (pos == len) { // in-place append in the current node
          keys[pos] = key;
          vals[pos] = value;
          length = len + 1;
          if (pos == 0) {
            orderedMap.skipListMap.put(keys[0], this);
          }
        } else { // copied to a new node, replacing the current node
          mark();
          Node n = Node.alloc();
          n.next = this.next;
          if (next != null) next.incRefCnt();
          double[] nkeys = n.keys;
          Object[] nvals = n.vals;
          n.length = len + 1;
          nodeCopy(keys, vals, 0, nkeys, nvals, 0, pos);
          nkeys[pos] = key;
          nvals[pos] = value;
          nodeCopy(keys, vals, pos, nkeys, nvals, pos + 1, len - pos);

          orderedMap.skipListMap.put(nkeys[0], n);
          b.casNext(this, n); // should always succeed.
          if (pos == 0) {
            orderedMap.skipListMap.remove(keys[0], this);
          }
          release(this);
          free(this);
        }
      } else { // requires 2 new nodes, to replace the current node
        mark();
        Node n1 = Node.alloc();
        double[] n1keys = n1.keys;
        Object[] n1vals = n1.vals;
        Node n2 = Node.alloc();
        double[] n2keys = n2.keys;
        Object[] n2vals = n2.vals;
        int l1 = len / 2, l2 = len - l1;
        if (pos < l1) { // key, value stored in n1
          n1.length = l1 + 1;
          n2.length = l2;

          nodeCopy(keys, vals, 0, n1keys, n1vals, 0, pos);
          n1keys[pos] = key;
          n1vals[pos] = value;
          nodeCopy(keys, vals, pos, n1keys, n1vals, pos + 1, l1 - pos);
          nodeCopy(keys, vals, l1, n2keys, n2vals, 0, l2);

          n1.next = n2;
          n2.next = this.next;
          if (next != null) next.incRefCnt();

          orderedMap.skipListMap.put(n1keys[0], n1);
          orderedMap.skipListMap.put(n2keys[0], n2);
          b.casNext(this, n1); // should always succeed.
          if (pos == 0) {
            orderedMap.skipListMap.remove(keys[0], this);
          }
          release(this);
          free(this);
        } else { // key,value is stored in n2
          n1.length = l1;
          n2.length = l2 + 1;
          int newpos = pos - l1;

          nodeCopy(keys, vals, 0, n1keys, n1vals, 0, l1);

          nodeCopy(keys, vals, l1, n2keys, n2vals, 0, newpos);
          n2keys[newpos] = key;
          n2vals[newpos] = value;
          nodeCopy(keys, vals, pos, n2keys, n2vals, newpos + 1, l2 - newpos);

          n1.next = n2;
          n2.next = this.next;
          if (next != null) next.incRefCnt();

          orderedMap.skipListMap.put(n1keys[0], n1);
          orderedMap.skipListMap.put(n2keys[0], n2);
          b.casNext(this, n1); // should always succeed.
          release(this);
          free(this);
        }
      }
    }
  }
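The safeNext/release pair above implies a reference-counted traversal of the node chain; the sketch
below is inferred from those methods, and the starting node 'head' (plus the reference the caller
already holds on it) is an assumption outside this excerpt.

// Sketch of a ref-counted walk, inferred from safeNext()/release().
Node cur = head; // hypothetical starting node; assume the caller already holds a reference on it
while (cur != null) {
  // ... read cur.keys / cur.vals here ...
  Node nxt = Node.safeNext(cur); // acquires a reference on the successor, or returns null at the tail
  Node.release(cur);             // drop this thread's reference on the current node
  cur = nxt;
}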
Code Example #29
/**
 * Subscription that can be checked for status such as in a loop inside an {@link Observable} to
 * exit the loop if unsubscribed.
 *
 * @see <a
 *     href="http://msdn.microsoft.com/en-us/library/system.reactive.disposables.multipleassignmentdisposable">Rx.Net
 *     equivalent MultipleAssignmentDisposable</a>
 */
public final class MultipleAssignmentSubscription implements Subscription {
  /** The shared empty state. */
  static final State EMPTY_STATE = new State(false, Subscriptions.empty());

  volatile State state = EMPTY_STATE;
  static final AtomicReferenceFieldUpdater<MultipleAssignmentSubscription, State> STATE_UPDATER =
      AtomicReferenceFieldUpdater.newUpdater(
          MultipleAssignmentSubscription.class, State.class, "state");

  private static final class State {
    final boolean isUnsubscribed;
    final Subscription subscription;

    State(boolean u, Subscription s) {
      this.isUnsubscribed = u;
      this.subscription = s;
    }

    State unsubscribe() {
      return new State(true, subscription);
    }

    State set(Subscription s) {
      return new State(isUnsubscribed, s);
    }
  }

  @Override
  public boolean isUnsubscribed() {
    return state.isUnsubscribed;
  }

  @Override
  public void unsubscribe() {
    State oldState;
    State newState;
    do {
      oldState = state;
      if (oldState.isUnsubscribed) {
        return;
      } else {
        newState = oldState.unsubscribe();
      }
    } while (!STATE_UPDATER.compareAndSet(this, oldState, newState));
    oldState.subscription.unsubscribe();
  }

  /**
   * Sets the underlying subscription. If the {@code MultipleAssignmentSubscription} is already
   * unsubscribed, setting a new subscription causes the new subscription to also be immediately
   * unsubscribed.
   *
   * @param s the {@link Subscription} to set
   * @throws IllegalArgumentException if {@code s} is {@code null}
   */
  public void set(Subscription s) {
    if (s == null) {
      throw new IllegalArgumentException("Subscription can not be null");
    }
    State oldState;
    State newState;
    do {
      oldState = state;
      if (oldState.isUnsubscribed) {
        s.unsubscribe();
        return;
      } else {
        newState = oldState.set(s);
      }
    } while (!STATE_UPDATER.compareAndSet(this, oldState, newState));
  }

  /**
   * Gets the underlying subscription.
   *
   * @return the {@link Subscription} that underlies the {@code MultipleAssignmentSubscription}
   */
  public Subscription get() {
    return state.subscription;
  }
}
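A brief sketch of the set/unsubscribe semantics documented above; Subscriptions.empty() is the same
helper the class itself uses for its EMPTY_STATE, and the variable names are illustrative.

// Behavior sketch: replacing the underlying subscription does not unsubscribe the old one.
MultipleAssignmentSubscription mas = new MultipleAssignmentSubscription();
Subscription first = Subscriptions.empty();
Subscription second = Subscriptions.empty();

mas.set(first);    // 'first' becomes the underlying subscription
mas.set(second);   // 'first' is replaced but not unsubscribed
mas.unsubscribe(); // unsubscribes 'second' and marks this subscription as unsubscribed
mas.set(first);    // setting after unsubscribe: 'first' is unsubscribed immediately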
Code Example #30
/**
 * A <code>BufferedInputStream</code> adds functionality to another input stream, namely, the ability
 * to buffer the input and to support the <code>mark</code> and <code>reset</code> methods. When the
 * <code>BufferedInputStream</code> is created, an internal buffer array is created. As bytes from
 * the stream are read or skipped, the internal buffer is refilled as necessary from the contained
 * input stream, many bytes at a time. The <code>mark</code> operation remembers a point in the
 * input stream and the <code>reset</code> operation causes all the bytes read since the most recent
 * <code>mark</code> operation to be reread before new bytes are taken from the contained input
 * stream.
 *
 * @author Arthur van Hoff
 * @since JDK1.0
 */
public class BufferedInputStream extends FilterInputStream {

  private static int defaultBufferSize = 8192;

  /**
   * The internal buffer array where the data is stored. When necessary, it may be replaced by
   * another array of a different size.
   */
  protected volatile byte buf[];

  /**
   * Atomic updater to provide compareAndSet for buf. This is necessary because closes can be
   * asynchronous. We use nullness of buf[] as primary indicator that this stream is closed. (The
   * "in" field is also nulled out on close.)
   */
  private static final AtomicReferenceFieldUpdater<BufferedInputStream, byte[]> bufUpdater =
      AtomicReferenceFieldUpdater.newUpdater(BufferedInputStream.class, byte[].class, "buf");

  /**
   * The index one greater than the index of the last valid byte in the buffer. This value is always
   * in the range <code>0</code> through <code>buf.length</code>; elements <code>buf[0]</code>
   * through <code>buf[count-1]</code> contain buffered input data obtained from the underlying
   * input stream.
   */
  protected int count;

  /**
   * The current position in the buffer. This is the index of the next character to be read from the
   * <code>buf</code> array.
   *
   * <p>This value is always in the range <code>0</code> through <code>count</code>. If it is less
   * than <code>count</code>, then <code>buf[pos]</code> is the next byte to be supplied as input;
   * if it is equal to <code>count</code>, then the next <code>read</code> or <code>skip</code>
   * operation will require more bytes to be read from the contained input stream.
   *
   * @see java.io.BufferedInputStream#buf
   */
  protected int pos;

  /**
   * The value of the <code>pos</code> field at the time the last <code>mark</code> method was
   * called.
   *
   * <p>This value is always in the range <code>-1</code> through <code>pos</code>. If there is no
   * marked position in the input stream, this field is <code>-1</code>. If there is a marked
   * position in the input stream, then <code>buf[markpos]</code> is the first byte to be supplied
   * as input after a <code>reset</code> operation. If <code>markpos</code> is not <code>-1</code>,
   * then all bytes from positions <code>buf[markpos]</code> through <code>buf[pos-1]</code> must
   * remain in the buffer array (though they may be moved to another place in the buffer array, with
   * suitable adjustments to the values of <code>count</code>, <code>pos</code>, and
   * <code>markpos</code>); they may not be discarded unless and until the difference between
   * <code>pos</code>
   * and <code>markpos</code> exceeds <code>marklimit</code>.
   *
   * @see java.io.BufferedInputStream#mark(int)
   * @see java.io.BufferedInputStream#pos
   */
  protected int markpos = -1;

  /**
   * The maximum read ahead allowed after a call to the <code>mark</code> method before subsequent
   * calls to the <code>reset</code> method fail. Whenever the difference between <code>pos</code>
   * and <code>markpos</code> exceeds <code>marklimit</code>, then the mark may be dropped by
   * setting <code>markpos</code> to <code>-1</code>.
   *
   * @see java.io.BufferedInputStream#mark(int)
   * @see java.io.BufferedInputStream#reset()
   */
  protected int marklimit;

  /**
   * Check to make sure that the underlying input stream has not been nulled out due to close; if
   * not, return it.
   */
  private InputStream getInIfOpen() throws IOException {
    InputStream input = in;
    if (input == null) throw new IOException("Stream closed");
    return input;
  }

  /** Check to make sure that the buffer has not been nulled out due to close; if not, return it. */
  private byte[] getBufIfOpen() throws IOException {
    byte[] buffer = buf;
    if (buffer == null) throw new IOException("Stream closed");
    return buffer;
  }

  /**
   * Creates a <code>BufferedInputStream</code> and saves its argument, the input stream <code>in
   * </code>, for later use. An internal buffer array is created and stored in <code>buf</code>.
   *
   * @param in the underlying input stream.
   */
  public BufferedInputStream(InputStream in) {
    this(in, defaultBufferSize);
  }

  /**
   * Creates a <code>BufferedInputStream</code> with the specified buffer size, and saves its
   * argument, the input stream <code>in</code>, for later use. An internal buffer array of length
   * <code>size</code> is created and stored in <code>buf</code>.
   *
   * @param in the underlying input stream.
   * @param size the buffer size.
   * @exception IllegalArgumentException if size <= 0.
   */
  public BufferedInputStream(InputStream in, int size) {
    super(in);
    if (size <= 0) {
      throw new IllegalArgumentException("Buffer size <= 0");
    }
    buf = new byte[size];
  }

  /**
   * Fills the buffer with more data, taking into account shuffling and other tricks for dealing
   * with marks. Assumes that it is being called by a synchronized method. This method also assumes
   * that all data has already been read in, hence pos > count.
   */
  private void fill() throws IOException {
    byte[] buffer = getBufIfOpen();
    if (markpos < 0) pos = 0; /* no mark: throw away the buffer */
    else if (pos >= buffer.length) /* no room left in buffer */
      if (markpos > 0) {
        /* can throw away early part of the buffer */
        int sz = pos - markpos;
        System.arraycopy(buffer, markpos, buffer, 0, sz);
        pos = sz;
        markpos = 0;
      } else if (buffer.length >= marklimit) {
        markpos = -1; /* buffer got too big, invalidate mark */
        pos = 0; /* drop buffer contents */
      } else {
        /* grow buffer */
        int nsz = pos * 2;
        if (nsz > marklimit) nsz = marklimit;
        byte nbuf[] = new byte[nsz];
        System.arraycopy(buffer, 0, nbuf, 0, pos);
        if (!bufUpdater.compareAndSet(this, buffer, nbuf)) {
          // Can't replace buf if there was an async close.
          // Note: This would need to be changed if fill()
          // is ever made accessible to multiple threads.
          // But for now, the only way CAS can fail is via close.
          // assert buf == null;
          throw new IOException("Stream closed");
        }
        buffer = nbuf;
      }
    count = pos;
    int n = getInIfOpen().read(buffer, pos, buffer.length - pos);
    if (n > 0) count = n + pos;
  }

  /**
   * See the general contract of the <code>read</code> method of <code>InputStream</code>.
   *
   * @return the next byte of data, or <code>-1</code> if the end of the stream is reached.
   * @exception IOException if this input stream has been closed by invoking its {@link #close()}
   *     method, or an I/O error occurs.
   * @see java.io.FilterInputStream#in
   */
  public synchronized int read() throws IOException {
    if (pos >= count) {
      fill();
      if (pos >= count) return -1;
    }
    return getBufIfOpen()[pos++] & 0xff;
  }

  /**
   * Read characters into a portion of an array, reading from the underlying stream at most once if
   * necessary.
   */
  private int read1(byte[] b, int off, int len) throws IOException {
    int avail = count - pos;
    if (avail <= 0) {
      /* If the requested length is at least as large as the buffer, and
      if there is no mark/reset activity, do not bother to copy the
      bytes into the local buffer.  In this way buffered streams will
      cascade harmlessly. */
      if (len >= getBufIfOpen().length && markpos < 0) {
        return getInIfOpen().read(b, off, len);
      }
      fill();
      avail = count - pos;
      if (avail <= 0) return -1;
    }
    int cnt = (avail < len) ? avail : len;
    System.arraycopy(getBufIfOpen(), pos, b, off, cnt);
    pos += cnt;
    return cnt;
  }

  /**
   * Reads bytes from this byte-input stream into the specified byte array, starting at the given
   * offset.
   *
   * <p>This method implements the general contract of the corresponding <code>
   * {@link InputStream#read(byte[], int, int) read}</code> method of the <code>{@link InputStream}
   * </code> class. As an additional convenience, it attempts to read as many bytes as possible by
   * repeatedly invoking the <code>read</code> method of the underlying stream. This iterated <code>
   * read</code> continues until one of the following conditions becomes true:
   *
   * <ul>
   *   <li>The specified number of bytes have been read,
   *   <li>The <code>read</code> method of the underlying stream returns <code>-1</code>, indicating
   *       end-of-file, or
   *   <li>The <code>available</code> method of the underlying stream returns zero, indicating that
   *       further input requests would block.
   * </ul>
   *
   * If the first <code>read</code> on the underlying stream returns <code>-1</code> to indicate
   * end-of-file then this method returns <code>-1</code>. Otherwise this method returns the number
   * of bytes actually read.
   *
   * <p>Subclasses of this class are encouraged, but not required, to attempt to read as many bytes
   * as possible in the same fashion.
   *
   * @param b destination buffer.
   * @param off offset at which to start storing bytes.
   * @param len maximum number of bytes to read.
   * @return the number of bytes read, or <code>-1</code> if the end of the stream has been reached.
   * @exception IOException if this input stream has been closed by invoking its {@link #close()}
   *     method, or an I/O error occurs.
   */
  public synchronized int read(byte b[], int off, int len) throws IOException {
    getBufIfOpen(); // Check for closed stream
    if ((off | len | (off + len) | (b.length - (off + len))) < 0) {
      throw new IndexOutOfBoundsException();
    } else if (len == 0) {
      return 0;
    }

    int n = 0;
    for (; ; ) {
      int nread = read1(b, off + n, len - n);
      if (nread <= 0) return (n == 0) ? nread : n;
      n += nread;
      if (n >= len) return n;
      // if not closed but no bytes available, return
      InputStream input = in;
      if (input != null && input.available() <= 0) return n;
    }
  }

  /**
   * See the general contract of the <code>skip</code> method of <code>InputStream</code>.
   *
   * @exception IOException if the stream does not support seek, or if this input stream has been
   *     closed by invoking its {@link #close()} method, or an I/O error occurs.
   */
  public synchronized long skip(long n) throws IOException {
    getBufIfOpen(); // Check for closed stream
    if (n <= 0) {
      return 0;
    }
    long avail = count - pos;

    if (avail <= 0) {
      // If no mark position set then don't keep in buffer
      if (markpos < 0) return getInIfOpen().skip(n);

      // Fill in buffer to save bytes for reset
      fill();
      avail = count - pos;
      if (avail <= 0) return 0;
    }

    long skipped = (avail < n) ? avail : n;
    pos += skipped;
    return skipped;
  }

  /**
   * Returns an estimate of the number of bytes that can be read (or skipped over) from this input
   * stream without blocking by the next invocation of a method for this input stream. The next
   * invocation might be the same thread or another thread. A single read or skip of this many bytes
   * will not block, but may read or skip fewer bytes.
   *
   * <p>This method returns the sum of the number of bytes remaining to be read in the buffer (
   * <code>count&nbsp;- pos</code>) and the result of calling the {@link
   * java.io.FilterInputStream#in in}.available().
   *
   * @return an estimate of the number of bytes that can be read (or skipped over) from this input
   *     stream without blocking.
   * @exception IOException if this input stream has been closed by invoking its {@link #close()}
   *     method, or an I/O error occurs.
   */
  public synchronized int available() throws IOException {
    return getInIfOpen().available() + (count - pos);
  }

  /**
   * See the general contract of the <code>mark</code> method of <code>InputStream</code>.
   *
   * @param readlimit the maximum limit of bytes that can be read before the mark position becomes
   *     invalid.
   * @see java.io.BufferedInputStream#reset()
   */
  public synchronized void mark(int readlimit) {
    marklimit = readlimit;
    markpos = pos;
  }

  /**
   * See the general contract of the <code>reset</code> method of <code>InputStream</code>.
   *
   * <p>If <code>markpos</code> is <code>-1</code> (no mark has been set or the mark has been
   * invalidated), an <code>IOException</code> is thrown. Otherwise, <code>pos</code> is set equal
   * to <code>markpos</code>.
   *
   * @exception IOException if this stream has not been marked or, if the mark has been invalidated,
   *     or the stream has been closed by invoking its {@link #close()} method, or an I/O error
   *     occurs.
   * @see java.io.BufferedInputStream#mark(int)
   */
  public synchronized void reset() throws IOException {
    getBufIfOpen(); // Cause exception if closed
    if (markpos < 0) throw new IOException("Resetting to invalid mark");
    pos = markpos;
  }

  /**
   * Tests if this input stream supports the <code>mark</code> and <code>reset</code> methods. The
   * <code>markSupported</code> method of <code>BufferedInputStream</code> returns
   * <code>true</code>.
   *
   * @return a <code>boolean</code> indicating if this stream type supports the <code>mark</code>
   *     and <code>reset</code> methods.
   * @see java.io.InputStream#mark(int)
   * @see java.io.InputStream#reset()
   */
  public boolean markSupported() {
    return true;
  }

  /**
   * Closes this input stream and releases any system resources associated with the stream. Once the
   * stream has been closed, further read(), available(), reset(), or skip() invocations will throw
   * an IOException. Closing a previously closed stream has no effect.
   *
   * @exception IOException if an I/O error occurs.
   */
  public void close() throws IOException {
    byte[] buffer;
    while ((buffer = buf) != null) {
      if (bufUpdater.compareAndSet(this, buffer, null)) {
        InputStream input = in;
        in = null;
        if (input != null) input.close();
        return;
      }
      // Else retry in case a new buf was CASed in fill()
    }
  }
}
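A short mark/reset usage sketch for the class above, assuming it is used in place of
java.io.BufferedInputStream and that java.io.ByteArrayInputStream is imported; the byte values and
buffer size are illustrative.

// Illustrative mark/reset round trip (inside a method that declares throws IOException);
// the initial 2-byte buffer is refilled by fill() as needed.
try (InputStream raw = new ByteArrayInputStream(new byte[] {1, 2, 3, 4});
    BufferedInputStream in = new BufferedInputStream(raw, 2)) {
  in.mark(16);           // remember the current position; allow up to 16 bytes of read-ahead
  int first = in.read(); // returns 1
  in.read();             // returns 2
  in.reset();            // rewind to the marked position
  assert in.read() == first;
}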