Example 1
 // Unsafe mechanics
 static long objectFieldOffset(sun.misc.Unsafe UNSAFE, String field, Class<?> klazz) {
   try {
     return UNSAFE.objectFieldOffset(klazz.getDeclaredField(field));
   } catch (NoSuchFieldException e) {
     // Convert Exception to corresponding Error
     NoSuchFieldError error = new NoSuchFieldError(field);
     error.initCause(e);
     throw error;
   }
 }
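
The helper above converts the checked NoSuchFieldException into an unchecked NoSuchFieldError so it can be called from static initializers, as the node classes below do. One caveat worth noting: Unsafe.getUnsafe(), used in the following examples, only succeeds for classes loaded by the bootstrap class loader. A hedged sketch of the reflective workaround commonly used outside the JDK (the method name here is hypothetical):

  // Sketch only: grab the Unsafe singleton reflectively, since
  // Unsafe.getUnsafe() throws SecurityException for application classes.
  static sun.misc.Unsafe getUnsafeReflectively() {
    try {
      java.lang.reflect.Field f = sun.misc.Unsafe.class.getDeclaredField("theUnsafe");
      f.setAccessible(true);
      return (sun.misc.Unsafe) f.get(null);
    } catch (ReflectiveOperationException e) {
      throw new Error(e); // mirror the Exception-to-Error conversion used above
    }
  }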
Example 2
    /** Node class for TransferStacks. */
    static final class SNode {
      volatile SNode next; // next node in stack
      volatile SNode match; // the node matched to this
      volatile Thread waiter; // to control park/unpark
      Object item; // data; or null for REQUESTs
      int mode;
      // Note: item and mode fields don't need to be volatile
      // since they are always written before, and read after,
      // other volatile/atomic operations.

      SNode(Object item) {
        this.item = item;
      }

      boolean casNext(SNode cmp, SNode val) {
        return cmp == next && UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
      }

      /**
       * Tries to match node s to this node, if so, waking up thread. Fulfillers call tryMatch to
       * identify their waiters. Waiters block until they have been matched.
       *
       * @param s the node to match
       * @return true if successfully matched to s
       */
      boolean tryMatch(SNode s) {
        if (match == null && UNSAFE.compareAndSwapObject(this, matchOffset, null, s)) {
          Thread w = waiter;
          if (w != null) { // waiters need at most one unpark
            waiter = null;
            LockSupport.unpark(w);
          }
          return true;
        }
        return match == s;
      }

      /** Tries to cancel a wait by matching node to itself. */
      void tryCancel() {
        UNSAFE.compareAndSwapObject(this, matchOffset, null, this);
      }

      boolean isCancelled() {
        return match == this;
      }

      // Unsafe mechanics
      private static final sun.misc.Unsafe UNSAFE = sun.misc.Unsafe.getUnsafe();
      private static final long nextOffset = objectFieldOffset(UNSAFE, "next", SNode.class);
      private static final long matchOffset = objectFieldOffset(UNSAFE, "match", SNode.class);
    }
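
For readers who cannot (or prefer not to) use sun.misc.Unsafe, the same per-field CAS pattern can be expressed with the supported AtomicReferenceFieldUpdater API. A minimal sketch, using a hypothetical UpdaterSNode class rather than the JDK's own code:

  import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
  import java.util.concurrent.locks.LockSupport;

  final class UpdaterSNode {
    volatile UpdaterSNode next;
    volatile UpdaterSNode match;
    volatile Thread waiter;

    // Updaters play the role of nextOffset/matchOffset above
    private static final AtomicReferenceFieldUpdater<UpdaterSNode, UpdaterSNode> NEXT =
        AtomicReferenceFieldUpdater.newUpdater(UpdaterSNode.class, UpdaterSNode.class, "next");
    private static final AtomicReferenceFieldUpdater<UpdaterSNode, UpdaterSNode> MATCH =
        AtomicReferenceFieldUpdater.newUpdater(UpdaterSNode.class, UpdaterSNode.class, "match");

    boolean casNext(UpdaterSNode cmp, UpdaterSNode val) {
      return next == cmp && NEXT.compareAndSet(this, cmp, val);
    }

    boolean tryMatch(UpdaterSNode s) {
      if (match == null && MATCH.compareAndSet(this, null, s)) {
        Thread w = waiter;
        if (w != null) {       // waiters need at most one unpark
          waiter = null;
          LockSupport.unpark(w);
        }
        return true;
      }
      return match == s;
    }
  }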
Example 3
    /** Node class for TransferQueue. */
    static final class QNode {
      volatile QNode next; // next node in queue
      volatile Object item; // CAS'ed to or from null
      volatile Thread waiter; // to control park/unpark
      final boolean isData;

      QNode(Object item, boolean isData) {
        this.item = item;
        this.isData = isData;
      }

      boolean casNext(QNode cmp, QNode val) {
        return next == cmp && UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
      }

      boolean casItem(Object cmp, Object val) {
        return item == cmp && UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
      }

      /** Tries to cancel by CAS'ing ref to this as item. */
      void tryCancel(Object cmp) {
        UNSAFE.compareAndSwapObject(this, itemOffset, cmp, this);
      }

      boolean isCancelled() {
        return item == this;
      }

      /**
       * Returns true if this node is known to be off the queue because its next pointer has been
       * forgotten due to an advanceHead operation.
       */
      boolean isOffList() {
        return next == this;
      }

      // Unsafe mechanics
      private static final sun.misc.Unsafe UNSAFE = sun.misc.Unsafe.getUnsafe();
      private static final long nextOffset = objectFieldOffset(UNSAFE, "next", QNode.class);
      private static final long itemOffset = objectFieldOffset(UNSAFE, "item", QNode.class);
    }
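
QNode encodes cancellation with a self-referential sentinel: tryCancel CASes item to the node itself, after which isCancelled() is true and no fulfiller can claim the node. A tiny stand-alone illustration of that convention (hypothetical MiniQNode, using AtomicReference instead of Unsafe):

  import java.util.concurrent.atomic.AtomicReference;

  final class MiniQNode {
    final AtomicReference<Object> item;

    MiniQNode(Object item) { this.item = new AtomicReference<>(item); }

    void tryCancel(Object cmp) { item.compareAndSet(cmp, this); } // CAS item -> this marks cancellation
    boolean isCancelled() { return item.get() == this; }
    boolean casItem(Object cmp, Object val) { return item.compareAndSet(cmp, val); }

    public static void main(String[] args) {
      Object payload = "x";
      MiniQNode n = new MiniQNode(payload);
      n.tryCancel(payload);                         // waiter timed out or was interrupted
      System.out.println(n.isCancelled());          // true
      System.out.println(n.casItem(payload, null)); // false: a fulfiller can no longer claim it
    }
  }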
Example 4
  /** Dual Queue */
  static final class TransferQueue extends Transferer {
    /*
     * This extends Scherer-Scott dual queue algorithm, differing,
     * among other ways, by using modes within nodes rather than
     * marked pointers. The algorithm is a little simpler than
     * that for stacks because fulfillers do not need explicit
     * nodes, and matching is done by CAS'ing QNode.item field
     * from non-null to null (for put) or vice versa (for take).
     */

    /** Node class for TransferQueue. */
    static final class QNode {
      volatile QNode next; // next node in queue
      volatile Object item; // CAS'ed to or from null
      volatile Thread waiter; // to control park/unpark
      final boolean isData;

      QNode(Object item, boolean isData) {
        this.item = item;
        this.isData = isData;
      }

      boolean casNext(QNode cmp, QNode val) {
        return next == cmp && UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
      }

      boolean casItem(Object cmp, Object val) {
        return item == cmp && UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
      }

      /** Tries to cancel by CAS'ing ref to this as item. */
      void tryCancel(Object cmp) {
        UNSAFE.compareAndSwapObject(this, itemOffset, cmp, this);
      }

      boolean isCancelled() {
        return item == this;
      }

      /**
       * Returns true if this node is known to be off the queue because its next pointer has been
       * forgotten due to an advanceHead operation.
       */
      boolean isOffList() {
        return next == this;
      }

      // Unsafe mechanics
      private static final sun.misc.Unsafe UNSAFE = sun.misc.Unsafe.getUnsafe();
      private static final long nextOffset = objectFieldOffset(UNSAFE, "next", QNode.class);
      private static final long itemOffset = objectFieldOffset(UNSAFE, "item", QNode.class);
    }

    /** Head of queue */
    transient volatile QNode head;
    /** Tail of queue */
    transient volatile QNode tail;
    /**
     * Reference to a cancelled node that might not yet have been unlinked from queue because it was
     * the last inserted node when it cancelled.
     */
    transient volatile QNode cleanMe;

    TransferQueue() {
      QNode h = new QNode(null, false); // initialize to dummy node.
      head = h;
      tail = h;
    }

    /**
     * Tries to cas nh as new head; if successful, unlink old head's next node to avoid garbage
     * retention.
     */
    void advanceHead(QNode h, QNode nh) {
      if (h == head && UNSAFE.compareAndSwapObject(this, headOffset, h, nh))
        h.next = h; // forget old next
    }

    /** Tries to cas nt as new tail. */
    void advanceTail(QNode t, QNode nt) {
      if (tail == t) UNSAFE.compareAndSwapObject(this, tailOffset, t, nt);
    }

    /** Tries to CAS cleanMe slot. */
    boolean casCleanMe(QNode cmp, QNode val) {
      return cleanMe == cmp && UNSAFE.compareAndSwapObject(this, cleanMeOffset, cmp, val);
    }

    /** Puts or takes an item. */
    Object transfer(Object e, boolean timed, long nanos) {
      /* Basic algorithm is to loop trying to take either of
       * two actions:
       *
       * 1. If queue apparently empty or holding same-mode nodes,
       *    try to add node to queue of waiters, wait to be
       *    fulfilled (or cancelled) and return matching item.
       *
       * 2. If queue apparently contains waiting items, and this
       *    call is of complementary mode, try to fulfill by CAS'ing
       *    item field of waiting node and dequeuing it, and then
       *    returning matching item.
       *
       * In each case, along the way, check for and try to help
       * advance head and tail on behalf of other stalled/slow
       * threads.
       *
       * The loop starts off with a null check guarding against
       * seeing uninitialized head or tail values. This never
       * happens in current SynchronousQueue, but could if
       * callers held non-volatile/final ref to the
       * transferer. The check is here anyway because it places
       * null checks at top of loop, which is usually faster
       * than having them implicitly interspersed.
       */

      QNode s = null; // constructed/reused as needed
      boolean isData = (e != null);

      for (; ; ) {
        QNode t = tail;
        QNode h = head;
        if (t == null || h == null) // saw uninitialized value
          continue; // spin

        if (h == t || t.isData == isData) { // empty or same-mode
          QNode tn = t.next;
          if (t != tail) // inconsistent read
            continue;
          if (tn != null) { // lagging tail
            advanceTail(t, tn);
            continue;
          }
          if (timed && nanos <= 0) // can't wait
            return null;
          if (s == null) s = new QNode(e, isData);
          if (!t.casNext(null, s)) // failed to link in
            continue;

          advanceTail(t, s); // swing tail and wait
          Object x = awaitFulfill(s, e, timed, nanos);
          if (x == s) { // wait was cancelled
            clean(t, s);
            return null;
          }

          if (!s.isOffList()) { // not already unlinked
            advanceHead(t, s); // unlink if head
            if (x != null) // and forget fields
              s.item = s;
            s.waiter = null;
          }
          return (x != null) ? x : e;

        } else { // complementary-mode
          QNode m = h.next; // node to fulfill
          if (t != tail || m == null || h != head) continue; // inconsistent read

          Object x = m.item;
          if (isData == (x != null) || // m already fulfilled
              x == m || // m cancelled
              !m.casItem(x, e)) { // lost CAS
            advanceHead(h, m); // dequeue and retry
            continue;
          }

          advanceHead(h, m); // successfully fulfilled
          LockSupport.unpark(m.waiter);
          return (x != null) ? x : e;
        }
      }
    }

    /**
     * Spins/blocks until node s is fulfilled.
     *
     * @param s the waiting node
     * @param e the comparison value for checking match
     * @param timed true if timed wait
     * @param nanos timeout value
     * @return matched item, or s if cancelled
     */
    Object awaitFulfill(QNode s, Object e, boolean timed, long nanos) {
      /* Same idea as TransferStack.awaitFulfill */
      long lastTime = timed ? System.nanoTime() : 0;
      Thread w = Thread.currentThread();
      int spins = ((head.next == s) ? (timed ? maxTimedSpins : maxUntimedSpins) : 0);
      for (; ; ) {
        if (w.isInterrupted()) s.tryCancel(e);
        Object x = s.item;
        if (x != e) return x;
        if (timed) {
          long now = System.nanoTime();
          nanos -= now - lastTime;
          lastTime = now;
          if (nanos <= 0) {
            s.tryCancel(e);
            continue;
          }
        }
        if (spins > 0) --spins;
        else if (s.waiter == null) s.waiter = w;
        else if (!timed) LockSupport.park(this);
        else if (nanos > spinForTimeoutThreshold) LockSupport.parkNanos(this, nanos);
      }
    }

    /** Gets rid of cancelled node s with original predecessor pred. */
    void clean(QNode pred, QNode s) {
      s.waiter = null; // forget thread
      /*
       * At any given time, exactly one node on list cannot be
       * deleted -- the last inserted node. To accommodate this,
       * if we cannot delete s, we save its predecessor as
       * "cleanMe", deleting the previously saved version
       * first. At least one of node s or the node previously
       * saved can always be deleted, so this always terminates.
       */
      while (pred.next == s) { // Return early if already unlinked
        QNode h = head;
        QNode hn = h.next; // Absorb cancelled first node as head
        if (hn != null && hn.isCancelled()) {
          advanceHead(h, hn);
          continue;
        }
        QNode t = tail; // Ensure consistent read for tail
        if (t == h) return;
        QNode tn = t.next;
        if (t != tail) continue;
        if (tn != null) {
          advanceTail(t, tn);
          continue;
        }
        if (s != t) { // If not tail, try to unsplice
          QNode sn = s.next;
          if (sn == s || pred.casNext(s, sn)) return;
        }
        QNode dp = cleanMe;
        if (dp != null) { // Try unlinking previous cancelled node
          QNode d = dp.next;
          QNode dn;
          if (d == null || // d is gone or
              d == dp || // d is off list or
              !d.isCancelled() || // d not cancelled or
              (d != t && // d not tail and
                  (dn = d.next) != null && //   has successor
                  dn != d && //   that is on list
                  dp.casNext(d, dn))) // d unspliced
            casCleanMe(dp, null);
          if (dp == pred) return; // s is already saved node
        } else if (casCleanMe(null, pred)) return; // Postpone cleaning s
      }
    }

    // unsafe mechanics
    private static final sun.misc.Unsafe UNSAFE = sun.misc.Unsafe.getUnsafe();
    private static final long headOffset = objectFieldOffset(UNSAFE, "head", TransferQueue.class);
    private static final long tailOffset = objectFieldOffset(UNSAFE, "tail", TransferQueue.class);
    private static final long cleanMeOffset =
        objectFieldOffset(UNSAFE, "cleanMe", TransferQueue.class);
  }
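
For context, TransferQueue is the FIFO transferer selected when a SynchronousQueue is constructed in fair mode; put, take, and timed offer all bottom out in the transfer method above. A small usage sketch of the public API (demo class name is hypothetical):

  import java.util.concurrent.SynchronousQueue;
  import java.util.concurrent.TimeUnit;

  public class HandoffDemo {
    public static void main(String[] args) throws InterruptedException {
      // fair = true selects the FIFO, TransferQueue-backed transferer
      SynchronousQueue<String> q = new SynchronousQueue<>(true);

      Thread consumer = new Thread(() -> {
        try {
          System.out.println("got: " + q.take()); // take() -> transfer(null, false, 0)
        } catch (InterruptedException ignored) { }
      });
      consumer.start();

      q.put("hello"); // put(e) -> transfer(e, false, 0); blocks until a taker arrives
      consumer.join();

      // A timed offer with no waiting consumer corresponds to
      // transfer(e, true, nanos) returning null.
      System.out.println(q.offer("late", 10, TimeUnit.MILLISECONDS)); // false
    }
  }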
Example 5
  /** Dual stack */
  static final class TransferStack extends Transferer {
    /*
     * This extends Scherer-Scott dual stack algorithm, differing,
     * among other ways, by using "covering" nodes rather than
     * bit-marked pointers: Fulfilling operations push on marker
     * nodes (with FULFILLING bit set in mode) to reserve a spot
     * to match a waiting node.
     */

    /* Modes for SNodes, ORed together in node fields */
    /** Node represents an unfulfilled consumer */
    static final int REQUEST = 0;
    /** Node represents an unfulfilled producer */
    static final int DATA = 1;
    /** Node is fulfilling another unfulfilled DATA or REQUEST */
    static final int FULFILLING = 2;

    /** Return true if m has fulfilling bit set */
    static boolean isFulfilling(int m) {
      return (m & FULFILLING) != 0;
    }

    /** Node class for TransferStacks. */
    static final class SNode {
      volatile SNode next; // next node in stack
      volatile SNode match; // the node matched to this
      volatile Thread waiter; // to control park/unpark
      Object item; // data; or null for REQUESTs
      int mode;
      // Note: item and mode fields don't need to be volatile
      // since they are always written before, and read after,
      // other volatile/atomic operations.

      SNode(Object item) {
        this.item = item;
      }

      boolean casNext(SNode cmp, SNode val) {
        return cmp == next && UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
      }

      /**
       * Tries to match node s to this node, if so, waking up thread. Fulfillers call tryMatch to
       * identify their waiters. Waiters block until they have been matched.
       *
       * @param s the node to match
       * @return true if successfully matched to s
       */
      boolean tryMatch(SNode s) {
        if (match == null && UNSAFE.compareAndSwapObject(this, matchOffset, null, s)) {
          Thread w = waiter;
          if (w != null) { // waiters need at most one unpark
            waiter = null;
            LockSupport.unpark(w);
          }
          return true;
        }
        return match == s;
      }

      /** Tries to cancel a wait by matching node to itself. */
      void tryCancel() {
        UNSAFE.compareAndSwapObject(this, matchOffset, null, this);
      }

      boolean isCancelled() {
        return match == this;
      }

      // Unsafe mechanics
      private static final sun.misc.Unsafe UNSAFE = sun.misc.Unsafe.getUnsafe();
      private static final long nextOffset = objectFieldOffset(UNSAFE, "next", SNode.class);
      private static final long matchOffset = objectFieldOffset(UNSAFE, "match", SNode.class);
    }

    /** The head (top) of the stack */
    volatile SNode head;

    boolean casHead(SNode h, SNode nh) {
      return h == head && UNSAFE.compareAndSwapObject(this, headOffset, h, nh);
    }

    /**
     * Creates or resets fields of a node. Called only from transfer where the node to push on stack
     * is lazily created and reused when possible to help reduce intervals between reads and CASes
     * of head and to avoid surges of garbage when CASes to push nodes fail due to contention.
     */
    static SNode snode(SNode s, Object e, SNode next, int mode) {
      if (s == null) s = new SNode(e);
      s.mode = mode;
      s.next = next;
      return s;
    }

    /** Puts or takes an item. */
    Object transfer(Object e, boolean timed, long nanos) {
      /*
       * Basic algorithm is to loop trying one of three actions:
       *
       * 1. If apparently empty or already containing nodes of same
       *    mode, try to push node on stack and wait for a match,
       *    returning it, or null if cancelled.
       *
       * 2. If apparently containing node of complementary mode,
       *    try to push a fulfilling node on to stack, match
       *    with corresponding waiting node, pop both from
       *    stack, and return matched item. The matching or
       *    unlinking might not actually be necessary because of
       *    other threads performing action 3:
       *
       * 3. If top of stack already holds another fulfilling node,
       *    help it out by doing its match and/or pop
       *    operations, and then continue. The code for helping
       *    is essentially the same as for fulfilling, except
       *    that it doesn't return the item.
       */

      SNode s = null; // constructed/reused as needed
      int mode = (e == null) ? REQUEST : DATA;

      for (; ; ) {
        SNode h = head;
        if (h == null || h.mode == mode) { // empty or same-mode
          if (timed && nanos <= 0) { // can't wait
            if (h != null && h.isCancelled()) casHead(h, h.next); // pop cancelled node
            else return null;
          } else if (casHead(h, s = snode(s, e, h, mode))) {
            SNode m = awaitFulfill(s, timed, nanos);
            if (m == s) { // wait was cancelled
              clean(s);
              return null;
            }
            if ((h = head) != null && h.next == s) casHead(h, s.next); // help s's fulfiller
            return (mode == REQUEST) ? m.item : s.item;
          }
        } else if (!isFulfilling(h.mode)) { // try to fulfill
          if (h.isCancelled()) // already cancelled
            casHead(h, h.next); // pop and retry
          else if (casHead(h, s = snode(s, e, h, FULFILLING | mode))) {
            for (; ; ) { // loop until matched or waiters disappear
              SNode m = s.next; // m is s's match
              if (m == null) { // all waiters are gone
                casHead(s, null); // pop fulfill node
                s = null; // use new node next time
                break; // restart main loop
              }
              SNode mn = m.next;
              if (m.tryMatch(s)) {
                casHead(s, mn); // pop both s and m
                return (mode == REQUEST) ? m.item : s.item;
              } else // lost match
                s.casNext(m, mn); // help unlink
            }
          }
        } else { // help a fulfiller
          SNode m = h.next; // m is h's match
          if (m == null) // waiter is gone
            casHead(h, null); // pop fulfilling node
          else {
            SNode mn = m.next;
            if (m.tryMatch(h)) // help match
              casHead(h, mn); // pop both h and m
            else // lost match
              h.casNext(m, mn); // help unlink
          }
        }
      }
    }

    /**
     * Spins/blocks until node s is matched by a fulfill operation.
     *
     * @param s the waiting node
     * @param timed true if timed wait
     * @param nanos timeout value
     * @return matched node, or s if cancelled
     */
    SNode awaitFulfill(SNode s, boolean timed, long nanos) {
      /*
       * When a node/thread is about to block, it sets its waiter
       * field and then rechecks state at least one more time
       * before actually parking, thus covering race vs
       * fulfiller noticing that waiter is non-null so should be
       * woken.
       *
       * When invoked by nodes that appear at the point of call
       * to be at the head of the stack, calls to park are
       * preceded by spins to avoid blocking when producers and
       * consumers are arriving very close in time.  This can
       * happen enough to bother only on multiprocessors.
       *
       * The order of checks for returning out of main loop
       * reflects fact that interrupts have precedence over
       * normal returns, which have precedence over
       * timeouts. (So, on timeout, one last check for match is
       * done before giving up.) Except that calls from untimed
       * SynchronousQueue.{poll/offer} don't check interrupts
       * and don't wait at all, so are trapped in transfer
       * method rather than calling awaitFulfill.
       */
      long lastTime = timed ? System.nanoTime() : 0;
      Thread w = Thread.currentThread();
      SNode h = head;
      int spins = (shouldSpin(s) ? (timed ? maxTimedSpins : maxUntimedSpins) : 0);
      for (; ; ) {
        if (w.isInterrupted()) s.tryCancel();
        SNode m = s.match;
        if (m != null) return m;
        if (timed) {
          long now = System.nanoTime();
          nanos -= now - lastTime;
          lastTime = now;
          if (nanos <= 0) {
            s.tryCancel();
            continue;
          }
        }
        if (spins > 0) spins = shouldSpin(s) ? (spins - 1) : 0;
        else if (s.waiter == null) s.waiter = w; // establish waiter so can park next iter
        else if (!timed) LockSupport.park(this);
        else if (nanos > spinForTimeoutThreshold) LockSupport.parkNanos(this, nanos);
      }
    }

    /** Returns true if node s is at head or there is an active fulfiller. */
    boolean shouldSpin(SNode s) {
      SNode h = head;
      return (h == s || h == null || isFulfilling(h.mode));
    }

    /** Unlinks s from the stack. */
    void clean(SNode s) {
      s.item = null; // forget item
      s.waiter = null; // forget thread

      /*
       * At worst we may need to traverse entire stack to unlink
       * s. If there are multiple concurrent calls to clean, we
       * might not see s if another thread has already removed
       * it. But we can stop when we see any node known to
       * follow s. We use s.next unless it too is cancelled, in
       * which case we try the node one past. We don't check any
       * further because we don't want to doubly traverse just to
       * find sentinel.
       */

      SNode past = s.next;
      if (past != null && past.isCancelled()) past = past.next;

      // Absorb cancelled nodes at head
      SNode p;
      while ((p = head) != null && p != past && p.isCancelled()) casHead(p, p.next);

      // Unsplice embedded nodes
      while (p != null && p != past) {
        SNode n = p.next;
        if (n != null && n.isCancelled()) p.casNext(n, n.next);
        else p = n;
      }
    }

    // Unsafe mechanics
    private static final sun.misc.Unsafe UNSAFE = sun.misc.Unsafe.getUnsafe();
    private static final long headOffset = objectFieldOffset(UNSAFE, "head", TransferStack.class);
  }
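
The comment in awaitFulfill describes the handoff protocol at the heart of both transferers: the waiter publishes itself in a volatile field, rechecks state, and only then parks, while the fulfiller makes the result visible first and unparks at most once. A minimal stand-alone illustration of that handshake (all names here are hypothetical, not JDK code):

  import java.util.concurrent.atomic.AtomicReference;
  import java.util.concurrent.locks.LockSupport;

  public class HandshakeSketch {
    static final AtomicReference<Object> result = new AtomicReference<>();
    static volatile Thread waiter;

    public static void main(String[] args) throws InterruptedException {
      Thread consumer = new Thread(() -> {
        waiter = Thread.currentThread();   // establish waiter so the producer can unpark us
        while (result.get() == null) {     // recheck after publishing waiter, before parking
          LockSupport.park();              // may wake spuriously; the loop re-tests the condition
        }
        System.out.println("matched: " + result.get());
      });
      consumer.start();

      Thread.sleep(50);                    // demo only: give the consumer time to publish itself
      result.set("item");                  // make the match visible first...
      Thread w = waiter;
      if (w != null) {
        waiter = null;                     // ...then waiters need at most one unpark
        LockSupport.unpark(w);
      }
      consumer.join();
    }
  }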