Example no. 1
  /**
   * Spins/yields/blocks until node s is matched or caller gives up.
   *
   * @param s the waiting node
   * @param pred the predecessor of s, or s itself if it has no predecessor, or null if unknown (the
   *     null case does not occur in any current calls but may in possible future extensions)
   * @param e the comparison value for checking match
   * @param timed if true, wait only until timeout elapses
   * @param nanos timeout in nanosecs, used only if timed is true
   * @return matched item, or e if unmatched on interrupt or timeout
   */
  private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) {
    final long deadline = timed ? System.nanoTime() + nanos : 0L;
    Thread w = Thread.currentThread();
    int spins = -1; // initialized after first item and cancel checks
    ThreadLocalRandom randomYields = null; // bound if needed

    for (; ; ) {
      Object item = s.item;
      if (item != e) { // matched
        // assert item != s;
        s.forgetContents(); // avoid garbage
        return LinkedTransferQueue.<E>cast(item);
      }
      if ((w.isInterrupted() || (timed && nanos <= 0)) && s.casItem(e, FORGOTTEN)) { // cancel
        unsplice(pred, s);
        return e;
      }

      if (spins < 0) { // establish spins at/near front
        if ((spins = spinsFor(pred, s.isData)) > 0) randomYields = ThreadLocalRandom.current();
      } else if (spins > 0) { // spin
        --spins;
        if (randomYields.nextInt(CHAINED_SPINS) == 0) Thread.yield(); // occasionally yield
      } else if (s.waiter == null) {
        s.waiter = w; // request unpark then recheck
      } else if (timed) {
        nanos = deadline - System.nanoTime();
        if (nanos > 0L) LockSupport.parkNanos(this, nanos);
      } else {
        LockSupport.park(this);
      }
    }
  }
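A minimal self-contained sketch of the same escalation (spin, occasionally yield, publish a waiter, then park), assuming a plain volatile flag instead of LinkedTransferQueue's node matching; all names here are illustrative:

  import java.util.concurrent.ThreadLocalRandom;
  import java.util.concurrent.locks.LockSupport;

  class SpinThenPark {
    private volatile boolean ready;
    private volatile Thread waiter;

    void await(int maxSpins) {
      int spins = maxSpins;
      while (!ready) {
        if (spins > 0) {
          --spins;
          if (ThreadLocalRandom.current().nextInt(64) == 0) Thread.yield(); // occasionally yield
        } else if (waiter == null) {
          waiter = Thread.currentThread(); // publish waiter, then recheck before parking
        } else {
          LockSupport.park(this); // may return spuriously; the loop rechecks ready
        }
      }
      waiter = null;
    }

    void signal() {
      ready = true; // publish before unpark
      Thread w = waiter;
      if (w != null) LockSupport.unpark(w);
    }
  }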
 public static void waitForRehashToComplete(Cache... caches) {
   // give it 1 second to start rehashing
   // TODO Should look at the last committed view instead and check if it contains all the caches
   LockSupport.parkNanos(TimeUnit.SECONDS.toNanos(1));
   int gracetime = 30000; // 30 seconds?
   long giveup = System.currentTimeMillis() + gracetime;
   for (Cache c : caches) {
     CacheViewsManager cacheViewsManager =
         TestingUtil.extractGlobalComponent(c.getCacheManager(), CacheViewsManager.class);
     RpcManager rpcManager = TestingUtil.extractComponent(c, RpcManager.class);
     while (cacheViewsManager.getCommittedView(c.getName()).getMembers().size() != caches.length) {
       if (System.currentTimeMillis() > giveup) {
         String message =
             String.format(
                 "Timed out waiting for rehash to complete on node %s, expected member list is %s, current member list is %s!",
                 rpcManager.getAddress(),
                 Arrays.toString(caches),
                 cacheViewsManager.getCommittedView(c.getName()));
         log.error(message);
         throw new RuntimeException(message);
       }
       LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
     }
     log.trace("Node " + rpcManager.getAddress() + " finished rehash task.");
   }
 }
 @Override
 public void run() {
   int m;
   do {
     while (waiting) {
       LockSupport.park();
     }
     m = message;
     waiting = true;
     next.message = m - 1;
     next.waiting = false;
     LockSupport.unpark(next);
   } while (m > 0);
 }
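For context, a self-contained two-thread version of the token-passing handoff above; the field names mirror the snippet, but this reconstruction is an illustrative sketch, not the original benchmark harness:

  import java.util.concurrent.locks.LockSupport;

  public class PingPong {
    static final class Worker extends Thread {
      volatile boolean waiting = true;
      volatile int message;
      Worker next; // wired up before start()

      @Override
      public void run() {
        int m;
        do {
          while (waiting) LockSupport.park(); // tolerates spurious returns
          m = message;
          waiting = true;
          next.message = m - 1;
          next.waiting = false;
          LockSupport.unpark(next);
        } while (m > 0);
      }
    }

    public static void main(String[] args) throws InterruptedException {
      Worker a = new Worker(), b = new Worker();
      a.next = b;
      b.next = a;
      a.start();
      b.start();
      a.message = 10; // inject the token
      a.waiting = false;
      LockSupport.unpark(a);
      a.join();
      b.join();
    }
  }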
  // Can't throw.
  private void cancelConnectionWaiterLocked(ConnectionWaiter waiter) {
    if (waiter.mAssignedConnection != null || waiter.mException != null) {
      // Waiter is done waiting but has not woken up yet.
      return;
    }

    // Waiter must still be waiting.  Dequeue it.
    ConnectionWaiter predecessor = null;
    ConnectionWaiter current = mConnectionWaiterQueue;
    while (current != waiter) {
      assert current != null;
      predecessor = current;
      current = current.mNext;
    }
    if (predecessor != null) {
      predecessor.mNext = waiter.mNext;
    } else {
      mConnectionWaiterQueue = waiter.mNext;
    }

    // Send the waiter an exception and unpark it.
    waiter.mException = new OperationCanceledException();
    LockSupport.unpark(waiter.mThread);

    // Check whether removing this waiter will enable other waiters to make progress.
    wakeConnectionWaitersLocked();
  }
  // Can't throw.
  private void wakeConnectionWaitersLocked() {
    // Unpark all waiters that have requests that we can fulfill.
    // This method is designed to not throw runtime exceptions, although we might send
    // a waiter an exception for it to rethrow.
    ConnectionWaiter predecessor = null;
    ConnectionWaiter waiter = mConnectionWaiterQueue;
    boolean primaryConnectionNotAvailable = false;
    boolean nonPrimaryConnectionNotAvailable = false;
    while (waiter != null) {
      boolean unpark = false;
      if (!mIsOpen) {
        unpark = true;
      } else {
        try {
          SQLiteConnection connection = null;
          if (!waiter.mWantPrimaryConnection && !nonPrimaryConnectionNotAvailable) {
            connection =
                tryAcquireNonPrimaryConnectionLocked(
                    waiter.mSql, waiter.mConnectionFlags); // might throw
            if (connection == null) {
              nonPrimaryConnectionNotAvailable = true;
            }
          }
          if (connection == null && !primaryConnectionNotAvailable) {
            connection = tryAcquirePrimaryConnectionLocked(waiter.mConnectionFlags); // might throw
            if (connection == null) {
              primaryConnectionNotAvailable = true;
            }
          }
          if (connection != null) {
            waiter.mAssignedConnection = connection;
            unpark = true;
          } else if (nonPrimaryConnectionNotAvailable && primaryConnectionNotAvailable) {
            // There are no connections available and the pool is still open.
            // We cannot fulfill any more connection requests, so stop here.
            break;
          }
        } catch (RuntimeException ex) {
          // Let the waiter handle the exception from acquiring a connection.
          waiter.mException = ex;
          unpark = true;
        }
      }

      final ConnectionWaiter successor = waiter.mNext;
      if (unpark) {
        if (predecessor != null) {
          predecessor.mNext = successor;
        } else {
          mConnectionWaiterQueue = successor;
        }
        waiter.mNext = null;

        LockSupport.unpark(waiter.mThread);
      } else {
        predecessor = waiter;
      }
      waiter = successor;
    }
  }
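The consuming side of this handoff is not shown above; a hedged sketch of how such a waiter record is typically drained (park until the pool publishes a connection or an exception), with illustrative names rather than the actual Android implementation:

  import java.util.concurrent.locks.LockSupport;

  final class WaiterHandoff {
    static final class Waiter {
      final Thread thread = Thread.currentThread(); // created by the waiting thread
      volatile Object assignedConnection;           // set by the pool before unpark
      volatile RuntimeException exception;          // or a failure for us to rethrow
    }

    static Object awaitAssignment(Waiter waiter) {
      for (; ; ) {
        Object connection = waiter.assignedConnection;
        if (connection != null) return connection;
        RuntimeException ex = waiter.exception;
        if (ex != null) throw ex; // rethrow what the pool handed us
        LockSupport.park(waiter); // may return spuriously; the loop rechecks
      }
    }

    static void assign(Waiter waiter, Object connection) {
      waiter.assignedConnection = connection; // publish before unpark
      LockSupport.unpark(waiter.thread);
    }
  }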
  /** @see Sequencer#next(int) */
  @Override
  public long next(int n) {
    if (n < 1) {
      throw new IllegalArgumentException("n must be > 0");
    }

    long nextValue = this.nextValue;

    long nextSequence = nextValue + n;
    long wrapPoint = nextSequence - bufferSize;
    long cachedGatingSequence = this.cachedValue;

    if (wrapPoint > cachedGatingSequence || cachedGatingSequence > nextValue) {
      long minSequence;
      while (wrapPoint > (minSequence = Sequencer.getMinimumSequence(gatingSequences, nextValue))) {
        if (spinObserver != null) {
          spinObserver.accept(null);
        }
        LockSupport.parkNanos(1L); // TODO: Use waitStrategy to spin?
      }

      this.cachedValue = minSequence;
    }

    this.nextValue = nextSequence;

    return nextSequence;
  }
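A worked example of the wrap-point arithmetic above, assuming bufferSize = 8 and a single gating consumer (values chosen for illustration):

  long nextValue = 13, n = 2, bufferSize = 8;
  long nextSequence = nextValue + n;          // 15
  long wrapPoint = nextSequence - bufferSize; // 7: claiming 15 reuses slot 7's ring position
  long slowestConsumer = 6;                   // minimum of the gating sequences
  boolean mustWait = wrapPoint > slowestConsumer; // true: the consumer hasn't passed slot 7 yet

The producer parks in 1 ns increments until the slowest consumer reaches sequence 7 or beyond, then caches that value so subsequent claims can often skip the gating-sequence scan entirely.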
  @Override
  public void generate(int level) {
    File file = new File("outputfile");
    while (true) {

      for (long i = 0; i < 1000 * level; i++) {

        try {

          BufferedWriter writer = new BufferedWriter(new FileWriter(file));
          for (long j = 0; j < 1000; j++) writer.write("1234567890qwertyuiopasdfghjklzxcvbvnm");
          writer.close();

          Scanner scanner = new Scanner(new FileReader(file));
          while (scanner.hasNextLine()) scanner.nextLine();
          scanner.close();

        } catch (IOException e) {
          System.out.println("I/O error");
          file.delete();
        }
      }

      workUnits.getAndIncrement();

      java.util.concurrent.locks.LockSupport.parkNanos(1);
    }
  }
Example no. 8
  /**
   * The caller of read doesn't actually do the reading; that is up to the thread in here. Instead,
   * the caller just waits for this thread to have fully read the next buffer, then flips over to
   * that buffer and returns it.
   */
  @Override
  public SectionedCharBuffer read(SectionedCharBuffer buffer, int from) throws IOException {
    // are we still healthy and all that?
    assertHealthy();

    // wait until thread has made data available
    while (!hasReadAhead) {
      parkAWhile();
      assertHealthy();
    }

    // flip the buffers
    SectionedCharBuffer resultBuffer = theOtherBuffer;
    buffer.compact(resultBuffer, from);
    theOtherBuffer = buffer;

    // handle source notifications that have happened
    if (newSourceDescription != null) {
      sourceDescription = newSourceDescription;
      // At this point the new source is official, so tell that to our external monitors
      for (SourceMonitor monitor : sourceMonitors) {
        monitor.notify(newSourceDescription);
      }
      newSourceDescription = null;
    }

    // wake up the reader... there's stuff to do, data to read
    hasReadAhead = false;
    LockSupport.unpark(this);
    return resultBuffer;
  }
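A hedged sketch of the background half of this double-buffer scheme, which the snippet doesn't show: the read-ahead thread fills the spare buffer, publishes hasReadAhead, and parks until read() flips the buffers and unparks it. Names and structure are illustrative assumptions, not the actual implementation:

  // Assumed to run in the read-ahead thread itself (this).
  public void run() {
    while (!closed) {                           // hypothetical shutdown flag
      fillFromSource(theOtherBuffer);           // hypothetical: read ahead into the spare buffer
      hasReadAhead = true;                      // publish to the consumer in read()
      while (hasReadAhead && !closed) {
        LockSupport.parkNanos(this, 1_000_000); // wait for the consumer's unpark, recheck periodically
      }
    }
  }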
 @Test
 public void testTransactionAtomicity_whenMultiMapGetIsUsed_withTransaction()
     throws InterruptedException {
   final HazelcastInstance hz = Hazelcast.newHazelcastInstance(createConfigWithDummyTxService());
   final String name = HazelcastTestSupport.generateRandomString(5);
   Thread producerThread = startProducerThread(hz, name);
   try {
     IQueue<String> q = hz.getQueue(name);
     for (int i = 0; i < 1000; i++) {
       String id = q.poll();
       if (id != null) {
         TransactionContext tx = hz.newTransactionContext();
         try {
           tx.beginTransaction();
           TransactionalMultiMap<Object, Object> multiMap = tx.getMultiMap(name);
           Collection<Object> values = multiMap.get(id);
           assertFalse(values.isEmpty());
           multiMap.remove(id);
           tx.commitTransaction();
         } catch (TransactionException e) {
           tx.rollbackTransaction();
           e.printStackTrace();
         }
       } else {
         LockSupport.parkNanos(100);
       }
     }
   } finally {
     stopProducerThread(producerThread);
   }
 }
Example no. 10
    protected void await(boolean throwInterrupt) throws InterruptedException {
      if (!signaled.get()) {
        lock.acquired = false;
        sendAwaitConditionRequest(lock.name, lock.owner);
        boolean interrupted = false;
        while (!signaled.get()) {
          parker.set(Thread.currentThread());
          LockSupport.park(this);

          if (Thread.interrupted()) {
            // If we were interrupted and haven't received a response yet then we try to
            // clean up the lock request and throw the exception
            if (!signaled.get()) {
              sendDeleteAwaitConditionRequest(lock.name, lock.owner);
              throw new InterruptedException();
            }
            // In the case that we were signaled and interrupted
            // we want to return the signal but still interrupt
            // our thread
            interrupted = true;
          }
        }
        if (interrupted) Thread.currentThread().interrupt();
      }

      // We reset the signal as if it was not released.  This way, if the
      // condition is reused again but the client condition isn't lost,
      // we won't think we were signaled immediately
      signaled.set(false);
    }
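A minimal self-contained version of the same interrupt discipline (loop on the condition; if interrupted after being signaled, consume the signal but restore the interrupt status), assuming a simple AtomicBoolean rather than the distributed lock machinery above:

  import java.util.concurrent.atomic.AtomicBoolean;
  import java.util.concurrent.locks.LockSupport;

  final class Signal {
    private final AtomicBoolean signaled = new AtomicBoolean();
    private volatile Thread parker;

    void await() throws InterruptedException {
      boolean interrupted = false;
      while (!signaled.get()) {
        parker = Thread.currentThread();
        LockSupport.park(this);
        if (Thread.interrupted()) {
          if (!signaled.get()) throw new InterruptedException(); // gave up before the signal arrived
          interrupted = true; // signaled and interrupted: keep the signal, remember the interrupt
        }
      }
      signaled.set(false); // consume, so reuse doesn't see a stale signal
      if (interrupted) Thread.currentThread().interrupt();
    }

    void signal() {
      signaled.set(true);
      Thread t = parker;
      if (t != null) LockSupport.unpark(t);
    }
  }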
Example no. 11
 @Override
 public boolean offer(WaitStrategy.Offerable o) throws InterruptedException {
   while (!o.offer()) {
      LockSupport.parkNanos(1L);
   }
   return true;
 }
Example no. 12
 public static void main(String[] args) throws InterruptedException {
   thread1.start();
   Thread.sleep(100);
   thread2.start();
   thread1.interrupt();
   LockSupport.unpark(thread2);
 }
Example no. 13
  public void await() {

    long spin = 1;
    while (!done) {
      LockSupport.parkNanos(spin++);
    }
  }
Example no. 14
    private boolean sendMessages(
        RingBuffer<byte[]> ringBuffer, long messagesPerSecond, int runtimeSeconds)
        throws InterruptedException {
      LOGGER.info("Rate: " + messagesPerSecond + ", for " + runtimeSeconds + "s");

      long runtimeNanos = TimeUnit.SECONDS.toNanos(runtimeSeconds);

      long t0 = System.nanoTime();
      long delta = 0;
      long sent = 0;

      try {
        do {
          delta = System.nanoTime() - t0;
          long shouldHaveSent = (messagesPerSecond * delta) / 1000000000;

          for (; sent < shouldHaveSent; sent++) {
            if (!send(ringBuffer)) {
              return false;
            }
          }

          LockSupport.parkNanos(1);
        } while (delta <= runtimeNanos);

        Thread.sleep(1000);
        return ringBuffer.hasAvailableCapacity(ringBuffer.getBufferSize());

      } finally {
        while (!ringBuffer.hasAvailableCapacity(ringBuffer.getBufferSize())) {
          Thread.sleep(1000);
        }
      }
    }
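A worked example of the pacing arithmetic in the loop above (numbers chosen for illustration):

  long messagesPerSecond = 50_000;
  long delta = 200_000_000;                                       // 0.2 s elapsed, in nanos
  long shouldHaveSent = (messagesPerSecond * delta) / 1000000000; // = 10_000

So after 0.2 s at 50k msg/s the sender tops the ring buffer up to 10,000 published messages, parks for a nanosecond, and re-checks; the brief park keeps the loop from spinning flat out between catch-up bursts.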
Example no. 15
 // Handle the no-data case, so an empty poll loop doesn't hang spinning
 private void applyWait(int fullTimes) {
   int newFullTimes = fullTimes > maxFullTimes ? maxFullTimes : fullTimes;
    if (fullTimes <= 3) { // up to 3 times
     Thread.yield();
    } else { // more than 3 times: sleep, capped at 10ms
     LockSupport.parkNanos(1000 * 1000L * newFullTimes);
   }
 }
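A hedged sketch of how such a backoff is typically driven, with a hypothetical non-blocking poll() source; the count of consecutive empty polls resets as soon as data arrives:

  int fullTimes = 0;
  while (running) {           // hypothetical shutdown flag
    Object data = poll();     // hypothetical non-blocking source
    if (data == null) {
      applyWait(++fullTimes); // escalate: yield first, then park up to the cap
    } else {
      fullTimes = 0;          // got data: stay hot
      process(data);          // hypothetical handler
    }
  }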
Example no. 16
 protected void pause(int delayNS) {
   if (delayNS < 1) return;
   long start = System.nanoTime();
   if (delayNS >= 1000 * 1000) LockSupport.parkNanos(delayNS - 1000 * 1000); // only ms accuracy.
   while (System.nanoTime() - start < delayNS) {
     Thread.yield();
   }
 }
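The split above trades CPU for precision: parkNanos is only reliable to roughly a millisecond, so any longer delay is parked a millisecond short and the remainder is burned with yield against System.nanoTime(). For example:

  pause(2_500_000); // ~1.5 ms parked, then ~1 ms of yield-spinning for sub-ms accuracy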
Example no. 17
 /**
  * Spins/blocks until node s is matched by a fulfill operation.
  *
  * @param s the waiting node
  * @param timed true if timed wait
  * @param nanos timeout value
  * @return matched node, or s if cancelled
  */
 SNode awaitFulfill(SNode s, boolean timed, long nanos) {
   /*
    * When a node/thread is about to block, it sets its waiter
    * field and then rechecks state at least one more time
    * before actually parking, thus covering race vs
    * fulfiller noticing that waiter is non-null so should be
    * woken.
    *
    * When invoked by nodes that appear at the point of call
    * to be at the head of the stack, calls to park are
    * preceded by spins to avoid blocking when producers and
    * consumers are arriving very close in time.  This can
    * happen enough to bother only on multiprocessors.
    *
    * The order of checks for returning out of main loop
    * reflects fact that interrupts have precedence over
    * normal returns, which have precedence over
    * timeouts. (So, on timeout, one last check for match is
    * done before giving up.) Except that calls from untimed
    * SynchronousQueue.{poll/offer} don't check interrupts
    * and don't wait at all, so are trapped in transfer
    * method rather than calling awaitFulfill.
    */
   long lastTime = timed ? System.nanoTime() : 0;
   Thread w = Thread.currentThread();
   SNode h = head;
   int spins = (shouldSpin(s) ? (timed ? maxTimedSpins : maxUntimedSpins) : 0);
   for (; ; ) {
     if (w.isInterrupted()) s.tryCancel();
     SNode m = s.match;
     if (m != null) return m;
     if (timed) {
       long now = System.nanoTime();
       nanos -= now - lastTime;
       lastTime = now;
       if (nanos <= 0) {
         s.tryCancel();
         continue;
       }
     }
     if (spins > 0) spins = shouldSpin(s) ? (spins - 1) : 0;
     else if (s.waiter == null) s.waiter = w; // establish waiter so can park next iter
     else if (!timed) LockSupport.park(this);
     else if (nanos > spinForTimeoutThreshold) LockSupport.parkNanos(this, nanos);
   }
 }
  public void destroy() {
    _isClosed = true;

    wake();

    Thread thread = _thread;

    if (thread != null) LockSupport.unpark(thread);
  }
 @Override
 public int idle(final int idleCounter) {
   if (idleCounter > 200) {
     LockSupport.parkNanos(1L);
   } else if (idleCounter > 100) {
     Thread.yield();
   }
   return idleCounter + 1;
 }
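Typical usage of such an escalating idle strategy, with a hypothetical tryDoWork() unit of work; resetting the counter whenever work is found means the strategy only escalates across consecutive idle passes:

  int idleCounter = 0;
  while (running) {    // hypothetical shutdown flag
    if (tryDoWork()) { // hypothetical unit of work
      idleCounter = 0; // had work: stay hot
    } else {
      idleCounter = idle(idleCounter); // busy-spin to 100 passes, yield to 200, then park
    }
  }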
Example no. 20
 /** Tries to artificially match a data node -- used by remove. */
 final boolean tryMatchData() {
   // assert isData;
   Object x = item;
   if (x != null && x != FORGOTTEN && casItem(x, null)) {
     LockSupport.unpark(waiter);
     return true;
   }
   return false;
 }
Example no. 21
  public T get(long timeout, TimeUnit unit) {
    if (isDone()) {
      return getResultValue();
    }

    Thread thread = Thread.currentThread();

    // _thread = thread;

    long expires = unit.toMillis(timeout) + System.currentTimeMillis();

    while (true) {
      if (isDone()) {
        return getResultValue();
      } else if (_state == FutureState.ASYNC) {
        Result<Object> chain = _chain;
        Object chainValue = _chainValue;
        _chain = null;
        _chainValue = null;

        _state = FutureState.INIT;

        // _thread = null;

        chain.completeFuture(chainValue);

        /*
        if (isDone()) {
          return getResultValue();
        }
        */

        // _thread = thread;
      } else {
        if (ServiceRef.flushOutboxAndExecuteLast()) {
          // if pending messages, continue to process them
          continue;
        }

        // ServiceRef.flushOutbox();

        _thread = thread;

        if (_state.isParkRequired()) {
          if (expires < System.currentTimeMillis()) {
            _thread = null;

            throw new ServiceExceptionFutureTimeout("future timeout " + timeout + " " + unit);
          }

          LockSupport.parkUntil(expires);
        }

        _thread = null;
      }
    }
  }
  @Override
  public final void run() {
    String oldName = null;

    try {
      _thread = Thread.currentThread();
      _thread.setContextClassLoader(_classLoader);
      oldName = _thread.getName();
      _thread.setName(getThreadName());

      onThreadStart();

      long now = getCurrentTimeActual();

      long expires = now + _workerIdleTimeout;

      do {
        while (_taskState.getAndSet(TASK_SLEEP) == TASK_READY) {
          _thread.setContextClassLoader(_classLoader);

          long delta = runTask();

          now = getCurrentTimeActual();

          if (delta < 0) {
            expires = now + _workerIdleTimeout;
          } else {
            expires = now + delta;
          }
        }

        if (isClosed()) return;

        if (_taskState.compareAndSet(TASK_SLEEP, TASK_PARK)) {
          Thread.interrupted();
          LockSupport.parkUntil(expires);

          if (isPermanent()) _taskState.set(TASK_READY);
        }
      } while (_taskState.get() == TASK_READY || isPermanent() || getCurrentTimeActual() < expires);
    } catch (Throwable e) {
      WarningService.sendCurrentWarning(this, e);
      log.log(Level.WARNING, e.toString(), e);
    } finally {
      Thread thread = _thread;
      _thread = null;

      _isActive.set(false);

      if (_taskState.get() == TASK_READY) wake();

      onThreadComplete();

      if (thread != null && oldName != null) thread.setName(oldName);
    }
  }
Example no. 23
  @Override
  public T take(WaitStrategy.Takeable<T> t) throws InterruptedException {
    T result;

    while ((result = t.take()) == null) {
      LockSupport.parkNanos(1L);
    }

    return result;
  }
Example no. 24
 /**
  * Tries to match node s to this node, if so, waking up thread. Fulfillers call tryMatch to
  * identify their waiters. Waiters block until they have been matched.
  *
  * @param s the node to match
  * @return true if successfully matched to s
  */
 boolean tryMatch(SNode s) {
   if (match == null && UNSAFE.compareAndSwapObject(this, matchOffset, null, s)) {
     Thread w = waiter;
     if (w != null) { // waiters need at most one unpark
       waiter = null;
       LockSupport.unpark(w);
     }
     return true;
   }
   return match == s;
 }
Example no. 25
 @Override
 public void run() {
   synchronized (u) {
     System.out.println("in" + getName());
     LockSupport.park();
     if (Thread.interrupted()) {
       System.out.println(getName() + ": is interrupted");
     }
   }
   System.out.println("execute done");
 }
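Worth noting about this example: unlike Object.wait, LockSupport.park does not release the monitor held on u, so a second thread entering synchronized (u) stays blocked until the parked thread leaves the block. A minimal self-contained demonstration (names are illustrative):

  import java.util.concurrent.locks.LockSupport;

  public class ParkHoldsMonitor {
    static final Object u = new Object();

    public static void main(String[] args) throws InterruptedException {
      Thread t1 = new Thread(() -> {
        synchronized (u) {
          System.out.println("t1 parked, still holding u");
          LockSupport.park(); // returns when interrupted below
          if (Thread.interrupted()) System.out.println("t1 interrupted");
        }
      });
      Thread t2 = new Thread(() -> {
        synchronized (u) { // blocks until t1 exits its synchronized block
          System.out.println("t2 finally acquired u");
        }
      });
      t1.start();
      Thread.sleep(100); // let t1 park while holding u
      t2.start();
      t1.interrupt();    // park() returns on interrupt, so t1 releases u
      t1.join();
      t2.join();
    }
  }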
  @Benchmark
  @Override
  public final void benchmark() throws InterruptedException {
    first.message = ringSize;
    first.waiting = false;
    LockSupport.unpark(first);

    for (Worker worker : workers) {
      worker.join();
    }
  }
Example no. 27
  @Override
  public void ok(T result) {
    _value = result;

    _state = FutureState.COMPLETE;
    Thread thread = _thread;

    if (thread != null) {
      LockSupport.unpark(thread);
    }
  }
Example no. 28
  @Override
  public void completeFuture(T value) {
    _value = value;

    _state = FutureState.COMPLETE;
    Thread thread = _thread;

    if (thread != null) {
      LockSupport.unpark(thread);
    }
  }
Example no. 29
  @Override
  public <U> void completeFuture(ResultChain<U> chain, U chainValue) {
    _chain = (Result) chain;
    _chainValue = chainValue;

    _state = FutureState.ASYNC;
    Thread thread = _thread;

    if (thread != null) {
      LockSupport.unpark(thread);
    }
  }
 public static void waitForRehashToComplete(Cache cache, int groupSize) {
   LockSupport.parkNanos(TimeUnit.SECONDS.toNanos(1));
   int gracetime = 30000; // 30 seconds?
   long giveup = System.currentTimeMillis() + gracetime;
   CacheViewsManager cacheViewsManager =
       TestingUtil.extractGlobalComponent(cache.getCacheManager(), CacheViewsManager.class);
   RpcManager rpcManager = TestingUtil.extractComponent(cache, RpcManager.class);
   while (cacheViewsManager.getCommittedView(cache.getName()).getMembers().size() != groupSize) {
     if (System.currentTimeMillis() > giveup) {
       String message =
           String.format(
               "Timed out waiting for rehash to complete on node %s, expected member count %s, current member count is %s!",
               rpcManager.getAddress(),
               groupSize,
               cacheViewsManager.getCommittedView(cache.getName()));
       log.error(message);
       throw new RuntimeException(message);
     }
     LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
   }
   log.trace("Node " + rpcManager.getAddress() + " finished rehash task.");
 }