public static void waitForRehashToComplete(Cache... caches) {
    // give it 1 second to start rehashing
    // TODO Should look at the last committed view instead and check if it contains all the caches
    LockSupport.parkNanos(TimeUnit.SECONDS.toNanos(1));
    int gracetime = 30000; // 30 seconds?
    long giveup = System.currentTimeMillis() + gracetime;
    for (Cache c : caches) {
        CacheViewsManager cacheViewsManager =
                TestingUtil.extractGlobalComponent(c.getCacheManager(), CacheViewsManager.class);
        RpcManager rpcManager = TestingUtil.extractComponent(c, RpcManager.class);
        while (cacheViewsManager.getCommittedView(c.getName()).getMembers().size() != caches.length) {
            if (System.currentTimeMillis() > giveup) {
                String message = String.format(
                        "Timed out waiting for rehash to complete on node %s, expected member list is %s, current member list is %s!",
                        rpcManager.getAddress(), Arrays.toString(caches),
                        cacheViewsManager.getCommittedView(c.getName()));
                log.error(message);
                throw new RuntimeException(message);
            }
            LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
        }
        log.trace("Node " + rpcManager.getAddress() + " finished rehash task.");
    }
}
@Override
public final V get(final long timeout, final TimeUnit unit)
        throws InterruptedException, ExecutionException, TimeoutException {
    Object response = registerWaiter(Thread.currentThread(), null);
    if (response != VOID) {
        return resolveAndThrow(response);
    }

    long deadlineNanos = System.nanoTime() + unit.toNanos(timeout);
    boolean interrupted = false;
    try {
        long timeoutNanos = unit.toNanos(timeout);
        while (timeoutNanos > 0) {
            parkNanos(timeoutNanos);
            timeoutNanos = deadlineNanos - System.nanoTime();

            if (isDone()) {
                return resolveAndThrow(state);
            } else if (Thread.interrupted()) {
                interrupted = true;
                onInterruptDetected();
            }
        }
    } finally {
        restoreInterrupt(interrupted);
    }

    unregisterWaiter(Thread.currentThread());
    throw newTimeoutException(timeout, unit);
}
public void await() {
    long spin = 1;
    while (!done) {
        LockSupport.parkNanos(spin++); // back off a little longer on each iteration
    }
}
/**
 * @see Sequencer#next(int)
 */
@Override
public long next(int n) {
    if (n < 1) {
        throw new IllegalArgumentException("n must be > 0");
    }

    long nextValue = this.nextValue;
    long nextSequence = nextValue + n;
    long wrapPoint = nextSequence - bufferSize;
    long cachedGatingSequence = this.cachedValue;

    if (wrapPoint > cachedGatingSequence || cachedGatingSequence > nextValue) {
        long minSequence;
        while (wrapPoint > (minSequence = Sequencer.getMinimumSequence(gatingSequences, nextValue))) {
            if (spinObserver != null) {
                spinObserver.accept(null);
            }
            LockSupport.parkNanos(1L); // TODO: Use waitStrategy to spin?
        }
        this.cachedValue = minSequence;
    }

    this.nextValue = nextSequence;
    return nextSequence;
}
@Override
public void generate(int level) {
    File file = new File("outputfile");
    while (true) {
        for (long i = 0; i < 1000 * level; i++) {
            // try-with-resources so the streams are closed even if an I/O error occurs mid-write
            try (BufferedWriter writer = new BufferedWriter(new FileWriter(file))) {
                for (long j = 0; j < 1000; j++) {
                    writer.write("1234567890qwertyuiopasdfghjklzxcvbvnm");
                }
            } catch (IOException e) {
                System.out.println("I/O error");
                file.delete();
                continue;
            }
            try (Scanner scanner = new Scanner(new FileReader(file))) {
                while (scanner.hasNextLine()) {
                    scanner.nextLine();
                }
            } catch (IOException e) {
                System.out.println("I/O error");
                file.delete();
            }
        }
        workUnits.getAndIncrement();
        java.util.concurrent.locks.LockSupport.parkNanos(1);
    }
}
@Test
public void testTransactionAtomicity_whenMultiMapGetIsUsed_withTransaction()
        throws InterruptedException {
    final HazelcastInstance hz = Hazelcast.newHazelcastInstance(createConfigWithDummyTxService());
    final String name = HazelcastTestSupport.generateRandomString(5);
    Thread producerThread = startProducerThread(hz, name);
    try {
        IQueue<String> q = hz.getQueue(name);
        for (int i = 0; i < 1000; i++) {
            String id = q.poll();
            if (id != null) {
                TransactionContext tx = hz.newTransactionContext();
                try {
                    tx.beginTransaction();
                    TransactionalMultiMap<Object, Object> multiMap = tx.getMultiMap(name);
                    Collection<Object> values = multiMap.get(id);
                    assertFalse(values.isEmpty());
                    multiMap.remove(id);
                    tx.commitTransaction();
                } catch (TransactionException e) {
                    tx.rollbackTransaction();
                    e.printStackTrace();
                }
            } else {
                LockSupport.parkNanos(100);
            }
        }
    } finally {
        stopProducerThread(producerThread);
    }
}
@Override
public boolean offer(WaitStrategy.Offerable o) throws InterruptedException {
    while (!o.offer()) {
        LockSupport.parkNanos(1L);
    }
    return true;
}
/**
 * Spins/yields/blocks until node s is matched or caller gives up.
 *
 * @param s the waiting node
 * @param pred the predecessor of s, or s itself if it has no predecessor, or null if unknown
 *     (the null case does not occur in any current calls but may in possible future extensions)
 * @param e the comparison value for checking match
 * @param timed if true, wait only until timeout elapses
 * @param nanos timeout in nanosecs, used only if timed is true
 * @return matched item, or e if unmatched on interrupt or timeout
 */
private E awaitMatch(Node s, Node pred, E e, boolean timed, long nanos) {
    final long deadline = timed ? System.nanoTime() + nanos : 0L;
    Thread w = Thread.currentThread();
    int spins = -1; // initialized after first item and cancel checks
    ThreadLocalRandom randomYields = null; // bound if needed

    for (;;) {
        Object item = s.item;
        if (item != e) { // matched
            // assert item != s;
            s.forgetContents(); // avoid garbage
            return LinkedTransferQueue.<E>cast(item);
        }
        if ((w.isInterrupted() || (timed && nanos <= 0)) && s.casItem(e, FORGOTTEN)) { // cancel
            unsplice(pred, s);
            return e;
        }

        if (spins < 0) { // establish spins at/near front
            if ((spins = spinsFor(pred, s.isData)) > 0)
                randomYields = ThreadLocalRandom.current();
        } else if (spins > 0) { // spin
            --spins;
            if (randomYields.nextInt(CHAINED_SPINS) == 0)
                Thread.yield(); // occasionally yield
        } else if (s.waiter == null) {
            s.waiter = w; // request unpark then recheck
        } else if (timed) {
            nanos = deadline - System.nanoTime();
            if (nanos > 0L)
                LockSupport.parkNanos(this, nanos);
        } else {
            LockSupport.park(this);
        }
    }
}
private boolean sendMessages(RingBuffer<byte[]> ringBuffer, long messagesPerSecond, int runtimeSeconds)
        throws InterruptedException {
    LOGGER.info("Rate: " + messagesPerSecond + ", for " + runtimeSeconds + "s");
    long runtimeNanos = TimeUnit.SECONDS.toNanos(runtimeSeconds);
    long t0 = System.nanoTime();
    long delta = 0;
    long sent = 0;
    try {
        do {
            delta = System.nanoTime() - t0;
            long shouldHaveSent = (messagesPerSecond * delta) / 1000000000;

            for (; sent < shouldHaveSent; sent++) {
                if (!send(ringBuffer)) {
                    return false;
                }
            }

            LockSupport.parkNanos(1);
        } while (delta <= runtimeNanos);

        Thread.sleep(1000);
        return ringBuffer.hasAvailableCapacity(ringBuffer.getBufferSize());
    } finally {
        while (!ringBuffer.hasAvailableCapacity(ringBuffer.getBufferSize())) {
            Thread.sleep(1000);
        }
    }
}
// Handle the no-data case so an empty poll loop does not hang spinning.
private void applyWait(int fullTimes) {
    int newFullTimes = fullTimes > maxFullTimes ? maxFullTimes : fullTimes;
    if (fullTimes <= 3) { // up to 3 consecutive misses: just yield
        Thread.yield();
    } else { // more than 3 misses: park for 1ms per miss, sleeping at most 10ms
        LockSupport.parkNanos(1000 * 1000L * newFullTimes);
    }
}
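A backoff like the one above is typically driven by a polling consumer that escalates on consecutive misses and resets on a hit. Below is a minimal self-contained sketch of such a loop; the BlockingQueue source and the maxFullTimes = 10 cap are assumptions for illustration, not part of the original code.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.locks.LockSupport;

// Illustrative polling consumer driving an applyWait-style backoff.
public class BackoffConsumer implements Runnable {
    private static final int MAX_FULL_TIMES = 10; // assumed cap
    private final BlockingQueue<String> queue = new ArrayBlockingQueue<>(1024);
    private volatile boolean running = true;

    @Override
    public void run() {
        int fullTimes = 0;
        while (running) {
            String event = queue.poll(); // non-blocking poll
            if (event == null) {
                applyWait(++fullTimes); // miss: escalate the backoff
            } else {
                fullTimes = 0; // hit: reset the backoff and process the event
            }
        }
    }

    private void applyWait(int fullTimes) {
        int capped = Math.min(fullTimes, MAX_FULL_TIMES);
        if (fullTimes <= 3) {
            Thread.yield();
        } else {
            LockSupport.parkNanos(1_000_000L * capped); // 1ms per miss, at most ~10ms
        }
    }
}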
protected void pause(int delayNS) {
    if (delayNS < 1)
        return;
    long start = System.nanoTime();
    // parkNanos only has roughly millisecond accuracy, so park for all but the
    // last millisecond and busy-yield through the remainder
    if (delayNS >= 1000 * 1000)
        LockSupport.parkNanos(delayNS - 1000 * 1000);
    while (System.nanoTime() - start < delayNS) {
        Thread.yield();
    }
}
@Override
public int idle(final int idleCounter) {
    if (idleCounter > 200) {
        LockSupport.parkNanos(1L);
    } else if (idleCounter > 100) {
        Thread.yield();
    }
    return idleCounter + 1;
}
@Override
public T take(WaitStrategy.Takeable<T> t) throws InterruptedException {
    T result;
    while ((result = t.take()) == null) {
        LockSupport.parkNanos(1L);
    }
    return result;
}
/**
 * Join which can be called exactly once!
 *
 * @return Result
 */
public T join() {
    try {
        long spin = 1;
        while (!done) {
            LockSupport.parkNanos(spin++);
        }
        if (completedExceptionally)
            throw new SimpleReactCompletionException(exception());
        return result();
    } finally {
        markComplete();
    }
}
public static void waitForRehashToComplete(Cache cache, int groupSize) {
    LockSupport.parkNanos(TimeUnit.SECONDS.toNanos(1));
    int gracetime = 30000; // 30 seconds?
    long giveup = System.currentTimeMillis() + gracetime;
    CacheViewsManager cacheViewsManager =
            TestingUtil.extractGlobalComponent(cache.getCacheManager(), CacheViewsManager.class);
    RpcManager rpcManager = TestingUtil.extractComponent(cache, RpcManager.class);
    while (cacheViewsManager.getCommittedView(cache.getName()).getMembers().size() != groupSize) {
        if (System.currentTimeMillis() > giveup) {
            String message = String.format(
                    "Timed out waiting for rehash to complete on node %s, expected member count %s, current member count is %s!",
                    rpcManager.getAddress(), groupSize,
                    cacheViewsManager.getCommittedView(cache.getName()));
            log.error(message);
            throw new RuntimeException(message);
        }
        LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
    }
    log.trace("Node " + rpcManager.getAddress() + " finished rehash task.");
}
public long waitOnLock(final long sequence, Sequence cursorSequence,
        final Sequence dependentSequence, final SequenceBarrier barrier)
        throws AlertException, InterruptedException {
    long availableSequence;
    while ((availableSequence = dependentSequence.get()) < sequence) {
        LockSupport.parkNanos(1);
    }
    return availableSequence;
}
@Override
public long waitForNext() {
    // set a random interval on the first run
    if (nextNanos == 0) {
        nextNanos = nanoTime() + nextLong(0, intervalNanos);
    }
    long delayNanos = nextNanos - nanoTime();
    if (delayNanos > 0) {
        parkNanos(delayNanos);
    }
    long expectedStartNanos = nextNanos;
    nextNanos += intervalNanos;
    return accountForCoordinatedOmission ? expectedStartNanos : nanoTime();
}
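Returning the expected start time rather than the actual wake-up time is the usual correction for coordinated omission: if the pacer oversleeps, the stall is charged to the measured latency instead of being silently dropped. A minimal self-contained sketch of a pacer and measurement loop in the same style follows; the 1ms interval and the doOperation() workload are assumptions for illustration, not part of the original source.

import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.locks.LockSupport;

// Illustrative fixed-rate pacer with coordinated-omission correction.
public class PacerExample {
    private static final long INTERVAL_NANOS = 1_000_000L; // assumed target: one op per ms
    private long nextNanos;

    long waitForNext() {
        if (nextNanos == 0) {
            // random initial offset so concurrent pacers don't fire in lockstep
            nextNanos = System.nanoTime() + ThreadLocalRandom.current().nextLong(INTERVAL_NANOS);
        }
        long delayNanos = nextNanos - System.nanoTime();
        if (delayNanos > 0) {
            LockSupport.parkNanos(delayNanos);
        }
        long expectedStartNanos = nextNanos;
        nextNanos += INTERVAL_NANOS;
        return expectedStartNanos; // intended start, not actual wake-up
    }

    public static void main(String[] args) {
        PacerExample pacer = new PacerExample();
        for (int i = 0; i < 10; i++) {
            long intendedStartNanos = pacer.waitForNext();
            doOperation();
            // Any pacer stall is included here instead of being omitted.
            long latencyNanos = System.nanoTime() - intendedStartNanos;
            System.out.println("latency (ns): " + latencyNanos);
        }
    }

    private static void doOperation() {
        LockSupport.parkNanos(100_000L); // stand-in for the workload under test
    }
}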
/**
 * Spins/blocks until node s is matched by a fulfill operation.
 *
 * @param s the waiting node
 * @param timed true if timed wait
 * @param nanos timeout value
 * @return matched node, or s if cancelled
 */
SNode awaitFulfill(SNode s, boolean timed, long nanos) {
    /*
     * When a node/thread is about to block, it sets its waiter
     * field and then rechecks state at least one more time
     * before actually parking, thus covering race vs
     * fulfiller noticing that waiter is non-null so should be
     * woken.
     *
     * When invoked by nodes that appear at the point of call
     * to be at the head of the stack, calls to park are
     * preceded by spins to avoid blocking when producers and
     * consumers are arriving very close in time. This can
     * happen enough to bother only on multiprocessors.
     *
     * The order of checks for returning out of main loop
     * reflects fact that interrupts have precedence over
     * normal returns, which have precedence over
     * timeouts. (So, on timeout, one last check for match is
     * done before giving up.) Except that calls from untimed
     * SynchronousQueue.{poll/offer} don't check interrupts
     * and don't wait at all, so are trapped in transfer
     * method rather than calling awaitFulfill.
     */
    long lastTime = timed ? System.nanoTime() : 0;
    Thread w = Thread.currentThread();
    SNode h = head;
    int spins = (shouldSpin(s) ? (timed ? maxTimedSpins : maxUntimedSpins) : 0);
    for (;;) {
        if (w.isInterrupted())
            s.tryCancel();
        SNode m = s.match;
        if (m != null)
            return m;
        if (timed) {
            long now = System.nanoTime();
            nanos -= now - lastTime;
            lastTime = now;
            if (nanos <= 0) {
                s.tryCancel();
                continue;
            }
        }
        if (spins > 0)
            spins = shouldSpin(s) ? (spins - 1) : 0;
        else if (s.waiter == null)
            s.waiter = w; // establish waiter so can park next iter
        else if (!timed)
            LockSupport.park(this);
        else if (nanos > spinForTimeoutThreshold)
            LockSupport.parkNanos(this, nanos);
    }
}
protected long await(long nanoSeconds) throws InterruptedException {
    long target_nano = System.nanoTime() + nanoSeconds;

    if (!signaled.get()) {
        // We release the lock at the same time as waiting on the condition
        lock.acquired = false;
        sendAwaitConditionRequest(lock.name, lock.owner);

        boolean interrupted = false;
        while (!signaled.get()) {
            long wait_nano = target_nano - System.nanoTime();
            // If we've already waited the max time, break out
            if (wait_nano > 0) {
                parker.set(Thread.currentThread());
                LockSupport.parkNanos(this, wait_nano);

                if (Thread.interrupted()) {
                    // If we were interrupted and haven't received a response yet then we try to
                    // clean up the lock request and throw the exception
                    if (!signaled.get()) {
                        sendDeleteAwaitConditionRequest(lock.name, lock.owner);
                        throw new InterruptedException();
                    }
                    // In the case that we were signaled and interrupted
                    // we want to return the signal but still interrupt our thread
                    interrupted = true;
                }
            } else {
                break;
            }
        }
        if (interrupted)
            Thread.currentThread().interrupt();
    }

    // Reset the flag as if this signal was not released. This way, if the
    // condition is reused but the client condition isn't lost, we won't
    // think we were signaled immediately.
    // If we weren't signaled, then delete our request.
    if (!signaled.getAndSet(false)) {
        sendDeleteAwaitConditionRequest(lock.name, lock.owner);
    }
    return target_nano - System.nanoTime();
}
/**
 * @deprecated Should use {@link #waitForRehashToComplete(org.infinispan.Cache[])} instead; this
 *     is not reliable with merges
 */
public static void waitForInitRehashToComplete(Cache... caches) {
    int gracetime = 30000; // 30 seconds?
    long giveup = System.currentTimeMillis() + gracetime;
    for (Cache c : caches) {
        StateTransferManager stateTransferManager =
                TestingUtil.extractComponent(c, StateTransferManager.class);
        RpcManager rpcManager = TestingUtil.extractComponent(c, RpcManager.class);
        while (!stateTransferManager.isJoinComplete()) {
            if (System.currentTimeMillis() > giveup) {
                String message = "Timed out waiting for join to complete on node "
                        + rpcManager.getAddress() + "!";
                log.error(message);
                throw new RuntimeException(message);
            }
            LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
        }
        log.trace("Node " + rpcManager.getAddress() + " finished join task.");
    }
}
public static void waitForRehashToComplete(Cache... caches) {
    int gracetime = 30000; // 30 seconds?
    long giveup = System.currentTimeMillis() + gracetime;
    for (Cache c : caches) {
        DistributionManagerImpl dmi =
                (DistributionManagerImpl) TestingUtil.extractComponent(c, DistributionManager.class);
        while (dmi.isRehashInProgress()) {
            if (System.currentTimeMillis() > giveup) {
                String message = "Timed out waiting for rehash to complete on node "
                        + dmi.getRpcManager().getAddress() + "!";
                log.error(message);
                throw new RuntimeException(message);
            }
            LockSupport.parkNanos(TimeUnit.SECONDS.toNanos(1));
        }
        log.trace("Node " + dmi.getRpcManager().getAddress() + " finished rehash task.");
    }
}
public void run() {
    while (_isRunnable) {
        if (_quantumCount >= _timeout) {
            _quantumCount = 0;
            try {
                ArrayList<Env> activeEnv = new ArrayList<Env>(_activeEnvSet.keySet());
                for (Env env : activeEnv) {
                    env.updateTimeout();
                }
            } catch (Throwable e) {
                // ignore: a failing Env must not stop the timeout thread
            }
        } else {
            _quantumCount += ENV_TIMEOUT_UPDATE_INTERVAL;
        }

        LockSupport.parkNanos(ENV_TIMEOUT_UPDATE_INTERVAL * 1000000L); // interval is in ms
    }
}
@Test
public void testTransactionAtomicity_whenMultiMapValueCountIsUsed_withoutTransaction()
        throws InterruptedException {
    final HazelcastInstance hz = Hazelcast.newHazelcastInstance(createConfigWithDummyTxService());
    final String name = HazelcastTestSupport.generateRandomString(5);
    Thread producerThread = startProducerThread(hz, name);
    try {
        IQueue<String> q = hz.getQueue(name);
        for (int i = 0; i < 1000; i++) {
            String id = q.poll();
            if (id != null) {
                MultiMap<Object, Object> multiMap = hz.getMultiMap(name);
                assertEquals(1, multiMap.valueCount(id));
                multiMap.remove(id);
            } else {
                LockSupport.parkNanos(100);
            }
        }
    } finally {
        stopProducerThread(producerThread);
    }
}
/**
 * Wait for signal with no spurious wakeup. Exclusive latch must be held, which is still held
 * when method returns.
 *
 * @param node newly allocated node
 * @param nanosTimeout relative nanosecond time to wait; infinite if <0
 * @param nanosEnd absolute nanosecond time to wait until; used only with >0 timeout
 * @return -1 if interrupted, 0 if timed out, 1 if signaled
 */
int await(Latch latch, Node node, long nanosTimeout, long nanosEnd) {
    node.mWaiter = Thread.currentThread();

    Node tail = mTail;
    if (tail == null) {
        mHead = node;
    } else {
        tail.mNext = node;
        node.mPrev = tail;
    }
    mTail = node;

    if (nanosTimeout < 0) {
        while (true) {
            latch.releaseExclusive();
            LockSupport.park();
            latch.acquireExclusive();
            int state = node.resumed(this);
            if (state != 0) {
                return state;
            }
        }
    } else {
        while (true) {
            latch.releaseExclusive();
            LockSupport.parkNanos(nanosTimeout);
            latch.acquireExclusive();
            int state = node.resumed(this);
            if (state != 0) {
                return state;
            }
            if (nanosTimeout == 0 || (nanosTimeout = nanosEnd - System.nanoTime()) <= 0) {
                node.remove(this);
                return 0;
            }
        }
    }
}
/**
 * Spins/blocks until node s is fulfilled.
 *
 * @param s the waiting node
 * @param e the comparison value for checking match
 * @param timed true if timed wait
 * @param nanos timeout value
 * @return matched item, or s if cancelled
 */
Object awaitFulfill(QNode s, Object e, boolean timed, long nanos) {
    /* Same idea as TransferStack.awaitFulfill */
    long lastTime = timed ? System.nanoTime() : 0;
    Thread w = Thread.currentThread();
    int spins = ((head.next == s) ? (timed ? maxTimedSpins : maxUntimedSpins) : 0);
    for (;;) {
        if (w.isInterrupted())
            s.tryCancel(e);
        Object x = s.item;
        if (x != e)
            return x;
        if (timed) {
            long now = System.nanoTime();
            nanos -= now - lastTime;
            lastTime = now;
            if (nanos <= 0) {
                s.tryCancel(e);
                continue;
            }
        }
        if (spins > 0)
            --spins;
        else if (s.waiter == null)
            s.waiter = w;
        else if (!timed)
            LockSupport.park(this);
        else if (nanos > spinForTimeoutThreshold)
            LockSupport.parkNanos(this, nanos);
    }
}
@SuppressWarnings("NestedAssignment")
public static void main(String[] args) throws Exception {
    Robot robot = new Robot();
    int steps = 3;
    while (true) {
        Point p = MouseInfo.getPointerInfo().getLocation();
        move(robot, p.x += steps, p.y);
        // update the pointer location, else you block the user's movement
        // (the pointer would move to the initial location instead of following the user's movement)
        p = MouseInfo.getPointerInfo().getLocation();
        move(robot, p.x, p.y += steps);
        p = MouseInfo.getPointerInfo().getLocation();
        move(robot, p.x -= steps, p.y);
        p = MouseInfo.getPointerInfo().getLocation();
        move(robot, p.x, p.y -= steps);
        LockSupport.parkNanos(TimeUnit.MINUTES.toNanos(2));
    }
}
@Test
public void park() {
    LockSupport.parkNanos(this, TimeUnit.SECONDS.toNanos(10));
}
// Might throw.
private SQLiteConnection waitForConnection(String sql, int connectionFlags,
        CancellationSignal cancellationSignal) {
    final boolean wantPrimaryConnection =
            (connectionFlags & CONNECTION_FLAG_PRIMARY_CONNECTION_AFFINITY) != 0;

    final ConnectionWaiter waiter;
    final int nonce;
    synchronized (mLock) {
        throwIfClosedLocked();

        // Abort if canceled.
        if (cancellationSignal != null) {
            cancellationSignal.throwIfCanceled();
        }

        // Try to acquire a connection.
        SQLiteConnection connection = null;
        if (!wantPrimaryConnection) {
            connection = tryAcquireNonPrimaryConnectionLocked(sql, connectionFlags); // might throw
        }
        if (connection == null) {
            connection = tryAcquirePrimaryConnectionLocked(connectionFlags); // might throw
        }
        if (connection != null) {
            return connection;
        }

        // No connections available. Enqueue a waiter in priority order.
        final int priority = getPriority(connectionFlags);
        final long startTime = SystemClock.uptimeMillis();
        waiter = obtainConnectionWaiterLocked(Thread.currentThread(), startTime, priority,
                wantPrimaryConnection, sql, connectionFlags);
        ConnectionWaiter predecessor = null;
        ConnectionWaiter successor = mConnectionWaiterQueue;
        while (successor != null) {
            if (priority > successor.mPriority) {
                waiter.mNext = successor;
                break;
            }
            predecessor = successor;
            successor = successor.mNext;
        }
        if (predecessor != null) {
            predecessor.mNext = waiter;
        } else {
            mConnectionWaiterQueue = waiter;
        }

        nonce = waiter.mNonce;
    }

    // Set up the cancellation listener.
    if (cancellationSignal != null) {
        cancellationSignal.setOnCancelListener(new CancellationSignal.OnCancelListener() {
            @Override
            public void onCancel() {
                synchronized (mLock) {
                    if (waiter.mNonce == nonce) {
                        cancelConnectionWaiterLocked(waiter);
                    }
                }
            }
        });
    }
    try {
        // Park the thread until a connection is assigned or the pool is closed.
        // Rethrow an exception from the wait, if we got one.
        long busyTimeoutMillis = CONNECTION_POOL_BUSY_MILLIS;
        long nextBusyTimeoutTime = waiter.mStartTime + busyTimeoutMillis;
        for (;;) {
            // Detect and recover from connection leaks.
            if (mConnectionLeaked.compareAndSet(true, false)) {
                synchronized (mLock) {
                    wakeConnectionWaitersLocked();
                }
            }

            // Wait to be unparked (may already have happened), a timeout, or interruption.
            LockSupport.parkNanos(this, busyTimeoutMillis * 1000000L);

            // Clear the interrupted flag, just in case.
            Thread.interrupted();

            // Check whether we are done waiting yet.
            synchronized (mLock) {
                throwIfClosedLocked();

                final SQLiteConnection connection = waiter.mAssignedConnection;
                final RuntimeException ex = waiter.mException;
                if (connection != null || ex != null) {
                    recycleConnectionWaiterLocked(waiter);
                    if (connection != null) {
                        return connection;
                    }
                    throw ex; // rethrow!
                }

                final long now = SystemClock.uptimeMillis();
                if (now < nextBusyTimeoutTime) {
                    // Not yet time to log: park only for the remainder of the busy interval.
                    busyTimeoutMillis = nextBusyTimeoutTime - now;
                } else {
                    logConnectionPoolBusyLocked(now - waiter.mStartTime, connectionFlags);
                    busyTimeoutMillis = CONNECTION_POOL_BUSY_MILLIS;
                    nextBusyTimeoutTime = now + busyTimeoutMillis;
                }
            }
        }
    } finally {
        // Remove the cancellation listener.
        if (cancellationSignal != null) {
            cancellationSignal.setOnCancelListener(null);
        }
    }
}
@Override
public void run() throws Exception {
    LockSupport.parkNanos(delayNs);
}
@Override
protected void backoff() {
    LockSupport.parkNanos(1L);
}
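A hook like this usually sits inside a contention retry loop. Below is a minimal self-contained sketch of a CAS loop that calls backoff() after a failed compare-and-set; the AtomicLong counter and all names are illustrative assumptions, not the original code.

import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.LockSupport;

// Illustrative CAS retry loop with a pluggable backoff hook.
public class BackoffExample {
    private static final AtomicLong counter = new AtomicLong();

    protected static void backoff() {
        // A 1ns park is effectively a brief yield to the scheduler:
        // the actual pause is dominated by the park/unpark overhead.
        LockSupport.parkNanos(1L);
    }

    static long addSlowly(long delta) {
        for (;;) {
            long current = counter.get();
            long next = current + delta;
            if (counter.compareAndSet(current, next)) {
                return next;
            }
            backoff(); // lost the race: back off before retrying to reduce contention
        }
    }

    public static void main(String[] args) {
        System.out.println(addSlowly(42)); // prints 42
    }
}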