@Test @Ignore // the test is non-deterministic and can't be made deterministic public void testFlatMapAsync() { int NUM = (int) (Observable.bufferSize() * 2.1); AtomicInteger c = new AtomicInteger(); TestSubscriber<Integer> ts = new TestSubscriber<>(); incrementingIntegers(c) .subscribeOn(Schedulers.computation()) .flatMap( i -> incrementingIntegers(new AtomicInteger()) .take(10) .subscribeOn(Schedulers.computation())) .take(NUM) .subscribe(ts); ts.awaitTerminalEvent(); ts.assertNoErrors(); System.out.println( "testFlatMapAsync => Received: " + ts.valueCount() + " Emitted: " + c.get() + " Size: " + Observable.bufferSize()); assertEquals(NUM, ts.valueCount()); // even though we only need 10, it will request at least Observable.bufferSize(), and then as it // drains keep requesting more // and then it will be non-deterministic when the take() causes the unsubscribe as it is // scheduled on 10 different schedulers (threads) // normally this number is ~250 but can get up to ~1200 when Observable.bufferSize() == 1024 assertTrue(c.get() <= Observable.bufferSize() * 2); }
@Override
public void onInbound(InboundInterceptorContext context) throws Exception {
  // Anything other than CREATE is counted and passed down the interceptor chain.
  if (context.request().requestType() != RequestType.CREATE) {
    requestCounter.incrementAndGet();
    super.onInbound(context);
    return;
  }

  // CREATE requests are short-circuited with a canned "mock" resource whose
  // state snapshots the current request/response counters.
  Resource mockResource =
      new Resource() {
        @Override
        public Resource parent() {
          return null;
        }

        @Override
        public String id() {
          return "mock";
        }
      };
  ResourceResponse response =
      new DefaultResourceResponse(
          context.request(), ResourceResponse.ResponseType.CREATED, mockResource);

  ResourceState state = new DefaultResourceState();
  state.putProperty("requestCounter", requestCounter.get());
  state.putProperty("responseCounter", responseCounter.get());
  response.setState(state);

  context.replyWith(response);
}
/** Builds an immutable status snapshot of this buffer client. Synchronized so all fields are read consistently. */
public synchronized PageBufferClientStatus getStatus() {
  // Derive the display state; the checks are ordered by precedence.
  final String state;
  if (closed) {
    state = "closed";
  } else if (future != null) {
    state = "running";
  } else if (scheduled) {
    state = "scheduled";
  } else if (completed) {
    state = "completed";
  } else {
    state = "queued";
  }

  // Only an in-flight request has an HTTP state to report.
  String httpRequestState = (future == null) ? "not scheduled" : future.getState();

  long rejectedRowCount = rowsRejected.get();
  int rejectedPageCount = pagesRejected.get();
  return new PageBufferClientStatus(
      location,
      state,
      lastUpdate,
      rowsReceived.get(),
      pagesReceived.get(),
      // Empty optionals mean "nothing rejected"; callers can omit them from output.
      rejectedRowCount == 0 ? OptionalLong.empty() : OptionalLong.of(rejectedRowCount),
      rejectedPageCount == 0 ? OptionalInt.empty() : OptionalInt.of(rejectedPageCount),
      requestsScheduled.get(),
      requestsCompleted.get(),
      requestsFailed.get(),
      httpRequestState);
}
// @Test @Ignore public void testThread() throws InterruptedException { IUserManagedPool pool = super.getPool(); MyThread[] threads = new MyThread[CONN_LOOPS]; for (int j = 0; j < CONN_LOOPS; j++) { threads[j] = new MyThread(pool, j, true, count); } for (int j = 0; j < CONN_LOOPS; j++) { threads[j].start(); } while (count.get() < CONN_LOOPS && countException.get() == 0) { // 实时监控连接池的状态 String msg = "当前连接池数量:" + pool.getStatus(); ConnPrintOutUtil.print(log, ConnPrintOutUtil.INFO, msg); ThreadUtils.doWait(super.getPool()); } Thread.sleep(1000); // 如果结果不抛出异常 则说明case正确 if (countException.get() == 0) { ConnPrintOutUtil.printSuccess(log); } else { ConnPrintOutUtil.printFailure(log); } }
public void testCacheStopExpired() throws Exception { ProducerCache cache = new ProducerCache(this, context, 5); cache.start(); assertEquals("Size should be 0", 0, cache.size()); for (int i = 0; i < 8; i++) { Endpoint e = new MyEndpoint(true, i); Producer p = cache.acquireProducer(e); cache.releaseProducer(e, p); } // the eviction is async so force cleanup cache.cleanUp(); assertEquals("Size should be 5", 5, cache.size()); // the eviction listener is async so sleep a bit Thread.sleep(1000); // should have stopped the 3 evicted assertEquals(3, stopCounter.get()); cache.stop(); // should have stopped all 8 assertEquals(8, stopCounter.get()); }
/**
 * Returns the failure rate as a percentage of all processed items.
 *
 * @return failed/total * 100 as a float, or 0 when nothing has been processed yet
 */
public float getFailedPercent() {
  // Snapshot the total once so both the guard and the division use the same value
  // (the counters may be updated concurrently between the two reads).
  int totalCount = total.get();
  if (totalCount > 0) {
    // Use float arithmetic: the previous `failed * 100 / total` was evaluated in
    // int math, truncating the fraction before widening to the float return type.
    return failed.get() * 100f / totalCount;
  }
  return 0;
}
/**
 * Attempts to commit this transaction.
 *
 * <p>Read-only transactions (empty write set) commit trivially. Otherwise the
 * status word is CAS-ed from ACTIVE to COMMITTING, the global clock is bumped,
 * the read set is validated, writes are published, and the status becomes
 * COMMITTED. Returns {@code false} if validation fails or the transaction was
 * killed by another thread.
 */
@Override
public boolean commit() {
  try {
    if (!writeSet.isEmpty()) {
      // Status word packs the state in the low bits (STATUS_MASK) plus extra info.
      int v = status.get();
      int s = v & STATUS_MASK;
      if (s == TX_ACTIVE && status.compareAndSet(v, v + (TX_COMMITTING - TX_ACTIVE))) {
        long newClock = clock.incrementAndGet();
        // If another transaction committed since we started (clock advanced by
        // more than our own increment), re-validate the read set.
        if (newClock != startTime.get() + 1 && !readSet.validate(this, id)) {
          rollback0();
          return false;
        }
        // Write values and release locks
        writeSet.commit(newClock);
        status.set(v + (TX_COMMITTED - TX_ACTIVE));
      } else {
        // We have been killed: wait for our locks to have been released
        // (spin until the killer has finished aborting us).
        while (s != TX_ABORTED) s = status.get() & STATUS_MASK;
        return false;
      }
    } else {
      // No need to set status to COMMITTED (we cannot be killed with an empty write set)
    }
    attempts = 0;
    return true;
  } finally {
    // Release the irrevocability lock taken at transaction start; an irrevocable
    // transaction holds the write lock, everyone else the read lock.
    if (irrevocableState) {
      irrevocableState = false;
      irrevocableAccessLock.writeLock().unlock();
    } else {
      irrevocableAccessLock.readLock().unlock();
    }
  }
}
@Test
public void testSingleClientLockContention()
    throws IOException, InterruptedException, KeeperException {
  // Counts every execution of the locked section.
  final AtomicInteger totalRuns = new AtomicInteger(0);
  // Tracks how many locked sections run at the same instant; must never exceed 1.
  final AtomicInteger concurrentRuns = new AtomicInteger(0);

  for (int iteration = 0; iteration < 10; iteration++) {
    this.rbz.withLock(
        "testLock",
        new Runnable() {
          @Override
          public void run() {
            totalRuns.incrementAndGet();
            int running = concurrentRuns.incrementAndGet();
            Assert.assertEquals(1, running);
            try {
              Thread.sleep(RANDOM.nextInt(100));
            } catch (InterruptedException e) {
              throw new RuntimeException(e);
            }
            running = concurrentRuns.decrementAndGet();
            Assert.assertEquals(0, running);
          }
        });
  }

  Thread.sleep(1000);
  Assert.assertEquals(10, totalRuns.get());
  Assert.assertEquals(0, concurrentRuns.get());
  checkNoRemainingLocks("testLock");
}
/**
 * Puts a value into the local cache.
 *
 * @param key cache key
 * @param value cache value
 * @param cacheTime time-to-live in milliseconds
 */
public void put(Object key, Object value, long cacheTime) {
  // If the cache exceeds max capacity, trigger an asynchronous logical eviction.
  if (counter.get() > cacheConfig.getMaxCapacity()) {
    logger.warn("local cache store is full and size is: " + counter.get());
    try {
      executorService.execute(
          new Runnable() {
            @Override
            public void run() {
              removePolicy(true);
            }
          });
    } catch (Throwable t) {
      // Best-effort eviction: a rejected/failed task must not break the put.
      logger.warn("executorService execute error", t);
    }
  }
  // If the cache exceeds twice the max capacity, run a synchronous physical eviction.
  if (counter.get() > cacheConfig.getMaxCapacity() * 2) {
    logger.warn("local cache store is full and size is:" + counter.get());
    removePolicy(false);
  }
  // Perform the put; only count the entry when the key was not already present.
  CacheElement cacheElement = storeMap.put(key, new CacheElement(value, cacheTime));
  if (cacheElement == null) {
    counter.incrementAndGet();
    logger.warn("local cache store put key=" + key + " count=" + counter.get());
  }
}
/**
 * Encodes {@code diff} into {@code set} as a {@code bitCnt}-bit value starting at
 * the current position {@code pos}. When {@code signed}, one extra leading bit
 * records the sign and {@code pos} is advanced past it; the magnitude is then
 * written most-significant bit first. NOTE(review): {@code pos} is NOT advanced
 * past the value bits here — presumably the caller does that; confirm.
 */
protected static void recordValue(
    final BitSet set, final AtomicInteger pos, final byte bitCnt, int diff, final boolean signed) {
  if (signed) {
    if (diff < 0) {
      set.set(pos.get()); // sign bit: set means negative
    }
    pos.incrementAndGet();
  }
  diff = Math.abs(diff);
  for (int x = bitCnt - 1; x >= 0; x--) {
    if (bitCnt >= 32) {
      // NOTE(review): for bitCnt >= 32 every bit is set unconditionally and
      // Integer.MAX_VALUE is subtracted each iteration — this looks suspicious
      // (diff goes negative and is never consulted); confirm intended behavior.
      set.set(pos.get() + x);
      diff -= Integer.MAX_VALUE;
    } else {
      // bitValues[x] + 1 appears to be the weight of bit x; subtract it when the
      // remaining magnitude covers it.
      if (diff != 0 && diff >= bitValues[x] + 1) {
        set.set(pos.get() + x);
        diff -= bitValues[x] + 1;
      }
    }
  }
}
/**
 * Verifies the Store's OnClose callback fires exactly once, even when close()
 * is invoked multiple times, and receives the store's own shard lock.
 */
public void testOnCloseCallback() throws IOException {
  final ShardId shardId =
      new ShardId(
          new Index(randomRealisticUnicodeOfCodepointLengthBetween(1, 10)),
          randomIntBetween(0, 100));
  DirectoryService directoryService = new LuceneManagedDirectoryService(random());
  final AtomicInteger count = new AtomicInteger(0);
  final ShardLock lock = new DummyShardLock(shardId);

  Store store =
      new Store(
          shardId,
          ImmutableSettings.EMPTY,
          directoryService,
          randomDistributor(directoryService),
          lock,
          new Store.OnClose() {
            @Override
            public void handle(ShardLock theLock) {
              // The callback must receive the very lock the store was built with.
              assertEquals(shardId, theLock.getShardId());
              assertEquals(lock, theLock);
              count.incrementAndGet();
            }
          });

  // FIX: JUnit convention is assertEquals(expected, actual); the original had the
  // arguments reversed, which produces misleading failure messages.
  assertEquals(0, count.get());

  // Closing repeatedly must still invoke the callback only once.
  final int iters = randomIntBetween(1, 10);
  for (int i = 0; i < iters; i++) {
    store.close();
  }
  assertEquals(1, count.get());
}
/**
 * Renders this component's markup tag: requires a {@code <progress>} element and
 * writes the current {@code value} and {@code max} model values as attributes.
 * Attribute insertion order is preserved as-is, since it may affect the rendered markup.
 */
@Override
protected void onComponentTag(ComponentTag tag) {
  checkComponentTag(tag, "progress");
  super.onComponentTag(tag);
  tag.put("value", value.get());
  tag.put("max", max.get());
}
@Test public void testTakeFilterSkipChainAsync() { int NUM = (int) (Observable.bufferSize() * 2.1); AtomicInteger c = new AtomicInteger(); TestSubscriber<Integer> ts = new TestSubscriber<>(); incrementingIntegers(c) .observeOn(Schedulers.computation()) .skip(10000) .filter(i -> i > 11000) .take(NUM) .subscribe(ts); ts.awaitTerminalEvent(); ts.assertNoErrors(); // emit 10000 that are skipped // emit next 1000 that are filtered out // take NUM // so emitted is at least 10000+1000+NUM + extra for buffer size/threshold int expected = 10000 + 1000 + Observable.bufferSize() * 3 + Observable.bufferSize() / 2; System.out.println( "testTakeFilterSkipChain => Received: " + ts.valueCount() + " Emitted: " + c.get() + " Expected: " + expected); assertEquals(NUM, ts.valueCount()); assertTrue(c.get() < expected); }
@Test
public void testZipSync() {
  int requested = (int) (Observable.bufferSize() * 4.1);
  AtomicInteger leftCount = new AtomicInteger();
  AtomicInteger rightCount = new AtomicInteger();
  TestSubscriber<Integer> subscriber = new TestSubscriber<>();

  Observable<Integer> zipped =
      Observable.zip(
          incrementingIntegers(leftCount),
          incrementingIntegers(rightCount),
          (left, right) -> left + right);

  zipped.take(requested).subscribe(subscriber);
  subscriber.awaitTerminalEvent();
  subscriber.assertNoErrors();
  System.out.println(
      "testZipSync => Received: "
          + subscriber.valueCount()
          + " Emitted: "
          + leftCount.get()
          + " / "
          + rightCount.get());
  assertEquals(requested, subscriber.valueCount());
  // zip should keep both sources roughly in lock-step, bounded by backpressure
  assertTrue(leftCount.get() < Observable.bufferSize() * 7);
  assertTrue(rightCount.get() < Observable.bufferSize() * 7);
}
@Test
public void testEndHandlerCalled() {
  String path = "/some/path";
  // Server immediately closes every websocket it accepts.
  server =
      vertx
          .createHttpServer(new HttpServerOptions().setPort(HttpTestBase.DEFAULT_HTTP_PORT))
          .websocketHandler(WebSocketBase::close);
  AtomicInteger doneCount = new AtomicInteger();

  server.listen(
      listenResult -> {
        assertTrue(listenResult.succeeded());
        client
            .websocket(HttpTestBase.DEFAULT_HTTP_PORT, HttpTestBase.DEFAULT_HTTP_HOST, path, null)
            .endHandler(ignored -> doneCount.incrementAndGet())
            .handler(
                socket -> {
                  // The end handler must not have fired before the close completes...
                  assertEquals(0, doneCount.get());
                  socket.closeHandler(
                      closed -> {
                        // ...and must have fired exactly once afterwards.
                        assertEquals(1, doneCount.get());
                        testComplete();
                      });
                });
      });
  await();
}
@Test public void testMergeAsyncThenObserveOnLoop() { for (int i = 0; i < 500; i++) { if (i % 10 == 0) { System.out.println("testMergeAsyncThenObserveOnLoop >> " + i); } // Verify there is no MissingBackpressureException int NUM = (int) (RxRingBuffer.SIZE * 4.1); AtomicInteger c1 = new AtomicInteger(); AtomicInteger c2 = new AtomicInteger(); TestSubscriber<Integer> ts = new TestSubscriber<Integer>(); Observable<Integer> merged = Observable.merge( incrementingIntegers(c1).subscribeOn(Schedulers.computation()), incrementingIntegers(c2).subscribeOn(Schedulers.computation())); merged.observeOn(Schedulers.io()).take(NUM).subscribe(ts); ts.awaitTerminalEvent(); ts.assertNoErrors(); System.out.println( "testMergeAsyncThenObserveOn => Received: " + ts.getOnNextEvents().size() + " Emitted: " + c1.get() + " / " + c2.get()); assertEquals(NUM, ts.getOnNextEvents().size()); } }
/**
 * Drives {@code _test()} from four threads at once (three workers plus the main
 * thread) for ten rounds, printing the highwatermark before and after plus the
 * total elapsed time.
 */
public static void main(String[] args) throws IOException, InterruptedException {
  long startMillis = System.currentTimeMillis();
  initialbuild();
  System.out.println("Start highwatermark " + Highwatermark.get());

  for (int round = 0; round < 10; round++) {
    Thread[] workers = new Thread[3];
    for (int t = 0; t < workers.length; t++) {
      workers[t] =
          new Thread("test " + (t + 1)) {
            public void run() {
              _test();
            }
          };
    }
    for (Thread worker : workers) {
      worker.start();
    }
    // The main thread participates as a fourth concurrent caller.
    _test();
    for (Thread worker : workers) {
      worker.join();
    }
  }

  System.out.println("End highwatermark " + Highwatermark.get());
  long elapsed = System.currentTimeMillis() - startMillis;
  System.out.printf("End to end took %.1f%n", elapsed / 1e3);
}
@Test public void testMergeAsyncThenObserveOn() { int NUM = (int) (RxRingBuffer.SIZE * 4.1); AtomicInteger c1 = new AtomicInteger(); AtomicInteger c2 = new AtomicInteger(); TestSubscriber<Integer> ts = new TestSubscriber<Integer>(); Observable<Integer> merged = Observable.merge( incrementingIntegers(c1).subscribeOn(Schedulers.computation()), incrementingIntegers(c2).subscribeOn(Schedulers.computation())); merged.observeOn(Schedulers.newThread()).take(NUM).subscribe(ts); ts.awaitTerminalEvent(); ts.assertNoErrors(); System.out.println( "testMergeAsyncThenObserveOn => Received: " + ts.getOnNextEvents().size() + " Emitted: " + c1.get() + " / " + c2.get()); assertEquals(NUM, ts.getOnNextEvents().size()); // either one can starve the other, but neither should be capable of doing more than 5 batches // (taking 4.1) // TODO is it possible to make this deterministic rather than one possibly starving the other? // benjchristensen => In general I'd say it's not worth trying to make it so, as "fair" // algoritms generally take a performance hit // akarnokd => run this in a loop over 10k times and never saw values get as high as 7*SIZE, but // since observeOn delays the unsubscription non-deterministically, the test will remain // unreliable assertTrue(c1.get() < RxRingBuffer.SIZE * 7); assertTrue(c2.get() < RxRingBuffer.SIZE * 7); }
/* ------------------------------------------------------------ */ private void shrinkCache() { // While we need to shrink while (_cache.size() > 0 && (_cachedFiles.get() > _maxCachedFiles || _cachedSize.get() > _maxCacheSize)) { // Scan the entire cache and generate an ordered list by last accessed time. SortedSet<Content> sorted = new TreeSet<Content>( new Comparator<Content>() { public int compare(Content c1, Content c2) { if (c1._lastAccessed < c2._lastAccessed) return -1; if (c1._lastAccessed > c2._lastAccessed) return 1; if (c1._length < c2._length) return -1; return c1._key.compareTo(c2._key); } }); for (Content content : _cache.values()) sorted.add(content); // Invalidate least recently used first for (Content content : sorted) { if (_cachedFiles.get() <= _maxCachedFiles && _cachedSize.get() <= _maxCacheSize) break; if (content == _cache.remove(content.getKey())) content.invalidate(); } } }
@Test
public void testZipAsync() {
  int requested = (int) (RxRingBuffer.SIZE * 2.1);
  AtomicInteger firstCount = new AtomicInteger();
  AtomicInteger secondCount = new AtomicInteger();
  TestSubscriber<Integer> subscriber = new TestSubscriber<Integer>();

  Observable<Integer> zipped =
      Observable.zip(
          incrementingIntegers(firstCount).subscribeOn(Schedulers.computation()),
          incrementingIntegers(secondCount).subscribeOn(Schedulers.computation()),
          new Func2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer left, Integer right) {
              return left + right;
            }
          });

  zipped.take(requested).subscribe(subscriber);
  subscriber.awaitTerminalEvent();
  subscriber.assertNoErrors();
  System.out.println(
      "testZipAsync => Received: "
          + subscriber.getOnNextEvents().size()
          + " Emitted: "
          + firstCount.get()
          + " / "
          + secondCount.get());
  assertEquals(requested, subscriber.getOnNextEvents().size());
  // zip applies backpressure to both sources, so neither should run far ahead
  assertTrue(firstCount.get() < RxRingBuffer.SIZE * 3);
  assertTrue(secondCount.get() < RxRingBuffer.SIZE * 3);
}
@Test
public void onlyFirstShouldSubscribeAndLastUnsubscribe() {
  final AtomicInteger subscribeCount = new AtomicInteger();
  final AtomicInteger unsubscribeCount = new AtomicInteger();

  Observable<Integer> source =
      Observable.create(
          new Observable.OnSubscribeFunc<Integer>() {
            @Override
            public Subscription onSubscribe(Observer<? super Integer> observer) {
              subscribeCount.incrementAndGet();
              return Subscriptions.create(
                  new Action0() {
                    @Override
                    public void call() {
                      unsubscribeCount.incrementAndGet();
                    }
                  });
            }
          });
  Observable<Integer> refCounted = source.publish().refCount();

  @SuppressWarnings("unchecked")
  Observer<Integer> observer = mock(Observer.class);

  // The first subscriber triggers the single upstream subscription...
  Subscription first = refCounted.subscribe(observer);
  assertEquals(1, subscribeCount.get());

  // ...and a second subscriber shares it instead of re-subscribing.
  Subscription second = refCounted.subscribe(observer);
  assertEquals(1, subscribeCount.get());

  // Dropping the first subscriber keeps the upstream alive...
  first.unsubscribe();
  assertEquals(0, unsubscribeCount.get());

  // ...and only the last unsubscribe tears it down.
  second.unsubscribe();
  assertEquals(1, unsubscribeCount.get());
}
@Test
public void testObserveOnWithSlowConsumer() {
  int requested = (int) (RxRingBuffer.SIZE * 0.2);
  AtomicInteger emitted = new AtomicInteger();
  TestSubscriber<Integer> subscriber = new TestSubscriber<Integer>();

  incrementingIntegers(emitted)
      .observeOn(Schedulers.computation())
      .map(
          new Func1<Integer, Integer>() {
            @Override
            public Integer call(Integer value) {
              // Simulate a slow consumer: ~1ms of work per element.
              try {
                Thread.sleep(1);
              } catch (InterruptedException e) {
                e.printStackTrace();
              }
              return value;
            }
          })
      .take(requested)
      .subscribe(subscriber);

  subscriber.awaitTerminalEvent();
  subscriber.assertNoErrors();
  System.out.println(
      "testObserveOnWithSlowConsumer => Received: "
          + subscriber.getOnNextEvents().size()
          + " Emitted: "
          + emitted.get());
  assertEquals(requested, subscriber.getOnNextEvents().size());
  // backpressure should keep the producer from running far ahead of the slow consumer
  assertTrue(emitted.get() < RxRingBuffer.SIZE * 2);
}
/**
 * Flips the active queue index when triggered: if the other queue is empty the
 * active index is cleared to -1, otherwise processing switches to the other queue.
 */
@Override
public void run() {
  if (isTraceEnabled) logger.trace("Tripper triggered.");
  // NOTE(review): this assumes curQue is 0 or 1 when triggered; if it were -1,
  // ques[1 - (-1)] would index out of bounds — confirm callers guarantee this.
  if (ques[1 - curQue.get()].isEmpty()) {
    curQue.set(-1);
  } else curQue.set(1 - curQue.get());
}
@Test public void testMergeSync() { int NUM = (int) (RxRingBuffer.SIZE * 4.1); AtomicInteger c1 = new AtomicInteger(); AtomicInteger c2 = new AtomicInteger(); TestSubscriber<Integer> ts = new TestSubscriber<Integer>(); Observable<Integer> merged = Observable.merge(incrementingIntegers(c1), incrementingIntegers(c2)); merged.take(NUM).subscribe(ts); ts.awaitTerminalEvent(); ts.assertNoErrors(); System.out.println("Expected: " + NUM + " got: " + ts.getOnNextEvents().size()); System.out.println( "testMergeSync => Received: " + ts.getOnNextEvents().size() + " Emitted: " + c1.get() + " / " + c2.get()); assertEquals(NUM, ts.getOnNextEvents().size()); // either one can starve the other, but neither should be capable of doing more than 5 batches // (taking 4.1) // TODO is it possible to make this deterministic rather than one possibly starving the other? // benjchristensen => In general I'd say it's not worth trying to make it so, as "fair" // algoritms generally take a performance hit assertTrue(c1.get() < RxRingBuffer.SIZE * 5); assertTrue(c2.get() < RxRingBuffer.SIZE * 5); }
public void testReleaseProducerInvokesStopAndShutdownByNonSingletonProducers() throws Exception { ProducerCache cache = new ProducerCache(this, context, 1); cache.start(); assertEquals("Size should be 0", 0, cache.size()); for (int i = 0; i < 3; i++) { Endpoint e = new MyEndpoint(false, i); Producer p = cache.acquireProducer(e); cache.releaseProducer(e, p); } assertEquals("Size should be 0", 0, cache.size()); // should have stopped all 3 assertEquals(3, stopCounter.get()); // should have shutdown all 3 assertEquals(3, shutdownCounter.get()); cache.stop(); // no more stop after stopping the cache assertEquals(3, stopCounter.get()); // no more shutdown after stopping the cache assertEquals(3, shutdownCounter.get()); }
/** @see java.lang.Process#exitValue() */
@Override
public int exitValue() {
  // If the DSF session is gone the backend has exited; report the cached code.
  if (!DsfSession.isSessionActive(getSession().getId())) {
    return fExitCode.get();
  }
  try {
    // Check on the session executor whether the backend has terminated; per the
    // Process#exitValue() contract, throw IllegalThreadStateException if not.
    getSession()
        .getExecutor()
        .submit(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                if (fMIBackend.getState() != IMIBackend.State.TERMINATED) {
                  throw new IllegalThreadStateException(
                      "Backend Process has not exited"); //$NON-NLS-1$
                }
                return null;
              }
            })
        .get();
  } catch (RejectedExecutionException e) {
    // Executor already shut down — fall through to the cached exit code.
  } catch (InterruptedException e) {
    // NOTE(review): interrupt status is swallowed here — presumably intentional
    // to satisfy exitValue()'s no-checked-exception signature; confirm.
  } catch (ExecutionException e) {
    // Propagate the IllegalThreadStateException thrown inside the callable.
    if (e.getCause() instanceof RuntimeException) {
      throw (RuntimeException) e.getCause();
    }
  }
  return fExitCode.get();
}
@Test
public void testBaseMetaChange() throws Exception {
  startEngine();

  int maxRetries = 50;

  // Bump the base-meta version in ZK and poll until the first change is observed.
  m_curator
      .setData()
      .forPath(ZKPathUtils.getBaseMetaVersionZkPath(), ZKSerializeUtils.serialize(100L));
  for (int attempt = 0; m_baseMetaChangeCount.get() != 1 && attempt < maxRetries; attempt++) {
    TimeUnit.MILLISECONDS.sleep(100);
  }
  assertEquals(1, m_baseMetaChangeCount.get());

  // A second version bump must produce a second change notification.
  m_curator
      .setData()
      .forPath(ZKPathUtils.getBaseMetaVersionZkPath(), ZKSerializeUtils.serialize(200L));
  for (int attempt = 0; m_baseMetaChangeCount.get() != 2 && attempt < maxRetries; attempt++) {
    TimeUnit.MILLISECONDS.sleep(100);
  }
  assertEquals(2, m_baseMetaChangeCount.get());
}
/** @see java.lang.Process#waitFor() */
@Override
public int waitFor() throws InterruptedException {
  // If the session is no longer active the backend already exited.
  if (!DsfSession.isSessionActive(getSession().getId())) {
    return fExitCode.get();
  }
  try {
    // Block until the backend reports TERMINATED (or the session/process is gone).
    Query<Object> query =
        new Query<Object>() {
          @Override
          protected void execute(final DataRequestMonitor<Object> rm) {
            // Complete immediately if the session is gone, this process object is
            // disposed, or the backend has already terminated.
            if (!DsfSession.isSessionActive(getSession().getId())
                || isDisposed()
                || fMIBackend.getState() == IMIBackend.State.TERMINATED) {
              rm.setData(new Object());
              rm.done();
            } else {
              // Otherwise register a monitor that completes when the backend's
              // exited event is delivered.
              fExitedEventListener.fWaitForRMs.add(
                  new ImmediateRequestMonitor(rm) {
                    @Override
                    protected void handleSuccess() {
                      rm.setData(new Object());
                      rm.done();
                    }
                  });
            }
          }
        };
    getSession().getExecutor().execute(query);
    query.get();
  } catch (RejectedExecutionException e) {
    // Executor shut down — treat as exited and return the cached code.
  } catch (ExecutionException e) {
    // NOTE(review): the failure cause is dropped; the cached exit code is
    // returned regardless — presumably intentional; confirm.
  }
  return fExitCode.get();
}
/**
 * Retrieves and removes the head of the queue, or returns {@code null} if the
 * queue is empty. Lock-free fast path on an empty queue; otherwise extraction
 * happens under {@code takeLock}.
 */
public E poll() {
  final AtomicInteger count = this.count;
  // Fast path: nothing queued — avoid acquiring the lock at all.
  if (count.get() == 0) return null;
  Node<E> x = null;
  final ReentrantLock takeLock = this.takeLock;
  takeLock.lock();
  try {
    // Re-check under the lock; another consumer may have drained the queue.
    if (count.get() > 0) {
      x = extract();
      // If elements remain after this removal, wake another waiting consumer.
      if (count.getAndDecrement() > 1) notEmpty.signal();
    }
  } finally {
    takeLock.unlock();
  }
  if (x != null) {
    E result = x.item;
    // Clear the removed node's fields to help GC ("temporary clearance").
    x.item = null;
    x.next = null;
    return result;
  }
  return null;
}
/** addAndGet adds given value to current, and returns current value */
public void testAddAndGet() {
  AtomicInteger counter = new AtomicInteger(1);
  // Positive delta: 1 + 2 == 3, both returned and stored.
  assertEquals(3, counter.addAndGet(2));
  assertEquals(3, counter.get());
  // Negative delta: 3 - 4 == -1.
  assertEquals(-1, counter.addAndGet(-4));
  assertEquals(-1, counter.get());
}