@Test
public void testTakeWithErrorInObserver() {
    final AtomicInteger count = new AtomicInteger();
    final AtomicReference<Throwable> error = new AtomicReference<>();

    Observable.just("1", "2", "three", "4")
        .take(3)
        .safeSubscribe(
            new Observer<String>() {

                @Override
                public void onComplete() {
                    System.out.println("completed");
                }

                @Override
                public void onError(Throwable e) {
                    error.set(e);
                    System.out.println("error");
                    e.printStackTrace();
                }

                @Override
                public void onNext(String v) {
                    int num = Integer.parseInt(v);
                    System.out.println(num);
                    // doSomething(num);
                    count.incrementAndGet();
                }
            });

    assertEquals(2, count.get());
    assertNotNull(error.get());

    if (!(error.get() instanceof NumberFormatException)) {
        fail("It should be a NumberFormatException");
    }
}
/**
 * The error from the user-provided Observable is handled by the subscribe try/catch because this
 * is synchronous.
 *
 * <p>Result: Passes
 */
@Test
public void testCustomObservableWithErrorInObservableSynchronous() {
    final AtomicInteger count = new AtomicInteger();
    final AtomicReference<Throwable> error = new AtomicReference<>();

    // FIXME custom built???
    Observable.just("1", "2")
        .concatWith(Observable.error(() -> new NumberFormatException()))
        .subscribe(
            new Observer<String>() {

                @Override
                public void onComplete() {
                    System.out.println("completed");
                }

                @Override
                public void onError(Throwable e) {
                    error.set(e);
                    System.out.println("error");
                    e.printStackTrace();
                }

                @Override
                public void onNext(String v) {
                    System.out.println(v);
                    count.incrementAndGet();
                }
            });

    assertEquals(2, count.get());
    assertNotNull(error.get());

    if (!(error.get() instanceof NumberFormatException)) {
        fail("It should be a NumberFormatException");
    }
}
/**
 * https://github.com/ReactiveX/RxJava/issues/198
 *
 * <p>Rx Design Guidelines 5.2
 *
 * <p>"when calling the Subscribe method that only has an onNext argument, the OnError behavior
 * will be to rethrow the exception on the thread that the message comes out from the Observable.
 * The OnCompleted behavior in this case is to do nothing."
 *
 * @throws InterruptedException
 */
@Test
@Ignore("Subscribers can't throw")
public void testErrorThrownWithoutErrorHandlerAsynchronous() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicReference<Throwable> exception = new AtomicReference<>();

    Observable.create(
            observer -> {
                new Thread(
                        () -> {
                            try {
                                observer.onError(new Error("failure"));
                            } catch (Throwable e) {
                                // without an onError handler it has to just throw on whatever
                                // thread invokes it
                                exception.set(e);
                            }
                            latch.countDown();
                        })
                    .start();
            })
        .subscribe();

    // wait for exception
    latch.await(3000, TimeUnit.MILLISECONDS);

    assertNotNull(exception.get());
    assertEquals("failure", exception.get().getMessage());
}
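/*
 * Illustrative sketch, not part of the original test suite: in current RxJava the guideline
 * quoted above is realized through RxJavaPlugins rather than a rethrow. When subscribe(...) is
 * called with only an onNext consumer, the error is wrapped in OnErrorNotImplementedException and
 * routed to the global error handler (by default, the current thread's uncaught-exception
 * handler). Package names below assume RxJava 3; adjust imports for RxJava 2.
 */
import io.reactivex.rxjava3.core.Observable;
import io.reactivex.rxjava3.exceptions.OnErrorNotImplementedException;
import io.reactivex.rxjava3.plugins.RxJavaPlugins;
import java.util.concurrent.atomic.AtomicReference;

public class MissingErrorHandlerSketch {
    public static void main(String[] args) {
        AtomicReference<Throwable> seen = new AtomicReference<>();

        // Capture what would otherwise reach the thread's uncaught-exception handler.
        RxJavaPlugins.setErrorHandler(seen::set);

        try {
            Observable.<String>error(new NumberFormatException("boom"))
                .subscribe(v -> System.out.println(v)); // no onError argument
        } finally {
            RxJavaPlugins.reset();
        }

        // The undeliverable error arrives wrapped.
        System.out.println(seen.get() instanceof OnErrorNotImplementedException); // true
    }
}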
public void newCharacter(CharacterEvent ce) {
    int oldChar2type;

    // Previous character not typed correctly - 1 point penalty
    if (ce.source == generator.get()) {
        oldChar2type = char2type.getAndSet(ce.character); // receive the random character

        if (oldChar2type != -1) {
            score.decrementAndGet();
            setScore();
        }
    }
    // If character is extraneous - 1 point penalty
    // If character does not match - 1 point penalty
    else if (ce.source == typist.get()) {
        // Assumption here: the value we are working with is not changed out from under us while
        // this code runs; a variable we set to a specific value should still hold that value
        // when we finish. The compareAndSet loop below enforces exactly that.
        while (true) {
            oldChar2type = char2type.get(); // the random character received last time

            if (oldChar2type != ce.character) { // ce.character is the character the user typed
                score.decrementAndGet();
                break;
            } else if (char2type.compareAndSet(oldChar2type, -1)) {
                score.incrementAndGet();
                break;
            }
        }

        setScore();
    }
}
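/*
 * Illustrative sketch (hypothetical helper, not from the original sources): the retry loop above
 * is the standard compare-and-set idiom. Read the current value, compute the new value, and only
 * commit if nobody changed it in between; otherwise retry. The helper below applies the same
 * pattern to decrement a counter without going below zero.
 */
import java.util.concurrent.atomic.AtomicInteger;

public class CasRetryLoopSketch {

    /** Decrements the counter but never below zero; returns the value actually stored. */
    public static int decrementIfPositive(AtomicInteger counter) {
        while (true) {
            int current = counter.get();

            if (current == 0) {
                return 0;                          // already at the floor, nothing to change
            }
            if (counter.compareAndSet(current, current - 1)) {
                return current - 1;                // our update won the race
            }
            // Another thread changed the counter between get() and compareAndSet(); retry.
        }
    }
}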
@Ignore("Non-positive requests are relayed to the plugin and are otherwise a no-op")
@Test
public void testNegativeRequestThrowsIllegalArgumentException() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicReference<Throwable> exception = new AtomicReference<>();

    Observable.just(1, 2, 3, 4)
        .subscribe(
            new Observer<Integer>() {

                @Override
                public void onStart() {
                    request(1);
                }

                @Override
                public void onComplete() {}

                @Override
                public void onError(Throwable e) {
                    exception.set(e);
                    latch.countDown();
                }

                @Override
                public void onNext(Integer t) {
                    request(-1);
                    request(1);
                }
            });

    Assert.assertTrue(latch.await(10, TimeUnit.SECONDS));
    Assert.assertTrue(exception.get() instanceof IllegalArgumentException);
}
/** {@inheritDoc} */
@Override
public GridDiscoveryTopologySnapshot topologySnapshot() throws IgniteCheckedException {
    get();

    if (topSnapshot.get() == null)
        topSnapshot.compareAndSet(
            null,
            new GridDiscoveryTopologySnapshot(discoEvt.topologyVersion(), discoEvt.topologyNodes()));

    return topSnapshot.get();
}
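/*
 * Illustrative sketch (simplified, not the Ignite implementation): the check-then-compareAndSet
 * pattern above is a race-tolerant lazy initializer. Several threads may build a candidate, but
 * only the first compareAndSet(null, ...) wins; the losers' candidates are discarded and every
 * caller returns the single published instance.
 */
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

public final class LazyRef<T> {
    private final AtomicReference<T> ref = new AtomicReference<>();
    private final Supplier<T> factory;

    public LazyRef(Supplier<T> factory) {
        this.factory = factory;
    }

    public T get() {
        T cur = ref.get();

        if (cur != null)
            return cur;                             // fast path: already initialized

        ref.compareAndSet(null, factory.get());     // only the first writer wins
        return ref.get();                           // everyone sees the winner's value
    }
}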
/** Cleans up resources to avoid excessive memory usage. */
public void cleanUp() {
    topSnapshot.set(null);
    singleMsgs.clear();
    fullMsgs.clear();
    rcvdIds.clear();
    oldestNode.set(null);
    partReleaseFut = null;

    Collection<ClusterNode> rmtNodes = this.rmtNodes;

    if (rmtNodes != null)
        rmtNodes.clear();
}
/**
 * @param nodes Nodes.
 * @param id ID.
 * @throws IgniteCheckedException If failed.
 */
private void sendAllPartitions(
    Collection<? extends ClusterNode> nodes, GridDhtPartitionExchangeId id)
    throws IgniteCheckedException {
    GridDhtPartitionsFullMessage m =
        new GridDhtPartitionsFullMessage(id, lastVer.get(), id.topologyVersion());

    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
        if (!cacheCtx.isLocal()) {
            AffinityTopologyVersion startTopVer = cacheCtx.startTopologyVersion();

            boolean ready = startTopVer == null || startTopVer.compareTo(id.topologyVersion()) <= 0;

            if (ready)
                m.addFullPartitionsMap(cacheCtx.cacheId(), cacheCtx.topology().partitionMap(true));
        }
    }

    // It is important that client topologies be added after contexts.
    for (GridClientPartitionTopology top : cctx.exchange().clientTopologies())
        m.addFullPartitionsMap(top.cacheId(), top.partitionMap(true));

    if (log.isDebugEnabled())
        log.debug(
            "Sending full partition map [nodeIds="
                + F.viewReadOnly(nodes, F.node2id())
                + ", exchId="
                + exchId
                + ", msg="
                + m
                + ']');

    cctx.io().safeSend(nodes, m, SYSTEM_POOL, null);
}
/** @param e Error. */
void onError(Throwable e) {
    tx.commitError(e);

    if (err.compareAndSet(null, e)) {
        boolean marked = tx.setRollbackOnly();

        if (e instanceof GridCacheTxRollbackException) {
            if (marked) {
                try {
                    tx.rollback();
                } catch (GridException ex) {
                    U.error(log, "Failed to automatically rollback transaction: " + tx, ex);
                }
            }
        } else if (tx.implicit() && tx.isSystemInvalidate()) {
            // Finish implicit transaction on heuristic error.
            try {
                tx.close();
            } catch (GridException ex) {
                U.error(log, "Failed to invalidate transaction: " + tx, ex);
            }
        }

        onComplete();
    }
}
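/*
 * Illustrative sketch (simplified, not the Ignite code): compareAndSet(null, e) acts as a
 * "first error wins" guard. Only the thread that records the first failure runs the completion
 * logic; later failures are ignored for completion purposes but the stored error remains visible
 * via get().
 */
import java.util.concurrent.atomic.AtomicReference;

public class FirstErrorWins {
    private final AtomicReference<Throwable> err = new AtomicReference<>();

    /** Returns true for exactly one caller: the one that reported the first error. */
    public boolean fail(Throwable e) {
        if (err.compareAndSet(null, e)) {
            complete();       // runs at most once, no matter how many threads call fail()
            return true;
        }
        return false;         // an earlier error already completed this object
    }

    private void complete() {
        System.out.println("completed with: " + err.get());
    }
}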
private void recheck() {
    // If this is the oldest node.
    if (oldestNode.get().id().equals(cctx.localNodeId())) {
        Collection<UUID> remaining = remaining();

        if (!remaining.isEmpty()) {
            try {
                cctx.io()
                    .safeSend(
                        cctx.discovery().nodes(remaining),
                        new GridDhtPartitionsSingleRequest(exchId),
                        SYSTEM_POOL,
                        null);
            } catch (IgniteCheckedException e) {
                U.error(
                    log,
                    "Failed to request partitions from nodes [exchangeId="
                        + exchId
                        + ", nodes="
                        + remaining
                        + ']',
                    e);
            }
        }
        // Resend full partition map because last attempt failed.
        else {
            if (spreadPartitions())
                onDone(exchId.topologyVersion());
        }
    } else
        sendPartitions();

    // Schedule another send.
    scheduleRecheck();
}
public void resetTypist(CharacterSource newTypist) {
    CharacterSource oldTypist;

    if (newTypist != null)
        newTypist.addCharacterListener(this);

    oldTypist = typist.getAndSet(newTypist);

    if (oldTypist != null)
        oldTypist.removeCharacterListener(this);
}
public void resetGenerator(CharacterSource newGenerator) {
    // Note the order of operations this method uses to swap sources.
    CharacterSource oldGenerator;

    if (newGenerator != null)
        newGenerator.addCharacterListener(this);

    oldGenerator = generator.getAndSet(newGenerator);

    if (oldGenerator != null)
        oldGenerator.removeCharacterListener(this);
}
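/*
 * Illustrative sketch (simplified from the listener-swap pattern above; Source is a hypothetical
 * interface): getAndSet publishes the new source and returns the old one in a single atomic step,
 * so two concurrent reset calls can never both deregister the same old source or leak a listener.
 */
import java.util.concurrent.atomic.AtomicReference;

public class SwapSketch {
    interface Source {
        void addListener(Object l);

        void removeListener(Object l);
    }

    private final AtomicReference<Source> current = new AtomicReference<>();

    public void reset(Source newSource) {
        if (newSource != null)
            newSource.addListener(this);            // register before publishing

        Source old = current.getAndSet(newSource);  // atomic publish-and-retrieve

        if (old != null)
            old.removeListener(this);               // exactly one caller sees each old source
    }
}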
/**
 * The error from the user-provided Observer is not handled by the subscribe method try/catch.
 *
 * <p>It is handled by the AtomicObserver that wraps the provided Observer.
 *
 * <p>Result: Passes (if AtomicObserver functionality exists)
 */
@Test
public void testCustomObservableWithErrorInObserverAsynchronous() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicInteger count = new AtomicInteger();
    final AtomicReference<Throwable> error = new AtomicReference<>();

    // FIXME custom built???
    Observable.just("1", "2", "three", "4")
        .subscribeOn(Schedulers.newThread())
        .safeSubscribe(
            new Observer<String>() {

                @Override
                public void onComplete() {
                    System.out.println("completed");
                    latch.countDown();
                }

                @Override
                public void onError(Throwable e) {
                    error.set(e);
                    System.out.println("error");
                    e.printStackTrace();
                    latch.countDown();
                }

                @Override
                public void onNext(String v) {
                    int num = Integer.parseInt(v);
                    System.out.println(num);
                    // doSomething(num);
                    count.incrementAndGet();
                }
            });

    // wait for async sequence to complete
    latch.await();

    assertEquals(2, count.get());
    assertNotNull(error.get());

    if (!(error.get() instanceof NumberFormatException)) {
        fail("It should be a NumberFormatException");
    }
}
/** @param e Error. */
void onError(Throwable e) {
    tx.commitError(e);

    if (err.compareAndSet(null, e)) {
        boolean marked = tx.setRollbackOnly();

        if (e instanceof GridCacheTxRollbackException) {
            if (marked) {
                try {
                    tx.rollback();
                } catch (GridException ex) {
                    U.error(log, "Failed to automatically rollback transaction: " + tx, ex);
                }
            }
        }

        onComplete();
    }
}
/**
 * Starts activity.
 *
 * @throws IgniteInterruptedCheckedException If interrupted.
 */
public void init() throws IgniteInterruptedCheckedException {
    if (isDone())
        return;

    if (init.compareAndSet(false, true)) {
        if (isDone())
            return;

        try {
            // Wait for event to occur to make sure that discovery
            // will return corresponding nodes.
            U.await(evtLatch);

            assert discoEvt != null : this;
            assert !dummy && !forcePreload : this;

            ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion());

            oldestNode.set(oldest);

            startCaches();

            // True if client node joined or failed.
            boolean clientNodeEvt;

            if (F.isEmpty(reqs)) {
                int type = discoEvt.type();

                assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED
                    : discoEvt;

                clientNodeEvt = CU.clientNode(discoEvt.eventNode());
            } else {
                assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt;

                boolean clientOnlyStart = true;

                for (DynamicCacheChangeRequest req : reqs) {
                    if (!req.clientStartOnly()) {
                        clientOnlyStart = false;

                        break;
                    }
                }

                clientNodeEvt = clientOnlyStart;
            }

            if (clientNodeEvt) {
                ClusterNode node = discoEvt.eventNode();

                // Client nodes need to initialize affinity for a local join event or for started
                // client caches.
                if (!node.isLocal()) {
                    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                        if (cacheCtx.isLocal())
                            continue;

                        GridDhtPartitionTopology top = cacheCtx.topology();

                        top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));

                        if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) {
                            initTopology(cacheCtx);

                            top.beforeExchange(this);
                        } else
                            cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion());
                    }

                    if (exchId.isLeft())
                        cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

                    onDone(exchId.topologyVersion());

                    skipPreload = cctx.kernalContext().clientNode();

                    return;
                }
            }

            if (cctx.kernalContext().clientNode()) {
                skipPreload = true;

                for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                    if (cacheCtx.isLocal())
                        continue;

                    GridDhtPartitionTopology top = cacheCtx.topology();

                    top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));
                }

                for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                    if (cacheCtx.isLocal())
                        continue;

                    initTopology(cacheCtx);
                }

                if (oldestNode.get() != null) {
                    rmtNodes =
                        new ConcurrentLinkedQueue<>(
                            CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

                    rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

                    ready.set(true);

                    initFut.onDone(true);

                    if (log.isDebugEnabled())
                        log.debug("Initialized future: " + this);

                    sendPartitions();
                } else
                    onDone(exchId.topologyVersion());

                return;
            }

            assert oldestNode.get() != null;

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) {
                    if (cacheCtx
                        .discovery()
                        .cacheAffinityNodes(cacheCtx.name(), topologyVersion())
                        .isEmpty())
                        U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex());
                }

                cacheCtx.preloader().onExchangeFutureAdded();
            }

            List<String> cachesWithoutNodes = null;

            if (exchId.isLeft()) {
                for (String name : cctx.cache().cacheNames()) {
                    if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) {
                        if (cachesWithoutNodes == null)
                            cachesWithoutNodes = new ArrayList<>();

                        cachesWithoutNodes.add(name);

                        // Fire event even if there is no client cache started.
                        if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) {
                            Event evt =
                                new CacheEvent(
                                    name,
                                    cctx.localNode(),
                                    cctx.localNode(),
                                    "All server nodes have left the cluster.",
                                    EventType.EVT_CACHE_NODES_LEFT,
                                    0,
                                    false,
                                    null,
                                    null,
                                    null,
                                    null,
                                    false,
                                    null,
                                    false,
                                    null,
                                    null,
                                    null);

                            cctx.gridEvents().record(evt);
                        }
                    }
                }
            }

            if (cachesWithoutNodes != null) {
                StringBuilder sb =
                    new StringBuilder(
                        "All server nodes for the following caches have left the cluster: ");

                for (int i = 0; i < cachesWithoutNodes.size(); i++) {
                    String cache = cachesWithoutNodes.get(i);

                    sb.append('\'').append(cache).append('\'');

                    if (i != cachesWithoutNodes.size() - 1)
                        sb.append(", ");
                }

                U.quietAndWarn(log, sb.toString());

                U.quietAndWarn(log, "Must have server nodes for caches to operate.");
            }

            assert discoEvt != null;

            assert exchId.nodeId().equals(discoEvt.eventNode().id());

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                GridClientPartitionTopology clientTop =
                    cctx.exchange().clearClientTopology(cacheCtx.cacheId());

                long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

                // Update before waiting for locks.
                if (!cacheCtx.isLocal())
                    cacheCtx
                        .topology()
                        .updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId()));
            }

            // Grab all alive remote nodes with order equal to or less than that of the last joined node.
            rmtNodes =
                new ConcurrentLinkedQueue<>(
                    CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

            rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

            for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet())
                // If received any messages, process them.
                onReceive(m.getKey(), m.getValue());

            for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet())
                // If received any messages, process them.
                onReceive(m.getKey(), m.getValue());

            AffinityTopologyVersion topVer = exchId.topologyVersion();

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (cacheCtx.isLocal())
                    continue;

                // Must initialize topology after we get discovery event.
                initTopology(cacheCtx);

                cacheCtx.preloader().updateLastExchangeFuture(this);
            }

            IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer);

            // Assign to a class variable so it will be included in the toString() output.
            this.partReleaseFut = partReleaseFut;

            if (log.isDebugEnabled())
                log.debug("Before waiting for partition release future: " + this);

            while (true) {
                try {
                    partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

                    break;
                } catch (IgniteFutureTimeoutCheckedException ignored) {
                    // Print pending transactions and locks that might have led to hang.
                    dumpPendingObjects();
                }
            }

            if (log.isDebugEnabled())
                log.debug("After waiting for partition release future: " + this);

            if (!F.isEmpty(reqs))
                blockGateways();

            if (exchId.isLeft())
                cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

            IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion());

            while (true) {
                try {
                    locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

                    break;
                } catch (IgniteFutureTimeoutCheckedException ignored) {
                    U.warn(
                        log,
                        "Failed to wait for locks release future. "
                            + "Dumping pending objects that might be the cause: "
                            + cctx.localNodeId());

                    U.warn(log, "Locked entries:");

                    Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks =
                        cctx.mvcc().unfinishedLocks(exchId.topologyVersion());

                    for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet())
                        U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']');
                }
            }

            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
                if (cacheCtx.isLocal())
                    continue;

                // Notify replication manager.
                GridCacheContext drCacheCtx =
                    cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;

                if (drCacheCtx.isDrEnabled())
                    drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft());

                // Partition release future is done so we can flush the write-behind store.
                cacheCtx.store().forceFlush();

                // Process queued undeploys prior to sending/spreading map.
                cacheCtx.preloader().unwindUndeploys();

                GridDhtPartitionTopology top = cacheCtx.topology();

                assert topVer.equals(top.topologyVersion())
                    : "Topology version is updated only by instances of this class inside the single ExchangeWorker thread.";

                top.beforeExchange(this);
            }

            for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
                top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId()));

                top.beforeExchange(this);
            }
        } catch (IgniteInterruptedCheckedException e) {
            onDone(e);

            throw e;
        } catch (Throwable e) {
            U.error(
                log,
                "Failed to reinitialize local partitions (preloading will be stopped): " + exchId,
                e);

            onDone(e);

            if (e instanceof Error)
                throw (Error) e;

            return;
        }

        if (F.isEmpty(rmtIds)) {
            onDone(exchId.topologyVersion());

            return;
        }

        ready.set(true);

        initFut.onDone(true);

        if (log.isDebugEnabled())
            log.debug("Initialized future: " + this);

        // If this node is not oldest.
        if (!oldestNode.get().id().equals(cctx.localNodeId()))
            sendPartitions();
        else {
            boolean allReceived = allReceived();

            if (allReceived && replied.compareAndSet(false, true)) {
                if (spreadPartitions())
                    onDone(exchId.topologyVersion());
            }
        }

        scheduleRecheck();
    } else
        assert false : "Skipped init future: " + this;
}
/**
 * @param nodeId Sender node id.
 * @param msg Single partition info.
 */
public void onReceive(final UUID nodeId, final GridDhtPartitionsSingleMessage msg) {
    assert msg != null;

    assert msg.exchangeId().equals(exchId);

    // Update last seen version.
    while (true) {
        GridCacheVersion old = lastVer.get();

        if (old == null || old.compareTo(msg.lastVersion()) < 0) {
            if (lastVer.compareAndSet(old, msg.lastVersion()))
                break;
        } else
            break;
    }

    if (isDone()) {
        if (log.isDebugEnabled())
            log.debug(
                "Received message for finished future (will reply only to sender) [msg="
                    + msg
                    + ", fut="
                    + this
                    + ']');

        sendAllPartitions(nodeId, cctx.gridConfig().getNetworkSendRetryCount());
    } else {
        initFut.listen(
            new CI1<IgniteInternalFuture<Boolean>>() {
                @Override
                public void apply(IgniteInternalFuture<Boolean> t) {
                    try {
                        if (!t.get()) // Just to check if there was an error.
                            return;

                        ClusterNode loc = cctx.localNode();

                        singleMsgs.put(nodeId, msg);

                        boolean match = true;

                        // Check if oldest node has changed.
                        if (!oldestNode.get().equals(loc)) {
                            match = false;

                            synchronized (mux) {
                                // Double check.
                                if (oldestNode.get().equals(loc))
                                    match = true;
                            }
                        }

                        if (match) {
                            boolean allReceived;

                            synchronized (rcvdIds) {
                                if (rcvdIds.add(nodeId))
                                    updatePartitionSingleMap(msg);

                                allReceived = allReceived();
                            }

                            // If got all replies, and initialization finished, and reply has not
                            // been sent yet.
                            if (allReceived && ready.get() && replied.compareAndSet(false, true)) {
                                spreadPartitions();

                                onDone(exchId.topologyVersion());
                            } else if (log.isDebugEnabled())
                                log.debug(
                                    "Exchange future full map is not sent [allReceived="
                                        + allReceived()
                                        + ", ready="
                                        + ready
                                        + ", replied="
                                        + replied.get()
                                        + ", init="
                                        + init.get()
                                        + ", fut="
                                        + this
                                        + ']');
                        }
                    } catch (IgniteCheckedException e) {
                        U.error(log, "Failed to initialize exchange future: " + this, e);
                    }
                }
            });
    }
}
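/*
 * Illustrative sketch (generic version type, not GridCacheVersion): the "update last seen version"
 * loop above is a monotonic-maximum update. It only replaces the stored value when the incoming
 * one is strictly newer, and retries if a concurrent update slipped in between the read and the
 * compareAndSet, so the published value never moves backwards.
 */
import java.util.concurrent.atomic.AtomicReference;

public class MonotonicMax<T extends Comparable<T>> {
    private final AtomicReference<T> max = new AtomicReference<>();

    /** Stores {@code candidate} only if it is newer than the current value. */
    public void offer(T candidate) {
        while (true) {
            T old = max.get();

            if (old == null || old.compareTo(candidate) < 0) {
                if (max.compareAndSet(old, candidate))
                    break;   // our candidate is now the newest published value
            } else
                break;       // an equal or newer value is already stored
        }
    }
}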
/** Completeness callback. */
private void onComplete() {
    onDone(tx, err.get());
}