public void testDelayedTasksThatFiredAtTheSameTimeAreExecutedConcurrently()
    throws InterruptedException, ExecutionException {
  final AppScheduledExecutorService service = new AppScheduledExecutorService(getName());
  final List<LogInfo> log = Collections.synchronizedList(new ArrayList<>());
  int delay = 500;
  int N = 20;
  List<? extends Future<?>> futures =
      ContainerUtil.map(
          Collections.nCopies(N, ""),
          __ ->
              service.schedule(
                  () -> {
                    log.add(new LogInfo(0));
                    TimeoutUtil.sleep(10 * 1000);
                  },
                  delay,
                  TimeUnit.MILLISECONDS));
  for (Future<?> future : futures) {
    future.get();
  }
  assertEquals(N, log.size());
  Set<Thread> usedThreads = ContainerUtil.map2Set(log, logInfo -> logInfo.currentThread);
  assertEquals(N, usedThreads.size());
  service.shutdownAppScheduledExecutorService();
  assertTrue(service.awaitTermination(10, TimeUnit.SECONDS));
}
/**
 * Constructs a new game. The game currently always runs as a standalone application; applet mode
 * (which would allow the game to be played online) is not supported yet.
 */
public Game() {
  Game.applet = false;
  canvas = new Canvas(this);

  solidShapes = Collections.newSetFromMap(new ConcurrentHashMap<Shape, Boolean>());
  allShapes = Collections.newSetFromMap(new ConcurrentHashMap<Shape, Boolean>());

  // TODO: sort out which data structures actually have to support concurrency
  layerContents = new ConcurrentHashMap<Integer, java.util.List<Shape>>();
  layers = new CopyOnWriteArrayList<Integer>();
  layerOf = new ConcurrentHashMap<Shape, Integer>();

  counters = new ArrayList<Counter>();

  Mouse mouse = new Mouse();
  if (applet) {
    addMouseMotionListener(mouse);
    addMouseListener(mouse);
    addKeyListener(new Keyboard());
  } else {
    frame = new JFrame();
    frame.addMouseMotionListener(mouse);
    frame.addMouseListener(mouse);
    frame.addKeyListener(new Keyboard());
    frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
  }
  setDefaults();
}
@Override
protected void doSubscribe(Node node, NotifyListener listener) {
  List<NodeType> listenNodeTypes = node.getListenNodeTypes();
  if (CollectionUtils.isEmpty(listenNodeTypes)) {
    return;
  }
  for (NodeType listenNodeType : listenNodeTypes) {
    String listenNodePath = NodeRegistryUtils.getNodeTypePath(clusterName, listenNodeType);

    Notifier notifier = notifiers.get(listenNodePath);
    if (notifier == null) {
      Notifier newNotifier = new Notifier(listenNodePath);
      notifiers.putIfAbsent(listenNodePath, newNotifier);
      notifier = notifiers.get(listenNodePath);
      if (notifier == newNotifier) {
        notifier.start();
      }
    }

    boolean success = false;
    NodeRegistryException exception = null;
    for (Map.Entry<String, JedisPool> entry : jedisPools.entrySet()) {
      JedisPool jedisPool = entry.getValue();
      try {
        Jedis jedis = jedisPool.getResource();
        try {
          doNotify(
              jedis,
              Collections.singletonList(listenNodePath),
              Collections.singletonList(listener));
          success = true;
          break; // reading the data from a single server is enough
        } finally {
          jedis.close();
        }
      } catch (Throwable t) {
        exception =
            new NodeRegistryException(
                "Failed to subscribe node to redis registry. registry: "
                    + entry.getKey()
                    + ", node: "
                    + node
                    + ", cause: "
                    + t.getMessage(),
                t);
      }
    }
    if (exception != null) {
      if (success) {
        LOGGER.warn(exception.getMessage(), exception);
      } else {
        throw exception;
      }
    }
  }
}
/** {@inheritDoc} */
@Nullable
@Override
public Map<String, Collection<?>> run(GridStreamerContext ctx, Collection<Quote> quotes)
    throws GridException {
  GridStreamerWindow win = ctx.window("stage1");

  // Add incoming quotes to the window.
  win.enqueueAll(quotes);

  Collection<Quote> polled = win.pollEvictedBatch();

  if (!polled.isEmpty()) {
    Map<String, Bar> map = new HashMap<>();

    for (Quote quote : polled) {
      String symbol = quote.symbol();

      Bar bar = map.get(symbol);

      if (bar == null) map.put(symbol, bar = new Bar(symbol));

      bar.update(quote.price());
    }

    return Collections.<String, Collection<?>>singletonMap(ctx.nextStageName(), map.values());
  }

  return null;
}
public static Iterable<Object> context() {
  if (ENABLED) {
    return DebugScope.getInstance().getCurrentContext();
  } else {
    return Collections.emptyList();
  }
}
private List<TransactionInfo> getTransactions(
    Address source, Set<Integer> segments, int topologyId) {
  if (trace) {
    log.tracef(
        "Requesting transactions for segments %s of cache %s from node %s",
        segments, cacheName, source);
  }
  // get transactions and locks
  try {
    StateRequestCommand cmd =
        commandsFactory.buildStateRequestCommand(
            StateRequestCommand.Type.GET_TRANSACTIONS,
            rpcManager.getAddress(),
            topologyId,
            segments);
    Map<Address, Response> responses =
        rpcManager.invokeRemotely(
            Collections.singleton(source), cmd, ResponseMode.SYNCHRONOUS_IGNORE_LEAVERS, timeout);

    Response response = responses.get(source);
    if (response instanceof SuccessfulResponse) {
      return (List<TransactionInfo>) ((SuccessfulResponse) response).getResponseValue();
    }
    log.failedToRetrieveTransactionsForSegments(segments, cacheName, source, null);
  } catch (CacheException e) {
    log.failedToRetrieveTransactionsForSegments(segments, cacheName, source, e);
  }
  return null;
}
/** Return all of the neighbors with whom we share the provided range. */
static Set<InetAddress> getNeighbors(String table, Range<Token> toRepair) {
  StorageService ss = StorageService.instance;
  Map<Range<Token>, List<InetAddress>> replicaSets = ss.getRangeToAddressMap(table);
  Range<Token> rangeSuperSet = null;
  for (Range<Token> range : ss.getLocalRanges(table)) {
    if (range.contains(toRepair)) {
      rangeSuperSet = range;
      break;
    } else if (range.intersects(toRepair)) {
      throw new IllegalArgumentException(
          "Requested range intersects a local range but is not fully contained in one; this would lead to imprecise repair");
    }
  }
  if (rangeSuperSet == null || !replicaSets.containsKey(toRepair)) return Collections.emptySet();

  Set<InetAddress> neighbors = new HashSet<InetAddress>(replicaSets.get(rangeSuperSet));
  neighbors.remove(FBUtilities.getBroadcastAddress());

  // Exclude all nodes with version <= 0.7 since they don't know how to
  // create a correct merkle tree (they build it over the full range)
  Iterator<InetAddress> iter = neighbors.iterator();
  while (iter.hasNext()) {
    InetAddress endpoint = iter.next();
    if (Gossiper.instance.getVersion(endpoint) <= MessagingService.VERSION_07) {
      logger.info(
          "Excluding "
              + endpoint
              + " from repair because it is on version 0.7 or earlier. You should consider updating this node before running repair again.");
      iter.remove();
    }
  }
  return neighbors;
}
/**
 * The gossip digest is built based on randomization rather than just looping through the
 * collection of live endpoints.
 *
 * @param gDigests list of Gossip Digests.
 */
void makeRandomGossipDigest(List<GossipDigest> gDigests) {
  /* Add the local endpoint state */
  EndpointState epState = endpointStateMap_.get(localEndpoint_);
  int generation = epState.getHeartBeatState().getGeneration();
  int maxVersion = getMaxEndpointStateVersion(epState);
  gDigests.add(new GossipDigest(localEndpoint_, generation, maxVersion));

  List<InetAddress> endpoints = new ArrayList<InetAddress>(endpointStateMap_.keySet());
  Collections.shuffle(endpoints, random_);
  for (InetAddress endpoint : endpoints) {
    epState = endpointStateMap_.get(endpoint);
    if (epState != null) {
      generation = epState.getHeartBeatState().getGeneration();
      maxVersion = getMaxEndpointStateVersion(epState);
      gDigests.add(new GossipDigest(endpoint, generation, maxVersion));
    } else {
      gDigests.add(new GossipDigest(endpoint, 0, 0));
    }
  }

  /* FOR DEBUG ONLY - remove later */
  StringBuilder sb = new StringBuilder();
  for (GossipDigest gDigest : gDigests) {
    sb.append(gDigest);
    sb.append(" ");
  }
  if (logger_.isTraceEnabled()) logger_.trace("Gossip Digests are : " + sb.toString());
}
private void doNotify(Jedis jedis, String key) {
  for (Map.Entry<Node, Set<NotifyListener>> entry :
      new HashMap<Node, Set<NotifyListener>>(getSubscribed()).entrySet()) {
    doNotify(
        jedis, Collections.singletonList(key), new HashSet<NotifyListener>(entry.getValue()));
  }
}
public List<TravelQuote> getRankedTravelQuotes(
    TravelInfo travelInfo,
    Set<TravelCompany> companies,
    Comparator<TravelQuote> ranking,
    long time,
    TimeUnit unit)
    throws InterruptedException {
  List<QuoteTask> tasks = new ArrayList<QuoteTask>();
  for (TravelCompany company : companies) tasks.add(new QuoteTask(company, travelInfo));

  List<Future<TravelQuote>> futures = exec.invokeAll(tasks, time, unit);

  List<TravelQuote> quotes = new ArrayList<TravelQuote>(tasks.size());
  Iterator<QuoteTask> taskIter = tasks.iterator();
  for (Future<TravelQuote> f : futures) {
    QuoteTask task = taskIter.next();
    try {
      quotes.add(f.get());
    } catch (ExecutionException e) {
      quotes.add(task.getFailureQuote(e.getCause()));
    } catch (CancellationException e) {
      quotes.add(task.getTimeoutQuote(e));
    }
  }

  Collections.sort(quotes, ranking);
  return quotes;
}
private static void testPotato(
    Class<? extends Collection> implClazz, Class<? extends List> argClazz) throws Throwable {
  try {
    System.out.printf("implClazz=%s, argClazz=%s\n", implClazz.getName(), argClazz.getName());
    final int iterations = 100000;
    final List<Integer> list = (List<Integer>) argClazz.newInstance();
    final Integer one = Integer.valueOf(1);
    final List<Integer> oneElementList = Collections.singletonList(one);
    final Constructor<? extends Collection> constr = implClazz.getConstructor(Collection.class);
    final Thread t =
        new CheckedThread() {
          public void realRun() {
            for (int i = 0; i < iterations; i++) {
              list.add(one);
              list.remove(one);
            }
          }
        };
    t.setDaemon(true);
    t.start();

    for (int i = 0; i < iterations; i++) {
      Collection<?> coll = constr.newInstance(list);
      Object[] elts = coll.toArray();
      check(elts.length == 0 || (elts.length == 1 && elts[0] == one));
    }
  } catch (Throwable t) {
    unexpected(t);
  }
}
static {
  Map<String, String> fakeData = new HashMap<>();
  fakeData.put("http://www.weather.gov", "Weather forecast");
  fakeData.put("http://www.espn.com", "Sports scores");
  fakeData.put("http://www.marketwatch.com", "Stock market data");
  fakeData.put("http://www.fandango.com", "Movie showtimes");

  data = Collections.unmodifiableMap(fakeData);
}
@Override
protected List<O> execute(Task<I, O> task, List<I> batch) {
  try {
    return task.apply(batch);
  } catch (IOException e) {
    e.printStackTrace();
  }
  return Collections.emptyList();
}
/** {@inheritDoc} */
@Override
public final Map<? extends GridJob, GridNode> map(List<GridNode> subgrid, Callable<T> arg)
    throws GridException {
  assert subgrid != null;
  assert !subgrid.isEmpty();

  GridJob job = createJob(arg);

  return Collections.singletonMap(job, balancer.getBalancedNode(job, null));
}
public static void main(String[] args) {
  Date start = new Date();

  if (args.length < 3) {
    System.out.println("Wrong number of arguments:\n" + USAGE);
    return;
  }

  // get # threads
  int tc = Integer.parseInt(args[0]);
  String outfile = args[1];

  // make a threadsafe queue of all files to process
  ConcurrentLinkedQueue<String> files = new ConcurrentLinkedQueue<String>();
  for (int i = 2; i < args.length; i++) {
    files.add(args[i]);
  }

  // hashtable for results
  Hashtable<String, Integer> results = new Hashtable<String, Integer>(HASH_SIZE, LF);

  // spin up the threads
  Thread[] workers = new Thread[tc];
  for (int i = 0; i < tc; i++) {
    workers[i] = new Worker(files, results);
    workers[i].start();
  }

  // wait for them to finish
  try {
    for (int i = 0; i < tc; i++) {
      workers[i].join();
    }
  } catch (Exception e) {
    System.out.println("Caught Exception: " + e.getMessage());
  }

  // terminal output
  Date end = new Date();
  System.out.println(end.getTime() - start.getTime() + " total milliseconds");
  System.out.println(results.size() + " unique words");

  // sort results for easy comparison/verification
  List<Map.Entry<String, Integer>> sorted_results =
      new ArrayList<Map.Entry<String, Integer>>(results.entrySet());
  Collections.sort(sorted_results, new KeyComp());

  // file output
  try {
    PrintStream out = new PrintStream(outfile);
    for (int i = 0; i < sorted_results.size(); i++) {
      out.println(sorted_results.get(i).getKey() + "\t" + sorted_results.get(i).getValue());
    }
  } catch (Exception e) {
    System.out.println("Caught Exception: " + e.getMessage());
  }
}
/**
 * Adds a new int parameter to be altered for the model being tuned.
 *
 * @param param the model parameter
 * @param initialSearchValues the values to try for the specified parameter
 */
public void addParameter(IntParameter param, int... initialSearchValues) {
  searchParams.add(param);
  DoubleList dl = new DoubleList(initialSearchValues.length);
  for (double d : initialSearchValues) dl.add(d);
  Arrays.sort(dl.getBackingArray()); // convenience, only really needed if param is warm
  if (param.isWarmParameter() && !param.preferredLowToHigh())
    Collections.reverse(dl); // put it in the preferred order
  if (param.isWarmParameter()) // put it at the front!
    searchValues.add(0, dl);
  else searchValues.add(dl);
}
/**
 * Returns all supported capture sizes.
 *
 * @return an array of capture sizes, in bytes, never <code>null</code>.
 */
public Integer[] getCaptureSizes() {
  final String rawValue = this.properties.get(DEVICE_CAPTURESIZES);
  final String[] values = rawValue.split(",\\s*");

  final List<Integer> result = new ArrayList<Integer>();
  for (String value : values) {
    result.add(Integer.valueOf(value.trim()));
  }
  Collections.sort(
      result, NumberUtils.<Integer>createNumberComparator(false /* aSortAscending */));
  return result.toArray(new Integer[result.size()]);
}
public void testAwaitTerminationMakesSureTasksTransferredToBackendExecutorAreFinished()
    throws InterruptedException, ExecutionException {
  final AppScheduledExecutorService service = new AppScheduledExecutorService(getName());
  final List<LogInfo> log = Collections.synchronizedList(new ArrayList<>());
  int N = 20;
  int delay = 500;
  List<? extends Future<?>> futures =
      ContainerUtil.map(
          Collections.nCopies(N, ""),
          s ->
              service.schedule(
                  () -> {
                    TimeoutUtil.sleep(5000);
                    log.add(new LogInfo(0));
                  },
                  delay,
                  TimeUnit.MILLISECONDS));
  TimeoutUtil.sleep(delay);
  long start = System.currentTimeMillis();
  while (!service.delayQueue.isEmpty() && System.currentTimeMillis() < start + 20000) {
    // wait until all tasks are transferred to the backend executor
  }
  List<SchedulingWrapper.MyScheduledFutureTask> queuedTasks = new ArrayList<>(service.delayQueue);
  if (!queuedTasks.isEmpty()) {
    String s =
        queuedTasks
            .stream()
            .map(BoundedTaskExecutor::info)
            .collect(Collectors.toList())
            .toString();
    fail("Queued tasks left: " + s + ";\n" + queuedTasks);
  }
  service.shutdownAppScheduledExecutorService();
  assertTrue(service.awaitTermination(20, TimeUnit.SECONDS));

  for (Future<?> future : futures) {
    assertTrue(future.isDone());
  }
  assertEquals(log.toString(), N, log.size());
}
/**
 * Set the layer that a given shape will be displayed in. Since the shapes in this game are two
 * dimensional, it must be decided which will appear "on top" when two shapes overlap. The shape
 * in the higher layer will appear on top.
 *
 * <p>Setting a shape's layer will only affect how it is displayed; a shape's layer has no effect
 * on how it interacts with other shapes. (For example, two shapes can touch even if they are in
 * different layers. See {@link Shape#isTouching(Shape)}.)
 *
 * @param shape the shape whose layer is being set.
 * @param layer the layer into which this shape will be moved.
 */
static void setLayer(Shape shape, int layer) {
  removeFromLayers(shape);

  // add new stuff
  if (!layerContents.containsKey(layer)) {
    layerContents.put(layer, new CopyOnWriteArrayList<Shape>());
    int insertionPoint = ~Collections.binarySearch(layers, layer);
    layers.add(insertionPoint, layer);
  }
  layerContents.get(layer).add(shape);
  layerOf.put(shape, layer);
}
/**
 * Adds a new double parameter to be altered for the model being tuned.
 *
 * @param param the model parameter
 * @param initialSearchValues the values to try for the specified parameter
 */
public void addParameter(DoubleParameter param, double... initialSearchValues) {
  if (param == null) throw new IllegalArgumentException("null not allowed for parameter");
  searchParams.add(param);
  DoubleList dl = new DoubleList(initialSearchValues.length);
  for (double d : initialSearchValues) dl.add(d);
  Arrays.sort(dl.getBackingArray()); // convenience, only really needed if param is warm
  if (param.isWarmParameter() && !param.preferredLowToHigh())
    Collections.reverse(dl); // put it in the preferred order
  if (param.isWarmParameter()) // put it at the front!
    searchValues.add(0, dl);
  else searchValues.add(dl);
}
/**
 * Test file creation.
 *
 * @throws Exception In case of exception.
 */
public void testCreateFile() throws Exception {
  GridGgfsPath root = new GridGgfsPath("/");
  GridGgfsPath path = new GridGgfsPath("/asdf");

  long max = 100L * CFG_BLOCK_SIZE / WRITING_THREADS_CNT;

  for (long size = 0; size <= max; size = size * 15 / 10 + 1) {
    assertEquals(Collections.<GridGgfsPath>emptyList(), fs.listPaths(root));

    testCreateFile(path, size, new Random().nextInt());
  }
}
static class Holder {

  Holder(MongoOptions options) {
    _options = options;
  }

  DBPortPool get(InetSocketAddress addr) {
    DBPortPool p = _pools.get(addr);
    if (p != null) return p;

    synchronized (_pools) {
      p = _pools.get(addr);
      if (p != null) {
        return p;
      }

      p = new DBPortPool(addr, _options);
      _pools.put(addr, p);
      String name = "com.mongodb:type=ConnectionPool,host=" + addr.toString().replace(':', '_');

      try {
        ObjectName on = new ObjectName(name);
        if (_server.isRegistered(on)) {
          _server.unregisterMBean(on);
          Bytes.LOGGER.log(
              Level.INFO, "multiple Mongo instances for same host, jmx numbers might be off");
        }
        _server.registerMBean(p, on);
      } catch (JMException e) {
        Bytes.LOGGER.log(Level.WARNING, "jmx registration error, continuing", e);
      } catch (java.security.AccessControlException e) {
        Bytes.LOGGER.log(Level.WARNING, "jmx registration error, continuing", e);
      }
    }

    return p;
  }

  void close() {
    synchronized (_pools) {
      for (DBPortPool p : _pools.values()) {
        p.close();
      }
    }
  }

  final MongoOptions _options;
  final Map<InetSocketAddress, DBPortPool> _pools =
      Collections.synchronizedMap(new HashMap<InetSocketAddress, DBPortPool>());
  final MBeanServer _server = ManagementFactory.getPlatformMBeanServer();
}
/**
 * @param cacheCtx Cache context.
 * @throws IgniteCheckedException If failed.
 */
private void initTopology(GridCacheContext cacheCtx) throws IgniteCheckedException {
  if (stopping(cacheCtx.cacheId())) return;

  if (canCalculateAffinity(cacheCtx)) {
    if (log.isDebugEnabled())
      log.debug(
          "Will recalculate affinity [locNodeId=" + cctx.localNodeId() + ", exchId=" + exchId + ']');

    cacheCtx.affinity().calculateAffinity(exchId.topologyVersion(), discoEvt);
  } else {
    if (log.isDebugEnabled())
      log.debug(
          "Will request affinity from remote node [locNodeId="
              + cctx.localNodeId()
              + ", exchId="
              + exchId
              + ']');

    // Fetch affinity assignment from remote node.
    GridDhtAssignmentFetchFuture fetchFut =
        new GridDhtAssignmentFetchFuture(
            cacheCtx,
            exchId.topologyVersion(),
            CU.affinityNodes(cacheCtx, exchId.topologyVersion()));

    fetchFut.init();

    List<List<ClusterNode>> affAssignment = fetchFut.get();

    if (log.isDebugEnabled())
      log.debug(
          "Fetched affinity from remote node, initializing affinity assignment [locNodeId="
              + cctx.localNodeId()
              + ", topVer="
              + exchId.topologyVersion()
              + ']');

    if (affAssignment == null) {
      affAssignment = new ArrayList<>(cacheCtx.affinity().partitions());

      List<ClusterNode> empty = Collections.emptyList();

      for (int i = 0; i < cacheCtx.affinity().partitions(); i++) affAssignment.add(empty);
    }

    cacheCtx.affinity().initializeAffinity(exchId.topologyVersion(), affAssignment);
  }
}
public List<LoopEx> outterFirst() {
  ArrayList<LoopEx> loops = new ArrayList<>(loops());
  Collections.sort(
      loops,
      new Comparator<LoopEx>() {
        @Override
        public int compare(LoopEx o1, LoopEx o2) {
          return o1.lirLoop().depth - o2.lirLoop().depth;
        }
      });
  return loops;
}
@SuppressWarnings("unchecked") public static <T> List<T> contextSnapshot(Class<T> clazz) { if (ENABLED) { List<T> result = new ArrayList<>(); for (Object o : context()) { if (clazz.isInstance(o)) { result.add((T) o); } } return result; } else { return Collections.emptyList(); } }
public Void call() {
  final List<RolapStar> starList = CacheControlImpl.getStarList(region);
  Collections.sort(
      starList,
      new Comparator<RolapStar>() {
        public int compare(RolapStar o1, RolapStar o2) {
          return o1.getFactTable().getAlias().compareTo(o2.getFactTable().getAlias());
        }
      });
  for (RolapStar star : starList) {
    indexRegistry.getIndex(star).printCacheState(pw);
  }
  return null;
}
private void generateRemainingPagesQueueForAllFiles() {
  List<Integer>[] array = new ArrayList[FILE_COUNT];
  for (int k = 0; k < FILE_COUNT; ++k) {
    array[k] = new ArrayList<Integer>(PAGE_COUNT);
    for (Integer i = 0; i < PAGE_COUNT; ++i) {
      array[k].add(i);
    }
  }
  for (int i = 0; i < FILE_COUNT; ++i) {
    Collections.shuffle(array[i]);
    pagesQueue.set(i, new ConcurrentLinkedQueue<Integer>(array[i]));
  }
}
public static void main(String[] args) throws Exception {
  // Test with Hashtable
  Object hashtableObj = new Hashtable<String, Integer>();
  performTest(hashtableObj);

  // Test with synchronizedMap
  Object syncMapObj = Collections.synchronizedMap(new HashMap<String, Integer>());
  performTest(syncMapObj);

  // Test with ConcurrentHashMap
  Object concurrentHashMapObj = new ConcurrentHashMap<String, Integer>();
  performTest(concurrentHashMapObj);
}
/** {@inheritDoc} */
public List<$T> readAllNow() {
  lockWrite();
  try {
    int $p = $gate.drainPermits();
    if ($p == 0) {
      return Collections.emptyList();
    } else {
      List<$T> $v = new ArrayList<$T>($p);
      for (int $i = 0; $i < $p; $i++) $v.add($queue.poll());
      return $v;
    }
  } finally {
    unlockWrite();
    checkForFinale();
  }
}
/**
 * @param p Partition.
 * @param topVer Topology version ({@code -1} for all nodes).
 * @param state Partition state.
 * @param states Additional partition states.
 * @return List of nodes for the partition.
 */
private List<ClusterNode> nodes(
    int p,
    AffinityTopologyVersion topVer,
    GridDhtPartitionState state,
    GridDhtPartitionState... states) {
  Collection<UUID> allIds =
      topVer.topologyVersion() > 0 ? F.nodeIds(CU.affinityNodes(cctx, topVer)) : null;

  lock.readLock().lock();

  try {
    assert node2part != null && node2part.valid()
        : "Invalid node-to-partitions map [topVer="
            + topVer
            + ", allIds="
            + allIds
            + ", node2part="
            + node2part
            + ", cache="
            + cctx.name()
            + ']';

    Collection<UUID> nodeIds = part2node.get(p);

    // Node IDs can be null if both primary and backup nodes disappear.
    int size = nodeIds == null ? 0 : nodeIds.size();

    if (size == 0) return Collections.emptyList();

    List<ClusterNode> nodes = new ArrayList<>(size);

    for (UUID id : nodeIds) {
      if (topVer.topologyVersion() > 0 && !allIds.contains(id)) continue;

      if (hasState(p, id, state, states)) {
        ClusterNode n = cctx.discovery().node(id);

        if (n != null && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion()))
          nodes.add(n);
      }
    }

    return nodes;
  } finally {
    lock.readLock().unlock();
  }
}