@SuppressWarnings("unchecked") void onShardResponse(ShardId shardId, TransportShardMultiPercolateAction.Response response) { logger.trace("{} Percolate shard response", shardId); try { for (TransportShardMultiPercolateAction.Response.Item item : response.items()) { AtomicReferenceArray shardResults = responsesByItemAndShard.get(item.slot()); if (shardResults == null) { assert false : "shardResults can't be null"; continue; } if (item.failed()) { shardResults.set( shardId.id(), new BroadcastShardOperationFailedException(shardId, item.error().string())); } else { shardResults.set(shardId.id(), item.response()); } assert expectedOperationsPerItem.get(item.slot()).get() >= 1 : "slot[" + item.slot() + "] can't be lower than one"; if (expectedOperationsPerItem.get(item.slot()).decrementAndGet() == 0) { // Failure won't bubble up, since we fail the whole request now via the catch clause // below, // so expectedOperationsPerItem will not be decremented twice. reduce(item.slot()); } } } catch (Throwable e) { logger.error("{} Percolate original reduce error", e, shardId); finalListener.onFailure(e); } }
public Connection getConnection() throws SQLException {
  int space = -1;
  for (int i = 0; i < connections.length(); i++) {
    if (connections.get(i) == null) {
      space = i;
    } else {
      if (connections.get(i).lease()) {
        return connections.get(i);
      } else if ((i == POOL_SIZE - 1) && (space < 0)) {
        throw new SQLException(ERROR_MESSAGE_POOL);
      }
    }
  }
  Connection conn;
  try {
    conn = DriverManager.getConnection(url, user, password);
  } catch (SQLException e) {
    throw new SQLException(ERROR_MESSAGE_CONNECTION);
  }
  JDCConnection c = new JDCConnection(conn, this);
  c.lease();
  connections.set(space, c);
  return connections.get(space);
}
/**
 * Get or create a trace object for this module id. Trace modules with id are cached.
 *
 * @param moduleId module id
 * @return the trace object
 */
public Trace getTrace(int moduleId) {
  Trace t = traces.get(moduleId);
  if (t == null) {
    t = new Trace(writer, moduleId);
    if (!traces.compareAndSet(moduleId, null, t)) {
      t = traces.get(moduleId);
    }
  }
  return t;
}
public void testNewReferenceArray_withStringArray() throws Exception {
  String[] array = {"foo", "bar", "baz"};
  AtomicReferenceArray<String> refArray = Atomics.newReferenceArray(array);
  for (int i = 0; i < array.length; ++i) {
    assertEquals(array[i], refArray.get(i));
  }
  try {
    refArray.get(array.length);
    fail();
  } catch (IndexOutOfBoundsException expected) {
  }
}
public void testNewReferenceArray_withLength() throws Exception {
  int length = 42;
  AtomicReferenceArray<String> refArray = Atomics.newReferenceArray(length);
  for (int i = 0; i < length; ++i) {
    assertEquals(null, refArray.get(i));
  }
  try {
    refArray.get(length);
    fail();
  } catch (IndexOutOfBoundsException expected) {
  }
}
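// The two tests above exercise Guava's Atomics factory methods. A minimal, self-contained
// sketch of the copy semantics they rely on (assuming Guava is on the classpath): the factory
// copies the source array, so later writes to the source are not visible through the atomic view.
import java.util.concurrent.atomic.AtomicReferenceArray;
import com.google.common.util.concurrent.Atomics;

class AtomicsCopyDemo {
  public static void main(String[] args) {
    String[] source = {"foo", "bar"};
    AtomicReferenceArray<String> view = Atomics.newReferenceArray(source);
    source[0] = "mutated";           // does not affect the copied elements
    System.out.println(view.get(0)); // prints "foo"
  }
}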
/**
 * Obtains an instance of <code>WeekOfWeekBasedYear</code> from a value.
 *
 * <p>A week of week-based-year object represents one of the 53 weeks of the year, from 1 to 53.
 * These are cached internally and returned as singletons, so they can be compared using ==.
 *
 * @param weekOfWeekyear the week of week-based-year to represent, from 1 to 53
 * @return the WeekOfWeekBasedYear singleton, never null
 * @throws IllegalCalendarFieldValueException if the weekOfWeekyear is invalid
 */
public static WeekOfWeekBasedYear weekOfWeekBasedYear(int weekOfWeekyear) {
  try {
    // The cache is zero-based, so week n lives at index n - 1.
    WeekOfWeekBasedYear result = CACHE.get(--weekOfWeekyear);
    if (result == null) {
      WeekOfWeekBasedYear temp = new WeekOfWeekBasedYear(weekOfWeekyear + 1);
      CACHE.compareAndSet(weekOfWeekyear, null, temp);
      result = CACHE.get(weekOfWeekyear);
    }
    return result;
  } catch (IndexOutOfBoundsException ex) {
    // Restore the one-based value for the error message.
    throw new IllegalCalendarFieldValueException(rule(), ++weekOfWeekyear);
  }
}
public void reapConnections() {
  long stale = System.currentTimeMillis() - TIME_OUT;
  int i = 0;
  // Guard on i < connections.length() so the scan cannot run past the end of the array
  // when every slot is occupied.
  while ((i < connections.length()) && (connections.get(i) != null)) {
    JDCConnection conn = connections.get(i);
    if (conn.inUse() && (stale > conn.getLastUse()) && !conn.validate()) {
      removeConnection(i);
    } else {
      i++;
    }
  }
}
/**
 * Obtains an instance of <code>MinuteOfHour</code>.
 *
 * @param minuteOfHour the minute-of-hour to represent, from 0 to 59
 * @return the created MinuteOfHour
 * @throws IllegalCalendarFieldValueException if the minuteOfHour is invalid
 */
public static MinuteOfHour minuteOfHour(int minuteOfHour) {
  try {
    MinuteOfHour result = CACHE.get(minuteOfHour);
    if (result == null) {
      MinuteOfHour temp = new MinuteOfHour(minuteOfHour);
      CACHE.compareAndSet(minuteOfHour, null, temp);
      result = CACHE.get(minuteOfHour);
    }
    return result;
  } catch (IndexOutOfBoundsException ex) {
    throw new IllegalCalendarFieldValueException(rule(), minuteOfHour);
  }
}
/**
 * Obtains an instance of {@code DayOfMonth}.
 *
 * <p>A day-of-month object represents one of the 31 days of the month, from 1 to 31.
 *
 * @param dayOfMonth the day-of-month to represent, from 1 to 31
 * @return the day-of-month, not null
 * @throws CalendricalException if the day-of-month is invalid
 */
public static DayOfMonth of(int dayOfMonth) {
  try {
    // The cache is zero-based, so day n lives at index n - 1.
    DayOfMonth result = CACHE.get(--dayOfMonth);
    if (result == null) {
      DayOfMonth temp = new DayOfMonth(dayOfMonth + 1);
      CACHE.compareAndSet(dayOfMonth, null, temp);
      result = CACHE.get(dayOfMonth);
    }
    return result;
  } catch (IndexOutOfBoundsException ex) {
    throw new CalendricalException("Invalid value for DayOfMonth: " + ++dayOfMonth);
  }
}
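// getTrace, weekOfWeekBasedYear, minuteOfHour, and DayOfMonth.of above all use the same
// lock-free lazy-cache idiom: read the slot, construct a candidate on a miss, publish it with
// compareAndSet, then re-read so every racing thread agrees on a single winner. A minimal,
// self-contained sketch of just that idiom (the Value type and the 0-59 range are illustrative
// assumptions, not taken from the snippets above):
import java.util.concurrent.atomic.AtomicReferenceArray;

class SingletonCache {
  private static final AtomicReferenceArray<Value> CACHE = new AtomicReferenceArray<>(60);

  static Value of(int index) {
    Value result = CACHE.get(index); // throws IndexOutOfBoundsException if out of range
    if (result == null) {
      CACHE.compareAndSet(index, null, new Value(index)); // only one racing writer succeeds
      result = CACHE.get(index); // re-read: all threads now see the single winner
    }
    return result;
  }

  static final class Value {
    final int index;
    Value(int index) { this.index = index; }
  }
}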
private boolean addHashEntry(
    final AtomicReferenceArray<ClassInfoEntry> hashTable, final ClassInfo ci, final int hash) {
  ClassInfoEntry cie = hashTable.get(hash);
  while (cie != null) {
    if (ci.equals(cie._classInfo)) {
      return false;
    }
    cie = cie._next;
  }
  // Note: the re-read followed by a plain set below is not atomic; a concurrent writer to the
  // same bucket could be lost unless callers serialize writes.
  cie = hashTable.get(hash);
  final ClassInfoEntry newCie = new ClassInfoEntry(ci, cie);
  hashTable.set(hash, newCie);
  _size.incrementAndGet();
  return true;
}
/**
 * Removes the next element (at hd +1). <em>Note that this method is not concurrent, as RingBuffer
 * can only have 1 remover thread active at any time !</em>
 *
 * @param nullify Nulls the element in the array if true
 * @return T if there was a non-null element at position hd +1, or null if the element at hd+1 was
 *     null, or hd+1 > hr.
 */
public T remove(boolean nullify) {
  long tmp = hd + 1;
  if (tmp > hr.get()) return null;
  int index = index(tmp);
  T element = buf.get(index);
  if (element == null) return null;
  hd = tmp;

  if (nullify) {
    long tmp_low = low;
    if (tmp == tmp_low + 1) {
      buf.compareAndSet(index, element, null);
    } else {
      int from = index(tmp_low + 1), length = (int) (tmp - tmp_low), capacity = capacity();
      for (int i = from; i < from + length; i++) {
        index = i % capacity;
        buf.set(index, null);
      }
    }
    low = tmp;
    lock.lock();
    try {
      buffer_full.signalAll();
    } finally {
      lock.unlock();
    }
  }
  return element;
}
/**
 * Adds a new element to the buffer
 *
 * @param seqno The seqno of the element
 * @param element The element
 * @param block If true, add() will block when the buffer is full until there is space. Else,
 *     add() will return immediately, either successfully or unsuccessfully (if the buffer is
 *     full)
 * @return True if the element was added, false otherwise.
 */
public boolean add(long seqno, T element, boolean block) {
  validate(seqno);

  if (seqno <= hd) // seqno already delivered, includes check seqno <= low
    return false;

  if (seqno - low > capacity() && (!block || !block(seqno))) // seqno too big
    return false;

  // now we can set any slot > hd without overwriting low (check #1 above)
  int index = index(seqno);

  // Fix for correctness check #1 (see doc/design/RingBuffer.txt)
  if (buf.get(index) != null || seqno <= hd) return false;

  if (!buf.compareAndSet(index, null, element)) // the element at buf[index] was already present
    return false;

  // now see if hr needs to be moved forward; this can be concurrent as we may have multiple
  // producers
  for (;;) {
    long current_hr = hr.get();
    long new_hr = Math.max(seqno, current_hr);
    if (new_hr <= current_hr || hr.compareAndSet(current_hr, new_hr)) break;
  }
  return true;
}
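// The final loop in add() above is the standard lock-free "advance a high-water mark" idiom:
// retry compareAndSet until either our value is published or another producer has already
// pushed the mark at least as far. A self-contained sketch of just that idiom, safe for any
// number of concurrent producers (AtomicLong stands in for the hr field above):
import java.util.concurrent.atomic.AtomicLong;

class HighWaterMark {
  private final AtomicLong mark = new AtomicLong(0);

  /** Raises the mark to at least {@code seqno}; never lowers it. */
  void advanceTo(long seqno) {
    for (;;) {
      long current = mark.get();
      if (seqno <= current || mark.compareAndSet(current, seqno)) {
        return; // either already high enough, or we published our value
      }
      // CAS lost a race: reload and retry
    }
  }
}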
public void closeConnections() {
  int i = 0;
  // Guard on i < connections.length() so the scan cannot run past the end of the array
  // when every slot is occupied.
  while ((i < connections.length()) && (connections.get(i) != null)) {
    removeConnection(i);
    i++;
  }
}
/**
 * Combine this array reduction variable at the given index with the given value using the given
 * operation. (This array <TT>[i]</TT>) is set to (this array <TT>[i]</TT>) <I>op</I>
 * (<TT>value</TT>), then (this array <TT>[i]</TT>) is returned.
 *
 * @param i Index.
 * @param value Value.
 * @param op Binary operation.
 * @return (This array <TT>[i]</TT>) <I>op</I> (<TT>value</TT>).
 */
public T reduce(int i, T value, ObjectOp<T> op) {
  for (;;) {
    T oldvalue = myArray.get(i);
    T newvalue = op.op(oldvalue, value);
    if (myArray.compareAndSet(i, oldvalue, newvalue)) return newvalue;
  }
}
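// Since Java 8, the retry loop in reduce() above is available directly on AtomicReferenceArray
// as accumulateAndGet(int, E, BinaryOperator<E>), which performs the same read-combine-CAS
// cycle. A minimal sketch (the string-concatenation operator is an illustrative choice):
import java.util.concurrent.atomic.AtomicReferenceArray;

class AccumulateDemo {
  public static void main(String[] args) {
    AtomicReferenceArray<String> arr = new AtomicReferenceArray<>(new String[] {"a", "b"});
    // Equivalent to reduce(0, "x", concat): read, combine, CAS, retry on contention.
    // The operator may be re-applied on retry, so it must be side-effect-free.
    String combined = arr.accumulateAndGet(0, "x", (oldV, v) -> oldV + v);
    System.out.println(combined); // prints "ax"
  }
}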
void setFailure(ShardIterator shardIt, int shardIndex, Throwable t) {
  // we don't aggregate shard failures on non active shards (but do keep the header counts right)
  if (TransportActions.isShardNotAvailableException(t)) {
    return;
  }

  if (!(t instanceof BroadcastShardOperationFailedException)) {
    t = new BroadcastShardOperationFailedException(shardIt.shardId(), t);
  }

  Object response = shardsResponses.get(shardIndex);
  if (response == null) {
    // just override it and return
    shardsResponses.set(shardIndex, t);
  }

  if (!(response instanceof Throwable)) {
    // we should never really get here...
    return;
  }

  // the failure is already present, try and not override it with an exception that is less
  // meaningful (for example, getting illegal shard state)
  if (TransportActions.isReadOverrideException(t)) {
    shardsResponses.set(shardIndex, t);
  }
}
@Override
protected ClearIndicesCacheResponse newResponse(
    ClearIndicesCacheRequest request,
    AtomicReferenceArray shardsResponses,
    ClusterState clusterState) {
  int successfulShards = 0;
  int failedShards = 0;
  List<ShardOperationFailedException> shardFailures = null;
  for (int i = 0; i < shardsResponses.length(); i++) {
    Object shardResponse = shardsResponses.get(i);
    if (shardResponse == null) {
      // simply ignore non active shards
    } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
      failedShards++;
      if (shardFailures == null) {
        shardFailures = newArrayList();
      }
      shardFailures.add(
          new DefaultShardOperationFailedException(
              (BroadcastShardOperationFailedException) shardResponse));
    } else {
      successfulShards++;
    }
  }
  return new ClearIndicesCacheResponse(
      shardsResponses.length(), successfulShards, failedShards, shardFailures);
}
@Override
protected SuggestResponse newResponse(
    SuggestRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
  int successfulShards = 0;
  int failedShards = 0;
  final Map<String, List<Suggest.Suggestion>> groupedSuggestions = new HashMap<>();
  List<ShardOperationFailedException> shardFailures = null;
  for (int i = 0; i < shardsResponses.length(); i++) {
    Object shardResponse = shardsResponses.get(i);
    if (shardResponse == null) {
      // simply ignore non active shards
    } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
      failedShards++;
      if (shardFailures == null) {
        shardFailures = newArrayList();
      }
      shardFailures.add(
          new DefaultShardOperationFailedException(
              (BroadcastShardOperationFailedException) shardResponse));
    } else {
      Suggest suggest = ((ShardSuggestResponse) shardResponse).getSuggest();
      Suggest.group(groupedSuggestions, suggest);
      successfulShards++;
    }
  }
  return new SuggestResponse(
      new Suggest(Suggest.reduce(groupedSuggestions)),
      shardsResponses.length(),
      successfulShards,
      failedShards,
      shardFailures);
}
@Override
protected TermlistResponse newResponse(
    TermlistRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
  int successfulShards = 0;
  int failedShards = 0;
  List<ShardOperationFailedException> shardFailures = null;
  Set<String> termlist = new CompactHashSet();
  for (int i = 0; i < shardsResponses.length(); i++) {
    Object shardResponse = shardsResponses.get(i);
    if (shardResponse == null) {
      // a non active shard, ignore...
    } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
      failedShards++;
      if (shardFailures == null) {
        shardFailures = newArrayList();
      }
      shardFailures.add(
          new DefaultShardOperationFailedException(
              (BroadcastShardOperationFailedException) shardResponse));
    } else {
      successfulShards++;
      if (shardResponse instanceof ShardTermlistResponse) {
        ShardTermlistResponse resp = (ShardTermlistResponse) shardResponse;
        termlist.addAll(resp.getTermList());
      }
    }
  }
  return new TermlistResponse(
      shardsResponses.length(), successfulShards, failedShards, shardFailures, termlist);
}
@Override
public R get(Object key) {
  if (!(key instanceof Integer)) return null;
  int i = (int) key;
  if (0 <= i && i < array.length()) {
    return array.get(i);
  }
  return null;
}
public int size() {
  int retval = 0;
  for (int i = 0; i < capacity; i++) {
    Message tmp = array.get(i);
    if (tmp != null && tmp != TOMBSTONE) retval++;
  }
  return retval;
}
public SeqnoList getMissing() {
  SeqnoList missing = null;
  long tmp_hd = hd, tmp_hr = hr.get();
  for (long i = tmp_hd + 1; i <= tmp_hr; i++) {
    if (buf.get(index(i)) == null) {
      if (missing == null) missing = new SeqnoList();
      long end = i;
      // extend the gap, checking the bound before reading so the scan never looks past tmp_hr
      while (end < tmp_hr && buf.get(index(end + 1)) == null) end++;
      if (end == i) {
        missing.add(i);
      } else {
        missing.add(i, end);
        i = end;
      }
    }
  }
  return missing;
}
private Response newResponse(
    Request request,
    AtomicReferenceArray responses,
    List<NoShardAvailableActionException> unavailableShardExceptions,
    Map<String, List<ShardRouting>> nodes,
    ClusterState clusterState) {
  int totalShards = 0;
  int successfulShards = 0;
  List<ShardOperationResult> broadcastByNodeResponses = new ArrayList<>();
  List<ShardOperationFailedException> exceptions = new ArrayList<>();
  for (int i = 0; i < responses.length(); i++) {
    if (responses.get(i) instanceof FailedNodeException) {
      FailedNodeException exception = (FailedNodeException) responses.get(i);
      totalShards += nodes.get(exception.nodeId()).size();
      for (ShardRouting shard : nodes.get(exception.nodeId())) {
        exceptions.add(
            new DefaultShardOperationFailedException(shard.getIndex(), shard.getId(), exception));
      }
    } else {
      NodeResponse response = (NodeResponse) responses.get(i);
      broadcastByNodeResponses.addAll(response.results);
      totalShards += response.getTotalShards();
      successfulShards += response.getSuccessfulShards();
      for (BroadcastShardOperationFailedException throwable : response.getExceptions()) {
        if (!TransportActions.isShardNotAvailableException(throwable)) {
          exceptions.add(
              new DefaultShardOperationFailedException(
                  throwable.getIndex(), throwable.getShardId().getId(), throwable));
        }
      }
    }
  }
  totalShards += unavailableShardExceptions.size();
  int failedShards = exceptions.size();
  return newResponse(
      request,
      totalShards,
      successfulShards,
      failedShards,
      broadcastByNodeResponses,
      exceptions,
      clusterState);
}
private void testJoinWithManyNodesMultipleGroups(final boolean multicast)
    throws InterruptedException {
  final int nodeCount = 10;
  final int groupCount = 3;
  final int basePort = 12301;
  final CountDownLatch latch = new CountDownLatch(nodeCount);
  final AtomicReferenceArray<HazelcastInstance> instances =
      new AtomicReferenceArray<HazelcastInstance>(nodeCount);

  final Map<String, AtomicInteger> groups = new HashMap<String, AtomicInteger>(groupCount);
  for (int i = 0; i < groupCount; i++) {
    groups.put("group-" + i, new AtomicInteger(0));
  }

  ExecutorService ex =
      Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors() * 2);
  for (int i = 0; i < nodeCount; i++) {
    final int portSeed = i;
    ex.execute(
        new Runnable() {
          public void run() {
            sleepRandom(1, 1000);

            Config config = new Config();
            String name = "group-" + (int) (Math.random() * groupCount);
            config.getGroupConfig().setName(name);
            groups.get(name).incrementAndGet();

            initNetworkConfig(config.getNetworkConfig(), basePort, portSeed, multicast, nodeCount);

            HazelcastInstance h = Hazelcast.newHazelcastInstance(config);
            instances.set(portSeed, h);
            latch.countDown();
          }
        });
  }
  try {
    latch.await(200, TimeUnit.SECONDS);
  } finally {
    ex.shutdown();
  }

  for (int i = 0; i < nodeCount; i++) {
    HazelcastInstance hz = instances.get(i);
    assertNotNull(hz);
    int clusterSize = hz.getCluster().getMembers().size();
    String groupName = hz.getConfig().getGroupConfig().getName();
    int shouldBeClusterSize = groups.get(groupName).get();
    assertEquals(groupName + ": ", shouldBeClusterSize, clusterSize);
  }
}
/** {@inheritDoc} */
public E set(int index, E e) {
  // Offset by FIRST_BUCKET_SIZE so bucket boundaries fall on powers of two: the bucket is
  // chosen by the position of pos's highest set bit, and the remaining low bits (highest bit
  // stripped via XOR) are the slot within that bucket.
  int pos = index + FIRST_BUCKET_SIZE;
  int bucketInd =
      Integer.numberOfLeadingZeros(FIRST_BUCKET_SIZE) - Integer.numberOfLeadingZeros(pos);
  int idx = Integer.highestOneBit(pos) ^ pos;
  AtomicReferenceArray<E> bucket = buckets.get(bucketInd);
  while (true) {
    E oldV = bucket.get(idx);
    if (bucket.compareAndSet(idx, oldV, e)) return oldV;
  }
}
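// A worked check of the bucket arithmetic in set() above, assuming FIRST_BUCKET_SIZE = 8 (the
// constant's real value is not shown, so 8 is an illustrative choice). Global indices 0-7 land
// in bucket 0, 8-23 in bucket 1 (size 16), 24-55 in bucket 2 (size 32): each bucket doubles.
class BucketMathDemo {
  static final int FIRST_BUCKET_SIZE = 8; // illustrative assumption

  public static void main(String[] args) {
    for (int index : new int[] {0, 7, 8, 23, 24, 55}) {
      int pos = index + FIRST_BUCKET_SIZE;
      int bucketInd =
          Integer.numberOfLeadingZeros(FIRST_BUCKET_SIZE) - Integer.numberOfLeadingZeros(pos);
      int idx = Integer.highestOneBit(pos) ^ pos; // strip the top bit -> slot within bucket
      System.out.println("index " + index + " -> bucket " + bucketInd + ", slot " + idx);
    }
  }
}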
public Message remove(long seqno) {
  int index = index(seqno);
  if (index < 0) return null;
  Message retval = array.get(index);
  if (retval != null && retval != TOMBSTONE && array.compareAndSet(index, retval, TOMBSTONE)) {
    num_tombstones.incrementAndGet();
    return retval;
  }
  return null;
}
protected int count(boolean missing) {
  int retval = 0;
  long tmp_hd = hd, tmp_hr = hr.get();
  for (long i = tmp_hd + 1; i <= tmp_hr; i++) {
    int index = index(i);
    T element = buf.get(index);
    if (missing && element == null) retval++;
    if (!missing && element != null) retval++;
  }
  return retval;
}
private GetFieldMappingsResponse merge(AtomicReferenceArray<Object> indexResponses) {
  Map<String, Map<String, Map<String, GetFieldMappingsResponse.FieldMappingMetaData>>>
      mergedResponses = new HashMap<>();
  for (int i = 0; i < indexResponses.length(); i++) {
    Object element = indexResponses.get(i);
    if (element instanceof GetFieldMappingsResponse) {
      GetFieldMappingsResponse response = (GetFieldMappingsResponse) element;
      mergedResponses.putAll(response.mappings());
    }
  }
  return new GetFieldMappingsResponse(unmodifiableMap(mergedResponses));
}
void reduce(int slot) {
  AtomicReferenceArray shardResponses = responsesByItemAndShard.get(slot);
  PercolateResponse reducedResponse =
      TransportPercolateAction.reduce(
          (PercolateRequest) percolateRequests.get(slot), shardResponses, percolatorService);
  reducedResponses.set(slot, reducedResponse);
  assert expectedOperations.get() >= 1
      : "slot[" + slot + "] expected operations should be >= 1 but is " + expectedOperations.get();
  if (expectedOperations.decrementAndGet() == 0) {
    finish();
  }
}
/** @return Random node and its index. */
@Nullable
private T2<Ignite, Integer> randomNode() {
  while (!done) {
    int idx = ThreadLocalRandom.current().nextInt(GRID_CNT);
    Ignite ignite = nodes.get(idx);
    if (ignite != null && nodes.compareAndSet(idx, ignite, null)) return new T2<>(ignite, idx);
  }
  return null;
}
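// randomNode() above uses compareAndSet to *claim* a slot: nulling it atomically guarantees
// each node is handed to at most one caller even under contention. A self-contained sketch of
// that claim idiom with plain strings (the names, slot values, and retry limit are illustrative):
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicReferenceArray;

class SlotClaimDemo {
  private final AtomicReferenceArray<String> slots =
      new AtomicReferenceArray<>(new String[] {"a", "b", "c"});

  /** Claims and returns a random still-unclaimed slot, or null if none was won. */
  String claimRandom() {
    for (int attempts = 0; attempts < 100; attempts++) {
      int idx = ThreadLocalRandom.current().nextInt(slots.length());
      String value = slots.get(idx);
      // compareAndSet fails if another thread claimed idx between the get and here
      if (value != null && slots.compareAndSet(idx, value, null)) return value;
    }
    return null;
  }
}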
/**
 * Uses random numbers to sample the entire map.
 *
 * <p>This implementation uses a key array.
 *
 * @return a random sample of elements
 */
protected Element[] sampleElementsViaKeyArray() {
  int[] indices = LfuPolicy.generateRandomSampleIndices(maximumSize);
  Element[] elements = new Element[indices.length];
  for (int i = 0; i < indices.length; i++) {
    Object key = keyArray.get(indices[i]);
    if (key == null) {
      continue;
    }
    elements[i] = (Element) map.get(key);
  }
  return elements;
}