Code example #1
  private void registerFailure(Object caller, int index, boolean last) {
    failureCounters.incrementAndGet(index);

    if (policy == LoadBalancerPolicy.LATENCY_LAST) {
      // Make sure we don't pick a backend with a failed request next time
      latencyTimes.set(index, Long.MAX_VALUE - 1);
    }

    if (suspensionTime > 0) {
      // Try to save the failure time in the failureTimes array
      long now = System.currentTimeMillis();
      long oldestFailureTime = now - failureRateTimeUnit.toMillis(failureRateTime);
      int i;
      for (i = 0; i < failureTimes[index].length(); i++) {
        if (failureTimes[index].get(i) < oldestFailureTime) {
          failureTimes[index].set(i, now);
          break;
        }
      }
      // If the window has zero size, or we just filled its last slot (so every
      // slot now holds a recent failure), suspend the endpoint
      if (failureTimes[index].length() == 0 || i == failureTimes[index].length() - 1) {
        suspensionTimes.set(index, now + suspensionTimeUnit.toMillis(suspensionTime));
      }
    }

    // Remove the caller
    if (last) {
      retryCounters.remove(caller);
    }
  }
Code example #2
  /**
   * Assign a predefined ordinal to a serialized representation.
   *
   * <p>WARNING: THIS OPERATION IS NOT THREAD-SAFE.
   *
   * <p>This is intended for use in the client-side heap-safe double snapshot load.
   */
  public void put(ByteDataBuffer serializedRepresentation, int ordinal) {
    if (size > sizeBeforeGrow) growKeyArray();

    int hash = SegmentedByteArrayHasher.hashCode(serializedRepresentation);

    int modBitmask = pointersAndOrdinals.length() - 1;
    int bucket = hash & modBitmask;
    long key = pointersAndOrdinals.get(bucket);

    while (key != EMPTY_BUCKET_VALUE) {
      if (compare(serializedRepresentation, key)) return;

      bucket = (bucket + 1) & modBitmask;
      key = pointersAndOrdinals.get(bucket);
    }

    int pointer = byteData.length();

    VarInt.writeVInt(byteData, serializedRepresentation.length());
    serializedRepresentation.copyTo(byteData);

    key = ((long) ordinal << 32) | pointer;

    size++;

    pointersAndOrdinals.set(bucket, key);
  }
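
The packed layout above -- ordinal in the upper 32 bits of the long, pointer into the byte data in the lower 32 -- is what lets a single AtomicLongArray slot hold a whole entry. A minimal standalone sketch of the encoding (the class and helper names here are illustrative, not from the project):

public class PackedKeyDemo {
  // Pack ordinal (high 32 bits) and pointer (low 32 bits) into one long.
  // Masking the pointer stops a sign bit from spilling into the ordinal half.
  static long pack(int ordinal, int pointer) {
    return ((long) ordinal << 32) | (pointer & 0xFFFFFFFFL);
  }

  static int ordinalOf(long key) {
    return (int) (key >> 32);
  }

  static int pointerOf(long key) {
    return (int) key;
  }

  public static void main(String[] args) {
    long key = pack(7, 1234);
    System.out.println(ordinalOf(key)); // 7
    System.out.println(pointerOf(key)); // 1234
  }
}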
Code example #3
 private void registerSuccess(Object caller, int index, long start) {
   successCounters.incrementAndGet(index);
   retryCounters.remove(caller);
   if (policy == LoadBalancerPolicy.LATENCY_LAST) {
     latencyTimes.set(index, System.currentTimeMillis() - start);
   }
 }
Code example #4
  /**
   * This is used to store the server's SerializationState, so that it may resume the delta chain
   * after a new server is brought back up.
   *
   * @param os the output stream to write the serialized state to
   * @throws IOException if the underlying stream cannot be written
   */
  public void serializeTo(OutputStream os) throws IOException {
    /// write the hashed key array size
    VarInt.writeVInt(os, pointersAndOrdinals.length());

    /// write the keys in sorted ordinal order to the stream
    long[] keys = new long[size];

    int counter = 0;

    for (int i = 0; i < pointersAndOrdinals.length(); i++) {
      long key = pointersAndOrdinals.get(i);
      if (key != EMPTY_BUCKET_VALUE) {
        keys[counter++] = key;
      }
    }

    Arrays.sort(keys);

    VarInt.writeVInt(os, keys.length);

    for (int i = 0; i < keys.length; i++) {
      VarInt.writeVInt(os, (int) (keys[i] >> 32));
      VarInt.writeVInt(os, (int) (keys[i]));
    }

    /// write the byte data to the stream
    VarInt.writeVInt(os, byteData.length());

    for (int i = 0; i < byteData.length(); i++) {
      os.write(byteData.get(i) & 0xFF);
    }

    /// write the freeOrdinalTracker to the stream
    freeOrdinalTracker.serializeTo(os);
  }
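
serializeTo leans on the project's VarInt codec for all its integer fields. As a rough sketch of how such a variable-length encoding typically works (LEB128-style; an illustration only, not the project's actual VarInt implementation):

import java.io.IOException;
import java.io.OutputStream;

public class VarIntSketch {
  // Write 7 bits per byte, setting the high bit on every byte except the
  // last, so small values cost one byte instead of four.
  static void writeVInt(OutputStream os, int value) throws IOException {
    while ((value & ~0x7F) != 0) {
      os.write((value & 0x7F) | 0x80);
      value >>>= 7;
    }
    os.write(value);
  }
}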
Code example #5
 /**
  * Remove all entries from this map, but reuse the existing arrays when populating the map next
  * time.
  *
  * <p>This is intended for use in the client-side heap-safe double snapshot load.
  */
 public void clear() {
   for (int i = 0; i < pointersAndOrdinals.length(); i++) {
     pointersAndOrdinals.set(i, EMPTY_BUCKET_VALUE);
   }
   byteData.reset();
   size = 0;
 }
Code example #6
  /**
   * Create an array mapping the ordinals to pointers, so that they can be easily looked up when
   * writing to blob streams.
   *
   * @return the maximum length, in bytes, of any byte sequence in this map.
   */
  public int prepareForWrite() {
    int maxOrdinal = 0;
    int maxLength = 0;

    for (int i = 0; i < pointersAndOrdinals.length(); i++) {
      long key = pointersAndOrdinals.get(i);
      if (key != EMPTY_BUCKET_VALUE) {
        int ordinal = (int) (key >> 32);
        if (ordinal > maxOrdinal) maxOrdinal = ordinal;
      }
    }

    pointersByOrdinal = new int[maxOrdinal + 1];
    Arrays.fill(pointersByOrdinal, -1);

    for (int i = 0; i < pointersAndOrdinals.length(); i++) {
      long key = pointersAndOrdinals.get(i);
      if (key != EMPTY_BUCKET_VALUE) {
        int ordinal = (int) (key >> 32);
        pointersByOrdinal[ordinal] = (int) key;

        int dataLength = VarInt.readVInt(byteData.getUnderlyingArray(), pointersByOrdinal[ordinal]);
        if (dataLength > maxLength) maxLength = dataLength;
      }
    }

    return maxLength;
  }
Code example #7
  /**
   * Grow the key array. All of the values in the current array must be re-hashed and added to the
   * new array.
   */
  private void growKeyArray() {
    AtomicLongArray newKeys = emptyKeyArray(pointersAndOrdinals.length() * 2);

    long[] valuesToAdd = new long[size];

    int counter = 0;

    /// do not iterate over these values in the same order in which they appear in the hashed array.
    /// if we do so, we cause large clusters of collisions to appear (because we resolve collisions
    /// with linear probing).
    for (int i = 0; i < pointersAndOrdinals.length(); i++) {
      long key = pointersAndOrdinals.get(i);
      if (key != EMPTY_BUCKET_VALUE) {
        valuesToAdd[counter++] = key;
      }
    }

    Arrays.sort(valuesToAdd);

    populateNewHashArray(newKeys, valuesToAdd);

    /// 70% load factor
    sizeBeforeGrow = (newKeys.length() * 7) / 10;
    pointersAndOrdinals = newKeys;
  }
Code example #8
 /**
  * Create an AtomicLongArray of the specified size; each value in the array will be
  * initialized to EMPTY_BUCKET_VALUE.
  */
 private AtomicLongArray emptyKeyArray(int size) {
   AtomicLongArray arr = new AtomicLongArray(size);
   for (int i = 0; i < arr.length(); i++) {
     arr.set(i, EMPTY_BUCKET_VALUE);
   }
   return arr;
 }
Code example #9
File: EstimatedHistogram.java  Project: vniu/Raigad
  /**
   * @return the largest value that could have been added to this histogram. If the histogram
   *     overflowed, returns Long.MAX_VALUE.
   */
  public long max() {
    int lastBucket = buckets.length() - 1;
    if (buckets.get(lastBucket) > 0) return Long.MAX_VALUE;

    for (int i = lastBucket - 1; i >= 0; i--) {
      if (buckets.get(i) > 0) return bucketOffsets[i];
    }
    return 0;
  }
Code example #10
File: EstimatedHistogram.java  Project: vniu/Raigad
  /**
   * @param reset zero out buckets afterwards if true
   * @return a long[] containing the current histogram buckets
   */
  public long[] getBuckets(boolean reset) {
    final int len = buckets.length();
    long[] rv = new long[len];

    if (reset) for (int i = 0; i < len; i++) rv[i] = buckets.getAndSet(i, 0L);
    else for (int i = 0; i < len; i++) rv[i] = buckets.get(i);

    return rv;
  }
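
The reset path uses getAndSet so that each slot's read-and-clear is a single atomic step: an increment racing with the reset lands either in this snapshot or the next one, never lost. Note the snapshot is only per-slot atomic, not atomic across the whole array. A minimal demonstration (names are illustrative):

import java.util.concurrent.atomic.AtomicLongArray;

public class SnapshotDemo {
  public static void main(String[] args) {
    AtomicLongArray buckets = new AtomicLongArray(4);
    buckets.incrementAndGet(2);
    buckets.incrementAndGet(2);

    long[] snapshot = new long[buckets.length()];
    for (int i = 0; i < snapshot.length; i++) {
      snapshot[i] = buckets.getAndSet(i, 0L); // atomically read and zero the slot
    }
    System.out.println(snapshot[2]);    // 2
    System.out.println(buckets.get(2)); // 0
  }
}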
Code example #11
File: PooledFatPipe.java  Project: ioworks/fat-pipe
 private void resetConsumer(Consumer<S> consumer) {
   Consumer<S>[] consumers = this.consumers;
   AtomicLongArray outUse = this.outUse;
   for (int i = 0; i < consumers.length; i++) {
     if (consumer != consumers[i]) continue;
     outUse.lazySet(i, 0);
     return;
   }
 }
Code example #12
  /**
   * Hash all of the existing values specified by the keys in the supplied long array into the
   * supplied AtomicLongArray.
   */
  private void populateNewHashArray(AtomicLongArray newKeys, long[] valuesToAdd) {
    int modBitmask = newKeys.length() - 1;

    for (int i = 0; i < valuesToAdd.length; i++) {
      if (valuesToAdd[i] != EMPTY_BUCKET_VALUE) {
        int hash = rehashPreviouslyAddedData(valuesToAdd[i]);
        int bucket = hash & modBitmask;
        while (newKeys.get(bucket) != EMPTY_BUCKET_VALUE) bucket = (bucket + 1) & modBitmask;
        newKeys.set(bucket, valuesToAdd[i]);
      }
    }
  }
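
The `hash & modBitmask` indexing used throughout these methods depends on the key array length being a power of two: masking with length - 1 then equals a nonnegative modulo even for negative hash codes, where `%` would not. For example:

public class MaskDemo {
  public static void main(String[] args) {
    int length = 16; // must be a power of two
    int hash = -37;
    System.out.println(hash & (length - 1));         // 11, a valid bucket
    System.out.println(Math.floorMod(hash, length)); // 11, the same value
    System.out.println(hash % length);               // -5, unusable as an index
  }
}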
Code example #13
File: EstimatedHistogram.java  Project: vniu/Raigad
  /**
   * @param percentile the percentile to estimate, between 0 and 1.0 inclusive
   * @return estimated value at given percentile
   */
  public long percentile(double percentile) {
    assert percentile >= 0 && percentile <= 1.0;
    int lastBucket = buckets.length() - 1;
    if (buckets.get(lastBucket) > 0)
      throw new IllegalStateException("Unable to compute when histogram overflowed");

    long pcount = (long) Math.floor(count() * percentile);
    if (pcount == 0) return 0;

    long elements = 0;
    for (int i = 0; i < lastBucket; i++) {
      elements += buckets.get(i);
      if (elements >= pcount) return bucketOffsets[i];
    }
    return 0;
  }
Code example #14
File: EstimatedHistogram.java  Project: vniu/Raigad
  /**
   * @return the mean histogram value (average of bucket offsets, weighted by count)
   * @throws IllegalStateException if any values were greater than the largest bucket threshold
   */
  public long mean() {
    int lastBucket = buckets.length() - 1;
    if (buckets.get(lastBucket) > 0)
      throw new IllegalStateException(
          "Unable to compute ceiling for max when histogram overflowed");

    long elements = 0;
    long sum = 0;
    for (int i = 0; i < lastBucket; i++) {
      long bCount = buckets.get(i);
      elements += bCount;
      sum += bCount * bucketOffsets[i];
    }

    return (long) Math.ceil((double) sum / elements);
  }
Code example #15
  /**
   * Reclaim space in the byte array used in the previous cycle, but not referenced in this cycle.
   *
   * <p>This is achieved by shifting all used byte sequences down in the byte array, then updating
   * the key array to reflect the new pointers and exclude the removed entries. This is also where
   * ordinals which are unused are returned to the pool.
   *
   * @param usedOrdinals a bit set representing the ordinals which are currently referenced by any
   *     image.
   */
  public void compact(ThreadSafeBitSet usedOrdinals) {
    long[] populatedReverseKeys = new long[size];

    int counter = 0;

    for (int i = 0; i < pointersAndOrdinals.length(); i++) {
      long key = pointersAndOrdinals.get(i);
      if (key != EMPTY_BUCKET_VALUE) {
        populatedReverseKeys[counter++] = key << 32 | key >>> 32;
      }
    }

    Arrays.sort(populatedReverseKeys);

    SegmentedByteArray arr = byteData.getUnderlyingArray();
    int currentCopyPointer = 0;

    for (int i = 0; i < populatedReverseKeys.length; i++) {
      int ordinal = (int) populatedReverseKeys[i];

      if (usedOrdinals.get(ordinal)) {
        int pointer = (int) (populatedReverseKeys[i] >> 32);
        int length = VarInt.readVInt(arr, pointer);
        length += VarInt.sizeOfVInt(length);

        if (currentCopyPointer != pointer) arr.copy(arr, pointer, currentCopyPointer, length);

        populatedReverseKeys[i] = populatedReverseKeys[i] << 32 | currentCopyPointer;

        currentCopyPointer += length;
      } else {
        freeOrdinalTracker.returnOrdinalToPool(ordinal);
        populatedReverseKeys[i] = EMPTY_BUCKET_VALUE;
      }
    }

    byteData.setPosition(currentCopyPointer);

    for (int i = 0; i < pointersAndOrdinals.length(); i++) {
      pointersAndOrdinals.set(i, EMPTY_BUCKET_VALUE);
    }

    populateNewHashArray(pointersAndOrdinals, populatedReverseKeys);
    size = usedOrdinals.cardinality();

    pointersByOrdinal = null;
  }
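
The `key << 32 | key >>> 32` trick swaps the two 32-bit halves of each packed key, moving the byte-data pointer into the high bits so that sorting orders entries by their position in the byte array, which is exactly what the copy-down compaction needs. In isolation:

public class SwapHalvesDemo {
  public static void main(String[] args) {
    long key = (7L << 32) | 1234L;          // ordinal 7, pointer 1234
    long swapped = key << 32 | key >>> 32;  // pointer now in the high half
    System.out.println(swapped >>> 32);     // 1234
    System.out.println((int) swapped);      // 7
  }
}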
Code example #16
  private void checkMonitors() {
    for (int i = 0; i < endpointCount; i++) {
      final int index = i;
      try {
        monitorFunction
            .apply(index)
            .whenComplete(
                (result, ex) -> {
                  if (ex == null && result) { // test ex first: result is null on exceptional completion
                    // Unset suspension time when we hit the healthy threshold
                    if (monitorHealthyCounters.incrementAndGet(index) >= monitorHealthyThreshold) {
                      suspensionTimes.set(index, 0);
                      monitorHealthyCounters.set(index, 0);
                    }
                    // Reset unhealthy counter
                    if (monitorUnhealthyCounters.get(index) > 0) {
                      monitorUnhealthyCounters.set(index, 0);
                    }

                  } else {
                    // Set suspension time when we hit the unhealthy threshold
                    if (monitorUnhealthyCounters.incrementAndGet(index)
                        >= monitorUnhealthyThreshold) {
                      suspensionTimes.set(index, Long.MAX_VALUE);
                      monitorUnhealthyCounters.set(index, 0);
                    }
                    // Reset healthy counter
                    if (monitorHealthyCounters.get(index) > 0) {
                      monitorHealthyCounters.set(index, 0);
                    }
                  }
                });

      } catch (Exception ex) { // Got exception trying to create the future
        // Set suspension time when we hit the unhealthy threshold
        if (monitorUnhealthyCounters.incrementAndGet(index) >= monitorUnhealthyThreshold) {
          suspensionTimes.set(index, Long.MAX_VALUE);
          monitorUnhealthyCounters.set(index, 0);
        }
        // Reset healthy counter
        if (monitorHealthyCounters.get(index) > 0) {
          monitorHealthyCounters.set(index, 0);
        }
      }
    }
  }
Code example #17
ファイル: TaskExecutor.java プロジェクト: reake/presto
  public synchronized void removeTask(TaskHandle taskHandle) {
    taskHandle.destroy();
    tasks.remove(taskHandle);

    // record completed stats
    long threadUsageNanos = taskHandle.getThreadUsageNanos();
    int priorityLevel = calculatePriorityLevel(threadUsageNanos);
    completedTasksPerLevel.incrementAndGet(priorityLevel);
  }
Code example #18
File: EstimatedHistogram.java  Project: vniu/Raigad
 /**
  * Increments the count of the bucket closest to n, rounding UP.
  *
  * @param n the value to record
  */
 public void add(long n) {
   int index = Arrays.binarySearch(bucketOffsets, n);
   if (index < 0) {
     // inexact match, take the first bucket higher than n
     index = -index - 1;
   }
   // else exact match; we're good
   buckets.incrementAndGet(index);
 }
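
The `-index - 1` arithmetic relies on Arrays.binarySearch returning `-(insertionPoint) - 1` on a miss, so negating and subtracting one recovers the index of the first bucket offset greater than n. A worked example:

import java.util.Arrays;

public class BucketSearchDemo {
  public static void main(String[] args) {
    long[] bucketOffsets = {1, 2, 4, 8, 16};
    int index = Arrays.binarySearch(bucketOffsets, 5); // returns -4 (miss)
    if (index < 0) index = -index - 1;                 // insertion point: 3
    System.out.println(bucketOffsets[index]);          // 8, first offset >= 5
  }
}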
Code example #19
  /**
   * Add a sequence of bytes to this map. If the sequence of bytes has already been added to this
   * map, return the originally assigned ordinal. If the sequence of bytes has not been added to
   * this map, assign and return a new ordinal. This operation is thread-safe.
   */
  public int getOrAssignOrdinal(ByteDataBuffer serializedRepresentation) {
    int hash = SegmentedByteArrayHasher.hashCode(serializedRepresentation);

    int modBitmask = pointersAndOrdinals.length() - 1;
    int bucket = hash & modBitmask;
    long key = pointersAndOrdinals.get(bucket);

    /// linear probing to resolve collisions.
    while (key != EMPTY_BUCKET_VALUE) {
      if (compare(serializedRepresentation, key)) {
        return (int) (key >> 32);
      }

      bucket = (bucket + 1) & modBitmask;
      key = pointersAndOrdinals.get(bucket);
    }

    return assignOrdinal(serializedRepresentation, hash);
  }
Code example #20
  /// acquire the lock before writing.
  private synchronized int assignOrdinal(ByteDataBuffer serializedRepresentation, int hash) {
    if (size > sizeBeforeGrow) growKeyArray();

    /// check to make sure that after acquiring the lock, the element still does not exist.
    /// this operation is akin to double-checked locking which is 'fixed' with the JSR 133 memory
    /// model in JVM >= 1.5.
    int modBitmask = pointersAndOrdinals.length() - 1;
    int bucket = hash & modBitmask;
    long key = pointersAndOrdinals.get(bucket);

    while (key != EMPTY_BUCKET_VALUE) {
      if (compare(serializedRepresentation, key)) {
        return (int) (key >> 32);
      }

      bucket = (bucket + 1) & modBitmask;
      key = pointersAndOrdinals.get(bucket);
    }

    /// the ordinal for this object still does not exist in the list, even after the lock has
    /// been acquired. it is up to this thread to add it at the current bucket position.
    int ordinal = freeOrdinalTracker.getFreeOrdinal();
    int pointer = byteData.length();

    VarInt.writeVInt(byteData, serializedRepresentation.length());
    serializedRepresentation.copyTo(byteData);

    key = ((long) ordinal << 32) | pointer;

    size++;

    /// this set on the AtomicLongArray has volatile semantics (i.e. behaves like a monitor
    /// release). Any other thread reading this element in the AtomicLongArray will have
    /// visibility to all memory writes this thread has made up to this point.
    /// This means the entire byte sequence is guaranteed to be visible to any thread which
    /// reads the pointer to that data.
    pointersAndOrdinals.set(bucket, key);

    return ordinal;
  }
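
The visibility argument in the closing comments is the standard safe-publication pattern: AtomicLongArray.set is a volatile write, so plain writes made before it (here, the bytes copied into byteData) happen-before any read that observes the new element. A stripped-down sketch of the same pattern (hypothetical names, not the project's classes):

import java.util.concurrent.atomic.AtomicLongArray;

public class SafePublicationSketch {
  static final byte[] data = new byte[16];
  static final AtomicLongArray slots = new AtomicLongArray(1);

  static void writer() {
    data[0] = 42;     // plain write to shared data ...
    slots.set(0, 1L); // ... published by a volatile-semantics set
  }

  static void reader() {
    if (slots.get(0) != 0) { // volatile read pairs with the set above
      assert data[0] == 42;  // guaranteed visible: happens-before holds
    }
  }
}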
Code example #21
  private int getNextIndex(Object caller) {
    long now = System.currentTimeMillis();

    if (this.policy == LoadBalancerPolicy.ROUND_ROBIN) {
      // Try all endpoints if some are suspended
      int count = endpointCount;
      int[] tried = new int[endpointCount];
      while (count > 0) {
        // Increment index and convert negative values to positive if need be
        int index = indexGenerator.getAndIncrement() % endpointCount;
        if (index < 0) {
          index += endpointCount;
        }

        // Don't retry same index
        if (tried[index] == 0) {
          // Return index if endpoint is not suspended
          if (suspensionTimes.get(index) < now) {
            return index;
          }

          tried[index] = 1;
          count--;
        }
      }

    } else if (this.policy == LoadBalancerPolicy.LATENCY_LAST) {
      int index = -1;
      // The running minimum must live outside the loop; declared inside,
      // it would reset every pass and the method would return the last
      // eligible endpoint instead of the one with the lowest latency
      long smallest = Long.MAX_VALUE;
      for (int i = 0; i < latencyTimes.length(); i++) {
        long current = latencyTimes.get(i);
        if (current < smallest && suspensionTimes.get(i) < now) {
          smallest = current;
          index = i;
        }
      }
      return index;
    }

    return -1;
  }
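
The `index < 0` correction in the round-robin branch exists because getAndIncrement eventually overflows Integer.MAX_VALUE into negative territory, and Java's % keeps the sign of the dividend. A quick illustration:

public class ModuloWrapDemo {
  public static void main(String[] args) {
    int counter = Integer.MAX_VALUE;
    counter++;                 // overflows to Integer.MIN_VALUE
    int index = counter % 5;   // -3: the remainder keeps the dividend's sign
    if (index < 0) index += 5; // shift back into [0, 5)
    System.out.println(index); // 2
  }
}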
Code example #22
File: PooledFatPipe.java  Project: ioworks/fat-pipe
 Consumer<S> check(long maxTime, PooledFatPipe parent) {
   if (parent != null && parent.group == null) return null;
   AtomicLongArray outUse = this.outUse;
   Consumer<S>[] consumers = this.consumers;
   long now = System.nanoTime();
   if (consumers.length > outUse.length()) {
     logger.warn("skipping check " + outUse.length() + "/" + consumers.length);
     return null;
   }
   try {
     for (int i = 0; i < consumers.length; i++) {
       long time = outUse.get(i);
       if (time == 0) continue;
       if (time == -1) continue;
       if (now < time + maxTime * 1_000_000) continue;
       if (parent != null) parent.group.list();
       return consumers[i];
     }
   } catch (Exception e) {
     logger.error("check", e);
   }
   return null;
 }
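
The 0 and -1 values in outUse act as sentinels: 0 marks a free slot, -1 one that is not yet in use, and a timestamp marks a claim in progress. compareAndSet guarantees a slot is claimed by at most one thread, while the cheaper lazySet suffices for the release. A sketch of that claim/release protocol, with illustrative names:

import java.util.concurrent.atomic.AtomicLongArray;

public class ClaimReleaseSketch {
  public static void main(String[] args) {
    AtomicLongArray outUse = new AtomicLongArray(4);
    long now = System.nanoTime();
    if (outUse.compareAndSet(1, 0, now)) { // claim slot 1 only if it is free (0)
      try {
        // ... deliver work to consumer 1 ...
      } finally {
        outUse.lazySet(1, 0); // release; an ordered write is enough here
      }
    }
  }
}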
Code example #23
  /**
   * Combine a portion of this matrix reduction variable with a portion of the given matrix using
   * the given operation. For each row index <TT>r</TT> from 0 to <TT>rowlen-1</TT> inclusive, and
   * for each column index <TT>c</TT> from 0 to <TT>collen-1</TT> inclusive, (this matrix
   * <TT>[dstrow+r,dstcol+c]</TT>) is set to (this matrix <TT>[dstrow+r,dstcol+c]</TT>) <I>op</I>
   * (<TT>src[srcrow+r,srccol+c]</TT>).
   *
   * <p>The <TT>reduce()</TT> method is multiple thread safe <I>on a per-element basis.</I> Each
   * individual matrix element is updated atomically, but the matrix as a whole is not updated
   * atomically.
   *
   * @param dstrow Row index of first element to update in this matrix.
   * @param dstcol Column index of first element to update in this matrix.
   * @param src Source matrix.
   * @param srcrow Row index of first element to update from in the source matrix.
   * @param srccol Column index of first element to update from in the source matrix.
   * @param rowlen Number of rows to update.
   * @param collen Number of columns to update.
   * @param op Binary operation.
   * @exception NullPointerException (unchecked exception) Thrown if <TT>src</TT> is null. Thrown if
   *     <TT>op</TT> is null.
   * @exception IndexOutOfBoundsException (unchecked exception) Thrown if <TT>rowlen</TT> &lt; 0.
   *     Thrown if <TT>collen</TT> &lt; 0. Thrown if any matrix index would be out of bounds.
   */
  public void reduce(
      int dstrow,
      int dstcol,
      long[][] src,
      int srcrow,
      int srccol,
      int rowlen,
      int collen,
      LongOp op) {
    if (rowlen < 0
        || collen < 0
        || dstrow < 0
        || dstrow + rowlen > rows()
        || dstcol < 0
        || dstcol + collen > cols()
        || srcrow < 0
        || srcrow + rowlen > src.length
        || srccol < 0
        || srccol + collen > src[0].length) {
      throw new IndexOutOfBoundsException();
    }

    for (int r = 0; r < rowlen; ++r) {
      AtomicLongArray myMatrix_r = myMatrix[dstrow + r];
      long[] src_r = src[srcrow + r];
      for (int c = 0; c < collen; ++c) {
        int dstcol_c = dstcol + c;
        long src_r_c = src_r[srccol + c];
        updateLoop:
        for (; ; ) {
          long oldvalue = myMatrix_r.get(dstcol_c);
          long newvalue = op.op(oldvalue, src_r_c);
          if (myMatrix_r.compareAndSet(dstcol_c, oldvalue, newvalue)) break updateLoop;
        }
      }
    }
  }
Code example #24
File: PooledFatPipe.java  Project: ioworks/fat-pipe
 private boolean setConsumers(List<Consumer<S>> list) throws InterruptedException {
   AtomicLongArray oldOutUse = outUse;
   long[] oldConsumerSeqs = consumerSeqs;
   long[] useArray = new long[list.size()];
   for (int i = 0; i < useArray.length; i++) useArray[i] = -1;
   long[] seq = new long[list.size()];
   for (int i = 0; i < seq.length; i++) seq[i] = Long.MAX_VALUE;
   Consumer<S>[] oldConsumers = consumers;
   consumers = list.toArray(new Consumer[0]);
   outUse = new AtomicLongArray(useArray);
   consumerSeqs = seq;
   for (int i = 0; i < consumers.length; i++) {
     boolean found = false;
     for (int j = 0; j < oldConsumers.length; j++) {
       if (consumers[i] != oldConsumers[j]) continue;
       found = true;
       long entered = oldOutUse.get(j);
       setSequence(i, entered, oldConsumerSeqs[j]);
       break;
     }
     if (!found) setSequence(i, 0, 0); // not in use
   }
   return true;
 }
Code example #25
File: EstimatedHistogram.java  Project: vniu/Raigad
  /**
   * log.debug() every record in the histogram
   *
   * @param log the logger to write each record to
   */
  public void log(Logger log) {
    // only print overflow if there is any
    int nameCount;
    if (buckets.get(buckets.length() - 1) == 0) nameCount = buckets.length() - 1;
    else nameCount = buckets.length();
    String[] names = new String[nameCount];

    int maxNameLength = 0;
    for (int i = 0; i < nameCount; i++) {
      names[i] = nameOfRange(bucketOffsets, i);
      maxNameLength = Math.max(maxNameLength, names[i].length());
    }

    // emit log records
    String formatstr = "%" + maxNameLength + "s: %d";
    for (int i = 0; i < nameCount; i++) {
      long count = buckets.get(i);
      // sort-of-hack to not print empty ranges at the start that are only used to demarcate the
      // first populated range. for code clarity we don't omit this record from the maxNameLength
      // calculation, and accept the unnecessary whitespace prefixes that will occasionally occur
      if (i == 0 && count == 0) continue;
      log.debug(String.format(formatstr, names[i], count));
    }
  }
Code example #26
File: EstimatedHistogram.java  Project: vniu/Raigad
 /** @return the count in the given bucket */
 long get(int bucket) {
   return buckets.get(bucket);
 }
Code example #27
File: EstimatedHistogram.java  Project: vniu/Raigad
 /** @return the total number of non-zero values */
 public long count() {
   long sum = 0L;
   for (int i = 0; i < buckets.length(); i++) sum += buckets.get(i);
   return sum;
 }
Code example #28
File: EstimatedHistogram.java  Project: vniu/Raigad
 /**
  * @return true if this histogram has overflowed -- that is, a value larger than our largest
  *     bucket could bound was added
  */
 public boolean isOverflowed() {
   return buckets.get(buckets.length() - 1) > 0;
 }
Code example #29
File: PooledFatPipe.java  Project: ioworks/fat-pipe
  /**
   * @param product the item to deliver to the consumers; ignored if null
   * @param sequence the sequence number of this product
   * @param sleep milliseconds to pause after each successful delivery, or 0 to skip
   * @return false if there is an error
   */
  boolean consume(S product, long sequence, long sleep) {
    if (product == null) return true;

    // make copies
    Consumer<S>[] consumers = this.consumers;
    AtomicLongArray outUse = this.outUse;
    long[] consumerSeqs = this.consumerSeqs;

    if (outUse.length() != consumers.length) return false;
    for (int j = 0; j < consumers.length; j++) {
      if (!consumers[j].isConsuming()) continue;
      long time = System.nanoTime();
      if (!outUse.compareAndSet(j, 0, time)) continue;
      try {
        if (sequence <= consumerSeqs[j]) {
          outUse.lazySet(j, 0);
          if (outUse != this.outUse) resetConsumer(consumers[j]);
          break;
        }
        consumerSeqs[j] = sequence;
        consumers[j].consume(product, time);
        if (sleep > 0) Thread.sleep(sleep);
        outUse.lazySet(j, 0);
        if (outUse != this.outUse) {
          resetConsumer(consumers[j]);
          break;
        }
      } catch (Exception e) {
        if (listener == null) logger.error("consume", e);
        else listener.exceptionThrown(e);
      }
    }
    finishConsuming(product, sequence);
    return true;
  }
Code example #30
 /**
  * Combine this matrix reduction variable at the given row and column with the given value using
  * the given operation. (This matrix <TT>[r,c]</TT>) is set to (this matrix <TT>[r,c]</TT>)
  * <I>op</I> (<TT>value</TT>), then (this matrix <TT>[r,c]</TT>) is returned.
  *
  * @param r Row index.
  * @param c Column index.
  * @param value Value.
  * @param op Binary operation.
  * @return (This matrix <TT>[r,c]</TT>) <I>op</I> (<TT>value</TT>).
  */
 public long reduce(int r, int c, long value, LongOp op) {
   AtomicLongArray myMatrix_r = myMatrix[r];
   for (; ; ) {
     long oldvalue = myMatrix_r.get(c);
     long newvalue = op.op(oldvalue, value);
     if (myMatrix_r.compareAndSet(c, oldvalue, newvalue)) {
       return newvalue;
     }
   }
 }
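
The retry loop above is the classic compareAndSet idiom: read, compute, attempt to swap, and start over on contention. Since Java 8 the same per-element reduction can be written with accumulateAndGet, which runs that loop internally; for example, with max as the operation:

import java.util.concurrent.atomic.AtomicLongArray;

public class AccumulateDemo {
  public static void main(String[] args) {
    AtomicLongArray row = new AtomicLongArray(8);
    long result = row.accumulateAndGet(3, 42L, Math::max); // atomic max on slot 3
    System.out.println(result); // 42
  }
}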