@Override
  @Test(timeout = 30000)
  public void testMessageSizeOneDurable() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();
    Connection connection = new ActiveMQConnectionFactory(brokerConnectURI).createConnection();
    connection.setClientID("clientId");
    connection.start();

    SubscriptionKey subKey = new SubscriptionKey("clientId", "sub1");
    org.apache.activemq.broker.region.Topic dest =
        publishTestMessagesDurable(
            connection, new String[] {"sub1"}, 200, publishedMessageSize, DeliveryMode.PERSISTENT);

    verifyPendingStats(dest, subKey, 200, publishedMessageSize.get());

    // The expected value is only 100 because durables use an LRUCache
    // with a max size of 100
    verifyStoreStats(dest, 100, publishedMessageSize.get());

    // consume 100 messages
    consumeDurableTestMessages(connection, "sub1", 100, publishedMessageSize);

    // 100 should be left
    verifyPendingStats(dest, subKey, 100, publishedMessageSize.get());
    verifyStoreStats(dest, 100, publishedMessageSize.get());

    connection.close();
  }
Example 2
  public synchronized PageBufferClientStatus getStatus() {
    String state;
    if (closed) {
      state = "closed";
    } else if (future != null) {
      state = "running";
    } else if (scheduled) {
      state = "scheduled";
    } else if (completed) {
      state = "completed";
    } else {
      state = "queued";
    }
    String httpRequestState = "not scheduled";
    if (future != null) {
      httpRequestState = future.getState();
    }

    long rejectedRows = rowsRejected.get();
    int rejectedPages = pagesRejected.get();

    return new PageBufferClientStatus(
        location,
        state,
        lastUpdate,
        rowsReceived.get(),
        pagesReceived.get(),
        rejectedRows == 0 ? OptionalLong.empty() : OptionalLong.of(rejectedRows),
        rejectedPages == 0 ? OptionalInt.empty() : OptionalInt.of(rejectedPages),
        requestsScheduled.get(),
        requestsCompleted.get(),
        requestsFailed.get(),
        httpRequestState);
  }
    @Override
    public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel)
        throws Exception {
      try (RecoveriesCollection.RecoveryRef recoveryRef =
          onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) {
        final RecoveryTarget status = recoveryRef.status();
        final RecoveryState.Index indexState = status.state().getIndex();
        if (request.sourceThrottleTimeInNanos() != RecoveryState.Index.UNKNOWN) {
          indexState.addSourceThrottling(request.sourceThrottleTimeInNanos());
        }

        RateLimiter rateLimiter = recoverySettings.rateLimiter();
        if (rateLimiter != null) {
          long bytes = bytesSinceLastPause.addAndGet(request.content().length());
          if (bytes > rateLimiter.getMinPauseCheckBytes()) {
            // Time to pause
            bytesSinceLastPause.addAndGet(-bytes);
            long throttleTimeInNanos = rateLimiter.pause(bytes);
            indexState.addTargetThrottling(throttleTimeInNanos);
            status.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos);
          }
        }

        status.writeFileChunk(
            request.metadata(),
            request.position(),
            request.content(),
            request.lastChunk(),
            request.totalTranslogOps());
      }
      channel.sendResponse(TransportResponse.Empty.INSTANCE);
    }
  private void read(final KafkaStream<String, String> stream) {
    while (stream.iterator().hasNext()) {
      final int phase = phaser.register();

      final MessageAndMetadata<String, String> msg = stream.iterator().next();
      final long offset = msg.offset();
      final long partition = msg.partition();
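      // Track this offset as in-flight; the CAS calls below seed the initial
      // committed offset and partition exactly once.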
      unacknowledgedOffsets.add(offset);
      lastCommittedOffset.compareAndSet(0, offset);
      currentPartition.compareAndSet(-1, partition);

      final String jsonString = msg.message();

      handler.handle(
          configuration.getVertxAddress(),
          jsonString,
          () -> {
            unacknowledgedOffsets.remove(offset);
            phaser.arriveAndDeregister();
          });

      if (unacknowledgedOffsets.size() >= configuration.getMaxUnacknowledged()
          || partititionChanged(partition)
          || tooManyUncommittedOffsets(offset)) {
        LOG.info(
            "Got {} unacknowledged messages, waiting for ACKs in order to commit",
            unacknowledgedOffsets.size());
        if (!waitForAcks(phase)) {
          return;
        }
        LOG.info("Continuing message processing");
        commitOffsetsIfAllAcknowledged(offset);
      }
    }
  }
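The Phaser above gives each dispatched message its own party: register() when the message is handed to the handler, arriveAndDeregister() in its ack callback. waitForAcks (not shown) presumably blocks until that phase advances, i.e. until every outstanding message has been acknowledged. A hypothetical sketch under those assumptions; the ack-timeout accessor on configuration is invented:

  // Hypothetical sketch only -- not the project's actual implementation.
  private boolean waitForAcks(final int phase) {
    try {
      // Blocks until all registered parties have arrived in this phase,
      // i.e. every in-flight message has been acknowledged.
      phaser.awaitAdvanceInterruptibly(
          phase, configuration.getAckTimeoutMillis(), TimeUnit.MILLISECONDS);
      return true;
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      return false;
    } catch (TimeoutException e) {
      LOG.warn(
          "Timed out waiting for ACKs, {} offsets still unacknowledged",
          unacknowledgedOffsets.size());
      return false;
    }
  }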
Example 5
 /**
  * The mean time to execute removes.
  *
  * @return the time in microseconds
  */
 @Override
 public float getAverageRemoveTime() {
   long removeTimeNanos = cacheRemoveTimeTakenNanos.longValue();
   // Average over removals (not gets), and divide in floating point so the
   // result is not truncated by integer division.
   if (removeTimeNanos == 0 || getCacheRemovals() == 0) {
     return 0;
   }
   return ((float) removeTimeNanos / getCacheRemovals()) / NANOSECONDS_IN_A_MICROSECOND;
 }
Example 6
  @Test
  public void int64() {
    Wire wire = createWire();
    wire.write().int64(1);
    wire.write(BWKey.field1).int64(2);
    wire.write(() -> "Test").int64(3);
    checkWire(
        wire,
        "[pos: 0, rlim: 16, wlim: 8EiB, cap: 8EiB ] À⒈Æfield1⒉ÄTest⒊",
        "[pos: 0, rlim: 40, wlim: 8EiB, cap: 8EiB ] À§⒈٠٠٠٠٠٠٠Æfield1§⒉٠٠٠٠٠٠٠ÄTest§⒊٠٠٠٠٠٠٠",
        "[pos: 0, rlim: 11, wlim: 8EiB, cap: 8EiB ] À⒈º⒈⒉º²ñ\\u009E⒈⒊",
        "[pos: 0, rlim: 35, wlim: 8EiB, cap: 8EiB ] À§⒈٠٠٠٠٠٠٠º⒈§⒉٠٠٠٠٠٠٠º²ñ\\u009E⒈§⒊٠٠٠٠٠٠٠",
        "[pos: 0, rlim: 3, wlim: 8EiB, cap: 8EiB ] ⒈⒉⒊",
        "[pos: 0, rlim: 27, wlim: 8EiB, cap: 8EiB ] §⒈٠٠٠٠٠٠٠§⒉٠٠٠٠٠٠٠§⒊٠٠٠٠٠٠٠");
    checkAsText123(wire);

    // ok as blank matches anything
    AtomicLong i = new AtomicLong();
    LongStream.rangeClosed(1, 3)
        .forEach(
            e -> {
              wire.read().int64(i::set);
              assertEquals(e, i.get());
            });

    assertEquals(0, bytes.readRemaining());
    // check it's safe to read too much.
    wire.read();
  }
  /** Returns the probe's windowed average plus {@code n} standard deviations (e.g. n = 2 for 2-sigma). */
  public final double sampleSigma(int n) {
    synchronized (_lock) {
      long count = _count.get();
      long lastCount = _lastStdCount;
      _lastStdCount = count;

      double sum = _sum.get();
      double lastSum = _lastStdSum;
      _lastStdSum = sum;

      double sumSquare = _sumSquare;
      _sumSquare = 0;

      if (count == lastCount) return 0;

      double avg = (sum - lastSum) / (count - lastCount);
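      // Intended as the numerator of the windowed variance via the identity
      // n * sum(x^2) - (sum x)^2; note the subtracted term uses the cumulative
      // sum rather than the windowed (sum - lastSum).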
      double part = (count - lastCount) * sumSquare - sum * sum;

      if (part < 0) part = 0;

      double std = Math.sqrt(part) / (count - lastCount);

      return _scale * (avg + n * std);
    }
  }
Example 8
  @Test
  public void testConsumers() {
    final AtomicLong lc = new AtomicLong(0L);
    Assert.assertEquals(Anoa.of(1L), handler.consumer(lc::addAndGet).apply(handler.of(1L)));
    Assert.assertEquals(1L, lc.get());
    Assert.assertEquals(
        Anoa.empty(Stream.of(Meta.OTHER)),
        handler
            .consumerChecked(
                __ -> {
                  throw new IOException();
                })
            .apply(handler.of(1L)));

    final AtomicLong lc2 = new AtomicLong(0L);
    Assert.assertEquals(
        Anoa.of(1L),
        handler.biConsumer((Long x, Long y) -> lc2.addAndGet(x + y)).apply(Anoa.of(1L), 1L));
    Assert.assertEquals(2L, lc2.get());

    Assert.assertEquals(
        Anoa.empty(Stream.of(Meta.OTHER)),
        handler
            .biConsumerChecked(
                (_1, _2) -> {
                  throw new IOException();
                })
            .apply(Anoa.of(1L), 1L));
  }
Example 9
  @Test
  public void testFunctions() {
    final AtomicLong lf = new AtomicLong(10L);
    Assert.assertEquals(Anoa.of(10L), handler.function(lf::getAndAdd).apply(handler.of(1L)));
    Assert.assertEquals(11L, lf.get());
    Assert.assertEquals(
        Anoa.empty(Stream.of(Meta.OTHER)),
        handler
            .functionChecked(
                __ -> {
                  throw new IOException();
                })
            .apply(handler.of(1L)));

    final AtomicLong lf2 = new AtomicLong(10L);
    Assert.assertEquals(
        Anoa.of(10L),
        handler.biFunction((Long x, Long y) -> lf2.getAndAdd(x + y)).apply(Anoa.of(1L), 1L));
    Assert.assertEquals(12L, lf2.get());

    Assert.assertEquals(
        Anoa.empty(Stream.of(Meta.OTHER)),
        handler
            .biFunctionChecked(
                (_1, _2) -> {
                  throw new IOException();
                })
            .apply(Anoa.of(1L), 1L));
  }
Example 10
    @Override
    protected void doGet(HttpServletRequest request, HttpServletResponse response)
        throws IOException, ServletException {
      String uri = getDecodedUri(request);
      try {
        Pattern p = Pattern.compile(".*/NodePersistentStorage.bin/([^/]+)/([^/]+)");
        Matcher m = p.matcher(uri);
        boolean b = m.matches();
        if (!b) {
          setResponseStatus(response, HttpServletResponse.SC_BAD_REQUEST);
          response.getWriter().write("Improperly formatted URI");
          return;
        }

        String categoryName = m.group(1);
        String keyName = m.group(2);
        NodePersistentStorage nps = H2O.getNPS();
        AtomicLong length = new AtomicLong();
        InputStream is = nps.get(categoryName, keyName, length);
        if (length.get() > (long) Integer.MAX_VALUE) {
          throw new Exception("NPS value size exceeds Integer.MAX_VALUE");
        }
        response.setContentType("application/octet-stream");
        response.setContentLength((int) length.get());
        response.addHeader("Content-Disposition", "attachment; filename=" + keyName + ".flow");
        setResponseStatus(response, HttpServletResponse.SC_OK);
        OutputStream os = response.getOutputStream();
        water.util.FileUtils.copyStream(is, os, 2048);
      } catch (Exception e) {
        sendErrorResponse(response, e, uri);
      } finally {
        logRequest("GET", request, response);
      }
    }
Example 11
  public static void init(Map<String, String> props) {

    // TODO - check if a clone is in progress and prompt to kill it
    // for now kill any resharder threads that might be running
    if (OpLogWriter.isRunning()) {
      MessageLog.push("ERROR: clone operation in progress.", Config.class.getSimpleName());
    }

    _done = false;
    _ts = 0;

    if (props != null) {
      for (Entry<String, String> prop : props.entrySet()) {
        setProperty(prop.getKey(), prop.getValue());
      }
    }

    loadDefaults();

    _chunks = new HashMap<String, List<Chunk>>();

    _docCount.set(0);
    _orphanCount.set(0);
    _oplogCount.set(0);
    _oplogReads.set(0);

    _nodes = _logDB.getCollection("nodes");

    if (props == null) _nodes.drop();
  }
Example 12
 /** Resets all the performance counters. */
 public void resetPerformanceCounters() {
   classLoadingCount.set(0);
   classLoadingTime.set(0);
   classLoadingPrefetchCacheCount.set(0);
   resourceLoadingCount.set(0);
   resourceLoadingTime.set(0);
 }
Example 13
    @Override
    public void preGetOp(
        final ObserverContext<RegionCoprocessorEnvironment> e,
        final Get get,
        final List<Cell> results)
        throws IOException {

      if (e.getEnvironment().getRegion().getRegionInfo().getReplicaId() == 0) {
        CountDownLatch latch = cdl.get();
        try {
          if (sleepTime.get() > 0) {
            LOG.info("Sleeping for " + sleepTime.get() + " ms");
            Thread.sleep(sleepTime.get());
          } else if (latch.getCount() > 0) {
            LOG.info("Waiting for the counterCountDownLatch");
            latch.await(2, TimeUnit.MINUTES); // Bounded wait so the tests can still finish.
            if (latch.getCount() > 0) {
              throw new RuntimeException("Can't wait more");
            }
          }
        } catch (InterruptedException e1) {
          LOG.error(e1);
        }
      } else {
        LOG.info("We're not the primary replica.");
      }
    }
  @Override
  @Test(timeout = 30000)
  public void testMessageSizeTwoDurables() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();

    Connection connection = new ActiveMQConnectionFactory(brokerConnectURI).createConnection();
    connection.setClientID("clientId");
    connection.start();

    org.apache.activemq.broker.region.Topic dest =
        publishTestMessagesDurable(
            connection,
            new String[] {"sub1", "sub2"},
            200,
            publishedMessageSize,
            DeliveryMode.PERSISTENT);

    // verify the count and size
    SubscriptionKey subKey = new SubscriptionKey("clientId", "sub1");
    verifyPendingStats(dest, subKey, 200, publishedMessageSize.get());

    // consume messages just for sub1
    consumeDurableTestMessages(connection, "sub1", 200, publishedMessageSize);

    // There is still a durable that hasn't consumed so the messages should exist
    SubscriptionKey subKey2 = new SubscriptionKey("clientId", "sub2");
    verifyPendingStats(dest, subKey, 0, 0);
    verifyPendingStats(dest, subKey2, 200, publishedMessageSize.get());

    // The expected value is only 100 because durables use an LRUCache
    // with a max size of 100
    verifyStoreStats(dest, 100, publishedMessageSize.get());

    connection.close();
  }
Example 15
 @Override
 public void afterStep(
     Collection<ProxyPerson> population, Collection<ProxyPerson> original, boolean accepted) {
   iter.incrementAndGet();
   iters2.incrementAndGet();
   if (accepted) accepts.incrementAndGet();
 }
Example 16
 @Override
 public boolean commit() {
   try {
     if (!writeSet.isEmpty()) {
       int v = status.get();
       int s = v & STATUS_MASK;
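        // The status word packs the transaction state in its low bits; adding
        // (TX_COMMITTING - TX_ACTIVE) swaps the state while preserving the
        // rest of the word, so a single CAS both checks and transitions it.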
       if (s == TX_ACTIVE && status.compareAndSet(v, v + (TX_COMMITTING - TX_ACTIVE))) {
         long newClock = clock.incrementAndGet();
         if (newClock != startTime.get() + 1 && !readSet.validate(this, id)) {
           rollback0();
           return false;
         }
         // Write values and release locks
         writeSet.commit(newClock);
         status.set(v + (TX_COMMITTED - TX_ACTIVE));
       } else {
         // We have been killed: wait for our locks to have been released
         while (s != TX_ABORTED) s = status.get() & STATUS_MASK;
         return false;
       }
     } else {
       // No need to set status to COMMITTED (we cannot be killed with an empty write set)
     }
     attempts = 0;
     return true;
   } finally {
     if (irrevocableState) {
       irrevocableState = false;
       irrevocableAccessLock.writeLock().unlock();
     } else {
       irrevocableAccessLock.readLock().unlock();
     }
   }
 }
Example 17
  @Test
  public void uint32() {
    Wire wire = createWire();
    wire.write().uint32(1);
    wire.write(BWKey.field1).uint32(2);
    wire.write(() -> "Test").uint32(3);
    checkWire(
        wire,
        "[pos: 0, rlim: 16, wlim: 8EiB, cap: 8EiB ] À⒈Æfield1⒉ÄTest⒊",
        "[pos: 0, rlim: 28, wlim: 8EiB, cap: 8EiB ] À£⒈٠٠٠Æfield1£⒉٠٠٠ÄTest£⒊٠٠٠",
        "[pos: 0, rlim: 11, wlim: 8EiB, cap: 8EiB ] À⒈º⒈⒉º²ñ\\u009E⒈⒊",
        "[pos: 0, rlim: 23, wlim: 8EiB, cap: 8EiB ] À£⒈٠٠٠º⒈£⒉٠٠٠º²ñ\\u009E⒈£⒊٠٠٠",
        "[pos: 0, rlim: 3, wlim: 8EiB, cap: 8EiB ] ⒈⒉⒊",
        "[pos: 0, rlim: 15, wlim: 8EiB, cap: 8EiB ] £⒈٠٠٠£⒉٠٠٠£⒊٠٠٠");
    checkAsText123(wire);

    // ok as blank matches anything
    AtomicLong i = new AtomicLong();
    IntStream.rangeClosed(1, 3)
        .forEach(
            e -> {
              wire.read().uint32(i::set);
              assertEquals(e, i.get());
            });

    assertEquals(0, bytes.readRemaining());
    // check it's safe to read too much.
    wire.read();
  }
  private void performMerge(List<StoreFileScanner> scanners, HStore store, StoreFile.Writer writer)
      throws IOException {
    InternalScanner scanner = null;
    try {
      Scan scan = new Scan();

      // Include deletes
      scanner =
          new StoreScanner(
              store,
              store.scanInfo,
              scan,
              scanners,
              ScanType.MAJOR_COMPACT,
              Long.MIN_VALUE,
              Long.MIN_VALUE);

      ArrayList<KeyValue> kvs = new ArrayList<KeyValue>();

      while (scanner.next(kvs) || kvs.size() != 0) {
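        // next() can return false on the final call while still having filled
        // kvs, so the loop condition also drains any remaining batch.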
        numKV.addAndGet(kvs.size());
        for (KeyValue kv : kvs) {
          totalBytes.addAndGet(kv.getLength());
          writer.append(kv);
        }
        kvs.clear();
      }
    } finally {
      if (scanner != null) scanner.close();
    }
  }
 @Override
 public void endTransaction(boolean success) {
   transactionCount.getAndIncrement();
   if (success) {
     committedTransactionCount.getAndIncrement();
   }
 }
  /**
   * Tests Functional#reduce with the summation reduction; we traverse all keys (in each direction)
   * to make sure the sum and count are correct and keys and values match up.
   */
  public void testSummationReduction() throws Exception {
    final AtomicLong counter = new AtomicLong();
    final Exchange exchange = getExchange(db, true);

    Reduction<String, Integer, Integer> summation =
        new Reduction<String, Integer, Integer>() {
          @Override
          public Integer reduce(Pair<String, Integer> row, Integer accum) {
            Assert.assertEquals(row.getKey(), getKey(row.getValue()));
            counter.getAndIncrement();

            return accum + row.getValue();
          }
        };

    int ascendingSum = Functional.reduce(exchange, getFullTraversal(Direction.ASC), summation, 0);
    Assert.assertEquals(ascendingSum, 499500);
    Assert.assertEquals(counter.get(), 1000);

    counter.set(0);

    int descendingSum = Functional.reduce(exchange, getFullTraversal(Direction.DESC), summation, 0);

    Assert.assertEquals(descendingSum, 499500);
    Assert.assertEquals(counter.get(), 1000);
  }
Example 21
  synchronized boolean put(KeyBuffer keyBuffer, byte[] data, boolean ifAbsent, byte[] old) {
    long sz = sizeOf(keyBuffer, data);
    while (freeCapacity.get() < sz)
      if (!evictOne()) {
        remove(keyBuffer);
        return false;
      }

    byte[] existing = map.get(keyBuffer);
    if (ifAbsent || old != null) {
      if (ifAbsent && existing != null) return false;
      if (old != null && existing != null && !Arrays.equals(old, existing)) return false;
    }

    map.put(keyBuffer, data);
    lru.remove(keyBuffer);
    lru.addFirst(keyBuffer);

    if (existing != null) {
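      // Credit back the space held by the replaced value; the new value's
      // size is charged below, so the net change is the size delta.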
      freeCapacity.addAndGet(sizeOf(keyBuffer, existing));
      putReplaceCount++;
    } else putAddCount++;

    freeCapacity.addAndGet(-sz);

    return true;
  }
Example 22
  @Test
  public void testRequestThroughMap() {
    TestSubscriber<Integer> ts = new TestSubscriber<Integer>();
    ts.request(3);
    final AtomicLong requested = new AtomicLong();
    Observable.create(
            new OnSubscribe<Integer>() {

              @Override
              public void call(Subscriber<? super Integer> s) {
                s.setProducer(
                    new Producer() {

                      @Override
                      public void request(long n) {
                        requested.set(n);
                      }
                    });
              }
            })
        .map(
            new Func1<Integer, Integer>() {

              @Override
              public Integer call(Integer t1) {
                return t1;
              }
            })
        .subscribe(ts);
    assertEquals(3, requested.get());
  }
Example 23
 @Override
 public byte[] get(NamedKey key) {
   try (ResourceHolder<MemcachedClientIF> clientHolder = client.get()) {
     Future<Object> future;
     try {
       future = clientHolder.get().asyncGet(computeKeyHash(memcachedPrefix, key));
     } catch (IllegalStateException e) {
       // operation did not get queued in time (queue is full)
       errorCount.incrementAndGet();
       log.warn(e, "Unable to queue cache operation");
       return null;
     }
     try {
       byte[] bytes = (byte[]) future.get(timeout, TimeUnit.MILLISECONDS);
       if (bytes != null) {
         hitCount.incrementAndGet();
       } else {
         missCount.incrementAndGet();
       }
       return bytes == null ? null : deserializeValue(key, bytes);
     } catch (TimeoutException e) {
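        // Abandon the lookup: cancel(false) avoids interrupting the client's
        // I/O thread, and null is returned as if the key were absent.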
       timeoutCount.incrementAndGet();
       future.cancel(false);
       return null;
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();
       throw Throwables.propagate(e);
     } catch (ExecutionException e) {
       errorCount.incrementAndGet();
       log.warn(e, "Exception pulling item from cache");
       return null;
     }
   }
 }
Example 24
  @Test
  public void testAllRowsReader() throws Exception {
    final AtomicLong counter = new AtomicLong(0);

    AllRowsReader<String, String> reader =
        new AllRowsReader.Builder<String, String>(keyspace, CF_STANDARD1)
            .withPageSize(3)
            .withConcurrencyLevel(2)
            //                .withPartitioner(new Murmur3Partitioner())
            .forEachRow(
                new Function<Row<String, String>, Boolean>() {
                  @Override
                  public Boolean apply(Row<String, String> row) {
                    counter.incrementAndGet();
                    LOG.info("Got a row: " + row.getKey().toString());
                    return true;
                  }
                })
            .build();

    try {
      boolean result = reader.call();
      Assert.assertEquals(counter.get(), 27);
      Assert.assertTrue(result);
    } catch (Exception e) {
      LOG.info(e.getMessage(), e);
      Assert.fail(e.getMessage());
    }
  }
Example 25
  public void reset() {
    blobOpenCount.set(0);
    clobOpenCount.set(0);

    connectionStat.reset();
    statementStat.reset();
    resultSetStat.reset();
    connectionHoldHistogram.reset();

    lock.writeLock().lock();
    try {
      Iterator<Map.Entry<String, JdbcSqlStat>> iter = sqlStatMap.entrySet().iterator();
      while (iter.hasNext()) {
        Map.Entry<String, JdbcSqlStat> entry = iter.next();
        JdbcSqlStat stat = entry.getValue();
        if (stat.getExecuteCount() == 0 && stat.getRunningCount() == 0) {
          stat.setRemoved(true);
          iter.remove();
        } else {
          stat.reset();
        }
      }
    } finally {
      lock.writeLock().unlock();
    }

    for (JdbcConnectionStat.Entry connectionStat : connections.values()) {
      connectionStat.reset();
    }
  }
 // Appends a new packet of buffered deletes to the stream,
 // setting its generation:
 public synchronized long push(FrozenBufferedUpdates packet) {
   /*
    * The insert operation must be atomic. If we let threads increment the gen
    * and push the packet afterwards, we risk that packets end up out of order.
    * With DWPT this is possible if two or more flushes are racing to push
    * updates. If the pushed packets get out of order, we would lose documents,
    * since deletes are applied to the wrong segments.
    */
   packet.setDelGen(nextGen++);
   assert packet.any();
   assert checkDeleteStats();
   assert packet.delGen() < nextGen;
   assert updates.isEmpty() || updates.get(updates.size() - 1).delGen() < packet.delGen()
       : "Delete packets must be in order";
   updates.add(packet);
   numTerms.addAndGet(packet.numTermDeletes);
   bytesUsed.addAndGet(packet.bytesUsed);
   if (infoStream.isEnabled("BD")) {
     infoStream.message(
         "BD",
         "push deletes "
             + packet
             + " delGen="
             + packet.delGen()
             + " packetCount="
             + updates.size()
             + " totBytesUsed="
             + bytesUsed.get());
   }
   assert checkDeleteStats();
   return packet.delGen();
 }
 boolean compare(AtomicLongRunOnGpu grhs) {
   if (grhs == null) {
     System.out.println("grhs == null");
     return false;
   }
   if (m_Num1 != grhs.m_Num1) {
     System.out.println("num1");
     System.out.println("lhs: " + m_Num1);
     System.out.println("rhs: " + grhs.m_Num1);
     return false;
   }
   if (m_ALong.get() != grhs.m_ALong.get()) {
     System.out.println("value");
     System.out.println("lhs: " + m_ALong.get());
     System.out.println("rhs: " + grhs.m_ALong.get());
     return false;
   }
   if (m_Num2 != grhs.m_Num2) {
     System.out.println("num2");
     System.out.println("lhs: " + m_Num2);
     System.out.println("rhs: " + grhs.m_Num2);
     return false;
   }
   if (grhs.m_Random == -100) {
     System.out.println("random");
     return false;
   }
   if (grhs.m_Random2 == -100) {
     System.out.println("random2");
     return false;
   }
   return true;
 }
  private void performWrites() {
    final MeterInternalCallContext context = new MeterInternalCallContext();

    // This is the atomic operation: bulk insert the new aggregated TimelineChunk
    // objects, and delete or invalidate the ones that were aggregated. This
    // should be very fast.
    final long startWriteTime = System.currentTimeMillis();
    aggregatorSqlDao.begin();
    timelineDao.bulkInsertTimelineChunks(chunksToWrite, context);
    if (config.getDeleteAggregatedChunks()) {
      aggregatorSqlDao.deleteTimelineChunks(chunkIdsToInvalidateOrDelete, context);
    } else {
      aggregatorSqlDao.makeTimelineChunksInvalid(chunkIdsToInvalidateOrDelete, context);
    }
    aggregatorSqlDao.commit();
    msWritingDb.addAndGet(System.currentTimeMillis() - startWriteTime);

    timelineChunksWritten.addAndGet(chunksToWrite.size());
    timelineChunksInvalidatedOrDeleted.addAndGet(chunkIdsToInvalidateOrDelete.size());
    chunksToWrite.clear();
    chunkIdsToInvalidateOrDelete.clear();
    final long sleepMs = config.getAggregationSleepBetweenBatches().getMillis();
    if (sleepMs > 0) {
      final long timeBeforeSleep = System.currentTimeMillis();
      try {
        Thread.sleep(sleepMs);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
      msSpentSleeping.addAndGet(System.currentTimeMillis() - timeBeforeSleep);
    }
    timelineChunkBatchesProcessed.incrementAndGet();
  }
Example 29
 static long nextMemberId(Container container) {
   AtomicLong counter = CONTAINER_COUNTER.putIfAbsent(container.id(), new AtomicLong(-1));
   if (counter == null) {
     counter = CONTAINER_COUNTER.get(container.id());
   }
   return counter.incrementAndGet();
 }
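The putIfAbsent-then-get dance above is the pre-Java-8 idiom for installing a shared counter exactly once. On Java 8+ the same thing collapses to a single computeIfAbsent call; a minimal equivalent sketch, assuming CONTAINER_COUNTER is a ConcurrentMap keyed by the container id:

 // Hypothetical equivalent, not the original method.
 static long nextMemberIdViaComputeIfAbsent(Container container) {
   // computeIfAbsent installs the -1-seeded counter atomically on first use,
   // so member ids still start at 0 after incrementAndGet().
   return CONTAINER_COUNTER
       .computeIfAbsent(container.id(), id -> new AtomicLong(-1))
       .incrementAndGet();
 }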
  @Override
  @Test(timeout = 30000)
  public void testMessageSizeOneDurablePartialConsumption() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();

    Connection connection = new ActiveMQConnectionFactory(brokerConnectURI).createConnection();
    connection.setClientID("clientId");
    connection.start();

    SubscriptionKey subKey = new SubscriptionKey("clientId", "sub1");
    org.apache.activemq.broker.region.Topic dest =
        publishTestMessagesDurable(
            connection, new String[] {"sub1"}, 200, publishedMessageSize, DeliveryMode.PERSISTENT);

    // verify the count and size - durable is offline so all 200 should be
    // pending since none are in prefetch
    verifyPendingStats(dest, subKey, 200, publishedMessageSize.get());

    // The expected value is only 100 because durables use an LRUCache
    // with a max size of 100
    verifyStoreStats(dest, 100, publishedMessageSize.get());

    // consume 50 of the 200 messages
    consumeDurableTestMessages(connection, "sub1", 50, publishedMessageSize);

    // 150 messages should be left pending
    verifyPendingStats(dest, subKey, 150, publishedMessageSize.get());

    // The expected value is only 100 because durables use an LRUCache with a
    // max size of 100; verify the size is at least 100 messages times the
    // minimum message size of 100 bytes
    verifyStoreStats(dest, 100, 100 * 100);

    connection.close();
  }