/**
   * Assigns a new ticket id.
   *
   * @param repository the repository in which to assign the ticket id
   * @return a new long id
   */
  @Override
  public synchronized long assignNewId(RepositoryModel repository) {
    long newId = 0L;
    Repository db = repositoryManager.getRepository(repository.name);
    try {
      if (getTicketsBranch(db) == null) {
        createTicketsBranch(db);
      }

      // identify current highest ticket id by scanning the paths in the tip tree
      if (!lastAssignedId.containsKey(repository.name)) {
        lastAssignedId.put(repository.name, new AtomicLong(0));
      }
      AtomicLong lastId = lastAssignedId.get(repository.name);
      if (lastId.get() <= 0) {
        Set<Long> ids = getIds(repository);
        for (long id : ids) {
          if (id > lastId.get()) {
            lastId.set(id);
          }
        }
      }

      // assign the id and touch an empty journal to hold its place
      newId = lastId.incrementAndGet();
      String journalPath = toTicketPath(newId) + "/" + JOURNAL;
      writeTicketsFile(db, journalPath, "", "gitblit", "assigned id #" + newId);
    } finally {
      db.close();
    }
    return newId;
  }
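A note on the max scan above: the original relies on the method being synchronized while it folds the scanned ids into the counter. On Java 8+ the same fold can be written with AtomicLong.accumulateAndGet, which is atomic even without the lock. A minimal sketch, not gitblit's code (the ids set is an illustrative stand-in for getIds(repository); Set.of needs Java 9+):

import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;

class MaxSeedSketch {
  public static void main(String[] args) {
    AtomicLong lastId = new AtomicLong(0);
    Set<Long> ids = Set.of(3L, 17L, 9L); // stand-in for getIds(repository)
    // Fold each scanned id into the counter, keeping the maximum.
    for (long id : ids) {
      lastId.accumulateAndGet(id, Math::max);
    }
    System.out.println(lastId.incrementAndGet()); // 18: the next free id
  }
}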
 boolean compare(AtomicLongRunOnGpu grhs) {
   if (grhs == null) {
     System.out.println("grhs == null");
     return false;
   }
   if (m_Num1 != grhs.m_Num1) {
     System.out.println("num1");
     System.out.println("lhs: " + m_Num1);
     System.out.println("rhs: " + grhs.m_Num1);
     return false;
   }
   if (m_ALong.get() != grhs.m_ALong.get()) {
     System.out.println("value");
     System.out.println("lhs: " + m_ALong.get());
     System.out.println("rhs: " + grhs.m_ALong.get());
     return false;
   }
   if (m_Num2 != grhs.m_Num2) {
     System.out.println("num2");
     System.out.println("lhs: " + m_Num2);
     System.out.println("rhs: " + grhs.m_Num2);
     return false;
   }
   if (grhs.m_Random == -100) {
     System.out.println("random");
     return false;
   }
   if (grhs.m_Random2 == -100) {
     System.out.println("random2");
     return false;
   }
   return true;
 }
Example #3
  @Test
  public void testPollWait() throws InterruptedException, IOException {
    final FileBlockingQueue<String> queue = getFileBlockingQueue();

    final AtomicLong start = new AtomicLong(System.currentTimeMillis());
    String m = queue.poll(1000, TimeUnit.MILLISECONDS);
    final AtomicLong duration = new AtomicLong(System.currentTimeMillis() - start.get());
    assertTrue(duration.get() >= 1000 && duration.get() <= 2000);
    assertNull(m);

    ExecutorService e = Executors.newFixedThreadPool(1);
    e.execute(
        new Runnable() {
          @Override
          public void run() {
            start.set(System.currentTimeMillis());
            String m = null;
            try {
              m = queue.poll(5000, TimeUnit.MILLISECONDS);
            } catch (InterruptedException e1) {
              throw new RuntimeException(e1);
            }
            assertNotNull(m);
            duration.set(System.currentTimeMillis() - start.get());
          }
        });

    Thread.sleep(1000);
    queue.offer("testString");

    assertTrue(duration.get() < 4000);
  }
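The final assertion above races with the executor thread: duration may still hold the first poll's value when the main thread reads it, and an assertion failure inside the Runnable would not fail the test. A hedged sketch of one way to close both gaps, reusing the test's own names and assuming java.util.concurrent's CountDownLatch and AtomicReference are imported:

    // sketch: replace the executor block above, then gate the final asserts on a latch
    final CountDownLatch done = new CountDownLatch(1);
    final AtomicReference<String> received = new AtomicReference<>();
    e.execute(() -> {
      start.set(System.currentTimeMillis());
      try {
        received.set(queue.poll(5000, TimeUnit.MILLISECONDS));
      } catch (InterruptedException e1) {
        throw new RuntimeException(e1);
      }
      duration.set(System.currentTimeMillis() - start.get());
      done.countDown();
    });

    Thread.sleep(1000);
    queue.offer("testString");

    assertTrue(done.await(5, TimeUnit.SECONDS)); // wait for the worker to finish
    assertNotNull(received.get());               // assert on the main thread
    assertTrue(duration.get() < 4000);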
  /** Checks whether there is data waiting to be fetched, and whether its count >= batchSize. */
  private boolean checkUnGetSlotAt(LogPosition startPosition, int batchSize) {
    if (batchMode.isItemSize()) {
      long current = getSequence.get();
      long maxAbleSequence = putSequence.get();
      long next = current;
      if (startPosition == null || !startPosition.getPostion().isIncluded()) {
        // after the first subscription the start position itself must be
        // included, otherwise the first record would be lost
        next = next + 1; // one extra slot is required
      }

      return current < maxAbleSequence && next + batchSize - 1 <= maxAbleSequence;
    } else {
      // fall back to the memory-size check
      long currentSize = getMemSize.get();
      long maxAbleSize = putMemSize.get();

      return maxAbleSize - currentSize >= batchSize * bufferMemUnit;
    }
  }
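A worked instance of the item-count branch above, with illustrative numbers that are not from the source: getSequence = 10, putSequence = 25, batchSize = 10, on the first subscription (start slot not yet included):

public class BatchCheckDemo {
  public static void main(String[] args) {
    long current = 10, maxAbleSequence = 25; // getSequence / putSequence
    int batchSize = 10;
    long next = current + 1; // first subscription: the start slot is counted too
    boolean enough = current < maxAbleSequence
        && next + batchSize - 1 <= maxAbleSequence; // 11 + 10 - 1 = 20 <= 25
    System.out.println(enough); // true: a full batch of 10 is available
  }
}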
 public LogPosition getFirstPosition() throws CanalStoreException {
   final ReentrantLock lock = this.lock;
   lock.lock();
   try {
     long firstSequence = ackSequence.get();
     if (firstSequence == INIT_SQEUENCE && firstSequence < putSequence.get()) {
       // no data has been acked yet
       // the last ack is -1, so move to the next entry; included = false
       Event event = entries[getIndex(firstSequence + 1)];
       return CanalEventUtils.createPosition(event, false);
     } else if (firstSequence > INIT_SQEUENCE && firstSequence < putSequence.get()) {
       // ack has not caught up with put yet
       // the entry after the last acked position, i.e. last ack + 1
       Event event = entries[getIndex(firstSequence + 1)];
       return CanalEventUtils.createPosition(event, true);
     } else if (firstSequence > INIT_SQEUENCE && firstSequence == putSequence.get()) {
       // ack has caught up; the store holds no data
       // the last acked entry, the same entry as last; included = false
       Event event = entries[getIndex(firstSequence)];
       return CanalEventUtils.createPosition(event, false);
     } else {
       // no data at all
       return null;
     }
     }
   } finally {
     lock.unlock();
   }
 }
Example #6
 @Override
 public void writeData(ObjectDataOutput out) throws IOException {
   out.writeLong(ownedEntryCount);
   out.writeLong(ownedEntryMemoryCost);
   out.writeLong(hits.get());
   out.writeLong(misses.get());
 }
 /** addAndGet adds given value to current, and returns current value */
 public void testAddAndGet() {
   AtomicLong ai = new AtomicLong(1);
   assertEquals(3, ai.addAndGet(2));
   assertEquals(3, ai.get());
   assertEquals(-1, ai.addAndGet(-4));
   assertEquals(-1, ai.get());
 }
Example #8
 void updateLastModified(long time) {
   for (long current = lastModified.get(); time > current; current = lastModified.get()) {
     if (lastModified.compareAndSet(current, time)) {
       return;
     }
   }
 }
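The loop above is the classic CAS retry for a monotonic maximum: lastModified only ever moves forward, and a lost race simply re-reads and retries. On Java 8+ the same effect can be written with accumulateAndGet, which retries internally; a minimal sketch, not the project's code:

import java.util.concurrent.atomic.AtomicLong;

class LastModifiedSketch {
  private final AtomicLong lastModified = new AtomicLong();

  void updateLastModified(long time) {
    // monotonic max: stale timestamps never move the value backwards
    lastModified.accumulateAndGet(time, Math::max);
  }

  public static void main(String[] args) {
    LastModifiedSketch s = new LastModifiedSketch();
    s.updateLastModified(100);
    s.updateLastModified(50); // older timestamp is ignored
    System.out.println(s.lastModified.get()); // 100
  }
}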
    public void updateMemoryUsageStats(Tablet tablet, long size, long mincSize) {

      // do not want to update stats for every little change,
      // so only do it under certain circumstances... the reason
      // for this is that reporting stats acquires a lock, do
      // not want all tablets locking on the same lock for every
      // commit
      long totalSize = size + mincSize;
      long lrs = lastReportedSize.get();
      long delta = totalSize - lrs;
      long lrms = lastReportedMincSize.get();
      boolean report = false;
      // the atomic longs are considered independently, when one is set
      // the other is not set intentionally because this method is not
      // synchronized... therefore there are not transactional semantics
      // for reading and writing two variables
      if ((lrms > 0 && mincSize == 0 || lrms == 0 && mincSize > 0)
          && lastReportedMincSize.compareAndSet(lrms, mincSize)) {
        report = true;
      }

      long currentTime = System.currentTimeMillis();
      if ((delta > 32000 || delta < 0 || (currentTime - lastReportedCommitTime > 1000))
          && lastReportedSize.compareAndSet(lrs, totalSize)) {
        if (delta > 0) lastReportedCommitTime = currentTime;
        report = true;
      }

      if (report) memMgmt.updateMemoryUsageStats(tablet, size, lastReportedCommitTime, mincSize);
    }
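As the comments note, each AtomicLong is CASed independently, so only the thread whose compareAndSet wins reports for a given change; there is no lock and no transactional read of the pair. A minimal sketch of that gate for the size counter alone (the class is illustrative; the thresholds mirror the code above):

import java.util.concurrent.atomic.AtomicLong;

class ReportGateSketch {
  static final AtomicLong lastReportedSize = new AtomicLong();

  // Returns true only for the caller whose CAS wins; concurrent callers
  // observing the same stale value skip, so no shared lock is needed.
  static boolean maybeReport(long totalSize) {
    long lrs = lastReportedSize.get();
    long delta = totalSize - lrs;
    return (delta > 32000 || delta < 0)
        && lastReportedSize.compareAndSet(lrs, totalSize);
  }

  public static void main(String[] args) {
    System.out.println(maybeReport(50000)); // true: the 50k jump wins the CAS
    System.out.println(maybeReport(60000)); // false: a 10k delta is under threshold
  }
}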
Example #10
  /** Called by BlockWriter to actually write the block. */
  void writeFromBlockWriter() throws IOException {
    do {
      long dirty = _dirtyRange.getAndSet(INIT_DIRTY);

      int dirtyMax = (int) (dirty >> 32);
      int dirtyMin = (int) dirty;

      if (dirtyMin < dirtyMax) {
        if (log.isLoggable(Level.ALL))
          log.log(Level.ALL, "write db-block " + this + " [" + dirtyMin + ", " + dirtyMax + "]");

        boolean isPriority = false;

        writeImpl(dirtyMin, dirtyMax - dirtyMin, isPriority);
      }

      if (_dirtyRange.get() == INIT_DIRTY && !isDestroyed()) {
        toValid();
      }
    } while (_dirtyRange.get() != INIT_DIRTY);

    if (_useCount.get() <= 0) {
      freeImpl();
    }
  }
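The decode above implies that _dirtyRange packs two ints into one AtomicLong: the dirty max in the high 32 bits and the dirty min in the low 32, so getAndSet can claim and reset both bounds in a single atomic step. A self-contained sketch of an encoding consistent with that decode; the INIT_DIRTY sentinel here (min > max meaning "nothing dirty") is an assumption, and the real constant may differ:

import java.util.concurrent.atomic.AtomicLong;

class DirtyRangeSketch {
  static long pack(int min, int max) {
    return ((long) max << 32) | (min & 0xFFFFFFFFL);
  }

  // assumed sentinel: min = Integer.MAX_VALUE, max = 0, i.e. min >= max
  static final long INIT_DIRTY = pack(Integer.MAX_VALUE, 0);
  static final AtomicLong dirtyRange = new AtomicLong(INIT_DIRTY);

  // Widen the range with a CAS loop so concurrent writers never lose bounds.
  static void addDirty(int min, int max) {
    long oldVal, newVal;
    do {
      oldVal = dirtyRange.get();
      int curMin = (int) oldVal;
      int curMax = (int) (oldVal >> 32);
      newVal = pack(Math.min(curMin, min), Math.max(curMax, max));
    } while (!dirtyRange.compareAndSet(oldVal, newVal));
  }

  public static void main(String[] args) {
    addDirty(128, 256);
    addDirty(64, 192);
    long dirty = dirtyRange.getAndSet(INIT_DIRTY); // claim and reset atomically
    System.out.println("[" + (int) dirty + ", " + (int) (dirty >> 32) + "]"); // [64, 256]
  }
}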
  @Override
  @Test(timeout = 30000)
  public void testMessageSizeOneDurablePartialConsumption() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();

    Connection connection = new ActiveMQConnectionFactory(brokerConnectURI).createConnection();
    connection.setClientID("clientId");
    connection.start();

    SubscriptionKey subKey = new SubscriptionKey("clientId", "sub1");
    org.apache.activemq.broker.region.Topic dest =
        publishTestMessagesDurable(
            connection, new String[] {"sub1"}, 200, publishedMessageSize, DeliveryMode.PERSISTENT);

    // verify the count and size - durable is offline so all 200 should be pending since none are in
    // prefetch
    verifyPendingStats(dest, subKey, 200, publishedMessageSize.get());

    // The expected value is only 100 because for durables an LRUCache is being used
    // with a max size of 100
    verifyStoreStats(dest, 100, publishedMessageSize.get());

    // consume 50 of the messages
    consumeDurableTestMessages(connection, "sub1", 50, publishedMessageSize);

    // 150 messages should still be pending
    verifyPendingStats(dest, subKey, 150, publishedMessageSize.get());

    // The expected value is only 100 because for durables an LRUCache is being used
    // with a max size of 100
    // verify the size is at least as large as 100 messages times the minimum size of 100
    verifyStoreStats(dest, 100, 100 * 100);

    connection.close();
  }
Example #12
    public void receive(Message msg) {
      byte[] buf = msg.getRawBuffer();
      byte type = buf[msg.getOffset()];

      switch (type) {
        case START:
          ByteBuffer tmp = ByteBuffer.wrap(buf, 1 + msg.getOffset(), Global.LONG_SIZE);
          num_msgs = (int) tmp.getLong();
          print = num_msgs / 10;
          current_value.set(0);
          total_bytes.set(0);
          start = System.currentTimeMillis();
          break;
        case DATA:
          long new_val = current_value.incrementAndGet();
          total_bytes.addAndGet(msg.getLength() - Global.INT_SIZE);
          if (print > 0 && new_val % print == 0) System.out.println("received " + new_val);
          if (new_val >= num_msgs) {
            long time = System.currentTimeMillis() - start;
            double msgs_sec = (current_value.get() / (time / 1000.0));
            double throughput = total_bytes.get() / (time / 1000.0);
            System.out.println(
                String.format(
                    "\nreceived %d messages in %d ms (%.2f msgs/sec), throughput=%s",
                    current_value.get(), time, msgs_sec, Util.printBytes(throughput)));
            break;
          }
          break;
        default:
          System.err.println("Type " + type + " is invalid");
      }
    }
Example #13
  private synchronized void syncLog(ILogRecord logRecord) throws ACIDException {
    ITransactionContext txnCtx = null;

    if (logRecord.getLogType() != LogType.FLUSH) {
      txnCtx = logRecord.getTxnCtx();
      if (txnCtx.getTxnState() == ITransactionManager.ABORTED
          && logRecord.getLogType() != LogType.ABORT) {
        throw new ACIDException(
            "Aborted job(" + txnCtx.getJobId() + ") tried to write non-abort type log record.");
      }
    }
    if (getLogFileOffset(appendLSN.get()) + logRecord.getLogSize() > logFileSize) {
      prepareNextLogFile();
      appendPage.isFull(true);
      getAndInitNewPage();
    } else if (!appendPage.hasSpace(logRecord.getLogSize())) {
      appendPage.isFull(true);
      getAndInitNewPage();
    }
    if (logRecord.getLogType() == LogType.UPDATE) {
      logRecord.setPrevLSN(txnCtx.getLastLSN());
    }
    appendPage.append(logRecord, appendLSN.get());

    if (logRecord.getLogType() == LogType.FLUSH) {
      logRecord.setLSN(appendLSN.get());
    }
    appendLSN.addAndGet(logRecord.getLogSize());
  }
Example #14
  @Override
  public boolean checkMemory(final Runnable runWhenAvailable) {
    if (addressFullMessagePolicy == AddressFullMessagePolicy.BLOCK && maxSize != -1) {
      if (sizeInBytes.get() > maxSize) {
        OurRunnable ourRunnable = new OurRunnable(runWhenAvailable);

        onMemoryFreedRunnables.add(ourRunnable);

        // We check again to avoid a race condition where the size can come down just after the
        // element
        // has been added, but the check to execute was done before the element was added
        // NOTE! We do not fix this race by locking the whole thing, doing this check provides
        // MUCH better performance in a highly concurrent environment
        if (sizeInBytes.get() <= maxSize) {
          // run it now
          ourRunnable.run();
        } else if (!blocking.get()) {
          ActiveMQServerLogger.LOGGER.blockingMessageProduction(
              address, sizeInBytes.get(), maxSize);
          blocking.set(true);
        }

        return true;
      }
    } else if (addressFullMessagePolicy == AddressFullMessagePolicy.FAIL && maxSize != -1) {
      if (sizeInBytes.get() > maxSize) {
        return false;
      }
    }

    runWhenAvailable.run();

    return true;
  }
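The add-then-recheck in the BLOCK branch is worth spelling out: the callback is enqueued first, then the size is read again, so a release that lands between the first check and the add cannot strand the callback. A minimal, self-contained sketch of the pattern; it is simplified (no policies), and a queue remove guards against running the callback twice, which the original delegates to OurRunnable:

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicLong;

class MemoryGateSketch {
  static final long MAX_SIZE = 1000;
  static final AtomicLong sizeInBytes = new AtomicLong();
  static final Queue<Runnable> onMemoryFreed = new ConcurrentLinkedQueue<>();

  static void checkMemory(Runnable runWhenAvailable) {
    if (sizeInBytes.get() > MAX_SIZE) {
      onMemoryFreed.add(runWhenAvailable);
      // re-check: memory may have been freed after the first check but before
      // the add; if so, run the callback now instead of stranding it
      if (sizeInBytes.get() <= MAX_SIZE && onMemoryFreed.remove(runWhenAvailable)) {
        runWhenAvailable.run();
      }
      return;
    }
    runWhenAvailable.run();
  }

  public static void main(String[] args) {
    sizeInBytes.set(2000);
    checkMemory(() -> System.out.println("deferred"));
    sizeInBytes.set(0); // memory released: drain the queue
    Runnable r;
    while ((r = onMemoryFreed.poll()) != null) r.run(); // prints "deferred"
  }
}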
  /** @param sinceSCN the SCN to start from; if <= 0, the saved max SCN is used instead */
  @Override
  public synchronized void start(long sinceSCN) {
    _log.info("Start golden gate evert producer requested.");
    if (_currentState == State.RUNNING) {
      _log.error("Thread already running! ");
      return;
    }
    _scn.set(TrailFilePositionSetter.USE_LATEST_SCN);

    if (sinceSCN > 0) {
      _scn.set(sinceSCN);
    } else {
      if (getMaxScnReaderWriter() != null) {
        try {
          long scn = getMaxScnReaderWriter().getMaxScn();

          // If the max scn is greater than 0, then honor it.
          if (scn > 0) {
            // apply the restart SCN offset
            long newScn =
                (scn >= _pConfig.getRestartScnOffset()) ? scn - _pConfig.getRestartScnOffset() : 0;
            _log.info(
                "Checkpoint read = "
                    + scn
                    + " restartScnOffset= "
                    + _pConfig.getRestartScnOffset()
                    + " Adjusted SCN= "
                    + newScn);
            if (newScn > 0) {
              _scn.set(newScn);
            }
          } else {
            // If the max scn is < 0, this is a special case used to tell the
            // trail file notifier that you want to override the default
            // behaviour of starting with the latest scn.
            _log.info(
                "Overriding default behaviour (start with latest scn), using scn : "
                    + scn
                    + " to start the relay");
            if (scn != TrailFilePositionSetter.USE_EARLIEST_SCN
                && scn != TrailFilePositionSetter.USE_LATEST_SCN)
              throw new DatabusException(
                  "The scn you have passed is neither the EARLIEST nor the LATEST setting, cannot proceed with using this scn");

            _scn.set(scn);
          }

        } catch (DatabusException e) {
          _log.warn("Could not read saved maxScn: Defaulting to startSCN=" + _scn.get());
        }
      }
    }

    if (_worker == null) {
      _log.info("Starting with scn = " + _scn.get());
      _worker = new WorkerThread();
      _worker.setDaemon(true);
      _worker.start();
    }
  }
 /** getAndAdd returns previous value and adds given value */
 public void testGetAndAdd() {
   AtomicLong ai = new AtomicLong(1);
   assertEquals(1, ai.getAndAdd(2));
   assertEquals(3, ai.get());
   assertEquals(3, ai.getAndAdd(-4));
   assertEquals(-1, ai.get());
 }
Example #17
  public synchronized PageBufferClientStatus getStatus() {
    String state;
    if (closed) {
      state = "closed";
    } else if (future != null) {
      state = "running";
    } else if (scheduled) {
      state = "scheduled";
    } else if (completed) {
      state = "completed";
    } else {
      state = "queued";
    }
    String httpRequestState = "not scheduled";
    if (future != null) {
      httpRequestState = future.getState();
    }

    long rejectedRows = rowsRejected.get();
    int rejectedPages = pagesRejected.get();

    return new PageBufferClientStatus(
        location,
        state,
        lastUpdate,
        rowsReceived.get(),
        pagesReceived.get(),
        rejectedRows == 0 ? OptionalLong.empty() : OptionalLong.of(rejectedRows),
        rejectedPages == 0 ? OptionalInt.empty() : OptionalInt.of(rejectedPages),
        requestsScheduled.get(),
        requestsCompleted.get(),
        requestsFailed.get(),
        httpRequestState);
  }
  @Override
  @Test(timeout = 30000)
  public void testMessageSizeOneDurable() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();
    Connection connection = new ActiveMQConnectionFactory(brokerConnectURI).createConnection();
    connection.setClientID("clientId");
    connection.start();

    SubscriptionKey subKey = new SubscriptionKey("clientId", "sub1");
    org.apache.activemq.broker.region.Topic dest =
        publishTestMessagesDurable(
            connection, new String[] {"sub1"}, 200, publishedMessageSize, DeliveryMode.PERSISTENT);

    verifyPendingStats(dest, subKey, 200, publishedMessageSize.get());

    // The expected value is only 100 because for durables an LRUCache is being used
    // with a max size of 100
    verifyStoreStats(dest, 100, publishedMessageSize.get());

    // consume 100 messages
    consumeDurableTestMessages(connection, "sub1", 100, publishedMessageSize);

    // 100 should be left
    verifyPendingStats(dest, subKey, 100, publishedMessageSize.get());
    verifyStoreStats(dest, 100, publishedMessageSize.get());

    connection.close();
  }
  @Override
  @Test(timeout = 30000)
  public void testMessageSizeTwoDurables() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();

    Connection connection = new ActiveMQConnectionFactory(brokerConnectURI).createConnection();
    connection.setClientID("clientId");
    connection.start();

    org.apache.activemq.broker.region.Topic dest =
        publishTestMessagesDurable(
            connection,
            new String[] {"sub1", "sub2"},
            200,
            publishedMessageSize,
            DeliveryMode.PERSISTENT);

    // verify the count and size
    SubscriptionKey subKey = new SubscriptionKey("clientId", "sub1");
    verifyPendingStats(dest, subKey, 200, publishedMessageSize.get());

    // consume messages just for sub1
    consumeDurableTestMessages(connection, "sub1", 200, publishedMessageSize);

    // There is still a durable that hasn't consumed so the messages should exist
    SubscriptionKey subKey2 = new SubscriptionKey("clientId", "sub2");
    verifyPendingStats(dest, subKey, 0, 0);
    verifyPendingStats(dest, subKey2, 200, publishedMessageSize.get());

    // The expected value is only 100 because for durables an LRUCache is being used
    // with a max size of 100
    verifyStoreStats(dest, 100, publishedMessageSize.get());

    connection.stop();
  }
  /** Returns the probe's next n-sigma sample (e.g., n = 2 for 2-sigma). */
  public final double sampleSigma(int n) {
    synchronized (_lock) {
      long count = _count.get();
      long lastCount = _lastStdCount;
      _lastStdCount = count;

      double sum = _sum.get();
      double lastSum = _lastStdSum;
      _lastStdSum = sum;

      double sumSquare = _sumSquare;
      _sumSquare = 0;

      if (count == lastCount) return 0;

      double avg = (sum - lastSum) / (count - lastCount);
      double part = (count - lastCount) * sumSquare - sum * sum;

      if (part < 0) part = 0;

      double std = Math.sqrt(part) / (count - lastCount);

      return _scale * (avg + n * std);
    }
  }
  /**
   * Tests Functional#reduce with the summation reduction; we traverse all keys (in each direction)
   * to make sure the sum and count are correct and keys and values match up.
   */
  public void testSummationReduction() throws Exception {
    final AtomicLong counter = new AtomicLong();
    final Exchange exchange = getExchange(db, true);

    Reduction<String, Integer, Integer> summation =
        new Reduction<String, Integer, Integer>() {
          @Override
          public Integer reduce(Pair<String, Integer> row, Integer accum) {
            Assert.assertEquals(row.getKey(), getKey(row.getValue()));
            counter.getAndIncrement();

            return accum + row.getValue();
          }
        };

    int ascendingSum = Functional.reduce(exchange, getFullTraversal(Direction.ASC), summation, 0);
    Assert.assertEquals(ascendingSum, 499500);
    Assert.assertEquals(counter.get(), 1000);

    counter.set(0);

    int descendingSum = Functional.reduce(exchange, getFullTraversal(Direction.DESC), summation, 0);

    Assert.assertEquals(descendingSum, 499500);
    Assert.assertEquals(counter.get(), 1000);
  }
Example #22
 /**
  * This tracks block hits and misses against the cache for this Directory only.
  *
  * @param isHit true if recording a hit; otherwise a miss is recorded
  */
 public void recordBlockCacheHit(boolean isHit) {
   if (isHit) {
     long hits = hitCounter.incrementAndGet();
     if (isHitRateLoggingEnabled() && hits % 5000 == 0) {
       long misses = missCounter.get();
       double hr = MTLRUCache.calculateHitRate(hits, misses);
       int hrRounded = (int) (hr * 100);
       boolean showHR;
       if (hr < 0.5) {
         // if the hit rate is poor, show it frequently
         showHR = true;
       } else {
         // otherwise, show it infrequently
         showHR = hits % 20000 == 0;
       }
       if (showHR) {
         int compRate = 0;
         if (unCompressedTotal.get() != 0) {
           compRate =
               (int) (100.0 * (double) compressedTotal.get() / (double) unCompressedTotal.get());
         }
         logger.log(
             Level.INFO,
             "Hit rate for "
                 + directoryName
                 + " Directory is "
                 + hrRounded
                 + "%, total reads "
                 + (hits + misses)
                 + ", Compress Rate:"
                 + compRate
                 + "%");
       }
     }
   } else missCounter.incrementAndGet();
 }
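The guard above is counter-sampled logging: the hot path only increments an AtomicLong, and the log line with its string building runs at most once per 5000 hits (once per 20000 when the rate is healthy). A stripped-down, self-contained sketch of the technique:

import java.util.concurrent.atomic.AtomicLong;

class SampledLogSketch {
  static final AtomicLong hitCounter = new AtomicLong();
  static final AtomicLong missCounter = new AtomicLong();

  static void record(boolean isHit) {
    if (!isHit) {
      missCounter.incrementAndGet();
      return;
    }
    long hits = hitCounter.incrementAndGet();
    if (hits % 5000 == 0) { // sample: at most one log line per 5000 hits
      long misses = missCounter.get();
      double hitRate = hits / (double) (hits + misses);
      System.out.printf("hit rate %.1f%% after %d reads%n", 100 * hitRate, hits + misses);
    }
  }

  public static void main(String[] args) {
    for (int i = 0; i < 10000; i++) record(i % 10 != 0); // roughly 90% hits
  }
}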
Example #23
  @Test
  public void testFunctions() {
    final AtomicLong lf = new AtomicLong(10L);
    Assert.assertEquals(Anoa.of(10L), handler.function(lf::getAndAdd).apply(handler.of(1L)));
    Assert.assertEquals(11L, lf.get());
    Assert.assertEquals(
        Anoa.empty(Stream.of(Meta.OTHER)),
        handler
            .functionChecked(
                __ -> {
                  throw new IOException();
                })
            .apply(handler.of(1L)));

    final AtomicLong lf2 = new AtomicLong(10L);
    Assert.assertEquals(
        Anoa.of(10L),
        handler.biFunction((Long x, Long y) -> lf2.getAndAdd(x + y)).apply(Anoa.of(1L), 1L));
    Assert.assertEquals(12L, lf2.get());

    Assert.assertEquals(
        Anoa.empty(Stream.of(Meta.OTHER)),
        handler
            .biFunctionChecked(
                (_1, _2) -> {
                  throw new IOException();
                })
            .apply(Anoa.of(1L), 1L));
  }
Example #24
  @Test
  public void testConsumers() {
    final AtomicLong lc = new AtomicLong(0L);
    Assert.assertEquals(Anoa.of(1L), handler.consumer(lc::addAndGet).apply(handler.of(1L)));
    Assert.assertEquals(1L, lc.get());
    Assert.assertEquals(
        Anoa.empty(Stream.of(Meta.OTHER)),
        handler
            .consumerChecked(
                __ -> {
                  throw new IOException();
                })
            .apply(handler.of(1L)));

    final AtomicLong lc2 = new AtomicLong(0L);
    Assert.assertEquals(
        Anoa.of(1L),
        handler.biConsumer((Long x, Long y) -> lc2.addAndGet(x + y)).apply(Anoa.of(1L), 1L));
    Assert.assertEquals(2L, lc2.get());

    Assert.assertEquals(
        Anoa.empty(Stream.of(Meta.OTHER)),
        handler
            .biConsumerChecked(
                (_1, _2) -> {
                  throw new IOException();
                })
            .apply(Anoa.of(1L), 1L));
  }
Example #25
    @Override
    protected void doGet(HttpServletRequest request, HttpServletResponse response)
        throws IOException, ServletException {
      String uri = getDecodedUri(request);
      try {
        Pattern p = Pattern.compile(".*/NodePersistentStorage.bin/([^/]+)/([^/]+)");
        Matcher m = p.matcher(uri);
        boolean b = m.matches();
        if (!b) {
          setResponseStatus(response, HttpServletResponse.SC_BAD_REQUEST);
          response.getWriter().write("Improperly formatted URI");
          return;
        }

        String categoryName = m.group(1);
        String keyName = m.group(2);
        NodePersistentStorage nps = H2O.getNPS();
        AtomicLong length = new AtomicLong();
        InputStream is = nps.get(categoryName, keyName, length);
        if (length.get() > (long) Integer.MAX_VALUE) {
          throw new Exception("NPS value size exceeds Integer.MAX_VALUE");
        }
        response.setContentType("application/octet-stream");
        response.setContentLength((int) length.get());
        response.addHeader("Content-Disposition", "attachment; filename=" + keyName + ".flow");
        setResponseStatus(response, HttpServletResponse.SC_OK);
        OutputStream os = response.getOutputStream();
        water.util.FileUtils.copyStream(is, os, 2048);
      } catch (Exception e) {
        sendErrorResponse(response, e, uri);
      } finally {
        logRequest("GET", request, response);
      }
    }
Example #26
    @Override
    public void preGetOp(
        final ObserverContext<RegionCoprocessorEnvironment> e,
        final Get get,
        final List<Cell> results)
        throws IOException {

      if (e.getEnvironment().getRegion().getRegionInfo().getReplicaId() == 0) {
        CountDownLatch latch = cdl.get();
        try {
          if (sleepTime.get() > 0) {
            LOG.info("Sleeping for " + sleepTime.get() + " ms");
            Thread.sleep(sleepTime.get());
          } else if (latch.getCount() > 0) {
            LOG.info("Waiting for the counterCountDownLatch");
            latch.await(2, TimeUnit.MINUTES); // To help the tests to finish.
            if (latch.getCount() > 0) {
              throw new RuntimeException("Can't wait more");
            }
          }
        } catch (InterruptedException e1) {
          LOG.error(e1);
        }
      } else {
        LOG.info("We're not the primary replicas.");
      }
    }
 private boolean partitionChanged(long partition) {
   if (currentPartition.get() != partition) {
     LOG.info("Partition changed from {} to {}", currentPartition.get(), partition);
     currentPartition.set(partition);
     return true;
   }
   return false;
 }
 /** repeated weakCompareAndSet succeeds in changing value when equal to expected */
 public void testWeakCompareAndSet() {
   AtomicLong ai = new AtomicLong(1);
   while (!ai.weakCompareAndSet(1, 2)) ;
   while (!ai.weakCompareAndSet(2, -4)) ;
   assertEquals(-4, ai.get());
   while (!ai.weakCompareAndSet(-4, 7)) ;
   assertEquals(7, ai.get());
 }
Example #29
 private void prepareNextLogFile() {
   appendLSN.addAndGet(logFileSize - getLogFileOffset(appendLSN.get()));
   appendChannel = getFileChannel(appendLSN.get(), true);
   appendPage.isLastPage(true);
   // [Notice]
   // the current log file channel is closed once
   // LogBuffer.flush() has completely flushed the last page of the file.
 }
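The addAndGet above advances appendLSN by exactly the unused tail of the current file, so the LSN lands on the next file boundary. A worked instance with illustrative sizes, not the source's configuration:

public class LogBoundarySketch {
  static final long LOG_FILE_SIZE = 1048576; // 1 MiB per log file (illustrative)

  static long getLogFileOffset(long lsn) {
    return lsn % LOG_FILE_SIZE;
  }

  public static void main(String[] args) {
    long appendLSN = 2500000; // somewhere inside the third file
    // skip the unused tail of the current file, as prepareNextLogFile does
    appendLSN += LOG_FILE_SIZE - getLogFileOffset(appendLSN);
    System.out.println(appendLSN % LOG_FILE_SIZE); // 0: start of a file
    System.out.println(appendLSN / LOG_FILE_SIZE); // 3: the fourth file
  }
}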
 /** get returns the last value set */
 public void testGetSet() {
   AtomicLong ai = new AtomicLong(1);
   assertEquals(1, ai.get());
   ai.set(2);
   assertEquals(2, ai.get());
   ai.set(-3);
   assertEquals(-3, ai.get());
 }