@Override
 public void endTransaction(boolean success) {
   transactionCount.getAndIncrement();
   if (success) {
     committedTransactionCount.getAndIncrement();
   }
 }
 /** getAndIncrement returns previous value and increments */
 public void testGetAndIncrement() {
   AtomicLong ai = new AtomicLong(1);
   assertEquals(1, ai.getAndIncrement());
   assertEquals(2, ai.get());
   ai.set(-2);
   assertEquals(-2, ai.getAndIncrement());
   assertEquals(-1, ai.getAndIncrement());
   assertEquals(0, ai.getAndIncrement());
   assertEquals(1, ai.get());
 }
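The test above hinges on the return-value contract: getAndIncrement() hands back the value as it was before the bump, while incrementAndGet() returns the value after it. A minimal sketch of the difference (class and variable names here are illustrative, not taken from the examples below):

import java.util.concurrent.atomic.AtomicLong;

public class GetVsIncrementDemo {
  public static void main(String[] args) {
    AtomicLong counter = new AtomicLong(10);

    // getAndIncrement: returns the old value, then stores old + 1
    long before = counter.getAndIncrement(); // before == 10, counter is now 11

    // incrementAndGet: stores old + 1, then returns the new value
    long after = counter.incrementAndGet(); // after == 12, counter is now 12

    System.out.println(before + " " + after + " " + counter.get()); // prints: 10 12 12
  }
}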
Example No. 3
 @Override
 public Timestamp getTimestamp(DeviceId deviceId) {
   if (DID1.equals(deviceId)) {
     return new MastershipBasedTimestamp(1, ticker.getAndIncrement());
   } else if (DID2.equals(deviceId)) {
     return new MastershipBasedTimestamp(2, ticker.getAndIncrement());
   } else {
     throw new IllegalStateException();
   }
 }
Example No. 4
    public void doAnAction() throws Exception {
      Scan s = new Scan();
      for (byte[] family : targetFamilies) {
        s.addFamily(family);
      }
      ResultScanner scanner = table.getScanner(s);

      for (Result res : scanner) {
        byte[] lastRow = null, lastFam = null, lastQual = null;
        byte[] gotValue = null;
        for (byte[] family : targetFamilies) {
          byte[] qualifier = QUAL;
          byte[] thisValue = res.getValue(family, qualifier);
          if (gotValue != null && thisValue != null && !Bytes.equals(gotValue, thisValue)) {

            StringBuilder msg = new StringBuilder();
            msg.append("Failed on scan ")
                .append(numScans)
                .append(" after scanning ")
                .append(numRowsScanned)
                .append(" rows!\n");
            msg.append("Current was ")
                .append(Bytes.toString(res.getRow()))
                .append("/")
                .append(Bytes.toString(family))
                .append(":")
                .append(Bytes.toString(qualifier))
                .append(" = ")
                .append(Bytes.toString(thisValue))
                .append("\n");
            msg.append("Previous was ")
                .append(Bytes.toString(lastRow))
                .append("/")
                .append(Bytes.toString(lastFam))
                .append(":")
                .append(Bytes.toString(lastQual))
                .append(" = ")
                .append(Bytes.toString(gotValue));
            throw new RuntimeException(msg.toString());
          }

          lastFam = family;
          lastQual = qualifier;
          lastRow = res.getRow();
          gotValue = thisValue;
        }
        numRowsScanned.getAndIncrement();
      }
      numScans.getAndIncrement();
    }
 @Override
 @Caching(
     cacheable = {@Cacheable(cacheNames = "primary", key = "#root.methodName")},
     evict = {@CacheEvict("secondary")})
 public Long multiCacheAndEvict(Object arg1) {
   return counter.getAndIncrement();
 }
 @Override
 // FIXME	@Caching(evict = { @CacheEvict("primary"), @CacheEvict(cacheNames = "secondary", key =
 // "#p0"), @CacheEvict(cacheNames = "primary", key = "#p0 + 'A'") })
 @Caching(evict = {@CacheEvict("primary"), @CacheEvict(cacheNames = "secondary", key = "#p0")})
 public Long multiEvict(Object arg1) {
   return counter.getAndIncrement();
 }
 @Override
 @Cacheable(
     cacheNames = "testCache",
     key = "#root.methodName + #root.method.name + #root.targetClass + #root.target")
 public Long rootVars(Object arg1) {
   return counter.getAndIncrement();
 }
Example No. 8
  @Override
  public void run() {
    while (true) {
      final long currentIndex = index.getAndIncrement();
      if (currentIndex >= length) {
        break;
      }

      System.out.println(
          "\t[" + currentIndex + "/" + length + "] judging " + currentIndex + " elements");

      final ClonePair currentClonePair = clonePairs.get(currentIndex);
      final InstantCodeFragmentInfo fragment1 = currentClonePair.getFragment1();

      if (clonePairsCategorizedByPath.containsKey(fragment1.getFilePath())) {
        final Set<ClonePair> tmpPairs = clonePairsCategorizedByPath.get(fragment1.getFilePath());

        for (final ClonePair tmpPair : tmpPairs) {
          if (tmpPair.equals(currentClonePair)) {
            continue;
          }
          if (tmpPair.subsume(currentClonePair)) {
            subsumedClonePairs.add(currentIndex);
            break;
          }
        }
      }
    }
  }
Example No. 9
 private void preloadClasspathCache() throws Exception {
   sendRequest(
       ServerRequest.newBuilder()
           .setRequestId(nextRequestId.getAndIncrement())
           .setPreloadClasspathCacheRequest(PreloadClasspathCacheRequest.getDefaultInstance())
           .build());
 }
Example No. 10
 private void gc() throws Exception {
   sendRequest(
       ServerRequest.newBuilder()
           .setRequestId(nextRequestId.getAndIncrement())
           .setGcRequest(GcRequest.getDefaultInstance())
           .build());
 }
Example No. 11
 @Override
 public void onPublish(UTF8Buffer topic, Buffer body, Runnable ack) {
   //		String message = new String(body.getData());
   //		System.out.println("Received: "+message);
   count.getAndIncrement();
   ack.run();
 }
 ManagedClientConnection getConnection(final HttpRoute route, final Object state) {
   if (route == null) {
     throw new IllegalArgumentException("Route may not be null.");
   }
   synchronized (this) {
     assertNotShutdown();
     if (this.log.isDebugEnabled()) {
       this.log.debug("Get connection for route " + route);
     }
     if (this.conn != null) {
       throw new IllegalStateException(MISUSE_MESSAGE);
     }
     if (this.poolEntry != null && !this.poolEntry.getPlannedRoute().equals(route)) {
       this.poolEntry.close();
       this.poolEntry = null;
     }
     if (this.poolEntry == null) {
       String id = Long.toString(COUNTER.getAndIncrement());
       OperatedClientConnection conn = this.connOperator.createConnection();
       this.poolEntry = new HttpPoolEntry(this.log, id, route, conn, 0, TimeUnit.MILLISECONDS);
     }
     long now = System.currentTimeMillis();
     if (this.poolEntry.isExpired(now)) {
       this.poolEntry.close();
       this.poolEntry.getTracker().reset();
     }
     this.conn = new ManagedClientConnectionImpl(this, this.connOperator, this.poolEntry);
     return this.conn;
   }
 }
 @Override
 @Caching(
     cacheable = {@Cacheable(cacheNames = "primary", condition = "#p0 == 3")},
     evict = {@CacheEvict("secondary")})
 public Long multiConditionalCacheAndEvict(Object arg1) {
   return counter.getAndIncrement();
 }
 /**
  * Generates a new Instantiator class for the given custom class.
  *
  * <p>The generated class has the following definition:
  *
  * <pre>
  * package org.springframework.data.gemfire.serialization;
  *
  * public class &lt;<i>T</i>>Instantiator$Synthetic<i>Counter</i> extends Instantiator implements Serializable {
  *
  *  private static final Class&lt;<i>T</i>> clazz = T.class;
  *  private static final int classId = <i>value</i>;
  *
  *  public DateInstantiator() {
  *     this(clazz, classId);
  *  }
  *
  *  public DateInstantiator(Class<? extends DataSerializable> c, int classId) {
  *     super(c, classId);
  *  }
  *
  *  public <i>T</i> newInstance() {
  *     return new <i>T</i>();
  *  }
  * }
  * </pre>
  *
   * @param clazz the custom class to generate an Instantiator for
   * @param classId the class id passed to the generated Instantiator's constructor
   * @return the freshly generated Instantiator subclass for the given class
   */
 Class<?> createCustomInstantiatorClass(Class<? extends DataSerializable> clazz, int classId) {
   String classInternalName =
       PKG + clazz.getSimpleName() + CLASS_LABEL + counter.getAndIncrement();
   byte[] bytecode = generateClassBytecode(classInternalName, clazz, classId);
   // translate internal name to binary form
   return classLoader.loadClass(classInternalName.replace('/', '.'), bytecode);
 }
Example No. 15
 @Override
 public Long add(Product product) {
   long id = counter.getAndIncrement();
   product.setId(id);
   products.put(id, product);
   return id;
 }
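The add(...) method above is the classic in-memory id allocator: getAndIncrement guarantees each concurrent caller a distinct id before the entity is stored. A self-contained sketch of the same idea (the Product class and its fields here are hypothetical, used only for illustration):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

class InMemoryProductStore {
  // Hypothetical minimal entity, only for this sketch.
  static class Product {
    Long id;
    String name;
    Product(String name) { this.name = name; }
  }

  private final AtomicLong counter = new AtomicLong(1);
  private final Map<Long, Product> products = new ConcurrentHashMap<>();

  Long add(Product product) {
    long id = counter.getAndIncrement(); // unique even under concurrent calls
    product.id = id;
    products.put(id, product);
    return id;
  }
}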
Example No. 16
  public MessageExchangeDAO createMessageExchange(char dir) {
    final String id = Long.toString(counter.getAndIncrement());
    MessageExchangeDAO mex = new MessageExchangeDAOImpl(dir, id);
    long now = System.currentTimeMillis();
    _mexStore.put(id, mex);
    _mexAge.put(id, now);

    if (now > _lastRemoval + (_mexTtl / 10)) {
      _lastRemoval = now;
      Object[] oldMexs = _mexAge.keySet().toArray();
      for (int i = oldMexs.length - 1; i > 0; i--) {
        String oldMex = (String) oldMexs[i];
        Long age = _mexAge.get(oldMex);
        if (age != null && now - age > _mexTtl) {
          removeMessageExchange(oldMex);
          _mexAge.remove(oldMex);
        }
      }
    }

    // Removing right away on rollback
    onRollback(
        new Runnable() {
          public void run() {
            removeMessageExchange(id);
            _mexAge.remove(id);
          }
        });

    return mex;
  }
Example No. 17
  /**
   * Returns a worker id for the given worker.
   *
   * @param workerNetAddress the worker {@link WorkerNetAddress}
   * @return the worker id for this worker
   */
  public long getWorkerId(WorkerNetAddress workerNetAddress) {
    // TODO(gpang): This NetAddress cloned in case thrift re-uses the object. Does thrift re-use it?
    MasterWorkerInfo existingWorker = mWorkers.getFirstByField(ADDRESS_INDEX, workerNetAddress);
    if (existingWorker != null) {
      // This worker address is already mapped to a worker id.
      long oldWorkerId = existingWorker.getId();
      LOG.warn("The worker {} already exists as id {}.", workerNetAddress, oldWorkerId);
      return oldWorkerId;
    }

    MasterWorkerInfo lostWorker = mLostWorkers.getFirstByField(ADDRESS_INDEX, workerNetAddress);
    if (lostWorker != null) {
      // this is one of the lost workers
      synchronized (lostWorker) {
        final long lostWorkerId = lostWorker.getId();
        LOG.warn("A lost worker {} has requested its old id {}.", workerNetAddress, lostWorkerId);

        // Update the timestamp of the worker before it is considered an active worker.
        lostWorker.updateLastUpdatedTimeMs();
        mWorkers.add(lostWorker);
        mLostWorkers.remove(lostWorker);
        return lostWorkerId;
      }
    }

    // Generate a new worker id.
    long workerId = mNextWorkerId.getAndIncrement();
    mWorkers.add(new MasterWorkerInfo(workerId, workerNetAddress));

    LOG.info("getWorkerId(): WorkerNetAddress: {} id: {}", workerNetAddress, workerId);
    return workerId;
  }
 @Override
 public Thread newThread(Runnable runnable) {
   Thread thread = new Thread(runnable, "EventloopThread-" + count.getAndIncrement());
   thread.setDaemon(daemon);
   thread.setPriority(priority);
   return thread;
 }
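In this ThreadFactory the counter only numbers the threads, so getAndIncrement simply keeps the names unique when several workers are created at once. A minimal sketch of how such a factory is typically handed to an executor (names are illustrative; the original also sets a daemon flag and priority from fields not shown here):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicLong;

public class NamedThreadFactory implements ThreadFactory {
  private final AtomicLong count = new AtomicLong(0);

  @Override
  public Thread newThread(Runnable runnable) {
    // Each call gets a distinct suffix, even when pools spin up workers concurrently.
    return new Thread(runnable, "EventloopThread-" + count.getAndIncrement());
  }

  public static void main(String[] args) {
    // The executor calls newThread whenever it needs a fresh worker thread.
    ExecutorService pool = Executors.newFixedThreadPool(2, new NamedThreadFactory());
    pool.submit(() -> System.out.println(Thread.currentThread().getName()));
    pool.shutdown();
  }
}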
Example No. 19
  /**
   * Write data to BDB.
   *
   * @param obj the object to persist
   */
  public static void insert(Object obj) {
    if (!Constants.IN_BDB) {
      MonitorLog.addStat(
          Constants.DATA_PERSISTENCE_LOG, new String[] {"Bdb Ignore"}, new Long[] {1L});
      return;
    }
    Long kIndex = keyIndex.getAndIncrement();
    try {
      byte[] vbytes = serialData(obj);
      byte[] kbytes = serialData("j" + kIndex);

      DatabaseEntry keyEntry = new DatabaseEntry(kbytes);
      DatabaseEntry valueEntry = new DatabaseEntry(vbytes);

      OperationStatus rtn = database.put(null, keyEntry, valueEntry);
      if (rtn != OperationStatus.SUCCESS) {
        logger.warn("write to bdb failed: " + rtn.name());
        MonitorLog.addStat(
            Constants.DATA_PERSISTENCE_LOG, new String[] {"Bdb Write Fail"}, new Long[] {1L});
      } else {
        MonitorLog.addStat(
            Constants.DATA_PERSISTENCE_LOG, new String[] {"Bdb Success"}, new Long[] {1L});
      }
    } catch (Exception e) {
      logger.error("write to bdb exception", e);
      MonitorLog.addStat(
          Constants.DATA_PERSISTENCE_LOG, new String[] {"Bdb Write Exception"}, new Long[] {1L});
    }
  }
Example No. 20
  /**
   * Returns a worker id for the given worker.
   *
   * @param workerNetAddress the worker {@link WorkerNetAddress}
   * @return the worker id for this worker
   */
  public long getWorkerId(WorkerNetAddress workerNetAddress) {
    // TODO(gene): This NetAddress cloned in case thrift re-uses the object. Does thrift re-use it?
    synchronized (mWorkers) {
      if (mWorkers.contains(mAddressIndex, workerNetAddress)) {
        // This worker address is already mapped to a worker id.
        long oldWorkerId = mWorkers.getFirstByField(mAddressIndex, workerNetAddress).getId();
        LOG.warn("The worker {} already exists as id {}.", workerNetAddress, oldWorkerId);
        return oldWorkerId;
      }

      if (mLostWorkers.contains(mAddressIndex, workerNetAddress)) {
        // this is one of the lost workers
        final MasterWorkerInfo lostWorkerInfo =
            mLostWorkers.getFirstByField(mAddressIndex, workerNetAddress);
        final long lostWorkerId = lostWorkerInfo.getId();
        LOG.warn("A lost worker {} has requested its old id {}.", workerNetAddress, lostWorkerId);

        // Update the timestamp of the worker before it is considered an active worker.
        lostWorkerInfo.updateLastUpdatedTimeMs();
        mWorkers.add(lostWorkerInfo);
        mLostWorkers.remove(lostWorkerInfo);
        return lostWorkerId;
      }

      // Generate a new worker id.
      long workerId = mNextWorkerId.getAndIncrement();
      mWorkers.add(new MasterWorkerInfo(workerId, workerNetAddress));

      LOG.info("getWorkerId(): WorkerNetAddress: {} id: {}", workerNetAddress, workerId);
      return workerId;
    }
  }
Example No. 21
  @Override
  public synchronized void addSplits(PlanNodeId sourceId, Iterable<Split> splits) {
    try (SetThreadName ignored = new SetThreadName("HttpRemoteTask-%s", taskId)) {
      requireNonNull(sourceId, "sourceId is null");
      requireNonNull(splits, "splits is null");
      checkState(
          !noMoreSplits.contains(sourceId), "noMoreSplits has already been set for %s", sourceId);

      // only add pending split if not done
      if (!getTaskInfo().getState().isDone()) {
        int added = 0;
        for (Split split : splits) {
          if (pendingSplits.put(
              sourceId, new ScheduledSplit(nextSplitId.getAndIncrement(), split))) {
            added++;
          }
        }
        if (sourceId.equals(planFragment.getPartitionedSource())) {
          pendingSourceSplitCount += added;
          fireSplitCountChanged(added);
        }
        needsUpdate.set(true);
      }

      scheduleUpdate();
    }
  }
Example No. 22
  public void write(
      final long position,
      final long size,
      final ByteBuffer directByteBuffer,
      final AIOCallback aioCallback) {
    if (aioCallback == null) {
      throw new NullPointerException("Null Callback");
    }

    checkOpened();
    if (poller == null) {
      startPoller();
    }

    pendingWrites.countUp();

    if (writeExecutor != null) {
      maxIOSemaphore.acquireUninterruptibly();

      writeExecutor.execute(
          new Runnable() {
            public void run() {
              long sequence = nextWritingSequence.getAndIncrement();

              try {
                write(handler, sequence, position, size, directByteBuffer, aioCallback);
              } catch (HornetQException e) {
                callbackError(
                    aioCallback, sequence, directByteBuffer, e.getType().getCode(), e.getMessage());
              } catch (RuntimeException e) {
                callbackError(
                    aioCallback,
                    sequence,
                    directByteBuffer,
                    HornetQExceptionType.INTERNAL_ERROR.getCode(),
                    e.getMessage());
              }
            }
          });
    } else {
      maxIOSemaphore.acquireUninterruptibly();

      long sequence = nextWritingSequence.getAndIncrement();

      try {
        write(handler, sequence, position, size, directByteBuffer, aioCallback);
      } catch (HornetQException e) {
        callbackError(
            aioCallback, sequence, directByteBuffer, e.getType().getCode(), e.getMessage());
      } catch (RuntimeException e) {
        callbackError(
            aioCallback,
            sequence,
            directByteBuffer,
            HornetQExceptionType.INTERNAL_ERROR.getCode(),
            e.getMessage());
      }
    }
  }
Example No. 23
    public void doAnAction() throws Exception {
      long iteration = numBulkLoads.getAndIncrement();
      Path dir = UTIL.getDataTestDirOnTestFS(String.format("bulkLoad_%08d", iteration));

      // create HFiles for different column families
      FileSystem fs = UTIL.getTestFileSystem();
      byte[] val = Bytes.toBytes(String.format("%010d", iteration));
      final List<Pair<byte[], String>> famPaths = new ArrayList<Pair<byte[], String>>(NUM_CFS);
      for (int i = 0; i < NUM_CFS; i++) {
        Path hfile = new Path(dir, family(i));
        byte[] fam = Bytes.toBytes(family(i));
        createHFile(fs, hfile, fam, QUAL, val, 1000);
        famPaths.add(new Pair<>(fam, hfile.toString()));
      }

      // bulk load HFiles
      final ClusterConnection conn = (ClusterConnection) UTIL.getAdmin().getConnection();
      RegionServerCallable<Void> callable =
          new RegionServerCallable<Void>(conn, tableName, Bytes.toBytes("aaa")) {
            @Override
            public Void call(int callTimeout) throws Exception {
              LOG.debug(
                  "Going to connect to server "
                      + getLocation()
                      + " for row "
                      + Bytes.toStringBinary(getRow()));
              byte[] regionName = getLocation().getRegionInfo().getRegionName();
              BulkLoadHFileRequest request =
                  RequestConverter.buildBulkLoadHFileRequest(famPaths, regionName, true);
              getStub().bulkLoadHFile(null, request);
              return null;
            }
          };
      RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(conf);
      RpcRetryingCaller<Void> caller = factory.<Void>newCaller();
      caller.callWithRetries(callable, Integer.MAX_VALUE);

      // Periodically do compaction to reduce the number of open file handles.
      if (numBulkLoads.get() % 5 == 0) {
        // 5 * 50 = 250 open file handles!
        callable =
            new RegionServerCallable<Void>(conn, tableName, Bytes.toBytes("aaa")) {
              @Override
              public Void call(int callTimeout) throws Exception {
                LOG.debug(
                    "compacting " + getLocation() + " for row " + Bytes.toStringBinary(getRow()));
                AdminProtos.AdminService.BlockingInterface server =
                    conn.getAdmin(getLocation().getServerName());
                CompactRegionRequest request =
                    RequestConverter.buildCompactRegionRequest(
                        getLocation().getRegionInfo().getRegionName(), true, null);
                server.compactRegion(null, request);
                numCompactions.incrementAndGet();
                return null;
              }
            };
        caller.callWithRetries(callable, Integer.MAX_VALUE);
      }
    }
Example No. 24
 /**
  * Idempotency support: an invocation id is attached to asynchronous invocations by default.
  *
  * @param url the url whose parameters decide whether an invocation id should be attached
  * @param inv the invocation to attach the id to
  */
 public static void attachInvocationIdIfAsync(URL url, Invocation inv) {
   if (isAttachInvocationId(url, inv)
       && getInvocationId(inv) == null
       && inv instanceof RpcInvocation) {
     ((RpcInvocation) inv)
         .setAttachment(Constants.ID_KEY, String.valueOf(INVOKE_ID.getAndIncrement()));
   }
 }
 /** Handle the timeout of a previous lock mapped to this key */
 protected void handleLockExpiry(Object key, Lockable lock) {
   LOG.expired(key);
   long ts = getInternalRegion().nextTimestamp() + getInternalRegion().getTimeout();
   // create new lock that times out immediately
   Lock newLock = new Lock(ts, uuid, nextLockId.getAndIncrement(), null);
   newLock.unlock(ts);
   getInternalRegion().put(key, newLock);
 }
Example No. 26
  public Long submit(Job<?> task) {
    Long jobid = counter.getAndIncrement();
    Task t = (Task) pool.submit(task);
    t.setId(jobid);

    jobs.put(jobid, t);
    return jobid;
  }
  public int getCluster(final OClass clazz, final ODocument doc) {
    final int[] clusters = clazz.getClusterIds();
    if (clusters.length == 1)
      // ONLY ONE: RETURN THE FIRST ONE
      return clusters[0];

    return clusters[(int) (pointer.getAndIncrement() % clusters.length)];
  }
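getCluster above does round-robin selection by taking the counter modulo the number of clusters. One caveat worth noting: if the counter ever wrapped past Long.MAX_VALUE (admittedly unlikely), the remainder of a negative value would produce a negative index; Math.floorMod sidesteps that. A minimal sketch of an overflow-safe round-robin picker (names are illustrative):

import java.util.concurrent.atomic.AtomicLong;

class RoundRobinPicker {
  private final AtomicLong pointer = new AtomicLong(0);

  int next(int size) {
    // floorMod keeps the index in [0, size) even if the counter wraps negative
    return (int) Math.floorMod(pointer.getAndIncrement(), size);
  }
}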
Example No. 28
 private void updateAgentConfig(AgentConfig agentConfig) throws Exception {
   sendRequest(
       ServerRequest.newBuilder()
           .setRequestId(nextRequestId.getAndIncrement())
           .setAgentConfigUpdateRequest(
               AgentConfigUpdateRequest.newBuilder().setAgentConfig(agentConfig))
           .build());
 }
Example No. 29
 public void performSearch(
     final SearchRequest searchRequest, final SearchResultHandler searchResultHandler) {
   if (lastSearchId.getAndIncrement() == 0) {
     service.submit(this::rebuildMetadataCache);
   }
   service.submit(
       new SearchCallable(lastSearchId.incrementAndGet(), searchRequest, searchResultHandler));
 }
 @JsonApiSave
 public <S extends Project> S save(S entity) {
   if (entity.getId() == null) {
     entity.setId(ID_GENERATOR.getAndIncrement());
   }
   REPOSITORY.put(entity.getId(), entity);
   return entity;
 }