/** {@inheritDoc} */
  @Override
  public GridDhtPartitionFullMap partitionMap(boolean onlyActive) {
    lock.readLock().lock();

    try {
      assert node2part != null && node2part.valid()
          : "Invalid node2part [node2part: "
              + node2part
              + ", cache="
              + cctx.name()
              + ", started="
              + cctx.started()
              + ", stopping="
              + stopping
              + ", locNodeId="
              + cctx.localNode().id()
              + ", locName="
              + cctx.gridName()
              + ']';

      GridDhtPartitionFullMap m = node2part;

      return new GridDhtPartitionFullMap(
          m.nodeId(), m.nodeOrder(), m.updateSequence(), m, onlyActive);
    } finally {
      lock.readLock().unlock();
    }
  }

  /** {@inheritDoc} */
  @Nullable
  @Override
  public GridDhtPartitionMap partitions(UUID nodeId) {
    lock.readLock().lock();

    try {
      return node2part.get(nodeId);
    } finally {
      lock.readLock().unlock();
    }
  }

  /** {@inheritDoc} */
  @Override
  public GridDhtPartitionMap localPartitionMap() {
    lock.readLock().lock();

    try {
      return new GridDhtPartitionMap(
          cctx.nodeId(), updateSeq.get(), F.viewReadOnly(locParts, CU.part2state()), true);
    } finally {
      lock.readLock().unlock();
    }
  }

  /** {@inheritDoc} */
  @Override
  public GridDhtTopologyFuture topologyVersionFuture() {
    lock.readLock().lock();

    try {
      assert topReadyFut != null;

      return topReadyFut;
    } finally {
      lock.readLock().unlock();
    }
  }

  /** {@inheritDoc} */
  @Override
  public AffinityTopologyVersion topologyVersion() {
    lock.readLock().lock();

    try {
      assert topVer.topologyVersion() > 0
          : "Invalid topology version [topVer=" + topVer + ", cacheName=" + cctx.name() + ']';

      return topVer;
    } finally {
      lock.readLock().unlock();
    }
  }

  /** {@inheritDoc} */
  @Override
  public Collection<ClusterNode> nodes(int p, AffinityTopologyVersion topVer) {
    Collection<ClusterNode> affNodes = cctx.affinity().nodes(p, topVer);

    lock.readLock().lock();

    try {
      assert node2part != null && node2part.valid()
          : "Invalid node-to-partitions map [topVer1="
              + topVer
              + ", topVer2="
              + this.topVer
              + ", cache="
              + cctx.name()
              + ", node2part="
              + node2part
              + ']';

      Collection<ClusterNode> nodes = null;

      Collection<UUID> nodeIds = part2node.get(p);

      if (!F.isEmpty(nodeIds)) {
        Collection<UUID> affIds = new HashSet<>(F.viewReadOnly(affNodes, F.node2id()));

        for (UUID nodeId : nodeIds) {
          if (!affIds.contains(nodeId) && hasState(p, nodeId, OWNING, MOVING, RENTING)) {
            ClusterNode n = cctx.discovery().node(nodeId);

            if (n != null
                && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion())) {
              if (nodes == null) {
                nodes = new ArrayList<>(affNodes.size() + 2);

                nodes.addAll(affNodes);
              }

              nodes.add(n);
            }
          }
        }
      }

      return nodes != null ? nodes : affNodes;
    } finally {
      lock.readLock().unlock();
    }
  }

  /**
   * @param p Partition.
   * @param topVer Topology version ({@code -1} for all nodes).
   * @param state Partition state.
   * @param states Additional partition states.
   * @return List of nodes for the partition.
   */
  private List<ClusterNode> nodes(
      int p,
      AffinityTopologyVersion topVer,
      GridDhtPartitionState state,
      GridDhtPartitionState... states) {
    Collection<UUID> allIds =
        topVer.topologyVersion() > 0 ? F.nodeIds(CU.affinityNodes(cctx, topVer)) : null;

    lock.readLock().lock();

    try {
      assert node2part != null && node2part.valid()
          : "Invalid node-to-partitions map [topVer="
              + topVer
              + ", allIds="
              + allIds
              + ", node2part="
              + node2part
              + ", cache="
              + cctx.name()
              + ']';

      Collection<UUID> nodeIds = part2node.get(p);

      // Node IDs can be null if both primary and backup nodes disappear.
      int size = nodeIds == null ? 0 : nodeIds.size();

      if (size == 0) return Collections.emptyList();

      List<ClusterNode> nodes = new ArrayList<>(size);

      for (UUID id : nodeIds) {
        if (topVer.topologyVersion() > 0 && !allIds.contains(id)) continue;

        if (hasState(p, id, state, states)) {
          ClusterNode n = cctx.discovery().node(id);

          if (n != null && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion()))
            nodes.add(n);
        }
      }

      return nodes;
    } finally {
      lock.readLock().unlock();
    }
  }
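
  // The loop above filters candidate node IDs with hasState(p, id, state, states): a node's
  // partition state must equal the required state or one of the optional additional states.
  // Below is a minimal, self-contained sketch of such a varargs state match; the enum and the
  // helper are illustrative stand-ins, not Ignite's actual types.
  /** Illustrative partition states (stand-in for GridDhtPartitionState). */
  enum PartState {
    OWNING,
    MOVING,
    RENTING,
    EVICTED
  }

  /** Returns {@code true} if {@code actual} equals {@code state} or any of {@code states}. */
  private static boolean matchesState(PartState actual, PartState state, PartState... states) {
    if (actual == null) return false;

    if (actual == state) return true;

    for (PartState s : states) if (actual == s) return true;

    return false;
  }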

  /** {@inheritDoc} */
  @Override
  public GridDhtPartitionState partitionState(UUID nodeId, int part) {
    lock.readLock().lock();

    try {
      GridDhtPartitionMap partMap = node2part.get(nodeId);

      if (partMap != null) {
        GridDhtPartitionState state = partMap.get(part);

        return state == null ? EVICTED : state;
      }

      return EVICTED;
    } finally {
      lock.readLock().unlock();
    }
  }
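
  // All of the accessors above follow the same pattern: take the read lock, read (or defensively
  // copy) the shared topology state, and release the lock in a finally block, while writers use
  // the write lock. A minimal, self-contained sketch of that pattern follows; the names are
  // illustrative and the standard java.util / java.util.concurrent.locks imports are assumed.
  static class ReadLockedSnapshotSketch {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    /** Shared mutable state, guarded by {@code lock}. */
    private final Map<UUID, String> node2state = new HashMap<>();

    /** Returns a defensive copy so callers never observe concurrent updates. */
    Map<UUID, String> snapshot() {
      lock.readLock().lock();

      try {
        return new HashMap<>(node2state);
      } finally {
        lock.readLock().unlock();
      }
    }

    /** Mutates the shared state under the write lock. */
    void put(UUID nodeId, String state) {
      lock.writeLock().lock();

      try {
        node2state.put(nodeId, state);
      } finally {
        lock.writeLock().unlock();
      }
    }
  }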
Example #9
    public static void main(String[] args) throws Throwable {
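      // The main thread acquires the plain lock and the write lock up front and never releases
      // them, so every timed acquisition attempt in the worker threads below is expected to fail.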
      final ReentrantLock lock = new ReentrantLock();
      lock.lock();

      final ReentrantReadWriteLock rwlock = new ReentrantReadWriteLock();
      final ReentrantReadWriteLock.ReadLock readLock = rwlock.readLock();
      final ReentrantReadWriteLock.WriteLock writeLock = rwlock.writeLock();
      rwlock.writeLock().lock();

      final BlockingQueue<Object> q = new LinkedBlockingQueue<Object>();
      final Semaphore fairSem = new Semaphore(0, true);
      final Semaphore unfairSem = new Semaphore(0, false);
      // final int threads =
      // rnd.nextInt(Runtime.getRuntime().availableProcessors() + 1) + 1;
      final int threads = 3;
      // On Linux, this test runs very slowly for some reason,
      // so use a smaller number of iterations.
      // Solaris can handle 1 << 18.
      // On the other hand, jmap is much slower on Solaris...
      final int iterations = 1 << 8;
      final CyclicBarrier cb = new CyclicBarrier(threads + 1);

      for (int i = 0; i < threads; i++)
        new Thread() {
          public void run() {
            try {
              final Random rnd = new Random();
              for (int j = 0; j < iterations; j++) {
                if (j == iterations / 10 || j == iterations - 1) {
                  cb.await(); // Quiesce
                  cb.await(); // Resume
                }
                // int t = rnd.nextInt(2000);
                int t = rnd.nextInt(900);
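                // All timed acquisitions must fail: main holds the locks, the queue is empty,
                // and neither semaphore has any permits.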
                check(!lock.tryLock(t, NANOSECONDS));
                check(!readLock.tryLock(t, NANOSECONDS));
                check(!writeLock.tryLock(t, NANOSECONDS));
                equal(null, q.poll(t, NANOSECONDS));
                check(!fairSem.tryAcquire(t, NANOSECONDS));
                check(!unfairSem.tryAcquire(t, NANOSECONDS));
              }
            } catch (Throwable t) {
              unexpected(t);
            }
          }
        }.start();

      cb.await(); // Quiesce
      rendezvousChild(); // Measure
      cb.await(); // Resume

      cb.await(); // Quiesce
      rendezvousChild(); // Measure
      cb.await(); // Resume

      System.exit(failed);
    }
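
    // The test above coordinates its workers with a CyclicBarrier of threads + 1 parties: each
    // worker calls await() twice at a measurement point, so the main thread can quiesce all
    // workers, take a measurement, and then let them resume. A stripped-down sketch of that
    // handshake follows; the workload and measurement steps are placeholders, noted inline.
    static void quiesceResumeSketch() throws Exception {
      final int threads = 3;
      // One extra party for the coordinating thread.
      final CyclicBarrier cb = new CyclicBarrier(threads + 1);

      for (int i = 0; i < threads; i++)
        new Thread() {
          public void run() {
            try {
              for (int j = 0; j < 100; j++) {
                if (j == 50) {
                  cb.await(); // Quiesce: wait for the coordinator.
                  cb.await(); // Resume: wait until the measurement is done.
                }
                Thread.yield(); // Stand-in for the per-iteration work.
              }
            } catch (Throwable t) {
              t.printStackTrace();
            }
          }
        }.start();

      cb.await(); // All workers have quiesced.
      // The original test calls rendezvousChild() here to take its measurement.
      cb.await(); // Let the workers resume.
    }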

  /** {@inheritDoc} */
  @Override
  public void readUnlock() {
    lock.readLock().unlock();
  }

  /** {@inheritDoc} */
  @SuppressWarnings({"LockAcquiredButNotSafelyReleased"})
  @Override
  public void readLock() {
    lock.readLock().lock();
  }
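
  // The readLock()/readUnlock() pair above is meant to be used like any explicit lock: acquire,
  // do the read, and always release in a finally block. A minimal caller-side sketch follows;
  // the ReadLockable interface and withReadLock helper are hypothetical, introduced only to
  // illustrate the pairing.
  /** Hypothetical view of an object exposing the two methods above. */
  interface ReadLockable {
    void readLock();

    void readUnlock();
  }

  /** Runs {@code action} while holding the read lock. */
  static void withReadLock(ReadLockable top, Runnable action) {
    top.readLock();

    try {
      action.run();
    } finally {
      top.readUnlock(); // Always release, even if the action throws.
    }
  }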