  /**
   * @param fair Fairness flag.
   * @throws Exception If failed.
   */
  private void checkLock(final boolean fair) throws Exception {
    // Check only 'false' cases here. Successful lock is tested over the grid.
    final IgniteLock lock = createReentrantLock("acquire", false, fair);

    lock.lock();

    IgniteInternalFuture fut =
        GridTestUtils.runAsync(
            new Callable<Void>() {
              @Override
              public Void call() throws Exception {
                assertNotNull(lock);

                assert !lock.tryLock();

                assert !lock.tryLock(10, MICROSECONDS);

                return null;
              }
            });

    fut.get();

    lock.unlock();

    removeReentrantLock("acquire", fair);
  }
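
The test above exercises the negative case; the acquire/release discipline it relies on is the usual lock-in-try/finally pattern. A minimal usage sketch, assuming a started Ignite instance named ignite (the lock name and flags below are illustrative, not taken from the test):

// Minimal sketch, not part of the test: typical IgniteLock usage.
// 'ignite' is assumed to be an already started Ignite instance.
IgniteLock lock = ignite.reentrantLock("myLock", /*failoverSafe*/ true, /*fair*/ false, /*create*/ true);

lock.lock();

try {
  // Critical section, mutually exclusive across the whole cluster.
} finally {
  lock.unlock();
}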
  /**
   * @param nodeId Sender node ID.
   * @param res Result.
   */
  @SuppressWarnings("ForLoopReplaceableByForEach")
  public void onResult(UUID nodeId, GridNearTxFinishResponse res) {
    if (!isDone()) {
      FinishMiniFuture finishFut = null;

      synchronized (futs) {
        for (int i = 0; i < futs.size(); i++) {
          IgniteInternalFuture<IgniteInternalTx> fut = futs.get(i);

          if (fut.getClass() == FinishMiniFuture.class) {
            FinishMiniFuture f = (FinishMiniFuture) fut;

            if (f.futureId().equals(res.miniId())) {
              assert f.node().id().equals(nodeId);

              finishFut = f;

              break;
            }
          }
        }
      }

      if (finishFut != null) finishFut.onNearFinishResponse(res);
    }
  }
    /**
     * @param nodeId Failed node ID.
     * @return {@code True} if the failed node is the one this mini-future was sent to.
     */
    boolean onNodeLeft(UUID nodeId) {
      if (nodeId.equals(m.node().id())) {
        if (log.isDebugEnabled())
          log.debug("Remote node left grid while sending or waiting for reply: " + this);

        if (isSync()) {
          Map<UUID, Collection<UUID>> txNodes = tx.transactionNodes();

          if (txNodes != null) {
            Collection<UUID> backups = txNodes.get(nodeId);

            if (!F.isEmpty(backups)) {
              final CheckRemoteTxMiniFuture mini =
                  new CheckRemoteTxMiniFuture(new HashSet<>(backups));

              add(mini);

              GridDhtTxFinishRequest req = checkCommittedRequest(mini.futureId());

              req.waitRemoteTransactions(true);

              for (UUID backupId : backups) {
                ClusterNode backup = cctx.discovery().node(backupId);

                if (backup != null && WAIT_REMOTE_TXS_SINCE.compareTo(backup.version()) <= 0) {
                  if (backup.isLocal()) {
                    IgniteInternalFuture<?> fut =
                        cctx.tm().remoteTxFinishFuture(tx.nearXidVersion());

                    fut.listen(
                        new CI1<IgniteInternalFuture<?>>() {
                          @Override
                          public void apply(IgniteInternalFuture<?> fut) {
                            mini.onDhtFinishResponse(cctx.localNodeId());
                          }
                        });
                  } else {
                    try {
                      cctx.io().send(backup, req, tx.ioPolicy());
                    } catch (ClusterTopologyCheckedException e) {
                      mini.onNodeLeft(backupId);
                    } catch (IgniteCheckedException e) {
                      mini.onDone(e);
                    }
                  }
                } else mini.onDhtFinishResponse(backupId);
              }
            }
          }
        }

        onDone(tx);

        return true;
      }

      return false;
    }
  /**
   * @param failoverSafe Failover safe flag.
   * @param fair Fairness flag.
   * @throws Exception If failed.
   */
  private void checkFailover(final boolean failoverSafe, final boolean fair) throws Exception {
    IgniteEx g = startGrid(NODES_CNT + 1);

    // Block scope to keep these variables local.
    {
      // Ensure not exists.
      assert g.reentrantLock("lock", failoverSafe, fair, false) == null;

      IgniteLock lock = g.reentrantLock("lock", failoverSafe, fair, true);

      lock.lock();

      assert lock.tryLock();

      assertEquals(2, lock.getHoldCount());
    }

    Ignite g0 = grid(0);

    final IgniteLock lock0 = g0.reentrantLock("lock", false, fair, false);

    assert !lock0.tryLock();

    assertEquals(0, lock0.getHoldCount());

    IgniteInternalFuture<?> fut =
        multithreadedAsync(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                try {
                  lock0.lock();

                  info("Acquired in separate thread.");

                  // Lock is acquired silently only in failoverSafe mode.
                  assertTrue(failoverSafe);

                  lock0.unlock();

                  info("Released lock in separate thread.");
                } catch (IgniteException e) {
                  if (!failoverSafe) info("Ignored expected exception: " + e);
                  else throw e;
                }
                return null;
              }
            },
            1);

    Thread.sleep(100);

    g.close();

    fut.get(500);

    lock0.close();
  }
  /** Initializes future. */
  @SuppressWarnings("ForLoopReplaceableByForEach")
  void finish() {
    if (tx.onNeedCheckBackup()) {
      assert tx.onePhaseCommit();

      checkBackup();

      // If checkBackup is set, it means that primary node has crashed and we will not need to send
      // finish request to it, so we can mark future as initialized.
      markInitialized();

      return;
    }

    try {
      if (tx.finish(commit) || (!commit && tx.state() == UNKNOWN)) {
        if ((tx.onePhaseCommit() && needFinishOnePhase())
            || (!tx.onePhaseCommit() && mappings != null)) {
          if (mappings.single()) {
            GridDistributedTxMapping mapping = mappings.singleMapping();

            if (mapping != null) finish(mapping);
          } else finish(mappings.mappings());
        }

        markInitialized();

        if (!isSync() && !isDone()) {
          boolean complete = true;

          synchronized (futs) {
            // Avoid collection copy and iterator creation.
            for (int i = 0; i < futs.size(); i++) {
              IgniteInternalFuture<IgniteInternalTx> f = futs.get(i);

              if (isMini(f) && !f.isDone()) {
                complete = false;

                break;
              }
            }
          }

          if (complete) onComplete();
        }
      } else onDone(new IgniteCheckedException("Failed to commit transaction: " + CU.txString(tx)));
    } catch (Error | RuntimeException e) {
      onDone(e);

      throw e;
    } catch (IgniteCheckedException e) {
      onDone(e);
    }
  }
  /** @throws Exception If any error occurs. */
  public void testMultipleStartOnCoordinatorStop() throws Exception {
    for (int k = 0; k < 3; k++) {
      log.info("Iteration: " + k);

      clientFlagGlobal = false;

      final int START_NODES = 5;
      final int JOIN_NODES = 10;

      startGrids(START_NODES);

      final CyclicBarrier barrier = new CyclicBarrier(JOIN_NODES + 1);

      final AtomicInteger startIdx = new AtomicInteger(START_NODES);

      IgniteInternalFuture<?> fut =
          GridTestUtils.runMultiThreadedAsync(
              new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                  int idx = startIdx.getAndIncrement();

                  Thread.currentThread().setName("start-thread-" + idx);

                  barrier.await();

                  Ignite ignite = startGrid(idx);

                  assertFalse(ignite.configuration().isClientMode());

                  log.info("Started node: " + ignite.name());

                  return null;
                }
              },
              JOIN_NODES,
              "start-thread");

      barrier.await();

      U.sleep(ThreadLocalRandom.current().nextInt(10, 100));

      for (int i = 0; i < START_NODES; i++) stopGrid(i);

      fut.get();

      stopAllGrids();
    }
  }
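
The joining threads above synchronize on a CyclicBarrier with one extra party for the coordinating thread, so all joins are released at the same moment. A minimal JDK-only sketch of that handshake (worker count and names are illustrative):

// Minimal sketch of the barrier handshake used above (java.util.concurrent classes only).
int workers = 3;

// One extra party for the coordinating thread, mirroring JOIN_NODES + 1.
final CyclicBarrier barrier = new CyclicBarrier(workers + 1);

ExecutorService pool = Executors.newFixedThreadPool(workers);

for (int i = 0; i < workers; i++) {
  pool.submit(new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      barrier.await(); // Wait until the coordinator releases all workers at once.

      // ... do the work that must start simultaneously on all workers ...

      return null;
    }
  });
}

barrier.await(); // Coordinator releases the workers, then continues with its own work.

pool.shutdown();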
  /** {@inheritDoc} */
  @Override
  public IgniteInternalFuture<IgniteInternalTx> rollbackAsync() {
    if (log.isDebugEnabled()) log.debug("Rolling back near tx: " + this);

    GridNearTxFinishFuture fut = rollbackFut.get();

    if (fut != null) return fut;

    if (!rollbackFut.compareAndSet(null, fut = new GridNearTxFinishFuture<>(cctx, this, false)))
      return rollbackFut.get();

    cctx.mvcc().addFuture(fut, fut.futureId());

    IgniteInternalFuture<?> prepFut = this.prepFut.get();

    if (prepFut == null || prepFut.isDone()) {
      try {
        // Check for errors in prepare future.
        if (prepFut != null) prepFut.get();
      } catch (IgniteCheckedException e) {
        if (log.isDebugEnabled())
          log.debug("Got optimistic tx failure [tx=" + this + ", err=" + e + ']');
      }

      fut.finish();
    } else {
      prepFut.listen(
          new CI1<IgniteInternalFuture<?>>() {
            @Override
            public void apply(IgniteInternalFuture<?> f) {
              try {
                // Check for errors in prepare future.
                f.get();
              } catch (IgniteCheckedException e) {
                if (log.isDebugEnabled())
                  log.debug("Got optimistic tx failure [tx=" + this + ", err=" + e + ']');
              }

              GridNearTxFinishFuture fut0 = rollbackFut.get();

              fut0.finish();
            }
          });
    }

    return fut;
  }
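
rollbackAsync() creates its finish future at most once through compareAndSet on an AtomicReference, so concurrent callers share the same future. A minimal JDK-only sketch of that create-once idiom (class and field names are illustrative, not the Ignite implementation):

// Minimal sketch of the create-once idiom used for rollbackFut (JDK classes only).
class CreateOnce {
  private final AtomicReference<CompletableFuture<Void>> rollbackFut = new AtomicReference<>();

  CompletableFuture<Void> rollbackAsync() {
    CompletableFuture<Void> fut = rollbackFut.get();

    if (fut != null)
      return fut; // A previous call already started the rollback.

    fut = new CompletableFuture<>();

    if (!rollbackFut.compareAndSet(null, fut))
      return rollbackFut.get(); // Lost the race; reuse the winner's future.

    // ... initiate the asynchronous rollback and complete 'fut' when it finishes ...

    return fut;
  }
}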
  /**
   * Rolls back local part of colocated transaction.
   *
   * @return Rollback future.
   */
  public IgniteInternalFuture<IgniteInternalTx> rollbackAsyncLocal() {
    if (log.isDebugEnabled()) log.debug("Rolling back colocated tx locally: " + this);

    final GridDhtTxFinishFuture fut = new GridDhtTxFinishFuture<>(cctx, this, /*commit*/ false);

    cctx.mvcc().addFuture(fut, fut.futureId());

    IgniteInternalFuture<?> prep = prepFut.get();

    if (prep == null || prep.isDone()) {
      try {
        if (prep != null) prep.get();
      } catch (IgniteCheckedException e) {
        if (log.isDebugEnabled())
          log.debug(
              "Failed to prepare transaction during rollback (will ignore) [tx="
                  + this
                  + ", msg="
                  + e.getMessage()
                  + ']');
      }

      fut.finish();
    } else
      prep.listen(
          new CI1<IgniteInternalFuture<?>>() {
            @Override
            public void apply(IgniteInternalFuture<?> f) {
              try {
                f.get(); // Check for errors of a parent future.
              } catch (IgniteCheckedException e) {
                log.debug(
                    "Failed to prepare transaction during rollback (will ignore) [tx="
                        + this
                        + ", msg="
                        + e.getMessage()
                        + ']');
              }

              fut.finish();
            }
          });

    return fut;
  }
  /**
   * @param nodeId Sender node ID.
   * @param res Result.
   */
  public void onResult(UUID nodeId, GridDhtTxFinishResponse res) {
    if (!isDone())
      for (IgniteInternalFuture<IgniteInternalTx> fut : futures()) {
        if (fut.getClass() == CheckBackupMiniFuture.class) {
          CheckBackupMiniFuture f = (CheckBackupMiniFuture) fut;

          if (f.futureId().equals(res.miniId())) {
            assert f.node().id().equals(nodeId);

            f.onDhtFinishResponse(res);
          }
        } else if (fut.getClass() == CheckRemoteTxMiniFuture.class) {
          CheckRemoteTxMiniFuture f = (CheckRemoteTxMiniFuture) fut;

          if (f.futureId().equals(res.miniId())) f.onDhtFinishResponse(nodeId);
        }
      }
  }
  /** {@inheritDoc} */
  @SuppressWarnings({"ThrowableInstanceNeverThrown"})
  @Override
  public IgniteInternalFuture<IgniteInternalTx> commitAsync() {
    if (log.isDebugEnabled()) log.debug("Committing near local tx: " + this);

    prepareAsync();

    GridNearTxFinishFuture fut = commitFut.get();

    if (fut == null
        && !commitFut.compareAndSet(null, fut = new GridNearTxFinishFuture<>(cctx, this, true)))
      return commitFut.get();

    cctx.mvcc().addFuture(fut, fut.futureId());

    final IgniteInternalFuture<?> prepareFut = prepFut.get();

    prepareFut.listen(
        new CI1<IgniteInternalFuture<?>>() {
          @Override
          public void apply(IgniteInternalFuture<?> f) {
            GridNearTxFinishFuture fut0 = commitFut.get();

            try {
              // Make sure that there are no exceptions.
              prepareFut.get();

              fut0.finish();
            } catch (Error | RuntimeException e) {
              commitErr.compareAndSet(null, e);

              fut0.onDone(e);

              throw e;
            } catch (IgniteCheckedException e) {
              commitErr.compareAndSet(null, e);

              fut0.onDone(e);
            }
          }
        });

    return fut;
  }
  /** @throws Exception If any error occurs. */
  public void testMultiThreadedClientsRestart() throws Exception {
    fail("https://issues.apache.org/jira/browse/IGNITE-1123");

    clientFlagGlobal = false;

    info("Test timeout: " + (getTestTimeout() / (60 * 1000)) + " min.");

    startGridsMultiThreaded(GRID_CNT);

    clientFlagGlobal = true;

    startGridsMultiThreaded(GRID_CNT, CLIENT_GRID_CNT);

    final AtomicBoolean done = new AtomicBoolean();

    final AtomicInteger clientIdx = new AtomicInteger(GRID_CNT);

    IgniteInternalFuture<?> fut1 =
        multithreadedAsync(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                clientFlagPerThread.set(true);

                int idx = clientIdx.getAndIncrement();

                while (!done.get()) {
                  stopGrid(idx, true);
                  startGrid(idx);
                }

                return null;
              }
            },
            CLIENT_GRID_CNT);

    Thread.sleep(getTestTimeout() - 60 * 1000);

    done.set(true);

    fut1.get();
  }
  /**
   * Executes one test iteration.
   *
   * @throws Exception If failed.
   */
  private void doTestFailover() throws Exception {
    try {
      done = false;

      nodes = new AtomicReferenceArray<>(GRID_CNT);

      startGridsMultiThreaded(GRID_CNT, false);

      for (int i = 0; i < GRID_CNT; i++) assertTrue(nodes.compareAndSet(i, null, ignite(i)));

      List<IgniteInternalFuture> futs = new ArrayList<>();

      for (int i = 0; i < GRID_CNT + 1; i++) {
        futs.add(
            multithreadedAsync(
                new Runnable() {
                  @Override
                  public void run() {
                    T2<Ignite, Integer> ignite;

                    while ((ignite = randomNode()) != null) {
                      IgniteCache<Object, Object> cache = ignite.get1().cache(null);

                      for (int i = 0; i < 100; i++)
                        cache.containsKey(ThreadLocalRandom.current().nextInt(100_000));

                      assertTrue(nodes.compareAndSet(ignite.get2(), null, ignite.get1()));

                      try {
                        Thread.sleep(ThreadLocalRandom.current().nextLong(50));
                      } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                      }
                    }
                  }
                },
                1,
                "containsKey-thread-" + i));

        futs.add(
            multithreadedAsync(
                new Runnable() {
                  @Override
                  public void run() {
                    T2<Ignite, Integer> ignite;

                    while ((ignite = randomNode()) != null) {
                      IgniteCache<Object, Object> cache = ignite.get1().cache(null);

                      for (int i = 0; i < 100; i++)
                        cache.put(ThreadLocalRandom.current().nextInt(100_000), UUID.randomUUID());

                      assertTrue(nodes.compareAndSet(ignite.get2(), null, ignite.get1()));

                      try {
                        Thread.sleep(ThreadLocalRandom.current().nextLong(50));
                      } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                      }
                    }
                  }
                },
                1,
                "put-thread-" + i));
      }

      try {
        int aliveGrids = GRID_CNT;

        while (aliveGrids > 0) {
          T2<Ignite, Integer> ignite = randomNode();

          assert ignite != null;

          Ignite ignite0 = ignite.get1();

          log.info("Stop node: " + ignite0.name());

          ignite0.close();

          log.info("Node stop finished: " + ignite0.name());

          aliveGrids--;
        }
      } finally {
        done = true;
      }

      for (IgniteInternalFuture fut : futs) fut.get();
    } finally {
      done = true;
    }
  }
  /** @throws Exception If failed. */
  public void testReconnectQueryInProgress() throws Exception {
    Ignite cln = grid(serverCount());

    assertTrue(cln.cluster().localNode().isClient());

    final Ignite srv = clientRouter(cln);

    final IgniteCache<Integer, Person> clnCache = cln.getOrCreateCache(QUERY_CACHE);

    clnCache.put(1, new Person(1, "name1", "surname1"));
    clnCache.put(2, new Person(2, "name2", "surname2"));
    clnCache.put(3, new Person(3, "name3", "surname3"));

    blockMessage(GridQueryNextPageResponse.class);

    final SqlQuery<Integer, Person> qry = new SqlQuery<>(Person.class, "_key <> 0");

    qry.setPageSize(1);

    final QueryCursor<Cache.Entry<Integer, Person>> cur1 = clnCache.query(qry);

    final IgniteInternalFuture<Object> fut =
        GridTestUtils.runAsync(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                try {
                  cur1.getAll();
                } catch (CacheException e) {
                  checkAndWait(e);

                  return true;
                }

                return false;
              }
            });

    // Check that the client operation is still waiting.
    GridTestUtils.assertThrows(
        log,
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            return fut.get(200);
          }
        },
        IgniteFutureTimeoutCheckedException.class,
        null);

    assertNotDone(fut);

    unblockMessage();

    reconnectClientNode(cln, srv, null);

    assertTrue((Boolean) fut.get(2, SECONDS));

    QueryCursor<Cache.Entry<Integer, Person>> cur2 = clnCache.query(qry);

    assertEquals(3, cur2.getAll().size());
  }
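  /**
   * Checks on the backup node whether a one-phase commit transaction was committed after the
   * primary node has failed.
   */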
  private void checkBackup() {
    GridDistributedTxMapping mapping = mappings.singleMapping();

    if (mapping != null) {
      UUID nodeId = mapping.node().id();

      Collection<UUID> backups = tx.transactionNodes().get(nodeId);

      if (!F.isEmpty(backups)) {
        assert backups.size() == 1;

        UUID backupId = F.first(backups);

        ClusterNode backup = cctx.discovery().node(backupId);

        // Nothing to do if backup has left the grid.
        if (backup == null) {
          readyNearMappingFromBackup(mapping);

          ClusterTopologyCheckedException cause =
              new ClusterTopologyCheckedException("Backup node left grid: " + backupId);

          cause.retryReadyFuture(cctx.nextAffinityReadyFuture(tx.topologyVersion()));

          onDone(
              new IgniteTxRollbackCheckedException(
                  "Failed to commit transaction " + "(backup has left grid): " + tx.xidVersion(),
                  cause));
        } else {
          final CheckBackupMiniFuture mini = new CheckBackupMiniFuture(backup, mapping);

          add(mini);

          if (backup.isLocal()) {
            boolean committed = !cctx.tm().addRolledbackTx(tx);

            readyNearMappingFromBackup(mapping);

            if (committed) {
              if (tx.syncCommit()) {
                GridCacheVersion nearXidVer = tx.nearXidVersion();

                assert nearXidVer != null : tx;

                IgniteInternalFuture<?> fut = cctx.tm().remoteTxFinishFuture(nearXidVer);

                fut.listen(
                    new CI1<IgniteInternalFuture<?>>() {
                      @Override
                      public void apply(IgniteInternalFuture<?> fut) {
                        mini.onDone(tx);
                      }
                    });

                return;
              }

              mini.onDone(tx);
            } else {
              ClusterTopologyCheckedException cause =
                  new ClusterTopologyCheckedException("Primary node left grid: " + nodeId);

              cause.retryReadyFuture(cctx.nextAffinityReadyFuture(tx.topologyVersion()));

              mini.onDone(
                  new IgniteTxRollbackCheckedException(
                      "Failed to commit transaction "
                          + "(transaction has been rolled back on backup node): "
                          + tx.xidVersion(),
                      cause));
            }
          } else {
            GridDhtTxFinishRequest finishReq = checkCommittedRequest(mini.futureId());

            // Preserve old behavior, otherwise response is not sent.
            if (WAIT_REMOTE_TXS_SINCE.compareTo(backup.version()) > 0) finishReq.syncCommit(true);

            try {
              if (FINISH_NEAR_ONE_PHASE_SINCE.compareTo(backup.version()) <= 0)
                cctx.io().send(backup, finishReq, tx.ioPolicy());
              else {
                mini.onDone(
                    new IgniteTxHeuristicCheckedException(
                        "Failed to check for tx commit on "
                            + "the backup node (node has an old Ignite version) [rmtNodeId="
                            + backup.id()
                            + ", ver="
                            + backup.version()
                            + ']'));
              }
            } catch (ClusterTopologyCheckedException e) {
              mini.onNodeLeft(backupId);
            } catch (IgniteCheckedException e) {
              mini.onDone(e);
            }
          }
        }
      } else readyNearMappingFromBackup(mapping);
    }
  }
  /** @throws Exception If failed. */
  private void testHasConditionQueuedThreads(final boolean fair) throws Exception {
    final IgniteLock lock0 = grid(0).reentrantLock("lock", true, fair, true);

    assertEquals(0, lock0.getHoldCount());

    assertFalse(lock0.hasQueuedThreads());

    final int totalThreads = 5;

    final Set<Thread> startedThreads = new GridConcurrentHashSet<>();

    final Set<Thread> finishedThreads = new GridConcurrentHashSet<>();

    IgniteInternalFuture<?> fut =
        multithreadedAsync(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                assertFalse(lock0.isHeldByCurrentThread());

                IgniteCondition cond = lock0.getOrCreateCondition("cond");

                lock0.lock();

                startedThreads.add(Thread.currentThread());

                // Wait until every thread tries to lock.
                do {
                  cond.await();

                  Thread.sleep(1000);
                } while (startedThreads.size() != totalThreads);

                try {
                  info(
                      "Acquired in separate thread. Number of threads waiting on condition: "
                          + lock0.getWaitQueueLength(cond));

                  assertTrue(lock0.isHeldByCurrentThread());

                  assertFalse(lock0.hasQueuedThread(Thread.currentThread()));

                  finishedThreads.add(Thread.currentThread());

                  if (startedThreads.size() != finishedThreads.size()) {
                    assertTrue(lock0.hasWaiters(cond));
                  }

                  for (Thread t : startedThreads) {
                    if (!finishedThreads.contains(t)) assertTrue(lock0.hasWaiters(cond));
                  }

                  assertTrue(
                      lock0.getWaitQueueLength(cond)
                          == (startedThreads.size() - finishedThreads.size()));
                } finally {
                  cond.signal();

                  lock0.unlock();

                  assertFalse(lock0.isHeldByCurrentThread());
                }

                return null;
              }
            },
            totalThreads);

    IgniteCondition cond = lock0.getOrCreateCondition("cond");

    lock0.lock();

    try {
      // Wait until all threads are waiting on condition.
      while (lock0.getWaitQueueLength(cond) != totalThreads) {
        lock0.unlock();

        Thread.sleep(1000);

        lock0.lock();
      }

      // Signal once to get things started.
      cond.signal();
    } finally {
      lock0.unlock();
    }

    fut.get();

    assertFalse(lock0.hasQueuedThreads());

    for (Thread t : startedThreads) assertFalse(lock0.hasQueuedThread(t));

    lock0.close();
  }
  /**
   * @param fut Future.
   * @return {@code True} if mini-future.
   */
  private boolean isMini(IgniteInternalFuture<?> fut) {
    return fut.getClass() == FinishMiniFuture.class
        || fut.getClass() == CheckBackupMiniFuture.class
        || fut.getClass() == CheckRemoteTxMiniFuture.class;
  }
  /**
   * Tests that the lock is acquired evenly among nodes when the fair flag is enabled. Since the
   * exact ordering of lock acquisitions cannot be guaranteed (it also depends on OS thread
   * scheduling), a certain deviation from the uniform distribution is tolerated.
   *
   * @throws Exception If failed.
   */
  public void testFairness() throws Exception {
    if (gridCount() == 1) return;

    // Total number of ops.
    final long opsCount = 10000;

    // Allowed deviation from uniform distribution.
    final double tolerance = 0.05;

    // Shared counter.
    final String OPS_COUNTER = "ops_counter";

    // Number of threads, one per node.
    final int threadCount = gridCount();

    final AtomicLong threadCounter = new AtomicLong(0);

    Ignite ignite = startGrid(gridCount());

    // Initialize reentrant lock.
    IgniteLock l = ignite.reentrantLock("lock", true, true, true);

    // Initialize OPS_COUNTER.
    ignite.getOrCreateCache(OPS_COUNTER).put(OPS_COUNTER, (long) 0);

    final Map<Integer, Long> counts = new ConcurrentHashMap<>();

    IgniteInternalFuture<?> fut =
        multithreadedAsync(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                final int localNodeId = (int) threadCounter.getAndIncrement();

                final Ignite grid = grid(localNodeId);

                IgniteClosure<Ignite, Long> closure =
                    new IgniteClosure<Ignite, Long>() {
                      @Override
                      public Long apply(Ignite ignite) {
                        IgniteLock l = ignite.reentrantLock("lock", true, true, true);

                        long localCount = 0;

                        IgniteCountDownLatch latch =
                            ignite.countDownLatch("latch", threadCount, false, true);

                        latch.countDown();

                        latch.await();

                        while (true) {
                          l.lock();

                          try {
                            long opsCounter =
                                (long) ignite.getOrCreateCache(OPS_COUNTER).get(OPS_COUNTER);

                            if (opsCounter == opsCount) break;

                            ignite.getOrCreateCache(OPS_COUNTER).put(OPS_COUNTER, ++opsCounter);

                            localCount++;

                            if (localCount > 1000) {
                              assertTrue(localCount < (1 + tolerance) * opsCounter / threadCount);

                              assertTrue(localCount > (1 - tolerance) * opsCounter / threadCount);
                            }

                            if (localCount % 100 == 0) {
                              info(
                                  "Node [id="
                                      + ignite.cluster().localNode().id()
                                      + "] acquired "
                                      + localCount
                                      + " times. "
                                      + "Total ops count: "
                                      + opsCounter
                                      + "/"
                                      + opsCount
                                      + "]");
                            }
                          } finally {
                            l.unlock();
                          }
                        }

                        return localCount;
                      }
                    };

                long localCount = closure.apply(grid);

                counts.put(localNodeId, localCount);

                return null;
              }
            },
            threadCount);

    fut.get();

    long totalSum = 0;

    for (int i = 0; i < gridCount(); i++) {

      totalSum += counts.get(i);

      info("Node " + grid(i).localNode().id() + " acquired the lock " + counts.get(i) + " times. ");
    }

    assertEquals(opsCount, totalSum);

    l.close();

    ignite.close();
  }
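
The fairness assertion above keeps each thread's acquisition count within the tolerance band around the uniform share opsCounter / threadCount. A small arithmetic sketch of those bounds (the numbers are illustrative, not taken from a test run):

// Illustrative only: 4 threads, 8000 ops counted so far, 5% tolerance.
int threadCount = 4;
long opsCounter = 8000;
double tolerance = 0.05;

double lower = (1 - tolerance) * opsCounter / threadCount; // 1900.0
double upper = (1 + tolerance) * opsCounter / threadCount; // 2100.0

// Once a thread's localCount exceeds 1000, it must satisfy lower < localCount < upper.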
  /** @throws Exception If failed. */
  private void testLockInterruptiblyMultinode(final boolean fair) throws Exception {
    if (gridCount() == 1) return;

    // Initialize reentrant lock.
    final IgniteLock lock0 = grid(0).reentrantLock("lock", true, fair, true);

    assertEquals(0, lock0.getHoldCount());

    assertFalse(lock0.hasQueuedThreads());

    lock0.lock();

    // Number of threads, one per node.
    final int threadCount = gridCount();

    final AtomicLong threadCounter = new AtomicLong(0);

    IgniteInternalFuture<?> fut =
        multithreadedAsync(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                final int localNodeId = (int) threadCounter.getAndIncrement();

                final Ignite grid = grid(localNodeId);

                IgniteClosure<Ignite, Void> closure =
                    new IgniteClosure<Ignite, Void>() {
                      @Override
                      public Void apply(Ignite ignite) {
                        final IgniteLock l = ignite.reentrantLock("lock", true, true, true);

                        final AtomicReference<Thread> thread = new AtomicReference<>();

                        final AtomicBoolean done = new AtomicBoolean(false);

                        final AtomicBoolean exceptionThrown = new AtomicBoolean(false);

                        final IgniteCountDownLatch latch =
                            ignite.countDownLatch("latch", threadCount, false, true);

                        IgniteInternalFuture<?> fut =
                            GridTestUtils.runAsync(
                                new Callable<Void>() {
                                  @Override
                                  public Void call() throws Exception {
                                    try {
                                      thread.set(Thread.currentThread());

                                      l.lockInterruptibly();
                                    } catch (IgniteInterruptedException e) {
                                      exceptionThrown.set(true);
                                    } finally {
                                      done.set(true);
                                    }

                                    return null;
                                  }
                                });

                        // Wait until l.lockInterruptibly() has been called.
                        while (!l.hasQueuedThreads()) {
                          // No-op.
                        }

                        latch.countDown();

                        latch.await();

                        thread.get().interrupt();

                        while (!done.get()) {
                          // No-op.
                        }

                        try {
                          fut.get();
                        } catch (IgniteCheckedException e) {
                          fail(e.getMessage());

                          throw new RuntimeException(e);
                        }

                        assertTrue(exceptionThrown.get());

                        return null;
                      }
                    };

                closure.apply(grid);

                return null;
              }
            },
            threadCount);

    fut.get();

    lock0.unlock();

    info("Checking if interrupted threads are removed from global waiting queue...");

    // Check if interrupted threads are removed from global waiting queue.
    boolean locked = lock0.tryLock(1000, MILLISECONDS);

    info("Interrupted threads successfully removed from global waiting queue. ");

    assertTrue(locked);

    lock0.unlock();

    assertFalse(lock0.isLocked());

    lock0.close();
  }
  /** @throws Exception If failed. */
  public void testSyncConsistent() throws Exception {
    final AtomicBoolean stop = new AtomicBoolean();

    final AtomicLong x = new AtomicLong();
    final AtomicLong y = new AtomicLong();
    final AtomicLong z = new AtomicLong();

    final Random rnd = new Random();

    final String oops = "Oops!";

    final GridSnapshotLock<T3<Long, Long, Long>> lock =
        new GridSnapshotLock<T3<Long, Long, Long>>() {
          @Override
          protected T3<Long, Long, Long> doSnapshot() {
            if (rnd.nextBoolean()) throw new IgniteException(oops);

            return new T3<>(x.get(), y.get(), z.get());
          }
        };

    IgniteInternalFuture<?> fut1 =
        multithreadedAsync(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                Random rnd = new Random();

                while (!stop.get()) {
                  if (rnd.nextBoolean()) {
                    if (!lock.tryBeginUpdate()) continue;
                  } else lock.beginUpdate();

                  int n = 1 + rnd.nextInt(1000);

                  if (rnd.nextBoolean()) x.addAndGet(n);
                  else y.addAndGet(n);

                  z.addAndGet(n);

                  lock.endUpdate();
                }

                return null;
              }
            },
            15,
            "update");

    IgniteInternalFuture<?> fut2 =
        multithreadedAsync(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                while (!stop.get()) {
                  T3<Long, Long, Long> t;

                  try {
                    t = lock.snapshot();
                  } catch (IgniteException e) {
                    assertEquals(oops, e.getMessage());

                    continue;
                  }

                  assertEquals(t.get3().longValue(), t.get1() + t.get2());
                }

                return null;
              }
            },
            8,
            "snapshot");

    Thread.sleep(20000);

    stop.set(true);

    fut1.get();
    fut2.get();
  }
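
The snapshot threads above rely on the invariant z == x + y holding in every snapshot, which is only true if updates and snapshots are serialized by the lock. A minimal JDK-only analogy (a plain monitor, not GridSnapshotLock itself) that preserves the same invariant for readers:

// Minimal analogy, not GridSnapshotLock: a monitor that keeps z == x + y visible atomically.
class InvariantBox {
  private long x, y, z;

  synchronized void add(long n, boolean toX) {
    if (toX)
      x += n;
    else
      y += n;

    z += n; // Updated together with x or y, so z == x + y always holds for readers.
  }

  synchronized long[] snapshot() {
    return new long[] {x, y, z}; // A reader can assert snapshot[2] == snapshot[0] + snapshot[1].
  }
}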
  /**
   * @param f Future.
   * @return {@code True} if mini-future.
   */
  private boolean isMini(IgniteInternalFuture<?> f) {
    return f.getClass().equals(MiniFuture.class);
  }
    /** @param res Get response. */
    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
    void onResult(final GridNearGetResponse res) {
      final Collection<Integer> invalidParts = res.invalidPartitions();

      // If error happened on remote node, fail the whole future.
      if (res.error() != null) {
        onDone(res.error());

        return;
      }

      // Remap invalid partitions.
      if (!F.isEmpty(invalidParts)) {
        AffinityTopologyVersion rmtTopVer = res.topologyVersion();

        assert !rmtTopVer.equals(AffinityTopologyVersion.ZERO);

        if (rmtTopVer.compareTo(topVer) <= 0) {
          // Fail the whole get future.
          onDone(
              new IgniteCheckedException(
                  "Failed to process invalid partitions response (remote node reported "
                      + "invalid partitions but remote topology version does not differ from local) "
                      + "[topVer="
                      + topVer
                      + ", rmtTopVer="
                      + rmtTopVer
                      + ", invalidParts="
                      + invalidParts
                      + ", nodeId="
                      + node.id()
                      + ']'));

          return;
        }

        if (log.isDebugEnabled())
          log.debug(
              "Remapping mini get future [invalidParts=" + invalidParts + ", fut=" + this + ']');

        if (!canRemap) {
          map(
              F.view(
                  keys.keySet(),
                  new P1<KeyCacheObject>() {
                    @Override
                    public boolean apply(KeyCacheObject key) {
                      return invalidParts.contains(cctx.affinity().partition(key));
                    }
                  }),
              F.t(node, keys),
              topVer);

          onDone(createResultMap(res.entries()));

          return;
        }

        // Need to wait for next topology version to remap.
        IgniteInternalFuture<AffinityTopologyVersion> topFut =
            cctx.affinity().affinityReadyFuture(rmtTopVer);

        topFut.listen(
            new CIX1<IgniteInternalFuture<AffinityTopologyVersion>>() {
              @SuppressWarnings("unchecked")
              @Override
              public void applyx(IgniteInternalFuture<AffinityTopologyVersion> fut)
                  throws IgniteCheckedException {
                AffinityTopologyVersion topVer = fut.get();

                // This will append new futures to compound list.
                map(
                    F.view(
                        keys.keySet(),
                        new P1<KeyCacheObject>() {
                          @Override
                          public boolean apply(KeyCacheObject key) {
                            return invalidParts.contains(cctx.affinity().partition(key));
                          }
                        }),
                    F.t(node, keys),
                    topVer);

                onDone(createResultMap(res.entries()));
              }
            });
      } else {
        try {
          onDone(createResultMap(res.entries()));
        } catch (Exception e) {
          onDone(e);
        }
      }
    }
  /** @throws Exception If failed. */
  private void testHasQueuedThreads(final boolean fair) throws Exception {
    final IgniteLock lock0 = grid(0).reentrantLock("lock", true, fair, true);

    assertEquals(0, lock0.getHoldCount());

    assertFalse(lock0.hasQueuedThreads());

    final int totalThreads = 5;

    final Set<Thread> startedThreads = new GridConcurrentHashSet<>();

    final Set<Thread> finishedThreads = new GridConcurrentHashSet<>();

    IgniteInternalFuture<?> fut =
        multithreadedAsync(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                assertFalse(lock0.isHeldByCurrentThread());

                startedThreads.add(Thread.currentThread());

                lock0.lock();

                // Wait until every thread tries to lock.
                do {
                  Thread.sleep(1000);
                } while (startedThreads.size() != totalThreads);

                try {
                  info("Acquired in separate thread. ");

                  assertTrue(lock0.isHeldByCurrentThread());

                  assertFalse(lock0.hasQueuedThread(Thread.currentThread()));

                  finishedThreads.add(Thread.currentThread());

                  if (startedThreads.size() != finishedThreads.size()) {
                    assertTrue(lock0.hasQueuedThreads());
                  }

                  for (Thread t : startedThreads) {
                    assertTrue(lock0.hasQueuedThread(t) != finishedThreads.contains(t));
                  }
                } finally {
                  lock0.unlock();

                  assertFalse(lock0.isHeldByCurrentThread());
                }

                return null;
              }
            },
            totalThreads);

    fut.get();

    assertFalse(lock0.hasQueuedThreads());

    for (Thread t : startedThreads) assertFalse(lock0.hasQueuedThread(t));

    lock0.close();
  }
  /** @throws Exception If failed. */
  private void testTryLock(final boolean fair) throws Exception {
    final IgniteLock lock0 = grid(0).reentrantLock("lock", true, fair, true);

    assertEquals(0, lock0.getHoldCount());

    assertFalse(lock0.hasQueuedThreads());

    final int totalThreads = 2;

    final Set<Thread> startedThreads = new GridConcurrentHashSet<>();

    lock0.lock();

    IgniteInternalFuture<?> fut =
        multithreadedAsync(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                assertFalse(lock0.isHeldByCurrentThread());

                startedThreads.add(Thread.currentThread());

                boolean isInterrupted = false;

                boolean locked = false;

                try {
                  locked = lock0.tryLock();
                } catch (IgniteInterruptedException e) {
                  isInterrupted = true;

                  fail("tryLock() method is uninterruptible.");
                } finally {
                  // Assert that thread was not interrupted.
                  assertFalse(isInterrupted);

                  // Assert that lock is locked.
                  assertTrue(lock0.isLocked());

                  // Assert that lock ownership matches the tryLock() result.
                  assertEquals(locked, lock0.isHeldByCurrentThread());

                  // Release lock.
                  if (locked) lock0.unlock();
                }

                return null;
              }
            },
            totalThreads);

    // Wait for all threads to attempt to acquire lock.
    while (startedThreads.size() != totalThreads) {
      Thread.sleep(500);
    }

    for (Thread t : startedThreads) t.interrupt();

    fut.get();

    lock0.unlock();

    assertFalse(lock0.isLocked());

    for (Thread t : startedThreads) assertFalse(lock0.hasQueuedThread(t));

    lock0.close();
  }
  /**
   * @param cacheMode Cache mode.
   * @param sameAff If {@code false} uses different number of partitions for caches.
   * @param concurrency Transaction concurrency.
   * @param isolation Transaction isolation.
   * @throws Exception If failed.
   */
  private void crossCacheTxFailover(
      CacheMode cacheMode,
      boolean sameAff,
      final TransactionConcurrency concurrency,
      final TransactionIsolation isolation)
      throws Exception {
    IgniteKernal ignite0 = (IgniteKernal) ignite(0);

    final AtomicBoolean stop = new AtomicBoolean();

    try {
      ignite0.createCache(cacheConfiguration(CACHE1, cacheMode, 256));
      ignite0.createCache(cacheConfiguration(CACHE2, cacheMode, sameAff ? 256 : 128));

      final AtomicInteger threadIdx = new AtomicInteger();

      IgniteInternalFuture<?> fut =
          GridTestUtils.runMultiThreadedAsync(
              new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                  int idx = threadIdx.getAndIncrement();

                  Ignite ignite = ignite(idx % GRID_CNT);

                  log.info(
                      "Started update thread [node="
                          + ignite.name()
                          + ", client="
                          + ignite.configuration().isClientMode()
                          + ']');

                  IgniteCache<TestKey, TestValue> cache1 = ignite.cache(CACHE1);
                  IgniteCache<TestKey, TestValue> cache2 = ignite.cache(CACHE2);

                  assertNotSame(cache1, cache2);

                  IgniteTransactions txs = ignite.transactions();

                  ThreadLocalRandom rnd = ThreadLocalRandom.current();

                  long iter = 0;

                  while (!stop.get()) {
                    boolean sameKey = rnd.nextBoolean();

                    try {
                      try (Transaction tx = txs.txStart(concurrency, isolation)) {
                        if (sameKey) {
                          TestKey key = new TestKey(rnd.nextLong(KEY_RANGE));

                          cacheOperation(rnd, cache1, key);
                          cacheOperation(rnd, cache2, key);
                        } else {
                          TestKey key1 = new TestKey(rnd.nextLong(KEY_RANGE));
                          TestKey key2 = new TestKey(key1.key() + 1);

                          cacheOperation(rnd, cache1, key1);
                          cacheOperation(rnd, cache2, key2);
                        }

                        tx.commit();
                      }
                    } catch (CacheException | IgniteException e) {
                      log.info("Update error: " + e);
                    }

                    if (iter++ % 500 == 0) log.info("Iteration: " + iter);
                  }

                  return null;
                }

                /**
                 * @param rnd Random.
                 * @param cache Cache.
                 * @param key Key.
                 */
                private void cacheOperation(
                    ThreadLocalRandom rnd, IgniteCache<TestKey, TestValue> cache, TestKey key) {
                  switch (rnd.nextInt(4)) {
                    case 0:
                      cache.put(key, new TestValue(rnd.nextLong()));

                      break;

                    case 1:
                      cache.remove(key);

                      break;

                    case 2:
                      cache.invoke(key, new TestEntryProcessor(rnd.nextBoolean() ? 1L : null));

                      break;

                    case 3:
                      cache.get(key);

                      break;

                    default:
                      assert false;
                  }
                }
              },
              10,
              "tx-thread");

      long stopTime = System.currentTimeMillis() + 3 * 60_000;

      long topVer = ignite0.cluster().topologyVersion();

      boolean failed = false;

      while (System.currentTimeMillis() < stopTime) {
        log.info("Start node.");

        IgniteKernal ignite = (IgniteKernal) startGrid(GRID_CNT);

        assertFalse(ignite.configuration().isClientMode());

        topVer++;

        IgniteInternalFuture<?> affFut =
            ignite
                .context()
                .cache()
                .context()
                .exchange()
                .affinityReadyFuture(new AffinityTopologyVersion(topVer));

        try {
          if (affFut != null) affFut.get(30_000);
        } catch (IgniteFutureTimeoutCheckedException e) {
          log.error("Failed to wait for affinity future after start: " + topVer);

          failed = true;

          break;
        }

        Thread.sleep(500);

        log.info("Stop node.");

        stopGrid(GRID_CNT);

        topVer++;

        affFut =
            ignite0
                .context()
                .cache()
                .context()
                .exchange()
                .affinityReadyFuture(new AffinityTopologyVersion(topVer));

        try {
          if (affFut != null) affFut.get(30_000);
        } catch (IgniteFutureTimeoutCheckedException e) {
          log.error("Failed to wait for affinity future after stop: " + topVer);

          failed = true;

          break;
        }
      }

      stop.set(true);

      fut.get();

      assertFalse("Test failed, see log for details.", failed);
    } finally {
      stop.set(true);

      ignite0.destroyCache(CACHE1);
      ignite0.destroyCache(CACHE2);

      awaitPartitionMapExchange();
    }
  }
  /**
   * @param setPart If {@code true} sets partition for scan query.
   * @throws Exception If failed.
   */
  private void scanQueryReconnectInProgress(boolean setPart) throws Exception {
    Ignite cln = grid(serverCount());

    assertTrue(cln.cluster().localNode().isClient());

    final Ignite srv = clientRouter(cln);

    final IgniteCache<Integer, Person> clnCache = cln.getOrCreateCache(QUERY_CACHE);

    clnCache.put(1, new Person(1, "name1", "surname1"));
    clnCache.put(2, new Person(2, "name2", "surname2"));
    clnCache.put(3, new Person(3, "name3", "surname3"));

    final ScanQuery<Integer, Person> scanQry = new ScanQuery<>();

    scanQry.setPageSize(1);

    scanQry.setFilter(
        new IgniteBiPredicate<Integer, Person>() {
          @Override
          public boolean apply(Integer integer, Person person) {
            return true;
          }
        });

    if (setPart) scanQry.setPartition(1);

    blockMessage(GridCacheQueryResponse.class);

    final IgniteInternalFuture<Object> fut =
        GridTestUtils.runAsync(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                try {
                  QueryCursor<Cache.Entry<Integer, Person>> qryCursor = clnCache.query(scanQry);

                  qryCursor.getAll();
                } catch (CacheException e) {
                  checkAndWait(e);

                  return true;
                }

                return false;
              }
            });

    // Check that the client operation is still waiting.
    GridTestUtils.assertThrows(
        log,
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            return fut.get(200);
          }
        },
        IgniteFutureTimeoutCheckedException.class,
        null);

    assertNotDone(fut);

    unblockMessage();

    reconnectClientNode(cln, srv, null);

    assertTrue((Boolean) fut.get(2, SECONDS));

    QueryCursor<Cache.Entry<Integer, Person>> qryCursor2 = clnCache.query(scanQry);

    assertEquals(setPart ? 1 : 3, qryCursor2.getAll().size());
  }
  /** @throws Exception If failed. */
  private void testReentrantLockMultinode1(final boolean fair) throws Exception {
    if (gridCount() == 1) return;

    IgniteLock lock = grid(0).reentrantLock("s1", true, fair, true);

    List<IgniteInternalFuture<?>> futs = new ArrayList<>();

    for (int i = 0; i < gridCount(); i++) {
      final Ignite ignite = grid(i);

      futs.add(
          GridTestUtils.runAsync(
              new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                  IgniteLock lock = ignite.reentrantLock("s1", true, fair, false);

                  assertNotNull(lock);

                  IgniteCondition cond1 = lock.getOrCreateCondition("c1");

                  IgniteCondition cond2 = lock.getOrCreateCondition("c2");

                  try {
                    boolean wait = lock.tryLock(30_000, MILLISECONDS);

                    assertTrue(wait);

                    cond2.signal();

                    cond1.await();
                  } finally {
                    lock.unlock();
                  }

                  return null;
                }
              }));
    }

    boolean done = false;

    while (!done) {
      done = true;

      for (IgniteInternalFuture<?> fut : futs) {
        if (!fut.isDone()) done = false;
      }

      try {
        lock.lock();

        lock.getOrCreateCondition("c1").signal();

        lock.getOrCreateCondition("c2").await(10, MILLISECONDS);
      } finally {
        lock.unlock();
      }
    }

    for (IgniteInternalFuture<?> fut : futs) fut.get(30_000);
  }
  /**
   * Commits local part of colocated transaction.
   *
   * @return Commit future.
   */
  public IgniteInternalFuture<IgniteInternalTx> commitAsyncLocal() {
    if (log.isDebugEnabled()) log.debug("Committing colocated tx locally: " + this);

    // In optimistic mode prepare was called explicitly.
    if (pessimistic()) prepareAsync();

    IgniteInternalFuture<?> prep = prepFut.get();

    // Do not create finish future if there are no remote nodes.
    if (F.isEmpty(dhtMap) && F.isEmpty(nearMap)) {
      if (prep != null) return (IgniteInternalFuture<IgniteInternalTx>) (IgniteInternalFuture) prep;

      return new GridFinishedFuture<IgniteInternalTx>(this);
    }

    final GridDhtTxFinishFuture fut = new GridDhtTxFinishFuture<>(cctx, this, /*commit*/ true);

    cctx.mvcc().addFuture(fut, fut.futureId());

    if (prep == null || prep.isDone()) {
      assert prep != null || optimistic();

      try {
        if (prep != null) prep.get(); // Check for errors of a parent future.

        fut.finish();
      } catch (IgniteTxOptimisticCheckedException e) {
        if (log.isDebugEnabled())
          log.debug("Failed optimistically to prepare transaction [tx=" + this + ", e=" + e + ']');

        fut.onError(e);
      } catch (IgniteCheckedException e) {
        U.error(log, "Failed to prepare transaction: " + this, e);

        fut.onError(e);
      }
    } else
      prep.listen(
          new CI1<IgniteInternalFuture<?>>() {
            @Override
            public void apply(IgniteInternalFuture<?> f) {
              try {
                f.get(); // Check for errors of a parent future.

                fut.finish();
              } catch (IgniteTxOptimisticCheckedException e) {
                if (log.isDebugEnabled())
                  log.debug(
                      "Failed optimistically to prepare transaction [tx="
                          + this
                          + ", e="
                          + e
                          + ']');

                fut.onError(e);
              } catch (IgniteCheckedException e) {
                U.error(log, "Failed to prepare transaction: " + this, e);

                fut.onError(e);
              }
            }
          });

    return fut;
  }
  /**
   * @param nodeId Node ID.
   * @param req Get request.
   */
  protected void processNearSingleGetRequest(
      final UUID nodeId, final GridNearSingleGetRequest req) {
    assert ctx.affinityNode();

    final CacheExpiryPolicy expiryPlc = CacheExpiryPolicy.forAccess(req.accessTtl());

    IgniteInternalFuture<GridCacheEntryInfo> fut =
        getDhtSingleAsync(
            nodeId,
            req.messageId(),
            req.key(),
            req.addReader(),
            req.readThrough(),
            req.topologyVersion(),
            req.subjectId(),
            req.taskNameHash(),
            expiryPlc,
            req.skipValues());

    fut.listen(
        new CI1<IgniteInternalFuture<GridCacheEntryInfo>>() {
          @Override
          public void apply(IgniteInternalFuture<GridCacheEntryInfo> f) {
            GridNearSingleGetResponse res;

            GridDhtFuture<GridCacheEntryInfo> fut = (GridDhtFuture<GridCacheEntryInfo>) f;

            try {
              GridCacheEntryInfo info = fut.get();

              if (F.isEmpty(fut.invalidPartitions())) {
                Message res0 = null;

                if (info != null) {
                  if (req.needEntryInfo()) {
                    info.key(null);

                    res0 = info;
                  } else if (req.needVersion())
                    res0 = new CacheVersionedValue(info.value(), info.version());
                  else res0 = info.value();
                }

                res =
                    new GridNearSingleGetResponse(
                        ctx.cacheId(),
                        req.futureId(),
                        req.topologyVersion(),
                        res0,
                        false,
                        req.addDeploymentInfo());

                if (info != null && req.skipValues()) res.setContainsValue();
              } else {
                AffinityTopologyVersion topVer = ctx.shared().exchange().readyAffinityVersion();

                assert topVer.compareTo(req.topologyVersion()) >= 0
                    : "Wrong ready topology version for "
                        + "invalid partitions response [topVer="
                        + topVer
                        + ", req="
                        + req
                        + ']';

                res =
                    new GridNearSingleGetResponse(
                        ctx.cacheId(), req.futureId(), topVer, null, true, req.addDeploymentInfo());
              }
            } catch (NodeStoppingException e) {
              return;
            } catch (IgniteCheckedException e) {
              U.error(log, "Failed processing get request: " + req, e);

              res =
                  new GridNearSingleGetResponse(
                      ctx.cacheId(),
                      req.futureId(),
                      req.topologyVersion(),
                      null,
                      false,
                      req.addDeploymentInfo());

              res.error(e);
            }

            try {
              ctx.io().send(nodeId, res, ctx.ioPolicy());
            } catch (IgniteCheckedException e) {
              U.error(
                  log,
                  "Failed to send get response to node (is node still alive?) [nodeId="
                      + nodeId
                      + ",req="
                      + req
                      + ", res="
                      + res
                      + ']',
                  e);
            }

            sendTtlUpdateRequest(expiryPlc);
          }
        });
  }
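/*
 * The listener above picks one of three response payloads based on the request flags: the whole
 * entry info when needEntryInfo() is set, a value-plus-version pair when needVersion() is set, and
 * the plain value otherwise. A small stand-alone sketch of that selection (EntryInfo and
 * VersionedValue below are hypothetical stand-ins, not the actual Ignite message types):
 */
class GetResponsePayloadSketch {
  /** Hypothetical stand-in for a cache entry's info. */
  static final class EntryInfo {
    final Object value;
    final Object version;

    EntryInfo(Object value, Object version) {
      this.value = value;
      this.version = version;
    }
  }

  /** Hypothetical stand-in for a versioned value pair. */
  static final class VersionedValue {
    final Object value;
    final Object version;

    VersionedValue(Object value, Object version) {
      this.value = value;
      this.version = version;
    }
  }

  /** Mirrors the flag-driven payload choice made before building the response. */
  static Object pickPayload(EntryInfo info, boolean needEntryInfo, boolean needVersion) {
    if (info == null) return null; // Missing entry: empty payload.

    if (needEntryInfo) return info; // Whole entry info (the key is stripped in the original).

    if (needVersion) return new VersionedValue(info.value, info.version); // Value plus version.

    return info.value; // Plain value only.
  }
}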
  /**
   * Starts the exchange: waits for the discovery event, updates cache topology versions, waits for
   * the partition release and lock futures, then sends or spreads partition maps.
   *
   * @throws IgniteInterruptedCheckedException If interrupted.
   */
  public void init() throws IgniteInterruptedCheckedException {
    if (isDone()) return;

    if (init.compareAndSet(false, true)) {
      if (isDone()) return;

      try {
        // Wait for event to occur to make sure that discovery
        // will return corresponding nodes.
        U.await(evtLatch);

        assert discoEvt != null : this;
        assert !dummy && !forcePreload : this;

        ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion());

        oldestNode.set(oldest);

        startCaches();

        // True if the event was caused by a client node (join/leave/failure) or by a client-only cache start.
        boolean clientNodeEvt;

        if (F.isEmpty(reqs)) {
          int type = discoEvt.type();

          assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED
              : discoEvt;

          clientNodeEvt = CU.clientNode(discoEvt.eventNode());
        } else {
          assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt;

          boolean clientOnlyStart = true;

          for (DynamicCacheChangeRequest req : reqs) {
            if (!req.clientStartOnly()) {
              clientOnlyStart = false;

              break;
            }
          }

          clientNodeEvt = clientOnlyStart;
        }

        if (clientNodeEvt) {
          ClusterNode node = discoEvt.eventNode();

          // Client needs to initialize affinity for a local join event or for started client caches.
          if (!node.isLocal()) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
              if (cacheCtx.isLocal()) continue;

              GridDhtPartitionTopology top = cacheCtx.topology();

              top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));

              if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) {
                initTopology(cacheCtx);

                top.beforeExchange(this);
              } else
                cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion());
            }

            if (exchId.isLeft())
              cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

            onDone(exchId.topologyVersion());

            skipPreload = cctx.kernalContext().clientNode();

            return;
          }
        }

        if (cctx.kernalContext().clientNode()) {
          skipPreload = true;

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            GridDhtPartitionTopology top = cacheCtx.topology();

            top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));
          }

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            initTopology(cacheCtx);
          }

          if (oldestNode.get() != null) {
            rmtNodes =
                new ConcurrentLinkedQueue<>(
                    CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

            rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

            ready.set(true);

            initFut.onDone(true);

            if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

            sendPartitions();
          } else onDone(exchId.topologyVersion());

          return;
        }

        assert oldestNode.get() != null;

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) {
            if (cacheCtx
                .discovery()
                .cacheAffinityNodes(cacheCtx.name(), topologyVersion())
                .isEmpty())
              U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex());
          }

          cacheCtx.preloader().onExchangeFutureAdded();
        }

        List<String> cachesWithoutNodes = null;

        if (exchId.isLeft()) {
          for (String name : cctx.cache().cacheNames()) {
            if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) {
              if (cachesWithoutNodes == null) cachesWithoutNodes = new ArrayList<>();

              cachesWithoutNodes.add(name);

              // Fire event even if there is no client cache started.
              if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) {
                Event evt =
                    new CacheEvent(
                        name,
                        cctx.localNode(),
                        cctx.localNode(),
                        "All server nodes have left the cluster.",
                        EventType.EVT_CACHE_NODES_LEFT,
                        0,
                        false,
                        null,
                        null,
                        null,
                        null,
                        false,
                        null,
                        false,
                        null,
                        null,
                        null);

                cctx.gridEvents().record(evt);
              }
            }
          }
        }

        if (cachesWithoutNodes != null) {
          StringBuilder sb =
              new StringBuilder(
                  "All server nodes for the following caches have left the cluster: ");

          for (int i = 0; i < cachesWithoutNodes.size(); i++) {
            String cache = cachesWithoutNodes.get(i);

            sb.append('\'').append(cache).append('\'');

            if (i != cachesWithoutNodes.size() - 1) sb.append(", ");
          }

          U.quietAndWarn(log, sb.toString());

          U.quietAndWarn(log, "Must have server nodes for caches to operate.");
        }

        assert discoEvt != null;

        assert exchId.nodeId().equals(discoEvt.eventNode().id());

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          GridClientPartitionTopology clientTop =
              cctx.exchange().clearClientTopology(cacheCtx.cacheId());

          long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

          // Update before waiting for locks.
          if (!cacheCtx.isLocal())
            cacheCtx
                .topology()
                .updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId()));
        }

        // Grab all alive remote nodes with order equal to or less than that of the last joined node.
        rmtNodes =
            new ConcurrentLinkedQueue<>(
                CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

        rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

        // If received any messages, process them.
        for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet())
          onReceive(m.getKey(), m.getValue());

        // If received any messages, process them.
        for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet())
          onReceive(m.getKey(), m.getValue());

        AffinityTopologyVersion topVer = exchId.topologyVersion();

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Must initialize topology after we get discovery event.
          initTopology(cacheCtx);

          cacheCtx.preloader().updateLastExchangeFuture(this);
        }

        IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer);

        // Assign to a class variable so it is included in the toString() output.
        this.partReleaseFut = partReleaseFut;

        if (log.isDebugEnabled()) log.debug("Before waiting for partition release future: " + this);

        while (true) {
          try {
            partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            // Print pending transactions and locks that might have led to the hang.
            dumpPendingObjects();
          }
        }

        if (log.isDebugEnabled()) log.debug("After waiting for partition release future: " + this);

        if (!F.isEmpty(reqs)) blockGateways();

        if (exchId.isLeft())
          cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

        IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion());

        while (true) {
          try {
            locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            U.warn(
                log,
                "Failed to wait for locks release future. "
                    + "Dumping pending objects that might be the cause: "
                    + cctx.localNodeId());

            U.warn(log, "Locked entries:");

            Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks =
                cctx.mvcc().unfinishedLocks(exchId.topologyVersion());

            for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet())
              U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']');
          }
        }

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Notify replication manager.
          GridCacheContext drCacheCtx =
              cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;

          if (drCacheCtx.isDrEnabled()) drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft());

          // Partition release future is done so we can flush the write-behind store.
          cacheCtx.store().forceFlush();

          // Process queued undeploys prior to sending/spreading map.
          cacheCtx.preloader().unwindUndeploys();

          GridDhtPartitionTopology top = cacheCtx.topology();

          assert topVer.equals(top.topologyVersion())
              : "Topology version is updated only by instances of this class inside the single ExchangeWorker thread.";

          top.beforeExchange(this);
        }

        for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
          top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId()));

          top.beforeExchange(this);
        }
      } catch (IgniteInterruptedCheckedException e) {
        onDone(e);

        throw e;
      } catch (Throwable e) {
        U.error(
            log,
            "Failed to reinitialize local partitions (preloading will be stopped): " + exchId,
            e);

        onDone(e);

        if (e instanceof Error) throw (Error) e;

        return;
      }

      if (F.isEmpty(rmtIds)) {
        onDone(exchId.topologyVersion());

        return;
      }

      ready.set(true);

      initFut.onDone(true);

      if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

      // If this node is not the oldest one.
      if (!oldestNode.get().id().equals(cctx.localNodeId())) sendPartitions();
      else {
        boolean allReceived = allReceived();

        if (allReceived && replied.compareAndSet(false, true)) {
          if (spreadPartitions()) onDone(exchId.topologyVersion());
        }
      }

      scheduleRecheck();
    } else assert false : "Skipped init future: " + this;
  }
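/*
 * Both waits in init() above follow the same pattern: poll a future with a timeout derived from
 * the network timeout and, instead of failing when the timeout fires, dump diagnostics and keep
 * waiting. A generic sketch of that wait-with-diagnostics loop over a plain java.util.concurrent
 * Future (dumpPendingObjects() here is a hypothetical stand-in for the original diagnostics dump):
 */
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

class AwaitWithDiagnosticsSketch {
  /** Waits for the future indefinitely, dumping diagnostics each time the bounded wait times out. */
  static <T> T awaitDumpingDiagnostics(Future<T> fut, long timeoutMs)
      throws InterruptedException, ExecutionException {
    while (true) {
      try {
        return fut.get(timeoutMs, TimeUnit.MILLISECONDS);
      } catch (TimeoutException ignored) {
        // In the exchange future this prints pending transactions and unfinished locks.
        dumpPendingObjects();
      }
    }
  }

  /** Hypothetical stand-in for the diagnostics dump used by the original code. */
  static void dumpPendingObjects() {
    System.out.println("Still waiting for future completion; dumping pending objects...");
  }
}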
  /**
   * Runs SQL queries from multiple threads while an extra node is repeatedly started and stopped,
   * then verifies that the expected rebalance events were received.
   *
   * @throws Exception If failed.
   */
  @SuppressWarnings({"TooBroadScope"})
  public void testRestarts() throws Exception {
    int duration = 60 * 1000;
    int qryThreadNum = 10;
    final long nodeLifeTime = 2 * 1000;
    final int logFreq = 20;

    final IgniteCache<Integer, Integer> cache = grid(0).cache(null);

    assert cache != null;

    for (int i = 0; i < KEY_CNT; i++) cache.put(i, i);

    assertEquals(KEY_CNT, cache.localSize());

    final AtomicInteger qryCnt = new AtomicInteger();

    final AtomicBoolean done = new AtomicBoolean();

    IgniteInternalFuture<?> fut1 =
        multithreadedAsync(
            new CAX() {
              @Override
              public void applyx() throws IgniteCheckedException {
                while (!done.get()) {
                  Collection<Cache.Entry<Integer, Integer>> res =
                      cache.query(new SqlQuery(Integer.class, "_val >= 0")).getAll();

                  assertFalse(res.isEmpty());

                  int c = qryCnt.incrementAndGet();

                  if (c % logFreq == 0) info("Executed queries: " + c);
                }
              }
            },
            qryThreadNum);

    final AtomicInteger restartCnt = new AtomicInteger();

    CollectingEventListener lsnr = new CollectingEventListener();

    for (int i = 0; i < GRID_CNT; i++)
      grid(i).events().localListen(lsnr, EventType.EVT_CACHE_REBALANCE_STOPPED);

    IgniteInternalFuture<?> fut2 =
        multithreadedAsync(
            new Callable<Object>() {
              @SuppressWarnings({"BusyWait"})
              @Override
              public Object call() throws Exception {
                while (!done.get()) {
                  int idx = GRID_CNT;

                  startGrid(idx);

                  Thread.sleep(nodeLifeTime);

                  stopGrid(idx);

                  int c = restartCnt.incrementAndGet();

                  if (c % logFreq == 0) info("Node restarts: " + c);
                }

                return true;
              }
            },
            1);

    Thread.sleep(duration);

    done.set(true);

    fut1.get();
    fut2.get();

    info("Awaiting rebalance events [restartCnt=" + restartCnt.get() + ']');

    boolean success = lsnr.awaitEvents(GRID_CNT * 2 * restartCnt.get(), 15000);

    for (int i = 0; i < GRID_CNT; i++)
      grid(i).events().stopLocalListen(lsnr, EventType.EVT_CACHE_REBALANCE_STOPPED);

    assert success;
  }
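/*
 * The test above counts EVT_CACHE_REBALANCE_STOPPED events through a CollectingEventListener whose
 * implementation is not shown in this excerpt. A minimal sketch of such a counting listener with an
 * awaitEvents(count, timeout) method (hypothetical, not the actual test helper) could look like:
 */
class CountingEventListenerSketch {
  private int cnt;

  /** Call from the event callback (e.g. inside an IgnitePredicate&lt;Event&gt; apply method). */
  public synchronized void onEvent() {
    cnt++;

    notifyAll();
  }

  /** Waits until at least {@code expected} events have been counted or the timeout elapses. */
  public synchronized boolean awaitEvents(int expected, long timeoutMs) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;

    while (cnt < expected) {
      long left = deadline - System.currentTimeMillis();

      if (left <= 0) return false;

      wait(left);
    }

    return true;
  }
}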