    /** {@inheritDoc} */
    @Override
    protected void body() throws InterruptedException, IgniteInterruptedCheckedException {
      if (log.isDebugEnabled()) log.debug("GC worker started.");

      File workTokDir = tokDir.getParentFile();

      assert workTokDir != null;

      boolean lastRunNeeded = true;

      while (true) {
        try {
          // Sleep between iterations, except for the final pass after cancellation.
          if (lastRunNeeded) Thread.sleep(GC_FREQ);
        } catch (InterruptedException ignored) {
          // No-op.
        }

        if (log.isDebugEnabled()) log.debug("Starting GC iteration.");

        cleanupResources(workTokDir);

        // Process spaces created by this endpoint.
        if (log.isDebugEnabled()) log.debug("Processing local spaces.");

        for (Iterator<IpcSharedMemoryClientEndpoint> it = endpoints.iterator(); it.hasNext(); ) {
          IpcSharedMemoryClientEndpoint e = it.next();

          if (log.isDebugEnabled()) log.debug("Processing endpoint: " + e);

          if (!e.checkOtherPartyAlive()) {
            // Remove through the iterator so removal is safe even if 'endpoints' is not a
            // concurrent collection.
            it.remove();

            if (log.isDebugEnabled()) log.debug("Removed endpoint: " + e);
          }
        }

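        // Two-phase shutdown: when cancellation is first observed, clear the interrupt
        // flag and run one more sleep-free GC pass; on the next pass restore the
        // interrupt status and exit.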
        if (isCancelled()) {
          if (lastRunNeeded) {
            lastRunNeeded = false;

            // Clear interrupted status.
            Thread.interrupted();
          } else {
            Thread.currentThread().interrupt();

            break;
          }
        }
      }
    }
    /** @param workTokDir Token directory (common for multiple nodes). */
    private void cleanupResources(File workTokDir) {
      RandomAccessFile lockFile = null;

      FileLock lock = null;

      try {
        lockFile = new RandomAccessFile(new File(workTokDir, LOCK_FILE_NAME), "rw");

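        // FileChannel.lock() blocks until the exclusive lock is acquired and never returns
        // null; a lock already held within this JVM surfaces as OverlappingFileLockException,
        // handled below, so the null check is purely defensive.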
        lock = lockFile.getChannel().lock();

        if (lock != null) processTokenDirectory(workTokDir);
        else if (log.isDebugEnabled())
          log.debug(
              "Token directory is being processed concurrently: " + workTokDir.getAbsolutePath());
      } catch (OverlappingFileLockException ignored) {
        if (log.isDebugEnabled())
          log.debug(
              "Token directory is being processed concurrently: " + workTokDir.getAbsolutePath());
      } catch (FileLockInterruptionException ignored) {
        Thread.currentThread().interrupt();
      } catch (IOException e) {
        U.error(log, "Failed to process directory: " + workTokDir.getAbsolutePath(), e);
      } finally {
        U.releaseQuiet(lock);
        U.closeQuiet(lockFile);
      }
    }
  /**
   * @param key Removed key.
   * @param ver Removed version.
   * @throws IgniteCheckedException If failed.
   */
  public void onDeferredDelete(KeyCacheObject key, GridCacheVersion ver)
      throws IgniteCheckedException {
    try {
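      // rmvQueue is assumed to be a bounded buffer: adding an entry may evict the
      // oldest one, which then also has to be removed from the DHT cache.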
      T2<KeyCacheObject, GridCacheVersion> evicted = rmvQueue.add(new T2<>(key, ver));

      if (evicted != null) cctx.dht().removeVersionedEntry(evicted.get1(), evicted.get2());
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();

      throw new IgniteInterruptedCheckedException(e);
    }
  }
  /** {@inheritDoc} */
  @Override
  public void close() {
    closed = true;

    U.closeQuiet(srvSock);

    if (gcWorker != null) {
      U.cancel(gcWorker);

      // This method may be called from already interrupted thread.
      // Need to ensure cleaning on close.
      boolean interrupted = Thread.interrupted();

      try {
        U.join(gcWorker);
      } catch (IgniteInterruptedCheckedException e) {
        U.warn(log, "Interrupted when stopping GC worker.", e);
      } finally {
        if (interrupted) Thread.currentThread().interrupt();
      }
    }
  }
  /** {@inheritDoc} */
  @Override
  public R get(long timeout, TimeUnit unit) throws IgniteCheckedException {
    A.ensure(timeout >= 0, "timeout cannot be negative: " + timeout);
    A.notNull(unit, "unit");

    try {
      return get0(unit.toNanos(timeout));
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();

      throw new IgniteInterruptedCheckedException(
          "Got interrupted while waiting for future to complete.", e);
    }
  }
  /**
   * JUnit.
   *
   * @throws Exception If failed.
   */
  public void testPutGetUnbounded() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    // Capacity 0 creates an unbounded queue, as the test name implies.
    IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));

    String thName = Thread.currentThread().getName();

    for (int i = 0; i < 5; i++) {
      queue.put(thName);
      queue.peek();
      queue.take();
    }

    assert queue.isEmpty() : queue.size();
  }
  /**
   * Processes cache query request.
   *
   * @param sndId Sender node id.
   * @param req Query request.
   */
  @SuppressWarnings("unchecked")
  @Override
  void processQueryRequest(UUID sndId, GridCacheQueryRequest req) {
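    // Cancellation request: remember its id so a late query with the same id is ignored,
    // and drop any result already accumulated for it.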
    if (req.cancel()) {
      cancelIds.add(new CancelMessageId(req.id(), sndId));

      if (req.fields()) removeFieldsQueryResult(sndId, req.id());
      else removeQueryResult(sndId, req.id());
    } else {
      if (!cancelIds.contains(new CancelMessageId(req.id(), sndId))) {
        if (!F.eq(req.cacheName(), cctx.name())) {
          GridCacheQueryResponse res =
              new GridCacheQueryResponse(
                  cctx.cacheId(),
                  req.id(),
                  new IgniteCheckedException(
                      "Received request for incorrect cache [expected="
                          + cctx.name()
                          + ", actual="
                          + req.cacheName()
                          + ']'));

          sendQueryResponse(sndId, res, 0);
        } else {
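          // Track the executing thread (presumably so a cancel request can interrupt it).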
          threads.put(req.id(), Thread.currentThread());

          try {
            GridCacheQueryInfo info = distributedQueryInfo(sndId, req);

            if (info == null) return;

            if (req.fields()) runFieldsQuery(info);
            else runQuery(info);
          } catch (Throwable e) {
            U.error(log(), "Failed to run query.", e);

            sendQueryResponse(
                sndId, new GridCacheQueryResponse(cctx.cacheId(), req.id(), e.getCause()), 0);

            if (e instanceof Error) throw (Error) e;
          } finally {
            threads.remove(req.id());
          }
        }
      }
    }
  }
  /**
   * JUnit.
   *
   * @throws Exception If failed.
   */
  public void testPutRemoveUnbounded() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));

    String thread = Thread.currentThread().getName();

    for (int i = 0; i < QUEUE_CAPACITY; i++) queue.put(thread);

    info("Finished loop 1: " + thread);

    queue.clear();

    info("Cleared queue 1: " + thread);

    assert queue.isEmpty() : "Queue must be empty. " + queue.size();
  }
  /** {@inheritDoc} */
  @Override
  public R get() throws IgniteCheckedException {
    try {
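      // endTime is presumed to be set only once the future completes, so zero means we
      // still need to block until a result or cancellation arrives.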
      if (endTime == 0) {
        if (ignoreInterrupts) acquireShared(0);
        else acquireSharedInterruptibly(0);
      }

      if (getState() == CANCELLED)
        throw new IgniteFutureCancelledCheckedException("Future was cancelled: " + this);

      assert resFlag != 0;

      if (resFlag == ERR) throw U.cast((Throwable) res);

      return (R) res;
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();

      throw new IgniteInterruptedCheckedException(e);
    }
  }
  /**
   * JUnit.
   *
   * @throws Exception If failed.
   */
  @SuppressWarnings({"TooBroadScope"})
  public void testRestarts() throws Exception {
    int duration = 60 * 1000;
    int qryThreadNum = 10;
    final long nodeLifeTime = 2 * 1000;
    final int logFreq = 20;

    final IgniteCache<Integer, Integer> cache = grid(0).cache(null);

    assert cache != null;

    for (int i = 0; i < KEY_CNT; i++) cache.put(i, i);

    assertEquals(KEY_CNT, cache.localSize());

    final AtomicInteger qryCnt = new AtomicInteger();

    final AtomicBoolean done = new AtomicBoolean();

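    // Query threads: keep executing SQL queries against the cache while nodes restart.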
    IgniteInternalFuture<?> fut1 =
        multithreadedAsync(
            new CAX() {
              @Override
              public void applyx() throws IgniteCheckedException {
                while (!done.get()) {
                  Collection<Cache.Entry<Integer, Integer>> res =
                      cache.query(new SqlQuery(Integer.class, "_val >= 0")).getAll();

                  assertFalse(res.isEmpty());

                  int c = qryCnt.incrementAndGet();

                  if (c % logFreq == 0) info("Executed queries: " + c);
                }
              }
            },
            qryThreadNum);

    final AtomicInteger restartCnt = new AtomicInteger();

    CollectingEventListener lsnr = new CollectingEventListener();

    for (int i = 0; i < GRID_CNT; i++)
      grid(i).events().localListen(lsnr, EventType.EVT_CACHE_REBALANCE_STOPPED);

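    // Restart thread: repeatedly starts and stops an extra node to trigger rebalancing.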
    IgniteInternalFuture<?> fut2 =
        multithreadedAsync(
            new Callable<Object>() {
              @SuppressWarnings({"BusyWait"})
              @Override
              public Object call() throws Exception {
                while (!done.get()) {
                  int idx = GRID_CNT;

                  startGrid(idx);

                  Thread.sleep(nodeLifeTime);

                  stopGrid(idx);

                  int c = restartCnt.incrementAndGet();

                  if (c % logFreq == 0) info("Node restarts: " + c);
                }

                return true;
              }
            },
            1);

    Thread.sleep(duration);

    done.set(true);

    fut1.get();
    fut2.get();

    info("Awaiting rebalance events [restartCnt=" + restartCnt.get() + ']');

    boolean success = lsnr.awaitEvents(GRID_CNT * 2 * restartCnt.get(), 15000);

    for (int i = 0; i < GRID_CNT; i++)
      grid(i).events().stopLocalListen(lsnr, EventType.EVT_CACHE_REBALANCE_STOPPED);

    assert success;
  }
  /**
   * JUnit.
   *
   * @throws Exception If failed.
   */
  public void testQueueRemoveMultithreadBounded() throws Exception {
    // Random queue name.
    final String queueName = UUID.randomUUID().toString();

    final IgniteQueue<String> queue = grid(0).queue(queueName, QUEUE_CAPACITY, config(false));

    final CountDownLatch putLatch = new CountDownLatch(THREAD_NUM);

    final CountDownLatch clearLatch = new CountDownLatch(THREAD_NUM);

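    // Producer threads: each blocks on offer() into the bounded queue and is expected to
    // fail once the queue is removed by the closer threads started below.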
    for (int t = 0; t < THREAD_NUM; t++) {
      Thread th =
          new Thread(
              new Runnable() {
                @Override
                public void run() {
                  if (log.isDebugEnabled())
                    log.debug("Thread has been started: " + Thread.currentThread().getName());

                  try {
                    // The thread should eventually block on offer() once the bounded queue fills up.
                    for (int i = 0; i < (QUEUE_CAPACITY * THREAD_NUM); i++)
                      queue.offer("anything", 3, TimeUnit.MINUTES);

                    fail("Offer must fail because the queue is expected to be removed.");
                  } catch (IgniteException | IllegalStateException e) {
                    putLatch.countDown();

                    assert e.getMessage().contains("removed");

                    assert queue.removed();
                  }

                  if (log.isDebugEnabled())
                    log.debug("Thread has been stopped: " + Thread.currentThread().getName());
                }
              });
      th.start();
    }

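    // Closer threads: concurrently close (remove) the queue, releasing the blocked producers.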
    for (int t = 0; t < THREAD_NUM; t++) {
      Thread th =
          new Thread(
              new Runnable() {
                @Override
                public void run() {
                  try {
                    IgniteQueue<String> queue = grid(0).queue(queueName, 0, null);

                    if (queue != null) queue.close();
                  } catch (Exception e) {
                    fail("Unexpected exception: " + e);
                  } finally {
                    clearLatch.countDown();
                  }
                }
              });
      th.start();
    }

    assert putLatch.await(3, TimeUnit.MINUTES);

    assert clearLatch.await(3, TimeUnit.MINUTES);

    try {
      assert queue.isEmpty() : queue.size();

      fail("Queue must be removed.");
    } catch (IgniteException | IllegalStateException e) {
      assert e.getMessage().contains("removed");

      assert queue.removed();
    }
  }
  /**
   * Sends cache query response.
   *
   * @param nodeId ID of the node to send the response to.
   * @param res Cache query response.
   * @param timeout Message timeout.
   * @return {@code true} if response was sent, {@code false} otherwise.
   */
  private boolean sendQueryResponse(UUID nodeId, GridCacheQueryResponse res, long timeout) {
    ClusterNode node = cctx.node(nodeId);

    if (node == null) return false;

    int attempt = 1;

    IgniteCheckedException err = null;

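    // Retry loop: attempt to send the response up to RESEND_ATTEMPTS times, pausing for
    // RESEND_FREQ between attempts, unless the thread is interrupted or the node leaves.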
    while (!Thread.currentThread().isInterrupted()) {
      try {
        if (log.isDebugEnabled()) log.debug("Sending query response: " + res);

        Object topic = topic(nodeId, res.requestId());

        cctx.io()
            .sendOrderedMessage(
                node, topic, res, cctx.ioPolicy(), timeout > 0 ? timeout : Long.MAX_VALUE);

        return true;
      } catch (ClusterTopologyCheckedException ignored) {
        if (log.isDebugEnabled())
          log.debug(
              "Failed to send query response since node left grid [nodeId="
                  + nodeId
                  + ", res="
                  + res
                  + "]");

        return false;
      } catch (IgniteCheckedException e) {
        if (err == null) err = e;

        if (Thread.currentThread().isInterrupted()) break;

        if (attempt < RESEND_ATTEMPTS) {
          if (log.isDebugEnabled())
            log.debug(
                "Failed to send query response (will retry) [nodeId="
                    + nodeId
                    + ", res="
                    + res
                    + ", attempt="
                    + attempt
                    + ", err="
                    + e
                    + "]");

          if (!Thread.currentThread().isInterrupted())
            try {
              U.sleep(RESEND_FREQ);
            } catch (IgniteInterruptedCheckedException e1) {
              U.error(
                  log,
                  "Waiting to resend query response was interrupted (response will not be sent) "
                      + "[nodeId="
                      + nodeId
                      + ", response="
                      + res
                      + "]",
                  e1);

              return false;
            }
        } else {
          U.error(
              log,
              "Failed to send cache response [nodeId=" + nodeId + ", response=" + res + "]",
              err);

          return false;
        }
      }

      attempt++;
    }

    return false;
  }
  /**
   * @param cacheMode Cache mode.
   * @param sameAff If {@code false}, the two caches use different numbers of partitions.
   * @param concurrency Transaction concurrency.
   * @param isolation Transaction isolation.
   * @throws Exception If failed.
   */
  private void crossCacheTxFailover(
      CacheMode cacheMode,
      boolean sameAff,
      final TransactionConcurrency concurrency,
      final TransactionIsolation isolation)
      throws Exception {
    IgniteKernal ignite0 = (IgniteKernal) ignite(0);

    final AtomicBoolean stop = new AtomicBoolean();

    try {
      ignite0.createCache(cacheConfiguration(CACHE1, cacheMode, 256));
      ignite0.createCache(cacheConfiguration(CACHE2, cacheMode, sameAff ? 256 : 128));

      final AtomicInteger threadIdx = new AtomicInteger();

      IgniteInternalFuture<?> fut =
          GridTestUtils.runMultiThreadedAsync(
              new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                  int idx = threadIdx.getAndIncrement();

                  Ignite ignite = ignite(idx % GRID_CNT);

                  log.info(
                      "Started update thread [node="
                          + ignite.name()
                          + ", client="
                          + ignite.configuration().isClientMode()
                          + ']');

                  IgniteCache<TestKey, TestValue> cache1 = ignite.cache(CACHE1);
                  IgniteCache<TestKey, TestValue> cache2 = ignite.cache(CACHE2);

                  assertNotSame(cache1, cache2);

                  IgniteTransactions txs = ignite.transactions();

                  ThreadLocalRandom rnd = ThreadLocalRandom.current();

                  long iter = 0;

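                  // Update loop: each transaction touches both caches, either with the
                  // same key or with two related keys; failures are expected while the
                  // topology changes and are only logged.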
                  while (!stop.get()) {
                    boolean sameKey = rnd.nextBoolean();

                    try {
                      try (Transaction tx = txs.txStart(concurrency, isolation)) {
                        if (sameKey) {
                          TestKey key = new TestKey(rnd.nextLong(KEY_RANGE));

                          cacheOperation(rnd, cache1, key);
                          cacheOperation(rnd, cache2, key);
                        } else {
                          TestKey key1 = new TestKey(rnd.nextLong(KEY_RANGE));
                          TestKey key2 = new TestKey(key1.key() + 1);

                          cacheOperation(rnd, cache1, key1);
                          cacheOperation(rnd, cache2, key2);
                        }

                        tx.commit();
                      }
                    } catch (CacheException | IgniteException e) {
                      log.info("Update error: " + e);
                    }

                    if (iter++ % 500 == 0) log.info("Iteration: " + iter);
                  }

                  return null;
                }

                /**
                 * @param rnd Random.
                 * @param cache Cache.
                 * @param key Key.
                 */
                private void cacheOperation(
                    ThreadLocalRandom rnd, IgniteCache<TestKey, TestValue> cache, TestKey key) {
                  switch (rnd.nextInt(4)) {
                    case 0:
                      cache.put(key, new TestValue(rnd.nextLong()));

                      break;

                    case 1:
                      cache.remove(key);

                      break;

                    case 2:
                      cache.invoke(key, new TestEntryProcessor(rnd.nextBoolean() ? 1L : null));

                      break;

                    case 3:
                      cache.get(key);

                      break;

                    default:
                      assert false;
                  }
                }
              },
              10,
              "tx-thread");

      long stopTime = System.currentTimeMillis() + 3 * 60_000;

      long topVer = ignite0.cluster().topologyVersion();

      boolean failed = false;

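      // Topology churn: start an extra node, wait for affinity to settle, then stop it
      // again, while the update threads keep working.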
      while (System.currentTimeMillis() < stopTime) {
        log.info("Start node.");

        IgniteKernal ignite = (IgniteKernal) startGrid(GRID_CNT);

        assertFalse(ignite.configuration().isClientMode());

        topVer++;

        IgniteInternalFuture<?> affFut =
            ignite
                .context()
                .cache()
                .context()
                .exchange()
                .affinityReadyFuture(new AffinityTopologyVersion(topVer));

        try {
          if (affFut != null) affFut.get(30_000);
        } catch (IgniteFutureTimeoutCheckedException e) {
          log.error("Failed to wait for affinity future after start: " + topVer, e);

          failed = true;

          break;
        }

        Thread.sleep(500);

        log.info("Stop node.");

        stopGrid(GRID_CNT);

        topVer++;

        affFut =
            ignite0
                .context()
                .cache()
                .context()
                .exchange()
                .affinityReadyFuture(new AffinityTopologyVersion(topVer));

        try {
          if (affFut != null) affFut.get(30_000);
        } catch (IgniteFutureTimeoutCheckedException e) {
          log.error("Failed to wait for affinity future after stop: " + topVer, e);

          failed = true;

          break;
        }
      }

      stop.set(true);

      fut.get();

      assertFalse("Test failed, see log for details.", failed);
    } finally {
      stop.set(true);

      ignite0.destroyCache(CACHE1);
      ignite0.destroyCache(CACHE2);

      awaitPartitionMapExchange();
    }
  }
  /** {@inheritDoc} */
  @SuppressWarnings("ErrorNotRethrown")
  @Override
  public IpcEndpoint accept() throws IgniteCheckedException {
    while (!Thread.currentThread().isInterrupted()) {
      Socket sock = null;

      boolean accepted = false;

      try {
        sock = srvSock.accept();

        accepted = true;

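        // Note: the ObjectInputStream constructor blocks until the stream header arrives;
        // the client is assumed to create and flush its ObjectOutputStream first.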
        InputStream inputStream = sock.getInputStream();
        ObjectInputStream in = new ObjectInputStream(inputStream);

        ObjectOutputStream out = new ObjectOutputStream(sock.getOutputStream());

        IpcSharedMemorySpace inSpace = null;

        IpcSharedMemorySpace outSpace = null;

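        // Assume failure until the client explicitly acknowledges the response
        // (see in.readBoolean() below).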
        boolean err = true;

        try {
          IpcSharedMemoryInitRequest req = (IpcSharedMemoryInitRequest) in.readObject();

          if (log.isDebugEnabled()) log.debug("Processing request: " + req);

          IgnitePair<String> p = inOutToken(req.pid(), size);

          String file1 = p.get1();
          String file2 = p.get2();

          assert file1 != null;
          assert file2 != null;

          // Create tokens.
          new File(file1).createNewFile();
          new File(file2).createNewFile();

          if (log.isDebugEnabled()) log.debug("Created token files: " + p);

          inSpace = new IpcSharedMemorySpace(file1, req.pid(), pid, size, true, log);

          outSpace = new IpcSharedMemorySpace(file2, pid, req.pid(), size, false, log);

          IpcSharedMemoryClientEndpoint ret =
              new IpcSharedMemoryClientEndpoint(inSpace, outSpace, log);

          out.writeObject(
              new IpcSharedMemoryInitResponse(
                  file2, outSpace.sharedMemoryId(), file1, inSpace.sharedMemoryId(), pid, size));

          err = !in.readBoolean();

          endpoints.add(ret);

          return ret;
        } catch (UnsatisfiedLinkError e) {
          throw IpcSharedMemoryUtils.linkError(e);
        } catch (IOException e) {
          if (log.isDebugEnabled())
            log.debug(
                "Failed to process incoming connection "
                    + "(the connection may have been closed by the other party): "
                    + e.getMessage());
        } catch (ClassNotFoundException e) {
          U.error(log, "Failed to process incoming connection.", e);
        } catch (ClassCastException e) {
          String msg =
              "Failed to process incoming connection (most probably, shared memory "
                  + "rest endpoint has been configured by mistake).";

          LT.warn(log, null, msg);

          sendErrorResponse(out, e);
        } catch (IpcOutOfSystemResourcesException e) {
          if (!omitOutOfResourcesWarn) LT.warn(log, null, OUT_OF_RESOURCES_MSG);

          sendErrorResponse(out, e);
        } catch (IgniteCheckedException e) {
          LT.error(log, e, "Failed to process incoming shared memory connection.");

          sendErrorResponse(out, e);
        } finally {
          // Exception has been thrown, need to free system resources.
          if (err) {
            if (inSpace != null) inSpace.forceClose();

            // Safety.
            if (outSpace != null) outSpace.forceClose();
          }
        }
      } catch (IOException e) {
        if (!Thread.currentThread().isInterrupted() && !accepted)
          throw new IgniteCheckedException("Failed to accept incoming connection.", e);

        if (!closed)
          LT.error(
              log, null, "Failed to process incoming shared memory connection: " + e.getMessage());
      } finally {
        U.closeQuiet(sock);
      }
    } // while

    throw new IgniteInterruptedCheckedException("Socket accept was interrupted.");
  }