/**
   * @param key Key.
   * @param val Value.
   * @param needVer If {@code true}, the loaded value arrives wrapped in a {@code T2}
   *     pair together with its version.
   * @param skipVals Skip values flag: when set, the loader supplies presence flags
   *     instead of actual values.
   * @param c Closure invoked with the key, value and version of each loaded entry.
   */
  @SuppressWarnings("unchecked")
  private void processLoaded(
      KeyCacheObject key,
      @Nullable Object val,
      boolean needVer,
      boolean skipVals,
      GridInClosure3<KeyCacheObject, Object, GridCacheVersion> c) {
    if (val != null) {
      Object v;
      GridCacheVersion ver;

      if (needVer) {
        T2<Object, GridCacheVersion> t = (T2<Object, GridCacheVersion>) val;

        v = t.get1();
        ver = t.get2();
      } else {
        v = val;
        ver = null;
      }

      // With skipVals the loaded value is a presence flag: Boolean.FALSE means absent.
      if (skipVals && Boolean.FALSE.equals(v))
        c.apply(key, null, IgniteTxEntry.SER_READ_EMPTY_ENTRY_VER);
      else c.apply(key, v, ver);
    } else c.apply(key, null, IgniteTxEntry.SER_READ_EMPTY_ENTRY_VER);
  }
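  /*
   * Hypothetical usage sketch (caller-side names are assumptions, not part of
   * this class): when needVer is true the loader supplies the value wrapped in
   * a T2 pair together with its version, otherwise the bare value is passed
   * through and the version is reported as null.
   *
   *   GridInClosure3<KeyCacheObject, Object, GridCacheVersion> c = ...; // Collects results.
   *
   *   processLoaded(key, new T2<>(rawVal, ver), true, false, c);  // Versioned load.
   *   processLoaded(key, rawVal, false, false, c);                // Plain load.
   */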
  /**
   * @param key Removed key.
   * @param ver Removed version.
   * @throws IgniteCheckedException If failed.
   */
  public void onDeferredDelete(KeyCacheObject key, GridCacheVersion ver)
      throws IgniteCheckedException {
    try {
      T2<KeyCacheObject, GridCacheVersion> evicted = rmvQueue.add(new T2<>(key, ver));

      if (evicted != null) cctx.dht().removeVersionedEntry(evicted.get1(), evicted.get2());
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();

      throw new IgniteInterruptedCheckedException(e);
    }
  }
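  /*
   * Deferred-delete flow, a minimal sketch assuming rmvQueue is a bounded
   * buffer whose add() returns the evicted oldest element once capacity is
   * reached (and null until then):
   *
   *   onDeferredDelete(keyA, verA);   // Queued, nothing evicted yet.
   *   // ... more deletes fill the queue ...
   *   onDeferredDelete(keyZ, verZ);   // Queue full: the oldest pair (keyA, verA)
   *                                   // is evicted and its versioned entry is
   *                                   // removed from the DHT cache.
   */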
  /** {@inheritDoc} */
  @Override
  public void onDeferredDelete(GridCacheEntryEx entry, GridCacheVersion ver) {
    assert entry.isNear();

    try {
      T2<KeyCacheObject, GridCacheVersion> evicted = rmvQueue.add(new T2<>(entry.key(), ver));

      if (evicted != null) removeVersionedEntry(evicted.get1(), evicted.get2());
    } catch (InterruptedException ignore) {
      if (log.isDebugEnabled())
        log.debug("Failed to enqueue deleted entry [key=" + entry.key() + ", ver=" + ver + ']');

      Thread.currentThread().interrupt();
    }
  }
  /**
   * @param name URI passed to constructor.
   * @param cfg Configuration passed to constructor.
   * @throws IOException If initialization failed.
   */
  @SuppressWarnings("ConstantConditions")
  private void initialize(URI name, Configuration cfg) throws IOException {
    enterBusy();

    try {
      if (rmtClient != null)
        throw new IOException("File system is already initialized: " + rmtClient);

      A.notNull(name, "name");
      A.notNull(cfg, "cfg");

      if (!IGFS_SCHEME.equals(name.getScheme()))
        throw new IOException(
            "Illegal file system URI [expected="
                + IGFS_SCHEME
                + "://[name]/[optional_path], actual="
                + name
                + ']');

      uriAuthority = name.getAuthority();

      // Override sequential reads before prefetch if needed.
      seqReadsBeforePrefetch =
          parameter(cfg, PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);

      if (seqReadsBeforePrefetch > 0) seqReadsBeforePrefetchOverride = true;

      // In Ignite, the replication factor is controlled by data cache affinity.
      // We use the replication factor to force the whole file to be stored on the local node.
      dfltReplication = (short) cfg.getInt("dfs.replication", 3);

      // Get file colocation control flag.
      colocateFileWrites = parameter(cfg, PARAM_IGFS_COLOCATED_WRITES, uriAuthority, false);
      preferLocFileWrites = cfg.getBoolean(PARAM_IGFS_PREFER_LOCAL_WRITES, false);

      // Get log directory.
      String logDirCfg = parameter(cfg, PARAM_IGFS_LOG_DIR, uriAuthority, DFLT_IGFS_LOG_DIR);

      File logDirFile = U.resolveIgnitePath(logDirCfg);

      String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null;

      rmtClient = new HadoopIgfsWrapper(uriAuthority, logDir, cfg, LOG, user);

      // Handshake.
      IgfsHandshakeResponse handshake = rmtClient.handshake(logDir);

      grpBlockSize = handshake.blockSize();

      IgfsPaths paths = handshake.secondaryPaths();

      Boolean logEnabled = parameter(cfg, PARAM_IGFS_LOG_ENABLED, uriAuthority, false);

      if (handshake.sampling() != null ? handshake.sampling() : logEnabled) {
        // Initialize the client logger.
        if (logDir == null) throw new IOException("Failed to resolve log directory: " + logDirCfg);

        Integer batchSize =
            parameter(cfg, PARAM_IGFS_LOG_BATCH_SIZE, uriAuthority, DFLT_IGFS_LOG_BATCH_SIZE);

        clientLog = IgfsLogger.logger(uriAuthority, handshake.igfsName(), logDir, batchSize);
      } else clientLog = IgfsLogger.disabledLogger();

      try {
        modeRslvr = new IgfsModeResolver(paths.defaultMode(), paths.pathModes());
      } catch (IgniteCheckedException ice) {
        throw new IOException(ice);
      }

      boolean initSecondary = paths.defaultMode() == PROXY;

      if (!initSecondary && paths.pathModes() != null) {
        for (T2<IgfsPath, IgfsMode> pathMode : paths.pathModes()) {
          IgfsMode mode = pathMode.getValue();

          if (mode == PROXY) {
            initSecondary = true;

            break;
          }
        }
      }

      if (initSecondary) {
        try {
          factory = (HadoopFileSystemFactory) paths.getPayload(getClass().getClassLoader());
        } catch (IgniteCheckedException e) {
          throw new IOException("Failed to get secondary file system factory.", e);
        }

        if (factory == null)
          throw new IOException(
              "Failed to get secondary file system factory (did you set "
                  + IgniteHadoopIgfsSecondaryFileSystem.class.getName()
                  + " as \"secondaryFIleSystem\" in "
                  + FileSystemConfiguration.class.getName()
                  + "?)");

        assert factory != null;

        if (factory instanceof LifecycleAware) ((LifecycleAware) factory).start();

        try {
          FileSystem secFs = factory.get(user);

          secondaryUri = secFs.getUri();

          A.ensure(secondaryUri != null, "Secondary file system URI should not be null.");
        } catch (IOException e) {
          throw new IOException(
              "Failed to connect to the secondary file system: " + secondaryUri, e);
        }
      }
    } finally {
      leaveBusy();
    }
  }
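  /*
   * Minimal initialization sketch (host and authority values are illustrative
   * assumptions): the URI must use the IGFS scheme, and its authority selects
   * per-file-system configuration overrides resolved by
   * parameter(cfg, ..., authority, dflt).
   *
   *   Configuration cfg = new Configuration();
   *   URI name = new URI("igfs://igfs@myhost/");
   *
   *   initialize(name, cfg);   // Handshake, mode resolution, logger and secondary FS wiring.
   */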
  /**
   * Executes one test iteration.
   *
   * @throws Exception If failed.
   */
  private void doTestFailover() throws Exception {
    try {
      done = false;

      nodes = new AtomicReferenceArray<>(GRID_CNT);

      startGridsMultiThreaded(GRID_CNT, false);

      for (int i = 0; i < GRID_CNT; i++) assertTrue(nodes.compareAndSet(i, null, ignite(i)));

      List<IgniteInternalFuture> futs = new ArrayList<>();

      for (int i = 0; i < GRID_CNT + 1; i++) {
        futs.add(
            multithreadedAsync(
                new Runnable() {
                  @Override
                  public void run() {
                    T2<Ignite, Integer> ignite;

                    while ((ignite = randomNode()) != null) {
                      IgniteCache<Object, Object> cache = ignite.get1().cache(null);

                      for (int i = 0; i < 100; i++)
                        cache.containsKey(ThreadLocalRandom.current().nextInt(100_000));

                      assertTrue(nodes.compareAndSet(ignite.get2(), null, ignite.get1()));

                      try {
                        Thread.sleep(ThreadLocalRandom.current().nextLong(50));
                      } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                      }
                    }
                  }
                },
                1,
                "containsKey-thread-" + i));

        futs.add(
            multithreadedAsync(
                new Runnable() {
                  @Override
                  public void run() {
                    T2<Ignite, Integer> ignite;

                    while ((ignite = randomNode()) != null) {
                      IgniteCache<Object, Object> cache = ignite.get1().cache(null);

                      for (int i = 0; i < 100; i++)
                        cache.put(ThreadLocalRandom.current().nextInt(100_000), UUID.randomUUID());

                      assertTrue(nodes.compareAndSet(ignite.get2(), null, ignite.get1()));

                      try {
                        Thread.sleep(ThreadLocalRandom.current().nextLong(50));
                      } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                      }
                    }
                  }
                },
                1,
                "put-thread-" + i));
      }

      try {
        int aliveGrids = GRID_CNT;

        while (aliveGrids > 0) {
          T2<Ignite, Integer> ignite = randomNode();

          assert ignite != null;

          Ignite ignite0 = ignite.get1();

          log.info("Stop node: " + ignite0.name());

          ignite0.close();

          log.info("Node stop finished: " + ignite0.name());

          aliveGrids--;
        }
      } finally {
        done = true;
      }

      for (IgniteInternalFuture fut : futs) fut.get();
    } finally {
      done = true;
    }
  }
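  /*
   * Assumed contract of randomNode() (its definition is outside this section):
   * atomically claim a random live node slot so that no two workers, nor the
   * stopping loop, operate on the same node concurrently; return null once the
   * test is done. A minimal sketch under that assumption:
   *
   *   private T2<Ignite, Integer> randomNode() {
   *     while (!done) {
   *       int idx = ThreadLocalRandom.current().nextInt(GRID_CNT);
   *       Ignite node = nodes.getAndSet(idx, null);   // Claim the slot.
   *       if (node != null)
   *         return new T2<>(node, idx);               // Caller hands the slot back via compareAndSet.
   *     }
   *     return null;
   *   }
   */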
  /**
   * @param key Key.
   * @param part Partition.
   * @param locVals Local values.
   * @return {@code True} if there is no need to search for the value further.
   */
  private boolean localGet(KeyCacheObject key, int part, Map<K, V> locVals) {
    assert cctx.affinityNode() : this;

    GridDhtCacheAdapter<K, V> cache = cache();

    while (true) {
      GridCacheEntryEx entry;

      try {
        entry = cache.context().isSwapOrOffheapEnabled() ? cache.entryEx(key) : cache.peekEx(key);

        // If our DHT cache has the value, peek it.
        if (entry != null) {
          boolean isNew = entry.isNewLocked();

          CacheObject v = null;
          GridCacheVersion ver = null;

          if (needVer) {
            T2<CacheObject, GridCacheVersion> res =
                entry.innerGetVersioned(
                    null,
                    null,
                    /*swap*/ true,
                    /*unmarshal*/ true,
                    /*update-metrics*/ false,
                    /*event*/ !skipVals,
                    subjId,
                    null,
                    taskName,
                    expiryPlc,
                    !deserializeBinary);

            if (res != null) {
              v = res.get1();
              ver = res.get2();
            }
          } else {
            v =
                entry.innerGet(
                    null,
                    null,
                    /*swap*/ true,
                    /*read-through*/ false,
                    /*update-metrics*/ false,
                    /*event*/ !skipVals,
                    /*temporary*/ false,
                    subjId,
                    null,
                    taskName,
                    expiryPlc,
                    !deserializeBinary);
          }

          cache.context().evicts().touch(entry, topVer);

          // Entry was not in memory or swap; remove it from the cache if it is still new and empty.
          if (v == null) {
            if (isNew && entry.markObsoleteIfEmpty(ver)) cache.removeEntry(entry);
          } else {
            cctx.addResult(
                locVals, key, v, skipVals, keepCacheObjects, deserializeBinary, true, ver);

            return true;
          }
        }

        boolean topStable = cctx.isReplicated() || topVer.equals(cctx.topology().topologyVersion());

        // Entry not found, do not continue search if topology did not change and there is no store.
        if (!cctx.readThroughConfigured() && (topStable || partitionOwned(part))) {
          if (!skipVals && cctx.config().isStatisticsEnabled()) cache.metrics0().onRead(false);

          return true;
        }

        return false;
      } catch (GridCacheEntryRemovedException ignored) {
        // No-op, will retry.
      } catch (GridDhtInvalidPartitionException ignored) {
        return false;
      } catch (IgniteCheckedException e) {
        onDone(e);

        return true;
      }
    }
  }
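  /*
   * Hypothetical caller sketch: localGet() returns true when the search for the
   * key may stop (value found locally, stable miss with no store, or failure),
   * and false when the caller must fall back to a remote request:
   *
   *   if (!localGet(key, part, locVals))
   *     remoteKeys.add(key);   // "remoteKeys" is an assumed fallback collection.
   */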