/** {@inheritDoc} */
  @Override
  public boolean delete(Path f, boolean recursive) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
      IgfsPath path = convert(f);

      IgfsMode mode = modeRslvr.resolveMode(path);

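      // In PROXY mode the path belongs to the secondary file system, so delegate the call to it.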
      if (mode == PROXY) {
        if (clientLog.isLogEnabled()) clientLog.logDelete(path, PROXY, recursive);

        return secondaryFileSystem().delete(toSecondary(f), recursive);
      }

      boolean res = rmtClient.delete(path, recursive);

      if (clientLog.isLogEnabled()) clientLog.logDelete(path, mode, recursive);

      return res;
    } finally {
      leaveBusy();
    }
  }

  /** {@inheritDoc} */
  @Override
  public FileStatus[] listStatus(Path f) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
      IgfsPath path = convert(f);
      IgfsMode mode = modeRslvr.resolveMode(path);

      if (mode == PROXY) {
        FileStatus[] arr = secondaryFileSystem().listStatus(toSecondary(f));

        if (arr == null) throw new FileNotFoundException("File " + f + " does not exist.");

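        // Rebase the statuses returned by the secondary file system onto this file system's paths.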
        for (int i = 0; i < arr.length; i++) arr[i] = toPrimary(arr[i]);

        if (clientLog.isLogEnabled()) {
          String[] fileArr = new String[arr.length];

          for (int i = 0; i < arr.length; i++) fileArr[i] = arr[i].getPath().toString();

          clientLog.logListDirectory(path, PROXY, fileArr);
        }

        return arr;
      } else {
        Collection<IgfsFile> list = rmtClient.listFiles(path);

        if (list == null) throw new FileNotFoundException("File " + f + " does not exist.");

        List<IgfsFile> files = new ArrayList<>(list);

        FileStatus[] arr = new FileStatus[files.size()];

        for (int i = 0; i < arr.length; i++) arr[i] = convert(files.get(i));

        if (clientLog.isLogEnabled()) {
          String[] fileArr = new String[arr.length];

          for (int i = 0; i < arr.length; i++) fileArr[i] = arr[i].getPath().toString();

          clientLog.logListDirectory(path, mode, fileArr);
        }

        return arr;
      }
    } finally {
      leaveBusy();
    }
  }

  /** {@inheritDoc} */
  @Override
  public void close() throws IOException {
    if (closeGuard.compareAndSet(false, true)) {
      if (rmtClient == null) return;

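      // Close the remote client; the 'false' flag indicates a regular (non-forced) close.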
      rmtClient.close(false);

      if (clientLog.isLogEnabled()) clientLog.close();

      if (factory instanceof LifecycleAware) ((LifecycleAware) factory).stop();

      // Reset initialized resources.
      rmtClient = null;
    }
  }

  /** {@inheritDoc} */
  @Override
  public void renameInternal(Path src, Path dst) throws IOException {
    A.notNull(src, "src");
    A.notNull(dst, "dst");

    enterBusy();

    try {
      IgfsPath srcPath = convert(src);
      IgfsPath dstPath = convert(dst);

      IgfsMode srcMode = modeRslvr.resolveMode(srcPath);

      if (clientLog.isLogEnabled()) clientLog.logRename(srcPath, srcMode, dstPath);

      if (srcMode == PROXY) secondaryFileSystem().rename(toSecondary(src), toSecondary(dst));
      else rmtClient.rename(srcPath, dstPath);
    } finally {
      leaveBusy();
    }
  }

  /** {@inheritDoc} */
  @Override
  public void mkdir(Path f, FsPermission perm, boolean createParent) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
      IgfsPath path = convert(f);
      IgfsMode mode = modeRslvr.resolveMode(path);

      if (mode == PROXY) {
        if (clientLog.isLogEnabled()) clientLog.logMakeDirectory(path, PROXY);

        secondaryFileSystem().mkdirs(toSecondary(f), perm);
      } else {
        rmtClient.mkdirs(path, permission(perm));

        if (clientLog.isLogEnabled()) clientLog.logMakeDirectory(path, mode);
      }
    } finally {
      leaveBusy();
    }
  }

  /** {@inheritDoc} */
  @SuppressWarnings("deprecation")
  @Override
  public FSDataOutputStream createInternal(
      Path f,
      EnumSet<CreateFlag> flag,
      FsPermission perm,
      int bufSize,
      short replication,
      long blockSize,
      Progressable progress,
      Options.ChecksumOpt checksumOpt,
      boolean createParent)
      throws IOException {
    A.notNull(f, "f");

    enterBusy();

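    // Decompose the Hadoop create flags; they select the append/create branches below.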
    boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
    boolean append = flag.contains(CreateFlag.APPEND);
    boolean create = flag.contains(CreateFlag.CREATE);

    OutputStream out = null;

    try {
      IgfsPath path = convert(f);
      IgfsMode mode = modeRslvr.resolveMode(path);

      if (LOG.isDebugEnabled())
        LOG.debug(
            "Opening output stream in create [thread="
                + Thread.currentThread().getName()
                + "path="
                + path
                + ", overwrite="
                + overwrite
                + ", bufSize="
                + bufSize
                + ']');

      if (mode == PROXY) {
        FSDataOutputStream os =
            secondaryFileSystem()
                .create(toSecondary(f), perm, flag, bufSize, replication, blockSize, progress);

        if (clientLog.isLogEnabled()) {
          long logId = IgfsLogger.nextId();

          if (append) clientLog.logAppend(logId, path, PROXY, bufSize); // Don't have stream ID.
          else clientLog.logCreate(logId, path, PROXY, overwrite, bufSize, replication, blockSize);

          return new FSDataOutputStream(new HadoopIgfsProxyOutputStream(os, clientLog, logId));
        } else return os;
      } else {
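        // Pass the POSIX permission and the local-writes preference to IGFS as file properties.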
        Map<String, String> permMap =
            F.asMap(
                IgfsUtils.PROP_PERMISSION,
                toString(perm),
                IgfsUtils.PROP_PREFER_LOCAL_WRITES,
                Boolean.toString(preferLocFileWrites));

        // Create the stream and close it in the 'finally' block if any subsequent operation fails.
        HadoopIgfsStreamDelegate stream;

        long logId = -1;

        if (append) {
          stream = rmtClient.append(path, create, permMap);

          if (clientLog.isLogEnabled()) {
            logId = IgfsLogger.nextId();

            clientLog.logAppend(logId, path, mode, bufSize);
          }

          if (LOG.isDebugEnabled())
            LOG.debug(
                "Opened output stream in append [path=" + path + ", delegate=" + stream + ']');
        } else {
          stream =
              rmtClient.create(
                  path, overwrite, colocateFileWrites, replication, blockSize, permMap);

          if (clientLog.isLogEnabled()) {
            logId = IgfsLogger.nextId();

            clientLog.logCreate(logId, path, mode, overwrite, bufSize, replication, blockSize);
          }

          if (LOG.isDebugEnabled())
            LOG.debug(
                "Opened output stream in create [path=" + path + ", delegate=" + stream + ']');
        }

        assert stream != null;

        HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog, logId);

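        // Use at least a 64 KB buffer for the buffered output wrapper.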
        bufSize = Math.max(64 * 1024, bufSize);

        out = new BufferedOutputStream(igfsOut, bufSize);

        FSDataOutputStream res = new FSDataOutputStream(out, null, 0);

        // Mark stream created successfully.
        out = null;

        return res;
      }
    } finally {
      // Close the stream if a failure occurred during its creation.
      if (out != null) U.closeQuiet(out);

      leaveBusy();
    }
  }

  /** {@inheritDoc} */
  @Override
  public FSDataInputStream open(Path f, int bufSize) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
      IgfsPath path = convert(f);
      IgfsMode mode = modeRslvr.resolveMode(path);

      if (mode == PROXY) {
        FSDataInputStream is = secondaryFileSystem().open(toSecondary(f), bufSize);

        if (clientLog.isLogEnabled()) {
          // At this point we do not know the file size, so we perform an additional request to the
          // remote file system to get it.
          FileStatus status = secondaryFileSystem().getFileStatus(toSecondary(f));

          long size = status != null ? status.getLen() : -1;

          long logId = IgfsLogger.nextId();

          clientLog.logOpen(logId, path, PROXY, bufSize, size);

          return new FSDataInputStream(new HadoopIgfsProxyInputStream(is, clientLog, logId));
        } else return is;
      } else {
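        // Apply the sequential-reads-before-prefetch override, if configured, when opening the stream.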
        HadoopIgfsStreamDelegate stream =
            seqReadsBeforePrefetchOverride
                ? rmtClient.open(path, seqReadsBeforePrefetch)
                : rmtClient.open(path);

        long logId = -1;

        if (clientLog.isLogEnabled()) {
          logId = IgfsLogger.nextId();

          clientLog.logOpen(logId, path, mode, bufSize, stream.length());
        }

        if (LOG.isDebugEnabled())
          LOG.debug(
              "Opening input stream [thread="
                  + Thread.currentThread().getName()
                  + ", path="
                  + path
                  + ", bufSize="
                  + bufSize
                  + ']');

        HadoopIgfsInputStream igfsIn =
            new HadoopIgfsInputStream(stream, stream.length(), bufSize, LOG, clientLog, logId);

        if (LOG.isDebugEnabled())
          LOG.debug("Opened input stream [path=" + path + ", delegate=" + stream + ']');

        return new FSDataInputStream(igfsIn);
      }
    } finally {
      leaveBusy();
    }
  }

  /**
   * Initializes this file system instance: validates the URI, performs the handshake with the
   * remote IGFS server, and sets up client logging, mode resolution and, if needed, the secondary
   * file system.
   *
   * @param name URI passed to constructor.
   * @param cfg Configuration passed to constructor.
   * @throws IOException If initialization failed.
   */
  @SuppressWarnings("ConstantConditions")
  private void initialize(URI name, Configuration cfg) throws IOException {
    enterBusy();

    try {
      if (rmtClient != null)
        throw new IOException("File system is already initialized: " + rmtClient);

      A.notNull(name, "name");
      A.notNull(cfg, "cfg");

      if (!IGFS_SCHEME.equals(name.getScheme()))
        throw new IOException(
            "Illegal file system URI [expected="
                + IGFS_SCHEME
                + "://[name]/[optional_path], actual="
                + name
                + ']');

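      // The URI authority identifies the target IGFS instance and scopes per-instance configuration.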
      uriAuthority = name.getAuthority();

      // Override sequential reads before prefetch if needed.
      seqReadsBeforePrefetch =
          parameter(cfg, PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);

      if (seqReadsBeforePrefetch > 0) seqReadsBeforePrefetchOverride = true;

      // In Ignite, the replication factor is controlled by data cache affinity.
      // We use the replication factor to force the whole file to be stored on the local node.
      dfltReplication = (short) cfg.getInt("dfs.replication", 3);

      // Get file colocation control flag.
      colocateFileWrites = parameter(cfg, PARAM_IGFS_COLOCATED_WRITES, uriAuthority, false);
      preferLocFileWrites = cfg.getBoolean(PARAM_IGFS_PREFER_LOCAL_WRITES, false);

      // Get log directory.
      String logDirCfg = parameter(cfg, PARAM_IGFS_LOG_DIR, uriAuthority, DFLT_IGFS_LOG_DIR);

      File logDirFile = U.resolveIgnitePath(logDirCfg);

      String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null;

      rmtClient = new HadoopIgfsWrapper(uriAuthority, logDir, cfg, LOG, user);

      // Handshake.
      IgfsHandshakeResponse handshake = rmtClient.handshake(logDir);

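      // Remember the server-side block size reported in the handshake.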
      grpBlockSize = handshake.blockSize();

      IgfsPaths paths = handshake.secondaryPaths();

      Boolean logEnabled = parameter(cfg, PARAM_IGFS_LOG_ENABLED, uriAuthority, false);

      if (handshake.sampling() != null ? handshake.sampling() : logEnabled) {
        // Initialize the client logger.
        if (logDir == null) throw new IOException("Failed to resolve log directory: " + logDirCfg);

        Integer batchSize =
            parameter(cfg, PARAM_IGFS_LOG_BATCH_SIZE, uriAuthority, DFLT_IGFS_LOG_BATCH_SIZE);

        clientLog = IgfsLogger.logger(uriAuthority, handshake.igfsName(), logDir, batchSize);
      } else clientLog = IgfsLogger.disabledLogger();

      try {
        modeRslvr = new IgfsModeResolver(paths.defaultMode(), paths.pathModes());
      } catch (IgniteCheckedException ice) {
        throw new IOException(ice);
      }

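      // The secondary file system is required only if at least one path operates in PROXY mode.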
      boolean initSecondary = paths.defaultMode() == PROXY;

      if (!initSecondary && paths.pathModes() != null) {
        for (T2<IgfsPath, IgfsMode> pathMode : paths.pathModes()) {
          IgfsMode mode = pathMode.getValue();

          if (mode == PROXY) {
            initSecondary = true;

            break;
          }
        }
      }

      if (initSecondary) {
        try {
          factory = (HadoopFileSystemFactory) paths.getPayload(getClass().getClassLoader());
        } catch (IgniteCheckedException e) {
          throw new IOException("Failed to get secondary file system factory.", e);
        }

        if (factory == null)
          throw new IOException(
              "Failed to get secondary file system factory (did you set "
                  + IgniteHadoopIgfsSecondaryFileSystem.class.getName()
                  + " as \"secondaryFileSystem\" in "
                  + FileSystemConfiguration.class.getName()
                  + "?)");

        if (factory instanceof LifecycleAware) ((LifecycleAware) factory).start();

        try {
          FileSystem secFs = factory.get(user);

          secondaryUri = secFs.getUri();

          A.ensure(secondaryUri != null, "Secondary file system URI should not be null.");
        } catch (IOException e) {
          throw new IOException(
              "Failed to connect to the secondary file system: " + secondaryUri, e);
        }
      }
    } finally {
      leaveBusy();
    }
  }