  /**
   * Gets job priority. First tries to get it from the job context. If the job context has no
   * priority, tries to get it from the task session. If the task session has no priority either,
   * the default priority is used.
   *
   * @param ctx Collision job context.
   * @return Job priority.
   */
  private int getJobPriority(GridCollisionJobContext ctx) {
    assert ctx != null;

    Integer p = null;

    GridJobContext jctx = ctx.getJobContext();

    try {
      p = (Integer) jctx.getAttribute(jobAttrKey);
    } catch (ClassCastException e) {
      LT.error(
          log,
          e,
          "Type of job context priority attribute '"
              + jobAttrKey
              + "' is not java.lang.Integer [type="
              + jctx.getAttribute(jobAttrKey).getClass()
              + ']');
    }

    if (p == null) {
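      // Job context has no priority attribute; fall back to the task session attribute.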
      GridTaskSession ses = ctx.getTaskSession();

      try {
        p = (Integer) ses.getAttribute(taskAttrKey);
      } catch (ClassCastException e) {
        LT.error(
            log,
            e,
            "Type of task session priority attribute '"
                + taskAttrKey
                + "' is not java.lang.Integer [type="
                + ses.getAttribute(taskAttrKey).getClass()
                + ']');
      }

      if (p == null) {
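        // Neither the job context nor the task session defines a priority; use the default.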
        if (log.isDebugEnabled()) {
          log.debug(
              "Failed get priority from job context attribute '"
                  + jobAttrKey
                  + "' and task session attribute '"
                  + taskAttrKey
                  + "' (will use default priority): "
                  + dfltPriority);
        }

        p = dfltPriority;
      }
    }

    assert p != null;

    return p;
  }

  /**
   * Sends full partition map to the given node, retrying after a timeout if the send fails.
   *
   * @param nodeId Node ID.
   * @param retryCnt Number of remaining retries.
   */
  private void sendAllPartitions(final UUID nodeId, final int retryCnt) {
    ClusterNode n = cctx.node(nodeId);

    try {
      if (n != null) sendAllPartitions(F.asList(n), exchId);
    } catch (IgniteCheckedException e) {
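      // If the target node has already left the grid, there is nothing to retry.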
      if (e instanceof ClusterTopologyCheckedException || !cctx.discovery().alive(n)) {
        if (log.isDebugEnabled())
          log.debug(
              "Failed to send full partition map to node, node left grid "
                  + "[rmtNode="
                  + nodeId
                  + ", exchangeId="
                  + exchId
                  + ']');

        return;
      }

      if (retryCnt > 0) {
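        // Retries remain: schedule another attempt after the network send retry delay.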
        long timeout = cctx.gridConfig().getNetworkSendRetryDelay();

        LT.error(
            log,
            e,
            "Failed to send full partition map to node (will retry after timeout) "
                + "[node="
                + nodeId
                + ", exchangeId="
                + exchId
                + ", timeout="
                + timeout
                + ']');

        cctx.time()
            .addTimeoutObject(
                new GridTimeoutObjectAdapter(timeout) {
                  @Override
                  public void onTimeout() {
                    sendAllPartitions(nodeId, retryCnt - 1);
                  }
                });
      } else
        U.error(
            log,
            "Failed to send full partition map [node=" + n + ", exchangeId=" + exchId + ']',
            e);
    }
  }

  /** Performs cleanup of the trash directory. */
  private void delete() {
    IgfsFileInfo info = null;

    try {
      info = meta.info(TRASH_ID);
    } catch (ClusterTopologyServerNotFoundException e) {
      LT.warn(log, e, "Server nodes not found.");
    } catch (IgniteCheckedException e) {
      U.error(log, "Cannot obtain trash directory info.", e);
    }

    if (info != null) {
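      // Iterate over the immediate children of the trash directory and delete each entry.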
      for (Map.Entry<String, IgfsListingEntry> entry : info.listing().entrySet()) {
        IgniteUuid fileId = entry.getValue().fileId();

        if (log.isDebugEnabled())
          log.debug(
              "Deleting IGFS trash entry [name=" + entry.getKey() + ", fileId=" + fileId + ']');

        try {
          if (!cancelled) {
            if (delete(entry.getKey(), fileId)) {
              if (log.isDebugEnabled())
                log.debug(
                    "Sending delete confirmation message [name="
                        + entry.getKey()
                        + ", fileId="
                        + fileId
                        + ']');

              sendDeleteMessage(new IgfsDeleteMessage(fileId));
            }
          } else break;
        } catch (IgniteInterruptedCheckedException ignored) {
          // Ignore this exception while stopping.
        } catch (IgniteCheckedException e) {
          U.error(log, "Failed to delete entry from the trash directory: " + entry.getKey(), e);

          sendDeleteMessage(new IgfsDeleteMessage(fileId, e));
        }
      }
    }
  }

  /**
   * Removes a particular entry from the trash directory.
   *
   * @param name Entry name.
   * @param id Entry ID.
   * @return {@code True} if the entry was actually deleted from the file system by this call.
   * @throws IgniteCheckedException If failed.
   */
  private boolean delete(String name, IgniteUuid id) throws IgniteCheckedException {
    assert name != null;
    assert id != null;

    while (true) {
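      // Retry until the entry is removed, or until it turns out to have been deleted concurrently.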
      IgfsFileInfo info = meta.info(id);

      if (info != null) {
        if (info.isDirectory()) {
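          // Delete the directory contents first, then unlink the entry from the trash listing.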
          deleteDirectory(TRASH_ID, id);

          if (meta.delete(TRASH_ID, name, id)) return true;
        } else {
          assert info.isFile();

          // Delete file content first.
          // In case this node crashes, other node will re-delete the file.
          data.delete(info).get();

          boolean ret = meta.delete(TRASH_ID, name, id);

          if (evts.isRecordable(EVT_IGFS_FILE_PURGED)) {
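            // Record a purge event only when the file path is known; otherwise log a warning.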
            if (info.path() != null)
              evts.record(
                  new IgfsEvent(
                      info.path(),
                      igfsCtx.kernalContext().discovery().localNode(),
                      EVT_IGFS_FILE_PURGED));
            else LT.warn(log, null, "Removing file without path info: " + info);
          }

          return ret;
        }
      } else return false; // Entry was deleted concurrently.
    }
  }

  /** {@inheritDoc} */
  @SuppressWarnings("ErrorNotRethrown")
  @Override
  public IpcEndpoint accept() throws IgniteCheckedException {
    while (!Thread.currentThread().isInterrupted()) {
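      // Accept and handshake incoming shared memory connections until the thread is interrupted.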
      Socket sock = null;

      boolean accepted = false;

      try {
        sock = srvSock.accept();

        accepted = true;

        ObjectInputStream in = new ObjectInputStream(sock.getInputStream());

        ObjectOutputStream out = new ObjectOutputStream(sock.getOutputStream());

        IpcSharedMemorySpace inSpace = null;

        IpcSharedMemorySpace outSpace = null;

        boolean err = true;

        try {
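          // Handshake: read the client's init request, allocate shared memory spaces, and reply;
          // 'err' stays true unless the client confirms success at the end.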
          IpcSharedMemoryInitRequest req = (IpcSharedMemoryInitRequest) in.readObject();

          if (log.isDebugEnabled()) log.debug("Processing request: " + req);

          IgnitePair<String> p = inOutToken(req.pid(), size);

          String file1 = p.get1();
          String file2 = p.get2();

          assert file1 != null;
          assert file2 != null;

          // Create tokens.
          new File(file1).createNewFile();
          new File(file2).createNewFile();

          if (log.isDebugEnabled()) log.debug("Created token files: " + p);

          inSpace = new IpcSharedMemorySpace(file1, req.pid(), pid, size, true, log);

          outSpace = new IpcSharedMemorySpace(file2, pid, req.pid(), size, false, log);

          IpcSharedMemoryClientEndpoint ret =
              new IpcSharedMemoryClientEndpoint(inSpace, outSpace, log);

          out.writeObject(
              new IpcSharedMemoryInitResponse(
                  file2, outSpace.sharedMemoryId(), file1, inSpace.sharedMemoryId(), pid, size));

          err = !in.readBoolean();

          endpoints.add(ret);

          return ret;
        } catch (UnsatisfiedLinkError e) {
          throw IpcSharedMemoryUtils.linkError(e);
        } catch (IOException e) {
          if (log.isDebugEnabled())
            log.debug(
                "Failed to process incoming connection "
                    + "(connection may have been closed by the other party): "
                    + e.getMessage());
        } catch (ClassNotFoundException e) {
          U.error(log, "Failed to process incoming connection.", e);
        } catch (ClassCastException e) {
          String msg =
              "Failed to process incoming connection (most probably, shared memory "
                  + "REST endpoint has been configured by mistake).";

          LT.warn(log, null, msg);

          sendErrorResponse(out, e);
        } catch (IpcOutOfSystemResourcesException e) {
          if (!omitOutOfResourcesWarn) LT.warn(log, null, OUT_OF_RESOURCES_MSG);

          sendErrorResponse(out, e);
        } catch (IgniteCheckedException e) {
          LT.error(log, e, "Failed to process incoming shared memory connection.");

          sendErrorResponse(out, e);
        } finally {
          // Exception has been thrown, need to free system resources.
          if (err) {
            if (inSpace != null) inSpace.forceClose();

            // Safety.
            if (outSpace != null) outSpace.forceClose();
          }
        }
      } catch (IOException e) {
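        // If the server socket failed before a connection was accepted, treat the error as fatal;
        // otherwise log it (unless the endpoint is already closed) and keep accepting.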
        if (!Thread.currentThread().isInterrupted() && !accepted)
          throw new IgniteCheckedException("Failed to accept incoming connection.", e);

        if (!closed)
          LT.error(
              log, null, "Failed to process incoming shared memory connection: " + e.getMessage());
      } finally {
        U.closeQuiet(sock);
      }
    } // while

    throw new IgniteInterruptedCheckedException("Socket accept was interrupted.");
  }