Example #1
    /** {@inheritDoc} */
    @Override
    protected Collection<GridComputeJobAdapter> split(int gridSize, Object arg)
        throws GridException {
      assert rsrc1 != null;
      assert rsrc2 != null;
      assert rsrc3 != null;
      assert rsrc4 != null;
      assert log != null;

      log.info("Injected shared resource1 into task: " + rsrc1);
      log.info("Injected shared resource2 into task: " + rsrc2);
      log.info("Injected shared resource3 into task: " + rsrc3);
      log.info("Injected shared resource4 into task: " + rsrc4);
      log.info("Injected log resource into task: " + log);

      task1Rsrc1 = rsrc1;
      task1Rsrc2 = rsrc2;
      task1Rsrc3 = rsrc3;
      task1Rsrc4 = rsrc4;

      Collection<GridComputeJobAdapter> jobs = new ArrayList<>(gridSize);

      for (int i = 0; i < gridSize; i++) {
        jobs.add(new GridSharedJob1());
      }

      return jobs;
    }
Example #2
  /**
   * Schedules runnable task for execution.
   *
   * @param w Runnable task.
   * @throws GridException Thrown if any exception occurred.
   */
  @SuppressWarnings({"CatchGenericClass", "ProhibitedExceptionThrown"})
  public void execute(final GridWorker w) throws GridException {
    workers.add(w);

    try {
      exec.execute(
          new Runnable() {
            @Override
            public void run() {
              try {
                w.run();
              } finally {
                workers.remove(w);
              }
            }
          });
    } catch (RejectedExecutionException e) {
      workers.remove(w);

      throw new GridComputeExecutionRejectedException(
          "Failed to execute worker due to execution rejection.", e);
    } catch (RuntimeException e) {
      workers.remove(w);

      throw new GridException("Failed to execute worker due to runtime exception.", e);
    } catch (Error e) {
      workers.remove(w);

      throw e;
    }
  }
    /** {@inheritDoc} */
    @Override
    protected Collection<? extends GridComputeJob> split(int gridSize, String arg)
        throws GridException {
      Collection<GridComputeJobAdapter> jobs = new ArrayList<>(jobCnt);

      for (int i = 0; i < jobCnt; i++) jobs.add(new TestJob());

      return jobs;
    }
    /** {@inheritDoc} */
    @Override
    protected Collection<? extends GridComputeJob> split(int gridSize, Object arg)
        throws GridException {
      if (log.isInfoEnabled())
        log.info("Splitting job [job=" + this + ", gridSize=" + gridSize + ", arg=" + arg + ']');

      Collection<GridComputeJob> jobs = new ArrayList<>(SPLIT_COUNT);

      for (int i = 1; i <= SPLIT_COUNT; i++) jobs.add(new GridCancelTestJob(i));

      return jobs;
    }
  /**
   * @param rmtReducer Optional reducer.
   * @param rmtTransform Optional transformer.
   * @param args Arguments.
   * @return Future.
   */
  @SuppressWarnings("IfMayBeConditional")
  private <R> GridCacheQueryFuture<R> execute(
      @Nullable GridReducer<T, R> rmtReducer,
      @Nullable GridClosure<T, R> rmtTransform,
      @Nullable Object... args) {
    Collection<GridNode> nodes = nodes();

    cctx.checkSecurity(GridSecurityPermission.CACHE_READ);

    if (F.isEmpty(nodes))
      return new GridCacheQueryErrorFuture<>(
          cctx.kernalContext(),
          new GridEmptyProjectionException("There are no data nodes for cache: " + cctx.name()));

    if (log.isDebugEnabled())
      log.debug("Executing query [query=" + this + ", nodes=" + nodes + ']');

    if (cctx.deploymentEnabled()) {
      try {
        cctx.deploy().registerClasses(filter, rmtReducer, rmtTransform);
        cctx.deploy().registerClasses(args);
      } catch (GridException e) {
        return new GridCacheQueryErrorFuture<>(cctx.kernalContext(), e);
      }
    }

    if (subjId == null) subjId = cctx.localNodeId();

    taskHash = cctx.kernalContext().job().currentTaskNameHash();

    GridCacheQueryBean bean =
        new GridCacheQueryBean(
            this,
            (GridReducer<Object, Object>) rmtReducer,
            (GridClosure<Object, Object>) rmtTransform,
            args);

    GridCacheQueryManager qryMgr = cctx.queries();

    boolean loc = nodes.size() == 1 && F.first(nodes).id().equals(cctx.localNodeId());

    if (type == SQL_FIELDS)
      return (GridCacheQueryFuture<R>)
          (loc ? qryMgr.queryFieldsLocal(bean) : qryMgr.queryFieldsDistributed(bean, nodes));
    else
      return (GridCacheQueryFuture<R>)
          (loc ? qryMgr.queryLocal(bean) : qryMgr.queryDistributed(bean, nodes));
  }
  /** {@inheritDoc} */
  @Override
  public void loadCache(GridBiInClosure<K, V> c, @Nullable Object... args) throws GridException {
    ExecutorService exec =
        new ThreadPoolExecutor(
            threadsCnt,
            threadsCnt,
            0L,
            MILLISECONDS,
            new ArrayBlockingQueue<Runnable>(batchQueueSize),
            new BlockingRejectedExecutionHandler());

    Iterator<I> iter = inputIterator(args);

    Collection<I> buf = new ArrayList<>(batchSize);

    try {
      while (iter.hasNext()) {
        if (Thread.currentThread().isInterrupted()) {
          U.warn(log, "Working thread was interrupted while loading data.");

          break;
        }

        buf.add(iter.next());

        if (buf.size() == batchSize) {
          exec.submit(new Worker(c, buf, args));

          buf = new ArrayList<>(batchSize);
        }
      }

      if (!buf.isEmpty()) exec.submit(new Worker(c, buf, args));
    } catch (RejectedExecutionException ignored) {
      // Should never happen because of the custom blocking RejectedExecutionHandler.
      assert false : "RejectedExecutionException should not have been thrown here.";
    } finally {
      exec.shutdown();

      try {
        exec.awaitTermination(Long.MAX_VALUE, MILLISECONDS);
      } catch (InterruptedException ignored) {
        U.warn(log, "Working thread was interrupted while waiting for put operations to complete.");

        Thread.currentThread().interrupt();
      }
    }
  }
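The catch block above asserts that RejectedExecutionException is never thrown, which relies on the pool's rejection handler blocking the submitting thread instead of rejecting. The GridGain BlockingRejectedExecutionHandler itself is not shown here; as a minimal standalone sketch under that assumption, a blocking handler built only on JDK classes (class and method names below are illustrative, not GridGain API) could look like this:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BlockingSubmitExample {
  /** Illustrative handler: blocks the submitting thread until the work queue has room. */
  static class BlockOnFullHandler implements RejectedExecutionHandler {
    @Override
    public void rejectedExecution(Runnable r, ThreadPoolExecutor pool) {
      try {
        if (!pool.isShutdown())
          pool.getQueue().put(r); // Wait for queue space instead of rejecting.
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();

        throw new RejectedExecutionException("Interrupted while waiting for queue space.", e);
      }
    }
  }

  public static void main(String[] args) throws InterruptedException {
    int threads = 4;

    ExecutorService exec =
        new ThreadPoolExecutor(
            threads,
            threads,
            0L,
            TimeUnit.MILLISECONDS,
            new ArrayBlockingQueue<Runnable>(8),
            new BlockOnFullHandler());

    // Submitters never see RejectedExecutionException; they are throttled instead.
    for (int i = 0; i < 100; i++) {
      final int batch = i;

      exec.submit(() -> System.out.println("Loaded batch " + batch));
    }

    exec.shutdown();
    exec.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
  }
}

The trade-off of this pattern is back-pressure: producers slow to the pace of the worker pool rather than failing fast.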
  /** {@inheritDoc} */
  @Override
  public Map<? extends GridComputeJob, GridNode> map(List<GridNode> subgrid, Integer arg)
      throws GridException {
    assert taskSes != null;
    assert arg != null;
    assert arg > 0;

    Map<GridSessionLoadTestJob, GridNode> map = new HashMap<>(subgrid.size());

    Iterator<GridNode> iter = subgrid.iterator();

    Random rnd = new Random();

    params = new HashMap<>(arg);

    Collection<UUID> assigned = new ArrayList<>(subgrid.size());

    for (int i = 0; i < arg; i++) {
      // Recycle iterator.
      if (!iter.hasNext()) iter = subgrid.iterator();

      String paramName = UUID.randomUUID().toString();

      int paramVal = rnd.nextInt();

      taskSes.setAttribute(paramName, paramVal);

      GridNode node = iter.next();

      assigned.add(node.id());

      map.put(new GridSessionLoadTestJob(paramName), node);

      params.put(paramName, paramVal);

      if (log.isDebugEnabled())
        log.debug("Set session attribute [name=" + paramName + ", value=" + paramVal + ']');
    }

    taskSes.setAttribute("nodes", assigned);

    return map;
  }
  /**
   * Increases the priority of a job if it has been bumped down.
   *
   * @param waitJobs Ordered collection of collision contexts for jobs that are currently waiting
   *     for execution.
   * @param passiveJobs Reordered collection of collision contexts for waiting jobs.
   */
  private void bumpPriority(
      Collection<GridCollisionJobContext> waitJobs, List<GridCollisionJobContext> passiveJobs) {
    assert waitJobs != null;
    assert passiveJobs != null;
    assert waitJobs.size() == passiveJobs.size();

    for (int i = 0; i < passiveJobs.size(); i++) {
      GridCollisionJobContext ctx = passiveJobs.get(i);

      if (i > indexOf(waitJobs, ctx))
        ctx.getJobContext().setAttribute(jobAttrKey, getJobPriority(ctx) + starvationInc);
    }
  }
Example #9
  /**
   * @param cctx Registry.
   * @param keys Keys to lock.
   * @param tx Transaction.
   * @param read Read flag.
   * @param retval Flag to return value or not.
   * @param timeout Lock acquisition timeout.
   * @param filter Filter.
   */
  public GridNearLockFuture(
      GridCacheContext<K, V> cctx,
      Collection<? extends K> keys,
      @Nullable GridNearTxLocal<K, V> tx,
      boolean read,
      boolean retval,
      long timeout,
      GridPredicate<GridCacheEntry<K, V>>[] filter) {
    super(cctx.kernalContext(), CU.boolReducer());
    assert cctx != null;
    assert keys != null;

    this.cctx = cctx;
    this.keys = keys;
    this.tx = tx;
    this.read = read;
    this.retval = retval;
    this.timeout = timeout;
    this.filter = filter;

    threadId = tx == null ? Thread.currentThread().getId() : tx.threadId();

    lockVer = tx != null ? tx.xidVersion() : cctx.versions().next();

    futId = GridUuid.randomUuid();

    entries = new ArrayList<>(keys.size());

    log = U.logger(ctx, logRef, GridNearLockFuture.class);

    if (timeout > 0) {
      timeoutObj = new LockTimeoutObject();

      cctx.time().addTimeoutObject(timeoutObj);
    }

    valMap = new ConcurrentHashMap8<>(keys.size(), 1f);
  }
Example #10
  /** {@inheritDoc} */
  @Override
  public GridFuture<?> addData(Collection<? extends Map.Entry<K, V>> entries) {
    A.notEmpty(entries, "entries");

    enterBusy();

    try {
      GridFutureAdapter<Object> resFut = new GridFutureAdapter<>(ctx);

      activeFuts.add(resFut);

      resFut.listenAsync(rmvActiveFut);

      Collection<K> keys = new GridConcurrentHashSet<>(entries.size(), 1.0f, 16);

      for (Map.Entry<K, V> entry : entries) keys.add(entry.getKey());

      load0(entries, resFut, keys, 0);

      return resFut;
    } finally {
      leaveBusy();
    }
  }
Example #11
    /** {@inheritDoc} */
    @Override
    public Class<?> deployClass() {
      if (cls == null) {
        Class<?> cls0 = null;

        if (depCls != null) cls0 = depCls;
        else {
          for (Iterator<Object> it = objs.iterator();
              (cls0 == null || U.isJdk(cls0)) && it.hasNext(); ) {
            Object o = it.next();

            if (o != null) cls0 = U.detectClass(o);
          }

          if (cls0 == null || U.isJdk(cls0)) cls0 = GridDataLoaderImpl.class;
        }

        assert cls0 != null : "Failed to detect deploy class [objs=" + objs + ']';

        cls = cls0;
      }

      return cls;
    }
  /** {@inheritDoc} */
  @Override
  public void execute(@Nullable GridProjection prj) throws GridException {
    if (cb == null)
      throw new IllegalStateException("Mandatory local callback is not set for the query: " + this);

    if (prj == null) prj = ctx.grid();

    prj = prj.forCache(ctx.name());

    if (prj.nodes().isEmpty())
      throw new GridTopologyException("Failed to execute query (projection is empty): " + this);

    GridCacheMode mode = ctx.config().getCacheMode();

    if (mode == LOCAL || mode == REPLICATED) {
      Collection<GridNode> nodes = prj.nodes();

      GridNode node = nodes.contains(ctx.localNode()) ? ctx.localNode() : F.rand(nodes);

      assert node != null;

      if (nodes.size() > 1 && !ctx.cache().isDrSystemCache()) {
        if (node.id().equals(ctx.localNodeId()))
          U.warn(
              log,
              "Continuous query for "
                  + mode
                  + " cache can be run only on local node. "
                  + "Will execute query locally: "
                  + this);
        else
          U.warn(
              log,
              "Continuous query for "
                  + mode
                  + " cache can be run only on single node. "
                  + "Will execute query on remote node [qry="
                  + this
                  + ", node="
                  + node
                  + ']');
      }

      prj = prj.forNode(node);
    }

    closeLock.lock();

    try {
      if (routineId != null)
        throw new IllegalStateException("Continuous query can't be executed twice.");

      guard.block();

      GridContinuousHandler hnd =
          new GridCacheContinuousQueryHandler<>(ctx.name(), topic, cb, filter, prjPred);

      routineId =
          ctx.kernalContext()
              .continuous()
              .startRoutine(hnd, bufSize, timeInterval, autoUnsubscribe, prj.predicate())
              .get();
    } finally {
      closeLock.unlock();
    }
  }
Example #13
    /** {@inheritDoc} */
    @Override
    protected Collection<GridComputeJobAdapter> split(int gridSize, Object arg)
        throws GridException {
      assert rsrc1 != null;
      assert rsrc2 != null;
      assert rsrc3 != null;
      assert rsrc4 != null;
      assert log != null;

      log.info("Injected shared resource1 into task: " + rsrc1);
      log.info("Injected shared resource2 into task: " + rsrc2);
      log.info("Injected shared resource3 into task: " + rsrc3);
      log.info("Injected shared resource4 into task: " + rsrc4);
      log.info("Injected log resource into task: " + log);

      task2Rsrc1 = rsrc1;
      task2Rsrc2 = rsrc2;
      task2Rsrc3 = rsrc3;
      task2Rsrc4 = rsrc4;

      Collection<GridComputeJobAdapter> jobs = new ArrayList<>(gridSize);

      for (int i = 0; i < gridSize; i++) {
        jobs.add(
            new GridComputeJobAdapter() {
              /** User resource. */
              @GridUserResource(resourceClass = UserResource1.class)
              private transient GridAbstractUserResource rsrc5;

              /** User resource. */
              @GridUserResource private transient UserResource2 rsrc6;

              /** User resource. */
              @GridUserResource(resourceClass = UserResource1.class, resourceName = "rsrc3")
              private transient GridAbstractUserResource rsrc7;

              /** User resource. */
              @GridUserResource(resourceName = "rsrc4")
              private transient UserResource2 rsrc8;

              /** {@inheritDoc} */
              @SuppressWarnings({"ObjectEquality"})
              @Override
              public Serializable execute() {
                assert rsrc1 != null;
                assert rsrc2 != null;
                assert rsrc3 != null;
                assert rsrc4 != null;
                assert log != null;

                assert rsrc5 != null;
                assert rsrc6 != null;
                assert rsrc7 != null;
                assert rsrc8 != null;

                // Make sure that neither task-scoped nor global-scoped
                // resources were created more than once.
                assert rsrc1 == rsrc5;
                assert rsrc2 == rsrc6;
                assert rsrc3 == rsrc7;
                assert rsrc4 == rsrc8;

                log.info("Injected shared resource1 into job: " + rsrc1);
                log.info("Injected shared resource2 into job: " + rsrc2);
                log.info("Injected shared resource3 into job: " + rsrc3);
                log.info("Injected shared resource4 into job: " + rsrc4);
                log.info("Injected shared resource5 into job: " + rsrc5);
                log.info("Injected shared resource6 into job: " + rsrc6);
                log.info("Injected shared resource7 into job: " + rsrc7);
                log.info("Injected shared resource8 into job: " + rsrc8);
                log.info("Injected log resource into job: " + log);

                return null;
              }
            });
      }

      return jobs;
    }
  /** {@inheritDoc} */
  @Override
  public void onCollision(
      Collection<GridCollisionJobContext> waitJobs,
      Collection<GridCollisionJobContext> activeJobs) {
    assert waitJobs != null;
    assert activeJobs != null;

    int activeSize = F.size(activeJobs, RUNNING_JOBS);

    waitingCnt.set(waitJobs.size());
    runningCnt.set(activeSize);
    heldCnt.set(activeJobs.size() - activeSize);

    int waitSize = waitJobs.size();

    int activateCnt = parallelJobsNum - activeSize;

    if (activateCnt > 0 && !waitJobs.isEmpty()) {
      if (waitJobs.size() <= activateCnt) {
        for (GridCollisionJobContext waitJob : waitJobs) {
          waitJob.activate();

          waitSize--;
        }
      } else {
        List<GridCollisionJobContext> passiveList =
            new ArrayList<GridCollisionJobContext>(waitJobs);

        Collections.sort(
            passiveList,
            new Comparator<GridCollisionJobContext>() {
              /** {@inheritDoc} */
              @Override
              public int compare(GridCollisionJobContext o1, GridCollisionJobContext o2) {
                int p1 = getJobPriority(o1);
                int p2 = getJobPriority(o2);

                return p1 < p2 ? 1 : p1 == p2 ? 0 : -1;
              }
            });

        if (preventStarvation) bumpPriority(waitJobs, passiveList);

        for (int i = 0; i < activateCnt; i++) {
          passiveList.get(i).activate();

          waitSize--;
        }
      }
    }

    if (waitSize > waitJobsNum) {
      List<GridCollisionJobContext> waitList = new ArrayList<GridCollisionJobContext>(waitJobs);

      // Put jobs with highest priority first.
      Collections.sort(
          waitList,
          new Comparator<GridCollisionJobContext>() {
            /** {@inheritDoc} */
            @Override
            public int compare(GridCollisionJobContext o1, GridCollisionJobContext o2) {
              int p1 = getJobPriority(o1);
              int p2 = getJobPriority(o2);

              return p1 < p2 ? 1 : p1 == p2 ? 0 : -1;
            }
          });

      int skip = waitJobs.size() - waitSize;

      int i = 0;

      for (GridCollisionJobContext waitCtx : waitList) {
        if (++i >= skip) {
          waitCtx.cancel();

          if (--waitSize <= waitJobsNum) break;
        }
      }
    }
  }
  /**
   * See <a href="http://e-docs.bea.com/wls/docs100/javadocs/weblogic/common/T3StartupDef.html">
   * http://e-docs.bea.com/wls/docs100/javadocs/weblogic/common/T3StartupDef.html</a> for more
   * information.
   *
   * @param str Virtual name by which the class is registered as a {@code startupClass} in the
   *     {@code config.xml} file.
   * @param params A hashtable that is made up of the name-value pairs supplied from the {@code
   *     startupArgs} property.
   * @return Result string (log message).
   * @throws Exception Thrown if error occurred.
   */
  @SuppressWarnings({"unchecked", "CatchGenericClass"})
  @Override
  public String startup(String str, Hashtable params) throws Exception {
    GridLogger log = new GridJavaLogger(LoggingHelper.getServerLogger());

    cfgFile = (String) params.get(cfgFilePathParam);

    if (cfgFile == null) {
      throw new IllegalArgumentException("Failed to read property: " + cfgFilePathParam);
    }

    String workMgrName = (String) params.get(workMgrParam);

    URL cfgUrl = U.resolveGridGainUrl(cfgFile);

    if (cfgUrl == null)
      throw new ServerLifecycleException(
          "Failed to find Spring configuration file (path provided should be "
              + "either absolute, relative to GRIDGAIN_HOME, or relative to META-INF folder): "
              + cfgFile);

    GenericApplicationContext springCtx;

    try {
      springCtx = new GenericApplicationContext();

      XmlBeanDefinitionReader xmlReader = new XmlBeanDefinitionReader(springCtx);

      xmlReader.loadBeanDefinitions(new UrlResource(cfgUrl));

      springCtx.refresh();
    } catch (BeansException e) {
      throw new ServerLifecycleException(
          "Failed to instantiate Spring XML application context: " + e.getMessage(), e);
    }

    Map cfgMap;

    try {
      // Note: Spring is not generics-friendly.
      cfgMap = springCtx.getBeansOfType(GridConfiguration.class);
    } catch (BeansException e) {
      throw new ServerLifecycleException(
          "Failed to instantiate bean [type="
              + GridConfiguration.class
              + ", err="
              + e.getMessage()
              + ']',
          e);
    }

    if (cfgMap == null)
      throw new ServerLifecycleException(
          "Failed to find a single grid factory configuration in: " + cfgUrl);

    if (cfgMap.isEmpty())
      throw new ServerLifecycleException("Can't find grid factory configuration in: " + cfgUrl);

    try {
      ExecutorService execSvc = null;

      MBeanServer mbeanSrv = null;

      for (GridConfiguration cfg : (Collection<GridConfiguration>) cfgMap.values()) {
        assert cfg != null;

        GridConfigurationAdapter adapter = new GridConfigurationAdapter(cfg);

        // Set logger.
        if (cfg.getGridLogger() == null) adapter.setGridLogger(log);

        if (cfg.getExecutorService() == null) {
          if (execSvc == null)
            execSvc =
                workMgrName != null
                    ? new GridThreadWorkManagerExecutor(workMgrName)
                    : new GridThreadWorkManagerExecutor(J2EEWorkManager.getDefault());

          adapter.setExecutorService(execSvc);
        }

        if (cfg.getMBeanServer() == null) {
          if (mbeanSrv == null) {
            InitialContext ctx = null;

            try {
              ctx = new InitialContext();

              mbeanSrv = (MBeanServer) ctx.lookup("java:comp/jmx/runtime");
            } catch (Exception e) {
              throw new IllegalArgumentException(
                  "MBean server was not provided and failed to obtain " + "Weblogic MBean server.",
                  e);
            } finally {
              if (ctx != null) ctx.close();
            }
          }

          adapter.setMBeanServer(mbeanSrv);
        }

        Grid grid = G.start(adapter, springCtx);

        // If grid is not null, it was started properly.
        if (grid != null) gridNames.add(grid.name());
      }

      return getClass().getSimpleName() + " started successfully.";
    } catch (GridException e) {
      // Stop started grids only.
      for (String name : gridNames) G.stop(name, true);

      throw new ServerLifecycleException("Failed to start GridGain.", e);
    }
  }
Example #16
  /**
   * Maps keys to nodes. Note that we cannot simply group keys by node and send one lock request
   * per node, as such an approach does not preserve the order of lock acquisition. Instead, keys
   * are split into contiguous groups, each belonging to one primary node, and locks for these
   * groups are acquired sequentially (see the standalone sketch after this method).
   *
   * @param keys Keys.
   */
  private void map(Iterable<? extends K> keys) {
    try {
      GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

      assert snapshot != null;

      long topVer = snapshot.topologyVersion();

      assert topVer > 0;

      if (CU.affinityNodes(cctx, topVer).isEmpty()) {
        onDone(
            new GridTopologyException(
                "Failed to map keys for near-only cache (all "
                    + "partition nodes left the grid)."));

        return;
      }

      ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

      // Assign keys to primary nodes.
      GridNearLockMapping<K, V> map = null;

      for (K key : keys) {
        GridNearLockMapping<K, V> updated = map(key, map, topVer);

        // If new mapping was created, add to collection.
        if (updated != map) mappings.add(updated);

        map = updated;
      }

      if (isDone()) {
        if (log.isDebugEnabled()) log.debug("Abandoning (re)map because future is done: " + this);

        return;
      }

      if (log.isDebugEnabled())
        log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

      // Create mini futures.
      for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext(); ) {
        GridNearLockMapping<K, V> mapping = iter.next();

        GridNode node = mapping.node();
        Collection<K> mappedKeys = mapping.mappedKeys();

        assert !mappedKeys.isEmpty();

        GridNearLockRequest<K, V> req = null;

        Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

        boolean explicit = false;

        for (K key : mappedKeys) {
          while (true) {
            GridNearCacheEntry<K, V> entry = null;

            try {
              entry = cctx.near().entryExx(key, topVer);

              if (!cctx.isAll(entry.wrap(false), filter)) {
                if (log.isDebugEnabled())
                  log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                onComplete(false, false);

                return;
              }

              // Removed exception may be thrown here.
              GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

              if (isDone()) {
                if (log.isDebugEnabled())
                  log.debug(
                      "Abandoning (re)map because future is done after addEntry attempt "
                          + "[fut="
                          + this
                          + ", entry="
                          + entry
                          + ']');

                return;
              }

              if (cand != null) {
                if (tx == null && !cand.reentry())
                  cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                if (val == null) {
                  GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                  try {
                    if (dhtEntry != null) val = dhtEntry.versionedValue(topVer);
                  } catch (GridCacheEntryRemovedException ignored) {
                    assert dhtEntry.obsolete()
                        : " Got removed exception for non-obsolete entry: " + dhtEntry;

                    if (log.isDebugEnabled())
                      log.debug(
                          "Got removed exception for DHT entry in map (will ignore): " + dhtEntry);
                  }
                }

                GridCacheVersion dhtVer = null;

                if (val != null) {
                  dhtVer = val.get1();

                  valMap.put(key, val);
                }

                if (!cand.reentry()) {
                  if (req == null) {
                    req =
                        new GridNearLockRequest<>(
                            topVer,
                            cctx.nodeId(),
                            threadId,
                            futId,
                            lockVer,
                            inTx(),
                            implicitTx(),
                            implicitSingleTx(),
                            read,
                            isolation(),
                            isInvalidate(),
                            timeout,
                            syncCommit(),
                            syncRollback(),
                            mappedKeys.size(),
                            inTx() ? tx.size() : mappedKeys.size(),
                            inTx() ? tx.groupLockKey() : null,
                            inTx() && tx.partitionLock(),
                            inTx() ? tx.subjectId() : null);

                    mapping.request(req);
                  }

                  distributedKeys.add(key);

                  GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                  if (tx != null) tx.addKeyMapping(key, mapping.node());

                  req.addKeyBytes(
                      key,
                      node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                      retval && dhtVer == null,
                      dhtVer, // Include DHT version to match remote DHT entry.
                      writeEntry,
                      inTx() ? tx.entry(key).drVersion() : null,
                      cctx);

                  // Clear transfer required flag since we are sending message.
                  if (writeEntry != null) writeEntry.transferRequired(false);
                }

                if (cand.reentry())
                  explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
              } else
                // Ignore reentries within transactions.
                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

              if (explicit) tx.addKeyMapping(key, mapping.node());

              break;
            } catch (GridCacheEntryRemovedException ignored) {
              assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

              if (log.isDebugEnabled())
                log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
            }
          }

          // Mark the mapping with the explicit lock flag.
          if (explicit) {
            boolean marked = tx != null && tx.markExplicit(node.id());

            assert tx == null || marked;
          }
        }

        if (!distributedKeys.isEmpty()) mapping.distributedKeys(distributedKeys);
        else {
          assert mapping.request() == null;

          iter.remove();
        }
      }

      cctx.mvcc().recheckPendingLocks();

      proceedMapping(mappings);
    } catch (GridException ex) {
      onError(ex);
    }
  }
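The Javadoc above explains why keys are split into contiguous per-node groups rather than grouped globally per node: lock acquisition order must be preserved. A minimal standalone sketch of just that grouping step, using a hypothetical owner function in place of the GridGain affinity API (all names below are illustrative):

import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

public class ContiguousKeyGrouping {
  /** Splits keys into consecutive runs that share the same owner, preserving key order. */
  static <K, N> List<Map.Entry<N, List<K>>> group(Iterable<K> keys, Function<K, N> owner) {
    List<Map.Entry<N, List<K>>> groups = new ArrayList<>();

    N curOwner = null;
    List<K> cur = null;

    for (K key : keys) {
      N n = owner.apply(key);

      // Start a new group whenever the owner changes.
      if (cur == null || !n.equals(curOwner)) {
        cur = new ArrayList<>();
        curOwner = n;

        groups.add(new AbstractMap.SimpleEntry<>(n, cur));
      }

      cur.add(key);
    }

    return groups;
  }

  public static void main(String[] args) {
    // Hypothetical owner function: key modulo 2 picks one of two "nodes".
    List<Map.Entry<Integer, List<Integer>>> groups =
        group(Arrays.asList(1, 3, 2, 4, 5), k -> k % 2);

    // Prints [1, 3], then [2, 4], then [5] - acquisition order follows key order.
    for (Map.Entry<Integer, List<Integer>> g : groups)
      System.out.println("node=" + g.getKey() + ", keys=" + g.getValue());
  }
}

Processing the groups sequentially then acquires locks in the same order the caller supplied the keys, which is what prevents lock-order inversion across nodes.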