Code Example #1
  /** {@inheritDoc} */
  @SuppressWarnings({"unchecked"})
  @Override
  public T reduce(List<GridJobResult> results) throws GridException {
    assert results != null;
    assert results.size() == 1;

    return (T) results.get(0).getData();
  }
Code Example #2
    /** {@inheritDoc} */
    @Override
    public Serializable reduce(List<GridComputeJobResult> results) throws GridException {
      if (log.isInfoEnabled()) log.info("Reducing job [job=" + this + ", results=" + results + ']');

      if (results.size() > 1) fail();

      return results.get(0).getData();
    }
Code Example #3
  /**
   * Increases priority if a job has been bumped down.
   *
   * @param waitJobs Ordered collection of collision contexts for jobs that are currently waiting
   *     for execution.
   * @param passiveJobs Reordered collection of collision contexts for waiting jobs.
   */
  private void bumpPriority(
      Collection<GridCollisionJobContext> waitJobs, List<GridCollisionJobContext> passiveJobs) {
    assert waitJobs != null;
    assert passiveJobs != null;
    assert waitJobs.size() == passiveJobs.size();

    for (int i = 0; i < passiveJobs.size(); i++) {
      GridCollisionJobContext ctx = passiveJobs.get(i);

      if (i > indexOf(waitJobs, ctx))
        ctx.getJobContext().setAttribute(jobAttrKey, getJobPriority(ctx) + starvationInc);
    }
  }
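The indexOf(...) helper used by bumpPriority(...) is not part of this listing. A minimal sketch, assuming it simply returns the position of the given collision context within the ordered wait collection (the method body and the -1 fallback are illustrative, not the actual SPI code):

  // Sketch of the indexOf(...) helper assumed above: walk the ordered wait collection and
  // return the position of the given collision context, or -1 if it is not present.
  private int indexOf(Collection<GridCollisionJobContext> waitJobs, GridCollisionJobContext ctx) {
    int i = 0;

    for (GridCollisionJobContext c : waitJobs) {
      if (c == ctx) return i;

      i++;
    }

    return -1;
  }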
Code Example #4
  /**
   * @param nodeId Primary node ID.
   * @param req Request.
   * @return Remote transaction.
   * @throws GridException If failed.
   * @throws GridDistributedLockCancelledException If lock has been cancelled.
   */
  @SuppressWarnings({"RedundantTypeArguments"})
  @Nullable
  public GridNearTxRemote<K, V> startRemoteTx(UUID nodeId, GridDhtLockRequest<K, V> req)
      throws GridException, GridDistributedLockCancelledException {
    List<byte[]> nearKeyBytes = req.nearKeyBytes();

    GridNearTxRemote<K, V> tx = null;

    ClassLoader ldr = ctx.deploy().globalLoader();

    if (ldr != null) {
      for (int i = 0; i < nearKeyBytes.size(); i++) {
        byte[] bytes = nearKeyBytes.get(i);

        if (bytes == null) continue;

        K key = req.nearKeys().get(i);

        Collection<GridCacheMvccCandidate<K>> cands = req.candidatesByIndex(i);

        if (log.isDebugEnabled()) log.debug("Unmarshalled key: " + key);

        GridNearCacheEntry<K, V> entry = null;

        while (true) {
          try {
            entry = peekExx(key);

            if (entry != null) {
              entry.keyBytes(bytes);

              // Handle implicit locks for pessimistic transactions.
              if (req.inTx()) {
                tx = ctx.tm().tx(req.version());

                if (tx != null) tx.addWrite(key, bytes, null /*Value.*/, null /*Value bytes.*/);
                else {
                  tx =
                      new GridNearTxRemote<K, V>(
                          nodeId,
                          req.nearNodeId(),
                          req.threadId(),
                          req.version(),
                          null,
                          PESSIMISTIC,
                          req.isolation(),
                          req.isInvalidate(),
                          req.timeout(),
                          key,
                          bytes,
                          null, // Value.
                          null, // Value bytes.
                          ctx);

                  if (tx.empty()) return tx;

                  tx = ctx.tm().onCreated(tx);

                  if (tx == null || !ctx.tm().onStarted(tx))
                    throw new GridCacheTxRollbackException(
                        "Failed to acquire lock "
                            + "(transaction has been completed): "
                            + req.version());
                }
              }

              // Add remote candidate before reordering.
              entry.addRemote(
                  req.nodeId(),
                  nodeId,
                  req.threadId(),
                  req.version(),
                  req.timeout(),
                  tx != null && tx.ec(),
                  tx != null,
                  tx != null && tx.implicitSingle());

              // Remote candidates for ordered lock queuing.
              entry.addRemoteCandidates(
                  cands, req.version(), req.committedVersions(), req.rolledbackVersions());

              entry.orderOwned(req.version(), req.owned(entry.key()));
            }

            // Double-check in case the sender node left the grid.
            if (ctx.discovery().node(req.nodeId()) == null) {
              if (log.isDebugEnabled())
                log.debug("Node requesting lock left grid (lock request will be ignored): " + req);

              if (tx != null) tx.rollback();

              return null;
            }

            // Entry is legit.
            break;
          } catch (GridCacheEntryRemovedException ignored) {
            assert entry.obsoleteVersion() != null
                : "Obsolete flag not set on removed entry: " + entry;

            if (log.isDebugEnabled())
              log.debug("Received entry removed exception (will retry on renewed entry): " + entry);

            if (tx != null) {
              tx.clearEntry(entry.key());

              if (log.isDebugEnabled())
                log.debug(
                    "Cleared removed entry from remote transaction (will retry) [entry="
                        + entry
                        + ", tx="
                        + tx
                        + ']');
            }
          }
        }
      }
    } else {
      String err = "Failed to acquire deployment class loader for message: " + req;

      U.warn(log, err);

      throw new GridException(err);
    }

    return tx;
  }
Code Example #5
  /** {@inheritDoc} */
  @Override
  public void onCollision(
      Collection<GridCollisionJobContext> waitJobs,
      Collection<GridCollisionJobContext> activeJobs) {
    assert waitJobs != null;
    assert activeJobs != null;

    int activeSize = F.size(activeJobs, RUNNING_JOBS);

    waitingCnt.set(waitJobs.size());
    runningCnt.set(activeSize);
    heldCnt.set(activeJobs.size() - activeSize);

    int waitSize = waitJobs.size();

    int activateCnt = parallelJobsNum - activeSize;

    if (activateCnt > 0 && !waitJobs.isEmpty()) {
      if (waitJobs.size() <= activateCnt) {
        for (GridCollisionJobContext waitJob : waitJobs) {
          waitJob.activate();

          waitSize--;
        }
      } else {
        List<GridCollisionJobContext> passiveList =
            new ArrayList<GridCollisionJobContext>(waitJobs);

        Collections.sort(
            passiveList,
            new Comparator<GridCollisionJobContext>() {
              /** {@inheritDoc} */
              @Override
              public int compare(GridCollisionJobContext o1, GridCollisionJobContext o2) {
                int p1 = getJobPriority(o1);
                int p2 = getJobPriority(o2);

                return p1 < p2 ? 1 : p1 == p2 ? 0 : -1;
              }
            });

        if (preventStarvation) bumpPriority(waitJobs, passiveList);

        for (int i = 0; i < activateCnt; i++) {
          passiveList.get(i).activate();

          waitSize--;
        }
      }
    }

    if (waitSize > waitJobsNum) {
      List<GridCollisionJobContext> waitList = new ArrayList<GridCollisionJobContext>(waitJobs);

      // Put jobs with highest priority first.
      Collections.sort(
          waitList,
          new Comparator<GridCollisionJobContext>() {
            /** {@inheritDoc} */
            @Override
            public int compare(GridCollisionJobContext o1, GridCollisionJobContext o2) {
              int p1 = getJobPriority(o1);
              int p2 = getJobPriority(o2);

              return p1 < p2 ? 1 : p1 == p2 ? 0 : -1;
            }
          });

      int skip = waitJobs.size() - waitSize;

      int i = 0;

      for (GridCollisionJobContext waitCtx : waitList) {
        if (++i >= skip) {
          waitCtx.cancel();

          if (--waitSize <= waitJobsNum) break;
        }
      }
    }
  }
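The getJobPriority(...) helper referenced in examples #3 and #5 is not shown either. A minimal sketch, assuming the priority is stored under the same jobAttrKey job-context attribute that bumpPriority(...) writes, with an assumed dfltPri field as the fallback when the attribute is absent:

  // Sketch of a possible getJobPriority(...): read the priority attribute from the job
  // context (the same jobAttrKey used by bumpPriority) and fall back to a default when unset.
  // dfltPri is an assumed configuration field, not part of the original listing.
  private int getJobPriority(GridCollisionJobContext ctx) {
    Integer p = (Integer) ctx.getJobContext().getAttribute(jobAttrKey);

    return p != null ? p : dfltPri;
  }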
Code Example #6
  /**
   * Performs flush.
   *
   * @throws GridException If failed.
   */
  private void doFlush() throws GridException {
    lastFlushTime = U.currentTimeMillis();

    List<GridFuture> activeFuts0 = null;

    int doneCnt = 0;

    for (GridFuture<?> f : activeFuts) {
      if (!f.isDone()) {
        if (activeFuts0 == null) activeFuts0 = new ArrayList<>((int) (activeFuts.size() * 1.2));

        activeFuts0.add(f);
      } else {
        f.get();

        doneCnt++;
      }
    }

    if (activeFuts0 == null || activeFuts0.isEmpty()) return;

    while (true) {
      Queue<GridFuture<?>> q = null;

      for (Buffer buf : bufMappings.values()) {
        GridFuture<?> flushFut = buf.flush();

        if (flushFut != null) {
          if (q == null) q = new ArrayDeque<>(bufMappings.size() * 2);

          q.add(flushFut);
        }
      }

      if (q != null) {
        assert !q.isEmpty();

        boolean err = false;

        for (GridFuture fut = q.poll(); fut != null; fut = q.poll()) {
          try {
            fut.get();
          } catch (GridException e) {
            if (log.isDebugEnabled()) log.debug("Failed to flush buffer: " + e);

            err = true;
          }
        }

        if (err)
          // Remaps needed - flush buffers.
          continue;
      }

      doneCnt = 0;

      for (int i = 0; i < activeFuts0.size(); i++) {
        GridFuture f = activeFuts0.get(i);

        if (f == null) doneCnt++;
        else if (f.isDone()) {
          f.get();

          doneCnt++;

          activeFuts0.set(i, null);
        } else break;
      }

      if (doneCnt == activeFuts0.size()) return;
    }
  }
Code Example #7
  /** @throws Exception If failed. */
  public void testCreateFileFragmented() throws Exception {
    GridGgfsEx impl = (GridGgfsEx) grid(0).ggfs("ggfs");

    GridGgfsFragmentizerManager fragmentizer = impl.context().fragmentizer();

    GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", false);

    GridGgfsPath path = new GridGgfsPath("/file");

    try {
      GridGgfs fs0 = grid(0).ggfs("ggfs");
      GridGgfs fs1 = grid(1).ggfs("ggfs");
      GridGgfs fs2 = grid(2).ggfs("ggfs");

      try (GridGgfsOutputStream out =
          fs0.create(
              path,
              128,
              false,
              1,
              CFG_GRP_SIZE,
              F.asMap(GridGgfs.PROP_PREFER_LOCAL_WRITES, "true"))) {
        // 1.5 blocks
        byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

        Arrays.fill(data, (byte) 1);

        out.write(data);
      }

      try (GridGgfsOutputStream out = fs1.append(path, false)) {
        // 1.5 blocks.
        byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

        Arrays.fill(data, (byte) 2);

        out.write(data);
      }

      // After this we should have the first two blocks colocated with grid 0 and the last block
      // colocated with grid 1.
      GridGgfsFileImpl fileImpl = (GridGgfsFileImpl) fs.info(path);

      GridCache<Object, Object> metaCache = grid(0).cachex(META_CACHE_NAME);

      GridGgfsFileInfo fileInfo = (GridGgfsFileInfo) metaCache.get(fileImpl.fileId());

      GridGgfsFileMap map = fileInfo.fileMap();

      List<GridGgfsFileAffinityRange> ranges = map.ranges();

      assertEquals(2, ranges.size());

      assertTrue(ranges.get(0).startOffset() == 0);
      assertTrue(ranges.get(0).endOffset() == 2 * CFG_BLOCK_SIZE - 1);

      assertTrue(ranges.get(1).startOffset() == 2 * CFG_BLOCK_SIZE);
      assertTrue(ranges.get(1).endOffset() == 3 * CFG_BLOCK_SIZE - 1);

      // Validate data read after colocated writes.
      try (GridGgfsInputStream in = fs2.open(path)) {
        // Validate first part of file.
        for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 1, in.read());

        // Validate second part of file.
        for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 2, in.read());

        assertEquals(-1, in.read());
      }
    } finally {
      GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", true);

      boolean hasData = false;

      for (int i = 0; i < NODES_CNT; i++) hasData |= !grid(i).cachex(DATA_CACHE_NAME).isEmpty();

      assertTrue(hasData);

      fs.delete(path, true);
    }

    GridTestUtils.retryAssert(
        log,
        ASSERT_RETRIES,
        ASSERT_RETRY_INTERVAL,
        new CAX() {
          @Override
          public void applyx() {
            for (int i = 0; i < NODES_CNT; i++)
              assertTrue(grid(i).cachex(DATA_CACHE_NAME).isEmpty());
          }
        });
  }
Code Example #8
  /**
   * Processes lock request.
   *
   * @param nodeId Sender node ID.
   * @param msg Lock request.
   */
  @SuppressWarnings({"unchecked", "ThrowableInstanceNeverThrown"})
  private void processLockRequest(UUID nodeId, GridDistributedLockRequest<K, V> msg) {
    assert !nodeId.equals(locNodeId);

    List<byte[]> keys = msg.keyBytes();

    int cnt = keys.size();

    GridReplicatedTxRemote<K, V> tx = null;

    GridDistributedLockResponse res;

    ClassLoader ldr = null;

    try {
      ldr = ctx.deploy().globalLoader();

      if (ldr != null) {
        res = new GridDistributedLockResponse(msg.version(), msg.futureId(), cnt);

        for (int i = 0; i < keys.size(); i++) {
          byte[] bytes = keys.get(i);
          K key = msg.keys().get(i);

          Collection<GridCacheMvccCandidate<K>> cands = msg.candidatesByIndex(i);

          if (bytes == null) continue;

          if (log.isDebugEnabled()) log.debug("Unmarshalled key: " + key);

          GridDistributedCacheEntry<K, V> entry = null;

          while (true) {
            try {
              entry = entryexx(key);

              // Handle implicit locks for pessimistic transactions.
              if (msg.inTx()) {
                tx = ctx.tm().tx(msg.version());

                if (tx != null) {
                  if (msg.txRead()) tx.addRead(key, bytes);
                  else tx.addWrite(key, bytes);
                } else {
                  tx =
                      new GridReplicatedTxRemote<K, V>(
                          nodeId,
                          msg.threadId(),
                          msg.version(),
                          null,
                          PESSIMISTIC,
                          msg.isolation(),
                          msg.isInvalidate(),
                          msg.timeout(),
                          key,
                          bytes,
                          msg.txRead(),
                          ctx);

                  tx = ctx.tm().onCreated(tx);

                  if (tx == null || !ctx.tm().onStarted(tx))
                    throw new GridCacheTxRollbackException(
                        "Failed to acquire lock "
                            + "(transaction has been completed): "
                            + msg.version());
                }
              }

              // Add remote candidate before reordering.
              entry.addRemote(
                  msg.nodeId(),
                  null,
                  msg.threadId(),
                  msg.version(),
                  msg.timeout(),
                  tx != null && tx.ec(),
                  tx != null,
                  tx != null && tx.implicitSingle());

              // Remote candidates for ordered lock queuing.
              entry.addRemoteCandidates(
                  cands, msg.version(), msg.committedVersions(), msg.rolledbackVersions());

              // Double-check in case the sender node left the grid.
              if (ctx.discovery().node(msg.nodeId()) == null) {
                if (log.isDebugEnabled())
                  log.debug(
                      "Node requesting lock left grid (lock request will be ignored): " + msg);

                if (tx != null) tx.rollback();

                return;
              }

              res.setCandidates(
                  i,
                  entry.localCandidates(),
                  ctx.tm().committedVersions(msg.version()),
                  ctx.tm().rolledbackVersions(msg.version()));

              res.addValueBytes(
                  entry.rawGet(), msg.returnValue(i) ? entry.valueBytes(null) : null, ctx);

              // Entry is legit.
              break;
            } catch (GridCacheEntryRemovedException ignored) {
              assert entry.obsoleteVersion() != null
                  : "Obsolete flag not set on removed entry: " + entry;

              if (log.isDebugEnabled())
                log.debug(
                    "Received entry removed exception (will retry on renewed entry): " + entry);

              if (tx != null) {
                tx.clearEntry(entry.key());

                if (log.isDebugEnabled())
                  log.debug(
                      "Cleared removed entry from remote transaction (will retry) [entry="
                          + entry
                          + ", tx="
                          + tx
                          + ']');
              }
            }
          }
        }
      } else {
        String err = "Failed to acquire deployment class for message: " + msg;

        U.warn(log, err);

        res =
            new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err));
      }
    } catch (GridCacheTxRollbackException e) {
      if (log.isDebugEnabled())
        log.debug("Received lock request for completed transaction (will ignore): " + e);

      res = new GridDistributedLockResponse(msg.version(), msg.futureId(), e);
    } catch (GridException e) {
      String err = "Failed to unmarshal at least one of the keys for lock request message: " + msg;

      log.error(err, e);

      res =
          new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err, e));

      if (tx != null) tx.rollback();
    } catch (GridDistributedLockCancelledException ignored) {
      // Received lock request for cancelled lock.
      if (log.isDebugEnabled())
        log.debug("Received lock request for canceled lock (will ignore): " + msg);

      if (tx != null) tx.rollback();

      // Don't send response back.
      return;
    }

    GridNode node = ctx.discovery().node(msg.nodeId());

    boolean releaseAll = false;

    if (node != null) {
      try {
        // Reply back to sender.
        ctx.io().send(node, res);
      } catch (GridException e) {
        U.error(log, "Failed to send message to node (did the node leave grid?): " + node.id(), e);

        releaseAll = ldr != null;
      }
    }
    // If sender left grid, release all locks acquired so far.
    else releaseAll = ldr != null;

    // Release all locks because sender node left grid.
    if (releaseAll) {
      for (K key : msg.keys()) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekexx(key);

          try {
            if (entry != null) entry.removeExplicitNodeLocks(msg.nodeId());

            break;
          } catch (GridCacheEntryRemovedException ignore) {
            if (log.isDebugEnabled())
              log.debug(
                  "Attempted to remove lock on removed entity during failure "
                      + "of replicated lock request handling (will retry): "
                      + entry);
          }
        }
      }

      U.warn(
          log, "Sender node left grid in the midst of lock acquisition (locks will be released).");
    }
  }
Code Example #9
    /** {@inheritDoc} */
    @Override
    public String reduce(List<GridComputeJobResult> results) throws GridException {
      assert results.size() == 1;

      return results.get(0).getData();
    }
Code Example #10
  /**
   * This method is called to map or split a grid task into multiple grid jobs. This is the first
   * method that gets called when task execution starts.
   *
   * @param subgrid Nodes available for this task execution. Note that the order of nodes is
   *     guaranteed to be randomized by the container. This ensures that every time you simply
   *     iterate through the grid nodes, the order of nodes will be random, which over time should
   *     result in all nodes being used equally.
   * @param data Task execution argument. Can be {@code null}. This is the same argument as the one
   *     passed into {@code Grid#execute(...)} methods.
   * @return Map of grid jobs assigned to subgrid nodes. Unless {@link
   *     GridComputeTaskContinuousMapper} is injected into the task, an exception will be thrown if
   *     {@code null} or an empty map is returned.
   * @throws GridException If mapping could not complete successfully. This exception will be
   *     thrown out of the {@link GridComputeTaskFuture#get()} method.
   */
  @Override
  public Map<? extends GridComputeJob, GridNode> map(
      List<GridNode> subgrid, @Nullable final Collection<Integer> data) throws GridException {
    assert !subgrid.isEmpty();

    // Give preference to wanted node. Otherwise, take the first one.
    GridNode targetNode =
        F.find(
            subgrid,
            subgrid.get(0),
            new GridPredicate<GridNode>() {
              @Override
              public boolean apply(GridNode e) {
                return preferredNode.equals(e.id());
              }
            });

    return Collections.singletonMap(
        new GridComputeJobAdapter() {
          @GridLoggerResource private GridLogger log;

          @GridInstanceResource private Grid grid;

          @Override
          public Object execute() throws GridException {
            log.info("Going to put data: " + data.size());

            GridCache<Object, Object> cache = grid.cache(cacheName);

            assert cache != null;

            Map<Integer, T2<Integer, Collection<Integer>>> putMap = groupData(data);

            for (Map.Entry<Integer, T2<Integer, Collection<Integer>>> entry : putMap.entrySet()) {
              T2<Integer, Collection<Integer>> pair = entry.getValue();

              Object affKey = pair.get1();

              // Group lock partition.
              try (GridCacheTx tx =
                  cache.txStartPartition(
                      cache.affinity().partition(affKey),
                      optimistic ? OPTIMISTIC : PESSIMISTIC,
                      REPEATABLE_READ,
                      0,
                      pair.get2().size())) {
                for (Integer val : pair.get2()) cache.put(val, val);

                tx.commit();
              }
            }

            log.info("Finished put data: " + data.size());

            return data;
          }

          /**
           * Groups values by partitions.
           *
           * @param data Data to put.
           * @return Grouped map.
           */
          private Map<Integer, T2<Integer, Collection<Integer>>> groupData(Iterable<Integer> data) {
            GridCache<Object, Object> cache = grid.cache(cacheName);

            Map<Integer, T2<Integer, Collection<Integer>>> res = new HashMap<>();

            for (Integer val : data) {
              int part = cache.affinity().partition(val);

              T2<Integer, Collection<Integer>> tup = res.get(part);

              if (tup == null) {
                tup = new T2<Integer, Collection<Integer>>(val, new LinkedList<Integer>());

                res.put(part, tup);
              }

              tup.get2().add(val);
            }

            return res;
          }
        },
        targetNode);
  }
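For context, a hypothetical invocation of the task defined by this map(...) method. The task class name DataPutTask, the argument collection, and the result type are illustrative; the only assumed API calls are grid.compute().execute(...) and the GridComputeTaskFuture#get() mentioned in the Javadoc above, which may differ between GridGain versions:

  // Hypothetical usage sketch: execute the task with a small argument collection and
  // block on the task future for the reduced result.
  GridComputeTaskFuture<Collection<Integer>> fut =
      grid.compute().execute(new DataPutTask(), Arrays.asList(1, 2, 3));

  Collection<Integer> putData = fut.get();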
Code Example #11
  /**
   * Runs all tests belonging to this test suite on the grid.
   *
   * @param result Test result collector.
   */
  @Override
  public void run(TestResult result) {
    if (isDisabled) {
      copy.run(result);
    } else {
      GridTestRouter router = createRouter();

      Grid grid = startGrid();

      try {
        List<GridTaskFuture<?>> futs = new ArrayList<GridTaskFuture<?>>(testCount());

        List<GridJunit3SerializableTest> tests =
            new ArrayList<GridJunit3SerializableTest>(testCount());

        for (int i = 0; i < testCount(); i++) {
          Test junit = testAt(i);

          GridJunit3SerializableTest test;

          if (junit instanceof TestSuite) {
            test = new GridJunit3SerializableTestSuite((TestSuite) junit);
          } else {
            assert junit instanceof TestCase
                : "Test must be either TestSuite or TestCase: " + junit;

            test = new GridJunit3SerializableTestCase((TestCase) junit);
          }

          tests.add(test);

          if (clsLdr == null) {
            clsLdr = U.detectClassLoader(junit.getClass());
          }

          futs.add(
              grid.execute(
                  new GridJunit3Task(junit.getClass(), clsLdr),
                  new GridJunit3Argument(router, test, locTests.contains(test.getName())),
                  timeout));
        }

        for (int i = 0; i < testCount(); i++) {
          GridTaskFuture<?> fut = futs.get(i);

          GridJunit3SerializableTest origTest = tests.get(i);

          try {
            GridJunit3SerializableTest resTest = (GridJunit3SerializableTest) fut.get();

            origTest.setResult(resTest);

            origTest.getTest().run(result);
          } catch (GridException e) {
            handleFail(result, origTest, e);
          }
        }
      } finally {
        stopGrid();
      }
    }
  }
  /**
   * @param seq Start/stop sequence.
   * @throws Exception If failed.
   */
  private void checkSequence0(boolean[] seq) throws Exception {
    try {
      startGrid(0);

      TreeSet<Integer> started = new TreeSet<>();

      started.add(0);

      int topVer = 1;

      for (boolean start : seq) {
        if (start) {
          int nextIdx = nextIndex(started);

          startGrid(nextIdx);

          started.add(nextIdx);
        } else {
          int idx = started.last();

          stopGrid(idx);

          started.remove(idx);
        }

        topVer++;

        info("Grid 0: " + grid(0).localNode().id());

        ((GridKernal) grid(0))
            .internalCache()
            .context()
            .affinity()
            .affinityReadyFuture(topVer)
            .get();

        for (int i : started) {
          if (i != 0) {
            GridEx grid = grid(i);

            ((GridKernal) grid)
                .internalCache()
                .context()
                .affinity()
                .affinityReadyFuture(topVer)
                .get();

            info("Grid " + i + ": " + grid.localNode().id());

            for (int part = 0; part < parts; part++) {
              List<GridNode> firstNodes =
                  (List<GridNode>)
                      grid(0).cache(null).affinity().mapPartitionToPrimaryAndBackups(part);

              List<GridNode> secondNodes =
                  (List<GridNode>)
                      grid.cache(null).affinity().mapPartitionToPrimaryAndBackups(part);

              assertEquals(firstNodes.size(), secondNodes.size());

              for (int n = 0; n < firstNodes.size(); n++)
                assertEquals(firstNodes.get(n), secondNodes.get(n));
            }
          }
        }
      }
    } finally {
      stopAllGrids();
    }
  }
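The nextIndex(...) helper used by checkSequence0(...) is not included in this listing. A minimal sketch, assuming it returns the smallest grid index greater than zero that is not currently started (grid 0 is started once up front and never handed out again here):

  // Sketch of the nextIndex(...) helper assumed above: return the lowest free index above 0
  // so that indexes of previously stopped grids are reused.
  private int nextIndex(Collection<Integer> started) {
    int idx = 1;

    while (started.contains(idx)) idx++;

    return idx;
  }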