Code example #1
  /** {@inheritDoc} */
  @Override
  public void printMemoryStats() {
    X.println(">>> ");
    X.println(">>> Grid memory stats [grid=" + gridName() + ']');

    for (GridComponent comp : comps) comp.printMemoryStats();
  }
Code example #2
  /** {@inheritDoc} */
  @Override
  public void printMemoryStats() {
    super.printMemoryStats();

    X.println(">>>   threadsSize: " + threads.size());
    X.println(">>>   futsSize: " + futs.size());
  }
Code example #3
  /** {@inheritDoc} */
  @Override
  public void printMemoryStats(int threshold) {
    X.println(
        ">>>  Cache partition topology stats [grid="
            + cctx.gridName()
            + ", cache="
            + cctx.name()
            + ']');

    for (GridDhtLocalPartition part : locParts.values()) {
      int size = part.size();

      if (size >= threshold)
        X.println(">>>   Local partition [part=" + part.id() + ", size=" + size + ']');
    }
  }
Code example #4
  /**
   * Sends an atomic get and handles the failure.
   *
   * @param k Key.
   */
  protected void failAtomicGet(int k) {
    try {
      jcache(0).get(new TestKey(String.valueOf(k)));

      assert false : "P2P marshalling failed, but the error response was not sent";
    } catch (CacheException e) {
      assert X.hasCause(e, IOException.class);
    }
  }
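
The assertion above depends on X.hasCause(...) walking the exception's cause chain. A minimal standalone sketch of that contract, assuming assertions (-ea) are enabled; the local hasCause(...) helper is a hypothetical stand-in, not Ignite's implementation:

  import java.io.IOException;

  public class HasCauseSketch {
    // Hypothetical stand-in for X.hasCause(...): walk the cause chain and
    // report whether any link is an instance of one of the given classes.
    static boolean hasCause(Throwable t, Class<?>... classes) {
      for (Throwable cur = t; cur != null; cur = cur.getCause())
        for (Class<?> c : classes)
          if (c.isInstance(cur)) return true;

      return false;
    }

    public static void main(String[] args) {
      Exception wrapped =
          new RuntimeException("cache failure", new IOException("marshalling failed"));

      assert hasCause(wrapped, IOException.class); // Matches the nested cause.
      assert !hasCause(wrapped, InterruptedException.class); // Not in the chain.
    }
  }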
Code example #5
 /** {@inheritDoc} */
 @Override
 public void printMemoryStats() {
   X.println(">>> ");
   X.println(">>> Mvcc manager memory stats [grid=" + cctx.gridName() + ']');
   X.println(">>>   rmvLocksSize: " + rmvLocks.size());
   X.println(">>>   dhtLocCandsSize: " + dhtLocCands.size());
   X.println(">>>   lockedSize: " + locked.size());
   X.println(">>>   futsSize: " + futs.size());
   X.println(">>>   near2dhtSize: " + near2dht.size());
   X.println(">>>   finishFutsSize: " + finishFuts.size());
 }
Code example #6
 /**
  * @param log Logger.
  * @param time Time.
  * @param msg Message.
  */
 private static void log0(@Nullable IgniteLogger log, long time, String msg) {
   if (log != null) {
     if (log.isDebugEnabled()) log.debug(msg);
     else log.warning(msg);
   } else
     X.println(
         String.format(
             "[%s][%s]%s",
             DEBUG_DATE_FMT.get().format(time), Thread.currentThread().getName(), msg));
 }
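
The helper above prefers the logger when one is configured and otherwise prints a timestamped console line. A minimal standalone sketch of that fallback, assuming DEBUG_DATE_FMT is a thread-local date formatter as the example implies (the pattern string is an illustrative guess):

  import java.text.SimpleDateFormat;

  public class Log0Sketch {
    // Assumed shape of DEBUG_DATE_FMT: SimpleDateFormat is not thread-safe,
    // hence the ThreadLocal wrapper.
    private static final ThreadLocal<SimpleDateFormat> DEBUG_DATE_FMT =
        ThreadLocal.withInitial(() -> new SimpleDateFormat("HH:mm:ss,SSS"));

    static void log0(long time, String msg) {
      // Same console format as the example: [time][thread]message.
      System.out.println(
          String.format(
              "[%s][%s]%s",
              DEBUG_DATE_FMT.get().format(time), Thread.currentThread().getName(), msg));
    }

    public static void main(String[] args) {
      log0(System.currentTimeMillis(), " Debug logging started.");
    }
  }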
Code example #7
  /**
   * Sends an atomic put and handles the failure.
   *
   * @param k Key.
   */
  protected void failAtomicPut(int k) {
    try {
      jcache(0).put(new TestKey(String.valueOf(k)), "");

      assert false : "P2P marshalling failed, but the error response was not sent";
    } catch (CacheException e) {
      assert X.hasCause(e, IOException.class);
    }

    assert readCnt.get() == 0; // Ensure the read count is as expected.
  }
Code example #8
  /** @return {@code True} if succeeded. */
  private boolean spreadPartitions() {
    try {
      sendAllPartitions(rmtNodes, exchId);

      return true;
    } catch (IgniteCheckedException e) {
      scheduleRecheck();

      if (!X.hasCause(e, InterruptedException.class))
        U.error(
            log,
            "Failed to send full partition map to nodes (will retry after timeout) [nodes="
                + F.nodeId8s(rmtNodes)
                + ", exchangeId="
                + exchId
                + ']',
            e);

      return false;
    }
  }
Code example #9
  /** {@inheritDoc} */
  @Override
  public ClusterStartNodeResult call() {
    JSch ssh = new JSch();

    Session ses = null;

    try {
      if (spec.key() != null) ssh.addIdentity(spec.key().getAbsolutePath());

      ses = ssh.getSession(spec.username(), spec.host(), spec.port());

      if (spec.password() != null) ses.setPassword(spec.password());

      ses.setConfig("StrictHostKeyChecking", "no");

      ses.connect(timeout);

      boolean win = isWindows(ses);

      char separator = win ? '\\' : '/';

      spec.fixPaths(separator);

      String igniteHome = spec.igniteHome();

      if (igniteHome == null) igniteHome = win ? DFLT_IGNITE_HOME_WIN : DFLT_IGNITE_HOME_LINUX;

      String script = spec.script();

      if (script == null) script = DFLT_SCRIPT_LINUX;

      String cfg = spec.configuration();

      if (cfg == null) cfg = "";

      String startNodeCmd;
      String scriptOutputFileName =
          FILE_NAME_DATE_FORMAT.format(new Date())
              + '-'
              + UUID.randomUUID().toString().substring(0, 8)
              + ".log";

      if (win)
        throw new UnsupportedOperationException(
            "Apache Ignite cannot be auto-started on Windows from IgniteCluster.startNodes(…) API.");
      else { // Assume Unix.
        int spaceIdx = script.indexOf(' ');

        String scriptPath = spaceIdx > -1 ? script.substring(0, spaceIdx) : script;
        String scriptArgs = spaceIdx > -1 ? script.substring(spaceIdx + 1) : "";
        String rmtLogArgs = buildRemoteLogArguments(spec.username(), spec.host());
        String tmpDir = env(ses, "$TMPDIR", "/tmp/");
        String scriptOutputDir = tmpDir + "ignite-startNodes";

        shell(ses, "mkdir " + scriptOutputDir);

        // Mac OS doesn't support ~ in double quotes, so try to get the home path from the remote system.
        if (igniteHome.startsWith("~")) {
          String homeDir = env(ses, "$HOME", "~");

          igniteHome = igniteHome.replaceFirst("~", homeDir);
        }

        // Console output is consumed; started nodes must use Ignite file appenders for logging.
        startNodeCmd =
            new SB()
                .a("nohup ")
                .a("\"")
                .a(igniteHome)
                .a('/')
                .a(scriptPath)
                .a("\"")
                .a(" ")
                .a(scriptArgs)
                .a(!cfg.isEmpty() ? " \"" : "")
                .a(cfg)
                .a(!cfg.isEmpty() ? "\"" : "")
                .a(rmtLogArgs)
                .a(" > ")
                .a(scriptOutputDir)
                .a("/")
                .a(scriptOutputFileName)
                .a(" 2>& 1 &")
                .toString();
      }

      info("Starting remote node with SSH command: " + startNodeCmd, spec.logger(), log);

      shell(ses, startNodeCmd);

      return new ClusterStartNodeResultImpl(spec.host(), true, null);
    } catch (IgniteInterruptedCheckedException e) {
      return new ClusterStartNodeResultImpl(spec.host(), false, e.getMessage());
    } catch (Exception e) {
      return new ClusterStartNodeResultImpl(spec.host(), false, X.getFullStackTrace(e));
    } finally {
      if (ses != null && ses.isConnected()) ses.disconnect();
    }
  }
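
The callable above assumes shell(ses, cmd) and env(ses, name, dflt) helpers for running remote commands. A minimal sketch of the shell(...) side using JSch's exec channel; the helper in the actual source may differ:

  import com.jcraft.jsch.ChannelExec;
  import com.jcraft.jsch.JSchException;
  import com.jcraft.jsch.Session;

  final class ShellSketch {
    /** Runs a command over an established SSH session and waits for it to finish. */
    static void shell(Session ses, String cmd) throws JSchException, InterruptedException {
      ChannelExec ch = (ChannelExec) ses.openChannel("exec");

      try {
        ch.setCommand(cmd);
        ch.connect();

        // Poll until the remote command completes.
        while (!ch.isClosed()) Thread.sleep(100);
      } finally {
        ch.disconnect();
      }
    }
  }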
Code example #10
  /** {@inheritDoc} */
  @Override
  protected void afterTest() throws Exception {
    Transaction tx = jcache().unwrap(Ignite.class).transactions().tx();

    if (tx != null) {
      tx.close();

      fail("Cache transaction remained after test completion: " + tx);
    }

    for (int i = 0; i < gridCount(); i++) {
      info("Checking grid: " + i);

      while (true) {
        try {
          final int fi = i;

          assertTrue(
              "Cache is not empty: "
                  + " localSize = "
                  + jcache(fi).localSize(CachePeekMode.ALL)
                  + ", local entries "
                  + entrySet(jcache(fi).localEntries()),
              GridTestUtils.waitForCondition(
                  // Preloading may happen as nodes leave, so we need to wait.
                  new GridAbsPredicateX() {
                    @Override
                    public boolean applyx() throws IgniteCheckedException {
                      jcache(fi).removeAll();

                      if (jcache(fi).size(CachePeekMode.ALL) > 0) {
                        for (Cache.Entry<String, ?> k : jcache(fi).localEntries())
                          jcache(fi).remove(k.getKey());
                      }

                      return jcache(fi).localSize(CachePeekMode.ALL) == 0;
                    }
                  },
                  getTestTimeout()));

          int primaryKeySize = jcache(i).localSize(CachePeekMode.PRIMARY);
          int keySize = jcache(i).localSize();
          int size = jcache(i).localSize();
          int globalSize = jcache(i).size();
          int globalPrimarySize = jcache(i).size(CachePeekMode.PRIMARY);

          info(
              "Size after [idx="
                  + i
                  + ", size="
                  + size
                  + ", keySize="
                  + keySize
                  + ", primarySize="
                  + primaryKeySize
                  + ", globalSize="
                  + globalSize
                  + ", globalPrimarySize="
                  + globalPrimarySize
                  + ", entrySet="
                  + jcache(i).localEntries()
                  + ']');

          assertEquals(
              "Cache is not empty [idx=" + i + ", entrySet=" + jcache(i).localEntries() + ']',
              0,
              jcache(i).localSize(CachePeekMode.ALL));

          break;
        } catch (Exception e) {
          if (X.hasCause(e, ClusterTopologyCheckedException.class)) {
            info("Got topology exception while tear down (will retry in 1000ms).");

            U.sleep(1000);
          } else throw e;
        }
      }

      for (Cache.Entry<String, Integer> entry : jcache(i).localEntries(CachePeekMode.SWAP))
        jcache(i).remove(entry.getKey());
    }

    assert jcache().unwrap(Ignite.class).transactions().tx() == null;
    assertEquals("Cache is not empty", 0, jcache().localSize(CachePeekMode.ALL));

    resetStore();
  }
Code example #11
  /** @throws Exception If failed. */
  public void testCompact() throws Exception {
    File file = new File(UUID.randomUUID().toString());

    X.println("file: " + file.getPath());

    FileSwapSpaceSpi.SwapFile f = new FileSwapSpaceSpi.SwapFile(file, 8);

    Random rnd = new Random();

    ArrayList<FileSwapSpaceSpi.SwapValue> arr = new ArrayList<>();

    int size = 0;

    for (int a = 0; a < 100; a++) {
      FileSwapSpaceSpi.SwapValue[] vals = new FileSwapSpaceSpi.SwapValue[1 + rnd.nextInt(10)];

      int size0 = 0;

      for (int i = 0; i < vals.length; i++) {
        byte[] bytes = new byte[1 + rnd.nextInt(49)];

        rnd.nextBytes(bytes);

        size0 += bytes.length;

        vals[i] = new FileSwapSpaceSpi.SwapValue(bytes);

        arr.add(vals[i]);
      }

      f.write(new FileSwapSpaceSpi.SwapValues(vals, size0), 1);

      size += size0;

      assertEquals(f.length(), size);
      assertEquals(file.length(), size);
    }

    int i = 0;

    for (FileSwapSpaceSpi.SwapValue val : arr) assertEquals(val.idx(), ++i);

    i = 0;

    for (int cnt = arr.size() / 2; i < cnt; i++) {

      FileSwapSpaceSpi.SwapValue v = arr.remove(rnd.nextInt(arr.size()));

      assertTrue(f.tryRemove(v.idx(), v));
    }

    int hash0 = 0;

    for (FileSwapSpaceSpi.SwapValue val : arr) hash0 += Arrays.hashCode(val.readValue(f.readCh));

    ArrayList<T2<ByteBuffer, ArrayDeque<FileSwapSpaceSpi.SwapValue>>> bufs = new ArrayList();

    for (; ; ) {
      ArrayDeque<FileSwapSpaceSpi.SwapValue> que = new ArrayDeque<>();

      ByteBuffer buf = f.compact(que, 1024);

      if (buf == null) break;

      bufs.add(new T2<>(buf, que));
    }

    f.delete();

    int hash1 = 0;

    for (FileSwapSpaceSpi.SwapValue val : arr) hash1 += Arrays.hashCode(val.value(null));

    assertEquals(hash0, hash1);

    File file0 = new File(UUID.randomUUID().toString());

    FileSwapSpaceSpi.SwapFile f0 = new FileSwapSpaceSpi.SwapFile(file0, 8);

    for (T2<ByteBuffer, ArrayDeque<FileSwapSpaceSpi.SwapValue>> t : bufs)
      f0.write(t.get2(), t.get1(), 1);

    int hash2 = 0;

    for (FileSwapSpaceSpi.SwapValue val : arr) hash2 += Arrays.hashCode(val.readValue(f0.readCh));

    assertEquals(hash2, hash1);
  }
Code example #12
  /**
   * Establishes a TCP connection to the remote Hadoop process and returns a client.
   *
   * @param desc Process descriptor.
   * @return Client.
   * @throws IgniteCheckedException If failed.
   */
  protected HadoopCommunicationClient createTcpClient(HadoopProcessDescriptor desc)
      throws IgniteCheckedException {
    String addr = desc.address();

    int port = desc.tcpPort();

    if (log.isDebugEnabled())
      log.debug(
          "Trying to connect to remote process [locProcDesc="
              + locProcDesc
              + ", desc="
              + desc
              + ']');

    boolean conn = false;
    HadoopTcpNioCommunicationClient client = null;
    IgniteCheckedException errs = null;

    int connectAttempts = 1;

    long connTimeout0 = connTimeout;

    int attempt = 1;

    while (!conn) { // Reconnection on handshake timeout.
      try {
        SocketChannel ch = SocketChannel.open();

        ch.configureBlocking(true);

        ch.socket().setTcpNoDelay(tcpNoDelay);
        ch.socket().setKeepAlive(true);

        if (sockRcvBuf > 0) ch.socket().setReceiveBufferSize(sockRcvBuf);

        if (sockSndBuf > 0) ch.socket().setSendBufferSize(sockSndBuf);

        ch.socket().connect(new InetSocketAddress(addr, port), (int) connTimeout);

        HandshakeFinish fin = new HandshakeFinish();

        GridNioSession ses = nioSrvr.createSession(ch, F.asMap(HANDSHAKE_FINISH_META, fin)).get();

        client = new HadoopTcpNioCommunicationClient(ses);

        if (log.isDebugEnabled()) log.debug("Waiting for handshake finish for client: " + client);

        fin.await(connTimeout0);

        conn = true;
      } catch (HadoopHandshakeTimeoutException e) {
        if (client != null) {
          client.forceClose();

          client = null;
        }

        if (log.isDebugEnabled())
          log.debug(
              "Handshake timedout (will retry with increased timeout) [timeout="
                  + connTimeout0
                  + ", desc="
                  + desc
                  + ", port="
                  + port
                  + ", err="
                  + e
                  + ']');

        if (attempt == reconCnt || connTimeout0 > maxConnTimeout) {
          if (log.isDebugEnabled())
            log.debug(
                "Handshake timed out (will stop attempts to perform the handshake) "
                    + "[timeout="
                    + connTimeout0
                    + ", maxConnTimeout="
                    + maxConnTimeout
                    + ", attempt="
                    + attempt
                    + ", reconCnt="
                    + reconCnt
                    + ", err="
                    + e.getMessage()
                    + ", addr="
                    + addr
                    + ']');

          if (errs == null)
            errs =
                new IgniteCheckedException(
                    "Failed to connect to remote Hadoop process "
                        + "(is process still running?) [desc="
                        + desc
                        + ", addrs="
                        + addr
                        + ']');

          errs.addSuppressed(e);

          break;
        } else {
          attempt++;

          connTimeout0 *= 2;

          // Continue loop.
        }
      } catch (Exception e) {
        if (client != null) {
          client.forceClose();

          client = null;
        }

        if (log.isDebugEnabled())
          log.debug("Client creation failed [addr=" + addr + ", port=" + port + ", err=" + e + ']');

        if (X.hasCause(e, SocketTimeoutException.class))
          LT.warn(
              log,
              null,
              "Connect timed out (consider increasing 'connTimeout' "
                  + "configuration property) [addr="
                  + addr
                  + ", port="
                  + port
                  + ']');

        if (errs == null)
          errs =
              new IgniteCheckedException(
                  "Failed to connect to remote Hadoop process (is process still running?) "
                      + "[desc="
                      + desc
                      + ", addrs="
                      + addr
                      + ']');

        errs.addSuppressed(e);

        // Retry the connection a second time if it was not established.
        if (connectAttempts < 2
            && (e instanceof ConnectException || X.hasCause(e, ConnectException.class))) {
          connectAttempts++;

          continue;
        }

        break;
      }
    }

    if (client == null) {
      assert errs != null;

      if (X.hasCause(errs, ConnectException.class))
        LT.warn(
            log,
            null,
            "Failed to connect to a remote Hadoop process (is process still running?). "
                + "Make sure operating system firewall is disabled on local and remote host) "
                + "[addrs="
                + addr
                + ", port="
                + port
                + ']');

      throw errs;
    }

    if (log.isDebugEnabled()) log.debug("Created client: " + client);

    return client;
  }
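
On each handshake timeout the method doubles connTimeout0 and increments attempt, stopping once reconCnt attempts or maxConnTimeout is exceeded. A minimal standalone sketch of that backoff policy, with tryHandshake(...) as a hypothetical placeholder for the connect-and-handshake step:

  public class BackoffSketch {
    static boolean connectWithBackoff(long connTimeout, long maxConnTimeout, int reconCnt) {
      long timeout = connTimeout;

      for (int attempt = 1; ; attempt++) {
        if (tryHandshake(timeout)) return true; // Connected.

        // Same stop condition as the example's error branch.
        if (attempt == reconCnt || timeout > maxConnTimeout) return false;

        timeout *= 2; // Retry with an increased timeout.
      }
    }

    // Stand-in: pretend the handshake succeeds once the timeout is large enough.
    static boolean tryHandshake(long timeoutMs) {
      return timeoutMs >= 4_000;
    }

    public static void main(String[] args) {
      System.out.println(connectWithBackoff(1_000, 10_000, 5)); // Prints "true".
    }
  }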
Code example #13
  /**
   * @param desc Process descriptor.
   * @param port Port.
   * @return Client.
   * @throws IgniteCheckedException If failed.
   */
  @Nullable
  protected HadoopCommunicationClient createShmemClient(HadoopProcessDescriptor desc, int port)
      throws IgniteCheckedException {
    int attempt = 1;

    int connectAttempts = 1;

    long connTimeout0 = connTimeout;

    while (true) {
      IpcEndpoint clientEndpoint;

      try {
        clientEndpoint = new IpcSharedMemoryClientEndpoint(port, (int) connTimeout, log);
      } catch (IgniteCheckedException e) {
        // Retry the connection a second time if it was not established.
        if (connectAttempts < 2 && X.hasCause(e, ConnectException.class)) {
          connectAttempts++;

          continue;
        }

        throw e;
      }

      HadoopCommunicationClient client = null;

      try {
        ShmemWorker worker = new ShmemWorker(clientEndpoint, false);

        shmemWorkers.add(worker);

        GridNioSession ses = worker.session();

        HandshakeFinish fin = new HandshakeFinish();

        // We are under lock, so it is safe to get the session and attach the handshake meta.
        ses.addMeta(HANDSHAKE_FINISH_META, fin);

        client = new HadoopTcpNioCommunicationClient(ses);

        new IgniteThread(worker).start();

        fin.await(connTimeout0);
      } catch (HadoopHandshakeTimeoutException e) {
        if (log.isDebugEnabled())
          log.debug(
              "Handshake timed out (will retry with increased timeout) [timeout="
                  + connTimeout0
                  + ", err="
                  + e.getMessage()
                  + ", client="
                  + client
                  + ']');

        if (client != null) client.forceClose();

        if (attempt == reconCnt || connTimeout0 > maxConnTimeout) {
          if (log.isDebugEnabled())
            log.debug(
                "Handshake timedout (will stop attempts to perform the handshake) "
                    + "[timeout="
                    + connTimeout0
                    + ", maxConnTimeout="
                    + maxConnTimeout
                    + ", attempt="
                    + attempt
                    + ", reconCnt="
                    + reconCnt
                    + ", err="
                    + e.getMessage()
                    + ", client="
                    + client
                    + ']');

          throw e;
        } else {
          attempt++;

          connTimeout0 *= 2;

          continue;
        }
      } catch (RuntimeException | Error e) {
        if (log.isDebugEnabled())
          log.debug(
              "Caught exception (will close client) [err="
                  + e.getMessage()
                  + ", client="
                  + client
                  + ']');

        if (client != null) client.forceClose();

        throw e;
      }

      return client;
    }
  }
Code example #14
  /** {@inheritDoc} */
  @SuppressWarnings("unchecked")
  @Override
  public void onUtilityCacheStarted() throws IgniteCheckedException {
    IgniteCacheProxy<Object, Object> proxy = ctx.cache().jcache(CU.UTILITY_CACHE_NAME);

    boolean old = proxy.context().deploy().ignoreOwnership(true);

    try {
      metaDataCache = (IgniteCacheProxy) proxy.withNoRetries();
    } finally {
      proxy.context().deploy().ignoreOwnership(old);
    }

    if (clientNode) {
      assert !metaDataCache.context().affinityNode();

      metaCacheQryId =
          metaDataCache
              .context()
              .continuousQueries()
              .executeInternalQuery(
                  new MetaDataEntryListener(), new MetaDataEntryFilter(), false, true);

      while (true) {
        ClusterNode oldestSrvNode =
            CU.oldestAliveCacheServerNode(ctx.cache().context(), AffinityTopologyVersion.NONE);

        if (oldestSrvNode == null) break;

        GridCacheQueryManager qryMgr = metaDataCache.context().queries();

        CacheQuery<Map.Entry<PortableMetadataKey, BinaryMetadata>> qry =
            qryMgr.createScanQuery(new MetaDataPredicate(), null, false);

        qry.keepAll(false);

        qry.projection(ctx.cluster().get().forNode(oldestSrvNode));

        try {
          CacheQueryFuture<Map.Entry<PortableMetadataKey, BinaryMetadata>> fut = qry.execute();

          Map.Entry<PortableMetadataKey, BinaryMetadata> next;

          while ((next = fut.next()) != null) {
            assert next.getKey() != null : next;
            assert next.getValue() != null : next;

            addClientCacheMetaData(next.getKey(), next.getValue());
          }
        } catch (IgniteCheckedException e) {
          if (!ctx.discovery().alive(oldestSrvNode)
              || !ctx.discovery().pingNode(oldestSrvNode.id())) continue;
          else throw e;
        } catch (CacheException e) {
          if (X.hasCause(e, ClusterTopologyCheckedException.class, ClusterTopologyException.class))
            continue;
          else throw e;
        }

        break;
      }
    }

    for (Map.Entry<Integer, BinaryMetadata> e : metaBuf.entrySet())
      addMeta(e.getKey(), e.getValue().wrap(portableCtx));

    metaBuf.clear();

    startLatch.countDown();
  }
Code example #15
    /** {@inheritDoc} */
    @Override
    public String execute() {
      assert taskSes != null;

      assert startLatch != null;

      assert read1Latch != null;
      assert read2Latch != null;
      assert read3Latch != null;

      assert read1FinishedLatch != null;
      assert read2FinishedLatch != null;
      assert read3FinishedLatch != null;

      assert rmvLatch != null;

      startLatch.countDown();

      try {
        startLatch.await();
      } catch (InterruptedException e) {
        throw new IgniteException("Thread has been interrupted.", e);
      }

      X.println(">>> Consumer started.");

      try {
        read1Latch.await();
      } catch (InterruptedException e) {
        throw new IgniteException("Thread has been interrupted.", e);
      }

      // Test that checkpoints were saved properly.
      assertWithRetries(
          new GridAbsClosureX() {
            @Override
            public void applyx() {
              assert GLOBAL_VAL.equals(taskSes.loadCheckpoint(GLOBAL_KEY));
              assert SES_VAL.equals(taskSes.loadCheckpoint(SES_KEY));
            }
          });

      read1FinishedLatch.countDown();

      try {
        read2Latch.await();
      } catch (InterruptedException e) {
        throw new IgniteException("Thread has been interrupted.", e);
      }

      // Test that checkpoints were not overwritten.
      assertWithRetries(
          new GridAbsClosureX() {
            @Override
            public void applyx() {
              assert GLOBAL_VAL.equals(taskSes.loadCheckpoint(GLOBAL_KEY));
              assert SES_VAL.equals(taskSes.loadCheckpoint(SES_KEY));
            }
          });

      read2FinishedLatch.countDown();

      try {
        read3Latch.await();
      } catch (InterruptedException e) {
        throw new IgniteException("Thread has been interrupted.", e);
      }

      assertWithRetries(
          new GridAbsClosureX() {
            @Override
            public void applyx() {
              assertEquals(SES_VAL_OVERWRITTEN, taskSes.loadCheckpoint(GLOBAL_KEY));
              assertEquals(GLOBAL_VAL_OVERWRITTEN, taskSes.loadCheckpoint(SES_KEY));
            }
          });

      read3FinishedLatch.countDown();

      try {
        rmvLatch.await();
      } catch (InterruptedException e) {
        throw new IgniteException("Thread has been interrupted.", e);
      }
      // Check checkpoints are actually removed.
      assert !taskSes.removeCheckpoint(GLOBAL_KEY);
      assert !taskSes.removeCheckpoint(SES_KEY);

      assertWithRetries(
          new GridAbsClosureX() {
            @Override
            public void applyx() {
              assert taskSes.loadCheckpoint(GLOBAL_KEY) == null;
              assert taskSes.loadCheckpoint(SES_KEY) == null;
            }
          });

      return null;
    }
Code example #16
    /** {@inheritDoc} */
    @Override
    public String execute() {
      assert ignite != null;
      assert taskSes != null;

      assert startLatch != null;

      assert read1Latch != null;
      assert read2Latch != null;
      assert read3Latch != null;

      assert read1FinishedLatch != null;
      assert read2FinishedLatch != null;
      assert read3FinishedLatch != null;

      assert rmvLatch != null;

      startLatch.countDown();

      try {
        startLatch.await();
      } catch (InterruptedException e) {
        throw new IgniteException("Thread has been interrupted.", e);
      }

      X.println(">>> Producer started.");

      taskSes.saveCheckpoint(GLOBAL_KEY, GLOBAL_VAL, GLOBAL_SCOPE, 0);
      taskSes.saveCheckpoint(SES_KEY, SES_VAL, SESSION_SCOPE, 0);

      read1Latch.countDown();

      try {
        read1FinishedLatch.await();
      } catch (InterruptedException e) {
        throw new IgniteException("Thread has been interrupted.", e);
      }

      // No retries here as other thread should have seen checkpoint already.
      assert GLOBAL_VAL.equals(taskSes.loadCheckpoint(GLOBAL_KEY));
      assert SES_VAL.equals(taskSes.loadCheckpoint(SES_KEY));

      taskSes.saveCheckpoint(GLOBAL_KEY, SES_VAL + "-notoverwritten", GLOBAL_SCOPE, 0, false);
      taskSes.saveCheckpoint(SES_KEY, GLOBAL_VAL + "-notoverwritten", SESSION_SCOPE, 0, false);

      read2Latch.countDown();

      try {
        read2FinishedLatch.await();
      } catch (InterruptedException e) {
        throw new IgniteException("Thread has been interrupted.", e);
      }

      assert GLOBAL_VAL.equals(taskSes.loadCheckpoint(GLOBAL_KEY));
      assert SES_VAL.equals(taskSes.loadCheckpoint(SES_KEY));

      // Swap values.
      taskSes.saveCheckpoint(GLOBAL_KEY, SES_VAL_OVERWRITTEN, GLOBAL_SCOPE, 0, true);
      taskSes.saveCheckpoint(SES_KEY, GLOBAL_VAL_OVERWRITTEN, SESSION_SCOPE, 0, true);

      read3Latch.countDown();

      try {
        read3FinishedLatch.await();
      } catch (InterruptedException e) {
        throw new IgniteException("Thread has been interrupted.", e);
      }

      assert SES_VAL_OVERWRITTEN.equals(taskSes.loadCheckpoint(GLOBAL_KEY));
      assert GLOBAL_VAL_OVERWRITTEN.equals(taskSes.loadCheckpoint(SES_KEY));

      // Remove checkpoints.
      assert taskSes.removeCheckpoint(GLOBAL_KEY);
      assert taskSes.removeCheckpoint(SES_KEY);

      // Check checkpoints are actually removed.
      assert !taskSes.removeCheckpoint(GLOBAL_KEY);
      assert !taskSes.removeCheckpoint(SES_KEY);

      rmvLatch.countDown();

      assertWithRetries(
          new GridAbsClosureX() {
            @Override
            public void applyx() {
              assert taskSes.loadCheckpoint(GLOBAL_KEY) == null;
              assert taskSes.loadCheckpoint(SES_KEY) == null;
            }
          });

      return null;
    }
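
Examples #15 and #16 are the consumer and producer halves of one test: the producer saves checkpoints and counts a latch down, and the consumer awaits that latch before reading. A minimal sketch of that latch handshake, reduced to a single latch and a single shared value:

  import java.util.concurrent.CountDownLatch;

  public class LatchSketch {
    public static void main(String[] args) throws InterruptedException {
      CountDownLatch read1Latch = new CountDownLatch(1);
      StringBuilder checkpoint = new StringBuilder();

      Thread producer =
          new Thread(
              () -> {
                checkpoint.append("globalVal"); // Save the "checkpoint".
                read1Latch.countDown(); // Let the consumer read it.
              });

      producer.start();

      read1Latch.await(); // Consumer side: block until the value is published.

      System.out.println("Read checkpoint: " + checkpoint);

      producer.join();
    }
  }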