Example #1
  /**
   * In an ATOMIC cache with CLOCK write order mode, if a key is updated from different nodes at
   * the same time, only one update wins and the others are ignored (in tests this can happen even
   * when updates are executed from different nodes sequentially). This delay is used to avoid
   * such lost updates.
   *
   * @param cache Cache.
   * @throws Exception If failed.
   */
  protected void atomicClockModeDelay(IgniteCache cache) throws Exception {
    CacheConfiguration ccfg = (CacheConfiguration) cache.getConfiguration(CacheConfiguration.class);

    if (ccfg.getCacheMode() != LOCAL
        && ccfg.getAtomicityMode() == CacheAtomicityMode.ATOMIC
        && ccfg.getAtomicWriteOrderMode() == CacheAtomicWriteOrderMode.CLOCK) U.sleep(50);
  }
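A hedged usage sketch (not from the source): a test fragment that updates the same key from two nodes in sequence and calls the helper above in between. grid(int), the cache name "partitioned" and the Integer key/value types are assumptions about the surrounding test class.

  // Hypothetical test fragment: update the same key from two nodes in sequence.
  IgniteCache<Integer, Integer> cache0 = grid(0).cache("partitioned");
  IgniteCache<Integer, Integer> cache1 = grid(1).cache("partitioned");

  cache0.put(1, 1);

  // Without this pause the second update could be discarded as "older" under CLOCK ordering.
  atomicClockModeDelay(cache0);

  cache1.put(1, 2);

  assertEquals((Integer) 2, cache0.get(1));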
  /** {@inheritDoc} */
  @SuppressWarnings("unchecked")
  @Override
  public boolean addAll(final Collection<? extends T> items) {
    A.notNull(items, "items");

    try {
      boolean retVal;

      int cnt = 0;

      while (true) {
        try (IgniteInternalTx tx = cache.txStartEx(PESSIMISTIC, REPEATABLE_READ)) {
          Long idx = (Long) cache.invoke(queueKey, new AddProcessor(id, items.size())).get();

          if (idx != null) {
            checkRemoved(idx);

            Map<GridCacheQueueItemKey, T> putMap = new HashMap<>();

            for (T item : items) {
              putMap.put(itemKey(idx), item);

              idx++;
            }

            cache.putAll(putMap);

            retVal = true;
          } else retVal = false;

          tx.commit();

          break;
        } catch (ClusterTopologyCheckedException e) {
          if (e instanceof ClusterGroupEmptyCheckedException) throw e;

          if (cnt++ == MAX_UPDATE_RETRIES) throw e;
          else {
            U.warn(log, "Failed to add item, will retry [err=" + e + ']');

            U.sleep(RETRY_DELAY);
          }
        }
      }

      return retVal;
    } catch (IgniteCheckedException e) {
      throw U.convertException(e);
    }
  }
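The addAll, offer, poll and removeItem methods in these snippets appear to belong to an internal cache-backed queue implementation; applications normally reach them through the public IgniteQueue API. A minimal, self-contained sketch under that assumption, using a hypothetical queue name "demo-queue" and a default node configuration:

  import java.util.Arrays;

  import org.apache.ignite.Ignite;
  import org.apache.ignite.IgniteQueue;
  import org.apache.ignite.Ignition;
  import org.apache.ignite.cache.CacheAtomicityMode;
  import org.apache.ignite.configuration.CollectionConfiguration;

  public class QueueUsageSketch {
    public static void main(String[] args) {
      try (Ignite ignite = Ignition.start()) {
        CollectionConfiguration colCfg = new CollectionConfiguration();

        // TRANSACTIONAL mode backs the queue with a transactional cache, as in the snippet above.
        colCfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

        IgniteQueue<String> queue = ignite.queue("demo-queue", 0 /* unbounded */, colCfg);

        // Delegates to the internal addAll(...) / poll() shown in this example.
        queue.addAll(Arrays.asList("a", "b", "c"));

        System.out.println(queue.poll()); // prints "a"
      }
    }
  }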
  /** @throws Exception If any error occurs. */
  public void testMultipleStartOnCoordinatorStop() throws Exception {
    for (int k = 0; k < 3; k++) {
      log.info("Iteration: " + k);

      clientFlagGlobal = false;

      final int START_NODES = 5;
      final int JOIN_NODES = 10;

      startGrids(START_NODES);

      final CyclicBarrier barrier = new CyclicBarrier(JOIN_NODES + 1);

      final AtomicInteger startIdx = new AtomicInteger(START_NODES);

      IgniteInternalFuture<?> fut =
          GridTestUtils.runMultiThreadedAsync(
              new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                  int idx = startIdx.getAndIncrement();

                  Thread.currentThread().setName("start-thread-" + idx);

                  barrier.await();

                  Ignite ignite = startGrid(idx);

                  assertFalse(ignite.configuration().isClientMode());

                  log.info("Started node: " + ignite.name());

                  return null;
                }
              },
              JOIN_NODES,
              "start-thread");

      barrier.await();

      U.sleep(ThreadLocalRandom.current().nextInt(10, 100));

      for (int i = 0; i < START_NODES; i++) stopGrid(i);

      fut.get();

      stopAllGrids();
    }
  }
  /**
   * Checks that cache is empty.
   *
   * @param cache Cache to check.
   * @throws org.apache.ignite.internal.IgniteInterruptedCheckedException If interrupted while
   *     sleeping.
   */
  @SuppressWarnings({"ErrorNotRethrown", "TypeMayBeWeakened"})
  private void checkEmpty(IgniteCache<String, String> cache)
      throws IgniteInterruptedCheckedException {
    for (int i = 0; i < 3; i++) {
      try {
        assertTrue(!cache.iterator().hasNext());

        break;
      } catch (AssertionError e) {
        if (i == 2) throw e;

        info(">>> Cache is not empty, flushing evictions.");

        U.sleep(1000);
      }
    }
  }
  /**
   * Executes command using {@code shell} channel.
   *
   * @param ses SSH session.
   * @param cmd Command.
   * @throws JSchException In case of SSH error.
   * @throws IOException If IO error occurs.
   * @throws IgniteInterruptedCheckedException If thread was interrupted while waiting.
   */
  private void shell(Session ses, String cmd)
      throws JSchException, IOException, IgniteInterruptedCheckedException {
    ChannelShell ch = null;

    try {
      ch = (ChannelShell) ses.openChannel("shell");

      ch.connect();

      try (PrintStream out = new PrintStream(ch.getOutputStream(), true)) {
        out.println(cmd);

        U.sleep(1000);
      }
    } finally {
      if (ch != null && ch.isConnected()) ch.disconnect();
    }
  }
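A hedged sketch of a caller for the helper above: open a JSch session and push one command through it. The host, credentials and command are placeholders, not values taken from the source.

  // Hypothetical caller of shell(Session, String); host, user, password and the command are placeholders.
  JSch jsch = new JSch();

  Session ses = jsch.getSession("user", "host.example.com", 22);

  ses.setPassword("password");
  ses.setConfig("StrictHostKeyChecking", "no"); // test-only: skip host key verification

  ses.connect();

  try {
    shell(ses, "ls -la");
  } finally {
    if (ses.isConnected()) ses.disconnect();
  }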
  /** {@inheritDoc} */
  @SuppressWarnings("unchecked")
  @Override
  public boolean offer(final T item) throws IgniteException {
    A.notNull(item, "item");

    try {
      boolean retVal;

      int cnt = 0;

      while (true) {
        try {
          try (IgniteInternalTx tx = cache.txStartEx(PESSIMISTIC, REPEATABLE_READ)) {
            Long idx = (Long) cache.invoke(queueKey, new AddProcessor(id, 1)).get();

            if (idx != null) {
              checkRemoved(idx);

              cache.getAndPut(itemKey(idx), item);

              retVal = true;
            } else retVal = false;

            tx.commit();

            break;
          }
        } catch (ClusterTopologyCheckedException e) {
          if (e instanceof ClusterGroupEmptyCheckedException) throw e;

          if (cnt++ == MAX_UPDATE_RETRIES) throw e;
          else {
            U.warn(log, "Failed to add item, will retry [err=" + e + ']');

            U.sleep(RETRY_DELAY);
          }
        }
      }

      return retVal;
    } catch (IgniteCheckedException e) {
      throw U.convertException(e);
    }
  }
  /**
   * Calls the garbage collector and waits.
   *
   * @throws Exception If any thread has interrupted the current thread while waiting.
   */
  private void gc() throws Exception {
    Runtime rt = Runtime.getRuntime();

    long freeMem0 = rt.freeMemory();
    long freeMem = Long.MAX_VALUE;

    int cnt = 0;

    while (freeMem0 < freeMem && cnt < GC_CALL_CNT) {
      System.gc();

      U.sleep(WAIT_TIME);

      cnt++;

      freeMem = freeMem0;
      freeMem0 = rt.freeMemory();
    }
  }
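A hedged sketch of how such a helper is typically used (not from the source): stabilize free memory with gc() before sampling heap usage in a leak-style check. The workload and the 10 MB threshold are placeholders.

  // Hypothetical leak-check fragment built around the gc() helper above.
  Runtime rt = Runtime.getRuntime();

  long usedBefore = rt.totalMemory() - rt.freeMemory();

  // ... run the workload under test ...

  gc(); // let free memory settle before measuring

  long usedAfter = rt.totalMemory() - rt.freeMemory();

  assertTrue("Retained too much memory: " + (usedAfter - usedBefore) + " bytes",
      usedAfter - usedBefore < 10 * 1024 * 1024);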
  /** {@inheritDoc} */
  @SuppressWarnings("unchecked")
  @Nullable
  @Override
  public T poll() throws IgniteException {
    try {
      int cnt = 0;

      T retVal;

      while (true) {
        try (IgniteInternalTx tx = cache.txStartEx(PESSIMISTIC, REPEATABLE_READ)) {
          Long idx = (Long) cache.invoke(queueKey, new PollProcessor(id)).get();

          if (idx != null) {
            checkRemoved(idx);

            retVal = (T) cache.getAndRemove(itemKey(idx));

            assert retVal != null : idx;
          } else retVal = null;

          tx.commit();

          break;
        } catch (ClusterTopologyCheckedException e) {
          if (e instanceof ClusterGroupEmptyCheckedException) throw e;

          if (cnt++ == MAX_UPDATE_RETRIES) throw e;
          else {
            U.warn(log, "Failed to add item, will retry [err=" + e + ']');

            U.sleep(RETRY_DELAY);
          }
        }
      }

      return retVal;
    } catch (IgniteCheckedException e) {
      throw U.convertException(e);
    }
  }
  /** {@inheritDoc} */
  @SuppressWarnings("unchecked")
  @Override
  protected void removeItem(final long rmvIdx) throws IgniteCheckedException {
    try {
      int cnt = 0;

      while (true) {
        try (IgniteInternalTx tx = cache.txStartEx(PESSIMISTIC, REPEATABLE_READ)) {
          Long idx = (Long) cache.invoke(queueKey, new RemoveProcessor(id, rmvIdx)).get();

          if (idx != null) {
            checkRemoved(idx);

            boolean rmv = cache.remove(itemKey(idx));

            assert rmv : idx;
          }

          tx.commit();

          break;
        } catch (ClusterTopologyCheckedException e) {
          if (e instanceof ClusterGroupEmptyCheckedException) throw e;

          if (cnt++ == MAX_UPDATE_RETRIES) throw e;
          else {
            U.warn(log, "Failed to add item, will retry [err=" + e + ']');

            U.sleep(RETRY_DELAY);
          }
        }
      }
    } catch (IgniteCheckedException e) {
      throw U.convertException(e);
    }
  }
Example #10
  /** {@inheritDoc} */
  @Override
  protected void afterTest() throws Exception {
    Transaction tx = jcache().unwrap(Ignite.class).transactions().tx();

    if (tx != null) {
      tx.close();

      fail("Cache transaction remained after test completion: " + tx);
    }

    for (int i = 0; i < gridCount(); i++) {
      info("Checking grid: " + i);

      while (true) {
        try {
          final int fi = i;

          assertTrue(
              "Cache is not empty: "
                  + " localSize = "
                  + jcache(fi).localSize(CachePeekMode.ALL)
                  + ", local entries "
                  + entrySet(jcache(fi).localEntries()),
              GridTestUtils.waitForCondition(
                  // Preloading may happen as nodes leave, so we need to wait.
                  new GridAbsPredicateX() {
                    @Override
                    public boolean applyx() throws IgniteCheckedException {
                      jcache(fi).removeAll();

                      if (jcache(fi).size(CachePeekMode.ALL) > 0) {
                        for (Cache.Entry<String, ?> k : jcache(fi).localEntries())
                          jcache(fi).remove(k.getKey());
                      }

                      return jcache(fi).localSize(CachePeekMode.ALL) == 0;
                    }
                  },
                  getTestTimeout()));

          int primaryKeySize = jcache(i).localSize(CachePeekMode.PRIMARY);
          int keySize = jcache(i).localSize();
          int size = jcache(i).localSize();
          int globalSize = jcache(i).size();
          int globalPrimarySize = jcache(i).size(CachePeekMode.PRIMARY);

          info(
              "Size after [idx="
                  + i
                  + ", size="
                  + size
                  + ", keySize="
                  + keySize
                  + ", primarySize="
                  + primaryKeySize
                  + ", globalSize="
                  + globalSize
                  + ", globalPrimarySize="
                  + globalPrimarySize
                  + ", entrySet="
                  + jcache(i).localEntries()
                  + ']');

          assertEquals(
              "Cache is not empty [idx=" + i + ", entrySet=" + jcache(i).localEntries() + ']',
              0,
              jcache(i).localSize(CachePeekMode.ALL));

          break;
        } catch (Exception e) {
          if (X.hasCause(e, ClusterTopologyCheckedException.class)) {
            info("Got topology exception while tear down (will retry in 1000ms).");

            U.sleep(1000);
          } else throw e;
        }
      }

      for (Cache.Entry<String, Integer> entry : jcache(i).localEntries(CachePeekMode.SWAP))
        jcache(i).remove(entry.getKey());
    }

    assert jcache().unwrap(Ignite.class).transactions().tx() == null;
    assertEquals("Cache is not empty", 0, jcache().localSize(CachePeekMode.ALL));

    resetStore();
  }
  /**
   * Checks how the prefetch override works.
   *
   * @throws Exception If failed.
   */
  public void testOpenPrefetchOverride() throws Exception {
    create(igfsSecondary, paths(DIR, SUBDIR), paths(FILE));

    // Write enough data to the secondary file system.
    final int blockSize = IGFS_BLOCK_SIZE;

    IgfsOutputStream out = igfsSecondary.append(FILE, false);

    int totalWritten = 0;

    while (totalWritten < blockSize * 2 + chunk.length) {
      out.write(chunk);

      totalWritten += chunk.length;
    }

    out.close();

    awaitFileClose(igfsSecondary.asSecondary(), FILE);

    // Instantiate file system with overridden "seq reads before prefetch" property.
    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(PRIMARY_CFG));

    int seqReads = SEQ_READS_BEFORE_PREFETCH + 1;

    cfg.setInt(String.format(PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, "igfs:grid@"), seqReads);

    FileSystem fs = FileSystem.get(new URI(PRIMARY_URI), cfg);

    // Read the first two blocks.
    Path fsHome = new Path(PRIMARY_URI);
    Path dir = new Path(fsHome, DIR.name());
    Path subdir = new Path(dir, SUBDIR.name());
    Path file = new Path(subdir, FILE.name());

    FSDataInputStream fsIn = fs.open(file);

    final byte[] readBuf = new byte[blockSize * 2];

    fsIn.readFully(0, readBuf, 0, readBuf.length);

    // Wait for a while for prefetch to finish (if any).
    IgfsMetaManager meta = igfs.context().meta();

    IgfsFileInfo info = meta.info(meta.fileId(FILE));

    IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), 2);

    IgniteCache<IgfsBlockKey, byte[]> dataCache =
        igfs.context().kernalContext().cache().jcache(igfs.configuration().getDataCacheName());

    for (int i = 0; i < 10; i++) {
      if (dataCache.containsKey(key)) break;
      else U.sleep(100);
    }

    fsIn.close();

    // Remove the file from the secondary file system.
    igfsSecondary.delete(FILE, false);

    // Try reading the third block. Should fail.
    GridTestUtils.assertThrows(
        log,
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            IgfsInputStream in0 = igfs.open(FILE);

            in0.seek(blockSize * 2);

            try {
              in0.read(readBuf);
            } finally {
              U.closeQuiet(in0);
            }

            return null;
          }
        },
        IOException.class,
        "Failed to read data due to secondary file system exception: /dir/subdir/file");
  }
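The test overrides the prefetch threshold on the Hadoop client side via a configuration property. For contrast, a hedged sketch of the node-side default, assuming the IGFS-era FileSystemConfiguration API with setSequentialReadsBeforePrefetch; the cache/file-system names are placeholders.

  // Sketch only: node-side counterpart of the client-side override used in the test above.
  FileSystemConfiguration igfsCfg = new FileSystemConfiguration();

  igfsCfg.setName("igfs");
  igfsCfg.setSequentialReadsBeforePrefetch(2); // start read-ahead after two sequential block reads

  IgniteConfiguration nodeCfg = new IgniteConfiguration();

  nodeCfg.setFileSystemConfiguration(igfsCfg);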
  /**
   * Sends cache query response.
   *
   * @param nodeId Node to send the response to.
   * @param res Cache query response.
   * @param timeout Message timeout.
   * @return {@code true} if response was sent, {@code false} otherwise.
   */
  private boolean sendQueryResponse(UUID nodeId, GridCacheQueryResponse res, long timeout) {
    ClusterNode node = cctx.node(nodeId);

    if (node == null) return false;

    int attempt = 1;

    IgniteCheckedException err = null;

    while (!Thread.currentThread().isInterrupted()) {
      try {
        if (log.isDebugEnabled()) log.debug("Send query response: " + res);

        Object topic = topic(nodeId, res.requestId());

        cctx.io()
            .sendOrderedMessage(
                node, topic, res, cctx.ioPolicy(), timeout > 0 ? timeout : Long.MAX_VALUE);

        return true;
      } catch (ClusterTopologyCheckedException ignored) {
        if (log.isDebugEnabled())
          log.debug(
              "Failed to send query response since node left grid [nodeId="
                  + nodeId
                  + ", res="
                  + res
                  + "]");

        return false;
      } catch (IgniteCheckedException e) {
        if (err == null) err = e;

        if (Thread.currentThread().isInterrupted()) break;

        if (attempt < RESEND_ATTEMPTS) {
          if (log.isDebugEnabled())
            log.debug(
                "Failed to send queries response (will try again) [nodeId="
                    + nodeId
                    + ", res="
                    + res
                    + ", attempt="
                    + attempt
                    + ", err="
                    + e
                    + "]");

          if (!Thread.currentThread().isInterrupted())
            try {
              U.sleep(RESEND_FREQ);
            } catch (IgniteInterruptedCheckedException e1) {
              U.error(
                  log,
                  "Waiting for queries response resending was interrupted (response will not be sent) "
                      + "[nodeId="
                      + nodeId
                      + ", response="
                      + res
                      + "]",
                  e1);

              return false;
            }
        } else {
          U.error(
              log,
              "Failed to sender cache response [nodeId=" + nodeId + ", response=" + res + "]",
              err);

          return false;
        }
      }

      attempt++;
    }

    return false;
  }