/**
   * Checks that gets work for explicit txs.
   *
   * @param ignite Ignite instance to start transactions on.
   * @param cache Cache to test.
   * @throws Exception If failed.
   */
  private void checkExplicitTx(Ignite ignite, IgniteCache<String, String> cache) throws Exception {
    IgniteCache<String, String> asyncCache = cache.withAsync();

    Transaction tx = ignite.transactions().txStart();

    try {
      assertNull(cache.get("key1"));

      tx.commit();
    } finally {
      tx.close();
    }

    tx = ignite.transactions().txStart();

    try {
      asyncCache.get("key2");

      assertNull(asyncCache.future().get());

      tx.commit();
    } finally {
      tx.close();
    }

    tx = ignite.transactions().txStart();

    try {
      assertTrue(cache.getAll(F.asSet("key3", "key4")).isEmpty());

      tx.commit();
    } finally {
      tx.close();
    }

    tx = ignite.transactions().txStart();

    try {
      asyncCache.getAll(F.asSet("key5", "key6"));

      assertTrue(((Map) asyncCache.future().get()).isEmpty());

      tx.commit();
    } finally {
      tx.close();
    }

    tx = ignite.transactions().txStart();

    try {
      cache.put("key7", "key7");

      cache.remove("key7");

      assertNull(cache.get("key7"));

      tx.commit();
    } finally {
      tx.close();
    }

    checkEmpty(cache);
  }
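A minimal driver sketch (not part of the original listing) showing how these helpers might be invoked; grid(0) and the default cache are assumptions borrowed from the surrounding test framework.

  /** Hypothetical driver: runs the implicit and explicit transaction checks on the default cache. */
  public void testGetsInsideTx() throws Exception {
    IgniteCache<String, String> cache = grid(0).cache(null);

    checkImplicitTx(cache);
    checkExplicitTx(grid(0), cache);
  }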
Code example #2
  /**
   * Tests offset and limit clauses for query.
   *
   * @throws Exception If failed.
   */
  public void testOffsetLimit() throws Exception {
    IgniteCache<Integer, Integer> c =
        ignite(0).getOrCreateCache(cacheConfig("ints", true, Integer.class, Integer.class));

    try {
      List<Integer> res = new ArrayList<>();

      Random rnd = new GridRandom();

      for (int i = 0; i < 10; i++) {
        int val = rnd.nextInt(100);

        c.put(i, val);
        res.add(val);
      }

      Collections.sort(res);

      String qry = "select _val from Integer order by _val ";

      assertEqualsCollections(res, columnQuery(c, qry));
      assertEqualsCollections(res.subList(0, 0), columnQuery(c, qry + "limit ?", 0));
      assertEqualsCollections(res.subList(0, 3), columnQuery(c, qry + "limit ?", 3));
      assertEqualsCollections(res.subList(0, 9), columnQuery(c, qry + "limit ? offset ?", 9, 0));
      assertEqualsCollections(res.subList(3, 7), columnQuery(c, qry + "limit ? offset ?", 4, 3));
      assertEqualsCollections(res.subList(7, 9), columnQuery(c, qry + "limit ? offset ?", 2, 7));
      assertEqualsCollections(res.subList(8, 10), columnQuery(c, qry + "limit ? offset ?", 2, 8));
      assertEqualsCollections(res.subList(9, 10), columnQuery(c, qry + "limit ? offset ?", 1, 9));
      assertEqualsCollections(res.subList(10, 10), columnQuery(c, qry + "limit ? offset ?", 1, 10));
      assertEqualsCollections(
          res.subList(9, 10), columnQuery(c, qry + "limit ? offset abs(-(4 + ?))", 1, 5));
    } finally {
      c.destroy();
    }
  }
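The cacheConfig helper is referenced above but not shown in this excerpt; a plausible sketch, assuming the boolean flag picks PARTITIONED vs. REPLICATED mode and the key/value classes are registered for SQL indexing. Raw types are kept so the call sites above compile unchanged.

  /** Sketch of the assumed cacheConfig helper. */
  @SuppressWarnings("unchecked")
  private static CacheConfiguration cacheConfig(String name, boolean partitioned,
      Class<?> keyCls, Class<?> valCls) {
    CacheConfiguration ccfg = new CacheConfiguration(name);

    ccfg.setCacheMode(partitioned ? CacheMode.PARTITIONED : CacheMode.REPLICATED);
    ccfg.setIndexedTypes(keyCls, valCls);

    return ccfg;
  }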
Code example #3
  /** @throws Exception If failed. */
  public void testGroupIndexOperations() throws Exception {
    IgniteCache<Integer, GroupIndexTestValue> c =
        ignite(0)
            .getOrCreateCache(cacheConfig("grp", false, Integer.class, GroupIndexTestValue.class));

    try {
      // Check group index usage.
      String qry = "select 1 from GroupIndexTestValue ";

      String plan = columnQuery(c, "explain " + qry + "where a = 1 and b > 0").get(0).toString();

      info("Plan: " + plan);

      assertTrue(plan.contains("grpIdx"));

      // Sorted list
      List<GroupIndexTestValue> list =
          F.asList(
              new GroupIndexTestValue(0, 0),
              new GroupIndexTestValue(0, 5),
              new GroupIndexTestValue(1, 1),
              new GroupIndexTestValue(1, 3),
              new GroupIndexTestValue(2, -1),
              new GroupIndexTestValue(2, 2));

      // Fill cache.
      for (int i = 0; i < list.size(); i++) c.put(i, list.get(i));

      // Check results.
      assertEquals(1, columnQuery(c, qry + "where a = 1 and b = 1").size());
      assertEquals(2, columnQuery(c, qry + "where a = 1 and b < 4").size());
      assertEquals(2, columnQuery(c, qry + "where a = 1 and b <= 3").size());
      assertEquals(1, columnQuery(c, qry + "where a = 1 and b < 3").size());
      assertEquals(2, columnQuery(c, qry + "where a = 1 and b > 0").size());
      assertEquals(1, columnQuery(c, qry + "where a = 1 and b > 1").size());
      assertEquals(2, columnQuery(c, qry + "where a = 1 and b >= 1").size());
      assertEquals(4, columnQuery(c, qry + "where a > 0 and b > 0").size());
      assertEquals(4, columnQuery(c, qry + "where a > 0 and b >= 1").size());
      assertEquals(3, columnQuery(c, qry + "where a > 0 and b > 1").size());
    } finally {
      c.destroy();
    }
  }
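GroupIndexTestValue itself is not included in this excerpt; for the "grpIdx" plan check above to succeed, both fields would need to belong to one composite index, roughly as in this sketch (field and index layout assumed).

  /** Sketch of the assumed query value class with a two-column group index named "grpIdx". */
  private static class GroupIndexTestValue implements Serializable {
    /** First indexed field. */
    @QuerySqlField(orderedGroups = @QuerySqlField.Group(name = "grpIdx", order = 0))
    private int a;

    /** Second indexed field. */
    @QuerySqlField(orderedGroups = @QuerySqlField.Group(name = "grpIdx", order = 1))
    private int b;

    /**
     * @param a Field a.
     * @param b Field b.
     */
    private GroupIndexTestValue(int a, int b) {
      this.a = a;
      this.b = b;
    }
  }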
  /**
   * @param g Grid.
   * @throws Exception If failed.
   */
  private void checkNodes(Ignite g) throws Exception {
    IgniteCache<String, String> cache = g.cache("test");

    for (char c = 'a'; c <= 'z'; c++) {
      String key = Character.toString(c);

      cache.put(key, "val-" + key);

      String v1 = cache.get(key);
      String v2 = cache.get(key); // Get second time.

      info("v1: " + v1);
      info("v2: " + v2);

      assertNotNull(v1);
      assertNotNull(v2);

      if (affinity(cache).mapKeyToNode(key).isLocal()) assertSame(v1, v2);
      else assertEquals(v1, v2);
    }
  }
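A possible driver (not shown in the source) that exercises checkNodes on every started node; gridCount() is assumed to come from the test framework.

  /** Hypothetical driver: runs the put/get check against each grid in the topology. */
  public void testPutGetOnAllNodes() throws Exception {
    for (int i = 0; i < gridCount(); i++)
      checkNodes(grid(i));
  }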
  /**
   * Checks that cache is empty.
   *
   * @param cache Cache to check.
   * @throws org.apache.ignite.internal.IgniteInterruptedCheckedException If interrupted while
   *     sleeping.
   */
  @SuppressWarnings({"ErrorNotRethrown", "TypeMayBeWeakened"})
  private void checkEmpty(IgniteCache<String, String> cache)
      throws IgniteInterruptedCheckedException {
    for (int i = 0; i < 3; i++) {
      try {
        assertTrue(!cache.iterator().hasNext());

        break;
      } catch (AssertionError e) {
        if (i == 2) throw e;

        info(">>> Cache is not empty, flushing evictions.");

        U.sleep(1000);
      }
    }
  }
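The same "retry until the cache drains" idea could also be expressed with the test framework's GridTestUtils.waitForCondition; this is a sketch under the assumption that the helper is available in this suite, not a drop-in from the original code.

  /** Sketch: equivalent emptiness check expressed via waitForCondition (assumed available). */
  private void checkEmptyViaCondition(final IgniteCache<String, String> cache)
      throws IgniteInterruptedCheckedException {
    boolean empty = GridTestUtils.waitForCondition(new GridAbsPredicate() {
      @Override public boolean apply() {
        return !cache.iterator().hasNext();
      }
    }, 3000);

    assertTrue("Cache is not empty.", empty);
  }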
  /**
   * Tests that SQL queries keep succeeding while cluster nodes are concurrently restarted.
   *
   * @throws Exception If failed.
   */
  @SuppressWarnings({"TooBroadScope"})
  public void testRestarts() throws Exception {
    int duration = 60 * 1000;
    int qryThreadNum = 10;
    final long nodeLifeTime = 2 * 1000;
    final int logFreq = 20;

    final IgniteCache<Integer, Integer> cache = grid(0).cache(null);

    assert cache != null;

    for (int i = 0; i < KEY_CNT; i++) cache.put(i, i);

    assertEquals(KEY_CNT, cache.localSize());

    final AtomicInteger qryCnt = new AtomicInteger();

    final AtomicBoolean done = new AtomicBoolean();

    IgniteInternalFuture<?> fut1 =
        multithreadedAsync(
            new CAX() {
              @Override
              public void applyx() throws IgniteCheckedException {
                while (!done.get()) {
                  Collection<Cache.Entry<Integer, Integer>> res =
                      cache.query(new SqlQuery(Integer.class, "_val >= 0")).getAll();

                  assertFalse(res.isEmpty());

                  int c = qryCnt.incrementAndGet();

                  if (c % logFreq == 0) info("Executed queries: " + c);
                }
              }
            },
            qryThreadNum);

    final AtomicInteger restartCnt = new AtomicInteger();

    CollectingEventListener lsnr = new CollectingEventListener();

    for (int i = 0; i < GRID_CNT; i++)
      grid(i).events().localListen(lsnr, EventType.EVT_CACHE_REBALANCE_STOPPED);

    IgniteInternalFuture<?> fut2 =
        multithreadedAsync(
            new Callable<Object>() {
              @SuppressWarnings({"BusyWait"})
              @Override
              public Object call() throws Exception {
                while (!done.get()) {
                  int idx = GRID_CNT;

                  startGrid(idx);

                  Thread.sleep(nodeLifeTime);

                  stopGrid(idx);

                  int c = restartCnt.incrementAndGet();

                  if (c % logFreq == 0) info("Node restarts: " + c);
                }

                return true;
              }
            },
            1);

    Thread.sleep(duration);

    done.set(true);

    fut1.get();
    fut2.get();

    info("Awaiting rebalance events [restartCnt=" + restartCnt.get() + ']');

    boolean success = lsnr.awaitEvents(GRID_CNT * 2 * restartCnt.get(), 15000);

    for (int i = 0; i < GRID_CNT; i++)
      grid(i).events().stopLocalListen(lsnr, EventType.EVT_CACHE_REBALANCE_STOPPED);

    assert success;
  }
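CollectingEventListener is used above but not defined in this excerpt; a minimal sketch of what it might look like, counting rebalance events and blocking in awaitEvents until the expected count arrives or the timeout expires.

  /** Sketch of the assumed event listener: counts events and supports awaiting a target count. */
  private static class CollectingEventListener implements IgnitePredicate<Event> {
    /** Number of events received so far. */
    private int evtCnt;

    /** {@inheritDoc} */
    @Override public synchronized boolean apply(Event evt) {
      evtCnt++;

      notifyAll();

      return true; // Keep listening.
    }

    /**
     * @param cnt Expected number of events.
     * @param timeout Timeout in milliseconds.
     * @return {@code true} if the expected number of events arrived in time.
     * @throws InterruptedException If interrupted while waiting.
     */
    public synchronized boolean awaitEvents(long cnt, long timeout) throws InterruptedException {
      long end = System.currentTimeMillis() + timeout;

      while (evtCnt < cnt) {
        long left = end - System.currentTimeMillis();

        if (left <= 0)
          return false;

        wait(left);
      }

      return true;
    }
  }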
Code example #7
  /**
   * Check how prefetch override works.
   *
   * @throws Exception If failed.
   */
  public void testOpenPrefetchOverride() throws Exception {
    create(igfsSecondary, paths(DIR, SUBDIR), paths(FILE));

    // Write enough data to the secondary file system.
    final int blockSize = IGFS_BLOCK_SIZE;

    IgfsOutputStream out = igfsSecondary.append(FILE, false);

    int totalWritten = 0;

    while (totalWritten < blockSize * 2 + chunk.length) {
      out.write(chunk);

      totalWritten += chunk.length;
    }

    out.close();

    awaitFileClose(igfsSecondary.asSecondary(), FILE);

    // Instantiate file system with overridden "seq reads before prefetch" property.
    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(PRIMARY_CFG));

    int seqReads = SEQ_READS_BEFORE_PREFETCH + 1;

    cfg.setInt(String.format(PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, "igfs:grid@"), seqReads);

    FileSystem fs = FileSystem.get(new URI(PRIMARY_URI), cfg);

    // Read the first two blocks.
    Path fsHome = new Path(PRIMARY_URI);
    Path dir = new Path(fsHome, DIR.name());
    Path subdir = new Path(dir, SUBDIR.name());
    Path file = new Path(subdir, FILE.name());

    FSDataInputStream fsIn = fs.open(file);

    final byte[] readBuf = new byte[blockSize * 2];

    fsIn.readFully(0, readBuf, 0, readBuf.length);

    // Wait for a while for prefetch to finish (if any).
    IgfsMetaManager meta = igfs.context().meta();

    IgfsFileInfo info = meta.info(meta.fileId(FILE));

    IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), 2);

    IgniteCache<IgfsBlockKey, byte[]> dataCache =
        igfs.context().kernalContext().cache().jcache(igfs.configuration().getDataCacheName());

    for (int i = 0; i < 10; i++) {
      if (dataCache.containsKey(key)) break;
      else U.sleep(100);
    }

    fsIn.close();

    // Remove the file from the secondary file system.
    igfsSecondary.delete(FILE, false);

    // Try reading the third block. Should fail.
    GridTestUtils.assertThrows(
        log,
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            IgfsInputStream in0 = igfs.open(FILE);

            in0.seek(blockSize * 2);

            try {
              in0.read(readBuf);
            } finally {
              U.closeQuiet(in0);
            }

            return null;
          }
        },
        IOException.class,
        "Failed to read data due to secondary file system exception: /dir/subdir/file");
  }
  /**
   * Checks that gets work for implicit txs.
   *
   * @param cache Cache to test.
   * @throws Exception If failed.
   */
  private void checkImplicitTx(IgniteCache<String, String> cache) throws Exception {
    assertNull(cache.get("key1"));

    IgniteCache<String, String> asyncCache = cache.withAsync();

    asyncCache.get("key2");

    assertNull(asyncCache.future().get());

    assertTrue(cache.getAll(F.asSet("key3", "key4")).isEmpty());

    asyncCache.getAll(F.asSet("key5", "key6"));

    assertTrue(((Map) asyncCache.future().get()).isEmpty());

    cache.put("key7", "key7");
    cache.remove("key7", "key7");
    assertNull(cache.get("key7"));

    checkEmpty(cache);
  }
Code example #9
  /**
   * @param c Cache.
   * @param qry Query.
   * @param args Arguments.
   * @return Column as list.
   */
  private static <X> List<X> columnQuery(IgniteCache<?, ?> c, String qry, Object... args) {
    return column(0, c.query(new SqlFieldsQuery(qry).setArgs(args)).getAll());
  }
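The column helper that columnQuery relies on is also missing from this excerpt; a likely sketch, assuming it simply extracts one column index from the rows returned by a SqlFieldsQuery.

  /**
   * @param idx Column index.
   * @param rows Rows returned by a fields query.
   * @return Values of the given column as a list.
   */
  @SuppressWarnings("unchecked")
  private static <X> List<X> column(int idx, List<List<?>> rows) {
    List<X> res = new ArrayList<>(rows.size());

    for (List<?> row : rows)
      res.add((X)row.get(idx));

    return res;
  }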