/**
   * @param concurrency Concurrency.
   * @param isolation Isolation.
   * @throws Exception If test failed.
   */
  private void checkTransactionTimeout(
      GridCacheTxConcurrency concurrency, GridCacheTxIsolation isolation) throws Exception {

    boolean wasEx = false;

    GridCacheTx tx = null;

    try {
      GridCache<Integer, String> cache = grid.cache(null);

      tx = cache.txStart(concurrency, isolation, 50, 0);
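      // The 50 ms transaction timeout is shorter than the 100 ms sleep below, so the commit
      // is expected to fail with an optimistic or timeout exception.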

      cache.put(1, "1");

      Thread.sleep(100);

      cache.put(1, "2");

      tx.commit();
    } catch (GridCacheTxOptimisticException e) {
      info("Received expected optimistic exception: " + e.getMessage());

      wasEx = true;

      tx.rollback();
    } catch (GridCacheTxTimeoutException e) {
      info("Received expected timeout exception: " + e.getMessage());

      wasEx = true;

      tx.rollback();
    }

    assert wasEx;
  }
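
  // Illustrative sketch only (not part of the original test): a driver along these lines could
  // exercise the helper above for each concurrency/isolation combination. The test method names
  // are hypothetical; the enum constants are the standard GridCacheTxConcurrency and
  // GridCacheTxIsolation values.

  /** @throws Exception If test failed. */
  public void testOptimisticTransactionTimeout() throws Exception {
    checkTransactionTimeout(GridCacheTxConcurrency.OPTIMISTIC, GridCacheTxIsolation.READ_COMMITTED);
    checkTransactionTimeout(GridCacheTxConcurrency.OPTIMISTIC, GridCacheTxIsolation.REPEATABLE_READ);
    checkTransactionTimeout(GridCacheTxConcurrency.OPTIMISTIC, GridCacheTxIsolation.SERIALIZABLE);
  }

  /** @throws Exception If test failed. */
  public void testPessimisticTransactionTimeout() throws Exception {
    checkTransactionTimeout(GridCacheTxConcurrency.PESSIMISTIC, GridCacheTxIsolation.READ_COMMITTED);
    checkTransactionTimeout(GridCacheTxConcurrency.PESSIMISTIC, GridCacheTxIsolation.REPEATABLE_READ);
    checkTransactionTimeout(GridCacheTxConcurrency.PESSIMISTIC, GridCacheTxIsolation.SERIALIZABLE);
  }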
  /**
   * Populates cache with test data.
   *
   * @param cache Cache.
   * @throws GridException In case of error.
   */
  private static void populate(GridCache<Object, Object> cache) throws GridException {
    cache.put("o1", new Organization(1, "GridGain"));
    cache.put("o2", new Organization(2, "Other"));

    // Persons are collocated with their organizations to support joins.
    cache.put(new GridCacheAffinityKey<String>("p1", "o1"), new Person(1, "John White", 25, 1));
    cache.put(new GridCacheAffinityKey<String>("p2", "o1"), new Person(2, "Joe Black", 35, 1));
    cache.put(new GridCacheAffinityKey<String>("p3", "o2"), new Person(3, "Mike Green", 40, 2));
  }
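
  // Illustrative sketch only (not part of the original example): because persons are collocated
  // with their organizations via GridCacheAffinityKey, a SQL join over the populated cache can be
  // served without moving data between nodes. The clause assumes the Person/Organization fields
  // implied by the constructors above (orgId, id, name) are query-enabled.
  private static void queryCollocatedJoin(GridCache<Object, Object> cache) throws GridException {
    GridCacheQuery<Map.Entry<Object, Object>> qry =
        cache.queries()
            .createSqlQuery(
                Person.class,
                "from Person, Organization "
                    + "where Person.orgId = Organization.id and Organization.name = ?");

    // Print all persons working for the given organization.
    for (Map.Entry<Object, Object> e : qry.execute("GridGain").get())
      System.out.println("Person: " + e.getValue());
  }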
  /**
   * Listen to events coming from all grid nodes.
   *
   * @throws GridException If failed.
   */
  private static void remoteListen() throws GridException {
    Grid g = GridGain.grid();

    GridCache<Integer, String> cache = g.cache(CACHE_NAME);

    // Register remote event listeners on all nodes running cache.
    GridFuture<?> fut =
        g.forCache(CACHE_NAME)
            .events()
            .remoteListen(
                // This optional local callback is called for each event notification
                // that passed remote predicate filter.
                new GridBiPredicate<UUID, GridCacheEvent>() {
                  @Override
                  public boolean apply(UUID nodeId, GridCacheEvent evt) {
                    System.out.println();
                    System.out.println(
                        "Received event [evt="
                            + evt.name()
                            + ", key="
                            + evt.key()
                            + ", oldVal="
                            + evt.oldValue()
                            + ", newVal="
                            + evt.newValue() + ']');

                    return true; // Return true to continue listening.
                  }
                },
                // Remote filter which only accepts events for keys that are
                // greater than or equal to 10, and only if the local node is primary for the key.
                new GridPredicate<GridCacheEvent>() {
                  /** Auto-inject grid instance. */
                  @GridInstanceResource private Grid g;

                  @Override
                  public boolean apply(GridCacheEvent evt) {
                    Integer key = evt.key();

                    return key >= 10
                        && g.cache(CACHE_NAME).affinity().isPrimary(g.localNode(), key);
                  }
                },
                // Types of events for which listeners are registered.
                EVT_CACHE_OBJECT_PUT,
                EVT_CACHE_OBJECT_READ,
                EVT_CACHE_OBJECT_REMOVED);

    // Wait until event listeners are subscribed on all nodes.
    fut.get();

    int keyCnt = 20;

    // Generate cache events.
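    // Given the remote filter above, only puts for keys 10..19 that land on a primary node
    // are delivered to the local callback.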
    for (int i = 0; i < keyCnt; i++) cache.putx(i, Integer.toString(i));
  }
  /** @throws Exception If failed. */
  public void testInvalidateFlag() throws Exception {
    GridEx g0 = grid(0);

    GridCache<String, String> cache = g0.cache(PARTITIONED_CACHE_NAME);

    String key = null;

    for (int i = 0; i < 10_000; i++) {
      if (!cache.affinity().isPrimaryOrBackup(g0.localNode(), String.valueOf(i))) {
        key = String.valueOf(i);

        break;
      }
    }

    assertNotNull(key);

    // Create an entry in the near cache; it is invalidated when the client update below
    // sets the INVALIDATE flag.
    cache.put(key, key);

    assertNotNull(cache.peek(key));

    GridClientData d = client.data(PARTITIONED_CACHE_NAME);

    d.flagsOn(GridClientCacheFlag.INVALIDATE).put(key, "zzz");

    for (Grid g : G.allGrids()) {
      cache = g.cache(PARTITIONED_CACHE_NAME);

      if (cache.affinity().isPrimaryOrBackup(g.localNode(), key))
        assertEquals("zzz", cache.peek(key));
      else assertNull(cache.peek(key));
    }
  }
  /** @throws Exception If failed. */
  public void testPutAndEvictAll() throws Exception {
    GridCache<Object, String> c = grid().cache(DATA_CACHE_NAME);

    assertEquals("value1", svc.cachePut(1));
    assertEquals("value2", svc.cachePut(2));

    assertEquals(4, c.size());

    assertEquals("value1", c.get(key("testCache1", 1)));
    assertEquals("value1", c.get(key("testCache2", 1)));
    assertEquals("value2", c.get(key("testCache1", 2)));
    assertEquals("value2", c.get(key("testCache2", 2)));

    svc.cacheEvictAll();
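
    // Only the 'testCache1' entries are evicted; the 'testCache2' entries remain (verified below).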

    assertEquals(2, c.size());

    assertEquals(null, c.get(key("testCache1", 1)));
    assertEquals("value1", c.get(key("testCache2", 1)));
    assertEquals(null, c.get(key("testCache1", 2)));
    assertEquals("value2", c.get(key("testCache2", 2)));
  }
  /**
   * Test GGFS construction.
   *
   * @throws GridException In case of exception.
   */
  public void testConfiguration() throws GridException {
    GridCache metaCache = getFieldValue(fs, "meta", "metaCache");
    GridCache dataCache = getFieldValue(fs, "data", "dataCache");

    assertNotNull(metaCache);
    assertEquals(META_CACHE_NAME, metaCache.name());
    assertEquals(REPLICATED, metaCache.configuration().getCacheMode());

    assertNotNull(dataCache);
    assertEquals(DATA_CACHE_NAME, dataCache.name());
    assertEquals(PARTITIONED, dataCache.configuration().getCacheMode());
  }
  /** @throws Exception If failed. */
  public void testNodeLeave() throws Exception {
    try {
      cache = true;

      for (int i = 0; i < 2; i++) {
        nearOnly = i == 0;

        startGrid(i);
      }

      for (int i = 0; i < 10; i++) grid(1).cache(null).put(i, i);

      // Cache handle on the near-only node (grid 0).
      final GridCache<Object, Object> nearOnly = grid(0).cache(null);

      // Populate near cache.
      for (int i = 0; i < 10; i++) {
        assertEquals(i, nearOnly.get(i));
        assertEquals(i, nearOnly.peek(i));
      }

      // Stop the only dht node.
      stopGrid(1);
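
      // With the only data node gone, near entries are cleared and any remote cache operation
      // fails with GridTopologyException, as asserted below.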

      for (int i = 0; i < 10; i++) {
        assertNull(nearOnly.peek(i));

        final int key = i;

        GridTestUtils.assertThrows(
            log,
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                return nearOnly.get(key);
              }
            },
            GridTopologyException.class,
            null);
      }

      // Test optimistic transaction.
      GridTestUtils.assertThrows(
          log,
          new Callable<Object>() {
            @Override
            public Object call() throws Exception {
              try (GridCacheTx tx = nearOnly.txStart(OPTIMISTIC, REPEATABLE_READ)) {
                nearOnly.putx("key", "val");

                tx.commit();
              }

              return null;
            }
          },
          GridTopologyException.class,
          null);

      // Test pessimistic transaction.
      GridTestUtils.assertThrows(
          log,
          new Callable<Object>() {
            @Override
            public Object call() throws Exception {
              try (GridCacheTx tx = nearOnly.txStart(PESSIMISTIC, REPEATABLE_READ)) {
                nearOnly.put("key", "val");

                tx.commit();
              }

              return null;
            }
          },
          GridTopologyException.class,
          null);

    } finally {
      stopAllGrids();
    }
  }
  /** @throws Exception If failed. */
  public void testCreateFileFragmented() throws Exception {
    GridGgfsEx impl = (GridGgfsEx) grid(0).ggfs("ggfs");

    GridGgfsFragmentizerManager fragmentizer = impl.context().fragmentizer();

    GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", false);

    GridGgfsPath path = new GridGgfsPath("/file");

    try {
      GridGgfs fs0 = grid(0).ggfs("ggfs");
      GridGgfs fs1 = grid(1).ggfs("ggfs");
      GridGgfs fs2 = grid(2).ggfs("ggfs");

      try (GridGgfsOutputStream out =
          fs0.create(
              path,
              128,
              false,
              1,
              CFG_GRP_SIZE,
              F.asMap(GridGgfs.PROP_PREFER_LOCAL_WRITES, "true"))) {
        // 1.5 blocks
        byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

        Arrays.fill(data, (byte) 1);

        out.write(data);
      }

      try (GridGgfsOutputStream out = fs1.append(path, false)) {
        // 1.5 blocks.
        byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

        Arrays.fill(data, (byte) 2);

        out.write(data);
      }

      // After this we should have the first two blocks collocated with grid 0 and the last block
      // collocated with grid 1.
      GridGgfsFileImpl fileImpl = (GridGgfsFileImpl) fs.info(path);

      GridCache<Object, Object> metaCache = grid(0).cachex(META_CACHE_NAME);

      GridGgfsFileInfo fileInfo = (GridGgfsFileInfo) metaCache.get(fileImpl.fileId());

      GridGgfsFileMap map = fileInfo.fileMap();

      List<GridGgfsFileAffinityRange> ranges = map.ranges();

      assertEquals(2, ranges.size());

      assertTrue(ranges.get(0).startOffset() == 0);
      assertTrue(ranges.get(0).endOffset() == 2 * CFG_BLOCK_SIZE - 1);

      assertTrue(ranges.get(1).startOffset() == 2 * CFG_BLOCK_SIZE);
      assertTrue(ranges.get(1).endOffset() == 3 * CFG_BLOCK_SIZE - 1);

      // Validate data read after colocated writes.
      try (GridGgfsInputStream in = fs2.open(path)) {
        // Validate first part of file.
        for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 1, in.read());

        // Validate second part of file.
        for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 2, in.read());

        assertEquals(-1, in.read());
      }
    } finally {
      GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", true);

      boolean hasData = false;

      for (int i = 0; i < NODES_CNT; i++) hasData |= !grid(i).cachex(DATA_CACHE_NAME).isEmpty();

      assertTrue(hasData);

      fs.delete(path, true);
    }

    GridTestUtils.retryAssert(
        log,
        ASSERT_RETRIES,
        ASSERT_RETRY_INTERVAL,
        new CAX() {
          @Override
          public void applyx() {
            for (int i = 0; i < NODES_CNT; i++)
              assertTrue(grid(i).cachex(DATA_CACHE_NAME).isEmpty());
          }
        });
  }
  /**
   * JUnit.
   *
   * @throws Exception In case of error.
   */
  @SuppressWarnings({"TooBroadScope"})
  public void testH2Text() throws Exception {
    int duration = 60 * 1000;
    final int keyCnt = 5000;
    final int logFreq = 50;
    final String txt = "Value";

    final GridCache<Integer, H2TextValue> c = grid(0).cache(null);

    GridFuture<?> fut1 =
        multithreadedAsync(
            new Callable() {
              @Override
              public Object call() throws Exception {
                for (int i = 0; i < keyCnt; i++) {
                  c.putx(i, new H2TextValue(txt));

                  if (i % logFreq == 0) X.println("Stored values: " + i);
                }

                return null;
              }
            },
            1);

    // Create query.
    final GridCacheQuery<Map.Entry<Integer, H2TextValue>> qry =
        c.queries().createFullTextQuery(H2TextValue.class, txt);

    // Query primary copies only (no backups, so no de-duplication is needed) and bound each
    // execution by the test timeout.
    qry.enableDedup(false);
    qry.includeBackups(false);
    qry.timeout(TEST_TIMEOUT);

    final AtomicBoolean stop = new AtomicBoolean();

    GridFuture<?> fut2 =
        multithreadedAsync(
            new Callable() {
              @Override
              public Object call() throws Exception {
                int cnt = 0;

                while (!stop.get()) {
                  Collection<Map.Entry<Integer, H2TextValue>> res = qry.execute().get();

                  cnt++;

                  if (cnt % logFreq == 0) {
                    X.println("Result set: " + res.size());
                    X.println("Executed queries: " + cnt);
                  }
                }

                return null;
              }
            },
            1);

    Thread.sleep(duration);

    fut1.get();

    stop.set(true);

    fut2.get();
  }