Example #1
  /** @throws Exception If failed. */
  @SuppressWarnings({"ObjectEquality"})
  public void testDifferentTasks() throws Exception {
    Grid grid1 = null;
    Grid grid2 = null;

    try {
      grid1 = startGrid(1, new GridSpringResourceContextImpl(new GenericApplicationContext()));
      grid2 = startGrid(2, new GridSpringResourceContextImpl(new GenericApplicationContext()));

      // Execute different tasks.
      grid1.compute().execute(SharedResourceTask1.class, null).get();
      grid1.compute().execute(SharedResourceTask2.class, null).get();

      // In ISOLATED_CLASSLOADER mode tasks should have the same class
      // loaders because they share the same class loader locally and
      // thus the same resources.
      // So there is 1 resource locally and 1 remotely.
      assert task1Rsrc1 == task2Rsrc1;
      assert task1Rsrc2 == task2Rsrc2;
      assert task1Rsrc3 == task2Rsrc3;
      assert task1Rsrc4 == task2Rsrc4;

      checkUsageCount(createClss, UserResource1.class, 4);
      checkUsageCount(createClss, UserResource2.class, 4);

      checkUsageCount(deployClss, UserResource1.class, 4);
      checkUsageCount(deployClss, UserResource2.class, 4);
    } finally {
      GridTestUtils.close(grid1, log());
      GridTestUtils.close(grid2, log());
    }

    checkUsageCount(undeployClss, UserResource1.class, 4);
    checkUsageCount(undeployClss, UserResource2.class, 4);
  }
Example #2
  /** @throws Exception If failed. */
  @SuppressWarnings({"ObjectEquality"})
  public void testUndeployedTask() throws Exception {
    Grid grid1 = null;
    Grid grid2 = null;

    try {
      grid1 = startGrid(1, new GridSpringResourceContextImpl(new GenericApplicationContext()));
      grid2 = startGrid(2, new GridSpringResourceContextImpl(new GenericApplicationContext()));

      // Execute tasks.
      grid1.compute().execute(SharedResourceTask1.class, null).get();
      grid1.compute().execute(SharedResourceTask2.class, null).get();

      grid1.compute().undeployTask(SharedResourceTask1.class.getName());

      // Wait until resources get undeployed remotely
      // because undeploy is asynchronous.
      Thread.sleep(3000);

      // 1 local and 1 remote resource instance.
      checkUsageCount(createClss, UserResource1.class, 4);
      checkUsageCount(deployClss, UserResource1.class, 4);
      checkUsageCount(createClss, UserResource2.class, 4);
      checkUsageCount(deployClss, UserResource2.class, 4);
      checkUsageCount(undeployClss, UserResource1.class, 4);
      checkUsageCount(undeployClss, UserResource2.class, 4);

      grid1.compute().undeployTask(SharedResourceTask2.class.getName());

      // Wait until resources get undeployed remotely
      // because undeploy is asynchronous.
      Thread.sleep(3000);

      // We undeployed the last task for this class loader.
      // All resources should be undeployed.
      checkUsageCount(undeployClss, UserResource1.class, 4);
      checkUsageCount(undeployClss, UserResource2.class, 4);

      // Execute the same tasks.
      grid1.compute().execute(SharedResourceTask1.class, null).get();
      grid1.compute().execute(SharedResourceTask2.class, null).get();

      // 2 new resources.
      checkUsageCount(createClss, UserResource1.class, 8);
      checkUsageCount(deployClss, UserResource1.class, 8);
      checkUsageCount(createClss, UserResource2.class, 8);
      checkUsageCount(deployClss, UserResource2.class, 8);
    } finally {
      GridTestUtils.close(grid1, log());
      GridTestUtils.close(grid2, log());
    }

    checkUsageCount(undeployClss, UserResource1.class, 8);
    checkUsageCount(undeployClss, UserResource2.class, 8);
  }
Example #3
  /** @throws Exception If failed. */
  public void testClusterNodeMetrics() throws Exception {
    final Ignite ignite0 = grid();
    final Ignite ignite1 = startGrid(1);

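    // Wait up to 3 seconds until both nodes see a two-node topology.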
    GridTestUtils.waitForCondition(
        new GridAbsPredicate() {
          @Override
          public boolean apply() {
            return ignite0.cluster().nodes().size() == 2 && ignite1.cluster().nodes().size() == 2;
          }
        },
        3000L);

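    // Metrics reported by the local node only.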
    ClusterMetrics metrics0 = ignite0.cluster().localNode().metrics();

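    // Aggregated metrics for the cluster group spanning both nodes.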
    ClusterMetrics nodesMetrics =
        ignite0
            .cluster()
            .forNode(ignite0.cluster().localNode(), ignite1.cluster().localNode())
            .metrics();

    assertEquals(metrics0.getTotalCpus(), nodesMetrics.getTotalCpus());
    assertEquals(1, metrics0.getTotalNodes());
    assertEquals(2, nodesMetrics.getTotalNodes());

    assert metrics0.getHeapMemoryUsed() > 0;
    assert metrics0.getHeapMemoryTotal() > 0;
    assert metrics0.getNonHeapMemoryMaximum() > 0;
  }
Example #4
  /** @throws Exception If failed. */
  public void testSameTaskFromTwoNodesLeft() throws Exception {
    Grid grid1 = null;
    Grid grid2 = null;
    Grid grid3 = null;

    try {
      grid1 = startGrid(1, new GridSpringResourceContextImpl(new GenericApplicationContext()));
      grid2 = startGrid(2, new GridSpringResourceContextImpl(new GenericApplicationContext()));
      grid3 = startGrid(3, new GridSpringResourceContextImpl(new GenericApplicationContext()));

      grid1.compute().execute(SharedResourceTask1.class, null).get();
      grid2.compute().execute(SharedResourceTask1.class, null).get();

      checkUsageCount(createClss, UserResource1.class, 6);
      checkUsageCount(deployClss, UserResource1.class, 6);
      checkUsageCount(createClss, UserResource2.class, 6);
      checkUsageCount(deployClss, UserResource2.class, 6);

      checkUsageCount(undeployClss, UserResource1.class, 0);
      checkUsageCount(undeployClss, UserResource2.class, 0);

      GridTestUtils.close(grid1, log());

      // Wait until other nodes get notified
      // that grid1 left.
      Thread.sleep(1000);

      // Undeployment happened only on grid1.
      checkUsageCount(undeployClss, UserResource1.class, 2);
      checkUsageCount(undeployClss, UserResource2.class, 2);

      GridTestUtils.close(grid2, log());

      // Wait until resources get undeployed remotely
      // because undeploy is asynchronous.
      Thread.sleep(1000);

      // Undeployment happened on both grid1 and grid2.
      checkUsageCount(undeployClss, UserResource1.class, 4);
      checkUsageCount(undeployClss, UserResource2.class, 4);
    } finally {
      GridTestUtils.close(grid1, log());
      GridTestUtils.close(grid2, log());
      GridTestUtils.close(grid3, log());
    }
  }
Example #5
  /** @throws Exception If failed. */
  public void testSameTaskFromTwoNodesUndeploy() throws Exception {
    Grid grid1 = null;
    Grid grid2 = null;
    Grid grid3 = null;

    try {
      grid1 = startGrid(1, new GridSpringResourceContextImpl(new GenericApplicationContext()));
      grid2 = startGrid(2, new GridSpringResourceContextImpl(new GenericApplicationContext()));
      grid3 = startGrid(3, new GridSpringResourceContextImpl(new GenericApplicationContext()));

      grid1.compute().execute(SharedResourceTask1.class, null).get();
      grid2.compute().execute(SharedResourceTask1.class, null).get();

      checkUsageCount(createClss, UserResource1.class, 6);
      checkUsageCount(deployClss, UserResource1.class, 6);
      checkUsageCount(createClss, UserResource2.class, 6);
      checkUsageCount(deployClss, UserResource2.class, 6);

      checkUsageCount(undeployClss, UserResource1.class, 0);
      checkUsageCount(undeployClss, UserResource2.class, 0);

      grid1.compute().undeployTask(SharedResourceTask1.class.getName());

      // Wait until resources get undeployed remotely
      // because undeploy is asynchronous.
      Thread.sleep(3000);

      checkUsageCount(undeployClss, UserResource1.class, 6);
      checkUsageCount(undeployClss, UserResource2.class, 6);

      grid2.compute().undeployTask(SharedResourceTask1.class.getName());

      // Wait until resources get undeployed remotely
      // because undeploy is asynchronous.
      Thread.sleep(3000);

      // All tasks from the originating nodes were undeployed. All resources should be cleaned up.
      checkUsageCount(undeployClss, UserResource1.class, 6);
      checkUsageCount(undeployClss, UserResource2.class, 6);
    } finally {
      GridTestUtils.close(grid1, log());
      GridTestUtils.close(grid2, log());
      GridTestUtils.close(grid3, log());
    }
  }
Example #6
  /** @throws Exception If failed. */
  @SuppressWarnings("unchecked")
  public void testRedeployedTask() throws Exception {
    Grid grid = startGrid(0, new GridSpringResourceContextImpl(new GenericApplicationContext()));

    try {
      // Execute the same task with different class loaders. The second execution should redeploy
      // the first one.
      grid.compute().execute(SharedResourceTask1.class, null).get();

      checkUsageCount(createClss, UserResource1.class, 2);
      checkUsageCount(createClss, UserResource2.class, 2);

      checkUsageCount(deployClss, UserResource1.class, 2);
      checkUsageCount(deployClss, UserResource2.class, 2);

      // Change the class loader of the task so that the next execution
      // triggers an implicit redeploy.
      ClassLoader tstClsLdr =
          new GridTestClassLoader(
              null,
              getClass().getClassLoader(),
              SharedResourceTask1.class.getName(),
              GridResourceSharedUndeploySelfTest.SharedResourceTask1.GridSharedJob1.class.getName(),
              GridResourceSharedUndeploySelfTest.class.getName());

      Class<? extends GridComputeTask<Object, Object>> taskCls =
          (Class<? extends GridComputeTask<Object, Object>>)
              tstClsLdr.loadClass(SharedResourceTask1.class.getName());

      grid.compute().execute(taskCls, null).get();

      // Old resources should be undeployed at this point.
      checkUsageCount(undeployClss, UserResource1.class, 2);
      checkUsageCount(undeployClss, UserResource2.class, 2);

      // We should detect redeployment and create new resources.
      checkUsageCount(createClss, UserResource1.class, 4);
      checkUsageCount(createClss, UserResource2.class, 4);

      checkUsageCount(deployClss, UserResource1.class, 4);
      checkUsageCount(deployClss, UserResource2.class, 4);
    } finally {
      GridTestUtils.close(grid, log());
    }

    checkUsageCount(undeployClss, UserResource1.class, 4);
    checkUsageCount(undeployClss, UserResource2.class, 4);
  }
Example #7
  /** @throws Exception If failed. */
  public void testDifferentTaskNameLocally() throws Exception {
    Grid grid = startGrid(0, new GridSpringResourceContextImpl(new GenericApplicationContext()));

    // Versions are different - the tasks should not share resources,
    // so 2 resources are created locally.
    try {
      grid.compute().execute(SharedResourceTask1Version1.class, null).get();

      try {
        grid.compute().execute(SharedResourceTask1Version2.class, null).get();

        assert false : "SharedResourceTask1Version2 should not be allowed to deploy.";
      } catch (GridException e) {
        info("Received expected exception: " + e);
      }
    } finally {
      GridTestUtils.close(grid, log());
    }
  }
Example #8
  /** @throws Exception If failed. */
  public void testSameTaskLocally() throws Exception {
    Grid grid = startGrid(0, new GridSpringResourceContextImpl(new GenericApplicationContext()));

    try {
      // Execute the same task twice.
      // 1 resource is created locally.
      grid.compute().execute(SharedResourceTask1.class, null).get();
      grid.compute().execute(SharedResourceTask1.class, null).get();

      checkUsageCount(createClss, UserResource1.class, 2);
      checkUsageCount(createClss, UserResource2.class, 2);

      checkUsageCount(deployClss, UserResource1.class, 2);
      checkUsageCount(deployClss, UserResource2.class, 2);
    } finally {
      GridTestUtils.close(grid, log());
    }

    checkUsageCount(undeployClss, UserResource1.class, 2);
    checkUsageCount(undeployClss, UserResource2.class, 2);
  }
Example #9
  /** @throws Exception If failed. */
  public void testSystemCache() throws Exception {
    CollectionConfiguration colCfg = collectionConfiguration();

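    // Capacity 0 creates an unbounded queue.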
    IgniteQueue queue = grid(0).queue("Queue1", 0, colCfg);

    final CacheConfiguration ccfg = getQueueCache(queue);

    GridTestUtils.assertThrows(
        log,
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            grid(0).cache(ccfg.getName());
            return null;
          }
        },
        IllegalStateException.class,
        "Failed to get cache because it is a system cache");

    assertNotNull(((IgniteKernal) grid(0)).internalCache(ccfg.getName()));
  }
Example #10
  /** @throws Exception If failed. */
  public void testCreateFileFragmented() throws Exception {
    GridGgfsEx impl = (GridGgfsEx) grid(0).ggfs("ggfs");

    GridGgfsFragmentizerManager fragmentizer = impl.context().fragmentizer();

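    // Disable the fragmentizer so the written blocks keep their colocated layout.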
    GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", false);

    GridGgfsPath path = new GridGgfsPath("/file");

    try {
      GridGgfs fs0 = grid(0).ggfs("ggfs");
      GridGgfs fs1 = grid(1).ggfs("ggfs");
      GridGgfs fs2 = grid(2).ggfs("ggfs");

      try (GridGgfsOutputStream out =
          fs0.create(
              path,
              128,
              false,
              1,
              CFG_GRP_SIZE,
              F.asMap(GridGgfs.PROP_PREFER_LOCAL_WRITES, "true"))) {
        // 1.5 blocks.
        byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

        Arrays.fill(data, (byte) 1);

        out.write(data);
      }

      try (GridGgfsOutputStream out = fs1.append(path, false)) {
        // 1.5 blocks.
        byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

        Arrays.fill(data, (byte) 2);

        out.write(data);
      }

      // After this we should have the first two blocks colocated with grid 0
      // and the last block colocated with grid 1.
      GridGgfsFileImpl fileImpl = (GridGgfsFileImpl) fs0.info(path);

      GridCache<Object, Object> metaCache = grid(0).cachex(META_CACHE_NAME);

      GridGgfsFileInfo fileInfo = (GridGgfsFileInfo) metaCache.get(fileImpl.fileId());

      GridGgfsFileMap map = fileInfo.fileMap();

      List<GridGgfsFileAffinityRange> ranges = map.ranges();

      assertEquals(2, ranges.size());

      assertTrue(ranges.get(0).startOffset() == 0);
      assertTrue(ranges.get(0).endOffset() == 2 * CFG_BLOCK_SIZE - 1);

      assertTrue(ranges.get(1).startOffset() == 2 * CFG_BLOCK_SIZE);
      assertTrue(ranges.get(1).endOffset() == 3 * CFG_BLOCK_SIZE - 1);

      // Validate data read after colocated writes.
      try (GridGgfsInputStream in = fs2.open(path)) {
        // Validate first part of file.
        for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 1, in.read());

        // Validate second part of file.
        for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 2, in.read());

        assertEquals(-1, in.read());
      }
    } finally {
      GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", true);

      boolean hasData = false;

      for (int i = 0; i < NODES_CNT; i++) hasData |= !grid(i).cachex(DATA_CACHE_NAME).isEmpty();

      assertTrue(hasData);

      impl.delete(path, true);
    }

    GridTestUtils.retryAssert(
        log,
        ASSERT_RETRIES,
        ASSERT_RETRY_INTERVAL,
        new CAX() {
          @Override
          public void applyx() {
            for (int i = 0; i < NODES_CNT; i++)
              assertTrue(grid(i).cachex(DATA_CACHE_NAME).isEmpty());
          }
        });
  }
Example #11
  /**
   * Check how prefetch override works.
   *
   * @throws Exception If failed.
   */
  public void testOpenPrefetchOverride() throws Exception {
    create(igfsSecondary, paths(DIR, SUBDIR), paths(FILE));

    // Write enough data to the secondary file system.
    final int blockSize = IGFS_BLOCK_SIZE;

    IgfsOutputStream out = igfsSecondary.append(FILE, false);

    int totalWritten = 0;

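    // Write a bit more than two full blocks so that a third block exists to prefetch.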
    while (totalWritten < blockSize * 2 + chunk.length) {
      out.write(chunk);

      totalWritten += chunk.length;
    }

    out.close();

    awaitFileClose(igfsSecondary.asSecondary(), FILE);

    // Instantiate file system with overridden "seq reads before prefetch" property.
    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(PRIMARY_CFG));

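    // Require one more sequential read than the test default before prefetch kicks in.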
    int seqReads = SEQ_READS_BEFORE_PREFETCH + 1;

    cfg.setInt(String.format(PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, "igfs:grid@"), seqReads);

    FileSystem fs = FileSystem.get(new URI(PRIMARY_URI), cfg);

    // Read the first two blocks.
    Path fsHome = new Path(PRIMARY_URI);
    Path dir = new Path(fsHome, DIR.name());
    Path subdir = new Path(dir, SUBDIR.name());
    Path file = new Path(subdir, FILE.name());

    FSDataInputStream fsIn = fs.open(file);

    final byte[] readBuf = new byte[blockSize * 2];

    fsIn.readFully(0, readBuf, 0, readBuf.length);

    // Wait a while for prefetch to finish (if any).
    IgfsMetaManager meta = igfs.context().meta();

    IgfsFileInfo info = meta.info(meta.fileId(FILE));

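    // Key of the third file block (block index 2), which prefetch should have loaded.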
    IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), 2);

    IgniteCache<IgfsBlockKey, byte[]> dataCache =
        igfs.context().kernalContext().cache().jcache(igfs.configuration().getDataCacheName());

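    // Poll for up to one second while waiting for the prefetched block to appear in the data cache.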
    for (int i = 0; i < 10; i++) {
      if (dataCache.containsKey(key)) break;
      else U.sleep(100);
    }

    fsIn.close();

    // Remove the file from the secondary file system.
    igfsSecondary.delete(FILE, false);

    // Try reading the third block. Should fail.
    GridTestUtils.assertThrows(
        log,
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            IgfsInputStream in0 = igfs.open(FILE);

            in0.seek(blockSize * 2);

            try {
              in0.read(readBuf);
            } finally {
              U.closeQuiet(in0);
            }

            return null;
          }
        },
        IOException.class,
        "Failed to read data due to secondary file system exception: /dir/subdir/file");
  }
Example #12
  /**
   * @param cacheMode Cache mode.
   * @param sameAff If {@code false} uses different number of partitions for caches.
   * @param concurrency Transaction concurrency.
   * @param isolation Transaction isolation.
   * @throws Exception If failed.
   */
  private void crossCacheTxFailover(
      CacheMode cacheMode,
      boolean sameAff,
      final TransactionConcurrency concurrency,
      final TransactionIsolation isolation)
      throws Exception {
    IgniteKernal ignite0 = (IgniteKernal) ignite(0);

    final AtomicBoolean stop = new AtomicBoolean();

    try {
      ignite0.createCache(cacheConfiguration(CACHE1, cacheMode, 256));
      ignite0.createCache(cacheConfiguration(CACHE2, cacheMode, sameAff ? 256 : 128));

      final AtomicInteger threadIdx = new AtomicInteger();

      IgniteInternalFuture<?> fut =
          GridTestUtils.runMultiThreadedAsync(
              new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                  int idx = threadIdx.getAndIncrement();

                  Ignite ignite = ignite(idx % GRID_CNT);

                  log.info(
                      "Started update thread [node="
                          + ignite.name()
                          + ", client="
                          + ignite.configuration().isClientMode()
                          + ']');

                  IgniteCache<TestKey, TestValue> cache1 = ignite.cache(CACHE1);
                  IgniteCache<TestKey, TestValue> cache2 = ignite.cache(CACHE2);

                  assertNotSame(cache1, cache2);

                  IgniteTransactions txs = ignite.transactions();

                  ThreadLocalRandom rnd = ThreadLocalRandom.current();

                  long iter = 0;

                  while (!stop.get()) {
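                    // Randomly touch either the same key in both caches or two related keys.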
                    boolean sameKey = rnd.nextBoolean();

                    try {
                      try (Transaction tx = txs.txStart(concurrency, isolation)) {
                        if (sameKey) {
                          TestKey key = new TestKey(rnd.nextLong(KEY_RANGE));

                          cacheOperation(rnd, cache1, key);
                          cacheOperation(rnd, cache2, key);
                        } else {
                          TestKey key1 = new TestKey(rnd.nextLong(KEY_RANGE));
                          TestKey key2 = new TestKey(key1.key() + 1);

                          cacheOperation(rnd, cache1, key1);
                          cacheOperation(rnd, cache2, key2);
                        }

                        tx.commit();
                      }
                    } catch (CacheException | IgniteException e) {
                      log.info("Update error: " + e);
                    }

                    if (iter++ % 500 == 0) log.info("Iteration: " + iter);
                  }

                  return null;
                }

                /**
                 * @param rnd Random.
                 * @param cache Cache.
                 * @param key Key.
                 */
                private void cacheOperation(
                    ThreadLocalRandom rnd, IgniteCache<TestKey, TestValue> cache, TestKey key) {
                  switch (rnd.nextInt(4)) {
                    case 0:
                      cache.put(key, new TestValue(rnd.nextLong()));

                      break;

                    case 1:
                      cache.remove(key);

                      break;

                    case 2:
                      cache.invoke(key, new TestEntryProcessor(rnd.nextBoolean() ? 1L : null));

                      break;

                    case 3:
                      cache.get(key);

                      break;

                    default:
                      assert false;
                  }
                }
              },
              10,
              "tx-thread");

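      // Cycle node start/stop for three minutes while the update threads run.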
      long stopTime = System.currentTimeMillis() + 3 * 60_000;

      long topVer = ignite0.cluster().topologyVersion();

      boolean failed = false;

      while (System.currentTimeMillis() < stopTime) {
        log.info("Start node.");

        IgniteKernal ignite = (IgniteKernal) startGrid(GRID_CNT);

        assertFalse(ignite.configuration().isClientMode());

        topVer++;

        IgniteInternalFuture<?> affFut =
            ignite
                .context()
                .cache()
                .context()
                .exchange()
                .affinityReadyFuture(new AffinityTopologyVersion(topVer));

        try {
          if (affFut != null) affFut.get(30_000);
        } catch (IgniteFutureTimeoutCheckedException e) {
          log.error("Failed to wait for affinity future after start: " + topVer);

          failed = true;

          break;
        }

        Thread.sleep(500);

        log.info("Stop node.");

        stopGrid(GRID_CNT);

        topVer++;

        affFut =
            ignite0
                .context()
                .cache()
                .context()
                .exchange()
                .affinityReadyFuture(new AffinityTopologyVersion(topVer));

        try {
          if (affFut != null) affFut.get(30_000);
        } catch (IgniteFutureTimeoutCheckedException e) {
          log.error("Failed to wait for affinity future after stop: " + topVer);

          failed = true;

          break;
        }
      }

      stop.set(true);

      fut.get();

      assertFalse("Test failed, see log for details.", failed);
    } finally {
      stop.set(true);

      ignite0.destroyCache(CACHE1);
      ignite0.destroyCache(CACHE2);

      awaitPartitionMapExchange();
    }
  }