/** @throws Exception If failed. */
  public void testClusterNodeMetrics() throws Exception {
    final Ignite ignite0 = grid();
    final Ignite ignite1 = startGrid(1);

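    // Wait up to 3 seconds for both nodes to observe the two-node topology.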
    GridTestUtils.waitForCondition(
        new GridAbsPredicate() {
          @Override
          public boolean apply() {
            return ignite0.cluster().nodes().size() == 2 && ignite1.cluster().nodes().size() == 2;
          }
        },
        3000L);

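    // Compare the local node's metrics with metrics aggregated over a projection of both nodes.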
    ClusterMetrics metrics0 = ignite0.cluster().localNode().metrics();

    ClusterMetrics nodesMetrics =
        ignite0
            .cluster()
            .forNode(ignite0.cluster().localNode(), ignite1.cluster().localNode())
            .metrics();

    assertEquals(metrics0.getTotalCpus(), nodesMetrics.getTotalCpus());
    assertEquals(1, metrics0.getTotalNodes());
    assertEquals(2, nodesMetrics.getTotalNodes());

    assert metrics0.getHeapMemoryUsed() > 0;
    assert metrics0.getHeapMemoryTotal() > 0;
    assert metrics0.getNonHeapMemoryMaximum() > 0;
  }
  /** {@inheritDoc} */
  @Override
  protected GridConfiguration getConfiguration(String gridName) throws Exception {
    GridConfiguration c = super.getConfiguration(gridName);

    c.setLocalHost(HOST);

    assert c.getClientConnectionConfiguration() == null;

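    // REST TCP client connectivity; SSL is enabled only when a context factory is provided.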
    GridClientConnectionConfiguration clientCfg = new GridClientConnectionConfiguration();

    clientCfg.setRestTcpPort(REST_TCP_PORT_BASE);

    GridSslContextFactory sslCtxFactory = sslContextFactory();

    if (sslCtxFactory != null) {
      clientCfg.setRestTcpSslEnabled(true);
      clientCfg.setRestTcpSslContextFactory(sslCtxFactory);
    }

    c.setClientConnectionConfiguration(clientCfg);

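    // TCP discovery over the test's IP finder so that all started grids join the same topology.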
    GridTcpDiscoverySpi disco = new GridTcpDiscoverySpi();

    disco.setIpFinder(IP_FINDER);

    c.setDiscoverySpi(disco);

    TestCommunicationSpi spi = new TestCommunicationSpi();

    spi.setLocalPort(GridTestUtils.getNextCommPort(getClass()));

    c.setCommunicationSpi(spi);

    c.setCacheConfiguration(
        cacheConfiguration(null),
        cacheConfiguration(PARTITIONED_CACHE_NAME),
        cacheConfiguration(REPLICATED_CACHE_NAME),
        cacheConfiguration(REPLICATED_ASYNC_CACHE_NAME));

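    // Fixed-size (40-thread) public executor, pre-started and shut down by the grid on stop.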
    ThreadPoolExecutor exec =
        new ThreadPoolExecutor(40, 40, 0, MILLISECONDS, new LinkedBlockingQueue<Runnable>());

    exec.prestartAllCoreThreads();

    c.setExecutorService(exec);

    c.setExecutorServiceShutdown(true);

    ThreadPoolExecutor sysExec =
        new ThreadPoolExecutor(40, 40, 0, MILLISECONDS, new LinkedBlockingQueue<Runnable>());

    sysExec.prestartAllCoreThreads();

    c.setSystemExecutorService(sysExec);

    c.setSystemExecutorServiceShutdown(true);

    return c;
  }
  /** @throws Exception If failed. */
  public void testTopologyVersionDrId() throws Exception {
    GridCacheVersion ver = version(10, 0);

    assertEquals(10, ver.nodeOrder());
    assertEquals(0, ver.dataCenterId());

    // Check with max topology version and some dr IDs.
    ver = version(0x7FFFFFF, 0);
    assertEquals(0x7FFFFFF, ver.nodeOrder());
    assertEquals(0, ver.dataCenterId());

    ver = version(0x7FFFFFF, 15);
    assertEquals(0x7FFFFFF, ver.nodeOrder());
    assertEquals(15, ver.dataCenterId());

    ver = version(0x7FFFFFF, 31);
    assertEquals(0x7FFFFFF, ver.nodeOrder());
    assertEquals(31, ver.dataCenterId());

    // Check max dr ID with some topology versions.
    ver = version(11, 31);
    assertEquals(11, ver.nodeOrder());
    assertEquals(31, ver.dataCenterId());

    ver = version(256, 31);
    assertEquals(256, ver.nodeOrder());
    assertEquals(31, ver.dataCenterId());

    ver = version(1025, 31);
    assertEquals(1025, ver.nodeOrder());
    assertEquals(31, ver.dataCenterId());

    // Check overflow exception.
    GridTestUtils.assertThrows(
        log,
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            return version(0x7FFFFFF + 1, 1);
          }
        },
        IllegalArgumentException.class,
        null);
  }
  /** @throws Exception If failed. */
  public void testSystemCache() throws Exception {
    CollectionConfiguration colCfg = collectionConfiguration();

    IgniteQueue queue = grid(0).queue("Queue1", 0, colCfg);

    final CacheConfiguration ccfg = getQueueCache(queue);

    GridTestUtils.assertThrows(
        log,
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            grid(0).cache(ccfg.getName());
            return null;
          }
        },
        IllegalStateException.class,
        "Failed to get cache because it is a system cache");

    assertNotNull(((IgniteKernal) grid(0)).internalCache(ccfg.getName()));
  }
  /** @throws Exception If failed. */
  public void testNodeLeave() throws Exception {
    try {
      cache = true;

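      // Grid 0 is started as a near-only node, grid 1 as the only data node.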
      for (int i = 0; i < 2; i++) {
        nearOnly = i == 0;

        startGrid(i);
      }

      for (int i = 0; i < 10; i++) grid(1).cache(null).put(i, i);

      final GridCache<Object, Object> nearOnly = grid(0).cache(null);

      // Populate near cache.
      for (int i = 0; i < 10; i++) {
        assertEquals(i, nearOnly.get(i));
        assertEquals(i, nearOnly.peek(i));
      }

      // Stop the only dht node.
      stopGrid(1);

      for (int i = 0; i < 10; i++) {
        assertNull(nearOnly.peek(i));

        final int key = i;

        GridTestUtils.assertThrows(
            log,
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                return nearOnly.get(key);
              }
            },
            GridTopologyException.class,
            null);
      }

      // Test optimistic transaction.
      GridTestUtils.assertThrows(
          log,
          new Callable<Object>() {
            @Override
            public Object call() throws Exception {
              try (GridCacheTx tx = nearOnly.txStart(OPTIMISTIC, REPEATABLE_READ)) {
                nearOnly.putx("key", "val");

                tx.commit();
              }

              return null;
            }
          },
          GridTopologyException.class,
          null);

      // Test pessimistic transaction.
      GridTestUtils.assertThrows(
          log,
          new Callable<Object>() {
            @Override
            public Object call() throws Exception {
              try (GridCacheTx tx = nearOnly.txStart(PESSIMISTIC, REPEATABLE_READ)) {
                nearOnly.put("key", "val");

                tx.commit();
              }

              return null;
            }
          },
          GridTopologyException.class,
          null);

    } finally {
      stopAllGrids();
    }
  }
  /** @throws Exception If failed. */
  public void testCreateFileFragmented() throws Exception {
    GridGgfsEx impl = (GridGgfsEx) grid(0).ggfs("ggfs");

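    // Disable the fragmentizer so that the block placement established by the writes below stays intact.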
    GridGgfsFragmentizerManager fragmentizer = impl.context().fragmentizer();

    GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", false);

    GridGgfsPath path = new GridGgfsPath("/file");

    try {
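      // The same GGFS instance as seen from three different grids.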
      GridGgfs fs0 = grid(0).ggfs("ggfs");
      GridGgfs fs1 = grid(1).ggfs("ggfs");
      GridGgfs fs2 = grid(2).ggfs("ggfs");

      try (GridGgfsOutputStream out =
          fs0.create(
              path,
              128,
              false,
              1,
              CFG_GRP_SIZE,
              F.asMap(GridGgfs.PROP_PREFER_LOCAL_WRITES, "true"))) {
        // 1.5 blocks.
        byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

        Arrays.fill(data, (byte) 1);

        out.write(data);
      }

      try (GridGgfsOutputStream out = fs1.append(path, false)) {
        // 1.5 blocks.
        byte[] data = new byte[CFG_BLOCK_SIZE * 3 / 2];

        Arrays.fill(data, (byte) 2);

        out.write(data);
      }

      // After this we should have the first two blocks colocated with grid 0 and the last block
      // colocated with grid 1.
      GridGgfsFileImpl fileImpl = (GridGgfsFileImpl) fs.info(path);

      GridCache<Object, Object> metaCache = grid(0).cachex(META_CACHE_NAME);

      GridGgfsFileInfo fileInfo = (GridGgfsFileInfo) metaCache.get(fileImpl.fileId());

      GridGgfsFileMap map = fileInfo.fileMap();

      List<GridGgfsFileAffinityRange> ranges = map.ranges();

      assertEquals(2, ranges.size());

      assertTrue(ranges.get(0).startOffset() == 0);
      assertTrue(ranges.get(0).endOffset() == 2 * CFG_BLOCK_SIZE - 1);

      assertTrue(ranges.get(1).startOffset() == 2 * CFG_BLOCK_SIZE);
      assertTrue(ranges.get(1).endOffset() == 3 * CFG_BLOCK_SIZE - 1);

      // Validate data read after colocated writes.
      try (GridGgfsInputStream in = fs2.open(path)) {
        // Validate first part of file.
        for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 1, in.read());

        // Validate second part of file.
        for (int i = 0; i < CFG_BLOCK_SIZE * 3 / 2; i++) assertEquals((byte) 2, in.read());

        assertEquals(-1, in.read());
      }
    } finally {
      GridTestUtils.setFieldValue(fragmentizer, "fragmentizerEnabled", true);

      boolean hasData = false;

      for (int i = 0; i < NODES_CNT; i++) hasData |= !grid(i).cachex(DATA_CACHE_NAME).isEmpty();

      assertTrue(hasData);

      fs.delete(path, true);
    }

    GridTestUtils.retryAssert(
        log,
        ASSERT_RETRIES,
        ASSERT_RETRY_INTERVAL,
        new CAX() {
          @Override
          public void applyx() {
            for (int i = 0; i < NODES_CNT; i++)
              assertTrue(grid(i).cachex(DATA_CACHE_NAME).isEmpty());
          }
        });
  }
  /** @throws Exception If failed. */
  public void testInvalidRangeUpdates() throws Exception {
    final IgfsFileMap map = new IgfsFileMap();

    final IgniteUuid affKey1 = IgniteUuid.randomUuid();
    final IgniteUuid affKey2 = IgniteUuid.randomUuid();

    map.addRange(new IgfsFileAffinityRange(10, 19, affKey1));
    map.addRange(new IgfsFileAffinityRange(30, 39, affKey1));

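    // A range that does not overlap any stored range (0-5) must be rejected.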
    GridTestUtils.assertThrows(
        log,
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            map.updateRangeStatus(new IgfsFileAffinityRange(0, 5, affKey1), RANGE_STATUS_MOVING);

            return null;
          }
        },
        IgfsInvalidRangeException.class,
        null);

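    // A range that only partially matches a stored one (15-19 vs. 10-19) is rejected as well.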
    GridTestUtils.assertThrows(
        log,
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            map.updateRangeStatus(new IgfsFileAffinityRange(15, 19, affKey1), RANGE_STATUS_MOVING);

            return null;
          }
        },
        IgfsInvalidRangeException.class,
        null);

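    // Matching bounds but a different affinity key fails with an assertion error.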
    GridTestUtils.assertThrows(
        log,
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            map.updateRangeStatus(new IgfsFileAffinityRange(10, 19, affKey2), RANGE_STATUS_MOVING);

            return null;
          }
        },
        AssertionError.class,
        null);

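    // A range extending past the stored one (10-22 vs. 10-19) fails with an assertion error too.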
    GridTestUtils.assertThrows(
        log,
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            map.updateRangeStatus(new IgfsFileAffinityRange(10, 22, affKey1), RANGE_STATUS_MOVING);

            return null;
          }
        },
        AssertionError.class,
        null);

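    // None of the rejected updates should have modified the map.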
    assertEquals(2, map.ranges().size());
  }
  /** @throws Exception If failed. */
  public void testEmptyProjections() throws Exception {
    final GridClientCompute dflt = client.compute();

    Collection<? extends GridClientNode> nodes = dflt.nodes();

    assertEquals(NODES_CNT, nodes.size());

    Iterator<? extends GridClientNode> iter = nodes.iterator();

    final GridClientCompute singleNodePrj = dflt.projection(Collections.singletonList(iter.next()));

    final GridClientNode second = iter.next();

    final GridClientPredicate<GridClientNode> noneFilter =
        new GridClientPredicate<GridClientNode>() {
          @Override
          public boolean apply(GridClientNode node) {
            return false;
          }
        };

    final GridClientPredicate<GridClientNode> targetFilter =
        new GridClientPredicate<GridClientNode>() {
          @Override
          public boolean apply(GridClientNode node) {
            return node.nodeId().equals(second.nodeId());
          }
        };

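    // Empty or contradictory projections must fail: a filter that matches no nodes, and narrowing
    // a single-node projection to a different node (directly or via a filter).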
    GridTestUtils.assertThrows(
        log(),
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            return dflt.projection(noneFilter).log(-1, -1);
          }
        },
        GridServerUnreachableException.class,
        null);

    GridTestUtils.assertThrows(
        log(),
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            return singleNodePrj.projection(second);
          }
        },
        GridClientException.class,
        null);

    GridTestUtils.assertThrows(
        log(),
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            return singleNodePrj.projection(targetFilter);
          }
        },
        GridClientException.class,
        null);
  }
  /**
   * Check how prefetch override works.
   *
   * @throws Exception If failed.
   */
  public void testOpenPrefetchOverride() throws Exception {
    create(igfsSecondary, paths(DIR, SUBDIR), paths(FILE));

    // Write enough data to the secondary file system.
    final int blockSize = IGFS_BLOCK_SIZE;

    IgfsOutputStream out = igfsSecondary.append(FILE, false);

    int totalWritten = 0;

    while (totalWritten < blockSize * 2 + chunk.length) {
      out.write(chunk);

      totalWritten += chunk.length;
    }

    out.close();

    awaitFileClose(igfsSecondary.asSecondary(), FILE);

    // Instantiate file system with overridden "seq reads before prefetch" property.
    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(PRIMARY_CFG));

    int seqReads = SEQ_READS_BEFORE_PREFETCH + 1;

    cfg.setInt(String.format(PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, "igfs:grid@"), seqReads);

    FileSystem fs = FileSystem.get(new URI(PRIMARY_URI), cfg);

    // Read the first two blocks.
    Path fsHome = new Path(PRIMARY_URI);
    Path dir = new Path(fsHome, DIR.name());
    Path subdir = new Path(dir, SUBDIR.name());
    Path file = new Path(subdir, FILE.name());

    FSDataInputStream fsIn = fs.open(file);

    final byte[] readBuf = new byte[blockSize * 2];

    fsIn.readFully(0, readBuf, 0, readBuf.length);

    // Give prefetch (if any) a short time to load the third block into the data cache.
    IgfsMetaManager meta = igfs.context().meta();

    IgfsFileInfo info = meta.info(meta.fileId(FILE));

    IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), 2);

    IgniteCache<IgfsBlockKey, byte[]> dataCache =
        igfs.context().kernalContext().cache().jcache(igfs.configuration().getDataCacheName());

    for (int i = 0; i < 10; i++) {
      if (dataCache.containsKey(key)) break;
      else U.sleep(100);
    }

    fsIn.close();

    // Remove the file from the secondary file system.
    igfsSecondary.delete(FILE, false);

    // Try reading the third block. Should fail.
    GridTestUtils.assertThrows(
        log,
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            IgfsInputStream in0 = igfs.open(FILE);

            in0.seek(blockSize * 2);

            try {
              in0.read(readBuf);
            } finally {
              U.closeQuiet(in0);
            }

            return null;
          }
        },
        IOException.class,
        "Failed to read data due to secondary file system exception: /dir/subdir/file");
  }
  /**
   * @param cacheMode Cache mode.
   * @param sameAff If {@code false}, the two caches use a different number of partitions.
   * @param concurrency Transaction concurrency.
   * @param isolation Transaction isolation.
   * @throws Exception If failed.
   */
  private void crossCacheTxFailover(
      CacheMode cacheMode,
      boolean sameAff,
      final TransactionConcurrency concurrency,
      final TransactionIsolation isolation)
      throws Exception {
    IgniteKernal ignite0 = (IgniteKernal) ignite(0);

    final AtomicBoolean stop = new AtomicBoolean();

    try {
      ignite0.createCache(cacheConfiguration(CACHE1, cacheMode, 256));
      ignite0.createCache(cacheConfiguration(CACHE2, cacheMode, sameAff ? 256 : 128));

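      // Ten update threads run random cross-cache transactions until the stop flag is set.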
      final AtomicInteger threadIdx = new AtomicInteger();

      IgniteInternalFuture<?> fut =
          GridTestUtils.runMultiThreadedAsync(
              new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                  int idx = threadIdx.getAndIncrement();

                  Ignite ignite = ignite(idx % GRID_CNT);

                  log.info(
                      "Started update thread [node="
                          + ignite.name()
                          + ", client="
                          + ignite.configuration().isClientMode()
                          + ']');

                  IgniteCache<TestKey, TestValue> cache1 = ignite.cache(CACHE1);
                  IgniteCache<TestKey, TestValue> cache2 = ignite.cache(CACHE2);

                  assertNotSame(cache1, cache2);

                  IgniteTransactions txs = ignite.transactions();

                  ThreadLocalRandom rnd = ThreadLocalRandom.current();

                  long iter = 0;

                  while (!stop.get()) {
                    boolean sameKey = rnd.nextBoolean();

                    try {
                      try (Transaction tx = txs.txStart(concurrency, isolation)) {
                        if (sameKey) {
                          TestKey key = new TestKey(rnd.nextLong(KEY_RANGE));

                          cacheOperation(rnd, cache1, key);
                          cacheOperation(rnd, cache2, key);
                        } else {
                          TestKey key1 = new TestKey(rnd.nextLong(KEY_RANGE));
                          TestKey key2 = new TestKey(key1.key() + 1);

                          cacheOperation(rnd, cache1, key1);
                          cacheOperation(rnd, cache2, key2);
                        }

                        tx.commit();
                      }
                    } catch (CacheException | IgniteException e) {
                      log.info("Update error: " + e);
                    }

                    if (iter++ % 500 == 0) log.info("Iteration: " + iter);
                  }

                  return null;
                }

                /**
                 * @param rnd Random.
                 * @param cache Cache.
                 * @param key Key.
                 */
                private void cacheOperation(
                    ThreadLocalRandom rnd, IgniteCache<TestKey, TestValue> cache, TestKey key) {
                  switch (rnd.nextInt(4)) {
                    case 0:
                      cache.put(key, new TestValue(rnd.nextLong()));

                      break;

                    case 1:
                      cache.remove(key);

                      break;

                    case 2:
                      cache.invoke(key, new TestEntryProcessor(rnd.nextBoolean() ? 1L : null));

                      break;

                    case 3:
                      cache.get(key);

                      break;

                    default:
                      assert false;
                  }
                }
              },
              10,
              "tx-thread");

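      // For up to three minutes, repeatedly start and stop an extra node, waiting for affinity to
      // settle after each topology change.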
      long stopTime = System.currentTimeMillis() + 3 * 60_000;

      long topVer = ignite0.cluster().topologyVersion();

      boolean failed = false;

      while (System.currentTimeMillis() < stopTime) {
        log.info("Start node.");

        IgniteKernal ignite = (IgniteKernal) startGrid(GRID_CNT);

        assertFalse(ignite.configuration().isClientMode());

        topVer++;

        IgniteInternalFuture<?> affFut =
            ignite
                .context()
                .cache()
                .context()
                .exchange()
                .affinityReadyFuture(new AffinityTopologyVersion(topVer));

        try {
          if (affFut != null) affFut.get(30_000);
        } catch (IgniteFutureTimeoutCheckedException e) {
          log.error("Failed to wait for affinity future after start: " + topVer);

          failed = true;

          break;
        }

        Thread.sleep(500);

        log.info("Stop node.");

        stopGrid(GRID_CNT);

        topVer++;

        affFut =
            ignite0
                .context()
                .cache()
                .context()
                .exchange()
                .affinityReadyFuture(new AffinityTopologyVersion(topVer));

        try {
          if (affFut != null) affFut.get(30_000);
        } catch (IgniteFutureTimeoutCheckedException e) {
          log.error("Failed to wait for affinity future after stop: " + topVer);

          failed = true;

          break;
        }
      }

      stop.set(true);

      fut.get();

      assertFalse("Test failed, see log for details.", failed);
    } finally {
      stop.set(true);

      ignite0.destroyCache(CACHE1);
      ignite0.destroyCache(CACHE2);

      awaitPartitionMapExchange();
    }
  }
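  /**
   * Illustrative caller for {@link #crossCacheTxFailover} (a sketch only; the method name and the
   * concrete parameter combination are assumed, not taken from this listing).
   *
   * @throws Exception If failed.
   */
  public void testCrossCachePessimisticTxFailover() throws Exception {
    crossCacheTxFailover(
        CacheMode.PARTITIONED, true, TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ);
  }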