Code example #1
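All ten snippets revolve around F.asList, a list-building shorthand that, judging by the surrounding identifiers, comes from Apache Ignite's internal GridFunc typedef. As a minimal sketch of that contract (the import path and the varargs signature are assumptions inferred from how the snippets call it, not confirmed API):

  import java.util.List;
  import org.apache.ignite.internal.util.typedef.F; // assumed location of the F typedef

  class FAsListSketch {
    public static void main(String[] args) {
      // Assumed behavior: F.asList builds a List much like java.util.Arrays.asList.
      List<Integer> one = F.asList(1);        // single-element list
      List<Integer> many = F.asList(2, 3, 4); // varargs list
      System.out.println(one + " " + many);   // prints: [1] [2, 3, 4]
    }
  }

This first example collects EXPLAIN output for a two-step SQL query: it reads each map query's plan from its merge table, wires the merge tables into fake tables, and appends the reduce query's plan. F.asList wraps each single-row plan into a list.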
  /**
   * @param c Connection.
   * @param space Space.
   * @param qry Query.
   * @return Iterator over the collected plan rows.
   * @throws IgniteCheckedException if failed.
   */
  private Iterator<List<?>> explainPlan(JdbcConnection c, String space, GridCacheTwoStepQuery qry)
      throws IgniteCheckedException {
    List<List<?>> lists = new ArrayList<>();

    for (int i = 0, mapQrys = qry.mapQueries().size(); i < mapQrys; i++) {
      ResultSet rs =
          h2.executeSqlQueryWithTimer(space, c, "SELECT PLAN FROM " + table(i), null, false);

      lists.add(F.asList(getPlan(rs)));
    }

    int tblIdx = 0;

    for (GridCacheSqlQuery mapQry : qry.mapQueries()) {
      GridMergeTable tbl = createMergeTable(c, mapQry, false);

      fakeTable(c, tblIdx++).setInnerTable(tbl);
    }

    GridCacheSqlQuery rdc = qry.reduceQuery();

    ResultSet rs =
        h2.executeSqlQueryWithTimer(
            space, c, "EXPLAIN " + rdc.query(), F.asList(rdc.parameters()), false);

    lists.add(F.asList(getPlan(rs)));

    return lists.iterator();
  }
Code example #2
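Event routing for a continuous query: internal events and initial-query or evicted entries (update counter -1) are returned immediately, wrapped with F.asList, while everything else is funneled through a lazily created per-partition PartitionRecovery buffer (its collectEntries method appears in example #6).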
  /**
   * @param ctx Context.
   * @param e Entry.
   * @return Entry collection.
   */
  private Collection<CacheContinuousQueryEntry> handleEvent(
      GridKernalContext ctx, CacheContinuousQueryEntry e) {
    assert e != null;

    if (internal) {
      if (e.isFiltered()) return Collections.emptyList();
      else return F.asList(e);
    }

    // Initial query entry or evicted entry. These events should be fired immediately.
    if (e.updateCounter() == -1L) return F.asList(e);

    PartitionRecovery rec = rcvs.get(e.partition());

    if (rec == null) {
      rec =
          new PartitionRecovery(
              ctx.log(getClass()),
              initTopVer,
              initUpdCntrs == null ? null : initUpdCntrs.get(e.partition()));

      PartitionRecovery oldRec = rcvs.putIfAbsent(e.partition(), rec);

      if (oldRec != null) rec = oldRec;
    }

    return rec.collectEntries(e);
  }
Code example #3
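Sends the full partition map to a single node during partition exchange; F.asList adapts the one ClusterNode to a collection-based overload. If the target left the grid, the failure is logged and dropped; otherwise a retry is scheduled through a timeout object until retryCnt runs out.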
  /**
   * @param nodeId Node ID.
   * @param retryCnt Number of retries.
   */
  private void sendAllPartitions(final UUID nodeId, final int retryCnt) {
    ClusterNode n = cctx.node(nodeId);

    try {
      if (n != null) sendAllPartitions(F.asList(n), exchId);
    } catch (IgniteCheckedException e) {
      if (e instanceof ClusterTopologyCheckedException || !cctx.discovery().alive(n)) {
        log.debug(
            "Failed to send full partition map to node, node left grid "
                + "[rmtNode="
                + nodeId
                + ", exchangeId="
                + exchId
                + ']');

        return;
      }

      if (retryCnt > 0) {
        long timeout = cctx.gridConfig().getNetworkSendRetryDelay();

        LT.error(
            log,
            e,
            "Failed to send full partition map to node (will retry after timeout) "
                + "[node="
                + nodeId
                + ", exchangeId="
                + exchId
                + ", timeout="
                + timeout
                + ']');

        cctx.time()
            .addTimeoutObject(
                new GridTimeoutObjectAdapter(timeout) {
                  @Override
                  public void onTimeout() {
                    sendAllPartitions(nodeId, retryCnt - 1);
                  }
                });
      } else
        U.error(
            log,
            "Failed to send full partition map [node=" + n + ", exchangeId=" + exchId + ']',
            e);
    }
  }
Code example #4
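A recursive directory walk that collects non-empty files down to maxDepth; when the argument is a plain file, the base case returns a one-element list built with F.asList.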
  /**
   * Finds all files in a folder and in its sub-tree down to the specified depth.
   *
   * @param file Starting folder.
   * @param maxDepth Depth of the tree. If 1, look only in the folder itself, without sub-folders.
   * @param filter File filter.
   * @return List of found files.
   */
  public static List<VisorLogFile> fileTree(File file, int maxDepth, @Nullable FileFilter filter) {
    if (file.isDirectory()) {
      File[] files = (filter == null) ? file.listFiles() : file.listFiles(filter);

      if (files == null) return Collections.emptyList();

      List<VisorLogFile> res = new ArrayList<>(files.length);

      for (File f : files) {
        if (f.isFile() && f.length() > 0) res.add(new VisorLogFile(f));
        else if (maxDepth > 1) res.addAll(fileTree(f, maxDepth - 1, filter));
      }

      return res;
    }

    return F.asList(new VisorLogFile(file));
  }
Code example #5
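A group-index test: after verifying that the query plan uses the group index, it loads a cache from an F.asList fixture sorted by (a, b) and checks range predicates against the expected result counts.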
  /** @throws Exception If failed. */
  public void testGroupIndexOperations() throws Exception {
    IgniteCache<Integer, GroupIndexTestValue> c =
        ignite(0)
            .getOrCreateCache(cacheConfig("grp", false, Integer.class, GroupIndexTestValue.class));

    try {
      // Check group index usage.
      String qry = "select 1 from GroupIndexTestValue ";

      String plan = columnQuery(c, "explain " + qry + "where a = 1 and b > 0").get(0).toString();

      info("Plan: " + plan);

      assertTrue(plan.contains("grpIdx"));

      // Test values, sorted by (a, b).
      List<GroupIndexTestValue> list =
          F.asList(
              new GroupIndexTestValue(0, 0),
              new GroupIndexTestValue(0, 5),
              new GroupIndexTestValue(1, 1),
              new GroupIndexTestValue(1, 3),
              new GroupIndexTestValue(2, -1),
              new GroupIndexTestValue(2, 2));

      // Fill cache.
      for (int i = 0; i < list.size(); i++) c.put(i, list.get(i));

      // Check results.
      assertEquals(1, columnQuery(c, qry + "where a = 1 and b = 1").size());
      assertEquals(2, columnQuery(c, qry + "where a = 1 and b < 4").size());
      assertEquals(2, columnQuery(c, qry + "where a = 1 and b <= 3").size());
      assertEquals(1, columnQuery(c, qry + "where a = 1 and b < 3").size());
      assertEquals(2, columnQuery(c, qry + "where a = 1 and b > 0").size());
      assertEquals(1, columnQuery(c, qry + "where a = 1 and b > 1").size());
      assertEquals(2, columnQuery(c, qry + "where a = 1 and b >= 1").size());
      assertEquals(4, columnQuery(c, qry + "where a > 0 and b > 0").size());
      assertEquals(4, columnQuery(c, qry + "where a > 0 and b >= 1").size());
      assertEquals(3, columnQuery(c, qry + "where a > 0 and b > 1").size());
    } finally {
      c.destroy();
    }
  }
Code example #6
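The per-partition buffering behind example #2: events are stored in pendingEvts keyed by update counter and released only while the counters stay consecutive; the very first event short-circuits through F.asList, and a full buffer is flushed even across gaps.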
    /**
     * Add continuous entry.
     *
     * @param entry Cache continuous query entry.
     * @return Collection of entries that will be fired.
     */
    public Collection<CacheContinuousQueryEntry> collectEntries(CacheContinuousQueryEntry entry) {
      assert entry != null;

      List<CacheContinuousQueryEntry> entries;

      synchronized (pendingEvts) {
        // Received first event.
        if (curTop == AffinityTopologyVersion.NONE) {
          lastFiredEvt = entry.updateCounter();

          curTop = entry.topologyVersion();

          return F.asList(entry);
        }

        if (curTop.compareTo(entry.topologyVersion()) < 0) {
          if (entry.updateCounter() == 1L && !entry.isBackup()) {
            entries = new ArrayList<>(pendingEvts.size());

            for (CacheContinuousQueryEntry evt : pendingEvts.values()) {
              if (evt != HOLE && !evt.isFiltered()) entries.add(evt);
            }

            pendingEvts.clear();

            curTop = entry.topologyVersion();

            lastFiredEvt = entry.updateCounter();

            entries.add(entry);

            return entries;
          }

          curTop = entry.topologyVersion();
        }

        // Check duplicate.
        if (entry.updateCounter() > lastFiredEvt) {
          pendingEvts.put(entry.updateCounter(), entry);

          // Put filtered events.
          if (entry.filteredEvents() != null) {
            for (long cntr : entry.filteredEvents()) {
              if (cntr > lastFiredEvt) pendingEvts.put(cntr, HOLE);
            }
          }
        } else {
          if (log.isDebugEnabled()) log.debug("Skip duplicate continuous query message: " + entry);

          return Collections.emptyList();
        }

        if (pendingEvts.isEmpty()) return Collections.emptyList();

        Iterator<Map.Entry<Long, CacheContinuousQueryEntry>> iter =
            pendingEvts.entrySet().iterator();

        entries = new ArrayList<>();

        // Buffer overflow: flush the oldest 90% of the buffer even if counters have gaps.
        if (pendingEvts.size() >= MAX_BUFF_SIZE) {
          for (int i = 0; i < MAX_BUFF_SIZE - (MAX_BUFF_SIZE / 10); i++) {
            Map.Entry<Long, CacheContinuousQueryEntry> e = iter.next();

            if (e.getValue() != HOLE && !e.getValue().isFiltered()) entries.add(e.getValue());

            lastFiredEvt = e.getKey();

            iter.remove();
          }
        } else {
          // Release events only while their update counters are consecutive.
          while (iter.hasNext()) {
            Map.Entry<Long, CacheContinuousQueryEntry> e = iter.next();

            if (e.getKey() == lastFiredEvt + 1) {
              ++lastFiredEvt;

              if (e.getValue() != HOLE && !e.getValue().isFiltered()) entries.add(e.getValue());

              iter.remove();
            } else break;
          }
        }
      }

      return entries;
    }
Code example #7
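A deadlock-detection test: wait-for graphs are built from singleton edge sets, and the cycles returned by findCycle are compared against expected paths expressed with F.asList.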
  /** @throws Exception If failed. */
  public void testFindCycle3() throws Exception {
    Map<GridCacheVersion, Set<GridCacheVersion>> wfg =
        new HashMap<GridCacheVersion, Set<GridCacheVersion>>() {
          {
            put(T1, Collections.singleton(T2));
            put(T2, Collections.singleton(T3));
            put(T3, Collections.singleton(T1));
          }
        };

    assertEquals(F.asList(T3, T2, T1, T3), findCycle(wfg, T1));
    assertEquals(F.asList(T1, T3, T2, T1), findCycle(wfg, T2));
    assertEquals(F.asList(T2, T1, T3, T2), findCycle(wfg, T3));
    assertAllNull(wfg, T1, T2, T3);

    wfg =
        new HashMap<GridCacheVersion, Set<GridCacheVersion>>() {
          {
            put(T1, Collections.singleton(T2));
            put(T2, Collections.singleton(T3));
            put(T3, Collections.singleton(T4));
            put(T4, asLinkedHashSet(T2, T5));
          }
        };

    assertEquals(F.asList(T4, T3, T2, T4), findCycle(wfg, T1));
    assertEquals(F.asList(T4, T3, T2, T4), findCycle(wfg, T2));
    assertEquals(F.asList(T2, T4, T3, T2), findCycle(wfg, T3));
    assertEquals(F.asList(T3, T2, T4, T3), findCycle(wfg, T4));
    assertAllNull(wfg, T1, T2, T3, T4);

    wfg =
        new HashMap<GridCacheVersion, Set<GridCacheVersion>>() {
          {
            put(T1, Collections.singleton(T2));
            put(T2, asLinkedHashSet(T3, T4));
            put(T3, Collections.singleton(T1));
            put(T4, Collections.singleton(T5));
            put(T5, Collections.singleton(T6));
            put(T6, Collections.singleton(T4));
          }
        };

    assertEquals(F.asList(T6, T5, T4, T6), findCycle(wfg, T1));
    assertEquals(F.asList(T6, T5, T4, T6), findCycle(wfg, T2));
    assertEquals(F.asList(T2, T1, T3, T2), findCycle(wfg, T3));

    wfg =
        new HashMap<GridCacheVersion, Set<GridCacheVersion>>() {
          {
            put(T1, Collections.singleton(T2));
            put(T2, Collections.singleton(T3));
            put(T3, Collections.singleton(T4));
            put(T4, Collections.singleton(T5));
            put(T5, Collections.singleton(T6));
            put(T6, Collections.singleton(T4));
          }
        };

    assertEquals(F.asList(T6, T5, T4, T6), findCycle(wfg, T1));
    assertEquals(F.asList(T6, T5, T4, T6), findCycle(wfg, T2));
    assertEquals(F.asList(T6, T5, T4, T6), findCycle(wfg, T3));
    assertEquals(F.asList(T6, T5, T4, T6), findCycle(wfg, T4));
    assertEquals(F.asList(T4, T6, T5, T4), findCycle(wfg, T5));
    assertEquals(F.asList(T5, T4, T6, T5), findCycle(wfg, T6));
  }
Code example #8
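The reduce-side driver of a two-step SQL query, apparently Ignite's reduce query executor: it picks data nodes (retrying while topology is unstable), sets up merge tables and latches, ships the map queries, and finally runs the reduce query with its parameters passed as F.asList(rdc.parameters()).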
  /**
   * @param cctx Cache context.
   * @param qry Query.
   * @param keepPortable Keep portable.
   * @return Iterator over the query results.
   */
  public Iterator<List<?>> query(
      GridCacheContext<?, ?> cctx, GridCacheTwoStepQuery qry, boolean keepPortable) {
    for (int attempt = 0; ; attempt++) {
      if (attempt != 0) {
        try {
          Thread.sleep(attempt * 10); // Wait for exchange.
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();

          throw new CacheException("Query was interrupted.", e);
        }
      }

      long qryReqId = reqIdGen.incrementAndGet();

      QueryRun r = new QueryRun();

      r.pageSize = qry.pageSize() <= 0 ? GridCacheTwoStepQuery.DFLT_PAGE_SIZE : qry.pageSize();

      r.idxs = new ArrayList<>(qry.mapQueries().size());

      String space = cctx.name();

      r.conn = (JdbcConnection) h2.connectionForSpace(space);

      AffinityTopologyVersion topVer = h2.readyTopologyVersion();

      List<String> extraSpaces = extraSpaces(space, qry.spaces());

      Collection<ClusterNode> nodes;

      // Explicit partition mapping for unstable topology.
      Map<ClusterNode, IntArray> partsMap = null;

      if (isPreloadingActive(cctx, extraSpaces)) {
        if (cctx.isReplicated()) nodes = replicatedUnstableDataNodes(cctx, extraSpaces);
        else {
          partsMap = partitionedUnstableDataNodes(cctx, extraSpaces);

          nodes = partsMap == null ? null : partsMap.keySet();
        }
      } else nodes = stableDataNodes(topVer, cctx, extraSpaces);

      if (nodes == null) continue; // Retry.

      assert !nodes.isEmpty();

      if (cctx.isReplicated() || qry.explain()) {
        assert qry.explain() || !nodes.contains(ctx.discovery().localNode())
            : "We must be on a client node.";

        // Select random data node to run query on a replicated data or get EXPLAIN PLAN from a
        // single node.
        nodes = Collections.singleton(F.rand(nodes));
      }

      int tblIdx = 0;

      final boolean skipMergeTbl = !qry.explain() && qry.skipMergeTable();

      for (GridCacheSqlQuery mapQry : qry.mapQueries()) {
        GridMergeIndex idx;

        if (!skipMergeTbl) {
          GridMergeTable tbl;

          try {
            tbl = createMergeTable(r.conn, mapQry, qry.explain());
          } catch (IgniteCheckedException e) {
            throw new IgniteException(e);
          }

          idx = tbl.getScanIndex(null);

          fakeTable(r.conn, tblIdx++).setInnerTable(tbl);
        } else idx = GridMergeIndexUnsorted.createDummy();

        for (ClusterNode node : nodes) idx.addSource(node.id());

        r.idxs.add(idx);
      }

      r.latch = new CountDownLatch(r.idxs.size() * nodes.size());

      runs.put(qryReqId, r);

      try {
        if (ctx.clientDisconnected()) {
          throw new CacheException(
              "Query was cancelled, client node disconnected.",
              new IgniteClientDisconnectedException(
                  ctx.cluster().clientReconnectFuture(), "Client node disconnected."));
        }

        Collection<GridCacheSqlQuery> mapQrys = qry.mapQueries();

        if (qry.explain()) {
          mapQrys = new ArrayList<>(qry.mapQueries().size());

          for (GridCacheSqlQuery mapQry : qry.mapQueries())
            mapQrys.add(new GridCacheSqlQuery("EXPLAIN " + mapQry.query(), mapQry.parameters()));
        }

        if (nodes.size() != 1 || !F.first(nodes).isLocal()) { // Marshall params for remotes.
          Marshaller m = ctx.config().getMarshaller();

          for (GridCacheSqlQuery mapQry : mapQrys) mapQry.marshallParams(m);
        }

        boolean retry = false;

        if (send(
            nodes,
            new GridQueryRequest(qryReqId, r.pageSize, space, mapQrys, topVer, extraSpaces, null),
            partsMap)) {
          awaitAllReplies(r, nodes);

          Object state = r.state.get();

          if (state != null) {
            if (state instanceof CacheException) {
              CacheException err = (CacheException) state;

              if (err.getCause() instanceof IgniteClientDisconnectedException) throw err;

              throw new CacheException("Failed to run map query remotely.", err);
            }

            if (state instanceof AffinityTopologyVersion) {
              retry = true;

              // If remote node asks us to retry then we have outdated full partition map.
              h2.awaitForReadyTopologyVersion((AffinityTopologyVersion) state);
            }
          }
        } else // Send failed.
          retry = true;

        Iterator<List<?>> resIter = null;

        if (!retry) {
          if (qry.explain()) return explainPlan(r.conn, space, qry);

          if (skipMergeTbl) {
            List<List<?>> res = new ArrayList<>();

            assert r.idxs.size() == 1 : r.idxs;

            GridMergeIndex idx = r.idxs.get(0);

            Cursor cur = idx.findInStream(null, null);

            while (cur.next()) {
              Row row = cur.get();

              int cols = row.getColumnCount();

              List<Object> resRow = new ArrayList<>(cols);

              for (int c = 0; c < cols; c++) resRow.add(row.getValue(c).getObject());

              res.add(resRow);
            }

            resIter = res.iterator();
          } else {
            GridCacheSqlQuery rdc = qry.reduceQuery();

            // Statement caching is prohibited here because we can't guarantee correct merge index
            // reuse.
            ResultSet res =
                h2.executeSqlQueryWithTimer(
                    space, r.conn, rdc.query(), F.asList(rdc.parameters()), false);

            resIter = new Iter(res);
          }
        }

        for (GridMergeIndex idx : r.idxs) {
          if (!idx.fetchedAll()) // We have to explicitly cancel queries on remote nodes.
            send(nodes, new GridQueryCancelRequest(qryReqId), null);
        }

        if (retry) {
          if (Thread.currentThread().isInterrupted())
            throw new IgniteInterruptedCheckedException("Query was interrupted.");

          continue;
        }

        return new GridQueryCacheObjectsIterator(resIter, cctx, keepPortable);
      } catch (IgniteCheckedException | RuntimeException e) {
        U.closeQuiet(r.conn);

        if (e instanceof CacheException) throw (CacheException) e;

        Throwable cause = e;

        if (e instanceof IgniteCheckedException) {
          Throwable disconnectedErr =
              ((IgniteCheckedException) e).getCause(IgniteClientDisconnectedException.class);

          if (disconnectedErr != null) cause = disconnectedErr;
        }

        throw new CacheException("Failed to run reduce query locally.", cause);
      } finally {
        if (!runs.remove(qryReqId, r)) U.warn(log, "Query run was already removed: " + qryReqId);

        if (!skipMergeTbl) {
          for (int i = 0, mapQrys = qry.mapQueries().size(); i < mapQrys; i++)
            fakeTable(null, i).setInnerTable(null); // Drop all merge tables.
        }
      }
    }
  }
Code example #9
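Exercises a GridConcurrentWeakHashSet while strong references are held: an F.asList collection drives the addAll/removeAll/retainAll assertions, followed by iterator and null-argument checks.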
  /** @throws Exception Thrown if test failed. */
  public void testA() throws Exception {
    Collection<Integer> set = new GridConcurrentWeakHashSet<>();

    Integer i = 1;

    assert set.add(i);
    assert !set.add(i);

    assert set.contains(i);

    assert set.size() == 1;

    Collection<Integer> c = F.asList(2, 3, 4, 5);

    assert set.addAll(c);
    assert !set.addAll(c);

    assert set.containsAll(c);

    assert set.size() == 1 + c.size();

    assert set.remove(i);
    assert !set.remove(i);

    assert !set.contains(i);

    assert set.size() == c.size();

    assert set.removeAll(c);
    assert !set.removeAll(c);

    assert !set.containsAll(c);

    assert set.isEmpty();

    Collection<Integer> c1 = Arrays.asList(1, 3, 5, 7, 9);

    int cnt = 0;

    // Walk the iterator just to count live entries; the contains() result is ignored.
    for (Iterator<Integer> iter = set.iterator(); iter.hasNext(); cnt++) c1.contains(iter.next());

    assert set.size() == cnt;

    assert set.size() == set.toArray().length;

    assert set.addAll(c1);

    assert set.retainAll(c);
    assert !set.retainAll(c);

    Collection<Integer> c2 = F.retain(c1, true, c);

    assert set.containsAll(c2);
    assert !set.containsAll(c1);
    assert !set.containsAll(c);

    assert set.size() == c2.size();

    set.clear();

    assert set.isEmpty();

    try {
      set.iterator().next();

      assert false;
    } catch (NoSuchElementException ignored) {
      assert true;
    }

    try {
      set.add(null);

      assert false;
    } catch (NullPointerException ignored) {
      assert true;
    }
  }
Code example #10
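The weak-reference counterpart of example #9: elements added from an F.asList collection disappear after their only strong references are nulled and gc() runs, so the set reports itself empty.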
  /** @throws Exception Thrown if test failed. */
  @SuppressWarnings({"UnusedAssignment"})
  public void testB() throws Exception {
    Collection<SampleBean> set = new GridConcurrentWeakHashSet<>();

    SampleBean bean1 = new SampleBean(1);

    assert set.add(bean1);
    assert !set.add(bean1);

    assert set.size() == 1;

    assert set.contains(bean1);

    bean1 = null;

    gc();

    assert set.isEmpty();

    Collection<SampleBean> c =
        F.asList(new SampleBean(1), new SampleBean(2), new SampleBean(3), new SampleBean(4));

    assert set.addAll(c);
    assert !set.addAll(c);

    assert set.size() == c.size();

    assert set.containsAll(c);

    c = null;

    gc();

    assert set.isEmpty();

    SampleBean b1 = new SampleBean(1);
    SampleBean b2 = new SampleBean(2);
    SampleBean b3 = new SampleBean(3);
    SampleBean b4 = new SampleBean(4);
    SampleBean b5 = new SampleBean(5);

    set.add(b1);
    set.add(b2);
    set.add(b3);
    set.add(b4);
    set.add(b5);

    Iterator<SampleBean> iter = set.iterator();

    assert iter.hasNext();

    b2 = null;
    b3 = null;
    b4 = null;

    gc();

    int cnt = 0;

    while (iter.hasNext()) {
      info(iter.next().toString());

      cnt++;
    }

    assert set.size() == cnt;
  }