Example #1
  /**
   * In an ATOMIC cache with CLOCK write order mode, if a key is updated from different nodes at
   * the same time, only one update wins and the others are ignored (this can happen in tests even
   * when the updates are executed from different nodes sequentially). This delay is used to avoid
   * such lost updates.
   *
   * @param cache Cache.
   * @throws Exception If failed.
   */
  protected void atomicClockModeDelay(IgniteCache cache) throws Exception {
    CacheConfiguration ccfg = (CacheConfiguration) cache.getConfiguration(CacheConfiguration.class);

    if (ccfg.getCacheMode() != LOCAL
        && ccfg.getAtomicityMode() == CacheAtomicityMode.ATOMIC
        && ccfg.getAtomicWriteOrderMode() == CacheAtomicWriteOrderMode.CLOCK) U.sleep(50);
  }
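
A minimal usage sketch in a two-node test. The jcache(int) helper (returning the cache proxy of the n-th grid) and the surrounding test lifecycle are assumptions taken from the kind of test framework this method lives in, not part of the method above:

  // Hypothetical test fragment; jcache(int) is an assumed test-framework helper.
  IgniteCache<Integer, Integer> cache0 = jcache(0);

  cache0.put(1, 100); // First update, applied from node 0.

  atomicClockModeDelay(cache0); // Give the CLOCK-ordered update time to settle.

  jcache(1).put(1, 200); // Second update, from node 1, is no longer at risk of being dropped.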
Example #2
  /**
   * Waits until the partition map exchange completes on all started grids, i.e. until the
   * affinity-assigned nodes and the actual partition owners agree for every partition of every
   * partitioned cache.
   *
   * @throws InterruptedException If interrupted.
   */
  @SuppressWarnings("BusyWait")
  protected void awaitPartitionMapExchange() throws InterruptedException {
    for (Ignite g : G.allGrids()) {
      IgniteKernal g0 = (IgniteKernal) g;

      for (IgniteCacheProxy<?, ?> c : g0.context().cache().jcaches()) {
        CacheConfiguration cfg = c.context().config();

        if (cfg.getCacheMode() == PARTITIONED
            && cfg.getRebalanceMode() != NONE
            && g.cluster().nodes().size() > 1) {
          AffinityFunction aff = cfg.getAffinity();

          GridDhtCacheAdapter<?, ?> dht = dht(c);

          GridDhtPartitionTopology top = dht.topology();

          // For every partition, wait until the affinity-assigned nodes match the actual owners.
          for (int p = 0; p < aff.partitions(); p++) {
            long start = 0;

            for (int i = 0; ; i++) {
              boolean match = false;

              AffinityTopologyVersion readyVer =
                  dht.context().shared().exchange().readyAffinityVersion();

              if (readyVer.topologyVersion() > 0 && c.context().started()) {
                // Must map on updated version of topology.
                Collection<ClusterNode> affNodes =
                    g0.affinity(cfg.getName()).mapPartitionToPrimaryAndBackups(p);

                int exp = affNodes.size();

                GridDhtTopologyFuture topFut = top.topologyVersionFuture();

                // Actual owners are only meaningful once the topology future has completed.
                Collection<ClusterNode> owners =
                    (topFut != null && topFut.isDone())
                        ? top.nodes(p, AffinityTopologyVersion.NONE)
                        : Collections.<ClusterNode>emptyList();

                int actual = owners.size();

                if (exp != actual || !affNodes.containsAll(owners)) {
                  LT.warn(
                      log(),
                      null,
                      "Waiting for topology map update ["
                          + "grid="
                          + g.name()
                          + ", cache="
                          + cfg.getName()
                          + ", cacheId="
                          + dht.context().cacheId()
                          + ", topVer="
                          + top.topologyVersion()
                          + ", topFut="
                          + topFut
                          + ", p="
                          + p
                          + ", affNodesCnt="
                          + exp
                          + ", ownersCnt="
                          + actual
                          + ", affNodes="
                          + affNodes
                          + ", owners="
                          + owners
                          + ", locNode="
                          + g.cluster().localNode()
                          + ']');
                } else match = true;
              } else {
                LT.warn(
                    log(),
                    null,
                    "Waiting for topology map update ["
                        + "grid="
                        + g.name()
                        + ", cache="
                        + cfg.getName()
                        + ", cacheId="
                        + dht.context().cacheId()
                        + ", topVer="
                        + top.topologyVersion()
                        + ", started="
                        + dht.context().started()
                        + ", p="
                        + p
                        + ", readyVer="
                        + readyVer
                        + ", locNode="
                        + g.cluster().localNode()
                        + ']');
              }

              if (!match) {
                if (i == 0) start = System.currentTimeMillis();

                if (System.currentTimeMillis() - start > 30_000)
                  throw new IgniteException(
                      "Timeout of waiting for topology map update ["
                          + "grid="
                          + g.name()
                          + ", cache="
                          + cfg.getName()
                          + ", cacheId="
                          + dht.context().cacheId()
                          + ", topVer="
                          + top.topologyVersion()
                          + ", p="
                          + p
                          + ", readyVer="
                          + readyVer
                          + ", locNode="
                          + g.cluster().localNode()
                          + ']');

                Thread.sleep(200); // Busy wait.

                continue;
              }

              if (i > 0)
                log()
                    .warning(
                        "Finished waiting for topology map update [grid="
                            + g.name()
                            + ", p="
                            + p
                            + ", duration="
                            + (System.currentTimeMillis() - start)
                            + "ms]");

              break;
            }
          }
        }
      }
    }
  }
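
A sketch of the typical call site, assuming the startGrid(int) helper from the same abstract test class (the names here are illustrative, not part of the method above):

  // Hypothetical test fragment: wait for rebalancing before asserting on partition owners.
  startGrid(0);
  startGrid(1); // The joining node triggers a partition map exchange and rebalancing.

  awaitPartitionMapExchange(); // Blocks until affinity and actual owners agree everywhere.

  // From here on it is safe to assert on the final partition assignment.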
Example #3
  /**
   * @param ignite Grid.
   * @param cacheName Cache name.
   * @param sample Sample size used to estimate memory usage.
   * @return Data transfer object for given cache.
   * @throws IgniteCheckedException If failed to create data transfer object.
   */
  public VisorCache from(IgniteEx ignite, String cacheName, int sample)
      throws IgniteCheckedException {
    assert ignite != null;

    GridCacheAdapter ca = ignite.context().cache().internalCache(cacheName);

    // Cache was not started.
    if (ca == null || !ca.context().started()) return null;

    name = cacheName;

    try {
      swapSize = ca.swapSize();
      swapKeys = ca.swapKeys();
    } catch (IgniteCheckedException ignored) {
      // Swap information is unavailable; fall back to -1 as a sentinel value.
      swapSize = -1;
      swapKeys = -1;
    }

    primaryPartitions = Collections.emptyList();
    backupPartitions = Collections.emptyList();

    CacheConfiguration cfg = ca.configuration();

    mode = cfg.getCacheMode();

    // REPLICATED caches are DHT-based as well, so partition info is collected for both modes.
    boolean partitioned =
        (mode == CacheMode.PARTITIONED || mode == CacheMode.REPLICATED)
            && ca.context().affinityNode();

    if (partitioned) {
      GridDhtCacheAdapter dca = null;

      // Unwrap the DHT cache: a near cache delegates partition topology to its DHT counterpart.
      if (ca instanceof GridNearCacheAdapter) dca = ((GridNearCacheAdapter) ca).dht();
      else if (ca instanceof GridDhtCacheAdapter) dca = (GridDhtCacheAdapter) ca;

      if (dca != null) {
        GridDhtPartitionTopology top = dca.topology();

        if (cfg.getCacheMode() != CacheMode.LOCAL && cfg.getBackups() > 0) {
          GridDhtPartitionMap2 map2 = top.localPartitionMap();

          partitionsMap = new GridDhtPartitionMap(map2.nodeId(), map2.updateSequence(), map2.map());
        }

        // Tally entry counts per local partition, split into primaries and backups.
        List<GridDhtLocalPartition> parts = top.localPartitions();

        primaryPartitions = new ArrayList<>(parts.size());
        backupPartitions = new ArrayList<>(parts.size());

        for (GridDhtLocalPartition part : parts) {
          int p = part.id();

          int sz = part.size();

          // Pass -1 as topology version in order not to wait for topology version.
          if (part.primary(AffinityTopologyVersion.NONE))
            primaryPartitions.add(new IgnitePair<>(p, sz));
          else if (part.state() == GridDhtPartitionState.OWNING
              && part.backup(AffinityTopologyVersion.NONE))
            backupPartitions.add(new IgnitePair<>(p, sz));
        }
      } else {
        // Old way of collecting partitions info.
        ClusterNode node = ignite.cluster().localNode();

        int[] pp = ca.affinity().primaryPartitions(node);

        primaryPartitions = new ArrayList<>(pp.length);

        for (int p : pp) {
          Set set = ca.entrySet(p);

          primaryPartitions.add(new IgnitePair<>(p, set != null ? set.size() : 0));
        }

        int[] bp = ca.affinity().backupPartitions(node);

        backupPartitions = new ArrayList<>(bp.length);

        for (int p : bp) {
          Set set = ca.entrySet(p);

          backupPartitions.add(new IgnitePair<>(p, set != null ? set.size() : 0));
        }
      }
    }

    size = ca.size();
    nearSize = ca.nearSize();

    dynamicDeploymentId = ca.context().dynamicDeploymentId();
    dhtSize = size - nearSize;
    primarySize = ca.primarySize();
    offHeapAllocatedSize = ca.offHeapAllocatedSize();
    offHeapEntriesCnt = ca.offHeapEntriesCount();
    partitions = ca.affinity().partitions();
    metrics = VisorCacheMetrics.from(ignite, cacheName);

    estimateMemorySize(ignite, ca, sample);

    return this;
  }
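
A sketch of a possible caller. The no-arg constructor, the cache name, and the sample size are assumptions for illustration; as the method shows, from() returns null when the cache is not started on the node:

  // Hypothetical caller: take a snapshot of cache state for a monitoring task.
  VisorCache dto = new VisorCache().from(ignite, "myCache", 1000);

  if (dto != null) // Null means the cache was not started on this node.
    process(dto);  // process(VisorCache) is an illustrative consumer.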