Code example #1
  /** Clears swap entries for an evicted partition. */
  private void clearSwap() {
    assert state() == EVICTED;
    assert !GridQueryProcessor.isEnabled(cctx.config())
        : "Indexing needs to have unswapped values.";

    try {
      GridCloseableIterator<Map.Entry<byte[], GridCacheSwapEntry>> it = cctx.swap().iterator(id);

      boolean isLocStore = cctx.store().isLocal();

      if (it != null) {
        // We can safely remove these values because no entries will be created
        // for the evicted partition.
        while (it.hasNext()) {
          Map.Entry<byte[], GridCacheSwapEntry> entry = it.next();

          byte[] keyBytes = entry.getKey();

          KeyCacheObject key = cctx.toCacheKeyObject(keyBytes);

          cctx.swap().remove(key);

          if (isLocStore) cctx.store().remove(null, key.value(cctx.cacheObjectContext(), false));
        }
      }
    } catch (IgniteCheckedException e) {
      U.error(log, "Failed to clear swap for evicted partition: " + this, e);
    }
  }
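
A minimal, self-contained sketch of the same drain-and-remove pattern, with plain maps standing in for the swap space and the local store; every name in it is hypothetical rather than Ignite API:

import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/** Hypothetical stand-ins: plain maps replace the swap space and the local store. */
class SwapDrainSketch {
  private final Map<String, byte[]> swap = new ConcurrentHashMap<>();
  private final Map<String, byte[]> locStore = new ConcurrentHashMap<>();
  private final boolean isLocStore = true;

  /** Mirrors clearSwap(): drain every swapped entry and mirror the removal to the local store. */
  void clearSwap() {
    Iterator<Map.Entry<String, byte[]>> it = swap.entrySet().iterator();

    // Safe to remove while iterating: no new entries appear for an evicted partition.
    while (it.hasNext()) {
      Map.Entry<String, byte[]> entry = it.next();

      it.remove(); // Analogous to cctx.swap().remove(key).

      if (isLocStore) locStore.remove(entry.getKey()); // Analogous to cctx.store().remove(...).
    }
  }
}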
Code example #2
  /**
   * {@inheritDoc}
   *
   * @param ctx Shared cache context.
   */
  @Override
  public void prepareMarshal(GridCacheSharedContext ctx) throws IgniteCheckedException {
    super.prepareMarshal(ctx);

    if (ownedVals != null) {
      ownedValKeys = ownedVals.keySet();

      ownedValVals = ownedVals.values();

      for (Map.Entry<IgniteTxKey, CacheVersionedValue> entry : ownedVals.entrySet()) {
        GridCacheContext cacheCtx = ctx.cacheContext(entry.getKey().cacheId());

        entry.getKey().prepareMarshal(cacheCtx);

        entry.getValue().prepareMarshal(cacheCtx.cacheObjectContext());
      }
    }

    if (retVal != null && retVal.cacheId() != 0) {
      GridCacheContext cctx = ctx.cacheContext(retVal.cacheId());

      assert cctx != null : retVal.cacheId();

      retVal.prepareMarshal(cctx);
    }

    if (filterFailedKeys != null) {
      for (IgniteTxKey key : filterFailedKeys) {
        GridCacheContext cctx = ctx.cacheContext(key.cacheId());

        key.prepareMarshal(cctx);
      }
    }
  }
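
The method above follows a two-phase serialization idiom: each key and value first converts itself into a wire-friendly form against its own cache context, and only then gets marshalled. A minimal sketch of the idiom, with a hypothetical Marshalable interface standing in for IgniteTxKey and CacheVersionedValue:

import java.util.Map;

/** Hypothetical interface standing in for IgniteTxKey / CacheVersionedValue. */
interface Marshalable {
  void prepareMarshal();
}

class PrepareMarshalSketch {
  /** Mirrors prepareMarshal(): let every key and value prepare itself before serialization. */
  static void prepareAll(Map<? extends Marshalable, ? extends Marshalable> ownedVals) {
    if (ownedVals == null) return;

    for (Map.Entry<? extends Marshalable, ? extends Marshalable> e : ownedVals.entrySet()) {
      e.getKey().prepareMarshal();
      e.getValue().prepareMarshal();
    }
  }
}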
Code example #3
  /** Checks consistency after all operations. */
  private void consistencyCheck() {
    if (CONSISTENCY_CHECK) {
      assert lock.writeLock().isHeldByCurrentThread();

      if (node2part == null) return;

      for (Map.Entry<UUID, GridDhtPartitionMap> e : node2part.entrySet()) {
        for (Integer p : e.getValue().keySet()) {
          Set<UUID> nodeIds = part2node.get(p);

          assert nodeIds != null
              : "Failed consistency check [part=" + p + ", nodeId=" + e.getKey() + ']';
          assert nodeIds.contains(e.getKey())
              : "Failed consistency check [part="
                  + p
                  + ", nodeId="
                  + e.getKey()
                  + ", nodeIds="
                  + nodeIds
                  + ']';
        }
      }

      for (Map.Entry<Integer, Set<UUID>> e : part2node.entrySet()) {
        for (UUID nodeId : e.getValue()) {
          GridDhtPartitionMap map = node2part.get(nodeId);

          assert map != null
              : "Failed consistency check [part=" + e.getKey() + ", nodeId=" + nodeId + ']';
          assert map.containsKey(e.getKey())
              : "Failed consistency check [part=" + e.getKey() + ", nodeId=" + nodeId + ']';
        }
      }
    }
  }
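
The check verifies that node2part and part2node remain exact inverses of each other. A self-contained sketch of the same forward/reverse verification over plain java.util maps (hypothetical signature; run with assertions enabled, i.e. java -ea):

import java.util.Map;
import java.util.Set;
import java.util.UUID;

class ConsistencyCheckSketch {
  static void check(Map<UUID, Set<Integer>> node2part, Map<Integer, Set<UUID>> part2node) {
    // Forward direction: every partition a node claims must list that node back.
    for (Map.Entry<UUID, Set<Integer>> e : node2part.entrySet())
      for (Integer p : e.getValue()) {
        Set<UUID> ids = part2node.get(p);

        assert ids != null && ids.contains(e.getKey())
            : "Failed consistency check [part=" + p + ", nodeId=" + e.getKey() + ']';
      }

    // Reverse direction: every node listed under a partition must claim that partition back.
    for (Map.Entry<Integer, Set<UUID>> e : part2node.entrySet())
      for (UUID nodeId : e.getValue()) {
        Set<Integer> parts = node2part.get(nodeId);

        assert parts != null && parts.contains(e.getKey())
            : "Failed consistency check [part=" + e.getKey() + ", nodeId=" + nodeId + ']';
      }
  }
}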
Code example #4
  /**
   * Updates partition map in all caches.
   *
   * @param msg Partitions single message.
   */
  private void updatePartitionSingleMap(GridDhtPartitionsSingleMessage msg) {
    for (Map.Entry<Integer, GridDhtPartitionMap> entry : msg.partitions().entrySet()) {
      Integer cacheId = entry.getKey();
      GridCacheContext cacheCtx = cctx.cacheContext(cacheId);

      GridDhtPartitionTopology top =
          cacheCtx != null ? cacheCtx.topology() : cctx.exchange().clientTopology(cacheId, this);

      top.update(exchId, entry.getValue());
    }
  }
Code example #5
  /**
   * Updates partition map in all caches.
   *
   * @param msg Partitions full message.
   */
  private void updatePartitionFullMap(GridDhtPartitionsFullMessage msg) {
    for (Map.Entry<Integer, GridDhtPartitionFullMap> entry : msg.partitions().entrySet()) {
      Integer cacheId = entry.getKey();

      GridCacheContext cacheCtx = cctx.cacheContext(cacheId);

      if (cacheCtx != null) cacheCtx.topology().update(exchId, entry.getValue());
      else {
        ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, AffinityTopologyVersion.NONE);

        if (oldest != null && oldest.isLocal())
          cctx.exchange().clientTopology(cacheId, this).update(exchId, entry.getValue());
      }
    }
  }
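
Examples #4 and #5 hinge on the same decision: when the cache context is not started locally, fall back to a client-side topology. The choice could be factored into a single helper; the sketch below reuses only calls already shown above, but the helper itself is hypothetical:

  // Hypothetical helper; resolveTopology() is not part of the Ignite code base.
  private GridDhtPartitionTopology resolveTopology(Integer cacheId) {
    GridCacheContext cacheCtx = cctx.cacheContext(cacheId);

    return cacheCtx != null
        ? cacheCtx.topology() // Cache is started locally.
        : cctx.exchange().clientTopology(cacheId, this); // Cache is known only remotely.
  }

Example #5 additionally guards the fallback so that only the oldest alive server node maintains the client topology.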
Code example #6
  /**
   * Sets configuration properties from the map.
   *
   * @param endpointCfg Map of properties.
   * @throws IgniteCheckedException If invalid property name or value.
   */
  public void setupConfiguration(Map<String, String> endpointCfg) throws IgniteCheckedException {
    for (Map.Entry<String, String> e : endpointCfg.entrySet()) {
      try {
        switch (e.getKey()) {
          case "type":
          case "host":
          case "management":
            // Ignore these properties.
            break;

          case "port":
            setPort(Integer.parseInt(e.getValue()));
            break;

          case "size":
            setSize(Integer.parseInt(e.getValue()));
            break;

          case "tokenDirectoryPath":
            setTokenDirectoryPath(e.getValue());
            break;

          default:
            throw new IgniteCheckedException(
                "Invalid property '" + e.getKey() + "' of " + getClass().getSimpleName());
        }
      } catch (Throwable t) {
        if (t instanceof IgniteCheckedException || t instanceof Error) throw t;

        throw new IgniteCheckedException(
            "Invalid value '"
                + e.getValue()
                + "' of the property '"
                + e.getKey()
                + "' in "
                + getClass().getSimpleName(),
            t);
      }
    }
  }
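
A hypothetical driver for setupConfiguration(), assuming an Endpoint type that exposes the setters shown above; a non-numeric port value lands in the catch block, which wraps the NumberFormatException together with the offending key and value:

  // Hypothetical driver; Endpoint stands for whatever class declares setupConfiguration().
  static void configure(Endpoint endpoint) throws IgniteCheckedException {
    Map<String, String> cfg = new HashMap<>();

    cfg.put("type", "tcp"); // Silently ignored by the switch.
    cfg.put("port", "10800"); // Parsed via Integer.parseInt(...).
    cfg.put("size", "1024");

    endpoint.setupConfiguration(cfg); // Succeeds.

    cfg.put("port", "not-a-number");

    // Throws IgniteCheckedException: "Invalid value 'not-a-number' of the property 'port' in ...".
    endpoint.setupConfiguration(cfg);
  }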
Code example #7
  /**
   * Starts activity.
   *
   * @throws IgniteInterruptedCheckedException If interrupted.
   */
  public void init() throws IgniteInterruptedCheckedException {
    if (isDone()) return;

    if (init.compareAndSet(false, true)) {
      if (isDone()) return;

      try {
        // Wait for event to occur to make sure that discovery
        // will return corresponding nodes.
        U.await(evtLatch);

        assert discoEvt != null : this;
        assert !dummy && !forcePreload : this;

        ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion());

        oldestNode.set(oldest);

        startCaches();

        // True if a client node joined or failed.
        boolean clientNodeEvt;

        if (F.isEmpty(reqs)) {
          int type = discoEvt.type();

          assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED
              : discoEvt;

          clientNodeEvt = CU.clientNode(discoEvt.eventNode());
        } else {
          assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt;

          boolean clientOnlyStart = true;

          for (DynamicCacheChangeRequest req : reqs) {
            if (!req.clientStartOnly()) {
              clientOnlyStart = false;

              break;
            }
          }

          clientNodeEvt = clientOnlyStart;
        }

        if (clientNodeEvt) {
          ClusterNode node = discoEvt.eventNode();

          // Client nodes need to initialize affinity for a local join event or for started client caches.
          if (!node.isLocal()) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
              if (cacheCtx.isLocal()) continue;

              GridDhtPartitionTopology top = cacheCtx.topology();

              top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));

              if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) {
                initTopology(cacheCtx);

                top.beforeExchange(this);
              } else
                cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion());
            }

            if (exchId.isLeft())
              cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

            onDone(exchId.topologyVersion());

            skipPreload = cctx.kernalContext().clientNode();

            return;
          }
        }

        if (cctx.kernalContext().clientNode()) {
          skipPreload = true;

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            GridDhtPartitionTopology top = cacheCtx.topology();

            top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));
          }

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            initTopology(cacheCtx);
          }

          if (oldestNode.get() != null) {
            rmtNodes =
                new ConcurrentLinkedQueue<>(
                    CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

            rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

            ready.set(true);

            initFut.onDone(true);

            if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

            sendPartitions();
          } else onDone(exchId.topologyVersion());

          return;
        }

        assert oldestNode.get() != null;

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) {
            if (cacheCtx
                .discovery()
                .cacheAffinityNodes(cacheCtx.name(), topologyVersion())
                .isEmpty())
              U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex());
          }

          cacheCtx.preloader().onExchangeFutureAdded();
        }

        List<String> cachesWithoutNodes = null;

        if (exchId.isLeft()) {
          for (String name : cctx.cache().cacheNames()) {
            if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) {
              if (cachesWithoutNodes == null) cachesWithoutNodes = new ArrayList<>();

              cachesWithoutNodes.add(name);

              // Fire the event even if no client cache is started.
              if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) {
                Event evt =
                    new CacheEvent(
                        name,
                        cctx.localNode(),
                        cctx.localNode(),
                        "All server nodes have left the cluster.",
                        EventType.EVT_CACHE_NODES_LEFT,
                        0,
                        false,
                        null,
                        null,
                        null,
                        null,
                        false,
                        null,
                        false,
                        null,
                        null,
                        null);

                cctx.gridEvents().record(evt);
              }
            }
          }
        }

        if (cachesWithoutNodes != null) {
          StringBuilder sb =
              new StringBuilder(
                  "All server nodes for the following caches have left the cluster: ");

          for (int i = 0; i < cachesWithoutNodes.size(); i++) {
            String cache = cachesWithoutNodes.get(i);

            sb.append('\'').append(cache).append('\'');

            if (i != cachesWithoutNodes.size() - 1) sb.append(", ");
          }

          U.quietAndWarn(log, sb.toString());

          U.quietAndWarn(log, "Must have server nodes for caches to operate.");
        }

        assert discoEvt != null;

        assert exchId.nodeId().equals(discoEvt.eventNode().id());

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          GridClientPartitionTopology clientTop =
              cctx.exchange().clearClientTopology(cacheCtx.cacheId());

          long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

          // Update before waiting for locks.
          if (!cacheCtx.isLocal())
            cacheCtx
                .topology()
                .updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId()));
        }

        // Grab all alive remote nodes with order equal to or less than that of the last joined node.
        rmtNodes =
            new ConcurrentLinkedQueue<>(
                CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

        rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

        for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet())
          // If any single messages were already received, process them.
          onReceive(m.getKey(), m.getValue());

        for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet())
          // If any full messages were already received, process them.
          onReceive(m.getKey(), m.getValue());

        AffinityTopologyVersion topVer = exchId.topologyVersion();

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Must initialize topology after we get discovery event.
          initTopology(cacheCtx);

          cacheCtx.preloader().updateLastExchangeFuture(this);
        }

        IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer);

        // Assign to a class variable so it is included in the toString() output.
        this.partReleaseFut = partReleaseFut;

        if (log.isDebugEnabled()) log.debug("Before waiting for partition release future: " + this);

        while (true) {
          try {
            partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            // Print pending transactions and locks that might have led to hang.
            dumpPendingObjects();
          }
        }

        if (log.isDebugEnabled()) log.debug("After waiting for partition release future: " + this);

        if (!F.isEmpty(reqs)) blockGateways();

        if (exchId.isLeft())
          cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

        IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion());

        while (true) {
          try {
            locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            U.warn(
                log,
                "Failed to wait for locks release future. "
                    + "Dumping pending objects that might be the cause: "
                    + cctx.localNodeId());

            U.warn(log, "Locked entries:");

            Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks =
                cctx.mvcc().unfinishedLocks(exchId.topologyVersion());

            for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet())
              U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']');
          }
        }

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Notify replication manager.
          GridCacheContext drCacheCtx =
              cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;

          if (drCacheCtx.isDrEnabled()) drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft());

          // Partition release future is done so we can flush the write-behind store.
          cacheCtx.store().forceFlush();

          // Process queued undeploys prior to sending/spreading map.
          cacheCtx.preloader().unwindUndeploys();

          GridDhtPartitionTopology top = cacheCtx.topology();

          assert topVer.equals(top.topologyVersion())
              : "Topology version is updated only by instances of this class inside the single ExchangeWorker thread.";

          top.beforeExchange(this);
        }

        for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
          top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId()));

          top.beforeExchange(this);
        }
      } catch (IgniteInterruptedCheckedException e) {
        onDone(e);

        throw e;
      } catch (Throwable e) {
        U.error(
            log,
            "Failed to reinitialize local partitions (preloading will be stopped): " + exchId,
            e);

        onDone(e);

        if (e instanceof Error) throw (Error) e;

        return;
      }

      if (F.isEmpty(rmtIds)) {
        onDone(exchId.topologyVersion());

        return;
      }

      ready.set(true);

      initFut.onDone(true);

      if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

      // If this node is not the oldest.
      if (!oldestNode.get().id().equals(cctx.localNodeId())) sendPartitions();
      else {
        boolean allReceived = allReceived();

        if (allReceived && replied.compareAndSet(false, true)) {
          if (spreadPartitions()) onDone(exchId.topologyVersion());
        }
      }

      scheduleRecheck();
    } else assert false : "Skipped init future: " + this;
  }
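
The two wait loops inside init() share a pattern worth isolating: block on a future with a bounded timeout and, on each timeout, dump diagnostics instead of failing, then retry. A self-contained sketch with java.util.concurrent.CompletableFuture standing in for Ignite's internal future type:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

class BoundedWaitSketch {
  /** Mirrors the partReleaseFut loop: wait, and on every timeout dump diagnostics and retry. */
  static void awaitWithDiagnostics(CompletableFuture<?> fut, long netTimeoutMs) throws Exception {
    while (true) {
      try {
        // Wait at most twice the configured network timeout, as in init().
        fut.get(2 * netTimeoutMs, TimeUnit.MILLISECONDS);

        break;
      } catch (TimeoutException ignored) {
        // The exchange must not give up on a slow release: report what is still
        // pending (transactions and locks in the real code) and keep waiting.
        System.err.println("Still waiting for release future; dumping pending objects...");
      }
    }
  }
}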
Code example #8
  /** {@inheritDoc} */
  @SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
  @Nullable
  @Override
  public GridDhtPartitionMap update(
      @Nullable GridDhtPartitionExchangeId exchId, GridDhtPartitionFullMap partMap) {
    if (log.isDebugEnabled())
      log.debug(
          "Updating full partition map [exchId=" + exchId + ", parts=" + fullMapString() + ']');

    assert partMap != null;

    lock.writeLock().lock();

    try {
      if (stopping) return null;

      if (exchId != null && lastExchangeId != null && lastExchangeId.compareTo(exchId) >= 0) {
        if (log.isDebugEnabled())
          log.debug(
              "Stale exchange id for full partition map update (will ignore) [lastExchId="
                  + lastExchangeId
                  + ", exchId="
                  + exchId
                  + ']');

        return null;
      }

      if (node2part != null && node2part.compareTo(partMap) >= 0) {
        if (log.isDebugEnabled())
          log.debug(
              "Stale partition map for full partition map update (will ignore) [lastExchId="
                  + lastExchangeId
                  + ", exchId="
                  + exchId
                  + ", curMap="
                  + node2part
                  + ", newMap="
                  + partMap
                  + ']');

        return null;
      }

      long updateSeq = this.updateSeq.incrementAndGet();

      if (exchId != null) lastExchangeId = exchId;

      if (node2part != null) {
        for (GridDhtPartitionMap part : node2part.values()) {
          GridDhtPartitionMap newPart = partMap.get(part.nodeId());

          // If the current map for some node is newer than the received one,
          // keep the newer value.
          if (newPart != null && newPart.updateSequence() < part.updateSequence()) {
            if (log.isDebugEnabled())
              log.debug(
                  "Overriding partition map in full update map [exchId="
                      + exchId
                      + ", curPart="
                      + mapString(part)
                      + ", newPart="
                      + mapString(newPart)
                      + ']');

            partMap.put(part.nodeId(), part);
          }
        }

        for (Iterator<UUID> it = partMap.keySet().iterator(); it.hasNext(); ) {
          UUID nodeId = it.next();

          if (!cctx.discovery().alive(nodeId)) {
            if (log.isDebugEnabled())
              log.debug(
                  "Removing left node from full map update [nodeId="
                      + nodeId
                      + ", partMap="
                      + partMap
                      + ']');

            it.remove();
          }
        }
      }

      node2part = partMap;

      Map<Integer, Set<UUID>> p2n = new HashMap<>(cctx.affinity().partitions(), 1.0f);

      for (Map.Entry<UUID, GridDhtPartitionMap> e : partMap.entrySet()) {
        for (Integer p : e.getValue().keySet()) {
          Set<UUID> ids = p2n.get(p);

          if (ids == null)
            // Initialize HashSet to size 3 in anticipation that there won't be
            // more than 3 nodes per partition.
            p2n.put(p, ids = U.newHashSet(3));

          ids.add(e.getKey());
        }
      }

      part2node = p2n;

      boolean changed = checkEvictions(updateSeq);

      consistencyCheck();

      if (log.isDebugEnabled()) log.debug("Partition map after full update: " + fullMapString());

      return changed ? localPartitionMap() : null;
    } finally {
      lock.writeLock().unlock();
    }
  }
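
The part2node rebuild near the end of update() is a plain map inversion. A self-contained sketch of the same inversion over java.util collections (hypothetical signature; the real code iterates GridDhtPartitionMap values rather than a Set):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.UUID;

class InvertMapSketch {
  /** Rebuilds the reverse index: partition -> set of node IDs that own it. */
  static Map<Integer, Set<UUID>> invert(Map<UUID, Set<Integer>> node2part) {
    Map<Integer, Set<UUID>> p2n = new HashMap<>();

    for (Map.Entry<UUID, Set<Integer>> e : node2part.entrySet())
      for (Integer p : e.getValue())
        // Small initial capacity: few owner nodes are expected per partition.
        p2n.computeIfAbsent(p, k -> new HashSet<>(4)).add(e.getKey());

    return p2n;
  }
}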