Example #1
  /**
   * Completeness callback.
   *
   * @param success {@code True} if lock was acquired.
   * @param distribute {@code True} if need to distribute lock removal in case of failure.
   * @return {@code True} if complete by this operation.
   */
  private boolean onComplete(boolean success, boolean distribute) {
    if (log.isDebugEnabled())
      log.debug(
          "Received onComplete(..) callback [success="
              + success
              + ", distribute="
              + distribute
              + ", fut="
              + this
              + ']');

    if (!success) undoLocks(distribute);

    if (tx != null) cctx.tm().txContext(tx);

    if (super.onDone(success, err.get())) {
      if (log.isDebugEnabled()) log.debug("Completing future: " + this);

      // Clean up.
      cctx.mvcc().removeFuture(this);

      if (timeoutObj != null) cctx.time().removeTimeoutObject(timeoutObj);

      return true;
    }

    return false;
  }
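The cleanup above runs only when super.onDone(...) reports the first completion of the future. A minimal stand-alone sketch of the same complete-once idiom, using a plain JDK CompletableFuture and hypothetical names rather than the Ignite future classes:

import java.util.concurrent.CompletableFuture;

final class CompleteOnceSketch {
  private final CompletableFuture<Boolean> fut = new CompletableFuture<>();

  // complete(...) returns true only for the call that actually transitions the future
  // to the completed state, so the cleanup block runs exactly once even if several
  // threads invoke onComplete(...) concurrently.
  boolean onComplete(boolean success) {
    if (fut.complete(success)) {
      // ... remove the future from tracking structures, cancel timeout objects, etc.
      return true;
    }

    return false;
  }
}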
  /** {@inheritDoc} */
  @Override
  public boolean onDone(GridCacheTx tx, Throwable err) {
    if ((initialized() || err != null) && super.onDone(tx, err)) {
      if (error() instanceof GridCacheTxHeuristicException) {
        long topVer = this.tx.topologyVersion();

        for (GridCacheTxEntry<K, V> e : this.tx.writeMap().values()) {
          try {
            if (e.op() != NOOP && !cctx.affinity().localNode(e.key(), topVer)) {
              GridCacheEntryEx<K, V> cacheEntry = cctx.cache().peekEx(e.key());

              if (cacheEntry != null) cacheEntry.invalidate(null, this.tx.xidVersion());
            }
          } catch (Throwable t) {
            U.error(log, "Failed to invalidate entry.", t);

            if (t instanceof Error) throw (Error) t;
          }
        }
      }

      // Don't forget to clean up.
      cctx.mvcc().removeFuture(this);

      return true;
    }

    return false;
  }
  /**
   * @param cctx Cache context.
   * @param prj Projection (optional).
   * @param part Partition (optional).
   * @return Collection of data nodes in provided projection (if any).
   */
  private static Collection<ClusterNode> nodes(
      final GridCacheContext<?, ?> cctx,
      @Nullable final ClusterGroup prj,
      @Nullable final Integer part) {
    assert cctx != null;

    final AffinityTopologyVersion topVer = cctx.affinity().affinityTopologyVersion();

    Collection<ClusterNode> affNodes = CU.affinityNodes(cctx);

    if (prj == null && part == null) return affNodes;

    final Set<ClusterNode> owners =
        part == null
            ? Collections.<ClusterNode>emptySet()
            : new HashSet<>(cctx.topology().owners(part, topVer));

    return F.view(
        affNodes,
        new P1<ClusterNode>() {
          @Override
          public boolean apply(ClusterNode n) {
            return cctx.discovery().cacheAffinityNode(n, cctx.name())
                && (prj == null || prj.node(n.id()) != null)
                && (part == null || owners.contains(n));
          }
        });
  }
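As a rough illustration of the same filtering step with plain JDK collections (hypothetical node IDs stand in for ClusterNode, the projection, and the partition-owner checks), a sketch might look like:

import java.util.Collection;
import java.util.Set;
import java.util.UUID;
import java.util.stream.Collectors;

final class DataNodesSketch {
  // A null projection set or null owner set means "no filtering on that criterion",
  // mirroring the prj == null / part == null checks above.
  static Collection<UUID> dataNodes(Collection<UUID> affNodes,
                                    Set<UUID> projectionNodes,
                                    Set<UUID> partitionOwners) {
    return affNodes.stream()
        .filter(id -> projectionNodes == null || projectionNodes.contains(id))
        .filter(id -> partitionOwners == null || partitionOwners.contains(id))
        .collect(Collectors.toList());
  }
}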
  /** @return Nodes to execute on. */
  private Collection<GridNode> nodes() {
    GridCacheMode cacheMode = cctx.config().getCacheMode();

    switch (cacheMode) {
      case LOCAL:
        if (prj != null)
          U.warn(
              log,
              "Ignoring query projection because it's executed over LOCAL cache "
                  + "(only local node will be queried): "
                  + this);

        return Collections.singletonList(cctx.localNode());

      case REPLICATED:
        if (prj != null) return nodes(cctx, prj);

        GridCacheDistributionMode mode = cctx.config().getDistributionMode();

        return mode == PARTITIONED_ONLY || mode == NEAR_PARTITIONED
            ? Collections.singletonList(cctx.localNode())
            : Collections.singletonList(F.rand(nodes(cctx, null)));

      case PARTITIONED:
        return nodes(cctx, prj);

      default:
        throw new IllegalStateException("Unknown cache mode: " + cacheMode);
    }
  }
  /** {@inheritDoc} */
  @Override
  public GridDhtPartitionFullMap partitionMap(boolean onlyActive) {
    lock.readLock().lock();

    try {
      assert node2part != null && node2part.valid()
          : "Invalid node2part [node2part: "
              + node2part
              + ", cache="
              + cctx.name()
              + ", started="
              + cctx.started()
              + ", stopping="
              + stopping
              + ", locNodeId="
              + cctx.localNode().id()
              + ", locName="
              + cctx.gridName()
              + ']';

      GridDhtPartitionFullMap m = node2part;

      return new GridDhtPartitionFullMap(
          m.nodeId(), m.nodeOrder(), m.updateSequence(), m, onlyActive);
    } finally {
      lock.readLock().unlock();
    }
  }
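The pattern above (take the read lock, then hand back a copy of the shared map) can be sketched with plain JDK types; the names here are illustrative only:

import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class CopyUnderReadLockSketch {
  private final ReadWriteLock lock = new ReentrantReadWriteLock();
  private final Map<UUID, String> node2part = new HashMap<>();

  Map<UUID, String> partitionMap() {
    lock.readLock().lock();

    try {
      // Return a defensive copy so callers never observe changes made later
      // under the write lock.
      return new HashMap<>(node2part);
    }
    finally {
      lock.readLock().unlock();
    }
  }
}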
  /**
   * {@inheritDoc}
   *
   * @param ctx Shared cache context.
   */
  @Override
  public void prepareMarshal(GridCacheSharedContext ctx) throws IgniteCheckedException {
    super.prepareMarshal(ctx);

    if (ownedVals != null) {
      ownedValKeys = ownedVals.keySet();

      ownedValVals = ownedVals.values();

      for (Map.Entry<IgniteTxKey, CacheVersionedValue> entry : ownedVals.entrySet()) {
        GridCacheContext cacheCtx = ctx.cacheContext(entry.getKey().cacheId());

        entry.getKey().prepareMarshal(cacheCtx);

        entry.getValue().prepareMarshal(cacheCtx.cacheObjectContext());
      }
    }

    if (retVal != null && retVal.cacheId() != 0) {
      GridCacheContext cctx = ctx.cacheContext(retVal.cacheId());

      assert cctx != null : retVal.cacheId();

      retVal.prepareMarshal(cctx);
    }

    if (filterFailedKeys != null) {
      for (IgniteTxKey key : filterFailedKeys) {
        GridCacheContext cctx = ctx.cacheContext(key.cacheId());

        key.prepareMarshal(cctx);
      }
    }
  }
  /**
   * @param updateSeq Update sequence.
   * @return {@code True} if entry has been transitioned to state EVICTED.
   */
  boolean tryEvict(boolean updateSeq) {
    if (state.getReference() != RENTING || state.getStamp() != 0 || groupReserved()) return false;

    // Attempt to evict partition entries from cache.
    clearAll();

    if (map.isEmpty() && state.compareAndSet(RENTING, EVICTED, 0, 0)) {
      if (log.isDebugEnabled()) log.debug("Evicted partition: " + this);

      if (!GridQueryProcessor.isEnabled(cctx.config())) clearSwap();

      if (cctx.isDrEnabled()) cctx.dr().partitionEvicted(id);

      cctx.dataStructures().onPartitionEvicted(id);

      rent.onDone();

      ((GridDhtPreloader) cctx.preloader()).onPartitionEvicted(this, updateSeq);

      clearDeferredDeletes();

      return true;
    }

    return false;
  }
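The state field here appears to be an AtomicStampedReference whose stamp counts outstanding reservations; a minimal sketch of that RENTING-to-EVICTED transition with JDK types (state names assumed for illustration):

import java.util.concurrent.atomic.AtomicStampedReference;

final class EvictionStateSketch {
  enum State { OWNING, RENTING, EVICTED }

  // The stamp counts outstanding reservations; eviction is only legal at stamp 0.
  private final AtomicStampedReference<State> state =
      new AtomicStampedReference<>(State.RENTING, 0);

  boolean tryEvict() {
    if (state.getReference() != State.RENTING || state.getStamp() != 0)
      return false;

    // Atomically move RENTING -> EVICTED only if no reservation was taken in the meantime.
    return state.compareAndSet(State.RENTING, State.EVICTED, 0, 0);
  }
}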
  /**
   * @param nodes Nodes.
   * @param id ID.
   * @throws IgniteCheckedException If failed.
   */
  private void sendAllPartitions(
      Collection<? extends ClusterNode> nodes, GridDhtPartitionExchangeId id)
      throws IgniteCheckedException {
    GridDhtPartitionsFullMessage m =
        new GridDhtPartitionsFullMessage(id, lastVer.get(), id.topologyVersion());

    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
      if (!cacheCtx.isLocal()) {
        AffinityTopologyVersion startTopVer = cacheCtx.startTopologyVersion();

        boolean ready = startTopVer == null || startTopVer.compareTo(id.topologyVersion()) <= 0;

        if (ready)
          m.addFullPartitionsMap(cacheCtx.cacheId(), cacheCtx.topology().partitionMap(true));
      }
    }

    // It is important that client topologies be added after contexts.
    for (GridClientPartitionTopology top : cctx.exchange().clientTopologies())
      m.addFullPartitionsMap(top.cacheId(), top.partitionMap(true));

    if (log.isDebugEnabled())
      log.debug(
          "Sending full partition map [nodeIds="
              + F.viewReadOnly(nodes, F.node2id())
              + ", exchId="
              + exchId
              + ", msg="
              + m
              + ']');

    cctx.io().safeSend(nodes, m, SYSTEM_POOL, null);
  }
  /**
   * @param cctx Context.
   * @param id Partition ID.
   */
  @SuppressWarnings("ExternalizableWithoutPublicNoArgConstructor")
  GridDhtLocalPartition(GridCacheContext cctx, int id) {
    assert cctx != null;

    this.id = id;
    this.cctx = cctx;

    log = U.logger(cctx.kernalContext(), logRef, this);

    rent =
        new GridFutureAdapter<Object>() {
          @Override
          public String toString() {
            return "PartitionRentFuture [part=" + GridDhtLocalPartition.this + ", map=" + map + ']';
          }
        };

    map = new ConcurrentHashMap8<>(cctx.config().getStartSize() / cctx.affinity().partitions());

    int delQueueSize =
        CU.isSystemCache(cctx.name())
            ? 100
            : Math.max(MAX_DELETE_QUEUE_SIZE / cctx.affinity().partitions(), 20);

    rmvQueue = new GridCircularBuffer<>(U.ceilPow2(delQueueSize));
  }
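The removal queue above is sized per partition and then rounded up to a power of two. A small self-contained sketch of that sizing arithmetic (the constants below are assumptions for illustration, not the real defaults):

final class DeleteQueueSizingSketch {
  // Round v up to the next power of two, which is what the U.ceilPow2(...) call above appears to do.
  static int ceilPow2(int v) {
    int highest = Integer.highestOneBit(v);

    return v == highest ? v : highest << 1;
  }

  public static void main(String[] args) {
    int maxDeleteQueueSize = 200_000; // assumed overall limit
    int partitions = 1024;            // assumed partition count

    int delQueueSize = Math.max(maxDeleteQueueSize / partitions, 20); // 195

    System.out.println(ceilPow2(delQueueSize)); // prints 256
  }
}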
  /** @return Nodes to execute on. */
  private Collection<ClusterNode> nodes() {
    CacheMode cacheMode = cctx.config().getCacheMode();

    switch (cacheMode) {
      case LOCAL:
        if (prj != null)
          U.warn(
              log,
              "Ignoring query projection because it's executed over LOCAL cache "
                  + "(only local node will be queried): "
                  + this);

        return Collections.singletonList(cctx.localNode());

      case REPLICATED:
        if (prj != null || partition() != null) return nodes(cctx, prj, partition());

        return cctx.affinityNode()
            ? Collections.singletonList(cctx.localNode())
            : Collections.singletonList(F.rand(nodes(cctx, null, partition())));

      case PARTITIONED:
        return nodes(cctx, prj, partition());

      default:
        throw new IllegalStateException("Unknown cache mode: " + cacheMode);
    }
  }
Example #11
  /**
   * @param mapping Mappings.
   * @param key Key to map.
   * @param topVer Topology version.
   * @return Near lock mapping.
   * @throws GridException If mapping for key failed.
   */
  private GridNearLockMapping<K, V> map(
      K key, @Nullable GridNearLockMapping<K, V> mapping, long topVer) throws GridException {
    assert mapping == null || mapping.node() != null;

    GridNode primary = cctx.affinity().primary(key, topVer);

    if (cctx.discovery().node(primary.id()) == null)
      // If primary node left the grid before lock acquisition, fail the whole future.
      throw newTopologyException(null, primary.id());

    if (inTx() && tx.groupLock() && !primary.isLocal())
      throw new GridException(
          "Failed to start group lock transaction (local node is not primary for key) [key="
              + key
              + ", primaryNodeId="
              + primary.id()
              + ']');

    if (mapping == null || !primary.id().equals(mapping.node().id()))
      mapping = new GridNearLockMapping<>(primary, key);
    else mapping.addKey(key);

    return mapping;
  }
Example #12
  /** {@inheritDoc} */
  @Override
  public void prepareMarshal(GridCacheContext<K, V> ctx) throws GridException {
    super.prepareMarshal(ctx);

    if (entries != null) {
      if (ctx.deploymentEnabled()) prepareObjects(entries, ctx);

      entriesBytes = ctx.marshaller().marshal(entries);
    }
  }
  /**
   * Updates partition map in all caches.
   *
   * @param msg Partitions single message.
   */
  private void updatePartitionSingleMap(GridDhtPartitionsSingleMessage msg) {
    for (Map.Entry<Integer, GridDhtPartitionMap> entry : msg.partitions().entrySet()) {
      Integer cacheId = entry.getKey();
      GridCacheContext cacheCtx = cctx.cacheContext(cacheId);

      GridDhtPartitionTopology top =
          cacheCtx != null ? cacheCtx.topology() : cctx.exchange().clientTopology(cacheId, this);

      top.update(exchId, entry.getValue());
    }
  }
  /**
   * @param cacheCtx Cache context.
   * @throws IgniteCheckedException If failed.
   */
  private void initTopology(GridCacheContext cacheCtx) throws IgniteCheckedException {
    if (stopping(cacheCtx.cacheId())) return;

    if (canCalculateAffinity(cacheCtx)) {
      if (log.isDebugEnabled())
        log.debug(
            "Will recalculate affinity [locNodeId="
                + cctx.localNodeId()
                + ", exchId="
                + exchId
                + ']');

      cacheCtx.affinity().calculateAffinity(exchId.topologyVersion(), discoEvt);
    } else {
      if (log.isDebugEnabled())
        log.debug(
            "Will request affinity from remote node [locNodeId="
                + cctx.localNodeId()
                + ", exchId="
                + exchId
                + ']');

      // Fetch affinity assignment from remote node.
      GridDhtAssignmentFetchFuture fetchFut =
          new GridDhtAssignmentFetchFuture(
              cacheCtx,
              exchId.topologyVersion(),
              CU.affinityNodes(cacheCtx, exchId.topologyVersion()));

      fetchFut.init();

      List<List<ClusterNode>> affAssignment = fetchFut.get();

      if (log.isDebugEnabled())
        log.debug(
            "Fetched affinity from remote node, initializing affinity assignment [locNodeId="
                + cctx.localNodeId()
                + ", topVer="
                + exchId.topologyVersion()
                + ']');

      if (affAssignment == null) {
        affAssignment = new ArrayList<>(cacheCtx.affinity().partitions());

        List<ClusterNode> empty = Collections.emptyList();

        for (int i = 0; i < cacheCtx.affinity().partitions(); i++) affAssignment.add(empty);
      }

      cacheCtx.affinity().initializeAffinity(exchId.topologyVersion(), affAssignment);
    }
  }
  /**
   * @param cacheId Cache ID to check.
   * @param topVer Topology version.
   * @return {@code True} if cache was added during this exchange.
   */
  public boolean isCacheAdded(int cacheId, AffinityTopologyVersion topVer) {
    if (!F.isEmpty(reqs)) {
      for (DynamicCacheChangeRequest req : reqs) {
        if (req.start() && !req.clientStartOnly()) {
          if (CU.cacheId(req.cacheName()) == cacheId) return true;
        }
      }
    }

    GridCacheContext<?, ?> cacheCtx = cctx.cacheContext(cacheId);

    return cacheCtx != null && F.eq(cacheCtx.startTopologyVersion(), topVer);
  }
  /**
   * Updates partition map in all caches.
   *
   * @param msg Partitions full messages.
   */
  private void updatePartitionFullMap(GridDhtPartitionsFullMessage msg) {
    for (Map.Entry<Integer, GridDhtPartitionFullMap> entry : msg.partitions().entrySet()) {
      Integer cacheId = entry.getKey();

      GridCacheContext cacheCtx = cctx.cacheContext(cacheId);

      if (cacheCtx != null) cacheCtx.topology().update(exchId, entry.getValue());
      else {
        ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, AffinityTopologyVersion.NONE);

        if (oldest != null && oldest.isLocal())
          cctx.exchange().clientTopology(cacheId, this).update(exchId, entry.getValue());
      }
    }
  }
  /** {@inheritDoc} */
  @Override
  public Collection<ClusterNode> nodes(int p, AffinityTopologyVersion topVer) {
    Collection<ClusterNode> affNodes = cctx.affinity().nodes(p, topVer);

    lock.readLock().lock();

    try {
      assert node2part != null && node2part.valid()
          : "Invalid node-to-partitions map [topVer1="
              + topVer
              + ", topVer2="
              + this.topVer
              + ", cache="
              + cctx.name()
              + ", node2part="
              + node2part
              + ']';

      Collection<ClusterNode> nodes = null;

      Collection<UUID> nodeIds = part2node.get(p);

      if (!F.isEmpty(nodeIds)) {
        Collection<UUID> affIds = new HashSet<>(F.viewReadOnly(affNodes, F.node2id()));

        for (UUID nodeId : nodeIds) {
          if (!affIds.contains(nodeId) && hasState(p, nodeId, OWNING, MOVING, RENTING)) {
            ClusterNode n = cctx.discovery().node(nodeId);

            if (n != null
                && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion())) {
              if (nodes == null) {
                nodes = new ArrayList<>(affNodes.size() + 2);

                nodes.addAll(affNodes);
              }

              nodes.add(n);
            }
          }
        }
      }

      return nodes != null ? nodes : affNodes;
    } finally {
      lock.readLock().unlock();
    }
  }
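Stripped of the topology classes, the merge above (affinity nodes plus any extra nodes that still hold the partition in an active state such as OWNING, MOVING or RENTING) reduces to roughly the following; all names are placeholders:

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;

final class PartitionNodesSketch {
  static Collection<UUID> nodesFor(int part,
                                   Collection<UUID> affIds,
                                   Map<Integer, Set<UUID>> part2node,
                                   Set<UUID> activeOwners) {
    Set<UUID> ids = part2node.getOrDefault(part, Collections.emptySet());

    List<UUID> res = null;

    for (UUID id : ids) {
      // Append only nodes that are not already affinity nodes but still actively own the partition.
      if (!affIds.contains(id) && activeOwners.contains(id)) {
        if (res == null)
          res = new ArrayList<>(affIds);

        res.add(id);
      }
    }

    return res != null ? res : affIds;
  }
}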
Example #18
  /**
   * @param rmtReducer Optional reducer.
   * @param rmtTransform Optional transformer.
   * @param args Arguments.
   * @return Future.
   */
  @SuppressWarnings("IfMayBeConditional")
  private <R> GridCacheQueryFuture<R> execute(
      @Nullable GridReducer<T, R> rmtReducer,
      @Nullable GridClosure<T, R> rmtTransform,
      @Nullable Object... args) {
    Collection<GridNode> nodes = nodes();

    cctx.checkSecurity(GridSecurityPermission.CACHE_READ);

    if (F.isEmpty(nodes))
      return new GridCacheQueryErrorFuture<>(
          cctx.kernalContext(),
          new GridEmptyProjectionException("There are no data nodes for cache: " + cctx.name()));

    if (log.isDebugEnabled())
      log.debug("Executing query [query=" + this + ", nodes=" + nodes + ']');

    if (cctx.deploymentEnabled()) {
      try {
        cctx.deploy().registerClasses(filter, rmtReducer, rmtTransform);
        cctx.deploy().registerClasses(args);
      } catch (GridException e) {
        return new GridCacheQueryErrorFuture<>(cctx.kernalContext(), e);
      }
    }

    if (subjId == null) subjId = cctx.localNodeId();

    taskHash = cctx.kernalContext().job().currentTaskNameHash();

    GridCacheQueryBean bean =
        new GridCacheQueryBean(
            this,
            (GridReducer<Object, Object>) rmtReducer,
            (GridClosure<Object, Object>) rmtTransform,
            args);

    GridCacheQueryManager qryMgr = cctx.queries();

    boolean loc = nodes.size() == 1 && F.first(nodes).id().equals(cctx.localNodeId());

    if (type == SQL_FIELDS)
      return (GridCacheQueryFuture<R>)
          (loc ? qryMgr.queryFieldsLocal(bean) : qryMgr.queryFieldsDistributed(bean, nodes));
    else
      return (GridCacheQueryFuture<R>)
          (loc ? qryMgr.queryLocal(bean) : qryMgr.queryDistributed(bean, nodes));
  }
Example #19
  /**
   * @param ctx Cache registry.
   * @param implicit Implicit flag.
   * @param implicitSingle Implicit with one key flag.
   * @param concurrency Concurrency.
   * @param isolation Isolation.
   * @param timeout Timeout.
   * @param invalidate Invalidation policy.
   * @param syncCommit Synchronous commit flag.
   * @param syncRollback Synchronous rollback flag.
   * @param swapEnabled Whether to use swap storage.
   * @param storeEnabled Whether to use read/write through.
   */
  GridNearTxLocal(
      GridCacheContext<K, V> ctx,
      boolean implicit,
      boolean implicitSingle,
      GridCacheTxConcurrency concurrency,
      GridCacheTxIsolation isolation,
      long timeout,
      boolean invalidate,
      boolean syncCommit,
      boolean syncRollback,
      boolean swapEnabled,
      boolean storeEnabled) {
    super(
        ctx,
        ctx.versions().next(),
        implicit,
        implicitSingle,
        concurrency,
        isolation,
        timeout,
        invalidate,
        swapEnabled,
        storeEnabled);

    assert ctx != null;

    this.syncCommit = syncCommit;
    this.syncRollback = syncRollback;
  }
Example #20
  /**
   * @param byteCol Collection of serialized field rows.
   * @param ctx Cache context.
   * @param ldr Class loader.
   * @return Collection of unmarshalled field rows.
   * @throws GridException If unmarshalling failed.
   */
  @SuppressWarnings("TypeMayBeWeakened")
  @Nullable
  private Collection<Object> unmarshalFieldsCollection(
      @Nullable Collection<byte[]> byteCol, GridCacheContext<K, V> ctx, ClassLoader ldr)
      throws GridException {
    assert ctx != null;
    assert ldr != null;

    Collection<Object> col = unmarshalCollection(byteCol, ctx, ldr);
    Collection<Object> col0 = null;

    if (col != null) {
      col0 = new ArrayList<>(col.size());

      for (Object o : col) {
        List<Object> list = (List<Object>) o;
        List<Object> list0 = new ArrayList<>(list.size());

        for (Object obj : list)
          list0.add(obj != null ? ctx.marshaller().unmarshal((byte[]) obj, ldr) : null);

        col0.add(list0);
      }
    }

    return col0;
  }
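The element-wise round trip used here (and by the marshalling counterpart in the next example) can be sketched with plain Java serialization standing in for the grid marshaller; this is an illustration only, not the actual marshaller API:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.ArrayList;
import java.util.List;

final class FieldsCollectionSketch {
  static byte[] marshal(Object obj) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();

    try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
      out.writeObject(obj);
    }

    return bos.toByteArray();
  }

  static Object unmarshal(byte[] bytes) throws IOException, ClassNotFoundException {
    try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
      return in.readObject();
    }
  }

  // Each field of each row is unmarshalled separately, mirroring the nested loops above.
  static List<List<Object>> unmarshalFields(List<List<byte[]>> rows)
      throws IOException, ClassNotFoundException {
    List<List<Object>> res = new ArrayList<>(rows.size());

    for (List<byte[]> row : rows) {
      List<Object> row0 = new ArrayList<>(row.size());

      for (byte[] field : row)
        row0.add(field != null ? unmarshal(field) : null);

      res.add(row0);
    }

    return res;
  }
}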
Example #21
  /**
   * @param col Collection of field rows to marshal.
   * @param ctx Cache context.
   * @return Collection of marshalled field rows.
   * @throws GridException If marshalling failed.
   */
  @SuppressWarnings("TypeMayBeWeakened")
  @Nullable
  private Collection<byte[]> marshalFieldsCollection(
      @Nullable Collection<Object> col, GridCacheContext<K, V> ctx) throws GridException {
    assert ctx != null;

    if (col == null) return null;

    Collection<List<Object>> col0 = new ArrayList<>(col.size());

    for (Object o : col) {
      List<GridIndexingEntity<?>> list = (List<GridIndexingEntity<?>>) o;
      List<Object> list0 = new ArrayList<>(list.size());

      for (GridIndexingEntity<?> ent : list) {
        if (ent.bytes() != null) list0.add(ent.bytes());
        else {
          if (ctx.deploymentEnabled()) prepareObject(ent.value(), ctx);

          list0.add(CU.marshal(ctx, ent.value()));
        }
      }

      col0.add(list0);
    }

    return marshalCollection(col0, ctx);
  }
  /** @param cctx Context. */
  GridDhtPartitionTopologyImpl(GridCacheContext<?, ?> cctx) {
    assert cctx != null;

    this.cctx = cctx;

    log = cctx.logger(getClass());
  }
Example #23
  /**
   * @param cctx Context.
   * @param type Query type.
   * @param clsName Class name.
   * @param clause Clause.
   * @param filter Scan filter.
   * @param incMeta Include metadata flag.
   * @param keepPortable Keep portable flag.
   * @param prjPred Cache projection filter.
   */
  public GridCacheQueryAdapter(
      GridCacheContext<?, ?> cctx,
      GridCacheQueryType type,
      @Nullable GridPredicate<GridCacheEntry<Object, Object>> prjPred,
      @Nullable String clsName,
      @Nullable String clause,
      @Nullable GridBiPredicate<Object, Object> filter,
      boolean incMeta,
      boolean keepPortable) {
    assert cctx != null;
    assert type != null;

    this.cctx = cctx;
    this.type = type;
    this.clsName = clsName;
    this.clause = clause;
    this.prjPred = prjPred;
    this.filter = filter;
    this.incMeta = incMeta;
    this.keepPortable = keepPortable;

    log = cctx.logger(getClass());

    pageSize = DFLT_PAGE_SIZE;
    timeout = 0;
    keepAll = true;
    incBackups = false;
    dedup = false;
    prj = null;

    metrics = new GridCacheQueryMetricsAdapter();
  }
  /** {@inheritDoc} */
  @Override
  public void printMemoryStats(int threshold) {
    X.println(
        ">>>  Cache partition topology stats [grid="
            + cctx.gridName()
            + ", cache="
            + cctx.name()
            + ']');

    for (GridDhtLocalPartition part : locParts.values()) {
      int size = part.size();

      if (size >= threshold)
        X.println(">>>   Local partition [part=" + part.id() + ", size=" + size + ']');
    }
  }
Example #25
  /** {@inheritDoc} */
  @Override
  public GridCacheEntry<K, V> wrap(boolean prjAware) {
    GridCacheContext<K, V> nearCtx = cctx.dht().near().context();

    GridCacheProjectionImpl<K, V> prjPerCall = nearCtx.projectionPerCall();

    if (prjPerCall != null && prjAware)
      return new GridPartitionedCacheEntryImpl<K, V>(prjPerCall, nearCtx, key, this);

    GridCacheEntryImpl<K, V> wrapper = this.wrapper;

    if (wrapper == null)
      this.wrapper = wrapper = new GridPartitionedCacheEntryImpl<K, V>(null, nearCtx, key, this);

    return wrapper;
  }
  /**
   * @param cctx Context.
   * @param type Query type.
   * @param clsName Class name.
   * @param clause Clause.
   * @param filter Scan filter.
   * @param part Partition.
   * @param incMeta Include metadata flag.
   * @param keepPortable Keep portable flag.
   */
  public GridCacheQueryAdapter(
      GridCacheContext<?, ?> cctx,
      GridCacheQueryType type,
      @Nullable String clsName,
      @Nullable String clause,
      @Nullable IgniteBiPredicate<Object, Object> filter,
      @Nullable Integer part,
      boolean incMeta,
      boolean keepPortable) {
    assert cctx != null;
    assert type != null;
    assert part == null || part >= 0;

    this.cctx = cctx;
    this.type = type;
    this.clsName = clsName;
    this.clause = clause;
    this.filter = filter;
    this.part = part;
    this.incMeta = incMeta;
    this.keepPortable = keepPortable;

    log = cctx.logger(getClass());

    metrics = new GridCacheQueryMetricsAdapter();
  }
Example #27
  /**
   * @param ctx Cache context.
   * @param key Cache key.
   * @param hash Key hash value.
   * @param val Entry value.
   * @param next Next entry in the linked list.
   * @param ttl Time to live.
   */
  public GridDhtCacheEntry(
      GridCacheContext<K, V> ctx, K key, int hash, V val, GridCacheMapEntry<K, V> next, long ttl) {
    super(ctx, key, hash, val, next, ttl);

    // Record this entry with partition.
    locPart = ctx.dht().topology().onAdded(this);
  }
Example #28
  /**
   * Adds a Near key.
   *
   * @param key Key.
   * @param keyBytes Key bytes.
   * @param ctx Context.
   * @throws GridException If failed.
   */
  public void addNearKey(K key, byte[] keyBytes, GridCacheContext<K, V> ctx) throws GridException {
    if (ctx.deploymentEnabled()) prepareObject(key, ctx);

    if (nearKeyBytes == null) nearKeyBytes = new ArrayList<>();

    nearKeyBytes.add(keyBytes);
  }
  /**
   * @param p Partition.
   * @param topVer Topology version ({@code -1} for all nodes).
   * @param state Partition state.
   * @param states Additional partition states.
   * @return List of nodes for the partition.
   */
  private List<ClusterNode> nodes(
      int p,
      AffinityTopologyVersion topVer,
      GridDhtPartitionState state,
      GridDhtPartitionState... states) {
    Collection<UUID> allIds =
        topVer.topologyVersion() > 0 ? F.nodeIds(CU.affinityNodes(cctx, topVer)) : null;

    lock.readLock().lock();

    try {
      assert node2part != null && node2part.valid()
          : "Invalid node-to-partitions map [topVer="
              + topVer
              + ", allIds="
              + allIds
              + ", node2part="
              + node2part
              + ", cache="
              + cctx.name()
              + ']';

      Collection<UUID> nodeIds = part2node.get(p);

      // Node IDs can be null if both primary and backup nodes disappear.
      int size = nodeIds == null ? 0 : nodeIds.size();

      if (size == 0) return Collections.emptyList();

      List<ClusterNode> nodes = new ArrayList<>(size);

      for (UUID id : nodeIds) {
        if (topVer.topologyVersion() > 0 && !allIds.contains(id)) continue;

        if (hasState(p, id, state, states)) {
          ClusterNode n = cctx.discovery().node(id);

          if (n != null && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion()))
            nodes.add(n);
        }
      }

      return nodes;
    } finally {
      lock.readLock().unlock();
    }
  }
  /** @param m Mapping. */
  @SuppressWarnings({"unchecked"})
  private void finish(GridDistributedTxMapping<K, V> m) {
    GridRichNode n = m.node();

    assert !m.empty();

    GridNearTxFinishRequest req =
        new GridNearTxFinishRequest<K, V>(
            futId,
            tx.xidVersion(),
            tx.commitVersion(),
            tx.threadId(),
            commit,
            tx.isInvalidate(),
            m.explicitLock(),
            tx.topologyVersion(),
            null,
            null,
            null,
            commit && tx.pessimistic() ? m.writes() : null,
            tx.syncCommit() && commit || tx.syncRollback() && !commit);

    // If this is the primary node for the keys.
    if (n.isLocal()) {
      req.miniId(GridUuid.randomUuid());

      if (CU.DHT_ENABLED) {
        GridFuture<GridCacheTx> fut =
            commit ? dht().commitTx(n.id(), req) : dht().rollbackTx(n.id(), req);

        // Add new future.
        add(fut);
      } else
        // Add done future for testing.
        add(new GridFinishedFuture<GridCacheTx>(ctx));
    } else {
      MiniFuture fut = new MiniFuture(m);

      req.miniId(fut.futureId());

      add(fut); // Append new future.

      try {
        cctx.io().send(n, req);

        // If we don't wait for result, then mark future as done.
        if (!isSync() && !m.explicitLock()) fut.onDone();
      } catch (GridTopologyException e) {
        // Remove previous mapping.
        mappings.remove(m.node().id());

        fut.onResult(e);
      } catch (GridException e) {
        // Fail the whole thing.
        fut.onResult(e);
      }
    }
  }