    /**
     * @param newEntries Entries to add.
     * @param lsnr Listener for the operation future.
     * @return Future for the operation.
     * @throws GridInterruptedException If the thread was interrupted.
     */
    @Nullable
    GridFutureAdapter<?> update(
        Iterable<Map.Entry<K, V>> newEntries, GridInClosure<GridFuture<?>> lsnr)
        throws GridInterruptedException {
      List<Map.Entry<K, V>> entries0 = null;
      GridFutureAdapter<Object> curFut0;

      synchronized (this) {
        curFut0 = curFut;

        curFut0.listenAsync(lsnr);

        for (Map.Entry<K, V> entry : newEntries) entries.add(entry);

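        // Buffer is full: detach it for submission and install a fresh buffer and future under the lock.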
        if (entries.size() >= bufSize) {
          entries0 = entries;

          entries = newEntries();
          curFut = new GridFutureAdapter<>(ctx);
          curFut.listenAsync(signalC);
        }
      }

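      // Submit the detached batch outside the lock so concurrent updates are not blocked.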
      if (entries0 != null) {
        submit(entries0, curFut0);

        if (cancelled)
          curFut0.onDone(
              new GridException("Data loader has been cancelled: " + GridDataLoaderImpl.this));
      }

      return curFut0;
    }
  /**
   * @param cancel {@code True} to close with cancellation.
   * @throws GridException If failed.
   */
  @Override
  public void close(boolean cancel) throws GridException {
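    // Only the first close() call proceeds; subsequent calls return immediately.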
    if (!closed.compareAndSet(false, true)) return;

    busyLock.block();

    if (log.isDebugEnabled())
      log.debug("Closing data loader [ldr=" + this + ", cancel=" + cancel + ']');

    GridException e = null;

    try {
      // Assuming that no methods are called on this loader after this method is called.
      if (cancel) {
        cancelled = true;

        for (Buffer buf : bufMappings.values()) buf.cancelAll();
      } else doFlush();

      ctx.event().removeLocalEventListener(discoLsnr);

      ctx.io().removeMessageListener(topic);
    } catch (GridException e0) {
      e = e0;
    }

    fut.onDone(null, e);

    if (e != null) throw e;
  }
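
  // Usage sketch (hedged: addData() is assumed from the public loader API, not verified here):
  //   GridDataLoader<K, V> ldr = ...; // obtained from the grid
  //   ldr.addData(key, val);          // entries are buffered per node and flushed in batches
  //   ldr.close(false);               // false: flush pending buffers; true: cancel in-flight requests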
  /**
   * @param updateSeq {@code True} to update the partition update sequence on eviction.
   * @return {@code True} if the partition has been transitioned to state EVICTED.
   */
  boolean tryEvict(boolean updateSeq) {
    if (state.getReference() != RENTING || state.getStamp() != 0 || groupReserved()) return false;

    // Attempt to evict partition entries from cache.
    clearAll();

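    // Transition to EVICTED only if the clear emptied the partition and no reservations were made meanwhile.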
    if (map.isEmpty() && state.compareAndSet(RENTING, EVICTED, 0, 0)) {
      if (log.isDebugEnabled()) log.debug("Evicted partition: " + this);

      if (!GridQueryProcessor.isEnabled(cctx.config())) clearSwap();

      if (cctx.isDrEnabled()) cctx.dr().partitionEvicted(id);

      cctx.dataStructures().onPartitionEvicted(id);

      rent.onDone();

      ((GridDhtPreloader) cctx.preloader()).onPartitionEvicted(this, updateSeq);

      clearDeferredDeletes();

      return true;
    }

    return false;
  }
  /**
   * @param updateSeq {@code True} to update the partition update sequence on eviction.
   * @return Future for the eviction attempt.
   */
  IgniteInternalFuture<Boolean> tryEvictAsync(boolean updateSeq) {
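    // Fast path: an already empty partition (with query indexing disabled) is evicted in place;
    // otherwise the eviction attempt is retried on the system pool.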
    if (map.isEmpty()
        && !GridQueryProcessor.isEnabled(cctx.config())
        && state.compareAndSet(RENTING, EVICTED, 0, 0)) {
      if (log.isDebugEnabled()) log.debug("Evicted partition: " + this);

      clearSwap();

      if (cctx.isDrEnabled()) cctx.dr().partitionEvicted(id);

      cctx.dataStructures().onPartitionEvicted(id);

      rent.onDone();

      ((GridDhtPreloader) cctx.preloader()).onPartitionEvicted(this, updateSeq);

      clearDeferredDeletes();

      return new GridFinishedFuture<>(true);
    }

    return cctx.closures()
        .callLocalSafe(
            new GPC<Boolean>() {
              @Override
              public Boolean call() {
                return tryEvict(true);
              }
            },
            /*system pool*/ true);
  }
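    /** Fails all pending request futures and the current buffer future because the node has left. */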
    void onNodeLeft() {
      assert !isLocNode;
      assert bufMappings.get(node.id()) != this;

      if (log.isDebugEnabled())
        log.debug("Forcibly completing futures (node has left): " + node.id());

      Exception e =
          new GridTopologyException(
              "Failed to wait for request completion " + "(node has left): " + node.id());

      for (GridFutureAdapter<Object> f : reqs.values()) f.onDone(e);

      // Make sure to complete current future.
      GridFutureAdapter<Object> curFut0;

      synchronized (this) {
        curFut0 = curFut;
      }

      curFut0.onDone(e);
    }
    /** @param res Response. */
    void onResponse(GridDataLoadResponse res) {
      if (log.isDebugEnabled()) log.debug("Received data load response: " + res);

      GridFutureAdapter<?> f = reqs.remove(res.requestId());

      if (f == null) {
        if (log.isDebugEnabled())
          log.debug("Future for request has not been found: " + res.requestId());

        return;
      }

      Throwable err = null;

      byte[] errBytes = res.errorBytes();

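      // A non-null error payload means the remote update failed; unmarshal it with the job class loader if peer deployment is used.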
      if (errBytes != null) {
        try {
          GridPeerDeployAware jobPda0 = jobPda;

          err =
              ctx.config()
                  .getMarshaller()
                  .unmarshal(
                      errBytes, jobPda0 != null ? jobPda0.classLoader() : U.gridClassLoader());
        } catch (GridException e) {
          f.onDone(null, new GridException("Failed to unmarshal response.", e));

          return;
        }
      }

      f.onDone(null, err);

      if (log.isDebugEnabled())
        log.debug(
            "Finished future [fut=" + f + ", reqId=" + res.requestId() + ", err=" + err + ']');
    }
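    /** Cancels pending local futures and fails outstanding remote request futures with a cancellation error. */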
    void cancelAll() {
      GridException err =
          new GridException("Data loader has been cancelled: " + GridDataLoaderImpl.this);

      for (GridFuture<?> f : locFuts) {
        try {
          f.cancel();
        } catch (GridException e) {
          U.error(log, "Failed to cancel mini-future.", e);
        }
      }

      for (GridFutureAdapter<?> f : reqs.values()) f.onDone(err);
    }
  /** {@inheritDoc} */
  @Override
  public boolean onDone(AffinityTopologyVersion res, Throwable err) {
    Map<Integer, Boolean> m = null;

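    // Run configured topology validators for every non-system cache and remember the per-cache results.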
    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
      if (cacheCtx.config().getTopologyValidator() != null && !CU.isSystemCache(cacheCtx.name())) {
        if (m == null) m = new HashMap<>();

        m.put(
            cacheCtx.cacheId(),
            cacheCtx.config().getTopologyValidator().validate(discoEvt.topologyNodes()));
      }
    }

    cacheValidRes = m != null ? m : Collections.<Integer, Boolean>emptyMap();

    cctx.cache().onExchangeDone(exchId.topologyVersion(), reqs, err);

    cctx.exchange().onExchangeDone(this, err);

    if (super.onDone(res, err) && !dummy && !forcePreload) {
      if (log.isDebugEnabled())
        log.debug(
            "Completed partition exchange [localNode="
                + cctx.localNodeId()
                + ", exchange= "
                + this
                + ']');

      initFut.onDone(err == null);

      GridTimeoutObject timeoutObj = this.timeoutObj;

      // Deschedule timeout object.
      if (timeoutObj != null) cctx.kernalContext().timeout().removeTimeoutObject(timeoutObj);

      if (exchId.isLeft()) {
        for (GridCacheContext cacheCtx : cctx.cacheContexts())
          cacheCtx.config().getAffinity().removeNode(exchId.nodeId());
      }

      return true;
    }

    return dummy;
  }
  /**
   * Starts activity.
   *
   * @throws IgniteInterruptedCheckedException If interrupted.
   */
  public void init() throws IgniteInterruptedCheckedException {
    if (isDone()) return;

    if (init.compareAndSet(false, true)) {
      if (isDone()) return;

      try {
        // Wait for event to occur to make sure that discovery
        // will return corresponding nodes.
        U.await(evtLatch);

        assert discoEvt != null : this;
        assert !dummy && !forcePreload : this;

        ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion());

        oldestNode.set(oldest);

        startCaches();

        // True if a client node joined, left, or failed, or if only client caches are being started.
        boolean clientNodeEvt;

        if (F.isEmpty(reqs)) {
          int type = discoEvt.type();

          assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED
              : discoEvt;

          clientNodeEvt = CU.clientNode(discoEvt.eventNode());
        } else {
          assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt;

          boolean clientOnlyStart = true;

          for (DynamicCacheChangeRequest req : reqs) {
            if (!req.clientStartOnly()) {
              clientOnlyStart = false;

              break;
            }
          }

          clientNodeEvt = clientOnlyStart;
        }

        if (clientNodeEvt) {
          ClusterNode node = discoEvt.eventNode();

          // Client needs to initialize affinity for a local join event or for started client caches.
          if (!node.isLocal()) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
              if (cacheCtx.isLocal()) continue;

              GridDhtPartitionTopology top = cacheCtx.topology();

              top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));

              if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) {
                initTopology(cacheCtx);

                top.beforeExchange(this);
              } else
                cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion());
            }

            if (exchId.isLeft())
              cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

            onDone(exchId.topologyVersion());

            skipPreload = cctx.kernalContext().clientNode();

            return;
          }
        }

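        // Local client node: update topology and initialize affinity for non-local caches, but skip preloading.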
        if (cctx.kernalContext().clientNode()) {
          skipPreload = true;

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            GridDhtPartitionTopology top = cacheCtx.topology();

            top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));
          }

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            initTopology(cacheCtx);
          }

          if (oldestNode.get() != null) {
            rmtNodes =
                new ConcurrentLinkedQueue<>(
                    CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

            rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

            ready.set(true);

            initFut.onDone(true);

            if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

            sendPartitions();
          } else onDone(exchId.topologyVersion());

          return;
        }

        assert oldestNode.get() != null;

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) {
            if (cacheCtx
                .discovery()
                .cacheAffinityNodes(cacheCtx.name(), topologyVersion())
                .isEmpty())
              U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex());
          }

          cacheCtx.preloader().onExchangeFutureAdded();
        }

        List<String> cachesWithoutNodes = null;

        if (exchId.isLeft()) {
          for (String name : cctx.cache().cacheNames()) {
            if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) {
              if (cachesWithoutNodes == null) cachesWithoutNodes = new ArrayList<>();

              cachesWithoutNodes.add(name);

              // Fire event even if there is no client cache started.
              if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) {
                Event evt =
                    new CacheEvent(
                        name,
                        cctx.localNode(),
                        cctx.localNode(),
                        "All server nodes have left the cluster.",
                        EventType.EVT_CACHE_NODES_LEFT,
                        0,
                        false,
                        null,
                        null,
                        null,
                        null,
                        false,
                        null,
                        false,
                        null,
                        null,
                        null);

                cctx.gridEvents().record(evt);
              }
            }
          }
        }

        if (cachesWithoutNodes != null) {
          StringBuilder sb =
              new StringBuilder(
                  "All server nodes for the following caches have left the cluster: ");

          for (int i = 0; i < cachesWithoutNodes.size(); i++) {
            String cache = cachesWithoutNodes.get(i);

            sb.append('\'').append(cache).append('\'');

            if (i != cachesWithoutNodes.size() - 1) sb.append(", ");
          }

          U.quietAndWarn(log, sb.toString());

          U.quietAndWarn(log, "Must have server nodes for caches to operate.");
        }

        assert discoEvt != null;

        assert exchId.nodeId().equals(discoEvt.eventNode().id());

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          GridClientPartitionTopology clientTop =
              cctx.exchange().clearClientTopology(cacheCtx.cacheId());

          long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

          // Update before waiting for locks.
          if (!cacheCtx.isLocal())
            cacheCtx
                .topology()
                .updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId()));
        }

        // Grab all alive remote nodes with order equal to or less than that of the last joined node.
        rmtNodes =
            new ConcurrentLinkedQueue<>(
                CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

        rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

        for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet())
          // If received any messages, process them.
          onReceive(m.getKey(), m.getValue());

        for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet())
          // If received any messages, process them.
          onReceive(m.getKey(), m.getValue());

        AffinityTopologyVersion topVer = exchId.topologyVersion();

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Must initialize topology after we get discovery event.
          initTopology(cacheCtx);

          cacheCtx.preloader().updateLastExchangeFuture(this);
        }

        IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer);

        // Assign to class variable so it will be included into toString() method.
        this.partReleaseFut = partReleaseFut;

        if (log.isDebugEnabled()) log.debug("Before waiting for partition release future: " + this);

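        // Wait for the partition release future, dumping pending transactions and locks on each timeout.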
        while (true) {
          try {
            partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            // Print pending transactions and locks that might have led to hang.
            dumpPendingObjects();
          }
        }

        if (log.isDebugEnabled()) log.debug("After waiting for partition release future: " + this);

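        // Dynamic cache change requests are present: block the corresponding cache gateways.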
        if (!F.isEmpty(reqs)) blockGateways();

        if (exchId.isLeft())
          cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

        IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion());

        while (true) {
          try {
            locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            U.warn(
                log,
                "Failed to wait for locks release future. "
                    + "Dumping pending objects that might be the cause: "
                    + cctx.localNodeId());

            U.warn(log, "Locked entries:");

            Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks =
                cctx.mvcc().unfinishedLocks(exchId.topologyVersion());

            for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet())
              U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']');
          }
        }

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Notify replication manager.
          GridCacheContext drCacheCtx =
              cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;

          if (drCacheCtx.isDrEnabled()) drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft());

          // Partition release future is done so we can flush the write-behind store.
          cacheCtx.store().forceFlush();

          // Process queued undeploys prior to sending/spreading map.
          cacheCtx.preloader().unwindUndeploys();

          GridDhtPartitionTopology top = cacheCtx.topology();

          assert topVer.equals(top.topologyVersion())
              : "Topology version is updated only in this class instances inside single ExchangeWorker thread.";

          top.beforeExchange(this);
        }

        for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
          top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId()));

          top.beforeExchange(this);
        }
      } catch (IgniteInterruptedCheckedException e) {
        onDone(e);

        throw e;
      } catch (Throwable e) {
        U.error(
            log,
            "Failed to reinitialize local partitions (preloading will be stopped): " + exchId,
            e);

        onDone(e);

        if (e instanceof Error) throw (Error) e;

        return;
      }

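      // No remote nodes to exchange with, so the exchange completes immediately.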
      if (F.isEmpty(rmtIds)) {
        onDone(exchId.topologyVersion());

        return;
      }

      ready.set(true);

      initFut.onDone(true);

      if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

      // If this node is not oldest.
      if (!oldestNode.get().id().equals(cctx.localNodeId())) sendPartitions();
      else {
        boolean allReceived = allReceived();

        if (allReceived && replied.compareAndSet(false, true)) {
          if (spreadPartitions()) onDone(exchId.topologyVersion());
        }
      }

      scheduleRecheck();
    } else assert false : "Skipped init future: " + this;
  }
  /**
   * @param entries Entries to load.
   * @param resFut Result future, completed when all entries have been processed.
   * @param activeKeys Keys that are still in flight.
   * @param remaps Number of remap attempts performed so far.
   */
  private void load0(
      Collection<? extends Map.Entry<K, V>> entries,
      final GridFutureAdapter<Object> resFut,
      final Collection<K> activeKeys,
      final int remaps) {
    assert entries != null;

    if (remaps >= MAX_REMAP_CNT) {
      resFut.onDone(new GridException("Failed to finish operation (too many remaps): " + remaps));

      return;
    }

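    // Group the entries by the node that is primary for each key, per the cache affinity.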
    Map<GridNode, Collection<Map.Entry<K, V>>> mappings = new HashMap<>();

    boolean initPda = ctx.deploy().enabled() && jobPda == null;

    for (Map.Entry<K, V> entry : entries) {
      GridNode node;

      try {
        K key = entry.getKey();

        assert key != null;

        if (initPda) {
          jobPda = new DataLoaderPda(key, entry.getValue(), updater);

          initPda = false;
        }

        node = ctx.affinity().mapKeyToNode(cacheName, key);
      } catch (GridException e) {
        resFut.onDone(e);

        return;
      }

      if (node == null) {
        resFut.onDone(
            new GridTopologyException(
                "Failed to map key to node "
                    + "(no nodes with cache found in topology) [infos="
                    + entries.size()
                    + ", cacheName="
                    + cacheName
                    + ']'));

        return;
      }

      Collection<Map.Entry<K, V>> col = mappings.get(node);

      if (col == null) mappings.put(node, col = new ArrayList<>());

      col.add(entry);
    }

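    // Hand each per-node group to that node's buffer, creating the buffer on first use;
    // failed groups are remapped unless the loader was cancelled.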
    for (final Map.Entry<GridNode, Collection<Map.Entry<K, V>>> e : mappings.entrySet()) {
      final UUID nodeId = e.getKey().id();

      Buffer buf = bufMappings.get(nodeId);

      if (buf == null) {
        Buffer old = bufMappings.putIfAbsent(nodeId, buf = new Buffer(e.getKey()));

        if (old != null) buf = old;
      }

      final Collection<Map.Entry<K, V>> entriesForNode = e.getValue();

      GridInClosure<GridFuture<?>> lsnr =
          new GridInClosure<GridFuture<?>>() {
            @Override
            public void apply(GridFuture<?> t) {
              try {
                t.get();

                for (Map.Entry<K, V> e : entriesForNode) activeKeys.remove(e.getKey());

                if (activeKeys.isEmpty()) resFut.onDone();
              } catch (GridException e1) {
                if (log.isDebugEnabled())
                  log.debug("Future finished with error [nodeId=" + nodeId + ", err=" + e1 + ']');

                if (cancelled) {
                  resFut.onDone(
                      new GridException(
                          "Data loader has been cancelled: " + GridDataLoaderImpl.this, e1));
                } else load0(entriesForNode, resFut, activeKeys, remaps + 1);
              }
            }
          };

      GridFutureAdapter<?> f;

      try {
        f = buf.update(entriesForNode, lsnr);
      } catch (GridInterruptedException e1) {
        resFut.onDone(e1);

        return;
      }

      if (ctx.discovery().node(nodeId) == null) {
        if (bufMappings.remove(nodeId, buf)) buf.onNodeLeft();

        if (f != null)
          f.onDone(
              new GridTopologyException(
                  "Failed to wait for request completion " + "(node has left): " + nodeId));
      }
    }
  }