/**
   * Starts exchange activity: waits for the discovery event, updates topology versions and
   * prepares partition state for the exchange.
   *
   * @throws IgniteInterruptedCheckedException If interrupted.
   */
  public void init() throws IgniteInterruptedCheckedException {
    if (isDone()) return;

    if (init.compareAndSet(false, true)) {
      if (isDone()) return;

      try {
        // Wait for the discovery event to occur to make sure that discovery
        // will return the corresponding nodes.
        U.await(evtLatch);

        assert discoEvt != null : this;
        assert !dummy && !forcePreload : this;

        ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion());

        oldestNode.set(oldest);

        startCaches();

        // True if a client node joined/failed, or if only client caches are being started.
        boolean clientNodeEvt;

        if (F.isEmpty(reqs)) {
          int type = discoEvt.type();

          assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED
              : discoEvt;

          clientNodeEvt = CU.clientNode(discoEvt.eventNode());
        } else {
          assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt;

          boolean clientOnlyStart = true;

          for (DynamicCacheChangeRequest req : reqs) {
            if (!req.clientStartOnly()) {
              clientOnlyStart = false;

              break;
            }
          }

          clientNodeEvt = clientOnlyStart;
        }

        if (clientNodeEvt) {
          ClusterNode node = discoEvt.eventNode();

          // Client needs to initialize affinity for the local join event or for started client caches.
          if (!node.isLocal()) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
              if (cacheCtx.isLocal()) continue;

              GridDhtPartitionTopology top = cacheCtx.topology();

              top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));

              if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) {
                initTopology(cacheCtx);

                top.beforeExchange(this);
              } else
                cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion());
            }

            if (exchId.isLeft())
              cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

            onDone(exchId.topologyVersion());

            skipPreload = cctx.kernalContext().clientNode();

            return;
          }
        }

        if (cctx.kernalContext().clientNode()) {
          skipPreload = true;

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            GridDhtPartitionTopology top = cacheCtx.topology();

            top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));
          }

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            initTopology(cacheCtx);
          }

          if (oldestNode.get() != null) {
            rmtNodes =
                new ConcurrentLinkedQueue<>(
                    CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

            rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

            ready.set(true);

            initFut.onDone(true);

            if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

            sendPartitions();
          } else onDone(exchId.topologyVersion());

          return;
        }

        assert oldestNode.get() != null;

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) {
            if (cacheCtx
                .discovery()
                .cacheAffinityNodes(cacheCtx.name(), topologyVersion())
                .isEmpty())
              U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex());
          }

          cacheCtx.preloader().onExchangeFutureAdded();
        }

        List<String> cachesWithoutNodes = null;

        if (exchId.isLeft()) {
          for (String name : cctx.cache().cacheNames()) {
            if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) {
              if (cachesWithoutNodes == null) cachesWithoutNodes = new ArrayList<>();

              cachesWithoutNodes.add(name);

              // Fire event even if there is no client cache started.
              if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) {
                Event evt =
                    new CacheEvent(
                        name,
                        cctx.localNode(),
                        cctx.localNode(),
                        "All server nodes have left the cluster.",
                        EventType.EVT_CACHE_NODES_LEFT,
                        0,
                        false,
                        null,
                        null,
                        null,
                        null,
                        false,
                        null,
                        false,
                        null,
                        null,
                        null);

                cctx.gridEvents().record(evt);
              }
            }
          }
        }

        if (cachesWithoutNodes != null) {
          StringBuilder sb =
              new StringBuilder(
                  "All server nodes for the following caches have left the cluster: ");

          for (int i = 0; i < cachesWithoutNodes.size(); i++) {
            String cache = cachesWithoutNodes.get(i);

            sb.append('\'').append(cache).append('\'');

            if (i != cachesWithoutNodes.size() - 1) sb.append(", ");
          }

          U.quietAndWarn(log, sb.toString());

          U.quietAndWarn(log, "Must have server nodes for caches to operate.");
        }

        assert discoEvt != null;

        assert exchId.nodeId().equals(discoEvt.eventNode().id());

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          GridClientPartitionTopology clientTop =
              cctx.exchange().clearClientTopology(cacheCtx.cacheId());

          long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

          // Update before waiting for locks.
          if (!cacheCtx.isLocal())
            cacheCtx
                .topology()
                .updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId()));
        }

        // Grab all alive remote nodes with order equal to or less than that of the last joined node.
        rmtNodes =
            new ConcurrentLinkedQueue<>(
                CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

        rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

        for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet())
          // If any messages were received, process them.
          onReceive(m.getKey(), m.getValue());

        for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet())
          // If any messages were received, process them.
          onReceive(m.getKey(), m.getValue());

        AffinityTopologyVersion topVer = exchId.topologyVersion();

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Must initialize topology after we get discovery event.
          initTopology(cacheCtx);

          cacheCtx.preloader().updateLastExchangeFuture(this);
        }

        IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer);

        // Assign to a class variable so it is included in the toString() output.
        this.partReleaseFut = partReleaseFut;

        if (log.isDebugEnabled()) log.debug("Before waiting for partition release future: " + this);

        while (true) {
          try {
            partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            // Print pending transactions and locks that might have led to the hang.
            dumpPendingObjects();
          }
        }

        if (log.isDebugEnabled()) log.debug("After waiting for partition release future: " + this);

        if (!F.isEmpty(reqs)) blockGateways();

        if (exchId.isLeft())
          cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

        IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion());

        while (true) {
          try {
            locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            U.warn(
                log,
                "Failed to wait for locks release future. "
                    + "Dumping pending objects that might be the cause: "
                    + cctx.localNodeId());

            U.warn(log, "Locked entries:");

            Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks =
                cctx.mvcc().unfinishedLocks(exchId.topologyVersion());

            for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet())
              U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']');
          }
        }

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Notify replication manager.
          GridCacheContext drCacheCtx =
              cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;

          if (drCacheCtx.isDrEnabled()) drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft());

          // Partition release future is done so we can flush the write-behind store.
          cacheCtx.store().forceFlush();

          // Process queued undeploys prior to sending/spreading map.
          cacheCtx.preloader().unwindUndeploys();

          GridDhtPartitionTopology top = cacheCtx.topology();

          assert topVer.equals(top.topologyVersion())
              : "Topology version is updated only in this class instances inside single ExchangeWorker thread.";

          top.beforeExchange(this);
        }

        for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
          top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId()));

          top.beforeExchange(this);
        }
      } catch (IgniteInterruptedCheckedException e) {
        onDone(e);

        throw e;
      } catch (Throwable e) {
        U.error(
            log,
            "Failed to reinitialize local partitions (preloading will be stopped): " + exchId,
            e);

        onDone(e);

        if (e instanceof Error) throw (Error) e;

        return;
      }

      if (F.isEmpty(rmtIds)) {
        onDone(exchId.topologyVersion());

        return;
      }

      ready.set(true);

      initFut.onDone(true);

      if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

      // If this node is not the oldest, send partitions to the oldest node.
      if (!oldestNode.get().id().equals(cctx.localNodeId())) sendPartitions();
      else {
        boolean allReceived = allReceived();

        if (allReceived && replied.compareAndSet(false, true)) {
          if (spreadPartitions()) onDone(exchId.topologyVersion());
        }
      }

      scheduleRecheck();
    } else assert false : "Skipped init future: " + this;
  }
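
/*
 * Hedged aside: init() above twice waits on an internal future in a loop with a bounded timeout,
 * dumping diagnostics on each timeout instead of hanging silently (partReleaseFut and locksFut).
 * Below is a minimal stand-alone sketch of that pattern, assuming a plain java.util.concurrent
 * Future in place of IgniteInternalFuture; class and method names here are illustrative, not
 * Ignite API.
 */
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

final class BoundedWaitSketch {
  /** Waits for the future, running a diagnostic dump every {@code timeoutMs} until it completes. */
  static <T> T awaitWithDiagnostics(Future<T> fut, long timeoutMs, Runnable dumpPendingObjects)
      throws InterruptedException, ExecutionException {
    while (true) {
      try {
        return fut.get(timeoutMs, TimeUnit.MILLISECONDS);
      } catch (TimeoutException ignored) {
        // Same idea as dumpPendingObjects() in init(): surface what is blocking progress.
        dumpPendingObjects.run();
      }
    }
  }
}
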
  /**
   * Asynchronous sequence update operation. Adds the given amount to the sequence value.
   *
   * @param l Increment amount.
   * @param updateCall Cache call that updates the sequence reservation count in accordance with
   *     {@code l}.
   * @param updated If {@code true}, the future is completed with the sequence value after the
   *     update, otherwise with the sequence value prior to the update.
   * @return Future indicating sequence value.
   * @throws GridException If update failed.
   */
  private GridFuture<Long> internalUpdateAsync(
      long l, @Nullable Callable<Long> updateCall, boolean updated) throws GridException {
    checkRemoved();

    A.ensure(l > 0, "Parameter must not be less than 1: " + l);

    lock.lock();

    try {
      // If reserved range isn't exhausted.
      if (locVal + l <= upBound) {
        long curVal = locVal;

        locVal += l;

        return new GridFinishedFuture<Long>(ctx.kernalContext(), updated ? locVal : curVal);
      }
    } finally {
      lock.unlock();
    }

    if (updateCall == null) updateCall = internalUpdate(l, updated);

    while (true) {
      if (updateGuard.compareAndSet(false, true)) {
        try {
          // This call must be outside lock.
          return ctx.closures().callLocalSafe(updateCall, true);
        } finally {
          lock.lock();

          try {
            updateGuard.set(false);

            cond.signalAll();
          } finally {
            lock.unlock();
          }
        }
      } else {
        lock.lock();

        try {
          while (locVal >= upBound && updateGuard.get()) {
            try {
              cond.await(500, MILLISECONDS);
            } catch (InterruptedException e) {
              throw new GridInterruptedException(e);
            }
          }

          checkRemoved();

          // If reserved range isn't exhausted.
          if (locVal + l <= upBound) {
            long curVal = locVal;

            locVal += l;

            return new GridFinishedFuture<Long>(ctx.kernalContext(), updated ? locVal : curVal);
          }
        } finally {
          lock.unlock();
        }
      }
    }
  }
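
/*
 * Hedged aside: the "updated" flag in internalUpdateAsync()/internalUpdate() decides whether the
 * caller observes the sequence value before or after the increment, i.e. getAndAdd-style versus
 * addAndGet-style semantics. A tiny single-threaded illustration follows; the class below is
 * illustrative only, not the real sequence implementation.
 */
final class UpdatedFlagSketch {
  private long locVal;

  long update(long l, boolean updated) {
    long curVal = locVal;
    locVal += l;
    return updated ? locVal : curVal; // updated=true -> value after, updated=false -> value before
  }

  public static void main(String[] args) {
    UpdatedFlagSketch seq = new UpdatedFlagSketch();
    System.out.println(seq.update(5, false)); // prints 0  (value prior to the update)
    System.out.println(seq.update(5, true));  // prints 10 (value after the update)
  }
}
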
  public static boolean testRendezvousChannel() {

    final int SERVER_THREADS = 10;
    final int CLIENT_THREADS = 10;
    final int SERVER_MAX_TIMEOUT = 20;
    final int CLIENT_MAX_TIMEOUT = 100;
    final int TIMEOUT_PERCENT = 50;
    final int MAX_SERVICE_TIME = 30;
    final int EXIT_TIME = 50;
    final int FAILURES_PERCENT = 1;

    Thread[] servers = new Thread[SERVER_THREADS];
    Thread[] clients = new Thread[CLIENT_THREADS];
    final RendezvousChannel_<Integer, Integer> rvc = new RendezvousChannel_<Integer, Integer>();
    final AtomicBoolean shutdown = new AtomicBoolean();
    final int[] successfulServices = new int[SERVER_THREADS];
    final int[] failedServices = new int[SERVER_THREADS];
    final int[] successfulRequests = new int[CLIENT_THREADS];
    final int[] failedRequests = new int[CLIENT_THREADS];

    //
    // Create and start the server threads.
    //

    for (int i = 0; i < SERVER_THREADS; i++) {
      final int tid = i;
      servers[i] =
          new Thread() {
            public void run() {
              Random r = new Random(tid);
              int count = 0;
              int timeouts = 0;
              long timeout;

              System.out.println("++server #" + tid + " started...");
              // loops:
              // do {
              RendezvousChannel.RendezVousToken<Integer, Integer> request;
              try {
                timeout = r.nextInt(100) < TIMEOUT_PERCENT ? r.nextInt(SERVER_MAX_TIMEOUT) : -1L;
                timeout = 100; // Note: overrides the randomized timeout above with a fixed value.
                while ((request = rvc.accept(timeout)) == null) {
                  timeouts++;
                  //								if (shutdown.get()) {
                  //									break loops;
                  //								}
                }

                //
                // Simulate the service time.
                //

                sleepCatchingInterrupt(r.nextInt(MAX_SERVICE_TIME));

                //
                // Compute the result doubling the request argument.
                //

                int serviceResult = request.service.intValue() << 1;

                //
                // Inject failures.
                //

                if (r.nextInt(100) < FAILURES_PERCENT) {
                  serviceResult++; // fail!
                  failedServices[tid]++;
                } else {
                  successfulServices[tid]++;
                }

                //
                // Reply with the service result.
                //

                rvc.reply(request, serviceResult);

                //
                // Increment service count and periodically display the "alive" message.
                //

                if ((++count % 100) == 0) {
                  System.out.print("[s#" + tid + "]");
                }
              } catch (InterruptedException ie) {
                // Interrupted (e.g. at shutdown): fall through and report this thread's results.
              }
              // } while (!shutdown.get());

              //
              // Show server thread results.
              //

              System.out.println(
                  "--server #"
                      + tid
                      + " exiting, "
                      + count
                      + " requests accepted, "
                      + timeouts
                      + " timed out");
            }
          };
      servers[i].start();
    }

    // Create and start the client threads.
    for (int i = 0; i < CLIENT_THREADS; i++) {
      final int tid = i;
      clients[i] =
          new Thread() {
            public void run() {
              Random r = new Random(tid + SERVER_THREADS);
              int count = 0;
              int timeouts = 0;

              System.out.println("++client #" + tid + " started...");
              // loops:
              // do {
              Integer response;
              try {
                long timeout =
                    r.nextInt(100) < TIMEOUT_PERCENT ? r.nextInt(CLIENT_MAX_TIMEOUT) : -1L;
                timeout = 100; // Note: overrides the randomized timeout above with a fixed value.
                int serviceArgument = tid + 1;
                while ((response = rvc.request(serviceArgument, timeout)) == null) {
                  timeouts++;
                  //								if (shutdown.get()) {
                  //									break loops;
                  //								}
                }
                int serviceResult = response.intValue();

                if (serviceResult == serviceArgument << 1) {
                  successfulRequests[tid]++;
                } else {
                  failedRequests[tid]++;
                  /*
                  System.out.println("\n!!client #" + tid + " service failure: expected/received " +
                  				   (serviceArgument << 1) + "/" + serviceResult);
                  */
                }

                //
                // Increment request count and periodically display the "alive" message.
                //

                if ((++count % 100) == 0) {
                  System.out.print("[c#" + tid + "]");
                }
              } catch (InterruptedException ie) {
                // Interrupted (e.g. at shutdown): fall through and report this thread's results.
              }
              // } while (!shutdown.get());
              System.out.println(
                  "--client #"
                      + tid
                      + " exiting, "
                      + count
                      + " requests processed, "
                      + timeouts
                      + " timed out");
            }
          };
      clients[i].start();
    }

    //
    // Run the test until <enter> and then set the shutdown flag.
    //

    System.out.print("\n+++ hit <enter> to terminate the test...");
    readln();
    shutdown.set(true);

    //
    // Sleep for a while to let all threads exit.
    //

    sleepCatchingInterrupt(EXIT_TIME);

    //
    // Wait until all client threads have exited.
    //

    for (int i = 0; i < CLIENT_THREADS; i++) {
      if (clients[i].isAlive()) {
        System.out.println("!!! client #" + i + " is still alive, so it will be interrupted");
        clients[i].interrupt();
      }
      joinUninterruptibly(clients[i]);
    }

    //
    // Wait until all server threads have exited.
    //

    for (int i = 0; i < SERVER_THREADS; i++) {
      if (servers[i].isAlive()) {
        System.out.println("!!! server #" + i + " is still alive, so it will be interrupted");
        servers[i].interrupt();
      }
      joinUninterruptibly(servers[i]);
    }

    //
    // Compute and display results.
    //

    long sumFailedRequests = 0, sumSuccessfulRequests = 0;
    for (int i = 0; i < CLIENT_THREADS; i++) {
      sumSuccessfulRequests += successfulRequests[i];
      sumFailedRequests += failedRequests[i];
    }
    long sumFailedServices = 0, sumSuccessfulServices = 0;
    for (int i = 0; i < SERVER_THREADS; i++) {
      sumSuccessfulServices += successfulServices[i];
      sumFailedServices += failedServices[i];
    }
    System.out.println(
        "+++ successfull requests/services: "
            + sumSuccessfulRequests
            + "/"
            + sumSuccessfulServices
            + ", failed requests/services: "
            + sumFailedRequests
            + "/"
            + sumFailedServices);
    return sumSuccessfulRequests == sumSuccessfulServices && sumFailedRequests == sumFailedServices;
  }
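
/*
 * Hedged aside: the test above exercises a rendezvous-channel API (request/accept/reply) whose
 * implementation is not shown here. The sketch below is only an assumption of how such a channel
 * could be built with standard java.util.concurrent primitives: a client publishes a token holding
 * its argument and blocks on a reply future; a server accepts a pending token, computes a result
 * and completes that future. Unlike a full implementation, a timed-out request does not withdraw
 * its token from the queue. All names here are illustrative, not the RendezvousChannel_ API.
 */
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

final class RendezvousChannelSketch<S, R> {
  static final class Token<S, R> {
    final S service; // request argument, analogous to request.service used by the servers above
    final CompletableFuture<R> reply = new CompletableFuture<>();

    Token(S service) {
      this.service = service;
    }
  }

  private final BlockingQueue<Token<S, R>> pending = new LinkedBlockingQueue<>();

  /** Client side: publish the request and wait (optionally bounded) for a server's reply. */
  R request(S service, long timeoutMillis) throws InterruptedException {
    Token<S, R> tok = new Token<>(service);
    pending.add(tok);
    try {
      return timeoutMillis < 0
          ? tok.reply.get()
          : tok.reply.get(timeoutMillis, TimeUnit.MILLISECONDS);
    } catch (TimeoutException ignored) {
      return null; // the test treats null as a timed-out request and retries
    } catch (ExecutionException e) {
      throw new IllegalStateException(e);
    }
  }

  /** Server side: take the next pending request, or return null on timeout. */
  Token<S, R> accept(long timeoutMillis) throws InterruptedException {
    return timeoutMillis < 0 ? pending.take() : pending.poll(timeoutMillis, TimeUnit.MILLISECONDS);
  }

  /** Server side: deliver the result to the client waiting on this token. */
  void reply(Token<S, R> token, R result) {
    token.reply.complete(result);
  }
}
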
  /**
   * Synchronous sequence update operation. Adds the given amount to the sequence value.
   *
   * @param l Increment amount.
   * @param updateCall Cache call that updates the sequence reservation count in accordance with
   *     {@code l}.
   * @param updated If {@code true}, returns the sequence value after the update, otherwise returns
   *     the sequence value prior to the update.
   * @return Sequence value.
   * @throws GridException If update failed.
   */
  private long internalUpdate(long l, @Nullable Callable<Long> updateCall, boolean updated)
      throws GridException {
    checkRemoved();

    assert l > 0;

    lock.lock();

    try {
      // If reserved range isn't exhausted.
      if (locVal + l <= upBound) {
        long curVal = locVal;

        locVal += l;

        return updated ? locVal : curVal;
      }
    } finally {
      lock.unlock();
    }

    if (updateCall == null) updateCall = internalUpdate(l, updated);

    while (true) {
      if (updateGuard.compareAndSet(false, true)) {
        try {
          // This call must be outside lock.
          return CU.outTx(updateCall, ctx);
        } finally {
          lock.lock();

          try {
            updateGuard.set(false);

            cond.signalAll();
          } finally {
            lock.unlock();
          }
        }
      } else {
        lock.lock();

        try {
          while (locVal >= upBound && updateGuard.get()) {
            try {
              cond.await(500, MILLISECONDS);
            } catch (InterruptedException e) {
              throw new GridInterruptedException(e);
            }
          }

          checkRemoved();

          // If reserved range isn't exhausted.
          if (locVal + l <= upBound) {
            long curVal = locVal;

            locVal += l;

            return updated ? locVal : curVal;
          }
        } finally {
          lock.unlock();
        }
      }
    }
  }
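
/*
 * Hedged aside: both internalUpdate() variants above use updateGuard as a single-flight latch so
 * that exactly one thread performs the slow distributed reservation while the others wait on the
 * condition and then re-check the refreshed local range. A generic stand-alone sketch of that
 * pattern follows; unlike the methods above it always re-serves from the new range instead of
 * returning the remote call's result directly, and all names here are illustrative.
 */
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

final class SingleFlightRangeSketch {
  private final ReentrantLock lock = new ReentrantLock();
  private final Condition cond = lock.newCondition();
  private final AtomicBoolean updateGuard = new AtomicBoolean();
  private long locVal;
  private long upBound;

  /** Adds {@code l} to the value, reserving a new [start, end] range remotely when exhausted. */
  long addAndGet(long l, Callable<long[]> reserveRange) throws Exception {
    while (true) {
      lock.lock();
      try {
        if (locVal + l <= upBound) { // serve from the locally reserved range
          locVal += l;
          return locVal;
        }
      } finally {
        lock.unlock();
      }

      if (updateGuard.compareAndSet(false, true)) {
        try {
          long[] range = reserveRange.call(); // slow remote reservation, made outside the lock
          lock.lock();
          try {
            locVal = range[0];
            upBound = range[1];
          } finally {
            lock.unlock();
          }
        } finally {
          lock.lock();
          try {
            updateGuard.set(false);
            cond.signalAll(); // wake threads waiting for the refreshed range
          } finally {
            lock.unlock();
          }
        }
      } else {
        lock.lock();
        try {
          // Wait until the in-flight update finishes or the range can serve this increment.
          while (locVal + l > upBound && updateGuard.get()) cond.await(500, TimeUnit.MILLISECONDS);
        } finally {
          lock.unlock();
        }
      }
    }
  }
}
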