/**
     * @param nodeId Node ID.
     * @return Class loader ID and sequence number for the given node ID.
     */
    GridTuple2<GridUuid, Long> getClassLoaderId(UUID nodeId) {
      assert nodeId != null;

      assert Thread.holdsLock(mux);

      return loader().registeredClassLoaderId(nodeId);
    }
    /**
     * Checks if node is participating in deployment.
     *
     * @param nodeId Node ID to check.
     * @param ldrId Class loader ID.
     * @return {@code True} if node is participating in deployment.
     */
    boolean hasParticipant(UUID nodeId, GridUuid ldrId) {
      assert nodeId != null;
      assert ldrId != null;

      assert Thread.holdsLock(mux);

      return loader().hasRegisteredNode(nodeId, ldrId);
    }
  /** {@inheritDoc} */
  @Override
  public void loadCache(GridBiInClosure<K, V> c, @Nullable Object... args) throws GridException {
    ExecutorService exec =
        new ThreadPoolExecutor(
            threadsCnt,
            threadsCnt,
            0L,
            MILLISECONDS,
            new ArrayBlockingQueue<Runnable>(batchQueueSize),
            new BlockingRejectedExecutionHandler());

    Iterator<I> iter = inputIterator(args);

    Collection<I> buf = new ArrayList<>(batchSize);

    try {
      while (iter.hasNext()) {
        if (Thread.currentThread().isInterrupted()) {
          U.warn(log, "Working thread was interrupted while loading data.");

          break;
        }

        buf.add(iter.next());

        if (buf.size() == batchSize) {
          exec.submit(new Worker(c, buf, args));

          buf = new ArrayList<>(batchSize);
        }
      }

      if (!buf.isEmpty()) exec.submit(new Worker(c, buf, args));
    } catch (RejectedExecutionException ignored) {
      // Should never happen thanks to the custom blocking RejectedExecutionHandler.
      assert false : "RejectedExecutionException should not have been thrown.";
    } finally {
      exec.shutdown();

      try {
        exec.awaitTermination(Long.MAX_VALUE, MILLISECONDS);
      } catch (InterruptedException ignored) {
        U.warn(log, "Working thread was interrupted while waiting for put operations to complete.");

        Thread.currentThread().interrupt();
      }
    }
  }
  /** @throws Exception If failed. */
  public void testDisabledRest() throws Exception {
    restEnabled = false;

    final Grid g = startGrid("disabled-rest");

    try {
      Thread.sleep(2 * TOP_REFRESH_FREQ);

      // Since the client uses a round-robin load balancer, this will query every node.
      for (int i = 0; i < NODES_CNT + 1; i++)
        assertEquals(NODES_CNT + 1, client.compute().refreshTopology(false, false).size());

      final GridClientData data = client.data(PARTITIONED_CACHE_NAME);

      // Check rest-disabled node is unavailable.
      try {
        String affKey;

        do {
          affKey = UUID.randomUUID().toString();
        } while (!data.affinity(affKey).equals(g.localNode().id()));

        data.put(affKey, "asdf");

        assertEquals("asdf", cache(0, PARTITIONED_CACHE_NAME).get(affKey));
      } catch (GridServerUnreachableException e) {
        // Thrown for direct client-node connections.
        assertTrue(
            "Unexpected exception message: " + e.getMessage(),
            e.getMessage()
                .startsWith("No available endpoints to connect (is rest enabled for this node?)"));
      } catch (GridClientException e) {
        // Thrown for routed client-router-node connections.
        String msg = e.getMessage();

        assertTrue(
            "Unexpected exception message: " + msg,
            protocol() == GridClientProtocol.TCP
                ? msg.contains("No available endpoints to connect (is rest enabled for this node?)")
                : // TCP router.
                msg.startsWith(
                    "No available nodes on the router for destination node ID")); // HTTP router.
      }

      // Check rest-enabled nodes are available.
      String affKey;

      do {
        affKey = UUID.randomUUID().toString();
      } while (data.affinity(affKey).equals(g.localNode().id()));

      data.put(affKey, "fdsa");

      assertEquals("fdsa", cache(0, PARTITIONED_CACHE_NAME).get(affKey));
    } finally {
      restEnabled = true;

      G.stop(g.name(), true);
    }
  }
  /** @throws Exception If failed. */
  public void testAffinityPut() throws Exception {
    Thread.sleep(2 * TOP_REFRESH_FREQ);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    Map<UUID, Grid> gridsByLocNode = new HashMap<>(NODES_CNT);

    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);

    GridClientCompute compute = client.compute();

    for (int i = 0; i < NODES_CNT; i++) gridsByLocNode.put(grid(i).localNode().id(), grid(i));

    for (int i = 0; i < 100; i++) {
      String key = "key" + i;

      UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

      assertEquals("Affinity mismatch for key: " + key, primaryNodeId, partitioned.affinity(key));

      assertEquals(primaryNodeId, partitioned.affinity(key));

      // Must go to the primary node only. Since backup count is 0, the value must be
      // present on the primary node only.
      partitioned.put(key, "val" + key);

      for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
        Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(key);

        if (primaryNodeId.equals(entry.getKey())) assertEquals("val" + key, val);
        else assertNull(val);
      }
    }

    // Now check that we will see value in near cache in pinned mode.
    for (int i = 100; i < 200; i++) {
      String pinnedKey = "key" + i;

      UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, pinnedKey).id();

      UUID pinnedNodeId = F.first(F.view(gridsByLocNode.keySet(), F.notEqualTo(primaryNodeId)));

      GridClientNode node = compute.node(pinnedNodeId);

      partitioned.pinNodes(node).put(pinnedKey, "val" + pinnedKey);

      for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
        Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(pinnedKey);

        if (primaryNodeId.equals(entry.getKey()) || pinnedNodeId.equals(entry.getKey()))
          assertEquals("val" + pinnedKey, val);
        else assertNull(val);
      }
    }
  }
    /** {@inheritDoc} */
    @Override
    public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
      try {
        if (executor.isShutdown()) throw new RejectedExecutionException();
        else executor.getQueue().put(r);
      } catch (InterruptedException ignored) {
        U.warn(log, "Working thread was interrupted while loading data.");

        Thread.currentThread().interrupt();
      }
    }
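
    // Illustrative, self-contained sketch (plain JDK classes and a hypothetical
    // factory method, not part of the source above; requires java.util.concurrent.*):
    // the same backpressure pattern - a bounded work queue plus a rejection handler
    // that blocks the submitting thread until the queue has room, instead of
    // dropping or failing the task.
    private static ExecutorService newBlockingExecutor(int threads, int queueSize) {
      return new ThreadPoolExecutor(
          threads,
          threads,
          0L,
          TimeUnit.MILLISECONDS,
          new ArrayBlockingQueue<Runnable>(queueSize),
          new RejectedExecutionHandler() {
            @Override
            public void rejectedExecution(Runnable r, ThreadPoolExecutor pool) {
              try {
                if (pool.isShutdown())
                  throw new RejectedExecutionException("Executor is shut down.");

                // Block until the bounded queue frees a slot.
                pool.getQueue().put(r);
              } catch (InterruptedException e) {
                Thread.currentThread().interrupt();

                throw new RejectedExecutionException("Interrupted while enqueueing task.", e);
              }
            }
          });
    }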
  /**
   * Adds new participant to deployment.
   *
   * @param dep Shared deployment.
   * @param meta Request metadata.
   * @return {@code True} if participant was added.
   */
  private boolean addParticipant(SharedDeployment dep, GridDeploymentMetadata meta) {
    assert dep != null;
    assert meta != null;

    assert Thread.holdsLock(mux);

    if (!checkModeMatch(dep, meta)) return false;

    if (meta.participants() != null) {
      for (Map.Entry<UUID, GridTuple2<GridUuid, Long>> e : meta.participants().entrySet()) {
        dep.addParticipant(e.getKey(), e.getValue().get1(), e.getValue().get2());

        if (log.isDebugEnabled())
          log.debug(
              "Added new participant [nodeId="
                  + e.getKey()
                  + ", clsLdrId="
                  + e.getValue().get1()
                  + ", seqNum="
                  + e.getValue().get2()
                  + ']');
      }
    }

    if (dep.deployMode() == CONTINUOUS || meta.participants() == null) {
      if (!dep.addParticipant(meta.senderNodeId(), meta.classLoaderId(), meta.sequenceNumber())) {
        U.warn(
            log,
            "Failed to create shared mode deployment "
                + "(requested class loader was already undeployed, did sender node leave grid?) "
                + "[clsLdrId="
                + meta.classLoaderId()
                + ", senderNodeId="
                + meta.senderNodeId()
                + ']');

        return false;
      }

      if (log.isDebugEnabled())
        log.debug(
            "Added new participant [nodeId="
                + meta.senderNodeId()
                + ", clsLdrId="
                + meta.classLoaderId()
                + ", seqNum="
                + meta.sequenceNumber()
                + ']');
    }

    return true;
  }
    /** @param nodeId Node ID to remove. */
    void removeParticipant(UUID nodeId) {
      assert nodeId != null;

      assert Thread.holdsLock(mux);

      GridUuid ldrId = loader().unregister(nodeId);

      if (log.isDebugEnabled()) log.debug("Registering dead class loader ID: " + ldrId);

      synchronized (mux) {
        deadClsLdrs.add(ldrId);
      }
    }
  /**
   * @param meta Request metadata.
   * @return {@code True} if class loader is obsolete.
   */
  private boolean isDeadClassLoader(GridDeploymentMetadata meta) {
    assert Thread.holdsLock(mux);

    synchronized (mux) {
      if (deadClsLdrs.contains(meta.classLoaderId())) {
        if (log.isDebugEnabled()) log.debug("Ignoring request for obsolete class loader: " + meta);

        return true;
      }

      return false;
    }
  }
    /** Marks this deployment as removed and registers its class loader IDs as dead. */
    void onRemoved() {
      assert Thread.holdsLock(mux);

      removed = true;

      Collection<GridUuid> deadIds = loader().registeredClassLoaderIds();

      if (log.isDebugEnabled()) log.debug("Registering dead class loader IDs: " + deadIds);

      synchronized (mux) {
        deadClsLdrs.addAll(deadIds);
      }
    }
  /**
   * Executes example.
   *
   * @param args Command line arguments, none required.
   * @throws GridException If example execution failed.
   */
  public static void main(String[] args) throws Exception {
    try (Grid grid = GridGain.start("examples/config/example-cache.xml")) {
      System.out.println();
      System.out.println(">>> Events API example started.");

      // Listen to events happening on local node.
      localListen();

      // Listen to events happening on all grid nodes.
      remoteListen();

      // Wait for a while so that the callback is notified about the remaining puts.
      Thread.sleep(1000);
    }
  }
    /**
     * @param nodeId Grid node ID.
     * @param ldrId Class loader ID.
     * @param seqNum Sequence number for the class loader.
     * @return {@code True} if the participant was actually added.
     */
    boolean addParticipant(UUID nodeId, GridUuid ldrId, long seqNum) {
      assert nodeId != null;
      assert ldrId != null;

      assert Thread.holdsLock(mux);

      synchronized (mux) {
        if (!deadClsLdrs.contains(ldrId)) {
          loader().register(nodeId, ldrId, seqNum);

          return true;
        }

        return false;
      }
    }
  /**
   * Future mapping consists of two parts. First, we must determine the topology version this
   * future will map on. If locking is performed within a user transaction, we must continue to
   * map keys on the same topology version on which the transaction started. If the topology
   * version is undefined, we get the current topology future and wait until it completes, so the
   * topology is ready to use.
   *
   * <p>During the second part we map keys to primary nodes using the topology snapshot obtained
   * during the first part. Note that if a primary node leaves the grid, the future will fail and
   * the transaction will be rolled back.
   */
  void map() {
    // Obtain the topology version to use.
    GridDiscoveryTopologySnapshot snapshot =
        tx != null
            ? tx.topologySnapshot()
            : cctx.mvcc().lastExplicitLockTopologySnapshot(Thread.currentThread().getId());

    if (snapshot != null) {
      // Continue mapping on the same topology version as it was before.
      topSnapshot.compareAndSet(null, snapshot);

      map(keys);

      markInitialized();

      return;
    }

    // Must get topology snapshot and map on that version.
    mapOnTopology();
  }
  /**
   * @param cctx Registry.
   * @param keys Keys to lock.
   * @param tx Transaction.
   * @param read Read flag.
   * @param retval Flag to return value or not.
   * @param timeout Lock acquisition timeout.
   * @param filter Filter.
   */
  public GridNearLockFuture(
      GridCacheContext<K, V> cctx,
      Collection<? extends K> keys,
      @Nullable GridNearTxLocal<K, V> tx,
      boolean read,
      boolean retval,
      long timeout,
      GridPredicate<GridCacheEntry<K, V>>[] filter) {
    super(cctx.kernalContext(), CU.boolReducer());
    assert cctx != null;
    assert keys != null;

    this.cctx = cctx;
    this.keys = keys;
    this.tx = tx;
    this.read = read;
    this.retval = retval;
    this.timeout = timeout;
    this.filter = filter;

    threadId = tx == null ? Thread.currentThread().getId() : tx.threadId();

    lockVer = tx != null ? tx.xidVersion() : cctx.versions().next();

    futId = GridUuid.randomUuid();

    entries = new ArrayList<>(keys.size());

    log = U.logger(ctx, logRef, GridNearLockFuture.class);

    if (timeout > 0) {
      timeoutObj = new LockTimeoutObject();

      cctx.time().addTimeoutObject(timeoutObj);
    }

    valMap = new ConcurrentHashMap8<>(keys.size(), 1f);
  }
    /**
     * Called to record all undeployed classes.
     *
     * @param leftNodeId Left node ID.
     */
    void recordUndeployed(@Nullable UUID leftNodeId) {
      assert !Thread.holdsLock(mux);

      for (Map.Entry<String, Class<?>> depCls : deployedClassMap().entrySet()) {
        boolean isTask = isTask(depCls.getValue());

        String msg =
            (isTask ? "Task" : "Class")
                + " was undeployed in SHARED or CONTINUOUS mode: "
                + depCls.getValue();

        int type = isTask ? EVT_TASK_UNDEPLOYED : EVT_CLASS_UNDEPLOYED;

        if (ctx.event().isRecordable(type)) {
          GridDeploymentEvent evt = new GridDeploymentEvent();

          evt.nodeId(ctx.localNodeId());
          evt.message(msg);
          evt.type(type);
          evt.alias(depCls.getKey());

          ctx.event().record(evt);
        }

        if (log.isInfoEnabled()) log.info(msg);
      }

      if (isObsolete()) {
        // Resource cleanup.
        ctx.resource().onUndeployed(this);

        ctx.cache().onUndeployed(leftNodeId, loader());

        clearSerializationCaches();
      }
    }
    /** {@inheritDoc} */
    @Override
    public void onDeployed(Class<?> cls) {
      assert !Thread.holdsLock(mux);

      boolean isTask = isTask(cls);

      String msg =
          (isTask ? "Task" : "Class") + " was deployed in SHARED or CONTINUOUS mode: " + cls;

      int type = isTask ? EVT_TASK_DEPLOYED : EVT_CLASS_DEPLOYED;

      if (ctx.event().isRecordable(type)) {
        GridDeploymentEvent evt = new GridDeploymentEvent();

        evt.nodeId(ctx.localNodeId());
        evt.message(msg);
        evt.type(type);
        evt.alias(cls.getName());

        ctx.event().record(evt);
      }

      if (log.isInfoEnabled()) log.info(msg);
    }
  /** {@inheritDoc} */
  @Override
  public void unlockAll(
      Collection<? extends K> keys, GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty()) return;

    try {
      GridCacheVersion ver = null;

      Collection<GridRichNode> affNodes = null;

      int keyCnt = -1;

      Map<GridRichNode, GridNearUnlockRequest<K, V>> map = null;

      Collection<K> locKeys = new LinkedList<K>();

      GridCacheVersion obsoleteVer = ctx.versions().next();

      for (K key : keys) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekExx(key);

          if (entry == null || !ctx.isAll(entry.wrap(false), filter)) break; // While.

          try {
            GridCacheMvccCandidate<K> cand =
                entry.candidate(ctx.nodeId(), Thread.currentThread().getId());

            if (cand != null) {
              ver = cand.version();

              if (affNodes == null) {
                affNodes = CU.allNodes(ctx, cand.topologyVersion());

                keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());

                map = new HashMap<GridRichNode, GridNearUnlockRequest<K, V>>(affNodes.size());
              }

              // Send request to remove from remote nodes.
              GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

              GridNearUnlockRequest<K, V> req = map.get(primary);

              if (req == null) {
                map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                req.version(ver);
              }

              // Remove candidate from local node first.
              GridCacheMvccCandidate<K> rmv = entry.removeLock();

              if (rmv != null) {
                if (!rmv.reentry()) {
                  if (ver != null && !ver.equals(rmv.version()))
                    throw new GridException(
                        "Failed to unlock (if keys were locked separately, "
                            + "then they need to be unlocked separately): "
                            + keys);

                  if (!primary.isLocal()) {
                    assert req != null;

                    req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                  } else locKeys.add(key);

                  if (log.isDebugEnabled()) log.debug("Removed lock (will distribute): " + rmv);
                } else if (log.isDebugEnabled())
                  log.debug(
                      "Current thread still owns lock (or there are no other nodes)"
                          + " [lock="
                          + rmv
                          + ", curThreadId="
                          + Thread.currentThread().getId()
                          + ']');
              }

              // Try to evict near entry if it's dht-mapped locally.
              evictNearEntry(entry, obsoleteVer);
            }

            break;
          } catch (GridCacheEntryRemovedException ignore) {
            if (log.isDebugEnabled())
              log.debug("Attempted to unlock removed entry (will retry): " + entry);
          }
        }
      }

      if (ver == null) return;

      for (Map.Entry<GridRichNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
        GridRichNode n = mapping.getKey();

        GridDistributedUnlockRequest<K, V> req = mapping.getValue();

        if (n.isLocal()) dht.removeLocks(ctx.nodeId(), req.version(), locKeys, true);
        else if (!req.keyBytes().isEmpty())
          // We don't wait for reply to this message.
          ctx.io().send(n, req);
      }
    } catch (GridException ex) {
      U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
  }
  /** {@inheritDoc} */
  @SuppressWarnings({"unchecked"})
  @Override
  public void unlockAll(
      Collection<? extends K> keys, GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys == null || keys.isEmpty()) return;

    Collection<? extends GridNode> nodes = ctx.remoteNodes(keys);

    try {
      GridDistributedUnlockRequest<K, V> req = new GridDistributedUnlockRequest<K, V>(keys.size());

      for (K key : keys) {
        GridDistributedCacheEntry<K, V> entry = entryexx(key);

        if (!ctx.isAll(entry.wrap(false), filter)) continue;

        // Unlock local lock first.
        GridCacheMvccCandidate<K> rmv = entry.removeLock();

        if (rmv != null && !nodes.isEmpty()) {
          if (!rmv.reentry()) {
            req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);

            // We are assuming that lock ID is the same for all keys.
            req.version(rmv.version());

            if (log.isDebugEnabled()) log.debug("Removed lock (will distribute): " + rmv);
          } else {
            if (log.isDebugEnabled())
              log.debug(
                  "Locally unlocked lock reentry without distributing to other nodes [removed="
                      + rmv
                      + ", entry="
                      + entry
                      + ']');
          }
        } else {
          if (log.isDebugEnabled())
            log.debug(
                "Current thread still owns lock (or there are no other nodes) [lock="
                    + rmv
                    + ", curThreadId="
                    + Thread.currentThread().getId()
                    + ']');
        }
      }

      // Don't proceed if there are no keys to unlock.
      if (req.keyBytes().isEmpty()) {
        if (log.isDebugEnabled())
          log.debug("No keys to unlock locally (was it reentry unlock?): " + keys);

        return;
      }

      // We don't wait for reply to this message. Receiving side will have
      // to make sure that unlock requests don't come before lock requests.
      ctx.io().safeSend(nodes, req, null);
    } catch (GridException e) {
      U.error(log, "Failed to unlock keys: " + keys, e);
    }
  }
  /** {@inheritDoc} */
  @SuppressWarnings({"unchecked", "ThrowableInstanceNeverThrown"})
  @Override
  protected GridFuture<Boolean> lockAllAsync(
      Collection<? extends K> keys,
      long timeout,
      GridCacheTxLocalEx<K, V> tx,
      boolean isInvalidate,
      boolean isRead,
      boolean retval,
      GridCacheTxIsolation isolation,
      GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty()) return new GridFinishedFuture<Boolean>(ctx.kernalContext(), true);

    Collection<GridRichNode> nodes = ctx.remoteNodes(keys);

    final GridReplicatedLockFuture<K, V> fut =
        new GridReplicatedLockFuture<K, V>(ctx, keys, tx, this, nodes, timeout, filter);

    GridDistributedLockRequest<K, V> req =
        new GridDistributedLockRequest<K, V>(
            locNodeId,
            Thread.currentThread().getId(),
            fut.futureId(),
            fut.version(),
            tx != null,
            isRead,
            isolation,
            isInvalidate,
            timeout,
            keys.size());

    try {
      // Must add the future before readying locks.
      if (!ctx.mvcc().addFuture(fut))
        throw new IllegalStateException("Duplicate future ID: " + fut);

      boolean distribute = false;

      for (K key : keys) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = null;

          try {
            entry = entryexx(key);

            if (!ctx.isAll(entry.wrap(false), filter)) {
              if (log.isDebugEnabled())
                log.debug("Entry being locked did not pass filter (will not lock): " + entry);

              fut.onDone(false);

              return fut;
            }

            // Removed exception may be thrown here.
            GridCacheMvccCandidate<K> cand = fut.addEntry(entry);

            if (cand != null) {
              req.addKeyBytes(
                  key,
                  cand.reentry() ? null : entry.getOrMarshalKeyBytes(),
                  retval,
                  entry.localCandidates(fut.version()),
                  ctx);

              req.completedVersions(
                  ctx.tm().committedVersions(fut.version()),
                  ctx.tm().rolledbackVersions(fut.version()));

              distribute = !cand.reentry();
            } else if (fut.isDone()) return fut;

            break;
          } catch (GridCacheEntryRemovedException ignored) {
            if (log.isDebugEnabled())
              log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
          }
        }
      }

      // If nothing to distribute at this point,
      // then all locks are reentries.
      if (!distribute) fut.complete(true);

      if (nodes.isEmpty()) fut.readyLocks();

      // No reason to send request if all locks are locally re-entered,
      // or if timeout is negative and local locks could not be acquired.
      if (fut.isDone()) return fut;

      try {
        ctx.io()
            .safeSend(
                fut.nodes(),
                req,
                new P1<GridNode>() {
                  @Override
                  public boolean apply(GridNode node) {
                    fut.onNodeLeft(node.id());

                    return !fut.isDone();
                  }
                });
      } catch (GridException e) {
        U.error(
            log,
            "Failed to send lock request to node [nodes="
                + U.toShortString(nodes)
                + ", req="
                + req
                + ']',
            e);

        fut.onError(e);
      }

      return fut;
    } catch (GridException e) {
      Throwable err = new GridException("Failed to acquire asynchronous lock for keys: " + keys, e);

      // Clean-up.
      fut.onError(err);

      ctx.mvcc().removeFuture(fut);

      return fut;
    }
  }
    /** @return IDs of participating nodes. */
    Collection<UUID> getParticipantNodeIds() {
      assert Thread.holdsLock(mux);

      return loader().registeredNodeIds();
    }
  /**
   * Creates and caches new deployment.
   *
   * @param meta Deployment metadata.
   * @param isCache Whether or not to cache.
   * @return New deployment.
   */
  private SharedDeployment createNewDeployment(GridDeploymentMetadata meta, boolean isCache) {
    assert Thread.holdsLock(mux);

    assert meta.parentLoader() == null;

    GridUuid ldrId = GridUuid.randomUuid();

    GridDeploymentClassLoader clsLdr;

    if (meta.deploymentMode() == CONTINUOUS || meta.participants() == null) {
      // Create peer class loader.
      // Note that we are passing empty list for local P2P exclude, as it really
      // does not make sense with shared deployment.
      clsLdr =
          new GridDeploymentClassLoader(
              ldrId,
              meta.userVersion(),
              meta.deploymentMode(),
              false,
              ctx,
              ctxLdr,
              meta.classLoaderId(),
              meta.senderNodeId(),
              meta.sequenceNumber(),
              comm,
              ctx.config().getNetworkTimeout(),
              log,
              ctx.config().getPeerClassLoadingClassPathExclude(),
              ctx.config().getPeerClassLoadingMissedResourcesCacheSize(),
              meta.deploymentMode() == CONTINUOUS /* enable class byte cache in CONTINUOUS mode */);

      if (meta.participants() != null)
        for (Map.Entry<UUID, GridTuple2<GridUuid, Long>> e : meta.participants().entrySet())
          clsLdr.register(e.getKey(), e.getValue().get1(), e.getValue().get2());

      if (log.isDebugEnabled())
        log.debug(
            "Created class loader in CONTINUOUS mode or without participants "
                + "[ldr="
                + clsLdr
                + ", meta="
                + meta
                + ']');
    } else {
      assert meta.deploymentMode() == SHARED;

      // Create peer class loader.
      // Note that we are passing empty list for local P2P exclude, as it really
      // does not make sense with shared deployment.
      clsLdr =
          new GridDeploymentClassLoader(
              ldrId,
              meta.userVersion(),
              meta.deploymentMode(),
              false,
              ctx,
              ctxLdr,
              meta.participants(),
              comm,
              ctx.config().getNetworkTimeout(),
              log,
              ctx.config().getPeerClassLoadingClassPathExclude(),
              ctx.config().getPeerClassLoadingMissedResourcesCacheSize(),
              false);

      if (log.isDebugEnabled())
        log.debug(
            "Created classloader in SHARED mode with participants "
                + "[ldr="
                + clsLdr
                + ", meta="
                + meta
                + ']');
    }

    // Give this deployment its own class loader ID to emphasize that the ID is
    // unique to this shared deployment and is not the ID of the loader on the
    // sender node.
    SharedDeployment dep =
        new SharedDeployment(
            meta.deploymentMode(), clsLdr, ldrId, -1, meta.userVersion(), meta.alias());

    if (log.isDebugEnabled()) log.debug("Created new deployment: " + dep);

    if (isCache) {
      List<SharedDeployment> deps =
          F.addIfAbsent(cache, meta.userVersion(), new LinkedList<SharedDeployment>());

      assert deps != null;

      deps.add(dep);

      if (log.isDebugEnabled()) log.debug("Added deployment to cache: " + cache);
    }

    return dep;
  }
  /**
   * Removes obsolete deployments in case of redeploy.
   *
   * @param meta Request metadata.
   * @return Tuple of a proceed flag and the new shared deployment, if one was created.
   */
  private GridTuple2<Boolean, SharedDeployment> checkRedeploy(GridDeploymentMetadata meta) {
    assert Thread.holdsLock(mux);

    SharedDeployment newDep = null;

    for (List<SharedDeployment> deps : cache.values()) {
      for (SharedDeployment dep : deps) {
        if (!dep.isUndeployed() && !dep.isPendingUndeploy()) {
          long undeployTimeout = ctx.config().getNetworkTimeout();

          SharedDeployment doomed = null;

          // Only check deployments with no participants.
          if (!dep.hasParticipants()) {
            // In case of SHARED deployment it is possible to get here if
            // unmarshalling happens during undeploy. In this case, we
            // simply don't do anything.
            if (dep.deployMode() == CONTINUOUS) {
              if (dep.existingDeployedClass(meta.className()) != null) {
                // Change from shared deploy to shared undeploy or user version change.
                // Simply remove all deployments with no participating nodes.
                if (meta.deploymentMode() == SHARED
                    || !meta.userVersion().equals(dep.userVersion())) doomed = dep;
              }
            }
          }
          // If there are participants, we undeploy if class loader ID on some node changed.
          else if (dep.existingDeployedClass(meta.className()) != null) {
            GridTuple2<GridUuid, Long> ldr = dep.getClassLoaderId(meta.senderNodeId());

            if (ldr != null) {
              if (!ldr.get1().equals(meta.classLoaderId())) {
                // If deployed sequence number is less, then schedule for undeployment.
                if (ldr.get2() < meta.sequenceNumber()) {
                  if (log.isDebugEnabled())
                    log.debug(
                        "Received request for a class with newer sequence number "
                            + "(will schedule current class for undeployment) [newSeq="
                            + meta.sequenceNumber()
                            + ", oldSeq="
                            + ldr.get2()
                            + ", senderNodeId="
                            + meta.senderNodeId()
                            + ", newClsLdrId="
                            + meta.classLoaderId()
                            + ", oldClsLdrId="
                            + ldr.get1()
                            + ']');

                  doomed = dep;
                } else if (ldr.get2() > meta.sequenceNumber()) {
                  long time = System.currentTimeMillis() - dep.timestamp();

                  if (newDep == null && time < ctx.config().getNetworkTimeout()) {
                    // Set undeployTimeout, so the class will be scheduled
                    // for undeployment.
                    undeployTimeout = ctx.config().getNetworkTimeout() - time;

                    if (log.isDebugEnabled())
                      log.debug(
                          "Received execution request for a stale class (will deploy and "
                              + "schedule undeployment in "
                              + undeployTimeout
                              + "ms) "
                              + "[curSeq="
                              + ldr.get2()
                              + ", staleSeq="
                              + meta.sequenceNumber()
                              + ", cls="
                              + meta.className()
                              + ", senderNodeId="
                              + meta.senderNodeId()
                              + ", curLdrId="
                              + ldr.get1()
                              + ", staleLdrId="
                              + meta.classLoaderId()
                              + ']');

                    // We got the redeployed class before the old one.
                    // Simply create a temporary deployment for the sender node,
                    // and schedule undeploy for it.
                    newDep = createNewDeployment(meta, false);

                    doomed = newDep;
                  } else {
                    U.warn(
                        log,
                        "Received execution request for a class that has been redeployed "
                            + "(will ignore): "
                            + meta.alias());

                    if (log.isDebugEnabled())
                      log.debug(
                          "Received execution request for a class that has been redeployed "
                              + "(will ignore) [alias="
                              + meta.alias()
                              + ", dep="
                              + dep
                              + ']');

                    return F.t(false, null);
                  }
                } else {
                  U.error(
                      log,
                      "Sequence number does not correspond to class loader ID [seqNum="
                          + meta.sequenceNumber()
                          + ", dep="
                          + dep
                          + ']');

                  return F.t(false, null);
                }
              }
            }
          }

          if (doomed != null) {
            doomed.onUndeployScheduled();

            if (log.isDebugEnabled()) log.debug("Deployment was scheduled for undeploy: " + doomed);

            // Lifespan time.
            final long endTime = System.currentTimeMillis() + undeployTimeout;

            // Deployment to undeploy.
            final SharedDeployment undep = doomed;

            ctx.timeout()
                .addTimeoutObject(
                    new GridTimeoutObject() {
                      @Override
                      public GridUuid timeoutId() {
                        return undep.classLoaderId();
                      }

                      @Override
                      public long endTime() {
                        return endTime < 0 ? Long.MAX_VALUE : endTime;
                      }

                      @Override
                      public void onTimeout() {
                        boolean removed = false;

                        // Hot redeployment.
                        synchronized (mux) {
                          assert undep.isPendingUndeploy();

                          if (!undep.isUndeployed()) {
                            undep.undeploy();

                            undep.onRemoved();

                            removed = true;

                            Collection<SharedDeployment> deps = cache.get(undep.userVersion());

                            if (deps != null) {
                              for (Iterator<SharedDeployment> i = deps.iterator(); i.hasNext(); )
                                if (i.next() == undep) i.remove();

                              if (deps.isEmpty()) cache.remove(undep.userVersion());
                            }

                            if (log.isInfoEnabled())
                              log.info(
                                  "Undeployed class loader due to deployment mode change, "
                                      + "user version change, or hot redeployment: "
                                      + undep);
                          }
                        }

                        // Outside synchronization.
                        if (removed) undep.recordUndeployed(null);
                      }
                    });
          }
        }
      }
    }

    if (newDep != null) {
      List<SharedDeployment> list =
          F.addIfAbsent(cache, meta.userVersion(), F.<SharedDeployment>newList());

      assert list != null;

      list.add(newDep);
    }

    return F.t(true, newDep);
  }
    /** @return Registered class loader IDs. */
    Collection<GridUuid> getClassLoaderIds() {
      assert Thread.holdsLock(mux);

      return loader().registeredClassLoaderIds();
    }
    /** @return {@code True} if deployment has any node participants. */
    boolean hasParticipants() {
      assert Thread.holdsLock(mux);

      return loader().hasRegisteredNodes();
    }
/**
 * Future adapter.
 *
 * @author 2005-2011 Copyright (C) GridGain Systems, Inc.
 * @version 3.1.1c.19062011
 */
public class GridFutureAdapter<R> extends GridMetadataAwareAdapter
    implements GridFuture<R>, Externalizable {
  /** Synchronous notification flag. */
  private static final boolean SYNC_NOTIFY = U.isFutureNotificationSynchronous();

  /** Concurrent notification flag. */
  private static final boolean CONCUR_NOTIFY = U.isFutureNotificationConcurrent();

  /** Done flag. */
  private boolean done;

  /** Cancelled flag. */
  private boolean cancelled;

  /** Result. */
  @GridToStringInclude private R res;

  /** Error. */
  private Throwable err;

  /** Set to {@code false} on deserialization if an incomplete future was serialized. */
  private boolean valid = true;

  /** Asynchronous listeners. */
  private final Set<GridInClosure<? super GridFuture<R>>> lsnrs =
      new GridLeanSet<GridInClosure<? super GridFuture<R>>>();

  /** Creator thread. */
  private Thread thread = Thread.currentThread();

  /** Mutex. */
  private final Object mux = new Object();

  /** Context. */
  protected GridKernalContext ctx;

  /** Logger. */
  protected GridLogger log;

  /** Future start time. */
  protected final long startTime = System.currentTimeMillis();

  /** Synchronous notification flag. */
  private volatile boolean syncNotify = SYNC_NOTIFY;

  /** Concurrent notification flag. */
  private volatile boolean concurNotify = CONCUR_NOTIFY;

  /** Future end time. */
  private volatile long endTime;

  /** Watch. */
  protected GridStopwatch watch;

  /** Empty constructor required for {@link Externalizable}. */
  public GridFutureAdapter() {
    // No-op.
  }

  /** @param ctx Kernal context. */
  public GridFutureAdapter(GridKernalContext ctx) {
    assert ctx != null;

    this.ctx = ctx;

    log = ctx.log(getClass());
  }

  /** {@inheritDoc} */
  @Override
  public long startTime() {
    return startTime;
  }

  /** {@inheritDoc} */
  @Override
  public long duration() {
    long endTime = this.endTime;

    return endTime == 0 ? System.currentTimeMillis() - startTime : endTime - startTime;
  }

  /** {@inheritDoc} */
  @Override
  public boolean concurrentNotify() {
    return concurNotify;
  }

  /** {@inheritDoc} */
  @Override
  public void concurrentNotify(boolean concurNotify) {
    this.concurNotify = concurNotify;
  }

  /** {@inheritDoc} */
  @Override
  public boolean syncNotify() {
    return syncNotify;
  }

  /** {@inheritDoc} */
  @Override
  public void syncNotify(boolean syncNotify) {
    this.syncNotify = syncNotify;
  }

  /**
   * Adds a watch to this future.
   *
   * @param name Name of the watch.
   */
  public void addWatch(String name) {
    assert name != null;

    watch = W.stopwatch(name);
  }

  /**
   * Adds a watch to this future.
   *
   * @param watch Watch to add.
   */
  public void addWatch(GridStopwatch watch) {
    assert watch != null;

    this.watch = watch;
  }

  /** Checks that future is in usable state. */
  protected void checkValid() {
    if (!valid)
      throw new IllegalStateException(
          "Incomplete future was serialized and cannot " + "be used after deserialization.");
  }

  /** @return Valid flag. */
  protected boolean isValid() {
    return valid;
  }

  /**
   * Gets internal mutex.
   *
   * @return Internal mutex.
   */
  protected Object mutex() {
    checkValid();

    return mux;
  }

  /** @return Value of error. */
  protected Throwable error() {
    checkValid();

    synchronized (mux) {
      return err;
    }
  }

  /** @return Value of result. */
  protected R result() {
    checkValid();

    synchronized (mux) {
      return res;
    }
  }

  /** {@inheritDoc} */
  @Override
  public R call() throws Exception {
    return get();
  }

  /** {@inheritDoc} */
  @Override
  public R get(long timeout) throws GridException {
    return get(timeout, MILLISECONDS);
  }

  /** {@inheritDoc} */
  @Override
  public R get() throws GridException {
    checkValid();

    try {
      synchronized (mux) {
        while (!done && !cancelled) mux.wait();

        if (done) {
          if (err != null) throw U.cast(err);

          return res;
        }

        throw new GridFutureCancelledException("Future was cancelled: " + this);
      }
    } catch (InterruptedException e) {
      throw new GridInterruptedException(e);
    }
  }

  /** {@inheritDoc} */
  @Override
  public R get(long timeout, TimeUnit unit) throws GridException {
    A.ensure(timeout >= 0, "timeout cannot be negative: " + timeout);
    A.notNull(unit, "unit");

    checkValid();

    try {
      long now = System.currentTimeMillis();

      long end = timeout == 0 ? Long.MAX_VALUE : now + MILLISECONDS.convert(timeout, unit);

      // Account for overflow.
      if (end < 0) end = Long.MAX_VALUE;

      synchronized (mux) {
        while (!done && !cancelled && now < end) {
          mux.wait(end - now);

          if (!done) now = System.currentTimeMillis();
        }

        if (done) {
          if (err != null) throw U.cast(err);

          return res;
        }

        if (cancelled) throw new GridFutureCancelledException("Future was cancelled: " + this);

        throw new GridFutureTimeoutException(
            "Timeout was reached before computation completed [duration="
                + duration()
                + "ms, timeout="
                + unit.toMillis(timeout)
                + "ms]");
      }
    } catch (InterruptedException e) {
      throw new GridInterruptedException(
          "Got interrupted while waiting for future to complete [duration="
              + duration()
              + "ms, timeout="
              + unit.toMillis(timeout)
              + "ms]",
          e);
    }
  }

  /** {@inheritDoc} */
  @SuppressWarnings({"unchecked"})
  @Override
  public void listenAsync(@Nullable final GridInClosure<? super GridFuture<R>> lsnr) {
    if (lsnr != null) {
      checkValid();

      boolean done;

      synchronized (mux) {
        done = this.done;

        if (!done) lsnrs.add(lsnr);
      }

      if (done) {
        try {
          if (syncNotify) notifyListener(lsnr);
          else
            ctx.closure()
                .runLocalSafe(
                    new GPR() {
                      @Override
                      public void run() {
                        notifyListener(lsnr);
                      }
                    },
                    true);
        } catch (IllegalStateException ignore) {
          U.warn(
              null,
              "Future notification will not proceed because grid is stopped: " + ctx.gridName());
        }
      }
    }
  }

  /** {@inheritDoc} */
  @Override
  public void stopListenAsync(@Nullable GridInClosure<? super GridFuture<R>>... lsnr) {
    if (F.isEmpty(lsnr))
      synchronized (mux) {
        lsnrs.clear();
      }
    else
      synchronized (mux) {
        lsnrs.removeAll(F.asList(lsnr));
      }
  }

  /** Notifies all registered listeners. */
  private void notifyListeners() {
    final Collection<GridInClosure<? super GridFuture<R>>> tmp;

    synchronized (mux) {
      tmp = new ArrayList<GridInClosure<? super GridFuture<R>>>(lsnrs);
    }

    boolean concurNotify = this.concurNotify;
    boolean syncNotify = this.syncNotify;

    if (concurNotify) {
      for (final GridInClosure<? super GridFuture<R>> lsnr : tmp)
        ctx.closure()
            .runLocalSafe(
                new GPR() {
                  @Override
                  public void run() {
                    notifyListener(lsnr);
                  }
                },
                true);
    } else {
      // Always notify in the thread different from start thread.
      if (Thread.currentThread() == thread && !syncNotify) {
        ctx.closure()
            .runLocalSafe(
                new GPR() {
                  @Override
                  public void run() {
                    // Since concurrent notifications are off, we notify
                    // all listeners in one thread.
                    for (GridInClosure<? super GridFuture<R>> lsnr : tmp) notifyListener(lsnr);
                  }
                },
                true);
      } else {
        for (GridInClosure<? super GridFuture<R>> lsnr : tmp) notifyListener(lsnr);
      }
    }
  }

  /**
   * Notifies single listener.
   *
   * @param lsnr Listener.
   */
  private void notifyListener(GridInClosure<? super GridFuture<R>> lsnr) {
    assert lsnr != null;

    try {
      lsnr.apply(this);
    } catch (IllegalStateException ignore) {
      U.warn(
          null,
          "Failed to notify listener (grid is stopped) [grid="
              + ctx.gridName()
              + ", lsnr="
              + lsnr
              + ']');
    } catch (RuntimeException e) {
      U.error(log, "Failed to notify listener: " + lsnr, e);

      throw e;
    } catch (Error e) {
      U.error(log, "Failed to notify listener: " + lsnr, e);

      throw e;
    }
  }

  /**
   * Default no-op implementation that always returns {@code false}. Futures that do support
   * cancellation should override this method and invoke the {@link #onCancelled()} callback
   * explicitly if cancellation actually happened.
   */
  @Override
  public boolean cancel() throws GridException {
    checkValid();

    return false;
  }

  /** {@inheritDoc} */
  @Override
  public boolean isDone() {
    // Don't check for "valid" here, as "done" flag can be read
    // even in invalid state.
    synchronized (mux) {
      return done || cancelled;
    }
  }

  /** {@inheritDoc} */
  @Override
  public GridAbsPredicate predicate() {
    return new PA() {
      @Override
      public boolean apply() {
        return isDone();
      }
    };
  }

  /** {@inheritDoc} */
  @Override
  public boolean isCancelled() {
    checkValid();

    synchronized (mux) {
      return cancelled;
    }
  }

  /**
   * Callback to notify that future is finished with {@code null} result. This method must delegate
   * to {@link #onDone(Object, Throwable)} method.
   *
   * @return {@code True} if result was set by this call.
   */
  public final boolean onDone() {
    return onDone(null, null);
  }

  /**
   * Callback to notify that future is finished. This method must delegate to {@link #onDone(Object,
   * Throwable)} method.
   *
   * @param res Result.
   * @return {@code True} if result was set by this call.
   */
  public final boolean onDone(@Nullable R res) {
    return onDone(res, null);
  }

  /**
   * Callback to notify that future is finished. This method must delegate to {@link #onDone(Object,
   * Throwable)} method.
   *
   * @param err Error.
   * @return {@code True} if result was set by this call.
   */
  public final boolean onDone(@Nullable Throwable err) {
    return onDone(null, err);
  }

  /**
   * Callback to notify that future is finished. Note that if a non-{@code null} exception is
   * passed in, the result value will be ignored.
   *
   * @param res Optional result.
   * @param err Optional error.
   * @return {@code True} if result was set by this call.
   */
  public boolean onDone(@Nullable R res, @Nullable Throwable err) {
    checkValid();

    boolean notify = false;

    boolean gotDone = false;

    try {
      synchronized (mux) {
        if (!done) {
          gotDone = true;

          endTime = System.currentTimeMillis();

          this.res = res;
          this.err = err;

          done = true;

          notify = true;

          mux.notifyAll(); // Notify possibly waiting child classes.

          return true;
        }

        return false;
      }
    } finally {
      if (gotDone) {
        GridStopwatch w = watch;

        if (w != null) w.stop();
      }

      if (notify) notifyListeners();
    }
  }

  /**
   * Callback to notify that future is cancelled.
   *
   * @return {@code True} if cancel flag was set by this call.
   */
  public boolean onCancelled() {
    checkValid();

    synchronized (mux) {
      if (cancelled || done) return false;

      cancelled = true;

      mux.notifyAll(); // Notify possibly waiting child classes.
    }

    return true;
  }

  /** {@inheritDoc} */
  @Override
  public void writeExternal(ObjectOutput out) throws IOException {
    boolean done;
    boolean cancelled;
    Object res;
    Throwable err;
    boolean syncNotify;
    boolean concurNotify;

    synchronized (mux) {
      done = this.done;
      cancelled = this.cancelled;
      res = this.res;
      err = this.err;
      syncNotify = this.syncNotify;
      concurNotify = this.concurNotify;
    }

    out.writeBoolean(done);
    out.writeBoolean(syncNotify);
    out.writeBoolean(concurNotify);

    // Don't write any further if not done, as the deserialized future
    // will be invalid anyway.
    if (done) {
      out.writeBoolean(cancelled);
      out.writeObject(res);
      out.writeObject(err);
    }
  }

  /** {@inheritDoc} */
  @SuppressWarnings({"unchecked"})
  @Override
  public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    boolean done = in.readBoolean();

    syncNotify = in.readBoolean();
    concurNotify = in.readBoolean();

    if (!done) valid = false;
    else {
      boolean cancelled = in.readBoolean();

      R res = (R) in.readObject();

      Throwable err = (Throwable) in.readObject();

      synchronized (mux) {
        this.done = done;
        this.cancelled = cancelled;
        this.res = res;
        this.err = err;
      }
    }
  }

  /** {@inheritDoc} */
  @Override
  public String toString() {
    return S.toString(GridFutureAdapter.class, this);
  }
}
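
// Illustrative sketches only (hypothetical names, not part of the source above),
// based solely on the GridFutureAdapter API shown here.
//
// 1. Per the cancel() javadoc: a future that supports cancellation overrides
//    cancel() and reports an actual cancellation through onCancelled(), which
//    wakes up threads blocked in get().
class CancellableFutureSketch<R> extends GridFutureAdapter<R> {
  /** {@inheritDoc} */
  @Override
  public boolean cancel() throws GridException {
    checkValid();

    // Assumption: the subclass owns some interruptible work; stop it here,
    // then record the cancellation.
    return onCancelled();
  }
}

// 2. Registering an asynchronous completion listener: the closure receives the
//    completed future, and get() then returns the result or rethrows the error.
class ListenerSketch {
  static void listen(GridFutureAdapter<String> fut) {
    fut.listenAsync(
        new GridInClosure<GridFuture<String>>() {
          @Override
          public void apply(GridFuture<String> f) {
            try {
              System.out.println("Future completed with: " + f.get());
            } catch (GridException e) {
              System.err.println("Future failed: " + e.getMessage());
            }
          }
        });
  }
}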
    /**
     * Gets removed flag.
     *
     * @return {@code True} if this deployment has been removed.
     */
    boolean isRemoved() {
      assert Thread.holdsLock(mux);

      return removed;
    }