/** {@inheritDoc} */
  @Override
  public Map<K, V> peekAll(
      @Nullable Collection<? extends K> keys,
      @Nullable GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys == null || keys.isEmpty()) return emptyMap();

    final Collection<K> skipped = new GridLeanSet<K>();

    final Map<K, V> map = peekAll0(keys, filter, skipped);

    if (map.size() + skipped.size() != keys.size()) {
      map.putAll(
          dht.peekAll(
              F.view(
                  keys,
                  new P1<K>() {
                    @Override
                    public boolean apply(K k) {
                      return !map.containsKey(k) && !skipped.contains(k);
                    }
                  }),
              filter));
    }

    return map;
  }
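// Hedged sketch (not from the GridGain sources): peekAll() above composes a
// near-cache peek with a DHT fallback, consulting the DHT only for keys that were
// neither found nor skipped. The generic stand-in below shows the same shape with
// plain JDK types; the Function-based "near" and "dht" lookups are hypothetical.
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

class PeekThroughSketch {
  static <K, V> Map<K, V> peekThrough(
      Collection<K> keys,
      Function<Collection<K>, Map<K, V>> near,
      Function<Collection<K>, Map<K, V>> dht) {
    Map<K, V> map = new HashMap<>(near.apply(keys));

    Collection<K> misses = new ArrayList<>();

    for (K k : keys)
      if (!map.containsKey(k)) misses.add(k);

    // Consult the slower tier only for the misses.
    if (!misses.isEmpty()) map.putAll(dht.apply(misses));

    return map;
  }
}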
    /** {@inheritDoc} */
    @Override
    protected Collection<? extends GridComputeJob> split(int gridSize, Object arg)
        throws GridException {
      Collection<GridComputeJobAdapter> jobs = new ArrayList<>(gridSize);

      this.gridSize = gridSize;

      final String locNodeId = grid.localNode().id().toString();

      for (int i = 0; i < gridSize; i++) {
        jobs.add(
            new GridComputeJobAdapter() {
              @SuppressWarnings("OverlyStrongTypeCast")
              @Override
              public Object execute() {
                try {
                  Thread.sleep(1000);
                } catch (InterruptedException ignored) {
                  Thread.currentThread().interrupt();
                }

                return new GridBiTuple<>(locNodeId, 1);
              }
            });
      }

      return jobs;
    }
  /** @throws Exception If failed. */
  public void testClientAffinity() throws Exception {
    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);

    Collection<Object> keys = new ArrayList<>();

    keys.addAll(Arrays.asList(Boolean.TRUE, Boolean.FALSE, 1, Integer.MAX_VALUE));

    Random rnd = new Random();
    StringBuilder sb = new StringBuilder();

    // Generate some random strings.
    for (int i = 0; i < 100; i++) {
      sb.setLength(0);

      for (int j = 0; j < 255; j++)
        // Only printable ASCII symbols for test.
        sb.append((char) (rnd.nextInt(0x7f - 0x20) + 0x20));

      keys.add(sb.toString());
    }

    // Generate some more keys to achieve better coverage.
    for (int i = 0; i < 100; i++) keys.add(UUID.randomUUID());

    for (Object key : keys) {
      UUID nodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

      UUID clientNodeId = partitioned.affinity(key);

      assertEquals(
          "Invalid affinity mapping for REST response for key: " + key, nodeId, clientNodeId);
    }
  }
  /** @throws Exception If failed. */
  public void testCreateFileColocated() throws Exception {
    GridGgfsPath path = new GridGgfsPath("/colocated");

    UUID uuid = UUID.randomUUID();

    GridUuid affKey;

    long idx = 0;

    while (true) {
      affKey = new GridUuid(uuid, idx);

      if (grid(0).mapKeyToNode(DATA_CACHE_NAME, affKey).id().equals(grid(0).localNode().id()))
        break;

      idx++;
    }

    try (GridGgfsOutputStream out = fs.create(path, 1024, true, affKey, 0, 1024, null)) {
      // Write 15M, should be enough to test distribution.
      for (int i = 0; i < 15; i++) out.write(new byte[1024 * 1024]);
    }

    GridGgfsFile info = fs.info(path);

    Collection<GridGgfsBlockLocation> affNodes = fs.affinity(path, 0, info.length());

    assertEquals(1, affNodes.size());
    Collection<UUID> nodeIds = F.first(affNodes).nodeIds();

    assertEquals(1, nodeIds.size());
    assertEquals(grid(0).localNode().id(), F.first(nodeIds));
  }
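// Hedged sketch (not from the test sources): the while-loop above probes
// GridUuid(uuid, idx) for increasing idx until the key maps to the local node.
// The generic helper below captures that probing pattern; keyGen and mapKeyToNode
// are hypothetical stand-ins for the affinity machinery.
import java.util.UUID;
import java.util.function.Function;

class AffinityKeyProbe {
  static <K> K findKeyMappedTo(
      UUID targetNodeId, Function<Long, K> keyGen, Function<K, UUID> mapKeyToNode) {
    for (long idx = 0; ; idx++) {
      K key = keyGen.apply(idx);

      // The mapping is deterministic for a fixed topology, so the probe
      // terminates once a matching key exists in the generated sequence.
      if (mapKeyToNode.apply(key).equals(targetNodeId)) return key;
    }
  }
}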
  /** @throws Exception If failed. */
  public void testProjectionRun() throws Exception {
    GridClientCompute dflt = client.compute();

    Collection<? extends GridClientNode> nodes = dflt.nodes();

    assertEquals(NODES_CNT, nodes.size());

    for (int i = 0; i < NODES_CNT; i++) {
      Grid g = grid(i);

      assert g != null;

      GridClientNode clientNode = dflt.node(g.localNode().id());

      assertNotNull("Client node for " + g.localNode().id() + " was not found", clientNode);

      GridClientCompute prj = dflt.projection(clientNode);

      String res = prj.execute(TestTask.class.getName(), null);

      assertNotNull(res);

      assertEquals(g.localNode().id().toString(), res);
    }
  }
  /**
   * Checks whether the provided path must be excluded from eviction.
   *
   * @param path Path.
   * @return {@code True} if the file related to this path must be excluded from eviction.
   * @throws GridException In case of faulty patterns.
   */
  public boolean exclude(GridGgfsPath path) throws GridException {
    assert path != null;

    Collection<Pattern> excludePatterns0;

    if (excludeRecompile.compareAndSet(true, false)) {
      // Recompile.
      Collection<String> excludePaths0 = excludePaths;

      if (excludePaths0 != null) {
        excludePatterns0 = new HashSet<>(excludePaths0.size(), 1.0f);

        for (String excludePath : excludePaths0) {
          try {
            excludePatterns0.add(Pattern.compile(excludePath));
          } catch (PatternSyntaxException e) {
            throw new GridException("Invalid regex pattern: " + excludePath, e);
          }
        }

        excludePatterns = excludePatterns0;
      } else excludePatterns0 = excludePatterns = null;
    } else excludePatterns0 = excludePatterns;

    if (excludePatterns0 != null) {
      String pathStr = path.toString();

      for (Pattern pattern : excludePatterns0) {
        if (pattern.matcher(pathStr).matches()) return true;
      }
    }

    return false;
  }
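// Hedged standalone sketch of the recompile-on-flag idea used by exclude() above:
// patterns are compiled once per invalidation of an AtomicBoolean, then reused on
// the hot path. The ExcludePathMatcher name and IllegalArgumentException are
// hypothetical; the original throws GridException instead.
import java.util.ArrayList;
import java.util.Collection;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;

class ExcludePathMatcher {
  private final AtomicBoolean recompile = new AtomicBoolean(true);
  private volatile Collection<String> excludePaths;
  private volatile Collection<Pattern> patterns;

  ExcludePathMatcher(Collection<String> excludePaths) {
    this.excludePaths = excludePaths;
  }

  boolean exclude(String path) {
    Collection<Pattern> p = patterns;

    // Recompile at most once per invalidation, mirroring the compareAndSet above.
    if (recompile.compareAndSet(true, false)) {
      Collection<String> paths = excludePaths;
      Collection<Pattern> compiled = new ArrayList<>();

      if (paths != null)
        for (String s : paths) {
          try {
            compiled.add(Pattern.compile(s));
          } catch (PatternSyntaxException e) {
            throw new IllegalArgumentException("Invalid regex pattern: " + s, e);
          }
        }

      patterns = p = compiled;
    }

    if (p != null)
      for (Pattern pattern : p)
        if (pattern.matcher(path).matches()) return true;

    return false;
  }
}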
  /** {@inheritDoc} */
  @Override
  public Map<K, V> peekAll(
      @Nullable Collection<? extends K> keys, @Nullable Collection<GridCachePeekMode> modes)
      throws GridException {
    if (keys == null || keys.isEmpty()) return emptyMap();

    final Collection<K> skipped = new GridLeanSet<K>();

    final Map<K, V> map =
        !modes.contains(PARTITIONED_ONLY)
            ? peekAll0(keys, modes, ctx.tm().localTxx(), skipped)
            : new GridLeanMap<K, V>(0);

    if (map.size() != keys.size() && !modes.contains(NEAR_ONLY)) {
      map.putAll(
          dht.peekAll(
              F.view(
                  keys,
                  new P1<K>() {
                    @Override
                    public boolean apply(K k) {
                      return !map.containsKey(k) && !skipped.contains(k);
                    }
                  }),
              modes));
    }

    return map;
  }
  /** @throws Exception If failed. */
  public void testNames() throws Exception {
    assertEquals("value1", svc.cacheable(1));

    Collection<String> names = mgr.getCacheNames();

    assertEquals(names.toString(), 2, names.size());
  }
    /** {@inheritDoc} */
    @Override
    protected Collection<? extends GridComputeJob> split(int gridSize, String arg)
        throws GridException {
      Collection<GridComputeJobAdapter> jobs = new ArrayList<>(jobCnt);

      for (int i = 0; i < jobCnt; i++) jobs.add(new TestJob());

      return jobs;
    }
  /** {@inheritDoc} */
  @Override
  public Collection<UUID> nodeIds() {
    Collection<UUID> ids = new GridLeanSet<UUID>();

    ids.add(cctx.nodeId());
    ids.addAll(mappings.keySet());

    return ids;
  }
  /**
   * Finds the first positive integer that is not present in the started set.
   *
   * @param started Already started indices.
   * @return First positive integer not present in the started set.
   */
  private int nextIndex(Collection<Integer> started) {
    assert started.contains(0);

    for (int i = 1; i < 10000; i++) {
      if (!started.contains(i)) return i;
    }

    throw new IllegalStateException();
  }
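// Hedged usage sketch for nextIndex() above, assuming the 0-index grid is always
// running: with indices {0, 1, 2, 4} started, the first free slot is 3.
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;

class NextIndexDemo {
  public static void main(String[] args) {
    Collection<Integer> started = new HashSet<>(Arrays.asList(0, 1, 2, 4));

    // Scans upward from 1 and returns the first index not yet started.
    System.out.println(nextIndex(started)); // Prints 3.
  }

  static int nextIndex(Collection<Integer> started) {
    assert started.contains(0);

    for (int i = 1; i < 10000; i++)
      if (!started.contains(i)) return i;

    throw new IllegalStateException();
  }
}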
  /**
   * Removes locks regardless of whether they are owned or not for given version and keys.
   *
   * @param ver Lock version.
   * @param keys Keys.
   */
  @SuppressWarnings({"unchecked"})
  public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty()) return;

    Collection<GridRichNode> nodes = ctx.remoteNodes(keys);

    try {
      // Send request to remove from remote nodes.
      GridDistributedUnlockRequest<K, V> req = new GridDistributedUnlockRequest<K, V>(keys.size());

      req.version(ver);

      for (K key : keys) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekexx(key);

          try {
            if (entry != null) {
              GridCacheMvccCandidate<K> cand = entry.candidate(ver);

              if (cand != null) {
                // Remove candidate from local node first.
                if (entry.removeLock(cand.version())) {
                  // If there is only local node in this lock's topology,
                  // then there is no reason to distribute the request.
                  if (nodes.isEmpty()) continue;

                  req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                }
              }
            }

            break;
          } catch (GridCacheEntryRemovedException ignored) {
            if (log.isDebugEnabled())
              log.debug(
                  "Attempted to remove lock from removed entry (will retry) [rmvVer="
                      + ver
                      + ", entry="
                      + entry
                      + ']');
          }
        }
      }

      if (nodes.isEmpty()) return;

      req.completedVersions(ctx.tm().committedVersions(ver), ctx.tm().rolledbackVersions(ver));

      if (!req.keyBytes().isEmpty())
        // We don't wait for reply to this message.
        ctx.io().safeSend(nodes, req, null);
    } catch (GridException ex) {
      U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
  }
  /** @throws Exception If failed. */
  public void testTopologyListener() throws Exception {
    final Collection<UUID> added = new ArrayList<>(1);
    final Collection<UUID> rmvd = new ArrayList<>(1);

    final CountDownLatch addedLatch = new CountDownLatch(1);
    final CountDownLatch rmvLatch = new CountDownLatch(1);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    GridClientTopologyListener lsnr =
        new GridClientTopologyListener() {
          @Override
          public void onNodeAdded(GridClientNode node) {
            added.add(node.nodeId());

            addedLatch.countDown();
          }

          @Override
          public void onNodeRemoved(GridClientNode node) {
            rmvd.add(node.nodeId());

            rmvLatch.countDown();
          }
        };

    client.addTopologyListener(lsnr);

    try {
      Grid g = startGrid(NODES_CNT + 1);

      UUID id = g.localNode().id();

      assertTrue(addedLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS));

      assertEquals(1, added.size());
      assertEquals(id, F.first(added));

      stopGrid(NODES_CNT + 1);

      assertTrue(rmvLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS));

      assertEquals(1, rmvd.size());
      assertEquals(id, F.first(rmvd));
    } finally {
      client.removeTopologyListener(lsnr);

      stopGrid(NODES_CNT + 1);
    }
  }
  /**
   * @param rmtReducer Optional reducer.
   * @param rmtTransform Optional transformer.
   * @param args Arguments.
   * @return Future.
   */
  @SuppressWarnings("IfMayBeConditional")
  private <R> GridCacheQueryFuture<R> execute(
      @Nullable GridReducer<T, R> rmtReducer,
      @Nullable GridClosure<T, R> rmtTransform,
      @Nullable Object... args) {
    Collection<GridNode> nodes = nodes();

    cctx.checkSecurity(GridSecurityPermission.CACHE_READ);

    if (F.isEmpty(nodes))
      return new GridCacheQueryErrorFuture<>(
          cctx.kernalContext(),
          new GridEmptyProjectionException("There are no data nodes for cache: " + cctx.namexx()));

    if (log.isDebugEnabled())
      log.debug("Executing query [query=" + this + ", nodes=" + nodes + ']');

    if (cctx.deploymentEnabled()) {
      try {
        cctx.deploy().registerClasses(filter, rmtReducer, rmtTransform);
        cctx.deploy().registerClasses(args);
      } catch (GridException e) {
        return new GridCacheQueryErrorFuture<>(cctx.kernalContext(), e);
      }
    }

    if (subjId == null) subjId = cctx.localNodeId();

    taskHash = cctx.kernalContext().job().currentTaskNameHash();

    GridCacheQueryBean bean =
        new GridCacheQueryBean(
            this,
            (GridReducer<Object, Object>) rmtReducer,
            (GridClosure<Object, Object>) rmtTransform,
            args);

    GridCacheQueryManager qryMgr = cctx.queries();

    boolean loc = nodes.size() == 1 && F.first(nodes).id().equals(cctx.localNodeId());

    if (type == SQL_FIELDS)
      return (GridCacheQueryFuture<R>)
          (loc ? qryMgr.queryFieldsLocal(bean) : qryMgr.queryFieldsDistributed(bean, nodes));
    else
      return (GridCacheQueryFuture<R>)
          (loc ? qryMgr.queryLocal(bean) : qryMgr.queryDistributed(bean, nodes));
  }
  /** {@inheritDoc} */
  @Override
  public void loadCache(GridBiInClosure<K, V> c, @Nullable Object... args) throws GridException {
    ExecutorService exec =
        new ThreadPoolExecutor(
            threadsCnt,
            threadsCnt,
            0L,
            MILLISECONDS,
            new ArrayBlockingQueue<Runnable>(batchQueueSize),
            new BlockingRejectedExecutionHandler());

    Iterator<I> iter = inputIterator(args);

    Collection<I> buf = new ArrayList<>(batchSize);

    try {
      while (iter.hasNext()) {
        if (Thread.currentThread().isInterrupted()) {
          U.warn(log, "Working thread was interrupted while loading data.");

          break;
        }

        buf.add(iter.next());

        if (buf.size() == batchSize) {
          exec.submit(new Worker(c, buf, args));

          buf = new ArrayList<>(batchSize);
        }
      }

      if (!buf.isEmpty()) exec.submit(new Worker(c, buf, args));
    } catch (RejectedExecutionException ignored) {
      // Because of custom RejectedExecutionHandler.
      assert false : "RejectedExecutionException was thrown while it shouldn't.";
    } finally {
      exec.shutdown();

      try {
        exec.awaitTermination(Long.MAX_VALUE, MILLISECONDS);
      } catch (InterruptedException ignored) {
        U.warn(log, "Working thread was interrupted while waiting for put operations to complete.");

        Thread.currentThread().interrupt();
      }
    }
  }
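// The loadCache() above relies on BlockingRejectedExecutionHandler so that a full
// ArrayBlockingQueue applies back-pressure to the producer instead of throwing.
// Below is a minimal sketch of such a handler, assuming the GridGain class
// behaves similarly (the actual implementation may differ):
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;

class BlockingRejectedExecutionHandler implements RejectedExecutionHandler {
  @Override
  public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
    try {
      // Block the submitting thread until the work queue has room again.
      if (!executor.isShutdown()) executor.getQueue().put(r);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }
}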
  /** {@inheritDoc} */
  @Override
  public V peek(K key, @Nullable Collection<GridCachePeekMode> modes) throws GridException {
    V val = null;

    if (!modes.contains(PARTITIONED_ONLY)) {
      try {
        val = peek0(true, key, modes, ctx.tm().txx());
      } catch (GridCacheFilterFailedException ignored) {
        if (log.isDebugEnabled()) log.debug("Filter validation failed for key: " + key);

        return null;
      }
    }

    return val == null && !modes.contains(NEAR_ONLY) ? dht.peek(key, modes) : val;
  }
  /** {@inheritDoc} */
  @Override
  public Map<GridRichNode, Collection<K>> mapKeysToNodes(Collection<? extends K> keys) {
    Map<GridRichNode, Collection<K>> map = new HashMap<GridRichNode, Collection<K>>();

    for (K key : keys) {
      Collection<GridRichNode> nodes = ctx.allNodes(key);

      for (GridRichNode node : nodes) {
        Collection<K> keyCol = map.get(node);

        if (keyCol == null) map.put(node, keyCol = new LinkedList<K>());

        keyCol.add(key);
      }
    }

    return map;
  }
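// Hedged generic sketch of the grouping above: append each key to the bucket of
// every node its affinity function returns. Java 8 computeIfAbsent replaces the
// explicit null check; the affinity Function is a hypothetical stand-in for
// ctx.allNodes(key).
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

class KeyToNodeGrouping {
  static <N, K> Map<N, Collection<K>> mapKeysToNodes(
      Collection<? extends K> keys, Function<K, Collection<N>> affinity) {
    Map<N, Collection<K>> map = new HashMap<>();

    for (K key : keys)
      for (N node : affinity.apply(key))
        map.computeIfAbsent(node, n -> new ArrayList<>()).add(key);

    return map;
  }
}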
  /** @throws Exception If failed. */
  public void testAffinityExecute() throws Exception {
    GridClientCompute dflt = client.compute();

    GridClientData data = client.data(PARTITIONED_CACHE_NAME);

    Collection<? extends GridClientNode> nodes = dflt.nodes();

    assertEquals(NODES_CNT, nodes.size());

    for (int i = 0; i < NODES_CNT; i++) {
      Grid g = grid(i);

      assert g != null;

      int affinityKey = -1;

      for (int key = 0; key < 10000; key++) {
        if (g.localNode().id().equals(data.affinity(key))) {
          affinityKey = key;

          break;
        }
      }

      if (affinityKey == -1)
        throw new Exception("Unable to found key for which node is primary: " + g.localNode().id());

      GridClientNode clientNode = dflt.node(g.localNode().id());

      assertNotNull("Client node for " + g.localNode().id() + " was not found", clientNode);

      String res =
          dflt.affinityExecute(TestTask.class.getName(), PARTITIONED_CACHE_NAME, affinityKey, null);

      assertNotNull(res);

      assertEquals(g.localNode().id().toString(), res);
    }
  }
  /**
   * @param cctx Registry.
   * @param keys Keys to lock.
   * @param tx Transaction.
   * @param read Read flag.
   * @param retval Flag to return value or not.
   * @param timeout Lock acquisition timeout.
   * @param filter Filter.
   */
  public GridNearLockFuture(
      GridCacheContext<K, V> cctx,
      Collection<? extends K> keys,
      @Nullable GridNearTxLocal<K, V> tx,
      boolean read,
      boolean retval,
      long timeout,
      GridPredicate<GridCacheEntry<K, V>>[] filter) {
    super(cctx.kernalContext(), CU.boolReducer());
    assert cctx != null;
    assert keys != null;

    this.cctx = cctx;
    this.keys = keys;
    this.tx = tx;
    this.read = read;
    this.retval = retval;
    this.timeout = timeout;
    this.filter = filter;

    threadId = tx == null ? Thread.currentThread().getId() : tx.threadId();

    lockVer = tx != null ? tx.xidVersion() : cctx.versions().next();

    futId = GridUuid.randomUuid();

    entries = new ArrayList<>(keys.size());

    log = U.logger(ctx, logRef, GridNearLockFuture.class);

    if (timeout > 0) {
      timeoutObj = new LockTimeoutObject();

      cctx.time().addTimeoutObject(timeoutObj);
    }

    valMap = new ConcurrentHashMap8<>(keys.size(), 1f);
  }
  /** {@inheritDoc} */
  @Override
  public GridFuture<?> addData(Collection<? extends Map.Entry<K, V>> entries) {
    A.notEmpty(entries, "entries");

    enterBusy();

    try {
      GridFutureAdapter<Object> resFut = new GridFutureAdapter<>(ctx);

      activeFuts.add(resFut);

      resFut.listenAsync(rmvActiveFut);

      Collection<K> keys = new GridConcurrentHashSet<>(entries.size(), 1.0f, 16);

      for (Map.Entry<K, V> entry : entries) keys.add(entry.getKey());

      load0(entries, resFut, keys, 0);

      return resFut;
    } finally {
      leaveBusy();
    }
  }
    /** {@inheritDoc} */
    @Override
    public Class<?> deployClass() {
      if (cls == null) {
        Class<?> cls0 = null;

        if (depCls != null) cls0 = depCls;
        else {
          for (Iterator<Object> it = objs.iterator();
              (cls0 == null || U.isJdk(cls0)) && it.hasNext(); ) {
            Object o = it.next();

            if (o != null) cls0 = U.detectClass(o);
          }

          if (cls0 == null || U.isJdk(cls0)) cls0 = GridDataLoaderImpl.class;
        }

        assert cls0 != null : "Failed to detect deploy class [objs=" + objs + ']';

        cls = cls0;
      }

      return cls;
    }
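// Hedged standalone approximation of the detection loop in deployClass() above:
// walk the objects and pick the first class that is not JDK-provided, falling
// back to a caller-supplied default. A null ClassLoader is used as a rough
// stand-in for U.isJdk(); the original semantics may differ in edge cases.
class DeployClassDetector {
  static Class<?> detectDeployClass(Iterable<Object> objs, Class<?> fallback) {
    Class<?> first = null;

    for (Object o : objs) {
      if (o == null) continue;

      Class<?> c = o.getClass();

      // Bootstrap-loaded classes (ClassLoader == null) are treated as JDK classes.
      if (c.getClassLoader() != null) return c;

      if (first == null) first = c;
    }

    return fallback;
  }
}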
  /** {@inheritDoc} */
  @Override
  public void execute(@Nullable GridProjection prj) throws GridException {
    if (cb == null)
      throw new IllegalStateException("Mandatory local callback is not set for the query: " + this);

    if (prj == null) prj = ctx.grid();

    prj = prj.forCache(ctx.name());

    if (prj.nodes().isEmpty())
      throw new GridTopologyException("Failed to execute query (projection is empty): " + this);

    GridCacheMode mode = ctx.config().getCacheMode();

    if (mode == LOCAL || mode == REPLICATED) {
      Collection<GridNode> nodes = prj.nodes();

      GridNode node = nodes.contains(ctx.localNode()) ? ctx.localNode() : F.rand(nodes);

      assert node != null;

      if (nodes.size() > 1 && !ctx.cache().isDrSystemCache()) {
        if (node.id().equals(ctx.localNodeId()))
          U.warn(
              log,
              "Continuous query for "
                  + mode
                  + " cache can be run only on local node. "
                  + "Will execute query locally: "
                  + this);
        else
          U.warn(
              log,
              "Continuous query for "
                  + mode
                  + " cache can be run only on single node. "
                  + "Will execute query on remote node [qry="
                  + this
                  + ", node="
                  + node
                  + ']');
      }

      prj = prj.forNode(node);
    }

    closeLock.lock();

    try {
      if (routineId != null)
        throw new IllegalStateException("Continuous query can't be executed twice.");

      guard.block();

      GridContinuousHandler hnd =
          new GridCacheContinuousQueryHandler<>(ctx.name(), topic, cb, filter, prjPred);

      routineId =
          ctx.kernalContext()
              .continuous()
              .startRoutine(hnd, bufSize, timeInterval, autoUnsubscribe, prj.predicate())
              .get();
    } finally {
      closeLock.unlock();
    }
  }
  /**
   * Removes locks regardless of whether they are owned or not for given version and keys.
   *
   * @param ver Lock version.
   * @param keys Keys.
   */
  @SuppressWarnings({"unchecked"})
  public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty()) return;

    try {
      Collection<GridRichNode> affNodes = null;

      int keyCnt = -1;

      Map<GridNode, GridNearUnlockRequest<K, V>> map = null;

      for (K key : keys) {
        // Send request to remove from remote nodes.
        GridNearUnlockRequest<K, V> req = null;

        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekExx(key);

          try {
            if (entry != null) {
              GridCacheMvccCandidate<K> cand = entry.candidate(ver);

              if (cand != null) {
                if (affNodes == null) {
                  affNodes = CU.allNodes(ctx, cand.topologyVersion());

                  keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());

                  map = new HashMap<GridNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                }

                GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                if (!primary.isLocal()) {
                  req = map.get(primary);

                  if (req == null) {
                    map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                    req.version(ver);
                  }
                }

                // Remove candidate from local node first.
                if (entry.removeLock(cand.version())) {
                  if (primary.isLocal()) {
                    dht.removeLocks(primary.id(), ver, F.asList(key), true);

                    assert req == null;

                    continue;
                  }

                  req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                }
              }
            }

            break;
          } catch (GridCacheEntryRemovedException ignored) {
            if (log.isDebugEnabled())
              log.debug(
                  "Attempted to remove lock from removed entry (will retry) [rmvVer="
                      + ver
                      + ", entry="
                      + entry
                      + ']');
          }
        }
      }

      if (map == null || map.isEmpty()) return;

      Collection<GridCacheVersion> committed = ctx.tm().committedVersions(ver);
      Collection<GridCacheVersion> rolledback = ctx.tm().rolledbackVersions(ver);

      for (Map.Entry<GridNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
        GridNode n = mapping.getKey();

        GridDistributedUnlockRequest<K, V> req = mapping.getValue();

        if (!req.keyBytes().isEmpty()) {
          req.completedVersions(committed, rolledback);

          // We don't wait for reply to this message.
          ctx.io().send(n, req);
        }
      }
    } catch (GridException ex) {
      U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
  }
  /** {@inheritDoc} */
  @Override
  public void unlockAll(
      Collection<? extends K> keys, GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty()) return;

    try {
      GridCacheVersion ver = null;

      Collection<GridRichNode> affNodes = null;

      int keyCnt = -1;

      Map<GridRichNode, GridNearUnlockRequest<K, V>> map = null;

      Collection<K> locKeys = new LinkedList<K>();

      GridCacheVersion obsoleteVer = ctx.versions().next();

      for (K key : keys) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekExx(key);

          if (entry == null || !ctx.isAll(entry.wrap(false), filter)) break; // While.

          try {
            GridCacheMvccCandidate<K> cand =
                entry.candidate(ctx.nodeId(), Thread.currentThread().getId());

            if (cand != null) {
              ver = cand.version();

              if (affNodes == null) {
                affNodes = CU.allNodes(ctx, cand.topologyVersion());

                keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());

                map = new HashMap<GridRichNode, GridNearUnlockRequest<K, V>>(affNodes.size());
              }

              // Send request to remove from remote nodes.
              GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

              GridNearUnlockRequest<K, V> req = map.get(primary);

              if (req == null) {
                map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                req.version(ver);
              }

              // Remove candidate from local node first.
              GridCacheMvccCandidate<K> rmv = entry.removeLock();

              if (rmv != null) {
                if (!rmv.reentry()) {
                  if (ver != null && !ver.equals(rmv.version()))
                    throw new GridException(
                        "Failed to unlock (if keys were locked separately, "
                            + "then they need to be unlocked separately): "
                            + keys);

                  if (!primary.isLocal()) {
                    assert req != null;

                    req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                  } else locKeys.add(key);

                  if (log.isDebugEnabled()) log.debug("Removed lock (will distribute): " + rmv);
                } else if (log.isDebugEnabled())
                  log.debug(
                      "Current thread still owns lock (or there are no other nodes)"
                          + " [lock="
                          + rmv
                          + ", curThreadId="
                          + Thread.currentThread().getId()
                          + ']');
              }

              // Try to evict near entry if it's dht-mapped locally.
              evictNearEntry(entry, obsoleteVer);
            }

            break;
          } catch (GridCacheEntryRemovedException ignore) {
            if (log.isDebugEnabled())
              log.debug("Attempted to unlock removed entry (will retry): " + entry);
          }
        }
      }

      if (ver == null) return;

      for (Map.Entry<GridRichNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
        GridRichNode n = mapping.getKey();

        GridDistributedUnlockRequest<K, V> req = mapping.getValue();

        if (n.isLocal()) dht.removeLocks(ctx.nodeId(), req.version(), locKeys, true);
        else if (!req.keyBytes().isEmpty())
          // We don't wait for reply to this message.
          ctx.io().send(n, req);
      }
    } catch (GridException ex) {
      U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
  }
  /**
   * Maps keys to nodes. Note that we can not simply group keys by nodes and send lock request as
   * such approach does not preserve order of lock acquisition. Instead, keys are split in
   * continuous groups belonging to one primary node and locks for these groups are acquired
   * sequentially.
   *
   * @param keys Keys.
   */
  private void map(Iterable<? extends K> keys) {
    try {
      GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

      assert snapshot != null;

      long topVer = snapshot.topologyVersion();

      assert topVer > 0;

      if (CU.affinityNodes(cctx, topVer).isEmpty()) {
        onDone(
            new GridTopologyException(
                "Failed to map keys for near-only cache (all "
                    + "partition nodes left the grid)."));

        return;
      }

      ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

      // Assign keys to primary nodes.
      GridNearLockMapping<K, V> map = null;

      for (K key : keys) {
        GridNearLockMapping<K, V> updated = map(key, map, topVer);

        // If new mapping was created, add to collection.
        if (updated != map) mappings.add(updated);

        map = updated;
      }

      if (isDone()) {
        if (log.isDebugEnabled()) log.debug("Abandoning (re)map because future is done: " + this);

        return;
      }

      if (log.isDebugEnabled())
        log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

      // Create mini futures.
      for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext(); ) {
        GridNearLockMapping<K, V> mapping = iter.next();

        GridNode node = mapping.node();
        Collection<K> mappedKeys = mapping.mappedKeys();

        assert !mappedKeys.isEmpty();

        GridNearLockRequest<K, V> req = null;

        Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

        boolean explicit = false;

        for (K key : mappedKeys) {
          while (true) {
            GridNearCacheEntry<K, V> entry = null;

            try {
              entry = cctx.near().entryExx(key, topVer);

              if (!cctx.isAll(entry.wrap(false), filter)) {
                if (log.isDebugEnabled())
                  log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                onComplete(false, false);

                return;
              }

              // Removed exception may be thrown here.
              GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

              if (isDone()) {
                if (log.isDebugEnabled())
                  log.debug(
                      "Abandoning (re)map because future is done after addEntry attempt "
                          + "[fut="
                          + this
                          + ", entry="
                          + entry
                          + ']');

                return;
              }

              if (cand != null) {
                if (tx == null && !cand.reentry())
                  cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                if (val == null) {
                  GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                  try {
                    if (dhtEntry != null) val = dhtEntry.versionedValue(topVer);
                  } catch (GridCacheEntryRemovedException ignored) {
                    assert dhtEntry.obsolete()
                        : "Got removed exception for non-obsolete entry: " + dhtEntry;

                    if (log.isDebugEnabled())
                      log.debug(
                          "Got removed exception for DHT entry in map (will ignore): " + dhtEntry);
                  }
                }

                GridCacheVersion dhtVer = null;

                if (val != null) {
                  dhtVer = val.get1();

                  valMap.put(key, val);
                }

                if (!cand.reentry()) {
                  if (req == null) {
                    req =
                        new GridNearLockRequest<>(
                            topVer,
                            cctx.nodeId(),
                            threadId,
                            futId,
                            lockVer,
                            inTx(),
                            implicitTx(),
                            implicitSingleTx(),
                            read,
                            isolation(),
                            isInvalidate(),
                            timeout,
                            syncCommit(),
                            syncRollback(),
                            mappedKeys.size(),
                            inTx() ? tx.size() : mappedKeys.size(),
                            inTx() ? tx.groupLockKey() : null,
                            inTx() && tx.partitionLock(),
                            inTx() ? tx.subjectId() : null);

                    mapping.request(req);
                  }

                  distributedKeys.add(key);

                  GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                  if (tx != null) tx.addKeyMapping(key, mapping.node());

                  req.addKeyBytes(
                      key,
                      node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                      retval && dhtVer == null,
                      dhtVer, // Include DHT version to match remote DHT entry.
                      writeEntry,
                      inTx() ? tx.entry(key).drVersion() : null,
                      cctx);

                  // Clear transfer required flag since we are sending message.
                  if (writeEntry != null) writeEntry.transferRequired(false);
                }

                if (cand.reentry())
                  explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
              } else
                // Ignore reentries within transactions.
                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

              if (explicit) tx.addKeyMapping(key, mapping.node());

              break;
            } catch (GridCacheEntryRemovedException ignored) {
              assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

              if (log.isDebugEnabled())
                log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
            }
          }

          // Mark mapping explicit lock flag.
          if (explicit) {
            boolean marked = tx != null && tx.markExplicit(node.id());

            assert tx == null || marked;
          }
        }

        if (!distributedKeys.isEmpty()) mapping.distributedKeys(distributedKeys);
        else {
          assert mapping.request() == null;

          iter.remove();
        }
      }

      cctx.mvcc().recheckPendingLocks();

      proceedMapping(mappings);
    } catch (GridException ex) {
      onError(ex);
    }
  }
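// Hedged sketch of the order-preserving split described in the Javadoc of map()
// above: instead of hashing all keys per node (which would reorder lock
// acquisition), start a new group whenever the primary node changes. The primary
// Function is a hypothetical stand-in for the affinity lookup.
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

class ContiguousSplit {
  static <K, N> List<List<K>> splitContiguous(Iterable<K> keys, Function<K, N> primary) {
    List<List<K>> groups = new ArrayList<>();

    N prev = null;
    List<K> cur = null;

    for (K key : keys) {
      N node = primary.apply(key);

      // A node change closes the current run and opens a new one.
      if (cur == null || !node.equals(prev)) {
        groups.add(cur = new ArrayList<>());
        prev = node;
      }

      cur.add(key);
    }

    return groups;
  }
}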
  /** @throws Exception If failed. */
  public void testEmptyProjections() throws Exception {
    final GridClientCompute dflt = client.compute();

    Collection<? extends GridClientNode> nodes = dflt.nodes();

    assertEquals(NODES_CNT, nodes.size());

    Iterator<? extends GridClientNode> iter = nodes.iterator();

    final GridClientCompute singleNodePrj = dflt.projection(Collections.singletonList(iter.next()));

    final GridClientNode second = iter.next();

    final GridClientPredicate<GridClientNode> noneFilter =
        new GridClientPredicate<GridClientNode>() {
          @Override
          public boolean apply(GridClientNode node) {
            return false;
          }
        };

    final GridClientPredicate<GridClientNode> targetFilter =
        new GridClientPredicate<GridClientNode>() {
          @Override
          public boolean apply(GridClientNode node) {
            return node.nodeId().equals(second.nodeId());
          }
        };

    GridTestUtils.assertThrows(
        log(),
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            return dflt.projection(noneFilter).log(-1, -1);
          }
        },
        GridServerUnreachableException.class,
        null);

    GridTestUtils.assertThrows(
        log(),
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            return singleNodePrj.projection(second);
          }
        },
        GridClientException.class,
        null);

    GridTestUtils.assertThrows(
        log(),
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            return singleNodePrj.projection(targetFilter);
          }
        },
        GridClientException.class,
        null);
  }
  /**
   * Test file creation.
   *
   * @param path Path to file to store.
   * @param size Size of file to store.
   * @param salt Salt for file content generation.
   * @throws Exception In case of any exception.
   */
  private void testCreateFile(final GridGgfsPath path, final long size, final int salt)
      throws Exception {
    info("Create file [path=" + path + ", size=" + size + ", salt=" + salt + ']');

    final AtomicInteger cnt = new AtomicInteger(0);
    final Collection<GridGgfsPath> cleanUp = new ConcurrentLinkedQueue<>();

    long time =
        runMultiThreaded(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                int id = cnt.incrementAndGet();

                GridGgfsPath f = new GridGgfsPath(path.parent(), "asdf" + (id > 1 ? "-" + id : ""));

                try (GridGgfsOutputStream out = fs.create(f, 0, true, null, 0, 1024, null)) {
                  assertNotNull(out);

                  cleanUp.add(f); // Add all created into cleanup list.

                  U.copy(new GridGgfsTestInputStream(size, salt), out);
                }

                return null;
              }
            },
            WRITING_THREADS_CNT,
            "perform-multi-thread-writing");

    if (time > 0) {
      double rate = size * 1000. / time / 1024 / 1024;

      info(
          String.format(
              "Write file [path=%s, size=%d kB, rate=%2.1f MB/s]",
              path, WRITING_THREADS_CNT * size / 1024, WRITING_THREADS_CNT * rate));
    }

    info("Read and validate saved file: " + path);

    final InputStream expIn = new GridGgfsTestInputStream(size, salt);
    final GridGgfsInputStream actIn = fs.open(path, CFG_BLOCK_SIZE * READING_THREADS_CNT * 11 / 10);

    // Validate continuous reading of whole file.
    assertEqualStreams(expIn, actIn, size, null);

    // Validate random seek and reading.
    final Random rnd = new Random();

    runMultiThreaded(
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            long skip = Math.abs(rnd.nextLong() % (size + 1));
            long range = Math.min(size - skip, rnd.nextInt(CFG_BLOCK_SIZE * 400));

            assertEqualStreams(new GridGgfsTestInputStream(size, salt), actIn, range, skip);

            return null;
          }
        },
        READING_THREADS_CNT,
        "validate-multi-thread-reading");

    expIn.close();
    actIn.close();

    info("Get stored file info: " + path);

    GridGgfsFile desc = fs.info(path);

    info("Validate stored file info: " + desc);

    assertNotNull(desc);

    if (log.isDebugEnabled()) log.debug("File descriptor: " + desc);

    Collection<GridGgfsBlockLocation> aff = fs.affinity(path, 0, desc.length());

    assertFalse("Affinity: " + aff, desc.length() != 0 && aff.isEmpty());

    int blockSize = desc.blockSize();

    assertEquals("File size", size, desc.length());
    assertEquals("Binary block size", CFG_BLOCK_SIZE, blockSize);
    // assertEquals("Permission", "rwxr-xr-x", desc.getPermission().toString());
    // assertEquals("Permission sticky bit marks this is file", false,
    // desc.getPermission().getStickyBit());
    assertEquals("Type", true, desc.isFile());
    assertEquals("Type", false, desc.isDirectory());

    info("Cleanup files: " + cleanUp);

    for (GridGgfsPath f : cleanUp) {
      fs.delete(f, true);
      assertNull(fs.info(f));
    }
  }
  /** {@inheritDoc} */
  @SuppressWarnings({"unchecked"})
  @Override
  public void unlockAll(
      Collection<? extends K> keys, GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys == null || keys.isEmpty()) return;

    Collection<? extends GridNode> nodes = ctx.remoteNodes(keys);

    try {
      GridDistributedUnlockRequest<K, V> req = new GridDistributedUnlockRequest<K, V>(keys.size());

      for (K key : keys) {
        GridDistributedCacheEntry<K, V> entry = entryexx(key);

        if (!ctx.isAll(entry.wrap(false), filter)) continue;

        // Unlock local lock first.
        GridCacheMvccCandidate<K> rmv = entry.removeLock();

        if (rmv != null && !nodes.isEmpty()) {
          if (!rmv.reentry()) {
            req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);

            // We are assuming that lock ID is the same for all keys.
            req.version(rmv.version());

            if (log.isDebugEnabled()) log.debug("Removed lock (will distribute): " + rmv);
          } else {
            if (log.isDebugEnabled())
              log.debug(
                  "Locally unlocked lock reentry without distributing to other nodes [removed="
                      + rmv
                      + ", entry="
                      + entry
                      + ']');
          }
        } else {
          if (log.isDebugEnabled())
            log.debug(
                "Current thread still owns lock (or there are no other nodes) [lock="
                    + rmv
                    + ", curThreadId="
                    + Thread.currentThread().getId()
                    + ']');
        }
      }

      // Don't proceed if there are no keys to unlock.
      if (req.keyBytes().isEmpty()) {
        if (log.isDebugEnabled())
          log.debug("No keys to unlock locally (was it reentry unlock?): " + keys);

        return;
      }

      // We don't wait for reply to this message. Receiving side will have
      // to make sure that unlock requests don't come before lock requests.
      ctx.io().safeSend(nodes, req, null);
    } catch (GridException e) {
      U.error(log, "Failed to unlock keys: " + keys, e);
    }
  }
  /** {@inheritDoc} */
  @SuppressWarnings({"unchecked", "ThrowableInstanceNeverThrown"})
  @Override
  protected GridFuture<Boolean> lockAllAsync(
      Collection<? extends K> keys,
      long timeout,
      GridCacheTxLocalEx<K, V> tx,
      boolean isInvalidate,
      boolean isRead,
      boolean retval,
      GridCacheTxIsolation isolation,
      GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty()) return new GridFinishedFuture<Boolean>(ctx.kernalContext(), true);

    Collection<GridRichNode> nodes = ctx.remoteNodes(keys);

    final GridReplicatedLockFuture<K, V> fut =
        new GridReplicatedLockFuture<K, V>(ctx, keys, tx, this, nodes, timeout, filter);

    GridDistributedLockRequest<K, V> req =
        new GridDistributedLockRequest<K, V>(
            locNodeId,
            Thread.currentThread().getId(),
            fut.futureId(),
            fut.version(),
            tx != null,
            isRead,
            isolation,
            isInvalidate,
            timeout,
            keys.size());

    try {
      // Must add future before readying locks.
      if (!ctx.mvcc().addFuture(fut))
        throw new IllegalStateException("Duplicate future ID: " + fut);

      boolean distribute = false;

      for (K key : keys) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = null;

          try {
            entry = entryexx(key);

            if (!ctx.isAll(entry.wrap(false), filter)) {
              if (log.isDebugEnabled())
                log.debug("Entry being locked did not pass filter (will not lock): " + entry);

              fut.onDone(false);

              return fut;
            }

            // Removed exception may be thrown here.
            GridCacheMvccCandidate<K> cand = fut.addEntry(entry);

            if (cand != null) {
              req.addKeyBytes(
                  key,
                  cand.reentry() ? null : entry.getOrMarshalKeyBytes(),
                  retval,
                  entry.localCandidates(fut.version()),
                  ctx);

              req.completedVersions(
                  ctx.tm().committedVersions(fut.version()),
                  ctx.tm().rolledbackVersions(fut.version()));

              distribute = !cand.reentry();
            } else if (fut.isDone()) return fut;

            break;
          } catch (GridCacheEntryRemovedException ignored) {
            if (log.isDebugEnabled())
              log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
          }
        }
      }

      // If nothing to distribute at this point,
      // then all locks are reentries.
      if (!distribute) fut.complete(true);

      if (nodes.isEmpty()) fut.readyLocks();

      // No reason to send request if all locks are locally re-entered,
      // or if timeout is negative and local locks could not be acquired.
      if (fut.isDone()) return fut;

      try {
        ctx.io()
            .safeSend(
                fut.nodes(),
                req,
                new P1<GridNode>() {
                  @Override
                  public boolean apply(GridNode node) {
                    fut.onNodeLeft(node.id());

                    return !fut.isDone();
                  }
                });
      } catch (GridException e) {
        U.error(
            log,
            "Failed to send lock request to node [nodes="
                + U.toShortString(nodes)
                + ", req="
                + req
                + ']',
            e);

        fut.onError(e);
      }

      return fut;
    } catch (GridException e) {
      Throwable err = new GridException("Failed to acquire asynchronous lock for keys: " + keys, e);

      // Clean-up.
      fut.onError(err);

      ctx.mvcc().removeFuture(fut);

      return fut;
    }
  }