/**
   * @param in Object input.
   * @return Read collection or {@code null} if a size of -1 was read.
   * @throws IOException If failed.
   * @throws ClassNotFoundException If failed.
   */
  private Collection<Object> readFieldsCollection(ObjectInput in)
      throws IOException, ClassNotFoundException {
    assert fields;

    int size = in.readInt();

    if (size == -1) return null;

    Collection<Object> res = new ArrayList<>(size);

    for (int i = 0; i < size; i++) {
      int size0 = in.readInt();

      Collection<Object> col = new ArrayList<>(size0);

      for (int j = 0; j < size0; j++) col.add(in.readObject());

      assert col.size() == size0;

      res.add(col);
    }

    assert res.size() == size;

    return res;
  }
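  // A write-side counterpart of the read method above -- a minimal sketch, not the original
  // class's writer. It mirrors the framing readFieldsCollection() expects: an int of -1 for a
  // null collection, otherwise the outer size followed by each inner collection's size and
  // elements. The method name is hypothetical.
  private void writeFieldsCollection0(ObjectOutput out, @Nullable Collection<Collection<Object>> col)
      throws IOException {
    if (col == null) {
      out.writeInt(-1);

      return;
    }

    out.writeInt(col.size());

    for (Collection<Object> inner : col) {
      out.writeInt(inner.size());

      for (Object o : inner) out.writeObject(o);
    }
  }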
  /** @throws Exception If failed. */
  public void testClientAffinity() throws Exception {
    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);

    Collection<Object> keys = new ArrayList<>();

    keys.addAll(Arrays.asList(Boolean.TRUE, Boolean.FALSE, 1, Integer.MAX_VALUE));

    Random rnd = new Random();
    StringBuilder sb = new StringBuilder();

    // Generate some random strings.
    for (int i = 0; i < 100; i++) {
      sb.setLength(0);

      for (int j = 0; j < 255; j++)
        // Only printable ASCII symbols for test.
        sb.append((char) (rnd.nextInt(0x7f - 0x20) + 0x20));

      keys.add(sb.toString());
    }

    // Generate some more keys to achieve better coverage.
    for (int i = 0; i < 100; i++) keys.add(UUID.randomUUID());

    for (Object key : keys) {
      UUID nodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

      UUID clientNodeId = partitioned.affinity(key);

      assertEquals(
          "Invalid affinity mapping for REST response for key: " + key, nodeId, clientNodeId);
    }
  }
  /** {@inheritDoc} */
  @Override
  public final Map<UUID, GridNodeMetrics> metrics(Collection<UUID> nodeIds)
      throws GridSpiException {
    assert !F.isEmpty(nodeIds);

    long now = U.currentTimeMillis();

    Collection<UUID> expired = new LinkedList<>();

    for (UUID id : nodeIds) {
      GridNodeMetrics nodeMetrics = metricsMap.get(id);

      Long ts = tsMap.get(id);

      if (nodeMetrics == null || ts == null || ts < now - metricsExpireTime) expired.add(id);
    }

    if (!expired.isEmpty()) {
      Map<UUID, GridNodeMetrics> refreshed = metrics0(expired);

      for (UUID id : refreshed.keySet()) tsMap.put(id, now);

      metricsMap.putAll(refreshed);
    }

    return F.view(metricsMap, F.contains(nodeIds));
  }
  /** {@inheritDoc} */
  @SuppressWarnings("TypeMayBeWeakened")
  @Nullable
  private Collection<Object> unmarshalFieldsCollection(
      @Nullable Collection<byte[]> byteCol, GridCacheContext<K, V> ctx, ClassLoader ldr)
      throws GridException {
    assert ctx != null;
    assert ldr != null;

    Collection<Object> col = unmarshalCollection(byteCol, ctx, ldr);
    Collection<Object> col0 = null;

    if (col != null) {
      col0 = new ArrayList<>(col.size());

      for (Object o : col) {
        List<Object> list = (List<Object>) o;
        List<Object> list0 = new ArrayList<>(list.size());

        for (Object obj : list)
          list0.add(obj != null ? ctx.marshaller().unmarshal((byte[]) obj, ldr) : null);

        col0.add(list0);
      }
    }

    return col0;
  }
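  // Sketch of the inverse operation, assuming GridGain's marshaller exposes marshal(Object)
  // returning byte[] (the original class's marshalling method may differ). Each row element is
  // marshalled to its own byte array; the caller would then marshal the rows themselves,
  // mirroring the unmarshalCollection() call above. The method name is hypothetical.
  @Nullable
  private Collection<List<Object>> marshalFieldsRows(
      @Nullable Collection<List<Object>> col, GridCacheContext<K, V> ctx) throws GridException {
    if (col == null) return null;

    Collection<List<Object>> col0 = new ArrayList<>(col.size());

    for (List<Object> row : col) {
      List<Object> row0 = new ArrayList<>(row.size());

      for (Object obj : row) row0.add(obj != null ? ctx.marshaller().marshal(obj) : null);

      col0.add(row0);
    }

    return col0;
  }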
    /** {@inheritDoc} */
    @Override
    protected Collection<? extends GridComputeJob> split(int gridSize, Object arg)
        throws GridException {
      Collection<GridComputeJobAdapter> jobs = new ArrayList<>(gridSize);

      this.gridSize = gridSize;

      final String locNodeId = grid.localNode().id().toString();

      for (int i = 0; i < gridSize; i++) {
        jobs.add(
            new GridComputeJobAdapter() {
              @SuppressWarnings("OverlyStrongTypeCast")
              @Override
              public Object execute() {
                try {
                  Thread.sleep(1000);
                } catch (InterruptedException ignored) {
                  Thread.currentThread().interrupt();
                }

                return new GridBiTuple<>(locNodeId, 1);
              }
            });
      }

      return jobs;
    }
  /**
   * Adds information about a key and version to the request.
   *
   * <p>The other version has to be provided if the suspect lock is DHT-local.
   *
   * @param key Key.
   * @param cand Candidate.
   */
  @SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
  void addCandidate(K key, GridCacheDgcLockCandidate cand) {
    Collection<GridCacheDgcLockCandidate> col =
        F.addIfAbsent(map, key, new ArrayList<GridCacheDgcLockCandidate>());

    assert col != null;

    col.add(cand);
  }
    /** {@inheritDoc} */
    @Override
    protected Collection<? extends GridComputeJob> split(int gridSize, String arg)
        throws GridException {
      Collection<GridComputeJobAdapter> jobs = new ArrayList<>(jobCnt);

      for (int i = 0; i < jobCnt; i++) jobs.add(new TestJob());

      return jobs;
    }
    /** {@inheritDoc} */
    @Override
    protected Collection<? extends GridComputeJob> split(int gridSize, Object arg)
        throws GridException {
      if (log.isInfoEnabled())
        log.info("Splitting job [job=" + this + ", gridSize=" + gridSize + ", arg=" + arg + ']');

      Collection<GridComputeJob> jobs = new ArrayList<>(SPLIT_COUNT);

      for (int i = 1; i <= SPLIT_COUNT; i++) jobs.add(new GridCancelTestJob(i));

      return jobs;
    }
  /** {@inheritDoc} */
  @Override
  public void start0() throws GridException {
    aff =
        new GridAffinityAssignmentCache(
            cctx,
            cctx.namex(),
            cctx.config().getAffinity(),
            cctx.config().getAffinityMapper(),
            cctx.config().getBackups());

    // Generate internal keys for partitions.
    int partCnt = partitions();

    partAffKeys = new GridPartitionLockKey[partCnt];

    Collection<Integer> found = new HashSet<>();

    long affKey = 0;

    while (true) {
      GridPartitionLockKey key = new GridPartitionLockKey(affKey);

      int part = aff.partition(key);

      if (found.add(part)) {
        // This is a key for a partition we have not covered yet.
        key.partitionId(part);

        partAffKeys[part] = key;

        if (found.size() == partCnt) break;
      }

      affKey++;

      if (affKey > partCnt * MAX_PARTITION_KEY_ATTEMPT_RATIO)
        throw new IllegalStateException(
            "Failed to calculate partition affinity keys for given affinity "
                + "function [attemptCnt="
                + affKey
                + ", found="
                + found
                + ", cacheName="
                + cctx.name()
                + ']');
    }
  }
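  // The probing loop above in isolation: a self-contained sketch that finds one sample key per
  // partition by sequentially trying candidate keys. The mixing function is a stand-in
  // assumption -- the real code asks the cache's configured affinity function for the partition.
  static long[] samplePartitionKeys(int partCnt, int maxAttemptRatio) {
    long[] keys = new long[partCnt];

    Collection<Integer> found = new HashSet<>();

    for (long key = 0; found.size() < partCnt; key++) {
      if (key > (long) partCnt * maxAttemptRatio)
        throw new IllegalStateException("Failed to find keys for all " + partCnt + " partitions.");

      // Stand-in for aff.partition(key): a multiplicative hash reduced to a partition id.
      int part = (int) ((key * 2654435761L >>> 16) % partCnt);

      if (found.add(part)) keys[part] = key;
    }

    return keys;
  }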
  /** {@inheritDoc} */
  @Override
  public GridFuture<?> addData(Collection<? extends Map.Entry<K, V>> entries) {
    A.notEmpty(entries, "entries");

    enterBusy();

    try {
      GridFutureAdapter<Object> resFut = new GridFutureAdapter<>(ctx);

      activeFuts.add(resFut);

      resFut.listenAsync(rmvActiveFut);

      Collection<K> keys = new GridConcurrentHashSet<>(entries.size(), 1.0f, 16);

      for (Map.Entry<K, V> entry : entries) keys.add(entry.getKey());

      load0(entries, resFut, keys, 0);

      return resFut;
    } finally {
      leaveBusy();
    }
  }
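  // Hedged usage sketch for addData() above: build a batch of entries and block until the
  // returned future completes. Assumes F.t(...) yields a GridBiTuple, which implements
  // Map.Entry, and that GridFuture.get() throws GridException on failure; the key/value names
  // are illustrative.
  static void loadBatch(GridDataLoader<String, Integer> ldr) throws GridException {
    Collection<Map.Entry<String, Integer>> batch = new ArrayList<>();

    for (int i = 0; i < 100; i++) batch.add(F.t("key-" + i, i));

    // addData() is asynchronous; get() waits for this batch to be flushed and stored.
    ldr.addData(batch).get();
  }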
  /** {@inheritDoc} */
  @SuppressWarnings("all")
  @Override
  public boolean readFrom(ByteBuffer buf) {
    commState.setBuffer(buf);

    if (!super.readFrom(buf)) return false;

    switch (commState.idx) {
      case 2:
        if (commState.readSize == -1) {
          if (buf.remaining() < 4) return false;

          commState.readSize = commState.getInt();
        }

        if (commState.readSize >= 0) {
          if (dataBytes == null) dataBytes = new ArrayList<>(commState.readSize);

          for (int i = commState.readItems; i < commState.readSize; i++) {
            byte[] _val = commState.getByteArray();

            if (_val == BYTE_ARR_NOT_READ) return false;

            dataBytes.add(_val);

            commState.readItems++;
          }
        }

        commState.readSize = -1;
        commState.readItems = 0;

        commState.idx++;

      case 3:
        byte[] errBytes0 = commState.getByteArray();

        if (errBytes0 == BYTE_ARR_NOT_READ) return false;

        errBytes = errBytes0;

        commState.idx++;

      case 4:
        if (buf.remaining() < 1) return false;

        fields = commState.getBoolean();

        commState.idx++;

      case 5:
        if (buf.remaining() < 1) return false;

        finished = commState.getBoolean();

        commState.idx++;

      case 6:
        if (commState.readSize == -1) {
          if (buf.remaining() < 4) return false;

          commState.readSize = commState.getInt();
        }

        if (commState.readSize >= 0) {
          if (metaDataBytes == null) metaDataBytes = new ArrayList<>(commState.readSize);

          for (int i = commState.readItems; i < commState.readSize; i++) {
            byte[] _val = commState.getByteArray();

            if (_val == BYTE_ARR_NOT_READ) return false;

            metaDataBytes.add(_val);

            commState.readItems++;
          }
        }

        commState.readSize = -1;
        commState.readItems = 0;

        commState.idx++;

      case 7:
        commState.idx++;

      case 8:
        if (buf.remaining() < 8) return false;

        reqId = commState.getLong();

        commState.idx++;
    }

    return true;
  }
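  // The method above is a resumable state machine: commState.idx records the last field fully
  // read, the switch cases fall through on purpose, and returning false means "not enough bytes
  // yet -- call again with the same state once more data arrives". The same pattern in
  // miniature, for a message of two ints:
  static class TwoIntReader {
    private int idx;

    private int a;
    private int b;

    boolean readFrom(ByteBuffer buf) {
      switch (idx) {
        case 0:
          if (buf.remaining() < 4) return false;

          a = buf.getInt();

          idx++;

          // Fall through: try to read the next field in the same invocation.

        case 1:
          if (buf.remaining() < 4) return false;

          b = buf.getInt();

          idx++;
      }

      return true;
    }
  }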
  /**
   * Maps keys to nodes. Note that we cannot simply group keys by node and send one lock
   * request per node, as that approach would not preserve the order of lock acquisition.
   * Instead, keys are split into contiguous groups belonging to one primary node, and locks
   * for these groups are acquired sequentially.
   *
   * @param keys Keys.
   */
  private void map(Iterable<? extends K> keys) {
    try {
      GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

      assert snapshot != null;

      long topVer = snapshot.topologyVersion();

      assert topVer > 0;

      if (CU.affinityNodes(cctx, topVer).isEmpty()) {
        onDone(
            new GridTopologyException(
                "Failed to map keys for near-only cache (all "
                    + "partition nodes left the grid)."));

        return;
      }

      ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

      // Assign keys to primary nodes.
      GridNearLockMapping<K, V> map = null;

      for (K key : keys) {
        GridNearLockMapping<K, V> updated = map(key, map, topVer);

        // If new mapping was created, add to collection.
        if (updated != map) mappings.add(updated);

        map = updated;
      }

      if (isDone()) {
        if (log.isDebugEnabled()) log.debug("Abandoning (re)map because future is done: " + this);

        return;
      }

      if (log.isDebugEnabled())
        log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

      // Create mini futures.
      for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext(); ) {
        GridNearLockMapping<K, V> mapping = iter.next();

        GridNode node = mapping.node();
        Collection<K> mappedKeys = mapping.mappedKeys();

        assert !mappedKeys.isEmpty();

        GridNearLockRequest<K, V> req = null;

        Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

        boolean explicit = false;

        for (K key : mappedKeys) {
          while (true) {
            GridNearCacheEntry<K, V> entry = null;

            try {
              entry = cctx.near().entryExx(key, topVer);

              if (!cctx.isAll(entry.wrap(false), filter)) {
                if (log.isDebugEnabled())
                  log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                onComplete(false, false);

                return;
              }

              // Removed exception may be thrown here.
              GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

              if (isDone()) {
                if (log.isDebugEnabled())
                  log.debug(
                      "Abandoning (re)map because future is done after addEntry attempt "
                          + "[fut="
                          + this
                          + ", entry="
                          + entry
                          + ']');

                return;
              }

              if (cand != null) {
                if (tx == null && !cand.reentry())
                  cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                if (val == null) {
                  GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                  try {
                    if (dhtEntry != null) val = dhtEntry.versionedValue(topVer);
                  } catch (GridCacheEntryRemovedException ignored) {
                    assert dhtEntry.obsolete()
                        : "Got removed exception for non-obsolete entry: " + dhtEntry;

                    if (log.isDebugEnabled())
                      log.debug(
                          "Got removed exception for DHT entry in map (will ignore): " + dhtEntry);
                  }
                }

                GridCacheVersion dhtVer = null;

                if (val != null) {
                  dhtVer = val.get1();

                  valMap.put(key, val);
                }

                if (!cand.reentry()) {
                  if (req == null) {
                    req =
                        new GridNearLockRequest<>(
                            topVer,
                            cctx.nodeId(),
                            threadId,
                            futId,
                            lockVer,
                            inTx(),
                            implicitTx(),
                            implicitSingleTx(),
                            read,
                            isolation(),
                            isInvalidate(),
                            timeout,
                            syncCommit(),
                            syncRollback(),
                            mappedKeys.size(),
                            inTx() ? tx.size() : mappedKeys.size(),
                            inTx() ? tx.groupLockKey() : null,
                            inTx() && tx.partitionLock(),
                            inTx() ? tx.subjectId() : null);

                    mapping.request(req);
                  }

                  distributedKeys.add(key);

                  GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                  if (tx != null) tx.addKeyMapping(key, mapping.node());

                  req.addKeyBytes(
                      key,
                      node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                      retval && dhtVer == null,
                      dhtVer, // Include DHT version to match remote DHT entry.
                      writeEntry,
                      inTx() ? tx.entry(key).drVersion() : null,
                      cctx);

                  // Clear transfer required flag since we are sending message.
                  if (writeEntry != null) writeEntry.transferRequired(false);
                }

                if (cand.reentry())
                  explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
              } else
                // Ignore reentries within transactions.
                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

              if (explicit) tx.addKeyMapping(key, mapping.node());

              break;
            } catch (GridCacheEntryRemovedException ignored) {
              assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

              if (log.isDebugEnabled())
                log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
            }
          }

          // Mark mapping explicit lock flag.
          if (explicit) {
            boolean marked = tx != null && tx.markExplicit(node.id());

            assert tx == null || marked;
          }
        }

        if (!distributedKeys.isEmpty()) mapping.distributedKeys(distributedKeys);
        else {
          assert mapping.request() == null;

          iter.remove();
        }
      }

      cctx.mvcc().recheckPendingLocks();

      proceedMapping(mappings);
    } catch (GridException ex) {
      onError(ex);
    }
  }
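  // The grouping rule from the Javadoc above, in isolation: keys are split into contiguous runs
  // that share a primary node, so lock acquisition order is preserved. nodeFor is a hypothetical
  // stand-in for the affinity lookup performed by map(key, map, topVer); GridClosure is
  // GridGain's single-argument function type.
  static <K> List<Map.Entry<UUID, List<K>>> contiguousRuns(
      Iterable<K> keys, GridClosure<K, UUID> nodeFor) {
    List<Map.Entry<UUID, List<K>>> runs = new ArrayList<>();

    UUID prev = null;

    for (K key : keys) {
      UUID node = nodeFor.apply(key);

      // Start a new run whenever the primary node changes, even if that node appeared earlier:
      // merging non-adjacent runs would reorder lock acquisition.
      if (!node.equals(prev)) {
        runs.add(new AbstractMap.SimpleEntry<UUID, List<K>>(node, new ArrayList<K>()));

        prev = node;
      }

      runs.get(runs.size() - 1).getValue().add(key);
    }

    return runs;
  }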