Code example #1
  /** {@inheritDoc} */
  @Override
  public final Map<UUID, GridNodeMetrics> metrics(Collection<UUID> nodeIds)
      throws GridSpiException {
    assert !F.isEmpty(nodeIds);

    long now = U.currentTimeMillis();

    Collection<UUID> expired = new LinkedList<>();

    for (UUID id : nodeIds) {
      GridNodeMetrics nodeMetrics = metricsMap.get(id);

      Long ts = tsMap.get(id);

      if (nodeMetrics == null || ts == null || ts < now - metricsExpireTime) expired.add(id);
    }

    if (!expired.isEmpty()) {
      Map<UUID, GridNodeMetrics> refreshed = metrics0(expired);

      for (UUID id : refreshed.keySet()) tsMap.put(id, now);

      metricsMap.putAll(refreshed);
    }

    return F.view(metricsMap, F.contains(nodeIds));
  }
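
The method above implements a batch read-through cache with time-based expiry: stale or missing entries are collected first, refreshed with a single metrics0 call, and a filtered view of the cache is returned via F.view. Below is a minimal standalone sketch of the same pattern; the generic ExpiringBatchCache class and its fetchBatch loader are illustrative placeholders, not part of the original API.

import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

/** Minimal sketch of a batch read-through cache with time-based expiry (illustrative only). */
public class ExpiringBatchCache<K, V> {
  private final Map<K, V> values = new ConcurrentHashMap<>();
  private final Map<K, Long> timestamps = new ConcurrentHashMap<>();
  private final long expireMs;
  private final Function<Collection<K>, Map<K, V>> fetchBatch; // Placeholder batch loader.

  public ExpiringBatchCache(long expireMs, Function<Collection<K>, Map<K, V>> fetchBatch) {
    this.expireMs = expireMs;
    this.fetchBatch = fetchBatch;
  }

  public Map<K, V> get(Collection<K> keys) {
    long now = System.currentTimeMillis();

    // Pass 1: collect keys whose cached value is missing or older than the expiry window.
    Collection<K> expired = new LinkedList<>();

    for (K key : keys) {
      Long ts = timestamps.get(key);

      if (!values.containsKey(key) || ts == null || ts < now - expireMs) expired.add(key);
    }

    // Pass 2: refresh all stale entries with one batch call and stamp them with the new time.
    if (!expired.isEmpty()) {
      Map<K, V> refreshed = fetchBatch.apply(expired);

      for (K key : refreshed.keySet()) timestamps.put(key, now);

      values.putAll(refreshed);
    }

    // Return only the requested subset (the original returns a live view via F.view instead).
    Map<K, V> res = new HashMap<>();

    for (K key : keys) {
      V val = values.get(key);

      if (val != null) res.put(key, val);
    }

    return res;
  }
}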
Code example #2
  /** {@inheritDoc} */
  @Override
  public void loadCache(GridBiInClosure<K, V> c, @Nullable Object... args) throws GridException {
    ExecutorService exec =
        new ThreadPoolExecutor(
            threadsCnt,
            threadsCnt,
            0L,
            MILLISECONDS,
            new ArrayBlockingQueue<Runnable>(batchQueueSize),
            new BlockingRejectedExecutionHandler());

    Iterator<I> iter = inputIterator(args);

    Collection<I> buf = new ArrayList<>(batchSize);

    try {
      while (iter.hasNext()) {
        if (Thread.currentThread().isInterrupted()) {
          U.warn(log, "Working thread was interrupted while loading data.");

          break;
        }

        buf.add(iter.next());

        if (buf.size() == batchSize) {
          exec.submit(new Worker(c, buf, args));

          buf = new ArrayList<>(batchSize);
        }
      }

      if (!buf.isEmpty()) exec.submit(new Worker(c, buf, args));
    } catch (RejectedExecutionException ignored) {
      // Should never happen: the custom blocking rejection handler waits instead of rejecting.
      assert false : "RejectedExecutionException was thrown when it should not have been.";
    } finally {
      exec.shutdown();

      try {
        exec.awaitTermination(Long.MAX_VALUE, MILLISECONDS);
      } catch (InterruptedException ignored) {
        U.warn(log, "Working thread was interrupted while waiting for put operations to complete.");

        Thread.currentThread().interrupt();
      }
    }
  }
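
This loader relies on a custom BlockingRejectedExecutionHandler that is not shown in the snippet. A common way to implement such a handler, and presumably what it does here, is to block the submitting thread until the executor's bounded queue has room; the sketch below is an assumption about its behavior, not the original class.

import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;

/** Sketch of a rejection handler that blocks the caller until queue space frees up (assumed). */
public class BlockingRejectedExecutionHandler implements RejectedExecutionHandler {
  @Override
  public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
    try {
      if (executor.isShutdown())
        throw new RejectedExecutionException("Executor has been shut down.");

      // Wait until the bounded queue can accept the task, throttling the submitting thread.
      executor.getQueue().put(r);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();

      throw new RejectedExecutionException("Interrupted while waiting to enqueue task.", e);
    }
  }
}

With a handler like this installed, submit throttles the producer instead of throwing once the queue fills up, which is why the RejectedExecutionException catch above is treated as unreachable.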
Code example #3
  /**
   * Test file creation.
   *
   * @param path Path to file to store.
   * @param size Size of file to store.
   * @param salt Salt for file content generation.
   * @throws Exception In case of any exception.
   */
  private void testCreateFile(final GridGgfsPath path, final long size, final int salt)
      throws Exception {
    info("Create file [path=" + path + ", size=" + size + ", salt=" + salt + ']');

    final AtomicInteger cnt = new AtomicInteger(0);
    final Collection<GridGgfsPath> cleanUp = new ConcurrentLinkedQueue<>();

    long time =
        runMultiThreaded(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                int id = cnt.incrementAndGet();

                GridGgfsPath f = new GridGgfsPath(path.parent(), "asdf" + (id > 1 ? "-" + id : ""));

                try (GridGgfsOutputStream out = fs.create(f, 0, true, null, 0, 1024, null)) {
                  assertNotNull(out);

                  cleanUp.add(f); // Add all created into cleanup list.

                  U.copy(new GridGgfsTestInputStream(size, salt), out);
                }

                return null;
              }
            },
            WRITING_THREADS_CNT,
            "perform-multi-thread-writing");

    if (time > 0) {
      double rate = size * 1000. / time / 1024 / 1024;

      info(
          String.format(
              "Write file [path=%s, size=%d kB, rate=%2.1f MB/s]",
              path, WRITING_THREADS_CNT * size / 1024, WRITING_THREADS_CNT * rate));
    }

    info("Read and validate saved file: " + path);

    final InputStream expIn = new GridGgfsTestInputStream(size, salt);
    final GridGgfsInputStream actIn = fs.open(path, CFG_BLOCK_SIZE * READING_THREADS_CNT * 11 / 10);

    // Validate continuous reading of whole file.
    assertEqualStreams(expIn, actIn, size, null);

    // Validate random seek and reading.
    final Random rnd = new Random();

    runMultiThreaded(
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            long skip = Math.abs(rnd.nextLong() % (size + 1));
            long range = Math.min(size - skip, rnd.nextInt(CFG_BLOCK_SIZE * 400));

            assertEqualStreams(new GridGgfsTestInputStream(size, salt), actIn, range, skip);

            return null;
          }
        },
        READING_THREADS_CNT,
        "validate-multi-thread-reading");

    expIn.close();
    actIn.close();

    info("Get stored file info: " + path);

    GridGgfsFile desc = fs.info(path);

    info("Validate stored file info: " + desc);

    assertNotNull(desc);

    if (log.isDebugEnabled()) log.debug("File descriptor: " + desc);

    Collection<GridGgfsBlockLocation> aff = fs.affinity(path, 0, desc.length());

    assertFalse("Affinity: " + aff, desc.length() != 0 && aff.isEmpty());

    int blockSize = desc.blockSize();

    assertEquals("File size", size, desc.length());
    assertEquals("Binary block size", CFG_BLOCK_SIZE, blockSize);
    // assertEquals("Permission", "rwxr-xr-x", desc.getPermission().toString());
    // assertEquals("Permission sticky bit marks this is file", false,
    // desc.getPermission().getStickyBit());
    assertEquals("Type", true, desc.isFile());
    assertEquals("Type", false, desc.isDirectory());

    info("Cleanup files: " + cleanUp);

    for (GridGgfsPath f : cleanUp) {
      fs.delete(f, true);
      assertNull(fs.info(f));
    }
  }
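
The test depends on a runMultiThreaded(Callable, threadCnt, threadName) helper whose implementation is not shown. Judging by its use, where the return value is treated as elapsed milliseconds, it presumably runs the callable concurrently in the given number of threads and times the whole run. A minimal sketch under that assumption:

import java.util.ArrayList;
import java.util.Collection;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentLinkedQueue;

/**
 * Sketch of the multi-threaded test runner assumed above: runs the callable in threadCnt
 * threads, waits for all of them, and returns elapsed wall-clock time in milliseconds.
 */
static long runMultiThreaded(final Callable<?> call, int threadCnt, String threadName)
    throws Exception {
  Collection<Thread> threads = new ArrayList<>(threadCnt);
  final Collection<Throwable> errs = new ConcurrentLinkedQueue<>();

  long start = System.currentTimeMillis();

  for (int i = 0; i < threadCnt; i++) {
    Thread t =
        new Thread(threadName + "-" + i) {
          @Override
          public void run() {
            try {
              call.call();
            } catch (Throwable e) {
              errs.add(e);
            }
          }
        };

    threads.add(t);
    t.start();
  }

  for (Thread t : threads) t.join();

  // Propagate the first worker failure so the test fails as it would with a direct call.
  for (Throwable e : errs) throw new Exception("Worker thread failed.", e);

  return System.currentTimeMillis() - start;
}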
Code example #4
  /**
   * Maps keys to nodes. Note that we cannot simply group keys by node and send one lock request
   * per node, as that approach would not preserve the order of lock acquisition. Instead, keys
   * are split into contiguous groups belonging to one primary node, and locks for these groups
   * are acquired sequentially.
   *
   * @param keys Keys.
   */
  private void map(Iterable<? extends K> keys) {
    try {
      GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

      assert snapshot != null;

      long topVer = snapshot.topologyVersion();

      assert topVer > 0;

      if (CU.affinityNodes(cctx, topVer).isEmpty()) {
        onDone(
            new GridTopologyException(
                "Failed to map keys for near-only cache (all "
                    + "partition nodes left the grid)."));

        return;
      }

      ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

      // Assign keys to primary nodes.
      GridNearLockMapping<K, V> map = null;

      for (K key : keys) {
        GridNearLockMapping<K, V> updated = map(key, map, topVer);

        // If new mapping was created, add to collection.
        if (updated != map) mappings.add(updated);

        map = updated;
      }

      if (isDone()) {
        if (log.isDebugEnabled()) log.debug("Abandoning (re)map because future is done: " + this);

        return;
      }

      if (log.isDebugEnabled())
        log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

      // Create mini futures.
      for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext(); ) {
        GridNearLockMapping<K, V> mapping = iter.next();

        GridNode node = mapping.node();
        Collection<K> mappedKeys = mapping.mappedKeys();

        assert !mappedKeys.isEmpty();

        GridNearLockRequest<K, V> req = null;

        Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

        boolean explicit = false;

        for (K key : mappedKeys) {
          while (true) {
            GridNearCacheEntry<K, V> entry = null;

            try {
              entry = cctx.near().entryExx(key, topVer);

              if (!cctx.isAll(entry.wrap(false), filter)) {
                if (log.isDebugEnabled())
                  log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                onComplete(false, false);

                return;
              }

              // Removed exception may be thrown here.
              GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

              if (isDone()) {
                if (log.isDebugEnabled())
                  log.debug(
                      "Abandoning (re)map because future is done after addEntry attempt "
                          + "[fut="
                          + this
                          + ", entry="
                          + entry
                          + ']');

                return;
              }

              if (cand != null) {
                if (tx == null && !cand.reentry())
                  cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                if (val == null) {
                  GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                  try {
                    if (dhtEntry != null) val = dhtEntry.versionedValue(topVer);
                  } catch (GridCacheEntryRemovedException ignored) {
                    assert dhtEntry.obsolete()
                        : "Got removed exception for non-obsolete entry: " + dhtEntry;

                    if (log.isDebugEnabled())
                      log.debug(
                          "Got removed exception for DHT entry in map (will ignore): " + dhtEntry);
                  }
                }

                GridCacheVersion dhtVer = null;

                if (val != null) {
                  dhtVer = val.get1();

                  valMap.put(key, val);
                }

                if (!cand.reentry()) {
                  if (req == null) {
                    req =
                        new GridNearLockRequest<>(
                            topVer,
                            cctx.nodeId(),
                            threadId,
                            futId,
                            lockVer,
                            inTx(),
                            implicitTx(),
                            implicitSingleTx(),
                            read,
                            isolation(),
                            isInvalidate(),
                            timeout,
                            syncCommit(),
                            syncRollback(),
                            mappedKeys.size(),
                            inTx() ? tx.size() : mappedKeys.size(),
                            inTx() ? tx.groupLockKey() : null,
                            inTx() && tx.partitionLock(),
                            inTx() ? tx.subjectId() : null);

                    mapping.request(req);
                  }

                  distributedKeys.add(key);

                  GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                  if (tx != null) tx.addKeyMapping(key, mapping.node());

                  req.addKeyBytes(
                      key,
                      node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                      retval && dhtVer == null,
                      dhtVer, // Include DHT version to match remote DHT entry.
                      writeEntry,
                      inTx() ? tx.entry(key).drVersion() : null,
                      cctx);

                  // Clear transfer required flag since we are sending message.
                  if (writeEntry != null) writeEntry.transferRequired(false);
                }

                if (cand.reentry())
                  explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
              } else
                // Ignore reentries within transactions.
                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

              if (explicit) tx.addKeyMapping(key, mapping.node());

              break;
            } catch (GridCacheEntryRemovedException ignored) {
              assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

              if (log.isDebugEnabled())
                log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
            }
          }

          // Mark mapping explicit lock flag.
          if (explicit) {
            boolean marked = tx != null && tx.markExplicit(node.id());

            assert tx == null || marked;
          }
        }

        if (!distributedKeys.isEmpty()) mapping.distributedKeys(distributedKeys);
        else {
          assert mapping.request() == null;

          iter.remove();
        }
      }

      cctx.mvcc().recheckPendingLocks();

      proceedMapping(mappings);
    } catch (GridException ex) {
      onError(ex);
    }
  }
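
The key point from the Javadoc, splitting keys into contiguous per-node runs rather than bucketing them by node, can be illustrated in isolation. The sketch below stands in for the loop over keys above; the primaryNode lookup is a hypothetical placeholder for the real affinity function.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

/**
 * Sketch: split keys into contiguous runs mapping to the same primary node. Unlike bucketing
 * into a Map<Node, List<K>>, this preserves lock-acquisition order: the key sequence
 * [k1->A, k2->B, k3->A] yields three groups (A:[k1], B:[k2], A:[k3]), not two.
 */
static <K> List<List<K>> splitByPrimary(Iterable<K> keys, Function<K, String> primaryNode) {
  List<List<K>> groups = new ArrayList<>();

  List<K> cur = null;
  String curNode = null;

  for (K key : keys) {
    String node = primaryNode.apply(key); // Hypothetical stand-in for the affinity lookup.

    if (cur == null || !node.equals(curNode)) {
      // Primary node changed: start a new contiguous group, like map(key, map, topVer) above.
      cur = new ArrayList<>();
      curNode = node;

      groups.add(cur);
    }

    cur.add(key);
  }

  return groups;
}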