Example No. 1
  /**
   * @return Collection of readers after check.
   * @throws GridCacheEntryRemovedException If removed.
   */
  public Collection<ReaderId> checkReaders() throws GridCacheEntryRemovedException {
    synchronized (mux) {
      checkObsolete();

      if (!readers.isEmpty()) {
        List<ReaderId> rmv = null;

        for (ReaderId reader : readers) {
          if (!cctx.discovery().alive(reader.nodeId())) {
            if (rmv == null) rmv = new LinkedList<ReaderId>();

            rmv.add(reader);
          }
        }

        if (rmv != null) {
          readers = new LinkedList<ReaderId>(readers);

          for (ReaderId rdr : rmv) readers.remove(rdr);

          readers = Collections.unmodifiableList(readers);
        }
      }

      return readers;
    }
  }
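
The method above relies on a copy-on-write idiom: readers is never mutated in place; a private copy is modified under mux and then re-sealed with Collections.unmodifiableList, so any previously published snapshot stays valid for concurrent iteration. A minimal standalone sketch of that idiom (the holder class and its names are illustrative, not part of the original code):

  import java.util.Collections;
  import java.util.LinkedList;
  import java.util.List;

  class CopyOnWriteHolder<T> {
    private final Object mux = new Object();

    // Always an unmodifiable snapshot; volatile so readers see the latest seal.
    private volatile List<T> items = Collections.emptyList();

    void remove(T item) {
      synchronized (mux) {
        List<T> copy = new LinkedList<>(items); // mutate a private copy

        copy.remove(item);

        items = Collections.unmodifiableList(copy); // re-seal and publish
      }
    }

    List<T> snapshot() {
      return items; // safe to hand out: this particular list never changes
    }
  }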
  /** {@inheritDoc} */
  @Override
  public void explicitUndeploy(UUID nodeId, String rsrcName) {
    Collection<SharedDeployment> undeployed = new LinkedList<SharedDeployment>();

    synchronized (mux) {
      for (Iterator<List<SharedDeployment>> i1 = cache.values().iterator(); i1.hasNext(); ) {
        List<SharedDeployment> deps = i1.next();

        for (Iterator<SharedDeployment> i2 = deps.iterator(); i2.hasNext(); ) {
          SharedDeployment dep = i2.next();

          if (dep.hasName(rsrcName)) {
            if (!dep.isUndeployed()) {
              dep.undeploy();

              dep.onRemoved();

              // Undeploy.
              i2.remove();

              undeployed.add(dep);

              if (log.isInfoEnabled()) log.info("Undeployed per-version class loader: " + dep);
            }

            break;
          }
        }

        if (deps.isEmpty()) i1.remove();
      }
    }

    recordUndeployed(null, undeployed);
  }
Example No. 3
  /** {@inheritDoc} */
  @Override
  public void addAttributeListener(GridTaskSessionAttributeListener lsnr, boolean rewind) {
    A.notNull(lsnr, "lsnr");

    Map<Object, Object> attrs = null;

    List<GridTaskSessionAttributeListener> lsnrs;

    synchronized (mux) {
      lsnrs = new ArrayList<GridTaskSessionAttributeListener>(this.lsnrs.size());

      lsnrs.addAll(this.lsnrs);

      lsnrs.add(lsnr);

      lsnrs = Collections.unmodifiableList(lsnrs);

      this.lsnrs = lsnrs;

      if (rewind) attrs = new HashMap<Object, Object>(this.attrs);
    }

    if (rewind) {
      for (Map.Entry<Object, Object> entry : attrs.entrySet()) {
        for (GridTaskSessionAttributeListener l : lsnrs) {
          l.onAttributeSet(entry.getKey(), entry.getValue());
        }
      }
    }
  }
  /**
   * Increases priority if a job has been bumped down.
   *
   * @param waitJobs Ordered collection of collision contexts for jobs that are currently waiting
   *     for execution.
   * @param passiveJobs Reordered collection of collision contexts for waiting jobs.
   */
  private void bumpPriority(
      Collection<GridCollisionJobContext> waitJobs, List<GridCollisionJobContext> passiveJobs) {
    assert waitJobs != null;
    assert passiveJobs != null;
    assert waitJobs.size() == passiveJobs.size();

    for (int i = 0; i < passiveJobs.size(); i++) {
      GridCollisionJobContext ctx = passiveJobs.get(i);

      if (i > indexOf(waitJobs, ctx))
        ctx.getJobContext().setAttribute(jobAttrKey, getJobPriority(ctx) + starvationInc);
    }
  }
Example No. 5
  /**
   * Gets values referenced by sequential keys, e.g. {@code key1...keyN}.
   *
   * @param keyPrefix Key prefix, e.g. {@code key} for {@code key1...keyN}.
   * @param params Parameters map.
   * @return Values.
   */
  @Nullable
  protected List<Object> values(String keyPrefix, Map<String, Object> params) {
    assert keyPrefix != null;

    List<Object> vals = new LinkedList<>();

    for (int i = 1; ; i++) {
      String key = keyPrefix + i;

      if (params.containsKey(key)) vals.add(params.get(key));
      else break;
    }

    return vals;
  }
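
A usage sketch for the method above, assuming it is called from a subclass of the same handler: collection stops at the first missing index, so a gap in the key sequence hides everything after it.

  Map<String, Object> params = new HashMap<>();

  params.put("key1", "a");
  params.put("key2", "b");
  params.put("key4", "d"); // never collected: the scan stops at the missing "key3"

  List<Object> vals = values("key", params);

  // vals is now ["a", "b"].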
Example No. 6
  /** {@inheritDoc} */
  @Override
  public boolean removeAttributeListener(GridTaskSessionAttributeListener lsnr) {
    A.notNull(lsnr, "lsnr");

    synchronized (mux) {
      List<GridTaskSessionAttributeListener> lsnrs =
          new ArrayList<GridTaskSessionAttributeListener>(this.lsnrs);

      boolean rmv = lsnrs.remove(lsnr);

      this.lsnrs = Collections.unmodifiableList(lsnrs);

      return rmv;
    }
  }
Example No. 7
  /** {@inheritDoc} */
  @Override
  protected boolean hasReaders() throws GridCacheEntryRemovedException {
    synchronized (mux) {
      checkReaders();

      return !readers.isEmpty();
    }
  }
Example No. 8
  /**
   * @param nodeId Reader to remove.
   * @param msgId Message ID.
   * @return {@code True} if reader was removed as a result of this operation.
   * @throws GridCacheEntryRemovedException If entry was removed.
   */
  public boolean removeReader(UUID nodeId, long msgId) throws GridCacheEntryRemovedException {
    synchronized (mux) {
      checkObsolete();

      ReaderId reader = readerId(nodeId);

      if (reader == null || reader.messageId() > msgId) return false;

      readers = new LinkedList<ReaderId>(readers);

      readers.remove(reader);

      // Seal.
      readers = Collections.unmodifiableList(readers);

      return true;
    }
  }
  /** {@inheritDoc} */
  @Override
  public GridDeployment getDeployment(GridDeploymentMetadata meta) {
    assert meta != null;

    assert ctx.config().isPeerClassLoadingEnabled();

    // Validate metadata.
    assert meta.classLoaderId() != null;
    assert meta.senderNodeId() != null;
    assert meta.sequenceNumber() >= -1;
    assert meta.parentLoader() == null;

    if (log.isDebugEnabled())
      log.debug("Starting to peer-load class based on deployment metadata: " + meta);

    while (true) {
      List<SharedDeployment> depsToCheck = null;

      SharedDeployment dep = null;

      synchronized (mux) {
        // Check obsolete request.
        if (isDeadClassLoader(meta)) return null;

        List<SharedDeployment> deps = cache.get(meta.userVersion());

        if (deps != null) {
          assert !deps.isEmpty();

          for (SharedDeployment d : deps) {
            if (d.hasParticipant(meta.senderNodeId(), meta.classLoaderId())
                || meta.senderNodeId().equals(ctx.localNodeId())) {
              // Done.
              dep = d;

              break;
            }
          }

          if (dep == null) {
            GridTuple2<Boolean, SharedDeployment> redeployCheck = checkRedeploy(meta);

            if (!redeployCheck.get1()) {
              // Checking for redeployment encountered invalid state.
              if (log.isDebugEnabled())
                log.debug("Checking for redeployment encountered invalid state: " + meta);

              return null;
            }

            dep = redeployCheck.get2();

            if (dep == null) {
              // Find existing deployments that need to be checked
              // whether they should be reused for this request.
              for (SharedDeployment d : deps) {
                if (!d.isPendingUndeploy() && !d.isUndeployed()) {
                  if (depsToCheck == null) depsToCheck = new LinkedList<SharedDeployment>();

                  if (log.isDebugEnabled()) log.debug("Adding deployment to check: " + d);

                  depsToCheck.add(d);
                }
              }

              // If no deployment can be reused, create a new one.
              if (depsToCheck == null) {
                dep = createNewDeployment(meta, false);

                deps.add(dep);
              }
            }
          }
        } else {
          GridTuple2<Boolean, SharedDeployment> redeployCheck = checkRedeploy(meta);

          if (!redeployCheck.get1()) {
            // Checking for redeployment encountered invalid state.
            if (log.isDebugEnabled())
              log.debug("Checking for redeployment encountered invalid state: " + meta);

            return null;
          }

          dep = redeployCheck.get2();

          if (dep == null)
            // Create peer class loader.
            dep = createNewDeployment(meta, true);
        }
      }

      if (dep != null) {
        if (log.isDebugEnabled())
          log.debug("Found SHARED or CONTINUOUS deployment after first check: " + dep);

        // Cache the deployed class.
        Class<?> cls = dep.deployedClass(meta.className(), meta.alias());

        if (cls == null) {
          U.warn(
              log,
              "Failed to load peer class (ignore if class got undeployed during preloading) [alias="
                  + meta.alias()
                  + ", dep="
                  + dep
                  + ']');

          return null;
        }

        return dep;
      }

      assert meta.parentLoader() == null;
      assert depsToCheck != null;
      assert !depsToCheck.isEmpty();

      /*
       * Logic below must be performed outside of synchronization
       * because it involves network calls.
       */

      // Check if class can be loaded from existing nodes.
      // In most cases this loop will find something.
      for (SharedDeployment d : depsToCheck) {
        // Load class. Note, that remote node will not load this class.
        // The class will only be loaded on this node.
        Class<?> cls = d.deployedClass(meta.className(), meta.alias());

        if (cls != null) {
          synchronized (mux) {
            if (!d.isUndeployed() && !d.isPendingUndeploy()) {
              if (!addParticipant(d, meta)) return null;

              if (log.isDebugEnabled())
                log.debug(
                    "Acquired deployment after verifying it's availability on "
                        + "existing nodes [depCls="
                        + cls
                        + ", dep="
                        + d
                        + ", meta="
                        + meta
                        + ']');

              return d;
            }
          }
        } else if (log.isDebugEnabled()) {
          log.debug(
              "Deployment cannot be reused (class does not exist on participating nodes) [dep="
                  + d
                  + ", meta="
                  + meta
                  + ']');
        }
      }

      // We are here either because all participant nodes failed
      // or the class indeed should have a separate deployment.
      for (SharedDeployment d : depsToCheck) {
        // Temporary class loader.
        ClassLoader temp =
            new GridDeploymentClassLoader(
                GridUuid.randomUuid(),
                meta.userVersion(),
                meta.deploymentMode(),
                true,
                ctx,
                ctxLdr,
                meta.classLoaderId(),
                meta.senderNodeId(),
                meta.sequenceNumber(),
                comm,
                ctx.config().getNetworkTimeout(),
                log,
                ctx.config().getPeerClassLoadingClassPathExclude(),
                0,
                false);

        String path = U.classNameToResourceName(d.sampleClassName());

        // We check if any random class from existing deployment can be
        // loaded from sender node. If it can, then we reuse existing
        // deployment.
        InputStream rsrcIn = temp.getResourceAsStream(path);

        if (rsrcIn != null) {
          // We don't need the actual stream.
          U.closeQuiet(rsrcIn);

          synchronized (mux) {
            if (d.isUndeployed() || d.isPendingUndeploy()) continue;

            // Add new node prior to loading the class, so we attempt
            // to load the class from the latest node.
            if (!addParticipant(d, meta)) {
              if (log.isDebugEnabled())
                log.debug(
                    "Failed to add participant to deployment "
                        + "[meta="
                        + meta
                        + ", dep="
                        + d
                        + ']');

              return null;
            }
          }

          Class<?> depCls = d.deployedClass(meta.className(), meta.alias());

          if (depCls == null) {
            U.error(
                log,
                "Failed to peer load class after loading it as a resource [alias="
                    + meta.alias()
                    + ", dep="
                    + d
                    + ']');

            return null;
          }

          if (log.isDebugEnabled())
            log.debug(
                "Acquired deployment class after verifying other class "
                    + "availability on sender node [depCls="
                    + depCls
                    + ", rndCls="
                    + d.sampleClass()
                    + ", sampleClsName="
                    + d.sampleClassName()
                    + ", meta="
                    + meta
                    + ']');

          return d;
        } else if (log.isDebugEnabled())
          log.debug(
              "Deployment cannot be reused (random class could not be loaded from sender node) [dep="
                  + d
                  + ", meta="
                  + meta
                  + ']');
      }

      synchronized (mux) {
        if (log.isDebugEnabled())
          log.debug(
              "None of the existing class-loaders fit (will try to create a new one): " + meta);

        // Check obsolete request.
        if (isDeadClassLoader(meta)) return null;

        // Check that deployment picture has not changed.
        List<SharedDeployment> deps = cache.get(meta.userVersion());

        if (deps != null) {
          assert !deps.isEmpty();

          boolean retry = false;

          for (SharedDeployment d : deps) {
            // Double check if sender was already added.
            if (d.hasParticipant(meta.senderNodeId(), meta.classLoaderId())) {
              dep = d;

              retry = false;

              break;
            }

            // New deployment was added while outside of synchronization.
            // Need to recheck it again.
            if (!d.isPendingUndeploy() && !d.isUndeployed() && !depsToCheck.contains(d))
              retry = true;
          }

          if (retry) {
            if (log.isDebugEnabled()) log.debug("Retrying due to concurrency issues: " + meta);

            // Outer while loop.
            continue;
          }

          if (dep == null) {
            // No new deployments were added, so we can safely add ours.
            dep = createNewDeployment(meta, false);

            deps.add(dep);

            if (log.isDebugEnabled())
              log.debug(
                  "Adding new deployment within second check [dep=" + dep + ", meta=" + meta + ']');
          }
        } else {
          dep = createNewDeployment(meta, true);

          if (log.isDebugEnabled())
            log.debug(
                "Created new deployment within second check [dep=" + dep + ", meta=" + meta + ']');
        }
      }

      if (dep != null) {
        // Cache the deployed class.
        Class<?> cls = dep.deployedClass(meta.className(), meta.alias());

        if (cls == null) {
          U.warn(
              log,
              "Failed to load peer class (ignore if class got undeployed during preloading) [alias="
                  + meta.alias()
                  + ", dep="
                  + dep
                  + ']');

          return null;
        }
      }

      return dep;
    }
  }
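
Stripped of the deployment-specific details, the method above is a check / network-call / re-check loop: candidates are gathered under mux, the lock is released for the slow resource probe, and the result is re-validated against possibly changed state before being accepted, retrying from the top on a conflict. A self-contained skeleton of that pattern (all names below are illustrative, not from the original code):

  /** Skeleton of the check / network-call / re-check loop used above. */
  abstract class DoubleCheckLoop<C, R> {
    private final Object mux = new Object();

    abstract C findCandidate(); // call only under mux

    abstract R verifyOverNetwork(C cand); // slow I/O; never call under mux

    abstract boolean stateChanged(C cand); // call only under mux

    R run() {
      while (true) {
        C cand;

        synchronized (mux) {
          cand = findCandidate(); // first check, under lock

          if (cand == null) return null; // nothing can ever match
        }

        R res = verifyOverNetwork(cand); // network call outside the lock

        synchronized (mux) {
          if (stateChanged(cand)) continue; // picture changed: retry

          return res; // still valid: accept
        }
      }
    }
  }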
Example No. 10
  /**
   * Creates REST request.
   *
   * @param cmd Command.
   * @param params Parameters.
   * @return REST request.
   * @throws GridException If creation failed.
   */
  @Nullable
  private GridRestRequest createRequest(
      GridRestCommand cmd, Map<String, Object> params, ServletRequest req) throws GridException {
    GridRestRequest restReq;

    switch (cmd) {
      case CACHE_GET:
      case CACHE_GET_ALL:
      case CACHE_PUT:
      case CACHE_PUT_ALL:
      case CACHE_REMOVE:
      case CACHE_REMOVE_ALL:
      case CACHE_ADD:
      case CACHE_CAS:
      case CACHE_METRICS:
      case CACHE_REPLACE:
      case CACHE_DECREMENT:
      case CACHE_INCREMENT:
      case CACHE_APPEND:
      case CACHE_PREPEND:
        {
          GridRestCacheRequest restReq0 = new GridRestCacheRequest();

          restReq0.cacheName((String) params.get("cacheName"));
          restReq0.key(params.get("key"));
          restReq0.value(params.get("val"));
          restReq0.value2(params.get("val2"));

          Object val1 = params.get("val1");

          if (val1 != null) restReq0.value(val1);

          restReq0.cacheFlags(intValue("cacheFlags", params, 0));
          restReq0.ttl(longValue("exp", params, null));
          restReq0.initial(longValue("init", params, null));
          restReq0.delta(longValue("delta", params, null));

          if (cmd == CACHE_GET_ALL || cmd == CACHE_PUT_ALL || cmd == CACHE_REMOVE_ALL) {
            List<Object> keys = values("k", params);
            List<Object> vals = values("v", params);

            if (keys.size() < vals.size())
              throw new GridException(
                  "Number of keys must be greater or equals to number of values.");

            Map<Object, Object> map = U.newHashMap(keys.size());

            Iterator<Object> keyIt = keys.iterator();
            Iterator<Object> valIt = vals.iterator();

            while (keyIt.hasNext()) map.put(keyIt.next(), valIt.hasNext() ? valIt.next() : null);

            restReq0.values(map);
          }

          restReq = restReq0;

          break;
        }

      case TOPOLOGY:
      case NODE:
        {
          GridRestTopologyRequest restReq0 = new GridRestTopologyRequest();

          restReq0.includeMetrics(Boolean.parseBoolean((String) params.get("mtr")));
          restReq0.includeAttributes(Boolean.parseBoolean((String) params.get("attr")));

          restReq0.nodeIp((String) params.get("ip"));

          restReq0.nodeId(uuidValue("id", params));

          restReq = restReq0;

          break;
        }

      case EXE:
      case RESULT:
      case NOOP:
        {
          GridRestTaskRequest restReq0 = new GridRestTaskRequest();

          restReq0.taskId((String) params.get("id"));
          restReq0.taskName((String) params.get("name"));

          restReq0.params(values("p", params));

          restReq0.async(Boolean.parseBoolean((String) params.get("async")));

          restReq0.timeout(longValue("timeout", params, 0L));

          restReq = restReq0;

          break;
        }

      case LOG:
        {
          GridRestLogRequest restReq0 = new GridRestLogRequest();

          restReq0.path((String) params.get("path"));

          restReq0.from(intValue("from", params, -1));
          restReq0.to(intValue("to", params, -1));

          restReq = restReq0;

          break;
        }

      case VERSION:
        {
          restReq = new GridRestRequest();

          break;
        }

      default:
        throw new GridException("Invalid command: " + cmd);
    }

    restReq.address(new InetSocketAddress(req.getRemoteAddr(), req.getRemotePort()));

    restReq.command(cmd);

    if (params.containsKey("gridgain.login") || params.containsKey("gridgain.password")) {
      GridSecurityCredentials cred =
          new GridSecurityCredentials(
              (String) params.get("gridgain.login"), (String) params.get("gridgain.password"));

      restReq.credentials(cred);
    }

    String clientId = (String) params.get("clientId");

    try {
      if (clientId != null) restReq.clientId(UUID.fromString(clientId));
    } catch (Exception ignored) {
      // Ignore an invalid client ID; the REST handler will deal with it.
    }

    String destId = (String) params.get("destId");

    try {
      if (destId != null) restReq.destinationId(UUID.fromString(destId));
    } catch (IllegalArgumentException ignored) {
      // Don't fail - try to execute locally.
    }

    String sesTokStr = (String) params.get("sessionToken");

    try {
      if (sesTokStr != null) restReq.sessionToken(U.hexString2ByteArray(sesTokStr));
    } catch (IllegalArgumentException ignored) {
      // Ignore invalid session token.
    }

    return restReq;
  }
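
As a usage illustration, a CACHE_PUT_ALL request arrives with indexed k1...kN / v1...vN parameters that the method pairs positionally via values(). The parameter names k and v are the ones the code actually scans; the concrete values below are hypothetical:

  // Hypothetical servlet parameters: k1=alice, v1=1, k2=bob, v2=2.
  Map<String, Object> params = new HashMap<>();

  params.put("cacheName", "partitioned");
  params.put("k1", "alice");
  params.put("v1", "1");
  params.put("k2", "bob");
  params.put("v2", "2");

  // createRequest(CACHE_PUT_ALL, params, req) collects ["alice", "bob"] and
  // ["1", "2"], then zips them into {alice=1, bob=2}; a trailing key with no
  // matching value would map to null.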
  /**
   * @param nodeId Primary node ID.
   * @param req Request.
   * @return Remote transaction.
   * @throws GridException If failed.
   * @throws GridDistributedLockCancelledException If lock has been cancelled.
   */
  @SuppressWarnings({"RedundantTypeArguments"})
  @Nullable
  public GridNearTxRemote<K, V> startRemoteTx(UUID nodeId, GridDhtLockRequest<K, V> req)
      throws GridException, GridDistributedLockCancelledException {
    List<byte[]> nearKeyBytes = req.nearKeyBytes();

    GridNearTxRemote<K, V> tx = null;

    ClassLoader ldr = ctx.deploy().globalLoader();

    if (ldr != null) {
      for (int i = 0; i < nearKeyBytes.size(); i++) {
        byte[] bytes = nearKeyBytes.get(i);

        if (bytes == null) continue;

        K key = req.nearKeys().get(i);

        Collection<GridCacheMvccCandidate<K>> cands = req.candidatesByIndex(i);

        if (log.isDebugEnabled()) log.debug("Unmarshalled key: " + key);

        GridNearCacheEntry<K, V> entry = null;

        while (true) {
          try {
            entry = peekExx(key);

            if (entry != null) {
              entry.keyBytes(bytes);

              // Handle implicit locks for pessimistic transactions.
              if (req.inTx()) {
                tx = ctx.tm().tx(req.version());

                if (tx != null) tx.addWrite(key, bytes, null /*Value.*/, null /*Value bytes.*/);
                else {
                  tx =
                      new GridNearTxRemote<K, V>(
                          nodeId,
                          req.nearNodeId(),
                          req.threadId(),
                          req.version(),
                          null,
                          PESSIMISTIC,
                          req.isolation(),
                          req.isInvalidate(),
                          req.timeout(),
                          key,
                          bytes,
                          null, // Value.
                          null, // Value bytes.
                          ctx);

                  if (tx.empty()) return tx;

                  tx = ctx.tm().onCreated(tx);

                  if (tx == null || !ctx.tm().onStarted(tx))
                    throw new GridCacheTxRollbackException(
                        "Failed to acquire lock "
                            + "(transaction has been completed): "
                            + req.version());
                }
              }

              // Add remote candidate before reordering.
              entry.addRemote(
                  req.nodeId(),
                  nodeId,
                  req.threadId(),
                  req.version(),
                  req.timeout(),
                  tx != null && tx.ec(),
                  tx != null,
                  tx != null && tx.implicitSingle());

              // Remote candidates for ordered lock queuing.
              entry.addRemoteCandidates(
                  cands, req.version(), req.committedVersions(), req.rolledbackVersions());

              entry.orderOwned(req.version(), req.owned(entry.key()));
            }

            // Double-check in case if sender node left the grid.
            if (ctx.discovery().node(req.nodeId()) == null) {
              if (log.isDebugEnabled())
                log.debug("Node requesting lock left grid (lock request will be ignored): " + req);

              if (tx != null) tx.rollback();

              return null;
            }

            // Entry is legit.
            break;
          } catch (GridCacheEntryRemovedException ignored) {
            assert entry.obsoleteVersion() != null
                : "Obsolete flag not set on removed entry: " + entry;

            if (log.isDebugEnabled())
              log.debug("Received entry removed exception (will retry on renewed entry): " + entry);

            if (tx != null) {
              tx.clearEntry(entry.key());

              if (log.isDebugEnabled())
                log.debug(
                    "Cleared removed entry from remote transaction (will retry) [entry="
                        + entry
                        + ", tx="
                        + tx
                        + ']');
            }
          }
        }
      }
    } else {
      String err = "Failed to acquire deployment class loader for message: " + req;

      U.warn(log, err);

      throw new GridException(err);
    }

    return tx;
  }
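
Both here and in the processLockRequest example further below, each key is handled inside a while (true) loop that retries on GridCacheEntryRemovedException: the exception only means the entry was concurrently purged, so the loop looks the key up again. The idiom, reduced to its core (process stands in for the lock and transaction bookkeeping done inline above):

  GridNearCacheEntry<K, V> entry;

  while (true) {
    try {
      entry = peekExx(key); // fresh lookup on every attempt

      process(entry); // may throw if the entry was removed concurrently

      break; // entry is legit: done
    } catch (GridCacheEntryRemovedException ignored) {
      // Entry vanished between lookup and use; retry on a renewed entry.
    }
  }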
  /**
   * Creates and caches new deployment.
   *
   * @param meta Deployment metadata.
   * @param isCache Whether or not to cache.
   * @return New deployment.
   */
  private SharedDeployment createNewDeployment(GridDeploymentMetadata meta, boolean isCache) {
    assert Thread.holdsLock(mux);

    assert meta.parentLoader() == null;

    GridUuid ldrId = GridUuid.randomUuid();

    GridDeploymentClassLoader clsLdr;

    if (meta.deploymentMode() == CONTINUOUS || meta.participants() == null) {
      // Create peer class loader.
      // Note that we are passing empty list for local P2P exclude, as it really
      // does not make sense with shared deployment.
      clsLdr =
          new GridDeploymentClassLoader(
              ldrId,
              meta.userVersion(),
              meta.deploymentMode(),
              false,
              ctx,
              ctxLdr,
              meta.classLoaderId(),
              meta.senderNodeId(),
              meta.sequenceNumber(),
              comm,
              ctx.config().getNetworkTimeout(),
              log,
              ctx.config().getPeerClassLoadingClassPathExclude(),
              ctx.config().getPeerClassLoadingMissedResourcesCacheSize(),
              meta.deploymentMode() == CONTINUOUS /* enable class byte cache in CONTINUOUS mode */);

      if (meta.participants() != null)
        for (Map.Entry<UUID, GridTuple2<GridUuid, Long>> e : meta.participants().entrySet())
          clsLdr.register(e.getKey(), e.getValue().get1(), e.getValue().get2());

      if (log.isDebugEnabled())
        log.debug(
            "Created class loader in CONTINUOUS mode or without participants "
                + "[ldr="
                + clsLdr
                + ", meta="
                + meta
                + ']');
    } else {
      assert meta.deploymentMode() == SHARED;

      // Create peer class loader.
      // Note that we are passing empty list for local P2P exclude, as it really
      // does not make sense with shared deployment.
      clsLdr =
          new GridDeploymentClassLoader(
              ldrId,
              meta.userVersion(),
              meta.deploymentMode(),
              false,
              ctx,
              ctxLdr,
              meta.participants(),
              comm,
              ctx.config().getNetworkTimeout(),
              log,
              ctx.config().getPeerClassLoadingClassPathExclude(),
              ctx.config().getPeerClassLoadingMissedResourcesCacheSize(),
              false);

      if (log.isDebugEnabled())
        log.debug(
            "Created classloader in SHARED mode with participants "
                + "[ldr="
                + clsLdr
                + ", meta="
                + meta
                + ']');
    }

    // Give this deployment a unique class loader ID to emphasize that this
    // ID is unique to this shared deployment and is not the ID of the loader
    // on the sender node.
    SharedDeployment dep =
        new SharedDeployment(
            meta.deploymentMode(), clsLdr, ldrId, -1, meta.userVersion(), meta.alias());

    if (log.isDebugEnabled()) log.debug("Created new deployment: " + dep);

    if (isCache) {
      List<SharedDeployment> deps =
          F.addIfAbsent(cache, meta.userVersion(), new LinkedList<SharedDeployment>());

      assert deps != null;

      deps.add(dep);

      if (log.isDebugEnabled()) log.debug("Added deployment to cache: " + cache);
    }

    return dep;
  }
  /** {@inheritDoc} */
  @Override
  public void onCollision(
      Collection<GridCollisionJobContext> waitJobs,
      Collection<GridCollisionJobContext> activeJobs) {
    assert waitJobs != null;
    assert activeJobs != null;

    int activeSize = F.size(activeJobs, RUNNING_JOBS);

    waitingCnt.set(waitJobs.size());
    runningCnt.set(activeSize);
    heldCnt.set(activeJobs.size() - activeSize);

    int waitSize = waitJobs.size();

    int activateCnt = parallelJobsNum - activeSize;

    if (activateCnt > 0 && !waitJobs.isEmpty()) {
      if (waitJobs.size() <= activateCnt) {
        for (GridCollisionJobContext waitJob : waitJobs) {
          waitJob.activate();

          waitSize--;
        }
      } else {
        List<GridCollisionJobContext> passiveList =
            new ArrayList<GridCollisionJobContext>(waitJobs);

        Collections.sort(
            passiveList,
            new Comparator<GridCollisionJobContext>() {
              /** {@inheritDoc} */
              @Override
              public int compare(GridCollisionJobContext o1, GridCollisionJobContext o2) {
                int p1 = getJobPriority(o1);
                int p2 = getJobPriority(o2);

                return p1 < p2 ? 1 : p1 == p2 ? 0 : -1;
              }
            });

        if (preventStarvation) bumpPriority(waitJobs, passiveList);

        for (int i = 0; i < activateCnt; i++) {
          passiveList.get(i).activate();

          waitSize--;
        }
      }
    }

    if (waitSize > waitJobsNum) {
      List<GridCollisionJobContext> waitList = new ArrayList<GridCollisionJobContext>(waitJobs);

      // Put jobs with highest priority first.
      Collections.sort(
          waitList,
          new Comparator<GridCollisionJobContext>() {
            /** {@inheritDoc} */
            @Override
            public int compare(GridCollisionJobContext o1, GridCollisionJobContext o2) {
              int p1 = getJobPriority(o1);
              int p2 = getJobPriority(o2);

              return p1 < p2 ? 1 : p1 == p2 ? 0 : -1;
            }
          });

      int skip = waitJobs.size() - waitSize;

      int i = 0;

      for (GridCollisionJobContext waitCtx : waitList) {
        if (++i >= skip) {
          waitCtx.cancel();

          if (--waitSize <= waitJobsNum) break;
        }
      }
    }
  }
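
Both sorts above use the same hand-written descending-priority comparator; on Java 8 and later the equivalent comparator can be written more compactly (a sketch assuming getJobPriority is visible as an instance method, not the original code):

  // Highest priority first; equivalent to the anonymous Comparator above.
  passiveList.sort(
      Comparator.<GridCollisionJobContext>comparingInt(this::getJobPriority).reversed());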
Example No. 14
  /**
   * Performs flush.
   *
   * @throws GridException If failed.
   */
  private void doFlush() throws GridException {
    lastFlushTime = U.currentTimeMillis();

    List<GridFuture> activeFuts0 = null;

    int doneCnt = 0;

    for (GridFuture<?> f : activeFuts) {
      if (!f.isDone()) {
        if (activeFuts0 == null) activeFuts0 = new ArrayList<>((int) (activeFuts.size() * 1.2));

        activeFuts0.add(f);
      } else {
        f.get();

        doneCnt++;
      }
    }

    if (activeFuts0 == null || activeFuts0.isEmpty()) return;

    while (true) {
      Queue<GridFuture<?>> q = null;

      for (Buffer buf : bufMappings.values()) {
        GridFuture<?> flushFut = buf.flush();

        if (flushFut != null) {
          if (q == null) q = new ArrayDeque<>(bufMappings.size() * 2);

          q.add(flushFut);
        }
      }

      if (q != null) {
        assert !q.isEmpty();

        boolean err = false;

        for (GridFuture fut = q.poll(); fut != null; fut = q.poll()) {
          try {
            fut.get();
          } catch (GridException e) {
            if (log.isDebugEnabled()) log.debug("Failed to flush buffer: " + e);

            err = true;
          }
        }

        if (err)
          // Remaps needed - flush buffers.
          continue;
      }

      doneCnt = 0;

      for (int i = 0; i < activeFuts0.size(); i++) {
        GridFuture f = activeFuts0.get(i);

        if (f == null) doneCnt++;
        else if (f.isDone()) {
          f.get();

          doneCnt++;

          activeFuts0.set(i, null);
        } else break;
      }

      if (doneCnt == activeFuts0.size()) return;
    }
  }
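
The flush queue above is drained with a poll loop so that it is empty afterwards regardless of failures, and errors are remembered rather than aborting the pass. With standard java.util.concurrent types the same drain idiom looks like this (a sketch; the original uses GridFuture, not Future):

  Queue<Future<?>> q = new ArrayDeque<>();

  // ... flush futures are added elsewhere ...

  boolean err = false;

  for (Future<?> fut = q.poll(); fut != null; fut = q.poll()) {
    try {
      fut.get();
    } catch (ExecutionException e) {
      err = true; // remember the failure, but keep draining the queue
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // preserve interrupt status

      err = true;
    }
  }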
  /**
   * Processes lock request.
   *
   * @param nodeId Sender node ID.
   * @param msg Lock request.
   */
  @SuppressWarnings({"unchecked", "ThrowableInstanceNeverThrown"})
  private void processLockRequest(UUID nodeId, GridDistributedLockRequest<K, V> msg) {
    assert !nodeId.equals(locNodeId);

    List<byte[]> keys = msg.keyBytes();

    int cnt = keys.size();

    GridReplicatedTxRemote<K, V> tx = null;

    GridDistributedLockResponse res;

    ClassLoader ldr = null;

    try {
      ldr = ctx.deploy().globalLoader();

      if (ldr != null) {
        res = new GridDistributedLockResponse(msg.version(), msg.futureId(), cnt);

        for (int i = 0; i < keys.size(); i++) {
          byte[] bytes = keys.get(i);
          K key = msg.keys().get(i);

          Collection<GridCacheMvccCandidate<K>> cands = msg.candidatesByIndex(i);

          if (bytes == null) continue;

          if (log.isDebugEnabled()) log.debug("Unmarshalled key: " + key);

          GridDistributedCacheEntry<K, V> entry = null;

          while (true) {
            try {
              entry = entryexx(key);

              // Handle implicit locks for pessimistic transactions.
              if (msg.inTx()) {
                tx = ctx.tm().tx(msg.version());

                if (tx != null) {
                  if (msg.txRead()) tx.addRead(key, bytes);
                  else tx.addWrite(key, bytes);
                } else {
                  tx =
                      new GridReplicatedTxRemote<K, V>(
                          nodeId,
                          msg.threadId(),
                          msg.version(),
                          null,
                          PESSIMISTIC,
                          msg.isolation(),
                          msg.isInvalidate(),
                          msg.timeout(),
                          key,
                          bytes,
                          msg.txRead(),
                          ctx);

                  tx = ctx.tm().onCreated(tx);

                  if (tx == null || !ctx.tm().onStarted(tx))
                    throw new GridCacheTxRollbackException(
                        "Failed to acquire lock "
                            + "(transaction has been completed): "
                            + msg.version());
                }
              }

              // Add remote candidate before reordering.
              entry.addRemote(
                  msg.nodeId(),
                  null,
                  msg.threadId(),
                  msg.version(),
                  msg.timeout(),
                  tx != null && tx.ec(),
                  tx != null,
                  tx != null && tx.implicitSingle());

              // Remote candidates for ordered lock queuing.
              entry.addRemoteCandidates(
                  cands, msg.version(), msg.committedVersions(), msg.rolledbackVersions());

              // Double-check in case if sender node left the grid.
              if (ctx.discovery().node(msg.nodeId()) == null) {
                if (log.isDebugEnabled())
                  log.debug(
                      "Node requesting lock left grid (lock request will be ignored): " + msg);

                if (tx != null) tx.rollback();

                return;
              }

              res.setCandidates(
                  i,
                  entry.localCandidates(),
                  ctx.tm().committedVersions(msg.version()),
                  ctx.tm().rolledbackVersions(msg.version()));

              res.addValueBytes(
                  entry.rawGet(), msg.returnValue(i) ? entry.valueBytes(null) : null, ctx);

              // Entry is legit.
              break;
            } catch (GridCacheEntryRemovedException ignored) {
              assert entry.obsoleteVersion() != null
                  : "Obsolete flag not set on removed entry: " + entry;

              if (log.isDebugEnabled())
                log.debug(
                    "Received entry removed exception (will retry on renewed entry): " + entry);

              if (tx != null) {
                tx.clearEntry(entry.key());

                if (log.isDebugEnabled())
                  log.debug(
                      "Cleared removed entry from remote transaction (will retry) [entry="
                          + entry
                          + ", tx="
                          + tx
                          + ']');
              }
            }
          }
        }
      } else {
        String err = "Failed to acquire deployment class for message: " + msg;

        U.warn(log, err);

        res =
            new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err));
      }
    } catch (GridCacheTxRollbackException e) {
      if (log.isDebugEnabled())
        log.debug("Received lock request for completed transaction (will ignore): " + e);

      res = new GridDistributedLockResponse(msg.version(), msg.futureId(), e);
    } catch (GridException e) {
      String err = "Failed to unmarshal at least one of the keys for lock request message: " + msg;

      log.error(err, e);

      res =
          new GridDistributedLockResponse(msg.version(), msg.futureId(), new GridException(err, e));

      if (tx != null) tx.rollback();
    } catch (GridDistributedLockCancelledException ignored) {
      // Received lock request for cancelled lock.
      if (log.isDebugEnabled())
        log.debug("Received lock request for canceled lock (will ignore): " + msg);

      if (tx != null) tx.rollback();

      // Don't send response back.
      return;
    }

    GridNode node = ctx.discovery().node(msg.nodeId());

    boolean releaseAll = false;

    if (node != null) {
      try {
        // Reply back to sender.
        ctx.io().send(node, res);
      } catch (GridException e) {
        U.error(log, "Failed to send message to node (did the node leave grid?): " + node.id(), e);

        releaseAll = ldr != null;
      }
    }
    // If sender left grid, release all locks acquired so far.
    else releaseAll = ldr != null;

    // Release all locks because sender node left grid.
    if (releaseAll) {
      for (K key : msg.keys()) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekexx(key);

          try {
            if (entry != null) entry.removeExplicitNodeLocks(msg.nodeId());

            break;
          } catch (GridCacheEntryRemovedException ignore) {
            if (log.isDebugEnabled())
              log.debug(
                  "Attempted to remove lock on removed entity during failure "
                      + "of replicated lock request handling (will retry): "
                      + entry);
          }
        }
      }

      U.warn(
          log, "Sender node left grid in the midst of lock acquisition (locks will be released).");
    }
  }
  /**
   * This method is called to map or split grid task into multiple grid jobs. This is the first
   * method that gets called when task execution starts.
   *
   * @param data Task execution argument. Can be {@code null}. This is the same argument as the one
   *     passed into {@code Grid#execute(...)} methods.
   * @param subgrid Nodes available for this task execution. Note that the order of nodes is
   *     guaranteed to be randomized by the container. This ensures that every time you simply
   *     iterate through the grid nodes, the order of nodes will be random, which over time should
   *     result in all nodes being used equally.
   * @return Map of grid jobs assigned to subgrid nodes. Unless {@link
   *     GridComputeTaskContinuousMapper} is injected into the task, an exception will be thrown
   *     if {@code null} or an empty map is returned.
   * @throws GridException If mapping could not complete successfully. This exception will be thrown
   *     out of {@link GridComputeTaskFuture#get()} method.
   */
  @Override
  public Map<? extends GridComputeJob, GridNode> map(
      List<GridNode> subgrid, @Nullable final Collection<Integer> data) throws GridException {
    assert !subgrid.isEmpty();

    // Give preference to wanted node. Otherwise, take the first one.
    GridNode targetNode =
        F.find(
            subgrid,
            subgrid.get(0),
            new GridPredicate<GridNode>() {
              @Override
              public boolean apply(GridNode e) {
                return preferredNode.equals(e.id());
              }
            });

    return Collections.singletonMap(
        new GridComputeJobAdapter() {
          @GridLoggerResource private GridLogger log;

          @GridInstanceResource private Grid grid;

          @Override
          public Object execute() throws GridException {
            log.info("Going to put data: " + data.size());

            GridCache<Object, Object> cache = grid.cache(cacheName);

            assert cache != null;

            Map<Integer, T2<Integer, Collection<Integer>>> putMap = groupData(data);

            for (Map.Entry<Integer, T2<Integer, Collection<Integer>>> entry : putMap.entrySet()) {
              T2<Integer, Collection<Integer>> pair = entry.getValue();

              Object affKey = pair.get1();

              // Group lock partition.
              try (GridCacheTx tx =
                  cache.txStartPartition(
                      cache.affinity().partition(affKey),
                      optimistic ? OPTIMISTIC : PESSIMISTIC,
                      REPEATABLE_READ,
                      0,
                      pair.get2().size())) {
                for (Integer val : pair.get2()) cache.put(val, val);

                tx.commit();
              }
            }

            log.info("Finished put data: " + data.size());

            return data;
          }

          /**
           * Groups values by partitions.
           *
           * @param data Data to put.
           * @return Grouped map.
           */
          private Map<Integer, T2<Integer, Collection<Integer>>> groupData(Iterable<Integer> data) {
            GridCache<Object, Object> cache = grid.cache(cacheName);

            Map<Integer, T2<Integer, Collection<Integer>>> res = new HashMap<>();

            for (Integer val : data) {
              int part = cache.affinity().partition(val);

              T2<Integer, Collection<Integer>> tup = res.get(part);

              if (tup == null) {
                tup = new T2<Integer, Collection<Integer>>(val, new LinkedList<Integer>());

                res.put(part, tup);
              }

              tup.get2().add(val);
            }

            return res;
          }
        },
        targetNode);
  }
  /**
   * Removes obsolete deployments in case of redeploy.
   *
   * @param meta Request metadata.
   * @return Tuple where the first value indicates whether the check passed and the second is the
   *     newly created deployment, if one was created.
   */
  private GridTuple2<Boolean, SharedDeployment> checkRedeploy(GridDeploymentMetadata meta) {
    assert Thread.holdsLock(mux);

    SharedDeployment newDep = null;

    for (List<SharedDeployment> deps : cache.values()) {
      for (SharedDeployment dep : deps) {
        if (!dep.isUndeployed() && !dep.isPendingUndeploy()) {
          long undeployTimeout = ctx.config().getNetworkTimeout();

          SharedDeployment doomed = null;

          // Only check deployments with no participants.
          if (!dep.hasParticipants()) {
            // In case of SHARED deployment it is possible to get here if
            // unmarshalling happens during undeploy. In this case, we
            // simply don't do anything.
            if (dep.deployMode() == CONTINUOUS) {
              if (dep.existingDeployedClass(meta.className()) != null) {
                // Change from shared deploy to shared undeploy or user version change.
                // Simply remove all deployments with no participating nodes.
                if (meta.deploymentMode() == SHARED
                    || !meta.userVersion().equals(dep.userVersion())) doomed = dep;
              }
            }
          }
          // If there are participants, we undeploy if class loader ID on some node changed.
          else if (dep.existingDeployedClass(meta.className()) != null) {
            GridTuple2<GridUuid, Long> ldr = dep.getClassLoaderId(meta.senderNodeId());

            if (ldr != null) {
              if (!ldr.get1().equals(meta.classLoaderId())) {
                // If deployed sequence number is less, then schedule for undeployment.
                if (ldr.get2() < meta.sequenceNumber()) {
                  if (log.isDebugEnabled())
                    log.debug(
                        "Received request for a class with newer sequence number "
                            + "(will schedule current class for undeployment) [newSeq="
                            + meta.sequenceNumber()
                            + ", oldSeq="
                            + ldr.get2()
                            + ", senderNodeId="
                            + meta.senderNodeId()
                            + ", newClsLdrId="
                            + meta.classLoaderId()
                            + ", oldClsLdrId="
                            + ldr.get1()
                            + ']');

                  doomed = dep;
                } else if (ldr.get2() > meta.sequenceNumber()) {
                  long time = System.currentTimeMillis() - dep.timestamp();

                  if (newDep == null && time < ctx.config().getNetworkTimeout()) {
                    // Set undeployTimeout, so the class will be scheduled
                    // for undeployment.
                    undeployTimeout = ctx.config().getNetworkTimeout() - time;

                    if (log.isDebugEnabled())
                      log.debug(
                          "Received execution request for a stale class (will deploy and "
                              + "schedule undeployment in "
                              + undeployTimeout
                              + "ms) "
                              + "[curSeq="
                              + ldr.get2()
                              + ", staleSeq="
                              + meta.sequenceNumber()
                              + ", cls="
                              + meta.className()
                              + ", senderNodeId="
                              + meta.senderNodeId()
                              + ", curLdrId="
                              + ldr.get1()
                              + ", staleLdrId="
                              + meta.classLoaderId()
                              + ']');

                    // We got the redeployed class before the old one.
                    // Simply create a temporary deployment for the sender node,
                    // and schedule undeploy for it.
                    newDep = createNewDeployment(meta, false);

                    doomed = newDep;
                  } else {
                    U.warn(
                        log,
                        "Received execution request for a class that has been redeployed "
                            + "(will ignore): "
                            + meta.alias());

                    if (log.isDebugEnabled())
                      log.debug(
                          "Received execution request for a class that has been redeployed "
                              + "(will ignore) [alias="
                              + meta.alias()
                              + ", dep="
                              + dep
                              + ']');

                    return F.t(false, null);
                  }
                } else {
                  U.error(
                      log,
                      "Sequence number does not correspond to class loader ID [seqNum="
                          + meta.sequenceNumber()
                          + ", dep="
                          + dep
                          + ']');

                  return F.t(false, null);
                }
              }
            }
          }

          if (doomed != null) {
            doomed.onUndeployScheduled();

            if (log.isDebugEnabled()) log.debug("Deployment was scheduled for undeploy: " + doomed);

            // Lifespan time.
            final long endTime = System.currentTimeMillis() + undeployTimeout;

            // Deployment to undeploy.
            final SharedDeployment undep = doomed;

            ctx.timeout()
                .addTimeoutObject(
                    new GridTimeoutObject() {
                      @Override
                      public GridUuid timeoutId() {
                        return undep.classLoaderId();
                      }

                      @Override
                      public long endTime() {
                        return endTime < 0 ? Long.MAX_VALUE : endTime;
                      }

                      @Override
                      public void onTimeout() {
                        boolean removed = false;

                        // Hot redeployment.
                        synchronized (mux) {
                          assert undep.isPendingUndeploy();

                          if (!undep.isUndeployed()) {
                            undep.undeploy();

                            undep.onRemoved();

                            removed = true;

                            Collection<SharedDeployment> deps = cache.get(undep.userVersion());

                            if (deps != null) {
                              for (Iterator<SharedDeployment> i = deps.iterator(); i.hasNext(); )
                                if (i.next() == undep) i.remove();

                              if (deps.isEmpty()) cache.remove(undep.userVersion());
                            }

                            if (log.isInfoEnabled())
                              log.info(
                                  "Undeployed class loader due to deployment mode change, "
                                      + "user version change, or hot redeployment: "
                                      + undep);
                          }
                        }

                        // Outside synchronization.
                        if (removed) undep.recordUndeployed(null);
                      }
                    });
          }
        }
      }
    }

    if (newDep != null) {
      List<SharedDeployment> list =
          F.addIfAbsent(cache, meta.userVersion(), F.<SharedDeployment>newList());

      assert list != null;

      list.add(newDep);
    }

    return F.t(true, newDep);
  }
  /** {@inheritDoc} */
  @Override
  public void onKernalStart() throws GridException {
    discoLsnr =
        new GridLocalEventListener() {
          @Override
          public void onEvent(GridEvent evt) {
            assert evt instanceof GridDiscoveryEvent;

            assert evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED;

            GridDiscoveryEvent discoEvt = (GridDiscoveryEvent) evt;

            Collection<SharedDeployment> undeployed = new LinkedList<SharedDeployment>();

            if (log.isDebugEnabled()) log.debug("Processing node departure event: " + evt);

            synchronized (mux) {
              for (Iterator<List<SharedDeployment>> i1 = cache.values().iterator();
                  i1.hasNext(); ) {
                List<SharedDeployment> deps = i1.next();

                for (Iterator<SharedDeployment> i2 = deps.iterator(); i2.hasNext(); ) {
                  SharedDeployment dep = i2.next();

                  dep.removeParticipant(discoEvt.eventNodeId());

                  if (!dep.hasParticipants()) {
                    if (dep.deployMode() == SHARED) {
                      if (!dep.isUndeployed()) {
                        dep.undeploy();

                        // Undeploy.
                        i2.remove();

                        assert !dep.isRemoved();

                        dep.onRemoved();

                        undeployed.add(dep);

                        if (log.isDebugEnabled())
                          log.debug(
                              "Undeployed class loader as there are no participating "
                                  + "nodes: "
                                  + dep);
                      }
                    } else if (log.isDebugEnabled())
                      log.debug("Preserving deployment without node participants: " + dep);
                  } else if (log.isDebugEnabled())
                    log.debug("Keeping deployment as it still has participants: " + dep);
                }

                if (deps.isEmpty()) i1.remove();
              }
            }

            recordUndeployed(discoEvt.eventNodeId(), undeployed);
          }
        };

    ctx.event().addLocalEventListener(discoLsnr, EVT_NODE_FAILED, EVT_NODE_LEFT);

    Collection<SharedDeployment> undeployed = new LinkedList<SharedDeployment>();

    synchronized (mux) {
      for (Iterator<List<SharedDeployment>> i1 = cache.values().iterator(); i1.hasNext(); ) {
        List<SharedDeployment> deps = i1.next();

        for (Iterator<SharedDeployment> i2 = deps.iterator(); i2.hasNext(); ) {
          SharedDeployment dep = i2.next();

          for (UUID nodeId : dep.getParticipantNodeIds())
            if (ctx.discovery().node(nodeId) == null) dep.removeParticipant(nodeId);

          if (!dep.hasParticipants()) {
            if (dep.deployMode() == SHARED) {
              if (!dep.isUndeployed()) {
                dep.undeploy();

                // Undeploy.
                i2.remove();

                dep.onRemoved();

                undeployed.add(dep);

                if (log.isDebugEnabled())
                  log.debug("Undeployed class loader as there are no participating nodes: " + dep);
              }
            } else if (log.isDebugEnabled())
              log.debug("Preserving deployment without node participants: " + dep);
          } else if (log.isDebugEnabled())
            log.debug("Keeping deployment as it still has participants: " + dep);
        }

        if (deps.isEmpty()) i1.remove();
      }
    }

    recordUndeployed(null, undeployed);

    if (log.isDebugEnabled()) log.debug("Registered deployment discovery listener: " + discoLsnr);
  }
Example No. 19
  /**
   * @param nodeId Reader to add.
   * @param msgId Message ID.
   * @return Future for all relevant transactions that were active at the time of adding the
   *     reader, or {@code null} if the reader was not added or no such transactions exist.
   * @throws GridCacheEntryRemovedException If entry was removed.
   */
  @Nullable
  public GridFuture<Boolean> addReader(UUID nodeId, long msgId)
      throws GridCacheEntryRemovedException {
    // Don't add local node as reader.
    if (cctx.nodeId().equals(nodeId)) return null;

    GridNode node = cctx.discovery().node(nodeId);

    // If remote node has no near cache, don't add it.
    if (node == null || !U.hasNearCache(node, cctx.dht().near().name())) return null;

    // If the remote node is a primary or backup node, don't add it as a reader.
    if (U.nodeIds(cctx.affinity(partition(), CU.allNodes(cctx))).contains(nodeId)) return null;

    boolean ret = false;

    GridCacheMultiTxFuture<K, V> txFut;

    Collection<GridCacheMvccCandidate<K>> cands = null;

    synchronized (mux) {
      checkObsolete();

      txFut = this.txFut;

      ReaderId reader = readerId(nodeId);

      if (reader == null) {
        reader = new ReaderId(nodeId, msgId);

        readers = new LinkedList<ReaderId>(readers);

        readers.add(reader);

        // Seal.
        readers = Collections.unmodifiableList(readers);

        txFut = this.txFut = new GridCacheMultiTxFuture<K, V>(cctx);

        cands = localCandidates();

        ret = true;
      } else {
        long id = reader.messageId();

        if (id < msgId) reader.messageId(msgId);
      }
    }

    if (ret) {
      assert txFut != null;

      if (!F.isEmpty(cands)) {
        for (GridCacheMvccCandidate<K> c : cands) {
          GridCacheTxEx<K, V> tx = cctx.tm().<GridCacheTxEx<K, V>>tx(c.version());

          if (tx != null) {
            assert tx.local();

            txFut.addTx(tx);
          }
        }
      }

      txFut.init();

      if (!txFut.isDone()) {
        txFut.listenAsync(
            new CI1<GridFuture<?>>() {
              @Override
              public void apply(GridFuture<?> f) {
                synchronized (mux) {
                  // Release memory.
                  GridDhtCacheEntry.this.txFut = null;
                }
              }
            });
      } else
        // Release memory.
        txFut = this.txFut = null;
    }

    return txFut;
  }