@NotNull
 public static char[] adaptiveLoadText(@NotNull Reader reader) throws IOException {
   char[] chars = new char[4096];
   List<char[]> buffers = null;
   int count = 0;
   int total = 0;
   while (true) {
     int n = reader.read(chars, count, chars.length - count);
     if (n <= 0) break;
     count += n;
     if (total > 1024 * 1024 * 10) throw new FileTooBigException("File too big " + reader);
     total += n;
     if (count == chars.length) {
       if (buffers == null) {
         buffers = new ArrayList<char[]>();
       }
       buffers.add(chars);
       int newLength = Math.min(1024 * 1024, chars.length * 2);
       chars = new char[newLength];
       count = 0;
     }
   }
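   // Reassemble: 'total' counts down as chunks are copied, so each full chunk
   // lands at offset (result.length - total), followed by the partial tail.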
   char[] result = new char[total];
   if (buffers != null) {
     for (char[] buffer : buffers) {
       System.arraycopy(buffer, 0, result, result.length - total, buffer.length);
       total -= buffer.length;
     }
   }
   System.arraycopy(chars, 0, result, result.length - total, total);
   return result;
 }
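A hypothetical call site for the loader above (the file name and charset are illustrative; FileTooBigException propagates for inputs past the 10 MB cap):

  // Sketch only: assumes the usual java.io and java.nio.charset imports.
  try (Reader reader = new BufferedReader(
      new InputStreamReader(new FileInputStream("notes.txt"), StandardCharsets.UTF_8))) {
    char[] text = adaptiveLoadText(reader);
    System.out.println("Loaded " + text.length + " chars");
  }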
  /** {@inheritDoc} */
  @SuppressWarnings("TypeMayBeWeakened")
  @Nullable
  private Collection<byte[]> marshalFieldsCollection(
      @Nullable Collection<Object> col, GridCacheContext<K, V> ctx) throws GridException {
    assert ctx != null;

    if (col == null) return null;

    Collection<List<Object>> col0 = new ArrayList<>(col.size());

    for (Object o : col) {
      List<GridIndexingEntity<?>> list = (List<GridIndexingEntity<?>>) o;
      List<Object> list0 = new ArrayList<>(list.size());

      for (GridIndexingEntity<?> ent : list) {
        if (ent.bytes() != null) list0.add(ent.bytes());
        else {
          if (ctx.deploymentEnabled()) prepareObject(ent.value(), ctx);

          list0.add(CU.marshal(ctx, ent.value()));
        }
      }

      col0.add(list0);
    }

    return marshalCollection(col0, ctx);
  }
  /** {@inheritDoc} */
  @SuppressWarnings("TypeMayBeWeakened")
  @Nullable
  private Collection<Object> unmarshalFieldsCollection(
      @Nullable Collection<byte[]> byteCol, GridCacheContext<K, V> ctx, ClassLoader ldr)
      throws GridException {
    assert ctx != null;
    assert ldr != null;

    Collection<Object> col = unmarshalCollection(byteCol, ctx, ldr);
    Collection<Object> col0 = null;

    if (col != null) {
      col0 = new ArrayList<>(col.size());

      for (Object o : col) {
        List<Object> list = (List<Object>) o;
        List<Object> list0 = new ArrayList<>(list.size());

        for (Object obj : list)
          list0.add(obj != null ? ctx.marshaller().unmarshal((byte[]) obj, ldr) : null);

        col0.add(list0);
      }
    }

    return col0;
  }
  /** {@inheritDoc} */
  @Override
  public void addAttributeListener(GridTaskSessionAttributeListener lsnr, boolean rewind) {
    A.notNull(lsnr, "lsnr");

    Map<Object, Object> attrs = null;

    List<GridTaskSessionAttributeListener> lsnrs;

    synchronized (mux) {
      lsnrs = new ArrayList<GridTaskSessionAttributeListener>(this.lsnrs.size());

      lsnrs.addAll(this.lsnrs);

      lsnrs.add(lsnr);

      lsnrs = Collections.unmodifiableList(lsnrs);

      this.lsnrs = lsnrs;

      if (rewind) attrs = new HashMap<Object, Object>(this.attrs);
    }

    if (rewind) {
      for (Map.Entry<Object, Object> entry : attrs.entrySet()) {
        for (GridTaskSessionAttributeListener l : lsnrs) {
          l.onAttributeSet(entry.getKey(), entry.getValue());
        }
      }
    }
  }
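The method above copies the listener list under the lock and publishes an unmodifiable snapshot, so notification loops can iterate without synchronization. A minimal sketch of that copy-on-write pattern in isolation (class and field names are illustrative, not from the original session class; assumes java.util imports):

  class ListenerRegistry<L> {
    private final Object mux = new Object();
    private volatile List<L> lsnrs = Collections.emptyList();

    void add(L lsnr) {
      synchronized (mux) {
        List<L> copy = new ArrayList<L>(lsnrs);
        copy.add(lsnr);
        lsnrs = Collections.unmodifiableList(copy); // publish immutable snapshot
      }
    }

    List<L> snapshot() {
      return lsnrs; // safe to iterate concurrently; never mutated in place
    }
  }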
  /**
   * @return Collection of readers after check.
   * @throws GridCacheEntryRemovedException If removed.
   */
  public Collection<ReaderId> checkReaders() throws GridCacheEntryRemovedException {
    synchronized (mux) {
      checkObsolete();

      if (!readers.isEmpty()) {
        List<ReaderId> rmv = null;

        for (ReaderId reader : readers) {
          if (!cctx.discovery().alive(reader.nodeId())) {
            if (rmv == null) rmv = new LinkedList<ReaderId>();

            rmv.add(reader);
          }
        }

        if (rmv != null) {
          readers = new LinkedList<ReaderId>(readers);

          for (ReaderId rdr : rmv) readers.remove(rdr);

          readers = Collections.unmodifiableList(readers);
        }
      }

      return readers;
    }
  }
 @NotNull
 public static byte[] adaptiveLoadBytes(@NotNull InputStream stream) throws IOException {
   byte[] bytes = new byte[4096];
   List<byte[]> buffers = null;
   int count = 0;
   int total = 0;
   while (true) {
     int n = stream.read(bytes, count, bytes.length - count);
     if (n <= 0) break;
     count += n;
     if (total > 1024 * 1024 * 10) throw new FileTooBigException("File too big " + stream);
     total += n;
     if (count == bytes.length) {
       if (buffers == null) {
         buffers = new ArrayList<byte[]>();
       }
       buffers.add(bytes);
       int newLength = Math.min(1024 * 1024, bytes.length * 2);
       bytes = new byte[newLength];
       count = 0;
     }
   }
   byte[] result = new byte[total];
   if (buffers != null) {
     for (byte[] buffer : buffers) {
       System.arraycopy(buffer, 0, result, result.length - total, buffer.length);
       total -= buffer.length;
     }
   }
   System.arraycopy(bytes, 0, result, result.length - total, total);
   return result;
 }
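On JDK 11+, a similar bounded read could be sketched with InputStream.readNBytes; this is an assumption-laden alternative, not the IntelliJ implementation (the cap check differs slightly, since readNBytes reads eagerly up to the limit):

  // Read at most one byte past the cap so an oversized stream is detectable.
  byte[] data = stream.readNBytes(1024 * 1024 * 10 + 1);
  if (data.length > 1024 * 1024 * 10) throw new FileTooBigException("File too big " + stream);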
    /** {@inheritDoc} */
    @Override
    protected IgniteBiTuple<Long, List<IgniteExceptionRegistry.ExceptionInfo>> run(
        Map<UUID, Long> arg) {
      Long lastOrder = arg.get(ignite.localNode().id());

      long order = lastOrder != null ? lastOrder : 0;

      List<IgniteExceptionRegistry.ExceptionInfo> errors =
          ignite.context().exceptionRegistry().getErrors(order);

      List<IgniteExceptionRegistry.ExceptionInfo> wrapped = new ArrayList<>(errors.size());

      for (IgniteExceptionRegistry.ExceptionInfo error : errors) {
        if (error.order() > order) order = error.order();

        wrapped.add(
            new IgniteExceptionRegistry.ExceptionInfo(
                error.order(),
                new VisorExceptionWrapper(error.error()),
                error.message(),
                error.threadId(),
                error.threadName(),
                error.time()));
      }

      return new IgniteBiTuple<>(order, wrapped);
    }
 private void findTestedAndInjectableFields(@NotNull Field[] fieldsDeclaredInTestClass) {
   for (Field field : fieldsDeclaredInTestClass) {
     if (field.isAnnotationPresent(Injectable.class)) {
       MockedType mockedType = new MockedType(field);
       injectableFields.add(mockedType);
     } else {
       addAsTestedFieldIfApplicable(field);
     }
   }
 }
    /** {@inheritDoc} */
    @Override
    public <T extends Extension> void registerExtension(Class<T> extensionItf, T extensionImpl) {
      List<Object> list = extensionsCollector.get(extensionItf);

      if (list == null) {
        list = new ArrayList<>();

        extensionsCollector.put(extensionItf, list);
      }

      list.add(extensionImpl);
    }
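On Java 8+ the get-then-put dance above collapses to a single computeIfAbsent call with the same semantics (assuming extensionsCollector is a plain non-concurrent Map, as the original null check suggests):

      extensionsCollector.computeIfAbsent(extensionItf, k -> new ArrayList<>()).add(extensionImpl);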
 @NotNull
 public static List<String> loadLines(@NotNull Reader reader) throws IOException {
   List<String> lines = new ArrayList<String>();
   BufferedReader bufferedReader = new BufferedReader(reader);
   try {
     String line;
     while ((line = bufferedReader.readLine()) != null) {
       lines.add(line);
     }
   } finally {
     bufferedReader.close();
   }
   return lines;
 }
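For reference, on Java 8+ an equivalent body (a sketch, not the original utility) can lean on BufferedReader.lines, with try-with-resources replacing the explicit finally block:

  try (BufferedReader br = new BufferedReader(reader)) {
    return br.lines().collect(Collectors.toList());
  }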
 @NotNull
 public static Future<Void> asyncDelete(@NotNull Collection<File> files) {
   List<File> tempFiles = new ArrayList<File>();
   for (File file : files) {
     final File tempFile = renameToTempFileOrDelete(file);
     if (tempFile != null) {
       tempFiles.add(tempFile);
     }
   }
   if (!tempFiles.isEmpty()) {
     return startDeletionThread(tempFiles.toArray(new File[tempFiles.size()]));
   }
   return new FixedFuture<Void>(null);
 }
  /**
   * Gets values referenced by sequential keys, e.g. {@code key1...keyN}.
   *
   * @param keyPrefix Key prefix, e.g. {@code key} for {@code key1...keyN}.
   * @param params Parameters map.
   * @return Values.
   */
  @Nullable
  protected List<Object> values(String keyPrefix, Map<String, Object> params) {
    assert keyPrefix != null;

    List<Object> vals = new LinkedList<>();

    for (int i = 1; ; i++) {
      String key = keyPrefix + i;

      if (params.containsKey(key)) vals.add(params.get(key));
      else break;
    }

    return vals;
  }
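An illustrative call from a subclass: the scan stops at the first missing index, so a gap in the key sequence truncates the result (the names and values below are made up):

  Map<String, Object> params = new HashMap<>();
  params.put("key1", "a");
  params.put("key2", "b");
  params.put("key4", "c"); // never reached: key3 is absent

  List<Object> vals = values("key", params); // returns ["a", "b"]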
 /** @param removeProcessor Invoked with (parent, child); returning {@code true} drops the child. */
 public static <T> Collection<T> removeAncestors(
     final Collection<T> files,
     final Convertor<T, String> convertor,
     final PairProcessor<T, T> removeProcessor) {
   if (files.isEmpty()) return files;
   final TreeMap<String, T> paths = new TreeMap<String, T>();
   for (T file : files) {
     final String path = convertor.convert(file);
     assert path != null;
     final String canonicalPath = toCanonicalPath(path);
     paths.put(canonicalPath, file);
   }
   final List<Map.Entry<String, T>> ordered =
       new ArrayList<Map.Entry<String, T>>(paths.entrySet());
   final List<T> result = new ArrayList<T>(ordered.size());
   result.add(ordered.get(0).getValue());
   for (int i = 1; i < ordered.size(); i++) {
     final Map.Entry<String, T> entry = ordered.get(i);
     final String child = entry.getKey();
     boolean parentNotFound = true;
     for (int j = i - 1; j >= 0; j--) {
       // possible parents
       final String parent = ordered.get(j).getKey();
       if (parent == null) continue;
       if (startsWith(child, parent)
           && removeProcessor.process(ordered.get(j).getValue(), entry.getValue())) {
         parentNotFound = false;
         break;
       }
     }
     if (parentNotFound) {
       result.add(entry.getValue());
     }
   }
   return result;
 }
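A hypothetical invocation, assuming IntelliJ's Convertor and PairProcessor interfaces: paths are canonicalized and sorted, so every candidate parent precedes its children, and returning true from the processor drops the child:

  Collection<File> roots = removeAncestors(
      files,
      new Convertor<File, String>() {
        @Override
        public String convert(File f) {
          return f.getPath();
        }
      },
      new PairProcessor<File, File>() {
        @Override
        public boolean process(File parent, File child) {
          return true; // unconditionally drop files that live under an earlier root
        }
      });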
  /**
   * @param p Partition.
   * @param topVer Topology version ({@code -1} for all nodes).
   * @param state Partition state.
   * @param states Additional partition states.
   * @return List of nodes for the partition.
   */
  private List<ClusterNode> nodes(
      int p,
      AffinityTopologyVersion topVer,
      GridDhtPartitionState state,
      GridDhtPartitionState... states) {
    Collection<UUID> allIds =
        topVer.topologyVersion() > 0 ? F.nodeIds(CU.affinityNodes(cctx, topVer)) : null;

    lock.readLock().lock();

    try {
      assert node2part != null && node2part.valid()
          : "Invalid node-to-partitions map [topVer="
              + topVer
              + ", allIds="
              + allIds
              + ", node2part="
              + node2part
              + ", cache="
              + cctx.name()
              + ']';

      Collection<UUID> nodeIds = part2node.get(p);

      // Node IDs can be null if both primary and backup nodes disappear.
      int size = nodeIds == null ? 0 : nodeIds.size();

      if (size == 0) return Collections.emptyList();

      List<ClusterNode> nodes = new ArrayList<>(size);

      for (UUID id : nodeIds) {
        if (topVer.topologyVersion() > 0 && !allIds.contains(id)) continue;

        if (hasState(p, id, state, states)) {
          ClusterNode n = cctx.discovery().node(id);

          if (n != null && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion()))
            nodes.add(n);
        }
      }

      return nodes;
    } finally {
      lock.readLock().unlock();
    }
  }
  private void addAsTestedFieldIfApplicable(@NotNull Field fieldFromTestClass) {
    for (Annotation fieldAnnotation : fieldFromTestClass.getDeclaredAnnotations()) {
      Tested testedMetadata;

      if (fieldAnnotation instanceof Tested) {
        testedMetadata = (Tested) fieldAnnotation;
      } else {
        testedMetadata = fieldAnnotation.annotationType().getAnnotation(Tested.class);
      }

      if (testedMetadata != null) {
        TestedField testedField =
            new TestedField(injectionState, fieldFromTestClass, testedMetadata);
        testedFields.add(testedField);
        break;
      }
    }
  }
   /**
    * Finds all files in the folder and in its sub-tree of the specified depth.
    *
    * @param file Starting folder.
    * @param maxDepth Depth of the tree. If 1, only look in the folder itself, with no sub-folders.
    * @param filter File filter.
    * @return List of found files.
    */
  public static List<VisorLogFile> fileTree(File file, int maxDepth, @Nullable FileFilter filter) {
    if (file.isDirectory()) {
      File[] files = (filter == null) ? file.listFiles() : file.listFiles(filter);

      if (files == null) return Collections.emptyList();

      List<VisorLogFile> res = new ArrayList<>(files.length);

      for (File f : files) {
        if (f.isFile() && f.length() > 0) res.add(new VisorLogFile(f));
        else if (maxDepth > 1) res.addAll(fileTree(f, maxDepth - 1, filter));
      }

      return res;
    }

    return F.asList(new VisorLogFile(file));
  }
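An illustrative call (the path, depth, and filter are arbitrary). Note that the filter is applied by listFiles, so directories must also pass it for the recursion to descend into them:

  List<VisorLogFile> logs = fileTree(
      new File("/var/ignite/work/log"),
      2,
      new FileFilter() {
        @Override
        public boolean accept(File f) {
          return f.isDirectory() || f.getName().endsWith(".log");
        }
      });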
 private static void collectMatchedFiles(
     @NotNull File absoluteRoot,
     @NotNull File root,
     @NotNull Pattern pattern,
     @NotNull List<File> files) {
   final File[] dirs = root.listFiles();
   if (dirs == null) return;
   for (File dir : dirs) {
     if (dir.isFile()) {
       final String relativePath = getRelativePath(absoluteRoot, dir);
       if (relativePath != null) {
         final String path = toSystemIndependentName(relativePath);
         if (pattern.matcher(path).matches()) {
           files.add(dir);
         }
       }
     } else {
       collectMatchedFiles(absoluteRoot, dir, pattern, files);
     }
   }
 }
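A usage sketch from inside the same utility class: the pattern is matched against the system-independent path of each file relative to the root, so gathering all Java sources might look like this (the regex and root are illustrative):

  List<File> sources = new ArrayList<File>();
  File root = new File("src");
  collectMatchedFiles(root, root, Pattern.compile(".*\\.java"), sources);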
  /**
   * Creates and caches new deployment.
   *
   * @param meta Deployment metadata.
   * @param isCache Whether or not to cache.
   * @return New deployment.
   */
  private SharedDeployment createNewDeployment(GridDeploymentMetadata meta, boolean isCache) {
    assert Thread.holdsLock(mux);

    assert meta.parentLoader() == null;

    GridUuid ldrId = GridUuid.randomUuid();

    GridDeploymentClassLoader clsLdr;

    if (meta.deploymentMode() == CONTINUOUS || meta.participants() == null) {
      // Create peer class loader.
      // Note that we are passing empty list for local P2P exclude, as it really
      // does not make sense with shared deployment.
      clsLdr =
          new GridDeploymentClassLoader(
              ldrId,
              meta.userVersion(),
              meta.deploymentMode(),
              false,
              ctx,
              ctxLdr,
              meta.classLoaderId(),
              meta.senderNodeId(),
              meta.sequenceNumber(),
              comm,
              ctx.config().getNetworkTimeout(),
              log,
              ctx.config().getPeerClassLoadingClassPathExclude(),
              ctx.config().getPeerClassLoadingMissedResourcesCacheSize(),
              meta.deploymentMode() == CONTINUOUS /* enable class byte cache in CONTINUOUS mode */);

      if (meta.participants() != null)
        for (Map.Entry<UUID, GridTuple2<GridUuid, Long>> e : meta.participants().entrySet())
          clsLdr.register(e.getKey(), e.getValue().get1(), e.getValue().get2());

      if (log.isDebugEnabled())
        log.debug(
            "Created class loader in CONTINUOUS mode or without participants "
                + "[ldr="
                + clsLdr
                + ", meta="
                + meta
                + ']');
    } else {
      assert meta.deploymentMode() == SHARED;

      // Create peer class loader.
      // Note that we are passing empty list for local P2P exclude, as it really
      // does not make sense with shared deployment.
      clsLdr =
          new GridDeploymentClassLoader(
              ldrId,
              meta.userVersion(),
              meta.deploymentMode(),
              false,
              ctx,
              ctxLdr,
              meta.participants(),
              comm,
              ctx.config().getNetworkTimeout(),
              log,
              ctx.config().getPeerClassLoadingClassPathExclude(),
              ctx.config().getPeerClassLoadingMissedResourcesCacheSize(),
              false);

      if (log.isDebugEnabled())
        log.debug(
            "Created classloader in SHARED mode with participants "
                + "[ldr="
                + clsLdr
                + ", meta="
                + meta
                + ']');
    }

    // Give this deployment a unique class loader ID to emphasize that this
    // ID is unique to this shared deployment and is not the ID of the loader
    // on the sender node.
    SharedDeployment dep =
        new SharedDeployment(
            meta.deploymentMode(), clsLdr, ldrId, -1, meta.userVersion(), meta.alias());

    if (log.isDebugEnabled()) log.debug("Created new deployment: " + dep);

    if (isCache) {
      List<SharedDeployment> deps =
          F.addIfAbsent(cache, meta.userVersion(), new LinkedList<SharedDeployment>());

      assert deps != null;

      deps.add(dep);

      if (log.isDebugEnabled()) log.debug("Added deployment to cache: " + cache);
    }

    return dep;
  }
  /** {@inheritDoc} */
  @Override
  public GridDeployment getDeployment(GridDeploymentMetadata meta) {
    assert meta != null;

    assert ctx.config().isPeerClassLoadingEnabled();

    // Validate metadata.
    assert meta.classLoaderId() != null;
    assert meta.senderNodeId() != null;
    assert meta.sequenceNumber() >= -1;
    assert meta.parentLoader() == null;

    if (log.isDebugEnabled())
      log.debug("Starting to peer-load class based on deployment metadata: " + meta);

    while (true) {
      List<SharedDeployment> depsToCheck = null;

      SharedDeployment dep = null;

      synchronized (mux) {
        // Check obsolete request.
        if (isDeadClassLoader(meta)) return null;

        List<SharedDeployment> deps = cache.get(meta.userVersion());

        if (deps != null) {
          assert !deps.isEmpty();

          for (SharedDeployment d : deps) {
            if (d.hasParticipant(meta.senderNodeId(), meta.classLoaderId())
                || meta.senderNodeId().equals(ctx.localNodeId())) {
              // Done.
              dep = d;

              break;
            }
          }

          if (dep == null) {
            GridTuple2<Boolean, SharedDeployment> redeployCheck = checkRedeploy(meta);

            if (!redeployCheck.get1()) {
              // Checking for redeployment encountered invalid state.
              if (log.isDebugEnabled())
                log.debug("Checking for redeployment encountered invalid state: " + meta);

              return null;
            }

            dep = redeployCheck.get2();

            if (dep == null) {
              // Find existing deployments that need to be checked
              // whether they should be reused for this request.
              for (SharedDeployment d : deps) {
                if (!d.isPendingUndeploy() && !d.isUndeployed()) {
                  if (depsToCheck == null) depsToCheck = new LinkedList<SharedDeployment>();

                  if (log.isDebugEnabled()) log.debug("Adding deployment to check: " + d);

                  depsToCheck.add(d);
                }
              }

              // If no deployment can be reused, create a new one.
              if (depsToCheck == null) {
                dep = createNewDeployment(meta, false);

                deps.add(dep);
              }
            }
          }
        } else {
          GridTuple2<Boolean, SharedDeployment> redeployCheck = checkRedeploy(meta);

          if (!redeployCheck.get1()) {
            // Checking for redeployment encountered invalid state.
            if (log.isDebugEnabled())
              log.debug("Checking for redeployment encountered invalid state: " + meta);

            return null;
          }

          dep = redeployCheck.get2();

          if (dep == null)
            // Create peer class loader.
            dep = createNewDeployment(meta, true);
        }
      }

      if (dep != null) {
        if (log.isDebugEnabled())
          log.debug("Found SHARED or CONTINUOUS deployment after first check: " + dep);

        // Cache the deployed class.
        Class<?> cls = dep.deployedClass(meta.className(), meta.alias());

        if (cls == null) {
          U.warn(
              log,
              "Failed to load peer class (ignore if class got undeployed during preloading) [alias="
                  + meta.alias()
                  + ", dep="
                  + dep
                  + ']');

          return null;
        }

        return dep;
      }

      assert meta.parentLoader() == null;
      assert depsToCheck != null;
      assert !depsToCheck.isEmpty();

      /*
       * Logic below must be performed outside of synchronization
       * because it involves network calls.
       */

      // Check if class can be loaded from existing nodes.
      // In most cases this loop will find something.
      for (SharedDeployment d : depsToCheck) {
        // Load class. Note that the remote node will not load this class.
        // The class will only be loaded on this node.
        Class<?> cls = d.deployedClass(meta.className(), meta.alias());

        if (cls != null) {
          synchronized (mux) {
            if (!d.isUndeployed() && !d.isPendingUndeploy()) {
              if (!addParticipant(d, meta)) return null;

              if (log.isDebugEnabled())
                log.debug(
                    "Acquired deployment after verifying it's availability on "
                        + "existing nodes [depCls="
                        + cls
                        + ", dep="
                        + d
                        + ", meta="
                        + meta
                        + ']');

              return d;
            }
          }
        } else if (log.isDebugEnabled()) {
          log.debug(
              "Deployment cannot be reused (class does not exist on participating nodes) [dep="
                  + d
                  + ", meta="
                  + meta
                  + ']');
        }
      }

      // We are here either because all participant nodes failed
      // or the class indeed should have a separate deployment.
      for (SharedDeployment d : depsToCheck) {
        // Temporary class loader.
        ClassLoader temp =
            new GridDeploymentClassLoader(
                GridUuid.randomUuid(),
                meta.userVersion(),
                meta.deploymentMode(),
                true,
                ctx,
                ctxLdr,
                meta.classLoaderId(),
                meta.senderNodeId(),
                meta.sequenceNumber(),
                comm,
                ctx.config().getNetworkTimeout(),
                log,
                ctx.config().getPeerClassLoadingClassPathExclude(),
                0,
                false);

        String path = U.classNameToResourceName(d.sampleClassName());

        // We check if any random class from existing deployment can be
        // loaded from sender node. If it can, then we reuse existing
        // deployment.
        InputStream rsrcIn = temp.getResourceAsStream(path);

        if (rsrcIn != null) {
          // We don't need the actual stream.
          U.closeQuiet(rsrcIn);

          synchronized (mux) {
            if (d.isUndeployed() || d.isPendingUndeploy()) continue;

            // Add new node prior to loading the class, so we attempt
            // to load the class from the latest node.
            if (!addParticipant(d, meta)) {
              if (log.isDebugEnabled())
                log.debug(
                    "Failed to add participant to deployment "
                        + "[meta="
                        + meta
                        + ", dep="
                        + dep
                        + ']');

              return null;
            }
          }

          Class<?> depCls = d.deployedClass(meta.className(), meta.alias());

          if (depCls == null) {
            U.error(
                log,
                "Failed to peer load class after loading it as a resource [alias="
                    + meta.alias()
                    + ", dep="
                    + dep
                    + ']');

            return null;
          }

          if (log.isDebugEnabled())
            log.debug(
                "Acquired deployment class after verifying other class "
                    + "availability on sender node [depCls="
                    + depCls
                    + ", rndCls="
                    + d.sampleClass()
                    + ", sampleClsName="
                    + d.sampleClassName()
                    + ", meta="
                    + meta
                    + ']');

          return d;
        } else if (log.isDebugEnabled())
          log.debug(
              "Deployment cannot be reused (random class could not be loaded from sender node) [dep="
                  + d
                  + ", meta="
                  + meta
                  + ']');
      }

      synchronized (mux) {
        if (log.isDebugEnabled())
          log.debug(
              "None of the existing class-loaders fit (will try to create a new one): " + meta);

        // Check obsolete request.
        if (isDeadClassLoader(meta)) return null;

        // Check that deployment picture has not changed.
        List<SharedDeployment> deps = cache.get(meta.userVersion());

        if (deps != null) {
          assert !deps.isEmpty();

          boolean retry = false;

          for (SharedDeployment d : deps) {
            // Double check if sender was already added.
            if (d.hasParticipant(meta.senderNodeId(), meta.classLoaderId())) {
              dep = d;

              retry = false;

              break;
            }

            // A new deployment was added while we were outside of synchronization,
            // so it needs to be checked again.
            if (!d.isPendingUndeploy() && !d.isUndeployed() && !depsToCheck.contains(d))
              retry = true;
          }

          if (retry) {
            if (log.isDebugEnabled()) log.debug("Retrying due to concurrency issues: " + meta);

            // Outer while loop.
            continue;
          }

          if (dep == null) {
            // No new deployments were added, so we can safely add ours.
            dep = createNewDeployment(meta, false);

            deps.add(dep);

            if (log.isDebugEnabled())
              log.debug(
                  "Adding new deployment within second check [dep=" + dep + ", meta=" + meta + ']');
          }
        } else {
          dep = createNewDeployment(meta, true);

          if (log.isDebugEnabled())
            log.debug(
                "Created new deployment within second check [dep=" + dep + ", meta=" + meta + ']');
        }
      }

      if (dep != null) {
        // Cache the deployed class.
        Class<?> cls = dep.deployedClass(meta.className(), meta.alias());

        if (cls == null) {
          U.warn(
              log,
              "Failed to load peer class (ignore if class got undeployed during preloading) [alias="
                  + meta.alias()
                  + ", dep="
                  + dep
                  + ']');

          return null;
        }
      }

      return dep;
    }
  }
  /**
   * @param nodeId Reader to add.
   * @param msgId Message ID.
   * @return Future for all relevant transactions that were active at the time of adding the
   *     reader, or {@code null} if the reader was not added or there are no transactions to wait
   *     for.
   * @throws GridCacheEntryRemovedException If entry was removed.
   */
  @Nullable
  public GridFuture<Boolean> addReader(UUID nodeId, long msgId)
      throws GridCacheEntryRemovedException {
    // Don't add local node as reader.
    if (cctx.nodeId().equals(nodeId)) return null;

    GridNode node = cctx.discovery().node(nodeId);

    // If remote node has no near cache, don't add it.
    if (node == null || !U.hasNearCache(node, cctx.dht().near().name())) return null;

    // If remote node is a primary or backup node, don't add it as a reader.
    if (U.nodeIds(cctx.affinity(partition(), CU.allNodes(cctx))).contains(nodeId)) return null;

    boolean ret = false;

    GridCacheMultiTxFuture<K, V> txFut;

    Collection<GridCacheMvccCandidate<K>> cands = null;

    synchronized (mux) {
      checkObsolete();

      txFut = this.txFut;

      ReaderId reader = readerId(nodeId);

      if (reader == null) {
        reader = new ReaderId(nodeId, msgId);

        readers = new LinkedList<ReaderId>(readers);

        readers.add(reader);

        // Seal.
        readers = Collections.unmodifiableList(readers);

        txFut = this.txFut = new GridCacheMultiTxFuture<K, V>(cctx);

        cands = localCandidates();

        ret = true;
      } else {
        long id = reader.messageId();

        if (id < msgId) reader.messageId(msgId);
      }
    }

    if (ret) {
      assert txFut != null;

      if (!F.isEmpty(cands)) {
        for (GridCacheMvccCandidate<K> c : cands) {
          GridCacheTxEx<K, V> tx = cctx.tm().<GridCacheTxEx<K, V>>tx(c.version());

          if (tx != null) {
            assert tx.local();

            txFut.addTx(tx);
          }
        }
      }

      txFut.init();

      if (!txFut.isDone()) {
        txFut.listenAsync(
            new CI1<GridFuture<?>>() {
              @Override
              public void apply(GridFuture<?> f) {
                synchronized (mux) {
                  // Release memory.
                  GridDhtCacheEntry.this.txFut = null;
                }
              }
            });
      } else
        // Release memory.
        txFut = this.txFut = null;
    }

    return txFut;
  }
  /**
   * @param key Key to add.
   * @param val Optional update value.
   * @param conflictTtl Conflict TTL (optional).
   * @param conflictExpireTime Conflict expire time (optional).
   * @param conflictVer Conflict version (optional).
   * @param primary If given key is primary on this mapping.
   */
  public void addUpdateEntry(
      KeyCacheObject key,
      @Nullable Object val,
      long conflictTtl,
      long conflictExpireTime,
      @Nullable GridCacheVersion conflictVer,
      boolean primary) {
    EntryProcessor<Object, Object, Object> entryProcessor = null;

    if (op == TRANSFORM) {
      assert val instanceof EntryProcessor : val;

      entryProcessor = (EntryProcessor<Object, Object, Object>) val;
    }

    assert val != null || op == DELETE;

    keys.add(key);

    if (entryProcessor != null) {
      if (entryProcessors == null) entryProcessors = new ArrayList<>();

      entryProcessors.add(entryProcessor);
    } else if (val != null) {
      assert val instanceof CacheObject : val;

      if (vals == null) vals = new ArrayList<>();

      vals.add((CacheObject) val);
    }

    hasPrimary |= primary;

    // In case there is no conflict, do not create the list.
    if (conflictVer != null) {
      if (conflictVers == null) {
        conflictVers = new ArrayList<>();

        for (int i = 0; i < keys.size() - 1; i++) conflictVers.add(null);
      }

      conflictVers.add(conflictVer);
    } else if (conflictVers != null) conflictVers.add(null);

    if (conflictTtl >= 0) {
      if (conflictTtls == null) {
        conflictTtls = new GridLongList(keys.size());

        for (int i = 0; i < keys.size() - 1; i++) conflictTtls.add(CU.TTL_NOT_CHANGED);
      }

      conflictTtls.add(conflictTtl);
    }

    if (conflictExpireTime >= 0) {
      if (conflictExpireTimes == null) {
        conflictExpireTimes = new GridLongList(keys.size());

        for (int i = 0; i < keys.size() - 1; i++) conflictExpireTimes.add(CU.EXPIRE_TIME_CALCULATE);
      }

      conflictExpireTimes.add(conflictExpireTime);
    }
  }
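The conflict lists above are allocated lazily and backfilled with placeholders so their indexes stay aligned with keys added before the first real value. The same pattern in isolation (generic names, purely illustrative):

  static <T> List<T> alignedAdd(@Nullable List<T> side, int keyCnt, T placeholder, T val) {
    if (side == null) {
      side = new ArrayList<>(keyCnt);

      // Backfill one placeholder per key that was added before this value.
      for (int i = 0; i < keyCnt - 1; i++) side.add(placeholder);
    }

    side.add(val);

    return side;
  }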
  /**
   * Performs flush.
   *
   * @throws GridException If failed.
   */
  private void doFlush() throws GridException {
    lastFlushTime = U.currentTimeMillis();

    List<GridFuture> activeFuts0 = null;

    int doneCnt = 0;

    for (GridFuture<?> f : activeFuts) {
      if (!f.isDone()) {
        if (activeFuts0 == null) activeFuts0 = new ArrayList<>((int) (activeFuts.size() * 1.2));

        activeFuts0.add(f);
      } else {
        f.get();

        doneCnt++;
      }
    }

    if (activeFuts0 == null || activeFuts0.isEmpty()) return;

    while (true) {
      Queue<GridFuture<?>> q = null;

      for (Buffer buf : bufMappings.values()) {
        GridFuture<?> flushFut = buf.flush();

        if (flushFut != null) {
          if (q == null) q = new ArrayDeque<>(bufMappings.size() * 2);

          q.add(flushFut);
        }
      }

      if (q != null) {
        assert !q.isEmpty();

        boolean err = false;

        for (GridFuture fut = q.poll(); fut != null; fut = q.poll()) {
          try {
            fut.get();
          } catch (GridException e) {
            if (log.isDebugEnabled()) log.debug("Failed to flush buffer: " + e);

            err = true;
          }
        }

        if (err)
          // Remaps needed - flush buffers.
          continue;
      }

      doneCnt = 0;

      for (int i = 0; i < activeFuts0.size(); i++) {
        GridFuture f = activeFuts0.get(i);

        if (f == null) doneCnt++;
        else if (f.isDone()) {
          f.get();

          doneCnt++;

          activeFuts0.set(i, null);
        } else break;
      }

      if (doneCnt == activeFuts0.size()) return;
    }
  }
   /**
    * Removes obsolete deployments in case of redeployment.
    *
    * @param meta Request metadata.
    * @return Tuple of validity flag and, if one was created, the new shared deployment.
    */
  private GridTuple2<Boolean, SharedDeployment> checkRedeploy(GridDeploymentMetadata meta) {
    assert Thread.holdsLock(mux);

    SharedDeployment newDep = null;

    for (List<SharedDeployment> deps : cache.values()) {
      for (SharedDeployment dep : deps) {
        if (!dep.isUndeployed() && !dep.isPendingUndeploy()) {
          long undeployTimeout = ctx.config().getNetworkTimeout();

          SharedDeployment doomed = null;

          // Only check deployments with no participants.
          if (!dep.hasParticipants()) {
            // In case of SHARED deployment it is possible to get here if
            // unmarshalling happens during undeploy. In this case, we
            // simply don't do anything.
            if (dep.deployMode() == CONTINUOUS) {
              if (dep.existingDeployedClass(meta.className()) != null) {
                // Change from shared deploy to shared undeploy or user version change.
                // Simply remove all deployments with no participating nodes.
                if (meta.deploymentMode() == SHARED
                    || !meta.userVersion().equals(dep.userVersion())) doomed = dep;
              }
            }
          }
          // If there are participants, we undeploy if class loader ID on some node changed.
          else if (dep.existingDeployedClass(meta.className()) != null) {
            GridTuple2<GridUuid, Long> ldr = dep.getClassLoaderId(meta.senderNodeId());

            if (ldr != null) {
              if (!ldr.get1().equals(meta.classLoaderId())) {
                // If deployed sequence number is less, then schedule for undeployment.
                if (ldr.get2() < meta.sequenceNumber()) {
                  if (log.isDebugEnabled())
                    log.debug(
                        "Received request for a class with newer sequence number "
                            + "(will schedule current class for undeployment) [newSeq="
                            + meta.sequenceNumber()
                            + ", oldSeq="
                            + ldr.get2()
                            + ", senderNodeId="
                            + meta.senderNodeId()
                            + ", newClsLdrId="
                            + meta.classLoaderId()
                            + ", oldClsLdrId="
                            + ldr.get1()
                            + ']');

                  doomed = dep;
                } else if (ldr.get2() > meta.sequenceNumber()) {
                  long time = System.currentTimeMillis() - dep.timestamp();

                  if (newDep == null && time < ctx.config().getNetworkTimeout()) {
                    // Set undeployTimeout, so the class will be scheduled
                    // for undeployment.
                    undeployTimeout = ctx.config().getNetworkTimeout() - time;

                    if (log.isDebugEnabled())
                      log.debug(
                          "Received execution request for a stale class (will deploy and "
                              + "schedule undeployment in "
                              + undeployTimeout
                              + "ms) "
                              + "[curSeq="
                              + ldr.get2()
                              + ", staleSeq="
                              + meta.sequenceNumber()
                              + ", cls="
                              + meta.className()
                              + ", senderNodeId="
                              + meta.senderNodeId()
                              + ", curLdrId="
                              + ldr.get1()
                              + ", staleLdrId="
                              + meta.classLoaderId()
                              + ']');

                    // We got the redeployed class before the old one.
                    // Simply create a temporary deployment for the sender node,
                    // and schedule undeploy for it.
                    newDep = createNewDeployment(meta, false);

                    doomed = newDep;
                  } else {
                    U.warn(
                        log,
                        "Received execution request for a class that has been redeployed "
                            + "(will ignore): "
                            + meta.alias());

                    if (log.isDebugEnabled())
                      log.debug(
                          "Received execution request for a class that has been redeployed "
                              + "(will ignore) [alias="
                              + meta.alias()
                              + ", dep="
                              + dep
                              + ']');

                    return F.t(false, null);
                  }
                } else {
                  U.error(
                      log,
                      "Sequence number does not correspond to class loader ID [seqNum="
                          + meta.sequenceNumber()
                          + ", dep="
                          + dep
                          + ']');

                  return F.t(false, null);
                }
              }
            }
          }

          if (doomed != null) {
            doomed.onUndeployScheduled();

            if (log.isDebugEnabled()) log.debug("Deployment was scheduled for undeploy: " + doomed);

            // Lifespan time.
            final long endTime = System.currentTimeMillis() + undeployTimeout;

            // Deployment to undeploy.
            final SharedDeployment undep = doomed;

            ctx.timeout()
                .addTimeoutObject(
                    new GridTimeoutObject() {
                      @Override
                      public GridUuid timeoutId() {
                        return undep.classLoaderId();
                      }

                      @Override
                      public long endTime() {
                        return endTime < 0 ? Long.MAX_VALUE : endTime;
                      }

                      @Override
                      public void onTimeout() {
                        boolean removed = false;

                        // Hot redeployment.
                        synchronized (mux) {
                          assert undep.isPendingUndeploy();

                          if (!undep.isUndeployed()) {
                            undep.undeploy();

                            undep.onRemoved();

                            removed = true;

                            Collection<SharedDeployment> deps = cache.get(undep.userVersion());

                            if (deps != null) {
                              for (Iterator<SharedDeployment> i = deps.iterator(); i.hasNext(); )
                                if (i.next() == undep) i.remove();

                              if (deps.isEmpty()) cache.remove(undep.userVersion());
                            }

                            if (log.isInfoEnabled())
                              log.info(
                                  "Undeployed class loader due to deployment mode change, "
                                      + "user version change, or hot redeployment: "
                                      + undep);
                          }
                        }

                        // Outside synchronization.
                        if (removed) undep.recordUndeployed(null);
                      }
                    });
          }
        }
      }
    }

    if (newDep != null) {
      List<SharedDeployment> list =
          F.addIfAbsent(cache, meta.userVersion(), F.<SharedDeployment>newList());

      assert list != null;

      list.add(newDep);
    }

    return F.t(true, newDep);
  }
  /**
   * Starts activity.
   *
   * @throws IgniteInterruptedCheckedException If interrupted.
   */
  public void init() throws IgniteInterruptedCheckedException {
    if (isDone()) return;

    if (init.compareAndSet(false, true)) {
      if (isDone()) return;

      try {
        // Wait for event to occur to make sure that discovery
        // will return corresponding nodes.
        U.await(evtLatch);

        assert discoEvt != null : this;
        assert !dummy && !forcePreload : this;

        ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion());

        oldestNode.set(oldest);

        startCaches();

        // True if client node joined or failed.
        boolean clientNodeEvt;

        if (F.isEmpty(reqs)) {
          int type = discoEvt.type();

          assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED
              : discoEvt;

          clientNodeEvt = CU.clientNode(discoEvt.eventNode());
        } else {
          assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt;

          boolean clientOnlyStart = true;

          for (DynamicCacheChangeRequest req : reqs) {
            if (!req.clientStartOnly()) {
              clientOnlyStart = false;

              break;
            }
          }

          clientNodeEvt = clientOnlyStart;
        }

        if (clientNodeEvt) {
          ClusterNode node = discoEvt.eventNode();

          // Client nodes need to initialize affinity for a local join event or for started client caches.
          if (!node.isLocal()) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
              if (cacheCtx.isLocal()) continue;

              GridDhtPartitionTopology top = cacheCtx.topology();

              top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));

              if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) {
                initTopology(cacheCtx);

                top.beforeExchange(this);
              } else
                cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion());
            }

            if (exchId.isLeft())
              cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

            onDone(exchId.topologyVersion());

            skipPreload = cctx.kernalContext().clientNode();

            return;
          }
        }

        if (cctx.kernalContext().clientNode()) {
          skipPreload = true;

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            GridDhtPartitionTopology top = cacheCtx.topology();

            top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));
          }

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            initTopology(cacheCtx);
          }

          if (oldestNode.get() != null) {
            rmtNodes =
                new ConcurrentLinkedQueue<>(
                    CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

            rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

            ready.set(true);

            initFut.onDone(true);

            if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

            sendPartitions();
          } else onDone(exchId.topologyVersion());

          return;
        }

        assert oldestNode.get() != null;

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) {
            if (cacheCtx
                .discovery()
                .cacheAffinityNodes(cacheCtx.name(), topologyVersion())
                .isEmpty())
              U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex());
          }

          cacheCtx.preloader().onExchangeFutureAdded();
        }

        List<String> cachesWithoutNodes = null;

        if (exchId.isLeft()) {
          for (String name : cctx.cache().cacheNames()) {
            if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) {
              if (cachesWithoutNodes == null) cachesWithoutNodes = new ArrayList<>();

              cachesWithoutNodes.add(name);

              // Fire event even if there is no client cache started.
              if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) {
                Event evt =
                    new CacheEvent(
                        name,
                        cctx.localNode(),
                        cctx.localNode(),
                        "All server nodes have left the cluster.",
                        EventType.EVT_CACHE_NODES_LEFT,
                        0,
                        false,
                        null,
                        null,
                        null,
                        null,
                        false,
                        null,
                        false,
                        null,
                        null,
                        null);

                cctx.gridEvents().record(evt);
              }
            }
          }
        }

        if (cachesWithoutNodes != null) {
          StringBuilder sb =
              new StringBuilder(
                  "All server nodes for the following caches have left the cluster: ");

          for (int i = 0; i < cachesWithoutNodes.size(); i++) {
            String cache = cachesWithoutNodes.get(i);

            sb.append('\'').append(cache).append('\'');

            if (i != cachesWithoutNodes.size() - 1) sb.append(", ");
          }

          U.quietAndWarn(log, sb.toString());

          U.quietAndWarn(log, "Must have server nodes for caches to operate.");
        }

        assert discoEvt != null;

        assert exchId.nodeId().equals(discoEvt.eventNode().id());

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          GridClientPartitionTopology clientTop =
              cctx.exchange().clearClientTopology(cacheCtx.cacheId());

          long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

          // Update before waiting for locks.
          if (!cacheCtx.isLocal())
            cacheCtx
                .topology()
                .updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId()));
        }

        // Grab all alive remote nodes with order of equal or less than last joined node.
        rmtNodes =
            new ConcurrentLinkedQueue<>(
                CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

        rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

        for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet())
          // If received any messages, process them.
          onReceive(m.getKey(), m.getValue());

        for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet())
          // If received any messages, process them.
          onReceive(m.getKey(), m.getValue());

        AffinityTopologyVersion topVer = exchId.topologyVersion();

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Must initialize topology after we get discovery event.
          initTopology(cacheCtx);

          cacheCtx.preloader().updateLastExchangeFuture(this);
        }

        IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer);

        // Assign to class variable so it will be included into toString() method.
        this.partReleaseFut = partReleaseFut;

        if (log.isDebugEnabled()) log.debug("Before waiting for partition release future: " + this);

        while (true) {
          try {
            partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            // Print pending transactions and locks that might have led to hang.
            dumpPendingObjects();
          }
        }

        if (log.isDebugEnabled()) log.debug("After waiting for partition release future: " + this);

        if (!F.isEmpty(reqs)) blockGateways();

        if (exchId.isLeft())
          cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

        IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion());

        while (true) {
          try {
            locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            U.warn(
                log,
                "Failed to wait for locks release future. "
                    + "Dumping pending objects that might be the cause: "
                    + cctx.localNodeId());

            U.warn(log, "Locked entries:");

            Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks =
                cctx.mvcc().unfinishedLocks(exchId.topologyVersion());

            for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet())
              U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']');
          }
        }

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Notify replication manager.
          GridCacheContext drCacheCtx =
              cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;

          if (drCacheCtx.isDrEnabled()) drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft());

          // Partition release future is done so we can flush the write-behind store.
          cacheCtx.store().forceFlush();

          // Process queued undeploys prior to sending/spreading map.
          cacheCtx.preloader().unwindUndeploys();

          GridDhtPartitionTopology top = cacheCtx.topology();

          assert topVer.equals(top.topologyVersion())
              : "Topology version is updated only by instances of this class inside the single ExchangeWorker thread.";

          top.beforeExchange(this);
        }

        for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
          top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId()));

          top.beforeExchange(this);
        }
      } catch (IgniteInterruptedCheckedException e) {
        onDone(e);

        throw e;
      } catch (Throwable e) {
        U.error(
            log,
            "Failed to reinitialize local partitions (preloading will be stopped): " + exchId,
            e);

        onDone(e);

        if (e instanceof Error) throw (Error) e;

        return;
      }

      if (F.isEmpty(rmtIds)) {
        onDone(exchId.topologyVersion());

        return;
      }

      ready.set(true);

      initFut.onDone(true);

      if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

      // If this node is not oldest.
      if (!oldestNode.get().id().equals(cctx.localNodeId())) sendPartitions();
      else {
        boolean allReceived = allReceived();

        if (allReceived && replied.compareAndSet(false, true)) {
          if (spreadPartitions()) onDone(exchId.topologyVersion());
        }
      }

      scheduleRecheck();
    } else assert false : "Skipped init future: " + this;
  }