/** {@inheritDoc} */
  @Override
  public Map<K, V> peekAll(
      @Nullable Collection<? extends K> keys,
      @Nullable GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys == null || keys.isEmpty()) return emptyMap();

    final Collection<K> skipped = new GridLeanSet<K>();

    final Map<K, V> map = peekAll0(keys, filter, skipped);

    if (map.size() + skipped.size() != keys.size()) {
      map.putAll(
          dht.peekAll(
              F.view(
                  keys,
                  new P1<K>() {
                    @Override
                    public boolean apply(K k) {
                      return !map.containsKey(k) && !skipped.contains(k);
                    }
                  }),
              filter));
    }

    return map;
  }
 /**
  * Update the Git index (add and remove files).
  *
  * @param project the project
  * @param root the vcs root
  * @param added added/modified files to commit
  * @param removed removed files to commit
  * @param exceptions the list to which any {@link VcsException} encountered is added
  * @return true if the index was updated successfully
  */
 private static boolean updateIndex(
     final Project project,
     final VirtualFile root,
     final Collection<FilePath> added,
     final Collection<FilePath> removed,
     final List<VcsException> exceptions) {
   boolean rc = true;
   if (!added.isEmpty()) {
     try {
       GitFileUtils.addPaths(project, root, added);
     } catch (VcsException ex) {
       exceptions.add(ex);
       rc = false;
     }
   }
   if (!removed.isEmpty()) {
     try {
       GitFileUtils.delete(project, root, removed, "--ignore-unmatch");
     } catch (VcsException ex) {
       exceptions.add(ex);
       rc = false;
     }
   }
   return rc;
 }
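
 // A hedged call-site sketch for updateIndex: the project, root, and file collections would come
 // from the surrounding commit flow; the names below are illustrative assumptions, not the
 // original API.
 private static void commitIndexSketch(
     Project project, VirtualFile root, Collection<FilePath> added, Collection<FilePath> removed) {
   List<VcsException> exceptions = new ArrayList<VcsException>();
   if (!updateIndex(project, root, added, removed, exceptions)) {
     for (VcsException e : exceptions) {
       e.printStackTrace(); // surface collected failures instead of dropping them
     }
   }
 }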
  /**
   * Removes locks for the given version and keys, regardless of whether they are owned.
   *
   * @param ver Lock version.
   * @param keys Keys.
   */
  @SuppressWarnings({"unchecked"})
  public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty()) return;

    Collection<GridRichNode> nodes = ctx.remoteNodes(keys);

    try {
      // Send request to remove from remote nodes.
      GridDistributedUnlockRequest<K, V> req = new GridDistributedUnlockRequest<K, V>(keys.size());

      req.version(ver);

      for (K key : keys) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekExx(key);

          try {
            if (entry != null) {
              GridCacheMvccCandidate<K> cand = entry.candidate(ver);

              if (cand != null) {
                // Remove candidate from local node first.
                if (entry.removeLock(cand.version())) {
                  // If the local node is the only node in this lock's topology,
                  // then there is no reason to distribute the request.
                  if (nodes.isEmpty()) continue;

                  req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                }
              }
            }

            break;
          } catch (GridCacheEntryRemovedException ignored) {
            if (log.isDebugEnabled())
              log.debug(
                  "Attempted to remove lock from removed entry (will retry) [rmvVer="
                      + ver
                      + ", entry="
                      + entry
                      + ']');
          }
        }
      }

      if (nodes.isEmpty()) return;

      req.completedVersions(ctx.tm().committedVersions(ver), ctx.tm().rolledbackVersions(ver));

      if (!req.keyBytes().isEmpty())
        // We don't wait for reply to this message.
        ctx.io().safeSend(nodes, req, null);
    } catch (GridException ex) {
      U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
  }
  public boolean process(String[] args, BytecodeReader reader, JavaParser parser) {
    program.initBytecodeReader(reader);
    program.initJavaParser(parser);

    initOptions();
    processArgs(args);

    Collection files = program.options().files();

    if (program.options().hasOption("-version")) {
      printVersion();
      return false;
    }
    if (program.options().hasOption("-help") || files.isEmpty()) {
      printUsage();
      return false;
    }

    try {
      for (Iterator iter = files.iterator(); iter.hasNext(); ) {
        String name = (String) iter.next();
        if (!new File(name).exists())
          System.err.println("WARNING: file \"" + name + "\" does not exist");
        program.addSourceFile(name);
      }

      for (Iterator iter = program.compilationUnitIterator(); iter.hasNext(); ) {
        CompilationUnit unit = (CompilationUnit) iter.next();
        if (unit.fromSource()) {
          Collection errors = unit.parseErrors();
          Collection warnings = new LinkedList();
          // compute static semantic errors when there are no parse errors or
          // the recover from parse errors option is specified
          if (errors.isEmpty() || program.options().hasOption("-recover"))
            unit.errorCheck(errors, warnings);
          if (!errors.isEmpty()) {
            processErrors(errors, unit);
            return false;
          } else {
            if (!warnings.isEmpty()) processWarnings(warnings, unit);
            processNoErrors(unit);
          }
        }
      }
    } catch (Exception e) {
      System.err.println(e.getMessage());
      e.printStackTrace();
    }
    return true;
  }
 /**
  * Returns the file representing the Git repository directory for the given file path or any of
  * its parents in the filesystem. If the file does not exist, is not in a Git repository, or an
  * error occurred while transforming the given path into a store, <code>null</code> is returned.
  *
  * @param path expected format /file/{Workspace}/{projectName}[/{path}]
  * @return the .git folder if found, or <code>null</code> if the given path cannot be resolved
  *     to a file or is not under the control of a Git repository
  * @throws CoreException
  */
 public static File getGitDir(IPath path) throws CoreException {
   Map<IPath, File> gitDirs = GitUtils.getGitDirs(path, Traverse.GO_UP);
   if (gitDirs == null) return null;
   Collection<File> values = gitDirs.values();
   if (values.isEmpty()) return null;
   return values.toArray(new File[] {})[0];
 }
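
 // A minimal usage sketch, assuming the IPath format documented above; the workspace and project
 // names are placeholders.
 public static boolean isInGitRepository(IPath path) throws CoreException {
   // e.g. path = new org.eclipse.core.runtime.Path("/file/workspace1/myProject/src/A.java")
   File gitDir = getGitDir(path);
   return gitDir != null; // null: unresolvable path, or not under a Git repository
 }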
  /** {@inheritDoc} */
  @Override
  public Map<K, V> peekAll(
      @Nullable Collection<? extends K> keys, @Nullable Collection<GridCachePeekMode> modes)
      throws GridException {
    if (keys == null || keys.isEmpty()) return emptyMap();

    final Collection<K> skipped = new GridLeanSet<K>();

    final Map<K, V> map =
        !modes.contains(PARTITIONED_ONLY)
            ? peekAll0(keys, modes, ctx.tm().localTxx(), skipped)
            : new GridLeanMap<K, V>(0);

    if (map.size() != keys.size() && !modes.contains(NEAR_ONLY)) {
      map.putAll(
          dht.peekAll(
              F.view(
                  keys,
                  new P1<K>() {
                    @Override
                    public boolean apply(K k) {
                      return !map.containsKey(k) && !skipped.contains(k);
                    }
                  }),
              modes));
    }

    return map;
  }
  @Test
  public void testCompactionLog() throws Exception {
    SystemKeyspace.discardCompactionsInProgress();

    String cf = "Standard4";
    ColumnFamilyStore cfs = Keyspace.open(KEYSPACE1).getColumnFamilyStore(cf);
    insertData(KEYSPACE1, cf, 0, 1);
    cfs.forceBlockingFlush();

    Collection<SSTableReader> sstables = cfs.getSSTables();
    assert !sstables.isEmpty();
    Set<Integer> generations =
        Sets.newHashSet(
            Iterables.transform(
                sstables,
                new Function<SSTableReader, Integer>() {
                  public Integer apply(SSTableReader sstable) {
                    return sstable.descriptor.generation;
                  }
                }));
    UUID taskId = SystemKeyspace.startCompaction(cfs, sstables);
    SetMultimap<Pair<String, String>, Integer> compactionLogs =
        SystemKeyspace.getUnfinishedCompactions();
    Set<Integer> unfinishedCompactions = compactionLogs.get(Pair.create(KEYSPACE1, cf));
    assert unfinishedCompactions.containsAll(generations);

    SystemKeyspace.finishCompaction(taskId);
    compactionLogs = SystemKeyspace.getUnfinishedCompactions();
    assert !compactionLogs.containsKey(Pair.create(KEYSPACE1, cf));
  }
File: Group.java (project: regadou/nalasys)
 public boolean addAll(int index, Collection c) {
   if (c == null || c.isEmpty()) return false;
   boolean changed = false;
   Iterator i = c.iterator();
   while (i.hasNext()) {
     add(index, i.next());
     changed = true;
     if (index >= 0) index++;
   }
   return changed;
 }
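
 // The index bookkeeping above (insert, then advance a non-negative index) keeps inserted
 // elements in their original order. A self-contained sketch of the same pattern on ArrayList:
 public static void main(String[] args) {
   java.util.List<String> target =
       new java.util.ArrayList<String>(java.util.Arrays.asList("a", "d"));
   int index = 1;
   for (String s : java.util.Arrays.asList("b", "c")) {
     target.add(index, s); // insert at the moving index
     index++; // advance so the next element lands after this one
   }
   System.out.println(target); // prints [a, b, c, d]
 }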
  protected static void printUserList(Collection<UserID> users) {

    Iterator<UserID> it = users.iterator();

    while (it.hasNext()) {
      UserID user = it.next();
      System.out.println(
          "Username: " + user + " Name: " + user.getName() + " " + user.getAttributes());
    }

    if (users.isEmpty()) System.out.println("No results");
  }
  private void applyFSEvent(
      ProjectDescriptor pd, @Nullable CmdlineRemoteProto.Message.ControllerMessage.FSEvent event)
      throws IOException {
    if (event == null) {
      return;
    }

    final Timestamps timestamps = pd.timestamps.getStorage();

    for (String deleted : event.getDeletedPathsList()) {
      final File file = new File(deleted);
      Collection<BuildRootDescriptor> descriptor =
          pd.getBuildRootIndex().findAllParentDescriptors(file, null, null);
      if (!descriptor.isEmpty()) {
        if (Utils.IS_TEST_MODE) {
          LOG.info("Applying deleted path from fs event: " + file.getPath());
        }
        for (BuildRootDescriptor rootDescriptor : descriptor) {
          pd.fsState.registerDeleted(rootDescriptor.getTarget(), file, timestamps);
        }
      } else if (Utils.IS_TEST_MODE) {
        LOG.info("Skipping deleted path: " + file.getPath());
      }
    }
    for (String changed : event.getChangedPathsList()) {
      final File file = new File(changed);
      Collection<BuildRootDescriptor> descriptors =
          pd.getBuildRootIndex().findAllParentDescriptors(file, null, null);
      if (!descriptors.isEmpty()) {
        if (Utils.IS_TEST_MODE) {
          LOG.info("Applying dirty path from fs event: " + file.getPath());
        }
        for (BuildRootDescriptor descriptor : descriptors) {
          pd.fsState.markDirty(null, file, descriptor, timestamps);
        }
      } else if (Utils.IS_TEST_MODE) {
        LOG.info("Skipping dirty path: " + file.getPath());
      }
    }
  }
  public Collection<DiscoveredPlugin> call() {
    this.remoteDiscoveryImpl.syso("cache 1");
    final Collection<Entry> entries = getEntryList(getPlugin());
    this.remoteDiscoveryImpl.syso("cache entries " + entries.size());
    this.remoteDiscoveryImpl.syso("cache 2");
    Collection<DiscoveredPlugin> available = remoteUnavailable(entries);
    this.remoteDiscoveryImpl.syso("cache 3");
    this.remoteDiscoveryImpl.syso("cache available " + available.size());
    this.remoteDiscoveryImpl.syso("cache 4");
    if (!available.isEmpty()) {
      this.remoteDiscoveryImpl.syso("cache 5");
      this.remoteDiscoveryImpl.doFilter(available, getDiscoverOptions());
    }
    this.remoteDiscoveryImpl.syso("cache 6");
    this.remoteDiscoveryImpl.syso("cache: available " + available.size());

    this.remoteDiscoveryImpl.syso("cache 7");
    if (!available.isEmpty()) this.remoteDiscoveryImpl.syso("---> discovery cache hit.");

    this.remoteDiscoveryImpl.syso("cache 8");
    return available;
  }
 private boolean containsOnlyInlineLevelChildElements(final Element element) {
   // returns true if the element contains only inline-level elements except for SCRIPT elements.
   final Collection childElements = element.getChildElements();
   if (childElements.isEmpty()) return true;
   for (final Iterator i = childElements.iterator(); i.hasNext(); ) {
     final Element childElement = (Element) i.next();
     final String elementName = childElement.getName();
     if (elementName == HTMLElementName.SCRIPT
         || !HTMLElements.getInlineLevelElementNames().contains(elementName)) return false;
     if (!containsOnlyInlineLevelChildElements(childElement)) return false;
   }
   return true;
 }
  private boolean hasTypeDiscriminatorFieldOrMethod(final Class<?> persistentType) {
    Collection<?> hits =
        eachField(
            persistentType,
            new Predicate<Field>() {
              public boolean apply(Field input) {
                return hasAnnotation(input, TypeDiscriminator.class);
              }
            });
    if (!hits.isEmpty()) {
      return true;
    }

    hits =
        eachMethod(
            persistentType,
            new Predicate<Method>() {
              public boolean apply(Method input) {
                return hasAnnotation(input, TypeDiscriminator.class);
              }
            });
    return !hits.isEmpty();
  }
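
  // A dependency-free sketch of the same check with plain java.lang.reflect; TypeDiscriminator is
  // assumed to be runtime-retained, and unlike the eachField/eachMethod helpers above this only
  // scans the class itself, not its superclasses.
  private static boolean hasAnnotatedFieldOrMethod(
      Class<?> type, Class<? extends java.lang.annotation.Annotation> annotation) {
    for (java.lang.reflect.Field f : type.getDeclaredFields()) {
      if (f.isAnnotationPresent(annotation)) return true; // field hit: no need to scan methods
    }
    for (java.lang.reflect.Method m : type.getDeclaredMethods()) {
      if (m.isAnnotationPresent(annotation)) return true;
    }
    return false;
  }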
 /**
  * Unregister the au with this Tdb for its plugin.
  *
  * @param au the TdbAu
  * @return <code>false</code> if au was not registered, otherwise <code>true</code>
  */
 private boolean removeTdbAuForPlugin(TdbAu au) {
   // if can't add au to title, we need to undo the au
   // registration and re-throw the exception we just caught
   String pluginId = au.getPluginId();
   Collection<TdbAu.Id> c = pluginIdTdbAuIdsMap.get(pluginId);
   if (c.remove(au.getId())) {
     if (c.isEmpty()) {
        pluginIdTdbAuIdsMap.remove(pluginId);
     }
     tdbAuCount--;
     return true;
   }
   return false;
 }
 /** Try to figure out whether the predefined Templates have already been migrated. */
 static boolean hasDeployed() {
   if (hasDeployed) {
     return true;
   }
   Collection<String> names = getPredefinedTemplateNames();
   File dir = TemplateDatabase.TemplateDir;
   if (dir.isDirectory()) {
     File[] files = dir.listFiles();
     for (File file : files) {
       String name = file.getName();
       names.remove(name);
     }
   }
   return names.isEmpty();
 }
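
 // The loop above is a set difference: expected template names minus the names present on disk;
 // deployment is finished when nothing remains. A compact sketch of the same idea, assuming the
 // same getPredefinedTemplateNames/TemplateDatabase members:
 static boolean allTemplatesOnDisk() {
   java.util.Collection<String> missing =
       new java.util.HashSet<String>(getPredefinedTemplateNames());
   File[] files = TemplateDatabase.TemplateDir.listFiles();
   if (files != null) { // listFiles() returns null for a non-directory
     for (File file : files) {
       missing.remove(file.getName());
     }
   }
   return missing.isEmpty();
 }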
 static List<ZLFile> archiveEntries(ZLFile archive) {
   try {
     final ZipFile zf = ZLZipEntryFile.getZipFile(archive);
     final Collection<LocalFileHeader> headers = zf.headers();
     if (!headers.isEmpty()) {
       ArrayList<ZLFile> entries = new ArrayList<ZLFile>(headers.size());
       for (LocalFileHeader h : headers) {
         entries.add(new ZLZipEntryFile(archive, h.FileName));
       }
       return entries;
     }
    } catch (IOException e) {
      // ignore the archive read failure and fall through to the empty list
    }
   return Collections.emptyList();
 }
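
 // For comparison, a self-contained sketch of the same listing using the JDK's java.util.zip
 // (ZLZipEntryFile and LocalFileHeader are library-specific types):
 static java.util.List<String> zipEntryNames(java.io.File archive) {
   java.util.List<String> names = new java.util.ArrayList<String>();
   try (java.util.zip.ZipFile zf = new java.util.zip.ZipFile(archive)) {
     java.util.Enumeration<? extends java.util.zip.ZipEntry> entries = zf.entries();
     while (entries.hasMoreElements()) {
       names.add(entries.nextElement().getName());
     }
   } catch (java.io.IOException e) {
     // mirror the method above: fall through to an empty result
   }
   return names;
 }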
    @Override
    @SuppressWarnings("unchecked")
    public Object aggregate(Collection<?> countersList) {
      if (countersList.isEmpty()) {
        return null;
      }

      BasicCounters<MutableLong> tempFileCounters =
          (BasicCounters<MutableLong>) countersList.iterator().next();
      MutableLong globalProcessedFiles =
          tempFileCounters.getCounter(FileCounters.GLOBAL_PROCESSED_FILES);
      MutableLong globalNumberOfFailures =
          tempFileCounters.getCounter(FileCounters.GLOBAL_NUMBER_OF_FAILURES);
      MutableLong globalNumberOfRetries =
          tempFileCounters.getCounter(FileCounters.GLOBAL_NUMBER_OF_RETRIES);
      totalLocalProcessedFiles.setValue(0);
      pendingFiles.setValue(0);
      totalLocalNumberOfFailures.setValue(0);
      totalLocalNumberOfRetries.setValue(0);

      for (Object fileCounters : countersList) {
        BasicCounters<MutableLong> basicFileCounters = (BasicCounters<MutableLong>) fileCounters;
        totalLocalProcessedFiles.add(
            basicFileCounters.getCounter(FileCounters.LOCAL_PROCESSED_FILES));
        pendingFiles.add(basicFileCounters.getCounter(FileCounters.PENDING_FILES));
        totalLocalNumberOfFailures.add(
            basicFileCounters.getCounter(FileCounters.LOCAL_NUMBER_OF_FAILURES));
        totalLocalNumberOfRetries.add(
            basicFileCounters.getCounter(FileCounters.LOCAL_NUMBER_OF_RETRIES));
      }

      globalProcessedFiles.add(totalLocalProcessedFiles);
      globalProcessedFiles.subtract(pendingFiles);
      globalNumberOfFailures.add(totalLocalNumberOfFailures);
      globalNumberOfRetries.add(totalLocalNumberOfRetries);

      BasicCounters<MutableLong> aggregatedCounters =
          new BasicCounters<MutableLong>(MutableLong.class);
      aggregatedCounters.setCounter(AggregatedFileCounters.PROCESSED_FILES, globalProcessedFiles);
      aggregatedCounters.setCounter(AggregatedFileCounters.PENDING_FILES, pendingFiles);
      aggregatedCounters.setCounter(
          AggregatedFileCounters.NUMBER_OF_ERRORS, totalLocalNumberOfFailures);
      aggregatedCounters.setCounter(
          AggregatedFileCounters.NUMBER_OF_RETRIES, totalLocalNumberOfRetries);

      return aggregatedCounters;
    }
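
    // The global counters above are cumulative: each aggregation pass adds the latest local
    // totals and subtracts files still pending, so completed = processed - pending. A tiny,
    // hedged sketch of that arithmetic with commons-lang's MutableLong (values illustrative):
    static long completedFilesSketch(long locallyProcessed, long pending) {
      MutableLong global = new MutableLong(0);
      global.add(locallyProcessed); // e.g. 7 files seen locally
      global.subtract(pending); // e.g. 2 still pending -> 5 completed
      return global.longValue();
    }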
  /**
   * Collect saved Reflections resources from all urls that contain the given packagePrefix and
   * match the given resourceNameFilter, and de-serialize them using the default serializer {@link
   * org.reflections.serializers.XmlSerializer} or the optionally supplied optionalSerializer.
   *
   * <p>It is preferred to use a designated resource prefix (for example META-INF/reflections but
   * not just META-INF), so that relevant urls can be found much faster.
   *
   * @param optionalSerializer optionally supply one serializer instance; if not specified or
   *     null, {@link org.reflections.serializers.XmlSerializer} will be used
   */
  public static Reflections collect(
      final String packagePrefix,
      final Predicate<String> resourceNameFilter,
      @Nullable Serializer... optionalSerializer) {
    Serializer serializer =
        optionalSerializer != null && optionalSerializer.length == 1
            ? optionalSerializer[0]
            : new XmlSerializer();

    Collection<URL> urls = ClasspathHelper.forPackage(packagePrefix);
    if (urls.isEmpty()) return null;
    long start = System.currentTimeMillis();
    final Reflections reflections = new Reflections();
    Iterable<Vfs.File> files = Vfs.findFiles(urls, packagePrefix, resourceNameFilter);
    for (final Vfs.File file : files) {
      InputStream inputStream = null;
      try {
        inputStream = file.openInputStream();
        reflections.merge(serializer.read(inputStream));
      } catch (IOException e) {
        throw new ReflectionsException("could not merge " + file, e);
      } finally {
        close(inputStream);
      }
    }

    if (log != null) {
      Store store = reflections.getStore();
      int keys = 0;
      int values = 0;
      for (String index : store.keySet()) {
        keys += store.get(index).keySet().size();
        values += store.get(index).size();
      }

      log.info(
          format(
              "Reflections took %d ms to collect %d url%s, producing %d keys and %d values [%s]",
              System.currentTimeMillis() - start,
              urls.size(),
              urls.size() > 1 ? "s" : "",
              keys,
              values,
              Joiner.on(", ").join(urls)));
    }
    return reflections;
  }
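
  // A hedged usage sketch: the resource prefix and filter regex are illustrative, and
  // FilterBuilder (shipped with the library) is assumed to satisfy the Predicate<String>
  // parameter here.
  static Reflections collectSavedMetadata() {
    return Reflections.collect(
        "META-INF/reflections", new FilterBuilder().include(".*-reflections\\.xml"));
  }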
  private static void addManifestationFileSet(
      SolrInputDocument doc, Collection<DcsManifestationFile> set, ArchiveStore store)
      throws IOException {
    for (DcsManifestationFile mf : set) {

      final String fileRef = mf.getRef() == null ? null : mf.getRef().getRef();
      setadd(doc, ManifestationFileField.FILE_REF, fileRef);
      setadd(doc, ManifestationFileField.PATH, mf.getPath());

      final Collection<DcsRelation> rels = mf.getRelSet();

      if (rels != null && !rels.isEmpty() && fileRef != null) {

        addRelationSet(doc, rels);

        for (DcsRelation rel : rels) {

          // <doc
          // field="mf_rel_urn:dataconservancy.org:file/4326762_hasRelationship">urn:dataconservancy.org:rel/isMetadataFor</doc>
          setadd(
              doc,
              ManifestationFileField.DYNAMIC_MF_REL_PREFIX.solrName()
                  + fileRef
                  + "_"
                  + RelationField.RELATION.solrName(),
              rel.getRelUri());

          // <doc
          // field="mf_rel_urn:dataconservancy.org:file/4326762_relatedTo">http://dataconservancy.org/dcs/entity/article_du</doc>
          setadd(
              doc,
              ManifestationFileField.DYNAMIC_MF_REL_PREFIX.solrName()
                  + fileRef
                  + "_"
                  + RelationField.TARGET.solrName(),
              rel.getRef().getRef());
        }
      }
    }
  }
 /**
  * Get the linked titles for the specified link type.
  *
  * @param linkType the link type; see {@link TdbTitle} for a description of link types
  * @param title the TdbTitle with links
  * @return a collection of linked titles for the specified type
  */
 public Collection<TdbTitle> getLinkedTdbTitlesForType(
     TdbTitle.LinkType linkType, TdbTitle title) {
   if (linkType == null) {
     throw new IllegalArgumentException("linkType cannot be null");
   }
   if (title == null) {
     throw new IllegalArgumentException("title cannot be null");
   }
   Collection<String> titleIds = title.getLinkedTdbTitleIdsForType(linkType);
   if (titleIds.isEmpty()) {
     return Collections.emptyList();
   }
   ArrayList<TdbTitle> titles = new ArrayList<TdbTitle>();
   for (String titleId : titleIds) {
     TdbTitle aTitle = getTdbTitleById(titleId);
     if (aTitle != null) {
       titles.add(aTitle);
     }
   }
   titles.trimToSize();
   return titles;
 }
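
 // A hypothetical call site; the link type value comes from TdbTitle.LinkType (see the TdbTitle
 // javadoc), and "title" is an illustrative argument.
 public void printLinkedTitles(TdbTitle title, TdbTitle.LinkType type) {
   for (TdbTitle linked : getLinkedTdbTitlesForType(type, title)) {
     System.out.println(linked); // relies on TdbTitle's toString()
   }
 }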
 /**
  * Removes from the given files those that are descendants of other files in the collection.
  *
  * @param removeProcessor invoked with (parent, child); returns true if the child should be
  *     dropped as covered by the parent
  */
 public static <T> Collection<T> removeAncestors(
     final Collection<T> files,
     final Convertor<T, String> convertor,
     final PairProcessor<T, T> removeProcessor) {
   if (files.isEmpty()) return files;
   final TreeMap<String, T> paths = new TreeMap<String, T>();
   for (T file : files) {
     final String path = convertor.convert(file);
     assert path != null;
     final String canonicalPath = toCanonicalPath(path);
     paths.put(canonicalPath, file);
   }
   final List<Map.Entry<String, T>> ordered =
       new ArrayList<Map.Entry<String, T>>(paths.entrySet());
   final List<T> result = new ArrayList<T>(ordered.size());
   result.add(ordered.get(0).getValue());
   for (int i = 1; i < ordered.size(); i++) {
     final Map.Entry<String, T> entry = ordered.get(i);
     final String child = entry.getKey();
     boolean parentNotFound = true;
     for (int j = i - 1; j >= 0; j--) {
       // possible parents
       final String parent = ordered.get(j).getKey();
       if (parent == null) continue;
       if (startsWith(child, parent)
           && removeProcessor.process(ordered.get(j).getValue(), entry.getValue())) {
         parentNotFound = false;
         break;
       }
     }
     if (parentNotFound) {
       result.add(entry.getValue());
     }
   }
   return result;
 }
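
 // A hypothetical call with plain String paths; the Convertor and PairProcessor bodies below are
 // illustrative choices, not the original callers'.
 static Collection<String> keepOnlyRoots(Collection<String> paths) {
   return removeAncestors(
       paths,
       new Convertor<String, String>() {
         @Override
         public String convert(String path) {
           return path; // elements are already path strings
         }
       },
       new PairProcessor<String, String>() {
         @Override
         public boolean process(String parent, String child) {
           return true; // always drop a path whose ancestor is present
         }
       });
 }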
File: Graph.java (project: safdariqbal/frex)
  /* Print the current type graph to the dotfile */
  public void printDot(String title, Call call) {
    boolean printUnifications = false;
    PrintStream ps = PointsToAnalysis.v().file;
    if (ps == null) return;

    ps.println("\ndigraph F {");
    ps.println("   size = \"7,7\"; rankdir = LR;");
    ps.println("   orientation = landscape;");

    ps.println("   subgraph cluster1 {");
    ps.println("   \"Method: " + method.getName() + "\" [color=white];");

    if (nodes.isEmpty()) {
      ps.println("   \"empty graph\" [color = white];");
      ps.println("   }");
      ps.println("}");
      return;
    }

    for (Node node : nodes) {
      if (!printUnifications && !node.isRep()) continue;
      String color = "style=filled,fillcolor=";
      if (node.isheap && node.hasallocs) color += "red,";
      else if (node.isheap) color += "orange,";
      else if (node.hasallocs) color += "grey,";
      else color += "white,";
      // if (node.istouched) color = "khaki";
      // if (node.hassync) color = "khaki";
      String shape = "shape=";
      if (node.istouched) shape += "box";
      else shape += "ellipse";

      ps.println("   o" + node.id + "[label = \"" + node.getName() + "\"," + color + shape + "];");
    }
    ps.println("   }");

    Map<Integer, Map<Integer, String>> labels = new HashMap<Integer, Map<Integer, String>>();
    for (Field f : fedges.keySet())
      for (FieldEdge e : fedges.get(f)) {
        if (labels.containsKey(e.src.id)) {
          if (labels.get(e.src.id).containsKey(e.dst.id)) {
            labels.get(e.src.id).put(e.dst.id, "*");
            //                            labels.get(e.src.id).get(e.dst.id) + ", " +
            //                            e.field.getName());
          } else labels.get(e.src.id).put(e.dst.id, e.field.getName());

        } else {
          Map<Integer, String> is = new HashMap<Integer, String>();
          is.put(e.dst.id, e.field.getName());
          labels.put(e.src.id, is);
        }
      }
    for (Integer i : labels.keySet())
      for (Integer j : labels.get(i).keySet())
        ps.print(
            "   o"
                + i
                + " -> o"
                + j
                + "[label=\""
                + labels.get(i).get(j)
                + "\",style=solid,color=black];");

    for (Call ce : cedges.keySet())
      for (CallEdge e : cedges.get(ce)) {
        if (!(e.call instanceof VirtualCallExpr)) continue;
        // if (!e.call.equals(call)) continue;
        ps.print(
            "   o"
                + e.src.id
                + " -> o"
                + e.dst.id
                + "[label=\""
                + e.call
                + "\",style=solid,color=red];");
      }

    if (printUnifications)
      for (Node node : nodes)
        if (node.parent != null)
          ps.println("   o" + node.id + " -> o" + node.parent.id + " [color = blue];");

    ps.println("}");
  }
  /**
    * Removes locks for the given version and keys, regardless of whether they are owned.
   *
   * @param ver Lock version.
   * @param keys Keys.
   */
  @SuppressWarnings({"unchecked"})
  public void removeLocks(GridCacheVersion ver, Collection<? extends K> keys) {
    if (keys.isEmpty()) return;

    try {
      Collection<GridRichNode> affNodes = null;

      int keyCnt = -1;

      Map<GridNode, GridNearUnlockRequest<K, V>> map = null;

      for (K key : keys) {
        // Send request to remove from remote nodes.
        GridNearUnlockRequest<K, V> req = null;

        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekExx(key);

          try {
            if (entry != null) {
              GridCacheMvccCandidate<K> cand = entry.candidate(ver);

              if (cand != null) {
                if (affNodes == null) {
                  affNodes = CU.allNodes(ctx, cand.topologyVersion());

                  keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());

                  map = new HashMap<GridNode, GridNearUnlockRequest<K, V>>(affNodes.size());
                }

                GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

                if (!primary.isLocal()) {
                  req = map.get(primary);

                  if (req == null) {
                    map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                    req.version(ver);
                  }
                }

                // Remove candidate from local node first.
                if (entry.removeLock(cand.version())) {
                  if (primary.isLocal()) {
                    dht.removeLocks(primary.id(), ver, F.asList(key), true);

                    assert req == null;

                    continue;
                  }

                  req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                }
              }
            }

            break;
          } catch (GridCacheEntryRemovedException ignored) {
            if (log.isDebugEnabled())
              log.debug(
                  "Attempted to remove lock from removed entry (will retry) [rmvVer="
                      + ver
                      + ", entry="
                      + entry
                      + ']');
          }
        }
      }

      if (map == null || map.isEmpty()) return;

      Collection<GridCacheVersion> committed = ctx.tm().committedVersions(ver);
      Collection<GridCacheVersion> rolledback = ctx.tm().rolledbackVersions(ver);

      for (Map.Entry<GridNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
        GridNode n = mapping.getKey();

        GridDistributedUnlockRequest<K, V> req = mapping.getValue();

        if (!req.keyBytes().isEmpty()) {
          req.completedVersions(committed, rolledback);

          // We don't wait for reply to this message.
          ctx.io().send(n, req);
        }
      }
    } catch (GridException ex) {
      U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
  }
  /** {@inheritDoc} */
  @Override
  public void unlockAll(
      Collection<? extends K> keys, GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty()) return;

    try {
      GridCacheVersion ver = null;

      Collection<GridRichNode> affNodes = null;

      int keyCnt = -1;

      Map<GridRichNode, GridNearUnlockRequest<K, V>> map = null;

      Collection<K> locKeys = new LinkedList<K>();

      GridCacheVersion obsoleteVer = ctx.versions().next();

      for (K key : keys) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = peekExx(key);

          if (entry == null || !ctx.isAll(entry.wrap(false), filter)) break; // While.

          try {
            GridCacheMvccCandidate<K> cand =
                entry.candidate(ctx.nodeId(), Thread.currentThread().getId());

            if (cand != null) {
              ver = cand.version();

              if (affNodes == null) {
                affNodes = CU.allNodes(ctx, cand.topologyVersion());

                keyCnt = (int) Math.ceil((double) keys.size() / affNodes.size());

                map = new HashMap<GridRichNode, GridNearUnlockRequest<K, V>>(affNodes.size());
              }

              // Send request to remove from remote nodes.
              GridRichNode primary = CU.primary0(ctx.affinity(key, affNodes));

              GridNearUnlockRequest<K, V> req = map.get(primary);

              if (req == null) {
                map.put(primary, req = new GridNearUnlockRequest<K, V>(keyCnt));

                req.version(ver);
              }

              // Remove candidate from local node first.
              GridCacheMvccCandidate<K> rmv = entry.removeLock();

              if (rmv != null) {
                if (!rmv.reentry()) {
                  if (ver != null && !ver.equals(rmv.version()))
                    throw new GridException(
                        "Failed to unlock (if keys were locked separately, "
                            + "then they need to be unlocked separately): "
                            + keys);

                  if (!primary.isLocal()) {
                    assert req != null;

                    req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);
                  } else locKeys.add(key);

                  if (log.isDebugEnabled()) log.debug("Removed lock (will distribute): " + rmv);
                } else if (log.isDebugEnabled())
                  log.debug(
                      "Current thread still owns lock (or there are no other nodes)"
                          + " [lock="
                          + rmv
                          + ", curThreadId="
                          + Thread.currentThread().getId()
                          + ']');
              }

              // Try to evict near entry if it's dht-mapped locally.
              evictNearEntry(entry, obsoleteVer);
            }

            break;
          } catch (GridCacheEntryRemovedException ignore) {
            if (log.isDebugEnabled())
              log.debug("Attempted to unlock removed entry (will retry): " + entry);
          }
        }
      }

      if (ver == null) return;

      for (Map.Entry<GridRichNode, GridNearUnlockRequest<K, V>> mapping : map.entrySet()) {
        GridRichNode n = mapping.getKey();

        GridDistributedUnlockRequest<K, V> req = mapping.getValue();

        if (n.isLocal()) dht.removeLocks(ctx.nodeId(), req.version(), locKeys, true);
        else if (!req.keyBytes().isEmpty())
          // We don't wait for reply to this message.
          ctx.io().send(n, req);
      }
    } catch (GridException ex) {
      U.error(log, "Failed to unlock the lock for keys: " + keys, ex);
    }
  }
    BuildStatus iterativeCompile(
        final ModuleChunk chunk,
        final Set<String> sources,
        final Set<String> outdated,
        final Set<String> removed,
        final Flags flags) {
      final Collection<String> filesToCompile =
          DefaultGroovyMethods.intersect(affectedFiles, sources);

      if (outdated != null) {
        for (String s : outdated) {
          assert (s != null);
        }

        filesToCompile.addAll(outdated);
      }

      filesToCompile.removeAll(compiledFiles);

      if (!filesToCompile.isEmpty() || removed != null) {
        final Set<String> outputFiles = new HashSet<String>();

        for (String f : filesToCompile) {
          final Set<ClassRepr> classes = dependencyMapping.getClasses(f);

          if (classes != null) {
            for (ClassRepr cr : classes) {
              outputFiles.add(cr.getFileName());
            }
          }
        }

        if (removed != null) {
          for (String f : removed) {
            final Set<ClassRepr> classes = dependencyMapping.getClasses(f);
            if (classes != null) {
              for (ClassRepr cr : classes) {
                outputFiles.add(cr.getFileName());
              }
            }
          }
        }

        if (!outputFiles.isEmpty()) {
          new Logger(flags) {
            @Override
            public void log(PrintStream stream) {
              stream.println("Cleaning output files:");
              logFilePaths(stream, outputFiles);
              stream.println("End of files");
            }
          }.log();

          builder.clearChunk(chunk, outputFiles, ProjectWrapper.this);
        }

        final Mappings delta = dependencyMapping.createDelta();
        final Callbacks.Backend deltaBackend = delta.getCallback();

        new Logger(flags) {
          @Override
          public void log(PrintStream stream) {
            stream.println("Compiling files:");
            logFilePaths(stream, filesToCompile);
            stream.println("End of files");
          }
        }.log();

        boolean buildException = false;

        try {
          builder.buildChunk(
              chunk, flags.tests(), filesToCompile, deltaBackend, ProjectWrapper.this);
        } catch (Exception e) {
          e.printStackTrace();
          buildException = true;
        }

        if (!buildException) {
          compiledFiles.addAll(filesToCompile);
          affectedFiles.removeAll(filesToCompile);

          final Collection<File> files = new HashSet<File>();
          final Collection<File> compiled = new HashSet<File>();

          for (String f : filesToCompile) {
            files.add(new File(f));
          }

          for (String f : compiledFiles) {
            compiled.add(new File(f));
          }

          final Collection<File> affected = new HashSet<File>();

          final boolean incremental =
              dependencyMapping.differentiate(delta, removed, files, compiled, affected);

          for (File a : affected) {
            affectedFiles.add(FileUtil.toSystemIndependentName(a.getAbsolutePath()));
          }

          dependencyMapping.integrate(delta, files, removed);

          if (!incremental) {
            affectedFiles.addAll(sources);
            affectedFiles.removeAll(compiledFiles);

            final BuildStatus result = iterativeCompile(chunk, sources, null, null, flags);

            if (result == BuildStatus.FAILURE) {
              return result;
            }

            return BuildStatus.CONSERVATIVE;
          }

          return iterativeCompile(chunk, sources, null, null, flags);
        } else {
          return BuildStatus.FAILURE;
        }
      } else {
        for (Module m : chunk.getElements()) {
          Reporter.reportBuildSuccess(m, flags.tests());
        }
      }

      return BuildStatus.INCREMENTAL;
    }
  /**
   * Test file creation.
   *
   * @param path Path to file to store.
   * @param size Size of file to store.
   * @param salt Salt for file content generation.
   * @throws Exception In case of any exception.
   */
  private void testCreateFile(final GridGgfsPath path, final long size, final int salt)
      throws Exception {
    info("Create file [path=" + path + ", size=" + size + ", salt=" + salt + ']');

    final AtomicInteger cnt = new AtomicInteger(0);
    final Collection<GridGgfsPath> cleanUp = new ConcurrentLinkedQueue<>();

    long time =
        runMultiThreaded(
            new Callable<Object>() {
              @Override
              public Object call() throws Exception {
                int id = cnt.incrementAndGet();

                GridGgfsPath f = new GridGgfsPath(path.parent(), "asdf" + (id > 1 ? "-" + id : ""));

                try (GridGgfsOutputStream out = fs.create(f, 0, true, null, 0, 1024, null)) {
                  assertNotNull(out);

                  cleanUp.add(f); // Add all created into cleanup list.

                  U.copy(new GridGgfsTestInputStream(size, salt), out);
                }

                return null;
              }
            },
            WRITING_THREADS_CNT,
            "perform-multi-thread-writing");

    if (time > 0) {
      double rate = size * 1000. / time / 1024 / 1024;

      info(
          String.format(
              "Write file [path=%s, size=%d kB, rate=%2.1f MB/s]",
              path, WRITING_THREADS_CNT * size / 1024, WRITING_THREADS_CNT * rate));
    }

    info("Read and validate saved file: " + path);

    final InputStream expIn = new GridGgfsTestInputStream(size, salt);
    final GridGgfsInputStream actIn = fs.open(path, CFG_BLOCK_SIZE * READING_THREADS_CNT * 11 / 10);

    // Validate continuous reading of whole file.
    assertEqualStreams(expIn, actIn, size, null);

    // Validate random seek and reading.
    final Random rnd = new Random();

    runMultiThreaded(
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            long skip = Math.abs(rnd.nextLong() % (size + 1));
            long range = Math.min(size - skip, rnd.nextInt(CFG_BLOCK_SIZE * 400));

            assertEqualStreams(new GridGgfsTestInputStream(size, salt), actIn, range, skip);

            return null;
          }
        },
        READING_THREADS_CNT,
        "validate-multi-thread-reading");

    expIn.close();
    actIn.close();

    info("Get stored file info: " + path);

    GridGgfsFile desc = fs.info(path);

    info("Validate stored file info: " + desc);

    assertNotNull(desc);

    if (log.isDebugEnabled()) log.debug("File descriptor: " + desc);

    Collection<GridGgfsBlockLocation> aff = fs.affinity(path, 0, desc.length());

    assertFalse("Affinity: " + aff, desc.length() != 0 && aff.isEmpty());

    int blockSize = desc.blockSize();

    assertEquals("File size", size, desc.length());
    assertEquals("Binary block size", CFG_BLOCK_SIZE, blockSize);
    // assertEquals("Permission", "rwxr-xr-x", desc.getPermission().toString());
    // assertEquals("Permission sticky bit marks this is file", false,
    // desc.getPermission().getStickyBit());
    assertEquals("Type", true, desc.isFile());
    assertEquals("Type", false, desc.isDirectory());

    info("Cleanup files: " + cleanUp);

    for (GridGgfsPath f : cleanUp) {
      fs.delete(f, true);
      assertNull(fs.info(f));
    }
  }
  public static void hashesWithParents(
      Project project,
      FilePath path,
      final AsynchConsumer<CommitHashPlusParents> consumer,
      final Getter<Boolean> isCanceled,
      Collection<VirtualFile> paths,
      final String... parameters)
      throws VcsException {
    // adjust path using change manager
    path = getLastCommitName(project, path);
    final VirtualFile root = GitUtil.getGitRoot(path);
    final GitLineHandler h = new GitLineHandler(project, root, GitCommand.LOG);
    final GitLogParser parser =
        new GitLogParser(
            project,
            GitLogParser.NameStatus.NAME,
            SHORT_HASH,
            COMMIT_TIME,
            SHORT_PARENTS,
            AUTHOR_NAME);
    h.setNoSSH(true);
    h.setStdoutSuppressed(true);
    h.addParameters(parameters);
    h.addParameters(parser.getPretty(), "--encoding=UTF-8", "--full-history");

    if (paths != null && !paths.isEmpty()) {
      h.endOptions();
      h.addRelativeFiles(paths);
    } else {
      h.addParameters("--sparse");
      h.endOptions();
      h.addRelativePaths(path);
    }

    final Semaphore semaphore = new Semaphore();
    h.addLineListener(
        new GitLineHandlerListener() {
          @Override
          public void onLineAvailable(final String line, final Key outputType) {
            try {
              if (ProcessOutputTypes.STDOUT.equals(outputType)) {
                if (isCanceled != null && isCanceled.get()) {
                  h.cancel();
                  return;
                }
                GitLogRecord record = parser.parseOneRecord(line);
                consumer.consume(
                    new CommitHashPlusParents(
                        record.getShortHash(),
                        record.getParentsShortHashes(),
                        record.getLongTimeStamp() * 1000,
                        record.getAuthorName()));
              }
            } catch (ProcessCanceledException e) {
              h.cancel();
              semaphore.up();
            }
          }

          @Override
          public void processTerminated(int exitCode) {
            semaphore.up();
          }

          @Override
          public void startFailed(Throwable exception) {
            semaphore.up();
          }
        });
    semaphore.down();
    h.start();
    semaphore.waitFor();
    consumer.finished();
  }
  /** {@inheritDoc} */
  @SuppressWarnings({"unchecked"})
  @Override
  public void unlockAll(
      Collection<? extends K> keys, GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys == null || keys.isEmpty()) return;

    Collection<? extends GridNode> nodes = ctx.remoteNodes(keys);

    try {
      GridDistributedUnlockRequest<K, V> req = new GridDistributedUnlockRequest<K, V>(keys.size());

      for (K key : keys) {
        GridDistributedCacheEntry<K, V> entry = entryexx(key);

        if (!ctx.isAll(entry.wrap(false), filter)) continue;

        // Unlock local lock first.
        GridCacheMvccCandidate<K> rmv = entry.removeLock();

        if (rmv != null && !nodes.isEmpty()) {
          if (!rmv.reentry()) {
            req.addKey(entry.key(), entry.getOrMarshalKeyBytes(), ctx);

            // We are assuming that lock ID is the same for all keys.
            req.version(rmv.version());

            if (log.isDebugEnabled()) log.debug("Removed lock (will distribute): " + rmv);
          } else {
            if (log.isDebugEnabled())
              log.debug(
                  "Locally unlocked lock reentry without distributing to other nodes [removed="
                      + rmv
                      + ", entry="
                      + entry
                      + ']');
          }
        } else {
          if (log.isDebugEnabled())
            log.debug(
                "Current thread still owns lock (or there are no other nodes) [lock="
                    + rmv
                    + ", curThreadId="
                    + Thread.currentThread().getId()
                    + ']');
        }
      }

      // Don't proceed if there are no keys to unlock.
      if (req.keyBytes().isEmpty()) {
        if (log.isDebugEnabled())
          log.debug("No keys to unlock locally (was it reentry unlock?): " + keys);

        return;
      }

      // We don't wait for reply to this message. Receiving side will have
      // to make sure that unlock requests don't come before lock requests.
      ctx.io().safeSend(nodes, req, null);
    } catch (GridException e) {
      U.error(log, "Failed to unlock keys: " + keys, e);
    }
  }
  /** {@inheritDoc} */
  @SuppressWarnings({"unchecked", "ThrowableInstanceNeverThrown"})
  @Override
  protected GridFuture<Boolean> lockAllAsync(
      Collection<? extends K> keys,
      long timeout,
      GridCacheTxLocalEx<K, V> tx,
      boolean isInvalidate,
      boolean isRead,
      boolean retval,
      GridCacheTxIsolation isolation,
      GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys.isEmpty()) return new GridFinishedFuture<Boolean>(ctx.kernalContext(), true);

    Collection<GridRichNode> nodes = ctx.remoteNodes(keys);

    final GridReplicatedLockFuture<K, V> fut =
        new GridReplicatedLockFuture<K, V>(ctx, keys, tx, this, nodes, timeout, filter);

    GridDistributedLockRequest<K, V> req =
        new GridDistributedLockRequest<K, V>(
            locNodeId,
            Thread.currentThread().getId(),
            fut.futureId(),
            fut.version(),
            tx != null,
            isRead,
            isolation,
            isInvalidate,
            timeout,
            keys.size());

    try {
      // Must add future before readying locks.
      if (!ctx.mvcc().addFuture(fut))
        throw new IllegalStateException("Duplicate future ID: " + fut);

      boolean distribute = false;

      for (K key : keys) {
        while (true) {
          GridDistributedCacheEntry<K, V> entry = null;

          try {
            entry = entryexx(key);

            if (!ctx.isAll(entry.wrap(false), filter)) {
              if (log.isDebugEnabled())
                log.debug("Entry being locked did not pass filter (will not lock): " + entry);

              fut.onDone(false);

              return fut;
            }

            // Removed exception may be thrown here.
            GridCacheMvccCandidate<K> cand = fut.addEntry(entry);

            if (cand != null) {
              req.addKeyBytes(
                  key,
                  cand.reentry() ? null : entry.getOrMarshalKeyBytes(),
                  retval,
                  entry.localCandidates(fut.version()),
                  ctx);

              req.completedVersions(
                  ctx.tm().committedVersions(fut.version()),
                  ctx.tm().rolledbackVersions(fut.version()));

              distribute = !cand.reentry();
            } else if (fut.isDone()) return fut;

            break;
          } catch (GridCacheEntryRemovedException ignored) {
            if (log.isDebugEnabled())
              log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
          }
        }
      }

      // If nothing to distribute at this point,
      // then all locks are reentries.
      if (!distribute) fut.complete(true);

      if (nodes.isEmpty()) fut.readyLocks();

      // No reason to send request if all locks are locally re-entered,
      // or if timeout is negative and local locks could not be acquired.
      if (fut.isDone()) return fut;

      try {
        ctx.io()
            .safeSend(
                fut.nodes(),
                req,
                new P1<GridNode>() {
                  @Override
                  public boolean apply(GridNode node) {
                    fut.onNodeLeft(node.id());

                    return !fut.isDone();
                  }
                });
      } catch (GridException e) {
        U.error(
            log,
            "Failed to send lock request to node [nodes="
                + U.toShortString(nodes)
                + ", req="
                + req
                + ']',
            e);

        fut.onError(e);
      }

      return fut;
    } catch (GridException e) {
      Throwable err = new GridException("Failed to acquire asynchronous lock for keys: " + keys, e);

      // Clean-up.
      fut.onError(err);

      ctx.mvcc().removeFuture(fut);

      return fut;
    }
  }
  public static void historyWithLinks(
      final Project project,
      FilePath path,
      @Nullable final SymbolicRefsI refs,
      @NotNull final AsynchConsumer<GitCommit> gitCommitConsumer,
      @Nullable final Getter<Boolean> isCanceled,
      @Nullable Collection<VirtualFile> paths,
      final String... parameters)
      throws VcsException {
    // adjust path using change manager
    path = getLastCommitName(project, path);
    final VirtualFile root = GitUtil.getGitRoot(path);
    final GitLineHandler h = new GitLineHandler(project, root, GitCommand.LOG);
    final GitLogParser parser =
        new GitLogParser(
            project,
            GitLogParser.NameStatus.STATUS,
            SHORT_HASH,
            HASH,
            COMMIT_TIME,
            AUTHOR_NAME,
            AUTHOR_TIME,
            AUTHOR_EMAIL,
            COMMITTER_NAME,
            COMMITTER_EMAIL,
            SHORT_PARENTS,
            REF_NAMES,
            SUBJECT,
            BODY,
            RAW_BODY);
    h.setNoSSH(true);
    h.setStdoutSuppressed(true);
    h.addParameters(parameters);
    h.addParameters("--name-status", parser.getPretty(), "--encoding=UTF-8", "--full-history");
    if (paths != null && !paths.isEmpty()) {
      h.endOptions();
      h.addRelativeFiles(paths);
    } else {
      h.addParameters("--sparse");
      h.endOptions();
      h.addRelativePaths(path);
    }

    final VcsException[] exc = new VcsException[1];
    final Semaphore semaphore = new Semaphore();
    final StringBuilder sb = new StringBuilder();
    final Ref<Boolean> skipFirst = new Ref<Boolean>(true);
    h.addLineListener(
        new GitLineHandlerAdapter() {
          @Override
          public void onLineAvailable(final String line, final Key outputType) {
            try {
              if (ProcessOutputTypes.STDOUT.equals(outputType)) {
                if (isCanceled != null && isCanceled.get()) {
                  h.cancel();
                  return;
                }
                // if (line.charAt(line.length() - 1) != '\u0003') {
                if ((!line.startsWith("\u0001")) || skipFirst.get()) {
                  if (sb.length() > 0) {
                    sb.append("\n");
                  }
                  sb.append(line);
                  skipFirst.set(false);
                  return;
                }
                takeLine(project, line, sb, parser, refs, root, exc, h, gitCommitConsumer);
              }
            } catch (ProcessCanceledException e) {
              h.cancel();
              semaphore.up();
            }
          }

          @Override
          public void processTerminated(int exitCode) {
            semaphore.up();
          }

          @Override
          public void startFailed(Throwable exception) {
            semaphore.up();
          }
        });
    semaphore.down();
    h.start();
    semaphore.waitFor();
    takeLine(project, "", sb, parser, refs, root, exc, h, gitCommitConsumer);
    gitCommitConsumer.finished();
    if (exc[0] != null) {
      throw exc[0];
    }
  }