Example #1
  /**
   * Returns the set of DNs that match the resource and subject indexes.
   *
   * @param indexes Resource search indexes.
   * @param subjectIndexes Subject search indexes.
   * @param bSubTree <code>true</code> for subtree search mode.
   * @return A set of DNs that match the resource and subject indexes.
   */
  public Set<String> getMatchingEntries(
      ResourceSearchIndexes indexes, Set<String> subjectIndexes, boolean bSubTree) {
    rwlock.readLock().lock();
    try {
      Set<String> results = new HashSet<String>();

      boolean hasSubjectIndexes = (subjectIndexes != null) && !subjectIndexes.isEmpty();

      if (hasSubjectIndexes) {
        for (String i : subjectIndexes) {
          Set<String> r = (Set<String>) subjectIndexCache.get(i);
          if (r != null) {
            results.addAll(r);
          }
        }
        results.retainAll(getHostIndexes(indexes));
      } else {
        results.addAll(getHostIndexes(indexes));
      }

      if (bSubTree) {
        results.retainAll(getPathParentIndexes(indexes));
      } else {
        results.retainAll(getPathIndexes(indexes));
      }

      return results;
    } finally {
      rwlock.readLock().unlock();
    }
  }
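
The method above follows a common indexing pattern: union the hits for each subject key, then narrow the result with retainAll against the host and path indexes. A minimal sketch of the same pattern over a plain map, with hypothetical names (indexCache, hostHits, pathHits) standing in for the caches used above:

  import java.util.HashMap;
  import java.util.HashSet;
  import java.util.Map;
  import java.util.Set;

  class IndexIntersection {
    // Hypothetical cache standing in for subjectIndexCache above.
    static Map<String, Set<String>> indexCache = new HashMap<>();

    static Set<String> matching(Set<String> subjectKeys, Set<String> hostHits, Set<String> pathHits) {
      Set<String> results = new HashSet<>();
      if (subjectKeys != null && !subjectKeys.isEmpty()) {
        for (String key : subjectKeys) {
          Set<String> hits = indexCache.get(key);
          if (hits != null) results.addAll(hits); // union across subject keys
        }
        results.retainAll(hostHits); // then narrow to host matches
      } else {
        results.addAll(hostHits); // no subject filter: start from the host matches
      }
      results.retainAll(pathHits); // finally narrow by path
      return results;
    }
  }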
Example #2
  /**
   * Checks if the elements generated from the first mapped element either have the desired value,
   * or have some value in common with the elements generated from the second mapped element.
   *
   * @param match current pattern match
   * @param ind mapped indices
   * @return true if a value match is found
   */
  @Override
  public boolean satisfies(Match match, int... ind) {
    assertIndLength(ind);

    // Collect values of the element group
    Set values = new HashSet();
    for (BioPAXElement gen : con1.generate(match, ind)) {
      values.addAll(pa1.getValueFromBean(gen));
    }

    // If emptiness is desired, check that
    if (value == EMPTY) return values.isEmpty();

    // If the value set must be non-empty, fail fast when it is empty
    if (oper == Operation.NOT_EMPTY_AND_NOT_INTERSECT && values.isEmpty()) return false;

    // If the desired value is the second mapped element, compare against it
    else if (value == USE_SECOND_ARG) {
      BioPAXElement q = match.get(ind[1]);
      return oper == Operation.INTERSECT ? values.contains(q) : !values.contains(q);
    }

    // If the element group is compared to a preset value that is itself a collection,
    // check whether the group's values intersect that collection
    else if (value instanceof Collection) {
      Collection query = (Collection) value;
      values.retainAll(query);

      if (oper == Operation.INTERSECT) return !values.isEmpty();
      else return values.isEmpty();
    }

    // If two set of elements should share a field value, check that
    else if (pa2 != null) {
      // Collect values of the second group
      Set others = new HashSet();
      for (BioPAXElement gen : con2.generate(match, ind)) {
        others.addAll(pa2.getValueFromBean(gen));
      }

      switch (oper) {
        case INTERSECT:
          others.retainAll(values);
          return !others.isEmpty();
        case NOT_INTERSECT:
          others.retainAll(values);
          return others.isEmpty();
        case NOT_EMPTY_AND_NOT_INTERSECT:
          if (others.isEmpty()) return false;
          others.retainAll(values);
          return others.isEmpty();
        default:
          throw new RuntimeException("Unhandled operation: " + oper);
      }
    }

    // Check if the element field values contain the parameter value
    else if (oper == Operation.INTERSECT) return values.contains(value);
    else return !values.contains(value);
  }
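
The pa2 branch above reduces to one copy-then-retainAll intersection plus an emptiness test per operation. A stripped-down sketch of just that logic, with a hypothetical Operation enum mirroring the one used above:

  import java.util.HashSet;
  import java.util.Set;

  enum Operation { INTERSECT, NOT_INTERSECT, NOT_EMPTY_AND_NOT_INTERSECT }

  final class IntersectCheck {
    // Mirrors the switch above: intersect 'others' with 'values' and test emptiness.
    static boolean satisfies(Operation oper, Set<?> values, Set<?> others) {
      if (oper == Operation.NOT_EMPTY_AND_NOT_INTERSECT && others.isEmpty()) return false;
      Set<Object> common = new HashSet<>(others);
      common.retainAll(values);
      return oper == Operation.INTERSECT ? !common.isEmpty() : common.isEmpty();
    }
  }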
Example #3
  public void testKeySetRetainAllNullFromEmpty() {
    final Map<K, V> map;
    try {
      map = makeEmptyMap();
    } catch (UnsupportedOperationException e) {
      return;
    }

    Set<K> keySet = map.keySet();
    if (supportsRemove) {
      try {
        keySet.retainAll(null);
        // Returning successfully is not ideal, but tolerated.
      } catch (NullPointerException e) {
        // Expected.
      }
    } else {
      try {
        keySet.retainAll(null);
        // We have to tolerate a successful return (Sun bug 4802647)
      } catch (UnsupportedOperationException e) {
        // Expected.
      } catch (NullPointerException e) {
        // Expected.
      }
    }
    assertInvariants(map);
  }
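
Whether a plain HashSet tolerates retainAll(null) depends on the JDK: older AbstractCollection implementations only failed once they dereferenced the argument, so an empty set could return normally (the Sun bug 4802647 tolerance above), while more recent JDKs null-check the argument up front. A small probe that, like the test, accepts both outcomes:

  import java.util.HashSet;
  import java.util.Set;

  class RetainAllNullProbe {
    public static void main(String[] args) {
      Set<String> empty = new HashSet<>();
      try {
        boolean changed = empty.retainAll(null);
        // Tolerated on JDKs that iterate before validating the argument.
        System.out.println("empty set: returned " + changed);
      } catch (NullPointerException e) {
        System.out.println("empty set: NPE (argument is null-checked up front)");
      }
    }
  }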
Example #4
  public static String conjuntos(int salto) throws InterruptedException {
    bienvenido();
    Set<Integer> a = new TreeSet<>();
    Set<Integer> b = new TreeSet<>();
    a.add(2);
    a.add(4);
    a.add(6);
    a.add(8);
    b.add(6);
    b.add(8);
    b.add(10);
    b.add(12);

    System.out.println("\n\n\n             Number of elements in A: " + a.size());
    System.out.println("             Elements of set A: " + a);
    espera_porciento();
    System.out.println("\n             Number of elements in B: " + b.size());
    System.out.println("             Elements of set B: " + b);
    espera_porciento();
    a.retainAll(b);
    System.out.println("\n             Set A intersection B: " + a);
    espera_porciento();
    a.add(2);
    a.add(4);
    a.addAll(b);
    System.out.println("\n             Set A union B: " + a);
    espera_porciento();
    a.add(2);
    a.add(4);
    a.removeAll(b);
    System.out.println("\n             Set A difference B: " + a);
    espera_porciento();

    // Symmetric difference: (A union B) minus (A intersection B)
    a.add(6);
    a.add(8); // restore A = {2, 4, 6, 8}
    Set<Integer> symmetric = new TreeSet<>(a);
    symmetric.addAll(b);
    Set<Integer> common = new TreeSet<>(a);
    common.retainAll(b);
    symmetric.removeAll(common);
    System.out.println("\n             Symmetric difference of A and B: " + symmetric);
    espera_porciento();

    System.out.println("\n1. Return to the main menu\n2. Return to the computing menu");
    System.out.print("Select an option: ");
    opcion = dato.next();
    while (!isNumber1(opcion)) {
      System.out.println("\nOnly numeric input is allowed");
      System.out.print("Select an option, please: ");
      opcion = dato.next();
    }
    if (opcion.equals("1")) {
      menu(salto);
    }
    if (opcion.equals("2")) {
      menu_comp(salto);
    }
    return null;
  }
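
With copies of the operands, all four set operations fall out of addAll, retainAll, and removeAll without the restore-and-reuse juggling above. A compact, self-contained sketch:

  import java.util.Set;
  import java.util.TreeSet;

  class SetAlgebra {
    public static void main(String[] args) {
      Set<Integer> a = new TreeSet<>(Set.of(2, 4, 6, 8));
      Set<Integer> b = new TreeSet<>(Set.of(6, 8, 10, 12));

      Set<Integer> union = new TreeSet<>(a);
      union.addAll(b); // [2, 4, 6, 8, 10, 12]

      Set<Integer> intersection = new TreeSet<>(a);
      intersection.retainAll(b); // [6, 8]

      Set<Integer> difference = new TreeSet<>(a);
      difference.removeAll(b); // [2, 4]

      Set<Integer> symmetric = new TreeSet<>(union);
      symmetric.removeAll(intersection); // [2, 4, 10, 12]

      System.out.println(union + " " + intersection + " " + difference + " " + symmetric);
    }
  }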
Example #5
  /**
   * Returns a Set of current children to remove and modifies newChildren to only contain the
   * children not already in children and not subsumed by any WeakExplorerNode in children.
   *
   * <p>Note: newChildren will be modified by this call.
   *
   * <p>Note: It is expected that a WeakExplorerNode will not be reused and thus they will always
   * initially be slated for removal, and only those nodes are in fact used to check subsumption of
   * new nodes. New nodes are not checked among themselves for subsumption.
   *
   * @param children is the list of current children.
   * @param newChildren is the list of expected children.
   * @return the Set of current children to remove.
   * @throws UnsupportedOperationException if newChildren doesn't support remove or removeAll.
   * @throws NullPointerException if either argument is null.
   */
  private Set prepareAddRemoveSets(List children, List newChildren) {
    Set removeSet = new HashSet();
    Set commonObjects = new HashSet();
    if (children.size() < newChildren.size()) {
      commonObjects.addAll(children);
      commonObjects.retainAll(newChildren);
    } else {
      commonObjects.addAll(newChildren);
      commonObjects.retainAll(children);
    }
    newChildren.removeAll(commonObjects);
    removeSet.addAll(children);
    removeSet.removeAll(commonObjects);

    // Handle WeakExplorerNodes
    Iterator it = removeSet.iterator();
    List weakNodes = null;
    while (it.hasNext()) {
      Object obj = it.next();
      if (!(obj instanceof WeakExplorerNode)) {
        continue;
      }
      WeakExplorerNode node = (WeakExplorerNode) obj;

      if (weakNodes == null) {
        weakNodes = new LinkedList();
        Iterator it2 = newChildren.iterator();
        while (it2.hasNext()) {
          Object obj2 = it2.next();
          if (obj2 instanceof WeakExplorerNode) {
            weakNodes.add(obj2);
          }
        }
      }

      Iterator it3 = weakNodes.iterator();
      while (it3.hasNext()) {
        Object obj3 = it3.next();
        if (node.subsumes(obj3)) {
          // Remove the node from removeSet
          it.remove();
          // Remove obj3 from weakNodes and newChildren
          newChildren.remove(obj3);
          it3.remove();
          break;
        }
      }
    }

    return removeSet;
  }
Example #6
  public static void main(String[] args) throws IOException {
    //		input = new Scanner(System.in);                     // for stdin
    input = new Scanner(new FileReader(args[0])); // for file reading

    //		output = System.out;                                // for stdout
    output = new PrintWriter(new FileWriter(args[1])); // for file writing

    Set<Integer> set1;
    Set<Integer> set2;
    Set<Integer> set3;

    String line = input.nextLine();
    Scanner scline = new Scanner(line);
    int n = scline.nextInt();

    for (int x = n; x >= 3; x -= 3) {
      line = input.nextLine();
      scline = new Scanner(line);
      set1 = getSet(scline);
      line = input.nextLine();
      scline = new Scanner(line);
      set2 = getSet(scline);
      line = input.nextLine();
      scline = new Scanner(line);
      set3 = getSet(scline);
      set1.retainAll(set2);
      set1.retainAll(set3);
      output.println(set1);
    }
    scline.close();
    output.flush();
    input.close();
    output.close();
  }
Example #7
 /**
  * Runs through all games and collects the set of years represented in the database for each
  * game, then finds the largest year common to all of them. If that year equals the current
  * year, the returned array contains only the current year; otherwise it contains every year
  * from the largest common year up to the current year, inclusive.
  *
  * @return an array of years to update all games for
  */
 private int[] getLastYear() {
   int max = Lottery.MIN_COMMON_YEAR;
   // come up with a maximum shared year for all games
   try {
     // find all shared years
     final Set<Integer> commonYears = getYearsFromGame(UPDATE_THESE[0]);
     for (int i = 1; i < UPDATE_THESE.length; i++) {
       commonYears.retainAll(getYearsFromGame(UPDATE_THESE[i]));
     }
     // find the max
     for (int year : commonYears) {
       if (year > max) {
         max = year;
       }
     }
   } catch (final FileNotFoundException e) {
     e.printStackTrace();
     max = Lottery.MIN_COMMON_YEAR;
   }
   // compare with the current year and return a list of years between then
   // and now
   final int[] result;
   if (max == currentYear()) {
     result = new int[1];
     result[0] = max;
   } else {
     result = new int[currentYear() - max + 1];
     int year = max;
     for (int i = 0; i < result.length; i++) {
       result[i] = year;
       year++;
     }
   }
   return result;
 }
Example #8
  /** {@inheritDoc} */
  @Override
  public boolean process(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv) {
    if (roundEnv.processingOver()) return false;

    // We have: A sorted set of all priority levels: 'priorityLevels'

    // Step 1: Take all CUs which aren't already in the map. Give them the first priority level.

    for (Element element : roundEnv.getRootElements()) {
      JCCompilationUnit unit = toUnit(element);
      if (unit == null) continue;
      if (roots.containsKey(unit)) continue;
      roots.put(unit, priorityLevels[0]);
    }

    while (true) {
      // Step 2: For all CUs (in the map, not the roundEnv!), run them across all handlers at their
      // current prio level.

      for (long prio : priorityLevels) {
        List<JCCompilationUnit> cusForThisRound = new ArrayList<JCCompilationUnit>();
        for (Map.Entry<JCCompilationUnit, Long> entry : roots.entrySet()) {
          Long prioOfCu = entry.getValue();
          if (prioOfCu == null || prioOfCu != prio) continue;
          cusForThisRound.add(entry.getKey());
        }
        transformer.transform(prio, processingEnv.getContext(), cusForThisRound);
      }

      // Step 3: Push up all CUs to the next level. Set level to null if there is no next level.

      Set<Long> newLevels = new HashSet<Long>();
      for (int i = priorityLevels.length - 1; i >= 0; i--) {
        Long curLevel = priorityLevels[i];
        Long nextLevel = (i == priorityLevels.length - 1) ? null : priorityLevels[i + 1];
        for (Map.Entry<JCCompilationUnit, Long> entry : roots.entrySet()) {
          if (curLevel.equals(entry.getValue())) {
            entry.setValue(nextLevel);
            newLevels.add(nextLevel);
          }
        }
      }
      newLevels.remove(null);

      // Step 4: If ALL values are null, quit. Else, either do another loop right now or force a
      // resolution reset by forcing a new round in the annotation processor.

      if (newLevels.isEmpty()) return false;
      newLevels.retainAll(priorityLevelsRequiringResolutionReset);
      if (newLevels.isEmpty()) {
        // None of the new levels need resolution, so just keep going.
        continue;
      } else {
        // Force a new round to reset resolution. The next round will cause this method (process) to
        // be called again.
        forceNewRound((JavacFiler) processingEnv.getFiler());
        return false;
      }
    }
  }
Example #9
  public void validateAlternateAlleles() {
    if (!hasGenotypes()) return;

    List<Allele> reportedAlleles = getAlleles();
    Set<Allele> observedAlleles = new HashSet<Allele>();
    observedAlleles.add(getReference());
    for (final Genotype g : getGenotypes()) {
      if (g.isCalled()) observedAlleles.addAll(g.getAlleles());
    }

    if (reportedAlleles.size() != observedAlleles.size())
      throw new TribbleException.InternalCodecException(
          String.format(
              "the ALT allele(s) for the record at position %s:%d do not match what is observed in the per-sample genotypes",
              getChr(), getStart()));

    int originalSize = reportedAlleles.size();
    // take the intersection and see if things change
    observedAlleles.retainAll(reportedAlleles);
    if (observedAlleles.size() != originalSize)
      throw new TribbleException.InternalCodecException(
          String.format(
              "the ALT allele(s) for the record at position %s:%d do not match what is observed in the per-sample genotypes",
              getChr(), getStart()));
  }
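
The validation above tests set equality in two steps: equal sizes, then an intersection that must not shrink the observed set. The same check as a standalone predicate (names hypothetical), assuming the reported collection holds no duplicates:

  import java.util.Collection;
  import java.util.HashSet;
  import java.util.Set;

  final class SetEquality {
    // True when both collections contain exactly the same distinct elements.
    static <T> boolean sameElements(Collection<T> reported, Set<T> observed) {
      if (reported.size() != observed.size()) return false;
      Set<T> copy = new HashSet<>(observed);
      copy.retainAll(reported); // the intersection must not lose anything
      return copy.size() == observed.size();
    }
  }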
Example #10
 public static void intersectSet(Set<String> target, Set<String> set) {
   if (target.isEmpty()) {
     target.addAll(set);
   } else {
     target.retainAll(set);
   }
 }
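
The empty-target convention makes the helper usable as a fold, with an empty set acting as the identity element. A usage sketch (the helper is duplicated so the example is self-contained):

  import java.util.HashSet;
  import java.util.List;
  import java.util.Set;

  class IntersectFold {
    public static void main(String[] args) {
      Set<String> acc = new HashSet<>(); // empty target = identity element
      for (Set<String> s : List.of(Set.of("a", "b", "c"), Set.of("b", "c"), Set.of("c", "d"))) {
        intersectSet(acc, s);
      }
      System.out.println(acc); // [c]
    }

    static void intersectSet(Set<String> target, Set<String> set) {
      if (target.isEmpty()) target.addAll(set);
      else target.retainAll(set);
    }
  }

Note the convention's pitfall: if an intermediate intersection comes out genuinely empty, the next call re-seeds the target from the following set instead of keeping it empty.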
Example #11
  public FloatWritable evaluate(final List<String> a, final List<String> b) {
    if (a == null && b == null) {
      return new FloatWritable(0.f);
    } else if (a == null || b == null) {
      return new FloatWritable(1.f);
    }
    final int asize = a.size();
    final int bsize = b.size();
    if (asize == 0 && bsize == 0) {
      return new FloatWritable(0.f);
    } else if (asize == 0 || bsize == 0) {
      return new FloatWritable(1.f);
    }

    union.addAll(a);
    union.addAll(b);
    float unionSize = union.size();
    union.clear();

    intersect.addAll(a);
    intersect.retainAll(b);
    float intersectSize = intersect.size();
    intersect.clear();

    float j = intersectSize / unionSize;
    return new FloatWritable(1.f - j);
  }
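
Because the method reuses two shared union/intersect sets, it is not thread-safe. A self-contained sketch of the same distance, 1 - |A ∩ B| / |A ∪ B|, using local sets (the null and empty handling above is omitted):

  import java.util.HashSet;
  import java.util.List;
  import java.util.Set;

  class JaccardDistance {
    static float distance(List<String> a, List<String> b) {
      Set<String> union = new HashSet<>(a);
      union.addAll(b);
      Set<String> intersection = new HashSet<>(a);
      intersection.retainAll(b);
      return 1f - (float) intersection.size() / union.size();
    }

    public static void main(String[] args) {
      System.out.println(distance(List.of("a", "b"), List.of("b", "c"))); // 1 - 1/3 ≈ 0.67
    }
  }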
Example #12
  public static void main(String[] args) {
    Scanner in = new Scanner(System.in);
    int n = in.nextInt();
    while (n > 0) {
      String s1 = in.next();
      String s2 = in.next();

      Set<Character> alphaS1 = new HashSet<Character>();
      for (int i = 0; i < s1.length(); i++) {
        alphaS1.add(s1.charAt(i));
      }

      Set<Character> alphaS2 = new HashSet<Character>();
      for (int i = 0; i < s2.length(); i++) {
        alphaS2.add(s2.charAt(i));
      }

      alphaS1.retainAll(alphaS2);

      if (alphaS1.size() > 0) {
        System.out.println("YES");
      } else {
        System.out.println("NO");
      }
      n--;
    }
  }
Example #13
  protected boolean backScan(SequenceDatabase<T> sdb) {
    List<T> prefix = sdb.getPrefix();
    prefix_loop:
    for (int i = 0; i < prefix.size(); i++) {
      Set<T> intersection = null;

      for (Sequence<T> sequence : sdb.getSequences()) {
        List<T> smp_i = sequence.getSemiMaximumPeriodOf(prefix, i);
        if (smp_i == null || smp_i.isEmpty()) {
          continue prefix_loop;
        }

        if (intersection == null) {
          intersection = new HashSet<T>(smp_i);
        } else {
          intersection.retainAll(smp_i);
          if (intersection.isEmpty()) {
            continue prefix_loop;
          }
        }
      }

      if (logger.isTraceEnabled()) {
        logger.trace(
            "Pruned: Search space was pruned by the BackScan method. Items {} exists in each of the {} semi-maximum period.",
            intersection,
            toOrdinal(i));
      }
      return true;
    }
    return false;
  }
Example #14
  /**
   * A dumb vector space model that counts each word's co-occurences with a predefined set of
   * content words and uses these co-occurence vectors directly as word representations. The context
   * in which a word occurs is the set of content words in an entire sentence.
   *
   * <p>N.B. Most people would probably not consider this an embedding model, since the words have
   * not been embedded in a lower dimensional subspace. However, it is a good starting point.
   *
   * <p>Since this approach does not share any information between representations of different
   * words, we can filter the training data to only include sentences that contain words of
   * interest. In other approaches this may not be a good idea.
   *
   * @param dataPath path to the sentence collection to read
   * @param contentVocab content words used as the dimensions of each co-occurrence vector
   * @param targetVocab the words to build vector representations for
   * @return a map from each target word to its co-occurrence count vector
   */
  private static HashMap<String, float[]> getEmbeddings(
      String dataPath, HashMap<String, Integer> contentVocab, Set<String> targetVocab) {

    HashMap<String, float[]> embeddingMatrix = new HashMap<String, float[]>();
    for (String target_word : targetVocab) {
      embeddingMatrix.put(target_word, new float[contentVocab.size()]);
    }

    Collection<List<String>> sentenceCollection =
        SentenceCollection.Reader.readSentenceCollection(dataPath);

    for (List<String> sentence : sentenceCollection) {
      Set<String> sw = new HashSet<String>(sentence);
      sw.retainAll(targetVocab);
      for (String word : sentence) {
        if (!contentVocab.containsKey(word)) continue;
        int contentWordId = contentVocab.get(word);
        for (String targetWord : sw) {
          embeddingMatrix.get(targetWord)[contentWordId] += 1;
        }
      }
    }

    return embeddingMatrix;
  }
Example #15
  @Test
  public void keySet_retainAll() {
    // a map with a null key
    MutableMap<Integer, Integer> map = this.newMapWithKeyValue(null, 0);

    MutableList<Object> retained = Lists.mutable.of();
    retained.add(null);
    Assert.assertFalse(map.keySet().retainAll(retained));
    Verify.assertContains(null, map.keySet());

    // a map with a chain containing empty slots
    MutableMap<Integer, Integer> map2 = this.mapWithCollisionsOfSize(5);
    Assert.assertFalse(map2.keySet().retainAll(FastList.<Integer>newListWith(0, 17, 34, 51, 68)));
    Verify.assertContainsAll(map2.keySet(), 0, 17, 34, 51, 68);

    // a map with no chaining, nothing retained
    MutableMap<Integer, String> map3 = this.newMapWithKeyValue(1, "One");
    Assert.assertTrue(map3.keySet().retainAll(FastList.<Integer>newListWith(9)));
    Verify.assertEmpty(map3);

    Set<Integer> keys =
        this.newMapWithKeysValues(1, "One", 2, "Two", 3, "Three", 4, "Four").keySet();
    Assert.assertTrue(keys.retainAll(FastList.<Integer>newListWith(1, 2, 3)));
    Verify.assertContainsAll(keys, 1, 2, 3);
  }
Example #16
  /**
   * Hides nodes from the tree that do not match <code>text</code>.
   *
   * @param text search text
   */
  public void filterByText(String text) {
    text = normalize(text);
    if (text == null || text.length() == 0) {
      visibleNodes = null;
    } else {
      visibleNodes = new HashSet<Object>();
      String[] keywords = text.split(" ");
      for (int i = 0; i < keywords.length; i++) {
        SortedMap<String, List<Object>> nodeListByKey = getMatches(keywords[i]);
        if (i == 0) {
          for (List<Object> nodes : nodeListByKey.values()) {
            visibleNodes.addAll(nodes);
          }
        } else {
          Set<Object> allNew = new HashSet<Object>();
          for (List<Object> nodes : nodeListByKey.values()) {
            allNew.addAll(nodes);
          }
          visibleNodes.retainAll(allNew);
        }
      }
      ensureParentsVisible();
    }

    TreeModelEvent event = new TreeModelEvent(this, new Object[] {model.getRoot()});
    for (TreeModelListener listener : listeners) {
      listener.treeStructureChanged(event);
    }
  }
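
The loop gives the keywords AND semantics: the first keyword seeds the visible set and each further keyword intersects it. Reduced to a sketch over plain match sets, where matchesFor is a hypothetical stand-in for getMatches:

  import java.util.HashSet;
  import java.util.Map;
  import java.util.Set;

  class KeywordFilter {
    static Set<String> filter(Map<String, Set<String>> matchesFor, String text) {
      Set<String> visible = null;
      for (String keyword : text.trim().split(" ")) {
        Set<String> hits = matchesFor.getOrDefault(keyword, Set.of());
        if (visible == null) visible = new HashSet<>(hits); // first keyword seeds the set
        else visible.retainAll(hits); // later keywords narrow it
      }
      return visible == null ? Set.of() : visible;
    }
  }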
Example #17
  @Override
  protected void getCandidates(ChunkTypeRequest request, Set<IIdentifier> results) {
    boolean firstInsertion = true;
    FastSet<IIdentifier> tmp = FastSet.newInstance();

    for (IConditionalSlot slot : request.getConditionalSlots())
      if (slot.getName().equals(IAuralModule.KIND_SLOT)) {
        tmp.clear();
        String kind = transformKind(slot.getValue());
        switch (slot.getCondition()) {
          case IConditionalSlot.NOT_EQUALS:
            not(kind, tmp);
            break;
          default:
            equals(kind, tmp);
            break;
        }

        if (firstInsertion) {
          results.addAll(tmp);
          firstInsertion = false;
        } else results.retainAll(tmp);
      }

    FastSet.recycle(tmp);
  }
Example #18
  @Test
  public void retainAll() {
    toTest.add("Test1");
    toTest.add("Test2");
    toTest.add("Test3");

    assertFalse(toTest.retainAll(toTest));
    try {
      toTest.retainAll(null);
      fail("A NullPointerException should have been thrown.");
    } catch (NullPointerException e) {
      // Expected.
    }

    assertTrue(toTest.retainAll(Arrays.asList("Test2")));
    assertTrue(toTest.size() == 1);
  }
Example #19
  private void afterDeploymentValidation(
      @Observes AfterDeploymentValidation adv, BeanManager manager) {
    Collection<CamelContext> contexts = new ArrayList<>();
    for (Bean<?> context : manager.getBeans(CamelContext.class, ANY)) {
      contexts.add(getReference(manager, CamelContext.class, context));
    }

    // Add type converters to Camel contexts
    CdiTypeConverterLoader loader = new CdiTypeConverterLoader();
    for (Class<?> converter : converters) {
      for (CamelContext context : contexts) {
        loader.loadConverterMethods(context.getTypeConverterRegistry(), converter);
      }
    }

    // Add routes to Camel contexts
    boolean deploymentException = false;
    Set<Bean<?>> routes = new HashSet<>(manager.getBeans(RoutesBuilder.class, ANY));
    routes.addAll(manager.getBeans(RouteContainer.class, ANY));
    for (Bean<?> context : manager.getBeans(CamelContext.class, ANY)) {
      for (Bean<?> route : routes) {
        Set<Annotation> qualifiers = new HashSet<>(context.getQualifiers());
        qualifiers.retainAll(route.getQualifiers());
        if (qualifiers.size() > 1) {
          deploymentException |= !addRouteToContext(route, context, manager, adv);
        }
      }
    }
    // Let's return to avoid starting misconfigured contexts
    if (deploymentException) {
      return;
    }

    // Trigger eager beans instantiation (calling toString is necessary to force
    // the initialization of normal-scoped beans).
    // FIXME: This does not work with OpenWebBeans for beans whose bean type is an
    // interface, as the Object methods do not get forwarded to the bean instances!
    eagerBeans.forEach(type -> getReferencesByType(manager, type.getJavaClass(), ANY).toString());
    manager
        .getBeans(Object.class, ANY, STARTUP)
        .stream()
        .forEach(bean -> getReference(manager, bean.getBeanClass(), bean).toString());

    // Start Camel contexts
    for (CamelContext context : contexts) {
      if (ServiceStatus.Started.equals(context.getStatus())) {
        continue;
      }
      logger.info("Camel CDI is starting Camel context [{}]", context.getName());
      try {
        context.start();
      } catch (Exception exception) {
        adv.addDeploymentProblem(exception);
      }
    }

    // Clean-up
    Stream.of(converters, camelBeans, eagerBeans, cdiBeans).forEach(Set::clear);
    Stream.of(producerBeans, producerQualifiers).forEach(Map::clear);
  }
Example #20
  protected boolean hasBackwardExtensionItem(SequenceDatabase<T> sdb) {
    List<T> prefix = sdb.getPrefix();
    prefix_loop:
    for (int i = 0; i < prefix.size(); i++) {
      Set<T> intersection = null;

      for (Sequence<T> sequence : sdb.getSequences()) {
        List<T> mp_i = sequence.getMaximumPeriodOf(prefix, i);
        if (mp_i == null || mp_i.isEmpty()) {
          continue prefix_loop;
        }

        if (intersection == null) {
          intersection = new HashSet<T>(mp_i);
        } else {
          intersection.retainAll(mp_i);
          if (intersection.isEmpty()) {
            continue prefix_loop;
          }
        }
      }

      if (logger.isTraceEnabled()) {
        logger.trace(
            "Open: The prefix can be extended by backward extension items {}, found in each of the {} maximum period.",
            intersection,
            toOrdinal(i));
      }
      return true;
    }
    return false;
  }
Example #21
  private Map<TrustedEntityId, TrustSimilarity> extractTrustSimilarity(
      final TrustedEntityId myTeid, final Map<String, Set<TrustEvidence>> trustEvidenceMap)
      throws TrustException {

    final Map<TrustedEntityId, TrustSimilarity> trustSimilarityMap =
        new LinkedHashMap<TrustedEntityId, TrustSimilarity>();

    final Map<TrustedEntityId, Set<TrustedEntityId>> trustPrefsMap =
        this.extractTrustPrefs(trustEvidenceMap);
    LOG.info("extractTrustSimilarity: trustPrefsMap={}", trustPrefsMap);
    final Set<TrustedEntityId> myTrustees =
        (trustPrefsMap.get(myTeid) != null)
            ? trustPrefsMap.get(myTeid)
            : Collections.<TrustedEntityId>emptySet();
    for (final Map.Entry<TrustedEntityId, Set<TrustedEntityId>> trustPrefs :
        trustPrefsMap.entrySet()) {
      if (trustPrefs.getKey().equals(myTeid)) {
        continue;
      }
      final Set<TrustedEntityId> sharedTrustees =
          new LinkedHashSet<TrustedEntityId>(trustPrefs.getValue());
      sharedTrustees.retainAll(myTrustees);
      final Set<TrustedEntityId> unsharedTrustees =
          new LinkedHashSet<TrustedEntityId>(trustPrefs.getValue());
      unsharedTrustees.removeAll(myTrustees);
      trustSimilarityMap.put(
          trustPrefs.getKey(), new TrustSimilarity(sharedTrustees, unsharedTrustees));
    }

    return trustSimilarityMap;
  }
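
Partitioning one set against a reference into shared and unshared parts takes two copies, one narrowed with retainAll and one pruned with removeAll, exactly as above. A generic helper sketch:

  import java.util.HashSet;
  import java.util.Set;

  final class SetPartition {
    static <T> Set<T> shared(Set<T> items, Set<T> reference) {
      Set<T> shared = new HashSet<>(items);
      shared.retainAll(reference); // items also present in the reference
      return shared;
    }

    static <T> Set<T> unshared(Set<T> items, Set<T> reference) {
      Set<T> unshared = new HashSet<>(items);
      unshared.removeAll(reference); // items absent from the reference
      return unshared;
    }
  }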
Example #22
 private void addProtein(final List<Protein> proteins, Protein current_protein) {
   if ((getMaxAllowedOverlap() != HmmscanPerDomainTableParser.MAX_ALLOWED_OVERLAP_DEFAULT)
       || isIgnoreEngulfedDomains()) {
     final int domains_count = current_protein.getNumberOfProteinDomains();
     current_protein =
         SurfacingUtil.removeOverlappingDomains(
             getMaxAllowedOverlap(), isIgnoreEngulfedDomains(), current_protein);
     final int domains_removed = domains_count - current_protein.getNumberOfProteinDomains();
     _domains_stored -= domains_removed;
     _domains_ignored_due_to_overlap += domains_removed;
   }
   if ((getFilterType() == FilterType.POSITIVE_PROTEIN)
       || (getFilterType() == FilterType.NEGATIVE_PROTEIN)) {
     final Set<DomainId> domain_ids_in_protein = new HashSet<DomainId>();
     for (final Domain d : current_protein.getProteinDomains()) {
       domain_ids_in_protein.add(d.getDomainId());
     }
     domain_ids_in_protein.retainAll(getFilter());
     if (getFilterType() == FilterType.POSITIVE_PROTEIN) {
       if (domain_ids_in_protein.size() > 0) {
         actuallyAddProtein(proteins, current_protein);
       } else {
         ++_proteins_ignored_due_to_filter;
       }
     } else {
       if (domain_ids_in_protein.size() < 1) {
         actuallyAddProtein(proteins, current_protein);
       } else {
         ++_proteins_ignored_due_to_filter;
       }
     }
   } else {
     actuallyAddProtein(proteins, current_protein);
   }
 }
Example #23
  @Override
  public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    _fieldLocations = new HashMap<String, GlobalStreamId>();
    _collector = collector;
    int timeout = ((Number) conf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS)).intValue();
    _pending =
        new TimeCacheMap<List<Object>, Map<GlobalStreamId, Tuple>>(timeout, new ExpireCallback());
    _numSources = context.getThisSources().size();
    Set<String> idFields = null;
    for (GlobalStreamId source : context.getThisSources().keySet()) {
      Fields fields =
          context.getComponentOutputFields(source.get_componentId(), source.get_streamId());
      Set<String> setFields = new HashSet<String>(fields.toList());
      if (idFields == null) idFields = setFields;
      else idFields.retainAll(setFields);

      for (String outfield : _outFields) {
        for (String sourcefield : fields) {
          if (outfield.equals(sourcefield)) {
            _fieldLocations.put(outfield, source);
          }
        }
      }
    }
    _idFields = new Fields(new ArrayList<String>(idFields));

    if (_fieldLocations.size() != _outFields.size()) {
      throw new RuntimeException("Cannot find all outfields among sources");
    }
  }
Example #24
 public List<NamedEntity> intersect(TSignature s2) {
   List<NamedEntity> ret = new ArrayList<NamedEntity>();
   Set<NamedEntity> s = new HashSet<NamedEntity>(set);
   s.retainAll(s2.set);
   ret.addAll(s);
   return ret;
 }
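
Copying before calling retainAll, as above, is the standard way to intersect without mutating either operand. A generic sketch of the same idea:

  import java.util.ArrayList;
  import java.util.HashSet;
  import java.util.List;
  import java.util.Set;

  final class Sets {
    static <T> List<T> intersect(Set<T> first, Set<T> second) {
      Set<T> copy = new HashSet<>(first); // copy so neither operand is mutated
      copy.retainAll(second);
      return new ArrayList<>(copy);
    }
  }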
Example #25
  private Set<Vertex> computeMinVertexCover(Set<Vertex> side1, Set<Vertex> side2) {
    Set<Vertex> konigSet = new HashSet<Vertex>();
    Set<Vertex> unmatched = new TreeSet<Vertex>(side1);
    unmatched.removeAll(matches);
    // System.out.println("Matches: " + matches);
    // System.out.println("side 1 unmatched set: " + unmatched);

    for (Vertex v : unmatched) {
      konigDFS(konigSet, v, false);
    }

    // System.out.println("Konig set: " + konigSet);

    Set<Vertex> result = new HashSet<Vertex>(side2);
    result.retainAll(konigSet);
    // System.out.println("side 2 intersect konigSet: " + result);

    Set<Vertex> side1notInKonigSet = new HashSet<Vertex>(side1);
    side1notInKonigSet.removeAll(konigSet);
    // System.out.println("side 1 not in Konig set: " + side1notInKonigSet);

    result.addAll(side1notInKonigSet);

    return result;
  }
Example #26
    @Override
    public void update(Observable o, Object arg) {

      Set<Integer> oldSelection = new HashSet<Integer>();
      oldSelection.addAll(selection);

      SortedSet<Feature> fs = model.selectionModel().getFeatureSelection();
      // System.out.println(fs);
      // int prevIndex = selectedIndex;
      if (fs.size() > 0) {
        for (Feature f : fs) {
          selection.add(listModel.getRow(f));
        }

      } else {
        selection.clear();
      }
      oldSelection.retainAll(selection);
      if (oldSelection.size() != selection.size()) {
        fireValueChanged(false);
      }

      // getSelectionModel().setSelectionInterval(row, row);

    }
Example #27
  private static int getNumberGems(String[] a) {
    Set<Character> gemSet = new HashSet<>();
    char[] firstString = a[0].toCharArray();

    // Set<Character> gemSet1 = new HashSet<Character>(Arrays.asList(firstString));
    // "abcdde" , baccd , eeabg
    for (char letter : firstString) {
      // "abcdde"
      // Only add first characterset to set.
      gemSet.add(letter);
    }
    // Iterate over the remaining strings, e.g. "baccd" and "eeabg".
    for (int j = 1; j < a.length; j++) {
      Set<Character> gemSet2 = new HashSet<>();
      // Collect the characters of the j-th string.
      char[] strC = a[j].toCharArray();
      for (char letter : strC) {
        gemSet2.add(letter);
      }
      // Keep only the characters that also appear in gemSet2; after processing
      // every string, gemSet holds just the letters common to all of them.
      gemSet.retainAll(gemSet2);
    }
    return gemSet.size();
  }
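
With the strings named in the comments, the common letters are {a, b}, so the count is 2. A main method placed in the same class (hypothetical, for illustration) would exercise it:

  public static void main(String[] args) {
    // Common letters of "abcdde", "baccd" and "eeabg" are {a, b}.
    System.out.println(getNumberGems(new String[] {"abcdde", "baccd", "eeabg"})); // prints 2
  }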
Example #28
  public ArrayList<FileSystemItem> getSelectedFiles() {
    // first make sure there are no leftover items in the selected set
    Set<FileSystemItem> selectedSet = selectionModel_.getSelectedSet();
    selectedSet.retainAll(dataProvider_.getList());

    return new ArrayList<FileSystemItem>(selectedSet);
  }
Example #29
  /**
   * Adds the given POP application satisfier set as a subset of this set, with the given
   * exceptions. The exceptions set must be a subset of the given satisfier set. If the given POP
   * application was already a subset of this set, then the new exceptions set is the intersection
   * of the given exceptions set with the old one. Otherwise, the exceptions set is the given one
   * minus any individual elements of this set that satisfy the given POP application.
   */
  public void addSatisfiers(NumberVar popApp, ObjectSet satisfiers, Set newExceptions) {
    if (popAppSatisfiers.containsKey(popApp)) {
      // already in set; assume satisfiers the same
      Set curExceptions = (Set) popAppExceptions.get(popApp);
      int oldNumExceptions = curExceptions.size();
      curExceptions.retainAll(newExceptions);
      size += (oldNumExceptions - curExceptions.size());
    } else {
      popAppSatisfiers.put(popApp, satisfiers);
      Set oldIndivs = (Set) popAppIndivs.remove(popApp);
      for (Iterator iter = oldIndivs.iterator(); iter.hasNext(); ) {
        individuals.remove(iter.next());
      }

      Set curExceptions = new HashSet(newExceptions);
      curExceptions.removeAll(oldIndivs);
      popAppExceptions.put(popApp, curExceptions);

      size +=
          (satisfiers.size()
              - oldIndivs.size() // because they were already here
              - curExceptions.size()); // because they weren't added
    }
  }
Example #30
  /**
   * Calculates data nodes for replicated caches on unstable topology.
   *
   * @param cctx Cache context for main space.
   * @param extraSpaces Extra spaces.
   * @return Collection of all data nodes owning all the caches or {@code null} for retry.
   */
  private Collection<ClusterNode> replicatedUnstableDataNodes(
      final GridCacheContext<?, ?> cctx, List<String> extraSpaces) {
    assert cctx.isReplicated() : cctx.name() + " must be replicated";

    Set<ClusterNode> nodes = replicatedUnstableDataNodes(cctx);

    if (F.isEmpty(nodes)) return null; // Retry.

    if (!F.isEmpty(extraSpaces)) {
      for (String extraSpace : extraSpaces) {
        GridCacheContext<?, ?> extraCctx = cacheContext(extraSpace);

        if (extraCctx.isLocal()) continue;

        if (!extraCctx.isReplicated())
          throw new CacheException(
              "Queries running on replicated cache should not contain JOINs "
                  + "with tables in partitioned caches [rCache="
                  + cctx.name()
                  + ", pCache="
                  + extraSpace
                  + "]");

        Set<ClusterNode> extraOwners = replicatedUnstableDataNodes(extraCctx);

        if (F.isEmpty(extraOwners)) return null; // Retry.

        nodes.retainAll(extraOwners);

        if (nodes.isEmpty()) return null; // Retry.
      }
    }

    return nodes;
  }