public String getInfo() {
  if (crossValidationResults != null) {
    return crossValidationResults.getInfo();
  }
  StringBuilder result = new StringBuilder();
  if (oneVsAllMode != OneVsAllMode.None) {
    Multiset<Float> cs = HashMultiset.create();
    Multiset<KernelFunction> kernels = HashMultiset.create();
    for (BinaryModel<L, P> binaryModel : oneVsAllModels.values()) {
      cs.add(binaryModel.param.C);
      kernels.add(binaryModel.param.kernel);
    }
    // Label the kernel multiset as "kernel", not "gamma": it holds kernel functions.
    result.append("OneVsAll:C=").append(cs).append("; kernel=").append(kernels).append(" ");
  }
  if (allVsAllMode != AllVsAllMode.None) {
    Multiset<Float> cs = HashMultiset.create();
    Multiset<KernelFunction> kernels = HashMultiset.create();
    for (BinaryModel<L, P> binaryModel : oneVsOneModels.values()) {
      cs.add(binaryModel.param.C);
      kernels.add(binaryModel.param.kernel);
    }
    result.append("AllVsAll:C=").append(cs).append("; kernel=").append(kernels).append(" ");
  }
  return result.toString();
}
private void computeViolationsPerRules(DecoratorContext context) {
  Map<RulePriority, Multiset<Rule>> rulesPerSeverity = Maps.newHashMap();
  for (Violation violation : context.getViolations()) {
    Multiset<Rule> rulesBag = initRules(rulesPerSeverity, violation.getSeverity());
    rulesBag.add(violation.getRule());
  }
  for (RulePriority severity : RulePriority.values()) {
    Metric metric = SeverityUtils.severityToViolationMetric(severity);
    Collection<Measure> children = context.getChildrenMeasures(MeasuresFilters.rules(metric));
    for (Measure child : children) {
      RuleMeasure childRuleMeasure = (RuleMeasure) child;
      Rule rule = childRuleMeasure.getRule();
      if (rule != null && MeasureUtils.hasValue(childRuleMeasure)) {
        Multiset<Rule> rulesBag = initRules(rulesPerSeverity, severity);
        rulesBag.add(rule, childRuleMeasure.getIntValue());
      }
    }
    Multiset<Rule> rulesBag = rulesPerSeverity.get(severity);
    if (rulesBag != null) {
      for (Multiset.Entry<Rule> entry : rulesBag.entrySet()) {
        RuleMeasure measure = RuleMeasure.createForRule(metric, entry.getElement(), (double) entry.getCount());
        measure.setSeverity(severity);
        context.saveMeasure(measure);
      }
    }
  }
}
private void loadTestData(Multiset<String> m) {
  m.add("a");
  m.add("a");
  m.add("b");
  m.add("c");
  m.add("a");
  m.add("-1");
}
public static void main(String[] args) {
  // create a multiset collection
  Multiset<String> multiset = HashMultiset.create();
  multiset.add("a");
  multiset.add("b");
  multiset.add("c");
  multiset.add("d");
  multiset.add("a");
  multiset.add("b");
  multiset.add("c");
  multiset.add("b");
  multiset.add("b");
  multiset.add("b");
  // print the occurrence of an element
  System.out.println("Occurrence of 'b' : " + multiset.count("b"));
  // print the total size of the multiset
  System.out.println("Total Size : " + multiset.size());
  // get the distinct elements of the multiset as a set
  Set<String> set = multiset.elementSet();
  // display the elements of the set
  System.out.println("Set [");
  for (String s : set) {
    System.out.println(s);
  }
  System.out.println("]");
  // display all the elements of the multiset using an iterator
  Iterator<String> iterator = multiset.iterator();
  System.out.println("MultiSet [");
  while (iterator.hasNext()) {
    System.out.println(iterator.next());
  }
  System.out.println("]");
  // display the distinct elements of the multiset with their occurrence count
  System.out.println("MultiSet [");
  for (Multiset.Entry<String> entry : multiset.entrySet()) {
    System.out.println("Element: " + entry.getElement() + ", Occurrence(s): " + entry.getCount());
  }
  System.out.println("]");
  // remove extra occurrences
  multiset.remove("b", 2);
  // print the occurrence of the element again
  System.out.println("Occurrence of 'b' : " + multiset.count("b"));
}
@Test
public void multisetTest() {
  Multiset<String> multiset = HashMultiset.create();
  multiset.add("123");
  multiset.add("123");
  multiset.add("123");
  multiset.add("1234");
  System.out.println(multiset.count("123"));
  System.out.println(multiset);
}
/**
 * Adds a one-way connection from A to B.
 *
 * @param from the source node (A)
 * @param to the target node (B)
 */
public void addOneWayConnection(int from, int to) {
  addNode(from);
  addNode(to);
  Multiset<Integer> outSet = nodeOutEdges.get(from);
  outSet.add(to);
  Multiset<Integer> inSet = nodeInEdges.get(to);
  inSet.add(from);
}
private static <T> int checkCounts(Collection<T> original) {
  // Count how many times each value occurs.
  Multiset<T> hashCounts = HashMultiset.create();
  for (T v : original) {
    hashCounts.add(v);
  }
  // Count how many items share each occurrence count (iterating with multiplicity).
  Multiset<Integer> countCounts = HashMultiset.create();
  for (T value : hashCounts) {
    countCounts.add(hashCounts.count(value));
  }
  // Items occurring exactly once are collision-free; the remainder collide.
  return original.size() - countCounts.count(1);
}
@Test
public void whenGetTopUsingMultiSet_thenCorrect() {
  final Multiset<String> names = HashMultiset.create();
  names.add("John");
  names.add("Adam", 5);
  names.add("Jane");
  names.add("Tom", 2);
  final Set<String> sorted = Multisets.copyHighestCountFirst(names).elementSet();
  final List<String> topTwo = Lists.newArrayList(sorted).subList(0, 2);
  assertEquals(2, topTwo.size());
  assertEquals("Adam", topTwo.get(0));
  assertEquals("Tom", topTwo.get(1));
}
@Test
public void whenInsertDuplicatesInMultiSet_thenInserted() {
  final Multiset<String> names = HashMultiset.create();
  names.add("John");
  names.add("Adam", 3);
  names.add("John");
  assertEquals(2, names.count("John"));
  names.remove("John");
  assertEquals(1, names.count("John"));
  assertEquals(3, names.count("Adam"));
  names.remove("Adam", 2);
  assertEquals(1, names.count("Adam"));
}
private void initializeForFile(CounterInitializationContext context) {
  String language = context.getLeaf().getFileAttributes().getLanguageKey();
  Optional<Measure> ncloc = context.getMeasure(CoreMetrics.NCLOC_KEY);
  if (ncloc.isPresent()) {
    multiset.add(language == null ? UNKNOWN_LANGUAGE_KEY : language, ncloc.get().getIntValue());
  }
}
/**
 * Populate the FeatureVector with Bag of Words.
 *
 * @param text the raw text to tokenize
 * @param fv the feature vector to populate
 */
protected void populateFV(String text, FeatureVector<E> fv) {
  List<String> unnormalized = tokenizer.tokenize(text);
  Multiset<String> terms = HashMultiset.create();
  for (String token : unnormalized) {
    String norm = Util.normalize(token);
    if (!norm.isEmpty()) {
      terms.add(norm);
    }
  }
  // sparse representation... no need to put in 0's
  for (String term : terms.elementSet()) {
    // rare words don't get included, so check first
    if (!integerFeatureNames && train.getMetadata().containsKey(term)) {
      DoubleFeature bagFeat = new DoubleFeature(term, (double) terms.count(term));
      fv.put(term, bagFeat);
    } else if (integerFeatureNames && train.getMetadata().containsKey(String.valueOf(wordIndexMap.get(term)))) {
      String featureName = String.valueOf(wordIndexMap.get(term));
      DoubleFeature bagFeat = new DoubleFeature(featureName, (double) terms.count(term));
      fv.put(featureName, bagFeat);
    }
  }
}
private void restoreTimers(DataInputView in) throws IOException {
  int numWatermarkTimers = in.readInt();
  watermarkTimers = new HashSet<>(numWatermarkTimers);
  watermarkTimersQueue = new PriorityQueue<>(Math.max(numWatermarkTimers, 1));
  for (int i = 0; i < numWatermarkTimers; i++) {
    K key = keySerializer.deserialize(in);
    W window = windowSerializer.deserialize(in);
    long timestamp = in.readLong();
    Timer<K, W> timer = new Timer<>(timestamp, key, window);
    watermarkTimers.add(timer);
    watermarkTimersQueue.add(timer);
  }
  int numProcessingTimeTimers = in.readInt();
  processingTimeTimersQueue = new PriorityQueue<>(Math.max(numProcessingTimeTimers, 1));
  processingTimeTimers = new HashSet<>();
  for (int i = 0; i < numProcessingTimeTimers; i++) {
    K key = keySerializer.deserialize(in);
    W window = windowSerializer.deserialize(in);
    long timestamp = in.readLong();
    Timer<K, W> timer = new Timer<>(timestamp, key, window);
    processingTimeTimersQueue.add(timer);
    processingTimeTimers.add(timer);
  }
  int numProcessingTimeTimerTimestamp = in.readInt();
  processingTimeTimerTimestamps = HashMultiset.create();
  for (int i = 0; i < numProcessingTimeTimerTimestamp; i++) {
    long timestamp = in.readLong();
    int count = in.readInt();
    processingTimeTimerTimestamps.add(timestamp, count);
  }
}
private static void validateAnalyzerMessage(Map<IssueAttribute, String> attrs, AnalyzerMessage analyzerMessage) {
  Double effortToFix = analyzerMessage.getCost();
  if (effortToFix != null) {
    assertEquals(Integer.toString(effortToFix.intValue()), attrs, IssueAttribute.EFFORT_TO_FIX);
  }
  AnalyzerMessage.TextSpan textSpan = analyzerMessage.primaryLocation();
  assertEquals(normalizeColumn(textSpan.startCharacter), attrs, IssueAttribute.START_COLUMN);
  assertEquals(Integer.toString(textSpan.endLine), attrs, IssueAttribute.END_LINE);
  assertEquals(normalizeColumn(textSpan.endCharacter), attrs, IssueAttribute.END_COLUMN);
  if (attrs.containsKey(IssueAttribute.SECONDARY_LOCATIONS)) {
    List<AnalyzerMessage> secondaryLocations = analyzerMessage.secondaryLocations;
    Multiset<String> actualLines = HashMultiset.create();
    for (AnalyzerMessage secondaryLocation : secondaryLocations) {
      actualLines.add(Integer.toString(secondaryLocation.getLine()));
    }
    List<String> expected = Lists.newArrayList(
        Splitter.on(",").omitEmptyStrings().trimResults().split(attrs.get(IssueAttribute.SECONDARY_LOCATIONS)));
    List<String> unexpected = new ArrayList<>();
    for (String actualLine : actualLines) {
      if (expected.contains(actualLine)) {
        expected.remove(actualLine);
      } else {
        unexpected.add(actualLine);
      }
    }
    if (!expected.isEmpty() || !unexpected.isEmpty()) {
      Fail.fail("Secondary locations: expected: " + expected + " unexpected:" + unexpected);
    }
  }
}
private static void logStats(Collection<PsiFile> otherFiles, long start) {
  long time = System.currentTimeMillis() - start;
  final Multiset<String> stats = HashMultiset.create();
  for (PsiFile file : otherFiles) {
    stats.add(StringUtil.notNullize(file.getViewProvider().getVirtualFile().getExtension()).toLowerCase());
  }
  List<String> extensions = ContainerUtil.newArrayList(stats.elementSet());
  // Sort extensions by descending frequency.
  Collections.sort(extensions, new Comparator<String>() {
    @Override
    public int compare(String o1, String o2) {
      return stats.count(o2) - stats.count(o1);
    }
  });
  String message = "Search in " + otherFiles.size() + " files with unknown types took " + time + "ms.\n"
      + "Mapping their extensions to an existing file type (e.g. Plain Text) might speed up the search.\n"
      + "Most frequent non-indexed file extensions: ";
  for (int i = 0; i < Math.min(10, extensions.size()); i++) {
    String extension = extensions.get(i);
    message += extension + "(" + stats.count(extension) + ") ";
  }
  LOG.info(message);
}
private Integer transferResource(ResourceStock buyer, Resource outResource) {
  while (true) {
    int wanted = buying.count(buyer);
    int available = externalRepository.count(outResource);
    int transfer = Math.min(available, wanted);
    if (transfer == 0) {
      return 0;
    }
    // Atomically withdraw the stock; fails if another thread changed the count in between.
    boolean procured = externalRepository.setCount(outResource, available, available - transfer);
    if (procured) {
      boolean sent = buying.setCount(buyer, wanted, wanted - transfer);
      if (sent) {
        try {
          buyer.add(transfer);
        } catch (RuntimeException e) {
          buying.remove(buyer, transfer);
          continue;
        }
        myLogTransfer(buyer, transfer, outResource);
        return transfer;
      } else {
        // The buyer's demand changed concurrently; return the stock and retry.
        externalRepository.add(outResource, transfer);
      }
    }
  }
}
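// A minimal sketch (not from the original class) of the compare-and-swap idiom
// transferResource() relies on. It assumes "buying" and "externalRepository"
// are com.google.common.collect.ConcurrentHashMultiset instances, whose
// setCount(element, expectedCount, newCount) succeeds atomically only while the
// current count still equals expectedCount. The names below are hypothetical.
private static void setCountCasSketch() {
  ConcurrentHashMultiset<String> stock = ConcurrentHashMultiset.create();
  stock.add("widget", 5);
  System.out.println(stock.setCount("widget", 5, 3)); // true: count was 5, now 3
  System.out.println(stock.setCount("widget", 5, 1)); // false: count is 3, not 5
}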
KVariable newDotVariable(Sort sort) {
  KVariable newLabel;
  do {
    newLabel = KVariable("_" + (counter++), Att().add("sort", sort));
  } while (vars.contains(newLabel));
  vars.add(newLabel);
  return newLabel;
}
@Override
public void visitNode(Tree tree) {
  if (tree.is(Tree.Kind.RETURN_STATEMENT)) {
    returnStatementCounter.add(methods.peek());
  } else {
    methods.push(tree);
  }
}
@Test
public void should_pass_if_actual_contains_value_number_of_times_expected() {
  // given
  Multiset<String> actual = HashMultiset.create();
  actual.add("test", 2);
  // when
  assertThat(actual).containsAtMost(2, "test");
  // then pass
}
protected String fixIndexedParameters(String name, Multiset<String> indexes) {
  if (name.contains("[]")) {
    String newName = name.replace("[]", "[" + indexes.count(name) + "]");
    indexes.add(name);
    logger.debug("{} was renamed to {}", name, newName);
    return newName;
  }
  return name;
}
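// A brief usage sketch for fixIndexedParameters() (hypothetical call site, not
// from the original class): the multiset count supplies the next index for a
// repeated "[]" parameter and grows by one on each call.
private void fixIndexedParametersSketch() {
  Multiset<String> indexes = HashMultiset.create();
  System.out.println(fixIndexedParameters("param[]", indexes)); // "param[0]"
  System.out.println(fixIndexedParameters("param[]", indexes)); // "param[1]"
}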
private void initializeForOtherLeaf(CounterInitializationContext context) {
  Optional<Measure> measure = context.getMeasure(NCLOC_LANGUAGE_DISTRIBUTION_KEY);
  if (measure.isPresent()) {
    Map<String, Integer> parse = KeyValueFormat.parse(measure.get().getData(), newStringConverter(), newIntegerConverter());
    for (Map.Entry<String, Integer> entry : parse.entrySet()) {
      multiset.add(entry.getKey(), entry.getValue());
    }
  }
}
@Override
public void reorderChilds(Iterable<CreatedOutput> outDesc) {
  final Multiset<TreeItemMapping> subMappings = LinkedHashMultiset.create();
  final Map<EObject, CreatedOutput> outputToItem = Maps.newHashMap();
  for (CreatedOutput createdOutput : outDesc) {
    EObject createdElement = createdOutput.getCreatedElement();
    outputToItem.put(createdElement, createdOutput);
    if (createdElement instanceof DTreeItem) {
      DTreeItem createdDTreeItem = (DTreeItem) createdElement;
      TreeItemMapping actualMapping = createdDTreeItem.getActualMapping();
      subMappings.add(actualMapping);
    }
  }
  // Counts subMappings to correctly sort tree items regarding mapping order
  // (items have been created regarding the semantic candidates order).
  // Iterate over distinct mappings: iterating the multiset itself would visit
  // each mapping once per occurrence and inflate the start indexes.
  int startIndex = 0;
  final Map<TreeItemMapping, Integer> startIndexes = Maps.newHashMap();
  for (TreeItemMapping itemMapping : subMappings.elementSet()) {
    startIndexes.put(itemMapping, startIndex);
    startIndex += subMappings.count(itemMapping);
  }
  // Pre-compute the new indices.
  final Map<DTreeItem, Integer> newIndices = Maps.newHashMap();
  for (DTreeItem treeItem : container.getOwnedTreeItems()) {
    // Init with element count: elements with unknown mapping will be placed at the end.
    int index = outputToItem.size();
    TreeItemMapping itemMapping = treeItem.getActualMapping();
    if (itemMapping != null && startIndexes.containsKey(itemMapping)) {
      index = startIndexes.get(itemMapping);
    }
    CreatedOutput createdOutput = outputToItem.get(treeItem);
    if (createdOutput != null) {
      index = index + createdOutput.getNewIndex();
    } else {
      index = -1;
    }
    newIndices.put(treeItem, index);
  }
  ECollections.sort(container.getOwnedTreeItems(), new Comparator<DTreeItem>() {
    @Override
    public int compare(DTreeItem o1, DTreeItem o2) {
      return newIndices.get(o1).compareTo(newIndices.get(o2));
    }
  });
}
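// A side note on the collection choice above, as a standalone sketch (not from
// the original class): LinkedHashMultiset iterates its elementSet() in the
// order elements were first added, which is what lets reorderChilds() assign
// start indexes in mapping-creation order; HashMultiset would give an
// unspecified order.
private static void linkedHashMultisetOrderSketch() {
  Multiset<String> mappings = LinkedHashMultiset.create();
  mappings.add("b");
  mappings.add("a");
  mappings.add("b");
  System.out.println(mappings.elementSet()); // [b, a]: first-insertion order
}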
/**
 * Create the Bag of Words features.
 *
 * @param documents the documents to extract terms from
 */
public void createFeatures(Collection<String> documents) {
  Multiset<String> terms = HashMultiset.create();
  for (String s : documents) {
    List<String> unnormalized = tokenizer.tokenize(s);
    // normalize them
    for (int i = 0; i < unnormalized.size(); i++) {
      String u = unnormalized.get(i);
      String norm = Util.normalize(u);
      if (!norm.isEmpty()) {
        terms.add(norm);
      }
      if (bigrams && (i < unnormalized.size() - 1)) {
        String second = unnormalized.get(i + 1);
        String normSecond = Util.normalize(second);
        if (!normSecond.isEmpty()) {
          terms.add(norm + "_" + normSecond);
        }
      }
    }
  }
  int i = 0;
  for (String term : terms.elementSet()) {
    if (terms.count(term) >= minOccurs // don't count infreq. words
        && term.length() >= minLength) { // or super short words
      if (!integerFeatureNames) {
        train.getMetadata().put(term, "boolean");
      } else {
        // Register metadata under the same index stored in wordIndexMap so that
        // populateFV's lookup via wordIndexMap.get(term) finds the metadata key.
        wordIndexMap.put(term, i);
        train.getMetadata().put(String.valueOf(i), "boolean");
        i++;
      }
    }
  }
}
@Override
public Multiset<Edge> getEdgesAt(int vertex) {
  if (vertex >= getNoOfVertices()) {
    throw new VertexNotInGraphException();
  }
  Multiset<Edge> edgesAt = HashMultiset.create();
  for (int i = 0; i < getNoOfVertices(); i++) {
    Edge e = new Edge(i, vertex);
    edgesAt.add(e, getMatrix()[vertex][i]);
  }
  return edgesAt;
}
private void computeViolationsPerSeverities(DecoratorContext context) {
  Multiset<RulePriority> severitiesBag = HashMultiset.create();
  for (Violation violation : context.getViolations()) {
    severitiesBag.add(violation.getSeverity());
  }
  for (RulePriority severity : RulePriority.values()) {
    Metric metric = SeverityUtils.severityToViolationMetric(severity);
    if (context.getMeasure(metric) == null) {
      Collection<Measure> children = context.getChildrenMeasures(MeasuresFilters.metric(metric));
      int sum = MeasureUtils.sum(true, children).intValue() + severitiesBag.count(severity);
      context.saveMeasure(metric, (double) sum);
    }
  }
}
@Override
public Multiset<Edge> getEdges() {
  Multiset<Edge> edges = HashMultiset.create();
  for (int i = 0; i < getNoOfVertices(); i++) {
    for (int j = 0; j <= i; j++) {
      // we consider only the lower half of the matrix to avoid duplicate edges
      Edge e = new Edge(i, j);
      edges.add(e, getMatrix()[i][j]);
    }
  }
  return edges;
}
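// A small sketch (not from the original class) of the Multiset.add(element,
// occurrences) behavior the adjacency-matrix methods getEdgesAt() and
// getEdges() depend on: adding zero occurrences is a no-op, so matrix cells
// holding 0 contribute no edge, while cells greater than 1 model parallel edges.
private static void addWithOccurrencesSketch() {
  Multiset<String> edges = HashMultiset.create();
  edges.add("e1", 3); // parallel edges: count becomes 3
  edges.add("e2", 0); // no-op: "e2" is not inserted
  System.out.println(edges.count("e1")); // 3
  System.out.println(edges.count("e2")); // 0
}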
@Test
public void should_fail_if_actual_contains_value_more_times_than_expected() {
  // given
  Multiset<String> actual = HashMultiset.create();
  actual.add("test", 2);
  // expect
  expectException(AssertionError.class,
      "%nExpecting:%n"
          + " <[\"test\", \"test\"]>%n"
          + "to contain:%n"
          + " <\"test\">%n"
          + "at most 1 times but was found 2 times.");
  // when
  assertThat(actual).containsAtMost(1, "test");
}
public void testHashFloat() {
  Multiset<Integer> violations = HashMultiset.create();
  for (int k = 0; k < 1000; k++) {
    List<Float> original = Lists.newArrayList();
    Random gen = RandomUtils.getRandom();
    for (int i = 0; i < 10000; i++) {
      float x = (float) gen.nextDouble();
      original.add(x);
    }
    violations.add(checkCounts(original) <= 12 ? 0 : 1);
  }
  // The hashes for floats don't really have 32 bits of entropy, so the test
  // only succeeds at better than about a 99% rate.
  assertTrue(violations.count(0) >= 985);
}
private void checkAttributeNamesForDuplicates(ValueType type, Protoclass protoclass) {
  if (!type.attributes.isEmpty()) {
    Multiset<String> attributeNames = HashMultiset.create(type.attributes.size());
    for (ValueAttribute attribute : type.attributes) {
      attributeNames.add(attribute.name());
    }
    List<String> duplicates = Lists.newArrayList();
    for (Multiset.Entry<String> entry : attributeNames.entrySet()) {
      if (entry.getCount() > 1) {
        duplicates.add(entry.getElement());
      }
    }
    if (!duplicates.isEmpty()) {
      protoclass.report().error(
          "Duplicate attribute names %s. You should check if correct @Value.Style applied",
          duplicates);
    }
  }
}
public static Integer[] getIDs(boolean check) {
  if (check) {
    List<World> allWorlds = Lists.newArrayList(weakWorldMap.keySet());
    allWorlds.removeAll(worlds.values());
    for (World w : allWorlds) {
      leakedWorlds.add(System.identityHashCode(w));
    }
    for (World w : allWorlds) {
      int leakCount = leakedWorlds.count(System.identityHashCode(w));
      if (leakCount == 5) {
        FMLLog.fine(
            "The world %x (%s) may have leaked: first encounter (5 occurrences).\n",
            System.identityHashCode(w), w.getWorldInfo().getWorldName());
      } else if (leakCount % 5 == 0) {
        FMLLog.fine(
            "The world %x (%s) may have leaked: seen %d times.\n",
            System.identityHashCode(w), w.getWorldInfo().getWorldName(), leakCount);
      }
    }
  }
  return getIDs();
}
@Test
public void testMultiset() {
  // Multiset is also known as a bag.
  // From Daniel Hinojosa's examples.
  Multiset<String> worldCupChampionships = HashMultiset.create();
  worldCupChampionships.add("Brazil");
  worldCupChampionships.add("Brazil");
  worldCupChampionships.add("Brazil");
  worldCupChampionships.add("Brazil");
  worldCupChampionships.add("Brazil");
  worldCupChampionships.add("Italy");
  worldCupChampionships.add("Italy");
  worldCupChampionships.add("Italy");
  worldCupChampionships.add("Italy");
  worldCupChampionships.add("Germany", 3); // explicitly add a count
  assertEquals(5, worldCupChampionships.count("Brazil"));
  assertEquals(4, worldCupChampionships.count("Italy"));
  assertEquals(3, worldCupChampionships.count("Germany"));
  // This doesn't throw an exception; it just says the element is in there zero times.
  assertEquals(0, worldCupChampionships.count("United States"));
}