// TODO: roll check into tokens regex pattern?
// That allows for better matching because unmatched sequences will be eliminated at match time
private boolean checkPosTags(List<CoreLabel> tokens, int start, int end) {
  if (validPosPattern != null) {
    // Need to check POS tag too...
    switch (posMatchType) {
      case MATCH_ONE_TOKEN_PHRASE_ONLY:
        if (tokens.size() > 1) return true;
        // fall through
      case MATCH_AT_LEAST_ONE_TOKEN:
        for (int i = start; i < end; i++) {
          CoreLabel token = tokens.get(i);
          String pos = token.get(CoreAnnotations.PartOfSpeechAnnotation.class);
          if (pos != null && validPosPattern.matcher(pos).matches()) {
            return true;
          }
        }
        return false;
      case MATCH_ALL_TOKENS:
        // Checked elsewhere
        return true;
      default:
        // Don't know this match type....
        return true;
    }
  }
  return true;
}
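// Illustrative sketch (hypothetical configuration, not from the original source): with
// validPosPattern = Pattern.compile("N.*") and posMatchType = MATCH_AT_LEAST_ONE_TOKEN,
// a span passes only if some token in [start, end) has a nominal POS tag:
//
//   tokens: "the"/DT "Golden"/NNP "Gate"/NNP  -> checkPosTags(tokens, 0, 3) == true
//   tokens: "very"/RB "quickly"/RB            -> checkPosTags(tokens, 0, 2) == false
//
// Under MATCH_ONE_TOKEN_PHRASE_ONLY, multi-token phrases skip the POS check entirely.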
/**
 * Create a searcher manually, supplying a dependency tree, an optional classifier for when to
 * split clauses, and a featurizer for that classifier. You almost certainly want to use {@link
 * ClauseSplitter#load(String)} instead of this constructor.
 *
 * @param tree The dependency tree to search over.
 * @param assumedTruth The assumed truth of the tree (relevant for natural logic inference). If in
 *     doubt, pass in true.
 * @param isClauseClassifier The classifier for whether a given dependency arc should be a new
 *     clause. If this is not given, all arcs are treated as clause separators.
 * @param featurizer The featurizer for the classifier. If no featurizer is given, one should be
 *     given in {@link ClauseSplitterSearchProblem#search(java.util.function.Predicate,
 *     Classifier, Map, java.util.function.Function, int)}, or else the classifier will be
 *     useless.
 * @see ClauseSplitter#load(String)
 */
protected ClauseSplitterSearchProblem(
    SemanticGraph tree,
    boolean assumedTruth,
    Optional<Classifier<ClauseSplitter.ClauseClassifierLabel, String>> isClauseClassifier,
    Optional<
            Function<
                Triple<
                    ClauseSplitterSearchProblem.State,
                    ClauseSplitterSearchProblem.Action,
                    ClauseSplitterSearchProblem.State>,
                Counter<String>>>
        featurizer) {
  this.tree = new SemanticGraph(tree);
  this.assumedTruth = assumedTruth;
  this.isClauseClassifier = isClauseClassifier;
  this.featurizer = featurizer;
  // Index edges
  this.tree.edgeIterable().forEach(edgeToIndex::addToIndex);
  // Get length
  List<IndexedWord> sortedVertices = tree.vertexListSorted();
  sentenceLength = sortedVertices.get(sortedVertices.size() - 1).index();
  // Register extra edges
  for (IndexedWord vertex : sortedVertices) {
    extraEdgesByGovernor.put(vertex, new ArrayList<>());
    extraEdgesByDependent.put(vertex, new ArrayList<>());
  }
  List<SemanticGraphEdge> extraEdges = Util.cleanTree(this.tree);
  assert Util.isTree(this.tree);
  for (SemanticGraphEdge edge : extraEdges) {
    extraEdgesByGovernor.get(edge.getGovernor()).add(edge);
    extraEdgesByDependent.get(edge.getDependent()).add(edge);
  }
}
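// Minimal construction sketch (derived from the signature above; assumes an existing
// SemanticGraph 'dependencies'; empty Optionals mean every arc is treated as a clause
// separator, per the javadoc):
//
//   ClauseSplitterSearchProblem problem =
//       new ClauseSplitterSearchProblem(dependencies, true, Optional.empty(), Optional.empty());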
private List<Tree> helper(List<Tree> treeList, int start) {
  List<Tree> newTreeList = new ArrayList<Tree>(treeList.size());
  for (Tree tree : treeList) {
    int end = start + tree.yield().size();
    newTreeList.add(prune(tree, start));
    start = end;
  }
  return newTreeList;
}
private static List<TaggedWord> cleanTags(List twList, TreebankLanguagePack tlp) {
  int sz = twList.size();
  List<TaggedWord> l = new ArrayList<TaggedWord>(sz);
  for (int i = 0; i < sz; i++) {
    TaggedWord tw = (TaggedWord) twList.get(i);
    TaggedWord tw2 = new TaggedWord(tw.word(), tlp.basicCategory(tw.tag()));
    l.add(tw2);
  }
  return l;
}
public AnnotationPipeline(List<Annotator> annotators) {
  this.annotators = annotators;
  if (TIME) {
    int num = annotators.size();
    accumulatedTime = new ArrayList<MutableLong>(num);
    for (int i = 0; i < num; i++) {
      accumulatedTime.add(new MutableLong());
    }
  }
}
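// Usage sketch (illustrative; the concrete annotators are placeholders for whatever
// Annotator implementations are available):
//
//   List<Annotator> annotators = Arrays.asList(tokenizerAnnotator, posAnnotator);
//   AnnotationPipeline pipeline = new AnnotationPipeline(annotators);
//   Annotation document = new Annotation("Some text to annotate.");
//   pipeline.annotate(document);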
List<Tree> prune(List<Tree> treeList, Label label, int start, int end) {
  // get reference tree
  if (treeList.size() == 1) {
    return treeList;
  }
  Tree testTree = treeList.get(0).treeFactory().newTreeNode(label, treeList);
  int goal = Numberer.getGlobalNumberer("states").number(label.value());
  Tree tempTree = parser.extractBestParse(goal, start, end);
  // parser.restoreUnaries(tempTree);
  Tree pcfgTree = debinarizer.transformTree(tempTree);
  Set<Constituent> pcfgConstituents =
      pcfgTree.constituents(new LabeledScoredConstituentFactory());
  // delete child labels that are not in reference but do not cross reference
  List<Tree> prunedChildren = new ArrayList<Tree>();
  int childStart = 0;
  for (int c = 0, numCh = testTree.numChildren(); c < numCh; c++) {
    Tree child = testTree.getChild(c);
    boolean isExtra = true;
    int childEnd = childStart + child.yield().size();
    Constituent childConstituent =
        new LabeledScoredConstituent(childStart, childEnd, child.label(), 0);
    if (pcfgConstituents.contains(childConstituent)) {
      isExtra = false;
    }
    if (childConstituent.crosses(pcfgConstituents)) {
      isExtra = false;
    }
    if (child.isLeaf() || child.isPreTerminal()) {
      isExtra = false;
    }
    if (pcfgTree.yield().size() != testTree.yield().size()) {
      isExtra = false;
    }
    if (!label.value().startsWith("NP^NP")) {
      isExtra = false;
    }
    if (isExtra) {
      System.err.println(
          "Pruning: " + child.label() + " from " + (childStart + start) + " to "
              + (childEnd + start));
      System.err.println("Was: " + testTree + " vs " + pcfgTree);
      prunedChildren.addAll(child.getChildrenAsList());
    } else {
      prunedChildren.add(child);
    }
    childStart = childEnd;
  }
  return prunedChildren;
}
protected String historyToString(List history) {
  String str = (String) historyToString.get(history);
  if (str == null) {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < history.size(); i++) {
      sb.append('^');
      sb.append(history.get(i));
    }
    str = sb.toString();
    historyToString.put(history, str);
  }
  return str;
}
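// Worked example (derived directly from the loop above): a history list of parent labels
// ["VP", "S"] is encoded (and memoized) as the state suffix "^VP^S".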
private List<CoreMap> toCoreMaps(
    CoreMap annotation, List<TimeExpression> timeExpressions, SUTime.TimeIndex timeIndex) {
  if (timeExpressions == null) return null;
  List<CoreMap> coreMaps = new ArrayList<CoreMap>(timeExpressions.size());
  for (TimeExpression te : timeExpressions) {
    CoreMap cm = te.getAnnotation();
    SUTime.Temporal temporal = te.getTemporal();
    if (temporal != null) {
      String origText = annotation.get(CoreAnnotations.TextAnnotation.class);
      String text = cm.get(CoreAnnotations.TextAnnotation.class);
      if (origText != null) {
        // Make sure the text is from original (and not from concatenated tokens)
        ChunkAnnotationUtils.annotateChunkText(cm, annotation);
        text = cm.get(CoreAnnotations.TextAnnotation.class);
      }
      Map<String, String> timexAttributes;
      try {
        timexAttributes = temporal.getTimexAttributes(timeIndex);
        if (options.includeRange) {
          SUTime.Temporal rangeTemporal = temporal.getRange();
          if (rangeTemporal != null) {
            timexAttributes.put("range", rangeTemporal.toString());
          }
        }
      } catch (Exception e) {
        logger.log(
            Level.WARNING,
            "Failed to get attributes from " + text + ", timeIndex " + timeIndex,
            e);
        continue;
      }
      Timex timex;
      try {
        timex = Timex.fromMap(text, timexAttributes);
      } catch (Exception e) {
        logger.log(
            Level.WARNING,
            "Failed to process " + text + " with attributes " + timexAttributes,
            e);
        continue;
      }
      if (timex != null) {
        // Only set the annotation (and keep this CoreMap) when a Timex was actually built;
        // previously a null timex was set on the CoreMap before the null check
        cm.set(TimexAnnotation.class, timex);
        coreMaps.add(cm);
      } else {
        logger.warning("No timex expression for: " + text);
      }
    }
  }
  return coreMaps;
}
public Object formResult() {
  Set brs = new HashSet();
  Set urs = new HashSet();
  // scan each rule / history pair
  int ruleCount = 0;
  for (Iterator pairI = rulePairs.keySet().iterator(); pairI.hasNext(); ) {
    if (ruleCount % 100 == 0) {
      System.err.println("Rules multiplied: " + ruleCount);
    }
    ruleCount++;
    Pair rulePair = (Pair) pairI.next();
    Rule baseRule = (Rule) rulePair.first;
    String baseLabel = (String) ruleToLabel.get(baseRule);
    List history = (List) rulePair.second;
    double totalProb = 0;
    for (int depth = 1; depth <= HISTORY_DEPTH() && depth <= history.size(); depth++) {
      List subHistory = history.subList(0, depth);
      double c_label = labelPairs.getCount(new Pair(baseLabel, subHistory));
      double c_rule = rulePairs.getCount(new Pair(baseRule, subHistory));
      // System.out.println("Multiplying out " + baseRule + " with history " + subHistory);
      // System.out.println("Count of " + baseLabel + " with " + subHistory + " is " + c_label);
      // System.out.println("Count of " + baseRule + " with " + subHistory + " is " + c_rule);
      double prob = (1.0 / HISTORY_DEPTH()) * (c_rule) / (c_label);
      totalProb += prob;
      for (int childDepth = 0; childDepth <= Math.min(HISTORY_DEPTH() - 1, depth); childDepth++) {
        Rule rule = specifyRule(baseRule, subHistory, childDepth);
        rule.score = (float) Math.log(totalProb);
        // System.out.println("Created " + rule + " with score " + rule.score);
        if (rule instanceof UnaryRule) {
          urs.add(rule);
        } else {
          brs.add(rule);
        }
      }
    }
  }
  System.out.println("Total states: " + stateNumberer.total());
  BinaryGrammar bg = new BinaryGrammar(stateNumberer.total());
  UnaryGrammar ug = new UnaryGrammar(stateNumberer.total());
  for (Iterator brI = brs.iterator(); brI.hasNext(); ) {
    BinaryRule br = (BinaryRule) brI.next();
    bg.addRule(br);
  }
  for (Iterator urI = urs.iterator(); urI.hasNext(); ) {
    UnaryRule ur = (UnaryRule) urI.next();
    ug.addRule(ur);
  }
  return new Pair(ug, bg);
}
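// Scoring note (a reading of the loops above, stated as an interpretation rather than
// documented behavior): for a rule r with label l and history h, each depth d contributes
//   (1 / HISTORY_DEPTH) * count(r, h[0..d)) / count(l, h[0..d)),
// and rule.score is the log of the running sum, i.e. a uniformly weighted mixture of
// conditional rule probabilities at increasing history depths.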
public List<TimeExpression> extractTimeExpressions(CoreMap annotation, String docDateStr) {
  List<CoreMap> mergedNumbers = NumberNormalizer.findAndMergeNumbers(annotation);
  annotation.set(CoreAnnotations.NumerizedTokensAnnotation.class, mergedNumbers);

  // TODO: docDate may not have century....
  SUTime.Time docDate = timexPatterns.parseDateTime(docDateStr);

  List<? extends MatchedExpression> matchedExpressions =
      expressionExtractor.extractExpressions(annotation);
  List<TimeExpression> timeExpressions =
      new ArrayList<TimeExpression>(matchedExpressions.size());
  for (MatchedExpression expr : matchedExpressions) {
    if (expr instanceof TimeExpression) {
      timeExpressions.add((TimeExpression) expr);
    } else {
      timeExpressions.add(new TimeExpression(expr));
    }
  }

  // Add back nested time expressions for ranges....
  // For now only one level of nesting...
  if (options.includeNested) {
    List<TimeExpression> nestedTimeExpressions = new ArrayList<TimeExpression>();
    for (TimeExpression te : timeExpressions) {
      if (te.isIncludeNested()) {
        List<? extends CoreMap> children =
            te.getAnnotation().get(TimeExpression.ChildrenAnnotation.class);
        if (children != null) {
          for (CoreMap child : children) {
            TimeExpression childTe = child.get(TimeExpression.Annotation.class);
            if (childTe != null) {
              nestedTimeExpressions.add(childTe);
            }
          }
        }
      }
    }
    timeExpressions.addAll(nestedTimeExpressions);
  }
  Collections.sort(timeExpressions, MatchedExpression.EXPR_TOKEN_OFFSETS_NESTED_FIRST_COMPARATOR);
  timeExpressions = filterInvalidTimeExpressions(timeExpressions);

  // Some resolving is done even if docDate null...
  if ( /*docDate != null && */ timeExpressions != null) {
    resolveTimeExpressions(annotation, timeExpressions, docDate);
  }
  // Annotate timex
  return timeExpressions;
}
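// Usage sketch (hypothetical extractor instance; docDateStr is the document reference date
// that anchors relative expressions such as "next Tuesday"):
//
//   List<TimeExpression> exprs = extractor.extractTimeExpressions(sentence, "2013-07-14");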
protected void tallyInternalNode(Tree lt, List parents) {
  // form base rule
  String label = lt.label().value();
  Rule baseR = ltToRule(lt);
  ruleToLabel.put(baseR, label);
  // act on each history depth
  for (int depth = 0, maxDepth = Math.min(HISTORY_DEPTH(), parents.size());
      depth <= maxDepth;
      depth++) {
    List history = new ArrayList(parents.subList(0, depth));
    // tally each history level / rewrite pair
    rulePairs.incrementCount(new Pair(baseR, history), 1);
    labelPairs.incrementCount(new Pair(label, history), 1);
  }
}
private List<TimeExpression> filterInvalidTimeExpressions(List<TimeExpression> timeExprs) {
  int nfiltered = 0;
  List<TimeExpression> filtered =
      new ArrayList<TimeExpression>(timeExprs.size()); // Approximate size
  for (TimeExpression timeExpr : timeExprs) {
    if (timexPatterns.checkTimeExpression(timeExpr)) {
      filtered.add(timeExpr);
    } else {
      nfiltered++;
    }
  }
  if (nfiltered > 0) {
    logger.finest("Filtered " + nfiltered);
  }
  return filtered;
}
public SUTime.Temporal apply(MatchResult in) {
  if (in instanceof SequenceMatchResult) {
    SequenceMatchResult<CoreMap> mr = (SequenceMatchResult<CoreMap>) (in);
    if (group >= 0) {
      List<? extends CoreMap> matched = mr.groupNodes(group);
      if (matched != null) {
        // A negative nodeIndex counts from the end of the matched group (-1 is the last
        // node); a non-negative nodeIndex selects the first node
        int i = (nodeIndex >= 0) ? 0 : (matched.size() + nodeIndex);
        TimeExpression te = getTimeExpression(matched, i);
        if (te != null) {
          return te.getTemporal();
        }
      }
    }
  }
  return null;
}
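// Example (derived from the index arithmetic above): if the group matched three nodes and
// nodeIndex == -1, then i == 3 + (-1) == 2, i.e. the temporal is read from the last node.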
/**
 * Creates a combined list of Entries using the provided mapping files.
 *
 * @param mappings List of mapping files
 * @return list of Entries
 */
private static List<Entry> readEntries(
    String annotatorName,
    Set<String> noDefaultOverwriteLabels,
    boolean ignoreCase,
    boolean verbose,
    String... mappings) {
  // Unlike RegexNERClassifier, we don't bother sorting the entries.
  // We leave it to TokensRegex NER to sort out the priorities and matches
  // (typically after all the matches have been made, since for some TokensRegex expressions
  // we don't know how many tokens are matched until after the matching is done).
  List<Entry> entries = new ArrayList<>();
  TrieMap<String, Entry> seenRegexes = new TrieMap<>();
  Arrays.sort(mappings);
  for (String mapping : mappings) {
    BufferedReader rd = null;
    try {
      rd = IOUtils.readerFromString(mapping);
      readEntries(
          annotatorName,
          entries,
          seenRegexes,
          mapping,
          rd,
          noDefaultOverwriteLabels,
          ignoreCase,
          verbose);
    } catch (IOException e) {
      throw new RuntimeIOException("Couldn't read TokensRegexNER from " + mapping, e);
    } finally {
      IOUtils.closeIgnoringExceptions(rd);
    }
  }

  if (mappings.length != 1) {
    logger.log(
        "TokensRegexNERAnnotator " + annotatorName
            + ": Read " + entries.size()
            + " unique entries from " + mappings.length + " files");
  }
  return entries;
}
protected Rule specifyRule(Rule rule, List history, int childDepth) {
  Rule r;
  String topHistoryStr = historyToString(history.subList(1, history.size()));
  String bottomHistoryStr = historyToString(history.subList(0, childDepth));
  if (rule instanceof UnaryRule) {
    UnaryRule ur = new UnaryRule();
    UnaryRule urule = (UnaryRule) rule;
    ur.parent = stateNumberer.number(stateNumberer.object(urule.parent) + topHistoryStr);
    if (isSynthetic(urule.child)) {
      ur.child = stateNumberer.number(stateNumberer.object(urule.child) + topHistoryStr);
    } else if (isTag(urule.child)) {
      ur.child = urule.child;
    } else {
      ur.child = stateNumberer.number(stateNumberer.object(urule.child) + bottomHistoryStr);
    }
    r = ur;
  } else {
    BinaryRule br = new BinaryRule();
    BinaryRule brule = (BinaryRule) rule;
    br.parent = stateNumberer.number(stateNumberer.object(brule.parent) + topHistoryStr);
    if (isSynthetic(brule.leftChild)) {
      br.leftChild = stateNumberer.number(stateNumberer.object(brule.leftChild) + topHistoryStr);
    } else if (isTag(brule.leftChild)) {
      br.leftChild = brule.leftChild;
    } else {
      br.leftChild =
          stateNumberer.number(stateNumberer.object(brule.leftChild) + bottomHistoryStr);
    }
    if (isSynthetic(brule.rightChild)) {
      br.rightChild =
          stateNumberer.number(stateNumberer.object(brule.rightChild) + topHistoryStr);
    } else if (isTag(brule.rightChild)) {
      br.rightChild = brule.rightChild;
    } else {
      br.rightChild =
          stateNumberer.number(stateNumberer.object(brule.rightChild) + bottomHistoryStr);
    }
    r = br;
  }
  return r;
}
private MultiPatternMatcher<CoreMap> createPatternMatcher(
    Map<SequencePattern<CoreMap>, Entry> patternToEntry) {
  // Convert to tokensregex pattern
  int patternFlags = ignoreCase ? Pattern.CASE_INSENSITIVE : 0;
  int stringMatchFlags = ignoreCase ? NodePattern.CASE_INSENSITIVE : 0;
  Env env = TokenSequencePattern.getNewEnv();
  env.setDefaultStringPatternFlags(patternFlags);
  env.setDefaultStringMatchFlags(stringMatchFlags);
  NodePattern<String> posTagPattern =
      (validPosPattern != null && PosMatchType.MATCH_ALL_TOKENS.equals(posMatchType))
          ? new CoreMapNodePattern.StringAnnotationRegexPattern(validPosPattern)
          : null;
  List<TokenSequencePattern> patterns = new ArrayList<>(entries.size());
  for (Entry entry : entries) {
    TokenSequencePattern pattern;
    if (entry.tokensRegex != null) {
      // TODO: posTagPatterns...
      pattern = TokenSequencePattern.compile(env, entry.tokensRegex);
    } else {
      List<SequencePattern.PatternExpr> nodePatterns = new ArrayList<>();
      for (String p : entry.regex) {
        CoreMapNodePattern c = CoreMapNodePattern.valueOf(p, patternFlags);
        if (posTagPattern != null) {
          c.add(CoreAnnotations.PartOfSpeechAnnotation.class, posTagPattern);
        }
        nodePatterns.add(new SequencePattern.NodePatternExpr(c));
      }
      pattern =
          TokenSequencePattern.compile(new SequencePattern.SequencePatternExpr(nodePatterns));
    }
    if (entry.annotateGroup < 0 || entry.annotateGroup > pattern.getTotalGroups()) {
      throw new RuntimeException("Invalid match group for entry " + entry);
    }
    pattern.setPriority(entry.priority);
    patterns.add(pattern);
    patternToEntry.put(pattern, entry);
  }
  return TokenSequencePattern.getMultiPatternMatcher(patterns);
}
public List<Pair<String, Double>> selectWeightedKeysWithSampling(
    ActiveLearningSelectionCriterion criterion, int numSamples, int seed) {
  List<Pair<String, Double>> result = new ArrayList<>();
  forceTrack("Sampling Keys");
  log("" + numSamples + " to collect");

  // Get uncertainty
  forceTrack("Computing Uncertainties");
  Counter<String> weightCounter = uncertainty(criterion);
  assert weightCounter.equals(uncertainty(criterion));
  endTrack("Computing Uncertainties");

  // Compute some statistics
  startTrack("Uncertainty Histogram");
  // log(new Histogram(weightCounter, 50).toString());  // removed to make the release easier
  // (Histogram isn't in CoreNLP)
  endTrack("Uncertainty Histogram");
  double totalCount = weightCounter.totalCount();
  Random random = new Random(seed);

  // Flatten counter
  List<String> keys = new LinkedList<>();
  List<Double> weights = new LinkedList<>();
  List<String> zeroUncertaintyKeys = new LinkedList<>();
  for (Pair<String, Double> elem :
      Counters.toSortedListWithCounts(
          weightCounter,
          (o1, o2) -> {
            int value = o1.compareTo(o2);
            if (value == 0) {
              return o1.first.compareTo(o2.first);
            } else {
              return value;
            }
          })) {
    if (elem.second != 0.0
        || weightCounter.totalCount() == 0.0
        || weightCounter.size() <= numSamples) { // ignore 0 probability weights
      keys.add(elem.first);
      weights.add(elem.second);
    } else {
      zeroUncertaintyKeys.add(elem.first);
    }
  }

  // Error check
  if (Utils.assertionsEnabled()) {
    for (Double elem : weights) {
      if (!(elem >= 0 && !Double.isInfinite(elem) && !Double.isNaN(elem))) {
        throw new IllegalArgumentException("Invalid weight: " + elem);
      }
    }
  }

  // Sample
  SAMPLE_ITER:
  for (int i = 1; i <= numSamples; ++i) { // For each sample
    if (i % 1000 == 0) {
      // Debug log
      log("sampled " + (i / 1000) + "k keys");
      // Recompute total count to mitigate floating point errors
      totalCount = 0.0;
      for (double val : weights) {
        totalCount += val;
      }
    }
    if (weights.size() == 0) {
      continue;
    }
    assert totalCount >= 0.0;
    assert weights.size() == keys.size();
    double target = random.nextDouble() * totalCount;
    Iterator<String> keyIter = keys.iterator();
    Iterator<Double> weightIter = weights.iterator();
    double runningTotal = 0.0;
    while (keyIter.hasNext()) { // For each candidate
      String key = keyIter.next();
      double weight = weightIter.next();
      runningTotal += weight;
      if (target <= runningTotal) { // Select that sample
        result.add(Pair.makePair(key, weight));
        keyIter.remove();
        weightIter.remove();
        totalCount -= weight;
        continue SAMPLE_ITER; // continue sampling
      }
    }
    // We should get here only if the keys list is empty
    warn(
        "No more uncertain samples left to draw from! (target=" + target
            + " totalCount=" + totalCount
            + " size=" + keys.size() + ")");
    assert keys.size() == 0;
    if (zeroUncertaintyKeys.size() > 0) {
      result.add(Pair.makePair(zeroUncertaintyKeys.remove(0), 0.0));
    } else {
      break;
    }
  }
  endTrack("Sampling Keys");
  return result;
}
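// Standalone sketch of the same weighted-sampling-without-replacement scheme (illustrative
// only; plain JDK types instead of Counter/Pair, and without the zero-weight fallback):
//
//   static List<String> sample(Map<String, Double> weights, int n, long seed) {
//     List<String> keys = new ArrayList<>(weights.keySet());
//     Random rng = new Random(seed);
//     List<String> picked = new ArrayList<>();
//     for (int i = 0; i < n && !keys.isEmpty(); i++) {
//       double total = keys.stream().mapToDouble(weights::get).sum();
//       double target = rng.nextDouble() * total, running = 0.0;
//       for (Iterator<String> it = keys.iterator(); it.hasNext(); ) {
//         String k = it.next();
//         running += weights.get(k);
//         if (target <= running) { picked.add(k); it.remove(); break; }
//       }
//     }
//     return picked;
//   }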
private boolean checkOrigNerTags(Entry entry, List<CoreLabel> tokens, int start, int end) {
  int prevNerEndIndex = start - 1;
  int nextNerStartIndex = end;

  // Check if we found a pattern that overlaps with existing ner labels
  // tag1 tag1 x x tag2 tag2
  //      tag tag tag tag
  // Don't overwrite the old ner label if we overlap like this
  String startNer = tokens.get(start).ner();
  String endNer = tokens.get(end - 1).ner();
  if (startNer != null && !myLabels.contains(startNer)) {
    while (prevNerEndIndex >= 0) {
      // go backwards to find a different entity type
      String ner = tokens.get(prevNerEndIndex).ner();
      if (ner == null || !ner.equals(startNer)) {
        break;
      }
      prevNerEndIndex--;
    }
  }
  if (endNer != null && !myLabels.contains(endNer)) {
    while (nextNerStartIndex < tokens.size()) {
      // go forwards to find a different entity type
      String ner = tokens.get(nextNerStartIndex).ner();
      if (ner == null || !ner.equals(endNer)) {
        break;
      }
      nextNerStartIndex++;
    }
  }
  boolean overwriteOriginalNer = false;
  //noinspection StatementWithEmptyBody
  if (prevNerEndIndex != (start - 1) || nextNerStartIndex != end) {
    // Cutting across already recognized NEs - don't disturb
  } else if (startNer == null) {
    // No old ner, okay to replace
    overwriteOriginalNer = true;
  } else {
    // Check if we have one consistent NER tag:
    // if not, overwrite;
    // if consistent, overwrite only if in our set of ner tags that we overwrite
    for (int i = start + 1; i < end; i++) {
      if (!startNer.equals(tokens.get(i).ner())) {
        overwriteOriginalNer = true;
        break;
      }
    }
    if (!overwriteOriginalNer) {
      // check if the old ner type was explicitly marked as overwritable by this entry
      if (entry.overwritableTypes.contains(startNer)) {
        overwriteOriginalNer = true;
      } else {
        // if this ner type doesn't belong to the labels for which we don't overwrite the
        // default labels (noDefaultOverwriteLabels),
        // we check myLabels to see if we can overwrite this entry
        if ( /*entry.overwritableTypes.isEmpty() || */ !noDefaultOverwriteLabels.contains(
            entry.type)) {
          overwriteOriginalNer = myLabels.contains(startNer);
        }
      }
    }
  }
  return overwriteOriginalNer;
}
/**
 * Reads a list of Entries from a mapping file and updates the given entries. Line numbers start
 * from 1.
 *
 * @return the updated list of Entries
 */
private static List<Entry> readEntries(
    String annotatorName,
    List<Entry> entries,
    TrieMap<String, Entry> seenRegexes,
    String mappingFilename,
    BufferedReader mapping,
    Set<String> noDefaultOverwriteLabels,
    boolean ignoreCase,
    boolean verbose)
    throws IOException {
  int origEntriesSize = entries.size();
  int isTokensRegex = 0;
  int lineCount = 0;
  for (String line; (line = mapping.readLine()) != null; ) {
    lineCount++;
    String[] split = line.split("\t");
    if (split.length < 2 || split.length > 5) {
      throw new IllegalArgumentException(
          "Provided mapping file is in wrong format. This line is bad: " + line);
    }
    String regex = split[0].trim();
    String tokensRegex = null;
    String[] regexes = null;
    if (regex.startsWith("( ") && regex.endsWith(" )")) {
      // Tokens regex (remove start and end parenthesis)
      tokensRegex = regex.substring(1, regex.length() - 1).trim();
    } else {
      regexes = regex.split("\\s+");
    }
    String[] key = (regexes != null) ? regexes : new String[] {tokensRegex};
    if (ignoreCase) {
      String[] norm = new String[key.length];
      for (int i = 0; i < key.length; i++) {
        norm[i] = key[i].toLowerCase();
      }
      key = norm;
    }
    String type = split[1].trim();
    Set<String> overwritableTypes = Generics.newHashSet();
    double priority = 0.0;
    if (split.length >= 3) {
      overwritableTypes.addAll(Arrays.asList(split[2].trim().split("\\s*,\\s*")));
    }
    if (split.length >= 4) {
      try {
        priority = Double.parseDouble(split[3].trim());
      } catch (NumberFormatException e) {
        throw new IllegalArgumentException(
            "ERROR: Invalid priority in line " + lineCount
                + " in regexner file " + mappingFilename + ": \"" + line + "\"!",
            e);
      }
    }
    int annotateGroup = 0;
    // Get annotate group from input....
    if (split.length >= 5) {
      // Which group to take (allow for context)
      String context = split[4].trim();
      try {
        annotateGroup = Integer.parseInt(context);
      } catch (NumberFormatException e) {
        throw new IllegalArgumentException(
            "ERROR: Invalid group in line " + lineCount
                + " in regexner file " + mappingFilename + ": \"" + line + "\"!",
            e);
      }
    }

    // Print some warning about the type
    int commaPos = type.indexOf(',');
    if (commaPos > 0) {
      // Strip the "," and just take first type
      String newType = type.substring(0, commaPos).trim();
      logger.warn(
          "TokensRegexNERAnnotator " + annotatorName
              + ": Entry has multiple types: " + line
              + ". Taking type to be " + newType);
      type = newType;
    }

    Entry entry =
        new Entry(tokensRegex, regexes, type, overwritableTypes, priority, annotateGroup);

    if (seenRegexes.containsKey(key)) {
      Entry oldEntry = seenRegexes.get(key);
      if (priority > oldEntry.priority) {
        logger.warn(
            "TokensRegexNERAnnotator " + annotatorName
                + ": Replace duplicate entry (higher priority): old=" + oldEntry
                + ", new=" + entry);
      } else {
        if (!oldEntry.type.equals(type)) {
          if (verbose) {
            logger.warn(
                "TokensRegexNERAnnotator " + annotatorName
                    + ": Ignoring duplicate entry: " + split[0]
                    + ", old type = " + oldEntry.type
                    + ", new type = " + type);
          }
          // } else {
          //   if (verbose) {
          //     logger.warn("TokensRegexNERAnnotator " + annotatorName
          //         + ": Duplicate entry [ignored]: " + split[0]
          //         + ", old type = " + oldEntry.type + ", new type = " + type);
          //   }
        }
        continue;
      }
    }

    // Print some warning if label belongs to noDefaultOverwriteLabels but there is no
    // overwritable types
    if (entry.overwritableTypes.isEmpty() && noDefaultOverwriteLabels.contains(entry.type)) {
      logger.warn(
          "TokensRegexNERAnnotator " + annotatorName
              + ": Entry doesn't have overwriteable types " + entry
              + ", but entry type is in noDefaultOverwriteLabels");
    }

    entries.add(entry);
    seenRegexes.put(key, entry);
    if (entry.tokensRegex != null) isTokensRegex++;
  }

  logger.log(
      "TokensRegexNERAnnotator " + annotatorName
          + ": Read " + (entries.size() - origEntriesSize)
          + " unique entries out of " + lineCount
          + " from " + mappingFilename
          + ", " + isTokensRegex + " TokensRegex patterns.");
  return entries;
}
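// Hypothetical mapping-file lines accepted by the parser above (tab-separated fields, shown
// with <TAB>: pattern, type, optional overwritable types, optional priority, optional match
// group; a pattern wrapped in "( ... )" is treated as a TokensRegex expression, anything
// else as whitespace-separated per-token regexes):
//
//   Bachelor of (Arts|Science)<TAB>DEGREE<TAB>MISC<TAB>1.0
//   ( /University/ /of/ [{ner:LOCATION}] )<TAB>ORGANIZATION<TAB>LOCATION<TAB>2.0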