public void testChangeGaps() throws Exception {
  // LUCENE-5324: check that it is possible to change the wrapper's gaps
  final int positionGap = random().nextInt(1000);
  final int offsetGap = random().nextInt(1000);
  final Analyzer delegate = new MockAnalyzer(random());
  final Analyzer a =
      new DelegatingAnalyzerWrapper(delegate.getReuseStrategy()) {
        @Override
        protected Analyzer getWrappedAnalyzer(String fieldName) {
          return delegate;
        }

        @Override
        public int getPositionIncrementGap(String fieldName) {
          return positionGap;
        }

        @Override
        public int getOffsetGap(String fieldName) {
          return offsetGap;
        }
      };

  final RandomIndexWriter writer = new RandomIndexWriter(random(), newDirectory(), a);
  final Document doc = new Document();
  final FieldType ft = new FieldType();
  ft.setIndexOptions(IndexOptions.DOCS);
  ft.setTokenized(true);
  ft.setStoreTermVectors(true);
  ft.setStoreTermVectorPositions(true);
  ft.setStoreTermVectorOffsets(true);
  doc.add(new Field("f", "a", ft));
  doc.add(new Field("f", "a", ft));
  writer.addDocument(doc);

  final LeafReader reader = getOnlySegmentReader(writer.getReader());
  final Fields fields = reader.getTermVectors(0);
  final Terms terms = fields.terms("f");
  final TermsEnum te = terms.iterator();
  assertEquals(new BytesRef("a"), te.next());

  final PostingsEnum dpe = te.postings(null, PostingsEnum.ALL);
  assertEquals(0, dpe.nextDoc());
  assertEquals(2, dpe.freq());
  assertEquals(0, dpe.nextPosition());
  assertEquals(0, dpe.startOffset());
  final int endOffset = dpe.endOffset();
  assertEquals(1 + positionGap, dpe.nextPosition());
  assertEquals(1 + endOffset + offsetGap, dpe.endOffset());
  assertEquals(null, te.next());

  reader.close();
  writer.close();
  writer.w.getDirectory().close();
}
public TermInfo collect(String term) throws IOException {
  TermInfo info = new TermInfo();
  BytesRef luceneTerm = new BytesRef(term.getBytes());
  // this gives documents in which the term is found, but no offset information can be retrieved
  PostingsEnum postings =
      MultiFields.getTermDocsEnum(indexReader, ngramInfoFieldname, luceneTerm);

  // now go through each document
  int docId = postings.nextDoc();
  while (docId != PostingsEnum.NO_MORE_DOCS) {
    // get the term vector for that document.
    TermsEnum it = indexReader.getTermVector(docId, ngramInfoFieldname).iterator();

    // find the term of interest
    it.seekExact(luceneTerm);
    // get its posting info. this will contain offset info
    PostingsEnum postingsInDoc = it.postings(null, PostingsEnum.OFFSETS);
    postingsInDoc.nextDoc();

    Document doc = indexReader.document(docId);
    String id = doc.get(idFieldname);
    JATEDocument jd = new JATEDocument(id);
    Set<int[]> offsets = new HashSet<>();

    int totalFreq = postingsInDoc.freq();
    for (int i = 0; i < totalFreq; i++) {
      postingsInDoc.nextPosition();
      offsets.add(new int[] {postingsInDoc.startOffset(), postingsInDoc.endOffset()});
    }
    info.getOffsets().put(jd, offsets);

    docId = postings.nextDoc();
  }
  return info;
}
public void testDocsAndPositionsEnumStart() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random());
  int numIters = atLeast(3);
  MemoryIndex memory = new MemoryIndex(true, false, random().nextInt(50) * 1024 * 1024);
  for (int i = 0; i < numIters; i++) { // check reuse
    memory.addField("foo", "bar", analyzer);
    LeafReader reader = (LeafReader) memory.createSearcher().getIndexReader();
    TestUtil.checkReader(reader);
    assertEquals(1, reader.terms("foo").getSumTotalTermFreq());
    PostingsEnum disi = reader.postings(new Term("foo", "bar"), PostingsEnum.ALL);
    int docid = disi.docID();
    assertEquals(-1, docid);
    assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    assertEquals(0, disi.nextPosition());
    assertEquals(0, disi.startOffset());
    assertEquals(3, disi.endOffset());

    // now reuse and check again
    TermsEnum te = reader.terms("foo").iterator();
    assertTrue(te.seekExact(new BytesRef("bar")));
    disi = te.postings(disi);
    docid = disi.docID();
    assertEquals(-1, docid);
    assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
    reader.close();
    memory.reset();
  }
}
protected void compareTermVectors(Terms terms, Terms memTerms, String field_name)
    throws IOException {
  TermsEnum termEnum = terms.iterator();
  TermsEnum memTermEnum = memTerms.iterator();

  while (termEnum.next() != null) {
    assertNotNull(memTermEnum.next());
    assertThat(termEnum.totalTermFreq(), equalTo(memTermEnum.totalTermFreq()));

    PostingsEnum docsPosEnum = termEnum.postings(null, PostingsEnum.POSITIONS);
    PostingsEnum memDocsPosEnum = memTermEnum.postings(null, PostingsEnum.POSITIONS);
    String currentTerm = termEnum.term().utf8ToString();

    assertThat(
        "Token mismatch for field: " + field_name,
        currentTerm,
        equalTo(memTermEnum.term().utf8ToString()));

    docsPosEnum.nextDoc();
    memDocsPosEnum.nextDoc();
    int freq = docsPosEnum.freq();
    assertThat(freq, equalTo(memDocsPosEnum.freq()));
    for (int i = 0; i < freq; i++) {
      String failDesc = " (field:" + field_name + " term:" + currentTerm + ")";
      int memPos = memDocsPosEnum.nextPosition();
      int pos = docsPosEnum.nextPosition();
      assertThat("Position test failed" + failDesc, memPos, equalTo(pos));
      assertThat(
          "Start offset test failed" + failDesc,
          memDocsPosEnum.startOffset(),
          equalTo(docsPosEnum.startOffset()));
      assertThat(
          "End offset test failed" + failDesc,
          memDocsPosEnum.endOffset(),
          equalTo(docsPosEnum.endOffset()));
      // compare the memory-index payload against the on-disk payload
      assertThat(
          "Missing payload test failed" + failDesc,
          memDocsPosEnum.getPayload(),
          equalTo(docsPosEnum.getPayload()));
    }
  }
  assertNull("Still some tokens not processed", memTermEnum.next());
}
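// A hypothetical caller sketch (not part of the original source) for compareTermVectors above:
// re-analyze one document's text into a MemoryIndex and compare it against the stored term
// vector of the same field. The field name, text, and analyzer parameters are assumptions for
// illustration; the field must have been indexed with term vectors (and positions) for the
// comparison to be meaningful.
protected void compareAgainstMemoryIndex(
    IndexReader reader, int docId, String fieldName, String text, Analyzer analyzer)
    throws IOException {
  MemoryIndex memIndex = new MemoryIndex(true); // store offsets so they can be compared too
  memIndex.addField(fieldName, text, analyzer);
  LeafReader memReader = (LeafReader) memIndex.createSearcher().getIndexReader();
  Terms diskTerms = reader.getTermVector(docId, fieldName); // assumed non-null here
  Terms memTerms = memReader.terms(fieldName);
  compareTermVectors(diskTerms, memTerms, fieldName);
}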
private void initValues(Terms curTerms, PostingsEnum posEnum, int termFreq) throws IOException {
  for (int j = 0; j < termFreq; j++) {
    int nextPos = posEnum.nextPosition();
    if (curTerms.hasPositions()) {
      currentPositions[j] = nextPos;
    }
    if (curTerms.hasOffsets()) {
      currentStartOffset[j] = posEnum.startOffset();
      currentEndOffset[j] = posEnum.endOffset();
    }
    if (curTerms.hasPayloads()) {
      BytesRef curPayload = posEnum.getPayload();
      if (curPayload != null) {
        currentPayloads[j] = new BytesArray(curPayload.bytes, 0, curPayload.length);
      } else {
        currentPayloads[j] = null;
      }
    }
  }
}
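// A minimal driver sketch, not from the original source, showing how initValues above is
// typically used: walk every term of a term-vector Terms instance, position the single-document
// postings enum, and fill the per-position arrays. The instance arrays currentPositions,
// currentStartOffset, currentEndOffset and currentPayloads are assumed to be resized to termFreq
// elsewhere before each call.
private void fillAllTerms(Terms curTerms) throws IOException {
  TermsEnum termsEnum = curTerms.iterator();
  PostingsEnum posEnum = null;
  while (termsEnum.next() != null) {
    // reuse the enum between terms; request positions, offsets and payloads
    posEnum = termsEnum.postings(posEnum, PostingsEnum.ALL);
    posEnum.nextDoc(); // a term vector describes exactly one document
    int termFreq = posEnum.freq();
    initValues(curTerms, posEnum, termFreq);
    // ... consume currentPositions / currentStartOffset / currentEndOffset / currentPayloads
  }
}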
/**
 * Get all words between the specified start and end positions from the term vector.
 *
 * <p>NOTE: this may return an array smaller than the size requested if the document ends before
 * the requested end position.
 *
 * @param reader the index
 * @param doc doc id
 * @param luceneName the index field from which to use the term vector
 * @param start start position (first word we want to request)
 * @param end end position (last word we want to request)
 * @param partialOk is it okay if we're missing words in the middle, or do we need them all?
 *     (debug)
 * @return the words found, in order
 */
public static String[] getWordsFromTermVector(
    IndexReader reader, int doc, String luceneName, int start, int end, boolean partialOk) {
  // Retrieve the term position vector of the contents of this document.
  // NOTE: might be faster to retrieve all term vectors at once
  try {
    org.apache.lucene.index.Terms terms = reader.getTermVector(doc, luceneName);
    if (terms == null) {
      throw new IllegalArgumentException("Field " + luceneName + " has no Terms");
    }
    if (!terms.hasPositions())
      throw new IllegalArgumentException("Field " + luceneName + " has no position information");
    TermsEnum termsEnum = terms.iterator();

    // Collect concordance words from the term vector
    PostingsEnum docPosEnum = null;
    int numFound = 0;
    String[] concordanceWords = new String[end - start + 1];
    while (termsEnum.next() != null) {
      docPosEnum = termsEnum.postings(null, docPosEnum, PostingsEnum.POSITIONS);
      while (docPosEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        // NOTE: docPosEnum.docID() will always return 0 here, because the enum iterates over a
        // single-document term vector.
        for (int i = 0; i < docPosEnum.freq(); i++) {
          int position = docPosEnum.nextPosition();
          if (position == -1)
            throw new RuntimeException(
                "Unexpected missing position (i=" + i + ", docPosEnum.freq() = "
                    + docPosEnum.freq() + ")");
          if (position >= start && position <= end) {
            // multiple terms at the same position are joined with "|"
            if (concordanceWords[position - start] == null)
              concordanceWords[position - start] = termsEnum.term().utf8ToString();
            else concordanceWords[position - start] += "|" + termsEnum.term().utf8ToString();
            numFound++;
          }
        }
        if (numFound == concordanceWords.length) return concordanceWords;
      }
    }
    if (numFound < concordanceWords.length && !partialOk) {
      // If we simply ran into the end of the document, that's okay;
      // but if words are missing in the middle, that's not.
      String[] partial = new String[numFound];
      for (int i = 0; i < numFound; i++) {
        partial[i] = concordanceWords[i];
        if (partial[i] == null) {
          throw new RuntimeException(
              "Not all words found (" + numFound + " out of " + concordanceWords.length
                  + "); missing words in the middle of concordance!");
        }
      }
      return partial;
    }
    return concordanceWords;
  } catch (Exception e) {
    throw ExUtil.wrapRuntimeException(e);
  }
}
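// A minimal, hypothetical usage sketch for getWordsFromTermVector above (the field name
// "contents" is an assumption; it must have been indexed with term vectors and positions).
// Prints the first ten word positions of one document, tolerating documents shorter than that.
public static void printFirstWords(IndexReader reader, int docId) {
  String[] words = getWordsFromTermVector(reader, docId, "contents", 0, 9, true);
  StringBuilder sb = new StringBuilder();
  for (String word : words) {
    if (word != null) { // positions past the end of a short document stay null
      if (sb.length() > 0) sb.append(' ');
      sb.append(word);
    }
  }
  System.out.println(sb);
}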
public void testSetPosition() throws Exception {
  Analyzer analyzer =
      new Analyzer() {
        @Override
        public TokenStreamComponents createComponents(String fieldName) {
          return new TokenStreamComponents(
              new Tokenizer() {
                // TODO: use CannedTokenStream
                private final String[] TOKENS = {"1", "2", "3", "4", "5"};
                private final int[] INCREMENTS = {1, 2, 1, 0, 1};
                private int i = 0;

                PositionIncrementAttribute posIncrAtt =
                    addAttribute(PositionIncrementAttribute.class);
                CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
                OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);

                @Override
                public boolean incrementToken() {
                  if (i == TOKENS.length) return false;
                  clearAttributes();
                  termAtt.append(TOKENS[i]);
                  offsetAtt.setOffset(i, i);
                  posIncrAtt.setPositionIncrement(INCREMENTS[i]);
                  i++;
                  return true;
                }

                @Override
                public void reset() throws IOException {
                  super.reset();
                  this.i = 0;
                }
              });
        }
      };

  Directory store = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), store, analyzer);
  Document d = new Document();
  d.add(newTextField("field", "bogus", Field.Store.YES));
  writer.addDocument(d);

  IndexReader reader = writer.getReader();
  writer.close();

  IndexSearcher searcher = newSearcher(reader);

  PostingsEnum pos =
      MultiFields.getTermPositionsEnum(searcher.getIndexReader(), "field", new BytesRef("1"));
  pos.nextDoc();
  // first token should be at position 0
  assertEquals(0, pos.nextPosition());

  pos = MultiFields.getTermPositionsEnum(searcher.getIndexReader(), "field", new BytesRef("2"));
  pos.nextDoc();
  // second token should be at position 2
  assertEquals(2, pos.nextPosition());

  PhraseQuery q;
  ScoreDoc[] hits;

  q = new PhraseQuery("field", "1", "2");
  hits = searcher.search(q, 1000).scoreDocs;
  assertEquals(0, hits.length);

  // same as previous, using the builder with implicit positions
  PhraseQuery.Builder builder = new PhraseQuery.Builder();
  builder.add(new Term("field", "1"));
  builder.add(new Term("field", "2"));
  q = builder.build();
  hits = searcher.search(q, 1000).scoreDocs;
  assertEquals(0, hits.length);

  // same as previous, just specify positions explicitly.
  builder = new PhraseQuery.Builder();
  builder.add(new Term("field", "1"), 0);
  builder.add(new Term("field", "2"), 1);
  q = builder.build();
  hits = searcher.search(q, 1000).scoreDocs;
  assertEquals(0, hits.length);

  // specifying correct positions should find the phrase.
  builder = new PhraseQuery.Builder();
  builder.add(new Term("field", "1"), 0);
  builder.add(new Term("field", "2"), 2);
  q = builder.build();
  hits = searcher.search(q, 1000).scoreDocs;
  assertEquals(1, hits.length);

  q = new PhraseQuery("field", "2", "3");
  hits = searcher.search(q, 1000).scoreDocs;
  assertEquals(1, hits.length);

  q = new PhraseQuery("field", "3", "4");
  hits = searcher.search(q, 1000).scoreDocs;
  assertEquals(0, hits.length);

  // phrase query would find it when correct positions are specified.
  builder = new PhraseQuery.Builder();
  builder.add(new Term("field", "3"), 0);
  builder.add(new Term("field", "4"), 0);
  q = builder.build();
  hits = searcher.search(q, 1000).scoreDocs;
  assertEquals(1, hits.length);

  // phrase query should fail for a non-existing searched term,
  // even if another searched term exists in the same searched position.
  builder = new PhraseQuery.Builder();
  builder.add(new Term("field", "3"), 0);
  builder.add(new Term("field", "9"), 0);
  q = builder.build();
  hits = searcher.search(q, 1000).scoreDocs;
  assertEquals(0, hits.length);

  // multi-phrase query should succeed for a non-existing searched term,
  // because another searched term exists in the same searched position.
  MultiPhraseQuery mq = new MultiPhraseQuery();
  mq.add(new Term[] {new Term("field", "3"), new Term("field", "9")}, 0);
  hits = searcher.search(mq, 1000).scoreDocs;
  assertEquals(1, hits.length);

  q = new PhraseQuery("field", "2", "4");
  hits = searcher.search(q, 1000).scoreDocs;
  assertEquals(1, hits.length);

  q = new PhraseQuery("field", "3", "5");
  hits = searcher.search(q, 1000).scoreDocs;
  assertEquals(1, hits.length);

  q = new PhraseQuery("field", "4", "5");
  hits = searcher.search(q, 1000).scoreDocs;
  assertEquals(1, hits.length);

  q = new PhraseQuery("field", "2", "5");
  hits = searcher.search(q, 1000).scoreDocs;
  assertEquals(0, hits.length);

  reader.close();
  store.close();
}
public void testPayloadsPos0() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, new MockPayloadAnalyzer());
  Document doc = new Document();
  doc.add(new TextField("content", new StringReader("a a b c d e a f g h i j a b k k")));
  writer.addDocument(doc);

  final IndexReader readerFromWriter = writer.getReader();
  LeafReader r = SlowCompositeReaderWrapper.wrap(readerFromWriter);

  PostingsEnum tp = r.postings(new Term("content", "a"), PostingsEnum.ALL);

  int count = 0;
  assertTrue(tp.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  // "a" occurs 4 times
  assertEquals(4, tp.freq());
  assertEquals(0, tp.nextPosition());
  assertEquals(1, tp.nextPosition());
  assertEquals(3, tp.nextPosition());
  assertEquals(6, tp.nextPosition());

  // only one doc has "a"
  assertEquals(DocIdSetIterator.NO_MORE_DOCS, tp.nextDoc());

  IndexSearcher is = newSearcher(readerFromWriter);

  SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a"));
  SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k"));
  SpanQuery[] sqs = {stq1, stq2};
  SpanNearQuery snq = new SpanNearQuery(sqs, 30, false);

  count = 0;
  boolean sawZero = false;
  if (VERBOSE) {
    System.out.println("\ngetPayloadSpans test");
  }
  PayloadSpanCollector collector = new PayloadSpanCollector();
  Spans pspans = MultiSpansWrapper.wrap(is.getIndexReader(), snq, SpanWeight.Postings.PAYLOADS);
  while (pspans.nextDoc() != Spans.NO_MORE_DOCS) {
    while (pspans.nextStartPosition() != Spans.NO_MORE_POSITIONS) {
      if (VERBOSE) {
        System.out.println(
            "doc "
                + pspans.docID()
                + ": span "
                + pspans.startPosition()
                + " to "
                + pspans.endPosition());
      }
      collector.reset();
      pspans.collect(collector);
      sawZero |= pspans.startPosition() == 0;
      for (BytesRef payload : collector.payloads) {
        count++;
        if (VERBOSE) {
          System.out.println(" payload: " + Term.toString(payload));
        }
      }
    }
  }
  assertTrue(sawZero);
  assertEquals(8, count);

  // System.out.println("\ngetSpans test");
  Spans spans = MultiSpansWrapper.wrap(is.getIndexReader(), snq);
  count = 0;
  sawZero = false;
  while (spans.nextDoc() != Spans.NO_MORE_DOCS) {
    while (spans.nextStartPosition() != Spans.NO_MORE_POSITIONS) {
      count++;
      sawZero |= spans.startPosition() == 0;
      // System.out.println(spans.doc() + " - " + spans.start() + " - " + spans.end());
    }
  }
  assertEquals(4, count);
  assertTrue(sawZero);

  writer.close();
  is.getIndexReader().close();
  dir.close();
}
private void checkAllInfo(
    int numDocs,
    String[] values,
    int[] freq,
    int[][] pos,
    int[][] startOffset,
    int[][] endOffset,
    int i)
    throws IOException {
  TermVectorsRequestBuilder resp =
      client()
          .prepareTermVectors("test", "type1", Integer.toString(i))
          .setPayloads(true)
          .setOffsets(true)
          .setPositions(true)
          .setFieldStatistics(true)
          .setTermStatistics(true)
          .setSelectedFields();
  assertThat(resp.request().fieldStatistics(), equalTo(true));
  TermVectorsResponse response = resp.execute().actionGet();
  assertThat("doc id: " + i + " doesn't exist but should", response.isExists(), equalTo(true));
  Fields fields = response.getFields();
  assertThat(fields.size(), equalTo(1));
  Terms terms = fields.terms("field");
  assertThat(terms.size(), equalTo(8L));
  assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) (9 * numDocs)));
  assertThat(terms.getDocCount(), Matchers.equalTo(numDocs));
  assertThat(terms.getSumDocFreq(), equalTo((long) numDocs * values.length));
  TermsEnum iterator = terms.iterator();
  for (int j = 0; j < values.length; j++) {
    String string = values[j];
    BytesRef next = iterator.next();
    assertThat(next, Matchers.notNullValue());
    assertThat("expected " + string, string, equalTo(next.utf8ToString()));
    assertThat(next, Matchers.notNullValue());
    if (string.equals("the")) {
      assertThat(
          "expected ttf of " + string, numDocs * 2, equalTo((int) iterator.totalTermFreq()));
    } else {
      assertThat("expected ttf of " + string, numDocs, equalTo((int) iterator.totalTermFreq()));
    }

    PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL);
    assertThat(docsAndPositions.nextDoc(), equalTo(0));
    assertThat(freq[j], equalTo(docsAndPositions.freq()));
    assertThat(iterator.docFreq(), equalTo(numDocs));
    int[] termPos = pos[j];
    int[] termStartOffset = startOffset[j];
    int[] termEndOffset = endOffset[j];
    assertThat(termPos.length, equalTo(freq[j]));
    assertThat(termStartOffset.length, equalTo(freq[j]));
    assertThat(termEndOffset.length, equalTo(freq[j]));
    for (int k = 0; k < freq[j]; k++) {
      int nextPosition = docsAndPositions.nextPosition();
      assertThat("term: " + string, nextPosition, equalTo(termPos[k]));
      assertThat("term: " + string, docsAndPositions.startOffset(), equalTo(termStartOffset[k]));
      assertThat("term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
      assertThat("term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef("word")));
    }
  }
  assertThat(iterator.next(), Matchers.nullValue());

  XContentBuilder xBuilder = XContentFactory.jsonBuilder();
  xBuilder.startObject();
  response.toXContent(xBuilder, ToXContent.EMPTY_PARAMS);
  xBuilder.endObject();
  BytesStream bytesStream = xBuilder.bytesStream();
  String utf8 = bytesStream.bytes().toUtf8().replaceFirst("\"took\":\\d+,", "");
  String expectedString =
      "{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\""
          + i
          +
"\",\"_version\":1,\"found\":true,\"term_vectors\":{\"field\":{\"field_statistics\":{\"sum_doc_freq\":120,\"doc_count\":15,\"sum_ttf\":135},\"terms\":{\"brown\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":2,\"start_offset\":10,\"end_offset\":15,\"payload\":\"d29yZA==\"}]},\"dog\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":8,\"start_offset\":40,\"end_offset\":43,\"payload\":\"d29yZA==\"}]},\"fox\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":3,\"start_offset\":16,\"end_offset\":19,\"payload\":\"d29yZA==\"}]},\"jumps\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":4,\"start_offset\":20,\"end_offset\":25,\"payload\":\"d29yZA==\"}]},\"lazy\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":7,\"start_offset\":35,\"end_offset\":39,\"payload\":\"d29yZA==\"}]},\"over\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":5,\"start_offset\":26,\"end_offset\":30,\"payload\":\"d29yZA==\"}]},\"quick\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":1,\"start_offset\":4,\"end_offset\":9,\"payload\":\"d29yZA==\"}]},\"the\":{\"doc_freq\":15,\"ttf\":30,\"term_freq\":2,\"tokens\":[{\"position\":0,\"start_offset\":0,\"end_offset\":3,\"payload\":\"d29yZA==\"},{\"position\":6,\"start_offset\":31,\"end_offset\":34,\"payload\":\"d29yZA==\"}]}}}}}"; assertThat(utf8, equalTo(expectedString)); }
private void duellReaders(CompositeReader other, LeafReader memIndexReader) throws IOException {
  Fields memFields = memIndexReader.fields();
  for (String field : MultiFields.getFields(other)) {
    Terms memTerms = memFields.terms(field);
    Terms iwTerms = memIndexReader.terms(field);
    if (iwTerms == null) {
      assertNull(memTerms);
    } else {
      NumericDocValues normValues = MultiDocValues.getNormValues(other, field);
      NumericDocValues memNormValues = memIndexReader.getNormValues(field);
      if (normValues != null) {
        // mem idx always computes norms on the fly
        assertNotNull(memNormValues);
        assertEquals(normValues.get(0), memNormValues.get(0));
      }

      assertNotNull(memTerms);
      assertEquals(iwTerms.getDocCount(), memTerms.getDocCount());
      assertEquals(iwTerms.getSumDocFreq(), memTerms.getSumDocFreq());
      assertEquals(iwTerms.getSumTotalTermFreq(), memTerms.getSumTotalTermFreq());
      TermsEnum iwTermsIter = iwTerms.iterator();
      TermsEnum memTermsIter = memTerms.iterator();
      if (iwTerms.hasPositions()) {
        final boolean offsets = iwTerms.hasOffsets() && memTerms.hasOffsets();

        while (iwTermsIter.next() != null) {
          assertNotNull(memTermsIter.next());
          assertEquals(iwTermsIter.term(), memTermsIter.term());
          PostingsEnum iwDocsAndPos = iwTermsIter.postings(null, PostingsEnum.ALL);
          PostingsEnum memDocsAndPos = memTermsIter.postings(null, PostingsEnum.ALL);
          while (iwDocsAndPos.nextDoc() != PostingsEnum.NO_MORE_DOCS) {
            assertEquals(iwDocsAndPos.docID(), memDocsAndPos.nextDoc());
            assertEquals(iwDocsAndPos.freq(), memDocsAndPos.freq());
            for (int i = 0; i < iwDocsAndPos.freq(); i++) {
              assertEquals(
                  "term: " + iwTermsIter.term().utf8ToString(),
                  iwDocsAndPos.nextPosition(),
                  memDocsAndPos.nextPosition());
              if (offsets) {
                assertEquals(iwDocsAndPos.startOffset(), memDocsAndPos.startOffset());
                assertEquals(iwDocsAndPos.endOffset(), memDocsAndPos.endOffset());
              }
              if (iwTerms.hasPayloads()) {
                assertEquals(iwDocsAndPos.getPayload(), memDocsAndPos.getPayload());
              }
            }
          }
        }
      } else {
        while (iwTermsIter.next() != null) {
          // advance the memory-index enum in lockstep before comparing terms
          assertNotNull(memTermsIter.next());
          assertEquals(iwTermsIter.term(), memTermsIter.term());
          PostingsEnum iwDocsAndPos = iwTermsIter.postings(null);
          PostingsEnum memDocsAndPos = memTermsIter.postings(null);
          while (iwDocsAndPos.nextDoc() != PostingsEnum.NO_MORE_DOCS) {
            assertEquals(iwDocsAndPos.docID(), memDocsAndPos.nextDoc());
            assertEquals(iwDocsAndPos.freq(), memDocsAndPos.freq());
          }
        }
      }
    }
  }
}