public void testBackToTheFuture() throws Exception {
  Directory dir = newDirectory();
  IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, null));

  Document doc = new Document();
  doc.add(newStringField("foo", "bar", Field.Store.NO));
  iw.addDocument(doc);

  doc = new Document();
  doc.add(newStringField("foo", "baz", Field.Store.NO));
  iw.addDocument(doc);

  DirectoryReader r1 = DirectoryReader.open(iw, true);

  iw.deleteDocuments(new Term("foo", "baz"));
  DirectoryReader r2 = DirectoryReader.open(iw, true);
  FieldCache.DEFAULT.getDocTermOrds(getOnlySegmentReader(r2), "foo");

  SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(getOnlySegmentReader(r1), "foo");
  assertEquals(2, v.getValueCount());
  v.setDocument(1);
  assertEquals(1, v.nextOrd());

  iw.close();
  r1.close();
  r2.close();
  dir.close();
}
// LUCENE-3870
public void testLengthPrefixAcrossTwoPages() throws Exception {
  Directory d = newDirectory();
  IndexWriter w = new IndexWriter(d,
      new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())));
  Document doc = new Document();
  byte[] bytes = new byte[32764];
  BytesRef b = new BytesRef();
  b.bytes = bytes;
  b.length = bytes.length;
  doc.add(new SortedDocValuesField("field", b));
  w.addDocument(doc);
  bytes[0] = 1;
  w.addDocument(doc);
  w.forceMerge(1);
  DirectoryReader r = w.getReader();
  BinaryDocValues s = FieldCache.DEFAULT.getTerms(getOnlySegmentReader(r), "field");

  BytesRef bytes1 = new BytesRef();
  s.get(0, bytes1);
  assertEquals(bytes.length, bytes1.length);
  bytes[0] = 0;
  assertEquals(b, bytes1);

  s.get(1, bytes1);
  assertEquals(bytes.length, bytes1.length);
  bytes[0] = 1;
  assertEquals(b, bytes1);

  r.close();
  w.close();
  d.close();
}
public void testDocsWithField() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
  IndexWriter writer = new IndexWriter(dir, conf);
  Document doc = new Document();
  doc.add(new NumericDocValuesField("dv", 0L));
  writer.addDocument(doc);

  doc = new Document();
  doc.add(new TextField("dv", "some text", Field.Store.NO));
  doc.add(new NumericDocValuesField("dv", 0L));
  writer.addDocument(doc);

  DirectoryReader r = writer.getReader();
  writer.close();

  AtomicReader subR = r.leaves().get(0).reader();
  assertEquals(2, subR.numDocs());

  Bits bits = FieldCache.DEFAULT.getDocsWithField(subR, "dv");
  assertTrue(bits.get(0));
  assertTrue(bits.get(1));

  r.close();
  dir.close();
}
public void testMultiValuedDocValuesField() throws Exception {
  Directory d = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), d);
  Document doc = new Document();
  Field f = new NumericDocValuesField("field", 17);
  // Index doc values are single-valued so we should not
  // be able to add same field more than once:
  doc.add(f);
  doc.add(f);
  try {
    w.addDocument(doc);
    fail("didn't hit expected exception");
  } catch (IllegalArgumentException iae) {
    // expected
  }

  doc = new Document();
  doc.add(f);
  w.addDocument(doc);
  w.forceMerge(1);
  DirectoryReader r = w.getReader();
  w.close();
  assertEquals(17, FieldCache.DEFAULT.getInts(getOnlySegmentReader(r), "field", false).get(0));
  r.close();
  d.close();
}
private Hits translateHits(TopDocs topDocs, String dedupField, String sortField)
    throws IOException {
  String[] dedupValues = null;
  if (dedupField != null) {
    dedupValues = FieldCache.DEFAULT.getStrings(reader, dedupField);
  }

  ScoreDoc[] scoreDocs = topDocs.scoreDocs;
  int length = scoreDocs.length;
  Hit[] hits = new Hit[length];
  for (int i = 0; i < length; i++) {
    int doc = scoreDocs[i].doc;

    WritableComparable sortValue;  // convert value to writable
    if (sortField == null) {
      sortValue = new FloatWritable(scoreDocs[i].score);
    } else {
      Object raw = ((FieldDoc) scoreDocs[i]).fields[0];
      if (raw instanceof Integer) {
        sortValue = new IntWritable(((Integer) raw).intValue());
      } else if (raw instanceof Float) {
        sortValue = new FloatWritable(((Float) raw).floatValue());
      } else if (raw instanceof String) {
        sortValue = new Text((String) raw);
      } else {
        throw new RuntimeException("Unknown sort value type!");
      }
    }

    String dedupValue = dedupValues == null ? null : dedupValues[doc];

    hits[i] = new Hit(Integer.toString(doc), sortValue, dedupValue);
  }
  return new Hits(topDocs.totalHits, hits);
}
public DocTermsIndexDocValues(ValueSource vs, AtomicReaderContext context, String field)
    throws IOException {
  try {
    termsIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), field);
  } catch (RuntimeException e) {
    throw new DocTermsIndexException(field, e);
  }
  this.vs = vs;
}
@Override
public void setNextReader(IndexReader reader, int docBase) throws IOException {
  termsIndex = FieldCache.DEFAULT.getTermsIndex(reader, field);
  currentReaderGen++;
  assert termsIndex.numOrd() > 0;
  if (bottomSlot != -1) {
    convert(bottomSlot);
    bottomOrd = ords[bottomSlot];
  }
}
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext)
    throws IOException {
  final SortedDocValues sindex = FieldCache.DEFAULT.getTermsIndex(readerContext.reader(), field);

  // figure out what ord maps to true
  int nord = sindex.getValueCount();
  BytesRef br = new BytesRef();
  // if no values in the segment, default trueOrd to something other than -1 (missing)
  int tord = -2;
  for (int i = 0; i < nord; i++) {
    sindex.lookupOrd(i, br);
    if (br.length == 1 && br.bytes[br.offset] == 'T') {
      tord = i;
      break;
    }
  }

  final int trueOrd = tord;

  return new BoolDocValues(this) {
    @Override
    public boolean boolVal(int doc) {
      return sindex.getOrd(doc) == trueOrd;
    }

    @Override
    public boolean exists(int doc) {
      return sindex.getOrd(doc) != -1;
    }

    @Override
    public ValueFiller getValueFiller() {
      return new ValueFiller() {
        private final MutableValueBool mval = new MutableValueBool();

        @Override
        public MutableValue getValue() {
          return mval;
        }

        @Override
        public void fillValue(int doc) {
          int ord = sindex.getOrd(doc);
          mval.value = (ord == trueOrd);
          mval.exists = (ord != -1);
        }
      };
    }
  };
}
/**
 * Asserts that FieldCacheSanityChecker does not detect any problems with FieldCache.DEFAULT.
 *
 * <p>If any problems are found, they are logged to System.err (along with the msg) when the
 * Assertion is thrown.
 *
 * <p>This method is called by tearDown after every test method, however IndexReaders scoped
 * inside test methods may be garbage collected prior to this method being called, causing errors
 * to be overlooked. Tests are encouraged to keep their IndexReaders scoped at the class level, or
 * to explicitly call this method directly in the same scope as the IndexReader.
 *
 * @see FieldCacheSanityChecker
 */
protected void assertSaneFieldCaches(final String msg) {
  final CacheEntry[] entries = FieldCache.DEFAULT.getCacheEntries();
  Insanity[] insanity = null;
  try {
    try {
      insanity = FieldCacheSanityChecker.checkSanity(entries);
    } catch (RuntimeException e) {
      dumpArray(msg + ": FieldCache", entries, System.err);
      throw e;
    }
    assertEquals(msg + ": Insane FieldCache usage(s) found", 0, insanity.length);
    insanity = null;
  } finally {
    // report this in the event of any exception/failure
    // if no failure, then insanity will be null anyway
    if (null != insanity) {
      dumpArray(msg + ": Insane FieldCache usage(s)", insanity, System.err);
    }
  }
}
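// Hedged usage sketch (not from the original suite): calling assertSaneFieldCaches
// explicitly in the same scope as a method-local IndexReader, as the javadoc above
// recommends. The 'dir' variable and the test name are assumptions for illustration.
public void testWithExplicitSanityCheck() throws Exception {
  DirectoryReader reader = DirectoryReader.open(dir);  // 'dir' assumed to exist in the test class
  try {
    // ... run searches/sorts that populate FieldCache against 'reader' ...
    assertSaneFieldCaches("testWithExplicitSanityCheck");
  } finally {
    reader.close();  // close only after the sanity check has run
  }
}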
public NamedList getStatistics() {
  NamedList stats = new SimpleOrderedMap();
  CacheEntry[] entries = FieldCache.DEFAULT.getCacheEntries();
  stats.add("entries_count", entries.length);
  for (int i = 0; i < entries.length; i++) {
    CacheEntry e = entries[i];
    stats.add("entry#" + i, e.toString());
  }

  Insanity[] insanity = checker.check(entries);
  stats.add("insanity_count", insanity.length);
  for (int i = 0; i < insanity.length; i++) {
    // RAM estimation is both CPU and memory intensive... we don't want to do it unless asked.
    // we only estimate the size of insane entries
    // for (CacheEntry e : insanity[i].getCacheEntries()) {
    //   // don't re-estimate if we've already done it
    //   if (null == e.getEstimatedSize()) e.estimateSize();
    // }
    stats.add("insanity#" + i, insanity[i].toString());
  }
  return stats;
}
@Test
public void testRandomGrouping() throws Exception {
  try {
    int indexIter = 50 * RANDOM_MULTIPLIER;  // make >0 to enable test
    int queryIter = 100 * RANDOM_MULTIPLIER;

    while (--indexIter >= 0) {
      int indexSize = random.nextInt(25 * RANDOM_MULTIPLIER);
      List<FldType> types = new ArrayList<FldType>();
      types.add(new FldType("id", ONE_ONE, new SVal('A', 'Z', 4, 4)));
      types.add(new FldType("score_s1", ONE_ONE, new SVal('a', 'c', 1, 1)));  // field used to score
      types.add(new FldType("bar_s1", ONE_ONE, new SVal('a', 'z', 3, 5)));
      types.add(new FldType(FOO_STRING_FIELD, ONE_ONE, new SVal('a', 'z', 1, 2)));
      types.add(new FldType(SMALL_STRING_FIELD, ZERO_ONE,
          new SVal('a', (char) ('c' + indexSize / 10), 1, 1)));

      clearIndex();
      Map<Comparable, Doc> model = indexDocs(types, null, indexSize);

      // test with specific docs
      if (false) {
        clearIndex();
        model.clear();
        Doc d1 = createDoc(types);
        d1.getValues(SMALL_STRING_FIELD).set(0, "c");
        d1.getValues(SMALL_INT_FIELD).set(0, 5);
        d1.order = 0;
        updateJ(toJSON(d1), params("commit", "true"));
        model.put(d1.id, d1);

        d1 = createDoc(types);
        d1.getValues(SMALL_STRING_FIELD).set(0, "b");
        d1.getValues(SMALL_INT_FIELD).set(0, 5);
        d1.order = 1;
        updateJ(toJSON(d1), params("commit", "false"));
        model.put(d1.id, d1);

        d1 = createDoc(types);
        d1.getValues(SMALL_STRING_FIELD).set(0, "c");
        d1.getValues(SMALL_INT_FIELD).set(0, 5);
        d1.order = 2;
        updateJ(toJSON(d1), params("commit", "false"));
        model.put(d1.id, d1);

        d1 = createDoc(types);
        d1.getValues(SMALL_STRING_FIELD).set(0, "c");
        d1.getValues(SMALL_INT_FIELD).set(0, 5);
        d1.order = 3;
        updateJ(toJSON(d1), params("commit", "false"));
        model.put(d1.id, d1);

        d1 = createDoc(types);
        d1.getValues(SMALL_STRING_FIELD).set(0, "b");
        d1.getValues(SMALL_INT_FIELD).set(0, 2);
        d1.order = 4;
        updateJ(toJSON(d1), params("commit", "true"));
        model.put(d1.id, d1);
      }

      for (int qiter = 0; qiter < queryIter; qiter++) {
        String groupField = types.get(random.nextInt(types.size())).fname;

        int rows = random.nextInt(10) == 0 ? random.nextInt(model.size() + 2) : random.nextInt(11) - 1;
        int start = random.nextInt(5) == 0 ? random.nextInt(model.size() + 2) : random.nextInt(5);  // pick a small start normally for better coverage
        int group_limit = random.nextInt(10) == 0 ? random.nextInt(model.size() + 2) : random.nextInt(11) - 1;
        int group_offset = random.nextInt(10) == 0 ? random.nextInt(model.size() + 2) : random.nextInt(2);  // pick a small offset normally for better coverage

        String[] stringSortA = new String[1];
        Comparator<Doc> sortComparator = createSort(h.getCore().getSchema(), types, stringSortA);
        String sortStr = stringSortA[0];
        Comparator<Doc> groupComparator =
            random.nextBoolean() ? sortComparator : createSort(h.getCore().getSchema(), types, stringSortA);
        String groupSortStr = stringSortA[0];

        // since groupSortStr defaults to sortStr, we need to normalize null to "score desc" if
        // sortStr != null.
        if (groupSortStr == null && groupSortStr != sortStr) {
          groupSortStr = "score desc";
        }

        // Test specific case
        if (false) {
          groupField = SMALL_INT_FIELD;
          sortComparator = createComparator(
              Arrays.asList(createComparator(SMALL_STRING_FIELD, true, true, false, true)));
          sortStr = SMALL_STRING_FIELD + " asc";
          groupComparator = createComparator(
              Arrays.asList(createComparator(SMALL_STRING_FIELD, true, true, false, false)));
          groupSortStr = SMALL_STRING_FIELD + " asc";
          rows = 1;
          start = 0;
          group_offset = 1;
          group_limit = 1;
        }

        Map<Comparable, Grp> groups = groupBy(model.values(), groupField);

        // first sort the docs in each group
        for (Grp grp : groups.values()) {
          Collections.sort(grp.docs, groupComparator);
        }

        // now sort the groups
        // if sort != group.sort, we need to find the max doc by "sort"
        if (groupComparator != sortComparator) {
          for (Grp grp : groups.values()) grp.setMaxDoc(sortComparator);
        }

        List<Grp> sortedGroups = new ArrayList<Grp>(groups.values());
        Collections.sort(sortedGroups,
            groupComparator == sortComparator
                ? createFirstDocComparator(sortComparator)
                : createMaxDocComparator(sortComparator));

        boolean includeNGroups = random.nextBoolean();
        Object modelResponse = buildGroupedResult(h.getCore().getSchema(), sortedGroups, start,
            rows, group_offset, group_limit, includeNGroups);

        boolean truncateGroups = random.nextBoolean();
        Map<String, Integer> facetCounts = new TreeMap<String, Integer>();
        if (truncateGroups) {
          for (Grp grp : sortedGroups) {
            Doc doc = grp.docs.get(0);
            if (doc.getValues(FOO_STRING_FIELD) == null) {
              continue;
            }
            String key = doc.getFirstValue(FOO_STRING_FIELD).toString();
            boolean exists = facetCounts.containsKey(key);
            int count = exists ? facetCounts.get(key) : 0;
            facetCounts.put(key, ++count);
          }
        } else {
          for (Doc doc : model.values()) {
            if (doc.getValues(FOO_STRING_FIELD) == null) {
              continue;
            }
            for (Comparable field : doc.getValues(FOO_STRING_FIELD)) {
              String key = field.toString();
              boolean exists = facetCounts.containsKey(key);
              int count = exists ? facetCounts.get(key) : 0;
              facetCounts.put(key, ++count);
            }
          }
        }
        List<Comparable> expectedFacetResponse = new ArrayList<Comparable>();
        for (Map.Entry<String, Integer> stringIntegerEntry : facetCounts.entrySet()) {
          expectedFacetResponse.add(stringIntegerEntry.getKey());
          expectedFacetResponse.add(stringIntegerEntry.getValue());
        }

        int randomPercentage = random.nextInt(101);
        // TODO: create a random filter too
        SolrQueryRequest req = req(
            "group", "true",
            "wt", "json",
            "indent", "true",
            "echoParams", "all",
            "q", "{!func}score_f",
            "group.field", groupField,
            sortStr == null ? "nosort" : "sort", sortStr == null ? "" : sortStr,
            (groupSortStr == null || groupSortStr == sortStr) ? "noGroupsort" : "group.sort",
            groupSortStr == null ? "" : groupSortStr,
            "rows", "" + rows,
            "start", "" + start,
            "group.offset", "" + group_offset,
            "group.limit", "" + group_limit,
            GroupParams.GROUP_CACHE_PERCENTAGE, Integer.toString(randomPercentage),
            GroupParams.GROUP_TOTAL_COUNT, includeNGroups ? "true" : "false",
            "facet", "true",
            "facet.sort", "index",
            "facet.limit", "-1",
            "facet.field", FOO_STRING_FIELD,
            GroupParams.GROUP_TRUNCATE, truncateGroups ? "true" : "false",
            "facet.mincount", "1");

        String strResponse = h.query(req);

        Object realResponse = ObjectBuilder.fromJSON(strResponse);
        String err = JSONTestUtil.matchObj("/grouped/" + groupField, realResponse, modelResponse);
        if (err != null) {
          log.error("GROUPING MISMATCH: " + err
              + "\n\trequest=" + req
              + "\n\tresult=" + strResponse
              + "\n\texpected=" + JSONUtil.toJSON(modelResponse)
              + "\n\tsorted_model=" + sortedGroups);

          // re-execute the request... good for putting a breakpoint here for debugging
          String rsp = h.query(req);

          fail(err);
        }

        // assert post / pre grouping facets
        err = JSONTestUtil.matchObj("/facet_counts/facet_fields/" + FOO_STRING_FIELD,
            realResponse, expectedFacetResponse);
        if (err != null) {
          log.error("GROUPING MISMATCH: " + err
              + "\n\trequest=" + req
              + "\n\tresult=" + strResponse
              + "\n\texpected=" + JSONUtil.toJSON(expectedFacetResponse));

          // re-execute the request... good for putting a breakpoint here for debugging
          h.query(req);

          fail(err);
        }
      }  // end query iter
    }  // end index iter
  } finally {
    // B/c the facet.field is also used for grouping, we have to purge the FC to avoid FC insanity
    FieldCache.DEFAULT.purgeAllCaches();
  }
}
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
  fromDocTerms = FieldCache.DEFAULT.getTerms(context.reader(), field, false);
}
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
  docTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), field);
}
/** Whenever a new segment is searched, the comparator is notified through this method. */
@Override
public void setNextReader(IndexReader indexReader, int docBase) throws IOException {
  // the field cache can be read here for any field
  xDoc = FieldCache.DEFAULT.getInts(indexReader, "x");
  yDoc = FieldCache.DEFAULT.getInts(indexReader, "y");
}
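// Hedged sketch (assumed, not from the original source): one way the cached x/y
// arrays could be consumed elsewhere in this FieldComparator. The query origin
// (originX/originY) and the slot-indexed 'values' array are illustrative names.
@Override
public void copy(int slot, int doc) {
  long dx = xDoc[doc] - originX;  // originX/originY: hypothetical query point
  long dy = yDoc[doc] - originY;
  values[slot] = Math.sqrt((double) (dx * dx + dy * dy));  // Euclidean distance
}

@Override
public int compare(int slot1, int slot2) {
  return Double.compare(values[slot1], values[slot2]);
}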
private void verify(AtomicReader r, int[][] idToOrds, BytesRef[] termsArray, BytesRef prefixRef)
    throws Exception {
  final DocTermOrds dto = new DocTermOrds(r, r.getLiveDocs(), "field", prefixRef,
      Integer.MAX_VALUE, _TestUtil.nextInt(random(), 2, 10));

  final FieldCache.Ints docIDToID = FieldCache.DEFAULT.getInts(r, "id", false);
  /*
  for(int docID=0;docID<subR.maxDoc();docID++) {
    System.out.println("  docID=" + docID + " id=" + docIDToID[docID]);
  }
  */

  if (VERBOSE) {
    System.out.println("TEST: verify prefix=" + (prefixRef == null ? "null" : prefixRef.utf8ToString()));
    System.out.println("TEST: all TERMS:");
    TermsEnum allTE = MultiFields.getTerms(r, "field").iterator(null);
    int ord = 0;
    while (allTE.next() != null) {
      System.out.println("  ord=" + (ord++) + " term=" + allTE.term().utf8ToString());
    }
  }

  // final TermsEnum te = subR.fields().terms("field").iterator();
  final TermsEnum te = dto.getOrdTermsEnum(r);
  if (dto.numTerms() == 0) {
    if (prefixRef == null) {
      assertNull(MultiFields.getTerms(r, "field"));
    } else {
      Terms terms = MultiFields.getTerms(r, "field");
      if (terms != null) {
        TermsEnum termsEnum = terms.iterator(null);
        TermsEnum.SeekStatus result = termsEnum.seekCeil(prefixRef, false);
        if (result != TermsEnum.SeekStatus.END) {
          assertFalse(
              "term=" + termsEnum.term().utf8ToString() + " matches prefix=" + prefixRef.utf8ToString(),
              StringHelper.startsWith(termsEnum.term(), prefixRef));
        } else {
          // ok
        }
      } else {
        // ok
      }
    }
    return;
  }

  if (VERBOSE) {
    System.out.println("TEST: TERMS:");
    te.seekExact(0);
    while (true) {
      System.out.println("  ord=" + te.ord() + " term=" + te.term().utf8ToString());
      if (te.next() == null) {
        break;
      }
    }
  }

  SortedSetDocValues iter = dto.iterator(r);
  for (int docID = 0; docID < r.maxDoc(); docID++) {
    if (VERBOSE) {
      System.out.println("TEST: docID=" + docID + " of " + r.maxDoc() + " (id=" + docIDToID.get(docID) + ")");
    }
    iter.setDocument(docID);
    final int[] answers = idToOrds[docIDToID.get(docID)];
    int upto = 0;
    long ord;
    while ((ord = iter.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
      te.seekExact(ord);
      final BytesRef expected = termsArray[answers[upto++]];
      if (VERBOSE) {
        System.out.println("  exp=" + expected.utf8ToString() + " actual=" + te.term().utf8ToString());
      }
      assertEquals("expected=" + expected.utf8ToString() + " actual=" + te.term().utf8ToString() + " ord=" + ord,
          expected, te.term());
    }
    assertEquals(answers.length, upto);
  }
}
public void testNumericField() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  final int numDocs = atLeast(500);
  final Number[] answers = new Number[numDocs];
  final NumericType[] typeAnswers = new NumericType[numDocs];
  for (int id = 0; id < numDocs; id++) {
    Document doc = new Document();
    final Field nf;
    final Field sf;
    final Number answer;
    final NumericType typeAnswer;
    if (random().nextBoolean()) {
      // float/double
      if (random().nextBoolean()) {
        final float f = random().nextFloat();
        answer = Float.valueOf(f);
        nf = new FloatField("nf", f, Field.Store.NO);
        sf = new StoredField("nf", f);
        typeAnswer = NumericType.FLOAT;
      } else {
        final double d = random().nextDouble();
        answer = Double.valueOf(d);
        nf = new DoubleField("nf", d, Field.Store.NO);
        sf = new StoredField("nf", d);
        typeAnswer = NumericType.DOUBLE;
      }
    } else {
      // int/long
      if (random().nextBoolean()) {
        final int i = random().nextInt();
        answer = Integer.valueOf(i);
        nf = new IntField("nf", i, Field.Store.NO);
        sf = new StoredField("nf", i);
        typeAnswer = NumericType.INT;
      } else {
        final long l = random().nextLong();
        answer = Long.valueOf(l);
        nf = new LongField("nf", l, Field.Store.NO);
        sf = new StoredField("nf", l);
        typeAnswer = NumericType.LONG;
      }
    }
    doc.add(nf);
    doc.add(sf);
    answers[id] = answer;
    typeAnswers[id] = typeAnswer;
    FieldType ft = new FieldType(IntField.TYPE_STORED);
    ft.setNumericPrecisionStep(Integer.MAX_VALUE);
    doc.add(new IntField("id", id, ft));
    w.addDocument(doc);
  }
  final DirectoryReader r = w.getReader();
  w.close();

  assertEquals(numDocs, r.numDocs());

  for (AtomicReaderContext ctx : r.leaves()) {
    final AtomicReader sub = ctx.reader();
    final FieldCache.Ints ids = FieldCache.DEFAULT.getInts(sub, "id", false);
    for (int docID = 0; docID < sub.numDocs(); docID++) {
      final Document doc = sub.document(docID);
      final Field f = (Field) doc.getField("nf");
      assertTrue("got f=" + f, f instanceof StoredField);
      assertEquals(answers[ids.get(docID)], f.numericValue());
    }
  }
  r.close();
  dir.close();
}
private void assertNoFieldCaches() {
  // docvalues sorting should NOT create any fieldcache entries!
  assertEquals(0, FieldCache.DEFAULT.getCacheEntries().length);
}
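// Hedged companion sketch (illustrative, not from the original test): a docvalues
// sort followed by the assertion above. The reader, field name, and sort type are
// assumptions; the point is that sorting on docvalues should bypass the FieldCache.
IndexSearcher searcher = new IndexSearcher(reader);  // 'reader' assumed to exist
searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("dv", SortField.Type.LONG)));
assertNoFieldCaches();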
public void testRandom() throws Exception {
  Directory dir = newDirectory();

  final int NUM_TERMS = atLeast(20);
  final Set<BytesRef> terms = new HashSet<BytesRef>();
  while (terms.size() < NUM_TERMS) {
    final String s = _TestUtil.randomRealisticUnicodeString(random());
    // final String s = _TestUtil.randomSimpleString(random);
    if (s.length() > 0) {
      terms.add(new BytesRef(s));
    }
  }
  final BytesRef[] termsArray = terms.toArray(new BytesRef[terms.size()]);
  Arrays.sort(termsArray);

  final int NUM_DOCS = atLeast(100);

  IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));

  // Sometimes swap in codec that impls ord():
  if (random().nextInt(10) == 7) {
    // Make sure terms index has ords:
    Codec codec = _TestUtil.alwaysPostingsFormat(PostingsFormat.forName("Lucene41WithOrds"));
    conf.setCodec(codec);
  }

  final RandomIndexWriter w = new RandomIndexWriter(random(), dir, conf);

  final int[][] idToOrds = new int[NUM_DOCS][];
  final Set<Integer> ordsForDocSet = new HashSet<Integer>();

  for (int id = 0; id < NUM_DOCS; id++) {
    Document doc = new Document();

    doc.add(new IntField("id", id, Field.Store.YES));

    final int termCount = _TestUtil.nextInt(random(), 0, 20 * RANDOM_MULTIPLIER);
    while (ordsForDocSet.size() < termCount) {
      ordsForDocSet.add(random().nextInt(termsArray.length));
    }
    final int[] ordsForDoc = new int[termCount];
    int upto = 0;
    if (VERBOSE) {
      System.out.println("TEST: doc id=" + id);
    }
    for (int ord : ordsForDocSet) {
      ordsForDoc[upto++] = ord;
      Field field = newStringField("field", termsArray[ord].utf8ToString(), Field.Store.NO);
      if (VERBOSE) {
        System.out.println("  f=" + termsArray[ord].utf8ToString());
      }
      doc.add(field);
    }
    ordsForDocSet.clear();
    Arrays.sort(ordsForDoc);
    idToOrds[id] = ordsForDoc;
    w.addDocument(doc);
  }

  final DirectoryReader r = w.getReader();
  w.close();

  if (VERBOSE) {
    System.out.println("TEST: reader=" + r);
  }

  for (AtomicReaderContext ctx : r.leaves()) {
    if (VERBOSE) {
      System.out.println("\nTEST: sub=" + ctx.reader());
    }
    verify(ctx.reader(), idToOrds, termsArray, null);
  }

  // Also test top-level reader: its enum does not support
  // ord, so this forces the OrdWrapper to run:
  if (VERBOSE) {
    System.out.println("TEST: top reader");
  }
  AtomicReader slowR = SlowCompositeReaderWrapper.wrap(r);
  verify(slowR, idToOrds, termsArray, null);

  FieldCache.DEFAULT.purge(slowR);

  r.close();
  dir.close();
}
public void testSortedTermsEnum() throws IOException {
  Directory directory = newDirectory();
  Analyzer analyzer = new MockAnalyzer(random());
  IndexWriterConfig iwconfig = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
  iwconfig.setMergePolicy(newLogMergePolicy());
  RandomIndexWriter iwriter = new RandomIndexWriter(random(), directory, iwconfig);

  Document doc = new Document();
  doc.add(new StringField("field", "hello", Field.Store.NO));
  iwriter.addDocument(doc);

  doc = new Document();
  doc.add(new StringField("field", "world", Field.Store.NO));
  iwriter.addDocument(doc);

  doc = new Document();
  doc.add(new StringField("field", "beer", Field.Store.NO));
  iwriter.addDocument(doc);
  iwriter.forceMerge(1);

  DirectoryReader ireader = iwriter.getReader();
  iwriter.close();

  AtomicReader ar = getOnlySegmentReader(ireader);
  SortedSetDocValues dv = FieldCache.DEFAULT.getDocTermOrds(ar, "field");
  assertEquals(3, dv.getValueCount());

  TermsEnum termsEnum = dv.termsEnum();

  // next()
  assertEquals("beer", termsEnum.next().utf8ToString());
  assertEquals(0, termsEnum.ord());
  assertEquals("hello", termsEnum.next().utf8ToString());
  assertEquals(1, termsEnum.ord());
  assertEquals("world", termsEnum.next().utf8ToString());
  assertEquals(2, termsEnum.ord());

  // seekCeil()
  assertEquals(SeekStatus.NOT_FOUND, termsEnum.seekCeil(new BytesRef("ha!")));
  assertEquals("hello", termsEnum.term().utf8ToString());
  assertEquals(1, termsEnum.ord());
  assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(new BytesRef("beer")));
  assertEquals("beer", termsEnum.term().utf8ToString());
  assertEquals(0, termsEnum.ord());
  assertEquals(SeekStatus.END, termsEnum.seekCeil(new BytesRef("zzz")));

  // seekExact()
  assertTrue(termsEnum.seekExact(new BytesRef("beer"), true));
  assertEquals("beer", termsEnum.term().utf8ToString());
  assertEquals(0, termsEnum.ord());
  assertTrue(termsEnum.seekExact(new BytesRef("hello"), true));
  assertEquals("hello", termsEnum.term().utf8ToString());
  assertEquals(1, termsEnum.ord());
  assertTrue(termsEnum.seekExact(new BytesRef("world"), true));
  assertEquals("world", termsEnum.term().utf8ToString());
  assertEquals(2, termsEnum.ord());
  assertFalse(termsEnum.seekExact(new BytesRef("bogus"), true));

  // seek(ord)
  termsEnum.seekExact(0);
  assertEquals("beer", termsEnum.term().utf8ToString());
  assertEquals(0, termsEnum.ord());
  termsEnum.seekExact(1);
  assertEquals("hello", termsEnum.term().utf8ToString());
  assertEquals(1, termsEnum.ord());
  termsEnum.seekExact(2);
  assertEquals("world", termsEnum.term().utf8ToString());
  assertEquals(2, termsEnum.ord());

  ireader.close();
  directory.close();
}
@Override
public void setUp() throws Exception {
  super.setUp();
  // ensure there is nothing in fieldcache before test starts
  FieldCache.DEFAULT.purgeAllCaches();
}
public void init(IndexReader reader) throws IOException {
  initBiggerDiagonal(reader);
  // This line must come after initBiggerDiagonal so that Lucene can put values in the cache
  diagonalIndex = FieldCache.DEFAULT.getStrings(reader, Globals.LUCENE_DIAGONAL_INDEX);
  internalCircleRadiumIndex = FieldCache.DEFAULT.getStrings(reader, Globals.LUCENE_RADIUM_INDEX);
}